// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include <cinttypes>
#include <deque>

#include "db/builder.h"
#include "db/db_impl/db_impl.h"
#include "db/error_handler.h"
#include "db/event_helpers.h"
#include "file/sst_file_manager_impl.h"
#include "logging/logging.h"
#include "monitoring/iostats_context_imp.h"
#include "monitoring/perf_context_imp.h"
#include "monitoring/thread_status_updater.h"
#include "monitoring/thread_status_util.h"
#include "test_util/sync_point.h"
#include "util/cast_util.h"
#include "util/concurrent_task_limiter_impl.h"

namespace ROCKSDB_NAMESPACE {

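// Consults the SstFileManager (if one is installed) to check whether there
// appears to be enough free disk space for the outputs of a compaction over
// `inputs`. When the check passes, *sfm_reserved_compact_space is set to true
// to signal that the SstFileManager has accounted for the estimated
// compaction size; otherwise the cancellation is logged and counted.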
bool DBImpl::EnoughRoomForCompaction(
    ColumnFamilyData* cfd, const std::vector<CompactionInputFiles>& inputs,
    bool* sfm_reserved_compact_space, LogBuffer* log_buffer) {
  // Check if we have enough room to do the compaction
  bool enough_room = true;
#ifndef ROCKSDB_LITE
  auto sfm = static_cast<SstFileManagerImpl*>(
      immutable_db_options_.sst_file_manager.get());
  if (sfm) {
    // Pass the current bg_error_ to SFM so it can decide what checks to
    // perform. If this DB instance hasn't seen any error yet, the SFM can be
    // optimistic and not do disk space checks
    Status bg_error = error_handler_.GetBGError();
    enough_room = sfm->EnoughRoomForCompaction(cfd, inputs, bg_error);
    bg_error.PermitUncheckedError();  // bg_error is just a copy of the Status
                                      // from the error_handler_
    if (enough_room) {
      *sfm_reserved_compact_space = true;
    }
  }
#else
  (void)cfd;
  (void)inputs;
  (void)sfm_reserved_compact_space;
#endif  // ROCKSDB_LITE
  if (!enough_room) {
    // Just in case tests want to change the value of enough_room
    TEST_SYNC_POINT_CALLBACK(
        "DBImpl::BackgroundCompaction():CancelledCompaction", &enough_room);
    ROCKS_LOG_BUFFER(log_buffer,
                     "Cancelled compaction because not enough room");
    RecordTick(stats_, COMPACTION_CANCELLED, 1);
  }
  return enough_room;
}

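// Tries to acquire a task token from the column family's compaction thread
// limiter (ColumnFamilyOptions::compaction_thread_limiter). Returns true when
// no limiter is configured or a token is granted (stored in *token); returns
// false when the limiter is saturated, so the caller can defer this
// compaction.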
bool DBImpl::RequestCompactionToken(ColumnFamilyData* cfd, bool force,
                                    std::unique_ptr<TaskLimiterToken>* token,
                                    LogBuffer* log_buffer) {
  assert(*token == nullptr);
  auto limiter = static_cast<ConcurrentTaskLimiterImpl*>(
      cfd->ioptions()->compaction_thread_limiter.get());
  if (limiter == nullptr) {
    return true;
  }
  *token = limiter->GetToken(force);
  if (*token != nullptr) {
    ROCKS_LOG_BUFFER(log_buffer,
                     "Thread limiter [%s] increase [%s] compaction task, "
                     "force: %s, tasks after: %d",
                     limiter->GetName().c_str(), cfd->GetName().c_str(),
                     force ? "true" : "false", limiter->GetOutstandingTask());
    return true;
  }
  return false;
}

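// Syncs all closed WALs, i.e. every log whose number is below the current
// logfile_number_, and then fsyncs the WAL directory. Expects the db mutex to
// be held on entry; the mutex is released while the files are being synced and
// re-acquired before the logs are marked as synced (or not synced on failure).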
IOStatus DBImpl::SyncClosedLogs(JobContext* job_context) {
  TEST_SYNC_POINT("DBImpl::SyncClosedLogs:Start");
  mutex_.AssertHeld();
  autovector<log::Writer*, 1> logs_to_sync;
  uint64_t current_log_number = logfile_number_;
  while (logs_.front().number < current_log_number &&
         logs_.front().getting_synced) {
    log_sync_cv_.Wait();
  }
  for (auto it = logs_.begin();
       it != logs_.end() && it->number < current_log_number; ++it) {
    auto& log = *it;
    assert(!log.getting_synced);
    log.getting_synced = true;
    logs_to_sync.push_back(log.writer);
  }

  IOStatus io_s;
  if (!logs_to_sync.empty()) {
    mutex_.Unlock();

    assert(job_context);

    for (log::Writer* log : logs_to_sync) {
      ROCKS_LOG_INFO(immutable_db_options_.info_log,
                     "[JOB %d] Syncing log #%" PRIu64, job_context->job_id,
                     log->get_log_number());
      io_s = log->file()->Sync(immutable_db_options_.use_fsync);
      if (!io_s.ok()) {
        break;
      }

      if (immutable_db_options_.recycle_log_file_num > 0) {
        io_s = log->Close();
        if (!io_s.ok()) {
          break;
        }
      }
    }
    if (io_s.ok()) {
      io_s = directories_.GetWalDir()->FsyncWithDirOptions(
          IOOptions(), nullptr,
          DirFsyncOptions(DirFsyncOptions::FsyncReason::kNewFileSynced));
    }

    TEST_SYNC_POINT_CALLBACK("DBImpl::SyncClosedLogs:BeforeReLock",
                             /*arg=*/nullptr);
    mutex_.Lock();

    // "number <= current_log_number - 1" is equivalent to
    // "number < current_log_number".
    if (io_s.ok()) {
      io_s = status_to_io_status(MarkLogsSynced(current_log_number - 1, true));
    } else {
      MarkLogsNotSynced(current_log_number - 1);
    }
    if (!io_s.ok()) {
      TEST_SYNC_POINT("DBImpl::SyncClosedLogs:Failed");
      return io_s;
    }
  }
  TEST_SYNC_POINT("DBImpl::SyncClosedLogs:end");
  return io_s;
}

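// Flushes the immutable memtables of a single column family into an L0 SST
// file (or lets the flush job switch to mempurge). Closed WALs are synced
// first when more than one column family exists. On success, a new
// SuperVersion is installed and listeners are notified; on failure, the error
// is reported to error_handler_ so background error recovery can kick in.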
Status DBImpl::FlushMemTableToOutputFile(
    ColumnFamilyData* cfd, const MutableCFOptions& mutable_cf_options,
    bool* made_progress, JobContext* job_context,
    SuperVersionContext* superversion_context,
    std::vector<SequenceNumber>& snapshot_seqs,
    SequenceNumber earliest_write_conflict_snapshot,
    SnapshotChecker* snapshot_checker, LogBuffer* log_buffer,
    Env::Priority thread_pri) {
  mutex_.AssertHeld();
  assert(cfd);
  assert(cfd->imm());
  assert(cfd->imm()->NumNotFlushed() != 0);
  assert(cfd->imm()->IsFlushPending());
  assert(versions_);
  assert(versions_->GetColumnFamilySet());
  // If there is more than one column family, we need to make sure that
  // all the log files except the most recent one are synced. Otherwise if
  // the host crashes after flushing and before the WAL is persistent, the
  // flushed SST may contain data from write batches whose updates to
  // other (unflushed) column families are missing.
  const bool needs_to_sync_closed_wals =
      logfile_number_ > 0 &&
      versions_->GetColumnFamilySet()->NumberOfColumnFamilies() > 1;

  // If needs_to_sync_closed_wals is true, we need to record the current
  // maximum memtable ID of this column family so that a later PickMemtables()
  // call will not pick memtables whose IDs are higher. This is due to the fact
  // that SyncClosedLogs() may release the db mutex, and a memtable switch can
  // happen for this column family in the meantime. The newly created memtables
  // have their data backed by unsynced WALs, thus they cannot be included in
  // this flush job.
  // Another reason why we must record the current maximum memtable ID of this
  // column family: SyncClosedLogs() may release the db mutex, thus it's
  // possible for the application to continue to insert into memtables,
  // increasing the db's sequence number. The application may take a snapshot,
  // but this snapshot is not included in `snapshot_seqs`, which will be passed
  // to the flush job, because `snapshot_seqs` has already been computed before
  // this function starts. Recording the max memtable ID ensures that the flush
  // job does not flush a memtable without knowing about such snapshot(s).
  uint64_t max_memtable_id = needs_to_sync_closed_wals
                                 ? cfd->imm()->GetLatestMemTableID()
                                 : port::kMaxUint64;

  // If needs_to_sync_closed_wals is false, then the flush job will pick ALL
  // existing memtables of the column family when PickMemTable() is called
  // later. Although we won't call SyncClosedLogs() in this case, we may still
  // call the callbacks of the listeners, i.e. NotifyOnFlushBegin(), which also
  // releases and re-acquires the db mutex. In the meantime, the application
  // can still insert into the memtables and increase the db's sequence number.
  // The application can take a snapshot, hoping that the latest visible state
  // to this snapshot is preserved. This is hard to guarantee since the db
  // mutex is not held. This newly-created snapshot is not included in
  // `snapshot_seqs` and the flush job is unaware of its presence. Consequently,
  // the flush job may drop certain keys when generating the L0, causing
  // incorrect data to be returned for snapshot reads using this snapshot.
  // To address this, we make sure NotifyOnFlushBegin() executes after memtable
  // picking so that no new snapshot can be taken between the two functions.

  FlushJob flush_job(
      dbname_, cfd, immutable_db_options_, mutable_cf_options, max_memtable_id,
      file_options_for_compaction_, versions_.get(), &mutex_, &shutting_down_,
      snapshot_seqs, earliest_write_conflict_snapshot, snapshot_checker,
      job_context, log_buffer, directories_.GetDbDir(), GetDataDir(cfd, 0U),
      GetCompressionFlush(*cfd->ioptions(), mutable_cf_options), stats_,
      &event_logger_, mutable_cf_options.report_bg_io_stats,
      true /* sync_output_directory */, true /* write_manifest */, thread_pri,
      io_tracer_, db_id_, db_session_id_, cfd->GetFullHistoryTsLow(),
      &blob_callback_);
  FileMetaData file_meta;

  Status s;
  bool need_cancel = false;
  IOStatus log_io_s = IOStatus::OK();
  if (needs_to_sync_closed_wals) {
    // SyncClosedLogs() may unlock and re-lock the db_mutex.
    log_io_s = SyncClosedLogs(job_context);
    if (!log_io_s.ok() && !log_io_s.IsShutdownInProgress() &&
        !log_io_s.IsColumnFamilyDropped()) {
      error_handler_.SetBGError(log_io_s, BackgroundErrorReason::kFlush);
    }
  } else {
    TEST_SYNC_POINT("DBImpl::SyncClosedLogs:Skip");
  }
  s = log_io_s;

  // If the log sync failed, we do not need to pick a memtable. Otherwise,
  // num_flush_not_started_ needs to be rolled back.
  TEST_SYNC_POINT("DBImpl::FlushMemTableToOutputFile:BeforePickMemtables");
  if (s.ok()) {
    flush_job.PickMemTable();
    need_cancel = true;
  }
  TEST_SYNC_POINT_CALLBACK(
      "DBImpl::FlushMemTableToOutputFile:AfterPickMemtables", &flush_job);

#ifndef ROCKSDB_LITE
  // may temporarily unlock and lock the mutex.
  NotifyOnFlushBegin(cfd, &file_meta, mutable_cf_options, job_context->job_id);
#endif  // ROCKSDB_LITE

  bool switched_to_mempurge = false;
  // Within flush_job.Run, rocksdb may call event listener to notify
  // file creation and deletion.
  //
  // Note that flush_job.Run will unlock and lock the db_mutex,
  // and EventListener callback will be called when the db_mutex
  // is unlocked by the current thread.
  if (s.ok()) {
    s = flush_job.Run(&logs_with_prep_tracker_, &file_meta,
                      &switched_to_mempurge);
    need_cancel = false;
  }

  if (!s.ok() && need_cancel) {
    flush_job.Cancel();
  }
  IOStatus io_s = IOStatus::OK();
  io_s = flush_job.io_status();
  if (s.ok()) {
    s = io_s;
  }

  if (s.ok()) {
    InstallSuperVersionAndScheduleWork(cfd, superversion_context,
                                       mutable_cf_options);
    if (made_progress) {
      *made_progress = true;
    }

    const std::string& column_family_name = cfd->GetName();

    Version* const current = cfd->current();
    assert(current);

    const VersionStorageInfo* const storage_info = current->storage_info();
    assert(storage_info);

    VersionStorageInfo::LevelSummaryStorage tmp;
    ROCKS_LOG_BUFFER(log_buffer, "[%s] Level summary: %s\n",
                     column_family_name.c_str(),
                     storage_info->LevelSummary(&tmp));

    const auto& blob_files = storage_info->GetBlobFiles();
    if (!blob_files.empty()) {
      assert(blob_files.front());
      assert(blob_files.back());

      ROCKS_LOG_BUFFER(
          log_buffer,
          "[%s] Blob file summary: head=%" PRIu64 ", tail=%" PRIu64 "\n",
          column_family_name.c_str(), blob_files.front()->GetBlobFileNumber(),
          blob_files.back()->GetBlobFileNumber());
    }
  }

  if (!s.ok() && !s.IsShutdownInProgress() && !s.IsColumnFamilyDropped()) {
    if (!io_s.ok() && !io_s.IsShutdownInProgress() &&
        !io_s.IsColumnFamilyDropped()) {
      assert(log_io_s.ok());
      // Error while writing to MANIFEST.
      // In fact, versions_->io_status() can also be the result of renaming the
      // CURRENT file. With the current code, it's just difficult to tell. So
      // just be pessimistic and try writing to a new MANIFEST.
      // TODO: distinguish between MANIFEST write and CURRENT renaming
      if (!versions_->io_status().ok()) {
        // If WAL sync is successful (either the WAL size is 0 or there is no
        // IO error), all MANIFEST write errors are mapped to soft errors.
        // TODO: kManifestWriteNoWAL and kFlushNoWAL are misleading. Refactor
        // is needed.
        error_handler_.SetBGError(io_s,
                                  BackgroundErrorReason::kManifestWriteNoWAL);
      } else {
        // If WAL sync is successful (either the WAL size is 0 or there is no
        // IO error), all the other SST file write errors will be set as
        // kFlushNoWAL.
        error_handler_.SetBGError(io_s, BackgroundErrorReason::kFlushNoWAL);
      }
    } else {
      if (log_io_s.ok()) {
        Status new_bg_error = s;
        error_handler_.SetBGError(new_bg_error, BackgroundErrorReason::kFlush);
      }
    }
  } else {
    // If we got here, then we decided not to care about the io_s status
    // (either from never needing it or from ignoring the flush job status).
    io_s.PermitUncheckedError();
  }
  // If the flush ran smoothly and no mempurge happened,
  // install the new SST file path.
  if (s.ok() && (!switched_to_mempurge)) {
#ifndef ROCKSDB_LITE
    // may temporarily unlock and lock the mutex.
    NotifyOnFlushCompleted(cfd, mutable_cf_options,
                           flush_job.GetCommittedFlushJobsInfo());
    auto sfm = static_cast<SstFileManagerImpl*>(
        immutable_db_options_.sst_file_manager.get());
    if (sfm) {
      // Notify sst_file_manager that a new file was added
      std::string file_path = MakeTableFileName(
          cfd->ioptions()->cf_paths[0].path, file_meta.fd.GetNumber());
      // TODO (PR7798). We should only add the file to the FileManager if it
      // exists. Otherwise, some tests may fail. Ignore the error in the
      // interim.
      sfm->OnAddFile(file_path).PermitUncheckedError();
      if (sfm->IsMaxAllowedSpaceReached()) {
        Status new_bg_error =
            Status::SpaceLimit("Max allowed space was reached");
        TEST_SYNC_POINT_CALLBACK(
            "DBImpl::FlushMemTableToOutputFile:MaxAllowedSpaceReached",
            &new_bg_error);
        error_handler_.SetBGError(new_bg_error, BackgroundErrorReason::kFlush);
      }
    }
#endif  // ROCKSDB_LITE
  }
  TEST_SYNC_POINT("DBImpl::FlushMemTableToOutputFile:Finish");
  return s;
}

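// Flush entry point for a background flush job: delegates to
// AtomicFlushMemTablesToOutputFiles() when atomic_flush is enabled; otherwise
// it flushes the single column family described by bg_flush_args[0].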
Status DBImpl::FlushMemTablesToOutputFiles(
    const autovector<BGFlushArg>& bg_flush_args, bool* made_progress,
    JobContext* job_context, LogBuffer* log_buffer, Env::Priority thread_pri) {
  if (immutable_db_options_.atomic_flush) {
    return AtomicFlushMemTablesToOutputFiles(
        bg_flush_args, made_progress, job_context, log_buffer, thread_pri);
  }
  assert(bg_flush_args.size() == 1);
  std::vector<SequenceNumber> snapshot_seqs;
  SequenceNumber earliest_write_conflict_snapshot;
  SnapshotChecker* snapshot_checker;
  GetSnapshotContext(job_context, &snapshot_seqs,
                     &earliest_write_conflict_snapshot, &snapshot_checker);
  const auto& bg_flush_arg = bg_flush_args[0];
  ColumnFamilyData* cfd = bg_flush_arg.cfd_;
  // intentional infrequent copy for each flush
  MutableCFOptions mutable_cf_options_copy = *cfd->GetLatestMutableCFOptions();
  SuperVersionContext* superversion_context =
      bg_flush_arg.superversion_context_;
  Status s = FlushMemTableToOutputFile(
      cfd, mutable_cf_options_copy, made_progress, job_context,
      superversion_context, snapshot_seqs, earliest_write_conflict_snapshot,
      snapshot_checker, log_buffer, thread_pri);
  return s;
}

/*
|
|
|
|
* Atomically flushes multiple column families.
|
|
|
|
*
|
|
|
|
* For each column family, all memtables with ID smaller than or equal to the
|
|
|
|
* ID specified in bg_flush_args will be flushed. Only after all column
|
|
|
|
* families finish flush will this function commit to MANIFEST. If any of the
|
|
|
|
* column families are not flushed successfully, this function does not have
|
|
|
|
* any side-effect on the state of the database.
|
|
|
|
*/
|
|
|
|
Status DBImpl::AtomicFlushMemTablesToOutputFiles(
|
|
|
|
const autovector<BGFlushArg>& bg_flush_args, bool* made_progress,
|
2019-03-20 01:24:09 +01:00
|
|
|
JobContext* job_context, LogBuffer* log_buffer, Env::Priority thread_pri) {
|
2018-10-16 04:59:20 +02:00
|
|
|
mutex_.AssertHeld();
|
|
|
|
|
|
|
|
autovector<ColumnFamilyData*> cfds;
|
|
|
|
for (const auto& arg : bg_flush_args) {
|
|
|
|
cfds.emplace_back(arg.cfd_);
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifndef NDEBUG
|
|
|
|
for (const auto cfd : cfds) {
|
|
|
|
assert(cfd->imm()->NumNotFlushed() != 0);
|
|
|
|
assert(cfd->imm()->IsFlushPending());
|
|
|
|
}
|
|
|
|
#endif /* !NDEBUG */
|
|
|
|
|
2019-01-16 06:32:15 +01:00
|
|
|
std::vector<SequenceNumber> snapshot_seqs;
|
2018-10-16 04:59:20 +02:00
|
|
|
SequenceNumber earliest_write_conflict_snapshot;
|
2019-01-16 06:32:15 +01:00
|
|
|
SnapshotChecker* snapshot_checker;
|
|
|
|
GetSnapshotContext(job_context, &snapshot_seqs,
|
|
|
|
&earliest_write_conflict_snapshot, &snapshot_checker);
|
2018-10-16 04:59:20 +02:00
|
|
|
|
2020-03-03 01:14:00 +01:00
|
|
|
autovector<FSDirectory*> distinct_output_dirs;
|
2019-02-12 21:01:55 +01:00
|
|
|
autovector<std::string> distinct_output_dir_paths;
|
2019-10-16 19:39:00 +02:00
|
|
|
std::vector<std::unique_ptr<FlushJob>> jobs;
|
2019-01-12 02:40:44 +01:00
|
|
|
std::vector<MutableCFOptions> all_mutable_cf_options;
|
2018-10-16 04:59:20 +02:00
|
|
|
int num_cfs = static_cast<int>(cfds.size());
|
2019-01-12 02:40:44 +01:00
|
|
|
all_mutable_cf_options.reserve(num_cfs);
|
2018-10-16 04:59:20 +02:00
|
|
|
for (int i = 0; i < num_cfs; ++i) {
|
|
|
|
auto cfd = cfds[i];
|
2020-03-03 01:14:00 +01:00
|
|
|
FSDirectory* data_dir = GetDataDir(cfd, 0U);
|
2019-02-12 21:01:55 +01:00
|
|
|
const std::string& curr_path = cfd->ioptions()->cf_paths[0].path;
|
2018-10-16 04:59:20 +02:00
|
|
|
|
|
|
|
// Add to distinct output directories if eligible. Use linear search. Since
|
|
|
|
// the number of elements in the vector is not large, performance should be
|
|
|
|
// tolerable.
|
|
|
|
bool found = false;
|
2019-02-12 21:01:55 +01:00
|
|
|
for (const auto& path : distinct_output_dir_paths) {
|
|
|
|
if (path == curr_path) {
|
2018-10-16 04:59:20 +02:00
|
|
|
found = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!found) {
|
2019-02-12 21:01:55 +01:00
|
|
|
distinct_output_dir_paths.emplace_back(curr_path);
|
2018-10-16 04:59:20 +02:00
|
|
|
distinct_output_dirs.emplace_back(data_dir);
|
|
|
|
}
|
|
|
|
|
2019-01-12 02:40:44 +01:00
|
|
|
all_mutable_cf_options.emplace_back(*cfd->GetLatestMutableCFOptions());
|
|
|
|
const MutableCFOptions& mutable_cf_options = all_mutable_cf_options.back();
|
2020-12-02 18:29:50 +01:00
|
|
|
uint64_t max_memtable_id = bg_flush_args[i].max_memtable_id_;
|
2019-10-16 19:39:00 +02:00
|
|
|
jobs.emplace_back(new FlushJob(
|
2019-02-12 21:01:55 +01:00
|
|
|
dbname_, cfd, immutable_db_options_, mutable_cf_options,
|
Introduce a new storage specific Env API (#5761)
Summary:
The current Env API encompasses both storage/file operations, as well as OS related operations. Most of the APIs return a Status, which does not have enough metadata about an error, such as whether its retry-able or not, scope (i.e fault domain) of the error etc., that may be required in order to properly handle a storage error. The file APIs also do not provide enough control over the IO SLA, such as timeout, prioritization, hinting about placement and redundancy etc.
This PR separates out the file/storage APIs from Env into a new FileSystem class. The APIs are updated to return an IOStatus with metadata about the error, as well as to take an IOOptions structure as input in order to allow more control over the IO.
The user can set both ```options.env``` and ```options.file_system``` to specify that RocksDB should use the former for OS related operations and the latter for storage operations. Internally, a ```CompositeEnvWrapper``` has been introduced that inherits from ```Env``` and redirects individual methods to either an ```Env``` implementation or the ```FileSystem``` as appropriate. When options are sanitized during ```DB::Open```, ```options.env``` is replaced with a newly allocated ```CompositeEnvWrapper``` instance if both env and file_system have been specified. This way, the rest of the RocksDB code can continue to function as before.
This PR also ports PosixEnv to the new API by splitting it into two - PosixEnv and PosixFileSystem. PosixEnv is defined as a sub-class of CompositeEnvWrapper, and threading/time functions are overridden with Posix specific implementations in order to avoid an extra level of indirection.
The ```CompositeEnvWrapper``` translates ```IOStatus``` return code to ```Status```, and sets the severity to ```kSoftError``` if the io_status is retryable. The error handling code in RocksDB can then recover the DB automatically.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5761
Differential Revision: D18868376
Pulled By: anand1976
fbshipit-source-id: 39efe18a162ea746fabac6360ff529baba48486f
2019-12-13 23:47:08 +01:00
|
|
|
max_memtable_id, file_options_for_compaction_, versions_.get(), &mutex_,
|
2018-10-16 04:59:20 +02:00
|
|
|
&shutting_down_, snapshot_seqs, earliest_write_conflict_snapshot,
|
|
|
|
snapshot_checker, job_context, log_buffer, directories_.GetDbDir(),
|
|
|
|
data_dir, GetCompressionFlush(*cfd->ioptions(), mutable_cf_options),
|
|
|
|
stats_, &event_logger_, mutable_cf_options.report_bg_io_stats,
|
2019-03-20 01:24:09 +01:00
|
|
|
false /* sync_output_directory */, false /* write_manifest */,
|
2020-12-05 23:17:11 +01:00
|
|
|
thread_pri, io_tracer_, db_id_, db_session_id_,
|
2021-08-20 20:37:53 +02:00
|
|
|
cfd->GetFullHistoryTsLow(), &blob_callback_));
|
2018-10-16 04:59:20 +02:00
|
|
|
}
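Not part of this file: a minimal sketch of the user-facing side of the Env/FileSystem split described in the commit annotation above. It assumes the NewCompositeEnv() helper declared in rocksdb/env.h; a real application would substitute its own FileSystem subclass for FileSystem::Default().
```
#include <memory>

#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/file_system.h"

void OpenWithCompositeEnv() {
  // Storage operations go through this FileSystem (here the default one;
  // a custom implementation could add timeouts, fault injection, etc.).
  std::shared_ptr<rocksdb::FileSystem> fs = rocksdb::FileSystem::Default();
  // The composite Env keeps OS facilities (threads, clock) from the default
  // Env and redirects file operations to `fs`.
  std::unique_ptr<rocksdb::Env> env = rocksdb::NewCompositeEnv(fs);

  rocksdb::Options options;
  options.create_if_missing = true;
  options.env = env.get();  // the Env must outlive the DB

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/composite_env_db", &db);
  if (s.ok()) {
    delete db;
  }
}
```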
|
|
|
|
|
2019-01-31 23:28:53 +01:00
|
|
|
std::vector<FileMetaData> file_meta(num_cfs);
|
2021-08-19 02:39:00 +02:00
|
|
|
// Use of deque<bool> because vector<bool>
|
|
|
|
// is specialized and does not allow taking the address of an element (&v[i]).
|
|
|
|
std::deque<bool> switched_to_mempurge(num_cfs, false);
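As a standalone illustration of the comment above (not part of this file), the snippet below shows why a writable bool pointer can be formed from deque<bool> but not from the bit-packed vector<bool> specialization.
```
#include <deque>
#include <vector>

void SetFlag(bool* flag) { *flag = true; }

void Demo() {
  std::deque<bool> d(4, false);
  SetFlag(&d[0]);  // OK: deque<bool> stores real bool objects

  std::vector<bool> v(4, false);
  // SetFlag(&v[0]);  // ill-formed: operator[] returns a proxy, not a bool&
  v[0] = true;        // assignment through the proxy still works
}
```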
|
2018-10-16 04:59:20 +02:00
|
|
|
Status s;
|
2021-03-18 22:31:30 +01:00
|
|
|
IOStatus log_io_s = IOStatus::OK();
|
2018-10-16 04:59:20 +02:00
|
|
|
assert(num_cfs == static_cast<int>(jobs.size()));
|
|
|
|
|
|
|
|
#ifndef ROCKSDB_LITE
|
2019-01-31 23:28:53 +01:00
|
|
|
for (int i = 0; i != num_cfs; ++i) {
|
|
|
|
const MutableCFOptions& mutable_cf_options = all_mutable_cf_options.at(i);
|
2018-10-16 04:59:20 +02:00
|
|
|
// may temporarily unlock and lock the mutex.
|
|
|
|
NotifyOnFlushBegin(cfds[i], &file_meta[i], mutable_cf_options,
|
2019-10-16 19:39:00 +02:00
|
|
|
job_context->job_id);
|
2018-10-16 04:59:20 +02:00
|
|
|
}
|
2019-01-31 23:28:53 +01:00
|
|
|
#endif /* !ROCKSDB_LITE */
|
2018-10-16 04:59:20 +02:00
|
|
|
|
|
|
|
if (logfile_number_ > 0) {
|
|
|
|
// TODO (yanqin) investigate whether we should sync the closed logs for
|
|
|
|
// the single column family case.
|
2021-03-18 22:31:30 +01:00
|
|
|
log_io_s = SyncClosedLogs(job_context);
|
|
|
|
if (!log_io_s.ok() && !log_io_s.IsShutdownInProgress() &&
|
|
|
|
!log_io_s.IsColumnFamilyDropped()) {
|
|
|
|
if (total_log_size_ > 0) {
|
|
|
|
error_handler_.SetBGError(log_io_s, BackgroundErrorReason::kFlush);
|
|
|
|
} else {
|
|
|
|
// If the WAL is empty, we use a different error reason
|
|
|
|
error_handler_.SetBGError(log_io_s, BackgroundErrorReason::kFlushNoWAL);
|
|
|
|
}
|
|
|
|
}
|
2018-10-16 04:59:20 +02:00
|
|
|
}
|
2021-03-19 05:51:21 +01:00
|
|
|
s = log_io_s;
|
2018-10-16 04:59:20 +02:00
|
|
|
|
2018-10-27 00:06:44 +02:00
|
|
|
// exec_status stores the execution status of flush_jobs as
|
|
|
|
// <bool /* executed */, Status /* status code */>
|
|
|
|
autovector<std::pair<bool, Status>> exec_status;
|
Pass IOStatus to write path and set retryable IO Error as hard error in BG jobs (#6487)
Summary:
In the current code base, we use Status to get and store the returned status from the call. Specifically, for IO related functions, the current Status cannot reflect the IO Error details such as error scope, error retryable attribute, and others. With the implementation of https://github.com/facebook/rocksdb/issues/5761, we have the new Wrapper for IO, which returns IOStatus instead of Status. However, the IOStatus is purged at the lower level of write path and transferred to Status.
The first job of this PR is to pass the IOStatus to the write path (flush, WAL write, and Compaction). The second job is to identify the Retryable IO Error as HardError, and set the bg_error_ as HardError. In this case, the DB Instance becomes read only. User is informed of the Status and need to take actions to deal with it (e.g., call db->Resume()).
Pull Request resolved: https://github.com/facebook/rocksdb/pull/6487
Test Plan: Added the testing case to error_handler_fs_test. Pass make asan_check
Reviewed By: anand1976
Differential Revision: D20685017
Pulled By: zhichao-cao
fbshipit-source-id: ff85f042896243abcd6ef37877834e26f36b6eb0
2020-03-28 00:03:05 +01:00
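Not part of this file: a minimal sketch, assuming the underlying fault is transient, of how an application might react once a retryable background IO error (as described in the commit annotation above) has put the instance into read-only mode.
```
#include "rocksdb/db.h"

rocksdb::Status WriteWithRecovery(rocksdb::DB* db, const rocksdb::Slice& key,
                                  const rocksdb::Slice& value) {
  rocksdb::Status s = db->Put(rocksdb::WriteOptions(), key, value);
  if (!s.ok() && s.IsIOError()) {
    // A retryable IO error in a background job may have been promoted to a
    // hard error, making the DB read-only. Resume() attempts to clear the
    // background error and re-enable writes once the fault is gone.
    rocksdb::Status r = db->Resume();
    if (r.ok()) {
      s = db->Put(rocksdb::WriteOptions(), key, value);
    }
  }
  return s;
}
```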
|
|
|
autovector<IOStatus> io_status;
|
2021-03-18 22:31:30 +01:00
|
|
|
std::vector<bool> pick_status;
|
2018-10-27 00:06:44 +02:00
|
|
|
for (int i = 0; i != num_cfs; ++i) {
|
|
|
|
// Initially all jobs are not executed, with status OK.
|
2018-12-14 00:10:16 +01:00
|
|
|
exec_status.emplace_back(false, Status::OK());
|
Pass IOStatus to write path and set retryable IO Error as hard error in BG jobs (#6487)
2020-03-28 00:03:05 +01:00
|
|
|
io_status.emplace_back(IOStatus::OK());
|
2021-03-18 22:31:30 +01:00
|
|
|
pick_status.push_back(false);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (s.ok()) {
|
|
|
|
for (int i = 0; i != num_cfs; ++i) {
|
|
|
|
jobs[i]->PickMemTable();
|
|
|
|
pick_status[i] = true;
|
|
|
|
}
|
2018-10-27 00:06:44 +02:00
|
|
|
}
|
|
|
|
|
2018-10-16 04:59:20 +02:00
|
|
|
if (s.ok()) {
|
2021-08-19 02:39:00 +02:00
|
|
|
assert(switched_to_mempurge.size() ==
|
|
|
|
static_cast<size_t>(num_cfs));
|
2018-10-16 04:59:20 +02:00
|
|
|
// TODO (yanqin): parallelize jobs with threads.
|
2018-11-15 05:52:21 +01:00
|
|
|
for (int i = 1; i != num_cfs; ++i) {
|
2018-10-27 00:06:44 +02:00
|
|
|
exec_status[i].second =
|
2021-08-19 02:39:00 +02:00
|
|
|
jobs[i]->Run(&logs_with_prep_tracker_, &file_meta[i],
|
|
|
|
&(switched_to_mempurge.at(i)));
|
2018-10-27 00:06:44 +02:00
|
|
|
exec_status[i].first = true;
|
Pass IOStatus to write path and set retryable IO Error as hard error in BG jobs (#6487)
2020-03-28 00:03:05 +01:00
|
|
|
io_status[i] = jobs[i]->io_status();
|
2018-10-16 04:59:20 +02:00
|
|
|
}
|
2018-11-15 05:52:21 +01:00
|
|
|
if (num_cfs > 1) {
|
|
|
|
TEST_SYNC_POINT(
|
|
|
|
"DBImpl::AtomicFlushMemTablesToOutputFiles:SomeFlushJobsComplete:1");
|
|
|
|
TEST_SYNC_POINT(
|
|
|
|
"DBImpl::AtomicFlushMemTablesToOutputFiles:SomeFlushJobsComplete:2");
|
|
|
|
}
|
2019-10-16 19:39:00 +02:00
|
|
|
assert(exec_status.size() > 0);
|
|
|
|
assert(!file_meta.empty());
|
2021-08-19 02:39:00 +02:00
|
|
|
exec_status[0].second = jobs[0]->Run(
|
|
|
|
&logs_with_prep_tracker_, file_meta.data() /* &file_meta[0] */,
|
|
|
|
switched_to_mempurge.empty() ? nullptr : &(switched_to_mempurge.at(0)));
|
2018-12-14 00:10:16 +01:00
|
|
|
exec_status[0].first = true;
|
Pass IOStatus to write path and set retryable IO Error as hard error in BG jobs (#6487)
2020-03-28 00:03:05 +01:00
|
|
|
io_status[0] = jobs[0]->io_status();
|
2018-12-14 00:10:16 +01:00
|
|
|
|
|
|
|
Status error_status;
|
|
|
|
for (const auto& e : exec_status) {
|
|
|
|
if (!e.second.ok()) {
|
|
|
|
s = e.second;
|
2019-05-20 19:37:37 +02:00
|
|
|
if (!e.second.IsShutdownInProgress() &&
|
|
|
|
!e.second.IsColumnFamilyDropped()) {
|
2018-12-14 00:10:16 +01:00
|
|
|
// If a flush job did not return OK, and the CF is not dropped, and
|
|
|
|
// the DB is not shutting down, then we have to return this result to
|
|
|
|
// the caller later.
|
|
|
|
error_status = e.second;
|
|
|
|
}
|
2018-11-15 05:52:21 +01:00
|
|
|
}
|
|
|
|
}
|
2018-12-14 00:10:16 +01:00
|
|
|
|
|
|
|
s = error_status.ok() ? s : error_status;
|
2018-10-16 04:59:20 +02:00
|
|
|
}
|
|
|
|
|
2021-03-18 22:31:30 +01:00
|
|
|
IOStatus io_s = IOStatus::OK();
|
Pass IOStatus to write path and set retryable IO Error as hard error in BG jobs (#6487)
2020-03-28 00:03:05 +01:00
|
|
|
if (io_s.ok()) {
|
|
|
|
IOStatus io_error = IOStatus::OK();
|
|
|
|
for (int i = 0; i != static_cast<int>(io_status.size()); i++) {
|
|
|
|
if (!io_status[i].ok() && !io_status[i].IsShutdownInProgress() &&
|
|
|
|
!io_status[i].IsColumnFamilyDropped()) {
|
|
|
|
io_error = io_status[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
io_s = io_error;
|
|
|
|
if (s.ok() && !io_s.ok()) {
|
|
|
|
s = io_s;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-20 19:37:37 +02:00
|
|
|
if (s.IsColumnFamilyDropped()) {
|
2019-04-29 21:29:57 +02:00
|
|
|
s = Status::OK();
|
|
|
|
}
|
|
|
|
|
2020-02-07 19:50:17 +01:00
|
|
|
if (s.ok() || s.IsShutdownInProgress()) {
|
2018-10-16 04:59:20 +02:00
|
|
|
// Sync on all distinct output directories.
|
|
|
|
for (auto dir : distinct_output_dirs) {
|
|
|
|
if (dir != nullptr) {
|
2021-11-03 20:20:19 +01:00
|
|
|
Status error_status = dir->FsyncWithDirOptions(
|
|
|
|
IOOptions(), nullptr,
|
|
|
|
DirFsyncOptions(DirFsyncOptions::FsyncReason::kNewFileSynced));
|
2019-04-29 21:29:57 +02:00
|
|
|
if (!error_status.ok()) {
|
|
|
|
s = error_status;
|
2018-10-16 04:59:20 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-02-07 19:50:17 +01:00
|
|
|
} else {
|
|
|
|
// Need to undo atomic flush if something went wrong, i.e. s is not OK and
|
|
|
|
// it is not because of CF drop.
|
|
|
|
// Have to cancel the flush jobs that have NOT executed because we need to
|
|
|
|
// unref the versions.
|
|
|
|
for (int i = 0; i != num_cfs; ++i) {
|
2021-03-18 22:31:30 +01:00
|
|
|
if (pick_status[i] && !exec_status[i].first) {
|
2020-02-07 19:50:17 +01:00
|
|
|
jobs[i]->Cancel();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for (int i = 0; i != num_cfs; ++i) {
|
2021-03-19 05:51:21 +01:00
|
|
|
if (exec_status[i].second.ok() && exec_status[i].first) {
|
2020-02-07 19:50:17 +01:00
|
|
|
auto& mems = jobs[i]->GetMemTables();
|
|
|
|
cfds[i]->imm()->RollbackMemtableFlush(mems,
|
|
|
|
file_meta[i].fd.GetNumber());
|
|
|
|
}
|
|
|
|
}
|
2019-01-04 05:53:52 +01:00
|
|
|
}
|
2018-10-16 04:59:20 +02:00
|
|
|
|
2019-01-04 05:53:52 +01:00
|
|
|
if (s.ok()) {
|
Fix atomic flush waiting forever for MANIFEST write (#9034)
Summary:
In atomic flush, concurrent background flush threads will commit to the MANIFEST
one by one, in the order of the IDs of their picked memtables for all included column
families. Each time, a background flush thread decides whether to wait based on two
criteria:
- Is db stopped? If so, don't wait.
- Am I the one to commit the currently earliest memtable? If so, don't wait and ready to go.
When atomic flush was implemented, error writing to or syncing the MANIFEST would
cause the db to be stopped. Therefore, this background thread does not have to check
for the background error while waiting. If there has been such an error, `DBStopped()`
would have been true, and this thread will **not** wait forever.
After we improved error handling, RocksDB may map an IOError while writing to MANIFEST
to a soft error, if there is no WAL. This requires the background threads to check for
background error while waiting. Otherwise, a background flush thread may wait forever.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9034
Test Plan: make check
Reviewed By: zhichao-cao
Differential Revision: D31639225
Pulled By: riversand963
fbshipit-source-id: e9ab07c4d8f2eade238adeefe3e42dd9a5a3ebbd
2021-10-21 06:33:32 +02:00
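Not part of this file: a minimal sketch of the user-facing request that the wait loop below coordinates, assuming the DB was opened with DBOptions::atomic_flush set to true and `cfs` holds the application's column family handles.
```
#include <cassert>
#include <vector>

#include "rocksdb/db.h"

void AtomicFlushExample(rocksdb::DB* db,
                        const std::vector<rocksdb::ColumnFamilyHandle*>& cfs) {
  rocksdb::FlushOptions fo;
  fo.wait = true;  // block until the memtables have been persisted
  // With atomic_flush enabled, the listed column families are flushed as a
  // unit: either all resulting files are installed in the MANIFEST or none.
  rocksdb::Status s = db->Flush(fo, cfs);
  assert(s.ok() || s.IsShutdownInProgress());
  (void)s;
}
```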
|
|
|
const auto wait_to_install_func =
|
|
|
|
[&]() -> std::pair<Status, bool /*continue to wait*/> {
|
|
|
|
if (!versions_->io_status().ok()) {
|
|
|
|
// Something went wrong elsewhere; we cannot count on waiting for our
|
|
|
|
// turn to write/sync to MANIFEST or CURRENT. Just return.
|
|
|
|
return std::make_pair(versions_->io_status(), false);
|
|
|
|
} else if (shutting_down_.load(std::memory_order_acquire)) {
|
|
|
|
return std::make_pair(Status::ShutdownInProgress(), false);
|
|
|
|
}
|
2019-01-04 05:53:52 +01:00
|
|
|
bool ready = true;
|
|
|
|
for (size_t i = 0; i != cfds.size(); ++i) {
|
2019-10-16 19:39:00 +02:00
|
|
|
const auto& mems = jobs[i]->GetMemTables();
|
2019-01-04 05:53:52 +01:00
|
|
|
if (cfds[i]->IsDropped()) {
|
|
|
|
// If the column family is dropped, then do not wait.
|
2018-12-14 00:10:16 +01:00
|
|
|
continue;
|
2019-01-04 05:53:52 +01:00
|
|
|
} else if (!mems.empty() &&
|
|
|
|
cfds[i]->imm()->GetEarliestMemTableID() < mems[0]->GetID()) {
|
|
|
|
// If a flush job needs to install the flush result for mems and
|
|
|
|
// mems[0] is not the earliest memtable, it means another thread must
|
|
|
|
// be installing flush results for the same column family, then the
|
|
|
|
// current thread needs to wait.
|
|
|
|
ready = false;
|
|
|
|
break;
|
|
|
|
} else if (mems.empty() && cfds[i]->imm()->GetEarliestMemTableID() <=
|
|
|
|
bg_flush_args[i].max_memtable_id_) {
|
|
|
|
// If a flush job does not need to install flush results, then it has
|
|
|
|
// to wait until all memtables up to max_memtable_id_ (inclusive) are
|
|
|
|
// installed.
|
|
|
|
ready = false;
|
|
|
|
break;
|
2018-12-14 00:10:16 +01:00
|
|
|
}
|
2018-10-16 04:59:20 +02:00
|
|
|
}
|
Fix atomic flush waiting forever for MANIFEST write (#9034)
2021-10-21 06:33:32 +02:00
|
|
|
return std::make_pair(Status::OK(), !ready);
|
2019-01-04 05:53:52 +01:00
|
|
|
};
|
|
|
|
|
|
|
|
bool resuming_from_bg_err = error_handler_.IsDBStopped();
|
Fix atomic flush waiting forever for MANIFEST write (#9034)
2021-10-21 06:33:32 +02:00
|
|
|
while ((!resuming_from_bg_err || error_handler_.GetRecoveryError().ok())) {
|
|
|
|
std::pair<Status, bool> res = wait_to_install_func();
|
|
|
|
|
|
|
|
TEST_SYNC_POINT_CALLBACK(
|
|
|
|
"DBImpl::AtomicFlushMemTablesToOutputFiles:WaitToCommit", &res);
|
|
|
|
|
|
|
|
if (!res.first.ok()) {
|
|
|
|
s = res.first;
|
|
|
|
break;
|
|
|
|
} else if (!res.second) {
|
|
|
|
break;
|
|
|
|
}
|
2019-01-04 05:53:52 +01:00
|
|
|
atomic_flush_install_cv_.Wait();
|
Fix atomic flush waiting forever for MANIFEST write (#9034)
2021-10-21 06:33:32 +02:00
|
|
|
|
|
|
|
resuming_from_bg_err = error_handler_.IsDBStopped();
|
2019-01-04 05:53:52 +01:00
|
|
|
}
|
|
|
|
|
Fix atomic flush waiting forever for MANIFEST write (#9034)
2021-10-21 06:33:32 +02:00
|
|
|
if (!resuming_from_bg_err) {
|
|
|
|
// If not resuming from bg err, then we determine future action based on
|
|
|
|
// whether we hit a background error.
|
|
|
|
if (s.ok()) {
|
|
|
|
s = error_handler_.GetBGError();
|
|
|
|
}
|
|
|
|
} else if (s.ok()) {
|
|
|
|
// If resuming from bg err, we still rely on wait_to_install_func()'s
|
|
|
|
// result to determine future action. If wait_to_install_func() returns
|
|
|
|
// non-ok already, then we should not proceed to flush result
|
|
|
|
// installation.
|
|
|
|
s = error_handler_.GetRecoveryError();
|
|
|
|
}
|
2019-01-04 05:53:52 +01:00
|
|
|
}
|
2018-10-16 04:59:20 +02:00
|
|
|
|
2019-01-04 05:53:52 +01:00
|
|
|
if (s.ok()) {
|
|
|
|
autovector<ColumnFamilyData*> tmp_cfds;
|
|
|
|
autovector<const autovector<MemTable*>*> mems_list;
|
|
|
|
autovector<const MutableCFOptions*> mutable_cf_options_list;
|
2019-01-31 23:28:53 +01:00
|
|
|
autovector<FileMetaData*> tmp_file_meta;
|
2021-08-03 22:30:05 +02:00
|
|
|
autovector<std::list<std::unique_ptr<FlushJobInfo>>*>
|
|
|
|
committed_flush_jobs_info;
|
2019-01-04 05:53:52 +01:00
|
|
|
for (int i = 0; i != num_cfs; ++i) {
|
2019-10-16 19:39:00 +02:00
|
|
|
const auto& mems = jobs[i]->GetMemTables();
|
2019-01-04 05:53:52 +01:00
|
|
|
if (!cfds[i]->IsDropped() && !mems.empty()) {
|
|
|
|
tmp_cfds.emplace_back(cfds[i]);
|
|
|
|
mems_list.emplace_back(&mems);
|
2019-01-12 02:40:44 +01:00
|
|
|
mutable_cf_options_list.emplace_back(&all_mutable_cf_options[i]);
|
2019-01-31 23:28:53 +01:00
|
|
|
tmp_file_meta.emplace_back(&file_meta[i]);
|
2021-08-03 22:30:05 +02:00
|
|
|
#ifndef ROCKSDB_LITE
|
|
|
|
committed_flush_jobs_info.emplace_back(
|
|
|
|
jobs[i]->GetCommittedFlushJobsInfo());
|
|
|
|
#endif //! ROCKSDB_LITE
|
2019-01-04 05:53:52 +01:00
|
|
|
}
|
2018-10-16 04:59:20 +02:00
|
|
|
}
|
2019-01-04 05:53:52 +01:00
|
|
|
|
|
|
|
s = InstallMemtableAtomicFlushResults(
|
|
|
|
nullptr /* imm_lists */, tmp_cfds, mutable_cf_options_list, mems_list,
|
2020-12-04 04:21:08 +01:00
|
|
|
versions_.get(), &logs_with_prep_tracker_, &mutex_, tmp_file_meta,
|
2021-08-03 22:30:05 +02:00
|
|
|
committed_flush_jobs_info, &job_context->memtables_to_free,
|
|
|
|
directories_.GetDbDir(), log_buffer);
|
2018-10-16 04:59:20 +02:00
|
|
|
}
|
|
|
|
|
2019-04-29 21:29:57 +02:00
|
|
|
if (s.ok()) {
|
2018-10-16 04:59:20 +02:00
|
|
|
assert(num_cfs ==
|
|
|
|
static_cast<int>(job_context->superversion_contexts.size()));
|
|
|
|
for (int i = 0; i != num_cfs; ++i) {
|
2020-09-15 06:10:09 +02:00
|
|
|
assert(cfds[i]);
|
|
|
|
|
2018-12-14 00:10:16 +01:00
|
|
|
if (cfds[i]->IsDropped()) {
|
|
|
|
continue;
|
|
|
|
}
|
2018-10-16 04:59:20 +02:00
|
|
|
InstallSuperVersionAndScheduleWork(cfds[i],
|
|
|
|
&job_context->superversion_contexts[i],
|
2019-01-31 23:28:53 +01:00
|
|
|
all_mutable_cf_options[i]);
|
2020-09-15 06:10:09 +02:00
|
|
|
|
|
|
|
const std::string& column_family_name = cfds[i]->GetName();
|
|
|
|
|
|
|
|
Version* const current = cfds[i]->current();
|
|
|
|
assert(current);
|
|
|
|
|
|
|
|
const VersionStorageInfo* const storage_info = current->storage_info();
|
|
|
|
assert(storage_info);
|
|
|
|
|
2018-10-16 04:59:20 +02:00
|
|
|
VersionStorageInfo::LevelSummaryStorage tmp;
|
|
|
|
ROCKS_LOG_BUFFER(log_buffer, "[%s] Level summary: %s\n",
|
2020-09-15 06:10:09 +02:00
|
|
|
column_family_name.c_str(),
|
|
|
|
storage_info->LevelSummary(&tmp));
|
|
|
|
|
|
|
|
const auto& blob_files = storage_info->GetBlobFiles();
|
|
|
|
if (!blob_files.empty()) {
|
Use a sorted vector instead of a map to store blob file metadata (#9526)
Summary:
The patch replaces `std::map` with a sorted `std::vector` for
`VersionStorageInfo::blob_files_` and preallocates the space
for the `vector` before saving the `BlobFileMetaData` into the
new `VersionStorageInfo` in `VersionBuilder::Rep::SaveBlobFilesTo`.
These changes reduce the time the DB mutex is held while
saving new `Version`s, and using a sorted `vector` also makes
lookups faster thanks to better memory locality.
In addition, the patch introduces helper methods
`VersionStorageInfo::GetBlobFileMetaData` and
`VersionStorageInfo::GetBlobFileMetaDataLB` that can be used by
clients to perform lookups in the `vector`, and does some general
cleanup in the parts of code where blob file metadata are used.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9526
Test Plan:
Ran `make check` and the crash test script for a while.
Performance was tested using a load-optimized benchmark (`fillseq` with vector memtable, no WAL) and small file sizes so that a significant number of files are produced:
```
numactl --interleave=all ./db_bench --benchmarks=fillseq --allow_concurrent_memtable_write=false --level0_file_num_compaction_trigger=4 --level0_slowdown_writes_trigger=20 --level0_stop_writes_trigger=30 --max_background_jobs=8 --max_write_buffer_number=8 --db=/data/ltamasi-dbbench --wal_dir=/data/ltamasi-dbbench --num=800000000 --num_levels=8 --key_size=20 --value_size=400 --block_size=8192 --cache_size=51539607552 --cache_numshardbits=6 --compression_max_dict_bytes=0 --compression_ratio=0.5 --compression_type=lz4 --bytes_per_sync=8388608 --cache_index_and_filter_blocks=1 --cache_high_pri_pool_ratio=0.5 --benchmark_write_rate_limit=0 --write_buffer_size=16777216 --target_file_size_base=16777216 --max_bytes_for_level_base=67108864 --verify_checksum=1 --delete_obsolete_files_period_micros=62914560 --max_bytes_for_level_multiplier=8 --statistics=0 --stats_per_interval=1 --stats_interval_seconds=20 --histogram=1 --memtablerep=skip_list --bloom_bits=10 --open_files=-1 --subcompactions=1 --compaction_style=0 --min_level_to_compress=3 --level_compaction_dynamic_level_bytes=true --pin_l0_filter_and_index_blocks_in_cache=1 --soft_pending_compaction_bytes_limit=167503724544 --hard_pending_compaction_bytes_limit=335007449088 --min_level_to_compress=0 --use_existing_db=0 --sync=0 --threads=1 --memtablerep=vector --allow_concurrent_memtable_write=false --disable_wal=1 --enable_blob_files=1 --blob_file_size=16777216 --min_blob_size=0 --blob_compression_type=lz4 --enable_blob_garbage_collection=1 --seed=<some value>
```
Final statistics before the patch:
```
Cumulative writes: 0 writes, 700M keys, 0 commit groups, 0.0 writes per commit group, ingest: 284.62 GB, 121.27 MB/s
Interval writes: 0 writes, 334K keys, 0 commit groups, 0.0 writes per commit group, ingest: 139.28 MB, 72.46 MB/s
```
With the patch:
```
Cumulative writes: 0 writes, 760M keys, 0 commit groups, 0.0 writes per commit group, ingest: 308.66 GB, 131.52 MB/s
Interval writes: 0 writes, 445K keys, 0 commit groups, 0.0 writes per commit group, ingest: 185.35 MB, 93.15 MB/s
```
Total time to complete the benchmark is 2611 seconds with the patch, down from 2986 secs.
Reviewed By: riversand963
Differential Revision: D34082728
Pulled By: ltamasi
fbshipit-source-id: fc598abf676dce436734d06bb9d2d99a26a004fc
2022-02-09 21:35:39 +01:00
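Not part of this file and not RocksDB's actual implementation: a generic sketch of the sorted-vector-plus-binary-search technique the commit annotation above describes, using a made-up BlobFileMetaLike type.
```
#include <algorithm>
#include <cstdint>
#include <memory>
#include <vector>

struct BlobFileMetaLike {
  uint64_t blob_file_number;
};

using MetaPtr = std::shared_ptr<BlobFileMetaLike>;

// Precondition: `files` is sorted by blob_file_number.
MetaPtr FindBlobFile(const std::vector<MetaPtr>& files, uint64_t number) {
  auto it = std::lower_bound(
      files.begin(), files.end(), number,
      [](const MetaPtr& m, uint64_t n) { return m->blob_file_number < n; });
  if (it != files.end() && (*it)->blob_file_number == number) {
    return *it;  // exact match
  }
  return nullptr;  // not present
}
```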
|
|
|
assert(blob_files.front());
|
|
|
|
assert(blob_files.back());
|
|
|
|
|
|
|
|
ROCKS_LOG_BUFFER(
|
|
|
|
log_buffer,
|
|
|
|
"[%s] Blob file summary: head=%" PRIu64 ", tail=%" PRIu64 "\n",
|
|
|
|
column_family_name.c_str(), blob_files.front()->GetBlobFileNumber(),
|
|
|
|
blob_files.back()->GetBlobFileNumber());
|
2020-09-15 06:10:09 +02:00
|
|
|
}
|
2018-10-16 04:59:20 +02:00
|
|
|
}
|
|
|
|
if (made_progress) {
|
|
|
|
*made_progress = true;
|
|
|
|
}
|
|
|
|
#ifndef ROCKSDB_LITE
|
|
|
|
auto sfm = static_cast<SstFileManagerImpl*>(
|
|
|
|
immutable_db_options_.sst_file_manager.get());
|
2019-10-16 19:39:00 +02:00
|
|
|
assert(all_mutable_cf_options.size() == static_cast<size_t>(num_cfs));
|
2020-12-24 01:54:05 +01:00
|
|
|
for (int i = 0; s.ok() && i != num_cfs; ++i) {
|
2021-08-19 02:39:00 +02:00
|
|
|
// If mempurge happened instead of a flush,
|
|
|
|
// skip the NotifyOnFlushCompleted call (no SST file was created).
|
|
|
|
if (switched_to_mempurge[i]) {
|
|
|
|
continue;
|
|
|
|
}
|
2018-12-14 00:10:16 +01:00
|
|
|
if (cfds[i]->IsDropped()) {
|
|
|
|
continue;
|
|
|
|
}
|
2019-10-16 19:39:00 +02:00
|
|
|
NotifyOnFlushCompleted(cfds[i], all_mutable_cf_options[i],
|
|
|
|
jobs[i]->GetCommittedFlushJobsInfo());
|
2018-10-16 04:59:20 +02:00
|
|
|
if (sfm) {
|
|
|
|
std::string file_path = MakeTableFileName(
|
|
|
|
cfds[i]->ioptions()->cf_paths[0].path, file_meta[i].fd.GetNumber());
|
2021-01-04 20:07:10 +01:00
|
|
|
// TODO (PR7798). We should only add the file to the FileManager if it
|
|
|
|
// exists. Otherwise, some tests may fail. Ignore the error in the
|
|
|
|
// interim.
|
|
|
|
sfm->OnAddFile(file_path).PermitUncheckedError();
|
2018-10-16 04:59:20 +02:00
|
|
|
if (sfm->IsMaxAllowedSpaceReached() &&
|
|
|
|
error_handler_.GetBGError().ok()) {
|
|
|
|
Status new_bg_error =
|
|
|
|
Status::SpaceLimit("Max allowed space was reached");
|
2020-12-08 05:09:55 +01:00
|
|
|
error_handler_.SetBGError(new_bg_error,
|
|
|
|
BackgroundErrorReason::kFlush);
|
2018-10-16 04:59:20 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif // ROCKSDB_LITE
|
|
|
|
}
|
|
|
|
|
Pass IOStatus to write path and set retryable IO Error as hard error in BG jobs (#6487)
2020-03-28 00:03:05 +01:00
|
|
|
// If the flush failed for a reason other than CF drop, report it to the
|
|
|
|
// error handler so it can decide whether and how the DB should recover.
|
|
|
|
if (!s.ok() && !s.IsColumnFamilyDropped()) {
|
2020-06-04 17:28:44 +02:00
|
|
|
if (!io_s.ok() && !io_s.IsColumnFamilyDropped()) {
|
2021-03-18 22:31:30 +01:00
|
|
|
assert(log_io_s.ok());
|
First step towards handling MANIFEST write error (#6949)
Summary:
This PR provides preliminary support for handling IO error during MANIFEST write.
File write/sync is not guaranteed to be atomic. If we encounter an IOError while writing/syncing to the MANIFEST file, we cannot be sure about the state of the MANIFEST file. The version edits may or may not have reached the file. During cleanup, if we delete the newly-generated SST files referenced by the pending version edit(s), but the version edit(s) actually are persistent in the MANIFEST, then next recovery attempt will process the version edits(s) and then fail since the SST files have already been deleted.
One approach is to truncate the MANIFEST after write/sync error, so that it is safe to delete the SST files. However, file truncation may not be supported on certain file systems. Therefore, we take the following approach.
If an IOError is detected during MANIFEST write/sync, we disable file deletions for the faulty database. Depending on whether the IOError is retryable (set by underlying file system), either RocksDB or application can call `DB::Resume()`, or simply shutdown and restart. During `Resume()`, RocksDB will try to switch to a new MANIFEST and write all existing in-memory version storage in the new file. If this succeeds, then RocksDB may proceed. If all recovery is completed, then file deletions will be re-enabled.
Note that multiple threads can call `LogAndApply()` at the same time, though only one of them will be going through the process MANIFEST write, possibly batching the version edits of other threads. When the leading MANIFEST writer finishes, all of the MANIFEST writing threads in this batch will have the same IOError. They will all call `ErrorHandler::SetBGError()` in which file deletion will be disabled.
Possible future directions:
- Add an `ErrorContext` structure so that it is easier to pass more info to `ErrorHandler`. Currently, as in this example, a new `BackgroundErrorReason` has to be added.
Test plan (dev server):
make check
Pull Request resolved: https://github.com/facebook/rocksdb/pull/6949
Reviewed By: anand1976
Differential Revision: D22026020
Pulled By: riversand963
fbshipit-source-id: f3c68a2ef45d9b505d0d625c7c5e0c88495b91c8
2020-06-25 04:05:47 +02:00
|
|
|
// Error while writing to MANIFEST.
|
|
|
|
// In fact, versions_->io_status() can also be the result of renaming
|
|
|
|
// the CURRENT file. With the current code it is difficult to tell, so just
|
|
|
|
// be pessimistic and try to write to a new MANIFEST.
|
|
|
|
// TODO: distinguish between MANIFEST write and CURRENT renaming
|
2020-09-18 05:22:35 +02:00
|
|
|
if (!versions_->io_status().ok()) {
|
2021-03-26 05:41:44 +01:00
|
|
|
// If WAL sync is successful (either WAL size is 0 or there is no IO
|
|
|
|
// error), any MANIFEST write error will be mapped to a soft error.
|
|
|
|
// TODO: kManifestWriteNoWAL and kFlushNoWAL are misleading. A refactor
|
|
|
|
// is needed.
|
|
|
|
error_handler_.SetBGError(io_s,
|
|
|
|
BackgroundErrorReason::kManifestWriteNoWAL);
|
2020-09-18 05:22:35 +02:00
|
|
|
} else {
|
2021-03-26 05:41:44 +01:00
|
|
|
// If WAL sync is successful (either WAL size is 0 or there is no IO
|
|
|
|
// error), all other SST file write errors will be mapped to
|
|
|
|
// kFlushNoWAL.
|
2020-12-08 05:09:55 +01:00
|
|
|
error_handler_.SetBGError(io_s, BackgroundErrorReason::kFlushNoWAL);
|
2020-09-18 05:22:35 +02:00
|
|
|
}
|
Pass IOStatus to write path and set retryable IO Error as hard error in BG jobs (#6487)
2020-03-28 00:03:05 +01:00
|
|
|
} else {
|
2021-03-18 22:31:30 +01:00
|
|
|
if (log_io_s.ok()) {
|
|
|
|
Status new_bg_error = s;
|
|
|
|
error_handler_.SetBGError(new_bg_error, BackgroundErrorReason::kFlush);
|
|
|
|
}
|
Pass IOStatus to write path and set retryable IO Error as hard error in BG jobs (#6487)
2020-03-28 00:03:05 +01:00
|
|
|
}
|
2018-10-16 04:59:20 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return s;
|
|
|
|
}
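Not part of this file: a minimal sketch (DB path and limit are made up) of configuring the SstFileManager whose IsMaxAllowedSpaceReached() check above can convert a completed flush into a Status::SpaceLimit background error.
```
#include <memory>

#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/sst_file_manager.h"

void OpenWithSpaceLimit() {
  rocksdb::Options options;
  options.create_if_missing = true;

  std::shared_ptr<rocksdb::SstFileManager> sfm(
      rocksdb::NewSstFileManager(rocksdb::Env::Default()));
  // Refuse to grow beyond roughly 10 GB of SST data.
  sfm->SetMaxAllowedSpaceUsage(10ull * 1024 * 1024 * 1024);
  options.sst_file_manager = sfm;

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/space_limited_db", &db);
  if (s.ok()) {
    delete db;
  }
}
```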
|
|
|
|
|
2017-04-18 21:00:36 +02:00
|
|
|
void DBImpl::NotifyOnFlushBegin(ColumnFamilyData* cfd, FileMetaData* file_meta,
|
|
|
|
const MutableCFOptions& mutable_cf_options,
|
2019-10-16 19:39:00 +02:00
|
|
|
int job_id) {
|
2017-04-18 21:00:36 +02:00
|
|
|
#ifndef ROCKSDB_LITE
|
|
|
|
if (immutable_db_options_.listeners.size() == 0U) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
mutex_.AssertHeld();
|
|
|
|
if (shutting_down_.load(std::memory_order_acquire)) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
bool triggered_writes_slowdown =
|
|
|
|
(cfd->current()->storage_info()->NumLevelFiles(0) >=
|
|
|
|
mutable_cf_options.level0_slowdown_writes_trigger);
|
|
|
|
bool triggered_writes_stop =
|
|
|
|
(cfd->current()->storage_info()->NumLevelFiles(0) >=
|
|
|
|
mutable_cf_options.level0_stop_writes_trigger);
|
|
|
|
// release lock while notifying events
|
|
|
|
mutex_.Unlock();
|
|
|
|
{
|
2019-11-01 19:44:59 +01:00
|
|
|
FlushJobInfo info{};
|
2018-12-12 05:25:45 +01:00
|
|
|
info.cf_id = cfd->GetID();
|
2017-04-18 21:00:36 +02:00
|
|
|
info.cf_name = cfd->GetName();
|
|
|
|
// TODO(yhchiang): make db_paths dynamic in case flush does not
|
|
|
|
// go to L0 in the future.
|
2019-10-24 23:42:43 +02:00
|
|
|
const uint64_t file_number = file_meta->fd.GetNumber();
|
|
|
|
info.file_path =
|
|
|
|
MakeTableFileName(cfd->ioptions()->cf_paths[0].path, file_number);
|
|
|
|
info.file_number = file_number;
|
2017-04-18 21:00:36 +02:00
|
|
|
info.thread_id = env_->GetThreadID();
|
|
|
|
info.job_id = job_id;
|
|
|
|
info.triggered_writes_slowdown = triggered_writes_slowdown;
|
|
|
|
info.triggered_writes_stop = triggered_writes_stop;
|
2018-07-28 01:00:26 +02:00
|
|
|
info.smallest_seqno = file_meta->fd.smallest_seqno;
|
|
|
|
info.largest_seqno = file_meta->fd.largest_seqno;
|
2018-02-09 21:09:55 +01:00
|
|
|
info.flush_reason = cfd->GetFlushReason();
|
2017-04-18 21:00:36 +02:00
|
|
|
for (auto listener : immutable_db_options_.listeners) {
|
|
|
|
listener->OnFlushBegin(this, info);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
mutex_.Lock();
|
|
|
|
// no need to signal bg_cv_ as it will be signaled at the end of the
|
|
|
|
// flush process.
|
2018-04-13 02:55:14 +02:00
|
|
|
#else
|
|
|
|
(void)cfd;
|
|
|
|
(void)file_meta;
|
|
|
|
(void)mutable_cf_options;
|
|
|
|
(void)job_id;
|
2017-04-18 21:00:36 +02:00
|
|
|
#endif // ROCKSDB_LITE
|
|
|
|
}
|
|
|
|
|
2019-10-16 19:39:00 +02:00
|
|
|
void DBImpl::NotifyOnFlushCompleted(
|
|
|
|
ColumnFamilyData* cfd, const MutableCFOptions& mutable_cf_options,
|
|
|
|
std::list<std::unique_ptr<FlushJobInfo>>* flush_jobs_info) {
|
2017-04-06 02:14:05 +02:00
|
|
|
#ifndef ROCKSDB_LITE
|
2019-10-16 19:39:00 +02:00
|
|
|
assert(flush_jobs_info != nullptr);
|
2017-04-06 02:14:05 +02:00
|
|
|
if (immutable_db_options_.listeners.size() == 0U) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
mutex_.AssertHeld();
|
|
|
|
if (shutting_down_.load(std::memory_order_acquire)) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
bool triggered_writes_slowdown =
|
|
|
|
(cfd->current()->storage_info()->NumLevelFiles(0) >=
|
|
|
|
mutable_cf_options.level0_slowdown_writes_trigger);
|
|
|
|
bool triggered_writes_stop =
|
|
|
|
(cfd->current()->storage_info()->NumLevelFiles(0) >=
|
|
|
|
mutable_cf_options.level0_stop_writes_trigger);
|
|
|
|
// release lock while notifying events
|
|
|
|
mutex_.Unlock();
|
|
|
|
{
|
2019-10-16 19:39:00 +02:00
|
|
|
for (auto& info : *flush_jobs_info) {
|
|
|
|
info->triggered_writes_slowdown = triggered_writes_slowdown;
|
|
|
|
info->triggered_writes_stop = triggered_writes_stop;
|
|
|
|
for (auto listener : immutable_db_options_.listeners) {
|
|
|
|
listener->OnFlushCompleted(this, *info);
|
|
|
|
}
|
Fix TSAN data race in EventListenerTest.MultiCF (#9528)
Summary:
**Context:**
`EventListenerTest.MultiCF` occasionally failed on TSAN data race as below:
```
WARNING: ThreadSanitizer: data race (pid=2047633)
Read of size 8 at 0x7b6000001440 by main thread:
#0 std::vector<rocksdb::DB*, std::allocator<rocksdb::DB*> >::size() const /usr/bin/../lib/gcc/x86_64-linux-gnu/9/../../../../include/c++/9/bits/stl_vector.h:916:40 (listener_test+0x52337c)
https://github.com/facebook/rocksdb/issues/1 rocksdb::EventListenerTest_MultiCF_Test::TestBody() /home/circleci/project/db/listener_test.cc:384:7 (listener_test+0x52337c)
Previous write of size 8 at 0x7b6000001440 by thread T2:
#0 void std::vector<rocksdb::DB*, std::allocator<rocksdb::DB*> >::_M_realloc_insert<rocksdb::DB* const&>(__gnu_cxx::__normal_iterator<rocksdb::DB**, std::vector<rocksdb::DB*, std::allocator<rocksdb::DB*> > >, rocksdb::DB* const&) /usr/bin/../lib/gcc/x86_64-linux-gnu/9/../../../../include/c++/9/bits/vector.tcc:503:31 (listener_test+0x550654)
https://github.com/facebook/rocksdb/issues/1 std::vector<rocksdb::DB*, std::allocator<rocksdb::DB*> >::push_back(rocksdb::DB* const&) /usr/bin/../lib/gcc/x86_64-linux-gnu/9/../../../../include/c++/9/bits/stl_vector.h:1195:4 (listener_test+0x550654)
https://github.com/facebook/rocksdb/issues/2 rocksdb::TestFlushListener::OnFlushCompleted(rocksdb::DB*, rocksdb::FlushJobInfo const&) /home/circleci/project/db/listener_test.cc:255:18 (listener_test+0x550654)
```
After investigation, it is due to the following:
(1) `ASSERT_OK(Flush(i));` before the read `std::vector::size()` is supposed to be [blocked on `DB::Impl::bg_cv_` for memtable flush to finish](https://github.com/facebook/rocksdb/blob/320d9a8e8a1b6998f92934f87fc71ad8bd6d4596/db/db_impl/db_impl_compaction_flush.cc#L2319) and get signaled [at the end of background flush ](https://github.com/facebook/rocksdb/blob/320d9a8e8a1b6998f92934f87fc71ad8bd6d4596/db/db_impl/db_impl_compaction_flush.cc#L2830), which happens after the write `std::vector::push_back()` . So the sequence of execution should have been synchronized as `call flush() -> write -> return from flush() -> read` and would not cause any TSAN data race.
- The subsequent `ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());` serves a similar purpose based on [the previous attempt to deflake the test.](https://github.com/facebook/rocksdb/pull/9084)
(2) However, there are multiple places in the code can signal this `DB::Impl::bg_cv_` and mistakenly wake up `ASSERT_OK(Flush(i));` (or `ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());`) too early (and with the lock available to them), resulting in non-synchronized read and write thus a TSAN data race.
- Reproduced by the following, suggested by ajkr:
```
diff --git a/db/db_impl/db_impl_compaction_flush.cc b/db/db_impl/db_impl_compaction_flush.cc
index 4ff87c1e4..52492e9cf 100644
--- a/db/db_impl/db_impl_compaction_flush.cc
+++ b/db/db_impl/db_impl_compaction_flush.cc
@@ -22,7 +22,7 @@
#include "test_util/sync_point.h"
#include "util/cast_util.h"
#include "util/concurrent_task_limiter_impl.h"
namespace ROCKSDB_NAMESPACE {
bool DBImpl::EnoughRoomForCompaction(
@@ -855,6 +855,7 @@ void DBImpl::NotifyOnFlushCompleted(
mutable_cf_options.level0_stop_writes_trigger);
// release lock while notifying events
mutex_.Unlock();
+ bg_cv_.SignalAll();
```
**Summary:**
- Added synchornization between read and write by ` ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency()` mechanism
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9528
Test Plan:
`./listener_test --gtest_filter=EventListenerTest.MultiCF --gtest_repeat=10`
- pre-fix:
```
Repeating all tests (iteration 3)
Note: Google Test filter = EventListenerTest.MultiCF
[==========] Running 1 test from 1 test case.
[----------] Global test environment set-up.
[----------] 1 test from EventListenerTest
[ RUN ] EventListenerTest.MultiCF
==================
WARNING: ThreadSanitizer: data race (pid=3377137)
Read of size 8 at 0x7b6000000840 by main thread:
#0 std::vector<rocksdb::DB*, std::allocator<rocksdb::DB*> >::size()
https://github.com/facebook/rocksdb/issues/1 rocksdb::EventListenerTest_MultiCF_Test::TestBody() db/listener_test.cc:384 (listener_test+0x4bb300)
Previous write of size 8 at 0x7b6000000840 by thread T2:
#0 void std::vector<rocksdb::DB*, std::allocator<rocksdb::DB*> >::_M_realloc_insert<rocksdb::DB* const&>(__gnu_cxx::__normal_iterator<rocksdb::DB**, std::vector<rocksdb::DB*, std::allocator<rocksdb::DB*> > >, rocksdb::DB* const&)
https://github.com/facebook/rocksdb/issues/1 std::vector<rocksdb::DB*, std::allocator<rocksdb::DB*> >::push_back(rocksdb::DB* const&)
https://github.com/facebook/rocksdb/issues/2 rocksdb::TestFlushListener::OnFlushCompleted(rocksdb::DB*, rocksdb::FlushJobInfo const&) db/listener_test.cc:255 (listener_test+0x4e820f)
```
- post-fix: `All passed`
Reviewed By: ajkr
Differential Revision: D34085791
Pulled By: hx235
fbshipit-source-id: f877aa687ea1d5cb6f31ef8c4772625d22868e8b
2022-02-10 19:17:53 +01:00
|
|
|
TEST_SYNC_POINT(
|
|
|
|
"DBImpl::NotifyOnFlushCompleted::PostAllOnFlushCompleted");
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
2019-10-16 19:39:00 +02:00
|
|
|
flush_jobs_info->clear();
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
mutex_.Lock();
|
|
|
|
// no need to signal bg_cv_ as it will be signaled at the end of the
|
|
|
|
// flush process.
|
2018-04-13 02:55:14 +02:00
|
|
|
#else
|
|
|
|
(void)cfd;
|
|
|
|
(void)mutable_cf_options;
|
2019-10-16 19:39:00 +02:00
|
|
|
(void)flush_jobs_info;
|
2017-04-06 02:14:05 +02:00
|
|
|
#endif // ROCKSDB_LITE
|
|
|
|
}
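Not part of this file: a minimal sketch of the listener side of the two notification paths above; the class name and counters are made up, and the listener would be registered through Options::listeners before opening the DB.
```
#include <atomic>
#include <cstdint>

#include "rocksdb/listener.h"

class FlushCountingListener : public rocksdb::EventListener {
 public:
  void OnFlushBegin(rocksdb::DB* /*db*/,
                    const rocksdb::FlushJobInfo& info) override {
    if (info.triggered_writes_stop) {
      ++stalled_flushes_;  // L0 had already hit the stop trigger
    }
  }
  void OnFlushCompleted(rocksdb::DB* /*db*/,
                        const rocksdb::FlushJobInfo& /*info*/) override {
    ++completed_flushes_;
  }

  std::atomic<uint64_t> stalled_flushes_{0};
  std::atomic<uint64_t> completed_flushes_{0};
};

// Registration, e.g. before DB::Open:
//   options.listeners.emplace_back(std::make_shared<FlushCountingListener>());
```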
|
|
|
|
|
|
|
|
Status DBImpl::CompactRange(const CompactRangeOptions& options,
|
|
|
|
ColumnFamilyHandle* column_family,
|
2020-12-02 21:59:23 +01:00
|
|
|
const Slice* begin_without_ts,
|
|
|
|
const Slice* end_without_ts) {
|
2021-04-22 00:23:04 +02:00
|
|
|
if (manual_compaction_paused_.load(std::memory_order_acquire) > 0) {
|
|
|
|
return Status::Incomplete(Status::SubCode::kManualCompactionPaused);
|
|
|
|
}
|
|
|
|
|
2021-06-07 20:40:31 +02:00
|
|
|
if (options.canceled && options.canceled->load(std::memory_order_acquire)) {
|
|
|
|
return Status::Incomplete(Status::SubCode::kManualCompactionPaused);
|
|
|
|
}
|
|
|
|
|
2020-12-02 21:59:23 +01:00
|
|
|
const Comparator* const ucmp = column_family->GetComparator();
|
|
|
|
assert(ucmp);
|
|
|
|
size_t ts_sz = ucmp->timestamp_size();
|
|
|
|
if (ts_sz == 0) {
|
|
|
|
return CompactRangeInternal(options, column_family, begin_without_ts,
|
2022-03-12 01:13:23 +01:00
|
|
|
end_without_ts, "" /*trim_ts*/);
|
2020-12-02 21:59:23 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
std::string begin_str;
|
|
|
|
std::string end_str;
|
|
|
|
|
|
|
|
// CompactRange compacts all keys in [begin, end] inclusive. Add the maximum
|
|
|
|
// timestamp to include all `begin` keys, and the minimum timestamp to include
|
|
|
|
// all `end` keys.
|
|
|
|
if (begin_without_ts != nullptr) {
|
|
|
|
AppendKeyWithMaxTimestamp(&begin_str, *begin_without_ts, ts_sz);
|
|
|
|
}
|
|
|
|
if (end_without_ts != nullptr) {
|
|
|
|
AppendKeyWithMinTimestamp(&end_str, *end_without_ts, ts_sz);
|
|
|
|
}
|
|
|
|
Slice begin(begin_str);
|
|
|
|
Slice end(end_str);
|
|
|
|
|
|
|
|
Slice* begin_with_ts = begin_without_ts ? &begin : nullptr;
|
|
|
|
Slice* end_with_ts = end_without_ts ? &end : nullptr;
|
|
|
|
|
|
|
|
return CompactRangeInternal(options, column_family, begin_with_ts,
|
2022-03-12 01:13:23 +01:00
|
|
|
end_with_ts, "" /*trim_ts*/);
|
2020-12-02 21:59:23 +01:00
|
|
|
}
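Not part of this file: a minimal sketch (the key range is made up) of driving CompactRange from application code, including the bottommost_level_compaction option honored by the level-selection loop in CompactRangeInternal below.
```
#include "rocksdb/db.h"
#include "rocksdb/options.h"

rocksdb::Status CompactUserRange(rocksdb::DB* db) {
  rocksdb::CompactRangeOptions cro;
  // Recompact the bottommost level even without a compaction filter, e.g. to
  // reclaim space occupied by deleted keys.
  cro.bottommost_level_compaction = rocksdb::BottommostLevelCompaction::kForce;

  rocksdb::Slice begin("user_0000");
  rocksdb::Slice end("user_9999");
  // Passing nullptr for either bound makes that side of the range unbounded.
  return db->CompactRange(cro, &begin, &end);
}
```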
|
|
|
|
|
2021-12-23 20:02:43 +01:00
|
|
|
Status DBImpl::IncreaseFullHistoryTsLow(ColumnFamilyHandle* column_family,
|
2021-02-08 22:43:23 +01:00
|
|
|
std::string ts_low) {
|
2021-12-23 20:02:43 +01:00
|
|
|
ColumnFamilyData* cfd = nullptr;
|
|
|
|
if (column_family == nullptr) {
|
|
|
|
cfd = default_cf_handle_->cfd();
|
|
|
|
} else {
|
|
|
|
auto cfh = static_cast_with_check<ColumnFamilyHandleImpl>(column_family);
|
|
|
|
assert(cfh != nullptr);
|
|
|
|
cfd = cfh->cfd();
|
|
|
|
}
|
|
|
|
assert(cfd != nullptr && cfd->user_comparator() != nullptr);
|
|
|
|
if (cfd->user_comparator()->timestamp_size() == 0) {
|
|
|
|
return Status::InvalidArgument(
|
|
|
|
"Timestamp is not enabled in this column family");
|
|
|
|
}
|
|
|
|
if (cfd->user_comparator()->timestamp_size() != ts_low.size()) {
|
|
|
|
return Status::InvalidArgument("ts_low size mismatch");
|
|
|
|
}
|
|
|
|
return IncreaseFullHistoryTsLowImpl(cfd, ts_low);
|
|
|
|
}
|
|
|
|
|
|
|
|
Status DBImpl::IncreaseFullHistoryTsLowImpl(ColumnFamilyData* cfd,
|
|
|
|
std::string ts_low) {
|
2021-02-08 22:43:23 +01:00
|
|
|
VersionEdit edit;
|
|
|
|
edit.SetColumnFamily(cfd->GetID());
|
|
|
|
edit.SetFullHistoryTsLow(ts_low);
|
|
|
|
|
|
|
|
InstrumentedMutexLock l(&mutex_);
|
|
|
|
std::string current_ts_low = cfd->GetFullHistoryTsLow();
|
|
|
|
const Comparator* ucmp = cfd->user_comparator();
|
2021-12-23 20:02:43 +01:00
|
|
|
assert(ucmp->timestamp_size() == ts_low.size() && !ts_low.empty());
|
2021-02-08 22:43:23 +01:00
|
|
|
if (!current_ts_low.empty() &&
|
|
|
|
ucmp->CompareTimestamp(ts_low, current_ts_low) < 0) {
|
|
|
|
return Status::InvalidArgument(
|
|
|
|
"Cannot decrease full_history_timestamp_low");
|
|
|
|
}
|
|
|
|
|
|
|
|
return versions_->LogAndApply(cfd, *cfd->GetLatestMutableCFOptions(), &edit,
|
|
|
|
&mutex_);
|
|
|
|
}
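Not part of this file: a minimal sketch of advancing full_history_ts_low through the public DB API validated above. The fixed-width encoding below is an assumption and must match the column family's timestamp comparator (an 8-byte timestamp is assumed here).
```
#include <cstdint>
#include <cstring>
#include <string>

#include "rocksdb/db.h"

rocksdb::Status AdvanceTsLow(rocksdb::DB* db, rocksdb::ColumnFamilyHandle* cfh,
                             uint64_t ts) {
  // The CF is assumed to use 8-byte timestamps, so ts_low must be 8 bytes too.
  std::string ts_low(sizeof(uint64_t), '\0');
  std::memcpy(&ts_low[0], &ts, sizeof(uint64_t));
  // Fails with InvalidArgument if timestamps are disabled, the size does not
  // match, or the new value would move full_history_ts_low backwards.
  return db->IncreaseFullHistoryTsLow(cfh, ts_low);
}
```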
|
|
|
|
|
2020-12-02 21:59:23 +01:00
|
|
|
Status DBImpl::CompactRangeInternal(const CompactRangeOptions& options,
|
|
|
|
ColumnFamilyHandle* column_family,
|
2022-03-12 01:13:23 +01:00
|
|
|
const Slice* begin, const Slice* end,
|
|
|
|
const std::string& trim_ts) {
|
2020-07-03 04:24:25 +02:00
|
|
|
auto cfh = static_cast_with_check<ColumnFamilyHandleImpl>(column_family);
|
2018-04-06 04:49:06 +02:00
|
|
|
auto cfd = cfh->cfd();
|
|
|
|
|
|
|
|
if (options.target_path_id >= cfd->ioptions()->cf_paths.size()) {
|
2017-04-06 02:14:05 +02:00
|
|
|
return Status::InvalidArgument("Invalid target path ID");
|
|
|
|
}
|
|
|
|
|
2018-02-13 00:34:39 +01:00
|
|
|
bool flush_needed = true;
|
2021-02-08 22:43:23 +01:00
|
|
|
|
|
|
|
// Update full_history_ts_low if it's set
|
|
|
|
if (options.full_history_ts_low != nullptr &&
|
|
|
|
!options.full_history_ts_low->empty()) {
|
|
|
|
std::string ts_low = options.full_history_ts_low->ToString();
|
|
|
|
if (begin != nullptr || end != nullptr) {
|
|
|
|
return Status::InvalidArgument(
|
|
|
|
"Cannot specify compaction range with full_history_ts_low");
|
|
|
|
}
|
2021-12-23 20:02:43 +01:00
|
|
|
Status s = IncreaseFullHistoryTsLowImpl(cfd, ts_low);
|
2021-02-08 22:43:23 +01:00
|
|
|
if (!s.ok()) {
|
|
|
|
LogFlush(immutable_db_options_.info_log);
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-12-24 01:54:05 +01:00
|
|
|
Status s;
|
2018-02-28 02:08:34 +01:00
|
|
|
if (begin != nullptr && end != nullptr) {
|
|
|
|
// TODO(ajkr): We could also optimize away the flush in certain cases where
|
|
|
|
// one/both sides of the interval are unbounded. But it requires more
|
|
|
|
// changes to RangesOverlapWithMemtables.
|
|
|
|
Range range(*begin, *end);
|
2019-12-17 22:20:42 +01:00
|
|
|
SuperVersion* super_version = cfd->GetReferencedSuperVersion(this);
|
2020-12-24 01:54:05 +01:00
|
|
|
s = cfd->RangesOverlapWithMemtables(
|
|
|
|
{range}, super_version, immutable_db_options_.allow_data_in_errors,
|
|
|
|
&flush_needed);
|
2018-02-28 02:08:34 +01:00
|
|
|
CleanupSuperVersion(super_version);
|
|
|
|
}
|
|
|
|
|
2020-12-24 01:54:05 +01:00
|
|
|
if (s.ok() && flush_needed) {
|
2018-08-29 20:58:13 +02:00
|
|
|
FlushOptions fo;
|
|
|
|
fo.allow_write_stall = options.allow_write_stall;
|
2018-11-12 21:22:10 +01:00
|
|
|
if (immutable_db_options_.atomic_flush) {
|
2018-10-27 00:06:44 +02:00
|
|
|
autovector<ColumnFamilyData*> cfds;
|
2018-12-05 22:08:46 +01:00
|
|
|
mutex_.Lock();
|
2018-10-27 00:06:44 +02:00
|
|
|
SelectColumnFamiliesForAtomicFlush(&cfds);
|
2018-12-05 22:08:46 +01:00
|
|
|
mutex_.Unlock();
|
2018-10-27 00:06:44 +02:00
|
|
|
s = AtomicFlushMemTables(cfds, fo, FlushReason::kManualCompaction,
|
|
|
|
false /* writes_stopped */);
|
|
|
|
} else {
|
|
|
|
s = FlushMemTable(cfd, fo, FlushReason::kManualCompaction,
|
|
|
|
false /* writes_stopped*/);
|
|
|
|
}
|
2018-02-13 00:34:39 +01:00
|
|
|
if (!s.ok()) {
|
|
|
|
LogFlush(immutable_db_options_.info_log);
|
|
|
|
return s;
|
|
|
|
}
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
|
2020-03-05 05:12:23 +01:00
|
|
|
constexpr int kInvalidLevel = -1;
|
|
|
|
int final_output_level = kInvalidLevel;
|
|
|
|
bool exclusive = options.exclusive_manual_compaction;
|
2017-04-06 02:14:05 +02:00
|
|
|
if (cfd->ioptions()->compaction_style == kCompactionStyleUniversal &&
|
|
|
|
cfd->NumberLevels() > 1) {
|
|
|
|
// Always compact all files together.
|
2017-05-17 20:32:26 +02:00
|
|
|
final_output_level = cfd->NumberLevels() - 1;
|
|
|
|
// if the bottommost level is reserved
|
|
|
|
if (immutable_db_options_.allow_ingest_behind) {
|
|
|
|
final_output_level--;
|
|
|
|
}
|
2017-04-06 02:14:05 +02:00
|
|
|
s = RunManualCompaction(cfd, ColumnFamilyData::kCompactAllLevels,
|
2019-04-17 08:29:32 +02:00
|
|
|
final_output_level, options, begin, end, exclusive,
|
2022-03-12 01:13:23 +01:00
|
|
|
false, port::kMaxUint64, trim_ts);
|
2017-04-06 02:14:05 +02:00
|
|
|
} else {
    int first_overlapped_level = kInvalidLevel;
    int max_overlapped_level = kInvalidLevel;
    {
      SuperVersion* super_version = cfd->GetReferencedSuperVersion(this);
      Version* current_version = super_version->current;
      ReadOptions ro;
      ro.total_order_seek = true;
      bool overlap;
      for (int level = 0;
           level < current_version->storage_info()->num_non_empty_levels();
           level++) {
        overlap = true;
        if (begin != nullptr && end != nullptr) {
          Status status = current_version->OverlapWithLevelIterator(
              ro, file_options_, *begin, *end, level, &overlap);
          if (!status.ok()) {
            overlap = current_version->storage_info()->OverlapInLevel(
                level, begin, end);
          }
        } else {
          overlap = current_version->storage_info()->OverlapInLevel(level,
                                                                    begin, end);
        }
        if (overlap) {
          if (first_overlapped_level == kInvalidLevel) {
            first_overlapped_level = level;
          }
          max_overlapped_level = level;
        }
      }
      CleanupSuperVersion(super_version);
    }
    if (s.ok() && first_overlapped_level != kInvalidLevel) {
      // max_file_num_to_ignore can be used to filter out newly created SST
      // files, useful for bottom level compaction in a manual compaction
      uint64_t max_file_num_to_ignore = port::kMaxUint64;
      uint64_t next_file_number = versions_->current_next_file_number();
      final_output_level = max_overlapped_level;
      int output_level;
      for (int level = first_overlapped_level; level <= max_overlapped_level;
           level++) {
        bool disallow_trivial_move = false;
        // In case the compaction is universal or if we're compacting the
        // bottom-most level, the output level will be the same as the input
        // one. Level 0 can never be the bottommost level (i.e. if all files
        // are in level 0, we will compact to level 1).
        if (cfd->ioptions()->compaction_style == kCompactionStyleUniversal ||
            cfd->ioptions()->compaction_style == kCompactionStyleFIFO) {
          output_level = level;
        } else if (level == max_overlapped_level && level > 0) {
          if (options.bottommost_level_compaction ==
              BottommostLevelCompaction::kSkip) {
            // Skip bottommost level compaction
            continue;
          } else if (options.bottommost_level_compaction ==
                         BottommostLevelCompaction::kIfHaveCompactionFilter &&
                     cfd->ioptions()->compaction_filter == nullptr &&
                     cfd->ioptions()->compaction_filter_factory == nullptr) {
            // Skip bottommost level compaction since we don't have a
            // compaction filter
            continue;
          }
          output_level = level;
          // Update max_file_num_to_ignore only for bottom level compaction
          // because data in newly compacted files in middle levels may still
          // need to be pushed down.
          max_file_num_to_ignore = next_file_number;
        } else {
          output_level = level + 1;
          if (cfd->ioptions()->compaction_style == kCompactionStyleLevel &&
              cfd->ioptions()->level_compaction_dynamic_level_bytes &&
              level == 0) {
            output_level = ColumnFamilyData::kCompactToBaseLevel;
          }
          // If it's a bottommost level compaction and a `kForce*` mode is
          // set, disallow trivial move.
          if (level == max_overlapped_level &&
              (options.bottommost_level_compaction ==
                   BottommostLevelCompaction::kForce ||
               options.bottommost_level_compaction ==
                   BottommostLevelCompaction::kForceOptimized)) {
            disallow_trivial_move = true;
          }
        }
        // trim_ts needs a real compaction to remove the latest records, so
        // trivial moves are not allowed.
        if (!trim_ts.empty()) {
          disallow_trivial_move = true;
        }
        s = RunManualCompaction(cfd, level, output_level, options, begin, end,
                                exclusive, disallow_trivial_move,
                                max_file_num_to_ignore, trim_ts);
        if (!s.ok()) {
          break;
        }
        if (output_level == ColumnFamilyData::kCompactToBaseLevel) {
          final_output_level = cfd->NumberLevels() - 1;
        } else if (output_level > final_output_level) {
          final_output_level = output_level;
        }
        TEST_SYNC_POINT("DBImpl::RunManualCompaction()::1");
        TEST_SYNC_POINT("DBImpl::RunManualCompaction()::2");
      }
    }
  }
  if (!s.ok() || final_output_level == kInvalidLevel) {
    LogFlush(immutable_db_options_.info_log);
    return s;
  }
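
  // Illustrative caller-side sketch (not part of this file; assumes an open
  // DB* named `db`): the change_level/ReFitLevel path below is what serves a
  // request like the following, which compacts everything and then moves the
  // output to L1.
  //
  //   CompactRangeOptions cro;
  //   cro.change_level = true;
  //   cro.target_level = 1;
  //   Status s = db->CompactRange(cro, nullptr, nullptr);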

  if (options.change_level) {
    TEST_SYNC_POINT("DBImpl::CompactRange:BeforeRefit:1");
    TEST_SYNC_POINT("DBImpl::CompactRange:BeforeRefit:2");

    ROCKS_LOG_INFO(immutable_db_options_.info_log,
                   "[RefitLevel] waiting for background threads to stop");
    DisableManualCompaction();
    s = PauseBackgroundWork();
    if (s.ok()) {
      TEST_SYNC_POINT("DBImpl::CompactRange:PreRefitLevel");
      s = ReFitLevel(cfd, final_output_level, options.target_level);
      TEST_SYNC_POINT("DBImpl::CompactRange:PostRefitLevel");
      // ContinueBackgroundWork always returns Status::OK().
      Status temp_s = ContinueBackgroundWork();
      assert(temp_s.ok());
    }
    EnableManualCompaction();
    TEST_SYNC_POINT(
        "DBImpl::CompactRange:PostRefitLevel:ManualCompactionEnabled");
  }
  LogFlush(immutable_db_options_.info_log);

  {
    InstrumentedMutexLock l(&mutex_);
    // an automatic compaction that has been scheduled might have been
    // preempted by the manual compactions. Need to schedule it back.
    MaybeScheduleFlushOrCompaction();
  }

  return s;
}

Status DBImpl::CompactFiles(const CompactionOptions& compact_options,
                            ColumnFamilyHandle* column_family,
                            const std::vector<std::string>& input_file_names,
                            const int output_level, const int output_path_id,
                            std::vector<std::string>* const output_file_names,
                            CompactionJobInfo* compaction_job_info) {
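  // Illustrative caller-side sketch (not part of this file; assumes an open
  // DB* named `db` with at least one live L0 file): compact an explicit set
  // of files into L1 through the public CompactFiles() API, using
  // GetColumnFamilyMetaData() to discover the current file names.
  //
  //   ColumnFamilyMetaData meta;
  //   db->GetColumnFamilyMetaData(&meta);
  //   std::vector<std::string> inputs;
  //   for (const auto& file : meta.levels[0].files) {
  //     inputs.push_back(file.name);
  //   }
  //   Status s = db->CompactFiles(CompactionOptions(), inputs,
  //                               /*output_level=*/1);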
#ifdef ROCKSDB_LITE
  (void)compact_options;
  (void)column_family;
  (void)input_file_names;
  (void)output_level;
  (void)output_path_id;
  (void)output_file_names;
  (void)compaction_job_info;
  // not supported in lite version
  return Status::NotSupported("Not supported in ROCKSDB LITE");
#else
  if (column_family == nullptr) {
    return Status::InvalidArgument("ColumnFamilyHandle must be non-null.");
  }

  auto cfd =
      static_cast_with_check<ColumnFamilyHandleImpl>(column_family)->cfd();
  assert(cfd);

  Status s;
  JobContext job_context(next_job_id_.fetch_add(1), true);
  LogBuffer log_buffer(InfoLogLevel::INFO_LEVEL,
                       immutable_db_options_.info_log.get());

  // Perform CompactFiles
  TEST_SYNC_POINT("TestCompactFiles::IngestExternalFile2");
  {
    InstrumentedMutexLock l(&mutex_);

    // This call will unlock/lock the mutex to wait for current running
    // IngestExternalFile() calls to finish.
    WaitForIngestFile();

    // We need to get current after `WaitForIngestFile`, because
    // `IngestExternalFile` may add files that overlap with `input_file_names`
    auto* current = cfd->current();
    current->Ref();

    s = CompactFilesImpl(compact_options, cfd, current, input_file_names,
                         output_file_names, output_level, output_path_id,
                         &job_context, &log_buffer, compaction_job_info);

    current->Unref();
  }

  // Find and delete obsolete files
  {
    InstrumentedMutexLock l(&mutex_);
    // If !s.ok(), this means that Compaction failed. In that case, we want
    // to delete all obsolete files we might have created and we force
    // FindObsoleteFiles(). This is because job_context does not
    // catch all created files if compaction failed.
    FindObsoleteFiles(&job_context, !s.ok());
  }  // release the mutex

  // delete unnecessary files if any, this is done outside the mutex
  if (job_context.HaveSomethingToClean() ||
      job_context.HaveSomethingToDelete() || !log_buffer.IsEmpty()) {
    // Have to flush the info logs before bg_compaction_scheduled_--
    // because if bg_flush_scheduled_ becomes 0 and the lock is
    // released, the destructor of DB can kick in and destroy all the
    // states of DB so info_log might not be available after that point.
    // It also applies to access other states that DB owns.
    log_buffer.FlushBufferToLog();
    if (job_context.HaveSomethingToDelete()) {
      // no mutex is locked here. No need to Unlock() and Lock() here.
      PurgeObsoleteFiles(job_context);
    }
    job_context.Clean();
  }

  return s;
#endif  // ROCKSDB_LITE
}

#ifndef ROCKSDB_LITE
Status DBImpl::CompactFilesImpl(
    const CompactionOptions& compact_options, ColumnFamilyData* cfd,
    Version* version, const std::vector<std::string>& input_file_names,
    std::vector<std::string>* const output_file_names, const int output_level,
    int output_path_id, JobContext* job_context, LogBuffer* log_buffer,
    CompactionJobInfo* compaction_job_info) {
  mutex_.AssertHeld();

  if (shutting_down_.load(std::memory_order_acquire)) {
    return Status::ShutdownInProgress();
  }
  if (manual_compaction_paused_.load(std::memory_order_acquire) > 0) {
    return Status::Incomplete(Status::SubCode::kManualCompactionPaused);
  }

  std::unordered_set<uint64_t> input_set;
  for (const auto& file_name : input_file_names) {
    input_set.insert(TableFileNameToNumber(file_name));
  }

  ColumnFamilyMetaData cf_meta;
  // TODO(yhchiang): we could use `version` directly here if none of the
  // following function calls were pluggable by external developers.
  version->GetColumnFamilyMetaData(&cf_meta);

  if (output_path_id < 0) {
    if (cfd->ioptions()->cf_paths.size() == 1U) {
      output_path_id = 0;
    } else {
      return Status::NotSupported(
          "Automatic output path selection is not "
          "yet supported in CompactFiles()");
    }
  }

  Status s = cfd->compaction_picker()->SanitizeCompactionInputFiles(
      &input_set, cf_meta, output_level);
  if (!s.ok()) {
    return s;
  }

  std::vector<CompactionInputFiles> input_files;
  s = cfd->compaction_picker()->GetCompactionInputsFromFileNumbers(
      &input_files, &input_set, version->storage_info(), compact_options);
  if (!s.ok()) {
    return s;
  }

  for (const auto& inputs : input_files) {
    if (cfd->compaction_picker()->AreFilesInCompaction(inputs.files)) {
      return Status::Aborted(
          "Some of the necessary compaction input "
          "files are already being compacted");
    }
  }

  bool sfm_reserved_compact_space = false;
  // First check if we have enough room to do the compaction
  bool enough_room = EnoughRoomForCompaction(
      cfd, input_files, &sfm_reserved_compact_space, log_buffer);

  if (!enough_room) {
    // m's vars will get set properly at the end of this function,
    // as long as status == CompactionTooLarge
    return Status::CompactionTooLarge();
  }

  // At this point, CompactFiles will be run.
  bg_compaction_scheduled_++;

  std::unique_ptr<Compaction> c;
  assert(cfd->compaction_picker());
  c.reset(cfd->compaction_picker()->CompactFiles(
      compact_options, input_files, output_level, version->storage_info(),
      *cfd->GetLatestMutableCFOptions(), mutable_db_options_, output_path_id));
  // we already sanitized the set of input files and checked for conflicts
  // without releasing the lock, so we're guaranteed a compaction can be formed.
  assert(c != nullptr);

  c->SetInputVersion(version);
  // deletion compaction currently not allowed in CompactFiles.
  assert(!c->deletion_compaction());

  std::vector<SequenceNumber> snapshot_seqs;
  SequenceNumber earliest_write_conflict_snapshot;
  SnapshotChecker* snapshot_checker;
  GetSnapshotContext(job_context, &snapshot_seqs,
                     &earliest_write_conflict_snapshot, &snapshot_checker);

  std::unique_ptr<std::list<uint64_t>::iterator> pending_outputs_inserted_elem(
      new std::list<uint64_t>::iterator(
          CaptureCurrentFileNumberInPendingOutputs()));
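  // Note (editorial, not in the original source): the pending-output
  // reservation taken above keeps the compaction's not-yet-installed output
  // files from being picked up as obsolete by a concurrent
  // FindObsoleteFiles() pass; it is released further down via
  // ReleaseFileNumberFromPendingOutputs().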

  assert(is_snapshot_supported_ || snapshots_.empty());
  CompactionJobStats compaction_job_stats;
  CompactionJob compaction_job(
      job_context->job_id, c.get(), immutable_db_options_, mutable_db_options_,
      file_options_for_compaction_, versions_.get(), &shutting_down_,
      preserve_deletes_seqnum_.load(), log_buffer, directories_.GetDbDir(),
      GetDataDir(c->column_family_data(), c->output_path_id()),
      GetDataDir(c->column_family_data(), 0), stats_, &mutex_, &error_handler_,
      snapshot_seqs, earliest_write_conflict_snapshot, snapshot_checker,
      table_cache_, &event_logger_,
      c->mutable_cf_options()->paranoid_file_checks,
      c->mutable_cf_options()->report_bg_io_stats, dbname_,
      &compaction_job_stats, Env::Priority::USER, io_tracer_,
      &manual_compaction_paused_, nullptr, db_id_, db_session_id_,
      c->column_family_data()->GetFullHistoryTsLow(), c->trim_ts(),
      &blob_callback_);

  // Creating a compaction influences the compaction score because the score
  // takes running compactions into account (by skipping files that are already
  // being compacted). Since we just changed compaction score, we recalculate it
  // here.
  version->storage_info()->ComputeCompactionScore(*cfd->ioptions(),
                                                  *c->mutable_cf_options());

  compaction_job.Prepare();

  mutex_.Unlock();
  TEST_SYNC_POINT("CompactFilesImpl:0");
  TEST_SYNC_POINT("CompactFilesImpl:1");
  // Ignore the status here, as it will be checked in the Install down below...
  compaction_job.Run().PermitUncheckedError();
  TEST_SYNC_POINT("CompactFilesImpl:2");
  TEST_SYNC_POINT("CompactFilesImpl:3");
  mutex_.Lock();

  Status status = compaction_job.Install(*c->mutable_cf_options());
  if (status.ok()) {
    assert(compaction_job.io_status().ok());
    InstallSuperVersionAndScheduleWork(c->column_family_data(),
                                       &job_context->superversion_contexts[0],
                                       *c->mutable_cf_options());
  }
  // `status` above captures any error during compaction_job.Install, so it's
  // OK not to check compaction_job.io_status() explicitly if we're not calling
  // SetBGError.
  compaction_job.io_status().PermitUncheckedError();
  c->ReleaseCompactionFiles(s);
#ifndef ROCKSDB_LITE
  // Need to make sure SstFileManager does its bookkeeping
  auto sfm = static_cast<SstFileManagerImpl*>(
      immutable_db_options_.sst_file_manager.get());
  if (sfm && sfm_reserved_compact_space) {
    sfm->OnCompactionCompletion(c.get());
  }
#endif  // ROCKSDB_LITE

  ReleaseFileNumberFromPendingOutputs(pending_outputs_inserted_elem);

  if (compaction_job_info != nullptr) {
    BuildCompactionJobInfo(cfd, c.get(), s, compaction_job_stats,
                           job_context->job_id, version, compaction_job_info);
  }

  if (status.ok()) {
    // Done
  } else if (status.IsColumnFamilyDropped() || status.IsShutdownInProgress()) {
    // Ignore compaction errors found during shutting down
  } else if (status.IsManualCompactionPaused()) {
    // Don't report stopping manual compaction as error
    ROCKS_LOG_INFO(immutable_db_options_.info_log,
                   "[%s] [JOB %d] Stopping manual compaction",
                   c->column_family_data()->GetName().c_str(),
                   job_context->job_id);
  } else {
    ROCKS_LOG_WARN(immutable_db_options_.info_log,
                   "[%s] [JOB %d] Compaction error: %s",
                   c->column_family_data()->GetName().c_str(),
                   job_context->job_id, status.ToString().c_str());
    IOStatus io_s = compaction_job.io_status();
    if (!io_s.ok()) {
      error_handler_.SetBGError(io_s, BackgroundErrorReason::kCompaction);
    } else {
      error_handler_.SetBGError(status, BackgroundErrorReason::kCompaction);
    }
  }

  if (output_file_names != nullptr) {
    for (const auto& newf : c->edit()->GetNewFiles()) {
      output_file_names->push_back(TableFileName(
          c->immutable_options()->cf_paths, newf.second.fd.GetNumber(),
          newf.second.fd.GetPathId()));
    }

    for (const auto& blob_file : c->edit()->GetBlobFileAdditions()) {
      output_file_names->push_back(
          BlobFileName(c->immutable_options()->cf_paths.front().path,
                       blob_file.GetBlobFileNumber()));
    }
  }

  c.reset();

  bg_compaction_scheduled_--;
  if (bg_compaction_scheduled_ == 0) {
    bg_cv_.SignalAll();
  }
  MaybeScheduleFlushOrCompaction();
  TEST_SYNC_POINT("CompactFilesImpl:End");

  return status;
}
#endif  // ROCKSDB_LITE
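
// Illustrative caller-side sketch (not part of this file; assumes an open
// DB* named `db`): PauseBackgroundWork()/ContinueBackgroundWork() are meant
// to be used as a pair around work that must not race with flushes or
// compactions.
//
//   Status s = db->PauseBackgroundWork();
//   if (s.ok()) {
//     // ... external maintenance step ...
//     s = db->ContinueBackgroundWork();
//   }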
Status DBImpl::PauseBackgroundWork() {
  InstrumentedMutexLock guard_lock(&mutex_);
  bg_compaction_paused_++;
  while (bg_bottom_compaction_scheduled_ > 0 || bg_compaction_scheduled_ > 0 ||
         bg_flush_scheduled_ > 0) {
    bg_cv_.Wait();
  }
  bg_work_paused_++;
  return Status::OK();
}

Status DBImpl::ContinueBackgroundWork() {
  InstrumentedMutexLock guard_lock(&mutex_);
  if (bg_work_paused_ == 0) {
    return Status::InvalidArgument();
  }
  assert(bg_work_paused_ > 0);
  assert(bg_compaction_paused_ > 0);
  bg_compaction_paused_--;
  bg_work_paused_--;
  // It's sufficient to check just bg_work_paused_ here since
  // bg_work_paused_ is always no greater than bg_compaction_paused_
  if (bg_work_paused_ == 0) {
    MaybeScheduleFlushOrCompaction();
  }
  return Status::OK();
}
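
// Illustrative sketch (not part of this file): a user-side EventListener that
// the notification hooks below end up calling, assuming it is registered via
// Options::listeners before the DB is opened.
//
//   class CompactionLogger : public EventListener {
//    public:
//     void OnCompactionBegin(DB* /*db*/, const CompactionJobInfo& info) override {
//       fprintf(stderr, "[JOB %d] compaction begin in cf %s\n", info.job_id,
//               info.cf_name.c_str());
//     }
//     void OnCompactionCompleted(DB* /*db*/, const CompactionJobInfo& info) override {
//       fprintf(stderr, "[JOB %d] compaction done: %s\n", info.job_id,
//               info.status.ToString().c_str());
//     }
//   };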

void DBImpl::NotifyOnCompactionBegin(ColumnFamilyData* cfd, Compaction* c,
                                     const Status& st,
                                     const CompactionJobStats& job_stats,
                                     int job_id) {
#ifndef ROCKSDB_LITE
  if (immutable_db_options_.listeners.empty()) {
    return;
  }
  mutex_.AssertHeld();
  if (shutting_down_.load(std::memory_order_acquire)) {
    return;
  }
  if (c->is_manual_compaction() &&
      manual_compaction_paused_.load(std::memory_order_acquire) > 0) {
    return;
  }

  c->SetNotifyOnCompactionCompleted();
  Version* current = cfd->current();
  current->Ref();
  // release lock while notifying events
  mutex_.Unlock();
  TEST_SYNC_POINT("DBImpl::NotifyOnCompactionBegin::UnlockMutex");
  {
    CompactionJobInfo info{};
    BuildCompactionJobInfo(cfd, c, st, job_stats, job_id, current, &info);
    for (auto listener : immutable_db_options_.listeners) {
      listener->OnCompactionBegin(this, info);
    }
    info.status.PermitUncheckedError();
  }
  mutex_.Lock();
  current->Unref();
#else
  (void)cfd;
  (void)c;
  (void)st;
  (void)job_stats;
  (void)job_id;
#endif  // ROCKSDB_LITE
}

void DBImpl::NotifyOnCompactionCompleted(
    ColumnFamilyData* cfd, Compaction* c, const Status& st,
    const CompactionJobStats& compaction_job_stats, const int job_id) {
#ifndef ROCKSDB_LITE
  if (immutable_db_options_.listeners.size() == 0U) {
    return;
  }
  mutex_.AssertHeld();
  if (shutting_down_.load(std::memory_order_acquire)) {
    return;
  }

  if (c->ShouldNotifyOnCompactionCompleted() == false) {
    return;
  }

  Version* current = cfd->current();
  current->Ref();
  // release lock while notifying events
  mutex_.Unlock();
  TEST_SYNC_POINT("DBImpl::NotifyOnCompactionCompleted::UnlockMutex");
  {
    CompactionJobInfo info{};
    BuildCompactionJobInfo(cfd, c, st, compaction_job_stats, job_id, current,
                           &info);
    for (auto listener : immutable_db_options_.listeners) {
      listener->OnCompactionCompleted(this, info);
    }
  }
  mutex_.Lock();
  current->Unref();
  // no need to signal bg_cv_ as it will be signaled at the end of the
  // flush process.
#else
  (void)cfd;
  (void)c;
  (void)st;
  (void)compaction_job_stats;
  (void)job_id;
#endif  // ROCKSDB_LITE
}

// REQUIREMENT: block all background work by calling PauseBackgroundWork()
// before calling this function
Status DBImpl::ReFitLevel(ColumnFamilyData* cfd, int level, int target_level) {
  assert(level < cfd->NumberLevels());
  if (target_level >= cfd->NumberLevels()) {
    return Status::InvalidArgument("Target level exceeds number of levels");
  }

  SuperVersionContext sv_context(/* create_superversion */ true);

  InstrumentedMutexLock guard_lock(&mutex_);

  // only allow one thread refitting
  if (refitting_level_) {
    ROCKS_LOG_INFO(immutable_db_options_.info_log,
                   "[ReFitLevel] another thread is refitting");
    return Status::NotSupported("another thread is refitting");
  }
  refitting_level_ = true;

  const MutableCFOptions mutable_cf_options = *cfd->GetLatestMutableCFOptions();
  // If target_level is negative, move to the minimum empty level that can
  // hold the files from `level`.
  int to_level = target_level;
  if (target_level < 0) {
    to_level = FindMinimumEmptyLevelFitting(cfd, mutable_cf_options, level);
  }

  auto* vstorage = cfd->current()->storage_info();
  if (to_level != level) {
    if (to_level > level) {
      if (level == 0) {
        refitting_level_ = false;
        return Status::NotSupported(
            "Cannot change from level 0 to other levels.");
      }
      // Check levels are empty for a trivial move
      for (int l = level + 1; l <= to_level; l++) {
        if (vstorage->NumLevelFiles(l) > 0) {
          refitting_level_ = false;
          return Status::NotSupported(
              "Levels between source and target are not empty for a move.");
        }
      }
    } else {
      // to_level < level
      // Check levels are empty for a trivial move
      for (int l = to_level; l < level; l++) {
        if (vstorage->NumLevelFiles(l) > 0) {
          refitting_level_ = false;
          return Status::NotSupported(
              "Levels between source and target are not empty for a move.");
        }
      }
    }
    ROCKS_LOG_DEBUG(immutable_db_options_.info_log,
                    "[%s] Before refitting:\n%s", cfd->GetName().c_str(),
                    cfd->current()->DebugString().data());

    VersionEdit edit;
    edit.SetColumnFamily(cfd->GetID());
    for (const auto& f : vstorage->LevelFiles(level)) {
      edit.DeleteFile(level, f->fd.GetNumber());
      edit.AddFile(
          to_level, f->fd.GetNumber(), f->fd.GetPathId(), f->fd.GetFileSize(),
          f->smallest, f->largest, f->fd.smallest_seqno, f->fd.largest_seqno,
          f->marked_for_compaction, f->temperature, f->oldest_blob_file_number,
          f->oldest_ancester_time, f->file_creation_time, f->file_checksum,
          f->file_checksum_func_name, f->min_timestamp, f->max_timestamp);
    }
    ROCKS_LOG_DEBUG(immutable_db_options_.info_log,
                    "[%s] Apply version edit:\n%s", cfd->GetName().c_str(),
                    edit.DebugString().data());

    Status status = versions_->LogAndApply(cfd, mutable_cf_options, &edit,
                                           &mutex_, directories_.GetDbDir());

    InstallSuperVersionAndScheduleWork(cfd, &sv_context, mutable_cf_options);

    ROCKS_LOG_DEBUG(immutable_db_options_.info_log, "[%s] LogAndApply: %s\n",
                    cfd->GetName().c_str(), status.ToString().data());

    if (status.ok()) {
      ROCKS_LOG_DEBUG(immutable_db_options_.info_log,
                      "[%s] After refitting:\n%s", cfd->GetName().c_str(),
                      cfd->current()->DebugString().data());
    }
    sv_context.Clean();
    refitting_level_ = false;

    return status;
  }

  refitting_level_ = false;
  return Status::OK();
}

int DBImpl::NumberLevels(ColumnFamilyHandle* column_family) {
  auto cfh = static_cast_with_check<ColumnFamilyHandleImpl>(column_family);
  return cfh->cfd()->NumberLevels();
}

int DBImpl::MaxMemCompactionLevel(ColumnFamilyHandle* /*column_family*/) {
  return 0;
}

int DBImpl::Level0StopWriteTrigger(ColumnFamilyHandle* column_family) {
  auto cfh = static_cast_with_check<ColumnFamilyHandleImpl>(column_family);
  InstrumentedMutexLock l(&mutex_);
  return cfh->cfd()
      ->GetSuperVersion()
      ->mutable_cf_options.level0_stop_writes_trigger;
}
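
// Illustrative caller-side sketch (not part of this file; assumes an open
// DB* named `db`): a blocking manual flush of the default column family.
//
//   FlushOptions fo;
//   fo.wait = true;  // block until the memtable has been persisted
//   Status s = db->Flush(fo);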
Status DBImpl::Flush(const FlushOptions& flush_options,
                     ColumnFamilyHandle* column_family) {
  auto cfh = static_cast_with_check<ColumnFamilyHandleImpl>(column_family);
  ROCKS_LOG_INFO(immutable_db_options_.info_log, "[%s] Manual flush start.",
                 cfh->GetName().c_str());
  Status s;
  if (immutable_db_options_.atomic_flush) {
    s = AtomicFlushMemTables({cfh->cfd()}, flush_options,
                             FlushReason::kManualFlush);
  } else {
    s = FlushMemTable(cfh->cfd(), flush_options, FlushReason::kManualFlush);
  }

  ROCKS_LOG_INFO(immutable_db_options_.info_log,
                 "[%s] Manual flush finished, status: %s\n",
                 cfh->GetName().c_str(), s.ToString().c_str());
  return s;
}

Status DBImpl::Flush(const FlushOptions& flush_options,
                     const std::vector<ColumnFamilyHandle*>& column_families) {
  Status s;
  if (!immutable_db_options_.atomic_flush) {
    for (auto cfh : column_families) {
      s = Flush(flush_options, cfh);
      if (!s.ok()) {
        break;
      }
    }
  } else {
    ROCKS_LOG_INFO(immutable_db_options_.info_log,
                   "Manual atomic flush start.\n"
                   "=====Column families:=====");
    for (auto cfh : column_families) {
      auto cfhi = static_cast<ColumnFamilyHandleImpl*>(cfh);
      ROCKS_LOG_INFO(immutable_db_options_.info_log, "%s",
                     cfhi->GetName().c_str());
    }
    ROCKS_LOG_INFO(immutable_db_options_.info_log,
                   "=====End of column families list=====");
    autovector<ColumnFamilyData*> cfds;
    std::for_each(column_families.begin(), column_families.end(),
                  [&cfds](ColumnFamilyHandle* elem) {
                    auto cfh = static_cast<ColumnFamilyHandleImpl*>(elem);
                    cfds.emplace_back(cfh->cfd());
                  });
    s = AtomicFlushMemTables(cfds, flush_options, FlushReason::kManualFlush);
    ROCKS_LOG_INFO(immutable_db_options_.info_log,
                   "Manual atomic flush finished, status: %s\n"
                   "=====Column families:=====",
                   s.ToString().c_str());
    for (auto cfh : column_families) {
      auto cfhi = static_cast<ColumnFamilyHandleImpl*>(cfh);
      ROCKS_LOG_INFO(immutable_db_options_.info_log, "%s",
                     cfhi->GetName().c_str());
    }
    ROCKS_LOG_INFO(immutable_db_options_.info_log,
                   "=====End of column families list=====");
|
|
|
}
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
2019-04-17 08:29:32 +02:00
|
|
|
Status DBImpl::RunManualCompaction(
|
|
|
|
ColumnFamilyData* cfd, int input_level, int output_level,
|
|
|
|
const CompactRangeOptions& compact_range_options, const Slice* begin,
|
|
|
|
const Slice* end, bool exclusive, bool disallow_trivial_move,
|
2022-03-12 01:13:23 +01:00
|
|
|
uint64_t max_file_num_to_ignore, const std::string& trim_ts) {
|
2017-04-06 02:14:05 +02:00
|
|
|
assert(input_level == ColumnFamilyData::kCompactAllLevels ||
|
|
|
|
input_level >= 0);
|
|
|
|
|
|
|
|
InternalKey begin_storage, end_storage;
|
2022-02-16 02:59:31 +01:00
|
|
|
CompactionArg* ca = nullptr;
|
2017-04-06 02:14:05 +02:00
|
|
|
|
|
|
|
bool scheduled = false;
|
2022-03-02 22:43:00 +01:00
|
|
|
Env::Priority thread_pool_priority = Env::Priority::TOTAL;
|
2017-04-06 02:14:05 +02:00
|
|
|
bool manual_conflict = false;
|
2022-03-13 05:07:04 +01:00
|
|
|
|
|
|
|
auto manual = std::make_shared<ManualCompactionState>(
|
|
|
|
cfd, input_level, output_level, compact_range_options.target_path_id,
|
|
|
|
exclusive, disallow_trivial_move, compact_range_options.canceled);
|
2017-04-06 02:14:05 +02:00
|
|
|
// For universal compaction, we enforce every manual compaction to compact
|
|
|
|
// all files.
|
|
|
|
if (begin == nullptr ||
|
|
|
|
cfd->ioptions()->compaction_style == kCompactionStyleUniversal ||
|
|
|
|
cfd->ioptions()->compaction_style == kCompactionStyleFIFO) {
|
2022-03-13 05:07:04 +01:00
|
|
|
manual->begin = nullptr;
|
2017-04-06 02:14:05 +02:00
|
|
|
} else {
|
2017-09-13 02:16:44 +02:00
|
|
|
begin_storage.SetMinPossibleForUserKey(*begin);
|
2022-03-13 05:07:04 +01:00
|
|
|
manual->begin = &begin_storage;
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
if (end == nullptr ||
|
|
|
|
cfd->ioptions()->compaction_style == kCompactionStyleUniversal ||
|
|
|
|
cfd->ioptions()->compaction_style == kCompactionStyleFIFO) {
|
2022-03-13 05:07:04 +01:00
|
|
|
manual->end = nullptr;
|
2017-04-06 02:14:05 +02:00
|
|
|
} else {
|
2017-09-13 02:16:44 +02:00
|
|
|
end_storage.SetMaxPossibleForUserKey(*end);
|
2022-03-13 05:07:04 +01:00
|
|
|
manual->end = &end_storage;
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
TEST_SYNC_POINT("DBImpl::RunManualCompaction:0");
|
|
|
|
TEST_SYNC_POINT("DBImpl::RunManualCompaction:1");
|
|
|
|
InstrumentedMutexLock l(&mutex_);
|
|
|
|
|
Prevent corruption with parallel manual compactions and `change_level == true` (#9077)
Summary:
The bug can impact the following scenario. There must be two `CompactRange()`s, call them A and B. Compaction A must have `change_level=true`. Compactions A and B must run in parallel, and new data must be added while they run as well.
Now, on to the details of the race condition. Compaction A must reach the refitting phase while B's next step is to trivial move new data (i.e., data that has been inserted behind A) down to the same level that A's refit targets (`CompactRangeOptions::target_level`). B must be unregistered (i.e., has not yet called `AddManualCompaction()` for the current `RunManualCompaction()`) while A invokes `DisableManualCompaction()`s to prepare for refitting. In the old code, B could still proceed to register a manual compaction, while A had disabled manual compaction.
The next part of the race condition is B picks and schedules a trivial move while A has released the lock in refitting phase in order to persist the LSM state change (i.e., the log phase of `LogAndApply()`). That way, B does not see the refitted data when picking a trivial-move compaction. So it is susceptible to picking one that overlaps.
Finally, B executes the picked trivial-move compaction. Trivial-move compactions are special in that they never check whether manual compaction is disabled. So the picked compaction causing overlap ends up being applied, leading to LSM corruption if `force_consistency_checks=false`, or entering read-only mode with `Status::Corruption` if `force_consistency_checks=true` (the default).
The fix is just to prevent B from registering itself in `RunManualCompaction()` while manual compactions are disabled, consequently preventing any trivial move or other compaction from being picked/scheduled.
Thanks to siying for finding the bug.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9077
Test Plan: The test does not go all the way in exposing the bug because it requires a compaction to be picked/scheduled while logging LSM state change for RefitLevel(). But the fix is to make such a compaction not picked/scheduled in the first place, so any repro of that scenario would end up hanging RefitLevel() logging. So instead I just verified no such compaction is registered in the scenario where `RefitLevel()` disables manual compactions.
Reviewed By: siying
Differential Revision: D31921908
Pulled By: ajkr
fbshipit-source-id: 9bb5d0e847ad428211227f40830c685c209fbecb
2021-10-28 08:07:29 +02:00
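To make the racing calls concrete, a rough sketch of the scenario, given an open rocksdb::DB* db (key range and target level are hypothetical; CompactRange()/CompactRangeOptions are the real API):

// Thread A: manual compaction that refits its output to a target level.
rocksdb::CompactRangeOptions opts_a;
opts_a.exclusive_manual_compaction = false;  // let A and B run in parallel
opts_a.change_level = true;
opts_a.target_level = 1;
rocksdb::Status sa = db->CompactRange(opts_a, nullptr, nullptr);

// Thread B, concurrently, while new writes keep arriving: a second manual
// compaction. Before this fix, B could register and pick a trivial move
// while A had manual compactions disabled for its refitting phase.
rocksdb::CompactRangeOptions opts_b;
opts_b.exclusive_manual_compaction = false;
rocksdb::Slice b_begin("a"), b_end("m");
rocksdb::Status sb = db->CompactRange(opts_b, &b_begin, &b_end);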
|
|
|
if (manual_compaction_paused_ > 0) {
|
|
|
|
// Does not make sense to `AddManualCompaction()` in this scenario since
|
|
|
|
// `DisableManualCompaction()` just waited for the manual compaction queue
|
|
|
|
// to drain. So return immediately.
|
|
|
|
TEST_SYNC_POINT("DBImpl::RunManualCompaction:PausedAtStart");
|
2022-03-13 05:07:04 +01:00
|
|
|
manual->status =
|
Prevent corruption with parallel manual compactions and `change_level == true` (#9077)
2021-10-28 08:07:29 +02:00
|
|
|
Status::Incomplete(Status::SubCode::kManualCompactionPaused);
|
2022-03-13 05:07:04 +01:00
|
|
|
manual->done = true;
|
|
|
|
return manual->status;
|
Prevent corruption with parallel manual compactions and `change_level == true` (#9077)
2021-10-28 08:07:29 +02:00
|
|
|
}
|
|
|
|
|
2017-04-06 02:14:05 +02:00
|
|
|
// When a manual compaction arrives, temporarily disable scheduling of
|
|
|
|
// non-manual compactions and wait until the number of scheduled compaction
|
2021-10-08 00:22:34 +02:00
|
|
|
// jobs drops to zero. This used to be needed to ensure that this manual
|
|
|
|
// compaction can compact any range of keys/files. Now it is optional
|
|
|
|
// (see `CompactRangeOptions::exclusive_manual_compaction`). The use case for
|
|
|
|
// `exclusive_manual_compaction=true` (the default) is unclear beyond not
|
|
|
|
// trusting the new code.
|
2017-04-06 02:14:05 +02:00
|
|
|
//
|
|
|
|
// HasPendingManualCompaction() is true when at least one thread is inside
|
|
|
|
// RunManualCompaction(), i.e. during that time no other compaction will
|
|
|
|
// get scheduled (see MaybeScheduleFlushOrCompaction).
|
|
|
|
//
|
|
|
|
// Note that the following loop doesn't stop more than one thread calling
|
|
|
|
// RunManualCompaction() from getting to the second while loop below.
|
|
|
|
// However, only one of them will actually schedule compaction, while
|
|
|
|
// others will wait on a condition variable until it completes.
|
|
|
|
|
2022-03-13 05:07:04 +01:00
|
|
|
AddManualCompaction(manual.get());
|
2017-04-06 02:14:05 +02:00
|
|
|
TEST_SYNC_POINT_CALLBACK("DBImpl::RunManualCompaction:NotScheduled", &mutex_);
|
|
|
|
if (exclusive) {
|
2021-10-08 00:22:34 +02:00
|
|
|
// Limitation: there's no way to wake up the below loop when user sets
|
|
|
|
// `*manual.canceled`. So `CompactRangeOptions::exclusive_manual_compaction`
|
|
|
|
// and `CompactRangeOptions::canceled` might not work well together.
|
2017-08-04 00:36:28 +02:00
|
|
|
while (bg_bottom_compaction_scheduled_ > 0 ||
|
|
|
|
bg_compaction_scheduled_ > 0) {
|
2021-10-08 00:22:34 +02:00
|
|
|
if (manual_compaction_paused_ > 0 ||
|
2022-03-13 05:07:04 +01:00
|
|
|
(manual->canceled != nullptr && *manual->canceled == true)) {
|
2021-10-08 00:22:34 +02:00
|
|
|
// Pretend the error came from compaction so the below cleanup/error
|
|
|
|
// handling code can process it.
|
2022-03-13 05:07:04 +01:00
|
|
|
manual->done = true;
|
|
|
|
manual->status =
|
2021-10-08 00:22:34 +02:00
|
|
|
Status::Incomplete(Status::SubCode::kManualCompactionPaused);
|
|
|
|
break;
|
|
|
|
}
|
2017-05-03 00:01:07 +02:00
|
|
|
TEST_SYNC_POINT("DBImpl::RunManualCompaction:WaitScheduled");
|
2017-04-06 02:14:05 +02:00
|
|
|
ROCKS_LOG_INFO(
|
|
|
|
immutable_db_options_.info_log,
|
|
|
|
"[%s] Manual compaction waiting for all other scheduled background "
|
|
|
|
"compactions to finish",
|
|
|
|
cfd->GetName().c_str());
|
|
|
|
bg_cv_.Wait();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ROCKS_LOG_INFO(immutable_db_options_.info_log,
|
|
|
|
"[%s] Manual compaction starting", cfd->GetName().c_str());
|
|
|
|
|
Concurrent task limiter for compaction thread control (#4332)
Summary:
The PR is targeting to resolve the issue of:
https://github.com/facebook/rocksdb/issues/3972#issue-330771918
We have a RocksDB instance created with leveled compaction and multiple column families (CFs); some CFs use HDD to store big, less frequently accessed data, and others use SSD.
When there is continuous write traffic to all CFs, the compaction thread pool is mostly occupied by the slow HDD compactions, which prevents fully utilizing the SSD bandwidth.
Since atomic writes and transactions are needed across CFs, splitting into multiple RocksDB instances is not an option for us.
With the compaction thread control, we got a 30%+ HDD write throughput gain, and also much smoother SSD writes since fewer write stalls happen.
ConcurrentTaskLimiter can be shared by multiple CFs across RocksDB instances, so the feature works not only for multi-CF scenarios, but also for multi-instance scenarios that need disk IO resource control per tenant.
The usage is straightforward:
e.g.:
//
// Enable compaction thread limiter thru ColumnFamilyOptions
//
std::shared_ptr<ConcurrentTaskLimiter> ctl(NewConcurrentTaskLimiter("foo_limiter", 4));
Options options;
ColumnFamilyOptions cf_opt(options);
cf_opt.compaction_thread_limiter = ctl;
...
//
// Compaction thread limiter can be tuned or disabled on-the-fly
//
ctl->SetMaxOutstandingTask(12); // enlarge to 12 tasks
...
ctl->ResetMaxOutstandingTask(); // disable (bypass) thread limiter
ctl->SetMaxOutstandingTask(-1); // Same as above
...
ctl->SetMaxOutstandingTask(0); // full throttle (0 task)
//
// Sharing compaction thread limiter among CFs (to resolve multiple storage perf issue)
//
std::shared_ptr<ConcurrentTaskLimiter> ctl_ssd(NewConcurrentTaskLimiter("ssd_limiter", 8));
std::shared_ptr<ConcurrentTaskLimiter> ctl_hdd(NewConcurrentTaskLimiter("hdd_limiter", 4));
Options options;
ColumnFamilyOptions cf_opt_ssd1(options);
ColumnFamilyOptions cf_opt_ssd2(options);
ColumnFamilyOptions cf_opt_hdd1(options);
ColumnFamilyOptions cf_opt_hdd2(options);
ColumnFamilyOptions cf_opt_hdd3(options);
// SSD CFs
cf_opt_ssd1.compaction_thread_limiter = ctl_ssd;
cf_opt_ssd2.compaction_thread_limiter = ctl_ssd;
// HDD CFs
cf_opt_hdd1.compaction_thread_limiter = ctl_hdd;
cf_opt_hdd2.compaction_thread_limiter = ctl_hdd;
cf_opt_hdd3.compaction_thread_limiter = ctl_hdd;
...
//
// The limiter is disabled by default (or set to nullptr explicitly)
//
Options options;
ColumnFamilyOptions cf_opt(options);
cf_opt.compaction_thread_limiter = nullptr;
Pull Request resolved: https://github.com/facebook/rocksdb/pull/4332
Differential Revision: D13226590
Pulled By: siying
fbshipit-source-id: 14307aec55b8bd59c8223d04aa6db3c03d1b0c1d
2018-12-13 22:16:04 +01:00
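The snippets in the note above are fragments; stitched into one compilable sketch (DB path and CF names are hypothetical; the option and factory names are the real ones):

#include <memory>
#include <vector>

#include <rocksdb/concurrent_task_limiter.h>
#include <rocksdb/db.h>
#include <rocksdb/options.h>

rocksdb::Status OpenWithSharedHddLimiter(
    rocksdb::DB** db, std::vector<rocksdb::ColumnFamilyHandle*>* handles) {
  // One limiter shared by the two HDD-backed CFs: at most 4 compactions at once.
  std::shared_ptr<rocksdb::ConcurrentTaskLimiter> hdd_limiter(
      rocksdb::NewConcurrentTaskLimiter("hdd_limiter", 4));

  rocksdb::Options options;
  options.create_if_missing = true;
  options.create_missing_column_families = true;

  rocksdb::ColumnFamilyOptions cf_hdd(options);
  cf_hdd.compaction_thread_limiter = hdd_limiter;

  std::vector<rocksdb::ColumnFamilyDescriptor> cfs = {
      {rocksdb::kDefaultColumnFamilyName, rocksdb::ColumnFamilyOptions(options)},
      {"cold1", cf_hdd},
      {"cold2", cf_hdd}};
  // The cap can be re-tuned later, e.g. hdd_limiter->SetMaxOutstandingTask(2).
  return rocksdb::DB::Open(rocksdb::DBOptions(options), "/tmp/limiter_db", cfs,
                           handles, db);
}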
|
|
|
LogBuffer log_buffer(InfoLogLevel::INFO_LEVEL,
|
|
|
|
immutable_db_options_.info_log.get());
|
2017-04-06 02:14:05 +02:00
|
|
|
// We don't check bg_error_ here, because if we get the error in compaction,
|
|
|
|
// the compaction will set manual.status to bg_error_ and set manual.done to
|
|
|
|
// true.
|
2022-03-13 05:07:04 +01:00
|
|
|
while (!manual->done) {
|
2017-04-06 02:14:05 +02:00
|
|
|
assert(HasPendingManualCompaction());
|
|
|
|
manual_conflict = false;
|
2017-10-19 19:48:47 +02:00
|
|
|
Compaction* compaction = nullptr;
|
2022-03-13 05:07:04 +01:00
|
|
|
if (ShouldntRunManualCompaction(manual.get()) ||
|
|
|
|
(manual->in_progress == true) || scheduled ||
|
|
|
|
(((manual->manual_end = &manual->tmp_storage1) != nullptr) &&
|
|
|
|
((compaction = manual->cfd->CompactRange(
|
|
|
|
*manual->cfd->GetLatestMutableCFOptions(), mutable_db_options_,
|
|
|
|
manual->input_level, manual->output_level, compact_range_options,
|
|
|
|
manual->begin, manual->end, &manual->manual_end,
|
|
|
|
&manual_conflict, max_file_num_to_ignore, trim_ts)) == nullptr &&
|
2018-04-13 02:55:14 +02:00
|
|
|
manual_conflict))) {
|
2017-04-06 02:14:05 +02:00
|
|
|
// exclusive manual compactions should not see a conflict during
|
|
|
|
// CompactRange
|
|
|
|
assert(!exclusive || !manual_conflict);
|
|
|
|
// Running either this or some other manual compaction
|
|
|
|
bg_cv_.Wait();
|
2022-03-13 05:07:04 +01:00
|
|
|
if (manual_compaction_paused_ > 0) {
|
|
|
|
manual->done = true;
|
|
|
|
manual->status =
|
2022-02-16 02:59:31 +01:00
|
|
|
Status::Incomplete(Status::SubCode::kManualCompactionPaused);
|
2022-03-13 05:07:04 +01:00
|
|
|
if (scheduled) {
|
|
|
|
assert(thread_pool_priority != Env::Priority::TOTAL);
|
|
|
|
auto unscheduled_task_num = env_->UnSchedule(
|
|
|
|
GetTaskTag(TaskType::kManualCompaction), thread_pool_priority);
|
|
|
|
if (unscheduled_task_num > 0) {
|
|
|
|
ROCKS_LOG_INFO(
|
|
|
|
immutable_db_options_.info_log,
|
|
|
|
"[%s] Unscheduled %d number of manual compactions from the "
|
|
|
|
"thread-pool",
|
|
|
|
cfd->GetName().c_str(), unscheduled_task_num);
|
|
|
|
}
|
|
|
|
}
|
2022-02-16 02:59:31 +01:00
|
|
|
break;
|
|
|
|
}
|
2022-03-13 05:07:04 +01:00
|
|
|
if (scheduled && manual->incomplete == true) {
|
|
|
|
assert(!manual->in_progress);
|
2017-04-06 02:14:05 +02:00
|
|
|
scheduled = false;
|
2022-03-13 05:07:04 +01:00
|
|
|
manual->incomplete = false;
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
} else if (!scheduled) {
|
2017-08-04 00:36:28 +02:00
|
|
|
if (compaction == nullptr) {
|
2022-03-13 05:07:04 +01:00
|
|
|
manual->done = true;
|
2017-04-06 02:14:05 +02:00
|
|
|
bg_cv_.SignalAll();
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
ca = new CompactionArg;
|
|
|
|
ca->db = this;
|
2017-08-04 00:36:28 +02:00
|
|
|
ca->prepicked_compaction = new PrepickedCompaction;
|
2022-03-13 05:07:04 +01:00
|
|
|
ca->prepicked_compaction->manual_compaction_state = manual;
|
2017-08-04 00:36:28 +02:00
|
|
|
ca->prepicked_compaction->compaction = compaction;
|
2019-01-02 18:56:39 +01:00
|
|
|
if (!RequestCompactionToken(
|
|
|
|
cfd, true, &ca->prepicked_compaction->task_token, &log_buffer)) {
|
|
|
|
// Don't throttle manual compaction, only count outstanding tasks.
|
|
|
|
assert(false);
|
Concurrent task limiter for compaction thread control (#4332)
2018-12-13 22:16:04 +01:00
|
|
|
}
|
2022-03-13 05:07:04 +01:00
|
|
|
manual->incomplete = false;
|
2020-03-25 04:20:33 +01:00
|
|
|
if (compaction->bottommost_level() &&
|
|
|
|
env_->GetBackgroundThreads(Env::Priority::BOTTOM) > 0) {
|
2021-11-19 02:26:39 +01:00
|
|
|
bg_bottom_compaction_scheduled_++;
|
|
|
|
ca->compaction_pri_ = Env::Priority::BOTTOM;
|
|
|
|
env_->Schedule(&DBImpl::BGWorkBottomCompaction, ca,
|
2022-03-02 22:43:00 +01:00
|
|
|
Env::Priority::BOTTOM,
|
|
|
|
GetTaskTag(TaskType::kManualCompaction),
|
2021-11-19 02:26:39 +01:00
|
|
|
&DBImpl::UnscheduleCompactionCallback);
|
2022-03-02 22:43:00 +01:00
|
|
|
thread_pool_priority = Env::Priority::BOTTOM;
|
2021-11-19 02:26:39 +01:00
|
|
|
} else {
|
|
|
|
bg_compaction_scheduled_++;
|
|
|
|
ca->compaction_pri_ = Env::Priority::LOW;
|
2022-03-02 22:43:00 +01:00
|
|
|
env_->Schedule(&DBImpl::BGWorkCompaction, ca, Env::Priority::LOW,
|
|
|
|
GetTaskTag(TaskType::kManualCompaction),
|
2021-11-19 02:26:39 +01:00
|
|
|
&DBImpl::UnscheduleCompactionCallback);
|
2022-03-02 22:43:00 +01:00
|
|
|
thread_pool_priority = Env::Priority::LOW;
|
2020-03-25 04:20:33 +01:00
|
|
|
}
|
2017-04-06 02:14:05 +02:00
|
|
|
scheduled = true;
|
2022-02-16 02:59:31 +01:00
|
|
|
TEST_SYNC_POINT("DBImpl::RunManualCompaction:Scheduled");
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Concurrent task limiter for compaction thread control (#4332)
2018-12-13 22:16:04 +01:00
|
|
|
log_buffer.FlushBufferToLog();
|
2022-03-13 05:07:04 +01:00
|
|
|
assert(!manual->in_progress);
|
2017-04-06 02:14:05 +02:00
|
|
|
assert(HasPendingManualCompaction());
|
2022-03-13 05:07:04 +01:00
|
|
|
RemoveManualCompaction(manual.get());
|
2022-03-02 22:43:00 +01:00
|
|
|
// if the manual job is unscheduled, try to schedule other jobs in case there's
|
|
|
|
// any unscheduled compaction job which was blocked by exclusive manual
|
|
|
|
// compaction.
|
2022-03-13 05:07:04 +01:00
|
|
|
if (manual->status.IsIncomplete() &&
|
|
|
|
manual->status.subcode() == Status::SubCode::kManualCompactionPaused) {
|
2022-03-02 22:43:00 +01:00
|
|
|
MaybeScheduleFlushOrCompaction();
|
|
|
|
}
|
2017-04-06 02:14:05 +02:00
|
|
|
bg_cv_.SignalAll();
|
2022-03-13 05:07:04 +01:00
|
|
|
return manual->status;
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
|
2018-10-27 00:06:44 +02:00
|
|
|
void DBImpl::GenerateFlushRequest(const autovector<ColumnFamilyData*>& cfds,
|
|
|
|
FlushRequest* req) {
|
|
|
|
assert(req != nullptr);
|
2019-01-12 02:40:44 +01:00
|
|
|
req->reserve(cfds.size());
|
2018-10-27 00:06:44 +02:00
|
|
|
for (const auto cfd : cfds) {
|
|
|
|
if (nullptr == cfd) {
|
|
|
|
// cfd may be null, see DBImpl::ScheduleFlushes
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
uint64_t max_memtable_id = cfd->imm()->GetLatestMemTableID();
|
|
|
|
req->emplace_back(cfd, max_memtable_id);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-04-06 02:14:05 +02:00
|
|
|
Status DBImpl::FlushMemTable(ColumnFamilyData* cfd,
|
|
|
|
const FlushOptions& flush_options,
|
2018-02-09 21:09:55 +01:00
|
|
|
FlushReason flush_reason, bool writes_stopped) {
|
2020-12-02 18:29:50 +01:00
|
|
|
// This method should not be called if atomic_flush is true.
|
|
|
|
assert(!immutable_db_options_.atomic_flush);
|
2017-04-06 02:14:05 +02:00
|
|
|
Status s;
|
2018-08-29 20:58:13 +02:00
|
|
|
if (!flush_options.allow_write_stall) {
|
|
|
|
bool flush_needed = true;
|
|
|
|
s = WaitUntilFlushWouldNotStallWrites(cfd, &flush_needed);
|
|
|
|
TEST_SYNC_POINT("DBImpl::FlushMemTable:StallWaitDone");
|
|
|
|
if (!s.ok() || !flush_needed) {
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
}
|
2020-09-18 05:22:35 +02:00
|
|
|
|
2020-12-02 18:29:50 +01:00
|
|
|
autovector<FlushRequest> flush_reqs;
|
|
|
|
autovector<uint64_t> memtable_ids_to_wait;
|
2017-04-06 02:14:05 +02:00
|
|
|
{
|
|
|
|
WriteContext context;
|
|
|
|
InstrumentedMutexLock guard_lock(&mutex_);
|
|
|
|
|
|
|
|
WriteThread::Writer w;
|
2019-11-15 22:59:03 +01:00
|
|
|
WriteThread::Writer nonmem_w;
|
2017-04-06 02:14:05 +02:00
|
|
|
if (!writes_stopped) {
|
|
|
|
write_thread_.EnterUnbatched(&w, &mutex_);
|
2019-11-15 22:59:03 +01:00
|
|
|
if (two_write_queues_) {
|
|
|
|
nonmem_write_thread_.EnterUnbatched(&nonmem_w, &mutex_);
|
|
|
|
}
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
2019-12-12 23:05:48 +01:00
|
|
|
WaitForPendingWrites();
|
2017-04-06 02:14:05 +02:00
|
|
|
|
Fix assert(cfd->imm()->NumNotFlushed() > 0) in FlushMemtable (#7744)
Summary:
In the current code base, in FlushMemtable, when `(Flush_reason == FlushReason::kErrorRecoveryRetryFlush && (!cfd->mem()->IsEmpty() || !cached_recoverable_state_empty_.load()))`, we assert that cfd->imm()->NumNotFlushed() > 0. However, there are some corner cases that can fail this assert: 1) if there are multiple CFs, some with immutable memtables and some without, then in ResumeImpl all CFs will call FlushMemtable, which will hit the assert. 2) A regular flush is scheduled and running while the resume thread is waiting. New KVs are inserted and SchedulePendingFlush is called. The regular flush will continue to call MaybeScheduleFlushAndCompaction until all the immutable memtables are flushed. When the regular flush ends and the auto resume thread starts to schedule new flushes, cfd->imm()->NumNotFlushed() can be 0.
Remove the assert and add the comments.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/7744
Test Plan: make check and pass the stress test
Reviewed By: riversand963
Differential Revision: D25340573
Pulled By: zhichao-cao
fbshipit-source-id: eac357bdace660247c197f01a9ff6857e3c97672
2020-12-05 05:30:23 +01:00
|
|
|
if (flush_reason != FlushReason::kErrorRecoveryRetryFlush &&
|
|
|
|
(!cfd->mem()->IsEmpty() || !cached_recoverable_state_empty_.load())) {
|
|
|
|
// Note that, when flush reason is kErrorRecoveryRetryFlush, during the
|
|
|
|
// auto retry resume, we want to avoid creating new small memtables.
|
|
|
|
// Therefore, SwitchMemtable will not be called. Also, since ResumeImpl
|
|
|
|
// will iterate through all the CFs and call FlushMemtable during auto
|
|
|
|
// retry resume, it is possible that in some CFs,
|
|
|
|
// cfd->imm()->NumNotFlushed() = 0. In this case, no flush request will
|
|
|
|
// be created or scheduled, and Status::OK() will be returned.
|
|
|
|
s = SwitchMemtable(cfd, &context);
|
2018-12-19 01:43:12 +01:00
|
|
|
}
|
2020-12-02 18:29:50 +01:00
|
|
|
const uint64_t flush_memtable_id = port::kMaxUint64;
|
2018-12-19 01:43:12 +01:00
|
|
|
if (s.ok()) {
|
|
|
|
if (cfd->imm()->NumNotFlushed() != 0 || !cfd->mem()->IsEmpty() ||
|
|
|
|
!cached_recoverable_state_empty_.load()) {
|
2020-12-02 18:29:50 +01:00
|
|
|
FlushRequest req{{cfd, flush_memtable_id}};
|
|
|
|
flush_reqs.emplace_back(std::move(req));
|
|
|
|
memtable_ids_to_wait.emplace_back(cfd->imm()->GetLatestMemTableID());
|
2018-12-19 01:43:12 +01:00
|
|
|
}
|
2020-09-18 05:22:35 +02:00
|
|
|
if (immutable_db_options_.persist_stats_to_disk &&
|
|
|
|
flush_reason != FlushReason::kErrorRecoveryRetryFlush) {
|
2019-07-01 20:53:25 +02:00
|
|
|
ColumnFamilyData* cfd_stats =
|
|
|
|
versions_->GetColumnFamilySet()->GetColumnFamily(
|
|
|
|
kPersistentStatsColumnFamilyName);
|
|
|
|
if (cfd_stats != nullptr && cfd_stats != cfd &&
|
|
|
|
!cfd_stats->mem()->IsEmpty()) {
|
|
|
|
// only force flush stats CF when it will be the only CF lagging
|
|
|
|
// behind after the current flush
|
|
|
|
bool stats_cf_flush_needed = true;
|
|
|
|
for (auto* loop_cfd : *versions_->GetColumnFamilySet()) {
|
|
|
|
if (loop_cfd == cfd_stats || loop_cfd == cfd) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (loop_cfd->GetLogNumber() <= cfd_stats->GetLogNumber()) {
|
|
|
|
stats_cf_flush_needed = false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (stats_cf_flush_needed) {
|
|
|
|
ROCKS_LOG_INFO(immutable_db_options_.info_log,
|
|
|
|
"Force flushing stats CF with manual flush of %s "
|
2019-09-20 21:00:55 +02:00
|
|
|
"to avoid holding old logs",
|
|
|
|
cfd->GetName().c_str());
|
2019-07-01 20:53:25 +02:00
|
|
|
s = SwitchMemtable(cfd_stats, &context);
|
2020-12-02 18:29:50 +01:00
|
|
|
FlushRequest req{{cfd_stats, flush_memtable_id}};
|
|
|
|
flush_reqs.emplace_back(std::move(req));
|
|
|
|
memtable_ids_to_wait.emplace_back(
|
|
|
|
cfd->imm()->GetLatestMemTableID());
|
2019-07-01 20:53:25 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2018-08-24 22:17:29 +02:00
|
|
|
}
|
2020-12-02 18:29:50 +01:00
|
|
|
|
|
|
|
if (s.ok() && !flush_reqs.empty()) {
|
|
|
|
for (const auto& req : flush_reqs) {
|
|
|
|
assert(req.size() == 1);
|
|
|
|
ColumnFamilyData* loop_cfd = req[0].first;
|
2018-08-24 22:17:29 +02:00
|
|
|
loop_cfd->imm()->FlushRequested();
|
|
|
|
}
|
2019-07-01 23:04:10 +02:00
|
|
|
// If the caller wants to wait for this flush to complete, it indicates
|
|
|
|
// that the caller expects the ColumnFamilyData not to be free'ed by
|
|
|
|
// other threads which may drop the column family concurrently.
|
|
|
|
// Therefore, we increase the cfd's ref count.
|
|
|
|
if (flush_options.wait) {
|
2020-12-02 18:29:50 +01:00
|
|
|
for (const auto& req : flush_reqs) {
|
|
|
|
assert(req.size() == 1);
|
|
|
|
ColumnFamilyData* loop_cfd = req[0].first;
|
2019-07-01 23:04:10 +02:00
|
|
|
loop_cfd->Ref();
|
|
|
|
}
|
|
|
|
}
|
2020-12-02 18:29:50 +01:00
|
|
|
for (const auto& req : flush_reqs) {
|
|
|
|
SchedulePendingFlush(req, flush_reason);
|
|
|
|
}
|
2018-08-24 22:17:29 +02:00
|
|
|
MaybeScheduleFlushOrCompaction();
|
|
|
|
}
|
2017-04-06 02:14:05 +02:00
|
|
|
|
|
|
|
if (!writes_stopped) {
|
|
|
|
write_thread_.ExitUnbatched(&w);
|
2019-11-15 22:59:03 +01:00
|
|
|
if (two_write_queues_) {
|
|
|
|
nonmem_write_thread_.ExitUnbatched(&nonmem_w);
|
|
|
|
}
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
}
|
2019-07-01 23:04:10 +02:00
|
|
|
TEST_SYNC_POINT("DBImpl::FlushMemTable:AfterScheduleFlush");
|
|
|
|
TEST_SYNC_POINT("DBImpl::FlushMemTable:BeforeWaitForBgFlush");
|
2017-04-06 02:14:05 +02:00
|
|
|
if (s.ok() && flush_options.wait) {
|
2018-08-24 22:17:29 +02:00
|
|
|
autovector<ColumnFamilyData*> cfds;
|
|
|
|
autovector<const uint64_t*> flush_memtable_ids;
|
2020-12-02 18:29:50 +01:00
|
|
|
assert(flush_reqs.size() == memtable_ids_to_wait.size());
|
|
|
|
for (size_t i = 0; i < flush_reqs.size(); ++i) {
|
|
|
|
assert(flush_reqs[i].size() == 1);
|
|
|
|
cfds.push_back(flush_reqs[i][0].first);
|
|
|
|
flush_memtable_ids.push_back(&(memtable_ids_to_wait[i]));
|
2018-08-24 22:17:29 +02:00
|
|
|
}
|
2020-09-18 05:22:35 +02:00
|
|
|
s = WaitForFlushMemTables(
|
|
|
|
cfds, flush_memtable_ids,
|
|
|
|
(flush_reason == FlushReason::kErrorRecovery ||
|
|
|
|
flush_reason == FlushReason::kErrorRecoveryRetryFlush));
|
2019-12-13 04:02:51 +01:00
|
|
|
InstrumentedMutexLock lock_guard(&mutex_);
|
2019-07-01 23:04:10 +02:00
|
|
|
for (auto* tmp_cfd : cfds) {
|
2019-12-13 04:02:51 +01:00
|
|
|
tmp_cfd->UnrefAndTryDelete();
|
2019-07-01 23:04:10 +02:00
|
|
|
}
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
2019-12-12 23:05:48 +01:00
|
|
|
TEST_SYNC_POINT("DBImpl::FlushMemTable:FlushMemTableFinished");
|
2017-04-06 02:14:05 +02:00
|
|
|
return s;
|
|
|
|
}
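From the public API this path is reached through DB::Flush(); a minimal sketch of the relevant FlushOptions knobs, given an open rocksdb::DB* db (handle name hypothetical):

rocksdb::FlushOptions fo;
fo.wait = true;                // block until the flush finishes
fo.allow_write_stall = false;  // first wait for stall conditions to clear
                               // (see WaitUntilFlushWouldNotStallWrites below)
rocksdb::Status s = db->Flush(fo);     // default column family
// or: db->Flush(fo, some_cf_handle);  // a specific column family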
|
|
|
|
|
2019-12-12 23:05:48 +01:00
|
|
|
// Flush all elements in 'column_family_datas'
|
2018-10-27 00:06:44 +02:00
|
|
|
// and atomically record the result to the MANIFEST.
|
|
|
|
Status DBImpl::AtomicFlushMemTables(
|
|
|
|
const autovector<ColumnFamilyData*>& column_family_datas,
|
|
|
|
const FlushOptions& flush_options, FlushReason flush_reason,
|
|
|
|
bool writes_stopped) {
|
|
|
|
Status s;
|
|
|
|
if (!flush_options.allow_write_stall) {
|
|
|
|
int num_cfs_to_flush = 0;
|
|
|
|
for (auto cfd : column_family_datas) {
|
|
|
|
bool flush_needed = true;
|
|
|
|
s = WaitUntilFlushWouldNotStallWrites(cfd, &flush_needed);
|
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
|
|
|
} else if (flush_needed) {
|
|
|
|
++num_cfs_to_flush;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (0 == num_cfs_to_flush) {
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
FlushRequest flush_req;
|
|
|
|
autovector<ColumnFamilyData*> cfds;
|
|
|
|
{
|
|
|
|
WriteContext context;
|
|
|
|
InstrumentedMutexLock guard_lock(&mutex_);
|
|
|
|
|
|
|
|
WriteThread::Writer w;
|
2019-11-15 22:59:03 +01:00
|
|
|
WriteThread::Writer nonmem_w;
|
2018-10-27 00:06:44 +02:00
|
|
|
if (!writes_stopped) {
|
|
|
|
write_thread_.EnterUnbatched(&w, &mutex_);
|
2019-11-15 22:59:03 +01:00
|
|
|
if (two_write_queues_) {
|
|
|
|
nonmem_write_thread_.EnterUnbatched(&nonmem_w, &mutex_);
|
|
|
|
}
|
2018-10-27 00:06:44 +02:00
|
|
|
}
|
2019-12-12 23:05:48 +01:00
|
|
|
WaitForPendingWrites();
|
2018-10-27 00:06:44 +02:00
|
|
|
|
|
|
|
for (auto cfd : column_family_datas) {
|
|
|
|
if (cfd->IsDropped()) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (cfd->imm()->NumNotFlushed() != 0 || !cfd->mem()->IsEmpty() ||
|
|
|
|
!cached_recoverable_state_empty_.load()) {
|
|
|
|
cfds.emplace_back(cfd);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for (auto cfd : cfds) {
|
2020-09-18 05:22:35 +02:00
|
|
|
if ((cfd->mem()->IsEmpty() && cached_recoverable_state_empty_.load()) ||
|
|
|
|
flush_reason == FlushReason::kErrorRecoveryRetryFlush) {
|
2018-12-19 01:43:12 +01:00
|
|
|
continue;
|
|
|
|
}
|
2018-10-27 00:06:44 +02:00
|
|
|
cfd->Ref();
|
|
|
|
s = SwitchMemtable(cfd, &context);
|
2019-12-13 04:02:51 +01:00
|
|
|
cfd->UnrefAndTryDelete();
|
2018-10-27 00:06:44 +02:00
|
|
|
if (!s.ok()) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (s.ok()) {
|
|
|
|
AssignAtomicFlushSeq(cfds);
|
|
|
|
for (auto cfd : cfds) {
|
|
|
|
cfd->imm()->FlushRequested();
|
|
|
|
}
|
2019-07-01 23:04:10 +02:00
|
|
|
// If the caller wants to wait for this flush to complete, it indicates
|
|
|
|
// that the caller expects the ColumnFamilyData not to be free'ed by
|
|
|
|
// other threads which may drop the column family concurrently.
|
|
|
|
// Therefore, we increase the cfd's ref count.
|
|
|
|
if (flush_options.wait) {
|
|
|
|
for (auto cfd : cfds) {
|
|
|
|
cfd->Ref();
|
|
|
|
}
|
|
|
|
}
|
2018-10-27 00:06:44 +02:00
|
|
|
GenerateFlushRequest(cfds, &flush_req);
|
|
|
|
SchedulePendingFlush(flush_req, flush_reason);
|
|
|
|
MaybeScheduleFlushOrCompaction();
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!writes_stopped) {
|
|
|
|
write_thread_.ExitUnbatched(&w);
|
2019-11-15 22:59:03 +01:00
|
|
|
if (two_write_queues_) {
|
|
|
|
nonmem_write_thread_.ExitUnbatched(&nonmem_w);
|
|
|
|
}
|
2018-10-27 00:06:44 +02:00
|
|
|
}
|
|
|
|
}
|
2018-12-14 00:10:16 +01:00
|
|
|
TEST_SYNC_POINT("DBImpl::AtomicFlushMemTables:AfterScheduleFlush");
|
2019-07-01 23:04:10 +02:00
|
|
|
TEST_SYNC_POINT("DBImpl::AtomicFlushMemTables:BeforeWaitForBgFlush");
|
2018-10-27 00:06:44 +02:00
|
|
|
if (s.ok() && flush_options.wait) {
|
|
|
|
autovector<const uint64_t*> flush_memtable_ids;
|
|
|
|
for (auto& iter : flush_req) {
|
|
|
|
flush_memtable_ids.push_back(&(iter.second));
|
|
|
|
}
|
2020-09-18 05:22:35 +02:00
|
|
|
s = WaitForFlushMemTables(
|
|
|
|
cfds, flush_memtable_ids,
|
|
|
|
(flush_reason == FlushReason::kErrorRecovery ||
|
|
|
|
flush_reason == FlushReason::kErrorRecoveryRetryFlush));
|
2019-12-13 04:02:51 +01:00
|
|
|
InstrumentedMutexLock lock_guard(&mutex_);
|
2019-07-01 23:04:10 +02:00
|
|
|
for (auto* cfd : cfds) {
|
2019-12-13 04:02:51 +01:00
|
|
|
cfd->UnrefAndTryDelete();
|
2019-07-01 23:04:10 +02:00
|
|
|
}
|
2018-10-27 00:06:44 +02:00
|
|
|
}
|
|
|
|
return s;
|
|
|
|
}
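For reference, the atomic path is driven from the public API roughly like this; a sketch assuming the DB was opened with atomic_flush=true and that cf1/cf2 are hypothetical column family handles:

rocksdb::Options options;
options.atomic_flush = true;  // must be set before/at DB::Open()
// ... open the DB with column families cf1 and cf2 ...

rocksdb::FlushOptions fo;
fo.wait = true;
// Flushes both CFs and records the result atomically in the MANIFEST.
rocksdb::Status s = db->Flush(fo, {cf1, cf2});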
|
|
|
|
|
2018-08-29 20:58:13 +02:00
|
|
|
// Calling FlushMemTable(), whether from DB::Flush() or from Backup Engine, can
|
|
|
|
// cause write stall, for example if one memtable is being flushed already.
|
|
|
|
// This method tries to avoid a write stall (similar to CompactRange() behavior):
|
|
|
|
// it emulates how the SuperVersion / LSM would change if flush happens, checks
|
|
|
|
// it against various constraints, and delays the flush if it'd cause a write stall.
|
|
|
|
// Caller should check status and flush_needed to see if flush already happened.
|
|
|
|
Status DBImpl::WaitUntilFlushWouldNotStallWrites(ColumnFamilyData* cfd,
|
2019-03-28 00:13:08 +01:00
|
|
|
bool* flush_needed) {
|
2018-08-29 20:58:13 +02:00
|
|
|
{
|
|
|
|
*flush_needed = true;
|
|
|
|
InstrumentedMutexLock l(&mutex_);
|
|
|
|
uint64_t orig_active_memtable_id = cfd->mem()->GetID();
|
|
|
|
WriteStallCondition write_stall_condition = WriteStallCondition::kNormal;
|
|
|
|
do {
|
|
|
|
if (write_stall_condition != WriteStallCondition::kNormal) {
|
2018-11-01 23:23:20 +01:00
|
|
|
// Same error handling as user writes: Don't wait if there's a
|
|
|
|
// background error, even if it's a soft error. We might wait here
|
|
|
|
// indefinitely as the pending flushes/compactions may never finish
|
|
|
|
// successfully, resulting in the stall condition lasting indefinitely
|
|
|
|
if (error_handler_.IsBGWorkStopped()) {
|
|
|
|
return error_handler_.GetBGError();
|
|
|
|
}
|
|
|
|
|
2018-08-29 20:58:13 +02:00
|
|
|
TEST_SYNC_POINT("DBImpl::WaitUntilFlushWouldNotStallWrites:StallWait");
|
|
|
|
ROCKS_LOG_INFO(immutable_db_options_.info_log,
|
|
|
|
"[%s] WaitUntilFlushWouldNotStallWrites"
|
|
|
|
" waiting on stall conditions to clear",
|
|
|
|
cfd->GetName().c_str());
|
|
|
|
bg_cv_.Wait();
|
|
|
|
}
|
2019-05-20 19:37:37 +02:00
|
|
|
if (cfd->IsDropped()) {
|
|
|
|
return Status::ColumnFamilyDropped();
|
|
|
|
}
|
|
|
|
if (shutting_down_.load(std::memory_order_acquire)) {
|
2018-08-29 20:58:13 +02:00
|
|
|
return Status::ShutdownInProgress();
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t earliest_memtable_id =
|
|
|
|
std::min(cfd->mem()->GetID(), cfd->imm()->GetEarliestMemTableID());
|
|
|
|
if (earliest_memtable_id > orig_active_memtable_id) {
|
|
|
|
// We waited so long that the memtable we were originally waiting on was
|
|
|
|
// flushed.
|
|
|
|
*flush_needed = false;
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
|
|
|
const auto& mutable_cf_options = *cfd->GetLatestMutableCFOptions();
|
|
|
|
const auto* vstorage = cfd->current()->storage_info();
|
|
|
|
|
|
|
|
// Skip stalling check if we're below auto-flush and auto-compaction
|
|
|
|
// triggers. If it stalled in these conditions, that'd mean the stall
|
|
|
|
// triggers are so low that stalling is needed for any background work. In
|
|
|
|
// that case we shouldn't wait since background work won't be scheduled.
|
|
|
|
if (cfd->imm()->NumNotFlushed() <
|
|
|
|
cfd->ioptions()->min_write_buffer_number_to_merge &&
|
|
|
|
vstorage->l0_delay_trigger_count() <
|
|
|
|
mutable_cf_options.level0_file_num_compaction_trigger) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
// check whether one extra immutable memtable or an extra L0 file would
|
|
|
|
// cause write stalling mode to be entered. It could still enter stall
|
|
|
|
// mode due to pending compaction bytes, but that's less common
|
2021-03-09 11:19:28 +01:00
|
|
|
write_stall_condition = ColumnFamilyData::GetWriteStallConditionAndCause(
|
|
|
|
cfd->imm()->NumNotFlushed() + 1,
|
|
|
|
vstorage->l0_delay_trigger_count() + 1,
|
|
|
|
vstorage->estimated_compaction_needed_bytes(),
|
|
|
|
mutable_cf_options, *cfd->ioptions())
|
|
|
|
.first;
|
2018-08-29 20:58:13 +02:00
|
|
|
} while (write_stall_condition != WriteStallCondition::kNormal);
|
|
|
|
}
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
2018-08-24 22:17:29 +02:00
|
|
|
// Wait for memtables to be flushed for multiple column families.
|
|
|
|
// let N = cfds.size()
|
|
|
|
// for i in [0, N),
|
|
|
|
// 1) if flush_memtable_ids[i] is not null, then the memtables with lower IDs
|
|
|
|
// have to be flushed for THIS column family;
|
|
|
|
// 2) if flush_memtable_ids[i] is null, then all memtables in THIS column
|
|
|
|
// family have to be flushed.
|
|
|
|
// Finish waiting when ALL column families finish flushing memtables.
|
2018-10-27 00:06:44 +02:00
|
|
|
// resuming_from_bg_err indicates whether the caller is trying to resume from
|
|
|
|
// background error or in normal processing.
|
2018-08-24 22:17:29 +02:00
|
|
|
Status DBImpl::WaitForFlushMemTables(
|
|
|
|
const autovector<ColumnFamilyData*>& cfds,
|
2018-10-27 00:06:44 +02:00
|
|
|
const autovector<const uint64_t*>& flush_memtable_ids,
|
|
|
|
bool resuming_from_bg_err) {
|
2018-08-24 22:17:29 +02:00
|
|
|
int num = static_cast<int>(cfds.size());
|
2017-04-06 02:14:05 +02:00
|
|
|
// Wait until the flush completes
|
|
|
|
InstrumentedMutexLock l(&mutex_);
|
2022-01-12 22:20:46 +01:00
|
|
|
Status s;
|
2018-10-27 00:06:44 +02:00
|
|
|
// If the caller is trying to resume from bg error, then
|
|
|
|
// error_handler_.IsDBStopped() is true.
|
|
|
|
while (resuming_from_bg_err || !error_handler_.IsDBStopped()) {
|
2017-04-06 02:14:05 +02:00
|
|
|
if (shutting_down_.load(std::memory_order_acquire)) {
|
2022-01-12 22:20:46 +01:00
|
|
|
s = Status::ShutdownInProgress();
|
|
|
|
return s;
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
2018-10-27 00:06:44 +02:00
|
|
|
// If an error has occurred during resumption, then no need to wait.
|
2022-01-12 22:20:46 +01:00
|
|
|
// But flush operation may fail because of this error, so need to
|
|
|
|
// return the status.
|
2018-10-27 00:06:44 +02:00
|
|
|
if (!error_handler_.GetRecoveryError().ok()) {
|
2022-01-12 22:20:46 +01:00
|
|
|
s = error_handler_.GetRecoveryError();
|
2018-10-27 00:06:44 +02:00
|
|
|
break;
|
|
|
|
}
|
2020-09-18 05:22:35 +02:00
|
|
|
// If BGWorkStopped, which indicates that there is a BG error and
|
|
|
|
// 1) soft error but requires no BG work, 2) not in auto_recovery_
|
|
|
|
if (!resuming_from_bg_err && error_handler_.IsBGWorkStopped() &&
|
|
|
|
error_handler_.GetBGError().severity() < Status::Severity::kHardError) {
|
2022-01-12 22:20:46 +01:00
|
|
|
s = error_handler_.GetBGError();
|
|
|
|
return s;
|
2020-09-18 05:22:35 +02:00
|
|
|
}
|
|
|
|
|
2018-08-24 22:17:29 +02:00
|
|
|
// Number of column families that have been dropped.
|
|
|
|
int num_dropped = 0;
|
|
|
|
// Number of column families that have finished flush.
|
|
|
|
int num_finished = 0;
|
|
|
|
for (int i = 0; i < num; ++i) {
|
|
|
|
if (cfds[i]->IsDropped()) {
|
|
|
|
++num_dropped;
|
|
|
|
} else if (cfds[i]->imm()->NumNotFlushed() == 0 ||
|
|
|
|
(flush_memtable_ids[i] != nullptr &&
|
|
|
|
cfds[i]->imm()->GetEarliestMemTableID() >
|
|
|
|
*flush_memtable_ids[i])) {
|
|
|
|
++num_finished;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (1 == num_dropped && 1 == num) {
|
2022-01-12 22:20:46 +01:00
|
|
|
s = Status::ColumnFamilyDropped();
|
|
|
|
return s;
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
2018-08-24 22:17:29 +02:00
|
|
|
// Column families involved in this flush request have either been dropped
|
|
|
|
// or finished flush. Then it's time to finish waiting.
|
|
|
|
if (num_dropped + num_finished == num) {
|
|
|
|
break;
|
|
|
|
}
|
2017-04-06 02:14:05 +02:00
|
|
|
bg_cv_.Wait();
|
|
|
|
}
|
2018-10-27 00:06:44 +02:00
|
|
|
// If not resuming from bg error, and an error has caused the DB to stop,
|
|
|
|
// then report the bg error to caller.
|
|
|
|
if (!resuming_from_bg_err && error_handler_.IsDBStopped()) {
|
2018-06-28 21:23:57 +02:00
|
|
|
s = error_handler_.GetBGError();
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
|
|
|
Status DBImpl::EnableAutoCompaction(
|
|
|
|
const std::vector<ColumnFamilyHandle*>& column_family_handles) {
|
|
|
|
Status s;
|
|
|
|
for (auto cf_ptr : column_family_handles) {
|
|
|
|
Status status =
|
|
|
|
this->SetOptions(cf_ptr, {{"disable_auto_compactions", "false"}});
|
|
|
|
if (!status.ok()) {
|
|
|
|
s = status;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return s;
|
|
|
|
}
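A typical bulk-load pattern around this API, sketched under the assumption of an already-open db (disable_auto_compactions and EnableAutoCompaction() are the real knobs):

// At open time: start with auto compactions off for faster bulk ingestion.
rocksdb::Options options;
options.disable_auto_compactions = true;
// ... open db, ingest data ...

// Afterwards: turn auto compactions back on for the column families of interest.
rocksdb::Status s = db->EnableAutoCompaction({db->DefaultColumnFamily()});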
|
|
|
|
|
2019-09-17 06:00:13 +02:00
|
|
|
void DBImpl::DisableManualCompaction() {
|
2020-08-14 20:28:12 +02:00
|
|
|
InstrumentedMutexLock l(&mutex_);
|
|
|
|
manual_compaction_paused_.fetch_add(1, std::memory_order_release);
|
2021-10-08 00:22:34 +02:00
|
|
|
|
|
|
|
// Wake up manual compactions waiting to start.
|
|
|
|
bg_cv_.SignalAll();
|
|
|
|
|
2020-08-14 20:28:12 +02:00
|
|
|
// Wait for any pending manual compactions to finish (typically through
|
|
|
|
// failing with `Status::Incomplete`) prior to returning. This way we are
|
|
|
|
// guaranteed no pending manual compaction will commit while manual
|
|
|
|
// compactions are "disabled".
|
|
|
|
while (HasPendingManualCompaction()) {
|
|
|
|
bg_cv_.Wait();
|
|
|
|
}
|
2019-09-17 06:00:13 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void DBImpl::EnableManualCompaction() {
|
2020-08-14 20:28:12 +02:00
|
|
|
InstrumentedMutexLock l(&mutex_);
|
|
|
|
assert(manual_compaction_paused_ > 0);
|
|
|
|
manual_compaction_paused_.fetch_sub(1, std::memory_order_release);
|
2019-09-17 06:00:13 +02:00
|
|
|
}
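Both methods are exposed on DB; a sketch of using them to interrupt an in-flight CompactRange() from another thread, given an open rocksdb::DB* db (thread handling is illustrative only):

#include <thread>

std::thread compactor([&] {
  rocksdb::Status s =
      db->CompactRange(rocksdb::CompactRangeOptions(), nullptr, nullptr);
  // With the pause below, s typically becomes Status::Incomplete() with
  // subcode kManualCompactionPaused.
});
db->DisableManualCompaction();  // returns once pending manual compactions bail out
// ... latency-sensitive work ...
db->EnableManualCompaction();   // manual compactions are allowed again
compactor.join();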
|
|
|
|
|
2017-04-06 02:14:05 +02:00
|
|
|
void DBImpl::MaybeScheduleFlushOrCompaction() {
|
|
|
|
mutex_.AssertHeld();
|
|
|
|
if (!opened_successfully_) {
|
|
|
|
// Compaction may introduce data race to DB open
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (bg_work_paused_ > 0) {
|
|
|
|
// we paused the background work
|
|
|
|
return;
|
Auto recovery from out of space errors (#4164)
2018-09-15 22:36:19 +02:00
|
|
|
} else if (error_handler_.IsBGWorkStopped() &&
|
2019-03-28 00:13:08 +01:00
|
|
|
!error_handler_.IsRecoveryInProgress()) {
|
Auto recovery from out of space errors (#4164)
2018-09-15 22:36:19 +02:00
|
|
|
// There has been a hard error and this call is not part of the recovery
|
|
|
|
// sequence. Bail out here so we don't get into an endless loop of
|
|
|
|
// scheduling BG work which will again call this function
|
|
|
|
return;
|
2017-04-06 02:14:05 +02:00
|
|
|
} else if (shutting_down_.load(std::memory_order_acquire)) {
|
|
|
|
// DB is being deleted; no more background compactions
|
|
|
|
return;
|
|
|
|
}
|
2017-05-24 20:25:38 +02:00
|
|
|
auto bg_job_limits = GetBGJobLimits();
|
2017-05-23 20:04:25 +02:00
|
|
|
bool is_flush_pool_empty =
|
2018-04-13 02:55:14 +02:00
|
|
|
env_->GetBackgroundThreads(Env::Priority::HIGH) == 0;
|
2017-05-23 20:04:25 +02:00
|
|
|
while (!is_flush_pool_empty && unscheduled_flushes_ > 0 &&
|
2017-05-24 20:25:38 +02:00
|
|
|
bg_flush_scheduled_ < bg_job_limits.max_flushes) {
|
2017-04-06 02:14:05 +02:00
|
|
|
bg_flush_scheduled_++;
|
2019-03-20 01:24:09 +01:00
|
|
|
FlushThreadArg* fta = new FlushThreadArg;
|
|
|
|
fta->db_ = this;
|
|
|
|
fta->thread_pri_ = Env::Priority::HIGH;
|
|
|
|
env_->Schedule(&DBImpl::BGWorkFlush, fta, Env::Priority::HIGH, this,
|
|
|
|
&DBImpl::UnscheduleFlushCallback);
|
2019-11-27 23:46:38 +01:00
|
|
|
--unscheduled_flushes_;
|
|
|
|
TEST_SYNC_POINT_CALLBACK(
|
|
|
|
"DBImpl::MaybeScheduleFlushOrCompaction:AfterSchedule:0",
|
|
|
|
&unscheduled_flushes_);
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
|
2017-05-23 20:04:25 +02:00
|
|
|
// special case -- if high-pri (flush) thread pool is empty, then schedule
|
|
|
|
// flushes in low-pri (compaction) thread pool.
|
|
|
|
if (is_flush_pool_empty) {
|
2017-04-06 02:14:05 +02:00
|
|
|
while (unscheduled_flushes_ > 0 &&
|
|
|
|
bg_flush_scheduled_ + bg_compaction_scheduled_ <
|
2017-05-24 20:25:38 +02:00
|
|
|
bg_job_limits.max_flushes) {
|
2017-04-06 02:14:05 +02:00
|
|
|
bg_flush_scheduled_++;
|
2019-03-20 01:24:09 +01:00
|
|
|
FlushThreadArg* fta = new FlushThreadArg;
|
|
|
|
fta->db_ = this;
|
|
|
|
fta->thread_pri_ = Env::Priority::LOW;
|
|
|
|
env_->Schedule(&DBImpl::BGWorkFlush, fta, Env::Priority::LOW, this,
|
|
|
|
&DBImpl::UnscheduleFlushCallback);
|
2019-11-27 23:46:38 +01:00
|
|
|
--unscheduled_flushes_;
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (bg_compaction_paused_ > 0) {
|
|
|
|
// we paused the background compaction
|
|
|
|
return;
|
Auto recovery from out of space errors (#4164)
2018-09-15 22:36:19 +02:00
|
|
|
} else if (error_handler_.IsBGWorkStopped()) {
|
|
|
|
// Compaction is not part of the recovery sequence from a hard error. We
|
|
|
|
// might get here because recovery might do a flush and install a new
|
|
|
|
// super version, which will try to schedule pending compactions. Bail
|
|
|
|
// out here and let the higher level recovery handle compactions
|
|
|
|
return;
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if (HasExclusiveManualCompaction()) {
|
|
|
|
// only manual compactions are allowed to run. don't schedule automatic
|
|
|
|
// compactions
|
2018-01-17 07:56:47 +01:00
|
|
|
TEST_SYNC_POINT("DBImpl::MaybeScheduleFlushOrCompaction:Conflict");
|
2017-04-06 02:14:05 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2021-11-19 02:26:39 +01:00
|
|
|
while (bg_compaction_scheduled_ + bg_bottom_compaction_scheduled_ <
|
|
|
|
bg_job_limits.max_compactions &&
|
2017-04-06 02:14:05 +02:00
|
|
|
unscheduled_compactions_ > 0) {
|
|
|
|
CompactionArg* ca = new CompactionArg;
|
|
|
|
ca->db = this;
|
Fix possible hang issue in ~DBImpl() when flush is scheduled in LOW pool (#8125)
Summary:
In DBImpl::CloseHelper, we wait for bg_compaction_scheduled_
and bg_flush_scheduled_ to drop to 0. Unschedule is called first
to cancel any unscheduled flushes/compactions. It is assumed that
anything in the high-priority pool is a flush, and anything in the
low-priority pool is a compaction. This assumption, however, is broken
when the high-pri pool is full.
As a result, bg_compaction_scheduled_ can go < 0 while bg_flush_scheduled_
remains > 0, and the DB can hang.
The fix: decrement the `bg_{flush,compaction,bottom_compaction}_scheduled_`
counters inside the `Unschedule{Flush,Compaction,BottomCompaction}Callback()`s.
The DB `mutex_` keeps these count updates atomic in `Unschedule`.
Related discussion: https://github.com/facebook/rocksdb/issues/7928
Pull Request resolved: https://github.com/facebook/rocksdb/pull/8125
Test Plan: Added new test case which hangs without the fix.
Reviewed By: jay-zhuang
Differential Revision: D27390043
Pulled By: ajkr
fbshipit-source-id: 78a367fba9a59ac5607ad24bd1c46dc16d5ec110
2021-03-31 03:34:11 +02:00
|
|
|
ca->compaction_pri_ = Env::Priority::LOW;
|
2017-08-04 00:36:28 +02:00
|
|
|
ca->prepicked_compaction = nullptr;
|
2017-04-06 02:14:05 +02:00
|
|
|
bg_compaction_scheduled_++;
|
|
|
|
unscheduled_compactions_--;
|
|
|
|
env_->Schedule(&DBImpl::BGWorkCompaction, ca, Env::Priority::LOW, this,
|
2019-03-20 01:24:09 +01:00
|
|
|
&DBImpl::UnscheduleCompactionCallback);
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-05-24 20:25:38 +02:00
|
|
|
DBImpl::BGJobLimits DBImpl::GetBGJobLimits() const {
|
2017-04-06 02:14:05 +02:00
|
|
|
mutex_.AssertHeld();
|
2020-04-21 01:17:25 +02:00
|
|
|
return GetBGJobLimits(mutable_db_options_.max_background_flushes,
|
2017-05-24 20:25:38 +02:00
|
|
|
mutable_db_options_.max_background_compactions,
|
|
|
|
mutable_db_options_.max_background_jobs,
|
|
|
|
write_controller_.NeedSpeedupCompaction());
|
|
|
|
}
|
|
|
|
|
|
|
|
DBImpl::BGJobLimits DBImpl::GetBGJobLimits(int max_background_flushes,
|
|
|
|
int max_background_compactions,
|
|
|
|
int max_background_jobs,
|
|
|
|
bool parallelize_compactions) {
|
|
|
|
BGJobLimits res;
|
|
|
|
if (max_background_flushes == -1 && max_background_compactions == -1) {
|
|
|
|
// for our first stab implementing max_background_jobs, simply allocate a
|
|
|
|
// quarter of the threads to flushes.
|
|
|
|
res.max_flushes = std::max(1, max_background_jobs / 4);
|
|
|
|
res.max_compactions = std::max(1, max_background_jobs - res.max_flushes);
|
2017-04-06 02:14:05 +02:00
|
|
|
} else {
|
2017-05-24 20:25:38 +02:00
|
|
|
// compatibility code in case users haven't migrated to max_background_jobs,
|
|
|
|
// which automatically computes flush/compaction limits
|
|
|
|
res.max_flushes = std::max(1, max_background_flushes);
|
|
|
|
res.max_compactions = std::max(1, max_background_compactions);
|
|
|
|
}
|
|
|
|
if (!parallelize_compactions) {
|
|
|
|
// throttle background compactions until we deem it necessary
|
|
|
|
res.max_compactions = 1;
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
2017-05-24 20:25:38 +02:00
|
|
|
return res;
|
2017-04-06 02:14:05 +02:00
|
|
|
}
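A small illustration of the split implemented above (not part of the source): with the legacy flush/compaction limits left at their -1 defaults, a quarter of max_background_jobs goes to flushes and the remainder to compactions.

#include <algorithm>

// Illustration only: mirrors the arithmetic in GetBGJobLimits() for
// max_background_jobs = 8 (legacy limits at -1).
int max_background_jobs = 8;
int max_flushes = std::max(1, max_background_jobs / 4);                // 2
int max_compactions = std::max(1, max_background_jobs - max_flushes);  // 6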
|
|
|
|
|
|
|
|
void DBImpl::AddToCompactionQueue(ColumnFamilyData* cfd) {
|
2018-04-27 20:11:12 +02:00
|
|
|
assert(!cfd->queued_for_compaction());
|
2017-04-06 02:14:05 +02:00
|
|
|
cfd->Ref();
|
|
|
|
compaction_queue_.push_back(cfd);
|
2018-04-27 20:11:12 +02:00
|
|
|
cfd->set_queued_for_compaction(true);
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
ColumnFamilyData* DBImpl::PopFirstFromCompactionQueue() {
|
|
|
|
assert(!compaction_queue_.empty());
|
|
|
|
auto cfd = *compaction_queue_.begin();
|
|
|
|
compaction_queue_.pop_front();
|
2018-04-27 20:11:12 +02:00
|
|
|
assert(cfd->queued_for_compaction());
|
|
|
|
cfd->set_queued_for_compaction(false);
|
2017-04-06 02:14:05 +02:00
|
|
|
return cfd;
|
|
|
|
}
|
|
|
|
|
2018-08-24 22:17:29 +02:00
|
|
|
DBImpl::FlushRequest DBImpl::PopFirstFromFlushQueue() {
|
2017-04-06 02:14:05 +02:00
|
|
|
assert(!flush_queue_.empty());
|
2018-08-24 22:17:29 +02:00
|
|
|
FlushRequest flush_req = flush_queue_.front();
|
2017-04-06 02:14:05 +02:00
|
|
|
flush_queue_.pop_front();
|
2020-12-02 18:29:50 +01:00
|
|
|
if (!immutable_db_options_.atomic_flush) {
|
|
|
|
assert(flush_req.size() == 1);
|
|
|
|
}
|
|
|
|
for (const auto& elem : flush_req) {
|
|
|
|
if (!immutable_db_options_.atomic_flush) {
|
|
|
|
ColumnFamilyData* cfd = elem.first;
|
|
|
|
assert(cfd);
|
|
|
|
assert(cfd->queued_for_flush());
|
|
|
|
cfd->set_queued_for_flush(false);
|
|
|
|
}
|
|
|
|
}
|
2018-02-09 21:09:55 +01:00
|
|
|
// TODO: need to unset flush reason?
|
2018-08-24 22:17:29 +02:00
|
|
|
return flush_req;
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
|
Concurrent task limiter for compaction thread control (#4332)
Summary:
The PR is targeting to resolve the issue of:
https://github.com/facebook/rocksdb/issues/3972#issue-330771918
We have a RocksDB instance using leveled compaction with multiple column families (CFs); some CFs use HDD to store big, less frequently accessed data and others use SSD.
When there is continuous write traffic to all CFs, the compaction thread pool is mostly occupied by the slow HDD compactions, which prevents the SSD bandwidth from being fully utilized.
Since atomic writes and transactions are needed across CFs, splitting into multiple RocksDB instances is not an option for us.
With compaction thread control, we got a 30%+ HDD write throughput gain, and also much smoother SSD writes since fewer write stalls happen.
ConcurrentTaskLimiter can be shared by multiple CFs across RocksDB instances, so the feature works not only for multi-CF scenarios but also for multi-instance scenarios that need per-tenant disk IO resource control.
Usage is straightforward:
e.g.:
//
// Enable compaction thread limiter thru ColumnFamilyOptions
//
std::shared_ptr<ConcurrentTaskLimiter> ctl(NewConcurrentTaskLimiter("foo_limiter", 4));
Options options;
ColumnFamilyOptions cf_opt(options);
cf_opt.compaction_thread_limiter = ctl;
...
//
// Compaction thread limiter can be tuned or disabled on-the-fly
//
ctl->SetMaxOutstandingTask(12); // enlarge to 12 tasks
...
ctl->ResetMaxOutstandingTask(); // disable (bypass) thread limiter
ctl->SetMaxOutstandingTask(-1); // Same as above
...
ctl->SetMaxOutstandingTask(0); // full throttle (0 task)
//
// Sharing compaction thread limiter among CFs (to resolve multiple storage perf issue)
//
std::shared_ptr<ConcurrentTaskLimiter> ctl_ssd(NewConcurrentTaskLimiter("ssd_limiter", 8));
std::shared_ptr<ConcurrentTaskLimiter> ctl_hdd(NewConcurrentTaskLimiter("hdd_limiter", 4));
Options options;
ColumnFamilyOptions cf_opt_ssd1(options);
ColumnFamilyOptions cf_opt_ssd2(options);
ColumnFamilyOptions cf_opt_hdd1(options);
ColumnFamilyOptions cf_opt_hdd2(options);
ColumnFamilyOptions cf_opt_hdd3(options);
// SSD CFs
cf_opt_ssd1.compaction_thread_limiter = ctl_ssd;
cf_opt_ssd2.compaction_thread_limiter = ctl_ssd;
// HDD CFs
cf_opt_hdd1.compaction_thread_limiter = ctl_hdd;
cf_opt_hdd2.compaction_thread_limiter = ctl_hdd;
cf_opt_hdd3.compaction_thread_limiter = ctl_hdd;
...
//
// The limiter is disabled by default (or set to nullptr explicitly)
//
Options options;
ColumnFamilyOptions cf_opt(options);
cf_opt.compaction_thread_limiter = nullptr;
Pull Request resolved: https://github.com/facebook/rocksdb/pull/4332
Differential Revision: D13226590
Pulled By: siying
fbshipit-source-id: 14307aec55b8bd59c8223d04aa6db3c03d1b0c1d
2018-12-13 22:16:04 +01:00
|
|
|
ColumnFamilyData* DBImpl::PickCompactionFromQueue(
|
|
|
|
std::unique_ptr<TaskLimiterToken>* token, LogBuffer* log_buffer) {
|
|
|
|
assert(!compaction_queue_.empty());
|
|
|
|
assert(*token == nullptr);
|
|
|
|
autovector<ColumnFamilyData*> throttled_candidates;
|
|
|
|
ColumnFamilyData* cfd = nullptr;
|
|
|
|
while (!compaction_queue_.empty()) {
|
|
|
|
auto first_cfd = *compaction_queue_.begin();
|
|
|
|
compaction_queue_.pop_front();
|
|
|
|
assert(first_cfd->queued_for_compaction());
|
|
|
|
if (!RequestCompactionToken(first_cfd, false, token, log_buffer)) {
|
|
|
|
throttled_candidates.push_back(first_cfd);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
cfd = first_cfd;
|
|
|
|
cfd->set_queued_for_compaction(false);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
// Add throttled compaction candidates back to queue in the original order.
|
2019-01-02 18:56:39 +01:00
|
|
|
for (auto iter = throttled_candidates.rbegin();
|
|
|
|
iter != throttled_candidates.rend(); ++iter) {
|
Concurrent task limiter for compaction thread control (#4332)
2018-12-13 22:16:04 +01:00
|
|
|
compaction_queue_.push_front(*iter);
|
|
|
|
}
|
|
|
|
return cfd;
|
|
|
|
}
|
|
|
|
|
2018-08-24 22:17:29 +02:00
|
|
|
void DBImpl::SchedulePendingFlush(const FlushRequest& flush_req,
|
2018-02-09 21:09:55 +01:00
|
|
|
FlushReason flush_reason) {
|
2020-12-02 18:29:50 +01:00
|
|
|
mutex_.AssertHeld();
|
2018-08-24 22:17:29 +02:00
|
|
|
if (flush_req.empty()) {
|
|
|
|
return;
|
|
|
|
}
|
2020-12-02 18:29:50 +01:00
|
|
|
if (!immutable_db_options_.atomic_flush) {
|
|
|
|
// For the non-atomic flush case, we never schedule multiple column
|
|
|
|
// families in the same flush request.
|
|
|
|
assert(flush_req.size() == 1);
|
|
|
|
ColumnFamilyData* cfd = flush_req[0].first;
|
|
|
|
assert(cfd);
|
2021-07-16 02:48:17 +02:00
|
|
|
// Note: SchedulePendingFlush is always preceded
|
|
|
|
// by an imm()->FlushRequested() call. However,
|
|
|
|
// we want to make this code snippet more resilient to
|
|
|
|
// future changes. Therefore, we add the following if
|
|
|
|
// statement - note that calling it twice (or more)
|
|
|
|
// doesn't break anything.
|
2021-08-11 03:07:48 +02:00
|
|
|
if (immutable_db_options_.experimental_mempurge_threshold > 0.0) {
|
2021-07-16 02:48:17 +02:00
|
|
|
// If imm() contains silent memtables,
|
|
|
|
// requesting a flush will mark the imm_needed as true.
|
|
|
|
cfd->imm()->FlushRequested();
|
|
|
|
}
|
2020-12-02 18:29:50 +01:00
|
|
|
if (!cfd->queued_for_flush() && cfd->imm()->IsFlushPending()) {
|
|
|
|
cfd->Ref();
|
|
|
|
cfd->set_queued_for_flush(true);
|
|
|
|
cfd->SetFlushReason(flush_reason);
|
|
|
|
++unscheduled_flushes_;
|
|
|
|
flush_queue_.push_back(flush_req);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
for (auto& iter : flush_req) {
|
|
|
|
ColumnFamilyData* cfd = iter.first;
|
|
|
|
cfd->Ref();
|
|
|
|
cfd->SetFlushReason(flush_reason);
|
|
|
|
}
|
|
|
|
++unscheduled_flushes_;
|
|
|
|
flush_queue_.push_back(flush_req);
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void DBImpl::SchedulePendingCompaction(ColumnFamilyData* cfd) {
|
2020-12-02 18:29:50 +01:00
|
|
|
mutex_.AssertHeld();
|
2018-04-27 20:11:12 +02:00
|
|
|
if (!cfd->queued_for_compaction() && cfd->NeedsCompaction()) {
|
2017-04-06 02:14:05 +02:00
|
|
|
AddToCompactionQueue(cfd);
|
|
|
|
++unscheduled_compactions_;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-04-26 22:51:39 +02:00
|
|
|
void DBImpl::SchedulePendingPurge(std::string fname, std::string dir_to_sync,
|
|
|
|
FileType type, uint64_t number, int job_id) {
|
2017-04-06 02:14:05 +02:00
|
|
|
mutex_.AssertHeld();
|
2018-04-26 22:51:39 +02:00
|
|
|
PurgeFileInfo file_info(fname, dir_to_sync, type, number, job_id);
|
2019-09-18 01:43:07 +02:00
|
|
|
purge_files_.insert({{number, std::move(file_info)}});
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
|
2019-03-20 01:24:09 +01:00
|
|
|
void DBImpl::BGWorkFlush(void* arg) {
|
|
|
|
FlushThreadArg fta = *(reinterpret_cast<FlushThreadArg*>(arg));
|
|
|
|
delete reinterpret_cast<FlushThreadArg*>(arg);
|
|
|
|
|
|
|
|
IOSTATS_SET_THREAD_POOL_ID(fta.thread_pri_);
|
2017-04-06 02:14:05 +02:00
|
|
|
TEST_SYNC_POINT("DBImpl::BGWorkFlush");
|
2020-04-29 22:06:27 +02:00
|
|
|
static_cast_with_check<DBImpl>(fta.db_)->BackgroundCallFlush(fta.thread_pri_);
|
2017-04-06 02:14:05 +02:00
|
|
|
TEST_SYNC_POINT("DBImpl::BGWorkFlush:done");
|
|
|
|
}
|
|
|
|
|
|
|
|
void DBImpl::BGWorkCompaction(void* arg) {
|
|
|
|
CompactionArg ca = *(reinterpret_cast<CompactionArg*>(arg));
|
|
|
|
delete reinterpret_cast<CompactionArg*>(arg);
|
|
|
|
IOSTATS_SET_THREAD_POOL_ID(Env::Priority::LOW);
|
|
|
|
TEST_SYNC_POINT("DBImpl::BGWorkCompaction");
|
2017-08-04 00:36:28 +02:00
|
|
|
auto prepicked_compaction =
|
|
|
|
static_cast<PrepickedCompaction*>(ca.prepicked_compaction);
|
2020-04-29 22:06:27 +02:00
|
|
|
static_cast_with_check<DBImpl>(ca.db)->BackgroundCallCompaction(
|
2017-08-04 00:36:28 +02:00
|
|
|
prepicked_compaction, Env::Priority::LOW);
|
|
|
|
delete prepicked_compaction;
|
|
|
|
}
|
|
|
|
|
|
|
|
void DBImpl::BGWorkBottomCompaction(void* arg) {
|
|
|
|
CompactionArg ca = *(static_cast<CompactionArg*>(arg));
|
|
|
|
delete static_cast<CompactionArg*>(arg);
|
|
|
|
IOSTATS_SET_THREAD_POOL_ID(Env::Priority::BOTTOM);
|
|
|
|
TEST_SYNC_POINT("DBImpl::BGWorkBottomCompaction");
|
|
|
|
auto* prepicked_compaction = ca.prepicked_compaction;
|
2021-11-19 02:26:39 +01:00
|
|
|
assert(prepicked_compaction && prepicked_compaction->compaction);
|
2017-08-04 00:36:28 +02:00
|
|
|
ca.db->BackgroundCallCompaction(prepicked_compaction, Env::Priority::BOTTOM);
|
|
|
|
delete prepicked_compaction;
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void DBImpl::BGWorkPurge(void* db) {
|
|
|
|
IOSTATS_SET_THREAD_POOL_ID(Env::Priority::HIGH);
|
|
|
|
TEST_SYNC_POINT("DBImpl::BGWorkPurge:start");
|
|
|
|
reinterpret_cast<DBImpl*>(db)->BackgroundCallPurge();
|
|
|
|
TEST_SYNC_POINT("DBImpl::BGWorkPurge:end");
|
|
|
|
}
|
|
|
|
|
2019-03-20 01:24:09 +01:00
|
|
|
void DBImpl::UnscheduleCompactionCallback(void* arg) {
|
Fix possible hang issue in ~DBImpl() when flush is scheduled in LOW pool (#8125)
2021-03-31 03:34:11 +02:00
|
|
|
CompactionArg* ca_ptr = reinterpret_cast<CompactionArg*>(arg);
|
|
|
|
Env::Priority compaction_pri = ca_ptr->compaction_pri_;
|
|
|
|
if (Env::Priority::BOTTOM == compaction_pri) {
|
|
|
|
// Decrement bg_bottom_compaction_scheduled_ if priority is BOTTOM
|
|
|
|
ca_ptr->db->bg_bottom_compaction_scheduled_--;
|
|
|
|
} else if (Env::Priority::LOW == compaction_pri) {
|
|
|
|
// Decrement bg_compaction_scheduled_ if priority is LOW
|
|
|
|
ca_ptr->db->bg_compaction_scheduled_--;
|
|
|
|
}
|
|
|
|
CompactionArg ca = *(ca_ptr);
|
2017-04-06 02:14:05 +02:00
|
|
|
delete reinterpret_cast<CompactionArg*>(arg);
|
2017-08-04 00:36:28 +02:00
|
|
|
if (ca.prepicked_compaction != nullptr) {
|
|
|
|
if (ca.prepicked_compaction->compaction != nullptr) {
|
2022-03-02 22:43:00 +01:00
|
|
|
ca.prepicked_compaction->compaction->ReleaseCompactionFiles(
|
|
|
|
Status::Incomplete(Status::SubCode::kManualCompactionPaused));
|
2017-08-04 00:36:28 +02:00
|
|
|
delete ca.prepicked_compaction->compaction;
|
|
|
|
}
|
|
|
|
delete ca.prepicked_compaction;
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
2019-03-20 01:24:09 +01:00
|
|
|
TEST_SYNC_POINT("DBImpl::UnscheduleCompactionCallback");
|
|
|
|
}
|
|
|
|
|
|
|
|
void DBImpl::UnscheduleFlushCallback(void* arg) {
|
Fix possible hang issue in ~DBImpl() when flush is scheduled in LOW pool (#8125)
2021-03-31 03:34:11 +02:00
|
|
|
// Decrement bg_flush_scheduled_ in flush callback
|
|
|
|
reinterpret_cast<FlushThreadArg*>(arg)->db_->bg_flush_scheduled_--;
|
|
|
|
Env::Priority flush_pri = reinterpret_cast<FlushThreadArg*>(arg)->thread_pri_;
|
|
|
|
if (Env::Priority::LOW == flush_pri) {
|
|
|
|
TEST_SYNC_POINT("DBImpl::UnscheduleLowFlushCallback");
|
|
|
|
} else if (Env::Priority::HIGH == flush_pri) {
|
|
|
|
TEST_SYNC_POINT("DBImpl::UnscheduleHighFlushCallback");
|
|
|
|
}
|
2019-03-20 01:24:09 +01:00
|
|
|
delete reinterpret_cast<FlushThreadArg*>(arg);
|
|
|
|
TEST_SYNC_POINT("DBImpl::UnscheduleFlushCallback");
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
Status DBImpl::BackgroundFlush(bool* made_progress, JobContext* job_context,
|
2019-03-20 01:24:09 +01:00
|
|
|
LogBuffer* log_buffer, FlushReason* reason,
|
|
|
|
Env::Priority thread_pri) {
|
2017-04-06 02:14:05 +02:00
|
|
|
mutex_.AssertHeld();
|
|
|
|
|
2018-06-28 21:23:57 +02:00
|
|
|
Status status;
|
Auto recovery from out of space errors (#4164)
2018-09-15 22:36:19 +02:00
|
|
|
*reason = FlushReason::kOthers;
|
|
|
|
// If BG work is stopped due to an error, but a recovery is in progress,
|
|
|
|
// that means this flush is part of the recovery. So allow it to go through
|
2018-06-28 21:23:57 +02:00
|
|
|
if (!error_handler_.IsBGWorkStopped()) {
|
|
|
|
if (shutting_down_.load(std::memory_order_acquire)) {
|
|
|
|
status = Status::ShutdownInProgress();
|
|
|
|
}
|
Auto recovery from out of space errors (#4164)
2018-09-15 22:36:19 +02:00
|
|
|
} else if (!error_handler_.IsRecoveryInProgress()) {
|
2018-06-28 21:23:57 +02:00
|
|
|
status = error_handler_.GetBGError();
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!status.ok()) {
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
2018-08-24 22:17:29 +02:00
|
|
|
autovector<BGFlushArg> bg_flush_args;
|
|
|
|
std::vector<SuperVersionContext>& superversion_contexts =
|
|
|
|
job_context->superversion_contexts;
|
2019-05-11 02:53:41 +02:00
|
|
|
autovector<ColumnFamilyData*> column_families_not_to_flush;
|
2017-04-06 02:14:05 +02:00
|
|
|
while (!flush_queue_.empty()) {
|
|
|
|
// This cfd is already referenced
|
2018-08-24 22:17:29 +02:00
|
|
|
const FlushRequest& flush_req = PopFirstFromFlushQueue();
|
|
|
|
superversion_contexts.clear();
|
|
|
|
superversion_contexts.reserve(flush_req.size());
|
|
|
|
|
|
|
|
for (const auto& iter : flush_req) {
|
|
|
|
ColumnFamilyData* cfd = iter.first;
|
2021-08-11 03:07:48 +02:00
|
|
|
if (immutable_db_options_.experimental_mempurge_threshold > 0.0) {
|
2021-07-16 02:48:17 +02:00
|
|
|
// If imm() contains silent memtables,
|
|
|
|
// requesting a flush will mark the imm_needed as true.
|
|
|
|
cfd->imm()->FlushRequested();
|
|
|
|
}
|
2018-08-24 22:17:29 +02:00
|
|
|
if (cfd->IsDropped() || !cfd->imm()->IsFlushPending()) {
|
|
|
|
// can't flush this CF, try next one
|
2019-05-11 02:53:41 +02:00
|
|
|
column_families_not_to_flush.push_back(cfd);
|
2018-08-24 22:17:29 +02:00
|
|
|
continue;
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
2018-08-24 22:17:29 +02:00
|
|
|
superversion_contexts.emplace_back(SuperVersionContext(true));
|
|
|
|
bg_flush_args.emplace_back(cfd, iter.second,
|
|
|
|
&(superversion_contexts.back()));
|
|
|
|
}
|
|
|
|
if (!bg_flush_args.empty()) {
|
|
|
|
break;
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-08-24 22:17:29 +02:00
|
|
|
if (!bg_flush_args.empty()) {
|
2017-05-24 20:25:38 +02:00
|
|
|
auto bg_job_limits = GetBGJobLimits();
|
2018-08-24 22:17:29 +02:00
|
|
|
for (const auto& arg : bg_flush_args) {
|
|
|
|
ColumnFamilyData* cfd = arg.cfd_;
|
|
|
|
ROCKS_LOG_BUFFER(
|
|
|
|
log_buffer,
|
|
|
|
"Calling FlushMemTableToOutputFile with column "
|
|
|
|
"family [%s], flush slots available %d, compaction slots available "
|
|
|
|
"%d, "
|
|
|
|
"flush slots scheduled %d, compaction slots scheduled %d",
|
|
|
|
cfd->GetName().c_str(), bg_job_limits.max_flushes,
|
|
|
|
bg_job_limits.max_compactions, bg_flush_scheduled_,
|
|
|
|
bg_compaction_scheduled_);
|
|
|
|
}
|
|
|
|
status = FlushMemTablesToOutputFiles(bg_flush_args, made_progress,
|
2019-03-20 01:24:09 +01:00
|
|
|
job_context, log_buffer, thread_pri);
|
2019-07-01 23:04:10 +02:00
|
|
|
TEST_SYNC_POINT("DBImpl::BackgroundFlush:BeforeFlush");
|
Auto recovery from out of space errors (#4164)
2018-09-15 22:36:19 +02:00
|
|
|
// All the CFDs in the FlushReq must have the same flush reason, so just
|
|
|
|
// grab the first one
|
|
|
|
*reason = bg_flush_args[0].cfd_->GetFlushReason();
|
2018-08-24 22:17:29 +02:00
|
|
|
for (auto& arg : bg_flush_args) {
|
|
|
|
ColumnFamilyData* cfd = arg.cfd_;
|
2019-12-13 04:02:51 +01:00
|
|
|
if (cfd->UnrefAndTryDelete()) {
|
2018-08-24 22:17:29 +02:00
|
|
|
arg.cfd_ = nullptr;
|
|
|
|
}
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
}
|
2019-05-11 02:53:41 +02:00
|
|
|
for (auto cfd : column_families_not_to_flush) {
|
2019-12-13 04:02:51 +01:00
|
|
|
cfd->UnrefAndTryDelete();
|
2019-05-11 02:53:41 +02:00
|
|
|
}
|
2017-04-06 02:14:05 +02:00
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
2019-03-20 01:24:09 +01:00
|
|
|
void DBImpl::BackgroundCallFlush(Env::Priority thread_pri) {
|
2017-04-06 02:14:05 +02:00
|
|
|
bool made_progress = false;
|
|
|
|
JobContext job_context(next_job_id_.fetch_add(1), true);
|
|
|
|
|
Fix atomic flush waiting forever for MANIFEST write (#9034)
Summary:
In atomic flush, concurrent background flush threads will commit to the MANIFEST
one by one, in the order of the IDs of their picked memtables for all included column
families. Each time, a background flush thread decides whether to wait based on two
criteria:
- Is db stopped? If so, don't wait.
- Am I the one to commit the currently earliest memtable? If so, don't wait; I'm ready to go.
When atomic flush was implemented, error writing to or syncing the MANIFEST would
cause the db to be stopped. Therefore, this background thread does not have to check
for the background error while waiting. If there has been such an error, `DBStopped()`
would have been true, and this thread will **not** wait forever.
After we improved error handling, RocksDB may map an IOError while writing to MANIFEST
to a soft error, if there is no WAL. This requires the background threads to check for
background error while waiting; otherwise, a background flush thread may wait forever (see the sketch after this summary).
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9034
Test Plan: make check
Reviewed By: zhichao-cao
Differential Revision: D31639225
Pulled By: riversand963
fbshipit-source-id: e9ab07c4d8f2eade238adeefe3e42dd9a5a3ebbd
2021-10-21 06:33:32 +02:00
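A conceptual sketch of the wait condition described in the summary above (illustration only, not the actual RocksDB code; the function and parameter names are hypothetical). After this fix, the predicate also accounts for a recorded background error, so a flush thread cannot wait forever when a MANIFEST IO error is mapped to a soft error.

static bool ShouldStopWaitingForManifestTurn(bool db_stopped_or_shutting_down,
                                             bool background_error_set,
                                             bool owns_earliest_memtable_id) {
  // Stop waiting on shutdown, on a background error (the newly added
  // check), or once this thread owns the earliest memtable to commit.
  return db_stopped_or_shutting_down || background_error_set ||
         owns_earliest_memtable_id;
}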
|
|
|
TEST_SYNC_POINT_CALLBACK("DBImpl::BackgroundCallFlush:start", nullptr);
|
2017-04-06 02:14:05 +02:00
|
|
|
|
|
|
|
LogBuffer log_buffer(InfoLogLevel::INFO_LEVEL,
|
|
|
|
immutable_db_options_.info_log.get());
|
Fix the false positive alert of CF consistency check in WAL recovery (#8207)
Summary:
In current RocksDB, when recovering information from the WAL, we do a consistency check for each column family when a WAL file is corrupted and PointInTimeRecovery is set. However, it reports a false positive alert of "SST file is ahead of WALs" when a CF's current log number is greater than the corrupted WAL number (i.e., the CF contains data beyond the corrupted WAL) due to a new column family being created during a flush. In this case, a new (empty) WAL is created during the flush. Also, for some reason (e.g., a storage issue, or a crash before SyncCloseLog is called), the old WAL is corrupted. The new CF has no data, therefore it does not have the consistency issue.
Fix: when checking cfd->GetLogNumber() > corrupted_wal_number, also check cfd->GetLiveSstFilesSize() > 0, so CFs with no SST file data skip the check (see the sketch after this summary).
Note a potential inconsistency the fix ignores: an empty CF can also result from write+delete. In that case, no SST files are generated after the flush, but the CF still has its log in the WAL. If that WAL is corrupted, the DB might be inconsistent.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/8207
Test Plan: added unit test, make crash_test
Reviewed By: riversand963
Differential Revision: D27898839
Pulled By: zhichao-cao
fbshipit-source-id: 931fc2d8b92dd00b4169bf84b94e712fd688a83e
2021-04-22 19:27:56 +02:00
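A sketch of the adjusted check described in the fix above (illustrative helper, not the actual recovery code): a CF is only flagged as being ahead of the corrupted WAL if it also owns live SST data.

#include <cstdint>

static bool CfMayBeAheadOfCorruptedWal(uint64_t cf_log_number,
                                       uint64_t corrupted_wal_number,
                                       uint64_t cf_live_sst_files_size) {
  // Empty CFs (no live SST data) are skipped, avoiding the false positive
  // "SST file is ahead of WALs" alert for newly created column families.
  return cf_log_number > corrupted_wal_number && cf_live_sst_files_size > 0;
}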
|
|
|
TEST_SYNC_POINT("DBImpl::BackgroundCallFlush:Start:1");
|
|
|
|
TEST_SYNC_POINT("DBImpl::BackgroundCallFlush:Start:2");
|
2017-04-06 02:14:05 +02:00
|
|
|
{
|
|
|
|
InstrumentedMutexLock l(&mutex_);
|
2017-06-21 01:43:09 +02:00
|
|
|
assert(bg_flush_scheduled_);
|
2017-04-06 02:14:05 +02:00
|
|
|
num_running_flushes_++;
|
|
|
|
|
2019-10-08 23:18:48 +02:00
|
|
|
std::unique_ptr<std::list<uint64_t>::iterator>
|
|
|
|
pending_outputs_inserted_elem(new std::list<uint64_t>::iterator(
|
|
|
|
CaptureCurrentFileNumberInPendingOutputs()));
|
Auto recovery from out of space errors (#4164)
2018-09-15 22:36:19 +02:00
|
|
|
FlushReason reason;
|
2017-04-06 02:14:05 +02:00
|
|
|
|
2019-03-20 01:24:09 +01:00
|
|
|
Status s = BackgroundFlush(&made_progress, &job_context, &log_buffer,
|
|
|
|
&reason, thread_pri);
|
2019-05-20 19:37:37 +02:00
|
|
|
if (!s.ok() && !s.IsShutdownInProgress() && !s.IsColumnFamilyDropped() &&
|
Auto recovery from out of space errors (#4164)
2018-09-15 22:36:19 +02:00
|
|
|
reason != FlushReason::kErrorRecovery) {
|
2017-04-06 02:14:05 +02:00
|
|
|
// Wait a little bit before retrying background flush in
|
|
|
|
// case this is an environmental problem and we do not want to
|
|
|
|
// chew up resources for failed flushes for the duration of
|
|
|
|
// the problem.
|
|
|
|
uint64_t error_cnt =
|
2018-04-13 02:55:14 +02:00
|
|
|
default_cf_internal_stats_->BumpAndGetBackgroundErrorCount();
|
2017-04-06 02:14:05 +02:00
|
|
|
bg_cv_.SignalAll(); // In case a waiter can proceed despite the error
|
|
|
|
mutex_.Unlock();
|
|
|
|
ROCKS_LOG_ERROR(immutable_db_options_.info_log,
|
|
|
|
"Waiting after background flush error: %s"
|
|
|
|
"Accumulated background error counts: %" PRIu64,
|
|
|
|
s.ToString().c_str(), error_cnt);
|
|
|
|
log_buffer.FlushBufferToLog();
|
|
|
|
LogFlush(immutable_db_options_.info_log);
|
2021-03-15 12:32:24 +01:00
|
|
|
immutable_db_options_.clock->SleepForMicroseconds(1000000);
|
2017-04-06 02:14:05 +02:00
|
|
|
mutex_.Lock();
|
|
|
|
}
|
|
|
|
|
2018-10-27 00:06:44 +02:00
|
|
|
TEST_SYNC_POINT("DBImpl::BackgroundCallFlush:FlushFinish:0");
|
2017-04-06 02:14:05 +02:00
|
|
|
ReleaseFileNumberFromPendingOutputs(pending_outputs_inserted_elem);
|
|
|
|
|
|
|
|
// If flush failed, we want to delete all temporary files that we might have
|
|
|
|
// created. Thus, we force full scan in FindObsoleteFiles()
|
2019-05-20 19:37:37 +02:00
|
|
|
FindObsoleteFiles(&job_context, !s.ok() && !s.IsShutdownInProgress() &&
|
|
|
|
!s.IsColumnFamilyDropped());
|
2017-04-06 02:14:05 +02:00
|
|
|
// delete unnecessary files if any, this is done outside the mutex
|
2018-01-12 22:16:39 +01:00
|
|
|
if (job_context.HaveSomethingToClean() ||
|
|
|
|
job_context.HaveSomethingToDelete() || !log_buffer.IsEmpty()) {
|
2017-04-06 02:14:05 +02:00
|
|
|
mutex_.Unlock();
|
2018-01-18 02:37:10 +01:00
|
|
|
TEST_SYNC_POINT("DBImpl::BackgroundCallFlush:FilesFound");
|
2017-04-06 02:14:05 +02:00
|
|
|
// Have to flush the info logs before bg_flush_scheduled_--
|
|
|
|
// because if bg_flush_scheduled_ becomes 0 and the lock is
|
|
|
|
// released, the destructor of DB can kick in and destroy all the
|
|
|
|
// states of DB so info_log might not be available after that point.
|
|
|
|
// It also applies to access other states that DB owns.
|
|
|
|
log_buffer.FlushBufferToLog();
|
|
|
|
if (job_context.HaveSomethingToDelete()) {
|
|
|
|
PurgeObsoleteFiles(job_context);
|
|
|
|
}
|
|
|
|
job_context.Clean();
|
|
|
|
mutex_.Lock();
|
|
|
|
}
|
2018-11-10 01:43:08 +01:00
|
|
|
TEST_SYNC_POINT("DBImpl::BackgroundCallFlush:ContextCleanedUp");
|
2017-04-06 02:14:05 +02:00
|
|
|
|
|
|
|
assert(num_running_flushes_ > 0);
|
|
|
|
num_running_flushes_--;
|
|
|
|
bg_flush_scheduled_--;
|
|
|
|
// See if there's more work to be done
|
|
|
|
MaybeScheduleFlushOrCompaction();
|
2019-01-04 05:53:52 +01:00
|
|
|
atomic_flush_install_cv_.SignalAll();
|
2017-04-06 02:14:05 +02:00
|
|
|
bg_cv_.SignalAll();
|
|
|
|
// IMPORTANT: there should be no code after calling SignalAll. This call may
|
|
|
|
// signal the DB destructor that it's OK to proceed with destruction. In
|
|
|
|
// that case, all DB variables will be deallocated and referencing them
|
|
|
|
// will cause trouble.
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-08-04 00:36:28 +02:00
|
|
|
void DBImpl::BackgroundCallCompaction(PrepickedCompaction* prepicked_compaction,
|
|
|
|
Env::Priority bg_thread_pri) {
|
2017-04-06 02:14:05 +02:00
|
|
|
bool made_progress = false;
|
2022-03-02 22:43:00 +01:00
|
|
|
JobContext job_context(next_job_id_.fetch_add(1), true);
|
2017-04-06 02:14:05 +02:00
|
|
|
TEST_SYNC_POINT("BackgroundCallCompaction:0");
|
|
|
|
LogBuffer log_buffer(InfoLogLevel::INFO_LEVEL,
|
|
|
|
immutable_db_options_.info_log.get());
|
|
|
|
{
|
|
|
|
InstrumentedMutexLock l(&mutex_);
|
|
|
|
|
2022-03-02 22:43:00 +01:00
|
|
|
// This call will unlock/lock the mutex to wait for current running
|
|
|
|
// IngestExternalFile() calls to finish.
|
|
|
|
WaitForIngestFile();
|
|
|
|
|
|
|
|
num_running_compactions_++;
|
|
|
|
|
|
|
|
std::unique_ptr<std::list<uint64_t>::iterator>
|
|
|
|
pending_outputs_inserted_elem(new std::list<uint64_t>::iterator(
|
|
|
|
CaptureCurrentFileNumberInPendingOutputs()));
|
|
|
|
|
|
|
|
assert((bg_thread_pri == Env::Priority::BOTTOM &&
|
|
|
|
bg_bottom_compaction_scheduled_) ||
|
|
|
|
(bg_thread_pri == Env::Priority::LOW && bg_compaction_scheduled_));
|
|
|
|
Status s = BackgroundCompaction(&made_progress, &job_context, &log_buffer,
|
|
|
|
prepicked_compaction, bg_thread_pri);
|
|
|
|
TEST_SYNC_POINT("BackgroundCallCompaction:1");
|
|
|
|
if (s.IsBusy()) {
|
|
|
|
bg_cv_.SignalAll(); // In case a waiter can proceed despite the error
|
|
|
|
mutex_.Unlock();
|
|
|
|
immutable_db_options_.clock->SleepForMicroseconds(
|
|
|
|
10000); // prevent hot loop
|
|
|
|
mutex_.Lock();
|
|
|
|
} else if (!s.ok() && !s.IsShutdownInProgress() &&
|
|
|
|
!s.IsManualCompactionPaused() && !s.IsColumnFamilyDropped()) {
|
|
|
|
// Wait a little bit before retrying background compaction in
|
|
|
|
// case this is an environmental problem and we do not want to
|
|
|
|
// chew up resources for failed compactions for the duration of
|
|
|
|
// the problem.
|
|
|
|
uint64_t error_cnt =
|
|
|
|
default_cf_internal_stats_->BumpAndGetBackgroundErrorCount();
|
|
|
|
bg_cv_.SignalAll(); // In case a waiter can proceed despite the error
|
|
|
|
mutex_.Unlock();
|
|
|
|
log_buffer.FlushBufferToLog();
|
|
|
|
ROCKS_LOG_ERROR(immutable_db_options_.info_log,
|
|
|
|
"Waiting after background compaction error: %s, "
|
|
|
|
"Accumulated background error counts: %" PRIu64,
|
|
|
|
s.ToString().c_str(), error_cnt);
|
|
|
|
LogFlush(immutable_db_options_.info_log);
|
|
|
|
immutable_db_options_.clock->SleepForMicroseconds(1000000);
|
|
|
|
mutex_.Lock();
|
|
|
|
} else if (s.IsManualCompactionPaused()) {
|
|
|
|
assert(prepicked_compaction);
|
2022-03-13 05:07:04 +01:00
|
|
|
auto m = prepicked_compaction->manual_compaction_state;
|
2022-03-02 22:43:00 +01:00
|
|
|
assert(m);
|
|
|
|
ROCKS_LOG_BUFFER(&log_buffer, "[%s] [JOB %d] Manual compaction paused",
|
|
|
|
m->cfd->GetName().c_str(), job_context.job_id);
|
|
|
|
}
|
|
|
|
|
|
|
|
ReleaseFileNumberFromPendingOutputs(pending_outputs_inserted_elem);
|
2022-02-16 02:59:31 +01:00
|
|
|
|
2022-03-02 22:43:00 +01:00
|
|
|
// If compaction failed, we want to delete all temporary files that we
|
|
|
|
// might have created (they might not be all recorded in job_context in
|
|
|
|
// case of a failure). Thus, we force full scan in FindObsoleteFiles()
|
|
|
|
FindObsoleteFiles(&job_context, !s.ok() && !s.IsShutdownInProgress() &&
|
|
|
|
!s.IsManualCompactionPaused() &&
|
|
|
|
!s.IsColumnFamilyDropped() &&
|
|
|
|
!s.IsBusy());
|
|
|
|
TEST_SYNC_POINT("DBImpl::BackgroundCallCompaction:FoundObsoleteFiles");
|
|
|
|
|
|
|
|
// delete unnecessary files if any, this is done outside the mutex
|
|
|
|
if (job_context.HaveSomethingToClean() ||
|
|
|
|
job_context.HaveSomethingToDelete() || !log_buffer.IsEmpty()) {
|
|
|
|
mutex_.Unlock();
|
|
|
|
// Have to flush the info logs before bg_compaction_scheduled_--
|
|
|
|
// because if bg_flush_scheduled_ becomes 0 and the lock is
|
|
|
|
// released, the destructor of DB can kick in and destroy all the
|
|
|
|
// states of DB so info_log might not be available after that point.
|
|
|
|
// It also applies to access other states that DB owns.
|
|
|
|
log_buffer.FlushBufferToLog();
|
|
|
|
if (job_context.HaveSomethingToDelete()) {
|
|
|
|
PurgeObsoleteFiles(job_context);
|
|
|
|
TEST_SYNC_POINT("DBImpl::BackgroundCallCompaction:PurgedObsoleteFiles");
|
|
|
|
}
|
|
|
|
job_context.Clean();
|
|
|
|
mutex_.Lock();
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
|
2022-03-02 22:43:00 +01:00
|
|
|
assert(num_running_compactions_ > 0);
|
|
|
|
num_running_compactions_--;
|
|
|
|
|
2017-08-04 00:36:28 +02:00
|
|
|
if (bg_thread_pri == Env::Priority::LOW) {
|
|
|
|
bg_compaction_scheduled_--;
|
|
|
|
} else {
|
|
|
|
assert(bg_thread_pri == Env::Priority::BOTTOM);
|
|
|
|
bg_bottom_compaction_scheduled_--;
|
|
|
|
}
|
2017-04-06 02:14:05 +02:00
|
|
|
|
|
|
|
versions_->GetColumnFamilySet()->FreeDeadColumnFamilies();
|
|
|
|
|
|
|
|
// See if there's more work to be done
|
|
|
|
MaybeScheduleFlushOrCompaction();
|
2021-05-05 02:26:23 +02:00
|
|
|
|
|
|
|
if (prepicked_compaction != nullptr &&
|
|
|
|
prepicked_compaction->task_token != nullptr) {
|
2021-07-22 02:36:48 +02:00
|
|
|
// Releasing task tokens affects (and asserts on) the DB state, so
|
|
|
|
// must be done before we potentially signal the DB close process to
|
|
|
|
// proceed below.
|
|
|
|
prepicked_compaction->task_token.reset();
|
2021-05-05 02:26:23 +02:00
|
|
|
}
|
|
|
|
|
2017-08-04 00:36:28 +02:00
|
|
|
if (made_progress ||
|
|
|
|
(bg_compaction_scheduled_ == 0 &&
|
|
|
|
bg_bottom_compaction_scheduled_ == 0) ||
|
2018-03-07 01:13:05 +01:00
|
|
|
HasPendingManualCompaction() || unscheduled_compactions_ == 0) {
|
2017-04-06 02:14:05 +02:00
|
|
|
// signal if
|
|
|
|
// * made_progress -- need to wakeup DelayWrite
|
2017-08-04 00:36:28 +02:00
|
|
|
// * bg_{bottom,}_compaction_scheduled_ == 0 -- need to wakeup ~DBImpl
|
2017-04-06 02:14:05 +02:00
|
|
|
// * HasPendingManualCompaction -- need to wakeup RunManualCompaction
|
|
|
|
// If none of this is true, there is no need to signal since nobody is
|
|
|
|
// waiting for it
|
|
|
|
bg_cv_.SignalAll();
|
|
|
|
}
|
|
|
|
// IMPORTANT: there should be no code after calling SignalAll. This call may
|
|
|
|
// signal the DB destructor that it's OK to proceed with destruction. In
|
|
|
|
// that case, all DB variables will be deallocated and referencing them
|
|
|
|
// will cause trouble.
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
Status DBImpl::BackgroundCompaction(bool* made_progress,
                                    JobContext* job_context,
                                    LogBuffer* log_buffer,
                                    PrepickedCompaction* prepicked_compaction,
                                    Env::Priority thread_pri) {
  std::shared_ptr<ManualCompactionState> manual_compaction =
      prepicked_compaction == nullptr
          ? nullptr
          : prepicked_compaction->manual_compaction_state;
  *made_progress = false;
  mutex_.AssertHeld();
  TEST_SYNC_POINT("DBImpl::BackgroundCompaction:Start");

  bool is_manual = (manual_compaction != nullptr);
  std::unique_ptr<Compaction> c;
  if (prepicked_compaction != nullptr &&
      prepicked_compaction->compaction != nullptr) {
    c.reset(prepicked_compaction->compaction);
  }
  bool is_prepicked = is_manual || c;

  // (manual_compaction->in_progress == false);
  bool trivial_move_disallowed =
      is_manual && manual_compaction->disallow_trivial_move;

  CompactionJobStats compaction_job_stats;
  Status status;
  if (!error_handler_.IsBGWorkStopped()) {
    if (shutting_down_.load(std::memory_order_acquire)) {
      status = Status::ShutdownInProgress();
    } else if (is_manual &&
               manual_compaction_paused_.load(std::memory_order_acquire) > 0) {
      status = Status::Incomplete(Status::SubCode::kManualCompactionPaused);
    } else if (is_manual && manual_compaction->canceled &&
               manual_compaction->canceled->load(std::memory_order_acquire)) {
      status = Status::Incomplete(Status::SubCode::kManualCompactionPaused);
    }
  } else {
    status = error_handler_.GetBGError();
    // If we get here, it means a hard error happened after this compaction
    // was scheduled by MaybeScheduleFlushOrCompaction(), but before it got
    // a chance to execute. Since we didn't pop a cfd from the compaction
    // queue, increment unscheduled_compactions_.
    unscheduled_compactions_++;
  }
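
  // Note on the paused/canceled checks above: manual_compaction_paused_ is
  // incremented by DisableManualCompaction(), and manual_compaction->canceled
  // points at a caller-provided atomic (CompactRangeOptions::canceled).
  // Illustrative sketch only (not part of this file) of cancelling an
  // in-flight CompactRange() from another thread:
  //
  //   std::atomic<bool> canceled{false};
  //   rocksdb::CompactRangeOptions cro;
  //   cro.canceled = &canceled;   // observed by the check above
  //   // ... later, from another thread:
  //   canceled.store(true, std::memory_order_release);
  //   // or, to pause all manual compactions on the DB:
  //   db->DisableManualCompaction();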

  if (!status.ok()) {
    if (is_manual) {
      manual_compaction->status = status;
      manual_compaction->status
          .PermitUncheckedError();  // the manual compaction thread may exit
                                    // first, which won't be able to check the
                                    // status
      manual_compaction->done = true;
      manual_compaction->in_progress = false;
      manual_compaction = nullptr;
    }
    if (c) {
      c->ReleaseCompactionFiles(status);
      c.reset();
    }
    return status;
  }

  if (is_manual) {
    // another thread cannot pick up the same work
    manual_compaction->in_progress = true;
  }

  std::unique_ptr<TaskLimiterToken> task_token;
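  // task_token is handed out by the per-CF ConcurrentTaskLimiter (if one is
  // configured) when a compaction is picked from the queue below, and is
  // transferred along if the job is forwarded to the bottom-priority pool.
  // Illustrative sketch only (not part of this file) of how the limiter is
  // configured on the user side:
  //
  //   std::shared_ptr<rocksdb::ConcurrentTaskLimiter> limiter(
  //       rocksdb::NewConcurrentTaskLimiter("hdd_limiter", 4));
  //   rocksdb::ColumnFamilyOptions cf_opt;
  //   cf_opt.compaction_thread_limiter = limiter;  // may be shared across CFs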

  // InternalKey manual_end_storage;
  // InternalKey* manual_end = &manual_end_storage;
  bool sfm_reserved_compact_space = false;
  if (is_manual) {
    auto m = manual_compaction;
    assert(m->in_progress);
    if (!c) {
      m->done = true;
      m->manual_end = nullptr;
      ROCKS_LOG_BUFFER(
          log_buffer,
          "[%s] Manual compaction from level-%d from %s .. "
          "%s; nothing to do\n",
          m->cfd->GetName().c_str(), m->input_level,
          (m->begin ? m->begin->DebugString(true).c_str() : "(begin)"),
          (m->end ? m->end->DebugString(true).c_str() : "(end)"));
    } else {
      // First check if we have enough room to do the compaction
      bool enough_room = EnoughRoomForCompaction(
          m->cfd, *(c->inputs()), &sfm_reserved_compact_space, log_buffer);

      if (!enough_room) {
        // Then don't do the compaction
        c->ReleaseCompactionFiles(status);
        c.reset();
        // m's vars will get set properly at the end of this function,
        // as long as status == CompactionTooLarge
        status = Status::CompactionTooLarge();
      } else {
        ROCKS_LOG_BUFFER(
            log_buffer,
            "[%s] Manual compaction from level-%d to level-%d from %s .. "
            "%s; will stop at %s\n",
            m->cfd->GetName().c_str(), m->input_level, c->output_level(),
            (m->begin ? m->begin->DebugString(true).c_str() : "(begin)"),
            (m->end ? m->end->DebugString(true).c_str() : "(end)"),
            ((m->done || m->manual_end == nullptr)
                 ? "(end)"
                 : m->manual_end->DebugString(true).c_str()));
      }
    }
  } else if (!is_prepicked && !compaction_queue_.empty()) {
    if (HasExclusiveManualCompaction()) {
      // Can't compact right now, but try again later
      TEST_SYNC_POINT("DBImpl::BackgroundCompaction()::Conflict");

      // Stay in the compaction queue.
      unscheduled_compactions_++;

      return Status::OK();
    }

    auto cfd = PickCompactionFromQueue(&task_token, log_buffer);
    if (cfd == nullptr) {
      // Can't find any executable task from the compaction queue.
      // All tasks have been throttled by compaction thread limiter.
      ++unscheduled_compactions_;
      return Status::Busy();
    }

    // We unreference here because the following code will take a Ref() on
    // this cfd if it is going to use it (Compaction class holds a
    // reference).
    // This will all happen under a mutex so we don't have to be afraid of
    // somebody else deleting it.
    if (cfd->UnrefAndTryDelete()) {
      // This was the last reference of the column family, so no need to
      // compact.
      return Status::OK();
    }

    // Pick up the latest mutable CF options and use them throughout the
    // compaction job.
    // Compaction makes a copy of the latest MutableCFOptions. It should be
    // used throughout the compaction procedure to ensure consistency. It will
    // eventually be installed into the SuperVersion.
    auto* mutable_cf_options = cfd->GetLatestMutableCFOptions();
    if (!mutable_cf_options->disable_auto_compactions && !cfd->IsDropped()) {
      // NOTE: try to avoid an unnecessary copy of MutableCFOptions if
      // compaction is not necessary. Need to make sure the mutex is held
      // until we make the copy in the following code.
      TEST_SYNC_POINT("DBImpl::BackgroundCompaction():BeforePickCompaction");
      c.reset(cfd->PickCompaction(*mutable_cf_options, mutable_db_options_,
                                  log_buffer));
      TEST_SYNC_POINT("DBImpl::BackgroundCompaction():AfterPickCompaction");

      if (c != nullptr) {
        bool enough_room = EnoughRoomForCompaction(
            cfd, *(c->inputs()), &sfm_reserved_compact_space, log_buffer);

        if (!enough_room) {
          // Then don't do the compaction
          c->ReleaseCompactionFiles(status);
          c->column_family_data()
              ->current()
              ->storage_info()
              ->ComputeCompactionScore(*(c->immutable_options()),
                                       *(c->mutable_cf_options()));
          AddToCompactionQueue(cfd);
          ++unscheduled_compactions_;

          c.reset();
          // Don't need to sleep here, because BackgroundCallCompaction
          // will sleep if !s.ok()
          status = Status::CompactionTooLarge();
        } else {
          // update statistics
          size_t num_files = 0;
          for (auto& each_level : *c->inputs()) {
            num_files += each_level.files.size();
          }
          RecordInHistogram(stats_, NUM_FILES_IN_SINGLE_COMPACTION, num_files);

          // There are three things that can change the compaction score:
          // 1) When a flush or compaction finishes. This case is covered by
          //    InstallSuperVersionAndScheduleWork.
          // 2) When MutableCFOptions changes. This case is also covered by
          //    InstallSuperVersionAndScheduleWork, because this is when the
          //    new options take effect.
          // 3) When we pick a new compaction, we "remove" the files being
          //    compacted from the calculation, which then influences the
          //    compaction score. Here we check whether we need a new
          //    compaction even without the files that are currently being
          //    compacted. If we do, we might be able to execute it in
          //    parallel, so we add it to the queue and schedule a new thread.
          if (cfd->NeedsCompaction()) {
            // Yes, we need more compactions!
            AddToCompactionQueue(cfd);
            ++unscheduled_compactions_;
            MaybeScheduleFlushOrCompaction();
          }
        }
      }
    }
  }

  IOStatus io_s;
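  // Exactly one of the branches below runs: nothing to do, a FIFO deletion
  // compaction, a trivial file move, forwarding to the bottom-priority pool,
  // or a full CompactionJob.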
  if (!c) {
    // Nothing to do
    ROCKS_LOG_BUFFER(log_buffer, "Compaction nothing to do");
  } else if (c->deletion_compaction()) {
    // TODO(icanadi) Do we want to honor snapshots here? i.e. not delete old
    // file if there is an alive snapshot pointing to it
    TEST_SYNC_POINT_CALLBACK("DBImpl::BackgroundCompaction:BeforeCompaction",
                             c->column_family_data());
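    // Deletion compactions are used by FIFO compaction: the oldest L0 files
    // are simply dropped once the configured budget is exceeded; no data is
    // rewritten. Illustrative sketch only (not part of this file) of how FIFO
    // compaction is typically enabled:
    //
    //   rocksdb::Options options;
    //   options.compaction_style = rocksdb::kCompactionStyleFIFO;
    //   options.compaction_options_fifo.max_table_files_size = 1ULL << 30;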
    assert(c->num_input_files(1) == 0);
    assert(c->level() == 0);
    assert(c->column_family_data()->ioptions()->compaction_style ==
           kCompactionStyleFIFO);

    compaction_job_stats.num_input_files = c->num_input_files(0);

    NotifyOnCompactionBegin(c->column_family_data(), c.get(), status,
                            compaction_job_stats, job_context->job_id);

    for (const auto& f : *c->inputs(0)) {
      c->edit()->DeleteFile(c->level(), f->fd.GetNumber());
    }
    status = versions_->LogAndApply(c->column_family_data(),
                                    *c->mutable_cf_options(), c->edit(),
                                    &mutex_, directories_.GetDbDir());
    io_s = versions_->io_status();
    InstallSuperVersionAndScheduleWork(c->column_family_data(),
                                       &job_context->superversion_contexts[0],
                                       *c->mutable_cf_options());
    ROCKS_LOG_BUFFER(log_buffer, "[%s] Deleted %d files\n",
                     c->column_family_data()->GetName().c_str(),
                     c->num_input_files(0));
    *made_progress = true;
    TEST_SYNC_POINT_CALLBACK("DBImpl::BackgroundCompaction:AfterCompaction",
                             c->column_family_data());
  } else if (!trivial_move_disallowed && c->IsTrivialMove()) {
    TEST_SYNC_POINT("DBImpl::BackgroundCompaction:TrivialMove");
    TEST_SYNC_POINT_CALLBACK("DBImpl::BackgroundCompaction:BeforeCompaction",
                             c->column_family_data());
    // Instrument for event update
    // TODO(yhchiang): add op details for showing trivial-move.
    ThreadStatusUtil::SetColumnFamily(
        c->column_family_data(), c->column_family_data()->ioptions()->env,
        immutable_db_options_.enable_thread_tracking);
    ThreadStatusUtil::SetThreadOperation(ThreadStatus::OP_COMPACTION);

    compaction_job_stats.num_input_files = c->num_input_files(0);

    NotifyOnCompactionBegin(c->column_family_data(), c.get(), status,
                            compaction_job_stats, job_context->job_id);

    // Move files to next level
    int32_t moved_files = 0;
    int64_t moved_bytes = 0;
    for (unsigned int l = 0; l < c->num_input_levels(); l++) {
      if (c->level(l) == c->output_level()) {
        continue;
      }
      for (size_t i = 0; i < c->num_input_files(l); i++) {
        FileMetaData* f = c->input(l, i);
        c->edit()->DeleteFile(c->level(l), f->fd.GetNumber());
        c->edit()->AddFile(
            c->output_level(), f->fd.GetNumber(), f->fd.GetPathId(),
            f->fd.GetFileSize(), f->smallest, f->largest, f->fd.smallest_seqno,
            f->fd.largest_seqno, f->marked_for_compaction, f->temperature,
            f->oldest_blob_file_number, f->oldest_ancester_time,
            f->file_creation_time, f->file_checksum, f->file_checksum_func_name,
            f->min_timestamp, f->max_timestamp);

        ROCKS_LOG_BUFFER(
            log_buffer,
            "[%s] Moving #%" PRIu64 " to level-%d %" PRIu64 " bytes\n",
            c->column_family_data()->GetName().c_str(), f->fd.GetNumber(),
            c->output_level(), f->fd.GetFileSize());
        ++moved_files;
        moved_bytes += f->fd.GetFileSize();
      }
    }
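
    // Note: a trivial move only rewrites metadata. The VersionEdit built above
    // re-registers the same SST files at the output level; no file contents
    // are read or rewritten, which is why this path bypasses CompactionJob.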

    status = versions_->LogAndApply(c->column_family_data(),
                                    *c->mutable_cf_options(), c->edit(),
                                    &mutex_, directories_.GetDbDir());
    io_s = versions_->io_status();
    // Use latest MutableCFOptions
    InstallSuperVersionAndScheduleWork(c->column_family_data(),
                                       &job_context->superversion_contexts[0],
                                       *c->mutable_cf_options());

    VersionStorageInfo::LevelSummaryStorage tmp;
    c->column_family_data()->internal_stats()->IncBytesMoved(c->output_level(),
                                                             moved_bytes);
    {
      event_logger_.LogToBuffer(log_buffer)
          << "job" << job_context->job_id << "event"
          << "trivial_move"
          << "destination_level" << c->output_level() << "files" << moved_files
          << "total_files_size" << moved_bytes;
    }
    ROCKS_LOG_BUFFER(
        log_buffer,
        "[%s] Moved #%d files to level-%d %" PRIu64 " bytes %s: %s\n",
        c->column_family_data()->GetName().c_str(), moved_files,
        c->output_level(), moved_bytes, status.ToString().c_str(),
        c->column_family_data()->current()->storage_info()->LevelSummary(&tmp));
    *made_progress = true;

    // Clear Instrument
    ThreadStatusUtil::ResetThreadStatus();
    TEST_SYNC_POINT_CALLBACK("DBImpl::BackgroundCompaction:AfterCompaction",
                             c->column_family_data());
  } else if (!is_prepicked && c->output_level() > 0 &&
             c->output_level() ==
                 c->column_family_data()
                     ->current()
                     ->storage_info()
                     ->MaxOutputLevel(
                         immutable_db_options_.allow_ingest_behind) &&
             env_->GetBackgroundThreads(Env::Priority::BOTTOM) > 0) {
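    // This branch only triggers when the BOTTOM thread pool has threads.
    // Illustrative sketch only (not part of this file) of how a user
    // typically sizes that pool:
    //
    //   rocksdb::Env* env = rocksdb::Env::Default();
    //   env->SetBackgroundThreads(2, rocksdb::Env::Priority::BOTTOM);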
    // Forward compactions involving the last level to the bottom pool if it
    // exists, so that compactions unlikely to contribute to write stalls can
    // be delayed or deprioritized.
    TEST_SYNC_POINT("DBImpl::BackgroundCompaction:ForwardToBottomPriPool");
    CompactionArg* ca = new CompactionArg;
    ca->db = this;
    ca->compaction_pri_ = Env::Priority::BOTTOM;
    ca->prepicked_compaction = new PrepickedCompaction;
    ca->prepicked_compaction->compaction = c.release();
    ca->prepicked_compaction->manual_compaction_state = nullptr;
    // Transfer the requested token, so it doesn't need to be requested again.
    ca->prepicked_compaction->task_token = std::move(task_token);
    ++bg_bottom_compaction_scheduled_;
    env_->Schedule(&DBImpl::BGWorkBottomCompaction, ca, Env::Priority::BOTTOM,
                   this, &DBImpl::UnscheduleCompactionCallback);
  } else {
    TEST_SYNC_POINT_CALLBACK("DBImpl::BackgroundCompaction:BeforeCompaction",
                             c->column_family_data());
    int output_level __attribute__((__unused__));
    output_level = c->output_level();
    TEST_SYNC_POINT_CALLBACK("DBImpl::BackgroundCompaction:NonTrivial",
                             &output_level);
    std::vector<SequenceNumber> snapshot_seqs;
    SequenceNumber earliest_write_conflict_snapshot;
    SnapshotChecker* snapshot_checker;
    GetSnapshotContext(job_context, &snapshot_seqs,
                       &earliest_write_conflict_snapshot, &snapshot_checker);
    assert(is_snapshot_supported_ || snapshots_.empty());
    CompactionJob compaction_job(
        job_context->job_id, c.get(), immutable_db_options_,
        mutable_db_options_, file_options_for_compaction_, versions_.get(),
        &shutting_down_, preserve_deletes_seqnum_.load(), log_buffer,
        directories_.GetDbDir(),
        GetDataDir(c->column_family_data(), c->output_path_id()),
        GetDataDir(c->column_family_data(), 0), stats_, &mutex_,
        &error_handler_, snapshot_seqs, earliest_write_conflict_snapshot,
        snapshot_checker, table_cache_, &event_logger_,
        c->mutable_cf_options()->paranoid_file_checks,
        c->mutable_cf_options()->report_bg_io_stats, dbname_,
        &compaction_job_stats, thread_pri, io_tracer_,
        is_manual ? &manual_compaction_paused_ : nullptr,
        is_manual ? manual_compaction->canceled : nullptr, db_id_,
        db_session_id_, c->column_family_data()->GetFullHistoryTsLow(),
        c->trim_ts(), &blob_callback_);
    compaction_job.Prepare();
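
    // The DB mutex is released while the compaction job runs (potentially for
    // a long time) and re-acquired before its results are installed below.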

    NotifyOnCompactionBegin(c->column_family_data(), c.get(), status,
                            compaction_job_stats, job_context->job_id);
    mutex_.Unlock();
    TEST_SYNC_POINT_CALLBACK(
        "DBImpl::BackgroundCompaction:NonTrivial:BeforeRun", nullptr);
    // Should handle error?
    compaction_job.Run().PermitUncheckedError();
    TEST_SYNC_POINT("DBImpl::BackgroundCompaction:NonTrivial:AfterRun");
    mutex_.Lock();

    status = compaction_job.Install(*c->mutable_cf_options());
    io_s = compaction_job.io_status();
    if (status.ok()) {
      InstallSuperVersionAndScheduleWork(c->column_family_data(),
                                         &job_context->superversion_contexts[0],
                                         *c->mutable_cf_options());
    }
    *made_progress = true;
    TEST_SYNC_POINT_CALLBACK("DBImpl::BackgroundCompaction:AfterCompaction",
                             c->column_family_data());
  }
Pass IOStatus to write path and set retryable IO Error as hard error in BG jobs (#6487)
2020-03-28 00:03:05 +01:00
|
|
|
|
|
|
|
if (status.ok() && !io_s.ok()) {
|
|
|
|
status = io_s;
|
2020-09-29 18:47:33 +02:00
|
|
|
} else {
|
|
|
|
io_s.PermitUncheckedError();
|
Pass IOStatus to write path and set retryable IO Error as hard error in BG jobs (#6487)
2020-03-28 00:03:05 +01:00
|
|
|
}
|
|
|
|
|
2017-04-06 02:14:05 +02:00
|
|
|
if (c != nullptr) {
|
|
|
|
c->ReleaseCompactionFiles(status);
|
|
|
|
*made_progress = true;
|
2018-03-07 01:13:05 +01:00
|
|
|
|
|
|
|
#ifndef ROCKSDB_LITE
|
|
|
|
// Need to make sure SstFileManager does its bookkeeping
|
|
|
|
auto sfm = static_cast<SstFileManagerImpl*>(
|
|
|
|
immutable_db_options_.sst_file_manager.get());
|
2018-04-03 04:53:19 +02:00
|
|
|
if (sfm && sfm_reserved_compact_space) {
|
2018-03-07 01:13:05 +01:00
|
|
|
sfm->OnCompactionCompletion(c.get());
|
|
|
|
}
|
|
|
|
#endif // ROCKSDB_LITE
|
|
|
|
|
2018-04-13 02:55:14 +02:00
|
|
|
NotifyOnCompactionCompleted(c->column_family_data(), c.get(), status,
|
|
|
|
compaction_job_stats, job_context->job_id);
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
|
2019-09-17 06:00:13 +02:00
|
|
|
if (status.ok() || status.IsCompactionTooLarge() ||
|
|
|
|
status.IsManualCompactionPaused()) {
|
2017-04-06 02:14:05 +02:00
|
|
|
// Done
|
2019-06-04 07:37:40 +02:00
|
|
|
} else if (status.IsColumnFamilyDropped() || status.IsShutdownInProgress()) {
|
2017-04-06 02:14:05 +02:00
|
|
|
// Ignore compaction errors found during shutdown
|
|
|
|
} else {
|
|
|
|
ROCKS_LOG_WARN(immutable_db_options_.info_log, "Compaction error: %s",
|
|
|
|
status.ToString().c_str());
|
2020-03-29 04:05:54 +02:00
|
|
|
if (!io_s.ok()) {
|
First step towards handling MANIFEST write error (#6949)
Summary:
This PR provides preliminary support for handling IO error during MANIFEST write.
File write/sync is not guaranteed to be atomic. If we encounter an IOError while writing/syncing to the MANIFEST file, we cannot be sure about the state of the MANIFEST file. The version edits may or may not have reached the file. During cleanup, if we delete the newly-generated SST files referenced by the pending version edit(s), but the version edit(s) actually are persistent in the MANIFEST, then next recovery attempt will process the version edits(s) and then fail since the SST files have already been deleted.
One approach is to truncate the MANIFEST after write/sync error, so that it is safe to delete the SST files. However, file truncation may not be supported on certain file systems. Therefore, we take the following approach.
If an IOError is detected during MANIFEST write/sync, we disable file deletions for the faulty database. Depending on whether the IOError is retryable (set by underlying file system), either RocksDB or application can call `DB::Resume()`, or simply shutdown and restart. During `Resume()`, RocksDB will try to switch to a new MANIFEST and write all existing in-memory version storage in the new file. If this succeeds, then RocksDB may proceed. If all recovery is completed, then file deletions will be re-enabled.
Note that multiple threads can call `LogAndApply()` at the same time, though only one of them will actually perform the MANIFEST write, possibly batching the version edits of the other threads. When the leading MANIFEST writer finishes, all of the MANIFEST-writing threads in this batch will have the same IOError. They will all call `ErrorHandler::SetBGError()`, in which file deletion will be disabled.
Possible future directions:
- Add an `ErrorContext` structure so that it is easier to pass more info to `ErrorHandler`. Currently, as in this example, a new `BackgroundErrorReason` has to be added.
Test plan (dev server):
make check
Pull Request resolved: https://github.com/facebook/rocksdb/pull/6949
Reviewed By: anand1976
Differential Revision: D22026020
Pulled By: riversand963
fbshipit-source-id: f3c68a2ef45d9b505d0d625c7c5e0c88495b91c8
2020-06-25 04:05:47 +02:00
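As a hedged sketch of how an application could observe the MANIFEST-related background error reason described above (the listener class name and the log message are illustrative and not part of this PR):

#include <cstdio>
#include <rocksdb/listener.h>

// Logs MANIFEST write errors reported by the error handler so an operator can
// decide when to call DB::Resume().
class ManifestErrorLogger : public rocksdb::EventListener {
 public:
  void OnBackgroundError(rocksdb::BackgroundErrorReason reason,
                         rocksdb::Status* bg_error) override {
    if (reason == rocksdb::BackgroundErrorReason::kManifestWrite) {
      // At this point file deletions have been disabled; a later DB::Resume()
      // will try to switch to a new MANIFEST as described above.
      std::fprintf(stderr, "MANIFEST write error: %s\n",
                   bg_error->ToString().c_str());
    }
  }
};

The listener would be registered through options.listeners before DB::Open().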
|
|
|
// Error while writing to MANIFEST.
|
|
|
|
// In fact, versions_->io_status() can also be the result of renaming
|
|
|
|
// CURRENT file. With current code, it's just difficult to tell. So just
|
|
|
|
// be pessimistic and try to write to a new MANIFEST.
|
|
|
|
// TODO: distinguish between MANIFEST write and CURRENT renaming
|
|
|
|
auto err_reason = versions_->io_status().ok()
|
|
|
|
? BackgroundErrorReason::kCompaction
|
|
|
|
: BackgroundErrorReason::kManifestWrite;
|
2020-12-08 05:09:55 +01:00
|
|
|
error_handler_.SetBGError(io_s, err_reason);
|
2020-03-29 04:05:54 +02:00
|
|
|
} else {
|
2020-12-08 05:09:55 +01:00
|
|
|
error_handler_.SetBGError(status, BackgroundErrorReason::kCompaction);
|
2020-03-29 04:05:54 +02:00
|
|
|
}
|
Auto recovery from out of space errors (#4164)
Summary:
This commit implements automatic recovery from a Status::NoSpace() error
during background operations such as write callback, flush and
compaction. The broad design is as follows -
1. Compaction errors are treated as soft errors and don't put the
database in read-only mode. A compaction is delayed until enough free
disk space is available to accommodate the compaction outputs, which is
estimated based on the input size. This means that users can continue to
write, and we rely on the WriteController to delay or stop writes if the
compaction debt becomes too high due to persistent low disk space
condition
2. Errors during write callback and flush are treated as hard errors,
i.e., the database is put in read-only mode and goes back to read-write
only after certain recovery actions are taken.
3. Both types of recovery rely on the SstFileManagerImpl to poll for
sufficient disk space. We assume that there is a 1-1 mapping between an
SFM and the underlying OS storage container. For cases where multiple
DBs are hosted on a single storage container, the user is expected to
allocate a single SFM instance and use the same one for all the DBs. If
no SFM is specified by the user, DBImpl::Open() will allocate one, but
this will be one per DB and each DB will recover independently. The
recovery implemented by SFM is as follows -
a) On the first occurrence of an out of space error during compaction, subsequent
compactions will be delayed until the disk free space check indicates
enough available space. The required space is computed as the sum of
input sizes.
b) The free space check requirement will be removed once the amount of
free space is greater than the size reserved by in-progress
compactions when the first error occurred
c) If the out of space error is a hard error, a background thread in
SFM will poll for sufficient headroom before triggering the recovery
of the database and putting it back in read-write mode. The headroom is
calculated as the sum of the write_buffer_size of all the DB instances
associated with the SFM
4. EventListener callbacks will be called at the start and completion of
automatic recovery. Users can disable the auto recovery in the start
callback, and later initiate it manually by calling DB::Resume()
Todo:
1. More extensive testing
2. Add disk full condition to db_stress (follow-on PR)
Pull Request resolved: https://github.com/facebook/rocksdb/pull/4164
Differential Revision: D9846378
Pulled By: anand1976
fbshipit-source-id: 80ea875dbd7f00205e19c82215ff6e37da10da4a
2018-09-15 22:36:19 +02:00
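A hedged sketch of the recommended setup from point 3 above, where two DBs hosted on the same storage container share one SstFileManager so their recovery is coordinated (the paths, function name, and option values are placeholders):

#include <memory>
#include <rocksdb/db.h>
#include <rocksdb/env.h>
#include <rocksdb/sst_file_manager.h>

void OpenTwoDBsWithSharedSfm() {
  // One SFM per storage container, shared by every DB hosted on it.
  std::shared_ptr<rocksdb::SstFileManager> sfm(
      rocksdb::NewSstFileManager(rocksdb::Env::Default()));

  rocksdb::Options options;
  options.create_if_missing = true;
  options.sst_file_manager = sfm;  // both DBs use the same SFM for recovery

  rocksdb::DB* db1 = nullptr;
  rocksdb::DB* db2 = nullptr;
  rocksdb::Status s1 = rocksdb::DB::Open(options, "/data/db1", &db1);
  rocksdb::Status s2 = rocksdb::DB::Open(options, "/data/db2", &db2);
  // ... use db1 and db2 after checking s1/s2 ...
  delete db1;
  delete db2;
}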
|
|
|
if (c != nullptr && !is_manual && !error_handler_.IsBGWorkStopped()) {
|
|
|
|
// Put this cfd back in the compaction queue so we can retry after some
|
|
|
|
// time
|
|
|
|
auto cfd = c->column_family_data();
|
|
|
|
assert(cfd != nullptr);
|
|
|
|
// Since this compaction failed, we need to recompute the score so it
|
|
|
|
// takes the original input files into account
|
|
|
|
c->column_family_data()
|
|
|
|
->current()
|
|
|
|
->storage_info()
|
2021-06-17 01:50:43 +02:00
|
|
|
->ComputeCompactionScore(*(c->immutable_options()),
|
Auto recovery from out of space errors (#4164)
2018-09-15 22:36:19 +02:00
|
|
|
*(c->mutable_cf_options()));
|
|
|
|
if (!cfd->queued_for_compaction()) {
|
|
|
|
AddToCompactionQueue(cfd);
|
|
|
|
++unscheduled_compactions_;
|
|
|
|
}
|
|
|
|
}
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
Auto recovery from out of space errors (#4164)
2018-09-15 22:36:19 +02:00
|
|
|
// this will unref its input_version and column_family_data
|
|
|
|
c.reset();
|
2017-04-06 02:14:05 +02:00
|
|
|
|
|
|
|
if (is_manual) {
|
2022-03-13 05:07:04 +01:00
|
|
|
auto m = manual_compaction;
|
2017-04-06 02:14:05 +02:00
|
|
|
if (!status.ok()) {
|
|
|
|
m->status = status;
|
|
|
|
m->done = true;
|
|
|
|
}
|
|
|
|
// For universal compaction:
|
|
|
|
// Because universal compaction always happens at level 0, one
|
|
|
|
// compaction will pick up all overlapped files. No files will be
|
|
|
|
// filtered out due to size limit and left for a successive compaction.
|
|
|
|
// So we can safely conclude the current compaction.
|
|
|
|
//
|
|
|
|
// Also note that, if we don't stop here, then the current compaction
|
|
|
|
// writes a new file back to level 0, which will be used in successive
|
|
|
|
// compaction. Hence the manual compaction will never finish.
|
|
|
|
//
|
|
|
|
// Stop the compaction if manual_end points to nullptr -- this means
|
|
|
|
// that we compacted the whole range. manual_end should always point
|
|
|
|
// to nullptr in case of universal compaction
|
|
|
|
if (m->manual_end == nullptr) {
|
|
|
|
m->done = true;
|
|
|
|
}
|
|
|
|
if (!m->done) {
|
|
|
|
// We only compacted part of the requested range. Update *m
|
|
|
|
// to the range that is left to be compacted.
|
|
|
|
// Universal and FIFO compactions should always compact the whole range
|
|
|
|
assert(m->cfd->ioptions()->compaction_style !=
|
|
|
|
kCompactionStyleUniversal ||
|
|
|
|
m->cfd->ioptions()->num_levels > 1);
|
|
|
|
assert(m->cfd->ioptions()->compaction_style != kCompactionStyleFIFO);
|
|
|
|
m->tmp_storage = *m->manual_end;
|
|
|
|
m->begin = &m->tmp_storage;
|
|
|
|
m->incomplete = true;
|
|
|
|
}
|
2018-04-13 02:55:14 +02:00
|
|
|
m->in_progress = false; // not being processed anymore
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
TEST_SYNC_POINT("DBImpl::BackgroundCompaction:Finish");
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool DBImpl::HasPendingManualCompaction() {
|
|
|
|
return (!manual_compaction_dequeue_.empty());
|
|
|
|
}
|
|
|
|
|
2017-08-04 00:36:28 +02:00
|
|
|
void DBImpl::AddManualCompaction(DBImpl::ManualCompactionState* m) {
|
Prevent corruption with parallel manual compactions and `change_level == true` (#9077)
Summary:
The bug can impact the following scenario. There must be two `CompactRange()`s, call them A and B. Compaction A must have `change_level=true`. Compactions A and B must run in parallel, and new data must be added while they run as well.
Now, on to the details of the race condition. Compaction A must reach the refitting phase while B's next step is to trivial move new data (i.e., data that has been inserted behind A) down to the same level that A's refit targets (`CompactRangeOptions::target_level`). B must be unregistered (i.e., has not yet called `AddManualCompaction()` for the current `RunManualCompaction()`) while A invokes `DisableManualCompaction()`s to prepare for refitting. In the old code, B could still proceed to register a manual compaction, while A had disabled manual compaction.
The next part of the race condition is B picks and schedules a trivial move while A has released the lock in refitting phase in order to persist the LSM state change (i.e., the log phase of `LogAndApply()`). That way, B does not see the refitted data when picking a trivial-move compaction. So it is susceptible to picking one that overlaps.
Finally, B executes the picked trivial-move compaction. Trivial-move compactions are special in that they never check whether manual compaction is disabled. So the picked compaction causing overlap ends up being applied, leading to LSM corruption if `force_consistency_checks=false`, or entering read-only mode with `Status::Corruption` if `force_consistency_checks=true` (the default).
The fix is just to prevent B from registering itself in `RunManualCompaction()` while manual compactions are disabled, consequently preventing any trivial move or other compaction from being picked/scheduled.
Thanks to siying for finding the bug.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/9077
Test Plan: The test does not go all the way in exposing the bug because it requires a compaction to be picked/scheduled while logging LSM state change for RefitLevel(). But the fix is to make such a compaction not picked/scheduled in the first place, so any repro of that scenario would end up hanging RefitLevel() logging. So instead I just verified no such compaction is registered in the scenario where `RefitLevel()` disables manual compactions.
Reviewed By: siying
Differential Revision: D31921908
Pulled By: ajkr
fbshipit-source-id: 9bb5d0e847ad428211227f40830c685c209fbecb
2021-10-28 08:07:29 +02:00
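For reference, a hedged sketch of the kind of manual compaction ("compaction A" in the summary above) that goes through the refitting phase, i.e. CompactRange() with change_level enabled; the function name, db pointer, and target level are placeholders:

#include <rocksdb/db.h>

rocksdb::Status RefitWholeKeyRange(rocksdb::DB* db) {
  rocksdb::CompactRangeOptions cro;
  cro.change_level = true;  // refit the compaction output to target_level
  cro.target_level = 1;
  // Passing nullptr for begin/end compacts the whole key range.
  return db->CompactRange(cro, /*begin=*/nullptr, /*end=*/nullptr);
}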
|
|
|
assert(manual_compaction_paused_ == 0);
|
2017-04-06 02:14:05 +02:00
|
|
|
manual_compaction_dequeue_.push_back(m);
|
|
|
|
}
|
|
|
|
|
2017-08-04 00:36:28 +02:00
|
|
|
void DBImpl::RemoveManualCompaction(DBImpl::ManualCompactionState* m) {
|
2017-04-06 02:14:05 +02:00
|
|
|
// Remove from queue
|
2017-08-04 00:36:28 +02:00
|
|
|
std::deque<ManualCompactionState*>::iterator it =
|
2017-04-06 02:14:05 +02:00
|
|
|
manual_compaction_dequeue_.begin();
|
|
|
|
while (it != manual_compaction_dequeue_.end()) {
|
|
|
|
if (m == (*it)) {
|
|
|
|
it = manual_compaction_dequeue_.erase(it);
|
|
|
|
return;
|
|
|
|
}
|
2019-05-15 22:14:18 +02:00
|
|
|
++it;
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
assert(false);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-08-04 00:36:28 +02:00
|
|
|
bool DBImpl::ShouldntRunManualCompaction(ManualCompactionState* m) {
|
2017-04-06 02:14:05 +02:00
|
|
|
if (num_running_ingest_file_ > 0) {
|
|
|
|
// We need to wait for other IngestExternalFile() calls to finish
|
|
|
|
// before running a manual compaction.
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
if (m->exclusive) {
|
2017-08-04 00:36:28 +02:00
|
|
|
return (bg_bottom_compaction_scheduled_ > 0 ||
|
|
|
|
bg_compaction_scheduled_ > 0);
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
2017-08-04 00:36:28 +02:00
|
|
|
std::deque<ManualCompactionState*>::iterator it =
|
2017-04-06 02:14:05 +02:00
|
|
|
manual_compaction_dequeue_.begin();
|
|
|
|
bool seen = false;
|
|
|
|
while (it != manual_compaction_dequeue_.end()) {
|
|
|
|
if (m == (*it)) {
|
2019-05-15 22:14:18 +02:00
|
|
|
++it;
|
2017-04-06 02:14:05 +02:00
|
|
|
seen = true;
|
|
|
|
continue;
|
|
|
|
} else if (MCOverlap(m, (*it)) && (!seen && !(*it)->in_progress)) {
|
|
|
|
// Consider the other manual compaction *it, conflicts if:
|
|
|
|
// overlaps with m
|
|
|
|
// and (*it) is ahead in the queue and is not yet in progress
|
|
|
|
return true;
|
|
|
|
}
|
2019-05-15 22:14:18 +02:00
|
|
|
++it;
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool DBImpl::HaveManualCompaction(ColumnFamilyData* cfd) {
|
|
|
|
// Walk the manual compaction queue
|
2017-08-04 00:36:28 +02:00
|
|
|
std::deque<ManualCompactionState*>::iterator it =
|
2017-04-06 02:14:05 +02:00
|
|
|
manual_compaction_dequeue_.begin();
|
|
|
|
while (it != manual_compaction_dequeue_.end()) {
|
|
|
|
if ((*it)->exclusive) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
if ((cfd == (*it)->cfd) && (!((*it)->in_progress || (*it)->done))) {
|
|
|
|
// Allow automatic compaction if manual compaction is
|
2017-06-14 01:46:17 +02:00
|
|
|
// in progress
|
2017-04-06 02:14:05 +02:00
|
|
|
return true;
|
|
|
|
}
|
2019-05-15 22:14:18 +02:00
|
|
|
++it;
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool DBImpl::HasExclusiveManualCompaction() {
|
|
|
|
// Walk the manual compaction queue
|
2017-08-04 00:36:28 +02:00
|
|
|
std::deque<ManualCompactionState*>::iterator it =
|
2017-04-06 02:14:05 +02:00
|
|
|
manual_compaction_dequeue_.begin();
|
|
|
|
while (it != manual_compaction_dequeue_.end()) {
|
|
|
|
if ((*it)->exclusive) {
|
|
|
|
return true;
|
|
|
|
}
|
2019-05-15 22:14:18 +02:00
|
|
|
++it;
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2017-08-04 00:36:28 +02:00
|
|
|
bool DBImpl::MCOverlap(ManualCompactionState* m, ManualCompactionState* m1) {
|
2017-04-06 02:14:05 +02:00
|
|
|
if ((m->exclusive) || (m1->exclusive)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
if (m->cfd != m1->cfd) {
|
|
|
|
return false;
|
|
|
|
}
|
2021-05-20 06:40:43 +02:00
|
|
|
return false;
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
|
|
|
|
2018-12-13 23:12:02 +01:00
|
|
|
#ifndef ROCKSDB_LITE
|
|
|
|
void DBImpl::BuildCompactionJobInfo(
|
|
|
|
const ColumnFamilyData* cfd, Compaction* c, const Status& st,
|
|
|
|
const CompactionJobStats& compaction_job_stats, const int job_id,
|
|
|
|
const Version* current, CompactionJobInfo* compaction_job_info) const {
|
|
|
|
assert(compaction_job_info != nullptr);
|
|
|
|
compaction_job_info->cf_id = cfd->GetID();
|
|
|
|
compaction_job_info->cf_name = cfd->GetName();
|
|
|
|
compaction_job_info->status = st;
|
|
|
|
compaction_job_info->thread_id = env_->GetThreadID();
|
|
|
|
compaction_job_info->job_id = job_id;
|
|
|
|
compaction_job_info->base_input_level = c->start_level();
|
|
|
|
compaction_job_info->output_level = c->output_level();
|
|
|
|
compaction_job_info->stats = compaction_job_stats;
|
|
|
|
compaction_job_info->table_properties = c->GetOutputTableProperties();
|
|
|
|
compaction_job_info->compaction_reason = c->compaction_reason();
|
|
|
|
compaction_job_info->compression = c->output_compression();
|
|
|
|
for (size_t i = 0; i < c->num_input_levels(); ++i) {
|
|
|
|
for (const auto fmd : *c->inputs(i)) {
|
2019-10-24 23:42:43 +02:00
|
|
|
const FileDescriptor& desc = fmd->fd;
|
|
|
|
const uint64_t file_number = desc.GetNumber();
|
2021-06-17 01:50:43 +02:00
|
|
|
auto fn = TableFileName(c->immutable_options()->cf_paths, file_number,
|
2019-10-24 23:42:43 +02:00
|
|
|
desc.GetPathId());
|
2018-12-13 23:12:02 +01:00
|
|
|
compaction_job_info->input_files.push_back(fn);
|
2019-10-24 23:42:43 +02:00
|
|
|
compaction_job_info->input_file_infos.push_back(CompactionFileInfo{
|
|
|
|
static_cast<int>(i), file_number, fmd->oldest_blob_file_number});
|
2018-12-13 23:12:02 +01:00
|
|
|
if (compaction_job_info->table_properties.count(fn) == 0) {
|
2019-03-27 18:18:56 +01:00
|
|
|
std::shared_ptr<const TableProperties> tp;
|
2018-12-13 23:12:02 +01:00
|
|
|
auto s = current->GetTableProperties(&tp, fmd, &fn);
|
|
|
|
if (s.ok()) {
|
|
|
|
compaction_job_info->table_properties[fn] = tp;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for (const auto& newf : c->edit()->GetNewFiles()) {
|
2019-10-24 23:42:43 +02:00
|
|
|
const FileMetaData& meta = newf.second;
|
|
|
|
const FileDescriptor& desc = meta.fd;
|
|
|
|
const uint64_t file_number = desc.GetNumber();
|
|
|
|
compaction_job_info->output_files.push_back(TableFileName(
|
2021-06-17 01:50:43 +02:00
|
|
|
c->immutable_options()->cf_paths, file_number, desc.GetPathId()));
|
2019-10-24 23:42:43 +02:00
|
|
|
compaction_job_info->output_file_infos.push_back(CompactionFileInfo{
|
|
|
|
newf.first, file_number, meta.oldest_blob_file_number});
|
2018-12-13 23:12:02 +01:00
|
|
|
}
|
2021-09-17 02:17:40 +02:00
|
|
|
compaction_job_info->blob_compression_type =
|
|
|
|
c->mutable_cf_options()->blob_compression_type;
|
|
|
|
|
|
|
|
// Update BlobFilesInfo.
|
|
|
|
for (const auto& blob_file : c->edit()->GetBlobFileAdditions()) {
|
|
|
|
BlobFileAdditionInfo blob_file_addition_info(
|
|
|
|
BlobFileName(c->immutable_options()->cf_paths.front().path,
|
|
|
|
blob_file.GetBlobFileNumber()) /*blob_file_path*/,
|
|
|
|
blob_file.GetBlobFileNumber(), blob_file.GetTotalBlobCount(),
|
|
|
|
blob_file.GetTotalBlobBytes());
|
|
|
|
compaction_job_info->blob_file_addition_infos.emplace_back(
|
|
|
|
std::move(blob_file_addition_info));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update BlobFilesGarbageInfo.
|
|
|
|
for (const auto& blob_file : c->edit()->GetBlobFileGarbages()) {
|
|
|
|
BlobFileGarbageInfo blob_file_garbage_info(
|
|
|
|
BlobFileName(c->immutable_options()->cf_paths.front().path,
|
|
|
|
blob_file.GetBlobFileNumber()) /*blob_file_path*/,
|
|
|
|
blob_file.GetBlobFileNumber(), blob_file.GetGarbageBlobCount(),
|
|
|
|
blob_file.GetGarbageBlobBytes());
|
|
|
|
compaction_job_info->blob_file_garbage_infos.emplace_back(
|
|
|
|
std::move(blob_file_garbage_info));
|
|
|
|
}
|
2018-12-13 23:12:02 +01:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2017-10-06 03:00:38 +02:00
|
|
|
// SuperVersionContext gets created and destructed outside of the lock --
|
2018-04-11 00:47:54 +02:00
|
|
|
// we use this conveniently to:
|
2017-04-06 02:14:05 +02:00
|
|
|
// * malloc one SuperVersion() outside of the lock -- new_superversion
|
|
|
|
// * delete SuperVersion()s outside of the lock -- superversions_to_free
|
|
|
|
//
|
|
|
|
// However, if InstallSuperVersionAndScheduleWork() gets called twice with the
|
2017-10-06 03:00:38 +02:00
|
|
|
// same sv_context, we can't reuse the SuperVersion() that got
|
2017-04-06 02:14:05 +02:00
|
|
|
// malloced because
|
|
|
|
// the first call already used it. In that rare case, we take a hit and create a
|
|
|
|
// new SuperVersion() inside the mutex. We do a similar thing
|
|
|
|
// for superversion_to_free
|
|
|
|
|
2017-10-06 03:00:38 +02:00
|
|
|
void DBImpl::InstallSuperVersionAndScheduleWork(
|
|
|
|
ColumnFamilyData* cfd, SuperVersionContext* sv_context,
|
Make mempurge a background process (equivalent to in-memory compaction). (#8505)
Summary:
In https://github.com/facebook/rocksdb/issues/8454, I introduced a new process baptized `MemPurge` (memtable garbage collection). This new PR is built upon this past mempurge prototype.
In this PR, I made the `mempurge` process a background task, which provides superior performance since the mempurge process does not cling to the db_mutex anymore, and addresses severe restrictions from the past iteration (including a scenario where the past mempurge was failing, when a memtable was mempurged but was still referred to by an iterator/snapshot/...).
Now the mempurge process resembles an in-memory compaction process: the stack of immutable memtables is filtered out, and the useful payload is used to populate an output memtable. If the output memtable is filled to more than 60% of its capacity (an arbitrary heuristic), the mempurge process is aborted and a regular flush process takes place; otherwise the output memtable is kept in the immutable memtable stack. Note that adding this output memtable to the `imm()` memtable stack does not trigger another flush process, so the flush thread can go to sleep at the end of a successful mempurge.
MemPurge is activated by making the `experimental_allow_mempurge` flag `true`. When activated, the `MemPurge` process will always happen when the flush reason is `kWriteBufferFull`.
The 3 unit tests confirm that this process supports `Put`, `Get`, `Delete`, `DeleteRange` operators and is compatible with `Iterators` and `CompactionFilters`.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/8505
Reviewed By: pdillinger
Differential Revision: D29619283
Pulled By: bjlemaire
fbshipit-source-id: 8a99bee76b63a8211bff1a00e0ae32360aaece95
2021-07-10 02:16:00 +02:00
|
|
|
const MutableCFOptions& mutable_cf_options) {
|
2017-04-06 02:14:05 +02:00
|
|
|
mutex_.AssertHeld();
|
|
|
|
|
|
|
|
// Update max_total_in_memory_state_
|
|
|
|
size_t old_memtable_size = 0;
|
|
|
|
auto* old_sv = cfd->GetSuperVersion();
|
|
|
|
if (old_sv) {
|
|
|
|
old_memtable_size = old_sv->mutable_cf_options.write_buffer_size *
|
|
|
|
old_sv->mutable_cf_options.max_write_buffer_number;
|
|
|
|
}
|
|
|
|
|
2018-07-27 23:02:07 +02:00
|
|
|
// this branch is unlikely to be taken
|
|
|
|
if (UNLIKELY(sv_context->new_superversion == nullptr)) {
|
2017-10-06 03:00:38 +02:00
|
|
|
sv_context->NewSuperVersion();
|
|
|
|
}
|
Fix a race in ColumnFamilyData::UnrefAndTryDelete (#8605)
Summary:
The `ColumnFamilyData::UnrefAndTryDelete` code currently on the trunk
unlocks the DB mutex before destroying the `ThreadLocalPtr` holding
the per-thread `SuperVersion` pointers when the only remaining reference
is the back reference from `super_version_`. The idea behind this was to
break the circular dependency between `ColumnFamilyData` and `SuperVersion`:
when the penultimate reference goes away, `ColumnFamilyData` can clean up
the `SuperVersion`, which can in turn clean up `ColumnFamilyData`. (Assuming there
is a `SuperVersion` and it is not referenced by anything else.) However,
unlocking the mutex throws a wrench in this plan by making it possible for another thread
to jump in and take another reference to the `ColumnFamilyData`, keeping the
object alive in a zombie `ThreadLocalPtr`-less state. This can cause issues like
https://github.com/facebook/rocksdb/issues/8440 ,
https://github.com/facebook/rocksdb/issues/8382 ,
and might also explain the `was_last_ref` assertion failures from the `ColumnFamilySet`
destructor we sometimes observe during close in our stress tests.
Digging through the archives, this unlocking goes way back to 2014 (or earlier). The original
rationale was that `SuperVersionUnrefHandle` used to lock the mutex so it can call
`SuperVersion::Cleanup`; however, this logic turned out to be deadlock-prone.
https://github.com/facebook/rocksdb/pull/3510 fixed the deadlock but left the
unlocking in place. https://github.com/facebook/rocksdb/pull/6147 then introduced
the circular dependency and associated cleanup logic described above (in order
to enable iterators to keep the `ColumnFamilyData` for dropped column families alive),
and moved the unlocking-relocking snippet to its present location in `UnrefAndTryDelete`.
Finally, https://github.com/facebook/rocksdb/pull/7749 fixed a memory leak but
apparently exacerbated the race by (otherwise correctly) switching to `UnrefAndTryDelete`
in `SuperVersion::Cleanup`.
The patch simply eliminates the unlocking and relocking, which has been unnecessary
ever since https://github.com/facebook/rocksdb/issues/3510 made `SuperVersionUnrefHandle` lock-free.
This closes the window during which another thread could increase the reference count,
and hopefully fixes the issues above.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/8605
Test Plan: Ran `make check` and stress tests locally.
Reviewed By: pdillinger
Differential Revision: D30051035
Pulled By: ltamasi
fbshipit-source-id: 8fe559e4b4ad69fc142579f8bc393ef525918528
2021-08-03 03:10:57 +02:00
|
|
|
cfd->InstallSuperVersion(sv_context, mutable_cf_options);
|
2017-04-06 02:14:05 +02:00
|
|
|
|
2019-03-26 03:14:04 +01:00
|
|
|
// There may be a small data race here. The snapshot tricking bottommost
|
|
|
|
// compaction may already be released here. But assuming there will always be
|
|
|
|
// newer snapshots created and released frequently, the compaction will be
|
|
|
|
// triggered soon anyway.
|
|
|
|
bottommost_files_mark_threshold_ = kMaxSequenceNumber;
|
|
|
|
for (auto* my_cfd : *versions_->GetColumnFamilySet()) {
|
|
|
|
bottommost_files_mark_threshold_ = std::min(
|
|
|
|
bottommost_files_mark_threshold_,
|
|
|
|
my_cfd->current()->storage_info()->bottommost_files_mark_threshold());
|
|
|
|
}
|
|
|
|
|
2017-04-06 02:14:05 +02:00
|
|
|
// Whenever we install new SuperVersion, we might need to issue new flushes or
|
|
|
|
// compactions.
|
|
|
|
SchedulePendingCompaction(cfd);
|
|
|
|
MaybeScheduleFlushOrCompaction();
|
|
|
|
|
|
|
|
// Update max_total_in_memory_state_
|
2018-04-13 02:55:14 +02:00
|
|
|
max_total_in_memory_state_ = max_total_in_memory_state_ - old_memtable_size +
|
|
|
|
mutable_cf_options.write_buffer_size *
|
|
|
|
mutable_cf_options.max_write_buffer_number;
|
2017-04-06 02:14:05 +02:00
|
|
|
}
|
2017-10-06 19:26:38 +02:00
|
|
|
|
2018-03-28 19:23:31 +02:00
|
|
|
// ShouldPurge is called by FindObsoleteFiles when doing a full scan,
|
2019-09-18 01:43:07 +02:00
|
|
|
// and db mutex (mutex_) should already be held.
|
2018-03-28 19:23:31 +02:00
|
|
|
// Actually, the current implementation of FindObsoleteFiles with
|
|
|
|
// full_scan=true can issue I/O requests to obtain the list of files in
|
|
|
|
// directories, e.g. env_->getChildren while holding db mutex.
|
|
|
|
bool DBImpl::ShouldPurge(uint64_t file_number) const {
|
2019-09-18 01:43:07 +02:00
|
|
|
return files_grabbed_for_purge_.find(file_number) ==
|
|
|
|
files_grabbed_for_purge_.end() &&
|
|
|
|
purge_files_.find(file_number) == purge_files_.end();
|
2018-03-28 19:23:31 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// MarkAsGrabbedForPurge is called by FindObsoleteFiles, and db mutex
|
|
|
|
// (mutex_) should already be held.
|
|
|
|
void DBImpl::MarkAsGrabbedForPurge(uint64_t file_number) {
|
2019-09-18 01:43:07 +02:00
|
|
|
files_grabbed_for_purge_.insert(file_number);
|
2018-03-28 19:23:31 +02:00
|
|
|
}
|
|
|
|
|
2017-10-06 19:26:38 +02:00
|
|
|
void DBImpl::SetSnapshotChecker(SnapshotChecker* snapshot_checker) {
|
|
|
|
InstrumentedMutexLock l(&mutex_);
|
|
|
|
// snapshot_checker_ should only be set once. If we need to set it multiple
|
|
|
|
// times, we need to make sure the old one is not deleted while it is still
|
|
|
|
// in use by a compaction job.
|
|
|
|
assert(!snapshot_checker_);
|
|
|
|
snapshot_checker_.reset(snapshot_checker);
|
|
|
|
}
|
2019-01-16 06:32:15 +01:00
|
|
|
|
|
|
|
void DBImpl::GetSnapshotContext(
|
|
|
|
JobContext* job_context, std::vector<SequenceNumber>* snapshot_seqs,
|
|
|
|
SequenceNumber* earliest_write_conflict_snapshot,
|
|
|
|
SnapshotChecker** snapshot_checker_ptr) {
|
|
|
|
mutex_.AssertHeld();
|
|
|
|
assert(job_context != nullptr);
|
|
|
|
assert(snapshot_seqs != nullptr);
|
|
|
|
assert(earliest_write_conflict_snapshot != nullptr);
|
|
|
|
assert(snapshot_checker_ptr != nullptr);
|
|
|
|
|
|
|
|
*snapshot_checker_ptr = snapshot_checker_.get();
|
|
|
|
if (use_custom_gc_ && *snapshot_checker_ptr == nullptr) {
|
|
|
|
*snapshot_checker_ptr = DisableGCSnapshotChecker::Instance();
|
|
|
|
}
|
|
|
|
if (*snapshot_checker_ptr != nullptr) {
|
|
|
|
// If snapshot_checker is used, that means the flush/compaction may
|
|
|
|
// contain values not visible to snapshots taken after
|
|
|
|
// flush/compaction job starts. Take a snapshot and it will appear
|
|
|
|
// in snapshot_seqs and force compaction iterator to consider such
|
|
|
|
// snapshots.
|
|
|
|
const Snapshot* job_snapshot =
|
|
|
|
GetSnapshotImpl(false /*write_conflict_boundary*/, false /*lock*/);
|
|
|
|
job_context->job_snapshot.reset(new ManagedSnapshot(this, job_snapshot));
|
|
|
|
}
|
|
|
|
*snapshot_seqs = snapshots_.GetAll(earliest_write_conflict_snapshot);
|
|
|
|
}
|
2022-02-01 18:00:46 +01:00
|
|
|
|
|
|
|
Status DBImpl::WaitForCompact(bool wait_unscheduled) {
|
|
|
|
// Wait until the compaction completes
|
|
|
|
|
|
|
|
// TODO: a bug here. This function actually does not necessarily
|
|
|
|
// wait for compactions. It actually waits for a scheduled compaction
|
|
|
|
// OR flush to finish.
|
|
|
|
|
|
|
|
InstrumentedMutexLock l(&mutex_);
|
|
|
|
while ((bg_bottom_compaction_scheduled_ || bg_compaction_scheduled_ ||
|
|
|
|
bg_flush_scheduled_ ||
|
|
|
|
(wait_unscheduled && unscheduled_compactions_)) &&
|
|
|
|
(error_handler_.GetBGError().ok())) {
|
|
|
|
bg_cv_.Wait();
|
|
|
|
}
|
|
|
|
return error_handler_.GetBGError();
|
|
|
|
}
|
|
|
|
|
2020-02-20 21:07:53 +01:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|