// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#include "db/event_helpers.h"

namespace ROCKSDB_NAMESPACE {
namespace {
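// Used below when emitting average key/value sizes; avoids a division by zero
// for tables that contain no entries.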
template <class T>
inline T SafeDivide(T a, T b) {
  return b == 0 ? 0 : a / b;
}
} // namespace
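
// Appends the current wall-clock time, in microseconds since the epoch, as the
// "time_micros" field of the JSON event under construction.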
void EventHelpers::AppendCurrentTime(JSONWriter* jwriter) {
  *jwriter << "time_micros"
           << std::chrono::duration_cast<std::chrono::microseconds>(
                  std::chrono::system_clock::now().time_since_epoch())
                  .count();
}
#ifndef ROCKSDB_LITE
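// Builds a TableFileCreationBriefInfo from the arguments and invokes
// EventListener::OnTableFileCreationStarted() on every registered listener.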
void EventHelpers::NotifyTableFileCreationStarted(
    const std::vector<std::shared_ptr<EventListener>>& listeners,
    const std::string& db_name, const std::string& cf_name,
    const std::string& file_path, int job_id, TableFileCreationReason reason) {
  TableFileCreationBriefInfo info;
  info.db_name = db_name;
  info.cf_name = cf_name;
  info.file_path = file_path;
  info.job_id = job_id;
  info.reason = reason;
  for (auto& listener : listeners) {
    listener->OnTableFileCreationStarted(info);
  }
}
#endif // !ROCKSDB_LITE
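
// Notifies every registered listener of a background error. The DB mutex is
// released while the callbacks run and re-acquired before returning. When
// automatic recovery is possible, OnErrorRecoveryBegin() is also invoked,
// which lets a listener disable auto recovery by clearing *auto_recovery.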
void EventHelpers::NotifyOnBackgroundError(
    const std::vector<std::shared_ptr<EventListener>>& listeners,
    BackgroundErrorReason reason, Status* bg_error, InstrumentedMutex* db_mutex,
    bool* auto_recovery) {
#ifndef ROCKSDB_LITE
  if (listeners.size() == 0U) {
    return;
  }
  db_mutex->AssertHeld();
  // release lock while notifying events
  db_mutex->Unlock();
  for (auto& listener : listeners) {
    listener->OnBackgroundError(reason, bg_error);
    bg_error->PermitUncheckedError();
    if (*auto_recovery) {
      listener->OnErrorRecoveryBegin(reason, *bg_error, auto_recovery);
    }
  }
  db_mutex->Lock();
#else
  (void)listeners;
  (void)reason;
  (void)bg_error;
  (void)db_mutex;
  (void)auto_recovery;
#endif // ROCKSDB_LITE
}
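
// Emits a "table_file_creation" entry to the event log (only when the file was
// created successfully and an EventLogger is available) and then notifies any
// registered listeners. A logged entry looks roughly like:
//   {"time_micros": ..., "cf_name": ..., "job": ..., "event":
//    "table_file_creation", "file_number": ..., "file_size": ...,
//    "table_properties": {...}}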
void EventHelpers::LogAndNotifyTableFileCreationFinished(
    EventLogger* event_logger,
    const std::vector<std::shared_ptr<EventListener>>& listeners,
    const std::string& db_name, const std::string& cf_name,
    const std::string& file_path, int job_id, const FileDescriptor& fd,
    uint64_t oldest_blob_file_number, const TableProperties& table_properties,
    TableFileCreationReason reason, const Status& s,
    const std::string& file_checksum,
    const std::string& file_checksum_func_name) {
  if (s.ok() && event_logger) {
    JSONWriter jwriter;
    AppendCurrentTime(&jwriter);
    jwriter << "cf_name" << cf_name << "job" << job_id << "event"
            << "table_file_creation"
            << "file_number" << fd.GetNumber() << "file_size"
            << fd.GetFileSize() << "file_checksum" << file_checksum
            << "file_checksum_func_name" << file_checksum_func_name;

    // table_properties
    {
      jwriter << "table_properties";
      jwriter.StartObject();

      // basic properties:
      jwriter << "data_size" << table_properties.data_size << "index_size"
              << table_properties.index_size << "index_partitions"
              << table_properties.index_partitions << "top_level_index_size"
              << table_properties.top_level_index_size
              << "index_key_is_user_key"
              << table_properties.index_key_is_user_key
              << "index_value_is_delta_encoded"
              << table_properties.index_value_is_delta_encoded << "filter_size"
              << table_properties.filter_size << "raw_key_size"
              << table_properties.raw_key_size << "raw_average_key_size"
              << SafeDivide(table_properties.raw_key_size,
                            table_properties.num_entries)
              << "raw_value_size" << table_properties.raw_value_size
              << "raw_average_value_size"
              << SafeDivide(table_properties.raw_value_size,
                            table_properties.num_entries)
              << "num_data_blocks" << table_properties.num_data_blocks
              << "num_entries" << table_properties.num_entries
              << "num_filter_entries" << table_properties.num_filter_entries
              << "num_deletions" << table_properties.num_deletions
              << "num_merge_operands" << table_properties.num_merge_operands
              << "num_range_deletions" << table_properties.num_range_deletions
              << "format_version" << table_properties.format_version
              << "fixed_key_len" << table_properties.fixed_key_len
              << "filter_policy" << table_properties.filter_policy_name
              << "column_family_name" << table_properties.column_family_name
              << "column_family_id" << table_properties.column_family_id
              << "comparator" << table_properties.comparator_name
              << "merge_operator" << table_properties.merge_operator_name
              << "prefix_extractor_name"
              << table_properties.prefix_extractor_name << "property_collectors"
              << table_properties.property_collectors_names << "compression"
              << table_properties.compression_name << "compression_options"
              << table_properties.compression_options << "creation_time"
              << table_properties.creation_time << "oldest_key_time"
              << table_properties.oldest_key_time << "file_creation_time"
              << table_properties.file_creation_time
              << "slow_compression_estimated_data_size"
              << table_properties.slow_compression_estimated_data_size
              << "fast_compression_estimated_data_size"
              << table_properties.fast_compression_estimated_data_size
              << "db_id" << table_properties.db_id << "db_session_id"
              << table_properties.db_session_id;

      // user collected properties
      for (const auto& prop : table_properties.readable_properties) {
        jwriter << prop.first << prop.second;
      }
      jwriter.EndObject();
    }
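
    // "oldest_blob_file_number" is only emitted when the new table actually
    // references blob files.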
    if (oldest_blob_file_number != kInvalidBlobFileNumber) {
      jwriter << "oldest_blob_file_number" << oldest_blob_file_number;
    }

    jwriter.EndObject();

    event_logger->Log(jwriter);
  }
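
  // Listeners are notified regardless of whether the creation succeeded;
  // OnTableFileCreated() is also called on failure, and the outcome can be
  // inspected through TableFileCreationInfo::status.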
#ifndef ROCKSDB_LITE
  if (listeners.size() == 0) {
    return;
  }
  TableFileCreationInfo info;
  info.db_name = db_name;
  info.cf_name = cf_name;
  info.file_path = file_path;
  info.file_size = fd.file_size;
  info.job_id = job_id;
  info.table_properties = table_properties;
  info.reason = reason;
  info.status = s;
  info.file_checksum = file_checksum;
  info.file_checksum_func_name = file_checksum_func_name;
  for (auto& listener : listeners) {
    listener->OnTableFileCreated(info);
  }
  info.status.PermitUncheckedError();
#else
  (void)listeners;
  (void)db_name;
  (void)cf_name;
  (void)file_path;
  (void)reason;
#endif // !ROCKSDB_LITE
}
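
// Logs a "table_file_deletion" event (job, file_number, and the status when
// the deletion failed) and notifies EventListener::OnTableFileDeleted().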
void EventHelpers::LogAndNotifyTableFileDeletion(
    EventLogger* event_logger, int job_id, uint64_t file_number,
    const std::string& file_path, const Status& status,
    const std::string& dbname,
    const std::vector<std::shared_ptr<EventListener>>& listeners) {
  JSONWriter jwriter;
  AppendCurrentTime(&jwriter);

  jwriter << "job" << job_id << "event"
          << "table_file_deletion"
          << "file_number" << file_number;
  if (!status.ok()) {
    jwriter << "status" << status.ToString();
  }

  jwriter.EndObject();

  event_logger->Log(jwriter);

#ifndef ROCKSDB_LITE
  TableFileDeletionInfo info;
  info.db_name = dbname;
  info.job_id = job_id;
  info.file_path = file_path;
  info.status = status;
  for (auto& listener : listeners) {
    listener->OnTableFileDeleted(info);
  }
  info.status.PermitUncheckedError();
#else
  (void)file_path;
  (void)dbname;
  (void)listeners;
#endif // !ROCKSDB_LITE
}
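
// Notifies listeners that recovery from a background error has completed,
// passing along the original background error. As in
// NotifyOnBackgroundError(), the DB mutex is released while the callbacks run.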
void EventHelpers::NotifyOnErrorRecoveryCompleted(
    const std::vector<std::shared_ptr<EventListener>>& listeners,
    Status old_bg_error, InstrumentedMutex* db_mutex) {
#ifndef ROCKSDB_LITE
  if (listeners.size() > 0) {
    db_mutex->AssertHeld();
    // release lock while notifying events
    db_mutex->Unlock();
    for (auto& listener : listeners) {
      listener->OnErrorRecoveryCompleted(old_bg_error);
    }
    db_mutex->Lock();
  }
  old_bg_error.PermitUncheckedError();
#else
  (void)listeners;
  (void)old_bg_error;
  (void)db_mutex;
#endif // ROCKSDB_LITE
}
} // namespace ROCKSDB_NAMESPACE