improve comments for CompactionJob (#5341)
Summary: add class/function level comments to the header file Pull Request resolved: https://github.com/facebook/rocksdb/pull/5341 Differential Revision: D15485442 Pulled By: miasantreble fbshipit-source-id: 9f11e2a1cd3ce0f4990f01353d0a6f4b050615cf
This commit is contained in:
parent
38a06aa225
commit
09b534cc2f
@ -415,7 +415,6 @@ void CompactionJob::Prepare() {
|
||||
|
||||
write_hint_ =
|
||||
c->column_family_data()->CalculateSSTWriteHint(c->output_level());
|
||||
// Is this compaction producing files at the bottommost level?
|
||||
bottommost_level_ = c->bottommost_level();
|
||||
|
||||
if (c->ShouldFormSubcompactions()) {
|
||||
@ -445,11 +444,6 @@ struct RangeWithSize {
|
||||
: range(a, b), size(s) {}
|
||||
};
|
||||
|
||||
// Generates a histogram representing potential divisions of key ranges from
|
||||
// the input. It adds the starting and/or ending keys of certain input files
|
||||
// to the working set and then finds the approximate size of data in between
|
||||
// each consecutive pair of slices. Then it divides these ranges into
|
||||
// consecutive groups such that each group has a similar size.
|
||||
void CompactionJob::GenSubcompactionBoundaries() {
|
||||
auto* c = compact_->compaction;
|
||||
auto* cfd = c->column_family_data();
|
||||
|
@ -55,6 +55,11 @@ class Version;
|
||||
class VersionEdit;
|
||||
class VersionSet;
|
||||
|
||||
// CompactionJob is responsible for executing the compaction. Each (manual or
|
||||
// automated) compaction corresponds to a CompactionJob object, and usually
|
||||
// goes through the stages of `Prepare()`->`Run()`->`Install()`. CompactionJob
|
||||
// will divide the compaction into subcompactions and execute them in parallel
|
||||
// if needed.
|
||||
class CompactionJob {
|
||||
public:
|
||||
CompactionJob(
|
||||
@ -80,17 +85,28 @@ class CompactionJob {
|
||||
CompactionJob& operator=(const CompactionJob& job) = delete;
|
||||
|
||||
// REQUIRED: mutex held
|
||||
// Prepare for the compaction by setting up boundaries for each subcompaction
|
||||
void Prepare();
|
||||
// REQUIRED: mutex not held
|
||||
// Launch threads for each subcompaction and wait for them to finish. After
|
||||
// that, verify table is usable and finally do bookkeeping to unify
|
||||
// subcompaction results
|
||||
Status Run();
|
||||
|
||||
// REQUIRED: mutex held
|
||||
// Add compaction input/output to the current version
|
||||
Status Install(const MutableCFOptions& mutable_cf_options);
|
||||
|
||||
private:
|
||||
struct SubcompactionState;
|
||||
|
||||
void AggregateStatistics();
|
||||
|
||||
// Generates a histogram representing potential divisions of key ranges from
|
||||
// the input. It adds the starting and/or ending keys of certain input files
|
||||
// to the working set and then finds the approximate size of data in between
|
||||
// each consecutive pair of slices. Then it divides these ranges into
|
||||
// consecutive groups such that each group has a similar size.
|
||||
void GenSubcompactionBoundaries();
|
||||
|
||||
// Update the thread status for starting a compaction.
|
||||
@ -163,6 +179,7 @@ class CompactionJob {
|
||||
|
||||
EventLogger* event_logger_;
|
||||
|
||||
// Is this compaction creating a file in the bottommost level?
|
||||
bool bottommost_level_;
|
||||
bool paranoid_file_checks_;
|
||||
bool measure_io_stats_;
|
||||
|
Loading…
Reference in New Issue
Block a user