Move the file copy out of the mutex.

Summary: We now release the mutex before copying the files in the case of the trivial move. This path does not use the compaction job.

Test Plan: DBTest.LevelCompactionThirdPath

Reviewers: yhchiang, igor, sdong

Reviewed By: sdong

Subscribers: dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D30381
Parent: 153f4f0719
Commit: 7661e5a76e
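For context, holding the DB mutex across a file copy stalls every other thread that needs that mutex. Reading the hunks below, the approach is to stop counting a cross-path move as a trivial move, so any actual byte copy is left to the regular compaction path instead of being done inside BackgroundCompaction. A minimal, hypothetical sketch of the underlying rule (slow I/O should run with the lock released); SlowCopy and MoveAcrossPathsUnlocked are made-up names, not RocksDB code:

```cpp
#include <fstream>
#include <mutex>
#include <string>

// Hypothetical stand-in for a slow file copy; not RocksDB's CopyFile().
static bool SlowCopy(const std::string& src, const std::string& dst) {
  std::ifstream in(src, std::ios::binary);
  std::ofstream out(dst, std::ios::binary);
  out << in.rdbuf();
  return out.good();
}

// Illustration only: release the DB mutex around the copy, re-acquire it
// before touching shared metadata. The caller is assumed to hold db_mutex.
bool MoveAcrossPathsUnlocked(std::mutex& db_mutex, const std::string& src,
                             const std::string& dst) {
  db_mutex.unlock();                     // do not hold the lock during I/O
  const bool copied = SlowCopy(src, dst);
  db_mutex.lock();                       // lock again before shared state
  return copied;                         // caller then applies the version edit
}
```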
HISTORY.md
@@ -3,6 +3,10 @@
 ### Unreleased Features
 * Changed the LRU caching algorithm so that referenced blocks (by iterators) are never evicted
 * By default we now optimize the compilation for the compilation platform (using -march=native). If you want to build portable binary, use 'PORTABLE=1' before the make command.
+* We now allow level-compaction to place files in different paths by
+  specifying them in db_paths along with the target_size.
+  Lower numbered levels will be placed earlier in the db_paths and higher
+  numbered levels will be placed later in the db_paths vector.
 
 ### 3.9.0 (12/8/2014)
 
@@ -17,10 +21,6 @@
 * New API LinkFile added to Env. If you implement your own Env class, an
   implementation of the API LinkFile will have to be provided.
 * MemTableRep takes MemTableAllocator instead of Arena
-* We now allow level-compaction to place files in different paths by
-  specifying them in db_paths along with the target_size.
-  Lower numbered levels will be placed earlier in the db_paths and higher
-  numbered levels will be placed later in the db_paths vector.
 
 ### Improvements
 * RocksDBLite library now becomes smaller and will be compiled with -fno-exceptions flag.
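For reference, the db_paths feature mentioned in these entries is configured on Options. A minimal sketch, assuming the rocksdb::DbPath(path, target_size) constructor; the mount points and sizes below are illustrative only:

```cpp
#include "rocksdb/options.h"

rocksdb::Options MultiPathOptions() {
  rocksdb::Options options;
  // Lower levels fill the earlier (typically faster) path first; once its
  // target_size budget is used up, higher levels spill to the later path.
  options.db_paths.emplace_back("/mnt/fast_ssd/db", 64ull << 30);  // ~64 GB
  options.db_paths.emplace_back("/mnt/bulk_hdd/db", 1ull << 40);   // ~1 TB
  return options;
}
```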
db/compaction.cc
@@ -140,6 +140,7 @@ bool Compaction::IsTrivialMove() const {
           num_input_levels() == 2 &&
           num_input_files(0) == 1 &&
           num_input_files(1) == 0 &&
+          input(0, 0)->fd.GetPathId() == GetOutputPathId() &&
           TotalFileSize(grandparents_) <= max_grandparent_overlap_bytes_);
 }
 
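The added input(0, 0)->fd.GetPathId() == GetOutputPathId() clause means a single-file move only qualifies as trivial when the file already lives in the output path; a cross-path move needs real file I/O and falls through to the regular compaction. A hypothetical illustration of that distinction (the names here are not from the RocksDB code):

```cpp
#include <cstdint>

// Hypothetical: decide how to promote a single file to the next level.
enum class MoveKind { kMetadataOnly, kNeedsCopy };

MoveKind ClassifyMove(uint32_t input_path_id, uint32_t output_path_id) {
  // Same path: the .sst file can stay where it is; only the manifest
  // (a VersionEdit) changes, which is cheap and safe under the mutex.
  if (input_path_id == output_path_id) return MoveKind::kMetadataOnly;
  // Different path: the bytes must be rewritten in the target path,
  // which is slow I/O and must not run while holding the DB mutex.
  return MoveKind::kNeedsCopy;
}
```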
db/compaction.h
@@ -119,7 +119,7 @@ class Compaction {
   // moving a single input file to the next level (no merging or splitting)
   bool IsTrivialMove() const;
 
-  // If true, then the comaction can be done by simply deleting input files.
+  // If true, then the compaction can be done by simply deleting input files.
   bool IsDeletionCompaction() const {
     return deletion_compaction_;
   }
 
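These two predicates decide how a picked compaction is executed in DBImpl::BackgroundCompaction. A reduced, hedged sketch of that dispatch, using a stand-in type rather than the real Compaction class:

```cpp
// Reduced stand-in for the real Compaction class, kept only to show how
// the two predicates above steer execution.
struct CompactionSketch {
  bool deletion = false;
  bool trivial = false;
  bool IsDeletionCompaction() const { return deletion; }
  bool IsTrivialMove() const { return trivial; }
};

enum class CompactionPlan { kDeleteInputs, kMoveFile, kRunCompactionJob };

CompactionPlan Dispatch(const CompactionSketch& c) {
  if (c.IsDeletionCompaction()) return CompactionPlan::kDeleteInputs;  // just drop the inputs
  if (c.IsTrivialMove()) return CompactionPlan::kMoveFile;             // manifest-only move
  return CompactionPlan::kRunCompactionJob;                            // full merge/rewrite
}
```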
db/db_impl.cc
@@ -2060,31 +2060,10 @@ Status DBImpl::BackgroundCompaction(bool* madeProgress, JobContext* job_context,
     // Move file to next level
     assert(c->num_input_files(0) == 1);
     FileMetaData* f = c->input(0, 0);
-    FileMetaData ftemp;
-    uint64_t fdnum = f->fd.GetNumber();
-    uint32_t fdpath = f->fd.GetPathId();
     c->edit()->DeleteFile(c->level(), f->fd.GetNumber());
-    // Need to move file if file is to be stored in a new path
-    if (c->GetOutputPathId() != f->fd.GetPathId()) {
-      fdnum = versions_->NewFileNumber();
-      std::string source = TableFileName(db_options_.db_paths,
-                                         f->fd.GetNumber(), f->fd.GetPathId());
-      std::string destination =
-          TableFileName(db_options_.db_paths, fdnum, c->GetOutputPathId());
-      Status s = CopyFile(env_, source, destination, 0);
-      if (s.ok()) {
-        fdpath = c->GetOutputPathId();
-      } else {
-        fdnum = f->fd.GetNumber();
-        if (!s.IsShutdownInProgress()) {
-          Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
-              "Compaction error: %s", s.ToString().c_str());
-        }
-      }
-    }
-    c->edit()->AddFile(c->level() + 1, fdnum, fdpath, f->fd.GetFileSize(),
-                       f->smallest, f->largest, f->smallest_seqno,
-                       f->largest_seqno);
+    c->edit()->AddFile(c->level() + 1, f->fd.GetNumber(), f->fd.GetPathId(),
+                       f->fd.GetFileSize(), f->smallest, f->largest,
+                       f->smallest_seqno, f->largest_seqno);
     status = versions_->LogAndApply(c->column_family_data(),
                                     *c->mutable_cf_options(),
                                     c->edit(), &mutex_, db_directory_.get());
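With the copy removed, the remaining trivial-move branch is pure metadata work: delete the file from its level, re-add the same file (same number, same path) one level down, then LogAndApply the edit. A hedged sketch with stand-in types (not the real VersionEdit/FileMetaData API):

```cpp
#include <cstdint>
#include <utility>
#include <vector>

// Reduced stand-ins, for illustration of the simplified branch only.
struct FileMeta {
  uint64_t number;
  uint32_t path_id;
  uint64_t size;
};

struct VersionEditSketch {
  std::vector<std::pair<int, uint64_t>> deleted;  // (level, file number)
  std::vector<std::pair<int, FileMeta>> added;    // (level, file metadata)
  void DeleteFile(int level, uint64_t number) { deleted.push_back({level, number}); }
  void AddFile(int level, const FileMeta& f) { added.push_back({level, f}); }
};

// After the fix, IsTrivialMove() guarantees the file is already in the
// output path, so the move touches no file data: only the manifest edit
// is built and applied while holding the DB mutex.
void TrivialMove(VersionEditSketch* edit, int level, const FileMeta& f) {
  edit->DeleteFile(level, f.number);  // remove from the input level
  edit->AddFile(level + 1, f);        // re-add the same file one level down
  // LogAndApply(edit) would then persist this manifest change.
}
```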