Fix a number of object lifetime/ownership issues
Summary:
Replace manual memory management with std::unique_ptr in a number of places;
not exhaustive, but this fixes a few leaks with file handles as well as
clarifies semantics of the ownership of file handles with log classes.

Test Plan: db_stress, make check

Reviewers: dhruba

Reviewed By: dhruba

CC: zshao, leveldb, heyongqiang

Differential Revision: https://reviews.facebook.net/D8043
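The recurring pattern in the diff below is that a factory fills in a std::unique_ptr out-parameter and the consumer takes ownership with std::move, so an early return can no longer leak the handle and no caller has to remember a matching delete. A minimal, self-contained sketch of that shape, using hypothetical stand-in types rather than the real leveldb Env/WritableFile/log::Writer interfaces:

// Sketch only: stand-in types, not the real leveldb::Env / WritableFile API.
#include <cstdio>
#include <memory>
#include <string>
#include <utility>

struct WritableFile {                     // stand-in for leveldb::WritableFile
  explicit WritableFile(std::string name) : name_(std::move(name)) {}
  void Append(const std::string& data) { buffer_ += data; }
  std::string name_;
  std::string buffer_;
};

// Factory in the style of Env::NewWritableFile after this change: ownership
// reaches the caller through a unique_ptr out-parameter, so an early error
// return cannot leak the handle.
bool NewWritableFile(const std::string& fname,
                     std::unique_ptr<WritableFile>* result) {
  result->reset(new WritableFile(fname));
  return true;
}

// Log writer in the style of log::Writer after this change: it takes the file
// by unique_ptr&& and owns it; the caller keeps no separate raw pointer and
// writes no delete.
class LogWriter {
 public:
  explicit LogWriter(std::unique_ptr<WritableFile>&& dest)
      : dest_(std::move(dest)) {}
  void AddRecord(const std::string& record) { dest_->Append(record); }
  WritableFile* file() { return dest_.get(); }  // borrowed, not owned

 private:
  std::unique_ptr<WritableFile> dest_;
};

int main() {
  std::unique_ptr<WritableFile> file;
  if (!NewWritableFile("example.log", &file)) return 1;
  LogWriter writer(std::move(file));            // ownership moves into writer
  writer.AddRecord("hello");
  std::printf("wrote %zu bytes to %s\n", writer.file()->buffer_.size(),
              writer.file()->name_.c_str());
  return 0;                                     // file destroyed with writer
}

Whoever holds the unique_ptr owns the file; file() hands out a borrowed raw pointer for callers that only need to sync or inspect it, which is how log_->file()->Sync() can replace the separate logfile_ member in db_impl.cc.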
parent 88b79b24f3
commit 2fdf91a4f8
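Where ownership is genuinely shared between the caller and the DB, for example Options::info_log and Options::block_cache and their C-API wrappers leveldb_logger_t and leveldb_cache_t, the change switches to std::shared_ptr instead, which is what lets the owns_info_log_/owns_cache_ bookkeeping disappear. A hedged sketch with simplified types (not the real headers) of why the flags and conditional deletes go away:

// Sketch only: simplified Options/DB types, not the real rocksdb headers.
#include <memory>

struct Logger {};
struct Cache {};

struct Options {
  std::shared_ptr<Logger> info_log;    // may be supplied by the caller
  std::shared_ptr<Cache> block_cache;  // may be shared across DB instances
};

class DBSketch {
 public:
  explicit DBSketch(const Options& options) : options_(options) {
    if (!options_.info_log) {
      // DB-created logger: still just another shared_ptr owner.
      options_.info_log = std::make_shared<Logger>();
    }
  }
  // No owns_info_log_/owns_cache_ flags and no conditional deletes in the
  // destructor: the last shared_ptr owner releases the object.

 private:
  Options options_;
};

int main() {
  Options opt;
  opt.block_cache = std::make_shared<Cache>();  // caller keeps a reference too
  DBSketch db(opt);
  return 0;
}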
@@ -26,13 +26,13 @@ Status BuildTable(const std::string& dbname,
 
   std::string fname = TableFileName(dbname, meta->number);
   if (iter->Valid()) {
-    WritableFile* file;
+    unique_ptr<WritableFile> file;
     s = env->NewWritableFile(fname, &file);
     if (!s.ok()) {
       return s;
     }
 
-    TableBuilder* builder = new TableBuilder(options, file, 0);
+    TableBuilder* builder = new TableBuilder(options, file.get(), 0);
     meta->smallest.DecodeFrom(iter->key());
     for (; iter->Valid(); iter->Next()) {
       Slice key = iter->key();
@@ -63,8 +63,6 @@ Status BuildTable(const std::string& dbname,
     if (s.ok()) {
       s = file->Close();
     }
-    delete file;
-    file = NULL;
 
     if (s.ok()) {
       // Verify that the table is usable
db/c.cc (13)

@@ -39,6 +39,8 @@ using leveldb::WritableFile;
 using leveldb::WriteBatch;
 using leveldb::WriteOptions;
 
+using std::shared_ptr;
+
 extern "C" {
 
 struct leveldb_t { DB* rep; };
@@ -48,12 +50,12 @@ struct leveldb_snapshot_t { const Snapshot* rep; };
 struct leveldb_readoptions_t { ReadOptions rep; };
 struct leveldb_writeoptions_t { WriteOptions rep; };
 struct leveldb_options_t { Options rep; };
-struct leveldb_cache_t { Cache* rep; };
 struct leveldb_seqfile_t { SequentialFile* rep; };
 struct leveldb_randomfile_t { RandomAccessFile* rep; };
 struct leveldb_writablefile_t { WritableFile* rep; };
-struct leveldb_logger_t { Logger* rep; };
 struct leveldb_filelock_t { FileLock* rep; };
+struct leveldb_logger_t { shared_ptr<Logger> rep; };
+struct leveldb_cache_t { shared_ptr<Cache> rep; };
 
 struct leveldb_comparator_t : public Comparator {
   void* state_;
@@ -421,7 +423,9 @@ void leveldb_options_set_env(leveldb_options_t* opt, leveldb_env_t* env) {
 }
 
 void leveldb_options_set_info_log(leveldb_options_t* opt, leveldb_logger_t* l) {
-  opt->rep.info_log = (l ? l->rep : NULL);
+  if (l) {
+    opt->rep.info_log = l->rep;
+  }
 }
 
 void leveldb_options_set_write_buffer_size(leveldb_options_t* opt, size_t s) {
@@ -433,7 +437,9 @@ void leveldb_options_set_max_open_files(leveldb_options_t* opt, int n) {
 }
 
 void leveldb_options_set_cache(leveldb_options_t* opt, leveldb_cache_t* c) {
+  if (c) {
     opt->rep.block_cache = c->rep;
+  }
 }
 
 void leveldb_options_set_block_size(leveldb_options_t* opt, size_t s) {
@@ -647,7 +653,6 @@ leveldb_cache_t* leveldb_cache_create_lru(size_t capacity) {
 }
 
 void leveldb_cache_destroy(leveldb_cache_t* cache) {
-  delete cache->rep;
   delete cache;
 }
 
@@ -28,7 +28,7 @@ class CorruptionTest {
  public:
   test::ErrorEnv env_;
   std::string dbname_;
-  Cache* tiny_cache_;
+  shared_ptr<Cache> tiny_cache_;
   Options options_;
   DB* db_;
 
@@ -47,7 +47,6 @@ class CorruptionTest {
   ~CorruptionTest() {
     delete db_;
     DestroyDB(dbname_, Options());
-    delete tiny_cache_;
   }
 
   Status TryReopen(Options* options = NULL) {
@@ -469,7 +469,7 @@ struct ThreadState {
 
 class Benchmark {
  private:
-  Cache* cache_;
+  shared_ptr<Cache> cache_;
   const FilterPolicy* filter_policy_;
   DB* db_;
   long num_;
@@ -655,7 +655,6 @@ class Benchmark {
 
   ~Benchmark() {
     delete db_;
-    delete cache_;
     delete filter_policy_;
   }
 
@@ -1276,14 +1275,13 @@ class Benchmark {
   void HeapProfile() {
     char fname[100];
     snprintf(fname, sizeof(fname), "%s/heap-%04d", FLAGS_db, ++heap_counter_);
-    WritableFile* file;
+    unique_ptr<WritableFile> file;
     Status s = FLAGS_env->NewWritableFile(fname, &file);
     if (!s.ok()) {
       fprintf(stderr, "%s\n", s.ToString().c_str());
       return;
     }
-    bool ok = port::GetHeapProfile(WriteToFile, file);
-    delete file;
+    bool ok = port::GetHeapProfile(WriteToFile, file.get());
     if (!ok) {
       fprintf(stderr, "heap profiling not supported\n");
       FLAGS_env->DeleteFile(fname);
db/db_impl.cc (139)

@@ -48,18 +48,17 @@ static Status NewLogger(const std::string& dbname,
                         const std::string& db_log_dir,
                         Env* env,
                         size_t max_log_file_size,
-                        Logger** logger) {
+                        shared_ptr<Logger>* logger) {
   std::string db_absolute_path;
   env->GetAbsolutePath(dbname, &db_absolute_path);
 
   if (max_log_file_size > 0) { // need to auto split the log file?
-    AutoSplitLogger<Logger>* auto_split_logger =
+    auto logger_ptr =
       new AutoSplitLogger<Logger>(env, dbname, db_log_dir, max_log_file_size);
-    Status s = auto_split_logger->GetStatus();
+    logger->reset(logger_ptr);
+    Status s = logger_ptr->GetStatus();
     if (!s.ok()) {
-      delete auto_split_logger;
-    } else {
-      *logger = auto_split_logger;
+      logger->reset();
     }
     return s;
   } else {
@@ -103,8 +102,8 @@ struct DBImpl::CompactionState {
   std::list<uint64_t> allocated_file_numbers;
 
   // State kept for output being generated
-  WritableFile* outfile;
-  TableBuilder* builder;
+  unique_ptr<WritableFile> outfile;
+  unique_ptr<TableBuilder> builder;
 
   uint64_t total_bytes;
 
@@ -112,8 +111,6 @@ struct DBImpl::CompactionState {
 
   explicit CompactionState(Compaction* c)
       : compaction(c),
-        outfile(NULL),
-        builder(NULL),
         total_bytes(0) {
   }
 };
@@ -178,14 +175,11 @@ DBImpl::DBImpl(const Options& options, const std::string& dbname)
           dbname, &internal_comparator_, &internal_filter_policy_, options)),
       internal_filter_policy_(options.filter_policy),
       owns_info_log_(options_.info_log != options.info_log),
-      owns_cache_(options_.block_cache != options.block_cache),
       db_lock_(NULL),
       shutting_down_(NULL),
       bg_cv_(&mutex_),
       mem_(new MemTable(internal_comparator_, NumberLevels())),
-      logfile_(NULL),
       logfile_number_(0),
-      log_(NULL),
       tmp_batch_(new WriteBatch),
       bg_compaction_scheduled_(0),
       bg_logstats_scheduled_(false),
@@ -206,13 +200,13 @@ DBImpl::DBImpl(const Options& options, const std::string& dbname)
   stats_ = new CompactionStats[options.num_levels];
   // Reserve ten files or so for other uses and give the rest to TableCache.
   const int table_cache_size = options_.max_open_files - 10;
-  table_cache_ = new TableCache(dbname_, &options_, table_cache_size);
+  table_cache_.reset(new TableCache(dbname_, &options_, table_cache_size));
 
-  versions_ = new VersionSet(dbname_, &options_, table_cache_,
-                             &internal_comparator_);
+  versions_.reset(new VersionSet(dbname_, &options_, table_cache_.get(),
+                                 &internal_comparator_));
 
-  dumpLeveldbBuildVersion(options_.info_log);
-  options_.Dump(options_.info_log);
+  dumpLeveldbBuildVersion(options_.info_log.get());
+  options_.Dump(options_.info_log.get());
 
 #ifdef USE_SCRIBE
   logger_ = new ScribeLogger("localhost", 1456);
@@ -246,21 +240,11 @@ DBImpl::~DBImpl() {
     env_->UnlockFile(db_lock_);
   }
 
-  delete versions_;
   if (mem_ != NULL) mem_->Unref();
   imm_.UnrefAll();
   delete tmp_batch_;
-  delete log_;
-  delete logfile_;
-  delete table_cache_;
   delete[] stats_;
 
-  if (owns_info_log_) {
-    delete options_.info_log;
-  }
-  if (owns_cache_) {
-    delete options_.block_cache;
-  }
   if (options_.compression_per_level != NULL) {
     delete[] options_.compression_per_level;
   }
@@ -288,6 +272,10 @@ void DBImpl::TEST_Destroy_DBImpl() {
   if (db_lock_ != NULL) {
     env_->UnlockFile(db_lock_);
   }
 
+  log_.reset();
+  versions_.reset();
+  table_cache_.reset();
 }
 
 uint64_t DBImpl::TEST_Current_Manifest_FileNo() {
@@ -302,21 +290,17 @@ Status DBImpl::NewDB() {
   new_db.SetLastSequence(0);
 
   const std::string manifest = DescriptorFileName(dbname_, 1);
-  WritableFile* file;
+  unique_ptr<WritableFile> file;
   Status s = env_->NewWritableFile(manifest, &file);
   if (!s.ok()) {
     return s;
   }
   {
-    log::Writer log(file);
+    log::Writer log(std::move(file));
     std::string record;
     new_db.EncodeTo(&record);
     s = log.AddRecord(record);
-    if (s.ok()) {
-      s = file->Close();
-    }
   }
-  delete file;
   if (s.ok()) {
     // Make "CURRENT" file that points to the new manifest file.
     s = SetCurrentFile(env_, dbname_, 1);
@@ -628,7 +612,7 @@ Status DBImpl::RecoverLogFile(uint64_t log_number,
 
   // Open the log file
   std::string fname = LogFileName(dbname_, log_number);
-  SequentialFile* file;
+  unique_ptr<SequentialFile> file;
   Status status = env_->NewSequentialFile(fname, &file);
   if (!status.ok()) {
     MaybeIgnoreError(&status);
@@ -638,14 +622,14 @@ Status DBImpl::RecoverLogFile(uint64_t log_number,
   // Create the log reader.
   LogReporter reporter;
   reporter.env = env_;
-  reporter.info_log = options_.info_log;
+  reporter.info_log = options_.info_log.get();
   reporter.fname = fname.c_str();
   reporter.status = (options_.paranoid_checks ? &status : NULL);
   // We intentially make log::Reader do checksumming even if
   // paranoid_checks==false so that corruptions cause entire commits
   // to be skipped instead of propagating bad information (like overly
   // large sequence numbers).
-  log::Reader reader(file, &reporter, true/*checksum*/,
+  log::Reader reader(std::move(file), &reporter, true/*checksum*/,
                      0/*initial_offset*/);
   Log(options_.info_log, "Recovering log #%llu",
       (unsigned long long) log_number);
@@ -703,7 +687,6 @@ Status DBImpl::RecoverLogFile(uint64_t log_number,
   }
 
   if (mem != NULL && !external_table) mem->Unref();
-  delete file;
   return status;
 }
 
@@ -720,7 +703,7 @@ Status DBImpl::WriteLevel0TableForRecovery(MemTable* mem, VersionEdit* edit) {
   Status s;
   {
     mutex_.Unlock();
-    s = BuildTable(dbname_, env_, options_, table_cache_, iter, &meta);
+    s = BuildTable(dbname_, env_, options_, table_cache_.get(), iter, &meta);
     mutex_.Lock();
   }
 
@@ -766,7 +749,7 @@ Status DBImpl::WriteLevel0Table(MemTable* mem, VersionEdit* edit,
   Status s;
   {
     mutex_.Unlock();
-    s = BuildTable(dbname_, env_, options_, table_cache_, iter, &meta);
+    s = BuildTable(dbname_, env_, options_, table_cache_.get(), iter, &meta);
     mutex_.Lock();
   }
   base->Unref();
@@ -845,8 +828,9 @@ Status DBImpl::CompactMemTable(bool* madeProgress) {
   }
 
   // Replace immutable memtable with the generated Table
-  s = imm_.InstallMemtableFlushResults(m, versions_, s, &mutex_,
-                                       options_.info_log, file_number, pending_outputs_);
+  s = imm_.InstallMemtableFlushResults(
+      m, versions_.get(), s, &mutex_, options_.info_log.get(),
+      file_number, pending_outputs_);
 
   if (s.ok()) {
     if (madeProgress) {
@@ -1009,7 +993,7 @@ Status DBImpl::ReadFirstLine(const std::string& fname,
     }
   };
 
-  SequentialFile* file;
+  unique_ptr<SequentialFile> file;
   Status status = env_->NewSequentialFile(fname, &file);
 
   if (!status.ok()) {
@@ -1019,10 +1003,10 @@ Status DBImpl::ReadFirstLine(const std::string& fname,
 
   LogReporter reporter;
   reporter.env = env_;
-  reporter.info_log = options_.info_log;
+  reporter.info_log = options_.info_log.get();
   reporter.fname = fname.c_str();
   reporter.status = (options_.paranoid_checks ? &status : NULL);
-  log::Reader reader(file, &reporter, true/*checksum*/,
+  log::Reader reader(std::move(file), &reporter, true/*checksum*/,
                      0/*initial_offset*/);
   std::string scratch;
   Slice record;
@@ -1243,7 +1227,7 @@ Status DBImpl::BackgroundCompaction(bool* madeProgress,
     }
   }
 
-  Compaction* c = NULL;
+  unique_ptr<Compaction> c;
   bool is_manual = (manual_compaction_ != NULL) &&
                    (manual_compaction_->in_progress == false);
   InternalKey manual_end;
@@ -1251,10 +1235,11 @@ Status DBImpl::BackgroundCompaction(bool* madeProgress,
     ManualCompaction* m = manual_compaction_;
     assert(!m->in_progress);
    m->in_progress = true; // another thread cannot pick up the same work
-    c = versions_->CompactRange(m->level, m->begin, m->end);
-    m->done = (c == NULL);
-    if (c != NULL) {
+    c.reset(versions_->CompactRange(m->level, m->begin, m->end));
+    if (c) {
       manual_end = c->input(0, c->num_input_files(0) - 1)->largest;
+    } else {
+      m->done = true;
     }
     Log(options_.info_log,
         "Manual compaction at level-%d from %s .. %s; will stop at %s\n",
@@ -1263,11 +1248,11 @@ Status DBImpl::BackgroundCompaction(bool* madeProgress,
         (m->end ? m->end->DebugString().c_str() : "(end)"),
         (m->done ? "(end)" : manual_end.DebugString().c_str()));
   } else if (!options_.disable_auto_compactions) {
-    c = versions_->PickCompaction();
+    c.reset(versions_->PickCompaction());
   }
 
   Status status;
-  if (c == NULL) {
+  if (!c) {
     // Nothing to do
     Log(options_.info_log, "Compaction nothing to do");
   } else if (!is_manual && c->IsTrivialMove()) {
@@ -1285,18 +1270,18 @@ Status DBImpl::BackgroundCompaction(bool* madeProgress,
         static_cast<unsigned long long>(f->file_size),
         status.ToString().c_str(),
         versions_->LevelSummary(&tmp));
-    versions_->ReleaseCompactionFiles(c, status);
+    versions_->ReleaseCompactionFiles(c.get(), status);
     *madeProgress = true;
   } else {
-    CompactionState* compact = new CompactionState(c);
+    CompactionState* compact = new CompactionState(c.get());
     status = DoCompactionWork(compact);
     CleanupCompaction(compact);
-    versions_->ReleaseCompactionFiles(c, status);
+    versions_->ReleaseCompactionFiles(c.get(), status);
     c->ReleaseInputs();
     FindObsoleteFiles(deletion_state);
     *madeProgress = true;
   }
-  delete c;
+  c.reset();
 
   if (status.ok()) {
     // Done
@@ -1332,11 +1317,10 @@ void DBImpl::CleanupCompaction(CompactionState* compact) {
   if (compact->builder != NULL) {
     // May happen if we get a shutdown call in the middle of compaction
     compact->builder->Abandon();
-    delete compact->builder;
+    compact->builder.reset();
   } else {
     assert(compact->outfile == NULL);
   }
-  delete compact->outfile;
   for (size_t i = 0; i < compact->outputs.size(); i++) {
     const CompactionState::Output& out = compact->outputs[i];
     pending_outputs_.erase(out.number);
@@ -1397,8 +1381,8 @@ Status DBImpl::OpenCompactionOutputFile(CompactionState* compact) {
   std::string fname = TableFileName(dbname_, file_number);
   Status s = env_->NewWritableFile(fname, &compact->outfile);
   if (s.ok()) {
-    compact->builder = new TableBuilder(options_, compact->outfile,
-                                        compact->compaction->level() + 1);
+    compact->builder.reset(new TableBuilder(options_, compact->outfile.get(),
+                                            compact->compaction->level() + 1));
   }
   return s;
 }
@@ -1406,7 +1390,7 @@ Status DBImpl::OpenCompactionOutputFile(CompactionState* compact) {
 Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
                                           Iterator* input) {
   assert(compact != NULL);
-  assert(compact->outfile != NULL);
+  assert(compact->outfile);
   assert(compact->builder != NULL);
 
   const uint64_t output_number = compact->current_output()->number;
@@ -1423,8 +1407,7 @@ Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
   const uint64_t current_bytes = compact->builder->FileSize();
   compact->current_output()->file_size = current_bytes;
   compact->total_bytes += current_bytes;
-  delete compact->builder;
-  compact->builder = NULL;
+  compact->builder.reset();
 
   // Finish and check for file errors
   if (s.ok() && !options_.disableDataSync) {
@@ -1437,8 +1420,7 @@ Status DBImpl::FinishCompactionOutputFile(CompactionState* compact,
   if (s.ok()) {
     s = compact->outfile->Close();
   }
-  delete compact->outfile;
-  compact->outfile = NULL;
+  compact->outfile.reset();
 
   if (s.ok() && current_entries > 0) {
     // Verify that the table is usable
@@ -1537,7 +1519,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
 
   assert(versions_->NumLevelFiles(compact->compaction->level()) > 0);
   assert(compact->builder == NULL);
-  assert(compact->outfile == NULL);
+  assert(!compact->outfile);
 
   SequenceNumber visible_at_tip = 0;
   SequenceNumber earliest_snapshot;
@@ -1560,7 +1542,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
   mutex_.Unlock();
 
   const uint64_t start_micros = env_->NowMicros();
-  Iterator* input = versions_->MakeInputIterator(compact->compaction);
+  unique_ptr<Iterator> input(versions_->MakeInputIterator(compact->compaction));
   input->SeekToFirst();
   Status status;
   ParsedInternalKey ikey;
@@ -1587,7 +1569,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
     Slice* compaction_filter_value = NULL;
     if (compact->compaction->ShouldStopBefore(key) &&
         compact->builder != NULL) {
-      status = FinishCompactionOutputFile(compact, input);
+      status = FinishCompactionOutputFile(compact, input.get());
       if (!status.ok()) {
         break;
       }
@@ -1690,7 +1672,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
       // Close output file if it is big enough
       if (compact->builder->FileSize() >=
           compact->compaction->MaxOutputFileSize()) {
-        status = FinishCompactionOutputFile(compact, input);
+        status = FinishCompactionOutputFile(compact, input.get());
        if (!status.ok()) {
          break;
        }
@@ -1704,13 +1686,12 @@ Status DBImpl::DoCompactionWork(CompactionState* compact) {
     status = Status::IOError("Deleting DB during compaction");
   }
   if (status.ok() && compact->builder != NULL) {
-    status = FinishCompactionOutputFile(compact, input);
+    status = FinishCompactionOutputFile(compact, input.get());
   }
   if (status.ok()) {
     status = input->status();
   }
-  delete input;
-  input = NULL;
+  input.reset();
 
   CompactionStats stats;
   stats.micros = env_->NowMicros() - start_micros - imm_micros;
@@ -1950,9 +1931,9 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
       status = log_->AddRecord(WriteBatchInternal::Contents(updates));
       if (status.ok() && options.sync) {
         if (options_.use_fsync) {
-          status = logfile_->Fsync();
+          status = log_->file()->Fsync();
         } else {
-          status = logfile_->Sync();
+          status = log_->file()->Sync();
        }
      }
    }
@@ -2117,18 +2098,15 @@ Status DBImpl::MakeRoomForWrite(bool force) {
       DelayLoggingAndReset();
       assert(versions_->PrevLogNumber() == 0);
       uint64_t new_log_number = versions_->NewFileNumber();
-      WritableFile* lfile = NULL;
+      unique_ptr<WritableFile> lfile;
       s = env_->NewWritableFile(LogFileName(dbname_, new_log_number), &lfile);
       if (!s.ok()) {
         // Avoid chewing through file number space in a tight loop.
         versions_->ReuseFileNumber(new_log_number);
         break;
       }
-      delete log_;
-      delete logfile_;
-      logfile_ = lfile;
       logfile_number_ = new_log_number;
-      log_ = new log::Writer(lfile);
+      log_.reset(new log::Writer(std::move(lfile)));
       imm_.Add(mem_);
       mem_ = new MemTable(internal_comparator_, NumberLevels());
       mem_->Ref();
@@ -2310,14 +2288,13 @@ Status DB::Open(const Options& options, const std::string& dbname,
   s = impl->Recover(&edit); // Handles create_if_missing, error_if_exists
   if (s.ok()) {
     uint64_t new_log_number = impl->versions_->NewFileNumber();
-    WritableFile* lfile;
+    unique_ptr<WritableFile> lfile;
     s = options.env->NewWritableFile(LogFileName(dbname, new_log_number),
                                      &lfile);
     if (s.ok()) {
       edit.SetLogNumber(new_log_number);
-      impl->logfile_ = lfile;
       impl->logfile_number_ = new_log_number;
-      impl->log_ = new log::Writer(lfile);
+      impl->log_.reset(new log::Writer(std::move(lfile)));
       s = impl->versions_->LogAndApply(&edit, &impl->mutex_);
     }
     if (s.ok()) {
@@ -90,7 +90,7 @@ class DBImpl : public DB {
  protected:
   Env* const env_;
   const std::string dbname_;
-  VersionSet* versions_;
+  unique_ptr<VersionSet> versions_;
   const InternalKeyComparator internal_comparator_;
   const Options options_; // options_.comparator == &internal_comparator_
 
@@ -202,10 +202,9 @@ class DBImpl : public DB {
   // Constant after construction
   const InternalFilterPolicy internal_filter_policy_;
   bool owns_info_log_;
-  bool owns_cache_;
 
   // table_cache_ provides its own synchronization
-  TableCache* table_cache_;
+  unique_ptr<TableCache> table_cache_;
 
   // Lock over the persistent DB state. Non-NULL iff successfully acquired.
   FileLock* db_lock_;
@@ -216,9 +215,8 @@ class DBImpl : public DB {
   port::CondVar bg_cv_; // Signalled when background work finishes
   MemTable* mem_;
   MemTableList imm_; // Memtable that are not changing
-  WritableFile* logfile_;
   uint64_t logfile_number_;
-  log::Writer* log_;
+  unique_ptr<log::Writer> log_;
 
   std::string host_name_;
 
@@ -100,18 +100,17 @@ class SpecialEnv : public EnvWrapper {
     manifest_write_error_.Release_Store(NULL);
   }
 
-  Status NewWritableFile(const std::string& f, WritableFile** r) {
+  Status NewWritableFile(const std::string& f, unique_ptr<WritableFile>* r) {
     class SSTableFile : public WritableFile {
      private:
       SpecialEnv* env_;
-      WritableFile* base_;
+      unique_ptr<WritableFile> base_;
 
      public:
-      SSTableFile(SpecialEnv* env, WritableFile* base)
+      SSTableFile(SpecialEnv* env, unique_ptr<WritableFile>&& base)
           : env_(env),
-            base_(base) {
+            base_(std::move(base)) {
       }
-      ~SSTableFile() { delete base_; }
       Status Append(const Slice& data) {
         if (env_->no_space_.Acquire_Load() != NULL) {
           // Drop writes on the floor
@@ -132,10 +131,10 @@ class SpecialEnv : public EnvWrapper {
     class ManifestFile : public WritableFile {
      private:
       SpecialEnv* env_;
-      WritableFile* base_;
+      unique_ptr<WritableFile> base_;
      public:
-      ManifestFile(SpecialEnv* env, WritableFile* b) : env_(env), base_(b) { }
-      ~ManifestFile() { delete base_; }
+      ManifestFile(SpecialEnv* env, unique_ptr<WritableFile>&& b)
+          : env_(env), base_(std::move(b)) { }
       Status Append(const Slice& data) {
         if (env_->manifest_write_error_.Acquire_Load() != NULL) {
           return Status::IOError("simulated writer error");
@@ -161,24 +160,25 @@ class SpecialEnv : public EnvWrapper {
     Status s = target()->NewWritableFile(f, r);
     if (s.ok()) {
       if (strstr(f.c_str(), ".sst") != NULL) {
-        *r = new SSTableFile(this, *r);
+        r->reset(new SSTableFile(this, std::move(*r)));
       } else if (strstr(f.c_str(), "MANIFEST") != NULL) {
-        *r = new ManifestFile(this, *r);
+        r->reset(new ManifestFile(this, std::move(*r)));
       }
     }
     return s;
   }
 
-  Status NewRandomAccessFile(const std::string& f, RandomAccessFile** r) {
+  Status NewRandomAccessFile(const std::string& f,
+                             unique_ptr<RandomAccessFile>* r) {
     class CountingFile : public RandomAccessFile {
      private:
-      RandomAccessFile* target_;
+      unique_ptr<RandomAccessFile> target_;
       anon::AtomicCounter* counter_;
      public:
-      CountingFile(RandomAccessFile* target, anon::AtomicCounter* counter)
-          : target_(target), counter_(counter) {
+      CountingFile(unique_ptr<RandomAccessFile>&& target,
+                   anon::AtomicCounter* counter)
+          : target_(std::move(target)), counter_(counter) {
       }
-      virtual ~CountingFile() { delete target_; }
       virtual Status Read(uint64_t offset, size_t n, Slice* result,
                           char* scratch) const {
         counter_->Increment();
@@ -188,7 +188,7 @@ class SpecialEnv : public EnvWrapper {
 
     Status s = target()->NewRandomAccessFile(f, r);
     if (s.ok() && count_random_reads_) {
-      *r = new CountingFile(*r, &random_read_counter_);
+      r->reset(new CountingFile(std::move(*r), &random_read_counter_));
     }
     return s;
   }
@@ -2199,7 +2199,6 @@ TEST(DBTest, BloomFilter) {
 
   env_->delay_sstable_sync_.Release_Store(NULL);
   Close();
-  delete options.block_cache;
   delete options.filter_policy;
 }
 
@@ -2257,9 +2256,9 @@ TEST(DBTest, SnapshotFiles) {
        }
      }
    }
-    SequentialFile* srcfile;
+    unique_ptr<SequentialFile> srcfile;
    ASSERT_OK(env_->NewSequentialFile(src, &srcfile));
-    WritableFile* destfile;
+    unique_ptr<WritableFile> destfile;
    ASSERT_OK(env_->NewWritableFile(dest, &destfile));
 
    char buffer[4096];
@@ -2271,8 +2270,6 @@ TEST(DBTest, SnapshotFiles) {
      size -= slice.size();
    }
    ASSERT_OK(destfile->Close());
-    delete destfile;
-    delete srcfile;
  }
 
  // release file snapshot
@@ -2519,7 +2516,6 @@ TEST(DBTest, ReadCompaction) {
    ASSERT_TRUE(NumTableFilesAtLevel(0) < l1 ||
                NumTableFilesAtLevel(1) < l2 ||
                NumTableFilesAtLevel(2) < l3);
-    delete options.block_cache;
  }
 }
 
@@ -2633,7 +2629,6 @@ class ModelDB: public DB {
  };
 
  explicit ModelDB(const Options& options): options_(options) { }
-  ~ModelDB() { }
  virtual Status Put(const WriteOptions& o, const Slice& k, const Slice& v) {
    return DB::Put(o, k, v);
  }
@@ -15,9 +15,9 @@ namespace log {
 Reader::Reporter::~Reporter() {
 }
 
-Reader::Reader(SequentialFile* file, Reporter* reporter, bool checksum,
-               uint64_t initial_offset)
-    : file_(file),
+Reader::Reader(unique_ptr<SequentialFile>&& file, Reporter* reporter,
+               bool checksum, uint64_t initial_offset)
+    : file_(std::move(file)),
       reporter_(reporter),
       checksum_(checksum),
       backing_store_(new char[kBlockSize]),
@@ -5,6 +5,7 @@
 #ifndef STORAGE_LEVELDB_DB_LOG_READER_H_
 #define STORAGE_LEVELDB_DB_LOG_READER_H_
 
+#include <memory>
 #include <stdint.h>
 
 #include "db/log_format.h"
@@ -14,6 +15,7 @@
 namespace leveldb {
 
 class SequentialFile;
+using std::unique_ptr;
 
 namespace log {
 
@@ -40,8 +42,8 @@ class Reader {
   //
   // The Reader will start reading at the first record located at physical
   // position >= initial_offset within the file.
-  Reader(SequentialFile* file, Reporter* reporter, bool checksum,
-         uint64_t initial_offset);
+  Reader(unique_ptr<SequentialFile>&& file, Reporter* reporter,
+         bool checksum, uint64_t initial_offset);
 
   ~Reader();
 
@@ -57,8 +59,10 @@ class Reader {
   // Undefined before the first call to ReadRecord.
   uint64_t LastRecordOffset();
 
+  SequentialFile* file() { return file_.get(); }
+
  private:
-  SequentialFile* const file_;
+  const unique_ptr<SequentialFile> file_;
   Reporter* const reporter_;
   bool const checksum_;
   char* const backing_store_;
@@ -100,8 +100,26 @@ class LogTest {
     }
   };
 
-  StringDest dest_;
-  StringSource source_;
+  std::string& dest_contents() {
+    auto dest = dynamic_cast<StringDest*>(writer_.file());
+    assert(dest);
+    return dest->contents_;
+  }
+
+  const std::string& dest_contents() const {
+    auto dest = dynamic_cast<const StringDest*>(writer_.file());
+    assert(dest);
+    return dest->contents_;
+  }
+
+  void reset_source_contents() {
+    auto src = dynamic_cast<StringSource*>(reader_.file());
+    assert(src);
+    src->contents_ = dest_contents();
+  }
+
+  unique_ptr<StringDest> dest_holder_;
+  unique_ptr<StringSource> source_holder_;
   ReportCollector report_;
   bool reading_;
   Writer writer_;
@@ -112,9 +130,11 @@ class LogTest {
   static uint64_t initial_offset_last_record_offsets_[];
 
  public:
-  LogTest() : reading_(false),
-              writer_(&dest_),
-              reader_(&source_, &report_, true/*checksum*/,
+  LogTest() : dest_holder_(new StringDest),
+              source_holder_(new StringSource),
+              reading_(false),
+              writer_(std::move(dest_holder_)),
+              reader_(std::move(source_holder_), &report_, true/*checksum*/,
                       0/*initial_offset*/) {
   }
 
@@ -124,13 +144,13 @@ class LogTest {
   }
 
   size_t WrittenBytes() const {
-    return dest_.contents_.size();
+    return dest_contents().size();
   }
 
   std::string Read() {
     if (!reading_) {
       reading_ = true;
-      source_.contents_ = Slice(dest_.contents_);
+      reset_source_contents();
     }
     std::string scratch;
     Slice record;
@@ -142,26 +162,27 @@ class LogTest {
   }
 
   void IncrementByte(int offset, int delta) {
-    dest_.contents_[offset] += delta;
+    dest_contents()[offset] += delta;
   }
 
   void SetByte(int offset, char new_byte) {
-    dest_.contents_[offset] = new_byte;
+    dest_contents()[offset] = new_byte;
   }
 
   void ShrinkSize(int bytes) {
-    dest_.contents_.resize(dest_.contents_.size() - bytes);
+    dest_contents().resize(dest_contents().size() - bytes);
   }
 
   void FixChecksum(int header_offset, int len) {
     // Compute crc of type/len/data
-    uint32_t crc = crc32c::Value(&dest_.contents_[header_offset+6], 1 + len);
+    uint32_t crc = crc32c::Value(&dest_contents()[header_offset+6], 1 + len);
     crc = crc32c::Mask(crc);
-    EncodeFixed32(&dest_.contents_[header_offset], crc);
+    EncodeFixed32(&dest_contents()[header_offset], crc);
   }
 
   void ForceError() {
-    source_.force_error_ = true;
+    auto src = dynamic_cast<StringSource*>(reader_.file());
+    src->force_error_ = true;
   }
 
   size_t DroppedBytes() const {
@@ -192,22 +213,25 @@ class LogTest {
   void CheckOffsetPastEndReturnsNoRecords(uint64_t offset_past_end) {
     WriteInitialOffsetLog();
     reading_ = true;
-    source_.contents_ = Slice(dest_.contents_);
-    Reader* offset_reader = new Reader(&source_, &report_, true/*checksum*/,
-                                       WrittenBytes() + offset_past_end);
+    unique_ptr<StringSource> source(new StringSource);
+    source->contents_ = dest_contents();
+    unique_ptr<Reader> offset_reader(
+        new Reader(std::move(source), &report_, true/*checksum*/,
+                   WrittenBytes() + offset_past_end));
     Slice record;
     std::string scratch;
     ASSERT_TRUE(!offset_reader->ReadRecord(&record, &scratch));
-    delete offset_reader;
   }
 
   void CheckInitialOffsetRecord(uint64_t initial_offset,
                                 int expected_record_offset) {
     WriteInitialOffsetLog();
     reading_ = true;
-    source_.contents_ = Slice(dest_.contents_);
-    Reader* offset_reader = new Reader(&source_, &report_, true/*checksum*/,
-                                       initial_offset);
+    unique_ptr<StringSource> source(new StringSource);
+    source->contents_ = dest_contents();
+    unique_ptr<Reader> offset_reader(
+        new Reader(std::move(source), &report_, true/*checksum*/,
+                   initial_offset));
     Slice record;
     std::string scratch;
     ASSERT_TRUE(offset_reader->ReadRecord(&record, &scratch));
@@ -216,7 +240,6 @@ class LogTest {
     ASSERT_EQ(initial_offset_last_record_offsets_[expected_record_offset],
               offset_reader->LastRecordOffset());
     ASSERT_EQ((char)('a' + expected_record_offset), record.data()[0]);
-    delete offset_reader;
   }
 
 };
@@ -12,8 +12,8 @@
 namespace leveldb {
 namespace log {
 
-Writer::Writer(WritableFile* dest)
-    : dest_(dest),
+Writer::Writer(unique_ptr<WritableFile>&& dest)
+    : dest_(std::move(dest)),
       block_offset_(0) {
   for (int i = 0; i <= kMaxRecordType; i++) {
     char t = static_cast<char>(i);
@@ -5,6 +5,7 @@
 #ifndef STORAGE_LEVELDB_DB_LOG_WRITER_H_
 #define STORAGE_LEVELDB_DB_LOG_WRITER_H_
 
+#include <memory>
 #include <stdint.h>
 #include "db/log_format.h"
 #include "leveldb/slice.h"
@@ -14,6 +15,8 @@ namespace leveldb {
 
 class WritableFile;
 
+using std::unique_ptr;
+
 namespace log {
 
 class Writer {
@@ -21,13 +24,16 @@ class Writer {
   // Create a writer that will append data to "*dest".
   // "*dest" must be initially empty.
   // "*dest" must remain live while this Writer is in use.
-  explicit Writer(WritableFile* dest);
+  explicit Writer(unique_ptr<WritableFile>&& dest);
   ~Writer();
 
   Status AddRecord(const Slice& slice);
 
+  WritableFile* file() { return dest_.get(); }
+  const WritableFile* file() const { return dest_.get(); }
+
  private:
-  WritableFile* dest_;
+  unique_ptr<WritableFile> dest_;
   int block_offset_; // Current offset in block
 
   // crc32c values for all supported record types. These are
db/repair.cc (25)

@@ -50,8 +50,6 @@ class Repairer {
       icmp_(options.comparator),
       ipolicy_(options.filter_policy),
       options_(SanitizeOptions(dbname, &icmp_, &ipolicy_, options)),
-      owns_info_log_(options_.info_log != options.info_log),
-      owns_cache_(options_.block_cache != options.block_cache),
       next_file_number_(1) {
     // TableCache can be small since we expect each table to be opened once.
     table_cache_ = new TableCache(dbname_, &options_, 10);
@@ -61,12 +59,6 @@ class Repairer {
   ~Repairer() {
     delete table_cache_;
     delete edit_;
-    if (owns_info_log_) {
-      delete options_.info_log;
-    }
-    if (owns_cache_) {
-      delete options_.block_cache;
-    }
   }
 
   Status Run() {
@@ -104,7 +96,6 @@ class Repairer {
   InternalKeyComparator const icmp_;
   InternalFilterPolicy const ipolicy_;
   Options const options_;
-  bool owns_info_log_;
   bool owns_cache_;
   TableCache* table_cache_;
   VersionEdit* edit_;
@@ -164,7 +155,7 @@ class Repairer {
   Status ConvertLogToTable(uint64_t log) {
     struct LogReporter : public log::Reader::Reporter {
       Env* env;
-      Logger* info_log;
+      std::shared_ptr<Logger> info_log;
       uint64_t lognum;
       virtual void Corruption(size_t bytes, const Status& s) {
         // We print error messages for corruption, but continue repairing.
@@ -177,7 +168,7 @@ class Repairer {
 
     // Open the log file
     std::string logname = LogFileName(dbname_, log);
-    SequentialFile* lfile;
+    unique_ptr<SequentialFile> lfile;
     Status status = env_->NewSequentialFile(logname, &lfile);
     if (!status.ok()) {
       return status;
@@ -192,7 +183,7 @@ class Repairer {
     // corruptions cause entire commits to be skipped instead of
    // propagating bad information (like overly large sequence
    // numbers).
-    log::Reader reader(lfile, &reporter, false/*do not checksum*/,
+    log::Reader reader(std::move(lfile), &reporter, false/*do not checksum*/,
                        0/*initial_offset*/);
 
    // Read all the records and add to a memtable
@@ -219,7 +210,6 @@ class Repairer {
        status = Status::OK(); // Keep going with rest of file
      }
    }
-    delete lfile;
 
    // Do not record a version edit for this conversion to a Table
    // since ExtractMetaData() will also generate edits.
@@ -304,7 +294,7 @@ class Repairer {
 
   Status WriteDescriptor() {
     std::string tmp = TempFileName(dbname_, 1);
-    WritableFile* file;
+    unique_ptr<WritableFile> file;
     Status status = env_->NewWritableFile(tmp, &file);
     if (!status.ok()) {
       return status;
@@ -331,16 +321,11 @@ class Repairer {
 
     //fprintf(stderr, "NewDescriptor:\n%s\n", edit_.DebugString().c_str());
     {
-      log::Writer log(file);
+      log::Writer log(std::move(file));
       std::string record;
       edit_->EncodeTo(&record);
       status = log.AddRecord(record);
     }
-    if (status.ok()) {
-      status = file->Close();
-    }
-    delete file;
-    file = NULL;
 
     if (!status.ok()) {
       env_->DeleteFile(tmp);
@@ -13,17 +13,17 @@
 namespace leveldb {

 struct TableAndFile {
-  RandomAccessFile* file;
-  Table* table;
+  unique_ptr<RandomAccessFile> file;
+  unique_ptr<Table> table;
 };

 static class DBStatistics* dbstatistics;

 static void DeleteEntry(const Slice& key, void* value) {
   TableAndFile* tf = reinterpret_cast<TableAndFile*>(value);
-  delete tf->table;
-  delete tf->file;
-  dbstatistics ? dbstatistics->incNumFileCloses() : (void)0;
+  if (dbstatistics) {
+    dbstatistics->incNumFileCloses();
+  }
   delete tf;
 }

@@ -44,7 +44,6 @@ TableCache::TableCache(const std::string& dbname,
 }

 TableCache::~TableCache() {
-  delete cache_;
 }

 Status TableCache::FindTable(uint64_t file_number, uint64_t file_size,
@@ -60,24 +59,23 @@ Status TableCache::FindTable(uint64_t file_number, uint64_t file_size,
     *tableIO = true;    // we had to do IO from storage
   }
   std::string fname = TableFileName(dbname_, file_number);
-  RandomAccessFile* file = NULL;
-  Table* table = NULL;
+  unique_ptr<RandomAccessFile> file;
+  unique_ptr<Table> table;
   s = env_->NewRandomAccessFile(fname, &file);
   stats ? stats->incNumFileOpens() : (void)0;
   if (s.ok()) {
-    s = Table::Open(*options_, file, file_size, &table);
+    s = Table::Open(*options_, std::move(file), file_size, &table);
   }

   if (!s.ok()) {
     assert(table == NULL);
-    delete file;
     stats ? stats->incNumFileErrors() : (void)0;
     // We do not cache error results so that if the error is transient,
     // or somebody repairs the file, we recover automatically.
   } else {
     TableAndFile* tf = new TableAndFile;
-    tf->file = file;
-    tf->table = table;
+    tf->file = std::move(file);
+    tf->table = std::move(table);
     *handle = cache_->Insert(key, tf, 1, &DeleteEntry);
   }
 }
@@ -98,9 +96,10 @@ Iterator* TableCache::NewIterator(const ReadOptions& options,
     return NewErrorIterator(s);
   }

-  Table* table = reinterpret_cast<TableAndFile*>(cache_->Value(handle))->table;
+  Table* table =
+      reinterpret_cast<TableAndFile*>(cache_->Value(handle))->table.get();
   Iterator* result = table->NewIterator(options);
-  result->RegisterCleanup(&UnrefEntry, cache_, handle);
+  result->RegisterCleanup(&UnrefEntry, cache_.get(), handle);
   if (tableptr != NULL) {
     *tableptr = table;
   }
@@ -117,7 +116,8 @@ Status TableCache::Get(const ReadOptions& options,
   Cache::Handle* handle = NULL;
   Status s = FindTable(file_number, file_size, &handle, tableIO);
   if (s.ok()) {
-    Table* t = reinterpret_cast<TableAndFile*>(cache_->Value(handle))->table;
+    Table* t =
+        reinterpret_cast<TableAndFile*>(cache_->Value(handle))->table.get();
     s = t->InternalGet(options, k, arg, saver);
     cache_->Release(handle);
   }
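The DeleteEntry change above leans on one idiom: once the cache-entry struct holds unique_ptr members, deleting the wrapper is enough to release the Table and its file. A generic, self-contained sketch of that idiom (Entry and the int payloads are stand-ins, not the actual TableAndFile types):

#include <memory>

// When the cache evicts an entry it invokes the registered deleter; deleting
// the wrapper struct releases both owned objects automatically.
struct Entry {
  std::unique_ptr<int> table;   // stand-in for unique_ptr<Table>
  std::unique_ptr<int> file;    // stand-in for unique_ptr<RandomAccessFile>
};

static void DeleteEntry(void* value) {
  delete static_cast<Entry*>(value);  // members clean themselves up
}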
@@ -52,7 +52,7 @@ class TableCache {
   Env* const env_;
   const std::string dbname_;
   const Options* options_;
-  Cache* cache_;
+  std::shared_ptr<Cache> cache_;

   Status FindTable(uint64_t file_number, uint64_t file_size, Cache::Handle**,
                    bool* tableIO = NULL);
@@ -14,22 +14,23 @@ TransactionLogIteratorImpl::TransactionLogIteratorImpl(
     files_(files),
     started_(false),
     isValid_(true),
-    currentFileIndex_(0),
-    currentLogReader_(NULL) {
-  assert( files_ != NULL);
+    currentFileIndex_(0) {
+  assert(files_ != NULL);
 }

 LogReporter
 TransactionLogIteratorImpl::NewLogReporter(const uint64_t logNumber) {
   LogReporter reporter;
   reporter.env = options_->env;
-  reporter.info_log = options_->info_log;
+  reporter.info_log = options_->info_log.get();
   reporter.log_number = logNumber;
   return reporter;
 }

-Status TransactionLogIteratorImpl::OpenLogFile(const LogFile& logFile,
-                                               SequentialFile** file) {
+Status TransactionLogIteratorImpl::OpenLogFile(
+    const LogFile& logFile,
+    unique_ptr<SequentialFile>* file)
+{
   Env* env = options_->env;
   if (logFile.type == kArchivedLogFile) {
     std::string fname = ArchivedLogFileName(dbname_, logFile.logNumber);
@@ -73,17 +74,18 @@ void TransactionLogIteratorImpl::Next() {
   std::string scratch;
   Slice record;
   if (!started_) {
-    SequentialFile* file = NULL;
+    unique_ptr<SequentialFile> file;
     Status status = OpenLogFile(currentLogFile, &file);
     if (!status.ok()) {
       isValid_ = false;
       currentStatus_ = status;
       return;
     }
-    assert(file != NULL);
+    assert(file);
     WriteBatch batch;
-    log::Reader* reader = new log::Reader(file, &reporter, true, 0);
-    assert(reader != NULL);
+    unique_ptr<log::Reader> reader(
+        new log::Reader(std::move(file), &reporter, true, 0));
+    assert(reader);
     while (reader->ReadRecord(&record, &scratch)) {
       if (record.size() < 12) {
         reporter.Corruption(
@@ -95,7 +97,7 @@ void TransactionLogIteratorImpl::Next() {
       if (currentNum >= sequenceNumber_) {
         isValid_ = true;
         currentRecord_ = record;
-        currentLogReader_ = reader;
+        currentLogReader_ = std::move(reader);
         break;
       }
     }
@@ -108,7 +110,7 @@ void TransactionLogIteratorImpl::Next() {
     started_ = true;
   } else {
 LOOK_NEXT_FILE:
-    assert(currentLogReader_ != NULL);
+    assert(currentLogReader_);
     bool openNextFile = true;
     while (currentLogReader_->ReadRecord(&record, &scratch)) {
       if (record.size() < 12) {
@@ -125,15 +127,16 @@ LOOK_NEXT_FILE:
     if (openNextFile) {
       if (currentFileIndex_ < files_->size() - 1) {
         ++currentFileIndex_;
-        delete currentLogReader_;
-        SequentialFile *file;
+        currentLogReader_.reset();
+        unique_ptr<SequentialFile> file;
         Status status = OpenLogFile(files_->at(currentFileIndex_), &file);
         if (!status.ok()) {
           isValid_ = false;
           currentStatus_ = status;
           return;
         }
-        currentLogReader_ = new log::Reader(file, &reporter, true, 0);
+        currentLogReader_.reset(
+            new log::Reader(std::move(file), &reporter, true, 0));
         goto LOOK_NEXT_FILE;
       } else {
         // LOOKED AT FILES. WE ARE DONE HERE.
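The iterator now keeps its reader in a unique_ptr, so advancing to the next log file is a reset() rather than a delete/new pair, and the destructor needs no special case. A small, self-contained sketch of that idiom with generic types (Reader here is not the actual log::Reader):

#include <memory>
#include <string>
#include <vector>

struct Reader {
  explicit Reader(std::string f) : file(std::move(f)) {}
  std::string file;
};

int main() {
  std::vector<std::string> files = {"000003.log", "000007.log"};
  std::unique_ptr<Reader> current;
  for (const std::string& f : files) {
    current.reset(new Reader(f));   // previous reader destroyed here, no leak
    // ... read records from *current ...
  }
  return 0;
}                                   // last reader released automatically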
@@ -31,9 +31,6 @@ class TransactionLogIteratorImpl : public TransactionLogIterator {
                               std::vector<LogFile>* files);
   virtual ~TransactionLogIteratorImpl() {
     // TODO move to cc file.
-    if (currentLogReader_ != NULL) {
-      delete currentLogReader_;
-    }
     delete files_;
   }

@@ -55,8 +52,8 @@ class TransactionLogIteratorImpl : public TransactionLogIterator {
   Status currentStatus_;
   size_t currentFileIndex_;
   Slice currentRecord_;
-  log::Reader* currentLogReader_;
-  Status OpenLogFile(const LogFile& logFile, SequentialFile** file);
+  unique_ptr<log::Reader> currentLogReader_;
+  Status OpenLogFile(const LogFile& logFile, unique_ptr<SequentialFile>* file);
   LogReporter NewLogReporter(uint64_t logNumber);
 };

@@ -896,8 +896,6 @@ VersionSet::VersionSet(const std::string& dbname,
       log_number_(0),
       prev_log_number_(0),
       num_levels_(options_->num_levels),
-      descriptor_file_(NULL),
-      descriptor_log_(NULL),
       dummy_versions_(this),
       current_(NULL),
       compactions_in_progress_(options_->num_levels),
@@ -914,8 +912,6 @@ VersionSet::~VersionSet() {
   delete[] compact_pointer_;
   delete[] max_file_size_;
   delete[] level_max_bytes_;
-  delete descriptor_log_;
-  delete descriptor_file_;
 }

 void VersionSet::Init(int num_levels) {
@@ -994,16 +990,17 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu,
     manifest_file_number_ = NewFileNumber();  // Change manifest file no.
   }

-  if (descriptor_log_ == NULL || new_descriptor_log) {
+  if (!descriptor_log_ || new_descriptor_log) {
     // No reason to unlock *mu here since we only hit this path in the
     // first call to LogAndApply (when opening the database).
-    assert(descriptor_file_ == NULL || new_descriptor_log);
+    assert(!descriptor_log_ || new_descriptor_log);
     new_manifest_file = DescriptorFileName(dbname_, manifest_file_number_);
     edit->SetNextFile(next_file_number_);
-    s = env_->NewWritableFile(new_manifest_file, &descriptor_file_);
+    unique_ptr<WritableFile> descriptor_file;
+    s = env_->NewWritableFile(new_manifest_file, &descriptor_file);
     if (s.ok()) {
-      descriptor_log_ = new log::Writer(descriptor_file_);
-      s = WriteSnapshot(descriptor_log_);
+      descriptor_log_.reset(new log::Writer(std::move(descriptor_file)));
+      s = WriteSnapshot(descriptor_log_.get());
     }
   }

@@ -1029,9 +1026,9 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu,
     }
     if (s.ok()) {
       if (options_->use_fsync) {
-        s = descriptor_file_->Fsync();
+        s = descriptor_log_->file()->Fsync();
       } else {
-        s = descriptor_file_->Sync();
+        s = descriptor_log_->file()->Sync();
       }
     }
     if (!s.ok()) {
@@ -1052,7 +1049,7 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu,
     }

     // find offset in manifest file where this version is stored.
-    new_manifest_file_size = descriptor_file_->GetFileSize();
+    new_manifest_file_size = descriptor_log_->file()->GetFileSize();

     mu->Lock();
     // cache the manifest_file_size so that it can be used to rollover in the
@@ -1072,10 +1069,7 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu,
         v->GetVersionNumber());
     delete v;
     if (!new_manifest_file.empty()) {
-      delete descriptor_log_;
-      delete descriptor_file_;
-      descriptor_log_ = NULL;
-      descriptor_file_ = NULL;
+      descriptor_log_.reset();
       env_->DeleteFile(new_manifest_file);
     }
   }
@@ -1142,7 +1136,7 @@ Status VersionSet::Recover() {
       current.c_str());

   std::string dscname = dbname_ + "/" + current;
-  SequentialFile* file;
+  unique_ptr<SequentialFile> file;
   s = env_->NewSequentialFile(dscname, &file);
   if (!s.ok()) {
     return s;
@@ -1166,7 +1160,8 @@ Status VersionSet::Recover() {
   {
     LogReporter reporter;
     reporter.status = &s;
-    log::Reader reader(file, &reporter, true/*checksum*/, 0/*initial_offset*/);
+    log::Reader reader(std::move(file), &reporter, true/*checksum*/,
+                       0/*initial_offset*/);
     Slice record;
     std::string scratch;
     while (reader.ReadRecord(&record, &scratch) && s.ok()) {
@@ -1206,8 +1201,7 @@ Status VersionSet::Recover() {
       }
     }
   }
-  delete file;
-  file = NULL;
+  file.reset();

   if (s.ok()) {
     if (!have_next_file) {
@@ -1260,7 +1254,7 @@ Status VersionSet::DumpManifest(Options& options, std::string& dscname,
   };

   // Open the specified manifest file.
-  SequentialFile* file;
+  unique_ptr<SequentialFile> file;
   Status s = options.env->NewSequentialFile(dscname, &file);
   if (!s.ok()) {
     return s;
@@ -1280,7 +1274,8 @@ Status VersionSet::DumpManifest(Options& options, std::string& dscname,
   {
     LogReporter reporter;
     reporter.status = &s;
-    log::Reader reader(file, &reporter, true/*checksum*/, 0/*initial_offset*/);
+    log::Reader reader(std::move(file), &reporter, true/*checksum*/,
+                       0/*initial_offset*/);
     Slice record;
     std::string scratch;
     while (reader.ReadRecord(&record, &scratch) && s.ok()) {
@@ -1327,8 +1322,7 @@ Status VersionSet::DumpManifest(Options& options, std::string& dscname,
       }
     }
   }
-  delete file;
-  file = NULL;
+  file.reset();

   if (s.ok()) {
     if (!have_next_file) {
@@ -1561,13 +1555,13 @@ const char* VersionSet::LevelDataSizeSummary(
 bool VersionSet::ManifestContains(const std::string& record) const {
   std::string fname = DescriptorFileName(dbname_, manifest_file_number_);
   Log(options_->info_log, "ManifestContains: checking %s\n", fname.c_str());
-  SequentialFile* file = NULL;
+  unique_ptr<SequentialFile> file;
   Status s = env_->NewSequentialFile(fname, &file);
   if (!s.ok()) {
     Log(options_->info_log, "ManifestContains: %s\n", s.ToString().c_str());
     return false;
   }
-  log::Reader reader(file, NULL, true/*checksum*/, 0);
+  log::Reader reader(std::move(file), NULL, true/*checksum*/, 0);
   Slice r;
   std::string scratch;
   bool result = false;
@@ -1577,7 +1571,6 @@ bool VersionSet::ManifestContains(const std::string& record) const {
       break;
     }
   }
-  delete file;
   Log(options_->info_log, "ManifestContains: result = %d\n", result ? 1 : 0);
   return result;
 }
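After this hunk, LogAndApply reaches the manifest file only through the log::Writer that owns it. A minimal sketch of why that accessor shape is convenient, assuming the writer->file() accessor implied by the calls above (SyncManifest is an illustrative name, not part of the patch):

#include "leveldb/env.h"
#include "db/log_writer.h"   // internal header; assumed available to db code

// Sync the manifest through its owning writer, mirroring the use_fsync
// branch in LogAndApply. There is no second raw pointer to keep in sync
// with descriptor_log_, so the file can never outlive (or leak past) it.
leveldb::Status SyncManifest(leveldb::log::Writer* writer, bool use_fsync) {
  return use_fsync ? writer->file()->Fsync()
                   : writer->file()->Sync();
}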
@@ -16,6 +16,7 @@
 #define STORAGE_LEVELDB_DB_VERSION_SET_H_

 #include <map>
+#include <memory>
 #include <set>
 #include <vector>
 #include <deque>
@@ -419,8 +420,7 @@ class VersionSet {
   int num_levels_;

   // Opened lazily
-  WritableFile* descriptor_file_;
-  log::Writer* descriptor_log_;
+  unique_ptr<log::Writer> descriptor_log_;
   Version dummy_versions_;  // Head of circular doubly-linked list of versions.
   Version* current_;        // == dummy_versions_.prev_

@@ -235,13 +235,17 @@ class HdfsEnv : public Env {
   }

   virtual Status NewSequentialFile(const std::string& fname,
-                                   SequentialFile** result);
+                                   unique_ptr<SequentialFile>* result);

   virtual Status NewRandomAccessFile(const std::string& fname,
-                                     RandomAccessFile** result){ return notsup;}
+                                     unique_ptr<RandomAccessFile>* result) {
+    return notsup;
+  }

   virtual Status NewWritableFile(const std::string& fname,
-                                 WritableFile** result){return notsup;}
+                                 unique_ptr<WritableFile>* result) {
+    return notsup;
+  }

   virtual bool FileExists(const std::string& fname){return false;}

@@ -269,7 +273,8 @@ class HdfsEnv : public Env {

   virtual Status UnlockFile(FileLock* lock){return notsup;}

-  virtual Status NewLogger(const std::string& fname, Logger** result){return notsup;}
+  virtual Status NewLogger(const std::string& fname,
+                           shared_ptr<Logger>* result){return notsup;}

   virtual void Schedule( void (*function)(void* arg), void* arg) {}

@@ -233,31 +233,31 @@ class InMemoryEnv : public EnvWrapper {

   // Partial implementation of the Env interface.
   virtual Status NewSequentialFile(const std::string& fname,
-                                   SequentialFile** result) {
+                                   unique_ptr<SequentialFile>* result) {
     MutexLock lock(&mutex_);
     if (file_map_.find(fname) == file_map_.end()) {
       *result = NULL;
       return Status::IOError(fname, "File not found");
     }

-    *result = new SequentialFileImpl(file_map_[fname]);
+    result->reset(new SequentialFileImpl(file_map_[fname]));
     return Status::OK();
   }

   virtual Status NewRandomAccessFile(const std::string& fname,
-                                     RandomAccessFile** result) {
+                                     unique_ptr<RandomAccessFile>* result) {
     MutexLock lock(&mutex_);
     if (file_map_.find(fname) == file_map_.end()) {
       *result = NULL;
       return Status::IOError(fname, "File not found");
     }

-    *result = new RandomAccessFileImpl(file_map_[fname]);
+    result->reset(new RandomAccessFileImpl(file_map_[fname]));
     return Status::OK();
   }

   virtual Status NewWritableFile(const std::string& fname,
-                                 WritableFile** result) {
+                                 unique_ptr<WritableFile>* result) {
     MutexLock lock(&mutex_);
     if (file_map_.find(fname) != file_map_.end()) {
       DeleteFileInternal(fname);
@@ -267,7 +267,7 @@ class InMemoryEnv : public EnvWrapper {
     file->Ref();
     file_map_[fname] = file;

-    *result = new WritableFileImpl(file);
+    result->reset(new WritableFileImpl(file));
     return Status::OK();
   }

@@ -8,6 +8,7 @@
 #include "leveldb/db.h"
 #include "leveldb/env.h"
 #include "util/testharness.h"
+#include <memory>
 #include <string>
 #include <vector>

@@ -27,7 +28,7 @@ class MemEnvTest {

 TEST(MemEnvTest, Basics) {
   uint64_t file_size;
-  WritableFile* writable_file;
+  unique_ptr<WritableFile> writable_file;
   std::vector<std::string> children;

   ASSERT_OK(env_->CreateDir("/dir"));
@@ -40,7 +41,7 @@ TEST(MemEnvTest, Basics) {

   // Create a file.
   ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file));
-  delete writable_file;
+  writable_file.reset();

   // Check that the file exists.
   ASSERT_TRUE(env_->FileExists("/dir/f"));
@@ -53,7 +54,7 @@ TEST(MemEnvTest, Basics) {
   // Write to the file.
   ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file));
   ASSERT_OK(writable_file->Append("abc"));
-  delete writable_file;
+  writable_file.reset();

   // Check for expected size.
   ASSERT_OK(env_->GetFileSize("/dir/f", &file_size));
@@ -68,8 +69,8 @@ TEST(MemEnvTest, Basics) {
   ASSERT_EQ(3U, file_size);

   // Check that opening non-existent file fails.
-  SequentialFile* seq_file;
-  RandomAccessFile* rand_file;
+  unique_ptr<SequentialFile> seq_file;
+  unique_ptr<RandomAccessFile> rand_file;
   ASSERT_TRUE(!env_->NewSequentialFile("/dir/non_existent", &seq_file).ok());
   ASSERT_TRUE(!seq_file);
   ASSERT_TRUE(!env_->NewRandomAccessFile("/dir/non_existent", &rand_file).ok());
@@ -85,9 +86,9 @@ TEST(MemEnvTest, Basics) {
 }

 TEST(MemEnvTest, ReadWrite) {
-  WritableFile* writable_file;
-  SequentialFile* seq_file;
-  RandomAccessFile* rand_file;
+  unique_ptr<WritableFile> writable_file;
+  unique_ptr<SequentialFile> seq_file;
+  unique_ptr<RandomAccessFile> rand_file;
   Slice result;
   char scratch[100];

@@ -96,7 +97,7 @@ TEST(MemEnvTest, ReadWrite) {
   ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file));
   ASSERT_OK(writable_file->Append("hello "));
   ASSERT_OK(writable_file->Append("world"));
-  delete writable_file;
+  writable_file.reset();

   // Read sequentially.
   ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file));
@@ -110,7 +111,6 @@ TEST(MemEnvTest, ReadWrite) {
   ASSERT_OK(seq_file->Skip(100));  // Try to skip past end of file.
   ASSERT_OK(seq_file->Read(1000, &result, scratch));
   ASSERT_EQ(0U, result.size());
-  delete seq_file;

   // Random reads.
   ASSERT_OK(env_->NewRandomAccessFile("/dir/f", &rand_file));
@@ -123,7 +123,6 @@ TEST(MemEnvTest, ReadWrite) {

   // Too high offset.
   ASSERT_TRUE(!rand_file->Read(1000, 5, &result, scratch).ok());
-  delete rand_file;
 }

 TEST(MemEnvTest, Locks) {
@@ -139,14 +138,14 @@ TEST(MemEnvTest, Misc) {
   ASSERT_OK(env_->GetTestDirectory(&test_dir));
   ASSERT_TRUE(!test_dir.empty());

-  WritableFile* writable_file;
+  unique_ptr<WritableFile> writable_file;
   ASSERT_OK(env_->NewWritableFile("/a/b", &writable_file));

   // These are no-ops, but we test they return success.
   ASSERT_OK(writable_file->Sync());
   ASSERT_OK(writable_file->Flush());
   ASSERT_OK(writable_file->Close());
-  delete writable_file;
+  writable_file.reset();
 }

 TEST(MemEnvTest, LargeWrite) {
@@ -158,13 +157,13 @@ TEST(MemEnvTest, LargeWrite) {
     write_data.append(1, static_cast<char>(i));
   }

-  WritableFile* writable_file;
+  unique_ptr<WritableFile> writable_file;
   ASSERT_OK(env_->NewWritableFile("/dir/f", &writable_file));
   ASSERT_OK(writable_file->Append("foo"));
   ASSERT_OK(writable_file->Append(write_data));
-  delete writable_file;
+  writable_file.reset();

-  SequentialFile* seq_file;
+  unique_ptr<SequentialFile> seq_file;
   Slice result;
   ASSERT_OK(env_->NewSequentialFile("/dir/f", &seq_file));
   ASSERT_OK(seq_file->Read(3, &result, scratch));  // Read "foo".
@@ -178,7 +177,6 @@ TEST(MemEnvTest, LargeWrite) {
     read += result.size();
   }
   ASSERT_TRUE(write_data == read_data);
-  delete seq_file;
   delete [] scratch;
 }

@@ -18,17 +18,20 @@
 #ifndef STORAGE_LEVELDB_INCLUDE_CACHE_H_
 #define STORAGE_LEVELDB_INCLUDE_CACHE_H_

+#include <memory>
 #include <stdint.h>
 #include "leveldb/slice.h"

 namespace leveldb {

+using std::shared_ptr;
+
 class Cache;

 // Create a new cache with a fixed size capacity.  This implementation
 // of Cache uses a least-recently-used eviction policy.
-extern Cache* NewLRUCache(size_t capacity);
-extern Cache* NewLRUCache(size_t capacity, int numShardBits);
+extern shared_ptr<Cache> NewLRUCache(size_t capacity);
+extern shared_ptr<Cache> NewLRUCache(size_t capacity, int numShardBits);

 class Cache {
  public:
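With NewLRUCache returning shared_ptr, callers no longer delete the cache themselves; it lives as long as any owner does. A minimal usage sketch under the signatures declared above (the 8MB figure is just an example):

#include "leveldb/cache.h"
#include "leveldb/options.h"

int main() {
  leveldb::Options options;
  // Shared between this scope and the Options struct; freed when the last
  // shared_ptr owner goes away, so there is no explicit delete anywhere.
  options.block_cache = leveldb::NewLRUCache(8 * 1048576);
  // ... open a DB with `options`, reuse options.block_cache elsewhere ...
  return 0;
}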
@@ -15,6 +15,7 @@

 #include <cstdarg>
 #include <string>
+#include <memory>
 #include <vector>
 #include <stdint.h>
 #include "leveldb/status.h"
@@ -28,6 +29,9 @@ class SequentialFile;
 class Slice;
 class WritableFile;

+using std::unique_ptr;
+using std::shared_ptr;
+
 class Env {
  public:
   Env() { }
@@ -47,7 +51,7 @@ class Env {
   //
   // The returned file will only be accessed by one thread at a time.
   virtual Status NewSequentialFile(const std::string& fname,
-                                   SequentialFile** result) = 0;
+                                   unique_ptr<SequentialFile>* result) = 0;

   // Create a brand new random access read-only file with the
   // specified name.  On success, stores a pointer to the new file in
@@ -57,7 +61,7 @@ class Env {
   //
   // The returned file may be concurrently accessed by multiple threads.
   virtual Status NewRandomAccessFile(const std::string& fname,
-                                     RandomAccessFile** result) = 0;
+                                     unique_ptr<RandomAccessFile>* result) = 0;

   // Create an object that writes to a new file with the specified
   // name.  Deletes any existing file with the same name and creates a
@@ -67,7 +71,7 @@ class Env {
   //
   // The returned file will only be accessed by one thread at a time.
   virtual Status NewWritableFile(const std::string& fname,
-                                 WritableFile** result) = 0;
+                                 unique_ptr<WritableFile>* result) = 0;

   // Returns true iff the named file exists.
   virtual bool FileExists(const std::string& fname) = 0;
@@ -143,7 +147,8 @@ class Env {
   virtual Status GetTestDirectory(std::string* path) = 0;

   // Create and return a log file for storing informational messages.
-  virtual Status NewLogger(const std::string& fname, Logger** result) = 0;
+  virtual Status NewLogger(const std::string& fname,
+                           shared_ptr<Logger>* result) = 0;

   // Returns the number of micro-seconds since some fixed point in time. Only
   // useful for computing deltas of time.
@@ -288,6 +293,12 @@ class FileLock {
 };

 // Log the specified data to *info_log if info_log is non-NULL.
+extern void Log(const shared_ptr<Logger>& info_log, const char* format, ...)
+#   if defined(__GNUC__) || defined(__clang__)
+    __attribute__((__format__ (__printf__, 2, 3)))
+#   endif
+    ;
+
 extern void Log(Logger* info_log, const char* format, ...)
 #   if defined(__GNUC__) || defined(__clang__)
     __attribute__((__format__ (__printf__, 2, 3)))
@@ -315,13 +326,15 @@ class EnvWrapper : public Env {
   Env* target() const { return target_; }

   // The following text is boilerplate that forwards all methods to target()
-  Status NewSequentialFile(const std::string& f, SequentialFile** r) {
+  Status NewSequentialFile(const std::string& f,
+                           unique_ptr<SequentialFile>* r) {
     return target_->NewSequentialFile(f, r);
   }
-  Status NewRandomAccessFile(const std::string& f, RandomAccessFile** r) {
+  Status NewRandomAccessFile(const std::string& f,
+                             unique_ptr<RandomAccessFile>* r) {
     return target_->NewRandomAccessFile(f, r);
   }
-  Status NewWritableFile(const std::string& f, WritableFile** r) {
+  Status NewWritableFile(const std::string& f, unique_ptr<WritableFile>* r) {
     return target_->NewWritableFile(f, r);
   }
   bool FileExists(const std::string& f) { return target_->FileExists(f); }
@@ -359,7 +372,8 @@ class EnvWrapper : public Env {
   virtual Status GetTestDirectory(std::string* path) {
     return target_->GetTestDirectory(path);
   }
-  virtual Status NewLogger(const std::string& fname, Logger** result) {
+  virtual Status NewLogger(const std::string& fname,
+                           shared_ptr<Logger>* result) {
     return target_->NewLogger(fname, result);
   }
   uint64_t NowMicros() {
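This is the core interface change: every Env factory now fills in a unique_ptr, so an early return on error cannot leak the handle and no matching delete is needed at the end. A caller-side sketch under the signatures above (CountBytes and the 4 KB buffer are illustrative choices, not part of the patch):

#include <memory>
#include <string>
#include "leveldb/env.h"
#include "leveldb/slice.h"

// Count the bytes in a file using the new out-parameter contract.
leveldb::Status CountBytes(leveldb::Env* env, const std::string& fname,
                           uint64_t* bytes) {
  std::unique_ptr<leveldb::SequentialFile> file;
  leveldb::Status s = env->NewSequentialFile(fname, &file);
  if (!s.ok()) return s;                       // nothing to clean up
  char scratch[4096];
  leveldb::Slice chunk;
  *bytes = 0;
  while (file->Read(sizeof(scratch), &chunk, scratch).ok() && !chunk.empty()) {
    *bytes += chunk.size();
  }
  return leveldb::Status::OK();
}                                              // file released here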
@@ -7,6 +7,7 @@

 #include <stddef.h>
 #include <string>
+#include <memory>
 #include <stdint.h>
 #include "leveldb/slice.h"

@@ -20,6 +21,8 @@ class Logger;
 class Snapshot;
 class Statistics;

+using std::shared_ptr;
+
 // DB contents are stored in a set of blocks, each of which holds a
 // sequence of key,value pairs.  Each block may be compressed before
 // being stored in a file.  The following enum describes which
@@ -84,7 +87,7 @@ struct Options {
   // be written to info_log if it is non-NULL, or to a file stored
   // in the same directory as the DB contents if info_log is NULL.
   // Default: NULL
-  Logger* info_log;
+  shared_ptr<Logger> info_log;

   // -------------------
   // Parameters that affect performance
@@ -121,7 +124,7 @@ struct Options {
   // If non-NULL, use the specified cache for blocks.
   // If NULL, leveldb will automatically create and use an 8MB internal cache.
   // Default: NULL
-  Cache* block_cache;
+  shared_ptr<Cache> block_cache;

   // Approximate size of user data packed per block.  Note that the
   // block size specified here corresponds to uncompressed data.  The
@@ -326,7 +329,7 @@ struct Options {
   // Create an Options object with default values for all fields.
   Options();

-  void Dump(Logger * log) const;
+  void Dump(Logger* log) const;

   // This method allows an application to modify/delete a key-value at
   // the time of compaction.  The compaction process invokes this
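Because Options::info_log is now a shared_ptr, the same Logger can be held by the Options struct, the DB, and application code without any single owner of a raw delete. A short sketch using the NewLogger signature and the Log(const shared_ptr<Logger>&, ...) overload added above (SetupLogging is an illustrative helper name):

#include <memory>
#include <string>
#include "leveldb/env.h"
#include "leveldb/options.h"

void SetupLogging(leveldb::Env* env, leveldb::Options* options,
                  const std::string& fname) {
  std::shared_ptr<leveldb::Logger> logger;
  if (env->NewLogger(fname, &logger).ok()) {
    options->info_log = logger;                 // shared, not transferred
    leveldb::Log(options->info_log, "logging to %s", fname.c_str());
  }
}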
@@ -5,6 +5,7 @@
 #ifndef STORAGE_LEVELDB_INCLUDE_TABLE_H_
 #define STORAGE_LEVELDB_INCLUDE_TABLE_H_

+#include <memory>
 #include <stdint.h>
 #include "leveldb/iterator.h"

@@ -18,6 +19,8 @@ class RandomAccessFile;
 struct ReadOptions;
 class TableCache;

+using std::unique_ptr;
+
 // A Table is a sorted map from strings to strings.  Tables are
 // immutable and persistent.  A Table may be safely accessed from
 // multiple threads without external synchronization.
@@ -36,9 +39,9 @@ class Table {
   //
   // *file must remain live while this Table is in use.
   static Status Open(const Options& options,
-                     RandomAccessFile* file,
+                     unique_ptr<RandomAccessFile>&& file,
                      uint64_t file_size,
-                     Table** table);
+                     unique_ptr<Table>* table);

   ~Table();

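The new Table::Open signature makes the "*file must remain live" requirement self-enforcing: the caller moves the file in, the Table keeps it for its own lifetime, and both are released together. A call-site sketch under this signature (OpenTable is an illustrative helper, mirroring the sst_dump change later in this diff):

#include <memory>
#include <string>
#include "leveldb/env.h"
#include "leveldb/options.h"
#include "leveldb/table.h"

leveldb::Status OpenTable(leveldb::Env* env, const std::string& fname,
                          const leveldb::Options& options,
                          std::unique_ptr<leveldb::Table>* table) {
  std::unique_ptr<leveldb::RandomAccessFile> file;
  leveldb::Status s = env->NewRandomAccessFile(fname, &file);
  if (!s.ok()) return s;
  uint64_t size = 0;
  s = env->GetFileSize(fname, &size);
  if (!s.ok()) return s;                      // `file` released automatically
  return leveldb::Table::Open(options, std::move(file), size, table);
}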
@@ -27,7 +27,7 @@ struct Table::Rep {

   Options options;
   Status status;
-  RandomAccessFile* file;
+  unique_ptr<RandomAccessFile> file;
   uint64_t cache_id;
   FilterBlockReader* filter;
   const char* filter_data;
@@ -37,10 +37,10 @@ struct Table::Rep {
 };

 Status Table::Open(const Options& options,
-                   RandomAccessFile* file,
+                   unique_ptr<RandomAccessFile>&& file,
                    uint64_t size,
-                   Table** table) {
-  *table = NULL;
+                   unique_ptr<Table>* table) {
+  table->reset();
   if (size < Footer::kEncodedLength) {
     return Status::InvalidArgument("file is too short to be an sstable");
   }
@@ -66,7 +66,7 @@ Status Table::Open(const Options& options,
   BlockContents contents;
   Block* index_block = NULL;
   if (s.ok()) {
-    s = ReadBlock(file, ReadOptions(), footer.index_handle(), &contents);
+    s = ReadBlock(file.get(), ReadOptions(), footer.index_handle(), &contents);
     if (s.ok()) {
       index_block = new Block(contents);
     }
@@ -77,13 +77,13 @@ Status Table::Open(const Options& options,
     // ready to serve requests.
     Rep* rep = new Table::Rep;
     rep->options = options;
-    rep->file = file;
+    rep->file = std::move(file);
     rep->metaindex_handle = footer.metaindex_handle();
     rep->index_block = index_block;
     rep->cache_id = (options.block_cache ? options.block_cache->NewId() : 0);
     rep->filter_data = NULL;
     rep->filter = NULL;
-    *table = new Table(rep);
+    table->reset(new Table(rep));
     (*table)->ReadMeta(footer);
   } else {
     if (index_block) delete index_block;
@@ -101,7 +101,8 @@ void Table::ReadMeta(const Footer& footer) {
   // it is an empty block.
   ReadOptions opt;
   BlockContents contents;
-  if (!ReadBlock(rep_->file, opt, footer.metaindex_handle(), &contents).ok()) {
+  if (!ReadBlock(rep_->file.get(), opt, footer.metaindex_handle(),
+                 &contents).ok()) {
     // Do not propagate errors since meta info is not needed for operation
     return;
   }
@@ -129,7 +130,7 @@ void Table::ReadFilter(const Slice& filter_handle_value) {
   // requiring checksum verification in Table::Open.
   ReadOptions opt;
   BlockContents block;
-  if (!ReadBlock(rep_->file, opt, filter_handle, &block).ok()) {
+  if (!ReadBlock(rep_->file.get(), opt, filter_handle, &block).ok()) {
     return;
   }
   if (block.heap_allocated) {
@@ -164,7 +165,7 @@ Iterator* Table::BlockReader(void* arg,
                              const Slice& index_value,
                              bool* didIO) {
   Table* table = reinterpret_cast<Table*>(arg);
-  Cache* block_cache = table->rep_->options.block_cache;
+  Cache* block_cache = table->rep_->options.block_cache.get();
   Statistics* const statistics = table->rep_->options.statistics;
   Block* block = NULL;
   Cache::Handle* cache_handle = NULL;
@@ -188,7 +189,7 @@ Iterator* Table::BlockReader(void* arg,

       RecordTick(statistics, BLOCK_CACHE_HIT);
     } else {
-      s = ReadBlock(table->rep_->file, options, handle, &contents);
+      s = ReadBlock(table->rep_->file.get(), options, handle, &contents);
       if (s.ok()) {
         block = new Block(contents);
         if (contents.cachable && options.fill_cache) {
@@ -203,7 +204,7 @@ Iterator* Table::BlockReader(void* arg,
         RecordTick(statistics, BLOCK_CACHE_MISS);
       }
     } else {
-      s = ReadBlock(table->rep_->file, options, handle, &contents);
+      s = ReadBlock(table->rep_->file.get(), options, handle, &contents);
       if (s.ok()) {
         block = new Block(contents);
       }
@@ -221,8 +221,7 @@ class BlockConstructor: public Constructor {
 class TableConstructor: public Constructor {
  public:
   TableConstructor(const Comparator* cmp)
-      : Constructor(cmp),
-        source_(NULL), table_(NULL) {
+      : Constructor(cmp) {
   }
   ~TableConstructor() {
     Reset();
@@ -244,11 +243,12 @@ class TableConstructor: public Constructor {
     ASSERT_EQ(sink.contents().size(), builder.FileSize());

     // Open the table
-    source_ = new StringSource(sink.contents());
+    source_.reset(new StringSource(sink.contents()));
     Options table_options;
     table_options.comparator = options.comparator;
     table_options.compression_opts = options.compression_opts;
-    return Table::Open(table_options, source_, sink.contents().size(), &table_);
+    return Table::Open(table_options, std::move(source_),
+                       sink.contents().size(), &table_);
   }

   virtual Iterator* NewIterator() const {
@@ -261,14 +261,12 @@ class TableConstructor: public Constructor {

  private:
   void Reset() {
-    delete table_;
-    delete source_;
-    table_ = NULL;
-    source_ = NULL;
+    table_.reset();
+    source_.reset();
   }

-  StringSource* source_;
-  Table* table_;
+  unique_ptr<StringSource> source_;
+  unique_ptr<Table> table_;

   TableConstructor();
 };
@@ -433,7 +433,6 @@ class StressTest {

   ~StressTest() {
     delete db_;
-    delete cache_;
     delete filter_policy_;
   }

@@ -757,7 +756,7 @@ class StressTest {
   }

  private:
-  Cache* cache_;
+  shared_ptr<Cache> cache_;
   const FilterPolicy* filter_policy_;
   DB* db_;
   int num_times_reopened_;
@@ -45,16 +45,16 @@ SstFileReader::SstFileReader(std::string file_path,

 Status SstFileReader::ReadSequential(bool print_kv, uint64_t read_num)
 {
-  Table* table;
+  unique_ptr<Table> table;
   Options table_options;
-  RandomAccessFile* file = NULL;
+  unique_ptr<RandomAccessFile> file;
   Status s = table_options.env->NewRandomAccessFile(file_name_, &file);
   if(!s.ok()) {
     return s;
   }
   uint64_t file_size;
   table_options.env->GetFileSize(file_name_, &file_size);
-  s = Table::Open(table_options, file, file_size, &table);
+  s = Table::Open(table_options, std::move(file), file_size, &table);
   if(!s.ok()) {
     return s;
   }
@@ -26,7 +26,7 @@ class AutoSplitLogger : public Logger {
     std::string db_log_dir_;
     std::string db_absolute_path_;
     Env* env_;
-    UnderlyingLogger* logger_;
+    shared_ptr<UnderlyingLogger> logger_;
     const size_t MAX_LOG_FILE_SIZE;
     Status status_;

@@ -42,7 +42,6 @@ class AutoSplitLogger : public Logger {
       log_fname_ = InfoLogFileName(dbname_, db_absolute_path_, db_log_dir_);
       InitLogger();
     }
-    ~AutoSplitLogger() { delete logger_; }

     virtual void Logv(const char* format, va_list ap) {
       assert(GetStatus().ok());
@@ -50,7 +49,6 @@ class AutoSplitLogger : public Logger {
       logger_->Logv(format, ap);
       // Check if the log file should be splitted.
       if (logger_->GetLogFileSize() > MAX_LOG_FILE_SIZE) {
-        delete logger_;
         std::string old_fname = OldInfoLogFileName(
           dbname_, env_->NowMicros(), db_absolute_path_, db_log_dir_);
         env_->RenameFile(log_fname_, old_fname);
@@ -338,15 +338,15 @@ class ShardedLRUCache : public Cache {

 }  // end anonymous namespace

-Cache* NewLRUCache(size_t capacity) {
-  return new ShardedLRUCache(capacity);
+shared_ptr<Cache> NewLRUCache(size_t capacity) {
+  return std::make_shared<ShardedLRUCache>(capacity);
 }

-Cache* NewLRUCache(size_t capacity, int numShardBits) {
+shared_ptr<Cache> NewLRUCache(size_t capacity, int numShardBits) {
   if (numShardBits >= 20) {
     return NULL;  // the cache cannot be sharded into too many fine pieces
   }
-  return new ShardedLRUCache(capacity, numShardBits);
+  return std::make_shared<ShardedLRUCache>(capacity, numShardBits);
 }

 } // namespace leveldb
@@ -35,14 +35,13 @@ class CacheTest {
   static const int kCacheSize = 1000;
   std::vector<int> deleted_keys_;
   std::vector<int> deleted_values_;
-  Cache* cache_;
+  shared_ptr<Cache> cache_;

   CacheTest() : cache_(NewLRUCache(kCacheSize)) {
     current_ = this;
   }

   ~CacheTest() {
-    delete cache_;
   }

   int Lookup(int key) {
util/env.cc
@@ -25,7 +25,16 @@ FileLock::~FileLock() {
 }

 void Log(Logger* info_log, const char* format, ...) {
-  if (info_log != NULL) {
+  if (info_log) {
+    va_list ap;
+    va_start(ap, format);
+    info_log->Logv(format, ap);
+    va_end(ap);
+  }
+}
+
+void Log(const shared_ptr<Logger>& info_log, const char* format, ...) {
+  if (info_log) {
     va_list ap;
     va_start(ap, format);
     info_log->Logv(format, ap);
@@ -36,7 +45,7 @@ void Log(Logger* info_log, const char* format, ...) {
 static Status DoWriteStringToFile(Env* env, const Slice& data,
                                   const std::string& fname,
                                   bool should_sync) {
-  WritableFile* file;
+  unique_ptr<WritableFile> file;
   Status s = env->NewWritableFile(fname, &file);
   if (!s.ok()) {
     return s;
@@ -45,10 +54,6 @@ static Status DoWriteStringToFile(Env* env, const Slice& data,
   if (s.ok() && should_sync) {
     s = file->Sync();
   }
-  if (s.ok()) {
-    s = file->Close();
-  }
-  delete file;  // Will auto-close if we did not close above
   if (!s.ok()) {
     env->DeleteFile(fname);
   }
@@ -67,7 +72,7 @@ Status WriteStringToFileSync(Env* env, const Slice& data,

 Status ReadFileToString(Env* env, const std::string& fname, std::string* data) {
   data->clear();
-  SequentialFile* file;
+  unique_ptr<SequentialFile> file;
   Status s = env->NewSequentialFile(fname, &file);
   if (!s.ok()) {
     return s;
@@ -86,7 +91,6 @@ Status ReadFileToString(Env* env, const std::string& fname, std::string* data) {
     }
   }
   delete[] space;
-  delete file;
   return s;
 }
@@ -490,7 +490,8 @@ Status HdfsEnv::UnlockFile(FileLock* lock) {
   return Status::OK();
 }
 
-Status HdfsEnv::NewLogger(const std::string& fname, Logger** result) {
+Status HdfsEnv::NewLogger(const std::string& fname,
+                          shared_ptr<Logger>* result) {
   HdfsWritableFile* f = new HdfsWritableFile(fileSys_, fname);
   if (f == NULL || !f->isValid()) {
     *result = NULL;
@@ -515,7 +516,7 @@ Status HdfsEnv::NewLogger(const std::string& fname, Logger** result) {
 #include "hdfs/env_hdfs.h"
 namespace leveldb {
 Status HdfsEnv::NewSequentialFile(const std::string& fname,
-                                  SequentialFile** result) {
+                                  unique_ptr<SequentialFile>* result) {
   return Status::NotSupported("Not compiled with hdfs support");
 }
 }
@@ -510,20 +510,21 @@ class PosixEnv : public Env {
   }
 
   virtual Status NewSequentialFile(const std::string& fname,
-                                   SequentialFile** result) {
+                                   unique_ptr<SequentialFile>* result) {
+    result->reset();
     FILE* f = fopen(fname.c_str(), "r");
     if (f == NULL) {
       *result = NULL;
       return IOError(fname, errno);
     } else {
-      *result = new PosixSequentialFile(fname, f);
+      result->reset(new PosixSequentialFile(fname, f));
       return Status::OK();
     }
   }
 
   virtual Status NewRandomAccessFile(const std::string& fname,
-                                     RandomAccessFile** result) {
-    *result = NULL;
+                                     unique_ptr<RandomAccessFile>* result) {
+    result->reset();
     Status s;
     int fd = open(fname.c_str(), O_RDONLY);
     if (fd < 0) {
@@ -537,30 +538,30 @@ class PosixEnv : public Env {
       if (s.ok()) {
         void* base = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
         if (base != MAP_FAILED) {
-          *result = new PosixMmapReadableFile(fname, base, size);
+          result->reset(new PosixMmapReadableFile(fname, base, size));
         } else {
           s = IOError(fname, errno);
         }
       }
       close(fd);
     } else {
-      *result = new PosixRandomAccessFile(fname, fd);
+      result->reset(new PosixRandomAccessFile(fname, fd));
     }
     return s;
   }
 
   virtual Status NewWritableFile(const std::string& fname,
-                                 WritableFile** result) {
+                                 unique_ptr<WritableFile>* result) {
+    result->reset();
     Status s;
     const int fd = open(fname.c_str(), O_CREAT | O_RDWR | O_TRUNC, 0644);
     if (fd < 0) {
-      *result = NULL;
       s = IOError(fname, errno);
     } else {
      if (useMmapWrite) {
-        *result = new PosixMmapFile(fname, fd, page_size_);
+        result->reset(new PosixMmapFile(fname, fd, page_size_));
      } else {
-        *result = new PosixWritableFile(fname, fd, 65536);
+        result->reset(new PosixWritableFile(fname, fd, 65536));
      }
     }
     return s;
@@ -706,13 +707,14 @@ class PosixEnv : public Env {
     return thread_id;
   }
 
-  virtual Status NewLogger(const std::string& fname, Logger** result) {
+  virtual Status NewLogger(const std::string& fname,
+                           shared_ptr<Logger>* result) {
     FILE* f = fopen(fname.c_str(), "w");
     if (f == NULL) {
-      *result = NULL;
+      result->reset();
       return IOError(fname, errno);
     } else {
-      *result = new PosixLogger(f, &PosixEnv::gettid);
+      result->reset(new PosixLogger(f, &PosixEnv::gettid));
      return Status::OK();
    }
  }
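(Illustrative caller of the new factory signatures; the file name and helper are made up. The point is that the out-parameter is reset up front, so a caller never sees a stale raw pointer on failure.)

#include <memory>
#include <string>

#include "leveldb/env.h"
#include "leveldb/slice.h"

// Hypothetical helper: read the first few bytes of a file.
leveldb::Status PeekAtFile(const std::string& fname, std::string* out) {
  leveldb::Env* env = leveldb::Env::Default();
  std::unique_ptr<leveldb::RandomAccessFile> file;
  leveldb::Status s = env->NewRandomAccessFile(fname, &file);
  if (!s.ok()) {
    return s;  // `file` is guaranteed to be empty here
  }
  char scratch[16];
  leveldb::Slice result;
  s = file->Read(0, sizeof(scratch), &result, scratch);
  if (s.ok()) {
    out->assign(result.data(), result.size());
  }
  return s;  // fd/mapping is closed when `file` goes out of scope
}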
@@ -606,7 +606,7 @@ void WALDumper::DoCommand() {
   }
 };
 
-  SequentialFile* file;
+  unique_ptr<SequentialFile> file;
   Env* env_ = Env::Default();
   Status status = env_->NewSequentialFile(wal_file_, &file);
   if (!status.ok()) {
@@ -614,7 +614,7 @@ void WALDumper::DoCommand() {
                 status.ToString());
   } else {
     StdErrReporter reporter;
-    log::Reader reader(file, &reporter, true, 0);
+    log::Reader reader(std::move(file), &reporter, true, 0);
     std::string scratch;
     WriteBatch batch;
     Slice record;
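(log::Reader now takes the SequentialFile by unique_ptr, so the std::move above hands ownership to the reader and `file` is empty afterwards. A hedged sketch of the calling pattern; the reporter class, helper name, and internal header path are assumptions.)

#include <memory>
#include <string>

#include "db/log_reader.h"  // internal header, assumed location
#include "leveldb/env.h"
#include "leveldb/slice.h"

namespace {
// Stand-in for the StdErrReporter used above; ignores corruption reports.
class NullReporter : public leveldb::log::Reader::Reporter {
 public:
  virtual void Corruption(size_t /*bytes*/, const leveldb::Status& /*s*/) { }
};
}  // namespace

leveldb::Status CountWalRecords(const std::string& wal_file, int* count) {
  std::unique_ptr<leveldb::SequentialFile> file;
  leveldb::Status s =
      leveldb::Env::Default()->NewSequentialFile(wal_file, &file);
  if (!s.ok()) {
    return s;
  }
  NullReporter reporter;
  // The reader owns the handle from here on; `file` must not be used again.
  leveldb::log::Reader reader(std::move(file), &reporter, true /*checksum*/, 0);
  std::string scratch;
  leveldb::Slice record;
  *count = 0;
  while (reader.ReadRecord(&record, &scratch)) {
    ++(*count);
  }
  return leveldb::Status::OK();
}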
@@ -23,7 +23,6 @@ Options::Options()
       write_buffer_size(4<<20),
       max_write_buffer_number(2),
       max_open_files(1000),
-      block_cache(NULL),
       block_size(4096),
       block_restart_interval(16),
       compression(kSnappyCompression),
@@ -61,19 +60,18 @@ Options::Options()
 }
 
 void
-Options::Dump(
-    Logger* log) const
+Options::Dump(Logger* log) const
 {
   Log(log," Options.comparator: %s", comparator->Name());
   Log(log," Options.create_if_missing: %d", create_if_missing);
   Log(log," Options.error_if_exists: %d", error_if_exists);
   Log(log," Options.paranoid_checks: %d", paranoid_checks);
   Log(log," Options.env: %p", env);
-  Log(log," Options.info_log: %p", info_log);
+  Log(log," Options.info_log: %p", info_log.get());
   Log(log," Options.write_buffer_size: %zd", write_buffer_size);
   Log(log," Options.max_write_buffer_number: %d", max_write_buffer_number);
   Log(log," Options.max_open_files: %d", max_open_files);
-  Log(log," Options.block_cache: %p", block_cache);
+  Log(log," Options.block_cache: %p", block_cache.get());
   if (block_cache) {
     Log(log," Options.block_cache_size: %zd",
         block_cache->GetCapacity());
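(Why .get() is needed above: Log() is a varargs function, so a shared_ptr cannot be passed through a %p specifier directly; the raw pointer has to be extracted. A tiny standalone illustration, printf standing in for Log; the exact return type of GetCapacity() is assumed, hence the cast.)

#include <cstdio>
#include <memory>

#include "leveldb/cache.h"

void DumpCachePointer() {
  std::shared_ptr<leveldb::Cache> cache = leveldb::NewLRUCache(1 << 20);
  // Passing `cache` itself to a "..." parameter would be ill-formed;
  // .get() yields the raw Cache* that %p expects.
  std::printf("block_cache: %p, capacity: %lu\n",
              static_cast<void*>(cache.get()),
              static_cast<unsigned long>(cache->GetCapacity()));
}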
@@ -37,10 +37,10 @@ class ErrorEnv : public EnvWrapper {
         num_writable_file_errors_(0) { }
 
   virtual Status NewWritableFile(const std::string& fname,
-                                 WritableFile** result) {
+                                 unique_ptr<WritableFile>* result) {
+    result->reset();
     if (writable_file_error_) {
       ++num_writable_file_errors_;
-      *result = NULL;
       return Status::IOError(fname, "fake error");
     }
     return target()->NewWritableFile(fname, result);
|
Loading…
x
Reference in New Issue
Block a user