#include "db/transaction_log_iterator_impl.h"
#include "db/write_batch_internal.h"
#include "db/filename.h"

namespace leveldb {
// Constructs an iterator over the DB's transaction logs.
//
// Parameters:
//   dbname              - path of the DB; used to build log file names.
//   options             - DB options; supplies env and info_log (not owned).
//   soptions            - storage options forwarded to NewSequentialFile.
//   seq                 - sequence number the iterator should start from.
//   files               - the set of log files to iterate over; must be
//                         non-null (asserted below). Not owned.
//   lastFlushedSequence - pointer to the last flushed sequence number; must
//                         be non-null (asserted below). Read on every Next()
//                         call, so it may advance while iterating.
//
// Note: the iterator starts with started_ == false; Valid() stays false
// until the first Next() call positions it.
TransactionLogIteratorImpl::TransactionLogIteratorImpl(
    const std::string& dbname,
    const Options* options,
    const StorageOptions& soptions,
    SequenceNumber& seq,
    std::vector<LogFile>* files,
    SequenceNumber const * const lastFlushedSequence) :
    dbname_(dbname),
    options_(options),
    soptions_(soptions),
    sequenceNumber_(seq),
    files_(files),
    started_(false),
    isValid_(true),
    currentFileIndex_(0),
    lastFlushedSequence_(lastFlushedSequence) {
  assert(files_ != nullptr);
  assert(lastFlushedSequence_);
}
// Builds a LogReporter for the log file identified by logNumber, wired to
// this iterator's env and info log so that corruption encountered while
// reading that file is logged with the owning file's number.
LogReporter
TransactionLogIteratorImpl::NewLogReporter(const uint64_t logNumber) {
  LogReporter result;
  result.env = options_->env;
  result.info_log = options_->info_log.get();
  result.log_number = logNumber;
  return result;
}
// Opens the sequential file backing logFile into *file.
//
// Archived logs are opened straight from the archive directory. Live logs
// are first tried in the DB directory; if that fails (the file may have been
// moved to the archive concurrently), the archive directory is retried.
//
// Returns OK on success. Returns IOError if the file is present in neither
// location.
Status TransactionLogIteratorImpl::OpenLogFile(
    const LogFile& logFile,
    unique_ptr<SequentialFile>* file) {
  Env* env = options_->env;
  if (logFile.type == kArchivedLogFile) {
    std::string fname = ArchivedLogFileName(dbname_, logFile.logNumber);
    return env->NewSequentialFile(fname, file, soptions_);
  }
  // Live log: try the DB directory first.
  std::string fname = LogFileName(dbname_, logFile.logNumber);
  Status status = env->NewSequentialFile(fname, file, soptions_);
  if (!status.ok()) {
    // Could not open it in the DB directory.
    // Try the archive dir, as it could have moved in the meanwhile.
    fname = ArchivedLogFileName(dbname_, logFile.logNumber);
    status = env->NewSequentialFile(fname, file, soptions_);
    if (!status.ok()) {
      // Bug fix: dropped the stray leading space the original message had.
      return Status::IOError("Requested file not present in the dir");
    }
  }
  return status;
}
// Returns the write batch the iterator is currently positioned on, together
// with its starting sequence number. Ownership of the batch is transferred
// to the caller; the iterator holds no batch until the next Next() call.
BatchResult TransactionLogIteratorImpl::GetBatch() {
  // Calling GetBatch on an invalid iterator is a programming error.
  assert(isValid_);
  BatchResult batchResult;
  batchResult.sequence = currentSequence_;
  batchResult.writeBatchPtr = std::move(currentBatch_);
  return batchResult;
}
// Reports the status recorded by the most recent iterator operation.
Status TransactionLogIteratorImpl::status() {
  Status result = currentStatus_;
  return result;
}
bool TransactionLogIteratorImpl::Valid() {
|
|
|
|
return started_ && isValid_;
|
|
|
|
}
// Advances the iterator to the next write batch.
//
// First call (started_ == false): seeks within the current log file for the
// first batch whose sequence number is >= sequenceNumber_. Subsequent calls:
// reads the next record from the current reader, rolling over to the next
// log file when the current one is exhausted.
//
// On failure, sets isValid_ = false and records the reason in
// currentStatus_ (IOError for unflushed target / no more data, NotFound if
// the first file does not contain the target sequence).
void TransactionLogIteratorImpl::Next() {
  // First seek to the given seqNo. in the current file.
  LogFile currentLogFile = files_->at(currentFileIndex_);
  // NOTE(review): this reporter keeps the log number of the file current at
  // entry; after the goto advances currentFileIndex_ below, corruption in
  // the new file is still reported under the old number — confirm intended.
  LogReporter reporter = NewLogReporter(currentLogFile.logNumber);
  std::string scratch;
  Slice record;

  if (!started_) {
    // Pessimistically mark invalid; set true only once a batch is found.
    isValid_ = false;
    if (sequenceNumber_ > *lastFlushedSequence_) {
      currentStatus_ = Status::IOError("Looking for a sequence, "
                                       "which is not flushed yet.");
      return;
    }
    unique_ptr<SequentialFile> file;
    Status status = OpenLogFile(currentLogFile, &file);
    if (!status.ok()) {
      currentStatus_ = status;
      return;
    }
    assert(file);
    // checksum=true, initial_offset=0: read the file from the start,
    // verifying record checksums.
    unique_ptr<log::Reader> reader(
      new log::Reader(std::move(file), &reporter, true, 0));
    assert(reader);
    // Scan forward until a batch at or past the requested sequence appears.
    while (reader->ReadRecord(&record, &scratch)) {
      if (record.size() < 12) {
        // Records smaller than a WriteBatch header are corrupt; report and
        // keep scanning.
        reporter.Corruption(
          record.size(), Status::Corruption("log record too small"));
        continue;
      }
      UpdateCurrentWriteBatch(record);
      if (currentSequence_ >= sequenceNumber_) {
        assert(currentSequence_ <= *lastFlushedSequence_);
        isValid_ = true;
        // Keep the reader so later Next() calls resume from this position.
        currentLogReader_ = std::move(reader);
        break;
      }
    }
    if (!isValid_) {
      // TODO read the entire first file. and did not find the seq number.
      // Error out.
      currentStatus_ =
        Status::NotFound("Did not find the Seq no. in first file");
    }
    started_ = true;
  } else {
LOOK_NEXT_FILE:
    assert(currentLogReader_);
    bool openNextFile = true;
    // Only read further if there are flushed batches beyond the current one.
    if (currentSequence_ < *lastFlushedSequence_) {
      if (currentLogReader_->IsEOF()) {
        // The live log may have grown since we hit EOF; clear the EOF marker
        // so the reader retries.
        currentLogReader_->UnmarkEOF();
      }
      while (currentLogReader_->ReadRecord(&record, &scratch)) {
        if (record.size() < 12) {
          reporter.Corruption(
            record.size(), Status::Corruption("log record too small"));
          continue;
        } else {
          UpdateCurrentWriteBatch(record);
          openNextFile = false;
          break;
        }
      }
    }

    if (openNextFile) {
      if (currentFileIndex_ < files_->size() - 1) {
        // Current file exhausted; advance to the next log file.
        ++currentFileIndex_;
        currentLogReader_.reset();
        unique_ptr<SequentialFile> file;
        Status status = OpenLogFile(files_->at(currentFileIndex_), &file);
        if (!status.ok()) {
          isValid_ = false;
          currentStatus_ = status;
          return;
        }
        currentLogReader_.reset(
          new log::Reader(std::move(file), &reporter, true, 0));
        // Retry the read loop against the freshly opened file.
        goto LOOK_NEXT_FILE;
      } else if (currentSequence_ == *lastFlushedSequence_) {
        // The last update has been read. and next is being called.
        isValid_ = false;
        currentStatus_ = Status::OK();
      } else {
        // LOOKED AT FILES. WE ARE DONE HERE.
        isValid_ = false;
        currentStatus_ = Status::IOError(" NO MORE DATA LEFT");
      }
    }
  }
}
void TransactionLogIteratorImpl::UpdateCurrentWriteBatch(const Slice& record) {
|
|
|
|
WriteBatch* batch = new WriteBatch();
|
|
|
|
WriteBatchInternal::SetContents(batch, record);
|
|
|
|
currentSequence_ = WriteBatchInternal::Sequence(batch);
|
|
|
|
currentBatch_.reset(batch);
|
2013-03-21 23:12:35 +01:00
|
|
|
isValid_ = true;
|
2013-03-21 23:49:20 +01:00
|
|
|
currentStatus_ = Status::OK();
|
2013-03-04 19:44:04 +01:00
|
|
|
}
}  // namespace leveldb