// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
// WriteBatch::rep_ :=
//    sequence: fixed64
//    count: fixed32
//    data: record[count]
// record :=
//    kTypeValue varstring varstring
//    kTypeMerge varstring varstring
//    kTypeDeletion varstring
// varstring :=
//    len: varint32
//    data: uint8[len]
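//
// For illustration only (the keys, values, and sequence number below are
// invented, not taken from real data): a batch holding Put("k1", "v1")
// followed by Delete("k2"), with starting sequence number 100, would be
// laid out in rep_ roughly as
//    fixed64(100) fixed32(2)
//    kTypeValue    varint32(2) "k1"  varint32(2) "v1"
//    kTypeDeletion varint32(2) "k2"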

#include "rocksdb/write_batch.h"

#include "rocksdb/options.h"
#include "rocksdb/merge_operator.h"
#include "db/dbformat.h"
#include "db/db_impl.h"
#include "db/memtable.h"
#include "db/snapshot.h"
#include "db/write_batch_internal.h"
#include "util/coding.h"
#include "util/statistics_imp.h"
#include <stdexcept>

namespace rocksdb {

// WriteBatch header has an 8-byte sequence number followed by a 4-byte count.
static const size_t kHeader = 12;

WriteBatch::WriteBatch(size_t reserved_bytes) {
  rep_.reserve((reserved_bytes > kHeader) ? reserved_bytes : kHeader);
  Clear();
}

WriteBatch::~WriteBatch() { }

WriteBatch::Handler::~Handler() { }

void WriteBatch::Handler::Merge(const Slice& key, const Slice& value) {
  throw std::runtime_error("Handler::Merge not implemented!");
}

void WriteBatch::Handler::LogData(const Slice& blob) {
  // If the user has not specified something to do with blobs, then we ignore
  // them.
}

bool WriteBatch::Handler::Continue() {
  return true;
}

void WriteBatch::Clear() {
  rep_.clear();
  rep_.resize(kHeader);
}

int WriteBatch::Count() const {
  return WriteBatchInternal::Count(this);
}

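// Decodes rep_ record by record and forwards each operation to the handler.
// The number of records seen is compared against the count stored in the
// header so that a truncated or malformed batch is reported as corruption.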
Status WriteBatch::Iterate(Handler* handler) const {
  Slice input(rep_);
  if (input.size() < kHeader) {
    return Status::Corruption("malformed WriteBatch (too small)");
  }

  input.remove_prefix(kHeader);
  Slice key, value, blob;
  int found = 0;
  while (!input.empty() && handler->Continue()) {
    char tag = input[0];
    input.remove_prefix(1);
    switch (tag) {
      case kTypeValue:
        if (GetLengthPrefixedSlice(&input, &key) &&
            GetLengthPrefixedSlice(&input, &value)) {
          handler->Put(key, value);
          found++;
        } else {
          return Status::Corruption("bad WriteBatch Put");
        }
        break;
      case kTypeDeletion:
        if (GetLengthPrefixedSlice(&input, &key)) {
          handler->Delete(key);
          found++;
        } else {
          return Status::Corruption("bad WriteBatch Delete");
        }
        break;
      case kTypeMerge:
        if (GetLengthPrefixedSlice(&input, &key) &&
            GetLengthPrefixedSlice(&input, &value)) {
          handler->Merge(key, value);
          found++;
        } else {
          return Status::Corruption("bad WriteBatch Merge");
        }
        break;
      case kTypeLogData:
        if (GetLengthPrefixedSlice(&input, &blob)) {
          handler->LogData(blob);
        } else {
          return Status::Corruption("bad WriteBatch Blob");
        }
        break;
      default:
        return Status::Corruption("unknown WriteBatch tag");
    }
  }
  if (found != WriteBatchInternal::Count(this)) {
    return Status::Corruption("WriteBatch has wrong count");
  } else {
    return Status::OK();
  }
}

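// The helpers below read and write the fixed-size header fields (sequence
// number and record count) of rep_ directly.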
int WriteBatchInternal::Count(const WriteBatch* b) {
  return DecodeFixed32(b->rep_.data() + 8);
}

void WriteBatchInternal::SetCount(WriteBatch* b, int n) {
  EncodeFixed32(&b->rep_[8], n);
}

SequenceNumber WriteBatchInternal::Sequence(const WriteBatch* b) {
  return SequenceNumber(DecodeFixed64(b->rep_.data()));
}

void WriteBatchInternal::SetSequence(WriteBatch* b, SequenceNumber seq) {
  EncodeFixed64(&b->rep_[0], seq);
}

void WriteBatch::Put(const Slice& key, const Slice& value) {
  WriteBatchInternal::SetCount(this, WriteBatchInternal::Count(this) + 1);
  rep_.push_back(static_cast<char>(kTypeValue));
  PutLengthPrefixedSlice(&rep_, key);
  PutLengthPrefixedSlice(&rep_, value);
}

void WriteBatch::Put(const SliceParts& key, const SliceParts& value) {
  WriteBatchInternal::SetCount(this, WriteBatchInternal::Count(this) + 1);
  rep_.push_back(static_cast<char>(kTypeValue));
  PutLengthPrefixedSliceParts(&rep_, key);
  PutLengthPrefixedSliceParts(&rep_, value);
}

void WriteBatch::Delete(const Slice& key) {
  WriteBatchInternal::SetCount(this, WriteBatchInternal::Count(this) + 1);
  rep_.push_back(static_cast<char>(kTypeDeletion));
  PutLengthPrefixedSlice(&rep_, key);
}

void WriteBatch::Merge(const Slice& key, const Slice& value) {
  WriteBatchInternal::SetCount(this, WriteBatchInternal::Count(this) + 1);
  rep_.push_back(static_cast<char>(kTypeMerge));
  PutLengthPrefixedSlice(&rep_, key);
  PutLengthPrefixedSlice(&rep_, value);
}

void WriteBatch::PutLogData(const Slice& blob) {
  rep_.push_back(static_cast<char>(kTypeLogData));
  PutLengthPrefixedSlice(&rep_, blob);
}
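
// A sketch of typical client usage (the db pointer below is illustrative and
// not part of this file):
//
//   WriteBatch batch;
//   batch.Put("key1", "value1");
//   batch.Delete("key2");
//   db->Write(WriteOptions(), &batch);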

namespace {
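// Applies each record of a WriteBatch to a MemTable, assigning consecutive
// sequence numbers starting from the batch's sequence number. Depending on
// the options it may also consult the DB to update values in place, fold
// merges, or drop deletes for keys that definitely do not exist.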
class MemTableInserter : public WriteBatch::Handler {
 public:
  SequenceNumber sequence_;
  MemTable* mem_;
  const Options* options_;
  DBImpl* db_;
  const bool filter_deletes_;

  MemTableInserter(SequenceNumber sequence, MemTable* mem, const Options* opts,
                   DB* db, const bool filter_deletes)
      : sequence_(sequence),
        mem_(mem),
        options_(opts),
        db_(reinterpret_cast<DBImpl*>(db)),
        filter_deletes_(filter_deletes) {
    assert(mem_);
    if (filter_deletes_) {
      assert(options_);
      assert(db_);
    }
  }

  virtual void Put(const Slice& key, const Slice& value) {
    if (!options_->inplace_update_support) {
      mem_->Add(sequence_, kTypeValue, key, value);
    } else if (options_->inplace_callback == nullptr) {
      mem_->Update(sequence_, key, value);
      RecordTick(options_->statistics.get(), NUMBER_KEYS_UPDATED);
    } else {
      if (mem_->UpdateCallback(sequence_, key, value, *options_)) {
        RecordTick(options_->statistics.get(), NUMBER_KEYS_UPDATED);
      } else {
        // Key not found in memtable. Do SST get, update, add.
        SnapshotImpl read_from_snapshot;
        read_from_snapshot.number_ = sequence_;
        ReadOptions ropts;
        ropts.snapshot = &read_from_snapshot;

        std::string prev_value;
        std::string merged_value;
        Status s = db_->Get(ropts, key, &prev_value);
        char* prev_buffer = const_cast<char*>(prev_value.c_str());
        size_t prev_size = prev_value.size();
        if (options_->inplace_callback(s.ok() ? prev_buffer : nullptr,
                                       s.ok() ? prev_size : 0,
                                       value, &merged_value)) {
          // prev_value is updated in-place with the final value.
          mem_->Add(sequence_, kTypeValue, key, Slice(prev_buffer, prev_size));
          RecordTick(options_->statistics.get(), NUMBER_KEYS_WRITTEN);
        } else {
          // merged_value contains the final value. Only add it if non-empty.
          if (!merged_value.empty()) {
            mem_->Add(sequence_, kTypeValue, key, Slice(merged_value));
            RecordTick(options_->statistics.get(), NUMBER_KEYS_WRITTEN);
          }
        }
      }
    }
    sequence_++;
  }

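  // Appends a kTypeMerge record to the memtable, except that once
  // options_->max_successive_merges operands have accumulated for the key,
  // the pending merges are resolved eagerly via the merge operator's
  // FullMerge and the result is stored as a plain value.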
  virtual void Merge(const Slice& key, const Slice& value) {
    bool perform_merge = false;

    if (options_->max_successive_merges > 0 && db_ != nullptr) {
      LookupKey lkey(key, sequence_);

      // Count the number of successive merge entries at the head of the key
      // in the memtable.
      size_t num_merges = mem_->CountSuccessiveMergeEntries(lkey);

      if (num_merges >= options_->max_successive_merges) {
        perform_merge = true;
      }
    }

    if (perform_merge) {
      // 1) Get the existing value.
      std::string get_value;

      // Pass in the sequence number so that we also include previous merge
      // operations in the same batch.
      SnapshotImpl read_from_snapshot;
      read_from_snapshot.number_ = sequence_;
      ReadOptions read_options;
      read_options.snapshot = &read_from_snapshot;

      db_->Get(read_options, key, &get_value);
      Slice get_value_slice = Slice(get_value);

      // 2) Apply this merge.
      auto merge_operator = options_->merge_operator.get();
      assert(merge_operator);

      std::deque<std::string> operands;
      operands.push_front(value.ToString());
      std::string new_value;
      if (!merge_operator->FullMerge(key,
                                     &get_value_slice,
                                     operands,
                                     &new_value,
                                     options_->info_log.get())) {
        // Failed to merge! Fall back to storing the unmerged operand below.
        RecordTick(options_->statistics.get(), NUMBER_MERGE_FAILURES);
        perform_merge = false;
      } else {
        // 3) Add the merged value to the memtable.
        mem_->Add(sequence_, kTypeValue, key, new_value);
      }
    }

    if (!perform_merge) {
      // Add the merge operand to the memtable as a kTypeMerge record.
      mem_->Add(sequence_, kTypeMerge, key, value);
    }

    sequence_++;
  }

  virtual void Delete(const Slice& key) {
    if (filter_deletes_) {
      SnapshotImpl read_from_snapshot;
      read_from_snapshot.number_ = sequence_;
      ReadOptions ropts;
      ropts.snapshot = &read_from_snapshot;
      std::string value;
      if (!db_->KeyMayExist(ropts, key, &value)) {
        RecordTick(options_->statistics.get(), NUMBER_FILTERED_DELETES);
        return;
      }
    }
    mem_->Add(sequence_, kTypeDeletion, key, Slice());
    sequence_++;
  }
};
}  // namespace

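// Replays the batch into the given memtable, starting at the batch's own
// sequence number.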
Status WriteBatchInternal::InsertInto(const WriteBatch* b, MemTable* mem,
                                      const Options* opts, DB* db,
                                      const bool filter_deletes) {
  MemTableInserter inserter(WriteBatchInternal::Sequence(b), mem, opts, db,
                            filter_deletes);
  return b->Iterate(&inserter);
}

void WriteBatchInternal::SetContents(WriteBatch* b, const Slice& contents) {
  assert(contents.size() >= kHeader);
  b->rep_.assign(contents.data(), contents.size());
}

void WriteBatchInternal::Append(WriteBatch* dst, const WriteBatch* src) {
  SetCount(dst, Count(dst) + Count(src));
  assert(src->rep_.size() >= kHeader);
  dst->rep_.append(src->rep_.data() + kHeader, src->rep_.size() - kHeader);
}

}  // namespace rocksdb