// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#ifndef ROCKSDB_LITE

#include "rocksdb/utilities/write_batch_with_index.h"

#include <memory>

#include "db/column_family.h"
#include "db/db_impl/db_impl.h"
#include "db/merge_context.h"
#include "db/merge_helper.h"
#include "memory/arena.h"
#include "memtable/skiplist.h"
#include "options/db_options.h"
#include "rocksdb/comparator.h"
#include "rocksdb/iterator.h"
#include "util/cast_util.h"
#include "util/string_util.h"
#include "utilities/write_batch_with_index/write_batch_with_index_internal.h"

namespace ROCKSDB_NAMESPACE {

typedef SkipList<WriteBatchIndexEntry*, const WriteBatchEntryComparator&>
    WriteBatchEntrySkipList;

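// Iterator over the skip-list index of a WriteBatchWithIndex, restricted to a
// single column family. Index entries are ordered by (column family id, key),
// so reaching an entry of a different column family marks the end of this
// iterator's range (see Valid()).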
class WBWIIteratorImpl : public WBWIIterator {
 public:
  WBWIIteratorImpl(uint32_t column_family_id,
                   WriteBatchEntrySkipList* skip_list,
                   const ReadableWriteBatch* write_batch)
      : column_family_id_(column_family_id),
        skip_list_iter_(skip_list),
        write_batch_(write_batch) {}

  ~WBWIIteratorImpl() override {}

  bool Valid() const override {
    if (!skip_list_iter_.Valid()) {
      return false;
    }
    const WriteBatchIndexEntry* iter_entry = skip_list_iter_.key();
    return (iter_entry != nullptr &&
            iter_entry->column_family == column_family_id_);
  }

  void SeekToFirst() override {
    WriteBatchIndexEntry search_entry(
        nullptr /* search_key */, column_family_id_,
        true /* is_forward_direction */, true /* is_seek_to_first */);
    skip_list_iter_.Seek(&search_entry);
  }

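  // SeekToLast positions the iterator by seeking to the smallest possible
  // entry of the next column family (column_family_id_ + 1) and stepping back
  // one entry; if that seek falls off the end of the skip list, the list's
  // last entry is used instead. Valid() then filters out the case where this
  // column family has no entries at all.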
  void SeekToLast() override {
    WriteBatchIndexEntry search_entry(
        nullptr /* search_key */, column_family_id_ + 1,
        true /* is_forward_direction */, true /* is_seek_to_first */);
    skip_list_iter_.Seek(&search_entry);
    if (!skip_list_iter_.Valid()) {
      skip_list_iter_.SeekToLast();
    } else {
      skip_list_iter_.Prev();
    }
  }

  void Seek(const Slice& key) override {
    WriteBatchIndexEntry search_entry(&key, column_family_id_,
                                      true /* is_forward_direction */,
                                      false /* is_seek_to_first */);
    skip_list_iter_.Seek(&search_entry);
  }

  void SeekForPrev(const Slice& key) override {
    WriteBatchIndexEntry search_entry(&key, column_family_id_,
                                      false /* is_forward_direction */,
                                      false /* is_seek_to_first */);
    skip_list_iter_.SeekForPrev(&search_entry);
  }

  void Next() override { skip_list_iter_.Next(); }

  void Prev() override { skip_list_iter_.Prev(); }

  WriteEntry Entry() const override {
    WriteEntry ret;
    Slice blob, xid;
    const WriteBatchIndexEntry* iter_entry = skip_list_iter_.key();
    // this is guaranteed by Valid()
    assert(iter_entry != nullptr &&
           iter_entry->column_family == column_family_id_);
    auto s = write_batch_->GetEntryFromDataOffset(
        iter_entry->offset, &ret.type, &ret.key, &ret.value, &blob, &xid);
    assert(s.ok());
    assert(ret.type == kPutRecord || ret.type == kDeleteRecord ||
           ret.type == kSingleDeleteRecord || ret.type == kDeleteRangeRecord ||
           ret.type == kMergeRecord);
    return ret;
  }

  Status status() const override {
    // this is an in-memory data structure, so the only way status can be
    // non-ok is through memory corruption
    return Status::OK();
  }

  const WriteBatchIndexEntry* GetRawEntry() const {
    return skip_list_iter_.key();
  }

 private:
  uint32_t column_family_id_;
  WriteBatchEntrySkipList::Iterator skip_list_iter_;
  const ReadableWriteBatch* write_batch_;
};

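// Rep owns the underlying ReadableWriteBatch plus an arena-backed skip list of
// WriteBatchIndexEntry objects. An index entry stores offsets into the batch's
// serialized data rather than a copy of the key, so indexing stays cheap even
// for large keys and values.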
struct WriteBatchWithIndex::Rep {
  explicit Rep(const Comparator* index_comparator, size_t reserved_bytes = 0,
               size_t max_bytes = 0, bool _overwrite_key = false)
      : write_batch(reserved_bytes, max_bytes),
        comparator(index_comparator, &write_batch),
        skip_list(comparator, &arena),
        overwrite_key(_overwrite_key),
        last_entry_offset(0),
        last_sub_batch_offset(0),
        sub_batch_cnt(1) {}
  ReadableWriteBatch write_batch;
  WriteBatchEntryComparator comparator;
  Arena arena;
  WriteBatchEntrySkipList skip_list;
  bool overwrite_key;
  size_t last_entry_offset;
  // The starting offset of the last sub-batch. A sub-batch starts right before
  // inserting a key that is a duplicate of a key in the last sub-batch. Zero,
  // the default, means that no duplicate key is detected so far.
  size_t last_sub_batch_offset;
  // Total number of sub-batches in the write batch. Default is 1.
  size_t sub_batch_cnt;

  // Remember current offset of internal write batch, which is used as
  // the starting offset of the next record.
  void SetLastEntryOffset() { last_entry_offset = write_batch.GetDataSize(); }

  // In overwrite mode, find the existing entry for the same key and update it
  // to point to the current entry.
  // Return true if the key is found and updated.
  bool UpdateExistingEntry(ColumnFamilyHandle* column_family, const Slice& key);
  bool UpdateExistingEntryWithCfId(uint32_t column_family_id, const Slice& key);

  // Add the recent entry to the update.
  // In overwrite mode, if key already exists in the index, update it.
  void AddOrUpdateIndex(ColumnFamilyHandle* column_family, const Slice& key);
  void AddOrUpdateIndex(const Slice& key);

  // Allocate an index entry pointing to the last entry in the write batch and
  // put it to skip list.
  void AddNewEntry(uint32_t column_family_id);

  // Clear all updates buffered in this batch.
  void Clear();
  void ClearIndex();

  // Rebuild index by reading all records from the batch.
  // Returns non-ok status on corruption.
  Status ReBuildIndex();
};

bool WriteBatchWithIndex::Rep::UpdateExistingEntry(
    ColumnFamilyHandle* column_family, const Slice& key) {
  uint32_t cf_id = GetColumnFamilyID(column_family);
  return UpdateExistingEntryWithCfId(cf_id, key);
}

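// In overwrite mode a repeated write to a key does not get a new index entry;
// the existing entry is redirected to the newest record. A duplicate of a key
// already in the current sub-batch also advances the sub-batch bookkeeping
// (see last_sub_batch_offset in Rep).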
bool WriteBatchWithIndex::Rep::UpdateExistingEntryWithCfId(
    uint32_t column_family_id, const Slice& key) {
  if (!overwrite_key) {
    return false;
  }

  WBWIIteratorImpl iter(column_family_id, &skip_list, &write_batch);
  iter.Seek(key);
  if (!iter.Valid()) {
    return false;
  }
  if (comparator.CompareKey(column_family_id, key, iter.Entry().key) != 0) {
    return false;
  }
  WriteBatchIndexEntry* non_const_entry =
      const_cast<WriteBatchIndexEntry*>(iter.GetRawEntry());
  if (LIKELY(last_sub_batch_offset <= non_const_entry->offset)) {
    last_sub_batch_offset = last_entry_offset;
    sub_batch_cnt++;
  }
  non_const_entry->offset = last_entry_offset;
  return true;
}

void WriteBatchWithIndex::Rep::AddOrUpdateIndex(
    ColumnFamilyHandle* column_family, const Slice& key) {
  if (!UpdateExistingEntry(column_family, key)) {
    uint32_t cf_id = GetColumnFamilyID(column_family);
    const auto* cf_cmp = GetColumnFamilyUserComparator(column_family);
    if (cf_cmp != nullptr) {
      comparator.SetComparatorForCF(cf_id, cf_cmp);
    }
    AddNewEntry(cf_id);
  }
}

void WriteBatchWithIndex::Rep::AddOrUpdateIndex(const Slice& key) {
  if (!UpdateExistingEntryWithCfId(0, key)) {
    AddNewEntry(0);
  }
}

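// The new index entry does not copy the key. It records the key's offset and
// length inside the write batch's data buffer; offsets are used rather than
// pointers because the buffer may be reallocated as the batch grows.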
void WriteBatchWithIndex::Rep::AddNewEntry(uint32_t column_family_id) {
  const std::string& wb_data = write_batch.Data();
  Slice entry_ptr = Slice(wb_data.data() + last_entry_offset,
                          wb_data.size() - last_entry_offset);
  // Extract key
  Slice key;
  bool success __attribute__((__unused__));
  success =
      ReadKeyFromWriteBatchEntry(&entry_ptr, &key, column_family_id != 0);
  assert(success);

  auto* mem = arena.Allocate(sizeof(WriteBatchIndexEntry));
  auto* index_entry =
      new (mem) WriteBatchIndexEntry(last_entry_offset, column_family_id,
                                     key.data() - wb_data.data(), key.size());
  skip_list.Insert(index_entry);
}

void WriteBatchWithIndex::Rep::Clear() {
  write_batch.Clear();
  ClearIndex();
}

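// Skip-list nodes live in the arena, so the index cannot be emptied piecemeal.
// ClearIndex() destroys both objects and reconstructs them in place with
// placement new, releasing all index memory at once.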
void WriteBatchWithIndex::Rep::ClearIndex() {
  skip_list.~WriteBatchEntrySkipList();
  arena.~Arena();
  new (&arena) Arena();
  new (&skip_list) WriteBatchEntrySkipList(comparator, &arena);
  last_entry_offset = 0;
  last_sub_batch_offset = 0;
  sub_batch_cnt = 1;
}

Status WriteBatchWithIndex::Rep::ReBuildIndex() {
  Status s;

  ClearIndex();

  if (write_batch.Count() == 0) {
    // Nothing to re-index
    return s;
  }

  size_t offset = WriteBatchInternal::GetFirstOffset(&write_batch);

  Slice input(write_batch.Data());
  input.remove_prefix(offset);

  // Loop through all entries in Rep and add each one to the index
  uint32_t found = 0;
  while (s.ok() && !input.empty()) {
    Slice key, value, blob, xid;
    uint32_t column_family_id = 0;  // default
    char tag = 0;

    // set offset of current entry for call to AddNewEntry()
    last_entry_offset = input.data() - write_batch.Data().data();

    s = ReadRecordFromWriteBatch(&input, &tag, &column_family_id, &key,
                                 &value, &blob, &xid);
    if (!s.ok()) {
      break;
    }

    switch (tag) {
      case kTypeColumnFamilyValue:
      case kTypeValue:
      case kTypeColumnFamilyDeletion:
      case kTypeDeletion:
      case kTypeColumnFamilySingleDeletion:
      case kTypeSingleDeletion:
      case kTypeColumnFamilyMerge:
      case kTypeMerge:
        found++;
        if (!UpdateExistingEntryWithCfId(column_family_id, key)) {
          AddNewEntry(column_family_id);
        }
        break;
      case kTypeLogData:
      case kTypeBeginPrepareXID:
      case kTypeBeginPersistedPrepareXID:
      case kTypeBeginUnprepareXID:
      case kTypeEndPrepareXID:
      case kTypeCommitXID:
      case kTypeRollbackXID:
      case kTypeNoop:
        break;
      default:
        return Status::Corruption("unknown WriteBatch tag in ReBuildIndex",
                                  ToString(static_cast<unsigned int>(tag)));
    }
  }

  if (s.ok() && found != write_batch.Count()) {
    s = Status::Corruption("WriteBatch has wrong count");
  }

  return s;
}

WriteBatchWithIndex::WriteBatchWithIndex(
    const Comparator* default_index_comparator, size_t reserved_bytes,
    bool overwrite_key, size_t max_bytes)
    : rep(new Rep(default_index_comparator, reserved_bytes, max_bytes,
                  overwrite_key)) {}

WriteBatchWithIndex::~WriteBatchWithIndex() {}

WriteBatchWithIndex::WriteBatchWithIndex(WriteBatchWithIndex&&) = default;

WriteBatchWithIndex& WriteBatchWithIndex::operator=(WriteBatchWithIndex&&) =
    default;

WriteBatch* WriteBatchWithIndex::GetWriteBatch() { return &rep->write_batch; }

size_t WriteBatchWithIndex::SubBatchCnt() { return rep->sub_batch_cnt; }

WBWIIterator* WriteBatchWithIndex::NewIterator() {
  return new WBWIIteratorImpl(0, &(rep->skip_list), &rep->write_batch);
}

WBWIIterator* WriteBatchWithIndex::NewIterator(
    ColumnFamilyHandle* column_family) {
  return new WBWIIteratorImpl(GetColumnFamilyID(column_family),
                              &(rep->skip_list), &rep->write_batch);
}

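// NewIteratorWithBase merges a database iterator with this batch's delta so
// the caller sees the DB as if the batch had already been applied. This
// requires overwrite_key == true; otherwise a key may have several entries in
// the delta and the merged view would be ambiguous. Illustrative call pattern
// (the returned iterator takes ownership of base_iterator):
//
//   Iterator* base = db->NewIterator(read_options, cfh);
//   std::unique_ptr<Iterator> iter(
//       wbwi->NewIteratorWithBase(cfh, base, &read_options));
//   for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { /* ... */ }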
Iterator* WriteBatchWithIndex::NewIteratorWithBase(
    ColumnFamilyHandle* column_family, Iterator* base_iterator,
    const ReadOptions* read_options) {
  if (rep->overwrite_key == false) {
    assert(false);
    return nullptr;
  }
  return new BaseDeltaIterator(base_iterator, NewIterator(column_family),
                               GetColumnFamilyUserComparator(column_family),
                               read_options);
}

Iterator* WriteBatchWithIndex::NewIteratorWithBase(Iterator* base_iterator) {
  if (rep->overwrite_key == false) {
    assert(false);
    return nullptr;
  }
  // default column family's comparator
  return new BaseDeltaIterator(base_iterator, NewIterator(),
                               rep->comparator.default_comparator());
}

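// The keyed write operations below (Put/Delete/SingleDelete/Merge) all follow
// the same pattern: record the current end of the batch (SetLastEntryOffset),
// append the record to the underlying WriteBatch, and only on success add or
// redirect the index entry for the key. PutLogData is not indexed because log
// data has no key.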
Status WriteBatchWithIndex::Put(ColumnFamilyHandle* column_family,
                                const Slice& key, const Slice& value) {
  rep->SetLastEntryOffset();
  auto s = rep->write_batch.Put(column_family, key, value);
  if (s.ok()) {
    rep->AddOrUpdateIndex(column_family, key);
  }
  return s;
}

Status WriteBatchWithIndex::Put(const Slice& key, const Slice& value) {
  rep->SetLastEntryOffset();
  auto s = rep->write_batch.Put(key, value);
  if (s.ok()) {
    rep->AddOrUpdateIndex(key);
  }
  return s;
}

Status WriteBatchWithIndex::Delete(ColumnFamilyHandle* column_family,
                                   const Slice& key) {
  rep->SetLastEntryOffset();
  auto s = rep->write_batch.Delete(column_family, key);
  if (s.ok()) {
    rep->AddOrUpdateIndex(column_family, key);
  }
  return s;
}

Status WriteBatchWithIndex::Delete(const Slice& key) {
  rep->SetLastEntryOffset();
  auto s = rep->write_batch.Delete(key);
  if (s.ok()) {
    rep->AddOrUpdateIndex(key);
  }
  return s;
}

Status WriteBatchWithIndex::SingleDelete(ColumnFamilyHandle* column_family,
                                         const Slice& key) {
  rep->SetLastEntryOffset();
  auto s = rep->write_batch.SingleDelete(column_family, key);
  if (s.ok()) {
    rep->AddOrUpdateIndex(column_family, key);
  }
  return s;
}

Status WriteBatchWithIndex::SingleDelete(const Slice& key) {
  rep->SetLastEntryOffset();
  auto s = rep->write_batch.SingleDelete(key);
  if (s.ok()) {
    rep->AddOrUpdateIndex(key);
  }
  return s;
}

Status WriteBatchWithIndex::Merge(ColumnFamilyHandle* column_family,
                                  const Slice& key, const Slice& value) {
  rep->SetLastEntryOffset();
  auto s = rep->write_batch.Merge(column_family, key, value);
  if (s.ok()) {
    rep->AddOrUpdateIndex(column_family, key);
  }
  return s;
}

Status WriteBatchWithIndex::Merge(const Slice& key, const Slice& value) {
  rep->SetLastEntryOffset();
  auto s = rep->write_batch.Merge(key, value);
  if (s.ok()) {
    rep->AddOrUpdateIndex(key);
  }
  return s;
}

Status WriteBatchWithIndex::PutLogData(const Slice& blob) {
  return rep->write_batch.PutLogData(blob);
}

void WriteBatchWithIndex::Clear() { rep->Clear(); }

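// GetFromBatch only consults this batch; it never reads from the DB. Merge
// operands that cannot be fully resolved within the batch are reported as
// Status::MergeInProgress().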
Status WriteBatchWithIndex::GetFromBatch(ColumnFamilyHandle* column_family,
                                         const DBOptions& options,
                                         const Slice& key, std::string* value) {
  Status s;
  MergeContext merge_context;
  const ImmutableDBOptions immuable_db_options(options);

  WriteBatchWithIndexInternal::Result result =
      WriteBatchWithIndexInternal::GetFromBatch(
          immuable_db_options, this, column_family, key, &merge_context,
          &rep->comparator, value, rep->overwrite_key, &s);

  switch (result) {
    case WriteBatchWithIndexInternal::Result::kFound:
    case WriteBatchWithIndexInternal::Result::kError:
      // use returned status
      break;
    case WriteBatchWithIndexInternal::Result::kDeleted:
    case WriteBatchWithIndexInternal::Result::kNotFound:
      s = Status::NotFound();
      break;
    case WriteBatchWithIndexInternal::Result::kMergeInProgress:
      s = Status::MergeInProgress();
      break;
    default:
      assert(false);
  }

  return s;
}

Status WriteBatchWithIndex::GetFromBatchAndDB(DB* db,
                                              const ReadOptions& read_options,
                                              const Slice& key,
                                              std::string* value) {
  assert(value != nullptr);
  PinnableSlice pinnable_val(value);
  assert(!pinnable_val.IsPinned());
  auto s = GetFromBatchAndDB(db, read_options, db->DefaultColumnFamily(), key,
                             &pinnable_val);
  if (s.ok() && pinnable_val.IsPinned()) {
    value->assign(pinnable_val.data(), pinnable_val.size());
  }  // else value is already assigned
  return s;
}

Status WriteBatchWithIndex::GetFromBatchAndDB(DB* db,
                                              const ReadOptions& read_options,
                                              const Slice& key,
                                              PinnableSlice* pinnable_val) {
  return GetFromBatchAndDB(db, read_options, db->DefaultColumnFamily(), key,
                           pinnable_val);
}

Status WriteBatchWithIndex::GetFromBatchAndDB(DB* db,
                                              const ReadOptions& read_options,
                                              ColumnFamilyHandle* column_family,
                                              const Slice& key,
                                              std::string* value) {
  assert(value != nullptr);
  PinnableSlice pinnable_val(value);
  assert(!pinnable_val.IsPinned());
  auto s =
      GetFromBatchAndDB(db, read_options, column_family, key, &pinnable_val);
  if (s.ok() && pinnable_val.IsPinned()) {
    value->assign(pinnable_val.data(), pinnable_val.size());
  }  // else value is already assigned
  return s;
}

Status WriteBatchWithIndex::GetFromBatchAndDB(DB* db,
                                              const ReadOptions& read_options,
                                              ColumnFamilyHandle* column_family,
                                              const Slice& key,
                                              PinnableSlice* pinnable_val) {
  return GetFromBatchAndDB(db, read_options, column_family, key, pinnable_val,
                           nullptr);
}

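// Full lookup path: check the batch's index first, then fall back to the DB.
// If the batch holds unresolved merge operands, the value fetched from the DB
// (or a missing base value) is combined with those operands through the column
// family's merge operator before being returned.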
Status WriteBatchWithIndex::GetFromBatchAndDB(
    DB* db, const ReadOptions& read_options, ColumnFamilyHandle* column_family,
    const Slice& key, PinnableSlice* pinnable_val, ReadCallback* callback) {
  Status s;
  MergeContext merge_context;
  const ImmutableDBOptions& immuable_db_options =
      static_cast_with_check<DBImpl>(db->GetRootDB())->immutable_db_options();

  // Since the lifetime of the WriteBatch is the same as that of the transaction
  // we cannot pin it as otherwise the returned value will not be available
  // after the transaction finishes.
  std::string& batch_value = *pinnable_val->GetSelf();
  WriteBatchWithIndexInternal::Result result =
      WriteBatchWithIndexInternal::GetFromBatch(
          immuable_db_options, this, column_family, key, &merge_context,
          &rep->comparator, &batch_value, rep->overwrite_key, &s);

  if (result == WriteBatchWithIndexInternal::Result::kFound) {
    pinnable_val->PinSelf();
    return s;
  }
  if (result == WriteBatchWithIndexInternal::Result::kDeleted) {
    return Status::NotFound();
  }
  if (result == WriteBatchWithIndexInternal::Result::kError) {
    return s;
  }
  if (result == WriteBatchWithIndexInternal::Result::kMergeInProgress &&
      rep->overwrite_key == true) {
    // Since we've overwritten keys, we do not know what other operations are
    // in this batch for this key, so we cannot do a Merge to compute the
    // result. Instead, we will simply return MergeInProgress.
    return Status::MergeInProgress();
  }

  assert(result == WriteBatchWithIndexInternal::Result::kMergeInProgress ||
         result == WriteBatchWithIndexInternal::Result::kNotFound);

  // Did not find key in batch OR could not resolve Merges. Try DB.
  if (!callback) {
    s = db->Get(read_options, column_family, key, pinnable_val);
  } else {
    DBImpl::GetImplOptions get_impl_options;
    get_impl_options.column_family = column_family;
    get_impl_options.value = pinnable_val;
    get_impl_options.callback = callback;
    s = static_cast_with_check<DBImpl>(db->GetRootDB())
            ->GetImpl(read_options, key, get_impl_options);
  }

  if (s.ok() || s.IsNotFound()) {  // DB Get Succeeded
    if (result == WriteBatchWithIndexInternal::Result::kMergeInProgress) {
      // Merge result from DB with merges in Batch
      auto cfh = static_cast_with_check<ColumnFamilyHandleImpl>(column_family);
      const MergeOperator* merge_operator =
          cfh->cfd()->ioptions()->merge_operator;
      Statistics* statistics = immuable_db_options.statistics.get();
      Env* env = immuable_db_options.env;
      Logger* logger = immuable_db_options.info_log.get();

      Slice* merge_data;
      if (s.ok()) {
        merge_data = pinnable_val;
      } else {  // Key not present in db (s.IsNotFound())
        merge_data = nullptr;
      }

      if (merge_operator) {
        std::string merge_result;
        s = MergeHelper::TimedFullMerge(merge_operator, key, merge_data,
                                        merge_context.GetOperands(),
                                        &merge_result, logger, statistics, env);
        pinnable_val->Reset();
        *pinnable_val->GetSelf() = std::move(merge_result);
        pinnable_val->PinSelf();
      } else {
        s = Status::InvalidArgument("Options::merge_operator must be set");
      }
    }
  }

  return s;
}

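// MultiGet counterpart of GetFromBatchAndDB: each key is first looked up in
// the batch; keys that remain unresolved are handed to
// DBImpl::MultiGetWithCallback in a single call, and any pending merge
// operands are then applied on top of the values the DB returned.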
void WriteBatchWithIndex::MultiGetFromBatchAndDB(
    DB* db, const ReadOptions& read_options, ColumnFamilyHandle* column_family,
    const size_t num_keys, const Slice* keys, PinnableSlice* values,
    Status* statuses, bool sorted_input) {
  MultiGetFromBatchAndDB(db, read_options, column_family, num_keys, keys,
                         values, statuses, sorted_input, nullptr);
}

void WriteBatchWithIndex::MultiGetFromBatchAndDB(
    DB* db, const ReadOptions& read_options, ColumnFamilyHandle* column_family,
    const size_t num_keys, const Slice* keys, PinnableSlice* values,
    Status* statuses, bool sorted_input, ReadCallback* callback) {
  const ImmutableDBOptions& immuable_db_options =
      static_cast_with_check<DBImpl>(db->GetRootDB())->immutable_db_options();

  autovector<KeyContext, MultiGetContext::MAX_BATCH_SIZE> key_context;
  autovector<KeyContext*, MultiGetContext::MAX_BATCH_SIZE> sorted_keys;
  // To hold merges from the write batch
  autovector<std::pair<WriteBatchWithIndexInternal::Result, MergeContext>,
             MultiGetContext::MAX_BATCH_SIZE>
      merges;
  // Since the lifetime of the WriteBatch is the same as that of the transaction
  // we cannot pin it as otherwise the returned value will not be available
  // after the transaction finishes.
  for (size_t i = 0; i < num_keys; ++i) {
    MergeContext merge_context;
    PinnableSlice* pinnable_val = &values[i];
    std::string& batch_value = *pinnable_val->GetSelf();
    Status* s = &statuses[i];
    WriteBatchWithIndexInternal::Result result =
        WriteBatchWithIndexInternal::GetFromBatch(
            immuable_db_options, this, column_family, keys[i], &merge_context,
            &rep->comparator, &batch_value, rep->overwrite_key, s);

    if (result == WriteBatchWithIndexInternal::Result::kFound) {
      pinnable_val->PinSelf();
      continue;
    }
    if (result == WriteBatchWithIndexInternal::Result::kDeleted) {
      *s = Status::NotFound();
      continue;
    }
    if (result == WriteBatchWithIndexInternal::Result::kError) {
      continue;
    }
    if (result == WriteBatchWithIndexInternal::Result::kMergeInProgress &&
        rep->overwrite_key == true) {
      // Since we've overwritten keys, we do not know what other operations are
      // in this batch for this key, so we cannot do a Merge to compute the
      // result. Instead, we will simply return MergeInProgress.
      *s = Status::MergeInProgress();
      continue;
    }

    assert(result == WriteBatchWithIndexInternal::Result::kMergeInProgress ||
           result == WriteBatchWithIndexInternal::Result::kNotFound);
    key_context.emplace_back(column_family, keys[i], &values[i],
                             /*timestamp*/ nullptr, &statuses[i]);
    merges.emplace_back(result, std::move(merge_context));
  }

  for (KeyContext& key : key_context) {
    sorted_keys.emplace_back(&key);
  }

  // Did not find key in batch OR could not resolve Merges. Try DB.
  static_cast_with_check<DBImpl>(db->GetRootDB())
      ->PrepareMultiGetKeys(key_context.size(), sorted_input, &sorted_keys);
  static_cast_with_check<DBImpl>(db->GetRootDB())
      ->MultiGetWithCallback(read_options, column_family, callback,
                             &sorted_keys);

  ColumnFamilyHandleImpl* cfh =
      static_cast_with_check<ColumnFamilyHandleImpl>(column_family);
  const MergeOperator* merge_operator = cfh->cfd()->ioptions()->merge_operator;
  for (auto iter = key_context.begin(); iter != key_context.end(); ++iter) {
    KeyContext& key = *iter;
    if (key.s->ok() || key.s->IsNotFound()) {  // DB Get Succeeded
      size_t index = iter - key_context.begin();
      std::pair<WriteBatchWithIndexInternal::Result, MergeContext>&
          merge_result = merges[index];
      if (merge_result.first ==
          WriteBatchWithIndexInternal::Result::kMergeInProgress) {
        // Merge result from DB with merges in Batch
        Statistics* statistics = immuable_db_options.statistics.get();
        Env* env = immuable_db_options.env;
        Logger* logger = immuable_db_options.info_log.get();

        Slice* merge_data;
        if (key.s->ok()) {
          merge_data = iter->value;
        } else {  // Key not present in db (s.IsNotFound())
          merge_data = nullptr;
        }

        if (merge_operator) {
          *key.s = MergeHelper::TimedFullMerge(
              merge_operator, *key.key, merge_data,
              merge_result.second.GetOperands(), key.value->GetSelf(), logger,
              statistics, env);
          key.value->PinSelf();
        } else {
          *key.s =
              Status::InvalidArgument("Options::merge_operator must be set");
        }
      }
    }
  }
}

void WriteBatchWithIndex::SetSavePoint() { rep->write_batch.SetSavePoint(); }
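
// RollbackToSavePoint cannot remove individual skip-list entries, so after the
// underlying WriteBatch rolls back it resets the sub-batch counters and
// rebuilds the entire index from the records that remain.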
Status WriteBatchWithIndex::RollbackToSavePoint() {
  Status s = rep->write_batch.RollbackToSavePoint();

  if (s.ok()) {
    rep->sub_batch_cnt = 1;
    rep->last_sub_batch_offset = 0;
    s = rep->ReBuildIndex();
  }

  return s;
}

Status WriteBatchWithIndex::PopSavePoint() {
  return rep->write_batch.PopSavePoint();
}

void WriteBatchWithIndex::SetMaxBytes(size_t max_bytes) {
  rep->write_batch.SetMaxBytes(max_bytes);
}

size_t WriteBatchWithIndex::GetDataSize() const {
  return rep->write_batch.GetDataSize();
}

}  // namespace ROCKSDB_NAMESPACE

#endif  // !ROCKSDB_LITE