// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
|
#ifndef ROCKSDB_LITE
|
|
|
|
|
|
|
|
#include "utilities/transactions/transaction_base.h"
|
|
|
|
|
2015-12-08 21:25:48 +01:00
|
|
|
#include "db/db_impl.h"
|
2015-08-22 00:47:21 +02:00
|
|
|
#include "db/column_family.h"
|
|
|
|
#include "rocksdb/comparator.h"
|
|
|
|
#include "rocksdb/db.h"
|
|
|
|
#include "rocksdb/status.h"
|
|
|
|
#include "util/string_util.h"
|
|
|
|
|
|
|
|
namespace rocksdb {
|
|
|
|
|
|
|
|
// Construct a transaction bound to `db`, caching the comparator of the
// default column family and recording the start time for expiration checks.
TransactionBaseImpl::TransactionBaseImpl(DB* db,
                                         const WriteOptions& write_options)
    : db_(db),
      // static_cast (not reinterpret_cast): performs a proper downcast with
      // any pointer adjustment the hierarchy requires; validity is verified
      // by the dynamic_cast assertion in the body.
      dbimpl_(static_cast<DBImpl*>(db)),
      write_options_(write_options),
      cmp_(GetColumnFamilyUserComparator(db->DefaultColumnFamily())),
      start_time_(db_->GetEnv()->NowMicros()),
      write_batch_(cmp_, 0, true, 0),
      indexing_enabled_(true) {
  assert(dynamic_cast<DBImpl*>(db_) != nullptr);
  log_number_ = 0;
  if (dbimpl_->allow_2pc()) {
    // 2PC batches carry a leading noop record as a placeholder slot.
    WriteBatchInternal::InsertNoop(write_batch_.GetWriteBatch());
  }
}
|
2015-08-22 00:47:21 +02:00
|
|
|
|
2016-02-03 04:19:17 +01:00
|
|
|
TransactionBaseImpl::~TransactionBaseImpl() {
  // Release snapshot if snapshot is set
  // (passing nullptr resets snapshot_, whose deleter returns the snapshot
  // to the DB via ReleaseSnapshot).
  SetSnapshotInternal(nullptr);
}
|
2015-08-22 00:47:21 +02:00
|
|
|
|
2015-08-25 04:13:18 +02:00
|
|
|
// Reset all per-transaction state (savepoints, buffered writes, tracked
// keys, op counters) to a freshly-constructed state.
void TransactionBaseImpl::Clear() {
  save_points_.reset(nullptr);
  write_batch_.Clear();
  commit_time_batch_.Clear();
  tracked_keys_.clear();
  num_puts_ = 0;
  num_deletes_ = 0;
  num_merges_ = 0;

  if (dbimpl_->allow_2pc()) {
    // Re-insert the leading noop placeholder that 2PC batches require
    // (it was discarded by write_batch_.Clear() above).
    WriteBatchInternal::InsertNoop(write_batch_.GetWriteBatch());
  }
}
|
|
|
|
|
2016-03-04 00:36:26 +01:00
|
|
|
// Re-arm this object for a brand-new transaction against `db`, avoiding a
// full reallocation. Mirrors the constructor's initialization.
void TransactionBaseImpl::Reinitialize(DB* db,
                                       const WriteOptions& write_options) {
  Clear();
  ClearSnapshot();
  db_ = db;
  // Bug fix: keep the cached DBImpl pointer in sync with the new db.
  // Previously only db_ was updated, leaving dbimpl_ pointing at the old
  // database if a transaction object was reinitialized against another DB.
  dbimpl_ = static_cast<DBImpl*>(db);
  assert(dynamic_cast<DBImpl*>(db_) != nullptr);
  name_.clear();
  log_number_ = 0;
  write_options_ = write_options;
  start_time_ = db_->GetEnv()->NowMicros();
  indexing_enabled_ = true;
  cmp_ = GetColumnFamilyUserComparator(db_->DefaultColumnFamily());
}
|
|
|
|
|
2015-08-22 00:47:21 +02:00
|
|
|
// Acquire a fresh snapshot from the DB for write-conflict boundary checking
// and install it on this transaction.
void TransactionBaseImpl::SetSnapshot() {
  const Snapshot* snapshot = dbimpl_->GetSnapshotForWriteConflictBoundary();
  SetSnapshotInternal(snapshot);
}
|
|
|
|
|
|
|
|
// Install `snapshot` (may be nullptr to clear) as this transaction's
// snapshot and cancel any pending SetSnapshotOnNextOperation request.
void TransactionBaseImpl::SetSnapshotInternal(const Snapshot* snapshot) {
  // Set a custom deleter for the snapshot_ SharedPtr as the snapshot needs to
  // be released, not deleted when it is no longer referenced.
  snapshot_.reset(snapshot, std::bind(&TransactionBaseImpl::ReleaseSnapshot,
                                      this, std::placeholders::_1, db_));
  // Installing a snapshot satisfies (and clears) any deferred request.
  snapshot_needed_ = false;
  snapshot_notifier_ = nullptr;
}
|
|
|
|
|
2015-12-04 19:12:27 +01:00
|
|
|
// Defer snapshot acquisition until the next operation that calls
// SetSnapshotIfNeeded(); `notifier` (may be null) is invoked once the
// snapshot is actually created.
void TransactionBaseImpl::SetSnapshotOnNextOperation(
    std::shared_ptr<TransactionNotifier> notifier) {
  snapshot_needed_ = true;
  snapshot_notifier_ = notifier;
}
|
|
|
|
|
|
|
|
void TransactionBaseImpl::SetSnapshotIfNeeded() {
|
|
|
|
if (snapshot_needed_) {
|
2015-12-04 19:12:27 +01:00
|
|
|
std::shared_ptr<TransactionNotifier> notifier = snapshot_notifier_;
|
2015-09-28 21:12:17 +02:00
|
|
|
SetSnapshot();
|
2015-12-04 19:12:27 +01:00
|
|
|
if (notifier != nullptr) {
|
|
|
|
notifier->SnapshotCreated(GetSnapshot());
|
|
|
|
}
|
2015-09-28 21:12:17 +02:00
|
|
|
}
|
2015-08-22 00:47:21 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// SliceParts overload: flatten the multi-part key into one contiguous
// string, then defer to the single-Slice TryLock.
Status TransactionBaseImpl::TryLock(ColumnFamilyHandle* column_family,
                                    const SliceParts& key, bool read_only,
                                    bool exclusive, bool untracked) {
  size_t total_size = 0;
  for (int part = 0; part < key.num_parts; ++part) {
    total_size += key.parts[part].size();
  }

  // Reserve once so the append loop below performs no reallocations.
  std::string flat_key;
  flat_key.reserve(total_size);
  for (int part = 0; part < key.num_parts; ++part) {
    flat_key.append(key.parts[part].data(), key.parts[part].size());
  }

  return TryLock(column_family, flat_key, read_only, exclusive, untracked);
}
|
|
|
|
|
|
|
|
void TransactionBaseImpl::SetSavePoint() {
|
|
|
|
if (save_points_ == nullptr) {
|
2015-08-25 04:13:18 +02:00
|
|
|
save_points_.reset(new std::stack<TransactionBaseImpl::SavePoint>());
|
2015-08-22 00:47:21 +02:00
|
|
|
}
|
2015-12-04 19:12:27 +01:00
|
|
|
save_points_->emplace(snapshot_, snapshot_needed_, snapshot_notifier_,
|
|
|
|
num_puts_, num_deletes_, num_merges_);
|
2016-01-28 02:11:44 +01:00
|
|
|
write_batch_.SetSavePoint();
|
2015-08-22 00:47:21 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Undo everything done since the most recent SetSavePoint(): restores the
// snapshot state and op counters, rolls back the write batch, and reverses
// the per-key read/write tracking recorded by that savepoint.
// Returns Status::NotFound() if no savepoint is set.
Status TransactionBaseImpl::RollbackToSavePoint() {
  if (save_points_ != nullptr && save_points_->size() > 0) {
    // Restore saved SavePoint
    TransactionBaseImpl::SavePoint& save_point = save_points_->top();
    snapshot_ = save_point.snapshot_;
    snapshot_needed_ = save_point.snapshot_needed_;
    snapshot_notifier_ = save_point.snapshot_notifier_;
    num_puts_ = save_point.num_puts_;
    num_deletes_ = save_point.num_deletes_;
    num_merges_ = save_point.num_merges_;

    // Rollback batch
    Status s = write_batch_.RollbackToSavePoint();
    assert(s.ok());

    // Rollback any keys that were tracked since the last savepoint
    const TransactionKeyMap& key_map = save_point.new_keys_;
    for (const auto& key_map_iter : key_map) {
      uint32_t column_family_id = key_map_iter.first;
      auto& keys = key_map_iter.second;

      auto& cf_tracked_keys = tracked_keys_[column_family_id];

      for (const auto& key_iter : keys) {
        const std::string& key = key_iter.first;
        uint32_t num_reads = key_iter.second.num_reads;
        uint32_t num_writes = key_iter.second.num_writes;

        // Every key tracked in the savepoint must also exist in the
        // transaction-wide map (TrackKey updates both).
        auto tracked_keys_iter = cf_tracked_keys.find(key);
        assert(tracked_keys_iter != cf_tracked_keys.end());

        // Decrement the total reads/writes of this key by the number of
        // reads/writes done since the last SavePoint.
        if (num_reads > 0) {
          assert(tracked_keys_iter->second.num_reads >= num_reads);
          tracked_keys_iter->second.num_reads -= num_reads;
        }
        if (num_writes > 0) {
          assert(tracked_keys_iter->second.num_writes >= num_writes);
          tracked_keys_iter->second.num_writes -= num_writes;
        }
        // Key no longer referenced by any read/write in this transaction:
        // drop it from the tracked set entirely.
        if (tracked_keys_iter->second.num_reads == 0 &&
            tracked_keys_iter->second.num_writes == 0) {
          tracked_keys_[column_family_id].erase(tracked_keys_iter);
        }
      }
    }

    save_points_->pop();

    return s;
  } else {
    // No savepoint on this transaction; the batch must agree.
    assert(write_batch_.RollbackToSavePoint().IsNotFound());
    return Status::NotFound();
  }
}
|
|
|
|
|
|
|
|
// Read `key`, seeing this transaction's own buffered (uncommitted) writes
// first and falling back to the DB.
Status TransactionBaseImpl::Get(const ReadOptions& read_options,
                                ColumnFamilyHandle* column_family,
                                const Slice& key, std::string* value) {
  return write_batch_.GetFromBatchAndDB(db_, read_options, column_family, key,
                                        value);
}
|
|
|
|
|
|
|
|
// Read `key` while locking it (shared or exclusive per `exclusive`) so it
// participates in write-conflict detection. A null `value` locks without
// fetching.
Status TransactionBaseImpl::GetForUpdate(const ReadOptions& read_options,
                                         ColumnFamilyHandle* column_family,
                                         const Slice& key, std::string* value,
                                         bool exclusive) {
  Status status = TryLock(column_family, key, true /* read_only */, exclusive);
  if (!status.ok() || value == nullptr) {
    return status;
  }
  return Get(read_options, column_family, key, value);
}
|
|
|
|
|
|
|
|
std::vector<Status> TransactionBaseImpl::MultiGet(
|
|
|
|
const ReadOptions& read_options,
|
|
|
|
const std::vector<ColumnFamilyHandle*>& column_family,
|
|
|
|
const std::vector<Slice>& keys, std::vector<std::string>* values) {
|
|
|
|
size_t num_keys = keys.size();
|
|
|
|
values->resize(num_keys);
|
|
|
|
|
|
|
|
std::vector<Status> stat_list(num_keys);
|
|
|
|
for (size_t i = 0; i < num_keys; ++i) {
|
|
|
|
std::string* value = values ? &(*values)[i] : nullptr;
|
|
|
|
stat_list[i] = Get(read_options, column_family[i], keys[i], value);
|
|
|
|
}
|
|
|
|
|
|
|
|
return stat_list;
|
|
|
|
}
|
|
|
|
|
|
|
|
std::vector<Status> TransactionBaseImpl::MultiGetForUpdate(
|
|
|
|
const ReadOptions& read_options,
|
|
|
|
const std::vector<ColumnFamilyHandle*>& column_family,
|
|
|
|
const std::vector<Slice>& keys, std::vector<std::string>* values) {
|
|
|
|
// Regardless of whether the MultiGet succeeded, track these keys.
|
|
|
|
size_t num_keys = keys.size();
|
|
|
|
values->resize(num_keys);
|
|
|
|
|
|
|
|
// Lock all keys
|
|
|
|
for (size_t i = 0; i < num_keys; ++i) {
|
2016-12-06 02:18:14 +01:00
|
|
|
Status s = TryLock(column_family[i], keys[i], true /* read_only */,
|
|
|
|
true /* exclusive */);
|
2015-08-22 00:47:21 +02:00
|
|
|
if (!s.ok()) {
|
|
|
|
// Fail entire multiget if we cannot lock all keys
|
|
|
|
return std::vector<Status>(num_keys, s);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO(agiardullo): optimize multiget?
|
|
|
|
std::vector<Status> stat_list(num_keys);
|
|
|
|
for (size_t i = 0; i < num_keys; ++i) {
|
|
|
|
std::string* value = values ? &(*values)[i] : nullptr;
|
|
|
|
stat_list[i] = Get(read_options, column_family[i], keys[i], value);
|
|
|
|
}
|
|
|
|
|
|
|
|
return stat_list;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Iterator over the default column family that merges the DB contents with
// this transaction's uncommitted writes. Caller owns the returned iterator.
Iterator* TransactionBaseImpl::GetIterator(const ReadOptions& read_options) {
  Iterator* db_iter = db_->NewIterator(read_options);
  assert(db_iter);

  return write_batch_.NewIteratorWithBase(db_iter);
}
|
|
|
|
|
|
|
|
// Column-family variant: iterator that merges `column_family`'s DB contents
// with this transaction's uncommitted writes. Caller owns the iterator.
Iterator* TransactionBaseImpl::GetIterator(const ReadOptions& read_options,
                                           ColumnFamilyHandle* column_family) {
  Iterator* db_iter = db_->NewIterator(read_options, column_family);
  assert(db_iter);

  return write_batch_.NewIteratorWithBase(column_family, db_iter);
}
|
|
|
|
|
|
|
|
// Buffer a Put after taking an exclusive, tracked lock on the key.
Status TransactionBaseImpl::Put(ColumnFamilyHandle* column_family,
                                const Slice& key, const Slice& value) {
  Status status =
      TryLock(column_family, key, false /* read_only */, true /* exclusive */);
  if (!status.ok()) {
    return status;
  }

  status = GetBatchForWrite()->Put(column_family, key, value);
  if (status.ok()) {
    num_puts_++;
  }
  return status;
}
|
|
|
|
|
|
|
|
// SliceParts overload: buffer a Put after taking an exclusive, tracked lock.
Status TransactionBaseImpl::Put(ColumnFamilyHandle* column_family,
                                const SliceParts& key,
                                const SliceParts& value) {
  Status status =
      TryLock(column_family, key, false /* read_only */, true /* exclusive */);
  if (!status.ok()) {
    return status;
  }

  status = GetBatchForWrite()->Put(column_family, key, value);
  if (status.ok()) {
    num_puts_++;
  }
  return status;
}
|
|
|
|
|
|
|
|
// Buffer a Merge after taking an exclusive, tracked lock on the key.
Status TransactionBaseImpl::Merge(ColumnFamilyHandle* column_family,
                                  const Slice& key, const Slice& value) {
  Status status =
      TryLock(column_family, key, false /* read_only */, true /* exclusive */);
  if (!status.ok()) {
    return status;
  }

  status = GetBatchForWrite()->Merge(column_family, key, value);
  if (status.ok()) {
    num_merges_++;
  }
  return status;
}
|
|
|
|
|
|
|
|
// Buffer a Delete after taking an exclusive, tracked lock on the key.
Status TransactionBaseImpl::Delete(ColumnFamilyHandle* column_family,
                                   const Slice& key) {
  Status status =
      TryLock(column_family, key, false /* read_only */, true /* exclusive */);
  if (!status.ok()) {
    return status;
  }

  status = GetBatchForWrite()->Delete(column_family, key);
  if (status.ok()) {
    num_deletes_++;
  }
  return status;
}
|
|
|
|
|
|
|
|
// SliceParts overload: buffer a Delete after taking an exclusive lock.
Status TransactionBaseImpl::Delete(ColumnFamilyHandle* column_family,
                                   const SliceParts& key) {
  Status status =
      TryLock(column_family, key, false /* read_only */, true /* exclusive */);
  if (!status.ok()) {
    return status;
  }

  status = GetBatchForWrite()->Delete(column_family, key);
  if (status.ok()) {
    num_deletes_++;
  }
  return status;
}
|
|
|
|
|
2015-09-25 03:31:32 +02:00
|
|
|
// Buffer a SingleDelete after taking an exclusive, tracked lock on the key.
Status TransactionBaseImpl::SingleDelete(ColumnFamilyHandle* column_family,
                                         const Slice& key) {
  Status status =
      TryLock(column_family, key, false /* read_only */, true /* exclusive */);
  if (!status.ok()) {
    return status;
  }

  status = GetBatchForWrite()->SingleDelete(column_family, key);
  if (status.ok()) {
    num_deletes_++;
  }
  return status;
}
|
|
|
|
|
|
|
|
// SliceParts overload: buffer a SingleDelete after taking an exclusive lock.
Status TransactionBaseImpl::SingleDelete(ColumnFamilyHandle* column_family,
                                         const SliceParts& key) {
  Status status =
      TryLock(column_family, key, false /* read_only */, true /* exclusive */);
  if (!status.ok()) {
    return status;
  }

  status = GetBatchForWrite()->SingleDelete(column_family, key);
  if (status.ok()) {
    num_deletes_++;
  }
  return status;
}
|
|
|
|
|
2015-08-22 00:47:21 +02:00
|
|
|
// Untracked Put: locks exclusively but skips key tracking (untracked=true),
// so this write does not participate in savepoint key bookkeeping.
Status TransactionBaseImpl::PutUntracked(ColumnFamilyHandle* column_family,
                                         const Slice& key, const Slice& value) {
  Status status = TryLock(column_family, key, false /* read_only */,
                          true /* exclusive */, true /* untracked */);
  if (!status.ok()) {
    return status;
  }

  status = GetBatchForWrite()->Put(column_family, key, value);
  if (status.ok()) {
    num_puts_++;
  }
  return status;
}
|
|
|
|
|
|
|
|
// SliceParts overload of the untracked Put (see Slice overload above).
Status TransactionBaseImpl::PutUntracked(ColumnFamilyHandle* column_family,
                                         const SliceParts& key,
                                         const SliceParts& value) {
  Status status = TryLock(column_family, key, false /* read_only */,
                          true /* exclusive */, true /* untracked */);
  if (!status.ok()) {
    return status;
  }

  status = GetBatchForWrite()->Put(column_family, key, value);
  if (status.ok()) {
    num_puts_++;
  }
  return status;
}
|
|
|
|
|
|
|
|
// Untracked Merge: locks exclusively but skips key tracking.
Status TransactionBaseImpl::MergeUntracked(ColumnFamilyHandle* column_family,
                                           const Slice& key,
                                           const Slice& value) {
  Status status = TryLock(column_family, key, false /* read_only */,
                          true /* exclusive */, true /* untracked */);
  if (!status.ok()) {
    return status;
  }

  status = GetBatchForWrite()->Merge(column_family, key, value);
  if (status.ok()) {
    num_merges_++;
  }
  return status;
}
|
|
|
|
|
|
|
|
// Untracked Delete: locks exclusively but skips key tracking.
Status TransactionBaseImpl::DeleteUntracked(ColumnFamilyHandle* column_family,
                                            const Slice& key) {
  Status status = TryLock(column_family, key, false /* read_only */,
                          true /* exclusive */, true /* untracked */);
  if (!status.ok()) {
    return status;
  }

  status = GetBatchForWrite()->Delete(column_family, key);
  if (status.ok()) {
    num_deletes_++;
  }
  return status;
}
|
|
|
|
|
|
|
|
// SliceParts overload of the untracked Delete (see Slice overload above).
Status TransactionBaseImpl::DeleteUntracked(ColumnFamilyHandle* column_family,
                                            const SliceParts& key) {
  Status status = TryLock(column_family, key, false /* read_only */,
                          true /* exclusive */, true /* untracked */);
  if (!status.ok()) {
    return status;
  }

  status = GetBatchForWrite()->Delete(column_family, key);
  if (status.ok()) {
    num_deletes_++;
  }
  return status;
}
|
|
|
|
|
|
|
|
// Append a log-data blob to the write batch; it is WAL-only metadata and
// takes no lock since it never touches keyed data.
void TransactionBaseImpl::PutLogData(const Slice& blob) {
  write_batch_.PutLogData(blob);
}
|
|
|
|
|
|
|
|
// Expose the indexed write batch holding this transaction's buffered writes.
WriteBatchWithIndex* TransactionBaseImpl::GetWriteBatch() {
  return &write_batch_;
}
|
|
|
|
|
2015-08-25 04:13:18 +02:00
|
|
|
// Milliseconds elapsed since this transaction was constructed/reinitialized
// (start_time_ is recorded in microseconds).
uint64_t TransactionBaseImpl::GetElapsedTime() const {
  return (db_->GetEnv()->NowMicros() - start_time_) / 1000;
}
|
|
|
|
|
|
|
|
// Number of Put operations buffered by this transaction.
uint64_t TransactionBaseImpl::GetNumPuts() const { return num_puts_; }
|
|
|
|
|
|
|
|
// Number of Delete/SingleDelete operations buffered by this transaction.
uint64_t TransactionBaseImpl::GetNumDeletes() const { return num_deletes_; }
|
|
|
|
|
|
|
|
// Number of Merge operations buffered by this transaction.
uint64_t TransactionBaseImpl::GetNumMerges() const { return num_merges_; }
|
|
|
|
|
2015-09-12 03:10:50 +02:00
|
|
|
uint64_t TransactionBaseImpl::GetNumKeys() const {
|
|
|
|
uint64_t count = 0;
|
|
|
|
|
|
|
|
// sum up locked keys in all column families
|
|
|
|
for (const auto& key_map_iter : tracked_keys_) {
|
|
|
|
const auto& keys = key_map_iter.second;
|
|
|
|
count += keys.size();
|
|
|
|
}
|
|
|
|
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
|
|
|
void TransactionBaseImpl::TrackKey(uint32_t cfh_id, const std::string& key,
|
2017-04-11 00:47:20 +02:00
|
|
|
SequenceNumber seq, bool read_only,
|
|
|
|
bool exclusive) {
|
2015-09-15 02:11:52 +02:00
|
|
|
// Update map of all tracked keys for this transaction
|
2017-04-11 00:47:20 +02:00
|
|
|
TrackKey(&tracked_keys_, cfh_id, key, seq, read_only, exclusive);
|
2015-09-15 02:11:52 +02:00
|
|
|
|
|
|
|
if (save_points_ != nullptr && !save_points_->empty()) {
|
|
|
|
// Update map of tracked keys in this SavePoint
|
2017-04-11 00:47:20 +02:00
|
|
|
TrackKey(&save_points_->top().new_keys_, cfh_id, key, seq, read_only,
|
|
|
|
exclusive);
|
2015-09-15 02:11:52 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add a key to the given TransactionKeyMap
|
|
|
|
void TransactionBaseImpl::TrackKey(TransactionKeyMap* key_map, uint32_t cfh_id,
|
|
|
|
const std::string& key, SequenceNumber seq,
|
2017-04-11 00:47:20 +02:00
|
|
|
bool read_only, bool exclusive) {
|
2015-09-15 02:11:52 +02:00
|
|
|
auto& cf_key_map = (*key_map)[cfh_id];
|
|
|
|
auto iter = cf_key_map.find(key);
|
|
|
|
if (iter == cf_key_map.end()) {
|
|
|
|
auto result = cf_key_map.insert({key, TransactionKeyMapInfo(seq)});
|
|
|
|
iter = result.first;
|
|
|
|
} else if (seq < iter->second.seq) {
|
2015-09-12 03:10:50 +02:00
|
|
|
// Now tracking this key with an earlier sequence number
|
2015-09-15 02:11:52 +02:00
|
|
|
iter->second.seq = seq;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (read_only) {
|
|
|
|
iter->second.num_reads++;
|
|
|
|
} else {
|
|
|
|
iter->second.num_writes++;
|
2015-09-12 03:10:50 +02:00
|
|
|
}
|
2017-04-11 00:47:20 +02:00
|
|
|
iter->second.exclusive |= exclusive;
|
2015-09-12 03:10:50 +02:00
|
|
|
}
|
|
|
|
|
2015-09-15 02:11:52 +02:00
|
|
|
// Return the subset of tracked keys whose every read/write happened after
// the most recent savepoint (i.e. keys that would be fully released by a
// rollback). Returns nullptr when no savepoint is set.
std::unique_ptr<TransactionKeyMap>
TransactionBaseImpl::GetTrackedKeysSinceSavePoint() {
  if (save_points_ != nullptr && !save_points_->empty()) {
    // Examine the number of reads/writes performed on all keys written
    // since the last SavePoint and compare to the total number of reads/writes
    // for each key.
    TransactionKeyMap* result = new TransactionKeyMap();
    for (const auto& key_map_iter : save_points_->top().new_keys_) {
      uint32_t column_family_id = key_map_iter.first;
      auto& keys = key_map_iter.second;

      auto& cf_tracked_keys = tracked_keys_[column_family_id];

      for (const auto& key_iter : keys) {
        const std::string& key = key_iter.first;
        uint32_t num_reads = key_iter.second.num_reads;
        uint32_t num_writes = key_iter.second.num_writes;

        // A savepoint-tracked key must also be in the transaction-wide map,
        // with counters at least as large.
        auto total_key_info = cf_tracked_keys.find(key);
        assert(total_key_info != cf_tracked_keys.end());
        assert(total_key_info->second.num_reads >= num_reads);
        assert(total_key_info->second.num_writes >= num_writes);

        if (total_key_info->second.num_reads == num_reads &&
            total_key_info->second.num_writes == num_writes) {
          // All the reads/writes to this key were done in the last savepoint.
          bool read_only = (num_writes == 0);
          TrackKey(result, column_family_id, key, key_iter.second.seq,
                   read_only, key_iter.second.exclusive);
        }
      }
    }
    return std::unique_ptr<TransactionKeyMap>(result);
  }

  // No SavePoint
  return nullptr;
}
|
|
|
|
|
2015-10-09 22:31:10 +02:00
|
|
|
// Gets the write batch that should be used for Put/Merge/Deletes.
|
|
|
|
//
|
|
|
|
// Returns either a WriteBatch or WriteBatchWithIndex depending on whether
|
|
|
|
// DisableIndexing() has been called.
|
|
|
|
WriteBatchBase* TransactionBaseImpl::GetBatchForWrite() {
|
|
|
|
if (indexing_enabled_) {
|
|
|
|
// Use WriteBatchWithIndex
|
2016-01-28 02:11:44 +01:00
|
|
|
return &write_batch_;
|
2015-10-09 22:31:10 +02:00
|
|
|
} else {
|
|
|
|
// Don't use WriteBatchWithIndex. Return base WriteBatch.
|
2016-01-28 02:11:44 +01:00
|
|
|
return write_batch_.GetWriteBatch();
|
2015-10-09 22:31:10 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-01-28 02:11:44 +01:00
|
|
|
// Custom deleter bound into snapshot_ (see SetSnapshotInternal): returns
// the snapshot to the DB rather than deleting it.
void TransactionBaseImpl::ReleaseSnapshot(const Snapshot* snapshot, DB* db) {
  if (snapshot != nullptr) {
    db->ReleaseSnapshot(snapshot);
  }
}
|
|
|
|
|
2015-09-15 02:11:52 +02:00
|
|
|
// Undo one GetForUpdate on `key`: decrement its tracked read count (in the
// active savepoint first, if any) and release the key's lock when no reads
// or writes remain.
void TransactionBaseImpl::UndoGetForUpdate(ColumnFamilyHandle* column_family,
                                           const Slice& key) {
  uint32_t column_family_id = GetColumnFamilyID(column_family);
  auto& cf_tracked_keys = tracked_keys_[column_family_id];
  std::string key_str = key.ToString();
  bool can_decrement = false;
  // Only consulted in asserts below, hence the unused attribute.
  bool can_unlock __attribute__((unused)) = false;

  if (save_points_ != nullptr && !save_points_->empty()) {
    // Check if this key was fetched ForUpdate in this SavePoint
    auto& cf_savepoint_keys = save_points_->top().new_keys_[column_family_id];

    auto savepoint_iter = cf_savepoint_keys.find(key_str);
    if (savepoint_iter != cf_savepoint_keys.end()) {
      if (savepoint_iter->second.num_reads > 0) {
        savepoint_iter->second.num_reads--;
        can_decrement = true;

        if (savepoint_iter->second.num_reads == 0 &&
            savepoint_iter->second.num_writes == 0) {
          // No other GetForUpdates or write on this key in this SavePoint
          cf_savepoint_keys.erase(savepoint_iter);
          can_unlock = true;
        }
      }
    }
  } else {
    // No SavePoint set
    can_decrement = true;
    can_unlock = true;
  }

  // We can only decrement the read count for this key if we were able to
  // decrement the read count in the current SavePoint, OR if there is no
  // SavePoint set.
  if (can_decrement) {
    auto key_iter = cf_tracked_keys.find(key_str);

    if (key_iter != cf_tracked_keys.end()) {
      if (key_iter->second.num_reads > 0) {
        key_iter->second.num_reads--;

        if (key_iter->second.num_reads == 0 &&
            key_iter->second.num_writes == 0) {
          // No other GetForUpdates or writes on this key
          assert(can_unlock);
          cf_tracked_keys.erase(key_iter);
          UnlockGetForUpdate(column_family, key);
        }
      }
    }
  }
}
|
|
|
|
|
2016-04-18 20:15:50 +02:00
|
|
|
// Rebuild this transaction's indexed write batch by replaying `src_batch`
// through the normal Put/Delete/Merge paths (used to reconstruct prepared
// transactions during recovery). 2PC meta markers are rejected because the
// batches replayed here must contain only data records.
Status TransactionBaseImpl::RebuildFromWriteBatch(WriteBatch* src_batch) {
  struct IndexedWriteBatchBuilder : public WriteBatch::Handler {
    Transaction* txn_;
    DBImpl* db_;
    IndexedWriteBatchBuilder(Transaction* txn, DBImpl* db)
        : txn_(txn), db_(db) {
      assert(dynamic_cast<TransactionBaseImpl*>(txn_) != nullptr);
    }

    Status PutCF(uint32_t cf, const Slice& key, const Slice& val) override {
      return txn_->Put(db_->GetColumnFamilyHandle(cf), key, val);
    }

    Status DeleteCF(uint32_t cf, const Slice& key) override {
      return txn_->Delete(db_->GetColumnFamilyHandle(cf), key);
    }

    Status SingleDeleteCF(uint32_t cf, const Slice& key) override {
      return txn_->SingleDelete(db_->GetColumnFamilyHandle(cf), key);
    }

    Status MergeCF(uint32_t cf, const Slice& key, const Slice& val) override {
      return txn_->Merge(db_->GetColumnFamilyHandle(cf), key, val);
    }

    // this is used for reconstructing prepared transactions upon
    // recovery. there should not be any meta markers in the batches
    // we are processing.
    Status MarkBeginPrepare() override { return Status::InvalidArgument(); }

    Status MarkEndPrepare(const Slice&) override {
      return Status::InvalidArgument();
    }

    Status MarkCommit(const Slice&) override {
      return Status::InvalidArgument();
    }

    Status MarkRollback(const Slice&) override {
      return Status::InvalidArgument();
    }
  };

  IndexedWriteBatchBuilder copycat(this, dbimpl_);
  return src_batch->Iterate(&copycat);
}
|
|
|
|
|
|
|
|
// Batch of writes that will be applied at commit time, bypassing conflict
// checking (caller populates it directly).
WriteBatch* TransactionBaseImpl::GetCommitTimeWriteBatch() {
  return &commit_time_batch_;
}
|
2015-08-22 00:47:21 +02:00
|
|
|
} // namespace rocksdb
|
|
|
|
|
|
|
|
#endif // ROCKSDB_LITE
|