// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#ifndef ROCKSDB_LITE

#include "utilities/transactions/transaction_base.h"

#include <cinttypes>

#include "db/column_family.h"
#include "db/db_impl/db_impl.h"
#include "rocksdb/comparator.h"
#include "rocksdb/db.h"
#include "rocksdb/status.h"
#include "util/cast_util.h"
#include "util/string_util.h"
#include "utilities/transactions/lock/lock_tracker.h"
namespace ROCKSDB_NAMESPACE {

TransactionBaseImpl::TransactionBaseImpl(
    DB* db, const WriteOptions& write_options,
    const LockTrackerFactory& lock_tracker_factory)
    : db_(db),
      dbimpl_(static_cast_with_check<DBImpl>(db)),
      write_options_(write_options),
      cmp_(GetColumnFamilyUserComparator(db->DefaultColumnFamily())),
      lock_tracker_factory_(lock_tracker_factory),
      start_time_(dbimpl_->GetSystemClock()->NowMicros()),
      write_batch_(cmp_, 0, true, 0),
      tracked_locks_(lock_tracker_factory_.Create()),
      indexing_enabled_(true) {
  assert(dynamic_cast<DBImpl*>(db_) != nullptr);
  log_number_ = 0;
  if (dbimpl_->allow_2pc()) {
    InitWriteBatch();
  }
}

TransactionBaseImpl::~TransactionBaseImpl() {
  // Release snapshot if snapshot is set
  SetSnapshotInternal(nullptr);
}

void TransactionBaseImpl::Clear() {
  save_points_.reset(nullptr);
  write_batch_.Clear();
  commit_time_batch_.Clear();
  tracked_locks_->Clear();
  num_puts_ = 0;
  num_deletes_ = 0;
  num_merges_ = 0;

  if (dbimpl_->allow_2pc()) {
    InitWriteBatch();
  }
}

void TransactionBaseImpl::Reinitialize(DB* db,
                                       const WriteOptions& write_options) {
  Clear();
  ClearSnapshot();
  id_ = 0;
  db_ = db;
  name_.clear();
  log_number_ = 0;
  write_options_ = write_options;
  start_time_ = dbimpl_->GetSystemClock()->NowMicros();
  indexing_enabled_ = true;
  cmp_ = GetColumnFamilyUserComparator(db_->DefaultColumnFamily());
}

void TransactionBaseImpl::SetSnapshot() {
  const Snapshot* snapshot = dbimpl_->GetSnapshotForWriteConflictBoundary();
  SetSnapshotInternal(snapshot);
}

void TransactionBaseImpl::SetSnapshotInternal(const Snapshot* snapshot) {
  // Set a custom deleter for the snapshot_ SharedPtr as the snapshot needs to
  // be released, not deleted when it is no longer referenced.
  snapshot_.reset(snapshot, std::bind(&TransactionBaseImpl::ReleaseSnapshot,
                                      this, std::placeholders::_1, db_));
  snapshot_needed_ = false;
  snapshot_notifier_ = nullptr;
}

void TransactionBaseImpl::SetSnapshotOnNextOperation(
    std::shared_ptr<TransactionNotifier> notifier) {
  snapshot_needed_ = true;
  snapshot_notifier_ = notifier;
}

void TransactionBaseImpl::SetSnapshotIfNeeded() {
  if (snapshot_needed_) {
    std::shared_ptr<TransactionNotifier> notifier = snapshot_notifier_;
    SetSnapshot();
    if (notifier != nullptr) {
      notifier->SnapshotCreated(GetSnapshot());
    }
  }
}
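
// Usage sketch (illustrative only): the lazy-snapshot path above as driven
// through the public Transaction API. Assumptions: `txn` comes from
// TransactionDB::BeginTransaction(); `MyNotifier` is a hypothetical
// application subclass of TransactionNotifier.
//
//   txn->SetSnapshotOnNextOperation(std::make_shared<MyNotifier>());
//   // No snapshot is taken yet; SetSnapshotIfNeeded() acquires one when the
//   // next tracked operation runs, and MyNotifier::SnapshotCreated() is then
//   // called with the freshly created snapshot.
//   Status s = txn->Put("key", "value");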

Status TransactionBaseImpl::TryLock(ColumnFamilyHandle* column_family,
                                    const SliceParts& key, bool read_only,
                                    bool exclusive, const bool do_validate,
                                    const bool assume_tracked) {
  size_t key_size = 0;
  for (int i = 0; i < key.num_parts; ++i) {
    key_size += key.parts[i].size();
  }

  std::string str;
  str.reserve(key_size);

  for (int i = 0; i < key.num_parts; ++i) {
    str.append(key.parts[i].data(), key.parts[i].size());
  }

  return TryLock(column_family, str, read_only, exclusive, do_validate,
                 assume_tracked);
}

void TransactionBaseImpl::SetSavePoint() {
  if (save_points_ == nullptr) {
    save_points_.reset(
        new std::stack<TransactionBaseImpl::SavePoint,
                       autovector<TransactionBaseImpl::SavePoint>>());
  }
  save_points_->emplace(snapshot_, snapshot_needed_, snapshot_notifier_,
                        num_puts_, num_deletes_, num_merges_,
                        lock_tracker_factory_);
  write_batch_.SetSavePoint();
}

Status TransactionBaseImpl::RollbackToSavePoint() {
  if (save_points_ != nullptr && save_points_->size() > 0) {
    // Restore saved SavePoint
    TransactionBaseImpl::SavePoint& save_point = save_points_->top();
    snapshot_ = save_point.snapshot_;
    snapshot_needed_ = save_point.snapshot_needed_;
    snapshot_notifier_ = save_point.snapshot_notifier_;
    num_puts_ = save_point.num_puts_;
    num_deletes_ = save_point.num_deletes_;
    num_merges_ = save_point.num_merges_;

    // Rollback batch
    Status s = write_batch_.RollbackToSavePoint();
    assert(s.ok());

    // Rollback any keys that were tracked since the last savepoint
    tracked_locks_->Subtract(*save_point.new_locks_);

    save_points_->pop();

    return s;
  } else {
    assert(write_batch_.RollbackToSavePoint().IsNotFound());
    return Status::NotFound();
  }
}

Status TransactionBaseImpl::PopSavePoint() {
  if (save_points_ == nullptr || save_points_->empty()) {
    // No SavePoint yet.
    assert(write_batch_.PopSavePoint().IsNotFound());
    return Status::NotFound();
  }

  assert(!save_points_->empty());
  // If there is another savepoint A below the current savepoint B, then A
  // needs to inherit tracked_keys in B so that if we rollback to savepoint A,
  // we remember to unlock keys in B. If there is no other savepoint below,
  // then we can safely discard savepoint info.
  if (save_points_->size() == 1) {
    save_points_->pop();
  } else {
    TransactionBaseImpl::SavePoint top(lock_tracker_factory_);
    std::swap(top, save_points_->top());
    save_points_->pop();

    save_points_->top().new_locks_->Merge(*top.new_locks_);
  }

  return write_batch_.PopSavePoint();
}
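
// Usage sketch (illustrative only): how SetSavePoint()/RollbackToSavePoint()
// compose through the public Transaction API. Assumptions: `txn_db` is a
// TransactionDB*; key names are hypothetical; error handling is omitted.
//
//   Transaction* txn = txn_db->BeginTransaction(write_options);
//   Status s = txn->Put("key1", "value1");
//   txn->SetSavePoint();             // stacks snapshot/write/lock-tracker state
//   s = txn->Put("key2", "value2");
//   s = txn->RollbackToSavePoint();  // drops "key2" from the write batch and
//                                    // forgets locks tracked since the savepoint
//   s = txn->Commit();               // commits only "key1"
//   delete txn;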

Status TransactionBaseImpl::Get(const ReadOptions& read_options,
                                ColumnFamilyHandle* column_family,
                                const Slice& key, std::string* value) {
  assert(value != nullptr);
  PinnableSlice pinnable_val(value);
  assert(!pinnable_val.IsPinned());
  auto s = Get(read_options, column_family, key, &pinnable_val);
  if (s.ok() && pinnable_val.IsPinned()) {
    value->assign(pinnable_val.data(), pinnable_val.size());
  }  // else value is already assigned
  return s;
}

Status TransactionBaseImpl::Get(const ReadOptions& read_options,
                                ColumnFamilyHandle* column_family,
                                const Slice& key, PinnableSlice* pinnable_val) {
  return write_batch_.GetFromBatchAndDB(db_, read_options, column_family, key,
                                        pinnable_val);
}

Status TransactionBaseImpl::GetForUpdate(const ReadOptions& read_options,
                                         ColumnFamilyHandle* column_family,
                                         const Slice& key, std::string* value,
                                         bool exclusive,
                                         const bool do_validate) {
  if (!do_validate && read_options.snapshot != nullptr) {
    return Status::InvalidArgument(
        "If do_validate is false then GetForUpdate with snapshot is not "
        "defined.");
  }
  Status s =
      TryLock(column_family, key, true /* read_only */, exclusive, do_validate);

  if (s.ok() && value != nullptr) {
    assert(value != nullptr);
    PinnableSlice pinnable_val(value);
    assert(!pinnable_val.IsPinned());
    s = Get(read_options, column_family, key, &pinnable_val);
    if (s.ok() && pinnable_val.IsPinned()) {
      value->assign(pinnable_val.data(), pinnable_val.size());
    }  // else value is already assigned
  }
  return s;
}

Status TransactionBaseImpl::GetForUpdate(const ReadOptions& read_options,
                                         ColumnFamilyHandle* column_family,
                                         const Slice& key,
                                         PinnableSlice* pinnable_val,
                                         bool exclusive,
                                         const bool do_validate) {
  if (!do_validate && read_options.snapshot != nullptr) {
    return Status::InvalidArgument(
        "If do_validate is false then GetForUpdate with snapshot is not "
        "defined.");
  }
  Status s =
      TryLock(column_family, key, true /* read_only */, exclusive, do_validate);

  if (s.ok() && pinnable_val != nullptr) {
    s = Get(read_options, column_family, key, pinnable_val);
  }
  return s;
}

std::vector<Status> TransactionBaseImpl::MultiGet(
    const ReadOptions& read_options,
    const std::vector<ColumnFamilyHandle*>& column_family,
    const std::vector<Slice>& keys, std::vector<std::string>* values) {
  size_t num_keys = keys.size();
  values->resize(num_keys);

  std::vector<Status> stat_list(num_keys);
  for (size_t i = 0; i < num_keys; ++i) {
    stat_list[i] = Get(read_options, column_family[i], keys[i], &(*values)[i]);
  }

  return stat_list;
}

void TransactionBaseImpl::MultiGet(const ReadOptions& read_options,
                                   ColumnFamilyHandle* column_family,
                                   const size_t num_keys, const Slice* keys,
                                   PinnableSlice* values, Status* statuses,
                                   const bool sorted_input) {
  write_batch_.MultiGetFromBatchAndDB(db_, read_options, column_family,
                                      num_keys, keys, values, statuses,
                                      sorted_input);
}

std::vector<Status> TransactionBaseImpl::MultiGetForUpdate(
    const ReadOptions& read_options,
    const std::vector<ColumnFamilyHandle*>& column_family,
    const std::vector<Slice>& keys, std::vector<std::string>* values) {
  // Regardless of whether the MultiGet succeeded, track these keys.
  size_t num_keys = keys.size();
  values->resize(num_keys);

  // Lock all keys
  for (size_t i = 0; i < num_keys; ++i) {
    Status s = TryLock(column_family[i], keys[i], true /* read_only */,
                       true /* exclusive */);
    if (!s.ok()) {
      // Fail entire multiget if we cannot lock all keys
      return std::vector<Status>(num_keys, s);
    }
  }

  // TODO(agiardullo): optimize multiget?
  std::vector<Status> stat_list(num_keys);
  for (size_t i = 0; i < num_keys; ++i) {
    stat_list[i] = Get(read_options, column_family[i], keys[i], &(*values)[i]);
  }

  return stat_list;
}

Iterator* TransactionBaseImpl::GetIterator(const ReadOptions& read_options) {
  Iterator* db_iter = db_->NewIterator(read_options);
  assert(db_iter);

  return write_batch_.NewIteratorWithBase(db_iter);
}

Iterator* TransactionBaseImpl::GetIterator(const ReadOptions& read_options,
                                           ColumnFamilyHandle* column_family) {
  Iterator* db_iter = db_->NewIterator(read_options, column_family);
  assert(db_iter);

  return write_batch_.NewIteratorWithBase(column_family, db_iter,
                                          &read_options);
}
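
// Usage sketch (illustrative only): iterators returned by the two overloads
// above merge this transaction's pending writes with the base DB state,
// because the db iterator is wrapped via write_batch_.NewIteratorWithBase().
// Assumptions: `txn` is a Transaction* owned by the caller.
//
//   ReadOptions ropts;
//   std::unique_ptr<Iterator> it(txn->GetIterator(ropts));
//   for (it->SeekToFirst(); it->Valid(); it->Next()) {
//     // Sees committed DB entries as well as this transaction's uncommitted
//     // writes, with the latter taking precedence for the same key.
//   }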

Status TransactionBaseImpl::Put(ColumnFamilyHandle* column_family,
                                const Slice& key, const Slice& value,
                                const bool assume_tracked) {
  const bool do_validate = !assume_tracked;
  Status s = TryLock(column_family, key, false /* read_only */,
                     true /* exclusive */, do_validate, assume_tracked);

  if (s.ok()) {
    s = GetBatchForWrite()->Put(column_family, key, value);
    if (s.ok()) {
      num_puts_++;
    }
  }

  return s;
}

Status TransactionBaseImpl::Put(ColumnFamilyHandle* column_family,
                                const SliceParts& key, const SliceParts& value,
                                const bool assume_tracked) {
  const bool do_validate = !assume_tracked;
  Status s = TryLock(column_family, key, false /* read_only */,
                     true /* exclusive */, do_validate, assume_tracked);

  if (s.ok()) {
    s = GetBatchForWrite()->Put(column_family, key, value);
    if (s.ok()) {
      num_puts_++;
    }
  }

  return s;
}

Status TransactionBaseImpl::Merge(ColumnFamilyHandle* column_family,
                                  const Slice& key, const Slice& value,
                                  const bool assume_tracked) {
  const bool do_validate = !assume_tracked;
  Status s = TryLock(column_family, key, false /* read_only */,
                     true /* exclusive */, do_validate, assume_tracked);

  if (s.ok()) {
    s = GetBatchForWrite()->Merge(column_family, key, value);
    if (s.ok()) {
      num_merges_++;
    }
  }

  return s;
}

Status TransactionBaseImpl::Delete(ColumnFamilyHandle* column_family,
                                   const Slice& key,
                                   const bool assume_tracked) {
  const bool do_validate = !assume_tracked;
  Status s = TryLock(column_family, key, false /* read_only */,
                     true /* exclusive */, do_validate, assume_tracked);

  if (s.ok()) {
    s = GetBatchForWrite()->Delete(column_family, key);
    if (s.ok()) {
      num_deletes_++;
    }
  }

  return s;
}

Status TransactionBaseImpl::Delete(ColumnFamilyHandle* column_family,
                                   const SliceParts& key,
                                   const bool assume_tracked) {
  const bool do_validate = !assume_tracked;
  Status s = TryLock(column_family, key, false /* read_only */,
                     true /* exclusive */, do_validate, assume_tracked);

  if (s.ok()) {
    s = GetBatchForWrite()->Delete(column_family, key);
    if (s.ok()) {
      num_deletes_++;
    }
  }

  return s;
}

Status TransactionBaseImpl::SingleDelete(ColumnFamilyHandle* column_family,
                                         const Slice& key,
                                         const bool assume_tracked) {
  const bool do_validate = !assume_tracked;
  Status s = TryLock(column_family, key, false /* read_only */,
                     true /* exclusive */, do_validate, assume_tracked);

  if (s.ok()) {
    s = GetBatchForWrite()->SingleDelete(column_family, key);
    if (s.ok()) {
      num_deletes_++;
    }
  }

  return s;
}

Status TransactionBaseImpl::SingleDelete(ColumnFamilyHandle* column_family,
                                         const SliceParts& key,
                                         const bool assume_tracked) {
  const bool do_validate = !assume_tracked;
  Status s = TryLock(column_family, key, false /* read_only */,
                     true /* exclusive */, do_validate, assume_tracked);

  if (s.ok()) {
    s = GetBatchForWrite()->SingleDelete(column_family, key);
    if (s.ok()) {
      num_deletes_++;
    }
  }

  return s;
}

Status TransactionBaseImpl::PutUntracked(ColumnFamilyHandle* column_family,
                                         const Slice& key, const Slice& value) {
  Status s = TryLock(column_family, key, false /* read_only */,
                     true /* exclusive */, false /* do_validate */);

  if (s.ok()) {
    s = GetBatchForWrite()->Put(column_family, key, value);
    if (s.ok()) {
      num_puts_++;
    }
  }

  return s;
}

Status TransactionBaseImpl::PutUntracked(ColumnFamilyHandle* column_family,
                                         const SliceParts& key,
                                         const SliceParts& value) {
  Status s = TryLock(column_family, key, false /* read_only */,
                     true /* exclusive */, false /* do_validate */);

  if (s.ok()) {
    s = GetBatchForWrite()->Put(column_family, key, value);
    if (s.ok()) {
      num_puts_++;
    }
  }

  return s;
}

Status TransactionBaseImpl::MergeUntracked(ColumnFamilyHandle* column_family,
                                           const Slice& key,
                                           const Slice& value) {
  Status s = TryLock(column_family, key, false /* read_only */,
                     true /* exclusive */, false /* do_validate */);

  if (s.ok()) {
    s = GetBatchForWrite()->Merge(column_family, key, value);
    if (s.ok()) {
      num_merges_++;
    }
  }

  return s;
}

Status TransactionBaseImpl::DeleteUntracked(ColumnFamilyHandle* column_family,
                                            const Slice& key) {
  Status s = TryLock(column_family, key, false /* read_only */,
                     true /* exclusive */, false /* do_validate */);

  if (s.ok()) {
    s = GetBatchForWrite()->Delete(column_family, key);
    if (s.ok()) {
      num_deletes_++;
    }
  }

  return s;
}

Status TransactionBaseImpl::DeleteUntracked(ColumnFamilyHandle* column_family,
                                            const SliceParts& key) {
  Status s = TryLock(column_family, key, false /* read_only */,
                     true /* exclusive */, false /* do_validate */);

  if (s.ok()) {
    s = GetBatchForWrite()->Delete(column_family, key);
    if (s.ok()) {
      num_deletes_++;
    }
  }

  return s;
}

Status TransactionBaseImpl::SingleDeleteUntracked(
    ColumnFamilyHandle* column_family, const Slice& key) {
  Status s = TryLock(column_family, key, false /* read_only */,
                     true /* exclusive */, false /* do_validate */);

  if (s.ok()) {
    s = GetBatchForWrite()->SingleDelete(column_family, key);
    if (s.ok()) {
      num_deletes_++;
    }
  }

  return s;
}

void TransactionBaseImpl::PutLogData(const Slice& blob) {
  auto s = write_batch_.PutLogData(blob);
  (void)s;
  assert(s.ok());
}

WriteBatchWithIndex* TransactionBaseImpl::GetWriteBatch() {
  return &write_batch_;
}

uint64_t TransactionBaseImpl::GetElapsedTime() const {
  return (dbimpl_->GetSystemClock()->NowMicros() - start_time_) / 1000;
}

uint64_t TransactionBaseImpl::GetNumPuts() const { return num_puts_; }

uint64_t TransactionBaseImpl::GetNumDeletes() const { return num_deletes_; }

uint64_t TransactionBaseImpl::GetNumMerges() const { return num_merges_; }

uint64_t TransactionBaseImpl::GetNumKeys() const {
  return tracked_locks_->GetNumPointLocks();
}

void TransactionBaseImpl::TrackKey(uint32_t cfh_id, const std::string& key,
                                   SequenceNumber seq, bool read_only,
                                   bool exclusive) {
  PointLockRequest r;
  r.column_family_id = cfh_id;
  r.key = key;
  r.seq = seq;
  r.read_only = read_only;
  r.exclusive = exclusive;

  // Update map of all tracked keys for this transaction
  tracked_locks_->Track(r);

  if (save_points_ != nullptr && !save_points_->empty()) {
    // Update map of tracked keys in this SavePoint
    save_points_->top().new_locks_->Track(r);
  }
}

// Gets the write batch that should be used for Put/Merge/Deletes.
//
// Returns either a WriteBatch or WriteBatchWithIndex depending on whether
// DisableIndexing() has been called.
WriteBatchBase* TransactionBaseImpl::GetBatchForWrite() {
  if (indexing_enabled_) {
    // Use WriteBatchWithIndex
    return &write_batch_;
  } else {
    // Don't use WriteBatchWithIndex. Return base WriteBatch.
    return write_batch_.GetWriteBatch();
  }
}
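
// Usage sketch (illustrative only): indexing_enabled_ is toggled through
// Transaction::DisableIndexing()/EnableIndexing(). With indexing disabled,
// writes go to the base WriteBatch and skip the WriteBatchWithIndex index,
// so they are cheaper but not visible to this transaction's own reads and
// iterators. Key names are hypothetical.
//
//   txn->DisableIndexing();  // bulk-load style writes, no read-your-own-writes
//   Status s = txn->Put("bulk_key", "bulk_value");
//   txn->EnableIndexing();   // restore indexed (read-your-own-writes) behavior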

void TransactionBaseImpl::ReleaseSnapshot(const Snapshot* snapshot, DB* db) {
  if (snapshot != nullptr) {
    ROCKS_LOG_DETAILS(dbimpl_->immutable_db_options().info_log,
                      "ReleaseSnapshot %" PRIu64 " Set",
                      snapshot->GetSequenceNumber());
    db->ReleaseSnapshot(snapshot);
  }
}

void TransactionBaseImpl::UndoGetForUpdate(ColumnFamilyHandle* column_family,
                                           const Slice& key) {
  PointLockRequest r;
  r.column_family_id = GetColumnFamilyID(column_family);
  r.key = key.ToString();
  r.read_only = true;

  bool can_untrack = false;
  if (save_points_ != nullptr && !save_points_->empty()) {
    // If there is no GetForUpdate of the key in this save point,
    // then cannot untrack from the global lock tracker.
    UntrackStatus s = save_points_->top().new_locks_->Untrack(r);
    can_untrack = (s != UntrackStatus::NOT_TRACKED);
  } else {
    // No save point, so can untrack from the global lock tracker.
    can_untrack = true;
  }

  if (can_untrack) {
    // If erased from the global tracker, then can unlock the key.
    UntrackStatus s = tracked_locks_->Untrack(r);
    bool can_unlock = (s == UntrackStatus::REMOVED);
    if (can_unlock) {
      UnlockGetForUpdate(column_family, key);
    }
  }
}
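
// Usage sketch (illustrative only): a typical GetForUpdate()/UndoGetForUpdate()
// pairing that exercises the untracking logic above. Assumptions:
// `ShouldModify()` is a placeholder for application logic; key names are
// hypothetical.
//
//   std::string value;
//   Status s = txn->GetForUpdate(read_options, "key1", &value);
//   if (s.ok() && !ShouldModify(value)) {
//     // The key no longer needs conflict protection, so let the transaction
//     // drop its tracking (and, depending on the implementation, its lock).
//     txn->UndoGetForUpdate("key1");
//   }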

Status TransactionBaseImpl::RebuildFromWriteBatch(WriteBatch* src_batch) {
  struct IndexedWriteBatchBuilder : public WriteBatch::Handler {
    Transaction* txn_;
    DBImpl* db_;
    IndexedWriteBatchBuilder(Transaction* txn, DBImpl* db)
        : txn_(txn), db_(db) {
      assert(dynamic_cast<TransactionBaseImpl*>(txn_) != nullptr);
    }

    Status PutCF(uint32_t cf, const Slice& key, const Slice& val) override {
      return txn_->Put(db_->GetColumnFamilyHandle(cf), key, val);
    }

    Status DeleteCF(uint32_t cf, const Slice& key) override {
      return txn_->Delete(db_->GetColumnFamilyHandle(cf), key);
    }

    Status SingleDeleteCF(uint32_t cf, const Slice& key) override {
      return txn_->SingleDelete(db_->GetColumnFamilyHandle(cf), key);
    }

    Status MergeCF(uint32_t cf, const Slice& key, const Slice& val) override {
      return txn_->Merge(db_->GetColumnFamilyHandle(cf), key, val);
    }

    // this is used for reconstructing prepared transactions upon
    // recovery. there should not be any meta markers in the batches
    // we are processing.
    Status MarkBeginPrepare(bool) override { return Status::InvalidArgument(); }

    Status MarkEndPrepare(const Slice&) override {
      return Status::InvalidArgument();
    }

    Status MarkCommit(const Slice&) override {
      return Status::InvalidArgument();
    }

    Status MarkRollback(const Slice&) override {
      return Status::InvalidArgument();
    }
  };

  IndexedWriteBatchBuilder copycat(this, dbimpl_);
  return src_batch->Iterate(&copycat);
}

WriteBatch* TransactionBaseImpl::GetCommitTimeWriteBatch() {
  return &commit_time_batch_;
}

}  // namespace ROCKSDB_NAMESPACE

#endif  // ROCKSDB_LITE