// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#ifndef ROCKSDB_LITE

#include "utilities/transactions/transaction_base.h"

#include <cinttypes>

#include "db/column_family.h"
#include "db/db_impl/db_impl.h"
#include "rocksdb/comparator.h"
#include "rocksdb/db.h"
#include "rocksdb/status.h"
#include "util/cast_util.h"
#include "util/string_util.h"

namespace ROCKSDB_NAMESPACE {

TransactionBaseImpl::TransactionBaseImpl(DB* db,
                                         const WriteOptions& write_options)
    : db_(db),
      dbimpl_(static_cast_with_check<DBImpl>(db)),
      write_options_(write_options),
      cmp_(GetColumnFamilyUserComparator(db->DefaultColumnFamily())),
      start_time_(db_->GetEnv()->NowMicros()),
      write_batch_(cmp_, 0, true, 0),
      indexing_enabled_(true) {
  assert(dynamic_cast<DBImpl*>(db_) != nullptr);
  log_number_ = 0;
  if (dbimpl_->allow_2pc()) {
    InitWriteBatch();
  }
}

TransactionBaseImpl::~TransactionBaseImpl() {
  // Release snapshot if snapshot is set
  SetSnapshotInternal(nullptr);
}

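// Reset all per-transaction state (savepoints, tracked keys, write batches,
// and operation counters) so the object can be reused. When two-phase commit
// is enabled, the write batch is re-initialized as in the constructor.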
void TransactionBaseImpl::Clear() {
  save_points_.reset(nullptr);
  write_batch_.Clear();
  commit_time_batch_.Clear();
  tracked_keys_.clear();
  num_puts_ = 0;
  num_deletes_ = 0;
  num_merges_ = 0;

  if (dbimpl_->allow_2pc()) {
    InitWriteBatch();
  }
}

void TransactionBaseImpl::Reinitialize(DB* db,
                                       const WriteOptions& write_options) {
  Clear();
  ClearSnapshot();
  id_ = 0;
  db_ = db;
  name_.clear();
  log_number_ = 0;
  write_options_ = write_options;
  start_time_ = db_->GetEnv()->NowMicros();
  indexing_enabled_ = true;
  cmp_ = GetColumnFamilyUserComparator(db_->DefaultColumnFamily());
}

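// Acquire a snapshot from the DB to serve as this transaction's
// write-conflict boundary; subclasses validate tracked keys against it.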
void TransactionBaseImpl::SetSnapshot() {
  const Snapshot* snapshot = dbimpl_->GetSnapshotForWriteConflictBoundary();
  SetSnapshotInternal(snapshot);
}

void TransactionBaseImpl::SetSnapshotInternal(const Snapshot* snapshot) {
  // Set a custom deleter for the snapshot_ shared_ptr as the snapshot needs
  // to be released, not deleted, when it is no longer referenced.
  snapshot_.reset(snapshot, std::bind(&TransactionBaseImpl::ReleaseSnapshot,
                                      this, std::placeholders::_1, db_));
  snapshot_needed_ = false;
  snapshot_notifier_ = nullptr;
}

void TransactionBaseImpl::SetSnapshotOnNextOperation(
    std::shared_ptr<TransactionNotifier> notifier) {
  snapshot_needed_ = true;
  snapshot_notifier_ = notifier;
}

void TransactionBaseImpl::SetSnapshotIfNeeded() {
  if (snapshot_needed_) {
    std::shared_ptr<TransactionNotifier> notifier = snapshot_notifier_;
    SetSnapshot();
    if (notifier != nullptr) {
      notifier->SnapshotCreated(GetSnapshot());
    }
  }
}

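// SliceParts overload: flatten the key parts into one contiguous string and
// defer to the Slice/string-based TryLock() implemented by the subclass.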
Status TransactionBaseImpl::TryLock(ColumnFamilyHandle* column_family,
                                    const SliceParts& key, bool read_only,
                                    bool exclusive, const bool do_validate,
                                    const bool assume_tracked) {
  size_t key_size = 0;
  for (int i = 0; i < key.num_parts; ++i) {
    key_size += key.parts[i].size();
  }

  std::string str;
  str.reserve(key_size);

  for (int i = 0; i < key.num_parts; ++i) {
    str.append(key.parts[i].data(), key.parts[i].size());
  }

  return TryLock(column_family, str, read_only, exclusive, do_validate,
                 assume_tracked);
}

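// Savepoints are used strictly stack-wise (only the top element is ever
// accessed), so they are kept in a std::stack backed by an autovector, which
// is cheaper to initialize than the default std::deque. The stack itself is
// created lazily, on the first SetSavePoint() call.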
void TransactionBaseImpl::SetSavePoint() {
  if (save_points_ == nullptr) {
    save_points_.reset(
        new std::stack<TransactionBaseImpl::SavePoint,
                       autovector<TransactionBaseImpl::SavePoint>>());
  }
  save_points_->emplace(snapshot_, snapshot_needed_, snapshot_notifier_,
                        num_puts_, num_deletes_, num_merges_);
  write_batch_.SetSavePoint();
}

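// A minimal usage sketch of the savepoint API (illustrative only; assumes
// `txn` is a Transaction* obtained from a TransactionDB):
//
//   txn->SetSavePoint();
//   Status s = txn->Put("key", "value");
//   if (!s.ok()) {
//     txn->RollbackToSavePoint();  // undoes writes made since the savepoint
//   }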
Status TransactionBaseImpl::RollbackToSavePoint() {
  if (save_points_ != nullptr && save_points_->size() > 0) {
    // Restore saved SavePoint
    TransactionBaseImpl::SavePoint& save_point = save_points_->top();
    snapshot_ = save_point.snapshot_;
    snapshot_needed_ = save_point.snapshot_needed_;
    snapshot_notifier_ = save_point.snapshot_notifier_;
    num_puts_ = save_point.num_puts_;
    num_deletes_ = save_point.num_deletes_;
    num_merges_ = save_point.num_merges_;

    // Rollback batch
    Status s = write_batch_.RollbackToSavePoint();
    assert(s.ok());

    // Rollback any keys that were tracked since the last savepoint
    const TransactionKeyMap& key_map = save_point.new_keys_;
    for (const auto& key_map_iter : key_map) {
      uint32_t column_family_id = key_map_iter.first;
      auto& keys = key_map_iter.second;

      auto& cf_tracked_keys = tracked_keys_[column_family_id];

      for (const auto& key_iter : keys) {
        const std::string& key = key_iter.first;
        uint32_t num_reads = key_iter.second.num_reads;
        uint32_t num_writes = key_iter.second.num_writes;

        auto tracked_keys_iter = cf_tracked_keys.find(key);
        assert(tracked_keys_iter != cf_tracked_keys.end());

        // Decrement the total reads/writes of this key by the number of
        // reads/writes done since the last SavePoint.
        if (num_reads > 0) {
          assert(tracked_keys_iter->second.num_reads >= num_reads);
          tracked_keys_iter->second.num_reads -= num_reads;
        }
        if (num_writes > 0) {
          assert(tracked_keys_iter->second.num_writes >= num_writes);
          tracked_keys_iter->second.num_writes -= num_writes;
        }
        if (tracked_keys_iter->second.num_reads == 0 &&
            tracked_keys_iter->second.num_writes == 0) {
          cf_tracked_keys.erase(tracked_keys_iter);
        }
      }
    }

    save_points_->pop();

    return s;
  } else {
    assert(write_batch_.RollbackToSavePoint().IsNotFound());
    return Status::NotFound();
  }
}

Status TransactionBaseImpl::PopSavePoint() {
  if (save_points_ == nullptr || save_points_->empty()) {
    // No SavePoint yet.
    assert(write_batch_.PopSavePoint().IsNotFound());
    return Status::NotFound();
  }

  assert(!save_points_->empty());
  // If there is another savepoint A below the current savepoint B, then A
  // needs to inherit tracked_keys in B so that if we rollback to savepoint A,
  // we remember to unlock keys in B. If there is no other savepoint below,
  // then we can safely discard savepoint info.
  if (save_points_->size() == 1) {
    save_points_->pop();
  } else {
    TransactionBaseImpl::SavePoint top;
    std::swap(top, save_points_->top());
    save_points_->pop();

    const TransactionKeyMap& curr_cf_key_map = top.new_keys_;
    TransactionKeyMap& prev_cf_key_map = save_points_->top().new_keys_;

    for (const auto& curr_cf_key_iter : curr_cf_key_map) {
      uint32_t column_family_id = curr_cf_key_iter.first;
      const std::unordered_map<std::string, TransactionKeyMapInfo>& curr_keys =
          curr_cf_key_iter.second;

      // If cfid was not previously tracked, just copy everything over.
      auto prev_keys_iter = prev_cf_key_map.find(column_family_id);
      if (prev_keys_iter == prev_cf_key_map.end()) {
        prev_cf_key_map.emplace(curr_cf_key_iter);
      } else {
        std::unordered_map<std::string, TransactionKeyMapInfo>& prev_keys =
            prev_keys_iter->second;
        for (const auto& key_iter : curr_keys) {
          const std::string& key = key_iter.first;
          const TransactionKeyMapInfo& info = key_iter.second;
          // If key was not previously tracked, just copy the whole struct
          // over. Otherwise, some merging needs to occur.
          auto prev_info = prev_keys.find(key);
          if (prev_info == prev_keys.end()) {
            prev_keys.emplace(key_iter);
          } else {
            prev_info->second.Merge(info);
          }
        }
      }
    }
  }

  return write_batch_.PopSavePoint();
}

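// std::string-based Get(): wraps the caller's string in a PinnableSlice. If
// the value comes back pinned (pointing into internal storage), it is copied
// into *value; otherwise it was already materialized directly into *value.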
Status TransactionBaseImpl::Get(const ReadOptions& read_options,
                                ColumnFamilyHandle* column_family,
                                const Slice& key, std::string* value) {
  assert(value != nullptr);
  PinnableSlice pinnable_val(value);
  assert(!pinnable_val.IsPinned());
  auto s = Get(read_options, column_family, key, &pinnable_val);
  if (s.ok() && pinnable_val.IsPinned()) {
    value->assign(pinnable_val.data(), pinnable_val.size());
  }  // else value is already assigned
  return s;
}

Status TransactionBaseImpl::Get(const ReadOptions& read_options,
                                ColumnFamilyHandle* column_family,
                                const Slice& key, PinnableSlice* pinnable_val) {
  return write_batch_.GetFromBatchAndDB(db_, read_options, column_family, key,
                                        pinnable_val);
}

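// GetForUpdate(): lock (or, in optimistic implementations, track) the key via
// TryLock() before reading it, so a conflicting write by another transaction
// is detected rather than silently lost.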
Status TransactionBaseImpl::GetForUpdate(const ReadOptions& read_options,
                                         ColumnFamilyHandle* column_family,
                                         const Slice& key, std::string* value,
                                         bool exclusive,
                                         const bool do_validate) {
  if (!do_validate && read_options.snapshot != nullptr) {
    return Status::InvalidArgument(
        "If do_validate is false then GetForUpdate with snapshot is not "
        "defined.");
  }
  Status s =
      TryLock(column_family, key, true /* read_only */, exclusive, do_validate);

  if (s.ok() && value != nullptr) {
    assert(value != nullptr);
    PinnableSlice pinnable_val(value);
    assert(!pinnable_val.IsPinned());
    s = Get(read_options, column_family, key, &pinnable_val);
    if (s.ok() && pinnable_val.IsPinned()) {
      value->assign(pinnable_val.data(), pinnable_val.size());
    }  // else value is already assigned
  }
  return s;
}

Status TransactionBaseImpl::GetForUpdate(const ReadOptions& read_options,
                                         ColumnFamilyHandle* column_family,
                                         const Slice& key,
                                         PinnableSlice* pinnable_val,
                                         bool exclusive,
                                         const bool do_validate) {
  if (!do_validate && read_options.snapshot != nullptr) {
    return Status::InvalidArgument(
        "If do_validate is false then GetForUpdate with snapshot is not "
        "defined.");
  }
  Status s =
      TryLock(column_family, key, true /* read_only */, exclusive, do_validate);

  if (s.ok() && pinnable_val != nullptr) {
    s = Get(read_options, column_family, key, pinnable_val);
  }
  return s;
}

std::vector<Status> TransactionBaseImpl::MultiGet(
    const ReadOptions& read_options,
    const std::vector<ColumnFamilyHandle*>& column_family,
    const std::vector<Slice>& keys, std::vector<std::string>* values) {
  size_t num_keys = keys.size();
  values->resize(num_keys);

  std::vector<Status> stat_list(num_keys);
  for (size_t i = 0; i < num_keys; ++i) {
    stat_list[i] = Get(read_options, column_family[i], keys[i], &(*values)[i]);
  }

  return stat_list;
}

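// Batched MultiGet(): unlike the std::vector overload above, which issues one
// Get() per key, this delegates to WriteBatchWithIndex so keys not found in
// the transaction's own batch can be fetched from the DB in one batched
// lookup.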
void TransactionBaseImpl::MultiGet(const ReadOptions& read_options,
                                   ColumnFamilyHandle* column_family,
                                   const size_t num_keys, const Slice* keys,
                                   PinnableSlice* values, Status* statuses,
                                   const bool sorted_input) {
  write_batch_.MultiGetFromBatchAndDB(db_, read_options, column_family,
                                      num_keys, keys, values, statuses,
                                      sorted_input);
}

std::vector<Status> TransactionBaseImpl::MultiGetForUpdate(
    const ReadOptions& read_options,
    const std::vector<ColumnFamilyHandle*>& column_family,
    const std::vector<Slice>& keys, std::vector<std::string>* values) {
  // Regardless of whether the MultiGet succeeded, track these keys.
  size_t num_keys = keys.size();
  values->resize(num_keys);

  // Lock all keys
  for (size_t i = 0; i < num_keys; ++i) {
    Status s = TryLock(column_family[i], keys[i], true /* read_only */,
                       true /* exclusive */);
    if (!s.ok()) {
      // Fail entire multiget if we cannot lock all keys
      return std::vector<Status>(num_keys, s);
    }
  }

  // TODO(agiardullo): optimize multiget?
  std::vector<Status> stat_list(num_keys);
  for (size_t i = 0; i < num_keys; ++i) {
    stat_list[i] = Get(read_options, column_family[i], keys[i], &(*values)[i]);
  }

  return stat_list;
}

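// The returned iterator merges this transaction's uncommitted writes (from
// write_batch_) with the committed contents of the base DB, so the
// transaction sees its own pending changes while iterating.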
Iterator* TransactionBaseImpl::GetIterator(const ReadOptions& read_options) {
  Iterator* db_iter = db_->NewIterator(read_options);
  assert(db_iter);

  return write_batch_.NewIteratorWithBase(db_iter);
}

Iterator* TransactionBaseImpl::GetIterator(const ReadOptions& read_options,
                                           ColumnFamilyHandle* column_family) {
  Iterator* db_iter = db_->NewIterator(read_options, column_family);
  assert(db_iter);

  return write_batch_.NewIteratorWithBase(column_family, db_iter,
                                          &read_options);
}

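// All write operations below follow the same pattern: first TryLock() the key
// (which also tracks it for conflict detection and, unless assume_tracked is
// set, validates it), then buffer the mutation in the write batch and bump
// the matching operation counter.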
Status TransactionBaseImpl::Put(ColumnFamilyHandle* column_family,
                                const Slice& key, const Slice& value,
                                const bool assume_tracked) {
  const bool do_validate = !assume_tracked;
  Status s = TryLock(column_family, key, false /* read_only */,
                     true /* exclusive */, do_validate, assume_tracked);

  if (s.ok()) {
    s = GetBatchForWrite()->Put(column_family, key, value);
    if (s.ok()) {
      num_puts_++;
    }
  }

  return s;
}

Status TransactionBaseImpl::Put(ColumnFamilyHandle* column_family,
                                const SliceParts& key, const SliceParts& value,
                                const bool assume_tracked) {
  const bool do_validate = !assume_tracked;
  Status s = TryLock(column_family, key, false /* read_only */,
                     true /* exclusive */, do_validate, assume_tracked);

  if (s.ok()) {
    s = GetBatchForWrite()->Put(column_family, key, value);
    if (s.ok()) {
      num_puts_++;
    }
  }

  return s;
}

Status TransactionBaseImpl::Merge(ColumnFamilyHandle* column_family,
                                  const Slice& key, const Slice& value,
                                  const bool assume_tracked) {
  const bool do_validate = !assume_tracked;
  Status s = TryLock(column_family, key, false /* read_only */,
                     true /* exclusive */, do_validate, assume_tracked);

  if (s.ok()) {
    s = GetBatchForWrite()->Merge(column_family, key, value);
    if (s.ok()) {
      num_merges_++;
    }
  }

  return s;
}

Status TransactionBaseImpl::Delete(ColumnFamilyHandle* column_family,
                                   const Slice& key,
                                   const bool assume_tracked) {
  const bool do_validate = !assume_tracked;
  Status s = TryLock(column_family, key, false /* read_only */,
                     true /* exclusive */, do_validate, assume_tracked);

  if (s.ok()) {
    s = GetBatchForWrite()->Delete(column_family, key);
    if (s.ok()) {
      num_deletes_++;
    }
  }

  return s;
}

Status TransactionBaseImpl::Delete(ColumnFamilyHandle* column_family,
                                   const SliceParts& key,
                                   const bool assume_tracked) {
  const bool do_validate = !assume_tracked;
  Status s = TryLock(column_family, key, false /* read_only */,
                     true /* exclusive */, do_validate, assume_tracked);

  if (s.ok()) {
    s = GetBatchForWrite()->Delete(column_family, key);
    if (s.ok()) {
      num_deletes_++;
    }
  }

  return s;
}

Status TransactionBaseImpl::SingleDelete(ColumnFamilyHandle* column_family,
                                         const Slice& key,
                                         const bool assume_tracked) {
  const bool do_validate = !assume_tracked;
  Status s = TryLock(column_family, key, false /* read_only */,
                     true /* exclusive */, do_validate, assume_tracked);

  if (s.ok()) {
    s = GetBatchForWrite()->SingleDelete(column_family, key);
    if (s.ok()) {
      num_deletes_++;
    }
  }

  return s;
}

Status TransactionBaseImpl::SingleDelete(ColumnFamilyHandle* column_family,
                                         const SliceParts& key,
                                         const bool assume_tracked) {
  const bool do_validate = !assume_tracked;
  Status s = TryLock(column_family, key, false /* read_only */,
                     true /* exclusive */, do_validate, assume_tracked);

  if (s.ok()) {
    s = GetBatchForWrite()->SingleDelete(column_family, key);
    if (s.ok()) {
      num_deletes_++;
    }
  }

  return s;
}

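// The *Untracked variants pass do_validate == false to TryLock(): the keys
// are written without snapshot validation, matching the Transaction API
// contract that no conflict checking is performed for these keys.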
Status TransactionBaseImpl::PutUntracked(ColumnFamilyHandle* column_family,
                                         const Slice& key, const Slice& value) {
  Status s = TryLock(column_family, key, false /* read_only */,
                     true /* exclusive */, false /* do_validate */);

  if (s.ok()) {
    s = GetBatchForWrite()->Put(column_family, key, value);
    if (s.ok()) {
      num_puts_++;
    }
  }

  return s;
}

Status TransactionBaseImpl::PutUntracked(ColumnFamilyHandle* column_family,
                                         const SliceParts& key,
                                         const SliceParts& value) {
  Status s = TryLock(column_family, key, false /* read_only */,
                     true /* exclusive */, false /* do_validate */);

  if (s.ok()) {
    s = GetBatchForWrite()->Put(column_family, key, value);
    if (s.ok()) {
      num_puts_++;
    }
  }

  return s;
}

Status TransactionBaseImpl::MergeUntracked(ColumnFamilyHandle* column_family,
                                           const Slice& key,
                                           const Slice& value) {
  Status s = TryLock(column_family, key, false /* read_only */,
                     true /* exclusive */, false /* do_validate */);

  if (s.ok()) {
    s = GetBatchForWrite()->Merge(column_family, key, value);
    if (s.ok()) {
      num_merges_++;
    }
  }

  return s;
}

Status TransactionBaseImpl::DeleteUntracked(ColumnFamilyHandle* column_family,
                                            const Slice& key) {
  Status s = TryLock(column_family, key, false /* read_only */,
                     true /* exclusive */, false /* do_validate */);

  if (s.ok()) {
    s = GetBatchForWrite()->Delete(column_family, key);
    if (s.ok()) {
      num_deletes_++;
    }
  }

  return s;
}

Status TransactionBaseImpl::DeleteUntracked(ColumnFamilyHandle* column_family,
                                            const SliceParts& key) {
  Status s = TryLock(column_family, key, false /* read_only */,
                     true /* exclusive */, false /* do_validate */);

  if (s.ok()) {
    s = GetBatchForWrite()->Delete(column_family, key);
    if (s.ok()) {
      num_deletes_++;
    }
  }

  return s;
}

Status TransactionBaseImpl::SingleDeleteUntracked(
    ColumnFamilyHandle* column_family, const Slice& key) {
  Status s = TryLock(column_family, key, false /* read_only */,
                     true /* exclusive */, false /* do_validate */);

  if (s.ok()) {
    s = GetBatchForWrite()->SingleDelete(column_family, key);
    if (s.ok()) {
      num_deletes_++;
    }
  }

  return s;
}

void TransactionBaseImpl::PutLogData(const Slice& blob) {
  write_batch_.PutLogData(blob);
}

WriteBatchWithIndex* TransactionBaseImpl::GetWriteBatch() {
  return &write_batch_;
}

uint64_t TransactionBaseImpl::GetElapsedTime() const {
  return (db_->GetEnv()->NowMicros() - start_time_) / 1000;
}

uint64_t TransactionBaseImpl::GetNumPuts() const { return num_puts_; }

uint64_t TransactionBaseImpl::GetNumDeletes() const { return num_deletes_; }

uint64_t TransactionBaseImpl::GetNumMerges() const { return num_merges_; }

uint64_t TransactionBaseImpl::GetNumKeys() const {
  uint64_t count = 0;

  // sum up locked keys in all column families
  for (const auto& key_map_iter : tracked_keys_) {
    const auto& keys = key_map_iter.second;
    count += keys.size();
  }

  return count;
}

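// Record that this transaction read or wrote `key` in column family `cfh_id`
// at sequence number `seq`. Both the transaction-wide map and the current
// SavePoint's map (if any) are updated so locks can be released correctly on
// rollback.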
void TransactionBaseImpl::TrackKey(uint32_t cfh_id, const std::string& key,
                                   SequenceNumber seq, bool read_only,
                                   bool exclusive) {
  // Update map of all tracked keys for this transaction
  TrackKey(&tracked_keys_, cfh_id, key, seq, read_only, exclusive);

  if (save_points_ != nullptr && !save_points_->empty()) {
    // Update map of tracked keys in this SavePoint
    TrackKey(&save_points_->top().new_keys_, cfh_id, key, seq, read_only,
             exclusive);
  }
}

// Add a key to the given TransactionKeyMap.
// seq for pessimistic transactions is the sequence number from which we know
// there has not been a concurrent update to the key.
void TransactionBaseImpl::TrackKey(TransactionKeyMap* key_map, uint32_t cfh_id,
                                   const std::string& key, SequenceNumber seq,
                                   bool read_only, bool exclusive) {
  auto& cf_key_map = (*key_map)[cfh_id];
#ifdef __cpp_lib_unordered_map_try_emplace
  // use c++17's try_emplace if available, to avoid rehashing the key
  // in case it is not already in the map
  auto result = cf_key_map.try_emplace(key, seq);
  auto iter = result.first;
  if (!result.second && seq < iter->second.seq) {
    // Now tracking this key with an earlier sequence number
    iter->second.seq = seq;
  }
#else
  auto iter = cf_key_map.find(key);
  if (iter == cf_key_map.end()) {
    auto result = cf_key_map.emplace(key, TransactionKeyMapInfo(seq));
    iter = result.first;
  } else if (seq < iter->second.seq) {
    // Now tracking this key with an earlier sequence number
    iter->second.seq = seq;
  }
#endif
  // else we do not update the seq. The smaller the tracked seq, the stronger
  // the guarantee, since it implies that from that seq onward there has not
  // been a concurrent update to the key. So we update the seq only if it
  // implies a stronger guarantee, i.e., if it is smaller than the existing
  // tracked seq.

  if (read_only) {
    iter->second.num_reads++;
  } else {
    iter->second.num_writes++;
  }
  iter->second.exclusive |= exclusive;
}

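// Return only the keys whose entire read/write history happened after the
// most recent SavePoint; returns nullptr if no SavePoint is set.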
std::unique_ptr<TransactionKeyMap>
TransactionBaseImpl::GetTrackedKeysSinceSavePoint() {
  if (save_points_ != nullptr && !save_points_->empty()) {
    // Examine the number of reads/writes performed on all keys written
    // since the last SavePoint and compare to the total number of reads/writes
    // for each key.
    TransactionKeyMap* result = new TransactionKeyMap();
    for (const auto& key_map_iter : save_points_->top().new_keys_) {
      uint32_t column_family_id = key_map_iter.first;
      auto& keys = key_map_iter.second;

      auto& cf_tracked_keys = tracked_keys_[column_family_id];

      for (const auto& key_iter : keys) {
        const std::string& key = key_iter.first;
        uint32_t num_reads = key_iter.second.num_reads;
        uint32_t num_writes = key_iter.second.num_writes;

        auto total_key_info = cf_tracked_keys.find(key);
        assert(total_key_info != cf_tracked_keys.end());
        assert(total_key_info->second.num_reads >= num_reads);
        assert(total_key_info->second.num_writes >= num_writes);

        if (total_key_info->second.num_reads == num_reads &&
            total_key_info->second.num_writes == num_writes) {
          // All the reads/writes to this key were done in the last savepoint.
          bool read_only = (num_writes == 0);
          TrackKey(result, column_family_id, key, key_iter.second.seq,
                   read_only, key_iter.second.exclusive);
        }
      }
    }
    return std::unique_ptr<TransactionKeyMap>(result);
  }

  // No SavePoint
  return nullptr;
}

// Gets the write batch that should be used for Put/Merge/Deletes.
//
// Returns either a WriteBatch or WriteBatchWithIndex depending on whether
// DisableIndexing() has been called.
WriteBatchBase* TransactionBaseImpl::GetBatchForWrite() {
  if (indexing_enabled_) {
    // Use WriteBatchWithIndex
    return &write_batch_;
  } else {
    // Don't use WriteBatchWithIndex. Return base WriteBatch.
    return write_batch_.GetWriteBatch();
  }
}

void TransactionBaseImpl::ReleaseSnapshot(const Snapshot* snapshot, DB* db) {
  if (snapshot != nullptr) {
    ROCKS_LOG_DETAILS(dbimpl_->immutable_db_options().info_log,
                      "ReleaseSnapshot %" PRIu64 " Set",
                      snapshot->GetSequenceNumber());
    db->ReleaseSnapshot(snapshot);
  }
}

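// Undo the effect of a prior GetForUpdate() on `key`: decrement its tracked
// read count and, once the key has no remaining reads or writes, stop
// tracking it and let the subclass release its lock via UnlockGetForUpdate().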
void TransactionBaseImpl::UndoGetForUpdate(ColumnFamilyHandle* column_family,
                                           const Slice& key) {
  uint32_t column_family_id = GetColumnFamilyID(column_family);
  auto& cf_tracked_keys = tracked_keys_[column_family_id];
  std::string key_str = key.ToString();
  bool can_decrement = false;
  bool can_unlock __attribute__((__unused__)) = false;

  if (save_points_ != nullptr && !save_points_->empty()) {
    // Check if this key was fetched ForUpdate in this SavePoint
    auto& cf_savepoint_keys = save_points_->top().new_keys_[column_family_id];

    auto savepoint_iter = cf_savepoint_keys.find(key_str);
    if (savepoint_iter != cf_savepoint_keys.end()) {
      if (savepoint_iter->second.num_reads > 0) {
        savepoint_iter->second.num_reads--;
        can_decrement = true;

        if (savepoint_iter->second.num_reads == 0 &&
            savepoint_iter->second.num_writes == 0) {
          // No other GetForUpdates or write on this key in this SavePoint
          cf_savepoint_keys.erase(savepoint_iter);
          can_unlock = true;
        }
      }
    }
  } else {
    // No SavePoint set
    can_decrement = true;
    can_unlock = true;
  }

  // We can only decrement the read count for this key if we were able to
  // decrement the read count in the current SavePoint, OR if there is no
  // SavePoint set.
  if (can_decrement) {
    auto key_iter = cf_tracked_keys.find(key_str);

    if (key_iter != cf_tracked_keys.end()) {
      if (key_iter->second.num_reads > 0) {
        key_iter->second.num_reads--;

        if (key_iter->second.num_reads == 0 &&
            key_iter->second.num_writes == 0) {
          // No other GetForUpdates or writes on this key
          assert(can_unlock);
          cf_tracked_keys.erase(key_iter);
          UnlockGetForUpdate(column_family, key);
        }
      }
    }
  }
}

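// Replay the contents of an external WriteBatch through this transaction's
// normal write path (used when reconstructing prepared transactions during
// recovery), so every entry is re-indexed and its key re-tracked.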
Status TransactionBaseImpl::RebuildFromWriteBatch(WriteBatch* src_batch) {
  struct IndexedWriteBatchBuilder : public WriteBatch::Handler {
    Transaction* txn_;
    DBImpl* db_;
    IndexedWriteBatchBuilder(Transaction* txn, DBImpl* db)
        : txn_(txn), db_(db) {
      assert(dynamic_cast<TransactionBaseImpl*>(txn_) != nullptr);
    }

    Status PutCF(uint32_t cf, const Slice& key, const Slice& val) override {
      return txn_->Put(db_->GetColumnFamilyHandle(cf), key, val);
    }

    Status DeleteCF(uint32_t cf, const Slice& key) override {
      return txn_->Delete(db_->GetColumnFamilyHandle(cf), key);
    }

    Status SingleDeleteCF(uint32_t cf, const Slice& key) override {
      return txn_->SingleDelete(db_->GetColumnFamilyHandle(cf), key);
    }

    Status MergeCF(uint32_t cf, const Slice& key, const Slice& val) override {
      return txn_->Merge(db_->GetColumnFamilyHandle(cf), key, val);
    }

    // this is used for reconstructing prepared transactions upon
    // recovery. there should not be any meta markers in the batches
    // we are processing.
    Status MarkBeginPrepare(bool) override { return Status::InvalidArgument(); }

    Status MarkEndPrepare(const Slice&) override {
      return Status::InvalidArgument();
    }

    Status MarkCommit(const Slice&) override {
      return Status::InvalidArgument();
    }

    Status MarkRollback(const Slice&) override {
      return Status::InvalidArgument();
    }
  };

  IndexedWriteBatchBuilder copycat(this, dbimpl_);
  return src_batch->Iterate(&copycat);
}

WriteBatch* TransactionBaseImpl::GetCommitTimeWriteBatch() {
  return &commit_time_batch_;
}

}  // namespace ROCKSDB_NAMESPACE

#endif  // ROCKSDB_LITE