// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#pragma once

#include <algorithm>
#include <cinttypes>
#include <functional>
#include <string>
#include <thread>

#include "db/db_impl/db_impl.h"
#include "db/db_test_util.h"
#include "port/port.h"
#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/utilities/transaction.h"
#include "rocksdb/utilities/transaction_db.h"
#include "table/mock_table.h"
#include "test_util/sync_point.h"
#include "test_util/testharness.h"
#include "test_util/testutil.h"
#include "test_util/transaction_test_util.h"
#include "util/random.h"
#include "util/string_util.h"
#include "utilities/fault_injection_env.h"
#include "utilities/merge_operators.h"
#include "utilities/merge_operators/string_append/stringappend.h"
#include "utilities/transactions/pessimistic_transaction_db.h"
#include "utilities/transactions/write_unprepared_txn_db.h"

namespace ROCKSDB_NAMESPACE {

// Return true if the ith bit is set in combination represented by comb
bool IsInCombination(size_t i, size_t comb) { return comb & (size_t(1) << i); }

enum WriteOrdering : bool { kOrderedWrite, kUnorderedWrite };
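
// Shared fixture for the transaction tests. It owns a TransactionDB that is
// opened either directly or on top of a StackableDB, runs on a
// FaultInjectionTestEnv so tests can simulate crashes, and is parameterized
// by write policy (WRITE_COMMITTED / WRITE_PREPARED / WRITE_UNPREPARED),
// two_write_queues, and write ordering.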
class TransactionTestBase : public ::testing::Test {
 public:
  TransactionDB* db;
  SpecialEnv special_env;
  FaultInjectionTestEnv* env;
  std::string dbname;
  Options options;

  TransactionDBOptions txn_db_options;
  bool use_stackable_db_;

  TransactionTestBase(bool use_stackable_db, bool two_write_queue,
                      TxnDBWritePolicy write_policy,
                      WriteOrdering write_ordering)
      : db(nullptr),
        special_env(Env::Default()),
        env(nullptr),
        use_stackable_db_(use_stackable_db) {
    options.create_if_missing = true;
    options.max_write_buffer_number = 2;
    options.write_buffer_size = 4 * 1024;
    options.unordered_write = write_ordering == kUnorderedWrite;
    options.level0_file_num_compaction_trigger = 2;
    options.merge_operator = MergeOperators::CreateFromStringId("stringappend");
    special_env.skip_fsync_ = true;
    env = new FaultInjectionTestEnv(&special_env);
    options.env = env;
    options.two_write_queues = two_write_queue;
    dbname = test::PerThreadDBPath("transaction_testdb");

    EXPECT_OK(DestroyDB(dbname, options));
    txn_db_options.transaction_lock_timeout = 0;
    txn_db_options.default_lock_timeout = 0;
    txn_db_options.write_policy = write_policy;
    txn_db_options.rollback_merge_operands = true;
    // This will stress write unprepared, by forcing write batch flush on every
    // write.
    txn_db_options.default_write_batch_flush_threshold = 1;
    // Write unprepared requires all transactions to be named. This setting
    // autogenerates the name so that existing tests can pass.
    txn_db_options.autogenerate_name = true;
    Status s;
    if (use_stackable_db == false) {
      s = TransactionDB::Open(options, txn_db_options, dbname, &db);
    } else {
      s = OpenWithStackableDB();
    }
    EXPECT_OK(s);
  }

  ~TransactionTestBase() {
    delete db;
    db = nullptr;
    // This is to skip the assert statement in FaultInjectionTestEnv. There
    // seems to be a bug in btrfs that makes readdir return recently
    // unlink-ed files. By using the default fs we simply ignore errors
    // resulting from attempting to delete such files in DestroyDB.
    if (getenv("KEEP_DB") == nullptr) {
      options.env = Env::Default();
      EXPECT_OK(DestroyDB(dbname, options));
    } else {
      fprintf(stdout, "db is still in %s\n", dbname.c_str());
    }
    delete env;
  }

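  // Reopen the DB without destroying it first, simulating a crash/recovery
  // cycle: unsynced file data is dropped via the FaultInjectionTestEnv before
  // the DB is opened again.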
  Status ReOpenNoDelete() {
    delete db;
    db = nullptr;
    env->AssertNoOpenFile();
    env->DropUnsyncedFileData();
    env->ResetState();
    Status s;
    if (use_stackable_db_ == false) {
      s = TransactionDB::Open(options, txn_db_options, dbname, &db);
    } else {
      s = OpenWithStackableDB();
    }
    assert(!s.ok() || db != nullptr);
    return s;
  }

  Status ReOpenNoDelete(std::vector<ColumnFamilyDescriptor>& cfs,
                        std::vector<ColumnFamilyHandle*>* handles) {
    for (auto h : *handles) {
      delete h;
    }
    handles->clear();
    delete db;
    db = nullptr;
    env->AssertNoOpenFile();
    env->DropUnsyncedFileData();
    env->ResetState();
    Status s;
    if (use_stackable_db_ == false) {
      s = TransactionDB::Open(options, txn_db_options, dbname, cfs, handles,
                              &db);
    } else {
      s = OpenWithStackableDB(cfs, handles);
    }
    assert(!s.ok() || db != nullptr);
    return s;
  }

  Status ReOpen() {
    delete db;
    db = nullptr;
    EXPECT_OK(DestroyDB(dbname, options));
    Status s;
    if (use_stackable_db_ == false) {
      s = TransactionDB::Open(options, txn_db_options, dbname, &db);
    } else {
      s = OpenWithStackableDB();
    }
    assert(db != nullptr);
    return s;
  }

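  // Open the DB through DBImpl::Open() and wrap it in a StackableDB before
  // handing it to TransactionDB::WrapStackableDB(). This exercises the code
  // path where the transaction layer sits on top of a StackableDB rather than
  // directly on DBImpl.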
  Status OpenWithStackableDB(std::vector<ColumnFamilyDescriptor>& cfs,
                             std::vector<ColumnFamilyHandle*>* handles) {
    std::vector<size_t> compaction_enabled_cf_indices;
    TransactionDB::PrepareWrap(&options, &cfs, &compaction_enabled_cf_indices);
    DB* root_db = nullptr;
    Options options_copy(options);
    const bool use_seq_per_batch =
        txn_db_options.write_policy == WRITE_PREPARED ||
        txn_db_options.write_policy == WRITE_UNPREPARED;
    const bool use_batch_per_txn =
        txn_db_options.write_policy == WRITE_COMMITTED ||
        txn_db_options.write_policy == WRITE_PREPARED;
    Status s = DBImpl::Open(options_copy, dbname, cfs, handles, &root_db,
                            use_seq_per_batch, use_batch_per_txn);
    auto stackable_db = std::make_unique<StackableDB>(root_db);
    if (s.ok()) {
      assert(root_db != nullptr);
      // If WrapStackableDB() returns non-ok, then stackable_db is already
      // deleted within WrapStackableDB().
      s = TransactionDB::WrapStackableDB(stackable_db.release(), txn_db_options,
                                         compaction_enabled_cf_indices,
                                         *handles, &db);
    }
    return s;
  }

  Status OpenWithStackableDB() {
    std::vector<size_t> compaction_enabled_cf_indices;
    std::vector<ColumnFamilyDescriptor> column_families{ColumnFamilyDescriptor(
        kDefaultColumnFamilyName, ColumnFamilyOptions(options))};

    TransactionDB::PrepareWrap(&options, &column_families,
                               &compaction_enabled_cf_indices);
    std::vector<ColumnFamilyHandle*> handles;
    DB* root_db = nullptr;
    Options options_copy(options);
    const bool use_seq_per_batch =
        txn_db_options.write_policy == WRITE_PREPARED ||
        txn_db_options.write_policy == WRITE_UNPREPARED;
    const bool use_batch_per_txn =
        txn_db_options.write_policy == WRITE_COMMITTED ||
        txn_db_options.write_policy == WRITE_PREPARED;
    Status s = DBImpl::Open(options_copy, dbname, column_families, &handles,
                            &root_db, use_seq_per_batch, use_batch_per_txn);
    if (!s.ok()) {
      delete root_db;
      return s;
    }
    StackableDB* stackable_db = new StackableDB(root_db);
    assert(root_db != nullptr);
    assert(handles.size() == 1);
    s = TransactionDB::WrapStackableDB(stackable_db, txn_db_options,
                                       compaction_enabled_cf_indices, handles,
                                       &db);
    delete handles[0];
    if (!s.ok()) {
      delete stackable_db;
    }
    return s;
  }

  std::atomic<size_t> linked = {0};
  std::atomic<size_t> exp_seq = {0};
  std::atomic<size_t> commit_writes = {0};
  std::atomic<size_t> expected_commits = {0};
  // Without Prepare, the commit does not write to WAL
  std::atomic<size_t> with_empty_commits = {0};
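  // The txn_t0..txn_t4 helpers below issue writes through different paths
  // (plain Put, WriteBatch, commit without prepare, 2PC commit, 2PC rollback)
  // and advance exp_seq by the number of sequence numbers each path is
  // expected to consume under the current write policy. Tests typically
  // compare exp_seq against the DB's latest sequence number afterwards.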
  std::function<void(size_t, Status)> txn_t0_with_status = [&](size_t index,
                                                                Status exp_s) {
    // Test DB's internal txn. It involves no prepare phase nor a commit marker.
    WriteOptions wopts;
    auto s = db->Put(wopts, "key" + std::to_string(index), "value");
    ASSERT_EQ(exp_s, s);
    if (txn_db_options.write_policy == TxnDBWritePolicy::WRITE_COMMITTED) {
      // Consume one seq per key
      exp_seq++;
    } else {
      // Consume one seq per batch
      exp_seq++;
      if (options.two_write_queues) {
        // Consume one seq for commit
        exp_seq++;
      }
    }
    with_empty_commits++;
  };
  std::function<void(size_t)> txn_t0 = [&](size_t index) {
    return txn_t0_with_status(index, Status::OK());
  };
  std::function<void(size_t)> txn_t1 = [&](size_t index) {
    // Testing directly writing a write batch. Functionality-wise it is
    // equivalent to commit without prepare.
    WriteBatch wb;
    auto istr = std::to_string(index);
    ASSERT_OK(wb.Put("k1" + istr, "v1"));
    ASSERT_OK(wb.Put("k2" + istr, "v2"));
    ASSERT_OK(wb.Put("k3" + istr, "v3"));
    WriteOptions wopts;
    auto s = db->Write(wopts, &wb);
    if (txn_db_options.write_policy == TxnDBWritePolicy::WRITE_COMMITTED) {
      // Consume one seq per key
      exp_seq += 3;
    } else {
      // Consume one seq per batch
      exp_seq++;
      if (options.two_write_queues) {
        // Consume one seq for commit
        exp_seq++;
      }
    }
    ASSERT_OK(s);
    with_empty_commits++;
  };
  std::function<void(size_t)> txn_t2 = [&](size_t index) {
    // Commit without prepare. It should write to DB without a commit marker.
    TransactionOptions txn_options;
    WriteOptions write_options;
    Transaction* txn = db->BeginTransaction(write_options, txn_options);
    auto istr = std::to_string(index);
    ASSERT_OK(txn->SetName("xid" + istr));
    ASSERT_OK(txn->Put(Slice("foo" + istr), Slice("bar")));
    ASSERT_OK(txn->Put(Slice("foo2" + istr), Slice("bar2")));
    ASSERT_OK(txn->Put(Slice("foo3" + istr), Slice("bar3")));
    ASSERT_OK(txn->Put(Slice("foo4" + istr), Slice("bar4")));
    ASSERT_OK(txn->Commit());
    if (txn_db_options.write_policy == TxnDBWritePolicy::WRITE_COMMITTED) {
      // Consume one seq per key
      exp_seq += 4;
    } else if (txn_db_options.write_policy ==
               TxnDBWritePolicy::WRITE_PREPARED) {
      // Consume one seq per batch
      exp_seq++;
      if (options.two_write_queues) {
        // Consume one seq for commit
        exp_seq++;
      }
    } else {
      // Flushed after each key, consume one seq per flushed batch
      exp_seq += 4;
      // WriteUnprepared implements CommitWithoutPrepareInternal by simply
      // calling Prepare then Commit. Consume one seq for the prepare.
      exp_seq++;
    }
    delete txn;
    with_empty_commits++;
  };
  std::function<void(size_t)> txn_t3 = [&](size_t index) {
    // A full 2pc txn that also involves a commit marker.
    TransactionOptions txn_options;
    WriteOptions write_options;
    Transaction* txn = db->BeginTransaction(write_options, txn_options);
    auto istr = std::to_string(index);
    ASSERT_OK(txn->SetName("xid" + istr));
    ASSERT_OK(txn->Put(Slice("foo" + istr), Slice("bar")));
    ASSERT_OK(txn->Put(Slice("foo2" + istr), Slice("bar2")));
    ASSERT_OK(txn->Put(Slice("foo3" + istr), Slice("bar3")));
    ASSERT_OK(txn->Put(Slice("foo4" + istr), Slice("bar4")));
    ASSERT_OK(txn->Put(Slice("foo5" + istr), Slice("bar5")));
    expected_commits++;
    ASSERT_OK(txn->Prepare());
    commit_writes++;
    ASSERT_OK(txn->Commit());
    if (txn_db_options.write_policy == TxnDBWritePolicy::WRITE_COMMITTED) {
      // Consume one seq per key
      exp_seq += 5;
    } else if (txn_db_options.write_policy ==
               TxnDBWritePolicy::WRITE_PREPARED) {
      // Consume one seq per batch
      exp_seq++;
      // Consume one seq per commit marker
      exp_seq++;
    } else {
      // Flushed after each key, consume one seq per flushed batch
      exp_seq += 5;
      // Consume one seq per commit marker
      exp_seq++;
    }
    delete txn;
  };
  std::function<void(size_t)> txn_t4 = [&](size_t index) {
    // A full 2pc txn that is rolled back after the prepare phase, exercising
    // the rollback path.
    TransactionOptions txn_options;
    WriteOptions write_options;
    Transaction* txn = db->BeginTransaction(write_options, txn_options);
    auto istr = std::to_string(index);
    ASSERT_OK(txn->SetName("xid" + istr));
    ASSERT_OK(txn->Put(Slice("foo" + istr), Slice("bar")));
    ASSERT_OK(txn->Put(Slice("foo2" + istr), Slice("bar2")));
    ASSERT_OK(txn->Put(Slice("foo3" + istr), Slice("bar3")));
    ASSERT_OK(txn->Put(Slice("foo4" + istr), Slice("bar4")));
    ASSERT_OK(txn->Put(Slice("foo5" + istr), Slice("bar5")));
    expected_commits++;
    ASSERT_OK(txn->Prepare());
    commit_writes++;
    ASSERT_OK(txn->Rollback());
    if (txn_db_options.write_policy == TxnDBWritePolicy::WRITE_COMMITTED) {
      // No seq is consumed for deleting the txn buffer
      exp_seq += 0;
    } else if (txn_db_options.write_policy ==
               TxnDBWritePolicy::WRITE_PREPARED) {
      // Consume one seq per batch
      exp_seq++;
      // Consume one seq per rollback batch
      exp_seq++;
      if (options.two_write_queues) {
        // Consume one seq for rollback commit
        exp_seq++;
      }
    } else {
      // Flushed after each key, consume one seq per flushed batch
      exp_seq += 5;
      // Consume one seq per rollback batch
      exp_seq++;
      if (options.two_write_queues) {
        // Consume one seq for rollback commit
        exp_seq++;
      }
    }
    delete txn;
  };

  // Test that we can change write policy after a clean shutdown (which would
  // empty the WAL)
  void CrossCompatibilityTest(TxnDBWritePolicy from_policy,
                              TxnDBWritePolicy to_policy, bool empty_wal) {
    TransactionOptions txn_options;
    ReadOptions read_options;
    WriteOptions write_options;
    uint32_t index = 0;
    Random rnd(1103);
    options.write_buffer_size = 1024;  // To create more sst files
    std::unordered_map<std::string, std::string> committed_kvs;
    Transaction* txn;

    txn_db_options.write_policy = from_policy;
    if (txn_db_options.write_policy == WRITE_COMMITTED) {
      options.unordered_write = false;
    }
    ASSERT_OK(ReOpen());

    for (int i = 0; i < 1024; i++) {
      auto istr = std::to_string(index);
      auto k = Slice("foo-" + istr).ToString();
      auto v = Slice("bar-" + istr).ToString();
      // For testing duplicate keys
      auto v2 = Slice("bar2-" + istr).ToString();
      auto type = rnd.Uniform(4);
      switch (type) {
        case 0:
          committed_kvs[k] = v;
          ASSERT_OK(db->Put(write_options, k, v));
          committed_kvs[k] = v2;
          ASSERT_OK(db->Put(write_options, k, v2));
          break;
        case 1: {
          WriteBatch wb;
          committed_kvs[k] = v;
          ASSERT_OK(wb.Put(k, v));
          committed_kvs[k] = v2;
          ASSERT_OK(wb.Put(k, v2));
          ASSERT_OK(db->Write(write_options, &wb));

        } break;
        case 2:
        case 3:
          txn = db->BeginTransaction(write_options, txn_options);
          ASSERT_OK(txn->SetName("xid" + istr));
          committed_kvs[k] = v;
          ASSERT_OK(txn->Put(k, v));
          committed_kvs[k] = v2;
          ASSERT_OK(txn->Put(k, v2));

          if (type == 3) {
            ASSERT_OK(txn->Prepare());
          }
          ASSERT_OK(txn->Commit());
          delete txn;
          break;
        default:
          FAIL();
      }

      index++;
    }  // for i

    txn_db_options.write_policy = to_policy;
    if (txn_db_options.write_policy == WRITE_COMMITTED) {
      options.unordered_write = false;
    }
    auto db_impl = static_cast_with_check<DBImpl>(db->GetRootDB());
    // Before upgrade/downgrade the WAL must be emptied
    if (empty_wal) {
      ASSERT_OK(db_impl->TEST_FlushMemTable());
    } else {
      ASSERT_OK(db_impl->FlushWAL(true));
    }
    auto s = ReOpenNoDelete();
    if (empty_wal) {
      ASSERT_OK(s);
    } else {
      // Test that we can detect the WAL that is produced by an incompatible
      // WritePolicy and fail fast before mis-interpreting the WAL.
      ASSERT_TRUE(s.IsNotSupported());
      return;
    }
    db_impl = static_cast_with_check<DBImpl>(db->GetRootDB());
    // Check that WAL is empty
    VectorLogPtr log_files;
    ASSERT_OK(db_impl->GetSortedWalFiles(log_files));
    ASSERT_EQ(0, log_files.size());

    for (auto& kv : committed_kvs) {
      std::string value;
      s = db->Get(read_options, kv.first, &value);
      if (s.IsNotFound()) {
        printf("key = %s\n", kv.first.c_str());
      }
      ASSERT_OK(s);
      if (kv.second != value) {
        printf("key = %s\n", kv.first.c_str());
      }
      ASSERT_EQ(kv.second, value);
    }
  }
};

class TransactionTest
    : public TransactionTestBase,
      virtual public ::testing::WithParamInterface<
          std::tuple<bool, bool, TxnDBWritePolicy, WriteOrdering>> {
 public:
  TransactionTest()
      : TransactionTestBase(std::get<0>(GetParam()), std::get<1>(GetParam()),
                            std::get<2>(GetParam()), std::get<3>(GetParam())) {}
};
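
// The parameterized suites above are instantiated by the individual test
// programs with concrete (use_stackable_db, two_write_queues, write_policy,
// write_ordering) tuples. As a rough illustration only (the real
// instantiations live in the transaction test .cc files and may differ):
//
//   INSTANTIATE_TEST_CASE_P(
//       DBAsBaseDB, TransactionTest,
//       ::testing::Values(
//           std::make_tuple(false, false, WRITE_COMMITTED, kOrderedWrite),
//           std::make_tuple(false, true, WRITE_PREPARED, kOrderedWrite)));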

class TransactionStressTest : public TransactionTest {};

class MySQLStyleTransactionTest
    : public TransactionTestBase,
      virtual public ::testing::WithParamInterface<
          std::tuple<bool, bool, TxnDBWritePolicy, WriteOrdering, bool>> {
 public:
  MySQLStyleTransactionTest()
      : TransactionTestBase(std::get<0>(GetParam()), std::get<1>(GetParam()),
                            std::get<2>(GetParam()), std::get<3>(GetParam())),
        with_slow_threads_(std::get<4>(GetParam())) {
    if (with_slow_threads_ &&
        (txn_db_options.write_policy == WRITE_PREPARED ||
         txn_db_options.write_policy == WRITE_UNPREPARED)) {
      // The corner case with slow threads involves the caches filling up,
      // which would not happen even with artificial delays. To help such
      // cases show up we lower the size of the cache-related data structures.
      txn_db_options.wp_snapshot_cache_bits = 1;
      txn_db_options.wp_commit_cache_bits = 10;
      options.write_buffer_size = 1024;
      EXPECT_OK(ReOpen());
    }
  }

 protected:
  // Also emulate slow threads by adding artificial delays
  const bool with_slow_threads_;
};

class WriteCommittedTxnWithTsTest
    : public TransactionTestBase,
      public ::testing::WithParamInterface<std::tuple<bool, bool, bool>> {
 public:
  WriteCommittedTxnWithTsTest()
      : TransactionTestBase(std::get<0>(GetParam()), std::get<1>(GetParam()),
                            WRITE_COMMITTED, kOrderedWrite) {}
  ~WriteCommittedTxnWithTsTest() override {
    for (auto* h : handles_) {
      delete h;
    }
  }

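  // Read `key` as of user-defined timestamp `ts`: the timestamp is encoded
  // with PutFixed64() and attached to ReadOptions::timestamp before the Get.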
  Status GetFromDb(ReadOptions read_opts, ColumnFamilyHandle* column_family,
                   const Slice& key, TxnTimestamp ts, std::string* value) {
    std::string ts_buf;
    PutFixed64(&ts_buf, ts);
    Slice ts_slc = ts_buf;
    read_opts.timestamp = &ts_slc;
    assert(db);
    return db->Get(read_opts, column_family, key, value);
  }

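  // Begin a transaction and enable or disable indexing of its writes based on
  // the third test parameter (enable_indexing).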
  Transaction* NewTxn(WriteOptions write_opts, TransactionOptions txn_opts) {
    assert(db);
    auto* txn = db->BeginTransaction(write_opts, txn_opts);
    assert(txn);
    const bool enable_indexing = std::get<2>(GetParam());
    if (enable_indexing) {
      txn->EnableIndexing();
    } else {
      txn->DisableIndexing();
    }
    return txn;
  }

 protected:
  std::vector<ColumnFamilyHandle*> handles_{};
};

}  // namespace ROCKSDB_NAMESPACE