// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#pragma once

#ifndef ROCKSDB_LITE

#include <set>

#include "utilities/transactions/write_prepared_txn.h"
#include "utilities/transactions/write_unprepared_txn_db.h"

namespace rocksdb {

class WriteUnpreparedTxnDB;
class WriteUnpreparedTxn;
class WriteUnpreparedTxnReadCallback : public ReadCallback {
|
|
|
|
public:
|
|
|
|
WriteUnpreparedTxnReadCallback(WritePreparedTxnDB* db,
|
|
|
|
SequenceNumber snapshot,
|
|
|
|
SequenceNumber min_uncommitted,
|
|
|
|
WriteUnpreparedTxn* txn)
|
2019-04-02 23:43:03 +02:00
|
|
|
// Pass our last uncommitted seq as the snapshot to the parent class to
|
|
|
|
// ensure that the parent will not prematurely filter out own writes. We
|
|
|
|
// will do the exact comparison agaisnt snapshots in IsVisibleFullCheck
|
|
|
|
// override.
|
|
|
|
: ReadCallback(CalcMaxVisibleSeq(txn, snapshot), min_uncommitted),
|
2019-02-27 01:52:20 +01:00
|
|
|
db_(db),
|
|
|
|
txn_(txn),
|
|
|
|
wup_snapshot_(snapshot) {}
|
|
|
|
|
|
|
|
virtual bool IsVisibleFullCheck(SequenceNumber seq) override;
|
2018-06-27 21:05:29 +02:00
|
|
|
|
2019-04-02 23:43:03 +02:00
|
|
|
bool CanReseekToSkip() override {
|
|
|
|
return wup_snapshot_ == max_visible_seq_;
|
|
|
|
// Otherwise our own writes uncommitted are in db, and the assumptions
|
|
|
|
// behind reseek optimizations are no longer valid.
|
|
|
|
}
|
|
|
|
|
2019-04-12 23:36:36 +02:00
|
|
|
void Refresh(SequenceNumber seq) override {
|
|
|
|
max_visible_seq_ = std::max(max_visible_seq_, seq);
|
|
|
|
wup_snapshot_ = seq;
|
|
|
|
}
|
|
|
|
|
2018-06-27 21:05:29 +02:00
|
|
|
private:
|
2019-04-04 00:43:34 +02:00
|
|
|
static SequenceNumber CalcMaxVisibleSeq(WriteUnpreparedTxn* txn,
|
|
|
|
SequenceNumber snapshot_seq) {
|
2019-04-02 23:43:03 +02:00
|
|
|
SequenceNumber max_unprepared = CalcMaxUnpreparedSequenceNumber(txn);
|
|
|
|
return std::max(max_unprepared, snapshot_seq);
|
|
|
|
}
|
2019-04-04 00:43:34 +02:00
|
|
|
static SequenceNumber CalcMaxUnpreparedSequenceNumber(
|
|
|
|
WriteUnpreparedTxn* txn);
|
2018-06-27 21:05:29 +02:00
|
|
|
WritePreparedTxnDB* db_;
|
|
|
|
WriteUnpreparedTxn* txn_;
|
2019-02-27 01:52:20 +01:00
|
|
|
SequenceNumber wup_snapshot_;
|
2018-06-27 21:05:29 +02:00
|
|
|
};
class WriteUnpreparedTxn : public WritePreparedTxn {
|
2018-06-27 21:05:29 +02:00
|
|
|
public:
|
|
|
|
WriteUnpreparedTxn(WriteUnpreparedTxnDB* db,
|
|
|
|
const WriteOptions& write_options,
|
|
|
|
const TransactionOptions& txn_options);
|
|
|
|
|
2018-07-24 09:09:18 +02:00
|
|
|
virtual ~WriteUnpreparedTxn();
|
|
|
|
|
|
|
|
using TransactionBaseImpl::Put;
|
|
|
|
virtual Status Put(ColumnFamilyHandle* column_family, const Slice& key,
|
2018-12-07 02:46:57 +01:00
|
|
|
const Slice& value,
|
|
|
|
const bool assume_tracked = false) override;
|
2018-07-24 09:09:18 +02:00
|
|
|
virtual Status Put(ColumnFamilyHandle* column_family, const SliceParts& key,
|
2018-12-07 02:46:57 +01:00
|
|
|
const SliceParts& value,
|
|
|
|
const bool assume_tracked = false) override;
|
2018-07-24 09:09:18 +02:00
|
|
|
|
|
|
|
using TransactionBaseImpl::Merge;
|
|
|
|
virtual Status Merge(ColumnFamilyHandle* column_family, const Slice& key,
|
2018-12-07 02:46:57 +01:00
|
|
|
const Slice& value,
|
|
|
|
const bool assume_tracked = false) override;
|
2018-07-24 09:09:18 +02:00
|
|
|
|
|
|
|
using TransactionBaseImpl::Delete;
|
2018-12-07 02:46:57 +01:00
|
|
|
virtual Status Delete(ColumnFamilyHandle* column_family, const Slice& key,
|
|
|
|
const bool assume_tracked = false) override;
|
2018-07-24 09:09:18 +02:00
|
|
|
virtual Status Delete(ColumnFamilyHandle* column_family,
|
2018-12-07 02:46:57 +01:00
|
|
|
const SliceParts& key,
|
|
|
|
const bool assume_tracked = false) override;
|
2018-07-24 09:09:18 +02:00
|
|
|
|
|
|
|
using TransactionBaseImpl::SingleDelete;
|
|
|
|
virtual Status SingleDelete(ColumnFamilyHandle* column_family,
|
2018-12-07 02:46:57 +01:00
|
|
|
const Slice& key,
|
|
|
|
const bool assume_tracked = false) override;
|
2018-07-24 09:09:18 +02:00
|
|
|
virtual Status SingleDelete(ColumnFamilyHandle* column_family,
|
2018-12-07 02:46:57 +01:00
|
|
|
const SliceParts& key,
|
|
|
|
const bool assume_tracked = false) override;
|
2018-07-24 09:09:18 +02:00
|
|
|
|
|
|
|
virtual Status RebuildFromWriteBatch(WriteBatch*) override {
|
|
|
|
// This function was only useful for recovering prepared transactions, but
|
|
|
|
// is unused for write prepared because a transaction may consist of
|
|
|
|
// multiple write batches.
|
|
|
|
//
|
|
|
|
// If there are use cases outside of recovery that can make use of this,
|
|
|
|
// then support could be added.
|
|
|
|
return Status::NotSupported("Not supported for WriteUnprepared");
|
|
|
|
}
|
|
|
|
|
|
|
|
const std::map<SequenceNumber, size_t>& GetUnpreparedSequenceNumbers();
|
|
|
|
|
|
|
|
void UpdateWriteKeySet(uint32_t cfid, const Slice& key);
|
|
|
|
|
|
|
|
protected:
|
|
|
|
void Initialize(const TransactionOptions& txn_options) override;
|
|
|
|
|
|
|
|
Status PrepareInternal() override;
|
|
|
|
|
|
|
|
Status CommitWithoutPrepareInternal() override;
|
|
|
|
Status CommitInternal() override;
|
|
|
|
|
|
|
|
Status RollbackInternal() override;
|
2018-06-27 21:05:29 +02:00
|
|
|
|
|
|
|
// Get and GetIterator needs to be overridden so that a ReadCallback to
|
|
|
|
// handle read-your-own-write is used.
|
|
|
|
using Transaction::Get;
|
|
|
|
virtual Status Get(const ReadOptions& options,
|
|
|
|
ColumnFamilyHandle* column_family, const Slice& key,
|
|
|
|
PinnableSlice* value) override;
|
|
|
|
|
|
|
|
using Transaction::GetIterator;
|
|
|
|
virtual Iterator* GetIterator(const ReadOptions& options) override;
|
|
|
|
virtual Iterator* GetIterator(const ReadOptions& options,
|
|
|
|
ColumnFamilyHandle* column_family) override;
|
|
|
|
|
|
|
|
private:
|
|
|
|
friend class WriteUnpreparedTransactionTest_ReadYourOwnWrite_Test;
|
2018-07-24 09:09:18 +02:00
|
|
|
friend class WriteUnpreparedTransactionTest_RecoveryTest_Test;
|
|
|
|
friend class WriteUnpreparedTransactionTest_UnpreparedBatch_Test;
|
2018-07-07 02:17:36 +02:00
|
|
|
friend class WriteUnpreparedTxnDB;
|
2018-06-27 21:05:29 +02:00
|
|
|
|
2018-07-24 09:09:18 +02:00
|
|
|
Status MaybeFlushWriteBatchToDB();
|
|
|
|
Status FlushWriteBatchToDB(bool prepared);
|
|
|
|
|
|
|
|
// For write unprepared, we check on every writebatch append to see if
|
|
|
|
// max_write_batch_size_ has been exceeded, and then call
|
|
|
|
// FlushWriteBatchToDB if so. This logic is encapsulated in
|
|
|
|
// MaybeFlushWriteBatchToDB.
|
|
|
|
size_t max_write_batch_size_;
|
2018-06-27 21:05:29 +02:00
|
|
|
WriteUnpreparedTxnDB* wupt_db_;
|
2018-05-31 19:42:44 +02:00
|
|
|
|
2018-06-27 21:05:29 +02:00
|
|
|
// Ordered list of unprep_seq sequence numbers that we have already written
|
|
|
|
// to DB.
|
|
|
|
//
|
2018-07-24 09:09:18 +02:00
|
|
|
// This maps unprep_seq => prepare_batch_cnt for each unprepared batch
|
|
|
|
// written by this transaction.
|
|
|
|
//
|
|
|
|
// Note that this contains both prepared and unprepared batches, since they
|
|
|
|
// are treated similarily in prepare heap/commit map, so it simplifies the
|
|
|
|
// commit callbacks.
|
2018-06-27 21:05:29 +02:00
|
|
|
std::map<SequenceNumber, size_t> unprep_seqs_;
|
2018-07-24 09:09:18 +02:00
|
|
|
|
|
|
|
// Set of keys that have written to that have already been written to DB
|
|
|
|
// (ie. not in write_batch_).
|
|
|
|
//
|
|
|
|
std::map<uint32_t, std::vector<std::string>> write_set_keys_;
|
2018-05-31 19:42:44 +02:00
|
|
|
};
}  // namespace rocksdb

#endif  // ROCKSDB_LITE