e5a1b727c0
Summary: During GC, blob DB uses an optimistic transaction to delete or replace the index entry in the LSM, to guarantee correctness if there's a normal write writing to the same key. However, the previous implementation doesn't call SetSnapshot() nor use the GetForUpdate() transaction API; instead it does its own sequence number checking before beginning the transaction. A normal write can sneak in after the sequence number check and overwrite the key, and the GC will delete or relocate the old version of the key by mistake. Update the code to properly use GetForUpdate() to check the existing index entry. After this patch the sequence number stored with each blob record is useless, so I'm considering removing the sequence number from the blob record in another patch. Closes https://github.com/facebook/rocksdb/pull/2703 Differential Revision: D5589178 Pulled By: yiwu-arbug fbshipit-source-id: 8dc960cd5f4e61b36024ba7c32d05584ce149c24
96 lines
2.7 KiB
C++
96 lines
2.7 KiB
C++
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
|
// This source code is licensed under both the GPLv2 (found in the
|
|
// COPYING file in the root directory) and Apache 2.0 License
|
|
// (found in the LICENSE.Apache file in the root directory).
|
|
//
|
|
#pragma once
|
|
|
|
#ifndef ROCKSDB_LITE
|
|
|
|
#include <cstdint>
|
|
#include <memory>
|
|
#include <string>
|
|
|
|
#include "rocksdb/options.h"
|
|
#include "rocksdb/slice.h"
|
|
#include "rocksdb/status.h"
|
|
#include "utilities/blob_db/blob_log_format.h"
|
|
|
|
namespace rocksdb {
|
|
|
|
class SequentialFileReader;
|
|
class Logger;
|
|
|
|
namespace blob_db {
|
|
|
|
/**
|
|
* Reader is a general purpose log stream reader implementation. The actual job
|
|
* of reading from the device is implemented by the SequentialFile interface.
|
|
*
|
|
* Please see Writer for details on the file and record layout.
|
|
*/
|
|
class Reader {
|
|
public:
|
|
enum ReadLevel {
|
|
kReadHdrFooter,
|
|
kReadHdrKeyFooter,
|
|
kReadHdrKeyBlobFooter,
|
|
};
|
|
|
|
// Create a reader that will return log records from "*file".
|
|
// "*file" must remain live while this Reader is in use.
|
|
//
|
|
// If "reporter" is non-nullptr, it is notified whenever some data is
|
|
// dropped due to a detected corruption. "*reporter" must remain
|
|
// live while this Reader is in use.
|
|
//
|
|
// If "checksum" is true, verify checksums if available.
|
|
//
|
|
// The Reader will start reading at the first record located at physical
|
|
// position >= initial_offset within the file.
|
|
Reader(std::shared_ptr<Logger> info_log,
|
|
std::unique_ptr<SequentialFileReader>&& file);
|
|
|
|
~Reader();
|
|
|
|
Status ReadHeader(BlobLogHeader* header);
|
|
|
|
// Read the next record into *record. Returns true if read
|
|
// successfully, false if we hit end of the input. May use
|
|
// "*scratch" as temporary storage. The contents filled in *record
|
|
// will only be valid until the next mutating operation on this
|
|
// reader or the next mutation to *scratch.
|
|
// If blob_offset is non-null, return offset of the blob through it.
|
|
Status ReadRecord(BlobLogRecord* record, ReadLevel level = kReadHdrFooter,
|
|
uint64_t* blob_offset = nullptr);
|
|
|
|
SequentialFileReader* file() { return file_.get(); }
|
|
|
|
void ResetNextByte() { next_byte_ = 0; }
|
|
|
|
uint64_t GetNextByte() const { return next_byte_; }
|
|
|
|
const SequentialFileReader* file_reader() const { return file_.get(); }
|
|
|
|
private:
|
|
char* GetReadBuffer() { return &(backing_store_[0]); }
|
|
|
|
private:
|
|
std::shared_ptr<Logger> info_log_;
|
|
const std::unique_ptr<SequentialFileReader> file_;
|
|
|
|
std::string backing_store_;
|
|
Slice buffer_;
|
|
|
|
// which byte to read next. For asserting proper usage
|
|
uint64_t next_byte_;
|
|
|
|
// No copying allowed
|
|
Reader(const Reader&) = delete;
|
|
Reader& operator=(const Reader&) = delete;
|
|
};
|
|
|
|
} // namespace blob_db
|
|
} // namespace rocksdb
|
|
#endif // ROCKSDB_LITE
|