Run automatic formatter against public header files (#5115)
Summary: Automatically format public headers so they look more consistent.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5115
Differential Revision: D14632854
Pulled By: siying
fbshipit-source-id: ce9929ea62f9dcd65c69660b23eed1931cb0ae84
parent 5f6adf3f6a
commit 1f7f5a5a79
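The PR text does not say which tool or command produced the reformatting. As a hedged illustration only, a pass like this could be reproduced by running clang-format in place over the public headers (RocksDB carries a .clang-format configuration at the repository root); the directory path, glob, and invocation below are assumptions for the sketch, not taken from the PR.

    # Hypothetical sketch: reformat every public header in place with clang-format.
    # Assumes clang-format is installed and picks up the repo's .clang-format
    # automatically from the working tree.
    import pathlib
    import subprocess

    headers = sorted(pathlib.Path("include/rocksdb").rglob("*.h"))
    for header in headers:
        subprocess.run(["clang-format", "-i", str(header)], check=True)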
@@ -189,7 +189,7 @@ class CompactionFilter {
// application to know about different compactions
class CompactionFilterFactory {
public:
virtual ~CompactionFilterFactory() { }
virtual ~CompactionFilterFactory() {}
virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
const CompactionFilter::Context& context) = 0;
@@ -55,9 +55,8 @@ class Comparator {
// If *start < limit, changes *start to a short string in [start,limit).
// Simple comparator implementations may return with *start unchanged,
// i.e., an implementation of this method that does nothing is correct.
virtual void FindShortestSeparator(
std::string* start,
const Slice& limit) const = 0;
virtual void FindShortestSeparator(std::string* start,
const Slice& limit) const = 0;
// Changes *key to a short string >= *key.
// Simple comparator implementations may return with *key unchanged,
@@ -16,7 +16,6 @@ namespace rocksdb {
class ConcurrentTaskLimiter {
public:
virtual ~ConcurrentTaskLimiter() {}
// Returns a name that identifies this concurrent task limiter.
@@ -41,7 +40,7 @@ class ConcurrentTaskLimiter {
// @param limit: max concurrent tasks.
// limit = 0 means no new task allowed.
// limit < 0 means no limitation.
extern ConcurrentTaskLimiter* NewConcurrentTaskLimiter(
const std::string& name, int32_t limit);
extern ConcurrentTaskLimiter* NewConcurrentTaskLimiter(const std::string& name,
int32_t limit);
} // namespace rocksdb
@@ -277,15 +277,13 @@ Status GetPlainTableOptionsFromMap(
// BlockBasedTableOptions as part of the string for block-based table factory:
// "write_buffer_size=1024;block_based_table_factory={block_size=4k};"
// "max_write_buffer_num=2"
Status GetColumnFamilyOptionsFromString(
const ColumnFamilyOptions& base_options,
const std::string& opts_str,
ColumnFamilyOptions* new_options);
Status GetColumnFamilyOptionsFromString(const ColumnFamilyOptions& base_options,
const std::string& opts_str,
ColumnFamilyOptions* new_options);
Status GetDBOptionsFromString(
const DBOptions& base_options,
const std::string& opts_str,
DBOptions* new_options);
Status GetDBOptionsFromString(const DBOptions& base_options,
const std::string& opts_str,
DBOptions* new_options);
Status GetStringFromDBOptions(std::string* opts_str,
const DBOptions& db_options,
@@ -301,14 +299,12 @@ Status GetStringFromCompressionType(std::string* compression_str,
std::vector<CompressionType> GetSupportedCompressions();
Status GetBlockBasedTableOptionsFromString(
const BlockBasedTableOptions& table_options,
const std::string& opts_str,
const BlockBasedTableOptions& table_options, const std::string& opts_str,
BlockBasedTableOptions* new_table_options);
Status GetPlainTableOptionsFromString(
const PlainTableOptions& table_options,
const std::string& opts_str,
PlainTableOptions* new_table_options);
Status GetPlainTableOptionsFromString(const PlainTableOptions& table_options,
const std::string& opts_str,
PlainTableOptions* new_table_options);
Status GetMemTableRepFactoryFromString(
const std::string& opts_str,
@@ -97,16 +97,16 @@ struct Range {
Slice start;
Slice limit;
Range() { }
Range(const Slice& s, const Slice& l) : start(s), limit(l) { }
Range() {}
Range(const Slice& s, const Slice& l) : start(s), limit(l) {}
};
struct RangePtr {
const Slice* start;
const Slice* limit;
RangePtr() : start(nullptr), limit(nullptr) { }
RangePtr(const Slice* s, const Slice* l) : start(s), limit(l) { }
RangePtr() : start(nullptr), limit(nullptr) {}
RangePtr(const Slice* s, const Slice* l) : start(s), limit(l) {}
};
struct IngestExternalFileArg {
@@ -131,8 +131,7 @@ class DB {
// OK on success.
// Stores nullptr in *dbptr and returns a non-OK status on error.
// Caller should delete *dbptr when it is no longer needed.
static Status Open(const Options& options,
const std::string& name,
static Status Open(const Options& options, const std::string& name,
DB** dbptr);
// Open the database for read only. All DB interfaces
@@ -142,9 +141,9 @@ class DB {
//
// Not supported in ROCKSDB_LITE, in which case the function will
// return Status::NotSupported.
static Status OpenForReadOnly(const Options& options,
const std::string& name, DB** dbptr,
bool error_if_log_file_exist = false);
static Status OpenForReadOnly(const Options& options, const std::string& name,
DB** dbptr,
bool error_if_log_file_exist = false);
// Open the database for read only with column families. When opening DB with
// read only, you can specify only a subset of column families in the
@@ -246,7 +245,7 @@ class DB {
const std::string& name,
std::vector<std::string>* column_families);
DB() { }
DB() {}
virtual ~DB();
// Create a column_family and return the handle of column family
@@ -394,7 +393,8 @@ class DB {
virtual Status Get(const ReadOptions& options,
ColumnFamilyHandle* column_family, const Slice& key,
PinnableSlice* value) = 0;
virtual Status Get(const ReadOptions& options, const Slice& key, std::string* value) {
virtual Status Get(const ReadOptions& options, const Slice& key,
std::string* value) {
return Get(options, DefaultColumnFamily(), key, value);
}
@@ -415,9 +415,10 @@ class DB {
virtual std::vector<Status> MultiGet(const ReadOptions& options,
const std::vector<Slice>& keys,
std::vector<std::string>* values) {
return MultiGet(options, std::vector<ColumnFamilyHandle*>(
keys.size(), DefaultColumnFamily()),
keys, values);
return MultiGet(
options,
std::vector<ColumnFamilyHandle*>(keys.size(), DefaultColumnFamily()),
keys, values);
}
// If the key definitely does not exist in the database, then this method
@@ -779,13 +780,10 @@ class DB {
// include_flags should be of type DB::SizeApproximationFlags
virtual void GetApproximateSizes(ColumnFamilyHandle* column_family,
const Range* range, int n, uint64_t* sizes,
uint8_t include_flags
= INCLUDE_FILES) = 0;
uint8_t include_flags = INCLUDE_FILES) = 0;
virtual void GetApproximateSizes(const Range* range, int n, uint64_t* sizes,
uint8_t include_flags
= INCLUDE_FILES) {
GetApproximateSizes(DefaultColumnFamily(), range, n, sizes,
include_flags);
uint8_t include_flags = INCLUDE_FILES) {
GetApproximateSizes(DefaultColumnFamily(), range, n, sizes, include_flags);
}
// The method is similar to GetApproximateSizes, except it
@@ -802,8 +800,7 @@ class DB {
// Deprecated versions of GetApproximateSizes
ROCKSDB_DEPRECATED_FUNC virtual void GetApproximateSizes(
const Range* range, int n, uint64_t* sizes,
bool include_memtable) {
const Range* range, int n, uint64_t* sizes, bool include_memtable) {
uint8_t include_flags = SizeApproximationFlags::INCLUDE_FILES;
if (include_memtable) {
include_flags |= SizeApproximationFlags::INCLUDE_MEMTABLES;
@@ -811,9 +808,8 @@ class DB {
GetApproximateSizes(DefaultColumnFamily(), range, n, sizes, include_flags);
}
ROCKSDB_DEPRECATED_FUNC virtual void GetApproximateSizes(
ColumnFamilyHandle* column_family,
const Range* range, int n, uint64_t* sizes,
bool include_memtable) {
ColumnFamilyHandle* column_family, const Range* range, int n,
uint64_t* sizes, bool include_memtable) {
uint8_t include_flags = SizeApproximationFlags::INCLUDE_FILES;
if (include_memtable) {
include_flags |= SizeApproximationFlags::INCLUDE_MEMTABLES;
@@ -1073,8 +1069,7 @@ class DB {
ColumnFamilyMetaData* /*metadata*/) {}
// Get the metadata of the default column family.
void GetColumnFamilyMetaData(
ColumnFamilyMetaData* metadata) {
void GetColumnFamilyMetaData(ColumnFamilyMetaData* metadata) {
GetColumnFamilyMetaData(DefaultColumnFamily(), metadata);
}
@@ -1275,8 +1270,8 @@ class DB {
// Given a time window, return an iterator for accessing stats history
// User is responsible for deleting StatsHistoryIterator after use
virtual Status GetStatsHistory(uint64_t /*start_time*/,
uint64_t /*end_time*/,
virtual Status GetStatsHistory(
uint64_t /*start_time*/, uint64_t /*end_time*/,
std::unique_ptr<StatsHistoryIterator>* /*stats_iterator*/) {
return Status::NotSupported("GetStatsHistory() is not implemented.");
}
@@ -1308,7 +1303,7 @@ class DB {
// Be very careful using this method.
Status DestroyDB(const std::string& name, const Options& options,
const std::vector<ColumnFamilyDescriptor>& column_families =
std::vector<ColumnFamilyDescriptor>());
std::vector<ColumnFamilyDescriptor>());
#ifndef ROCKSDB_LITE
// If a DB cannot be opened, you may attempt to call this method to
@@ -54,17 +54,16 @@ const size_t kDefaultPageSize = 4 * 1024;
// Options while opening a file to read/write
struct EnvOptions {
// Construct with default Options
EnvOptions();
// Construct from Options
explicit EnvOptions(const DBOptions& options);
// If true, then use mmap to read data
// If true, then use mmap to read data
bool use_mmap_reads = false;
// If true, then use mmap to write data
// If true, then use mmap to write data
bool use_mmap_writes = true;
// If true, then use O_DIRECT for reading data
@@ -150,12 +149,12 @@ class Env {
// These values match Linux definition
// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/include/uapi/linux/fcntl.h#n56
enum WriteLifeTimeHint {
WLTH_NOT_SET = 0, // No hint information set
WLTH_NONE, // No hints about write life time
WLTH_SHORT, // Data written has a short life time
WLTH_MEDIUM, // Data written has a medium life time
WLTH_LONG, // Data written has a long life time
WLTH_EXTREME, // Data written has an extremely long life time
WLTH_NOT_SET = 0, // No hint information set
WLTH_NONE, // No hints about write life time
WLTH_SHORT, // Data written has a short life time
WLTH_MEDIUM, // Data written has a medium life time
WLTH_LONG, // Data written has a long life time
WLTH_EXTREME, // Data written has an extremely long life time
};
// Create an object that writes to a new file with the specified
@@ -321,11 +320,7 @@ class Env {
static std::string PriorityToString(Priority priority);
// Priority for requesting bytes in rate limiter scheduler
enum IOPriority {
IO_LOW = 0,
IO_HIGH = 1,
IO_TOTAL = 2
};
enum IOPriority { IO_LOW = 0, IO_HIGH = 1, IO_TOTAL = 2 };
// Arrange to run "(*function)(arg)" once in a background thread, in
// the thread pool specified by pri. By default, jobs go to the 'LOW'
@@ -377,9 +372,7 @@ class Env {
// Default implementation simply relies on NowMicros.
// In platform-specific implementations, NowNanos() should return time points
// that are MONOTONIC.
virtual uint64_t NowNanos() {
return NowMicros() * 1000;
}
virtual uint64_t NowNanos() { return NowMicros() * 1000; }
// 0 indicates not supported.
virtual uint64_t NowCPUNanos() { return 0; }
@@ -396,7 +389,7 @@ class Env {
// Get full directory name for this db.
virtual Status GetAbsolutePath(const std::string& db_path,
std::string* output_path) = 0;
std::string* output_path) = 0;
// The number of background worker threads of a specific thread pool
// for this environment. 'LOW' is the default pool.
@@ -503,7 +496,7 @@ ThreadStatusUpdater* CreateThreadStatusUpdater();
// A file abstraction for reading sequentially through a file
class SequentialFile {
public:
SequentialFile() { }
SequentialFile() {}
virtual ~SequentialFile();
// Read up to "n" bytes from the file. "scratch[0..n-1]" may be
@@ -551,8 +544,7 @@ class SequentialFile {
// A file abstraction for randomly reading the contents of a file.
class RandomAccessFile {
public:
RandomAccessFile() { }
RandomAccessFile() {}
virtual ~RandomAccessFile();
// Read up to "n" bytes from the file starting at "offset".
@@ -589,8 +581,8 @@ class RandomAccessFile {
//
// Note: these IDs are only valid for the duration of the process.
virtual size_t GetUniqueId(char* /*id*/, size_t /*max_size*/) const {
return 0; // Default implementation to prevent issues with backwards
// compatibility.
return 0; // Default implementation to prevent issues with backwards
// compatibility.
};
enum AccessPattern { NORMAL, RANDOM, SEQUENTIAL, WILLNEED, DONTNEED };
@@ -619,11 +611,10 @@ class RandomAccessFile {
class WritableFile {
public:
WritableFile()
: last_preallocated_block_(0),
preallocation_block_size_(0),
io_priority_(Env::IO_TOTAL),
write_hint_(Env::WLTH_NOT_SET) {
}
: last_preallocated_block_(0),
preallocation_block_size_(0),
io_priority_(Env::IO_TOTAL),
write_hint_(Env::WLTH_NOT_SET) {}
virtual ~WritableFile();
// Append data to the end of the file
@@ -651,7 +642,8 @@ class WritableFile {
//
// PositionedAppend() requires aligned buffer to be passed in. The alignment
// required is queried via GetRequiredBufferAlignment()
virtual Status PositionedAppend(const Slice& /* data */, uint64_t /* offset */) {
virtual Status PositionedAppend(const Slice& /* data */,
uint64_t /* offset */) {
return Status::NotSupported();
}
@@ -662,7 +654,7 @@ class WritableFile {
virtual Status Truncate(uint64_t /*size*/) { return Status::OK(); }
virtual Status Close() = 0;
virtual Status Flush() = 0;
virtual Status Sync() = 0; // sync data
virtual Status Sync() = 0; // sync data
/*
* Sync data and/or metadata as well.
@@ -670,15 +662,11 @@ class WritableFile {
* Override this method for environments where we need to sync
* metadata as well.
*/
virtual Status Fsync() {
return Sync();
}
virtual Status Fsync() { return Sync(); }
// true if Sync() and Fsync() are safe to call concurrently with Append()
// and Flush().
virtual bool IsSyncThreadSafe() const {
return false;
}
virtual bool IsSyncThreadSafe() const { return false; }
// Indicates the upper layers if the current WritableFile implementation
// uses direct IO.
@@ -691,9 +679,7 @@ class WritableFile {
* Change the priority in rate limiter if rate limiting is enabled.
* If rate limiting is not enabled, this call has no effect.
*/
virtual void SetIOPriority(Env::IOPriority pri) {
io_priority_ = pri;
}
virtual void SetIOPriority(Env::IOPriority pri) { io_priority_ = pri; }
virtual Env::IOPriority GetIOPriority() { return io_priority_; }
@@ -705,9 +691,7 @@ class WritableFile {
/*
* Get the size of valid data in the file.
*/
virtual uint64_t GetFileSize() {
return 0;
}
virtual uint64_t GetFileSize() { return 0; }
/*
* Get and set the default pre-allocation block size for writes to
@@ -727,7 +711,7 @@ class WritableFile {
// For documentation, refer to RandomAccessFile::GetUniqueId()
virtual size_t GetUniqueId(char* /*id*/, size_t /*max_size*/) const {
return 0; // Default implementation to prevent issues with backwards
return 0; // Default implementation to prevent issues with backwards
}
// Remove any kind of caching of data from the offset to offset+length
@@ -762,10 +746,10 @@ class WritableFile {
// cover this write would be and Allocate to that point.
const auto block_size = preallocation_block_size_;
size_t new_last_preallocated_block =
(offset + len + block_size - 1) / block_size;
(offset + len + block_size - 1) / block_size;
if (new_last_preallocated_block > last_preallocated_block_) {
size_t num_spanned_blocks =
new_last_preallocated_block - last_preallocated_block_;
new_last_preallocated_block - last_preallocated_block_;
Allocate(block_size * last_preallocated_block_,
block_size * num_spanned_blocks);
last_preallocated_block_ = new_last_preallocated_block;
@@ -835,7 +819,7 @@ class RandomRWFile {
// MemoryMappedFileBuffer object represents a memory-mapped file's raw buffer.
// Subclasses should release the mapping upon destruction.
class MemoryMappedFileBuffer {
public:
public:
MemoryMappedFileBuffer(void* _base, size_t _length)
: base_(_base), length_(_length) {}
@@ -846,11 +830,11 @@ public:
MemoryMappedFileBuffer(const MemoryMappedFileBuffer&) = delete;
MemoryMappedFileBuffer& operator=(const MemoryMappedFileBuffer&) = delete;
void* GetBase() const { return base_; }
size_t GetLen() const { return length_; }
void* GetBase() const { return base_; }
size_t GetLen() const { return length_; }
protected:
void* base_;
protected:
void* base_;
const size_t length_;
};
@@ -907,7 +891,8 @@ class Logger {
// and format. Any log with level under the internal log level
// of *this (see @SetInfoLogLevel and @GetInfoLogLevel) will not be
// printed.
virtual void Logv(const InfoLogLevel log_level, const char* format, va_list ap);
virtual void Logv(const InfoLogLevel log_level, const char* format,
va_list ap);
virtual size_t GetLogFileSize() const { return kDoNotSupportGetLogFileSize; }
// Flush to the OS buffers
@@ -928,12 +913,12 @@ class Logger {
InfoLogLevel log_level_;
};
// Identifies a locked file.
class FileLock {
public:
FileLock() { }
FileLock() {}
virtual ~FileLock();
private:
// No copying allowed
FileLock(const FileLock&);
@@ -964,21 +949,21 @@ extern void Fatal(const std::shared_ptr<Logger>& info_log, const char* format,
// The default info log level is InfoLogLevel::INFO_LEVEL.
extern void Log(const std::shared_ptr<Logger>& info_log, const char* format,
...)
# if defined(__GNUC__) || defined(__clang__)
#if defined(__GNUC__) || defined(__clang__)
__attribute__((__format__(__printf__, 2, 3)))
# endif
#endif
;
extern void LogFlush(Logger *info_log);
extern void LogFlush(Logger* info_log);
extern void Log(const InfoLogLevel log_level, Logger* info_log,
const char* format, ...);
// The default info log level is InfoLogLevel::INFO_LEVEL.
extern void Log(Logger* info_log, const char* format, ...)
# if defined(__GNUC__) || defined(__clang__)
__attribute__((__format__ (__printf__, 2, 3)))
# endif
#if defined(__GNUC__) || defined(__clang__)
__attribute__((__format__(__printf__, 2, 3)))
#endif
;
// a set of log functions with different log levels.
@@ -1004,7 +989,7 @@ extern Status ReadFileToString(Env* env, const std::string& fname,
class EnvWrapper : public Env {
public:
// Initialize an EnvWrapper that delegates all calls to *t
explicit EnvWrapper(Env* t) : target_(t) { }
explicit EnvWrapper(Env* t) : target_(t) {}
~EnvWrapper() override;
// Return the target to which this Env forwards all calls
@@ -1174,9 +1159,7 @@ class EnvWrapper : public Env {
return target_->GetThreadStatusUpdater();
}
uint64_t GetThreadID() const override {
return target_->GetThreadID();
}
uint64_t GetThreadID() const override { return target_->GetThreadID(); }
std::string GenerateUniqueId() override {
return target_->GenerateUniqueId();
@@ -1219,7 +1202,7 @@ class EnvWrapper : public Env {
// protected virtual methods.
class WritableFileWrapper : public WritableFile {
public:
explicit WritableFileWrapper(WritableFile* t) : target_(t) { }
explicit WritableFileWrapper(WritableFile* t) : target_(t) {}
Status Append(const Slice& data) override { return target_->Append(data); }
Status PositionedAppend(const Slice& data, uint64_t offset) override {
@@ -5,7 +5,7 @@
#pragma once
#if !defined(ROCKSDB_LITE)
#if !defined(ROCKSDB_LITE)
#include <string>
@@ -15,184 +15,190 @@ namespace rocksdb {
class EncryptionProvider;
// Returns an Env that encrypts data when stored on disk and decrypts data when
// Returns an Env that encrypts data when stored on disk and decrypts data when
// read from disk.
Env* NewEncryptedEnv(Env* base_env, EncryptionProvider* provider);
// BlockAccessCipherStream is the base class for any cipher stream that
// supports random access at block level (without requiring data from other blocks).
// E.g. CTR (Counter operation mode) supports this requirement.
// BlockAccessCipherStream is the base class for any cipher stream that
// supports random access at block level (without requiring data from other
// blocks). E.g. CTR (Counter operation mode) supports this requirement.
class BlockAccessCipherStream {
public:
virtual ~BlockAccessCipherStream() {};
public:
virtual ~BlockAccessCipherStream(){};
// BlockSize returns the size of each block supported by this cipher stream.
virtual size_t BlockSize() = 0;
// BlockSize returns the size of each block supported by this cipher stream.
virtual size_t BlockSize() = 0;
// Encrypt one or more (partial) blocks of data at the file offset.
// Length of data is given in dataSize.
virtual Status Encrypt(uint64_t fileOffset, char *data, size_t dataSize);
// Encrypt one or more (partial) blocks of data at the file offset.
// Length of data is given in dataSize.
virtual Status Encrypt(uint64_t fileOffset, char* data, size_t dataSize);
// Decrypt one or more (partial) blocks of data at the file offset.
// Length of data is given in dataSize.
virtual Status Decrypt(uint64_t fileOffset, char *data, size_t dataSize);
// Decrypt one or more (partial) blocks of data at the file offset.
// Length of data is given in dataSize.
virtual Status Decrypt(uint64_t fileOffset, char* data, size_t dataSize);
protected:
// Allocate scratch space which is passed to EncryptBlock/DecryptBlock.
virtual void AllocateScratch(std::string&) = 0;
protected:
// Allocate scratch space which is passed to EncryptBlock/DecryptBlock.
virtual void AllocateScratch(std::string&) = 0;
// Encrypt a block of data at the given block index.
// Length of data is equal to BlockSize();
virtual Status EncryptBlock(uint64_t blockIndex, char *data, char* scratch) = 0;
// Encrypt a block of data at the given block index.
// Length of data is equal to BlockSize();
virtual Status EncryptBlock(uint64_t blockIndex, char* data,
char* scratch) = 0;
// Decrypt a block of data at the given block index.
// Length of data is equal to BlockSize();
virtual Status DecryptBlock(uint64_t blockIndex, char *data, char* scratch) = 0;
// Decrypt a block of data at the given block index.
// Length of data is equal to BlockSize();
virtual Status DecryptBlock(uint64_t blockIndex, char* data,
char* scratch) = 0;
};
// BlockCipher
// BlockCipher
class BlockCipher {
public:
virtual ~BlockCipher() {};
public:
virtual ~BlockCipher(){};
// BlockSize returns the size of each block supported by this cipher stream.
virtual size_t BlockSize() = 0;
// BlockSize returns the size of each block supported by this cipher stream.
virtual size_t BlockSize() = 0;
// Encrypt a block of data.
// Length of data is equal to BlockSize().
virtual Status Encrypt(char *data) = 0;
// Encrypt a block of data.
// Length of data is equal to BlockSize().
virtual Status Encrypt(char* data) = 0;
// Decrypt a block of data.
// Length of data is equal to BlockSize().
virtual Status Decrypt(char *data) = 0;
// Decrypt a block of data.
// Length of data is equal to BlockSize().
virtual Status Decrypt(char* data) = 0;
};
// Implements a BlockCipher using ROT13.
//
// Note: This is a sample implementation of BlockCipher,
// Note: This is a sample implementation of BlockCipher,
// it is NOT considered safe and should NOT be used in production.
class ROT13BlockCipher : public BlockCipher {
private:
size_t blockSize_;
public:
ROT13BlockCipher(size_t blockSize)
: blockSize_(blockSize) {}
virtual ~ROT13BlockCipher() {};
private:
size_t blockSize_;
// BlockSize returns the size of each block supported by this cipher stream.
virtual size_t BlockSize() override { return blockSize_; }
public:
ROT13BlockCipher(size_t blockSize) : blockSize_(blockSize) {}
virtual ~ROT13BlockCipher(){};
// Encrypt a block of data.
// Length of data is equal to BlockSize().
virtual Status Encrypt(char *data) override;
// BlockSize returns the size of each block supported by this cipher stream.
virtual size_t BlockSize() override { return blockSize_; }
// Decrypt a block of data.
// Length of data is equal to BlockSize().
virtual Status Decrypt(char *data) override;
// Encrypt a block of data.
// Length of data is equal to BlockSize().
virtual Status Encrypt(char* data) override;
// Decrypt a block of data.
// Length of data is equal to BlockSize().
virtual Status Decrypt(char* data) override;
};
// CTRCipherStream implements BlockAccessCipherStream using an
// Counter operations mode.
// CTRCipherStream implements BlockAccessCipherStream using an
// Counter operations mode.
// See https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation
//
// Note: This is a possible implementation of BlockAccessCipherStream,
// Note: This is a possible implementation of BlockAccessCipherStream,
// it is considered suitable for use.
class CTRCipherStream final : public BlockAccessCipherStream {
private:
BlockCipher& cipher_;
std::string iv_;
uint64_t initialCounter_;
public:
CTRCipherStream(BlockCipher& c, const char *iv, uint64_t initialCounter)
: cipher_(c), iv_(iv, c.BlockSize()), initialCounter_(initialCounter) {};
virtual ~CTRCipherStream() {};
private:
BlockCipher& cipher_;
std::string iv_;
uint64_t initialCounter_;
// BlockSize returns the size of each block supported by this cipher stream.
virtual size_t BlockSize() override { return cipher_.BlockSize(); }
public:
CTRCipherStream(BlockCipher& c, const char* iv, uint64_t initialCounter)
: cipher_(c), iv_(iv, c.BlockSize()), initialCounter_(initialCounter){};
virtual ~CTRCipherStream(){};
protected:
// Allocate scratch space which is passed to EncryptBlock/DecryptBlock.
virtual void AllocateScratch(std::string&) override;
// BlockSize returns the size of each block supported by this cipher stream.
virtual size_t BlockSize() override { return cipher_.BlockSize(); }
// Encrypt a block of data at the given block index.
// Length of data is equal to BlockSize();
virtual Status EncryptBlock(uint64_t blockIndex, char *data, char *scratch) override;
protected:
// Allocate scratch space which is passed to EncryptBlock/DecryptBlock.
virtual void AllocateScratch(std::string&) override;
// Decrypt a block of data at the given block index.
// Length of data is equal to BlockSize();
virtual Status DecryptBlock(uint64_t blockIndex, char *data, char *scratch) override;
// Encrypt a block of data at the given block index.
// Length of data is equal to BlockSize();
virtual Status EncryptBlock(uint64_t blockIndex, char* data,
char* scratch) override;
// Decrypt a block of data at the given block index.
// Length of data is equal to BlockSize();
virtual Status DecryptBlock(uint64_t blockIndex, char* data,
char* scratch) override;
};
// The encryption provider is used to create a cipher stream for a specific file.
// The returned cipher stream will be used for actual encryption/decryption
// actions.
// The encryption provider is used to create a cipher stream for a specific
// file. The returned cipher stream will be used for actual
// encryption/decryption actions.
class EncryptionProvider {
public:
virtual ~EncryptionProvider() {};
virtual ~EncryptionProvider(){};
// GetPrefixLength returns the length of the prefix that is added to every file
// and used for storing encryption options.
// For optimal performance, the prefix length should be a multiple of
// the page size.
virtual size_t GetPrefixLength() = 0;
// GetPrefixLength returns the length of the prefix that is added to every
// file and used for storing encryption options. For optimal performance, the
// prefix length should be a multiple of the page size.
virtual size_t GetPrefixLength() = 0;
// CreateNewPrefix initialized an allocated block of prefix memory
// for a new file.
virtual Status CreateNewPrefix(const std::string& fname, char *prefix, size_t prefixLength) = 0;
// CreateNewPrefix initialized an allocated block of prefix memory
// for a new file.
virtual Status CreateNewPrefix(const std::string& fname, char* prefix,
size_t prefixLength) = 0;
// CreateCipherStream creates a block access cipher stream for a file given
// given name and options.
virtual Status CreateCipherStream(
const std::string& fname, const EnvOptions& options, Slice& prefix,
std::unique_ptr<BlockAccessCipherStream>* result) = 0;
// CreateCipherStream creates a block access cipher stream for a file given
// given name and options.
virtual Status CreateCipherStream(
const std::string& fname, const EnvOptions& options, Slice& prefix,
std::unique_ptr<BlockAccessCipherStream>* result) = 0;
};
// This encryption provider uses a CTR cipher stream, with a given block cipher
// This encryption provider uses a CTR cipher stream, with a given block cipher
// and IV.
//
// Note: This is a possible implementation of EncryptionProvider,
// Note: This is a possible implementation of EncryptionProvider,
// it is considered suitable for use, provided a safe BlockCipher is used.
class CTREncryptionProvider : public EncryptionProvider {
private:
BlockCipher& cipher_;
protected:
const static size_t defaultPrefixLength = 4096;
private:
BlockCipher& cipher_;
protected:
const static size_t defaultPrefixLength = 4096;
public:
CTREncryptionProvider(BlockCipher& c)
: cipher_(c) {};
virtual ~CTREncryptionProvider() {}
CTREncryptionProvider(BlockCipher& c) : cipher_(c){};
virtual ~CTREncryptionProvider() {}
// GetPrefixLength returns the length of the prefix that is added to every file
// and used for storing encryption options.
// For optimal performance, the prefix length should be a multiple of
// the page size.
virtual size_t GetPrefixLength() override;
// GetPrefixLength returns the length of the prefix that is added to every
// file and used for storing encryption options. For optimal performance, the
// prefix length should be a multiple of the page size.
virtual size_t GetPrefixLength() override;
// CreateNewPrefix initialized an allocated block of prefix memory
// for a new file.
virtual Status CreateNewPrefix(const std::string& fname, char *prefix, size_t prefixLength) override;
// CreateNewPrefix initialized an allocated block of prefix memory
// for a new file.
virtual Status CreateNewPrefix(const std::string& fname, char* prefix,
size_t prefixLength) override;
// CreateCipherStream creates a block access cipher stream for a file given
// given name and options.
virtual Status CreateCipherStream(
const std::string& fname, const EnvOptions& options, Slice& prefix,
std::unique_ptr<BlockAccessCipherStream>* result) override;
// CreateCipherStream creates a block access cipher stream for a file given
// given name and options.
virtual Status CreateCipherStream(
const std::string& fname, const EnvOptions& options, Slice& prefix,
std::unique_ptr<BlockAccessCipherStream>* result) override;
protected:
// PopulateSecretPrefixPart initializes the data into a new prefix block
// that will be encrypted. This function will store the data in plain text.
// It will be encrypted later (before written to disk).
// Returns the amount of space (starting from the start of the prefix)
// that has been initialized.
virtual size_t PopulateSecretPrefixPart(char *prefix, size_t prefixLength, size_t blockSize);
protected:
// PopulateSecretPrefixPart initializes the data into a new prefix block
// that will be encrypted. This function will store the data in plain text.
// It will be encrypted later (before written to disk).
// Returns the amount of space (starting from the start of the prefix)
// that has been initialized.
virtual size_t PopulateSecretPrefixPart(char* prefix, size_t prefixLength,
size_t blockSize);
// CreateCipherStreamFromPrefix creates a block access cipher stream for a file given
// given name and options. The given prefix is already decrypted.
virtual Status CreateCipherStreamFromPrefix(
const std::string& fname, const EnvOptions& options,
uint64_t initialCounter, const Slice& iv, const Slice& prefix,
std::unique_ptr<BlockAccessCipherStream>* result);
// CreateCipherStreamFromPrefix creates a block access cipher stream for a
// file given given name and options. The given prefix is already decrypted.
virtual Status CreateCipherStreamFromPrefix(
const std::string& fname, const EnvOptions& options,
uint64_t initialCounter, const Slice& iv, const Slice& prefix,
std::unique_ptr<BlockAccessCipherStream>* result);
};
} // namespace rocksdb
@@ -19,9 +19,9 @@
#pragma once
#include <stdlib.h>
#include <memory>
#include <stdexcept>
#include <stdlib.h>
#include <string>
#include <vector>
@@ -47,7 +47,7 @@ class FilterBitsBuilder {
// Calculate num of entries fit into a space.
#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable : 4702) // unreachable code
#pragma warning(disable : 4702) // unreachable code
#endif
virtual int CalculateNumEntry(const uint32_t /*space*/) {
#ifndef ROCKSDB_LITE
@@ -102,8 +102,8 @@ class FilterPolicy {
//
// Warning: do not change the initial contents of *dst. Instead,
// append the newly constructed filter to *dst.
virtual void CreateFilter(const Slice* keys, int n, std::string* dst)
const = 0;
virtual void CreateFilter(const Slice* keys, int n,
std::string* dst) const = 0;
// "filter" contains the data appended by a preceding call to
// CreateFilter() on this class. This method must return true if
@@ -114,9 +114,7 @@ class FilterPolicy {
// Get the FilterBitsBuilder, which is ONLY used for full filter block
// It contains interface to take individual key, then generate filter
virtual FilterBitsBuilder* GetFilterBitsBuilder() const {
return nullptr;
}
virtual FilterBitsBuilder* GetFilterBitsBuilder() const { return nullptr; }
// Get the FilterBitsReader, which is ONLY used for full filter block
// It contains interface to tell if key can be in filter
@@ -147,4 +145,4 @@ class FilterPolicy {
// trailing spaces in keys.
extern const FilterPolicy* NewBloomFilterPolicy(
int bits_per_key, bool use_block_based_builder = false);
}
} // namespace rocksdb
@@ -20,10 +20,9 @@ class FlushBlockPolicy {
public:
// Keep track of the key/value sequences and return the boolean value to
// determine if table builder should flush current data block.
virtual bool Update(const Slice& key,
const Slice& value) = 0;
virtual bool Update(const Slice& key, const Slice& value) = 0;
virtual ~FlushBlockPolicy() { }
virtual ~FlushBlockPolicy() {}
};
class FlushBlockPolicyFactory {
@@ -41,7 +40,7 @@ class FlushBlockPolicyFactory {
const BlockBasedTableOptions& table_options,
const BlockBuilder& data_block_builder) const = 0;
virtual ~FlushBlockPolicyFactory() { }
virtual ~FlushBlockPolicyFactory() {}
};
class FlushBlockBySizePolicyFactory : public FlushBlockPolicyFactory {
@@ -59,4 +58,4 @@ class FlushBlockBySizePolicyFactory : public FlushBlockPolicyFactory {
const BlockBuilder& data_block_builder);
};
} // rocksdb
} // namespace rocksdb
@@ -38,6 +38,6 @@ class LDBTool {
const std::vector<ColumnFamilyDescriptor>* column_families = nullptr);
};
} // namespace rocksdb
} // namespace rocksdb
#endif  // ROCKSDB_LITE
@@ -192,8 +192,8 @@ struct FlushJobInfo {
struct CompactionJobInfo {
CompactionJobInfo() = default;
explicit CompactionJobInfo(const CompactionJobStats& _stats) :
stats(_stats) {}
explicit CompactionJobInfo(const CompactionJobStats& _stats)
: stats(_stats) {}
// the id of the column family where the compaction happened.
uint32_t cf_id;
@@ -244,7 +244,6 @@ struct MemTableInfo {
uint64_t num_entries;
// Total number of deletes in memtable
uint64_t num_deletes;
};
struct ExternalFileIngestionInfo {
@@ -324,8 +323,7 @@ class EventListener {
// Note that the this function must be implemented in a way such that
// it should not run for an extended period of time before the function
// returns. Otherwise, RocksDB may be blocked.
virtual void OnCompactionBegin(DB* /*db*/,
const CompactionJobInfo& /*ci*/) {}
virtual void OnCompactionBegin(DB* /*db*/, const CompactionJobInfo& /*ci*/) {}
// A callback function for RocksDB which will be called whenever
// a registered RocksDB compacts a file. The default implementation
@@ -380,8 +378,7 @@ class EventListener {
// Note that if applications would like to use the passed reference
// outside this function call, they should make copies from these
// returned value.
virtual void OnMemTableSealed(
const MemTableInfo& /*info*/) {}
virtual void OnMemTableSealed(const MemTableInfo& /*info*/) {}
// A callback function for RocksDB which will be called before
// a column family handle is deleted.
@@ -457,8 +454,7 @@ class EventListener {
#else
class EventListener {
};
class EventListener {};
#endif  // ROCKSDB_LITE
@@ -35,11 +35,11 @@
#pragma once
#include <memory>
#include <stdexcept>
#include <rocksdb/slice.h>
#include <stdint.h>
#include <stdlib.h>
#include <rocksdb/slice.h>
#include <memory>
#include <stdexcept>
namespace rocksdb {
@@ -75,7 +75,7 @@ class MemTableRep {
virtual int operator()(const char* prefix_len_key,
const Slice& key) const = 0;
virtual ~KeyComparator() { }
virtual ~KeyComparator() {}
};
explicit MemTableRep(Allocator* allocator) : allocator_(allocator) {}
@@ -142,7 +142,7 @@ class MemTableRep {
// does nothing. After MarkReadOnly() is called, this table rep will
// not be written to (ie No more calls to Allocate(), Insert(),
// or any writes done directly to entries accessed through the iterator.)
virtual void MarkReadOnly() { }
virtual void MarkReadOnly() {}
// Notify this table rep that it has been flushed to stable storage.
// By default, does nothing.
@@ -150,7 +150,7 @@ class MemTableRep {
// Invariant: MarkReadOnly() is called, before MarkFlushed().
// Note that this method if overridden, should not run for an extended period
// of time. Otherwise, RocksDB may be blocked.
virtual void MarkFlushed() { }
virtual void MarkFlushed() {}
// Look up key from the mem table, since the first key in the mem table whose
// user_key matches the one given k, call the function callback_func(), with
@@ -176,7 +176,7 @@ class MemTableRep {
// that was allocated through the allocator. Safe to call from any thread.
virtual size_t ApproximateMemoryUsage() = 0;
virtual ~MemTableRep() { }
virtual ~MemTableRep() {}
// Iteration over the contents of a skip collection
class Iterator {
@@ -317,16 +317,14 @@ class VectorRepFactory : public MemTableRepFactory {
const size_t count_;
public:
explicit VectorRepFactory(size_t count = 0) : count_(count) { }
explicit VectorRepFactory(size_t count = 0) : count_(count) {}
using MemTableRepFactory::CreateMemTableRep;
virtual MemTableRep* CreateMemTableRep(const MemTableRep::KeyComparator&,
Allocator*, const SliceTransform*,
Logger* logger) override;
virtual const char* Name() const override {
return "VectorRepFactory";
}
virtual const char* Name() const override { return "VectorRepFactory"; }
};
// This class contains a fixed array of buckets, each
@@ -337,8 +335,7 @@ class VectorRepFactory : public MemTableRepFactory {
// link lists in the skiplist
extern MemTableRepFactory* NewHashSkipListRepFactory(
size_t bucket_count = 1000000, int32_t skiplist_height = 4,
int32_t skiplist_branching_factor = 4
);
int32_t skiplist_branching_factor = 4);
// The factory is to create memtables based on a hash table:
// it contains a fixed array of buckets, each pointing to either a linked list
@@ -239,13 +239,10 @@ class AssociativeMergeOperator : public MergeOperator {
// returns false, it is because client specified bad data or there was
// internal corruption. The client should assume that this will be treated
// as an error by the library.
virtual bool Merge(const Slice& key,
const Slice* existing_value,
const Slice& value,
std::string* new_value,
virtual bool Merge(const Slice& key, const Slice* existing_value,
const Slice& value, std::string* new_value,
Logger* logger) const = 0;
private:
// Default implementations of the MergeOperator functions
bool FullMergeV2(const MergeOperationInput& merge_in,
@@ -22,8 +22,8 @@ struct SstFileMetaData;
struct ColumnFamilyMetaData {
ColumnFamilyMetaData() : size(0), file_count(0), name("") {}
ColumnFamilyMetaData(const std::string& _name, uint64_t _size,
const std::vector<LevelMetaData>&& _levels) :
size(_size), name(_name), levels(_levels) {}
const std::vector<LevelMetaData>&& _levels)
: size(_size), name(_name), levels(_levels) {}
// The size of this column family in bytes, which is equal to the sum of
// the file size of its "levels".
@@ -39,9 +39,8 @@ struct ColumnFamilyMetaData {
// The metadata that describes a level.
struct LevelMetaData {
LevelMetaData(int _level, uint64_t _size,
const std::vector<SstFileMetaData>&& _files) :
level(_level), size(_size),
files(_files) {}
const std::vector<SstFileMetaData>&& _files)
: level(_level), size(_size), files(_files) {}
// The level which this meta data describes.
const int level;
@@ -94,9 +93,9 @@ struct SstFileMetaData {
SequenceNumber smallest_seqno;  // Smallest sequence number in file.
SequenceNumber largest_seqno;   // Largest sequence number in file.
std::string smallestkey;  // Smallest user defined key in the file.
std::string largestkey;   // Largest user defined key in the file.
uint64_t num_reads_sampled;  // How many times the file is read.
std::string smallestkey;     // Smallest user defined key in the file.
std::string largestkey;      // Largest user defined key in the file.
uint64_t num_reads_sampled;  // How many times the file is read.
bool being_compacted;  // true if the file is currently being compacted.
uint64_t num_entries;
@@ -106,7 +105,7 @@ struct SstFileMetaData {
// The full set of metadata associated with each SST file.
struct LiveFileMetaData : SstFileMetaData {
std::string column_family_name;  // Name of the column family
int level;               // Level at which this file resides.
int level;                       // Level at which this file resides.
LiveFileMetaData() : column_family_name(), level(0) {}
};
}  // namespace rocksdb
@@ -10,11 +10,11 @@
#include <stddef.h>
#include <stdint.h>
#include <string>
#include <memory>
#include <vector>
#include <limits>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "rocksdb/advanced_options.h"
#include "rocksdb/comparator.h"
@@ -94,8 +94,7 @@ struct ColumnFamilyOptions : public AdvancedColumnFamilyOptions {
// an iterator, only Put() and Get() API calls
//
// Not supported in ROCKSDB_LITE
ColumnFamilyOptions* OptimizeForPointLookup(
uint64_t block_cache_size_mb);
ColumnFamilyOptions* OptimizeForPointLookup(uint64_t block_cache_size_mb);
// Default values for some parameters in ColumnFamilyOptions are not
// optimized for heavy workloads and big datasets, which means you might
@@ -341,7 +340,6 @@ struct DbPath {
DbPath(const std::string& p, uint64_t t) : path(p), target_size(t) {}
};
struct DBOptions {
// The function recovers options to the option as in version 4.6.
DBOptions* OldDefaults(int rocksdb_major_version = 4,
@@ -416,9 +414,9 @@ struct DBOptions {
std::shared_ptr<Logger> info_log = nullptr;
#ifdef NDEBUG
InfoLogLevel info_log_level = INFO_LEVEL;
InfoLogLevel info_log_level = INFO_LEVEL;
#else
InfoLogLevel info_log_level = DEBUG_LEVEL;
InfoLogLevel info_log_level = DEBUG_LEVEL;
#endif  // NDEBUG
// Number of open files that can be used by the DB. You may need to
@@ -722,12 +720,7 @@ struct DBOptions {
// Specify the file access pattern once a compaction is started.
// It will be applied to all input files of a compaction.
// Default: NORMAL
enum AccessHint {
NONE,
NORMAL,
SEQUENTIAL,
WILLNEED
};
enum AccessHint { NONE, NORMAL, SEQUENTIAL, WILLNEED };
AccessHint access_hint_on_compaction_start = NORMAL;
// If true, always create a new file descriptor and new table reader
@@ -782,7 +775,6 @@ struct DBOptions {
// Dynamically changeable through SetDBOptions() API.
size_t writable_file_max_buffer_size = 1024 * 1024;
// Use adaptive mutex, which spins in the user space before resorting
// to kernel. This could reduce context switch when the mutex is not
// heavily contended. However, if the mutex is hot, we could end up
@@ -1364,7 +1356,7 @@ struct IngestExternalFileOptions {
bool verify_checksums_before_ingest = false;
};
enum TraceFilterType: uint64_t {
enum TraceFilterType : uint64_t {
// Trace all the operations
kTraceFilterNone = 0x0,
// Do not trace the get operations
@@ -5,8 +5,8 @@
#pragma once
#include <map>
#include <stdint.h>
#include <map>
#include <string>
#include "rocksdb/perf_level.h"
@@ -35,14 +35,13 @@ struct PerfContextByLevel {
// total nanos spent on reading data from SST files
uint64_t get_from_table_nanos;
uint64_t block_cache_hit_count = 0;   // total number of block cache hits
uint64_t block_cache_miss_count = 0;  // total number of block cache misses
uint64_t block_cache_hit_count = 0;   // total number of block cache hits
uint64_t block_cache_miss_count = 0;  // total number of block cache misses
void Reset();  // reset all performance counters to zero
void Reset();  // reset all performance counters to zero
};
struct PerfContext {
~PerfContext();
PerfContext() {}
@@ -51,7 +50,7 @@ struct PerfContext {
PerfContext& operator=(const PerfContext&);
PerfContext(PerfContext&&) noexcept;
void Reset();  // reset all performance counters to zero
void Reset();  // reset all performance counters to zero
std::string ToString(bool exclude_zero_counters = false) const;
@@ -64,18 +63,18 @@ struct PerfContext {
// free the space for PerfContextByLevel, also disable per level perf context
void ClearPerLevelPerfContext();
uint64_t user_key_comparison_count;  // total number of user key comparisons
uint64_t block_cache_hit_count;      // total number of block cache hits
uint64_t block_read_count;           // total number of block reads (with IO)
uint64_t block_read_byte;            // total number of bytes from block reads
uint64_t block_read_time;            // total nanos spent on block reads
uint64_t block_cache_index_hit_count;   // total number of index block hits
uint64_t index_block_read_count;        // total number of index block reads
uint64_t block_cache_filter_hit_count;  // total number of filter block hits
uint64_t filter_block_read_count;       // total number of filter block reads
uint64_t user_key_comparison_count;  // total number of user key comparisons
uint64_t block_cache_hit_count;      // total number of block cache hits
uint64_t block_read_count;           // total number of block reads (with IO)
uint64_t block_read_byte;            // total number of bytes from block reads
uint64_t block_read_time;            // total nanos spent on block reads
uint64_t block_cache_index_hit_count;   // total number of index block hits
uint64_t index_block_read_count;        // total number of index block reads
uint64_t block_cache_filter_hit_count;  // total number of filter block hits
uint64_t filter_block_read_count;       // total number of filter block reads
uint64_t compression_dict_block_read_count;  // total number of compression
// dictionary block reads
uint64_t block_checksum_time;    // total nanos spent on block checksum
uint64_t block_checksum_time;    // total nanos spent on block checksum
uint64_t block_decompress_time;  // total nanos spent on block decompression
uint64_t get_read_bytes;  // bytes for vals returned by Get
@@ -116,9 +115,9 @@ struct PerfContext {
//
uint64_t internal_merge_count;
uint64_t get_snapshot_time;        // total nanos spent on getting snapshot
uint64_t get_from_memtable_time;   // total nanos spent on querying memtables
uint64_t get_from_memtable_count;  // number of mem tables queried
uint64_t get_snapshot_time;        // total nanos spent on getting snapshot
uint64_t get_from_memtable_time;   // total nanos spent on querying memtables
uint64_t get_from_memtable_count;  // number of mem tables queried
// total nanos spent after Get() finds a key
uint64_t get_post_process_time;
uint64_t get_from_output_files_time;  // total nanos reading from output files
@@ -230,4 +229,4 @@ struct PerfContext {
// if defined(NPERF_CONTEXT), then the pointer is not thread-local
PerfContext* get_perf_context();
}
}  // namespace rocksdb
@@ -19,9 +19,9 @@
#pragma once

#include <assert.h>
#include <cstdio>
#include <stddef.h>
#include <string.h>
#include <cstdio>
#include <string>

#ifdef __cpp_lib_string_view
@@ -35,14 +35,14 @@ namespace rocksdb {
class Slice {
public:
// Create an empty slice.
Slice() : data_(""), size_(0) { }
Slice() : data_(""), size_(0) {}

// Create a slice that refers to d[0,n-1].
Slice(const char* d, size_t n) : data_(d), size_(n) { }
Slice(const char* d, size_t n) : data_(d), size_(n) {}

// Create a slice that refers to the contents of "s"
/* implicit */
Slice(const std::string& s) : data_(s.data()), size_(s.size()) { }
Slice(const std::string& s) : data_(s.data()), size_(s.size()) {}

#ifdef __cpp_lib_string_view
// Create a slice that refers to the same contents as "sv"
@@ -52,9 +52,7 @@ class Slice {

// Create a slice that refers to s[0,strlen(s)-1]
/* implicit */
Slice(const char* s) : data_(s) {
size_ = (s == nullptr) ? 0 : strlen(s);
}
Slice(const char* s) : data_(s) { size_ = (s == nullptr) ? 0 : strlen(s); }

// Create a single slice from SliceParts using buf as storage.
// buf must exist as long as the returned Slice exists.
@@ -77,7 +75,10 @@ class Slice {
}

// Change this slice to refer to an empty array
void clear() { data_ = ""; size_ = 0; }
void clear() {
data_ = "";
size_ = 0;
}

// Drop the first "n" bytes from this slice.
void remove_prefix(size_t n) {
@@ -117,8 +118,7 @@ class Slice {

// Return true iff "x" is a prefix of "*this"
bool starts_with(const Slice& x) const {
return ((size_ >= x.size_) &&
(memcmp(data_, x.data_, x.size_) == 0));
return ((size_ >= x.size_) && (memcmp(data_, x.data_, x.size_) == 0));
}

bool ends_with(const Slice& x) const {
@@ -129,7 +129,7 @@ class Slice {
// Compare two slices and returns the first byte where they differ
size_t difference_offset(const Slice& b) const;

// private: make these public for rocksdbjni access
// private: make these public for rocksdbjni access
const char* data_;
size_t size_;

@@ -219,8 +219,8 @@ class PinnableSlice : public Slice, public Cleanable {
// A set of Slices that are virtually concatenated together. 'parts' points
// to an array of Slices. The number of elements in the array is 'num_parts'.
struct SliceParts {
SliceParts(const Slice* _parts, int _num_parts) :
parts(_parts), num_parts(_num_parts) { }
SliceParts(const Slice* _parts, int _num_parts)
: parts(_parts), num_parts(_num_parts) {}
SliceParts() : parts(nullptr), num_parts(0) {}

const Slice* parts;
@@ -232,17 +232,17 @@ inline bool operator==(const Slice& x, const Slice& y) {
(memcmp(x.data(), y.data(), x.size()) == 0));
}

inline bool operator!=(const Slice& x, const Slice& y) {
return !(x == y);
}
inline bool operator!=(const Slice& x, const Slice& y) { return !(x == y); }

inline int Slice::compare(const Slice& b) const {
assert(data_ != nullptr && b.data_ != nullptr);
const size_t min_len = (size_ < b.size_) ? size_ : b.size_;
int r = memcmp(data_, b.data_, min_len);
if (r == 0) {
if (size_ < b.size_) r = -1;
else if (size_ > b.size_) r = +1;
if (size_ < b.size_)
r = -1;
else if (size_ > b.size_)
r = +1;
}
return r;
}

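Slice is a non-owning view, so the backing storage must outlive it. A small sketch of the accessors touched in this hunk (the string contents are placeholders):

#include <cassert>
#include <string>

#include "rocksdb/slice.h"

int main() {
  std::string backing = "user_0001|payload";
  rocksdb::Slice s(backing);  // zero-copy view over the std::string

  assert(s.starts_with(rocksdb::Slice("user_")));

  rocksdb::Slice key(s.data(), 9);  // refers to "user_0001"
  key.remove_prefix(5);             // now refers to "0001"
  assert(key.ToString() == "0001");
  assert(key.compare(rocksdb::Slice("0001")) == 0);
  return 0;
}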
@@ -28,7 +28,7 @@ class Slice;
*/
class SliceTransform {
public:
virtual ~SliceTransform() {};
virtual ~SliceTransform(){};

// Return the name of this transformation.
virtual const char* Name() const = 0;
@@ -98,4 +98,4 @@ extern const SliceTransform* NewCappedPrefixTransform(size_t cap_len);

extern const SliceTransform* NewNoopTransform();

}
} // namespace rocksdb

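NewCappedPrefixTransform is typically installed as the prefix extractor on the options; a minimal sketch (the 4-byte cap is an arbitrary example value):

#include "rocksdb/options.h"
#include "rocksdb/slice_transform.h"

rocksdb::Options MakePrefixOptions() {
  rocksdb::Options options;
  // Keys shorter than 4 bytes are used whole; longer keys are cut at 4 bytes.
  options.prefix_extractor.reset(rocksdb::NewCappedPrefixTransform(4));
  return options;
}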
@@ -77,8 +77,9 @@ class SstFileWriter {
// be ingested into this column_family, note that passing nullptr means that
// the column_family is unknown.
// If invalidate_page_cache is set to true, SstFileWriter will give the OS a
// hint that this file pages is not needed every time we write 1MB to the file.
// To use the rate limiter an io_priority smaller than IO_TOTAL can be passed.
// hint that this file pages is not needed every time we write 1MB to the
// file. To use the rate limiter an io_priority smaller than IO_TOTAL can be
// passed.
SstFileWriter(const EnvOptions& env_options, const Options& options,
ColumnFamilyHandle* column_family = nullptr,
bool invalidate_page_cache = true,

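A rough end-to-end sketch of the writer declared above, assuming a previously opened db and a writable file_path (keys must be added in the comparator's order):

#include <string>
#include <vector>

#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/options.h"
#include "rocksdb/sst_file_writer.h"

rocksdb::Status WriteAndIngest(rocksdb::DB* db, const rocksdb::Options& options,
                               const std::string& file_path) {
  rocksdb::SstFileWriter writer(rocksdb::EnvOptions(), options);
  rocksdb::Status s = writer.Open(file_path);
  if (!s.ok()) return s;
  s = writer.Put("key1", "value1");
  if (s.ok()) s = writer.Put("key2", "value2");
  if (s.ok()) s = writer.Finish();
  if (!s.ok()) return s;
  // Hand the finished file to the DB.
  return db->IngestExternalFile({file_path},
                                rocksdb::IngestExternalFileOptions());
}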
@@ -495,9 +495,7 @@ class Statistics {
}

// Resets all ticker and histogram stats
virtual Status Reset() {
return Status::NotSupported("Not implemented");
}
virtual Status Reset() { return Status::NotSupported("Not implemented"); }

// String representation of the statistic object.
virtual std::string ToString() const {

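Statistics objects are usually created with CreateDBStatistics() and attached to the options; a small sketch (the path and workload are placeholders):

#include <iostream>
#include <string>

#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/statistics.h"

void OpenWithStats(const std::string& path) {
  rocksdb::Options options;
  options.create_if_missing = true;
  options.statistics = rocksdb::CreateDBStatistics();

  rocksdb::DB* db = nullptr;
  if (rocksdb::DB::Open(options, path, &db).ok()) {
    // ... run some workload ...
    std::cout << options.statistics->getTickerCount(rocksdb::BLOCK_CACHE_MISS)
              << " block cache misses\n";
    std::cout << options.statistics->ToString() << std::endl;
    delete db;
  }
}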
@@ -305,11 +305,12 @@ class Status {
static const char* CopyState(const char* s);
};

inline Status::Status(const Status& s) : code_(s.code_), subcode_(s.subcode_), sev_(s.sev_) {
inline Status::Status(const Status& s)
: code_(s.code_), subcode_(s.subcode_), sev_(s.sev_) {
state_ = (s.state_ == nullptr) ? nullptr : CopyState(s.state_);
}
inline Status::Status(const Status& s, Severity sev)
: code_(s.code_), subcode_(s.subcode_), sev_(sev) {
: code_(s.code_), subcode_(s.subcode_), sev_(sev) {
state_ = (s.state_ == nullptr) ? nullptr : CopyState(s.state_);
}
inline Status& Status::operator=(const Status& s) {

@@ -356,13 +356,13 @@ struct PlainTableOptions {
};

// -- Plain Table with prefix-only seek
// For this factory, you need to set Options.prefix_extractor properly to make it
// work. Look-up will starts with prefix hash lookup for key prefix. Inside the
// hash bucket found, a binary search is executed for hash conflicts. Finally,
// a linear search is used.
// For this factory, you need to set Options.prefix_extractor properly to make
// it work. Look-up will starts with prefix hash lookup for key prefix. Inside
// the hash bucket found, a binary search is executed for hash conflicts.
// Finally, a linear search is used.

extern TableFactory* NewPlainTableFactory(const PlainTableOptions& options =
PlainTableOptions());
extern TableFactory* NewPlainTableFactory(
const PlainTableOptions& options = PlainTableOptions());

struct CuckooTablePropertyNames {
// The key that is used to fill empty buckets.
@@ -496,9 +496,8 @@ class TableFactory {
//
// If the function cannot find a way to sanitize the input DB Options,
// a non-ok Status will be returned.
virtual Status SanitizeOptions(
const DBOptions& db_opts,
const ColumnFamilyOptions& cf_opts) const = 0;
virtual Status SanitizeOptions(const DBOptions& db_opts,
const ColumnFamilyOptions& cf_opts) const = 0;

// Return a string that contains printable format of table configurations.
// RocksDB prints configurations at DB Open().
@@ -538,7 +537,8 @@ class TableFactory {
// @block_based_table_factory: block based table factory to use. If NULL, use
// a default one.
// @plain_table_factory: plain table factory to use. If NULL, use a default one.
// @cuckoo_table_factory: cuckoo table factory to use. If NULL, use a default one.
// @cuckoo_table_factory: cuckoo table factory to use. If NULL, use a default
// one.
extern TableFactory* NewAdaptiveTableFactory(
std::shared_ptr<TableFactory> table_factory_to_write = nullptr,
std::shared_ptr<TableFactory> block_based_table_factory = nullptr,

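Plain table only works with a prefix extractor and mmap reads, as the comment above notes; a minimal configuration sketch (the 8-byte prefix length is an arbitrary example value):

#include "rocksdb/options.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/table.h"

rocksdb::Options MakePlainTableOptions() {
  rocksdb::Options options;
  // Prefix-hash lookup needs a prefix extractor, and the format relies on
  // mmap reads.
  options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(8));
  options.allow_mmap_reads = true;
  options.table_factory.reset(rocksdb::NewPlainTableFactory());
  return options;
}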
@@ -20,8 +20,7 @@
#include <utility>
#include <vector>

#if !defined(ROCKSDB_LITE) && \
!defined(NROCKSDB_THREAD_STATUS) && \
#if !defined(ROCKSDB_LITE) && !defined(NROCKSDB_THREAD_STATUS) && \
defined(ROCKSDB_SUPPORT_THREAD_LOCAL)
#define ROCKSDB_USING_THREAD_STATUS
#endif
@@ -43,9 +42,9 @@ struct ThreadStatus {
// The type of a thread.
enum ThreadType : int {
HIGH_PRIORITY = 0, // RocksDB BG thread in high-pri thread pool
LOW_PRIORITY, // RocksDB BG thread in low-pri thread pool
USER, // User thread (Non-RocksDB BG thread)
BOTTOM_PRIORITY, // RocksDB BG thread in bottom-pri thread pool
LOW_PRIORITY, // RocksDB BG thread in low-pri thread pool
USER, // User thread (Non-RocksDB BG thread)
BOTTOM_PRIORITY, // RocksDB BG thread in bottom-pri thread pool
NUM_THREAD_TYPES
};

@@ -105,22 +104,20 @@ struct ThreadStatus {
NUM_STATE_TYPES
};

ThreadStatus(const uint64_t _id,
const ThreadType _thread_type,
const std::string& _db_name,
const std::string& _cf_name,
ThreadStatus(const uint64_t _id, const ThreadType _thread_type,
const std::string& _db_name, const std::string& _cf_name,
const OperationType _operation_type,
const uint64_t _op_elapsed_micros,
const OperationStage _operation_stage,
const uint64_t _op_props[],
const StateType _state_type) :
thread_id(_id), thread_type(_thread_type),
db_name(_db_name),
cf_name(_cf_name),
operation_type(_operation_type),
op_elapsed_micros(_op_elapsed_micros),
operation_stage(_operation_stage),
state_type(_state_type) {
const uint64_t _op_props[], const StateType _state_type)
: thread_id(_id),
thread_type(_thread_type),
db_name(_db_name),
cf_name(_cf_name),
operation_type(_operation_type),
op_elapsed_micros(_op_elapsed_micros),
operation_stage(_operation_stage),
state_type(_state_type) {
for (int i = 0; i < kNumOperationProperties; ++i) {
op_properties[i] = _op_props[i];
}
@@ -172,23 +169,20 @@ struct ThreadStatus {
static const std::string MicrosToString(uint64_t op_elapsed_time);

// Obtain a human-readable string describing the specified operation stage.
static const std::string& GetOperationStageName(
OperationStage stage);
static const std::string& GetOperationStageName(OperationStage stage);

// Obtain the name of the "i"th operation property of the
// specified operation.
static const std::string& GetOperationPropertyName(
OperationType op_type, int i);
static const std::string& GetOperationPropertyName(OperationType op_type,
int i);

// Translate the "i"th property of the specified operation given
// a property value.
static std::map<std::string, uint64_t>
InterpretOperationProperties(
OperationType op_type, const uint64_t* op_properties);
static std::map<std::string, uint64_t> InterpretOperationProperties(
OperationType op_type, const uint64_t* op_properties);

// Obtain the name of a state given its type.
static const std::string& GetStateName(StateType state_type);
};


} // namespace rocksdb

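ThreadStatus records are usually obtained through Env::GetThreadList(); a rough sketch, assuming the DB was opened with thread tracking enabled (DBOptions::enable_thread_tracking):

#include <iostream>
#include <vector>

#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/thread_status.h"

void PrintBackgroundThreads(rocksdb::DB* db) {
  std::vector<rocksdb::ThreadStatus> threads;
  if (!db->GetEnv()->GetThreadList(&threads).ok()) return;
  for (const auto& t : threads) {
    std::cout << t.thread_id << " "
              << rocksdb::ThreadStatus::GetThreadTypeName(t.thread_type) << " "
              << rocksdb::ThreadStatus::GetOperationName(t.operation_type)
              << "\n";
  }
}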
@@ -47,7 +47,6 @@ class ThreadPool {
virtual void SubmitJob(const std::function<void()>&) = 0;
// This moves the function in for efficiency
virtual void SubmitJob(std::function<void()>&&) = 0;

};

// NewThreadPool() is a function that could be used to create a ThreadPool

@@ -5,18 +5,18 @@

#pragma once

#include <memory>
#include <vector>
#include "rocksdb/status.h"
#include "rocksdb/types.h"
#include "rocksdb/write_batch.h"
#include <memory>
#include <vector>

namespace rocksdb {

class LogFile;
typedef std::vector<std::unique_ptr<LogFile>> VectorLogPtr;

enum WalFileType {
enum WalFileType {
/* Indicates that WAL file is in archive directory. WAL files are moved from
* the main db directory to archive directory once they are not live and stay
* there until cleaned up. Files are cleaned depending on archive size
@@ -27,7 +27,7 @@ enum WalFileType {

/* Indicates that WAL file is live and resides in the main db directory */
kAliveLogFile = 1
} ;
};

class LogFile {
public:
@@ -39,7 +39,6 @@ class LogFile {
// For an archived-log-file = /archive/000003.log
virtual std::string PathName() const = 0;


// Primary identifier for log file.
// This is directly proportional to creation time of the log file
virtual uint64_t LogNumber() const = 0;
@@ -119,4 +118,4 @@ class TransactionLogIterator {
: verify_checksums_(verify_checksums) {}
};
};
} // namespace rocksdb
} // namespace rocksdb

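TransactionLogIterator instances come from DB::GetUpdatesSince(); a minimal tailing sketch, assuming the WAL files covering the requested sequence number are still available:

#include <memory>

#include "rocksdb/db.h"
#include "rocksdb/transaction_log.h"

rocksdb::Status TailWal(rocksdb::DB* db, rocksdb::SequenceNumber since) {
  std::unique_ptr<rocksdb::TransactionLogIterator> iter;
  rocksdb::Status s = db->GetUpdatesSince(since, &iter);
  if (!s.ok()) return s;
  for (; iter->Valid(); iter->Next()) {
    rocksdb::BatchResult batch = iter->GetBatch();
    // batch.sequence is the first sequence number of batch.writeBatchPtr.
  }
  return iter->status();
}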
@@ -32,11 +32,9 @@ struct FullKey {
SequenceNumber sequence;
EntryType type;

FullKey()
: sequence(0)
{} // Intentionally left uninitialized (for speed)
FullKey() : sequence(0) {} // Intentionally left uninitialized (for speed)
FullKey(const Slice& u, const SequenceNumber& seq, EntryType t)
: user_key(u), sequence(seq), type(t) { }
: user_key(u), sequence(seq), type(t) {}
std::string DebugString(bool hex = false) const;

void clear() {

@@ -16,13 +16,12 @@ namespace rocksdb {
// into a single compaction run
//
enum CompactionStopStyle {
kCompactionStopStyleSimilarSize, // pick files of similar size
kCompactionStopStyleTotalSize // total size of picked files > next file
kCompactionStopStyleSimilarSize, // pick files of similar size
kCompactionStopStyleTotalSize // total size of picked files > next file
};

class CompactionOptionsUniversal {
public:

// Percentage flexibility while comparing file size. If the candidate file(s)
// size is 1% smaller than the next file's size, then include next file into
// this candidate set. // Default: 1

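A short sketch of wiring these knobs into universal compaction (the size_ratio value mirrors the 1% default described above; the stop_style choice is an example):

#include "rocksdb/options.h"
#include "rocksdb/universal_compaction.h"

rocksdb::Options MakeUniversalOptions() {
  rocksdb::Options options;
  options.compaction_style = rocksdb::kCompactionStyleUniversal;
  options.compaction_options_universal.size_ratio = 1;
  options.compaction_options_universal.stop_style =
      rocksdb::kCompactionStopStyleSimilarSize;
  return options;
}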
@@ -15,10 +15,10 @@
#endif

#include <inttypes.h>
#include <string>
#include <map>
#include <vector>
#include <functional>
#include <map>
#include <string>
#include <vector>

#include "rocksdb/utilities/stackable_db.h"

@@ -257,8 +257,7 @@ class BackupEngine {

// BackupableDBOptions have to be the same as the ones used in previous
// BackupEngines for the same backup directory.
static Status Open(Env* db_env,
const BackupableDBOptions& options,
static Status Open(Env* db_env, const BackupableDBOptions& options,
BackupEngine** backup_engine_ptr);

// same as CreateNewBackup, but stores extra application metadata

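A minimal sketch of BackupEngine::Open() followed by a backup of a live DB (backup_dir is a placeholder path):

#include <string>

#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/utilities/backupable_db.h"

rocksdb::Status BackupNow(rocksdb::DB* db, const std::string& backup_dir) {
  rocksdb::BackupEngine* backup_engine = nullptr;
  rocksdb::Status s = rocksdb::BackupEngine::Open(
      rocksdb::Env::Default(), rocksdb::BackupableDBOptions(backup_dir),
      &backup_engine);
  if (!s.ok()) return s;
  s = backup_engine->CreateNewBackup(db, /*flush_before_backup=*/true);
  delete backup_engine;
  return s;
}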
@@ -9,8 +9,8 @@
#include <string>
#include <vector>

#include "rocksdb/utilities/stackable_db.h"
#include "rocksdb/db.h"
#include "rocksdb/utilities/stackable_db.h"

namespace rocksdb {

@@ -60,9 +60,9 @@ class DBWithTTL : public StackableDB {
DBWithTTL** dbptr, std::vector<int32_t> ttls,
bool read_only = false);

virtual void SetTtl(int32_t ttl) = 0;
virtual void SetTtl(int32_t ttl) = 0;

virtual void SetTtl(ColumnFamilyHandle *h, int32_t ttl) = 0;
virtual void SetTtl(ColumnFamilyHandle* h, int32_t ttl) = 0;

protected:
explicit DBWithTTL(DB* db) : StackableDB(db) {}

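A minimal sketch of opening a TTL-wrapped database (the path and TTL value are placeholders; expired entries are dropped during compaction):

#include <string>

#include "rocksdb/options.h"
#include "rocksdb/utilities/db_ttl.h"

rocksdb::Status OpenWithTtl(const std::string& path, rocksdb::DBWithTTL** db) {
  rocksdb::Options options;
  options.create_if_missing = true;
  // Entries older than one hour become eligible for removal on compaction.
  return rocksdb::DBWithTTL::Open(options, path, db, /*ttl=*/3600);
}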
@@ -172,4 +172,4 @@ class EnvLibrados : public EnvWrapper {
librados::IoCtx* _GetIoctx(const std::string& prefix);
friend class LibradosWritableFile;
};
}
} // namespace rocksdb

@@ -19,8 +19,8 @@

#ifndef ROCKSDB_LITE

#include <iostream>
#include <algorithm>
#include <iostream>
#include <vector>
#include "rocksdb/env.h"

@@ -31,21 +31,15 @@ class RandomAccessFileMirror;
class WritableFileMirror;

class EnvMirror : public EnvWrapper {
Env* a_, *b_;
Env *a_, *b_;
bool free_a_, free_b_;

public:
EnvMirror(Env* a, Env* b, bool free_a=false, bool free_b=false)
: EnvWrapper(a),
a_(a),
b_(b),
free_a_(free_a),
free_b_(free_b) {}
EnvMirror(Env* a, Env* b, bool free_a = false, bool free_b = false)
: EnvWrapper(a), a_(a), b_(b), free_a_(free_a), free_b_(free_b) {}
~EnvMirror() {
if (free_a_)
delete a_;
if (free_b_)
delete b_;
if (free_a_) delete a_;
if (free_b_) delete b_;
}

Status NewSequentialFile(const std::string& f,
@@ -157,12 +151,12 @@ class EnvMirror : public EnvWrapper {

class FileLockMirror : public FileLock {
public:
FileLock* a_, *b_;
FileLock *a_, *b_;
FileLockMirror(FileLock* a, FileLock* b) : a_(a), b_(b) {}
};

Status LockFile(const std::string& f, FileLock** l) override {
FileLock* al, *bl;
FileLock *al, *bl;
Status as = a_->LockFile(f, &al);
Status bs = b_->LockFile(f, &bl);
assert(as == bs);

@@ -12,26 +12,28 @@
namespace rocksdb {

class LDBCommandExecuteResult {
public:
public:
enum State {
EXEC_NOT_STARTED = 0, EXEC_SUCCEED = 1, EXEC_FAILED = 2,
EXEC_NOT_STARTED = 0,
EXEC_SUCCEED = 1,
EXEC_FAILED = 2,
};

LDBCommandExecuteResult() : state_(EXEC_NOT_STARTED), message_("") {}

LDBCommandExecuteResult(State state, std::string& msg) :
state_(state), message_(msg) {}
LDBCommandExecuteResult(State state, std::string& msg)
: state_(state), message_(msg) {}

std::string ToString() {
std::string ret;
switch (state_) {
case EXEC_SUCCEED:
break;
case EXEC_FAILED:
ret.append("Failed: ");
break;
case EXEC_NOT_STARTED:
ret.append("Not started: ");
case EXEC_SUCCEED:
break;
case EXEC_FAILED:
ret.append("Failed: ");
break;
case EXEC_NOT_STARTED:
ret.append("Not started: ");
}
if (!message_.empty()) {
ret.append(message_);
@@ -44,17 +46,11 @@ public:
message_ = "";
}

bool IsSucceed() {
return state_ == EXEC_SUCCEED;
}
bool IsSucceed() { return state_ == EXEC_SUCCEED; }

bool IsNotStarted() {
return state_ == EXEC_NOT_STARTED;
}
bool IsNotStarted() { return state_ == EXEC_NOT_STARTED; }

bool IsFailed() {
return state_ == EXEC_FAILED;
}
bool IsFailed() { return state_ == EXEC_FAILED; }

static LDBCommandExecuteResult Succeed(std::string msg) {
return LDBCommandExecuteResult(EXEC_SUCCEED, msg);
@@ -64,7 +60,7 @@ public:
return LDBCommandExecuteResult(EXEC_FAILED, msg);
}

private:
private:
State state_;
std::string message_;

@@ -72,4 +68,4 @@ private:
bool operator!=(const LDBCommandExecuteResult&);
};

}
} // namespace rocksdb

@@ -73,7 +73,8 @@ class SimCache : public Cache {
// stop logging to the file automatically after reaching a specific size in
// bytes, a values of 0 disable this feature
virtual Status StartActivityLogging(const std::string& activity_log_file,
Env* env, uint64_t max_logging_size = 0) = 0;
Env* env,
uint64_t max_logging_size = 0) = 0;

// Stop cache activity logging if any
virtual void StopActivityLogging() = 0;

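SimCache wraps a real block cache and simulates the hit rate of a larger one; a rough sketch using the logging hooks above (capacities and the log path are placeholder values):

#include "rocksdb/cache.h"
#include "rocksdb/env.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"
#include "rocksdb/utilities/sim_cache.h"

rocksdb::Options MakeSimCacheOptions() {
  // Real 1 GB block cache, simulating the behavior of a 4 GB cache.
  std::shared_ptr<rocksdb::SimCache> sim_cache = rocksdb::NewSimCache(
      rocksdb::NewLRUCache(1 << 30), 4ull << 30, /*num_shard_bits=*/6);
  sim_cache->StartActivityLogging("/tmp/sim_cache.log",
                                  rocksdb::Env::Default());

  rocksdb::BlockBasedTableOptions table_options;
  table_options.block_cache = sim_cache;

  rocksdb::Options options;
  options.table_factory.reset(
      rocksdb::NewBlockBasedTableFactory(table_options));
  return options;
}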
@@ -13,7 +13,6 @@
#undef DeleteFile
#endif


namespace rocksdb {

// This class contains APIs to stack rocksdb wrappers.Eg. Stack TTL over base d
@@ -37,9 +36,7 @@ class StackableDB : public DB {

virtual Status Close() override { return db_->Close(); }

virtual DB* GetBaseDB() {
return db_;
}
virtual DB* GetBaseDB() { return db_; }

virtual DB* GetRootDB() override { return db_->GetRootDB(); }

@@ -144,10 +141,8 @@ class StackableDB : public DB {
return db_->Merge(options, column_family, key, value);
}


virtual Status Write(const WriteOptions& opts, WriteBatch* updates)
override {
return db_->Write(opts, updates);
virtual Status Write(const WriteOptions& opts, WriteBatch* updates) override {
return db_->Write(opts, updates);
}

using DB::NewIterator;
@@ -163,10 +158,7 @@ class StackableDB : public DB {
return db_->NewIterators(options, column_families, iterators);
}


virtual const Snapshot* GetSnapshot() override {
return db_->GetSnapshot();
}
virtual const Snapshot* GetSnapshot() override { return db_->GetSnapshot(); }

virtual void ReleaseSnapshot(const Snapshot* snapshot) override {
return db_->ReleaseSnapshot(snapshot);
@@ -197,12 +189,10 @@ class StackableDB : public DB {
}

using DB::GetApproximateSizes;
virtual void GetApproximateSizes(ColumnFamilyHandle* column_family,
const Range* r, int n, uint64_t* sizes,
uint8_t include_flags
= INCLUDE_FILES) override {
return db_->GetApproximateSizes(column_family, r, n, sizes,
include_flags);
virtual void GetApproximateSizes(
ColumnFamilyHandle* column_family, const Range* r, int n, uint64_t* sizes,
uint8_t include_flags = INCLUDE_FILES) override {
return db_->GetApproximateSizes(column_family, r, n, sizes, include_flags);
}

using DB::GetApproximateMemTableStats;
@@ -251,24 +241,20 @@ class StackableDB : public DB {
}

using DB::MaxMemCompactionLevel;
virtual int MaxMemCompactionLevel(ColumnFamilyHandle* column_family)
override {
virtual int MaxMemCompactionLevel(
ColumnFamilyHandle* column_family) override {
return db_->MaxMemCompactionLevel(column_family);
}

using DB::Level0StopWriteTrigger;
virtual int Level0StopWriteTrigger(ColumnFamilyHandle* column_family)
override {
virtual int Level0StopWriteTrigger(
ColumnFamilyHandle* column_family) override {
return db_->Level0StopWriteTrigger(column_family);
}

virtual const std::string& GetName() const override {
return db_->GetName();
}
virtual const std::string& GetName() const override { return db_->GetName(); }

virtual Env* GetEnv() const override {
return db_->GetEnv();
}
virtual Env* GetEnv() const override { return db_->GetEnv(); }

using DB::GetOptions;
virtual Options GetOptions(ColumnFamilyHandle* column_family) const override {
@@ -291,9 +277,7 @@ class StackableDB : public DB {
return db_->Flush(fopts, column_families);
}

virtual Status SyncWAL() override {
return db_->SyncWAL();
}
virtual Status SyncWAL() override { return db_->SyncWAL(); }

virtual Status FlushWAL(bool sync) override { return db_->FlushWAL(sync); }

@@ -312,9 +296,8 @@ class StackableDB : public DB {
db_->GetLiveFilesMetaData(metadata);
}

virtual void GetColumnFamilyMetaData(
ColumnFamilyHandle *column_family,
ColumnFamilyMetaData* cf_meta) override {
virtual void GetColumnFamilyMetaData(ColumnFamilyHandle* column_family,
ColumnFamilyMetaData* cf_meta) override {
db_->GetColumnFamilyMetaData(column_family, cf_meta);
}

@@ -322,14 +305,15 @@ class StackableDB : public DB {

virtual Status GetLiveFiles(std::vector<std::string>& vec, uint64_t* mfs,
bool flush_memtable = true) override {
return db_->GetLiveFiles(vec, mfs, flush_memtable);
return db_->GetLiveFiles(vec, mfs, flush_memtable);
}

virtual SequenceNumber GetLatestSequenceNumber() const override {
return db_->GetLatestSequenceNumber();
}

virtual bool SetPreserveDeletesSequenceNumber(SequenceNumber seqnum) override {
virtual bool SetPreserveDeletesSequenceNumber(
SequenceNumber seqnum) override {
return db_->SetPreserveDeletesSequenceNumber(seqnum);
}

@@ -401,4 +385,4 @@ class StackableDB : public DB {
std::shared_ptr<DB> shared_db_ptr_;
};

} // namespace rocksdb
} // namespace rocksdb

@@ -40,20 +40,18 @@ class CompactOnDeletionCollectorFactory

private:
friend std::shared_ptr<CompactOnDeletionCollectorFactory>
NewCompactOnDeletionCollectorFactory(
size_t sliding_window_size,
size_t deletion_trigger);
NewCompactOnDeletionCollectorFactory(size_t sliding_window_size,
size_t deletion_trigger);
// A factory of a table property collector that marks a SST
// file as need-compaction when it observe at least "D" deletion
// entries in any "N" consecutive entires.
//
// @param sliding_window_size "N"
// @param deletion_trigger "D"
CompactOnDeletionCollectorFactory(
size_t sliding_window_size,
size_t deletion_trigger) :
sliding_window_size_(sliding_window_size),
deletion_trigger_(deletion_trigger) {}
CompactOnDeletionCollectorFactory(size_t sliding_window_size,
size_t deletion_trigger)
: sliding_window_size_(sliding_window_size),
deletion_trigger_(deletion_trigger) {}

std::atomic<size_t> sliding_window_size_;
std::atomic<size_t> deletion_trigger_;
@@ -69,9 +67,8 @@ class CompactOnDeletionCollectorFactory
// @param deletion_trigger "D". Note that even when "N" is changed,
// the specified number for "D" will not be changed.
extern std::shared_ptr<CompactOnDeletionCollectorFactory>
NewCompactOnDeletionCollectorFactory(
size_t sliding_window_size,
size_t deletion_trigger);
NewCompactOnDeletionCollectorFactory(size_t sliding_window_size,
size_t deletion_trigger);
} // namespace rocksdb

#endif  // !ROCKSDB_LITE

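The factory above is installed through the column family options; a small sketch (the window and trigger sizes are arbitrary example values):

#include "rocksdb/options.h"
#include "rocksdb/utilities/table_properties_collectors.h"

rocksdb::Options MakeDeletionTriggeredCompaction() {
  rocksdb::Options options;
  // Mark an SST file for compaction once 32 deletions are observed in any
  // window of 1024 consecutive entries ("N" = 1024, "D" = 32).
  options.table_properties_collector_factories.emplace_back(
      rocksdb::NewCompactOnDeletionCollectorFactory(1024, 32));
  return options;
}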
@@ -127,7 +127,6 @@ struct TransactionOptions {
// return 0 if
// a.compare(b) returns 0.


// If positive, specifies the wait timeout in milliseconds when
// a transaction attempts to lock a key.
//

@@ -4,12 +4,12 @@

#pragma once
#ifndef ROCKSDB_LITE
#include <vector>
#include <string>
#include <vector>

#include "rocksdb/utilities/stackable_db.h"
#include "rocksdb/utilities/db_ttl.h"
#include "rocksdb/db.h"
#include "rocksdb/utilities/db_ttl.h"
#include "rocksdb/utilities/stackable_db.h"

namespace rocksdb {

@@ -22,14 +22,12 @@ class UtilityDB {
#if defined(__GNUC__) || defined(__clang__)
__attribute__((deprecated))
#elif _WIN32
__declspec(deprecated)
__declspec(deprecated)
#endif
static Status OpenTtlDB(const Options& options,
const std::string& name,
StackableDB** dbptr,
int32_t ttl = 0,
bool read_only = false);
static Status
OpenTtlDB(const Options& options, const std::string& name,
StackableDB** dbptr, int32_t ttl = 0, bool read_only = false);
};

} // namespace rocksdb
} // namespace rocksdb
#endif  // ROCKSDB_LITE

@@ -5,8 +5,8 @@

#pragma once

#include <string>
#include <map>
#include <string>

namespace rocksdb {

@@ -34,7 +34,7 @@ class WalFilter {
virtual ~WalFilter() {}

// Provide ColumnFamily->LogNumber map to filter
// so that filter can determine whether a log number applies to a given
// so that filter can determine whether a log number applies to a given
// column family (i.e. that log hasn't been flushed to SST already for the
// column family).
// We also pass in name->id map as only name is known during
@@ -83,8 +83,8 @@ class WalFilter {
return LogRecord(batch, new_batch, batch_changed);
}

// Please see the comments for LogRecord above. This function is for
// compatibility only and contains a subset of parameters.
// Please see the comments for LogRecord above. This function is for
// compatibility only and contains a subset of parameters.
// New code should use the function above.
virtual WalProcessingOption LogRecord(const WriteBatch& /*batch*/,
WriteBatch* /*new_batch*/,

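A WalFilter is installed through DBOptions::wal_filter before the database is opened; a pass-through sketch of the LogRecord() hook shown above (a real filter could skip or rewrite batches during recovery):

#include "rocksdb/options.h"
#include "rocksdb/wal_filter.h"
#include "rocksdb/write_batch.h"

class PassThroughWalFilter : public rocksdb::WalFilter {
 public:
  WalProcessingOption LogRecord(const rocksdb::WriteBatch& /*batch*/,
                                rocksdb::WriteBatch* /*new_batch*/,
                                bool* batch_changed) const override {
    *batch_changed = false;
    return WalProcessingOption::kContinueProcessing;
  }
  const char* Name() const override { return "PassThroughWalFilter"; }
};

// Typical wiring; the filter must outlive the DB it is attached to:
//   PassThroughWalFilter filter;
//   rocksdb::Options options;
//   options.wal_filter = &filter;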
@@ -24,10 +24,10 @@

#pragma once

#include <stdint.h>
#include <atomic>
#include <stack>
#include <string>
#include <stdint.h>
#include "rocksdb/status.h"
#include "rocksdb/write_batch_base.h"