Fix spelling in comments in include/rocksdb/ (#8120)
Summary: Ran a spell check over the comments in the include/rocksdb directory and fixed any misspellings. Some variable names are still spelled incorrectly (for example, SizeApproximationOptions::include_memtabtles and SstFileMetaData::oldest_ancester_time); those were left alone, since renaming them would break compilation.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/8120

Reviewed By: zhichao-cao

Differential Revision: D27366034

Pulled By: mrambacher

fbshipit-source-id: 6a3f3674890bb6acc751e9c5887a8fbb6adca5df
Parent: ae7a795686
Commit: 524b10bd6e
@@ -705,7 +705,7 @@ struct AdvancedColumnFamilyOptions {
 // updated from the file system.
 // Pre-req: This needs max_open_files to be set to -1.
 // In Level: Non-bottom-level files older than TTL will go through the
-// compation process.
+// compaction process.
 // In FIFO: Files older than TTL will be deleted.
 // unit: seconds. Ex: 1 day = 1 * 24 * 60 * 60
 // In FIFO, this option will have the same meaning as
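For illustration, a minimal sketch of enabling the TTL described in this hunk; the helper name is hypothetical and the 30-day value is an arbitrary example, not a recommendation.

```cpp
#include "rocksdb/options.h"

void EnableTtlCompaction(rocksdb::Options& options) {
  options.max_open_files = -1;      // pre-requisite noted in the comment above
  options.ttl = 30 * 24 * 60 * 60;  // 30 days, in seconds
}
```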
@@ -151,7 +151,7 @@ class Cache {
 // - Name-value option pairs -- "capacity=1M; num_shard_bits=4;
 // For the LRUCache, the values are defined in LRUCacheOptions.
 // @param result The new Cache object
-// @return OK if the cache was sucessfully created
+// @return OK if the cache was successfully created
 // @return NotFound if an invalid name was specified in the value
 // @return InvalidArgument if either the options were not valid
 static Status CreateFromString(const ConfigOptions& config_options,
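A hedged sketch of the CreateFromString usage this hunk documents, assuming the result out-parameter is a std::shared_ptr&lt;Cache&gt;; the option string mirrors the name-value pairs shown above, and the wrapper function is illustrative.

```cpp
#include "rocksdb/cache.h"
#include "rocksdb/convenience.h"  // ConfigOptions

std::shared_ptr<rocksdb::Cache> MakeLruCache() {
  rocksdb::ConfigOptions config_options;
  std::shared_ptr<rocksdb::Cache> cache;
  rocksdb::Status s = rocksdb::Cache::CreateFromString(
      config_options, "capacity=1M;num_shard_bits=4", &cache);
  return s.ok() ? cache : nullptr;  // NotFound/InvalidArgument on bad input
}
```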
@@ -33,7 +33,7 @@ class ConcurrentTaskLimiter {
 virtual int32_t GetOutstandingTask() const = 0;
 };

-// Create a ConcurrentTaskLimiter that can be shared with mulitple CFs
+// Create a ConcurrentTaskLimiter that can be shared with multiple CFs
 // across RocksDB instances to control concurrent tasks.
 //
 // @param name: Name of the limiter.
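As a sketch, under the assumption that the limiter is attached through ColumnFamilyOptions::compaction_thread_limiter, sharing one limiter across two column families might look like this; the name and limit are illustrative.

```cpp
#include <memory>

#include "rocksdb/concurrent_task_limiter.h"
#include "rocksdb/options.h"

void ShareCompactionLimiter(rocksdb::ColumnFamilyOptions& cf1,
                            rocksdb::ColumnFamilyOptions& cf2) {
  // One limiter, shared across CFs (and potentially across DB instances).
  std::shared_ptr<rocksdb::ConcurrentTaskLimiter> limiter(
      rocksdb::NewConcurrentTaskLimiter("compaction_limit", /*limit=*/4));
  cf1.compaction_thread_limiter = limiter;
  cf2.compaction_thread_limiter = limiter;
}
```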
@@ -28,7 +28,7 @@ struct DBOptions;
 // standard way of configuring objects. A Configurable object can:
 // -> Populate itself given:
 // - One or more "name/value" pair strings
-// - A string repesenting the set of name=value properties
+// - A string representing the set of name=value properties
 // - A map of name/value properties.
 // -> Convert itself into its string representation
 // -> Dump itself to a Logger
@@ -166,7 +166,7 @@ class Configurable {
 // This is the inverse of ConfigureFromString.
 // @param config_options Controls how serialization happens.
 // @param result The string representation of this object.
-// @return OK If the options for this object wer successfully serialized.
+// @return OK If the options for this object were successfully serialized.
 // @return InvalidArgument If one or more of the options could not be
 // serialized.
 Status GetOptionString(const ConfigOptions& config_options,
@@ -276,7 +276,7 @@ class Configurable {
 // Classes may override this method to provide further specialization (such as
 // returning a sub-option)
 //
-// The default implemntation looks at the registered options. If the
+// The default implementation looks at the registered options. If the
 // input name matches that of a registered option, the pointer registered
 // with that name is returned.
 // e.g,, RegisterOptions("X", &my_ptr, ...); GetOptionsPtr("X") returns
@@ -93,7 +93,7 @@ struct ConfigOptions {
 #ifndef ROCKSDB_LITE

 // The following set of functions provide a way to construct RocksDB Options
-// from a string or a string-to-string map. Here're the general rule of
+// from a string or a string-to-string map. Here is the general rule of
 // setting option values from strings by type. Some RocksDB types are also
 // supported in these APIs. Please refer to the comment of the function itself
 // to find more information about how to config those RocksDB types.
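A minimal sketch of the string-based configuration these functions provide; the option string, base options, and function name are illustrative.

```cpp
#include "rocksdb/convenience.h"
#include "rocksdb/options.h"

rocksdb::Status BuildCfOptions(rocksdb::ColumnFamilyOptions* new_options) {
  rocksdb::ConfigOptions config_options;
  rocksdb::ColumnFamilyOptions base_options;
  return rocksdb::GetColumnFamilyOptionsFromString(
      config_options, base_options,
      "write_buffer_size=65536;max_write_buffer_number=4", new_options);
}
```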
@@ -149,7 +149,7 @@ struct ConfigOptions {
 // ColumnFamilyOptions "new_options".
 //
 // Below are the instructions of how to config some non-primitive-typed
-// options in ColumnFOptions:
+// options in ColumnFamilyOptions:
 //
 // * table_factory:
 // table_factory can be configured using our custom nested-option syntax.
@@ -191,7 +191,7 @@ struct ConfigOptions {
 // * {"memtable", "skip_list:5"} is equivalent to setting
 // memtable to SkipListFactory(5).
 // - PrefixHash:
-// Pass "prfix_hash:<hash_bucket_count>" to config memtable
+// Pass "prefix_hash:<hash_bucket_count>" to config memtable
 // to use PrefixHash, or simply "prefix_hash" to use the default
 // PrefixHash.
 // [Example]:
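A sketch of the memtable syntax above, reusing the documented "skip_list:5" form (the "prefix_hash:&lt;n&gt;" form works the same way but also needs a prefix_extractor, omitted here); the helper name is illustrative.

```cpp
#include "rocksdb/convenience.h"
#include "rocksdb/options.h"

rocksdb::Status ConfigureMemtable(rocksdb::ColumnFamilyOptions* cf_opts) {
  rocksdb::ConfigOptions config_options;
  rocksdb::ColumnFamilyOptions base;
  // Equivalent to setting memtable to SkipListFactory(5), per the comment.
  return rocksdb::GetColumnFamilyOptionsFromString(
      config_options, base, "memtable=skip_list:5", cf_opts);
}
```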
@@ -112,7 +112,7 @@ struct RangePtr {
 };

 // It is valid that files_checksums and files_checksum_func_names are both
-// empty (no checksum informaiton is provided for ingestion). Otherwise,
+// empty (no checksum information is provided for ingestion). Otherwise,
 // their sizes should be the same as external_files. The file order should
 // be the same in three vectors and guaranteed by the caller.
 struct IngestExternalFileArg {
@@ -205,11 +205,11 @@ class DB {
 // to open the primary instance.
 // The secondary_path argument points to a directory where the secondary
 // instance stores its info log.
-// The column_families argument specifieds a list of column families to open.
+// The column_families argument specifies a list of column families to open.
 // If any of the column families does not exist, the function returns non-OK
 // status.
 // The handles is an out-arg corresponding to the opened database column
-// familiy handles.
+// family handles.
 // The dbptr is an out-arg corresponding to the opened secondary instance.
 // The pointer points to a heap-allocated database, and the caller should
 // delete it after use. Before deleting the dbptr, the user should also
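A hedged sketch of the column-family overload of DB::OpenAsSecondary described here; the paths are placeholders, and per the comment above the caller owns (and must delete) the returned handles and DB.

```cpp
#include <vector>

#include "rocksdb/db.h"

rocksdb::Status OpenSecondary(
    std::vector<rocksdb::ColumnFamilyHandle*>* handles, rocksdb::DB** dbptr) {
  rocksdb::DBOptions db_options;
  db_options.max_open_files = -1;  // often set to -1 for secondary instances
  std::vector<rocksdb::ColumnFamilyDescriptor> column_families;
  column_families.emplace_back(rocksdb::kDefaultColumnFamilyName,
                               rocksdb::ColumnFamilyOptions());
  return rocksdb::DB::OpenAsSecondary(db_options, "/path/to/primary_db",
                                      "/path/to/secondary_info_log",
                                      column_families, handles, dbptr);
}
```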
@@ -745,7 +745,7 @@ class DB {
 static const std::string kCFStats;

 // "rocksdb.cfstats-no-file-histogram" - returns a multi-line string with
-// general columm family stats per-level over db's lifetime ("L<n>"),
+// general column family stats per-level over db's lifetime ("L<n>"),
 // aggregated over db's lifetime ("Sum"), and aggregated over the
 // interval since the last retrieval ("Int").
 static const std::string kCFStatsNoFileHistogram;
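A small sketch of retrieving the property this hunk documents, via DB::GetProperty; the function name is illustrative.

```cpp
#include <cstdio>
#include <string>

#include "rocksdb/db.h"

void PrintCfStats(rocksdb::DB* db) {
  std::string stats;
  if (db->GetProperty(rocksdb::DB::Properties::kCFStatsNoFileHistogram,
                      &stats)) {
    std::printf("%s\n", stats.c_str());  // "L<n>", "Sum", and "Int" rows
  }
}
```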
@@ -1025,7 +1025,7 @@ class DB {
 uint64_t* sizes) = 0;

 // Simpler versions of the GetApproximateSizes() method above.
-// The include_flags argumenbt must of type DB::SizeApproximationFlags
+// The include_flags argument must of type DB::SizeApproximationFlags
 // and can not be NONE.
 virtual Status GetApproximateSizes(ColumnFamilyHandle* column_family,
 const Range* ranges, int n,
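A sketch of calling the simpler form, assuming include_flags defaults to INCLUDE_FILES when omitted; the key range is a placeholder.

```cpp
#include "rocksdb/db.h"

uint64_t ApproximateRangeSize(rocksdb::DB* db) {
  rocksdb::Range range("a", "z");  // approximate size of keys in [a, z)
  uint64_t size = 0;
  // Relies on the default include_flags (assumed INCLUDE_FILES here).
  db->GetApproximateSizes(db->DefaultColumnFamily(), &range, 1, &size);
  return size;
}
```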
@@ -437,7 +437,7 @@ class Env {
 virtual Status GetTestDirectory(std::string* path) = 0;

 // Create and returns a default logger (an instance of EnvLogger) for storing
-// informational messages. Derived classes can overide to provide custom
+// informational messages. Derived classes can override to provide custom
 // logger.
 virtual Status NewLogger(const std::string& fname,
 std::shared_ptr<Logger>* result);
@@ -798,7 +798,7 @@ class WritableFile {
 virtual ~WritableFile();

 // Append data to the end of the file
-// Note: A WriteabelFile object must support either Append or
+// Note: A WriteableFile object must support either Append or
 // PositionedAppend, so the users cannot mix the two.
 virtual Status Append(const Slice& data) = 0;

@@ -73,7 +73,7 @@ class BlockCipher {
 // - ROT13 Create a ROT13 Cipher
 // - ROT13:nn Create a ROT13 Cipher with block size of nn
 // @param result The new cipher object
-// @return OK if the cipher was sucessfully created
+// @return OK if the cipher was successfully created
 // @return NotFound if an invalid name was specified in the value
 // @return InvalidArgument if either the options were not valid
 static Status CreateFromString(const ConfigOptions& config_options,
@@ -118,7 +118,7 @@ class EncryptionProvider {
 // - CTR Create a CTR provider
 // - test://CTR Create a CTR provider and initialize it for tests.
 // @param result The new provider object
-// @return OK if the provider was sucessfully created
+// @return OK if the provider was successfully created
 // @return NotFound if an invalid name was specified in the value
 // @return InvalidArgument if either the options were not valid
 static Status CreateFromString(const ConfigOptions& config_options,
@@ -116,7 +116,7 @@ class FileChecksumList {
 // Create a new file checksum list.
 extern FileChecksumList* NewFileChecksumList();

-// Return a shared_ptr of the builtin Crc32c based file checksum generatory
+// Return a shared_ptr of the builtin Crc32c based file checksum generator
 // factory object, which can be shared to create the Crc32c based checksum
 // generator object.
 // Note: this implementation is compatible with many other crc32c checksum
@@ -473,7 +473,7 @@ class FileSystem {
 IODebugContext* dbg) = 0;

 // Create and returns a default logger (an instance of EnvLogger) for storing
-// informational messages. Derived classes can overide to provide custom
+// informational messages. Derived classes can override to provide custom
 // logger.
 virtual IOStatus NewLogger(const std::string& fname, const IOOptions& io_opts,
 std::shared_ptr<Logger>* result,
@@ -725,7 +725,7 @@ class FSRandomAccessFile {
 };

 // A data structure brings the data verification information, which is
-// used togther with data being written to a file.
+// used together with data being written to a file.
 struct DataVerificationInfo {
 // checksum of the data being written.
 Slice checksum;
@@ -753,7 +753,7 @@ class FSWritableFile {
 virtual ~FSWritableFile() {}

 // Append data to the end of the file
-// Note: A WriteabelFile object must support either Append or
+// Note: A WriteableFile object must support either Append or
 // PositionedAppend, so the users cannot mix the two.
 virtual IOStatus Append(const Slice& data, const IOOptions& options,
 IODebugContext* dbg) = 0;
@@ -176,7 +176,7 @@ class MergeOperator {
 // PartialMergeMulti should combine them into a single merge operation that is
 // saved into *new_value, and then it should return true. *new_value should
 // be constructed such that a call to DB::Merge(key, *new_value) would yield
-// the same result as subquential individual calls to DB::Merge(key, operand)
+// the same result as sequential individual calls to DB::Merge(key, operand)
 // for each operand in operand_list from front() to back().
 //
 // The string that new_value is pointing to will be empty.
@@ -116,7 +116,7 @@ struct SstFileMetaData {
 // referenced by the file.
 // An SST file may be generated by compactions whose input files may
 // in turn be generated by earlier compactions. The creation time of the
-// oldest SST file that is the compaction ancester of this file.
+// oldest SST file that is the compaction ancestor of this file.
 // The timestamp is provided SystemClock::GetCurrentTime().
 // 0 if the information is not available.
 //
@@ -836,7 +836,7 @@ struct DBOptions {
 // Allows OS to incrementally sync files to disk while they are being
 // written, asynchronously, in the background. This operation can be used
 // to smooth out write I/Os over time. Users shouldn't rely on it for
-// persistency guarantee.
+// persistence guarantee.
 // Issue one request for every bytes_per_sync written. 0 turns it off.
 //
 // You may consider using rate_limiter to regulate write rate to device.
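To make the two knobs mentioned here concrete, a sketch with illustrative values (not recommendations); the function name is hypothetical.

```cpp
#include "rocksdb/options.h"
#include "rocksdb/rate_limiter.h"

void SmoothWriteIo(rocksdb::Options& options) {
  options.bytes_per_sync = 1 << 20;  // one sync request per 1MB written
  options.rate_limiter.reset(
      rocksdb::NewGenericRateLimiter(10 << 20));  // ~10MB/s write budget
}
```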
@@ -1190,7 +1190,7 @@ struct DBOptions {
 // writing a file, by tracing back to the writing host. These corruptions
 // may not be caught by the checksum since they happen before checksumming.
 // If left as default, the table writer will substitute it with the actual
-// hostname when writing the SST file. If set to an empty stirng, the
+// hostname when writing the SST file. If set to an empty string, the
 // property will not be written to the SST file.
 //
 // Default: hostname
@@ -1279,7 +1279,7 @@ struct ReadOptions {
 // Default: nullptr
 const Slice* iterate_lower_bound;

-// "iterate_upper_bound" defines the extent upto which the forward iterator
+// "iterate_upper_bound" defines the extent up to which the forward iterator
 // can returns entries. Once the bound is reached, Valid() will be false.
 // "iterate_upper_bound" is exclusive ie the bound value is
 // not a valid entry. If prefix_extractor is not null, the Seek target
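A sketch of bounding a forward scan with iterate_upper_bound as described here; the bound Slice must outlive the iterator, and the keys are placeholders.

```cpp
#include <memory>

#include "rocksdb/db.h"

void ScanBoundedRange(rocksdb::DB* db) {
  rocksdb::ReadOptions read_options;
  rocksdb::Slice upper("user2000");  // exclusive upper bound
  read_options.iterate_upper_bound = &upper;
  std::unique_ptr<rocksdb::Iterator> it(db->NewIterator(read_options));
  for (it->Seek("user1000"); it->Valid(); it->Next()) {
    // Valid() turns false once the iterator reaches "user2000".
  }
}
```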
@@ -1291,7 +1291,7 @@ struct ReadOptions {

 // RocksDB does auto-readahead for iterators on noticing more than two reads
 // for a table file. The readahead starts at 8KB and doubles on every
-// additional read upto 256KB.
+// additional read up to 256KB.
 // This option can help if most of the range scans are large, and if it is
 // determined that a larger readahead than that enabled by auto-readahead is
 // needed.
@@ -1349,7 +1349,7 @@ struct ReadOptions {
 // When true, by default use total_order_seek = true, and RocksDB can
 // selectively enable prefix seek mode if won't generate a different result
 // from total_order_seek, based on seek key, and iterator upper bound.
-// Not suppported in ROCKSDB_LITE mode, in the way that even with value true
+// Not supported in ROCKSDB_LITE mode, in the way that even with value true
 // prefix mode is not used.
 // Default: false
 bool auto_prefix_mode;
@@ -1425,7 +1425,7 @@ struct ReadOptions {
 // A timeout in microseconds to be passed to the underlying FileSystem for
 // reads. As opposed to deadline, this determines the timeout for each
 // individual file read request. If a MultiGet/Get/Seek/Next etc call
-// results in multiple reads, each read can last upto io_timeout us.
+// results in multiple reads, each read can last up to io_timeout us.
 std::chrono::microseconds io_timeout;

 // It limits the maximum cumulative value size of the keys in batch while
@@ -1479,7 +1479,7 @@ struct WriteOptions {
 bool no_slowdown;

 // If true, this write request is of lower priority if compaction is
-// behind. In this case, no_slowdown = true, the request will be cancelled
+// behind. In this case, no_slowdown = true, the request will be canceled
 // immediately with Status::Incomplete() returned. Otherwise, it will be
 // slowed down. The slowdown value is determined by RocksDB to guarantee
 // it introduces minimum impacts to high priority writes.
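A sketch of the low-priority write path described here; whether to retry on Status::Incomplete() is application policy, and the key/value are placeholders.

```cpp
#include "rocksdb/db.h"

rocksdb::Status LowPriorityPut(rocksdb::DB* db) {
  rocksdb::WriteOptions write_options;
  write_options.low_pri = true;
  write_options.no_slowdown = true;  // cancel rather than stall
  // May return Status::Incomplete() if compaction is behind.
  return db->Put(write_options, "key", "value");
}
```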
@@ -1616,7 +1616,7 @@ struct IngestExternalFileOptions {
 bool allow_blocking_flush = true;
 // Set to true if you would like duplicate keys in the file being ingested
 // to be skipped rather than overwriting existing data under that key.
-// Usecase: back-fill of some historical data in the database without
+// Use case: back-fill of some historical data in the database without
 // over-writing existing newer version of data.
 // This option could only be used if the DB has been running
 // with allow_ingest_behind=true since the dawn of time.
@@ -1656,7 +1656,7 @@ struct IngestExternalFileOptions {
 // will be ignored; 2) If DB enable the checksum function, we calculate the
 // sst file checksum after the file is moved or copied and compare the
 // checksum and checksum name. If checksum or checksum function name does
-// not match, ingestion will be failed. If the verification is sucessful,
+// not match, ingestion will be failed. If the verification is successful,
 // checksum and checksum function name will be stored in Manifest.
 // If this option is set to FALSE, 1) if DB does not enable checksum,
 // the ingested checksum information will be ignored; 2) if DB enable the
@@ -57,7 +57,7 @@ struct PerfContext {
 // enable per level perf context and allocate storage for PerfContextByLevel
 void EnablePerLevelPerfContext();

-// temporarily disable per level perf contxt by setting the flag to false
+// temporarily disable per level perf context by setting the flag to false
 void DisablePerLevelPerfContext();

 // free the space for PerfContextByLevel, also disable per level perf context
@@ -62,7 +62,7 @@ class SliceTransform {
 virtual bool InRange(const Slice& /*dst*/) const { return false; }

 // Some SliceTransform will have a full length which can be used to
-// determine if two keys are consecuitive. Can be disabled by always
+// determine if two keys are consecutive. Can be disabled by always
 // returning 0
 virtual bool FullLengthEnabled(size_t* /*len*/) const { return false; }

@@ -51,12 +51,12 @@ class SstPartitioner {
 // It is called for all keys in compaction. When partitioner want to create
 // new SST file it needs to return true. It means compaction job will finish
 // current SST file where last key is "prev_user_key" parameter and start new
-// SST file where first key is "current_user_key". Returns decission if
+// SST file where first key is "current_user_key". Returns decision if
 // partition boundary was detected and compaction should create new file.
 virtual PartitionerResult ShouldPartition(
 const PartitionerRequest& request) = 0;

-// Called with smallest and largest keys in SST file when compation try to do
+// Called with smallest and largest keys in SST file when compaction try to do
 // trivial move. Returns true is partitioner allows to do trivial move.
 virtual bool CanDoTrivialMove(const Slice& smallest_user_key,
 const Slice& largest_user_key) = 0;
@@ -117,7 +117,7 @@ enum Tickers : uint32_t {
 COMPACTION_RANGE_DEL_DROP_OBSOLETE, // all keys in range were deleted.
 // Deletions obsoleted before bottom level due to file gap optimization.
 COMPACTION_OPTIMIZED_DEL_DROP_OBSOLETE,
-// If a compaction was cancelled in sfm to prevent ENOSPC
+// If a compaction was canceled in sfm to prevent ENOSPC
 COMPACTION_CANCELLED,

 // Number of keys written to the database via the Put and Write call's
@@ -183,7 +183,7 @@ enum Tickers : uint32_t {
 // over large number of keys with same userkey.
 NUMBER_OF_RESEEKS_IN_ITERATION,

-// Record the number of calls to GetUpadtesSince. Useful to keep track of
+// Record the number of calls to GetUpdatesSince. Useful to keep track of
 // transaction log iterator refreshes
 GET_UPDATES_SINCE_CALLS,
 BLOCK_CACHE_COMPRESSED_MISS, // miss in the compressed block cache
@@ -447,7 +447,7 @@ enum Histograms : uint32_t {
 BLOB_DB_VALUE_SIZE,
 // BlobDB Put/PutWithTTL/PutUntil/Write latency.
 BLOB_DB_WRITE_MICROS,
-// BlobDB Get lagency.
+// BlobDB Get latency.
 BLOB_DB_GET_MICROS,
 // BlobDB MultiGet latency.
 BLOB_DB_MULTIGET_MICROS,
@@ -555,7 +555,7 @@ struct PlainTableOptions {

 // @store_index_in_file: compute plain table index and bloom filter during
 // file building and store it in file. When reading
-// file, index will be mmaped instead of recomputation.
+// file, index will be mapped instead of recomputation.
 bool store_index_in_file = false;
 };

@@ -28,7 +28,7 @@ class TraceWriter {
 };

 // TraceReader allows reading RocksDB traces from any system, one operation at
-// a time. A RocksDB Replayer could depend on this to replay opertions.
+// a time. A RocksDB Replayer could depend on this to replay operations.
 class TraceReader {
 public:
 TraceReader() {}
@@ -36,12 +36,12 @@ class CompactionOptionsUniversal {
 // The size amplification is defined as the amount (in percentage) of
 // additional storage needed to store a single byte of data in the database.
 // For example, a size amplification of 2% means that a database that
-// contains 100 bytes of user-data may occupy upto 102 bytes of
+// contains 100 bytes of user-data may occupy up to 102 bytes of
 // physical storage. By this definition, a fully compacted database has
 // a size amplification of 0%. Rocksdb uses the following heuristic
 // to calculate size amplification: it assumes that all files excluding
 // the earliest file contribute to the size amplification.
-// Default: 200, which means that a 100 byte database could require upto
+// Default: 200, which means that a 100 byte database could require up to
 // 300 bytes of storage.
 unsigned int max_size_amplification_percent;

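Restating the arithmetic above: at 2%, 100 bytes of user data may occupy up to 102 bytes on disk; at the default 200, up to 300 bytes. A sketch of setting the option (the value is illustrative):

```cpp
#include "rocksdb/options.h"

void ConfigureUniversalCompaction(rocksdb::Options& options) {
  options.compaction_style = rocksdb::kCompactionStyleUniversal;
  // 2% amplification: 100 bytes of user data -> at most 102 bytes on disk.
  options.compaction_options_universal.max_size_amplification_percent = 2;
}
```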
@@ -470,7 +470,7 @@ class BackupEngine {
 virtual Status DeleteBackup(BackupID backup_id) = 0;

 // Call this from another thread if you want to stop the backup
-// that is currently happening. It will return immediatelly, will
+// that is currently happening. It will return immediately, will
 // not wait for the backup to stop.
 // The backup will stop ASAP and the call to CreateNewBackup will
 // return Status::Incomplete(). It will not clean up after itself, but
@@ -255,7 +255,7 @@ class OptionTypeInfo {
 // - Create a static map of string values to the corresponding enum value
 // - Call this method passing the static map in as a parameter.
 // Note that it is not necessary to add a new OptionType or make any
-// other changes -- the returned object handles parsing, serialiation, and
+// other changes -- the returned object handles parsing, serialization, and
 // comparisons.
 //
 // @param offset The offset in the option object for this enum
@@ -718,7 +718,7 @@ class OptionTypeInfo {
 // @param opts The string in which to find the next token
 // @param delimiter The delimiter between tokens
 // @param start The position in opts to start looking for the token
-// @parem ed Returns the end position in opts of the token
+// @param ed Returns the end position in opts of the token
 // @param token Returns the token
 // @returns OK if a token was found
 // @return InvalidArgument if the braces mismatch
@@ -859,7 +859,7 @@ Status SerializeVector(const ConfigOptions& config_options,
 // @param vec1,vec2 The vectors to compare.
 // @param mismatch If the vectors are not equivalent, mismatch will point to
 // the first
-// element of the comparison tht did not match.
+// element of the comparison that did not match.
 // @return true If vec1 and vec2 are "equal", false otherwise
 template <typename T>
 bool VectorsAreEqual(const ConfigOptions& config_options,
@@ -51,7 +51,7 @@ struct ConfigOptions;
 // BlockBasedTableOptions and making necessary changes.
 //
 // ignore_unknown_options can be set to true if you want to ignore options
-// that are from a newer version of the db, esentially for forward
+// that are from a newer version of the db, essentially for forward
 // compatibility.
 //
 // config_options contains a set of options that controls the processing
@@ -66,7 +66,7 @@ struct ConfigOptions;
 // @return the function returns an OK status when it went successfully. If
 // the specified "dbpath" does not contain any option file, then a
 // Status::NotFound will be returned. A return value other than
-// Status::OK or Status::NotFound indicates there're some error related
+// Status::OK or Status::NotFound indicates there is some error related
 // to the options file itself.
 //
 // @see LoadOptionsFromFile
@@ -25,7 +25,7 @@ class SimCache;
 // can help users tune their current block cache size, and determine how
 // efficient they are using the memory.
 //
-// Since GetSimCapacity() returns the capacity for simulutation, it differs from
+// Since GetSimCapacity() returns the capacity for simulation, it differs from
 // actual memory usage, which can be estimated as:
 // sim_capacity * entry_size / (entry_size + block_size),
 // where 76 <= entry_size <= 104,
@@ -60,7 +60,7 @@ class SimCache : public Cache {
 // sets the maximum configured capacity of the simcache. When the new
 // capacity is less than the old capacity and the existing usage is
 // greater than new capacity, the implementation will purge old entries
-// to fit new capapicty.
+// to fit new capacity.
 virtual void SetSimCapacity(size_t capacity) = 0;

 // returns the lookup times of simcache
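A sketch of wrapping a real cache in a SimCache to evaluate a larger capacity, per the comments above; the sizes and shard count are illustrative.

```cpp
#include <memory>

#include "rocksdb/cache.h"
#include "rocksdb/utilities/sim_cache.h"

std::shared_ptr<rocksdb::SimCache> MakeSimCache() {
  // 64MB actually cached; hit/miss statistics simulated as if 1GB.
  std::shared_ptr<rocksdb::Cache> real_cache = rocksdb::NewLRUCache(64 << 20);
  return rocksdb::NewSimCache(real_cache, /*sim_capacity=*/1 << 30,
                              /*num_shard_bits=*/6);
}
```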
@@ -194,7 +194,7 @@ struct TransactionDBOptions {

 // If true, the TransactionDB implementation might skip concurrency control
 // unless it is overridden by TransactionOptions or
-// TransactionDBWriteOptimizations. This can be used in conjuction with
+// TransactionDBWriteOptimizations. This can be used in conjunction with
 // DBOptions::unordered_write when the TransactionDB is used solely for write
 // ordering rather than concurrency control.
 bool skip_concurrency_control = false;
@@ -61,7 +61,7 @@ class TransactionDBCondVar {
 //
 // Returns OK if notified.
 // Returns TimedOut if timeout is reached.
-// Returns other status if TransactionDB should otherwis stop waiting and
+// Returns other status if TransactionDB should otherwise stop waiting and
 // fail the operation.
 // May return OK spuriously even if not notified.
 virtual Status WaitFor(std::shared_ptr<TransactionDBMutex> mutex,
@@ -169,7 +169,7 @@ class WriteBatchWithIndex : public WriteBatchBase {
 // returned iterator will also delete the base_iterator.
 //
 // Updating write batch with the current key of the iterator is not safe.
-// We strongly recommand users not to do it. It will invalidate the current
+// We strongly recommend users not to do it. It will invalidate the current
 // key() and value() of the iterator. This invalidation happens even before
 // the write batch update finishes. The state may recover after Next() is
 // called.
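A sketch of the base-iterator composition described here; per the comment above, the returned iterator takes ownership of (and deletes) the base iterator, and the batch should not be updated at the iterator's current key while iterating. Keys are placeholders.

```cpp
#include <memory>

#include "rocksdb/db.h"
#include "rocksdb/utilities/write_batch_with_index.h"

void IterateBatchAndDb(rocksdb::DB* db) {
  rocksdb::WriteBatchWithIndex batch;
  batch.Put("pending_key", "pending_value");  // not yet written to db
  rocksdb::ReadOptions read_options;
  std::unique_ptr<rocksdb::Iterator> it(
      batch.NewIteratorWithBase(db->NewIterator(read_options)));
  for (it->SeekToFirst(); it->Valid(); it->Next()) {
    // Sees both committed entries and the batch's uncommitted entries.
  }
}
```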
@@ -312,10 +312,10 @@ class WriteBatch : public WriteBatchBase {
 // Returns true if MarkEndPrepare will be called during Iterate
 bool HasEndPrepare() const;

-// Returns trie if MarkCommit will be called during Iterate
+// Returns true if MarkCommit will be called during Iterate
 bool HasCommit() const;

-// Returns trie if MarkRollback will be called during Iterate
+// Returns true if MarkRollback will be called during Iterate
 bool HasRollback() const;

 // Assign timestamp to write batch