Fixed some spelling mistakes
Summary: Closes https://github.com/facebook/rocksdb/pull/2314

Differential Revision: D5079601

Pulled By: sagar0

fbshipit-source-id: ae5696fd735718f544435c64c3179c49b8c04349
parent 146b7718f0
commit f5ba131bf8
@@ -256,7 +256,7 @@
 * Added a new way to report QPS from db_bench (check out --report_file and --report_interval_seconds)
 * Added a cache for individual rows. See DBOptions::row_cache for more info.
 * Several new features on EventListener (see include/rocksdb/listener.h):
-  - OnCompationCompleted() now returns per-compaciton job statistics, defined in include/rocksdb/compaction_job_stats.h.
+  - OnCompationCompleted() now returns per-compaction job statistics, defined in include/rocksdb/compaction_job_stats.h.
   - Added OnTableFileCreated() and OnTableFileDeleted().
 * Add compaction_options_universal.enable_trivial_move to true, to allow trivial move while performing universal compaction. Trivial move will happen only when all the input files are non overlapping.
 
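For reference, a minimal sketch of how the row cache and the universal-compaction trivial-move option mentioned above can be enabled from C++; the database path and cache size are arbitrary example values.

#include <rocksdb/cache.h>
#include <rocksdb/db.h>
#include <rocksdb/options.h>

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;

  // Cache individual rows (DBOptions::row_cache); 64 MB is an arbitrary size.
  options.row_cache = rocksdb::NewLRUCache(64 << 20);

  // Use universal compaction and allow trivial moves when the input files
  // of a compaction do not overlap.
  options.compaction_style = rocksdb::kCompactionStyleUniversal;
  options.compaction_options_universal.enable_trivial_move = true;

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/rowcache_example", &db);
  delete db;
  return s.ok() ? 0 : 1;
}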
@@ -391,7 +391,7 @@ class DBImpl : public DB {
 
 // Return the lastest MutableCFOptions of a column family
 Status TEST_GetLatestMutableCFOptions(ColumnFamilyHandle* column_family,
-MutableCFOptions* mutable_cf_opitons);
+MutableCFOptions* mutable_cf_options);
 
 Cache* TEST_table_cache() { return table_cache_.get(); }
 
@@ -1098,7 +1098,7 @@ class DBImpl : public DB {
 // Indicate DB was opened successfully
 bool opened_successfully_;
 
-// minmum log number still containing prepared data.
+// minimum log number still containing prepared data.
 // this is used by FindObsoleteFiles to determine which
 // flushed logs we must keep around because they still
 // contain prepared data which has not been flushed or rolled back
@@ -1111,7 +1111,7 @@ class DBImpl : public DB {
 // to prepared_section_completed_ which maps LOG -> instance_count
 // since a log could contain multiple prepared sections
 //
-// when trying to determine the minmum log still active we first
+// when trying to determine the minimum log still active we first
 // consult min_log_with_prep_. while that root value maps to
 // a value > 0 in prepared_section_completed_ we decrement the
 // instance_count for that log and pop the root value in
@@ -1403,7 +1403,7 @@ Status DBImpl::BackgroundCompaction(bool* made_progress,
 // Can't compact right now, but try again later
 TEST_SYNC_POINT("DBImpl::BackgroundCompaction()::Conflict");
 
-// Stay in the compaciton queue.
+// Stay in the compaction queue.
 unscheduled_compactions_++;
 
 return Status::OK();
@@ -118,7 +118,7 @@ class DelayFilterFactory : public CompactionFilterFactory {
 };
 } // namespace
 
-// Make sure we don't trigger a problem if the trigger conditon is given
+// Make sure we don't trigger a problem if the trigger condtion is given
 // to be 0, which is invalid.
 TEST_P(DBTestUniversalCompaction, UniversalCompactionSingleSortedRun) {
 Options options = CurrentOptions();
@@ -37,11 +37,11 @@ enum CompactionStyle : char {
 kCompactionStyleNone = 0x3,
 };
 
-// In Level-based comapction, it Determines which file from a level to be
+// In Level-based compaction, it Determines which file from a level to be
 // picked to merge to the next level. We suggest people try
 // kMinOverlappingRatio first when you tune your database.
 enum CompactionPri : char {
-// Slightly Priotize larger files by size compensated by #deletes
+// Slightly prioritize larger files by size compensated by #deletes
 kByCompensatedSize = 0x0,
 // First compact files whose data's latest update time is oldest.
 // Try this if you only update some hot keys in small ranges.
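As a small illustration of the CompactionPri choices documented here, the priority is selected through the compaction_pri option; kMinOverlappingRatio is the value the comment suggests trying first.

#include <rocksdb/options.h>

rocksdb::Options MakeLevelCompactionOptions() {
  rocksdb::Options options;
  // Level-based compaction picks files according to compaction_pri;
  // the comment above recommends trying kMinOverlappingRatio when tuning.
  options.compaction_pri = rocksdb::kMinOverlappingRatio;
  return options;
}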
@@ -182,7 +182,7 @@ class Cache {
 bool thread_safe) = 0;
 
 // Remove all entries.
-// Prerequisit: no entry is referenced.
+// Prerequisite: no entry is referenced.
 virtual void EraseUnRefEntries() = 0;
 
 virtual std::string GetPrintableOptions() const { return ""; }
@@ -15,7 +15,7 @@ struct CompactionJobStats {
 // Aggregate the CompactionJobStats from another instance with this one
 void Add(const CompactionJobStats& stats);
 
-// the elapsed time in micro of this compaction.
+// the elapsed time of this compaction in microseconds.
 uint64_t elapsed_micros;
 
 // the number of compaction input records.
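For illustration, a sketch of an EventListener that logs two of these per-compaction statistics; it assumes the CompactionJobStats instance is delivered through CompactionJobInfo::stats, as described in the changelog entry above.

#include <cstdio>

#include <rocksdb/listener.h>

class CompactionStatsLogger : public rocksdb::EventListener {
 public:
  void OnCompactionCompleted(rocksdb::DB* /*db*/,
                             const rocksdb::CompactionJobInfo& info) override {
    // elapsed_micros is the elapsed time of the compaction in microseconds.
    std::printf("compaction took %llu us, read %llu input records\n",
                static_cast<unsigned long long>(info.stats.elapsed_micros),
                static_cast<unsigned long long>(info.stats.num_input_records));
  }
};

The listener would be registered through options.listeners before opening the DB.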
@@ -34,7 +34,7 @@ namespace rocksdb {
 // - "optimize_filters_for_hits=true" in GetColumnFamilyOptionsFromString.
 //
 // * Integers:
-// Integers are converted directly from string, in addtion to the following
+// Integers are converted directly from string, in addition to the following
 // units that we support:
 // - 'k' or 'K' => 2^10
 // - 'm' or 'M' => 2^20
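As a rough example of the string form with these unit suffixes; the rocksdb/convenience.h header is an assumption about where GetColumnFamilyOptionsFromString lives in this version.

#include <cassert>

#include <rocksdb/convenience.h>
#include <rocksdb/options.h>

int main() {
  rocksdb::ColumnFamilyOptions base;
  rocksdb::ColumnFamilyOptions parsed;
  // 'M' is interpreted as 2^20, so write_buffer_size becomes 64 MiB.
  rocksdb::Status s = rocksdb::GetColumnFamilyOptionsFromString(
      base,
      "write_buffer_size=64M;max_write_buffer_number=4;"
      "optimize_filters_for_hits=true",
      &parsed);
  assert(s.ok());
  assert(parsed.write_buffer_size == 64u << 20);
  return 0;
}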
@@ -439,7 +439,7 @@ class DB {
 // It could also be used to return the stats in the format of the map.
 // In this case there will a pair of string to array of double for
 // each level as well as for "Sum". "Int" stats will not be affected
-// when this form of stats are retrived.
+// when this form of stats are retrieved.
 static const std::string kCFStatsNoFileHistogram;
 
 // "rocksdb.cf-file-histogram" - print out how many file reads to every
@@ -538,7 +538,7 @@ class DB {
 // by iterators or unfinished compactions.
 static const std::string kNumLiveVersions;
 
-// "rocksdb.current-super-version-number" - returns number of curent LSM
+// "rocksdb.current-super-version-number" - returns number of current LSM
 // version. It is a uint64_t integer number, incremented after there is
 // any change to the LSM tree. The number is not preserved after restarting
 // the DB. After DB restart, it will start from 0 again.
@@ -548,7 +548,7 @@ class DB {
 // live data in bytes.
 static const std::string kEstimateLiveDataSize;
 
-// "rocksdb.min-log-number-to-keep" - return the minmum log number of the
+// "rocksdb.min-log-number-to-keep" - return the minimum log number of the
 // log files that should be kept.
 static const std::string kMinLogNumberToKeep;
 
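A short sketch of querying these properties through DB::GetIntProperty; the exact set of available properties depends on the RocksDB version.

#include <cstdint>
#include <cstdio>

#include <rocksdb/db.h>

void PrintDbProperties(rocksdb::DB* db) {
  uint64_t live_bytes = 0;
  // "rocksdb.estimate-live-data-size"
  if (db->GetIntProperty(rocksdb::DB::Properties::kEstimateLiveDataSize,
                         &live_bytes)) {
    std::printf("estimated live data: %llu bytes\n",
                static_cast<unsigned long long>(live_bytes));
  }

  uint64_t min_log = 0;
  // "rocksdb.min-log-number-to-keep"
  if (db->GetIntProperty(rocksdb::DB::Properties::kMinLogNumberToKeep,
                         &min_log)) {
    std::printf("min log number to keep: %llu\n",
                static_cast<unsigned long long>(min_log));
  }
}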
@@ -956,7 +956,7 @@ class DB {
 //
 // (1) External SST files can be created using SstFileWriter
 // (2) We will try to ingest the files to the lowest possible level
-// even if the file compression dont match the level compression
+// even if the file compression doesn't match the level compression
 // (3) If IngestExternalFileOptions->ingest_behind is set to true,
 // we always ingest at the bottommost level, which should be reserved
 // for this purpose (see DBOPtions::allow_ingest_behind flag).
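A minimal sketch of the flow described in (1)-(3); it assumes the SstFileWriter constructor that takes only EnvOptions and Options, and the SST file path is a placeholder.

#include <rocksdb/db.h>
#include <rocksdb/options.h>
#include <rocksdb/sst_file_writer.h>

rocksdb::Status CreateAndIngest(rocksdb::DB* db,
                                const rocksdb::Options& options) {
  // (1) Create an external SST file; keys must be added in comparator order.
  rocksdb::SstFileWriter writer(rocksdb::EnvOptions(), options);
  rocksdb::Status s = writer.Open("/tmp/example.sst");
  if (!s.ok()) return s;
  s = writer.Add("key1", "value1");
  if (!s.ok()) return s;
  s = writer.Finish();
  if (!s.ok()) return s;

  // (2)/(3) Ingest the file; RocksDB targets the lowest level whose keys do
  // not overlap, or the bottommost level when ingest_behind is requested.
  rocksdb::IngestExternalFileOptions ifo;
  return db->IngestExternalFile({"/tmp/example.sst"}, ifo);
}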
@@ -183,7 +183,7 @@ class Env {
 unique_ptr<WritableFile>* result,
 const EnvOptions& options);
 
-// Open `fname` for random read and write, if file dont exist the file
+// Open `fname` for random read and write, if file doesn't exist the file
 // will be created. On success, stores a pointer to the new file in
 // *result and returns OK. On failure returns non-OK.
 //
@@ -318,7 +318,7 @@ class Env {
 // Wait for all threads started by StartThread to terminate.
 virtual void WaitForJoin() {}
 
-// Get thread pool queue length for specific thrad pool.
+// Get thread pool queue length for specific thread pool.
 virtual unsigned int GetThreadPoolQueueLen(Priority pri = LOW) const {
 return 0;
 }
@@ -687,7 +687,7 @@ class WritableFile {
 return;
 }
 // If this write would cross one or more preallocation blocks,
-// determine what the last preallocation block necesessary to
+// determine what the last preallocation block necessary to
 // cover this write would be and Allocate to that point.
 const auto block_size = preallocation_block_size_;
 size_t new_last_preallocated_block =
@@ -112,7 +112,7 @@ class FilterPolicy {
 //
 // bits_per_key: bits per key in bloom filter. A good value for bits_per_key
 // is 10, which yields a filter with ~ 1% false positive rate.
-// use_block_based_builder: use block based filter rather than full fiter.
+// use_block_based_builder: use block based filter rather than full filter.
 // If you want to builder full filter, it needs to be set to false.
 //
 // Callers must delete the result after any database that is using the
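For reference, a sketch of installing a full (non-block-based) bloom filter with roughly 10 bits per key, matching the guidance above.

#include <rocksdb/filter_policy.h>
#include <rocksdb/options.h>
#include <rocksdb/table.h>

rocksdb::Options MakeBloomFilterOptions() {
  rocksdb::BlockBasedTableOptions table_options;
  // 10 bits per key yields roughly a 1% false positive rate; passing false
  // for use_block_based_builder selects the full filter.
  table_options.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, false));

  rocksdb::Options options;
  options.table_factory.reset(
      rocksdb::NewBlockBasedTableFactory(table_options));
  return options;
}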
@@ -136,7 +136,7 @@ struct ColumnFamilyOptions : public AdvancedColumnFamilyOptions {
 // the same DB. The only exception is reserved for upgrade, where a DB
 // previously without a merge operator is introduced to Merge operation
 // for the first time. It's necessary to specify a merge operator when
-// openning the DB in this case.
+// opening the DB in this case.
 // Default: nullptr
 std::shared_ptr<MergeOperator> merge_operator = nullptr;
 
@@ -578,7 +578,7 @@ struct DBOptions {
 //
 // Files will be opened in "direct I/O" mode
 // which means that data r/w from the disk will not be cached or
-// bufferized. The hardware buffer of the devices may however still
+// buffered. The hardware buffer of the devices may however still
 // be used. Memory mapped files are not impacted by these parameters.
 
 // Use O_DIRECT for user reads
@@ -946,7 +946,7 @@ struct ReadOptions {
 // and iterator_upper_bound need to have the same prefix.
 // This is because ordering is not guaranteed outside of prefix domain.
 // There is no lower bound on the iterator. If needed, that can be easily
-// implemented
+// implemented.
 //
 // Default: nullptr
 const Slice* iterate_upper_bound;
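A brief sketch of iterate_upper_bound in use; the bound Slice must stay alive for as long as the iterator does, and the key range here is only an example.

#include <memory>

#include <rocksdb/db.h>
#include <rocksdb/options.h>

void ScanUserKeys(rocksdb::DB* db) {
  rocksdb::ReadOptions read_options;
  // Exclusive upper bound: iteration stops before "user:z".
  rocksdb::Slice upper_bound("user:z");
  read_options.iterate_upper_bound = &upper_bound;

  std::unique_ptr<rocksdb::Iterator> it(db->NewIterator(read_options));
  for (it->Seek("user:a"); it->Valid(); it->Next()) {
    // Every key observed here is < "user:z".
  }
}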
@@ -68,9 +68,9 @@ class RateLimiter {
 // The default should work for most cases.
 // @fairness: RateLimiter accepts high-pri requests and low-pri requests.
 // A low-pri request is usually blocked in favor of hi-pri request. Currently,
-// RocksDB assigns low-pri to request from compaciton and high-pri to request
+// RocksDB assigns low-pri to request from compaction and high-pri to request
 // from flush. Low-pri requests can get blocked if flush requests come in
-// continuouly. This fairness parameter grants low-pri requests permission by
+// continuously. This fairness parameter grants low-pri requests permission by
 // 1/fairness chance even though high-pri requests exist to avoid starvation.
 // You should be good by leaving it at default 10.
 extern RateLimiter* NewGenericRateLimiter(
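A small sketch of constructing the rate limiter with the defaults this comment describes (100 ms refill period, fairness 10); the 16 MB/s budget is an arbitrary example.

#include <rocksdb/options.h>
#include <rocksdb/rate_limiter.h>

rocksdb::Options MakeRateLimitedOptions() {
  rocksdb::Options options;
  // Low-pri (compaction) requests yield to high-pri (flush) requests, but
  // still get through with 1/fairness probability to avoid starvation.
  options.rate_limiter.reset(rocksdb::NewGenericRateLimiter(
      16 << 20 /* rate_bytes_per_sec */, 100 * 1000 /* refill_period_us */,
      10 /* fairness */));
  return options;
}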
@@ -122,7 +122,7 @@ class Slice {
 * A Slice that can be pinned with some cleanup tasks, which will be run upon
 * ::Reset() or object destruction, whichever is invoked first. This can be used
 * to avoid memcpy by having the PinnsableSlice object referring to the data
-* that is locked in the memory and release them after the data is consuned.
+* that is locked in the memory and release them after the data is consumed.
 */
 class PinnableSlice : public Slice, public Cleanable {
 public:
@@ -72,8 +72,8 @@ class SliceTransform {
 // by setting ReadOptions.total_order_seek = true.
 //
 // Here is an example: Suppose we implement a slice transform that returns
-// the first part of the string after spliting it using deimiter ",":
-// 1. SameResultWhenAppended("abc,") should return true. If aplying prefix
+// the first part of the string after spliting it using delimiter ",":
+// 1. SameResultWhenAppended("abc,") should return true. If applying prefix
 // bloom filter using it, all slices matching "abc:.*" will be extracted
 // to "abc,", so any SST file or memtable containing any of those key
 // will not be filtered out.
@@ -68,7 +68,7 @@ class SstFileManager {
 // == Deletion rate limiting specific arguments ==
 // @param trash_dir: Path to the directory where deleted files will be moved
 // to be deleted in a background thread while applying rate limiting. If this
-// directory dont exist, it will be created. This directory should not be
+// directory doesn't exist, it will be created. This directory should not be
 // used by any other process or any other SstFileManager, Set to "" to
 // disable deletion rate limiting.
 // @param rate_bytes_per_sec: How many bytes should be deleted per second, If
@@ -174,7 +174,7 @@ enum Tickers : uint32_t {
 GET_UPDATES_SINCE_CALLS,
 BLOCK_CACHE_COMPRESSED_MISS, // miss in the compressed block cache
 BLOCK_CACHE_COMPRESSED_HIT, // hit in the compressed block cache
-// Number of blocks added to comopressed block cache
+// Number of blocks added to compressed block cache
 BLOCK_CACHE_COMPRESSED_ADD,
 // Number of failures when adding blocks to compressed block cache
 BLOCK_CACHE_COMPRESSED_ADD_FAILURES,
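For illustration, a sketch of enabling statistics and reading one of these compressed-block-cache tickers.

#include <cstdint>
#include <cstdio>

#include <rocksdb/options.h>
#include <rocksdb/statistics.h>

void ReportCompressedCacheMisses(rocksdb::Options* options) {
  if (!options->statistics) {
    options->statistics = rocksdb::CreateDBStatistics();
  }
  uint64_t misses = options->statistics->getTickerCount(
      rocksdb::BLOCK_CACHE_COMPRESSED_MISS);
  std::printf("compressed block cache misses: %llu\n",
              static_cast<unsigned long long>(misses));
}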
@@ -328,7 +328,7 @@ const std::vector<std::pair<Tickers, std::string>> TickersNameMap = {
 
 /**
 * Keep adding histogram's here.
-* Any histogram whould have value less than HISTOGRAM_ENUM_MAX
+* Any histogram should have value less than HISTOGRAM_ENUM_MAX
 * Add a new Histogram by assigning it the current value of HISTOGRAM_ENUM_MAX
 * Add a string representation in HistogramsNameMap below
 * And increment HISTOGRAM_ENUM_MAX
@@ -67,7 +67,7 @@ struct BlockBasedTableOptions {
 
 // If cache_index_and_filter_blocks is enabled, cache index and filter
 // blocks with high priority. If set to true, depending on implementation of
-// block cache, index and filter blocks may be less likely to be eviected
+// block cache, index and filter blocks may be less likely to be evicted
 // than data blocks.
 bool cache_index_and_filter_blocks_with_high_priority = false;
 
@@ -147,7 +147,7 @@ struct BlockBasedTableOptions {
 // Block size for partitioned metadata. Currently applied to indexes when
 // kTwoLevelIndexSearch is used and to filters when partition_filters is used.
 // Note: Since in the current implementation the filters and index partitions
-// are aligned, an index/filter block is created when eitehr index or filter
+// are aligned, an index/filter block is created when either index or filter
 // block size reaches the specified limit.
 // Note: this limit is currently applied to only index blocks; a filter
 // partition is cut right after an index block is cut
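A sketch of the table options these comments refer to, combining high-priority caching of index/filter blocks with partitioned metadata; the cache and block sizes are example values only.

#include <rocksdb/cache.h>
#include <rocksdb/filter_policy.h>
#include <rocksdb/options.h>
#include <rocksdb/table.h>

rocksdb::Options MakePartitionedMetadataOptions() {
  rocksdb::BlockBasedTableOptions table_options;
  table_options.block_cache = rocksdb::NewLRUCache(256 << 20);
  table_options.cache_index_and_filter_blocks = true;
  // Keep index and filter blocks in the cache with high priority so they are
  // less likely to be evicted than data blocks.
  table_options.cache_index_and_filter_blocks_with_high_priority = true;
  // Partition the index (and filters) and cut partitions around 4 KB.
  table_options.index_type =
      rocksdb::BlockBasedTableOptions::kTwoLevelIndexSearch;
  table_options.partition_filters = true;
  table_options.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, false));
  table_options.metadata_block_size = 4096;

  rocksdb::Options options;
  options.table_factory.reset(
      rocksdb::NewBlockBasedTableFactory(table_options));
  return options;
}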
@@ -418,7 +418,7 @@ class TableFactory {
 // (1) TableCache::FindTable() calls the function when table cache miss
 // and cache the table object returned.
 // (2) SstFileReader (for SST Dump) opens the table and dump the table
-// contents using the interator of the table.
+// contents using the iterator of the table.
 // (3) DBImpl::AddFile() calls this function to read the contents of
 // the sst file it's attempting to add
 //
@@ -446,7 +446,7 @@ class TableFactory {
 // (4) When running Repairer, it creates a table builder to convert logs to
 // SST files (In Repairer::ConvertLogToTable() by calling BuildTable())
 //
-// Multiple configured can be acceseed from there, including and not limited
+// Multiple configured can be accessed from there, including and not limited
 // to compression options. file is a handle of a writable file.
 // It is the caller's responsibility to keep the file open and close the file
 // after closing the table builder. compression_type is the compression type
@@ -472,7 +472,7 @@ class TableFactory {
 // Since the return value is a raw pointer, the TableFactory owns the
 // pointer and the caller should not delete the pointer.
 //
-// In certan case, it is desirable to alter the underlying options when the
+// In certain case, it is desirable to alter the underlying options when the
 // TableFactory is not used by any open DB by casting the returned pointer
 // to the right class. For instance, if BlockBasedTableFactory is used,
 // then the pointer can be casted to BlockBasedTableOptions.
@@ -145,7 +145,7 @@ struct ThreadStatus {
 // The operation (high-level action) that the current thread is involved.
 const OperationType operation_type;
 
-// The elapsed time in micros of the current thread operation.
+// The elapsed time of the current thread operation in microseconds.
 const uint64_t op_elapsed_micros;
 
 // An integer showing the current stage where the thread is involved
@@ -24,7 +24,7 @@ enum CompactionStopStyle {
 class CompactionOptionsUniversal {
 public:
 
-// Percentage flexibilty while comparing file size. If the candidate file(s)
+// Percentage flexibility while comparing file size. If the candidate file(s)
 // size is 1% smaller than the next file's size, then include next file into
 // this candidate set. // Default: 1
 unsigned int size_ratio;
@@ -72,7 +72,7 @@ class WriteBatch : public WriteBatchBase {
 }
 
 // Variant of Put() that gathers output like writev(2). The key and value
-// that will be written to the database are concatentations of arrays of
+// that will be written to the database are concatenations of arrays of
 // slices.
 Status Put(ColumnFamilyHandle* column_family, const SliceParts& key,
 const SliceParts& value) override;
@@ -144,7 +144,7 @@ class WriteBatch : public WriteBatchBase {
 // it will not be persisted to the SST files. When iterating over this
 // WriteBatch, WriteBatch::Handler::LogData will be called with the contents
 // of the blob as it is encountered. Blobs, puts, deletes, and merges will be
-// encountered in the same order in thich they were inserted. The blob will
+// encountered in the same order in which they were inserted. The blob will
 // NOT consume sequence number(s) and will NOT increase the count of the batch
 //
 // Example application: add timestamps to the transaction log for use in
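A minimal sketch of attaching a log-only blob to a batch as described above; the blob contents are an arbitrary example.

#include <rocksdb/db.h>
#include <rocksdb/write_batch.h>

rocksdb::Status WriteWithLogBlob(rocksdb::DB* db) {
  rocksdb::WriteBatch batch;
  batch.Put("key1", "value1");
  // The blob travels with the batch in the WAL (Handler::LogData sees it)
  // but does not consume a sequence number and never reaches the SST files.
  batch.PutLogData("ts=2017-05-17T12:00:00Z");
  return db->Write(rocksdb::WriteOptions(), &batch);
}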
@@ -31,7 +31,7 @@ class WriteBatchBase {
 virtual Status Put(const Slice& key, const Slice& value) = 0;
 
 // Variant of Put() that gathers output like writev(2). The key and value
-// that will be written to the database are concatentations of arrays of
+// that will be written to the database are concatenations of arrays of
 // slices.
 virtual Status Put(ColumnFamilyHandle* column_family, const SliceParts& key,
 const SliceParts& value);
@@ -87,7 +87,7 @@ class WriteBatchBase {
 // it will not be persisted to the SST files. When iterating over this
 // WriteBatch, WriteBatch::Handler::LogData will be called with the contents
 // of the blob as it is encountered. Blobs, puts, deletes, and merges will be
-// encountered in the same order in thich they were inserted. The blob will
+// encountered in the same order in which they were inserted. The blob will
 // NOT consume sequence number(s) and will NOT increase the count of the batch
 //
 // Example application: add timestamps to the transaction log for use in
@@ -108,7 +108,7 @@ class CuckooBuilderTest : public testing::Test {
 std::find(expected_locations.begin(), expected_locations.end(), i) -
 expected_locations.begin();
 if (key_idx == keys.size()) {
-// i is not one of the expected locaitons. Empty bucket.
+// i is not one of the expected locations. Empty bucket.
 ASSERT_EQ(read_slice.compare(expected_unused_bucket), 0);
 } else {
 keys_found[key_idx] = true;
@@ -517,7 +517,7 @@ TEST_F(CuckooReaderTest, TestReadPerformance) {
 return;
 }
 double hash_ratio = 0.95;
-// These numbers are chosen to have a hash utilizaiton % close to
+// These numbers are chosen to have a hash utilization % close to
 // 0.9, 0.75, 0.6 and 0.5 respectively.
 // They all create 128 M buckets.
 std::vector<uint64_t> nums = {120*1024*1024, 100*1024*1024, 80*1024*1024,
@@ -12,7 +12,7 @@
 
 namespace rocksdb {
 
-// An implementaiton of Env that mirrors all work over two backend
+// An implementation of Env that mirrors all work over two backend
 // Env's. This is useful for debugging purposes.
 class SequentialFileMirror : public SequentialFile {
 public: