commit 93ab1473dc
@@ -614,7 +614,7 @@ bool ColumnFamilyData::ReturnThreadLocalSuperVersion(SuperVersion* sv) {
   void* expected = SuperVersion::kSVInUse;
   if (local_sv_->CompareAndSwap(static_cast<void*>(sv), expected)) {
     // When we see kSVInUse in the ThreadLocal, we are sure ThreadLocal
-    // storage has not been altered and no Scrape has happend. The
+    // storage has not been altered and no Scrape has happened. The
     // SuperVersion is still current.
     return true;
   } else {
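Context for reviewers outside this file: the hunk above documents RocksDB's thread-local SuperVersion cache, where a reader swaps the cached pointer out and leaves the kSVInUse sentinel behind, then returns it with a compare-and-swap that only succeeds if no scrape replaced the sentinel in the meantime. A minimal single-slot sketch of that protocol (illustrative structure; not the actual ThreadLocalPtr implementation):

    #include <atomic>
    #include <cassert>
    #include <cstdio>

    // Toy single-slot model of the protocol in the hunk above. RocksDB's
    // ThreadLocalPtr and SuperVersion carry far more state than this.
    struct SuperVersion {
      static void* const kSVInUse;  // sentinel: "a reader borrowed the pointer"
    };
    static char sv_in_use_marker;
    void* const SuperVersion::kSVInUse = &sv_in_use_marker;

    static std::atomic<void*> tls_slot{nullptr};  // stands in for ThreadLocalPtr

    // Borrow the cached SuperVersion, leaving the sentinel in its place.
    SuperVersion* GetThreadLocalSuperVersion() {
      void* ptr = tls_slot.exchange(SuperVersion::kSVInUse);
      return static_cast<SuperVersion*>(ptr);
    }

    // Return it: succeeds only if the slot still holds kSVInUse, i.e. the
    // storage was not altered and no scrape happened while it was borrowed.
    bool ReturnThreadLocalSuperVersion(SuperVersion* sv) {
      void* expected = SuperVersion::kSVInUse;
      return tls_slot.compare_exchange_strong(expected, static_cast<void*>(sv));
    }

    int main() {
      SuperVersion sv;
      tls_slot.store(&sv);
      SuperVersion* borrowed = GetThreadLocalSuperVersion();
      assert(borrowed == &sv);
      std::printf("returned cleanly: %s\n",
                  ReturnThreadLocalSuperVersion(borrowed) ? "yes" : "no");
      return 0;
    }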
@@ -268,7 +268,7 @@ class ColumnFamilyTest : public testing::Test {
     VectorLogPtr wal_files;
     Status s;
     // GetSortedWalFiles is a flakey function -- it gets all the wal_dir
-    // children files and then later checks for their existance. if some of the
+    // children files and then later checks for their existence. if some of the
     // log files doesn't exist anymore, it reports an error. it does all of this
     // without DB mutex held, so if a background process deletes the log file
     // while the function is being executed, it returns an error. We retry the
@@ -1248,12 +1248,12 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
         cf_name.c_str(), file_num_buf, loop);
   }

-  // Check if the suceeding files need compaction.
+  // Check if the succeeding files need compaction.
   for (unsigned int i = loop + 1;
        candidate_count < max_files_to_compact && i < sorted_runs.size();
        i++) {
-    const SortedRun* suceeding_sr = &sorted_runs[i];
-    if (suceeding_sr->being_compacted) {
+    const SortedRun* succeeding_sr = &sorted_runs[i];
+    if (succeeding_sr->being_compacted) {
      break;
    }
    // Pick files if the total/last candidate file size (increased by the
@@ -1263,14 +1263,14 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
    // kCompactionStopStyleSimilarSize, it's simply the size of the last
    // picked file.
    double sz = candidate_size * (100.0 + ratio) / 100.0;
-    if (sz < static_cast<double>(suceeding_sr->size)) {
+    if (sz < static_cast<double>(succeeding_sr->size)) {
      break;
    }
    if (ioptions_.compaction_options_universal.stop_style ==
        kCompactionStopStyleSimilarSize) {
      // Similar-size stopping rule: also check the last picked file isn't
      // far larger than the next candidate file.
-      sz = (suceeding_sr->size * (100.0 + ratio)) / 100.0;
+      sz = (succeeding_sr->size * (100.0 + ratio)) / 100.0;
      if (sz < static_cast<double>(candidate_size)) {
        // If the small file we've encountered begins a run of similar-size
        // files, we'll pick them up on a future iteration of the outer
@@ -1278,9 +1278,9 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
        // by the last-resort read amp strategy which disregards size ratios.
        break;
      }
-      candidate_size = suceeding_sr->compensated_file_size;
+      candidate_size = succeeding_sr->compensated_file_size;
    } else { // default kCompactionStopStyleTotalSize
-      candidate_size += suceeding_sr->compensated_file_size;
+      candidate_size += succeeding_sr->compensated_file_size;
    }
    candidate_count++;
  }
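A worked example may help with the two size-ratio checks above. In the total-size stopping style, the next run is absorbed while the accumulated candidate size, inflated by ratio percent, still reaches that run's size. A self-contained sketch (the ratio and run sizes are illustrative, not RocksDB defaults):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Hypothetical standalone rendering of the stopping rule above: keep
    // absorbing succeeding runs while the accumulated candidate size,
    // inflated by ratio percent, still covers the next run's size.
    size_t CountPickedRuns(const std::vector<uint64_t>& run_sizes, double ratio) {
      if (run_sizes.empty()) return 0;
      double candidate_size = static_cast<double>(run_sizes[0]);
      size_t picked = 1;
      for (size_t i = 1; i < run_sizes.size(); i++) {
        double sz = candidate_size * (100.0 + ratio) / 100.0;
        if (sz < static_cast<double>(run_sizes[i])) {
          break;  // next run is too large relative to what we have picked
        }
        candidate_size += static_cast<double>(run_sizes[i]);  // total-size style
        picked++;
      }
      return picked;
    }

    int main() {
      // With ratio = 1 (i.e. 1%), runs of 100, 100, 210: the first two merge
      // (100 * 1.01 >= 100), but 200 * 1.01 = 202 < 210, so the third is skipped.
      std::printf("%zu\n", CountPickedRuns({100, 100, 210}, 1.0));  // prints 2
      return 0;
    }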
@@ -103,7 +103,7 @@ class CorruptionTest : public testing::Test {
    // db itself will raise errors because data is corrupted.
    // Instead, we want the reads to be successful and this test
    // will detect whether the appropriate corruptions have
-    // occured.
+    // occurred.
    Iterator* iter = db_->NewIterator(ReadOptions(false, true));
    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
      uint64_t key;
@@ -254,7 +254,7 @@ void DBIter::FindNextUserEntryInternal(bool skipping) {
      }
      // If we have sequentially iterated via numerous keys and still not
      // found the next user-key, then it is better to seek so that we can
-      // avoid too many key comparisons. We seek to the last occurence of
+      // avoid too many key comparisons. We seek to the last occurrence of
      // our current key by looking for sequence number 0.
      if (skipping && num_skipped > max_skip_) {
        num_skipped = 0;
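The heuristic described in that DBIter comment trades linear skipping for a single seek once too many versions of one user key pile up. A toy, self-contained model (the ordering, container, and max-skip constant are stand-ins for DBIter's real internals, which walk an InternalIterator):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <utility>
    #include <vector>

    // Internal keys are (user_key, sequence) pairs, ordered by user_key
    // ascending and sequence descending, so all versions of one key are
    // adjacent. Illustrative names, not DBIter's actual members.
    using InternalKey = std::pair<std::string, uint64_t>;

    bool Before(const InternalKey& a, const InternalKey& b) {
      return a.first != b.first ? a.first < b.first : a.second > b.second;
    }

    // Advance past every version of keys[i]'s user key. After kMaxSkip linear
    // steps we give up and binary-search for (user_key, 0), the smallest
    // internal key for that user key, mirroring "seek to sequence number 0".
    size_t SkipToNextUserKey(const std::vector<InternalKey>& keys, size_t i) {
      const std::string user_key = keys[i].first;
      const size_t kMaxSkip = 8;
      size_t num_skipped = 0;
      while (i < keys.size() && keys[i].first == user_key) {
        if (++num_skipped > kMaxSkip) {
          // One O(log n) seek instead of potentially thousands of Next() calls.
          auto it = std::upper_bound(keys.begin(), keys.end(),
                                     InternalKey{user_key, 0}, Before);
          return static_cast<size_t>(it - keys.begin());
        }
        ++i;
      }
      return i;
    }

    int main() {
      std::vector<InternalKey> keys;
      for (uint64_t s = 1000; s > 0; --s) keys.push_back({"a", s});  // 1000 versions
      keys.push_back({"b", 1});
      std::printf("next user key at index %zu\n", SkipToNextUserKey(keys, 0));
      return 0;
    }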
@@ -1295,7 +1295,7 @@ static long TestGetTickerCount(const Options& options, Tickers ticker_type) {

 // A helper function that ensures the table properties returned in
 // `GetPropertiesOfAllTablesTest` is correct.
-// This test assumes entries size is differnt for each of the tables.
+// This test assumes entries size is different for each of the tables.
 namespace {
 void VerifyTableProperties(DB* db, uint64_t expected_entries_size) {
   TablePropertiesCollection props;
@@ -1955,7 +1955,7 @@ TEST_F(DBTest, GetEncountersEmptyLevel) {
  // * sstable B in level 2
  // Then do enough Get() calls to arrange for an automatic compaction
  // of sstable A. A bug would cause the compaction to be marked as
-  // occuring at level 1 (instead of the correct level 0).
+  // occurring at level 1 (instead of the correct level 0).

  // Step 1: First place sstables in levels 0 and 2
  int compaction_count = 0;
@@ -11648,7 +11648,7 @@ TEST_F(DBTest, DynamicCompactionOptions) {


  // Test max_mem_compaction_level.
-  // Destory DB and start from scratch
+  // Destroy DB and start from scratch
  options.max_background_compactions = 1;
  options.max_background_flushes = 0;
  options.max_mem_compaction_level = 2;
@@ -161,7 +161,7 @@ void MemTableList::RollbackMemtableFlush(const autovector<MemTable*>& mems,
  assert(!mems.empty());

  // If the flush was not successful, then just reset state.
-  // Maybe a suceeding attempt to flush will be successful.
+  // Maybe a succeeding attempt to flush will be successful.
  for (MemTable* m : mems) {
    assert(m->flush_in_progress_);
    assert(m->file_number_ == 0);
@@ -184,7 +184,7 @@ Status MemTableList::InstallMemtableFlushResults(
      ThreadStatus::STAGE_MEMTABLE_INSTALL_FLUSH_RESULTS);
  mu->AssertHeld();

-  // flush was sucessful
+  // flush was successful
  for (size_t i = 0; i < mems.size(); ++i) {
    // All the edits are associated with the first memtable of this batch.
    assert(i == 0 || mems[i]->GetEdits()->NumEntries() == 0);
@@ -193,7 +193,7 @@ Status MemTableList::InstallMemtableFlushResults(
    mems[i]->file_number_ = file_number;
  }

-  // if some other thread is already commiting, then return
+  // if some other thread is already committing, then return
  Status s;
  if (commit_in_progress_) {
    return s;
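The two MemTableList hunks above describe a single-committer pattern: every flusher records its result, but a thread that finds commit_in_progress_ set simply returns and lets the current committer install all finished memtables. A toy sketch of that shape (the real code releases the DB mutex while writing the manifest; this sketch keeps everything under one lock, and the names are illustrative):

    #include <cstdio>
    #include <deque>
    #include <mutex>

    struct MemTable {
      int id;
      bool flush_completed = false;
    };

    std::mutex mu;                 // stands in for the DB mutex
    bool commit_in_progress = false;
    std::deque<MemTable*> imm;     // oldest memtable at the back

    void InstallFlushResults(MemTable* m) {
      std::unique_lock<std::mutex> lock(mu);
      m->flush_completed = true;
      // if some other thread is already committing, then return
      if (commit_in_progress) return;
      commit_in_progress = true;
      // Commit from the oldest end while flushes there have completed.
      while (!imm.empty() && imm.back()->flush_completed) {
        std::printf("committing memtable %d\n", imm.back()->id);
        imm.pop_back();
      }
      commit_in_progress = false;
    }

    int main() {
      MemTable a{1}, b{2};
      imm = {&b, &a};           // a is oldest
      InstallFlushResults(&a);  // commits a
      InstallFlushResults(&b);  // commits b
      return 0;
    }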
@@ -275,7 +275,7 @@ class FilePicker {
            static_cast<uint32_t>(search_right_bound_));
      } else {
        // search_left_bound > search_right_bound, key does not exist in
-        // this level. Since no comparision is done in this level, it will
+        // this level. Since no comparison is done in this level, it will
        // need to search all files in the next level.
        search_left_bound_ = 0;
        search_right_bound_ = FileIndexer::kLevelMaxIndex;
@@ -208,7 +208,7 @@ class HashIndexBuilder : public IndexBuilder {
      pending_entry_index_ = static_cast<uint32_t>(current_restart_index_);
    } else {
      // entry number increments when keys share the prefix reside in
-      // differnt data blocks.
+      // different data blocks.
      auto last_restart_index = pending_entry_index_ + pending_block_num_ - 1;
      assert(last_restart_index <= current_restart_index_);
      if (last_restart_index != current_restart_index_) {
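The "entry number increments" comment in this hunk (and the identical one in block_hash_index below) is terse; the bookkeeping it refers to counts how many consecutive restart blocks a key prefix spans. A toy model under assumed semantics, with illustrative data, much simplified from HashIndexBuilder:

    #include <cstdint>
    #include <cstdio>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    struct PrefixEntry {
      uint32_t pending_entry_index;  // first restart block with this prefix
      uint32_t pending_block_num;    // how many restart blocks it spans
    };

    int main() {
      // (prefix, restart_block_index) pairs in iteration order.
      std::vector<std::pair<std::string, uint32_t>> keys = {
          {"ab", 0}, {"ab", 0}, {"ab", 1}, {"cd", 1}, {"cd", 2}};
      std::map<std::string, PrefixEntry> index;
      for (const auto& [prefix, current_restart_index] : keys) {
        auto it = index.find(prefix);
        if (it == index.end()) {
          index[prefix] = {current_restart_index, 1};
        } else {
          PrefixEntry& e = it->second;
          uint32_t last_restart_index =
              e.pending_entry_index + e.pending_block_num - 1;
          if (last_restart_index != current_restart_index) {
            // Same prefix, but the key resides in a different data block.
            e.pending_block_num++;
          }
        }
      }
      for (const auto& [prefix, e] : index)
        std::printf("%s: first=%u blocks=%u\n", prefix.c_str(),
                    e.pending_entry_index, e.pending_block_num);
      return 0;
    }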
@@ -383,7 +383,7 @@ extern const uint64_t kLegacyBlockBasedTableMagicNumber = 0xdb4775248b80fb57ull;
 // A collector that collects properties of interest to block-based table.
 // For now this class looks heavy-weight since we only write one additional
 // property.
-// But in the forseeable future, we will add more and more properties that are
+// But in the foreseeable future, we will add more and more properties that are
 // specific to block-based table.
 class BlockBasedTableBuilder::BlockBasedTablePropertiesCollector
     : public IntTblPropCollector {
@@ -1347,7 +1347,7 @@ Status BlockBasedTable::CreateIndexReader(IndexReader** index_reader,
    Log(InfoLogLevel::WARN_LEVEL, rep_->ioptions.info_log,
        "BlockBasedTableOptions::kHashSearch requires "
        "options.prefix_extractor to be set."
-        " Fall back to binary seach index.");
+        " Fall back to binary search index.");
    index_type_on_file = BlockBasedTableOptions::kBinarySearch;
  }

@@ -1367,7 +1367,7 @@ Status BlockBasedTable::CreateIndexReader(IndexReader** index_reader,
    // problem with prefix hash index loading.
    Log(InfoLogLevel::WARN_LEVEL, rep_->ioptions.info_log,
        "Unable to read the metaindex block."
-        " Fall back to binary seach index.");
+        " Fall back to binary search index.");
    return BinarySearchIndexReader::Create(
        file, footer, footer.index_handle(), env, comparator, index_reader);
  }
@@ -98,7 +98,7 @@ BlockHashIndex* CreateBlockHashIndexOnTheFly(
      pending_entry_index = current_restart_index;
    } else {
      // entry number increments when keys share the prefix reside in
-      // differnt data blocks.
+      // different data blocks.
      auto last_restart_index = pending_entry_index + pending_block_num - 1;
      assert(last_restart_index <= current_restart_index);
      if (last_restart_index != current_restart_index) {
@@ -174,7 +174,7 @@ DEFINE_int32(compaction_thread_pool_adjust_interval, 0,
             "The interval (in milliseconds) to adjust compaction thread pool "
             "size. Don't change it periodically if the value is 0.");

-DEFINE_int32(compaction_thread_pool_varations, 2,
+DEFINE_int32(compaction_thread_pool_variations, 2,
             "Range of bakground thread pool size variations when adjusted "
             "periodically.");

@@ -43,7 +43,7 @@ class FullFilterBitsBuilder : public FilterBitsBuilder {
  // When creating filter, it is ensured that
  // total_bits = num_lines * CACHE_LINE_SIZE * 8
  // dst len is >= 5, 1 for num_probes, 4 for num_lines
-  // Then total_bits = (len - 5) * 8, and cache_line_size could be calulated
+  // Then total_bits = (len - 5) * 8, and cache_line_size could be calculated
  // +----------------------------------------------------------------+
  // |              filter data with length total_bits/8              |
  // +----------------------------------------------------------------+
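The arithmetic in that filter comment can be verified directly: with the 5 metadata bytes (1 for num_probes, 4 for num_lines) accounted for, total_bits and the cache line size are recoverable from the serialized length. A short check using assumed example values (64-byte cache line, 16 lines; not values from the RocksDB source):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t kCacheLineSize = 64;  // assumed
      const uint32_t num_lines = 16;       // assumed

      // Forward direction: total_bits = num_lines * CACHE_LINE_SIZE * 8.
      const uint32_t total_bits = num_lines * kCacheLineSize * 8;  // 8192 bits

      // Serialized filter = data plus 5 bytes, per the comment above:
      // 1 byte for num_probes, 4 bytes for num_lines.
      const uint32_t len = total_bits / 8 + 5;  // 1029 bytes

      // Reverse direction: a reader recovers total_bits from len, then the
      // cache line size from total_bits and num_lines.
      assert((len - 5) * 8 == total_bits);
      const uint32_t recovered_cache_line_size = total_bits / 8 / num_lines;
      std::printf("cache_line_size = %u\n", recovered_cache_line_size);  // 64
      return 0;
    }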
@@ -562,7 +562,7 @@ Status HdfsEnv::GetFileModificationTime(const std::string& fname,
 }

 // The rename is not atomic. HDFS does not allow a renaming if the
-// target already exists. So, we delete the target before attemting the
+// target already exists. So, we delete the target before attempting the
 // rename.
 Status HdfsEnv::RenameFile(const std::string& src, const std::string& target) {
   hdfsDelete(fileSys_, target.c_str(), 1);
@@ -19,7 +19,7 @@ namespace rocksdb {
 HistogramBucketMapper::HistogramBucketMapper()
     :
     // Add newer bucket index here.
-    // Should be alwyas added in sorted order.
+    // Should be always added in sorted order.
     // If you change this, you also need to change
     // size of array buckets_ in HistogramImpl
     bucketValues_(
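The sorted-order requirement above exists because bucket lookup amounts to a binary search over bucketValues_ for the first boundary that reaches the value. A minimal sketch with an illustrative (much shorter) boundary list:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Illustrative boundaries, not RocksDB's actual bucket list.
    const std::vector<uint64_t> bucketValues = {1, 2, 5, 10, 25, 50, 100};

    // First bucket whose boundary is >= value; out-of-range values are
    // clamped to the last bucket.
    size_t IndexForValue(uint64_t value) {
      auto it = std::lower_bound(bucketValues.begin(), bucketValues.end(), value);
      return it == bucketValues.end()
                 ? bucketValues.size() - 1
                 : static_cast<size_t>(it - bucketValues.begin());
    }

    int main() {
      std::printf("%zu %zu %zu\n",
                  IndexForValue(3),      // -> bucket 2 (boundary 5)
                  IndexForValue(50),     // -> bucket 5 (boundary 50)
                  IndexForValue(9999));  // clamped to last bucket, 6
      return 0;
    }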
@@ -33,7 +33,7 @@ TEST_F(HistogramTest, BasicOperation) {
    ASSERT_TRUE(percentile99 >= percentile85);
  }

-  ASSERT_EQ(histogram.Average(), 50.5); // avg is acurately caluclated.
+  ASSERT_EQ(histogram.Average(), 50.5); // avg is acurately calculated.
 }

 TEST_F(HistogramTest, EmptyHistogram) {
@@ -419,7 +419,7 @@ BackupEngineImpl::BackupEngineImpl(Env* db_env,
          &backuped_file_infos_, backup_env_)))));
  }

-  if (options_.destroy_old_data) { // Destory old data
+  if (options_.destroy_old_data) { // Destroy old data
    assert(!read_only_);
    Log(options_.info_log,
        "Backup Engine started with destroy_old_data == true, deleting all "