diff --git a/build_tools/RocksDBCommonHelper.php b/build_tools/RocksDBCommonHelper.php index 4f4663cbe..e7bfb5203 100644 --- a/build_tools/RocksDBCommonHelper.php +++ b/build_tools/RocksDBCommonHelper.php @@ -97,7 +97,7 @@ function getSteps($applyDiff, $diffID, $username, $test) { } // fbcode is a sub-repo. We cannot patch until we add it to ignore otherwise - // Git thinks it is an uncommited change. + // Git thinks it is an uncommitted change. $fix_git_ignore = array( "name" => "Fix git ignore", "shell" => "echo fbcode >> .git/info/exclude", diff --git a/db/column_family_test.cc b/db/column_family_test.cc index 07cc5dd13..286926bde 100644 --- a/db/column_family_test.cc +++ b/db/column_family_test.cc @@ -2042,7 +2042,7 @@ TEST_P(ColumnFamilyTest, SameCFAutomaticManualCompactions) { } #endif // !ROCKSDB_LITE -#ifndef ROCKSDB_LITE // Tailing interator not supported +#ifndef ROCKSDB_LITE // Tailing iterator not supported namespace { std::string IterStatus(Iterator* iter) { std::string result; diff --git a/db/db_block_cache_test.cc b/db/db_block_cache_test.cc index 0cefef3bb..612e727b6 100644 --- a/db/db_block_cache_test.cc +++ b/db/db_block_cache_test.cc @@ -173,7 +173,7 @@ TEST_F(DBBlockCacheTest, TestWithoutCompressedBlockCache) { delete iter; iter = nullptr; - // Release interators and access cache again. + // Release iterators and access cache again. 
for (size_t i = 0; i < kNumBlocks - 1; i++) { iterators[i].reset(); CheckCacheCounters(options, 0, 0, 0, 0); diff --git a/db/db_impl.cc b/db/db_impl.cc index e5e500450..9ab47a9ba 100644 --- a/db/db_impl.cc +++ b/db/db_impl.cc @@ -1868,7 +1868,7 @@ Status DBImpl::NewIterators( if (read_options.tailing) { #ifdef ROCKSDB_LITE return Status::InvalidArgument( - "Tailing interator not supported in RocksDB lite"); + "Tailing iterator not supported in RocksDB lite"); #else for (auto cfh : column_families) { auto cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(cfh)->cfd(); diff --git a/db/db_impl.h b/db/db_impl.h index 9a9794603..d6e62bcd7 100644 --- a/db/db_impl.h +++ b/db/db_impl.h @@ -1472,7 +1472,7 @@ class DBImpl : public DB { std::atomic<bool> has_unpersisted_data_; // if an attempt was made to flush all column families that - // the oldest log depends on but uncommited data in the oldest + // the oldest log depends on but uncommitted data in the oldest // log prevents the log from being released. // We must attempt to free the dependent memtables again // at a later time after the transaction in the oldest diff --git a/db/db_impl_write.cc b/db/db_impl_write.cc index 6bfc98025..3a239ee5c 100644 --- a/db/db_impl_write.cc +++ b/db/db_impl_write.cc @@ -1048,22 +1048,22 @@ Status DBImpl::SwitchWAL(WriteContext* write_context) { auto oldest_alive_log = alive_log_files_.begin()->number; bool flush_wont_release_oldest_log = false; if (allow_2pc()) { - auto oldest_log_with_uncommited_prep = + auto oldest_log_with_uncommitted_prep = logs_with_prep_tracker_.FindMinLogContainingOutstandingPrep(); - assert(oldest_log_with_uncommited_prep == 0 || - assert(oldest_log_with_uncommited_prep >= oldest_alive_log); - if (oldest_log_with_uncommited_prep > 0 && - oldest_log_with_uncommited_prep == oldest_alive_log) { + assert(oldest_log_with_uncommitted_prep == 0 || + oldest_log_with_uncommitted_prep >= oldest_alive_log); + if (oldest_log_with_uncommitted_prep > 0 && + oldest_log_with_uncommitted_prep == oldest_alive_log) { if 
(unable_to_release_oldest_log_) { // we already attempted to flush all column families dependent on - // the oldest alive log but the log still contained uncommited + // the oldest alive log but the log still contained uncommitted // transactions so there is still nothing that we can do. return status; } else { ROCKS_LOG_WARN( immutable_db_options_.info_log, - "Unable to release oldest log due to uncommited transaction"); + "Unable to release oldest log due to uncommitted transaction"); unable_to_release_oldest_log_ = true; flush_wont_release_oldest_log = true; } diff --git a/db/version_edit.cc b/db/version_edit.cc index e9f497999..7fa291f5f 100644 --- a/db/version_edit.cc +++ b/db/version_edit.cc @@ -263,7 +263,7 @@ const char* VersionEdit::DecodeNewFile4From(Slice* input) { break; } if (!GetLengthPrefixedSlice(input, &field)) { - return "new-file4 custom field lenth prefixed slice error"; + return "new-file4 custom field length prefixed slice error"; } switch (custom_tag) { case kPathId: diff --git a/utilities/transactions/write_prepared_txn.cc b/utilities/transactions/write_prepared_txn.cc index cb20d1439..0245f3138 100644 --- a/utilities/transactions/write_prepared_txn.cc +++ b/utilities/transactions/write_prepared_txn.cc @@ -419,7 +419,7 @@ void WritePreparedTxn::SetSnapshot() { // the mutex overhead, we call SmallestUnCommittedSeq BEFORE taking the // snapshot. Since we always updated the list of unprepared seq (via // AddPrepared) AFTER the last sequence is updated, this guarantees that the - // smallest uncommited seq that we pair with the snapshot is smaller or equal + // smallest uncommitted seq that we pair with the snapshot is smaller or equal // the value that would be obtained otherwise atomically. That is ok since // this optimization works as long as min_uncommitted is less than or equal // than the smallest uncommitted seq when the snapshot was taken.