diff --git a/include/rocksdb/utilities/transaction_db.h b/include/rocksdb/utilities/transaction_db.h index 3d7bc355a..1a692f2a7 100644 --- a/include/rocksdb/utilities/transaction_db.h +++ b/include/rocksdb/utilities/transaction_db.h @@ -171,8 +171,8 @@ struct KeyLockInfo { struct DeadlockInfo { TransactionID m_txn_id; uint32_t m_cf_id; - std::string m_waiting_key; bool m_exclusive; + std::string m_waiting_key; }; struct DeadlockPath { diff --git a/memtable/write_buffer_manager.cc b/memtable/write_buffer_manager.cc index 21b18c8f7..7f2e664ab 100644 --- a/memtable/write_buffer_manager.cc +++ b/memtable/write_buffer_manager.cc @@ -79,7 +79,7 @@ WriteBufferManager::~WriteBufferManager() { void WriteBufferManager::ReserveMemWithCache(size_t mem) { #ifndef ROCKSDB_LITE assert(cache_rep_ != nullptr); - // Use a mutex to protect various data structures. Can be optimzied to a + // Use a mutex to protect various data structures. Can be optimized to a // lock-free solution if it ends up with a performance bottleneck. std::lock_guard<std::mutex> lock(cache_rep_->cache_mutex_); @@ -102,14 +102,14 @@ void WriteBufferManager::ReserveMemWithCache(size_t mem) { void WriteBufferManager::FreeMemWithCache(size_t mem) { #ifndef ROCKSDB_LITE assert(cache_rep_ != nullptr); - // Use a mutex to protect various data structures. Can be optimzied to a + // Use a mutex to protect various data structures. Can be optimized to a // lock-free solution if it ends up with a performance bottleneck. std::lock_guard<std::mutex> lock(cache_rep_->cache_mutex_); size_t new_mem_used = memory_used_.load(std::memory_order_relaxed) - mem; memory_used_.store(new_mem_used, std::memory_order_relaxed); // Gradually shrink memory costed in the block cache if the actual // usage is less than 3/4 of what we reserve from the block cache. - // We do this becausse: + // We do this because: // 1. we don't pay the cost of the block cache immediately a memtable is // freed, as block cache insert is expensive; // 2. 
eventually, if we walk away from a temporary memtable size increase, diff --git a/util/threadpool_imp.cc b/util/threadpool_imp.cc index d850b7c9e..b431830ee 100644 --- a/util/threadpool_imp.cc +++ b/util/threadpool_imp.cc @@ -188,7 +188,7 @@ void ThreadPoolImpl::Impl::BGThread(size_t thread_id) { bool low_cpu_priority = false; while (true) { -// Wait until there is an item that is ready to run + // Wait until there is an item that is ready to run std::unique_lock<std::mutex> lock(mu_); // Stop waiting if the thread needs to do work or needs to terminate. while (!exit_all_threads_ && !IsLastExcessiveThread(thread_id) && @@ -198,7 +198,7 @@ void ThreadPoolImpl::Impl::BGThread(size_t thread_id) { if (exit_all_threads_) { // mechanism to let BG threads exit safely - if(!wait_for_jobs_to_complete_ || + if (!wait_for_jobs_to_complete_ || queue_.empty()) { break; } diff --git a/utilities/transactions/transaction_lock_mgr.cc b/utilities/transactions/transaction_lock_mgr.cc index d285fd30e..cd70c329d 100644 --- a/utilities/transactions/transaction_lock_mgr.cc +++ b/utilities/transactions/transaction_lock_mgr.cc @@ -104,7 +104,7 @@ void DeadlockInfoBuffer::AddNewPath(DeadlockPath path) { return; } - paths_buffer_[buffer_idx_] = path; + paths_buffer_[buffer_idx_] = std::move(path); buffer_idx_ = (buffer_idx_ + 1) % paths_buffer_.size(); } @@ -494,8 +494,8 @@ bool TransactionLockMgr::IncrementWaiters( auto extracted_info = wait_txn_map_.Get(queue_values[head]); path.push_back({queue_values[head], extracted_info.m_cf_id, - extracted_info.m_waiting_key, - extracted_info.m_exclusive}); + extracted_info.m_exclusive, + extracted_info.m_waiting_key}); head = queue_parents[head]; } env->GetCurrentTime(&deadlock_time); diff --git a/utilities/transactions/write_prepared_txn_db.h b/utilities/transactions/write_prepared_txn_db.h index ec76e2716..0316a57a9 100644 --- a/utilities/transactions/write_prepared_txn_db.h +++ b/utilities/transactions/write_prepared_txn_db.h @@ -549,7 +549,7 @@ class 
WritePreparedTxnDB : public PessimisticTransactionDB { static const size_t DEF_SNAPSHOT_CACHE_BITS = static_cast<size_t>(7); const size_t SNAPSHOT_CACHE_BITS; const size_t SNAPSHOT_CACHE_SIZE; - unique_ptr<std::atomic<SequenceNumber>[]> snapshot_cache_; + std::unique_ptr<std::atomic<SequenceNumber>[]> snapshot_cache_; // 2nd list for storing snapshots. The list sorted in ascending order. // Thread-safety is provided with snapshots_mutex_. std::vector<SequenceNumber> snapshots_; @@ -567,7 +567,7 @@ class WritePreparedTxnDB : public PessimisticTransactionDB { const CommitEntry64bFormat FORMAT; // commit_cache_ must be initialized to zero to tell apart an empty index from // a filled one. Thread-safety is provided with commit_cache_mutex_. - unique_ptr<std::atomic<CommitEntry64b>[]> commit_cache_; + std::unique_ptr<std::atomic<CommitEntry64b>[]> commit_cache_; // The largest evicted *commit* sequence number from the commit_cache_. If a // seq is smaller than max_evicted_seq_ is might or might not be present in // commit_cache_. So commit_cache_ must first be checked before consulting