Small issues (#4564)

Summary:
A couple of very minor improvements: fixing typos in comments, fully qualifying a class name, and reordering the members of a struct to make it smaller.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/4564

Differential Revision: D10510183

Pulled By: maysamyabandeh

fbshipit-source-id: c7ddf9bfbf2db08cd31896c3fd93789d3fa68c8b
Authored by jsteemann on 2018-10-23 10:33:55 -07:00; committed by Facebook Github Bot
parent c34cc40424
commit d1c0d3f358
5 changed files with 11 additions and 11 deletions


@@ -171,8 +171,8 @@ struct KeyLockInfo {
 struct DeadlockInfo {
   TransactionID m_txn_id;
   uint32_t m_cf_id;
-  std::string m_waiting_key;
   bool m_exclusive;
+  std::string m_waiting_key;
 };
 
 struct DeadlockPath {
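
Why the reorder shrinks the struct (a minimal sketch, not the actual RocksDB definitions; it assumes TransactionID is an 8-byte integer, as on common 64-bit ABIs):

  #include <cstdint>
  #include <string>

  using TransactionID = uint64_t;  // assumed stand-in for the real typedef

  struct DeadlockInfoOld {    // old member order
    TransactionID m_txn_id;   // 8 bytes
    uint32_t m_cf_id;         // 4 bytes + 4 bytes padding before the string
    std::string m_waiting_key;
    bool m_exclusive;         // 1 byte + 7 bytes tail padding
  };

  struct DeadlockInfoNew {    // new member order
    TransactionID m_txn_id;   // 8 bytes
    uint32_t m_cf_id;         // 4 bytes
    bool m_exclusive;         // 1 byte, packed into the same 8-byte slot
    std::string m_waiting_key;
  };

  static_assert(sizeof(DeadlockInfoNew) <= sizeof(DeadlockInfoOld),
                "the bool now shares the word that previously held padding");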


@@ -79,7 +79,7 @@ WriteBufferManager::~WriteBufferManager() {
 void WriteBufferManager::ReserveMemWithCache(size_t mem) {
 #ifndef ROCKSDB_LITE
   assert(cache_rep_ != nullptr);
-  // Use a mutex to protect various data structures. Can be optimzied to a
+  // Use a mutex to protect various data structures. Can be optimized to a
   // lock-free solution if it ends up with a performance bottleneck.
   std::lock_guard<std::mutex> lock(cache_rep_->cache_mutex_);
@@ -102,14 +102,14 @@ void WriteBufferManager::ReserveMemWithCache(size_t mem) {
 void WriteBufferManager::FreeMemWithCache(size_t mem) {
 #ifndef ROCKSDB_LITE
   assert(cache_rep_ != nullptr);
-  // Use a mutex to protect various data structures. Can be optimzied to a
+  // Use a mutex to protect various data structures. Can be optimized to a
   // lock-free solution if it ends up with a performance bottleneck.
   std::lock_guard<std::mutex> lock(cache_rep_->cache_mutex_);
   size_t new_mem_used = memory_used_.load(std::memory_order_relaxed) - mem;
   memory_used_.store(new_mem_used, std::memory_order_relaxed);
   // Gradually shrink memory costed in the block cache if the actual
   // usage is less than 3/4 of what we reserve from the block cache.
-  // We do this becausse:
+  // We do this because:
   // 1. we don't pay the cost of the block cache immediately a memtable is
   //    freed, as block cache insert is expensive;
   // 2. eventually, if we walk away from a temporary memtable size increase,
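
The two comment fixes above sit on the same pattern: writers serialize on a mutex while readers poll the counter lock-free. A hedged sketch of that pattern (simplified names, not the actual WriteBufferManager code):

  #include <atomic>
  #include <cstddef>
  #include <mutex>

  class MemAccountant {
   public:
    void Free(size_t mem) {
      // The mutex serializes all writers, so relaxed ordering on the
      // atomic is sufficient; it only needs to be atomic for readers.
      std::lock_guard<std::mutex> lock(mutex_);
      size_t new_mem_used = memory_used_.load(std::memory_order_relaxed) - mem;
      memory_used_.store(new_mem_used, std::memory_order_relaxed);
    }
    size_t memory_usage() const {  // lock-free read path
      return memory_used_.load(std::memory_order_relaxed);
    }

   private:
    std::mutex mutex_;
    std::atomic<size_t> memory_used_{0};
  };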


@@ -188,7 +188,7 @@ void ThreadPoolImpl::Impl::BGThread(size_t thread_id) {
   bool low_cpu_priority = false;
 
   while (true) {
-    // Wait until there is an item that is ready to run
+    // Wait until there is an item that is ready to run
     std::unique_lock<std::mutex> lock(mu_);
     // Stop waiting if the thread needs to do work or needs to terminate.
     while (!exit_all_threads_ && !IsLastExcessiveThread(thread_id) &&
@@ -198,7 +198,7 @@ void ThreadPoolImpl::Impl::BGThread(size_t thread_id) {
     if (exit_all_threads_) {  // mechanism to let BG threads exit safely
-      if(!wait_for_jobs_to_complete_ ||
+      if (!wait_for_jobs_to_complete_ ||
           queue_.empty()) {
         break;
       }
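
For context, the loop being touched is a standard condition-variable worker loop. A self-contained sketch of the shape (simplified stand-ins, not the actual ThreadPoolImpl code), with the `if (` spacing fix applied:

  #include <condition_variable>
  #include <deque>
  #include <functional>
  #include <mutex>

  struct PoolState {
    std::mutex mu;
    std::condition_variable cv;
    std::deque<std::function<void()>> queue;
    bool exit_all_threads = false;
    bool wait_for_jobs_to_complete = false;
  };

  void BGThreadSketch(PoolState& s) {
    while (true) {
      std::unique_lock<std::mutex> lock(s.mu);
      // Stop waiting if the thread needs to do work or needs to terminate.
      while (!s.exit_all_threads && s.queue.empty()) {
        s.cv.wait(lock);
      }
      if (s.exit_all_threads) {  // mechanism to let BG threads exit safely
        if (!s.wait_for_jobs_to_complete || s.queue.empty()) {
          break;
        }
      }
      auto job = std::move(s.queue.front());
      s.queue.pop_front();
      lock.unlock();
      job();  // run the work item outside the lock
    }
  }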


@@ -104,7 +104,7 @@ void DeadlockInfoBuffer::AddNewPath(DeadlockPath path) {
     return;
   }
 
-  paths_buffer_[buffer_idx_] = path;
+  paths_buffer_[buffer_idx_] = std::move(path);
   buffer_idx_ = (buffer_idx_ + 1) % paths_buffer_.size();
 }
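
The std::move here is the pass-by-value-then-move idiom: AddNewPath takes its DeadlockPath parameter by value, so moving it into the buffer steals its heap storage instead of copying it a second time. A minimal illustration (hypothetical types, not the RocksDB ones):

  #include <cstddef>
  #include <string>
  #include <utility>
  #include <vector>

  struct Path { std::vector<std::string> ids; };

  void Store(std::vector<Path>& buffer, size_t idx, Path path) {
    buffer[idx] = std::move(path);  // move assignment, no deep copy
  }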
@@ -494,8 +494,8 @@ bool TransactionLockMgr::IncrementWaiters(
       auto extracted_info = wait_txn_map_.Get(queue_values[head]);
       path.push_back({queue_values[head], extracted_info.m_cf_id,
-                      extracted_info.m_waiting_key,
-                      extracted_info.m_exclusive});
+                      extracted_info.m_exclusive,
+                      extracted_info.m_waiting_key});
       head = queue_parents[head];
     }
     env->GetCurrentTime(&deadlock_time);
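
This call-site change is forced by the struct reordering above: DeadlockInfo is built with a braced initializer list, and aggregate initialization binds values to members in declaration order. A small illustration (simplified stand-in types):

  #include <cstdint>
  #include <string>

  struct Info {
    uint32_t id;
    bool exclusive;       // now declared before the key
    std::string key;
  };

  Info ok{7, true, "k"};      // positional: id=7, exclusive=true, key="k"
  // Info bad{7, "k", true};  // the pre-reorder order no longer compiles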


@@ -549,7 +549,7 @@ class WritePreparedTxnDB : public PessimisticTransactionDB {
   static const size_t DEF_SNAPSHOT_CACHE_BITS = static_cast<size_t>(7);
   const size_t SNAPSHOT_CACHE_BITS;
   const size_t SNAPSHOT_CACHE_SIZE;
-  unique_ptr<std::atomic<SequenceNumber>[]> snapshot_cache_;
+  std::unique_ptr<std::atomic<SequenceNumber>[]> snapshot_cache_;
   // 2nd list for storing snapshots. The list sorted in ascending order.
   // Thread-safety is provided with snapshots_mutex_.
   std::vector<SequenceNumber> snapshots_;
@@ -567,7 +567,7 @@ class WritePreparedTxnDB : public PessimisticTransactionDB {
   const CommitEntry64bFormat FORMAT;
   // commit_cache_ must be initialized to zero to tell apart an empty index from
   // a filled one. Thread-safety is provided with commit_cache_mutex_.
-  unique_ptr<std::atomic<CommitEntry64b>[]> commit_cache_;
+  std::unique_ptr<std::atomic<CommitEntry64b>[]> commit_cache_;
   // The largest evicted *commit* sequence number from the commit_cache_. If a
   // seq is smaller than max_evicted_seq_ is might or might not be present in
   // commit_cache_. So commit_cache_ must first be checked before consulting
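
The bare unique_ptr only compiled because some header pulled the name into scope (e.g. a `using std::unique_ptr;` somewhere up the include chain); fully qualifying it removes that hidden dependency. A hedged sketch of the array form used here (SequenceNumber assumed to be a 64-bit typedef):

  #include <atomic>
  #include <cstddef>
  #include <cstdint>
  #include <memory>

  using SequenceNumber = uint64_t;  // assumed stand-in for the real typedef

  std::unique_ptr<std::atomic<SequenceNumber>[]> MakeSnapshotCache(size_t n) {
    // The T[] specialization calls delete[] on destruction and provides
    // operator[] for indexed access; the trailing () value-initializes the
    // atomics to zero, matching the "initialized to zero" comment above.
    return std::unique_ptr<std::atomic<SequenceNumber>[]>(
        new std::atomic<SequenceNumber>[n]());
  }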