Use more efficient hash map for deadlock detection

Summary:
Currently, deadlock-detection state is held in std::unordered_map, which allocates/deallocates memory on every insertion/deletion. This limits throughput, since these expensive operations happen while holding a global mutex. Fix this by using a vector-backed hash map that caches its memory instead.

In the deadlock stress test, this change increased throughput from 39k txns/s to 49k txns/s. The effect is more noticeable in MyRocks.
Closes https://github.com/facebook/rocksdb/pull/1545

Differential Revision: D4205662

Pulled By: lth

fbshipit-source-id: ff990e4
Manuel Ung authored 2016-11-19 11:34:26 -08:00; committed by Facebook GitHub Bot
parent a13bde39ee
commit e63350e726
3 changed files with 86 additions and 13 deletions

util/hash_map.h (new file, 67 lines)

@@ -0,0 +1,67 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. This
// source code is licensed under the BSD-style license found in the LICENSE
// file in the root directory of this source tree. An additional grant of
// patent rights can be found in the PATENTS file in the same directory.
//
#pragma once
#include <algorithm>
#include <array>
#include <utility>
#include "util/autovector.h"
namespace rocksdb {
// This is similar to std::unordered_map, except that it tries to avoid
// allocating or deallocating memory as much as possible. With
// std::unordered_map, an allocation/deallocation is made for every insertion
// or deletion because of the requirement that iterators remain valid even
// with insertions or deletions. This means that the hash chains will be
// implemented as linked lists.
//
// This implementation uses autovector as hash chains instead.
//
template <typename K, typename V, size_t size = 128>
class HashMap {
std::array<autovector<std::pair<K, V>, 1>, size> table_;
public:
bool Contains(K key) {
auto& bucket = table_[key % size];
auto it = std::find_if(
bucket.begin(), bucket.end(),
[key](const std::pair<K, V>& p) { return p.first == key; });
return it != bucket.end();
}
void Insert(K key, V value) {
auto& bucket = table_[key % size];
bucket.push_back({key, value});
}
void Delete(K key) {
auto& bucket = table_[key % size];
auto it = std::find_if(
bucket.begin(), bucket.end(),
[key](const std::pair<K, V>& p) { return p.first == key; });
if (it != bucket.end()) {
auto last = bucket.end() - 1;
if (it != last) {
*it = *last;
}
bucket.pop_back();
}
}
V& Get(K key) {
auto& bucket = table_[key % size];
auto it = std::find_if(
bucket.begin(), bucket.end(),
[key](const std::pair<K, V>& p) { return p.first == key; });
return it->second;
}
};
} // namespace rocksdb
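
For context, a minimal usage sketch of this HashMap (not part of the commit; the function name, keys, and values below are made-up examples):

// Illustrative sketch only -- not part of this diff. Exercises the
// HashMap defined above with hypothetical keys and values.
#include <cstdint>
#include "util/hash_map.h"

namespace rocksdb {

void HashMapUsageSketch() {
  // e.g. waitee transaction ID -> number of waiters
  HashMap<uint64_t, int> waiter_counts;

  // Insert() always appends; callers are expected to check Contains()
  // first if the key may already be present.
  waiter_counts.Insert(42, 1);

  // Get() returns a reference, so a count can be bumped in place.
  // It assumes the key exists; calling it on a missing key is invalid.
  if (waiter_counts.Contains(42)) {
    waiter_counts.Get(42)++;
  }

  // Delete() swaps the matching entry with the last element of its
  // bucket and pops it, so removal never shifts the whole chain.
  waiter_counts.Delete(42);
}

}  // namespace rocksdb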


@@ -351,12 +351,12 @@ void TransactionLockMgr::DecrementWaiters(const TransactionImpl* txn,
 void TransactionLockMgr::DecrementWaitersImpl(const TransactionImpl* txn,
                                               TransactionID wait_id) {
   auto id = txn->GetID();
-  assert(wait_txn_map_.count(id) > 0);
-  wait_txn_map_.erase(id);
-  rev_wait_txn_map_[wait_id]--;
-  if (rev_wait_txn_map_[wait_id] == 0) {
-    rev_wait_txn_map_.erase(wait_id);
+  assert(wait_txn_map_.Contains(id));
+  wait_txn_map_.Delete(id);
+  rev_wait_txn_map_.Get(wait_id)--;
+  if (rev_wait_txn_map_.Get(wait_id) == 0) {
+    rev_wait_txn_map_.Delete(wait_id);
   }
 }
@@ -364,12 +364,17 @@ bool TransactionLockMgr::IncrementWaiters(const TransactionImpl* txn,
                                           TransactionID wait_id) {
   auto id = txn->GetID();
   std::lock_guard<std::mutex> lock(wait_txn_map_mutex_);
-  assert(wait_txn_map_.count(id) == 0);
-  wait_txn_map_[id] = wait_id;
-  rev_wait_txn_map_[wait_id]++;
+  assert(!wait_txn_map_.Contains(id));
+  wait_txn_map_.Insert(id, wait_id);
+  if (rev_wait_txn_map_.Contains(wait_id)) {
+    rev_wait_txn_map_.Get(wait_id)++;
+  } else {
+    rev_wait_txn_map_.Insert(wait_id, 1);
+  }
   // No deadlock if nobody is waiting on self.
-  if (rev_wait_txn_map_.count(id) == 0) {
+  if (!rev_wait_txn_map_.Contains(id)) {
     return false;
   }
@@ -378,10 +383,10 @@ bool TransactionLockMgr::IncrementWaiters(const TransactionImpl* txn,
     if (next == id) {
       DecrementWaitersImpl(txn, wait_id);
       return true;
-    } else if (wait_txn_map_.count(next) == 0) {
+    } else if (!wait_txn_map_.Contains(next)) {
       return false;
     } else {
-      next = wait_txn_map_[next];
+      next = wait_txn_map_.Get(next);
     }
   }


@@ -13,6 +13,7 @@
 #include <vector>
 #include "rocksdb/utilities/transaction.h"
+#include "util/hash_map.h"
 #include "util/instrumented_mutex.h"
 #include "util/thread_local.h"
 #include "utilities/transactions/transaction_impl.h"

@@ -88,9 +89,9 @@ class TransactionLockMgr {
   std::mutex wait_txn_map_mutex_;
   // Maps from waitee -> number of waiters.
-  std::unordered_map<TransactionID, int> rev_wait_txn_map_;
+  HashMap<TransactionID, int> rev_wait_txn_map_;
   // Maps from waiter -> waitee.
-  std::unordered_map<TransactionID, TransactionID> wait_txn_map_;
+  HashMap<TransactionID, TransactionID> wait_txn_map_;
   // Used to allocate mutexes/condvars to use when locking keys
   std::shared_ptr<TransactionDBMutexFactory> mutex_factory_;