Consolidate hash function used for non-persistent data in a new function (#5155)

Summary:
Create new functions NPHash64() and GetSliceNPHash64(), which are currently
implemented using murmurhash.
Replace direct calls to murmurhash() with the new functions wherever the
hash results are not used in an on-disk format.
This will make it easier to try out or switch to alternative hash functions
in the places where data-format compatibility doesn't need to be considered.
This part shouldn't have any performance impact.
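
For illustration only, a minimal, self-contained sketch of the indirection is
shown below. The hash body is a stand-in (std::hash of a std::string); the
real util/hash.h forwards to MURMUR_HASH, as the diff further down shows.
Only the wrapper names mirror the new API; everything else is assumed.

// Sketch of the NPHash64()/GetSliceNPHash64() indirection. The hash body is
// a stand-in, not RocksDB's implementation.
#include <cstdint>
#include <functional>
#include <iostream>
#include <string>

// Non-persistent hash: results may change between releases, so it must never
// be used for anything written to disk.
inline uint64_t NPHash64(const char* data, size_t n, uint32_t seed) {
  return std::hash<std::string>{}(std::string(data, n)) ^ seed;  // stand-in
}

inline uint64_t GetSliceNPHash64(const std::string& s) {
  return NPHash64(s.data(), s.size(), 0);
}

int main() {
  // A call site that only needs an in-memory bucket index goes through the
  // wrapper, so the underlying hash can be swapped without touching callers.
  const std::string key = "user_key_1";
  const size_t num_stripes = 16;
  std::cout << GetSliceNPHash64(key) % num_stripes << "\n";
  return 0;
}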

Also, the sharded cache hash function is switched to the new function, since
it falls into this category. It shows no visible performance impact in
db_bench results. CPU usage reported by perf increases from about 0.2% to
0.4% in an extreme benchmark setting (4KB blocks, no compression, everything
cached in block cache). The hash function previously used there, our own
Hash(), is known to have serious hash-quality problems: it can generate lots
of collisions for similar inputs, which in this use case means extra lock
contention for reads from the same file. This slight CPU regression seems
worth it to counter the potentially bad performance with hot keys, and
hopefully this will be further improved in the future with a better hash
function.
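
To make the lock-contention point concrete, here is a hedged sketch of how
shard selection in a sharded cache typically works. Only HashSlice() appears
in the sharded_cache.h diff below; the shard-by-high-bits step, the shard
count, and the key format are assumptions for illustration.

// Sketch: the cache key is hashed and a few high bits pick the shard whose
// mutex protects that key's LRU state. A low-quality hash that collides on
// similar keys funnels hot lookups into one shard, i.e. one lock.
// GetSliceNPHash64() here is an FNV-1a stand-in, purely illustrative.
#include <cstdint>
#include <iostream>
#include <string>

inline uint64_t GetSliceNPHash64(const std::string& s) {
  uint64_t h = 1469598103934665603ull;
  for (unsigned char c : s) {
    h = (h ^ c) * 1099511628211ull;
  }
  return h;
}

int main() {
  const int num_shard_bits = 6;  // 2^6 = 64 shards (assumed setting)
  // Hypothetical cache keys for neighboring blocks of the same SST file.
  const std::string keys[] = {"000123.sst@0", "000123.sst@4096",
                              "000123.sst@8192"};
  for (const std::string& key : keys) {
    uint32_t hash = static_cast<uint32_t>(GetSliceNPHash64(key));
    uint32_t shard = hash >> (32 - num_shard_bits);  // high bits pick a shard
    std::cout << key << " -> shard " << shard << "\n";
  }
  return 0;
}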

cache_test's condition is relaxed a little bit too. The new hash is slightly
more skewed in this use case, but I manually checked the data and the hash
results are still in a reasonable range.
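
The skew check mentioned above could look something like the hedged sketch
below: hash the sequential integer keys the test inserts, bucket them, and
compare bucket occupancy. The key encoding, bucket count, and hash stand-in
are assumptions, not the actual cache_test code.

// Sketch of a manual skew check: bucket the hashes of sequential test keys
// and compare the fullest and emptiest buckets. Everything here is a
// stand-in for illustration, not the real cache_test harness.
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

inline uint64_t NPHash64(const char* data, size_t n, uint32_t seed) {
  uint64_t h = 1469598103934665603ull ^ seed;  // FNV-1a stand-in
  for (size_t i = 0; i < n; i++) {
    h = (h ^ static_cast<unsigned char>(data[i])) * 1099511628211ull;
  }
  return h;
}

int main() {
  const int kBuckets = 16;
  std::vector<int> counts(kBuckets, 0);
  for (int i = 0; i < 1200; i++) {        // roughly kCacheSize + 200 inserts
    int key = 1000 + i;                   // mirrors Insert(1000 + i, ...)
    char buf[sizeof(key)];
    std::memcpy(buf, &key, sizeof(key));  // fixed-width key encoding, assumed
    counts[NPHash64(buf, sizeof(buf), 0) % kBuckets]++;
  }
  std::cout << "min bucket: "
            << *std::min_element(counts.begin(), counts.end())
            << ", max bucket: "
            << *std::max_element(counts.begin(), counts.end()) << "\n";
  return 0;
}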
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5155

Differential Revision: D14834821

Pulled By: siying

fbshipit-source-id: ec9a2c0a2f8ae4b54d08b13a5c2e9cc97aa80cb5
Authored by Siying Dong on 2019-04-08 13:24:29 -07:00; committed by Facebook Github Bot
Parent: de00f28132
Commit: 0bb555630f
8 changed files with 25 additions and 13 deletions

cache/cache_test.cc

@@ -306,7 +306,7 @@ TEST_P(CacheTest, EvictionPolicy) {
   Insert(200, 201);
   // Frequently used entry must be kept around
-  for (int i = 0; i < kCacheSize + 100; i++) {
+  for (int i = 0; i < kCacheSize + 200; i++) {
     Insert(1000+i, 2000+i);
     ASSERT_EQ(101, Lookup(100));
   }
@@ -359,7 +359,7 @@ TEST_P(CacheTest, EvictionPolicyRef) {
   Insert(303, 104);
   // Insert entries much more than Cache capacity
-  for (int i = 0; i < kCacheSize + 100; i++) {
+  for (int i = 0; i < kCacheSize + 200; i++) {
     Insert(1000 + i, 2000 + i);
   }


@@ -83,7 +83,7 @@ class ShardedCache : public Cache {
  private:
   static inline uint32_t HashSlice(const Slice& s) {
-    return Hash(s.data(), s.size(), 0);
+    return static_cast<uint32_t>(GetSliceNPHash64(s));
   }
   uint32_t Shard(uint32_t hash) {


@@ -35,7 +35,6 @@
 #include "util/autovector.h"
 #include "util/coding.h"
 #include "util/memory_usage.h"
-#include "util/murmurhash.h"
 #include "util/mutexlock.h"
 #include "util/util.h"
@@ -438,8 +437,7 @@ FragmentedRangeTombstoneIterator* MemTable::NewRangeTombstoneIterator(
 }
 port::RWMutex* MemTable::GetLock(const Slice& key) {
-  static murmur_hash hash;
-  return &locks_[hash(key) % locks_.size()];
+  return &locks_[static_cast<size_t>(GetSliceNPHash64(key)) % locks_.size()];
 }
 MemTable::MemTableStats MemTable::ApproximateStats(const Slice& start_ikey,


@@ -17,7 +17,7 @@
 #include "rocksdb/slice.h"
 #include "rocksdb/slice_transform.h"
 #include "util/arena.h"
-#include "util/murmurhash.h"
+#include "util/hash.h"
 namespace rocksdb {
 namespace {
@@ -218,7 +218,7 @@ class HashLinkListRep : public MemTableRep {
   }
   size_t GetHash(const Slice& slice) const {
-    return MurmurHash(slice.data(), static_cast<int>(slice.size()), 0) %
+    return NPHash64(slice.data(), static_cast<int>(slice.size()), 0) %
            bucket_size_;
   }


@@ -11,7 +11,6 @@
 #include "rocksdb/memtablerep.h"
 #include "rocksdb/slice.h"
 #include "util/coding.h"
-#include "util/murmurhash.h"
 namespace rocksdb {
 namespace stl_wrappers {


@@ -14,19 +14,36 @@
 #include <stdint.h>
 #include "rocksdb/slice.h"
+#include "util/murmurhash.h"
 namespace rocksdb {
+// Non-persistent hash. Only used for in-memory data structure
+// The hash results are applicable to change.
+extern uint64_t NPHash64(const char* data, size_t n, uint32_t seed);
 extern uint32_t Hash(const char* data, size_t n, uint32_t seed);
 inline uint32_t BloomHash(const Slice& key) {
   return Hash(key.data(), key.size(), 0xbc9f1d34);
 }
+inline uint64_t GetSliceNPHash64(const Slice& s) {
+  return NPHash64(s.data(), s.size(), 0);
+}
 inline uint32_t GetSliceHash(const Slice& s) {
   return Hash(s.data(), s.size(), 397);
 }
+inline uint64_t NPHash64(const char* data, size_t n, uint32_t seed) {
+  // Right now murmurhash2B is used. It should able to be freely
+  // changed to a better hash, without worrying about backward
+  // compatibility issue.
+  return MURMUR_HASH(data, static_cast<int>(n),
+                     static_cast<unsigned int>(seed));
+}
 // std::hash compatible interface.
 struct SliceHasher {
   uint32_t operator()(const Slice& s) const { return GetSliceHash(s); }


@@ -10,7 +10,6 @@
 #include "rocksdb/comparator.h"
 #include "rocksdb/slice.h"
 #include "util/coding.h"
-#include "util/murmurhash.h"
 namespace rocksdb {
 namespace stl_wrappers {


@@ -24,7 +24,7 @@
 #include "rocksdb/slice.h"
 #include "rocksdb/utilities/transaction_db_mutex.h"
 #include "util/cast_util.h"
-#include "util/murmurhash.h"
+#include "util/hash.h"
 #include "util/sync_point.h"
 #include "util/thread_local.h"
 #include "utilities/transactions/pessimistic_transaction_db.h"
@@ -183,8 +183,7 @@ TransactionLockMgr::~TransactionLockMgr() {}
 size_t LockMap::GetStripe(const std::string& key) const {
   assert(num_stripes_ > 0);
-  static murmur_hash hash;
-  size_t stripe = hash(key) % num_stripes_;
+  size_t stripe = static_cast<size_t>(GetSliceNPHash64(key)) % num_stripes_;
   return stripe;
 }