From 0bb555630f5e85a1471843f8dc0dabec297c1c49 Mon Sep 17 00:00:00 2001
From: Siying Dong
Date: Mon, 8 Apr 2019 13:24:29 -0700
Subject: [PATCH] Consolidate hash function used for non-persistent data in a
 new function (#5155)

Summary:
Create new functions NPHash64() and GetSliceNPHash64(), which are currently
implemented using murmurhash. Replace the existing direct calls to
murmurhash() with the new functions wherever the hash results are not part of
the on-disk format. This will make it easier to try out or switch to
alternative hash functions in the places where data-format compatibility does
not need to be considered. This part shouldn't have any performance impact.

Also, the sharded cache hash function is changed to the new functions, because
it falls into this category. It doesn't show a visible performance impact in
db_bench results. CPU shown by perf increases from about 0.2% to 0.4% in an
extreme benchmark setting (4KB blocks, no compression, everything cached in
block cache). We've known that the currently used hash function, our own
Hash(), has a serious hash-quality problem: it can generate a lot of
collisions for similar inputs. In this use case, that means extra lock
contention for reads from the same file. This slight CPU regression is worth
it to me to counter the potentially bad performance with hot keys. And
hopefully this will get further improved in the future with a better hash
function.

cache_test's condition is relaxed a little bit too. The new hash is slightly
more skewed in this use case, but I manually checked the data and the hash
results are still in a reasonable range.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/5155

Differential Revision: D14834821

Pulled By: siying

fbshipit-source-id: ec9a2c0a2f8ae4b54d08b13a5c2e9cc97aa80cb5
---
 cache/cache_test.cc                            |  4 ++--
 cache/sharded_cache.h                          |  2 +-
 db/memtable.cc                                 |  4 +---
 memtable/hash_linklist_rep.cc                  |  4 ++--
 memtable/stl_wrappers.h                        |  1 -
 util/hash.h                                    | 17 +++++++++++++++++
 util/kv_map.h                                  |  1 -
 utilities/transactions/transaction_lock_mgr.cc |  5 ++---
 8 files changed, 25 insertions(+), 13 deletions(-)
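For orientation before the per-file diff, here is a minimal, self-contained
sketch of the consolidation described above. It is illustrative only:
NPHash64() and GetSliceNPHash64() mirror the helpers this patch adds to
util/hash.h, but the MURMUR_HASH backend from util/murmurhash.h is replaced by
a trivial FNV-1a stand-in so the snippet compiles on its own, and Slice is a
simplified stand-in for rocksdb::Slice.

#include <cstddef>
#include <cstdint>
#include <string>

// Minimal stand-in for rocksdb::Slice: a non-owning pointer/length pair.
struct Slice {
  Slice(const char* d, size_t n) : data_(d), size_(n) {}
  /* implicit */ Slice(const std::string& s)
      : data_(s.data()), size_(s.size()) {}
  const char* data() const { return data_; }
  size_t size() const { return size_; }
  const char* data_;
  size_t size_;
};

// Non-persistent 64-bit hash: its results may change between releases, so it
// must never feed anything that is written to disk. (FNV-1a stands in here
// for the MURMUR_HASH call the real patch makes.)
inline uint64_t NPHash64(const char* data, size_t n, uint32_t seed) {
  uint64_t h = 14695981039346656037ull ^ seed;
  for (size_t i = 0; i < n; ++i) {
    h ^= static_cast<unsigned char>(data[i]);
    h *= 1099511628211ull;
  }
  return h;
}

// Convenience wrapper for call sites that already hold a Slice.
inline uint64_t GetSliceNPHash64(const Slice& s) {
  return NPHash64(s.data(), s.size(), 0);
}

The point of the consolidation is visible here: swapping in a better hash
later only requires editing the body of NPHash64(), since no caller of the
non-persistent hash depends on the exact bit pattern it returns.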
diff --git a/cache/cache_test.cc b/cache/cache_test.cc
index f9db65f31..f9f77234c 100644
--- a/cache/cache_test.cc
+++ b/cache/cache_test.cc
@@ -306,7 +306,7 @@ TEST_P(CacheTest, EvictionPolicy) {
   Insert(200, 201);
 
   // Frequently used entry must be kept around
-  for (int i = 0; i < kCacheSize + 100; i++) {
+  for (int i = 0; i < kCacheSize + 200; i++) {
     Insert(1000+i, 2000+i);
     ASSERT_EQ(101, Lookup(100));
   }
@@ -359,7 +359,7 @@ TEST_P(CacheTest, EvictionPolicyRef) {
   Insert(303, 104);
 
   // Insert entries much more than Cache capacity
-  for (int i = 0; i < kCacheSize + 100; i++) {
+  for (int i = 0; i < kCacheSize + 200; i++) {
     Insert(1000 + i, 2000 + i);
   }
 
diff --git a/cache/sharded_cache.h b/cache/sharded_cache.h
index 543286b9e..920898b87 100644
--- a/cache/sharded_cache.h
+++ b/cache/sharded_cache.h
@@ -83,7 +83,7 @@ class ShardedCache : public Cache {
 
  private:
   static inline uint32_t HashSlice(const Slice& s) {
-    return Hash(s.data(), s.size(), 0);
+    return static_cast<uint32_t>(GetSliceNPHash64(s));
   }
 
   uint32_t Shard(uint32_t hash) {
diff --git a/db/memtable.cc b/db/memtable.cc
index 33a6378ac..16b5c8ee0 100644
--- a/db/memtable.cc
+++ b/db/memtable.cc
@@ -35,7 +35,6 @@
 #include "util/autovector.h"
 #include "util/coding.h"
 #include "util/memory_usage.h"
-#include "util/murmurhash.h"
 #include "util/mutexlock.h"
 #include "util/util.h"
 
@@ -438,8 +437,7 @@ FragmentedRangeTombstoneIterator* MemTable::NewRangeTombstoneIterator(
 }
 
 port::RWMutex* MemTable::GetLock(const Slice& key) {
-  static murmur_hash hash;
-  return &locks_[hash(key) % locks_.size()];
+  return &locks_[static_cast<size_t>(GetSliceNPHash64(key)) % locks_.size()];
 }
 
 MemTable::MemTableStats MemTable::ApproximateStats(const Slice& start_ikey,
diff --git a/memtable/hash_linklist_rep.cc b/memtable/hash_linklist_rep.cc
index 7aa0063e9..878d23383 100644
--- a/memtable/hash_linklist_rep.cc
+++ b/memtable/hash_linklist_rep.cc
@@ -17,7 +17,7 @@
 #include "rocksdb/slice.h"
 #include "rocksdb/slice_transform.h"
 #include "util/arena.h"
-#include "util/murmurhash.h"
+#include "util/hash.h"
 
 namespace rocksdb {
 namespace {
@@ -218,7 +218,7 @@ class HashLinkListRep : public MemTableRep {
   }
 
   size_t GetHash(const Slice& slice) const {
-    return MurmurHash(slice.data(), static_cast<int>(slice.size()), 0) %
+    return NPHash64(slice.data(), static_cast<size_t>(slice.size()), 0) %
            bucket_size_;
   }
 
diff --git a/memtable/stl_wrappers.h b/memtable/stl_wrappers.h
index 19fa15148..0287f4f8f 100644
--- a/memtable/stl_wrappers.h
+++ b/memtable/stl_wrappers.h
@@ -11,7 +11,6 @@
 #include "rocksdb/memtablerep.h"
 #include "rocksdb/slice.h"
 #include "util/coding.h"
-#include "util/murmurhash.h"
 
 namespace rocksdb {
 namespace stl_wrappers {
diff --git a/util/hash.h b/util/hash.h
index 4a13f4564..ed42b0894 100644
--- a/util/hash.h
+++ b/util/hash.h
@@ -14,19 +14,36 @@
 #include <stdint.h>
 
 #include "rocksdb/slice.h"
+#include "util/murmurhash.h"
 
 namespace rocksdb {
 
+// Non-persistent hash. Only used for in-memory data structures.
+// The hash results are subject to change.
+extern uint64_t NPHash64(const char* data, size_t n, uint32_t seed);
+
 extern uint32_t Hash(const char* data, size_t n, uint32_t seed);
 
 inline uint32_t BloomHash(const Slice& key) {
   return Hash(key.data(), key.size(), 0xbc9f1d34);
 }
 
+inline uint64_t GetSliceNPHash64(const Slice& s) {
+  return NPHash64(s.data(), s.size(), 0);
+}
+
 inline uint32_t GetSliceHash(const Slice& s) {
   return Hash(s.data(), s.size(), 397);
 }
 
+inline uint64_t NPHash64(const char* data, size_t n, uint32_t seed) {
+  // Right now murmurhash2B is used. It should be possible to freely
+  // change it to a better hash, without worrying about backward
+  // compatibility issues.
+  return MURMUR_HASH(data, static_cast<int>(n),
+                     static_cast<uint32_t>(seed));
+}
+
 // std::hash compatible interface.
 struct SliceHasher {
   uint32_t operator()(const Slice& s) const { return GetSliceHash(s); }
diff --git a/util/kv_map.h b/util/kv_map.h
index 784a244ae..d5ba3307f 100644
--- a/util/kv_map.h
+++ b/util/kv_map.h
@@ -10,7 +10,6 @@
 #include "rocksdb/comparator.h"
 #include "rocksdb/slice.h"
 #include "util/coding.h"
-#include "util/murmurhash.h"
 
 namespace rocksdb {
 namespace stl_wrappers {
diff --git a/utilities/transactions/transaction_lock_mgr.cc b/utilities/transactions/transaction_lock_mgr.cc
index 8086f7c7c..9074a1494 100644
--- a/utilities/transactions/transaction_lock_mgr.cc
+++ b/utilities/transactions/transaction_lock_mgr.cc
@@ -24,7 +24,7 @@
 #include "rocksdb/slice.h"
 #include "rocksdb/utilities/transaction_db_mutex.h"
 #include "util/cast_util.h"
-#include "util/murmurhash.h"
+#include "util/hash.h"
 #include "util/sync_point.h"
 #include "util/thread_local.h"
 #include "utilities/transactions/pessimistic_transaction_db.h"
@@ -183,8 +183,7 @@ TransactionLockMgr::~TransactionLockMgr() {}
 
 size_t LockMap::GetStripe(const std::string& key) const {
   assert(num_stripes_ > 0);
-  static murmur_hash hash;
-  size_t stripe = hash(key) % num_stripes_;
+  size_t stripe = static_cast<size_t>(GetSliceNPHash64(key)) % num_stripes_;
   return stripe;
 }
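As a closing illustration of the call-site pattern the diff converges on
(MemTable::GetLock, LockMap::GetStripe, and ShardedCache::HashSlice all take
this shape after the patch): hash the key bytes with the non-persistent 64-bit
hash, truncate, and reduce modulo the number of buckets. StripedLockTable
below is a made-up example class, not RocksDB API, and its NPHash64 is the
same FNV-1a stand-in used in the earlier sketch rather than RocksDB's
murmurhash-backed version.

#include <cstddef>
#include <cstdint>
#include <mutex>
#include <string>
#include <vector>

namespace {

// Same FNV-1a stand-in as in the earlier sketch; the real code calls
// rocksdb's NPHash64() from util/hash.h.
inline uint64_t NPHash64(const char* data, size_t n, uint32_t seed) {
  uint64_t h = 14695981039346656037ull ^ seed;
  for (size_t i = 0; i < n; ++i) {
    h ^= static_cast<unsigned char>(data[i]);
    h *= 1099511628211ull;
  }
  return h;
}

// Hypothetical striped-lock table illustrating the converted call sites.
class StripedLockTable {
 public:
  explicit StripedLockTable(size_t num_stripes) : stripes_(num_stripes) {}

  // Same shape as the patch: hash the key bytes with the non-persistent
  // 64-bit hash, truncate to size_t, and mod by the number of stripes.
  std::mutex* GetLock(const std::string& key) {
    size_t stripe =
        static_cast<size_t>(NPHash64(key.data(), key.size(), 0)) %
        stripes_.size();
    return &stripes_[stripe];
  }

 private:
  std::vector<std::mutex> stripes_;  // fixed pool of mutexes, never resized
};

}  // namespace

int main() {
  StripedLockTable locks(16);
  std::lock_guard<std::mutex> guard(*locks.GetLock("some_user_key"));
  // ... access the state guarded by this stripe ...
  return 0;
}

Because the stripe index comes from a non-persistent hash, nothing in this
pattern constrains a future change of the hash function; only the in-memory
distribution of keys across stripes would shift.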