xxhash 64 support

Summary: Pull Request resolved: https://github.com/facebook/rocksdb/pull/4607
Reviewed By: siying
Differential Revision: D12836696
Pulled By: jsjhoubo
fbshipit-source-id: 7122ccb712d0b0f1cd998aa4477e0da1401bd870

Parent: 5c794d94c4
Commit: cd9404bb77
@@ -794,7 +794,7 @@ TEST_F(DBBasicTest, ChecksumTest) {
   BlockBasedTableOptions table_options;
   Options options = CurrentOptions();
   // change when new checksum type added
-  int max_checksum = static_cast<int>(kxxHash);
+  int max_checksum = static_cast<int>(kxxHash64);
   const int kNumPerFile = 2;

   // generate one table with each type of checksum
@@ -809,7 +809,7 @@ TEST_F(DBBasicTest, ChecksumTest) {
   }

   // verify data with each type of checksum
-  for (int i = 0; i <= kxxHash; ++i) {
+  for (int i = 0; i <= kxxHash64; ++i) {
     table_options.checksum = static_cast<ChecksumType>(i);
     options.table_factory.reset(NewBlockBasedTableFactory(table_options));
     Reopen(options);
@@ -430,6 +430,10 @@ Options DBTestBase::GetOptions(
       table_options.checksum = kxxHash;
       break;
     }
+    case kxxHash64Checksum: {
+      table_options.checksum = kxxHash64;
+      break;
+    }
     case kFIFOCompaction: {
       options.compaction_style = kCompactionStyleFIFO;
       break;
@@ -675,6 +675,7 @@ class DBTestBase : public testing::Test {
     kBlockBasedTableWithPartitionedIndexFormat4,
     kPartitionedFilterWithNewTableReaderForCompactions,
     kUniversalSubcompactions,
+    kxxHash64Checksum,
     // This must be the last line
     kEnd,
   };
@@ -47,6 +47,7 @@ enum ChecksumType : char {
   kNoChecksum = 0x0,
   kCRC32c = 0x1,
   kxxHash = 0x2,
+  kxxHash64 = 0x3,
 };

 // For advanced user only
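Editorial aside, not part of the diff: with the new enum value in place, a user opts in through BlockBasedTableOptions. A minimal sketch, assuming standard RocksDB headers; the helper name OpenWithXxHash64 is hypothetical:

#include <string>

#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"

// Open a DB whose SST blocks are checksummed with the new kxxHash64 type.
rocksdb::Status OpenWithXxHash64(const std::string& path, rocksdb::DB** db) {
  rocksdb::BlockBasedTableOptions table_options;
  table_options.checksum = rocksdb::kxxHash64;  // new checksum type
  rocksdb::Options options;
  options.create_if_missing = true;
  options.table_factory.reset(
      rocksdb::NewBlockBasedTableFactory(table_options));
  return rocksdb::DB::Open(options, path, db);
}

Note that files written this way are unreadable by releases that predate the new type; their BlockFetcher hits the "unknown checksum type" corruption path shown further down.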
@@ -215,7 +215,8 @@ std::map<CompactionStopStyle, std::string>
 std::unordered_map<std::string, ChecksumType>
     OptionsHelper::checksum_type_string_map = {{"kNoChecksum", kNoChecksum},
                                                {"kCRC32c", kCRC32c},
-                                               {"kxxHash", kxxHash}};
+                                               {"kxxHash", kxxHash},
+                                               {"kxxHash64", kxxHash64}};

 std::unordered_map<std::string, CompressionType>
     OptionsHelper::compression_type_string_map = {
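Registering the string above also makes the type reachable from textual options. A sketch, assuming GetBlockBasedTableOptionsFromString is available from rocksdb/convenience.h as in this era of the codebase; the function name ParseXxHash64Option is hypothetical:

#include <string>

#include "rocksdb/convenience.h"
#include "rocksdb/table.h"

// Parse "checksum=kxxHash64" into table options via the string map above.
rocksdb::Status ParseXxHash64Option(rocksdb::BlockBasedTableOptions* out) {
  rocksdb::BlockBasedTableOptions base;
  return rocksdb::GetBlockBasedTableOptionsFromString(
      base, "checksum=kxxHash64", out);
}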
@@ -615,6 +615,18 @@ void BlockBasedTableBuilder::WriteRawBlock(const Slice& block_contents,
         EncodeFixed32(trailer_without_type, XXH32_digest(xxh));
         break;
       }
+      case kxxHash64: {
+        XXH64_state_t* const state = XXH64_createState();
+        XXH64_reset(state, 0);
+        XXH64_update(state, block_contents.data(),
+                     static_cast<uint32_t>(block_contents.size()));
+        XXH64_update(state, trailer, 1);  // Extend to cover block type
+        EncodeFixed32(trailer_without_type,
+                      static_cast<uint32_t>(XXH64_digest(state) &  // lower 32 bits
+                                            uint64_t{0xffffffff}));
+        XXH64_freeState(state);
+        break;
+      }
     }

     assert(r->status.ok());
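Note on the masking above: the block trailer is 5 bytes, one for the compression type and four for the checksum, so only the low 32 bits of the 64-bit digest are stored. A standalone sketch of that truncation; the helper name is hypothetical:

#include <cstdint>

// Keep the low 32 bits of an XXH64 digest for the 4-byte trailer slot.
inline uint32_t Xxh64ToTrailerChecksum(uint64_t digest64) {
  return static_cast<uint32_t>(digest64 & uint64_t{0xffffffff});
}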
@@ -49,6 +49,12 @@ void BlockFetcher::CheckBlockChecksum() {
       case kxxHash:
         actual = XXH32(data, static_cast<int>(block_size_) + 1, 0);
         break;
+      case kxxHash64:
+        actual = static_cast<uint32_t>(
+            XXH64(data, static_cast<int>(block_size_) + 1, 0) &
+            uint64_t{0xffffffff});
+        break;
       default:
         status_ = Status::Corruption(
             "unknown checksum type " + ToString(footer_.checksum()) + " in " +
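The writer hashes incrementally while the reader hashes in one shot; both cover the block contents plus the one-byte type, with seed 0. A self-contained sketch, illustrative only, showing the two paths agree when built against the namespaced xxhash API added by this commit:

#include <cassert>
#include <cstdint>
#include <cstring>

#include "util/xxhash.h"

void WriteReadChecksumAgree() {
  char block[64];
  memset(block, 0x2a, sizeof(block));
  size_t block_size = sizeof(block) - 1;  // last byte plays the type byte

  // Write side: streaming, as BlockBasedTableBuilder::WriteRawBlock does.
  rocksdb::XXH64_state_t* st = rocksdb::XXH64_createState();
  rocksdb::XXH64_reset(st, 0);
  rocksdb::XXH64_update(st, block, static_cast<uint32_t>(block_size));
  rocksdb::XXH64_update(st, block + block_size, 1);  // cover the type byte
  uint32_t stored = static_cast<uint32_t>(rocksdb::XXH64_digest(st) &
                                          uint64_t{0xffffffff});
  rocksdb::XXH64_freeState(st);

  // Read side: one shot, as BlockFetcher::CheckBlockChecksum does.
  uint32_t actual = static_cast<uint32_t>(
      rocksdb::XXH64(block, static_cast<int>(block_size) + 1, 0) &
      uint64_t{0xffffffff});
  assert(stored == actual);
}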
@@ -3004,6 +3004,26 @@ TEST_F(HarnessTest, FooterTests) {
     ASSERT_EQ(decoded_footer.index_handle().size(), index.size());
     ASSERT_EQ(decoded_footer.version(), 1U);
   }
+  {
+    // xxhash64 block based
+    std::string encoded;
+    Footer footer(kBlockBasedTableMagicNumber, 1);
+    BlockHandle meta_index(10, 5), index(20, 15);
+    footer.set_metaindex_handle(meta_index);
+    footer.set_index_handle(index);
+    footer.set_checksum(kxxHash64);
+    footer.EncodeTo(&encoded);
+    Footer decoded_footer;
+    Slice encoded_slice(encoded);
+    decoded_footer.DecodeFrom(&encoded_slice);
+    ASSERT_EQ(decoded_footer.table_magic_number(), kBlockBasedTableMagicNumber);
+    ASSERT_EQ(decoded_footer.checksum(), kxxHash64);
+    ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset());
+    ASSERT_EQ(decoded_footer.metaindex_handle().size(), meta_index.size());
+    ASSERT_EQ(decoded_footer.index_handle().offset(), index.offset());
+    ASSERT_EQ(decoded_footer.index_handle().size(), index.size());
+    ASSERT_EQ(decoded_footer.version(), 1U);
+  }
   // Plain table is not supported in ROCKSDB_LITE
 #ifndef ROCKSDB_LITE
   {
util/xxhash.cc (599 changed lines)
@@ -34,6 +34,39 @@ You can contact the author at :
 //**************************************
 // Tuning parameters
 //**************************************
+/*!XXH_FORCE_MEMORY_ACCESS :
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is
+ * safe and portable. Unfortunately, on some target/compiler combinations, the
+ * generated assembly is sub-optimal. The switch below allows selecting a
+ * different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It depends on a compiler extension (ie, not
+ *            portable). This method is safe if your compiler supports it, and
+ *            *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method doesn't depend on the compiler, but it
+ *            violates the C standard. It can generate buggy code on targets
+ *            which do not support unaligned memory accesses. In some
+ *            circumstances, though, it's the only known way to get the most
+ *            performance (ie GCC + ARMv6).
+ * See http://stackoverflow.com/a/32095106/646947 for details.
+ * Prefer these methods in priority order (0 > 1 > 2).
+ */
+
+#include "util/util.h"
+
+#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line \
+                                   for example */
+#if defined(__GNUC__) &&                                     \
+    (defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) ||  \
+     defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
+     defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__))
+#define XXH_FORCE_MEMORY_ACCESS 2
+#elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) ||      \
+    (defined(__GNUC__) &&                                     \
+     (defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) ||  \
+      defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || \
+      defined(__ARM_ARCH_7S__)))
+#define XXH_FORCE_MEMORY_ACCESS 1
+#endif
+#endif

 // Unaligned memory access is automatically enabled for "common" CPUs, such as x86.
 // For other CPUs, the compiler will be more cautious and insert extra code to ensure aligned access is respected.
 // If you know your target CPU supports unaligned memory access, you may want to force this option manually to improve performance.
@@ -58,6 +91,21 @@ You can contact the author at :
 // This option has no impact on Little_Endian CPU.
 #define XXH_FORCE_NATIVE_FORMAT 0

+/*!XXH_FORCE_ALIGN_CHECK :
+ * This is a minor performance trick, only useful with lots of very small keys.
+ * It means : check for aligned/unaligned input.
+ * The check costs one initial branch per hash;
+ * set it to 0 when the input is guaranteed to be aligned,
+ * or when alignment doesn't matter for performance.
+ */
+#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
+#if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || \
+    defined(_M_X64)
+#define XXH_FORCE_ALIGN_CHECK 0
+#else
+#define XXH_FORCE_ALIGN_CHECK 1
+#endif
+#endif

 //**************************************
 // Compiler Specific Options
@@ -91,7 +139,7 @@ FORCE_INLINE void XXH_free (void* p) { free(p); }
 // for memcpy()
 #include <string.h>
 FORCE_INLINE void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }

+#include <assert.h> /* assert */

 namespace rocksdb {
 //**************************************
@@ -134,6 +182,34 @@ typedef struct _U32_S { U32 v; } _PACKED U32_S;

 #define A32(x) (((U32_S *)(x))->v)

+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 2))
+
+/* Force direct memory access. Only works on CPUs which support unaligned
+ * memory access in hardware */
+static U32 XXH_read32(const void* memPtr) { return *(const U32*)memPtr; }
+
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 1))
+
+/* __pack instructions are safer, but compiler specific, hence potentially
+ * problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union {
+  U32 u32;
+} __attribute__((packed)) unalign;
+static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+
+#else
+
+/* portable and safe solution. Generally efficient.
+ * see : http://stackoverflow.com/a/32095106/646947
+ */
+static U32 XXH_read32(const void* memPtr) {
+  U32 val;
+  memcpy(&val, memPtr, sizeof(val));
+  return val;
+}
+
+#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */

 //***************************************
 // Compiler-specific Functions and Macros
@@ -143,8 +219,10 @@ typedef struct _U32_S { U32 v; } _PACKED U32_S;
 // Note : although _rotl exists for minGW (GCC under windows), performance seems poor
 #if defined(_MSC_VER)
 # define XXH_rotl32(x,r) _rotl(x,r)
+#define XXH_rotl64(x, r) _rotl64(x, r)
 #else
 # define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
+#define XXH_rotl64(x, r) ((x << r) | (x >> (64 - r)))
 #endif

 #if defined(_MSC_VER) // Visual Studio
@@ -199,12 +277,25 @@ FORCE_INLINE U32 XXH_readLE32_align(const U32* ptr, XXH_endianess endian, XXH_al
-    return endian==XXH_littleEndian ? *ptr : XXH_swap32(*ptr);
-}
-
-FORCE_INLINE U32 XXH_readLE32(const U32* ptr, XXH_endianess endian) { return XXH_readLE32_align(ptr, endian, XXH_unaligned); }
+FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian,
+                                    XXH_alignment align) {
+  if (align == XXH_unaligned)
+    return endian == XXH_littleEndian ? XXH_read32(ptr)
+                                      : XXH_swap32(XXH_read32(ptr));
+  else
+    return endian == XXH_littleEndian ? *(const U32*)ptr
+                                      : XXH_swap32(*(const U32*)ptr);
+}
+
+FORCE_INLINE U32 XXH_readLE32(const U32* ptr, XXH_endianess endian) {
+  return XXH_readLE32_align(ptr, endian, XXH_unaligned);
+}

 //****************************
 // Simple Hash Functions
 //****************************
 #define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)

 FORCE_INLINE U32 XXH32_endian_align(const void* input, int len, U32 seed, XXH_endianess endian, XXH_alignment align)
 {
     const BYTE* p = (const BYTE*)input;
@@ -476,4 +567,508 @@ U32 XXH32_digest (void* state_in)
     return h32;
 }

+/* *******************************************************************
+ * 64-bit hash functions
+ *********************************************************************/
+
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
+
+/* Force direct memory access. Only works on CPUs which support unaligned
+ * memory access in hardware */
+static U64 XXH_read64(const void* memPtr) { return *(const U64*)memPtr; }
+
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
+
+/* __pack instructions are safer, but compiler specific, hence potentially
+ * problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign64;
+static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; }
+
+#else
+
+/* portable and safe solution. Generally efficient.
+ * see : http://stackoverflow.com/a/32095106/646947
+ */
+static U64 XXH_read64(const void* memPtr) {
+  U64 val;
+  memcpy(&val, memPtr, sizeof(val));
+  return val;
+}
+
+#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
+
+#if defined(_MSC_VER) /* Visual Studio */
+#define XXH_swap64 _byteswap_uint64
+#elif XXH_GCC_VERSION >= 403
+#define XXH_swap64 __builtin_bswap64
+#else
+static U64 XXH_swap64(U64 x) {
+  return ((x << 56) & 0xff00000000000000ULL) |
+         ((x << 40) & 0x00ff000000000000ULL) |
+         ((x << 24) & 0x0000ff0000000000ULL) |
+         ((x << 8) & 0x000000ff00000000ULL) |
+         ((x >> 8) & 0x00000000ff000000ULL) |
+         ((x >> 24) & 0x0000000000ff0000ULL) |
+         ((x >> 40) & 0x000000000000ff00ULL) |
+         ((x >> 56) & 0x00000000000000ffULL);
+}
+#endif
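Editorial note, not part of the commit: a quick standalone check that the fallback byte swap above reverses byte order. The function name Swap64Fallback is hypothetical; its body repeats the #else branch verbatim:

#include <cassert>
#include <cstdint>

static uint64_t Swap64Fallback(uint64_t x) {
  return ((x << 56) & 0xff00000000000000ULL) |
         ((x << 40) & 0x00ff000000000000ULL) |
         ((x << 24) & 0x0000ff0000000000ULL) |
         ((x << 8) & 0x000000ff00000000ULL) |
         ((x >> 8) & 0x00000000ff000000ULL) |
         ((x >> 24) & 0x0000000000ff0000ULL) |
         ((x >> 40) & 0x000000000000ff00ULL) |
         ((x >> 56) & 0x00000000000000ffULL);
}

int main() {
  // Bytes 01..08 come back in reverse order.
  assert(Swap64Fallback(0x0102030405060708ULL) == 0x0807060504030201ULL);
  return 0;
}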
+FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian,
+                                    XXH_alignment align) {
+  if (align == XXH_unaligned)
+    return endian == XXH_littleEndian ? XXH_read64(ptr)
+                                      : XXH_swap64(XXH_read64(ptr));
+  else
+    return endian == XXH_littleEndian ? *(const U64*)ptr
+                                      : XXH_swap64(*(const U64*)ptr);
+}
+
+FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian) {
+  return XXH_readLE64_align(ptr, endian, XXH_unaligned);
+}
+
+static U64 XXH_readBE64(const void* ptr) {
+  return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
+}
+
+/*====== xxh64 ======*/
+
+static const U64 PRIME64_1 =
+    11400714785074694791ULL; /* 0b1001111000110111011110011011000110000101111010111100101010000111 */
+static const U64 PRIME64_2 =
+    14029467366897019727ULL; /* 0b1100001010110010101011100011110100100111110101001110101101001111 */
+static const U64 PRIME64_3 =
+    1609587929392839161ULL;  /* 0b0001011001010110011001111011000110011110001101110111100111111001 */
+static const U64 PRIME64_4 =
+    9650029242287828579ULL;  /* 0b1000010111101011110010100111011111000010101100101010111001100011 */
+static const U64 PRIME64_5 =
+    2870177450012600261ULL;  /* 0b0010011111010100111010110010111100010110010101100110011111000101 */
+
+static U64 XXH64_round(U64 acc, U64 input) {
+  acc += input * PRIME64_2;
+  acc = XXH_rotl64(acc, 31);
+  acc *= PRIME64_1;
+  return acc;
+}
+
+static U64 XXH64_mergeRound(U64 acc, U64 val) {
+  val = XXH64_round(0, val);
+  acc ^= val;
+  acc = acc * PRIME64_1 + PRIME64_4;
+  return acc;
+}
+
+static U64 XXH64_avalanche(U64 h64) {
+  h64 ^= h64 >> 33;
+  h64 *= PRIME64_2;
+  h64 ^= h64 >> 29;
+  h64 *= PRIME64_3;
+  h64 ^= h64 >> 32;
+  return h64;
+}
+
+#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
+
+static U64 XXH64_finalize(U64 h64, const void* ptr, size_t len,
+                          XXH_endianess endian, XXH_alignment align) {
+  const BYTE* p = (const BYTE*)ptr;
+
+#define PROCESS1_64          \
+  h64 ^= (*p++) * PRIME64_5; \
+  h64 = XXH_rotl64(h64, 11) * PRIME64_1;
+
+#define PROCESS4_64                           \
+  h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; \
+  p += 4;                                     \
+  h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
+
+#define PROCESS8_64                                    \
+  {                                                    \
+    U64 const k1 = XXH64_round(0, XXH_get64bits(p));   \
+    p += 8;                                            \
+    h64 ^= k1;                                         \
+    h64 = XXH_rotl64(h64, 27) * PRIME64_1 + PRIME64_4; \
+  }
+
+  switch (len & 31) {
+    case 24:
+      PROCESS8_64;
+      FALLTHROUGH_INTENDED; /* fallthrough */
+    case 16:
+      PROCESS8_64;
+      FALLTHROUGH_INTENDED; /* fallthrough */
+    case 8:
+      PROCESS8_64;
+      return XXH64_avalanche(h64);
+
+    case 28:
+      PROCESS8_64;
+      FALLTHROUGH_INTENDED; /* fallthrough */
+    case 20:
+      PROCESS8_64;
+      FALLTHROUGH_INTENDED; /* fallthrough */
+    case 12:
+      PROCESS8_64;
+      FALLTHROUGH_INTENDED; /* fallthrough */
+    case 4:
+      PROCESS4_64;
+      return XXH64_avalanche(h64);
+
+    case 25:
+      PROCESS8_64;
+      FALLTHROUGH_INTENDED; /* fallthrough */
+    case 17:
+      PROCESS8_64;
+      FALLTHROUGH_INTENDED; /* fallthrough */
+    case 9:
+      PROCESS8_64;
+      PROCESS1_64;
+      return XXH64_avalanche(h64);
+
+    case 29:
+      PROCESS8_64;
+      FALLTHROUGH_INTENDED; /* fallthrough */
+    case 21:
+      PROCESS8_64;
+      FALLTHROUGH_INTENDED; /* fallthrough */
+    case 13:
+      PROCESS8_64;
+      FALLTHROUGH_INTENDED; /* fallthrough */
+    case 5:
+      PROCESS4_64;
+      PROCESS1_64;
+      return XXH64_avalanche(h64);
+
+    case 26:
+      PROCESS8_64;
+      FALLTHROUGH_INTENDED; /* fallthrough */
+    case 18:
+      PROCESS8_64;
+      FALLTHROUGH_INTENDED; /* fallthrough */
+    case 10:
+      PROCESS8_64;
+      PROCESS1_64;
+      PROCESS1_64;
+      return XXH64_avalanche(h64);
+
+    case 30:
+      PROCESS8_64;
+      FALLTHROUGH_INTENDED; /* fallthrough */
+    case 22:
+      PROCESS8_64;
+      FALLTHROUGH_INTENDED; /* fallthrough */
+    case 14:
+      PROCESS8_64;
+      FALLTHROUGH_INTENDED; /* fallthrough */
+    case 6:
+      PROCESS4_64;
+      PROCESS1_64;
+      PROCESS1_64;
+      return XXH64_avalanche(h64);
+
+    case 27:
+      PROCESS8_64;
+      FALLTHROUGH_INTENDED; /* fallthrough */
+    case 19:
+      PROCESS8_64;
+      FALLTHROUGH_INTENDED; /* fallthrough */
+    case 11:
+      PROCESS8_64;
+      PROCESS1_64;
+      PROCESS1_64;
+      PROCESS1_64;
+      return XXH64_avalanche(h64);
+
+    case 31:
+      PROCESS8_64;
+      FALLTHROUGH_INTENDED; /* fallthrough */
+    case 23:
+      PROCESS8_64;
+      FALLTHROUGH_INTENDED; /* fallthrough */
+    case 15:
+      PROCESS8_64;
+      FALLTHROUGH_INTENDED; /* fallthrough */
+    case 7:
+      PROCESS4_64;
+      FALLTHROUGH_INTENDED; /* fallthrough */
+    case 3:
+      PROCESS1_64;
+      FALLTHROUGH_INTENDED; /* fallthrough */
+    case 2:
+      PROCESS1_64;
+      FALLTHROUGH_INTENDED; /* fallthrough */
+    case 1:
+      PROCESS1_64;
+      FALLTHROUGH_INTENDED; /* fallthrough */
+    case 0:
+      return XXH64_avalanche(h64);
+  }
+
+  /* impossible to reach */
+  assert(0);
+  return 0; /* unreachable, but some compilers complain without it */
+}
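Editorial note on the switch above: `len & 31` is the tail left over after the 32-byte stripes, and each case drains it with 8-byte, then 4-byte, then 1-byte steps. A sketch of the decomposition the cases encode; the function name TailSteps is hypothetical:

#include <cstddef>

// For a tail of (len & 31) bytes: how many PROCESS8_64, PROCESS4_64 and
// PROCESS1_64 steps the switch performs. E.g. 13 -> one of each.
void TailSteps(size_t len, size_t* p8, size_t* p4, size_t* p1) {
  size_t tail = len & 31;
  *p8 = tail / 8;        // 0..3 eight-byte steps
  *p4 = (tail % 8) / 4;  // 0 or 1 four-byte step
  *p1 = tail % 4;        // 0..3 single-byte steps
}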
+FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed,
+                                    XXH_endianess endian, XXH_alignment align) {
+  const BYTE* p = (const BYTE*)input;
+  const BYTE* bEnd = p + len;
+  U64 h64;
+
+#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && \
+    (XXH_ACCEPT_NULL_INPUT_POINTER >= 1)
+  if (p == NULL) {
+    len = 0;
+    bEnd = p = (const BYTE*)(size_t)32;
+  }
+#endif
+
+  if (len >= 32) {
+    const BYTE* const limit = bEnd - 32;
+    U64 v1 = seed + PRIME64_1 + PRIME64_2;
+    U64 v2 = seed + PRIME64_2;
+    U64 v3 = seed + 0;
+    U64 v4 = seed - PRIME64_1;
+
+    do {
+      v1 = XXH64_round(v1, XXH_get64bits(p));
+      p += 8;
+      v2 = XXH64_round(v2, XXH_get64bits(p));
+      p += 8;
+      v3 = XXH64_round(v3, XXH_get64bits(p));
+      p += 8;
+      v4 = XXH64_round(v4, XXH_get64bits(p));
+      p += 8;
+    } while (p <= limit);
+
+    h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) +
+          XXH_rotl64(v4, 18);
+    h64 = XXH64_mergeRound(h64, v1);
+    h64 = XXH64_mergeRound(h64, v2);
+    h64 = XXH64_mergeRound(h64, v3);
+    h64 = XXH64_mergeRound(h64, v4);
+
+  } else {
+    h64 = seed + PRIME64_5;
+  }
+
+  h64 += (U64)len;
+
+  return XXH64_finalize(h64, p, len, endian, align);
+}
+
+unsigned long long XXH64(const void* input, size_t len,
+                         unsigned long long seed) {
+#if 0
+  /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+  XXH64_state_t state;
+  XXH64_reset(&state, seed);
+  XXH64_update(&state, input, len);
+  return XXH64_digest(&state);
+#else
+  XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+  if (XXH_FORCE_ALIGN_CHECK) {
+    if ((((size_t)input) & 7) ==
+        0) { /* Input is aligned, let's leverage the speed advantage */
+      if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+        return XXH64_endian_align(input, len, seed, XXH_littleEndian,
+                                  XXH_aligned);
+      else
+        return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
+    }
+  }
+
+  if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+    return XXH64_endian_align(input, len, seed, XXH_littleEndian,
+                              XXH_unaligned);
+  else
+    return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
+#endif
+}
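Usage of the one-shot entry point above; a minimal sketch, illustrative only:

#include <cstdio>
#include <cstring>

#include "util/xxhash.h"

int main() {
  const char buf[] = "hello, xxhash64";
  // Hash the whole buffer in one call, seed 0 as the RocksDB callers use.
  unsigned long long h = rocksdb::XXH64(buf, strlen(buf), /*seed=*/0);
  printf("XXH64 = %016llx\n", h);
  return 0;
}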
+/*====== Hash Streaming ======*/
+
+XXH64_state_t* XXH64_createState(void) {
+  return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
+}
+XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) {
+  XXH_free(statePtr);
+  return XXH_OK;
+}
+
+void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState) {
+  memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed) {
+  XXH64_state_t state; /* using a local state to memcpy() in order to avoid
+                          strict-aliasing warnings */
+  memset(&state, 0, sizeof(state));
+  state.v1 = seed + PRIME64_1 + PRIME64_2;
+  state.v2 = seed + PRIME64_2;
+  state.v3 = seed + 0;
+  state.v4 = seed - PRIME64_1;
+  /* do not write into reserved, planned to be removed in a future version */
+  memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
+  return XXH_OK;
+}
+
+FORCE_INLINE XXH_errorcode XXH64_update_endian(XXH64_state_t* state,
+                                               const void* input, size_t len,
+                                               XXH_endianess endian) {
+  if (input == NULL)
+#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && \
+    (XXH_ACCEPT_NULL_INPUT_POINTER >= 1)
+    return XXH_OK;
+#else
+    return XXH_ERROR;
+#endif
+
+  {
+    const BYTE* p = (const BYTE*)input;
+    const BYTE* const bEnd = p + len;
+
+    state->total_len += len;
+
+    if (state->memsize + len < 32) { /* fill in tmp buffer */
+      XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
+      state->memsize += (U32)len;
+      return XXH_OK;
+    }
+
+    if (state->memsize) { /* tmp buffer is full */
+      XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input,
+                 32 - state->memsize);
+      state->v1 =
+          XXH64_round(state->v1, XXH_readLE64(state->mem64 + 0, endian));
+      state->v2 =
+          XXH64_round(state->v2, XXH_readLE64(state->mem64 + 1, endian));
+      state->v3 =
+          XXH64_round(state->v3, XXH_readLE64(state->mem64 + 2, endian));
+      state->v4 =
+          XXH64_round(state->v4, XXH_readLE64(state->mem64 + 3, endian));
+      p += 32 - state->memsize;
+      state->memsize = 0;
+    }
+
+    if (p + 32 <= bEnd) {
+      const BYTE* const limit = bEnd - 32;
+      U64 v1 = state->v1;
+      U64 v2 = state->v2;
+      U64 v3 = state->v3;
+      U64 v4 = state->v4;
+
+      do {
+        v1 = XXH64_round(v1, XXH_readLE64(p, endian));
+        p += 8;
+        v2 = XXH64_round(v2, XXH_readLE64(p, endian));
+        p += 8;
+        v3 = XXH64_round(v3, XXH_readLE64(p, endian));
+        p += 8;
+        v4 = XXH64_round(v4, XXH_readLE64(p, endian));
+        p += 8;
+      } while (p <= limit);
+
+      state->v1 = v1;
+      state->v2 = v2;
+      state->v3 = v3;
+      state->v4 = v4;
+    }
+
+    if (p < bEnd) {
+      XXH_memcpy(state->mem64, p, (size_t)(bEnd - p));
+      state->memsize = (unsigned)(bEnd - p);
+    }
+  }
+
+  return XXH_OK;
+}
+
+XXH_errorcode XXH64_update(XXH64_state_t* state_in, const void* input,
+                           size_t len) {
+  XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+  if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+    return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
+  else
+    return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
+}
+
+FORCE_INLINE U64 XXH64_digest_endian(const XXH64_state_t* state,
+                                     XXH_endianess endian) {
+  U64 h64;
+
+  if (state->total_len >= 32) {
+    U64 const v1 = state->v1;
+    U64 const v2 = state->v2;
+    U64 const v3 = state->v3;
+    U64 const v4 = state->v4;
+
+    h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) +
+          XXH_rotl64(v4, 18);
+    h64 = XXH64_mergeRound(h64, v1);
+    h64 = XXH64_mergeRound(h64, v2);
+    h64 = XXH64_mergeRound(h64, v3);
+    h64 = XXH64_mergeRound(h64, v4);
+  } else {
+    h64 = state->v3 /*seed*/ + PRIME64_5;
+  }
+
+  h64 += (U64)state->total_len;
+
+  return XXH64_finalize(h64, state->mem64, (size_t)state->total_len, endian,
+                        XXH_aligned);
+}
+
+unsigned long long XXH64_digest(const XXH64_state_t* state_in) {
+  XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+  if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+    return XXH64_digest_endian(state_in, XXH_littleEndian);
+  else
+    return XXH64_digest_endian(state_in, XXH_bigEndian);
+}
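The streaming API above must match the one-shot function on the concatenated input; that equivalence is what lets the table builder hash a block and its type byte in two updates while the reader hashes once. A sketch, illustrative only:

#include <cassert>
#include <cstring>
#include <string>

#include "util/xxhash.h"

int main() {
  const char part1[] = "hello, ";
  const char part2[] = "xxhash64";

  // Feed the input in two pieces through the streaming state.
  rocksdb::XXH64_state_t* st = rocksdb::XXH64_createState();
  rocksdb::XXH64_reset(st, 0);
  rocksdb::XXH64_update(st, part1, strlen(part1));
  rocksdb::XXH64_update(st, part2, strlen(part2));
  unsigned long long streamed = rocksdb::XXH64_digest(st);
  rocksdb::XXH64_freeState(st);

  // One-shot over the concatenation yields the same digest.
  std::string whole = std::string(part1) + part2;
  assert(streamed == rocksdb::XXH64(whole.data(), whole.size(), 0));
  return 0;
}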
+/*====== Canonical representation ======*/
+
+void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash) {
+  XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
+  if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
+  memcpy(dst, &hash, sizeof(*dst));
+}
+
+XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src) {
+  return XXH_readBE64(src);
+}
 }  // namespace rocksdb
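The canonical form fixes a big-endian byte layout so a digest can be stored or exchanged across machines regardless of host endianness. A round-trip sketch, illustrative only:

#include <cassert>

#include "util/xxhash.h"

int main() {
  rocksdb::XXH64_hash_t h = 0x0123456789abcdefULL;
  rocksdb::XXH64_canonical_t canon;
  rocksdb::XXH64_canonicalFromHash(&canon, h);  // big-endian bytes
  assert(rocksdb::XXH64_hashFromCanonical(&canon) == h);  // round-trips
  return 0;
}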
util/xxhash.h

@@ -58,7 +58,7 @@ It depends on successfully passing SMHasher test set.
 */

 #pragma once

 #include <stdlib.h>
 #if defined (__cplusplus)
 namespace rocksdb {
 #endif
@@ -67,6 +67,7 @@ namespace rocksdb {
 //****************************
 // Type
 //****************************
+#include <stddef.h> /* size_t */
 typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;
@@ -157,7 +158,75 @@ To free memory context, use XXH32_digest(), or free().
 #define XXH32_result XXH32_digest
 #define XXH32_getIntermediateResult XXH32_intermediateDigest

+/*-**********************************************************************
+ * 64-bit hash
+ ************************************************************************/
+typedef unsigned long long XXH64_hash_t;
+
+/*! XXH64() :
+    Calculate the 64-bit hash of sequence of length "len" stored at memory
+    address "input". "seed" can be used to alter the result predictably. This
+    function runs faster on 64-bit systems, but slower on 32-bit systems (see
+    benchmark).
+*/
+XXH64_hash_t XXH64(const void* input, size_t length, unsigned long long seed);
+
+/*====== Streaming ======*/
+typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */
+XXH64_state_t* XXH64_createState(void);
+XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr);
+void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state);
+
+XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed);
+XXH_errorcode XXH64_update(XXH64_state_t* statePtr, const void* input,
+                           size_t length);
+XXH64_hash_t XXH64_digest(const XXH64_state_t* statePtr);
+
+/*====== Canonical representation ======*/
+typedef struct {
+  unsigned char digest[8];
+} XXH64_canonical_t;
+void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
+XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);
+
+/* These definitions are only present to allow
+ * static allocation of XXH state, on stack or in a struct for example.
+ * Never **ever** use members directly. */
+
+#if !defined(__VMS) && \
+    (defined(__cplusplus) || \
+     (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */))
+#include <stdint.h>
+
+struct XXH64_state_s {
+  uint64_t total_len;
+  uint64_t v1;
+  uint64_t v2;
+  uint64_t v3;
+  uint64_t v4;
+  uint64_t mem64[4];
+  uint32_t memsize;
+  uint32_t reserved[2]; /* never read nor write, might be removed in a future
+                           version */
+}; /* typedef'd to XXH64_state_t */
+
+#else
+
+#ifndef XXH_NO_LONG_LONG /* remove 64-bit support */
+struct XXH64_state_s {
+  unsigned long long total_len;
+  unsigned long long v1;
+  unsigned long long v2;
+  unsigned long long v3;
+  unsigned long long v4;
+  unsigned long long mem64[4];
+  unsigned memsize;
+  unsigned reserved[2]; /* never read nor write, might be removed in a future
+                           version */
+}; /* typedef'd to XXH64_state_t */
+#endif
+
+#endif

 #if defined (__cplusplus)
 }  // namespace rocksdb
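Because the header exposes the state layout, callers can avoid the heap allocation in XXH64_createState() by placing the state on the stack, which is exactly what the disabled #if 0 branch of XXH64() itself does. A sketch, illustrative only; the function name HashOnStack is hypothetical:

#include <cstddef>

#include "util/xxhash.h"

unsigned long long HashOnStack(const void* data, size_t len) {
  rocksdb::XXH64_state_t state;  // static allocation, no malloc
  rocksdb::XXH64_reset(&state, 0);
  rocksdb::XXH64_update(&state, data, len);
  return rocksdb::XXH64_digest(&state);
}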