#if defined (__cplusplus)
extern "C" {
#endif
#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \
   && !defined(XXH_INLINE_ALL_31684351384)
   /* this section should be traversed only once */
#  define XXH_INLINE_ALL_31684351384
   /* give access to the advanced API, required to compile implementations */
#  undef XXH_STATIC_LINKING_ONLY   /* avoid macro redefinition */
#  define XXH_STATIC_LINKING_ONLY
   /* make all functions private */
#  undef XXH_PUBLIC_API
#  if defined(__GNUC__)
#    define XXH_PUBLIC_API static __inline __attribute__((unused))
#  elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) )
#    define XXH_PUBLIC_API static inline
#  elif defined(_MSC_VER)
#    define XXH_PUBLIC_API static __inline
#  else
     /* note: this version may generate warnings for unused static functions */
#    define XXH_PUBLIC_API static
#  endif
#  undef XXH_versionNumber
   /* XXH32 */
#  undef XXH32_createState
#  undef XXH32_freeState
#  undef XXH32_copyState
#  undef XXH32_canonicalFromHash
#  undef XXH32_hashFromCanonical
   /* XXH64 */
#  undef XXH64_createState
#  undef XXH64_freeState
#  undef XXH64_copyState
#  undef XXH64_canonicalFromHash
#  undef XXH64_hashFromCanonical
   /* XXH3_64bits */
#  undef XXH3_64bits_withSecret
#  undef XXH3_64bits_withSeed
#  undef XXH3_64bits_withSecretandSeed
#  undef XXH3_createState
#  undef XXH3_freeState
#  undef XXH3_copyState
#  undef XXH3_64bits_reset
#  undef XXH3_64bits_reset_withSeed
#  undef XXH3_64bits_reset_withSecret
#  undef XXH3_64bits_update
#  undef XXH3_64bits_digest
#  undef XXH3_generateSecret
   /* XXH3_128bits */
#  undef XXH3_128bits_withSeed
#  undef XXH3_128bits_withSecret
#  undef XXH3_128bits_reset
#  undef XXH3_128bits_reset_withSeed
#  undef XXH3_128bits_reset_withSecret
#  undef XXH3_128bits_reset_withSecretandSeed
#  undef XXH3_128bits_update
#  undef XXH3_128bits_digest
#  undef XXH128_isEqual
#  undef XXH128_canonicalFromHash
#  undef XXH128_hashFromCanonical
   /* finally, free the namespace itself */
#  undef XXH_NAMESPACE

   /* employ the namespace for XXH_INLINE_ALL */
#  define XXH_NAMESPACE XXH_INLINE_
#  define XXH_IPREF(Id)   XXH_NAMESPACE ## Id
#  define XXH_OK XXH_IPREF(XXH_OK)
#  define XXH_ERROR XXH_IPREF(XXH_ERROR)
#  define XXH_errorcode XXH_IPREF(XXH_errorcode)
#  define XXH32_canonical_t  XXH_IPREF(XXH32_canonical_t)
#  define XXH64_canonical_t  XXH_IPREF(XXH64_canonical_t)
#  define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t)
#  define XXH32_state_s XXH_IPREF(XXH32_state_s)
#  define XXH32_state_t XXH_IPREF(XXH32_state_t)
#  define XXH64_state_s XXH_IPREF(XXH64_state_s)
#  define XXH64_state_t XXH_IPREF(XXH64_state_t)
#  define XXH3_state_s  XXH_IPREF(XXH3_state_s)
#  define XXH3_state_t  XXH_IPREF(XXH3_state_t)
#  define XXH128_hash_t XXH_IPREF(XXH128_hash_t)
   /* Ensure the header is parsed again, even if it was previously included */
#  undef XXHASH_H_5627135585666179
#  undef XXHASH_H_STATIC_13879238742
#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */
#ifndef XXHASH_H_5627135585666179
#define XXHASH_H_5627135585666179 1
#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
#  if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
#    ifdef XXH_EXPORT
#      define XXH_PUBLIC_API __declspec(dllexport)
#    elif XXH_IMPORT
#      define XXH_PUBLIC_API __declspec(dllimport)
#    endif
#  else
#    define XXH_PUBLIC_API   /* do nothing */
#  endif
#endif

#ifdef XXH_DOXYGEN
#  define XXH_NAMESPACE /* YOUR NAME HERE */
#  undef XXH_NAMESPACE
#endif

#ifdef XXH_NAMESPACE
#  define XXH_CAT(A,B) A##B
#  define XXH_NAME2(A,B) XXH_CAT(A,B)
#  define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
/* XXH32 */
#  define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
#  define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
#  define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
#  define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
#  define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
#  define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
#  define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
#  define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
#  define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
/* XXH64 */
#  define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
#  define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
#  define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
#  define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
#  define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
#  define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
#  define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
#  define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
#  define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
/* XXH3_64bits */
#  define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
#  define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
#  define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
#  define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed)
#  define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
#  define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
#  define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
#  define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
#  define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
#  define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
#  define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed)
#  define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
#  define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
#  define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
#  define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed)
/* XXH3_128bits */
#  define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
#  define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
#  define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
#  define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
#  define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed)
#  define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
#  define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
#  define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
#  define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed)
#  define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
#  define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
#  define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
#  define XXH128_cmp     XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
#  define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
#  define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
#define XXH_VERSION_MAJOR    0
#define XXH_VERSION_MINOR    8
#define XXH_VERSION_RELEASE  1
#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
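/*!
 * Illustrative sketch (editor's addition, not from the original xxhash.h):
 * XXH_VERSION_NUMBER for 0.8.1 evaluates to 801, while XXH_versionNumber()
 * reports the value the linked library was built with, so the two can be
 * compared to detect a header/library mismatch at runtime.
 * @code{.c}
 *   #include <stdio.h>
 *   #include "xxhash.h"
 *
 *   int main(void)
 *   {
 *       printf("header %u, library %u\n",
 *              (unsigned)XXH_VERSION_NUMBER, XXH_versionNumber());
 *       return 0;
 *   }
 * @endcode
 */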
#if defined(XXH_DOXYGEN)
typedef uint32_t XXH32_hash_t;
#elif !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) ) )
#   include <stdint.h>
    typedef uint32_t XXH32_hash_t;
#else
#   include <limits.h>
#   if UINT_MAX == 0xFFFFFFFFUL
      typedef unsigned int XXH32_hash_t;
#   elif ULONG_MAX == 0xFFFFFFFFUL
      typedef unsigned long XXH32_hash_t;
#   else
#     error "unsupported platform: need a 32-bit type"
#   endif
#endif
typedef struct {
    unsigned char digest[4]; /*!< Hash bytes, big endian (canonical representation) */
} XXH32_canonical_t;
#ifdef __has_attribute
# define XXH_HAS_ATTRIBUTE(x) __has_attribute(x)
#else
# define XXH_HAS_ATTRIBUTE(x) 0
#endif

/* C23 and later have a standard [[fallthrough]] attribute */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute)
# define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
#else
# define XXH_HAS_C_ATTRIBUTE(x) 0
#endif

#if defined(__cplusplus) && defined(__has_cpp_attribute)
# define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
#else
# define XXH_HAS_CPP_ATTRIBUTE(x) 0
#endif

/* XXH_FALLTHROUGH annotates intentional switch-case fallthrough. */
#if XXH_HAS_C_ATTRIBUTE(fallthrough)
# define XXH_FALLTHROUGH [[fallthrough]]
#elif XXH_HAS_CPP_ATTRIBUTE(fallthrough)
# define XXH_FALLTHROUGH [[fallthrough]]
#elif XXH_HAS_ATTRIBUTE(__fallthrough__)
# define XXH_FALLTHROUGH __attribute__ ((fallthrough))
#else
# define XXH_FALLTHROUGH
#endif
#ifndef XXH_NO_LONG_LONG

#if defined(XXH_DOXYGEN)
typedef uint64_t XXH64_hash_t;
#elif !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) ) )
#  include <stdint.h>
   typedef uint64_t XXH64_hash_t;
#else
#  if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
     /* LP64 ABI says uint64_t is unsigned long */
     typedef unsigned long XXH64_hash_t;
#  else
     typedef unsigned long long XXH64_hash_t;
#  endif
#endif
#define XXH3_SECRET_SIZE_MIN 136
#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
#define XXHASH_H_STATIC_13879238742
struct XXH32_state_s {
   XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */
   XXH32_hash_t large_len;    /*!< Whether the hash is >= 16 (handles total_len_32 overflow) */
   XXH32_hash_t mem32[4];     /*!< Internal buffer for partial reads */
   XXH32_hash_t memsize;      /*!< Amount of data in mem32 */
   XXH32_hash_t reserved;     /*!< Reserved field; do not read or write */
};   /* typedef'd to XXH32_state_t */
#ifndef XXH_NO_LONG_LONG   /* defined when there is no 64-bit support */

struct XXH64_state_s {
   XXH32_hash_t memsize;    /*!< Amount of data in mem64 */
   XXH32_hash_t reserved32; /*!< Reserved field, needed for padding anyways */
};   /* typedef'd to XXH64_state_t */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)   /* >= C11 */
#  include <stdalign.h>
#  define XXH_ALIGN(n)      alignas(n)
#elif defined(__cplusplus) && (__cplusplus >= 201103L)   /* >= C++11 */
/* In C++ alignas() is a keyword */
#  define XXH_ALIGN(n)      alignas(n)
#elif defined(__GNUC__)
#  define XXH_ALIGN(n)      __attribute__ ((aligned(n)))
#elif defined(_MSC_VER)
#  define XXH_ALIGN(n)      __declspec(align(n))
#else
#  define XXH_ALIGN(n)   /* disabled */
#endif

/* Old GCC versions only accept the attribute after the type in structures. */
#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L))   /* C11+ */ \
    && ! (defined(__cplusplus) && (__cplusplus >= 201103L))   /* >= C++11 */ \
    && defined(__GNUC__)
#  define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
#else
#  define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
#endif
#define XXH3_INTERNALBUFFER_SIZE 256

#define XXH3_SECRET_DEFAULT_SIZE 192
struct XXH3_state_s {
   XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
       /*!< Used to store a custom secret generated from a seed. */
   XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
       /*!< The internal buffer. */
   XXH32_hash_t bufferedSize;
       /*!< The amount of memory in buffer. */
   XXH32_hash_t useSeed;
   size_t nbStripesSoFar;
       /*!< Number of stripes processed in the current block. */
   size_t nbStripesPerBlock;
       /*!< Number of stripes per block. */
   const unsigned char* extSecret;
       /*!< Reference to an external secret for the _withSecret variants,
        *   NULL for other variants. */
};   /* typedef'd to XXH3_state_t */

#undef XXH_ALIGN_MEMBER
#define XXH3_INITSTATE(XXH3_state_ptr)   { (XXH3_state_ptr)->seed = 0; }
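/*!
 * Illustrative sketch (editor's addition, not from the original xxhash.h):
 * a statically or stack-allocated XXH3_state_t must be initialized with
 * XXH3_INITSTATE() once before its first reset. This is only possible when
 * XXH_STATIC_LINKING_ONLY exposes the structure definition; `buffer` and
 * `bufferSize` below are hypothetical caller-provided names.
 * @code{.c}
 *   XXH3_state_t state;
 *   XXH3_INITSTATE(&state);
 *   XXH3_64bits_reset(&state);
 *   XXH3_64bits_update(&state, buffer, bufferSize);
 *   { XXH64_hash_t const h = XXH3_64bits_digest(&state); (void)h; }
 * @endcode
 */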
XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(void* secretBuffer,
                                                 size_t secretSize,
                                                 const void* customSeed,
                                                 size_t customSeedSize);
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSecretandSeed(const void* data, size_t len,
                              const void* secret, size_t secretSize,
                              XXH64_hash_t seed);

XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecretandSeed(const void* data, size_t len,
                               const void* secret, size_t secretSize,
                               XXH64_hash_t seed64);

XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecretandSeed(XXH3_state_t* statePtr,
                                    const void* secret, size_t secretSize,
                                    XXH64_hash_t seed64);

XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr,
                                     const void* secret, size_t secretSize,
                                     XXH64_hash_t seed64);
#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
#  define XXH_IMPLEMENTATION
#endif

#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
   || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
#  define XXH_IMPLEM_13a8737387
#  define XXH_NO_LONG_LONG
#  undef XXH_NO_LONG_LONG /* don't actually */
#  define XXH_FORCE_MEMORY_ACCESS 0
#  define XXH_FORCE_ALIGN_CHECK 0
#  define XXH_NO_INLINE_HINTS 0
#  define XXH32_ENDJMP 0
#  define XXH_OLD_NAMES
#  undef XXH_OLD_NAMES /* don't actually use, it is ugly. */
#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
   /* prefer __packed__ structures (method 1) for GCC on armv7+ and mips */
#  if !defined(__clang__) && \
( \
    (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
    ( \
        defined(__GNUC__) && ( \
            (defined(__ARM_ARCH) && __ARM_ARCH >= 7) || \
            ( \
                defined(__mips__) && \
                (__mips <= 5 || __mips_isa_rev < 6) && \
                (!defined(__mips16) || defined(__mips_mips16e2)) \
            ) \
        ) \
    ) \
)
#    define XXH_FORCE_MEMORY_ACCESS 1
#  endif
#endif
#ifndef XXH_FORCE_ALIGN_CHECK  /* can be defined externally */
   /* don't check on x86, aarch64: unaligned access is supported */
#  if defined(__i386)  || defined(__x86_64__) || defined(__aarch64__) \
   || defined(_M_IX86) || defined(_M_X64)     || defined(_M_ARM64)
#    define XXH_FORCE_ALIGN_CHECK 0
#  else
#    define XXH_FORCE_ALIGN_CHECK 1
#  endif
#endif

#ifndef XXH_NO_INLINE_HINTS
#  if defined(__OPTIMIZE_SIZE__) /* -Os, -Oz */ \
   || defined(__NO_INLINE__)     /* -O0, -fno-inline */
#    define XXH_NO_INLINE_HINTS 1
#  else
#    define XXH_NO_INLINE_HINTS 0
#  endif
#endif

#ifndef XXH32_ENDJMP
/* generally preferable for performance */
#  define XXH32_ENDJMP 0
#endif
/* Modify the local functions below should you wish to use some other memory routines. */
static void* XXH_malloc(size_t s) { return malloc(s); }

static void XXH_free(void* p) { free(p); }

static void* XXH_memcpy(void* dest, const void* src, size_t size)
{
    return memcpy(dest,src,size);
}

#if defined(_MSC_VER)                    /* Visual Studio warning fix */
#  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
#endif
#if XXH_NO_INLINE_HINTS  /* disable inlining hints */
#  if defined(__GNUC__) || defined(__clang__)
#    define XXH_FORCE_INLINE static __attribute__((unused))
#  else
#    define XXH_FORCE_INLINE static
#  endif
#  define XXH_NO_INLINE static
/* enable inlining hints */
#elif defined(__GNUC__) || defined(__clang__)
#  define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused))
#  define XXH_NO_INLINE static __attribute__((noinline))
#elif defined(_MSC_VER)  /* Visual Studio */
#  define XXH_FORCE_INLINE static __forceinline
#  define XXH_NO_INLINE static __declspec(noinline)
#elif defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L))   /* C99 */
#  define XXH_FORCE_INLINE static inline
#  define XXH_NO_INLINE static
#else
#  define XXH_FORCE_INLINE static
#  define XXH_NO_INLINE static
#endif
#ifndef XXH_DEBUGLEVEL
#  ifdef DEBUGLEVEL /* backwards compat */
#    define XXH_DEBUGLEVEL DEBUGLEVEL
#  else
#    define XXH_DEBUGLEVEL 0
#  endif
#endif

#if (XXH_DEBUGLEVEL>=1)
#  include <assert.h>   /* note: can still be disabled with NDEBUG */
#  define XXH_ASSERT(c)   assert(c)
#else
#  define XXH_ASSERT(c)   ((void)0)
#endif
/* note: use after variable declarations */
#ifndef XXH_STATIC_ASSERT
#  if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)    /* C11 */
#    include <assert.h>
#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
#  elif defined(__cplusplus) && (__cplusplus >= 201103L)            /* C++11 */
#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
#  else
#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0)
#  endif
#  define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c)
#endif
#if defined(__GNUC__) || defined(__clang__)
#  define XXH_COMPILER_GUARD(var) __asm__ __volatile__("" : "+r" (var))
#else
#  define XXH_COMPILER_GUARD(var) ((void)0)
#endif
#if !defined (__VMS) \
 && (defined (__cplusplus) \
 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) ) )
#  include <stdint.h>
   typedef uint8_t xxh_u8;
#else
   typedef unsigned char xxh_u8;
#endif
typedef XXH32_hash_t xxh_u32;

#ifdef XXH_OLD_NAMES
#  define BYTE xxh_u8
#  define U32 xxh_u32
#endif
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
/*
 * Manual byteshift. Best for old compilers which don't inline memcpy.
 * We actually directly use XXH_readLE32 and XXH_readBE32.
 */
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware. */
static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; }

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

/* __packed__ unions are safer, but compiler-specific. */
#ifdef XXH_OLD_NAMES
typedef union { xxh_u32 u32; } __attribute__((packed)) unalign;
#endif
static xxh_u32 XXH_read32(const void* ptr)
{
    typedef union { xxh_u32 u32; } __attribute__((packed)) xxh_unalign;
    return ((const xxh_unalign*)ptr)->u32;
}

#else

/* Portable and safe solution. Generally efficient. */
static xxh_u32 XXH_read32(const void* memPtr)
{
    xxh_u32 val;
    XXH_memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif   /* XXH_FORCE_MEMORY_ACCESS */
#ifndef XXH_CPU_LITTLE_ENDIAN
/*
 * Try to detect endianness automatically, to avoid the nonstandard behavior
 * in XXH_isLittleEndian().
 */
#  if defined(_WIN32) /* Windows is always little endian */ \
     || defined(__LITTLE_ENDIAN__) \
     || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#    define XXH_CPU_LITTLE_ENDIAN 1
#  elif defined(__BIG_ENDIAN__) \
     || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#    define XXH_CPU_LITTLE_ENDIAN 0
#  else
/* Runtime test, presumed to simplify to a constant by the compiler. */
static int XXH_isLittleEndian(void)
{
    /* Portable and well-defined behavior. Don't use static: it is detrimental to performance. */
    const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 };
    return one.c[0];
}
#    define XXH_CPU_LITTLE_ENDIAN   XXH_isLittleEndian()
#  endif
#endif
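/*!
 * Illustrative sketch (editor's addition, not from the original xxhash.h):
 * on a little-endian target the first byte of the 32-bit value 1 is 1, so
 * XXH_CPU_LITTLE_ENDIAN is 1 whether it resolved at compile time or via the
 * runtime union check above, and it can gate byte-swapping uniformly:
 * @code{.c}
 *   xxh_u32 v = 1;
 *   if (!XXH_CPU_LITTLE_ENDIAN) v = XXH_swap32(v);   /* defined further below */
 * @endcode
 */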
#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

#ifdef __has_builtin
#  define XXH_HAS_BUILTIN(x) __has_builtin(x)
#else
#  define XXH_HAS_BUILTIN(x) 0
#endif
#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
                               && XXH_HAS_BUILTIN(__builtin_rotateleft64)
#  define XXH_rotl32 __builtin_rotateleft32
#  define XXH_rotl64 __builtin_rotateleft64
/* Note: although _rotl exists for minGW (GCC under windows), performance seems poor */
#elif defined(_MSC_VER)
#  define XXH_rotl32(x,r) _rotl(x,r)
#  define XXH_rotl64(x,r) _rotl64(x,r)
#else
#  define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
#  define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
#endif
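/*!
 * Worked example (editor's addition, not from the original xxhash.h):
 * a left rotation wraps the high bits back into the low bits, e.g.
 * rotating 0x80000001 left by 1 moves the top bit to bit 0:
 * @code{.c}
 *   XXH_ASSERT(XXH_rotl32(0x80000001U, 1) == 0x00000003U);
 * @endcode
 * Note the portable fallback above requires 0 < r < 32 (resp. 64).
 */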
#if defined(_MSC_VER)     /* Visual Studio */
#  define XXH_swap32 _byteswap_ulong
#elif XXH_GCC_VERSION >= 403
#  define XXH_swap32 __builtin_bswap32
#else
static xxh_u32 XXH_swap32 (xxh_u32 x)
{
    return  ((x << 24) & 0xff000000 ) |
            ((x <<  8) & 0x00ff0000 ) |
            ((x >>  8) & 0x0000ff00 ) |
            ((x >> 24) & 0x000000ff );
}
#endif
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))

XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[0]
         | ((xxh_u32)bytePtr[1] << 8)
         | ((xxh_u32)bytePtr[2] << 16)
         | ((xxh_u32)bytePtr[3] << 24);
}

XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[3]
         | ((xxh_u32)bytePtr[2] << 8)
         | ((xxh_u32)bytePtr[1] << 16)
         | ((xxh_u32)bytePtr[0] << 24);
}

#else
XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
}

static xxh_u32 XXH_readBE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}
#endif

XXH_FORCE_INLINE xxh_u32
XXH_readLE32_align(const void* ptr, XXH_alignment align)
{
    if (align==XXH_unaligned) {
        return XXH_readLE32(ptr);
    } else {
        return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr);
    }
}
#define XXH_PRIME32_1  0x9E3779B1U
#define XXH_PRIME32_2  0x85EBCA77U
#define XXH_PRIME32_3  0xC2B2AE3DU
#define XXH_PRIME32_4  0x27D4EB2FU
#define XXH_PRIME32_5  0x165667B1U

#ifdef XXH_OLD_NAMES
#  define PRIME32_1 XXH_PRIME32_1
#  define PRIME32_2 XXH_PRIME32_2
#  define PRIME32_3 XXH_PRIME32_3
#  define PRIME32_4 XXH_PRIME32_4
#  define PRIME32_5 XXH_PRIME32_5
#endif
static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
{
    acc += input * XXH_PRIME32_2;
    acc  = XXH_rotl32(acc, 13);
    acc *= XXH_PRIME32_1;
#if (defined(__SSE4_1__) || defined(__aarch64__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
    /* UGLY HACK: prevent auto-vectorization of this scalar loop, which is
     * slower when SSE4.1/NEON auto-vectorizes it. */
    XXH_COMPILER_GUARD(acc);
#endif
    return acc;
}
static xxh_u32 XXH32_avalanche(xxh_u32 h32)
{
    h32 ^= h32 >> 15;
    h32 *= XXH_PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= XXH_PRIME32_3;
    h32 ^= h32 >> 16;
    return h32;
}

#define XXH_get32bits(p) XXH_readLE32_align(p, align)
static xxh_u32
XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align)
{
#define XXH_PROCESS1 do {                             \
    h32 += (*ptr++) * XXH_PRIME32_5;                  \
    h32 = XXH_rotl32(h32, 11) * XXH_PRIME32_1;        \
} while (0)

#define XXH_PROCESS4 do {                             \
    h32 += XXH_get32bits(ptr) * XXH_PRIME32_3;        \
    ptr += 4;                                         \
    h32  = XXH_rotl32(h32, 17) * XXH_PRIME32_4;       \
} while (0)

    if (ptr==NULL) XXH_ASSERT(len == 0);

    /* Compact rerolled version; generally faster */
    if (!XXH32_ENDJMP) {
        len &= 15;
        while (len >= 4) { XXH_PROCESS4; len -= 4; }
        while (len > 0)  { XXH_PROCESS1; --len; }
        return XXH32_avalanche(h32);
    } else {
        switch(len&15) {
           case 12:      XXH_PROCESS4; XXH_FALLTHROUGH;
           case 8:       XXH_PROCESS4; XXH_FALLTHROUGH;
           case 4:       XXH_PROCESS4;
                         return XXH32_avalanche(h32);

           case 13:      XXH_PROCESS4; XXH_FALLTHROUGH;
           case 9:       XXH_PROCESS4; XXH_FALLTHROUGH;
           case 5:       XXH_PROCESS4; XXH_PROCESS1;
                         return XXH32_avalanche(h32);

           case 14:      XXH_PROCESS4; XXH_FALLTHROUGH;
           case 10:      XXH_PROCESS4; XXH_FALLTHROUGH;
           case 6:       XXH_PROCESS4; XXH_PROCESS1; XXH_PROCESS1;
                         return XXH32_avalanche(h32);

           case 15:      XXH_PROCESS4; XXH_FALLTHROUGH;
           case 11:      XXH_PROCESS4; XXH_FALLTHROUGH;
           case 7:       XXH_PROCESS4; XXH_FALLTHROUGH;
           case 3:       XXH_PROCESS1; XXH_FALLTHROUGH;
           case 2:       XXH_PROCESS1; XXH_FALLTHROUGH;
           case 1:       XXH_PROCESS1; XXH_FALLTHROUGH;
           case 0:       return XXH32_avalanche(h32);
        }
        XXH_ASSERT(0);
        return h32;   /* reaching this point is deemed impossible */
    }
}
#ifdef XXH_OLD_NAMES
#  define PROCESS1 XXH_PROCESS1
#  define PROCESS4 XXH_PROCESS4
#else
#  undef XXH_PROCESS1
#  undef XXH_PROCESS4
#endif
XXH_FORCE_INLINE xxh_u32
XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
{
    xxh_u32 h32;

    if (input==NULL) XXH_ASSERT(len == 0);

    if (len>=16) {
        const xxh_u8* const bEnd = input + len;
        const xxh_u8* const limit = bEnd - 15;
        xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
        xxh_u32 v2 = seed + XXH_PRIME32_2;
        xxh_u32 v3 = seed + 0;
        xxh_u32 v4 = seed - XXH_PRIME32_1;

        do {
            v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4;
            v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4;
            v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4;
            v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4;
        } while (input < limit);

        h32 = XXH_rotl32(v1, 1)  + XXH_rotl32(v2, 7)
            + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
    } else {
        h32  = seed + XXH_PRIME32_5;
    }

    h32 += (xxh_u32)len;

    return XXH32_finalize(h32, input, len&15, align);
}
    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
            return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
    }   }

    return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
    XXH_memcpy(dstState, srcState, sizeof(*dstState));
    state.v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
        XXH_ASSERT(len == 0);
    {   const xxh_u8* p = (const xxh_u8*)input;
        const xxh_u8* const bEnd = p + len;

        state->total_len_32 += (XXH32_hash_t)len;
        state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));

        if (state->memsize + len < 16)  {   /* fill in tmp buffer */
            XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len);
            state->memsize += (XXH32_hash_t)len;
            return XXH_OK;
        }

        if (state->memsize) {   /* some data left from previous update */
            XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize);
            {   const xxh_u32* p32 = state->mem32;
                state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); p32++;
                state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); p32++;
                state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); p32++;
                state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32));
            }
            p += 16-state->memsize;
            state->memsize = 0;
        }

        if (p <= bEnd-16) {
            const xxh_u8* const limit = bEnd - 16;

            do {
                state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); p+=4;
                state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); p+=4;
                state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); p+=4;
                state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p+=4;
            } while (p<=limit);
        }

        if (p < bEnd) {
            XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
            state->memsize = (unsigned)(bEnd-p);
        }
    }

    return XXH_OK;
    xxh_u32 h32;

    if (state->large_len) {
        h32 = XXH_rotl32(state->v[0], 1)
            + XXH_rotl32(state->v[1], 7)
            + XXH_rotl32(state->v[2], 12)
            + XXH_rotl32(state->v[3], 18);
    } else {
        h32 = state->v[2] /* == seed */ + XXH_PRIME32_5;
    }

    h32 += state->total_len_32;

    return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned);
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);   /* canonical form is big endian */

    return XXH_readBE32(src);
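/*!
 * Illustrative sketch (editor's addition, not from the original xxhash.h)
 * tying the streaming and canonical APIs together; `chunk`/`chunkSize` are
 * hypothetical caller-provided names. The canonical form is big endian, so
 * it can be stored or transmitted and recovered on any platform with
 * XXH32_hashFromCanonical().
 * @code{.c}
 *   XXH32_state_t* const st = XXH32_createState();
 *   XXH32_canonical_t canon;
 *   XXH32_reset(st, 0);
 *   XXH32_update(st, chunk, chunkSize);
 *   XXH32_canonicalFromHash(&canon, XXH32_digest(st));
 *   XXH32_freeState(st);
 * @endcode
 */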
#ifndef XXH_NO_LONG_LONG

#ifdef XXH_OLD_NAMES
#  define U64 xxh_u64
#endif
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
/*
 * Manual byteshift. Best for old compilers which don't inline memcpy.
 * We actually directly use XXH_readLE64 and XXH_readBE64.
 */
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware. */
static xxh_u64 XXH_read64(const void* memPtr)
{
    return *(const xxh_u64*) memPtr;
}

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

#ifdef XXH_OLD_NAMES
typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64;
#endif
static xxh_u64 XXH_read64(const void* ptr)
{
    typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) xxh_unalign64;
    return ((const xxh_unalign64*)ptr)->u64;
}

#else

/* Portable and safe solution. Generally efficient. */
static xxh_u64 XXH_read64(const void* memPtr)
{
    xxh_u64 val;
    XXH_memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif   /* XXH_FORCE_MEMORY_ACCESS */
#if defined(_MSC_VER)     /* Visual Studio */
#  define XXH_swap64 _byteswap_uint64
#elif XXH_GCC_VERSION >= 403
#  define XXH_swap64 __builtin_bswap64
#else
static xxh_u64 XXH_swap64(xxh_u64 x)
{
    return  ((x << 56) & 0xff00000000000000ULL) |
            ((x << 40) & 0x00ff000000000000ULL) |
            ((x << 24) & 0x0000ff0000000000ULL) |
            ((x << 8)  & 0x000000ff00000000ULL) |
            ((x >> 8)  & 0x00000000ff000000ULL) |
            ((x >> 24) & 0x0000000000ff0000ULL) |
            ((x >> 40) & 0x000000000000ff00ULL) |
            ((x >> 56) & 0x00000000000000ffULL);
}
#endif
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))

XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[0]
         | ((xxh_u64)bytePtr[1] << 8)
         | ((xxh_u64)bytePtr[2] << 16)
         | ((xxh_u64)bytePtr[3] << 24)
         | ((xxh_u64)bytePtr[4] << 32)
         | ((xxh_u64)bytePtr[5] << 40)
         | ((xxh_u64)bytePtr[6] << 48)
         | ((xxh_u64)bytePtr[7] << 56);
}

XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[7]
         | ((xxh_u64)bytePtr[6] << 8)
         | ((xxh_u64)bytePtr[5] << 16)
         | ((xxh_u64)bytePtr[4] << 24)
         | ((xxh_u64)bytePtr[3] << 32)
         | ((xxh_u64)bytePtr[2] << 40)
         | ((xxh_u64)bytePtr[1] << 48)
         | ((xxh_u64)bytePtr[0] << 56);
}

#else
XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
}

static xxh_u64 XXH_readBE64(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
}
#endif

XXH_FORCE_INLINE xxh_u64
XXH_readLE64_align(const void* ptr, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return XXH_readLE64(ptr);
    else
        return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr);
}
#define XXH_PRIME64_1  0x9E3779B185EBCA87ULL
#define XXH_PRIME64_2  0xC2B2AE3D27D4EB4FULL
#define XXH_PRIME64_3  0x165667B19E3779F9ULL
#define XXH_PRIME64_4  0x85EBCA77C2B2AE63ULL
#define XXH_PRIME64_5  0x27D4EB2F165667C5ULL

#ifdef XXH_OLD_NAMES
#  define PRIME64_1 XXH_PRIME64_1
#  define PRIME64_2 XXH_PRIME64_2
#  define PRIME64_3 XXH_PRIME64_3
#  define PRIME64_4 XXH_PRIME64_4
#  define PRIME64_5 XXH_PRIME64_5
#endif
static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
{
    acc += input * XXH_PRIME64_2;
    acc  = XXH_rotl64(acc, 31);
    acc *= XXH_PRIME64_1;
    return acc;
}

static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
{
    val  = XXH64_round(0, val);
    acc ^= val;
    acc  = acc * XXH_PRIME64_1 + XXH_PRIME64_4;
    return acc;
}

static xxh_u64 XXH64_avalanche(xxh_u64 h64)
{
    h64 ^= h64 >> 33;
    h64 *= XXH_PRIME64_2;
    h64 ^= h64 >> 29;
    h64 *= XXH_PRIME64_3;
    h64 ^= h64 >> 32;
    return h64;
}

#define XXH_get64bits(p) XXH_readLE64_align(p, align)
static xxh_u64
XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align)
{
    if (ptr==NULL) XXH_ASSERT(len == 0);
    len &= 31;
    while (len >= 8) {
        xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr));
        ptr += 8;
        h64 ^= k1;
        h64  = XXH_rotl64(h64,27) * XXH_PRIME64_1 + XXH_PRIME64_4;
        len -= 8;
    }
    if (len >= 4) {
        h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;
        ptr += 4;
        h64 = XXH_rotl64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
        len -= 4;
    }
    while (len > 0) {
        h64 ^= (*ptr++) * XXH_PRIME64_5;
        h64 = XXH_rotl64(h64, 11) * XXH_PRIME64_1;
        --len;
    }
    return  XXH64_avalanche(h64);
}
#ifdef XXH_OLD_NAMES
#  define PROCESS1_64 XXH_PROCESS1_64
#  define PROCESS4_64 XXH_PROCESS4_64
#  define PROCESS8_64 XXH_PROCESS8_64
#else
#  undef XXH_PROCESS1_64
#  undef XXH_PROCESS4_64
#  undef XXH_PROCESS8_64
#endif
XXH_FORCE_INLINE xxh_u64
XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
{
    xxh_u64 h64;
    if (input==NULL) XXH_ASSERT(len == 0);

    if (len>=32) {
        const xxh_u8* const bEnd = input + len;
        const xxh_u8* const limit = bEnd - 31;
        xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
        xxh_u64 v2 = seed + XXH_PRIME64_2;
        xxh_u64 v3 = seed + 0;
        xxh_u64 v4 = seed - XXH_PRIME64_1;

        do {
            v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
            v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
            v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
            v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
        } while (input<limit);

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);

    } else {
        h64  = seed + XXH_PRIME64_5;
    }

    h64 += (xxh_u64) len;

    return XXH64_finalize(h64, input, len, align);
}
    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
            return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
    }   }

    return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
    XXH_memcpy(dstState, srcState, sizeof(*dstState));

    state.v[0] = seed + XXH_PRIME64_1 + XXH_PRIME64_2;

    /* do not write into reserved64, might be removed in a future version */
    XXH_memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved64));
        XXH_ASSERT(len == 0);

    {   const xxh_u8* p = (const xxh_u8*)input;
        const xxh_u8* const bEnd = p + len;

        state->total_len += len;

        if (state->memsize + len < 32) {  /* fill in tmp buffer */
            XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len);
            state->memsize += (xxh_u32)len;
            return XXH_OK;
        }

        if (state->memsize) {   /* tmp buffer is full */
            XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize);
            state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64+0));
            state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64+1));
            state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64+2));
            state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64+3));
            p += 32 - state->memsize;
            state->memsize = 0;
        }

        if (p+32 <= bEnd) {
            const xxh_u8* const limit = bEnd - 32;

            do {
                state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); p+=8;
                state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); p+=8;
                state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); p+=8;
                state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p+=8;
            } while (p<=limit);
        }

        if (p < bEnd) {
            XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
            state->memsize = (unsigned)(bEnd-p);
        }
    }

    return XXH_OK;
    xxh_u64 h64;

    if (state->total_len >= 32) {
        h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7)
            + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18);
        h64 = XXH64_mergeRound(h64, state->v[0]);
        h64 = XXH64_mergeRound(h64, state->v[1]);
        h64 = XXH64_mergeRound(h64, state->v[2]);
        h64 = XXH64_mergeRound(h64, state->v[3]);
    } else {
        h64  = state->v[2] /*seed*/ + XXH_PRIME64_5;
    }

    h64 += (xxh_u64) state->total_len;

    return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned);
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);   /* canonical form is big endian */

    return XXH_readBE64(src);
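/*!
 * Illustrative sketch (editor's addition, not from the original xxhash.h):
 * hash a buffer with the one-shot XXH64() and round-trip the result through
 * its canonical (big-endian) representation. `input`/`inputSize` are
 * hypothetical caller-provided names.
 * @code{.c}
 *   XXH64_hash_t const h = XXH64(input, inputSize, 0);
 *   XXH64_canonical_t canon;
 *   XXH64_canonicalFromHash(&canon, h);
 *   XXH_ASSERT(XXH64_hashFromCanonical(&canon) == h);
 * @endcode
 */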
#if ((defined(sun) || defined(__sun)) && __cplusplus)  /* Solaris includes __STDC_VERSION__ with C++. Tested with GCC 5.5 */
#  define XXH_RESTRICT   /* disable */
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* >= C99 */
#  define XXH_RESTRICT   restrict
#else
#  define XXH_RESTRICT   /* disable */
#endif

#if (defined(__GNUC__) && (__GNUC__ >= 3))  \
  || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \
  || defined(__clang__)
#    define XXH_likely(x) __builtin_expect(x, 1)
#    define XXH_unlikely(x) __builtin_expect(x, 0)
#else
#    define XXH_likely(x) (x)
#    define XXH_unlikely(x) (x)
#endif
#if defined(__GNUC__)
#  if defined(__AVX2__)
#    include <immintrin.h>
#  elif defined(__SSE2__)
#    include <emmintrin.h>
#  elif defined(__ARM_NEON__) || defined(__ARM_NEON)
#    define inline __inline__  /* circumvent a clang bug */
#    include <arm_neon.h>
#    undef inline
#  endif
#elif defined(_MSC_VER)
#  include <intrin.h>
#endif

#if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM)
#  warning "XXH3 is highly inefficient without ARM or Thumb-2."
#endif

#ifdef XXH_DOXYGEN
#  define XXH_VECTOR XXH_SCALAR   /* documentation-only default; actual detection is below */
#endif
enum XXH_VECTOR_TYPE /* fake enum: this is only present for documentation */ {
    XXH_SCALAR = 0,  /*!< Portable scalar version */
    XXH_SSE2   = 1,  /*!< SSE2 for x86/x64 */
    XXH_AVX2   = 2,  /*!< AVX2 for x86/x64 */
    XXH_AVX512 = 3,  /*!< AVX512 for x86/x64 */
    XXH_NEON   = 4,  /*!< NEON for ARM/AArch64 */
    XXH_VSX    = 5   /*!< VSX and ZVector for POWER8/z13 (64-bit) */
};

#ifdef XXH_DOXYGEN
#  define XXH_ACC_ALIGN 8   /* documentation-only default */
#endif

/* Actual definition */
#ifndef XXH_DOXYGEN
#  define XXH_SCALAR 0
#  define XXH_SSE2   1
#  define XXH_AVX2   2
#  define XXH_AVX512 3
#  define XXH_NEON   4
#  define XXH_VSX    5
#endif

#ifndef XXH_VECTOR    /* can be defined on command line */
#  if defined(__AVX512F__)
#    define XXH_VECTOR XXH_AVX512
#  elif defined(__AVX2__)
#    define XXH_VECTOR XXH_AVX2
#  elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
#    define XXH_VECTOR XXH_SSE2
#  elif ( \
        defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \
     || defined(_M_ARM64) || defined(_M_ARM_ARMV7VE) /* msvc */ \
   ) && ( \
        defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \
     || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
   )
#    define XXH_VECTOR XXH_NEON
#  elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \
     || (defined(__s390x__) && defined(__VEC__)) \
     && defined(__GNUC__)
#    define XXH_VECTOR XXH_VSX
#  else
#    define XXH_VECTOR XXH_SCALAR
#  endif
#endif
#ifndef XXH_ACC_ALIGN
#  if defined(XXH_X86DISPATCH)
#    define XXH_ACC_ALIGN 64  /* for compatibility with avx512 */
#  elif XXH_VECTOR == XXH_SCALAR  /* scalar */
#    define XXH_ACC_ALIGN 8
#  elif XXH_VECTOR == XXH_SSE2  /* sse2 */
#    define XXH_ACC_ALIGN 16
#  elif XXH_VECTOR == XXH_AVX2  /* avx2 */
#    define XXH_ACC_ALIGN 32
#  elif XXH_VECTOR == XXH_NEON  /* neon */
#    define XXH_ACC_ALIGN 16
#  elif XXH_VECTOR == XXH_VSX   /* vsx */
#    define XXH_ACC_ALIGN 16
#  elif XXH_VECTOR == XXH_AVX512  /* avx512 */
#    define XXH_ACC_ALIGN 64
#  endif
#endif

#if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \
    || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
#  define XXH_SEC_ALIGN XXH_ACC_ALIGN
#else
#  define XXH_SEC_ALIGN 8
#endif
/* UGLY HACK: GCC usually generates the best code with -O3,
 * but -O2 is faster for the AVX2 code path, so force it locally. */
#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
  && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
  && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */
#  pragma GCC push_options
#  pragma GCC optimize("-O2")
#endif
#if XXH_VECTOR == XXH_NEON

/* On 32-bit ARM, a vzip.32 trick interleaves the halves in place;
 * on AArch64 the plain narrowing/shift form is used instead. */
#  if !defined(XXH_NO_VZIP_HACK) /* define to disable */ \
   && defined(__GNUC__) \
   && !defined(__aarch64__) && !defined(__arm64__) && !defined(_M_ARM64)
#    define XXH_SPLIT_IN_PLACE(in, outLo, outHi)                              \
    do {                                                                      \
      /* %e0 and %f0 select the lower and upper D halves of the Q register */ \
      __asm__("vzip.32  %e0, %f0" : "+w" (in));                               \
      (outLo) = vget_low_u32 (vreinterpretq_u32_u64(in));                     \
      (outHi) = vget_high_u32(vreinterpretq_u32_u64(in));                     \
    } while (0)
#  else
#    define XXH_SPLIT_IN_PLACE(in, outLo, outHi)                              \
    do {                                                                      \
      (outLo) = vmovn_u64    (in);                                            \
      (outHi) = vshrn_n_u64  ((in), 32);                                      \
    } while (0)
#  endif
#endif  /* XXH_VECTOR == XXH_NEON */
#if XXH_VECTOR == XXH_VSX
#  if defined(__s390x__)
#    include <s390intrin.h>
#  else
/* Force __APPLE_ALTIVEC__ so that altivec.h does not redefine the
 * bool, vector and pixel keywords. */
#    if defined(__GNUC__) && !defined(__APPLE_ALTIVEC__)
#      define __APPLE_ALTIVEC__
#    endif
#    include <altivec.h>
#  endif

typedef __vector unsigned long long xxh_u64x2;
typedef __vector unsigned char xxh_u8x16;
typedef __vector unsigned xxh_u32x4;

#  ifndef XXH_VSX_BE
#    if defined(__BIG_ENDIAN__) \
  || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#      define XXH_VSX_BE 1
#    elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
#      warning "-maltivec=be is not recommended. Please use native endianness."
#      define XXH_VSX_BE 1
#    else
#      define XXH_VSX_BE 0
#    endif
#  endif /* !defined(XXH_VSX_BE) */
#  if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__))
#    define XXH_vec_revb vec_revb
#  else
/*! A polyfill for POWER9's vec_revb(). */
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val)
{
    xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
                                  0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 };
    return vec_perm(val, val, vByteSwap);
}
#  endif

/*! Performs an unaligned vector load and byte swaps it on big endian. */
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr)
{
    xxh_u64x2 ret;
    XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2));
#  if XXH_VSX_BE
    ret = XXH_vec_revb(ret);
#  endif
    return ret;
}
/* vec_mulo and vec_mule are problematic intrinsics on PowerPC. */
#  if defined(__s390x__)
/* s390x is always big endian, no issue on this platform */
#    define XXH_vec_mulo vec_mulo
#    define XXH_vec_mule vec_mule
#  elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw)
/* Clang has a better way to control this, we can just use the builtin which doesn't swap. */
#    define XXH_vec_mulo __builtin_altivec_vmulouw
#    define XXH_vec_mule __builtin_altivec_vmuleuw
#  else
/* gcc needs inline assembly */
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b)
{
    xxh_u64x2 result;
    __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
    return result;
}
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b)
{
    xxh_u64x2 result;
    __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
    return result;
}
#  endif /* XXH_vec_mulo, XXH_vec_mule */
#endif /* XXH_VECTOR == XXH_VSX */
/* prefetch: can be disabled by declaring the XXH_NO_PREFETCH build macro */
#if defined(XXH_NO_PREFETCH)
#  define XXH_PREFETCH(ptr)  (void)(ptr)  /* disabled */
#else
#  if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))  /* _mm_prefetch() not defined outside of x86/x64 */
#    include <mmintrin.h>
#    define XXH_PREFETCH(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
#  elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
#    define XXH_PREFETCH(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
#  else
#    define XXH_PREFETCH(ptr) (void)(ptr)  /* disabled */
#  endif
#endif  /* XXH_NO_PREFETCH */
#define XXH_SECRET_DEFAULT_SIZE 192   /* minimum XXH3_SECRET_SIZE_MIN */

#if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN)
#  error "default keyset is not large enough"
#endif
/*! Pseudorandom secret taken directly from FARSH. */
XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = {
    0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c,
    0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f,
    0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21,
    0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c,
    0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3,
    0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8,
    0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d,
    0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64,
    0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb,
    0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e,
    0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce,
    0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e,
};

#ifdef XXH_OLD_NAMES
#  define kSecret XXH3_kSecret
#endif
#ifdef XXH_DOXYGEN
/*! Calculates a 32-bit to 64-bit long multiply. */
XXH_FORCE_INLINE xxh_u64
XXH_mult32to64(xxh_u64 x, xxh_u64 y)
{
    return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF);
}
#elif defined(_MSC_VER) && defined(_M_IX86)
#  define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y))
#else
/* Downcast + upcast is usually better than masking on older compilers
 * like GCC 4.2 (especially 32-bit ones), without affecting newer compilers. */
#  define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y))
#endif
static XXH128_hash_t
XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs)
{
    /* GCC/Clang __uint128_t method */
#if defined(__GNUC__) && !defined(__wasm__) \
    && defined(__SIZEOF_INT128__) \
    || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)

    __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs;
    XXH128_hash_t r128;
    r128.low64  = (xxh_u64)(product);
    r128.high64 = (xxh_u64)(product >> 64);
    return r128;

    /* MSVC for x64's _umul128 method */
#elif defined(_M_X64) || defined(_M_IA64)

#   pragma intrinsic(_umul128)
    xxh_u64 product_high;
    xxh_u64 const product_low = _umul128(lhs, rhs, &product_high);
    XXH128_hash_t r128;
    r128.low64  = product_low;
    r128.high64 = product_high;
    return r128;

    /* MSVC for ARM64's __umulh method */
#elif defined(_M_ARM64)

#   pragma intrinsic(__umulh)
    XXH128_hash_t r128;
    r128.low64  = lhs * rhs;
    r128.high64 = __umulh(lhs, rhs);
    return r128;

#else
    /* Portable scalar method: split the 64x64 multiply into 32-bit halves. */
    xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF);
    xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF);
    xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32);
    xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32);

    /* Now add the products together. These will never overflow. */
    xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
    xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32)        + hi_hi;
    xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);

    XXH128_hash_t r128;
    r128.low64  = lower;
    r128.high64 = upper;
    return r128;
#endif
}
static xxh_u64
XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs)
{
    XXH128_hash_t product = XXH_mult64to128(lhs, rhs);
    return product.low64 ^ product.high64;
}
XXH_FORCE_INLINE xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift)
{
    XXH_ASSERT(0 <= shift && shift < 64);
    return v64 ^ (v64 >> shift);
}
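/*!
 * Worked example (editor's addition, not from the original xxhash.h):
 * XXH_xorshift64 folds high bits down into low bits, e.g. with shift 32,
 * the input 0x100000000 produces 0x100000001:
 * @code{.c}
 *   XXH_ASSERT(XXH_xorshift64(0x100000000ULL, 32) == 0x100000001ULL);
 * @endcode
 */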
/* This is a fast avalanche stage, suitable when input bits are already partially mixed. */
static XXH64_hash_t XXH3_avalanche(xxh_u64 h64)
{
    h64 = XXH_xorshift64(h64, 37);
    h64 *= 0x165667919E3779F9ULL;
    h64 = XXH_xorshift64(h64, 32);
    return h64;
}

/* This is a stronger avalanche, preferable when input has not been previously mixed. */
static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len)
{
    /* this mix is inspired by Pelle Evensen's rrmxmx */
    h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24);
    h64 *= 0x9FB21C651E98DF25ULL;
    h64 ^= (h64 >> 35) + len;
    h64 *= 0x9FB21C651E98DF25ULL;
    return XXH_xorshift64(h64, 28);
}
XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(1 <= len && len <= 3);
    XXH_ASSERT(secret != NULL);
    {   xxh_u8  const c1 = input[0];
        xxh_u8  const c2 = input[len >> 1];
        xxh_u8  const c3 = input[len - 1];
        xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2  << 24)
                               | ((xxh_u32)c3 <<  0) | ((xxh_u32)len <<  8);
        xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
        xxh_u64 const keyed = (xxh_u64)combined ^ bitflip;
        return XXH64_avalanche(keyed);
    }
}
XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(4 <= len && len <= 8);
    seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
    {   xxh_u32 const input1 = XXH_readLE32(input);
        xxh_u32 const input2 = XXH_readLE32(input + len - 4);
        xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed;
        xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32);
        xxh_u64 const keyed = input64 ^ bitflip;
        return XXH3_rrmxmx(keyed, len);
    }
}
XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(9 <= len && len <= 16);
    {   xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed;
        xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed;
        xxh_u64 const input_lo = XXH_readLE64(input)           ^ bitflip1;
        xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2;
        xxh_u64 const acc = len
                          + XXH_swap64(input_lo) + input_hi
                          + XXH3_mul128_fold64(input_lo, input_hi);
        return XXH3_avalanche(acc);
    }
}
XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(len <= 16);
    {   if (XXH_likely(len >  8)) return XXH3_len_9to16_64b(input, len, secret, seed);
        if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed);
        if (len) return XXH3_len_1to3_64b(input, len, secret, seed);
        return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64)));
    }
}
XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input,
                                     const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64)
{
#if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
  && defined(__i386__) && defined(__SSE2__)  /* x86 + SSE2 */ \
  && !defined(XXH_ENABLE_AUTOVECTORIZE)      /* Define to disable like XXH32 hack */
    /* UGLY HACK: prevent GCC from lowering the 64x64 multiply into slow
     * SSE2 code on 32-bit x86. */
    XXH_COMPILER_GUARD(seed64);
#endif
    {   xxh_u64 const input_lo = XXH_readLE64(input);
        xxh_u64 const input_hi = XXH_readLE64(input+8);
        return XXH3_mul128_fold64(
            input_lo ^ (XXH_readLE64(secret)   + seed64),
            input_hi ^ (XXH_readLE64(secret+8) - seed64)
        );
    }
}
XXH_FORCE_INLINE XXH64_hash_t
XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
                     const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                     XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(16 < len && len <= 128);

    {   xxh_u64 acc = len * XXH_PRIME64_1;
        if (len > 32) {
            if (len > 64) {
                if (len > 96) {
                    acc += XXH3_mix16B(input+48, secret+96, seed);
                    acc += XXH3_mix16B(input+len-64, secret+112, seed);
                }
                acc += XXH3_mix16B(input+32, secret+64, seed);
                acc += XXH3_mix16B(input+len-48, secret+80, seed);
            }
            acc += XXH3_mix16B(input+16, secret+32, seed);
            acc += XXH3_mix16B(input+len-32, secret+48, seed);
        }
        acc += XXH3_mix16B(input+0, secret+0, seed);
        acc += XXH3_mix16B(input+len-16, secret+16, seed);

        return XXH3_avalanche(acc);
    }
}
#define XXH3_MIDSIZE_MAX 240

XXH_NO_INLINE XXH64_hash_t
XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                      XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);

#define XXH3_MIDSIZE_STARTOFFSET 3
#define XXH3_MIDSIZE_LASTOFFSET  17

    {   xxh_u64 acc = len * XXH_PRIME64_1;
        int const nbRounds = (int)len / 16;
        int i;
        for (i=0; i<8; i++) {
            acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed);
        }
        acc = XXH3_avalanche(acc);
        XXH_ASSERT(nbRounds >= 8);
#if defined(__clang__)                                /* Clang */ \
    && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
    && !defined(XXH_ENABLE_AUTOVECTORIZE)             /* Define to disable */
        /* UGLY HACK: Clang auto-vectorizes this loop poorly on NEON;
         * keep the scalar code. */
        #pragma clang loop vectorize(disable)
#endif
        for (i=8 ; i < nbRounds; i++) {
            acc += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed);
        }
        /* last bytes */
        acc += XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed);
        return XXH3_avalanche(acc);
    }
}
#define XXH_STRIPE_LEN 64
#define XXH_SECRET_CONSUME_RATE 8   /* nb of secret bytes consumed at each accumulation */
#define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64))
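/*!
 * Editor's addition (not from the original xxhash.h): with a 64-byte stripe
 * and 8-byte lanes, XXH_ACC_NB evaluates to 8 accumulator lanes, and each
 * stripe advances the secret pointer by XXH_SECRET_CONSUME_RATE = 8 bytes:
 * @code{.c}
 *   XXH_STATIC_ASSERT(XXH_ACC_NB == 8);
 * @endcode
 */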
#ifdef XXH_OLD_NAMES
#  define STRIPE_LEN XXH_STRIPE_LEN
#  define ACC_NB XXH_ACC_NB
#endif
XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64)
{
    if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64);
    XXH_memcpy(dst, &v64, sizeof(v64));
}
/* Several intrinsics below take __int64 arguments; some environments do not
 * define __int64, so a signed 64-bit stand-in is required. */
#if !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) ) )
    typedef int64_t xxh_i64;
#else
    /* the following type must have a width of 64-bit */
    typedef long long xxh_i64;
#endif
#if (XXH_VECTOR == XXH_AVX512) \
     || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0)

#ifndef XXH_TARGET_AVX512
# define XXH_TARGET_AVX512  /* disable attribute target */
#endif
XXH_FORCE_INLINE XXH_TARGET_AVX512 void
XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc,
                     const void* XXH_RESTRICT input,
                     const void* XXH_RESTRICT secret)
{
    __m512i* const xacc = (__m512i *) acc;
    XXH_ASSERT((((size_t)acc) & 63) == 0);
    XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));

    {
        /* data_vec    = input[0]; */
        __m512i const data_vec    = _mm512_loadu_si512   (input);
        /* key_vec     = secret[0]; */
        __m512i const key_vec     = _mm512_loadu_si512   (secret);
        /* data_key    = data_vec ^ key_vec; */
        __m512i const data_key    = _mm512_xor_si512     (data_vec, key_vec);
        /* data_key_lo = data_key >> 32; */
        __m512i const data_key_lo = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
        /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
        __m512i const product     = _mm512_mul_epu32     (data_key, data_key_lo);
        /* xacc[0] += swap(data_vec); */
        __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2));
        __m512i const sum       = _mm512_add_epi64(*xacc, data_swap);
        /* xacc[0] += product; */
        *xacc = _mm512_add_epi64(product, sum);
    }
}
XXH_FORCE_INLINE XXH_TARGET_AVX512 void
XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 63) == 0);
    XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
    {   __m512i* const xacc = (__m512i*) acc;
        const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1);

        /* xacc[0] ^= (xacc[0] >> 47) */
        __m512i const acc_vec     = *xacc;
        __m512i const shifted     = _mm512_srli_epi64    (acc_vec, 47);
        __m512i const data_vec    = _mm512_xor_si512     (acc_vec, shifted);
        /* xacc[0] ^= secret; */
        __m512i const key_vec     = _mm512_loadu_si512   (secret);
        __m512i const data_key    = _mm512_xor_si512     (data_vec, key_vec);

        /* xacc[0] *= XXH_PRIME32_1; */
        __m512i const data_key_hi = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
        __m512i const prod_lo     = _mm512_mul_epu32     (data_key, prime32);
        __m512i const prod_hi     = _mm512_mul_epu32     (data_key_hi, prime32);
        *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32));
    }
}
XXH_FORCE_INLINE XXH_TARGET_AVX512 void
XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0);
    XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64);
    XXH_ASSERT(((size_t)customSecret & 63) == 0);
    (void)(&XXH_writeLE64);
    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i);
        __m512i const seed = _mm512_mask_set1_epi64(_mm512_set1_epi64((xxh_i64)seed64), 0xAA, (xxh_i64)(0U - seed64));

        const __m512i* const src  = (const __m512i*) ((const void*) XXH3_kSecret);
              __m512i* const dest = (      __m512i*) customSecret;
        int i;
        XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */
        XXH_ASSERT(((size_t)dest & 63) == 0);
        for (i=0; i < nbRounds; ++i) {
            /* GCC has a bug: _mm512_stream_load_si512 accepts 'void*', not
             * 'void const*', which would warn "discards 'const' qualifier". */
            union {
                const __m512i* cp;
                void* p;
            } remote_const_void;
            remote_const_void.cp = src + i;
            dest[i] = _mm512_add_epi64(_mm512_stream_load_si512(remote_const_void.p), seed);
    }   }
}
#if (XXH_VECTOR == XXH_AVX2) \
    || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0)

#ifndef XXH_TARGET_AVX2
# define XXH_TARGET_AVX2  /* disable attribute target */
#endif
XXH_FORCE_INLINE XXH_TARGET_AVX2 void
XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc,
                    const void* XXH_RESTRICT input,
                    const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 31) == 0);
    {   __m256i* const xacc = (__m256i *) acc;
        /* Unaligned. This is mainly for pointer arithmetic, and because
         * _mm256_loadu_si256 requires a const __m256i * pointer. */
        const __m256i* const xinput  = (const __m256i *) input;
        const __m256i* const xsecret = (const __m256i *) secret;

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
            /* data_vec    = xinput[i]; */
            __m256i const data_vec    = _mm256_loadu_si256    (xinput+i);
            /* key_vec     = xsecret[i]; */
            __m256i const key_vec     = _mm256_loadu_si256   (xsecret+i);
            /* data_key    = data_vec ^ key_vec; */
            __m256i const data_key    = _mm256_xor_si256     (data_vec, key_vec);
            /* data_key_lo = data_key >> 32; */
            __m256i const data_key_lo = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
            /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
            __m256i const product     = _mm256_mul_epu32     (data_key, data_key_lo);
            /* xacc[i] += swap(data_vec); */
            __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
            __m256i const sum       = _mm256_add_epi64(xacc[i], data_swap);
            /* xacc[i] += product; */
            xacc[i] = _mm256_add_epi64(product, sum);
    }   }
}
XXH_FORCE_INLINE XXH_TARGET_AVX2 void
XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 31) == 0);
    {   __m256i* const xacc = (__m256i*) acc;
        const __m256i* const xsecret = (const __m256i *) secret;
        const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1);

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
            /* xacc[i] ^= (xacc[i] >> 47) */
            __m256i const acc_vec     = xacc[i];
            __m256i const shifted     = _mm256_srli_epi64    (acc_vec, 47);
            __m256i const data_vec    = _mm256_xor_si256     (acc_vec, shifted);
            /* xacc[i] ^= xsecret; */
            __m256i const key_vec     = _mm256_loadu_si256   (xsecret+i);
            __m256i const data_key    = _mm256_xor_si256     (data_vec, key_vec);

            /* xacc[i] *= XXH_PRIME32_1; */
            __m256i const data_key_hi = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
            __m256i const prod_lo     = _mm256_mul_epu32     (data_key, prime32);
            __m256i const prod_hi     = _mm256_mul_epu32     (data_key_hi, prime32);
            xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32));
        }
    }
}
XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0);
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6);
    XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64);
    (void)(&XXH_writeLE64);
    XXH_PREFETCH(customSecret);
    {   __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64);

        const __m256i* const src  = (const __m256i*) ((const void*) XXH3_kSecret);
              __m256i*       dest = (      __m256i*) customSecret;

#       if defined(__GNUC__) || defined(__clang__)
        /* On GCC & Clang, marking 'dest' as modified keeps the secret in
         * registers inside the loop and avoids extra stack traffic. */
        XXH_COMPILER_GUARD(dest);
#       endif
        XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */
        XXH_ASSERT(((size_t)dest & 31) == 0);

        /* GCC -O2 needs the loop unrolled manually */
        dest[0] = _mm256_add_epi64(_mm256_stream_load_si256(src+0), seed);
        dest[1] = _mm256_add_epi64(_mm256_stream_load_si256(src+1), seed);
        dest[2] = _mm256_add_epi64(_mm256_stream_load_si256(src+2), seed);
        dest[3] = _mm256_add_epi64(_mm256_stream_load_si256(src+3), seed);
        dest[4] = _mm256_add_epi64(_mm256_stream_load_si256(src+4), seed);
        dest[5] = _mm256_add_epi64(_mm256_stream_load_si256(src+5), seed);
    }
}
#if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH)

#ifndef XXH_TARGET_SSE2
# define XXH_TARGET_SSE2  /* disable attribute target */
#endif
XXH_FORCE_INLINE XXH_TARGET_SSE2 void
XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc,
                    const void* XXH_RESTRICT input,
                    const void* XXH_RESTRICT secret)
{
    /* SSE2 is just a half-scale version of the AVX2 version. */
    XXH_ASSERT((((size_t)acc) & 15) == 0);
    {   __m128i* const xacc = (__m128i *) acc;
        const __m128i* const xinput  = (const __m128i *) input;
        const __m128i* const xsecret = (const __m128i *) secret;

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
            /* data_vec    = xinput[i]; */
            __m128i const data_vec    = _mm_loadu_si128   (xinput+i);
            /* key_vec     = xsecret[i]; */
            __m128i const key_vec     = _mm_loadu_si128   (xsecret+i);
            /* data_key    = data_vec ^ key_vec; */
            __m128i const data_key    = _mm_xor_si128     (data_vec, key_vec);
            /* data_key_lo = data_key >> 32; */
            __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
            /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
            __m128i const product     = _mm_mul_epu32     (data_key, data_key_lo);
            /* xacc[i] += swap(data_vec); */
            __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2));
            __m128i const sum       = _mm_add_epi64(xacc[i], data_swap);
            /* xacc[i] += product; */
            xacc[i] = _mm_add_epi64(product, sum);
    }   }
}
XXH_FORCE_INLINE XXH_TARGET_SSE2 void
XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 15) == 0);
    {   __m128i* const xacc = (__m128i*) acc;
        const __m128i* const xsecret = (const __m128i *) secret;
        const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1);

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
            /* xacc[i] ^= (xacc[i] >> 47) */
            __m128i const acc_vec     = xacc[i];
            __m128i const shifted     = _mm_srli_epi64    (acc_vec, 47);
            __m128i const data_vec    = _mm_xor_si128     (acc_vec, shifted);
            /* xacc[i] ^= xsecret[i]; */
            __m128i const key_vec     = _mm_loadu_si128   (xsecret+i);
            __m128i const data_key    = _mm_xor_si128     (data_vec, key_vec);

            /* xacc[i] *= XXH_PRIME32_1; */
            __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
            __m128i const prod_lo     = _mm_mul_epu32     (data_key, prime32);
            __m128i const prod_hi     = _mm_mul_epu32     (data_key_hi, prime32);
            xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32));
        }
    }
}
XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
    (void)(&XXH_writeLE64);
    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i);

#       if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
        /* MSVC 32-bit mode does not support _mm_set_epi64x before 2015 */
        XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) };
        __m128i const seed = _mm_load_si128((__m128i const*)seed64x2);
#       else
        __m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64);
#       endif
        int i;

        const void* const src16 = XXH3_kSecret;
        __m128i* dst16 = (__m128i*) customSecret;
#       if defined(__GNUC__) || defined(__clang__)
        XXH_COMPILER_GUARD(dst16);
#       endif
        XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */
        XXH_ASSERT(((size_t)dst16 & 15) == 0);

        for (i=0; i < nbRounds; ++i) {
            dst16[i] = _mm_add_epi64(_mm_load_si128((const __m128i *)src16+i), seed);
    }   }
}
#if (XXH_VECTOR == XXH_NEON)

XXH_FORCE_INLINE void
XXH3_accumulate_512_neon( void* XXH_RESTRICT acc,
                    const void* XXH_RESTRICT input,
                    const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 15) == 0);
    {
        uint64x2_t* const xacc = (uint64x2_t *) acc;
        /* byte pointers: uint32x4_t pointers can cause bus errors on ARMv7 */
        uint8_t const* const xinput  = (const uint8_t *) input;
        uint8_t const* const xsecret = (const uint8_t *) secret;

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN / sizeof(uint64x2_t); i++) {
            /* data_vec = xinput[i]; */
            uint8x16_t data_vec    = vld1q_u8(xinput  + (i * 16));
            /* key_vec  = xsecret[i]; */
            uint8x16_t key_vec     = vld1q_u8(xsecret + (i * 16));
            uint64x2_t data_key;
            uint32x2_t data_key_lo, data_key_hi;
            /* xacc[i] += swap(data_vec); */
            uint64x2_t const data64  = vreinterpretq_u64_u8(data_vec);
            uint64x2_t const swapped = vextq_u64(data64, data64, 1);
            xacc[i] = vaddq_u64 (xacc[i], swapped);
            /* data_key = data_vec ^ key_vec; */
            data_key = vreinterpretq_u64_u8(veorq_u8(data_vec, key_vec));
            /* split data_key into 32-bit low and high halves */
            XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
            /* xacc[i] += (uint64x2_t) data_key_lo * (uint64x2_t) data_key_hi; */
            xacc[i] = vmlal_u32 (xacc[i], data_key_lo, data_key_hi);
        }
    }
}
XXH_FORCE_INLINE void
XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 15) == 0);

    {   uint64x2_t* xacc       = (uint64x2_t*) acc;
        uint8_t const* xsecret = (uint8_t const*) secret;
        uint32x2_t prime       = vdup_n_u32 (XXH_PRIME32_1);

        size_t i;
        for (i=0; i < XXH_STRIPE_LEN/sizeof(uint64x2_t); i++) {
            /* xacc[i] ^= (xacc[i] >> 47); */
            uint64x2_t acc_vec  = xacc[i];
            uint64x2_t shifted  = vshrq_n_u64 (acc_vec, 47);
            uint64x2_t data_vec = veorq_u64   (acc_vec, shifted);

            /* xacc[i] ^= xsecret[i]; */
            uint8x16_t key_vec  = vld1q_u8 (xsecret + (i * 16));
            uint64x2_t data_key = veorq_u64 (data_vec, vreinterpretq_u64_u8(key_vec));

            /* xacc[i] *= XXH_PRIME32_1 */
            uint32x2_t data_key_lo, data_key_hi;
            XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
            {   /* prod_hi = (data_key >> 32) * XXH_PRIME32_1; */
                uint64x2_t prod_hi = vmull_u32 (data_key_hi, prime);
                /* xacc[i] = prod_hi << 32; */
                xacc[i] = vshlq_n_u64(prod_hi, 32);
                /* xacc[i] += (data_key & 0xFFFFFFFF) * XXH_PRIME32_1; */
                xacc[i] = vmlal_u32(xacc[i], data_key_lo, prime);
            }
        }
    }
}

#endif
#if (XXH_VECTOR == XXH_VSX)

XXH_FORCE_INLINE void
XXH3_accumulate_512_vsx(  void* XXH_RESTRICT acc,
                    const void* XXH_RESTRICT input,
                    const void* XXH_RESTRICT secret)
{
    /* presumed aligned */
    unsigned long long* const xacc = (unsigned long long*) acc;
    xxh_u64x2 const* const xinput  = (xxh_u64x2 const*) input;    /* no alignment restriction */
    xxh_u64x2 const* const xsecret = (xxh_u64x2 const*) secret;   /* no alignment restriction */
    xxh_u64x2 const v32 = { 32, 32 };
    size_t i;
    for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
        /* data_vec = xinput[i]; */
        xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + i);
        /* key_vec = xsecret[i]; */
        xxh_u64x2 const key_vec  = XXH_vec_loadu(xsecret + i);
        xxh_u64x2 const data_key = data_vec ^ key_vec;
        /* shuffled = (data_key << 32) | (data_key >> 32); */
        xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32);
        /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */
        xxh_u64x2 const product  = XXH_vec_mulo((xxh_u32x4)data_key, shuffled);
        /* acc_vec = xacc[i]; */
        xxh_u64x2 acc_vec        = vec_xl(0, xacc + 2 * i);
        acc_vec += product;

        /* swap high and low halves */
#ifdef __s390x__
        acc_vec += vec_permi(data_vec, data_vec, 2);
#else
        acc_vec += vec_xxpermdi(data_vec, data_vec, 2);
#endif
        /* xacc[i] = acc_vec; */
        vec_xst(acc_vec, 0, xacc + 2 * i);
    }
}
XXH_FORCE_INLINE void
XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    XXH_ASSERT((((size_t)acc) & 15) == 0);

    {   xxh_u64x2* const xacc = (xxh_u64x2*) acc;
        const xxh_u64x2* const xsecret = (const xxh_u64x2*) secret;
        /* constants */
        xxh_u64x2 const v32 = { 32, 32 };
        xxh_u64x2 const v47 = { 47, 47 };
        xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 };
        size_t i;
        for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
            /* xacc[i] ^= (xacc[i] >> 47); */
            xxh_u64x2 const acc_vec  = xacc[i];
            xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47);

            /* xacc[i] ^= xsecret[i]; */
            xxh_u64x2 const key_vec  = XXH_vec_loadu(xsecret + i);
            xxh_u64x2 const data_key = data_vec ^ key_vec;

            /* xacc[i] *= XXH_PRIME32_1 */
            /* prod_even = (xxh_u64x2) data_key[even] * (xxh_u64x2) prime[even]; */
            xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime);
            /* prod_odd = (xxh_u64x2) data_key[odd] * (xxh_u64x2) prime[odd]; */
            xxh_u64x2 const prod_odd  = XXH_vec_mulo((xxh_u32x4)data_key, prime);
            xacc[i] = prod_odd + (prod_even << v32);
    }   }
}

#endif
/* scalar variants - universal */

XXH_FORCE_INLINE void
XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc,
                     const void* XXH_RESTRICT input,
                     const void* XXH_RESTRICT secret)
{
    xxh_u64* const xacc = (xxh_u64*) acc;                   /* presumed aligned */
    const xxh_u8* const xinput  = (const xxh_u8*) input;    /* no alignment restriction */
    const xxh_u8* const xsecret = (const xxh_u8*) secret;   /* no alignment restriction */
    size_t i;
    XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0);
    for (i=0; i < XXH_ACC_NB; i++) {
        xxh_u64 const data_val = XXH_readLE64(xinput + 8*i);
        xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + i*8);
        xacc[i ^ 1] += data_val; /* swap adjacent lanes */
        xacc[i] += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32);
    }
}
XXH_FORCE_INLINE void
XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
{
    xxh_u64* const xacc = (xxh_u64*) acc;                   /* presumed aligned */
    const xxh_u8* const xsecret = (const xxh_u8*) secret;   /* no alignment restriction */
    size_t i;
    XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0);
    for (i=0; i < XXH_ACC_NB; i++) {
        xxh_u64 const key64 = XXH_readLE64(xsecret + 8*i);
        xxh_u64 acc64 = xacc[i];
        acc64 = XXH_xorshift64(acc64, 47);
        acc64 ^= key64;
        acc64 *= XXH_PRIME32_1;
        xacc[i] = acc64;
    }
}
XXH_FORCE_INLINE void
XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
{
    /* A separate pointer is needed for the hack below, which requires a
     * non-const pointer. Any decent compiler optimizes this out otherwise. */
    const xxh_u8* kSecretPtr = XXH3_kSecret;
    XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);

#if defined(__clang__) && defined(__aarch64__)
    /* UGLY HACK: prevent Clang from turning the aarch64 code into a
     * long sequence of constant loads. */
    XXH_COMPILER_GUARD(kSecretPtr);
#endif
    /* Note: in debug mode this overrides the asm optimization
     * and Clang will emit MOVK chains again. */
    XXH_ASSERT(kSecretPtr == XXH3_kSecret);

    {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
        int i;
        for (i=0; i < nbRounds; i++) {
            /* Loading the two words before the stores lets the compiler
             * merge them into a single LDP on aarch64. */
            xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i)     + seed64;
            xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64;
            XXH_writeLE64((xxh_u8*)customSecret + 16*i,     lo);
            XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi);
    }   }
}
typedef void (*XXH3_f_accumulate_512)(void* XXH_RESTRICT, const void*, const void*);
typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*);
typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64);
#if (XXH_VECTOR == XXH_AVX512)

#define XXH3_accumulate_512   XXH3_accumulate_512_avx512
#define XXH3_scrambleAcc      XXH3_scrambleAcc_avx512
#define XXH3_initCustomSecret XXH3_initCustomSecret_avx512

#elif (XXH_VECTOR == XXH_AVX2)

#define XXH3_accumulate_512   XXH3_accumulate_512_avx2
#define XXH3_scrambleAcc      XXH3_scrambleAcc_avx2
#define XXH3_initCustomSecret XXH3_initCustomSecret_avx2

#elif (XXH_VECTOR == XXH_SSE2)

#define XXH3_accumulate_512   XXH3_accumulate_512_sse2
#define XXH3_scrambleAcc      XXH3_scrambleAcc_sse2
#define XXH3_initCustomSecret XXH3_initCustomSecret_sse2

#elif (XXH_VECTOR == XXH_NEON)

#define XXH3_accumulate_512   XXH3_accumulate_512_neon
#define XXH3_scrambleAcc      XXH3_scrambleAcc_neon
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar

#elif (XXH_VECTOR == XXH_VSX)

#define XXH3_accumulate_512   XXH3_accumulate_512_vsx
#define XXH3_scrambleAcc      XXH3_scrambleAcc_vsx
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar

#else /* scalar */

#define XXH3_accumulate_512   XXH3_accumulate_512_scalar
#define XXH3_scrambleAcc      XXH3_scrambleAcc_scalar
#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar

#endif
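
/*
 * Note (observational): on NEON and VSX targets the secret initializer
 * stays scalar in the table above; only the wide accumulate/scramble
 * kernels are vectorized there.
 */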
#ifndef XXH_PREFETCH_DIST
#  ifdef __clang__
#    define XXH_PREFETCH_DIST 320
#  else
#    if (XXH_VECTOR == XXH_AVX512)
#      define XXH_PREFETCH_DIST 512
#    else
#      define XXH_PREFETCH_DIST 384
#    endif
#  endif  /* __clang__ */
#endif  /* XXH_PREFETCH_DIST */
XXH_FORCE_INLINE void
XXH3_accumulate(     xxh_u64* XXH_RESTRICT acc,
                const xxh_u8* XXH_RESTRICT input,
                const xxh_u8* XXH_RESTRICT secret,
                      size_t nbStripes,
                      XXH3_f_accumulate_512 f_acc512)
{
    size_t n;
    for (n = 0; n < nbStripes; n++ ) {
        const xxh_u8* const in = input + n*XXH_STRIPE_LEN;
        XXH_PREFETCH(in + XXH_PREFETCH_DIST);
        f_acc512(acc,
                 in,
                 secret + n*XXH_SECRET_CONSUME_RATE);
    }
}
XXH_FORCE_INLINE void
XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc,
                      const xxh_u8* XXH_RESTRICT input, size_t len,
                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                            XXH3_f_accumulate_512 f_acc512,
                            XXH3_f_scrambleAcc f_scramble)
{
    size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE;
    size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock;
    size_t const nb_blocks = (len - 1) / block_len;

    size_t n;

    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);

    for (n = 0; n < nb_blocks; n++) {
        XXH3_accumulate(acc, input + n*block_len, secret, nbStripesPerBlock, f_acc512);
        f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN);
    }

    /* last partial block */
    XXH_ASSERT(len > XXH_STRIPE_LEN);
    {   size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN;
        XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE));
        XXH3_accumulate(acc, input + nb_blocks*block_len, secret, nbStripes, f_acc512);

        /* last stripe */
        {   const xxh_u8* const p = input + len - XXH_STRIPE_LEN;
#define XXH_SECRET_LASTACC_START 7  /* not aligned on 8, last secret is different from acc & scrambler */
            f_acc512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START);
    }   }
}
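
/*
 * Size sketch, assuming the default parameters (XXH_STRIPE_LEN = 64,
 * XXH_SECRET_CONSUME_RATE = 8, 192-byte default secret):
 *     nbStripesPerBlock = (192 - 64) / 8 = 16
 *     block_len         = 64 * 16       = 1024 bytes
 * so the accumulators get scrambled once per KiB of input.
 */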
XXH_FORCE_INLINE xxh_u64
XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret)
{
    return XXH3_mul128_fold64(
               acc[0] ^ XXH_readLE64(secret),
               acc[1] ^ XXH_readLE64(secret+8) );
}
static XXH64_hash_t
XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start)
{
    xxh_u64 result64 = start;
    size_t i;

    for (i = 0; i < 4; i++) {
        result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i);
#if defined(__clang__)                                \
    && (defined(__arm__) || defined(__thumb__))       \
    && (defined(__ARM_NEON) || defined(__ARM_NEON__)) \
    && !defined(XXH_ENABLE_AUTOVECTORIZE)
        /* Prevent autovectorization on Clang ARMv7-A + NEON, where it is slower. */
        XXH_COMPILER_GUARD(result64);
#endif
    }

    return XXH3_avalanche(result64);
}
#define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \
                        XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 }
XXH_FORCE_INLINE XXH64_hash_t
XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len,
                           const void* XXH_RESTRICT secret, size_t secretSize,
                           XXH3_f_accumulate_512 f_acc512,
                           XXH3_f_scrambleAcc f_scramble)
{
    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;

    XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc512, f_scramble);

    /* converge into final hash */
    XXH_STATIC_ASSERT(sizeof(acc) == 64);
    /* do not align on 8, so that the secret is different from the accumulator */
#define XXH_SECRET_MERGEACCS_START 11
    XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
    return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1);
}
/*
 * It's important for performance to transmit the secret's size (when it's static),
 * so that the compiler can properly optimize the vectorized loop.
 */
XXH_NO_INLINE XXH64_hash_t
XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len,
                             XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64;
    return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate_512, XXH3_scrambleAcc);
}
/*
 * It's preferable for performance that XXH3_hashLong is not inlined:
 * it results in a smaller function for small data, easier on the instruction cache.
 */
XXH_NO_INLINE XXH64_hash_t
XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len,
                          XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64; (void)secret; (void)secretLen;
    return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate_512, XXH3_scrambleAcc);
}
XXH_FORCE_INLINE XXH64_hash_t
XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len,
                                    XXH64_hash_t seed,
                                    XXH3_f_accumulate_512 f_acc512,
                                    XXH3_f_scrambleAcc f_scramble,
                                    XXH3_f_initCustomSecret f_initSec)
{
    if (seed == 0)
        return XXH3_hashLong_64b_internal(input, len,
                                          XXH3_kSecret, sizeof(XXH3_kSecret),
                                          f_acc512, f_scramble);
    {   XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
        f_initSec(secret, seed);
        return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret),
                                          f_acc512, f_scramble);
    }
}
/*
 * It's important for performance that XXH3_hashLong is not inlined.
 */
XXH_NO_INLINE XXH64_hash_t
XXH3_hashLong_64b_withSeed(const void* input, size_t len,
                           XXH64_hash_t seed, const xxh_u8* secret, size_t secretLen)
{
    (void)secret; (void)secretLen;
    return XXH3_hashLong_64b_withSeed_internal(input, len, seed,
                XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret);
}
typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t,
                                          XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t);
XXH_FORCE_INLINE XXH64_hash_t
XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len,
                     XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
                     XXH3_hashLong64_f f_hashLong)
{
    XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
    /*
     * The secretLen condition is a contract pre-condition:
     * adding a runtime check and branch here would cost performance at every hash.
     */
    if (len <= 16)
        return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
    if (len <= 128)
        return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen);
}
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* input, size_t len)
{
    return XXH3_64bits_internal(input, len, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default);
}
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize)
{
    return XXH3_64bits_internal(input, len, 0, secret, secretSize, XXH3_hashLong_64b_withSecret);
}
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSeed(const void* input, size_t len, XXH64_hash_t seed)
{
    return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed);
}
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSecretandSeed(const void* input, size_t len,
                              const void* secret, size_t secretSize,
                              XXH64_hash_t seed)
{
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
    return XXH3_hashLong_64b_withSecret(input, len, seed, (const xxh_u8*)secret, secretSize);
}
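
/*
 * Usage sketch (illustrative, not part of the library itself;
 * `buf` / `bufSize` are hypothetical caller-side names):
 *
 *     XXH64_hash_t h1 = XXH3_64bits(buf, bufSize);
 *     XXH64_hash_t h2 = XXH3_64bits_withSeed(buf, bufSize, 1234);
 */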
static void* XXH_alignedMalloc(size_t s, size_t align)
{
    XXH_ASSERT(align <= 128 && align >= 8); /* range check */
    XXH_ASSERT((align & (align-1)) == 0);   /* power of 2 */
    XXH_ASSERT(s != 0 && s < (s + align));  /* empty/overflow */
    {   /* Overallocate to make room for manual realignment and an offset byte */
        xxh_u8* base = (xxh_u8*)XXH_malloc(s + align);
        if (base != NULL) {
            /*
             * Get the offset needed to align this pointer.
             * Even if the returned pointer is aligned, there will always be
             * at least one byte to store the offset to the original pointer.
             */
            size_t offset = align - ((size_t)base & (align - 1)); /* base % align */
            /* Add the offset for the now-aligned pointer */
            xxh_u8* ptr = base + offset;

            XXH_ASSERT((size_t)ptr % align == 0);

            /* Store the offset immediately before the returned pointer. */
            ptr[-1] = (xxh_u8)offset;
            return ptr;
        }
        return NULL;
    }
}
/* Frees an aligned pointer allocated by XXH_alignedMalloc(). */
static void XXH_alignedFree(void* p)
{
    if (p != NULL) {
        xxh_u8* ptr = (xxh_u8*)p;
        /* Get the offset byte we added in XXH_alignedMalloc. */
        xxh_u8 offset = ptr[-1];
        /* Free the original malloc'd pointer */
        xxh_u8* base = ptr - offset;
        XXH_free(base);
    }
}
XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void)
{
    XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64);
    if (state==NULL) return NULL;
    XXH3_INITSTATE(state);
    return state;
}

XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr)
{
    XXH_alignedFree(statePtr);
    return XXH_OK;
}
XXH_PUBLIC_API void
XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state)
{
    XXH_memcpy(dst_state, src_state, sizeof(*dst_state));
}
static void
XXH3_reset_internal(XXH3_state_t* statePtr,
                    XXH64_hash_t seed,
                    const void* secret, size_t secretSize)
{
    size_t const initStart = offsetof(XXH3_state_t, bufferedSize);
    size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart;
    XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart);
    XXH_ASSERT(statePtr != NULL);
    /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */
    memset((char*)statePtr + initStart, 0, initLength);
    statePtr->acc[0] = XXH_PRIME32_3;
    statePtr->acc[1] = XXH_PRIME64_1;
    statePtr->acc[2] = XXH_PRIME64_2;
    statePtr->acc[3] = XXH_PRIME64_3;
    statePtr->acc[4] = XXH_PRIME64_4;
    statePtr->acc[5] = XXH_PRIME32_2;
    statePtr->acc[6] = XXH_PRIME64_5;
    statePtr->acc[7] = XXH_PRIME32_1;
    statePtr->seed = seed;
    statePtr->useSeed = (seed != 0);
    statePtr->extSecret = (const unsigned char*)secret;
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
    statePtr->secretLimit = secretSize - XXH_STRIPE_LEN;
    statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE;
}
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset(XXH3_state_t* statePtr)
{
    if (statePtr == NULL) return XXH_ERROR;
    XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
    return XXH_OK;
}
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize)
{
    if (statePtr == NULL) return XXH_ERROR;
    XXH3_reset_internal(statePtr, 0, secret, secretSize);
    if (secret == NULL) return XXH_ERROR;
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
    return XXH_OK;
}
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed)
{
    if (statePtr == NULL) return XXH_ERROR;
    if (seed==0) return XXH3_64bits_reset(statePtr);
    if ((seed != statePtr->seed) || (statePtr->extSecret != NULL))
        XXH3_initCustomSecret(statePtr->customSecret, seed);
    XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
    return XXH_OK;
}
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecretandSeed(XXH3_state_t* statePtr,
                                    const void* secret, size_t secretSize,
                                    XXH64_hash_t seed64)
{
    if (statePtr == NULL) return XXH_ERROR;
    if (secret == NULL) return XXH_ERROR;
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
    XXH3_reset_internal(statePtr, seed64, secret, secretSize);
    statePtr->useSeed = 1; /* always, even if seed64==0 */
    return XXH_OK;
}
/* Note: when XXH3_consumeStripes() is invoked,
 * there must be a guarantee that at least one more byte will be consumed from input,
 * so that the function can blindly consume all stripes using the "normal" secret segment. */
XXH_FORCE_INLINE void
XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc,
                    size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock,
                    const xxh_u8* XXH_RESTRICT input, size_t nbStripes,
                    const xxh_u8* XXH_RESTRICT secret, size_t secretLimit,
                    XXH3_f_accumulate_512 f_acc512,
                    XXH3_f_scrambleAcc f_scramble)
{
    XXH_ASSERT(nbStripes <= nbStripesPerBlock);  /* can handle max 1 scramble per invocation */
    XXH_ASSERT(*nbStripesSoFarPtr < nbStripesPerBlock);
    if (nbStripesPerBlock - *nbStripesSoFarPtr <= nbStripes) {
        /* need a scrambling operation */
        size_t const nbStripesToEndofBlock = nbStripesPerBlock - *nbStripesSoFarPtr;
        size_t const nbStripesAfterBlock = nbStripes - nbStripesToEndofBlock;
        XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripesToEndofBlock, f_acc512);
        f_scramble(acc, secret + secretLimit);
        XXH3_accumulate(acc, input + nbStripesToEndofBlock * XXH_STRIPE_LEN, secret, nbStripesAfterBlock, f_acc512);
        *nbStripesSoFarPtr = nbStripesAfterBlock;
    } else {
        XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripes, f_acc512);
        *nbStripesSoFarPtr += nbStripes;
    }
}
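
/*
 * Invariant sketch: *nbStripesSoFarPtr tracks the position inside the
 * current block, so callers may split input arbitrarily across calls;
 * at most one scramble happens per invocation because
 * nbStripes <= nbStripesPerBlock is asserted on entry.
 */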
#ifndef XXH3_STREAM_USE_STACK
# ifndef __clang__ /* clang doesn't need additional stack space */
#   define XXH3_STREAM_USE_STACK 1
# endif
#endif
XXH_FORCE_INLINE XXH_errorcode
XXH3_update(XXH3_state_t* XXH_RESTRICT const state,
            const xxh_u8* XXH_RESTRICT input, size_t len,
            XXH3_f_accumulate_512 f_acc512,
            XXH3_f_scrambleAcc f_scramble)
{
    if (input==NULL) {
        XXH_ASSERT(len == 0);
        return XXH_OK;
    }

    XXH_ASSERT(state != NULL);
    {   const xxh_u8* const bEnd = input + len;
        const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
        /* gcc and MSVC suffer when operating accumulators directly in state;
         * operating into stack space enables proper optimization.
         * clang, on the other hand, doesn't need this trick. */
        XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8]; memcpy(acc, state->acc, sizeof(acc));
#else
        xxh_u64* XXH_RESTRICT const acc = state->acc;
#endif
        state->totalLen += len;
        XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE);

        /* small input : just fill in tmp buffer */
        if (state->bufferedSize + len <= XXH3_INTERNALBUFFER_SIZE) {
            XXH_memcpy(state->buffer + state->bufferedSize, input, len);
            state->bufferedSize += (XXH32_hash_t)len;
            return XXH_OK;
        }

        /* total input is now > XXH3_INTERNALBUFFER_SIZE */
#define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN)
        XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0);   /* clean multiple */

        /*
         * Internal buffer is partially filled (always, except at beginning).
         * Complete it, then consume it.
         */
        if (state->bufferedSize) {
            size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize;
            XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize);
            input += loadSize;
            XXH3_consumeStripes(acc,
                               &state->nbStripesSoFar, state->nbStripesPerBlock,
                                state->buffer, XXH3_INTERNALBUFFER_STRIPES,
                                secret, state->secretLimit,
                                f_acc512, f_scramble);
            state->bufferedSize = 0;
        }
        XXH_ASSERT(input < bEnd);

        /* large input to consume : ingest per full block */
        if ((size_t)(bEnd - input) > state->nbStripesPerBlock * XXH_STRIPE_LEN) {
            size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN;
            XXH_ASSERT(state->nbStripesPerBlock >= state->nbStripesSoFar);
            /* join to current block's end */
            {   size_t const nbStripesToEnd = state->nbStripesPerBlock - state->nbStripesSoFar;
                XXH_ASSERT(nbStripesToEnd <= nbStripes);
                XXH3_accumulate(acc, input, secret + state->nbStripesSoFar * XXH_SECRET_CONSUME_RATE, nbStripesToEnd, f_acc512);
                f_scramble(acc, secret + state->secretLimit);
                state->nbStripesSoFar = 0;
                input += nbStripesToEnd * XXH_STRIPE_LEN;
                nbStripes -= nbStripesToEnd;
            }
            /* consume per entire blocks */
            while(nbStripes >= state->nbStripesPerBlock) {
                XXH3_accumulate(acc, input, secret, state->nbStripesPerBlock, f_acc512);
                f_scramble(acc, secret + state->secretLimit);
                input += state->nbStripesPerBlock * XXH_STRIPE_LEN;
                nbStripes -= state->nbStripesPerBlock;
            }
            /* consume last partial block */
            XXH3_accumulate(acc, input, secret, nbStripes, f_acc512);
            input += nbStripes * XXH_STRIPE_LEN;
            XXH_ASSERT(input < bEnd);  /* at least some bytes left */
            state->nbStripesSoFar = nbStripes;
            /* buffer predecessor of last partial stripe */
            XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
            XXH_ASSERT(bEnd - input <= XXH_STRIPE_LEN);
        } else {
            /* content to consume <= block size */
            /* Consume input by a multiple of internal buffer size */
            if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) {
                const xxh_u8* const limit = bEnd - XXH3_INTERNALBUFFER_SIZE;
                do {
                    XXH3_consumeStripes(acc,
                                       &state->nbStripesSoFar, state->nbStripesPerBlock,
                                        input, XXH3_INTERNALBUFFER_STRIPES,
                                        secret, state->secretLimit,
                                        f_acc512, f_scramble);
                    input += XXH3_INTERNALBUFFER_SIZE;
                } while (input<limit);
                /* buffer predecessor of last partial stripe */
                XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
            }
        }

        /* Some remaining input (always) : buffer it */
        XXH_ASSERT(input < bEnd);
        XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE);
        XXH_ASSERT(state->bufferedSize == 0);
        XXH_memcpy(state->buffer, input, (size_t)(bEnd-input));
        state->bufferedSize = (XXH32_hash_t)(bEnd-input);
#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
        /* save stack accumulators back into state */
        memcpy(state->acc, acc, sizeof(acc));
#endif
    }

    return XXH_OK;
}
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_update(XXH3_state_t* state, const void* input, size_t len)
{
    return XXH3_update(state, (const xxh_u8*)input, len,
                       XXH3_accumulate_512, XXH3_scrambleAcc);
}
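
/*
 * Streaming usage sketch (illustrative; `chunk` / `chunkSize` are
 * hypothetical caller-side names):
 *
 *     XXH3_state_t* const st = XXH3_createState();
 *     if (st != NULL && XXH3_64bits_reset(st) == XXH_OK) {
 *         XXH3_64bits_update(st, chunk, chunkSize);   // repeat per chunk
 *         { XXH64_hash_t const h = XXH3_64bits_digest(st); (void)h; }
 *     }
 *     XXH3_freeState(st);
 */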
XXH_FORCE_INLINE void
XXH3_digest_long (XXH64_hash_t* acc,
                  const XXH3_state_t* state,
                  const unsigned char* secret)
{
    /*
     * Digest on a local copy. This way, the state remains unaltered, and it can
     * continue ingesting more input afterwards.
     */
    XXH_memcpy(acc, state->acc, sizeof(state->acc));
    if (state->bufferedSize >= XXH_STRIPE_LEN) {
        size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN;
        size_t nbStripesSoFar = state->nbStripesSoFar;
        XXH3_consumeStripes(acc,
                           &nbStripesSoFar, state->nbStripesPerBlock,
                            state->buffer, nbStripes,
                            secret, state->secretLimit,
                            XXH3_accumulate_512, XXH3_scrambleAcc);
        /* last stripe */
        XXH3_accumulate_512(acc,
                            state->buffer + state->bufferedSize - XXH_STRIPE_LEN,
                            secret + state->secretLimit - XXH_SECRET_LASTACC_START);
    } else {  /* bufferedSize < XXH_STRIPE_LEN */
        xxh_u8 lastStripe[XXH_STRIPE_LEN];
        size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize;
        XXH_ASSERT(state->bufferedSize > 0);  /* there is always some input buffered */
        XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize);
        XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize);
        XXH3_accumulate_512(acc,
                            lastStripe,
                            secret + state->secretLimit - XXH_SECRET_LASTACC_START);
    }
}
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* state)
{
    const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
    if (state->totalLen > XXH3_MIDSIZE_MAX) {
        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
        XXH3_digest_long(acc, state, secret);
        return XXH3_mergeAccs(acc,
                              secret + XXH_SECRET_MERGEACCS_START,
                              (xxh_u64)state->totalLen * XXH_PRIME64_1);
    }
    /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */
    if (state->useSeed)
        return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
    return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen),
                                  secret, state->secretLimit + XXH_STRIPE_LEN);
}
/* ==========================================
 * XXH3 128 bits (a.k.a XXH128)
 * ========================================== */

XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    /* A doubled version of 1to3_64b with different constants. */
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(1 <= len && len <= 3);
    XXH_ASSERT(secret != NULL);
    {   xxh_u8  const c1 = input[0];
        xxh_u8  const c2 = input[len >> 1];
        xxh_u8  const c3 = input[len - 1];
        xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24)
                                | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
        xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13);
        xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
        xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed;
        xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl;
        xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph;
        XXH128_hash_t h128;
        h128.low64  = XXH64_avalanche(keyed_lo);
        h128.high64 = XXH64_avalanche(keyed_hi);
        return h128;
    }
}
XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(4 <= len && len <= 8);
    seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
    {   xxh_u32 const input_lo = XXH_readLE32(input);
        xxh_u32 const input_hi = XXH_readLE32(input + len - 4);
        xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32);
        xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed;
        xxh_u64 const keyed = input_64 ^ bitflip;

        /* Shift len to the left to ensure it is even, this avoids even multiplies. */
        XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2));

        m128.high64 += (m128.low64 << 1);
        m128.low64  ^= (m128.high64 >> 3);

        m128.low64   = XXH_xorshift64(m128.low64, 35);
        m128.low64  *= 0x9FB21C651E98DF25ULL;
        m128.low64   = XXH_xorshift64(m128.low64, 28);
        m128.high64  = XXH3_avalanche(m128.high64);
        return m128;
    }
}
XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(9 <= len && len <= 16);
    {   xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed;
        xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed;
        xxh_u64 const input_lo = XXH_readLE64(input);
        xxh_u64       input_hi = XXH_readLE64(input + len - 8);
        XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1);
        /*
         * Put len in the middle of m128 to ensure that the length gets mixed to
         * both the low and high bits in the 128x64 multiply below.
         */
        m128.low64 += (xxh_u64)(len - 1) << 54;
        input_hi   ^= bitfliph;
        /*
         * Add the high 32 bits of input_hi to the high 32 bits of m128, then
         * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2
         * to the high 64 bits of m128.
         */
        if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */
            m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2);
        } else {
            m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1);
        }
        /* m128 ^= XXH_swap64(m128 >> 64); */
        m128.low64 ^= XXH_swap64(m128.high64);

        {   /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */
            XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2);
            h128.high64 += m128.high64 * XXH_PRIME64_2;

            h128.low64  = XXH3_avalanche(h128.low64);
            h128.high64 = XXH3_avalanche(h128.high64);
            return h128;
    }   }
}
/*
 * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN
 */
XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(len <= 16);
    {   if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed);
        if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed);
        if (len) return XXH3_len_1to3_128b(input, len, secret, seed);
        {   XXH128_hash_t h128;
            xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72);
            xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88);
            h128.low64  = XXH64_avalanche(seed ^ bitflipl);
            h128.high64 = XXH64_avalanche(seed ^ bitfliph);
            return h128;
    }   }
}
/*
 * A bit slower than XXH3_mix16B, but handles multiply by zero better.
 */
XXH_FORCE_INLINE XXH128_hash_t
XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2,
              const xxh_u8* secret, XXH64_hash_t seed)
{
    acc.low64  += XXH3_mix16B (input_1, secret+0, seed);
    acc.low64  ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8);
    acc.high64 += XXH3_mix16B (input_2, secret+16, seed);
    acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8);
    return acc;
}
XXH_FORCE_INLINE XXH128_hash_t
XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                      XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(16 < len && len <= 128);

    {   XXH128_hash_t acc;
        acc.low64 = len * XXH_PRIME64_1;
        acc.high64 = 0;
        if (len > 32) {
            if (len > 64) {
                if (len > 96) {
                    acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed);
                }
                acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed);
            }
            acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed);
        }
        acc = XXH128_mix32B(acc, input, input+len-16, secret, seed);
        {   XXH128_hash_t h128;
            h128.low64  = acc.low64 + acc.high64;
            h128.high64 = (acc.low64    * XXH_PRIME64_1)
                        + (acc.high64   * XXH_PRIME64_4)
                        + ((len - seed) * XXH_PRIME64_2);
            h128.low64  = XXH3_avalanche(h128.low64);
            h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
            return h128;
    }   }
}
XXH_NO_INLINE XXH128_hash_t
XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
                       const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                       XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);

    {   XXH128_hash_t acc;
        int const nbRounds = (int)len / 32;
        int i;
        acc.low64 = len * XXH_PRIME64_1;
        acc.high64 = 0;
        for (i=0; i<4; i++) {
            acc = XXH128_mix32B(acc,
                                input  + (32 * i),
                                input  + (32 * i) + 16,
                                secret + (32 * i),
                                seed);
        }
        acc.low64 = XXH3_avalanche(acc.low64);
        acc.high64 = XXH3_avalanche(acc.high64);
        XXH_ASSERT(nbRounds >= 4);
        for (i=4 ; i < nbRounds; i++) {
            acc = XXH128_mix32B(acc,
                                input + (32 * i),
                                input + (32 * i) + 16,
                                secret + XXH3_MIDSIZE_STARTOFFSET + (32 * (i - 4)),
                                seed);
        }
        /* last bytes */
        acc = XXH128_mix32B(acc,
                            input + len - 16,
                            input + len - 32,
                            secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
                            0ULL - seed);

        {   XXH128_hash_t h128;
            h128.low64  = acc.low64 + acc.high64;
            h128.high64 = (acc.low64    * XXH_PRIME64_1)
                        + (acc.high64   * XXH_PRIME64_4)
                        + ((len - seed) * XXH_PRIME64_2);
            h128.low64  = XXH3_avalanche(h128.low64);
            h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
            return h128;
    }   }
}
XXH_FORCE_INLINE XXH128_hash_t
XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len,
                            const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                            XXH3_f_accumulate_512 f_acc512,
                            XXH3_f_scrambleAcc f_scramble)
{
    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;

    XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc512, f_scramble);

    /* converge into final hash */
    XXH_STATIC_ASSERT(sizeof(acc) == 64);
    XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
    {   XXH128_hash_t h128;
        h128.low64  = XXH3_mergeAccs(acc,
                                     secret + XXH_SECRET_MERGEACCS_START,
                                     (xxh_u64)len * XXH_PRIME64_1);
        h128.high64 = XXH3_mergeAccs(acc,
                                     secret + secretSize
                                            - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
                                     ~((xxh_u64)len * XXH_PRIME64_2));
        return h128;
    }
}
/*
 * It's important for performance that XXH3_hashLong is not inlined.
 */
XXH_NO_INLINE XXH128_hash_t
XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len,
                           XXH64_hash_t seed64,
                           const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64; (void)secret; (void)secretLen;
    return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret),
                                       XXH3_accumulate_512, XXH3_scrambleAcc);
}
/*
 * It's important for performance to pass the secret's length (when it's static)
 * to the compiler, so that it can properly optimize the vectorized loop.
 */
XXH_NO_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len,
                              XXH64_hash_t seed64,
                              const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64;
    return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen,
                                       XXH3_accumulate_512, XXH3_scrambleAcc);
}
XXH_FORCE_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len,
                                     XXH64_hash_t seed64,
                                     XXH3_f_accumulate_512 f_acc512,
                                     XXH3_f_scrambleAcc f_scramble,
                                     XXH3_f_initCustomSecret f_initSec)
{
    if (seed64 == 0)
        return XXH3_hashLong_128b_internal(input, len,
                                           XXH3_kSecret, sizeof(XXH3_kSecret),
                                           f_acc512, f_scramble);
    {   XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
        f_initSec(secret, seed64);
        return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret),
                                           f_acc512, f_scramble);
    }
}
/*
 * It's important for performance that XXH3_hashLong is not inlined.
 */
XXH_NO_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSeed(const void* input, size_t len,
                            XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)secret; (void)secretLen;
    return XXH3_hashLong_128b_withSeed_internal(input, len, seed64,
                XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret);
}
typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t,
                                            XXH64_hash_t, const void* XXH_RESTRICT, size_t);
XXH_FORCE_INLINE XXH128_hash_t
XXH3_128bits_internal(const void* input, size_t len,
                      XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
                      XXH3_hashLong128_f f_hl128)
{
    XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
    /*
     * The secretLen condition is a contract pre-condition:
     * adding a runtime check and branch here would cost performance at every hash.
     */
    if (len <= 16)
        return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
    if (len <= 128)
        return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    return f_hl128(input, len, seed64, secret, secretLen);
}
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* input, size_t len)
{
    return XXH3_128bits_internal(input, len, 0,
                                 XXH3_kSecret, sizeof(XXH3_kSecret),
                                 XXH3_hashLong_128b_default);
}
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize)
{
    return XXH3_128bits_internal(input, len, 0,
                                 (const xxh_u8*)secret, secretSize,
                                 XXH3_hashLong_128b_withSecret);
}
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSeed(const void* input, size_t len, XXH64_hash_t seed)
{
    return XXH3_128bits_internal(input, len, seed,
                                 XXH3_kSecret, sizeof(XXH3_kSecret),
                                 XXH3_hashLong_128b_withSeed);
}
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecretandSeed(const void* input, size_t len,
                               const void* secret, size_t secretSize,
                               XXH64_hash_t seed)
{
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
    return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize);
}
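
/*
 * Usage sketch (illustrative): the 128-bit one-shot variants mirror the
 * 64-bit ones; `buf` / `bufSize` are hypothetical caller-side names.
 *
 *     XXH128_hash_t const h = XXH3_128bits(buf, bufSize);
 *     // h.low64 and h.high64 hold the two halves of the result
 */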
/* ===   XXH3 128-bit streaming   === */
/* All initialization and update steps are the same as for the 64-bit variant:
 * the state is shared, only the digest differs. */

XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr,
                                     const void* secret, size_t secretSize,
                                     XXH64_hash_t seed)
{
    return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, seed);
}
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_update(XXH3_state_t* state, const void* input, size_t len)
{
    return XXH3_update(state, (const xxh_u8*)input, len,
                       XXH3_accumulate_512, XXH3_scrambleAcc);
}
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* state)
{
    const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
    if (state->totalLen > XXH3_MIDSIZE_MAX) {
        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
        XXH3_digest_long(acc, state, secret);
        XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
        {   XXH128_hash_t h128;
            h128.low64  = XXH3_mergeAccs(acc,
                                         secret + XXH_SECRET_MERGEACCS_START,
                                         (xxh_u64)state->totalLen * XXH_PRIME64_1);
            h128.high64 = XXH3_mergeAccs(acc,
                                         secret + state->secretLimit + XXH_STRIPE_LEN
                                                - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
                                         ~((xxh_u64)state->totalLen * XXH_PRIME64_2));
            return h128;
        }
    }
    /* totalLen <= XXH3_MIDSIZE_MAX : short code */
    if (state->seed)
        return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
    return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
                                   secret, state->secretLimit + XXH_STRIPE_LEN);
}
/* 128-bit utility functions */

#include <string.h>   /* memcmp, memcpy */

/* return : 1 if equal, 0 if different */
XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
{
    /* note : XXH128_hash_t is compact, it has no padding byte */
    return !(memcmp(&h1, &h2, sizeof(h1)));
}
/* This prototype is compatible with stdlib's qsort().
 * return : >0 if *h128_1  > *h128_2
 *          <0 if *h128_1  < *h128_2
 *          =0 if *h128_1 == *h128_2  */
XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2)
{
    XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1;
    XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2;
    int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
    /* note : bets that, in most cases, hash values are different */
    if (hcmp) return hcmp;
    return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
}
/*======   Canonical representation   ======*/
XXH_PUBLIC_API void
XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) {
        hash.high64 = XXH_swap64(hash.high64);
        hash.low64  = XXH_swap64(hash.low64);
    }
    XXH_memcpy(dst, &hash.high64, sizeof(hash.high64));
    XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
}
XXH_PUBLIC_API XXH128_hash_t
XXH128_hashFromCanonical(const XXH128_canonical_t* src)
{
    XXH128_hash_t h;
    h.high64 = XXH_readBE64(src);
    h.low64  = XXH_readBE64(src->digest + 8);
    return h;
}
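
/*
 * Round-trip sketch (illustrative): canonical form is big-endian and
 * therefore byte-order independent.
 *
 *     XXH128_canonical_t c;
 *     XXH128_canonicalFromHash(&c, h);
 *     XXH128_hash_t const back = XXH128_hashFromCanonical(&c);
 *     // XXH128_isEqual(h, back) == 1
 */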
#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))
XXH_FORCE_INLINE void XXH3_combine16(void* dst, XXH128_hash_t h128)
{
    XXH_writeLE64( dst, XXH_readLE64(dst) ^ h128.low64 );
    XXH_writeLE64( (char*)dst+8, XXH_readLE64((char*)dst+8) ^ h128.high64 );
}
XXH_PUBLIC_API XXH_errorcode
XXH3_generateSecret(void* secretBuffer, size_t secretSize, const void* customSeed, size_t customSeedSize)
{
    XXH_ASSERT(secretBuffer != NULL);
    if (secretBuffer == NULL) return XXH_ERROR;
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
    if (customSeedSize == 0) {
        customSeed = XXH3_kSecret;
        customSeedSize = XXH_SECRET_DEFAULT_SIZE;
    }
    XXH_ASSERT(customSeed != NULL);
    if (customSeed == NULL) return XXH_ERROR;

    /* Fill secretBuffer with a copy of customSeed - repeat as needed */
    {   size_t pos = 0;
        while (pos < secretSize) {
            size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize);
            memcpy((char*)secretBuffer + pos, customSeed, toCopy);
            pos += toCopy;
    }   }

    {   size_t const nbSeg16 = secretSize / 16;
        size_t n;
        XXH128_canonical_t scrambler;
        XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
        for (n=0; n<nbSeg16; n++) {
            XXH128_hash_t const h128 = XXH128(&scrambler, sizeof(scrambler), n);
            XXH3_combine16((char*)secretBuffer + n*16, h128);
        }
        /* last segment */
        XXH3_combine16((char*)secretBuffer + secretSize - 16, XXH128_hashFromCanonical(&scrambler));
    }
    return XXH_OK;
}
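
/*
 * Usage sketch (illustrative; `seedData` / `seedDataSize` / `buf` / `bufSize`
 * are hypothetical caller-side names):
 *
 *     char secret[XXH3_SECRET_SIZE_MIN];
 *     if (XXH3_generateSecret(secret, sizeof(secret), seedData, seedDataSize) == XXH_OK) {
 *         XXH64_hash_t const h = XXH3_64bits_withSecret(buf, bufSize, secret, sizeof(secret));
 *         (void)h;
 *     }
 */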
XXH_PUBLIC_API void
XXH3_generateSecret_fromSeed(void* secretBuffer, XXH64_hash_t seed)
{
    XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
    XXH3_initCustomSecret(secret, seed);
    XXH_ASSERT(secretBuffer != NULL);
    memcpy(secretBuffer, secret, XXH_SECRET_DEFAULT_SIZE);
}
/* Pop our optimization override from above */
#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
  && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
  && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */
#  pragma GCC pop_options
#endif
#if defined (__cplusplus)
} /* extern "C" */
#endif