/*
 * xxHash - Extremely Fast Hash algorithm
 * Header File
 * Copyright (C) 2012-2020 Yann Collet
 *
 * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following disclaimer
 *      in the documentation and/or other materials provided with the
 *      distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You can contact the author at:
 *   - xxHash homepage: https://www.xxhash.com
 *   - xxHash source repository: https://github.com/Cyan4973/xxHash
 */
/* TODO: update */
/* Notice extracted from xxHash homepage:

xxHash is an extremely fast hash algorithm, running at RAM speed limits.
It also successfully passes all tests from the SMHasher suite.

Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)

Name            Speed       Q.Score   Author
xxHash          5.4 GB/s     10
CrapWow         3.2 GB/s      2       Andrew
MurmurHash 3a   2.7 GB/s     10       Austin Appleby
SpookyHash      2.0 GB/s     10       Bob Jenkins
SBox            1.4 GB/s      9       Bret Mulvey
Lookup3         1.2 GB/s      9       Bob Jenkins
SuperFastHash   1.2 GB/s      1       Paul Hsieh
CityHash64      1.05 GB/s    10       Pike & Alakuijala
FNV             0.55 GB/s     5       Fowler, Noll, Vo
CRC32           0.43 GB/s     9
MD5-32          0.33 GB/s    10       Ronald L. Rivest
SHA1-32         0.28 GB/s    10

Q.Score is a measure of quality of the hash function.
It depends on successfully passing SMHasher test set.
10 is a perfect score.

Note: SMHasher's CRC32 implementation is not the fastest one.
Other speed-oriented implementations can be faster,
especially in combination with the PCLMUL instruction:
https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html?showComment=1552696407071#c3490092340461170735

A 64-bit version, named XXH64, is available since r35.
It offers much better speed, but for 64-bit applications only.

Name     Speed on 64 bits     Speed on 32 bits
XXH64       13.8 GB/s            1.9 GB/s
XXH32        6.8 GB/s            6.0 GB/s
*/

#if defined (__cplusplus)
extern "C" {
#endif

/* ****************************
 *  INLINE mode
 ******************************/
#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \
    && !defined(XXH_INLINE_ALL_31684351384)
   /* this section should be traversed only once */
#  define XXH_INLINE_ALL_31684351384
   /* give access to the advanced API, required to compile implementations */
#  undef XXH_STATIC_LINKING_ONLY   /* avoid macro redef */
#  define XXH_STATIC_LINKING_ONLY
   /* make all functions private */
#  undef XXH_PUBLIC_API
#  if defined(__GNUC__)
#    define XXH_PUBLIC_API static __inline __attribute__((unused))
#  elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#    define XXH_PUBLIC_API static inline
#  elif defined(_MSC_VER)
#    define XXH_PUBLIC_API static __inline
#  else
     /* note: this version may generate warnings for unused static functions */
#    define XXH_PUBLIC_API static
#  endif

   /*
    * This part deals with the special case where a unit wants to inline xxHash,
    * but "xxhash.h" has previously been included without XXH_INLINE_ALL,
    * such as part of some previously included *.h header file.
    * Without further action, the new include would just be ignored,
    * and functions would effectively _not_ be inlined (silent failure).
    * The following macros solve this situation by prefixing all inlined names,
    * avoiding naming collision with previous inclusions.
    */
   /*
    * Before that, we unconditionally #undef all symbols,
    * in case they were already defined with XXH_NAMESPACE.
    * They will then be redefined for XXH_INLINE_ALL
    */
#  undef XXH_versionNumber
   /* XXH32 */
#  undef XXH32
#  undef XXH32_createState
#  undef XXH32_freeState
#  undef XXH32_reset
#  undef XXH32_update
#  undef XXH32_digest
#  undef XXH32_copyState
#  undef XXH32_canonicalFromHash
#  undef XXH32_hashFromCanonical
   /* XXH64 */
#  undef XXH64
#  undef XXH64_createState
#  undef XXH64_freeState
#  undef XXH64_reset
#  undef XXH64_update
#  undef XXH64_digest
#  undef XXH64_copyState
#  undef XXH64_canonicalFromHash
#  undef XXH64_hashFromCanonical
   /* XXH3_64bits */
#  undef XXH3_64bits
#  undef XXH3_64bits_withSecret
#  undef XXH3_64bits_withSeed
#  undef XXH3_64bits_withSecretandSeed
#  undef XXH3_createState
#  undef XXH3_freeState
#  undef XXH3_copyState
#  undef XXH3_64bits_reset
#  undef XXH3_64bits_reset_withSeed
#  undef XXH3_64bits_reset_withSecret
#  undef XXH3_64bits_update
#  undef XXH3_64bits_digest
#  undef XXH3_generateSecret
   /* XXH3_128bits */
#  undef XXH128
#  undef XXH3_128bits
#  undef XXH3_128bits_withSeed
#  undef XXH3_128bits_withSecret
#  undef XXH3_128bits_reset
#  undef XXH3_128bits_reset_withSeed
#  undef XXH3_128bits_reset_withSecret
#  undef XXH3_128bits_reset_withSecretandSeed
#  undef XXH3_128bits_update
#  undef XXH3_128bits_digest
#  undef XXH128_isEqual
#  undef XXH128_cmp
#  undef XXH128_canonicalFromHash
#  undef XXH128_hashFromCanonical
   /* Finally, free the namespace itself */
#  undef XXH_NAMESPACE

   /* employ the namespace for XXH_INLINE_ALL */
#  define XXH_NAMESPACE XXH_INLINE_
   /*
    * Some identifiers (enums, type names) are not symbols,
    * but they must nonetheless be renamed to avoid redeclaration.
    * Alternative solution: do not redeclare them.
    * However, this requires some #ifdefs, and has a more dispersed impact.
    * Meanwhile, renaming can be achieved in a single place.
    */
#  define XXH_IPREF(Id)   XXH_NAMESPACE ## Id
#  define XXH_OK XXH_IPREF(XXH_OK)
#  define XXH_ERROR XXH_IPREF(XXH_ERROR)
#  define XXH_errorcode XXH_IPREF(XXH_errorcode)
#  define XXH32_canonical_t  XXH_IPREF(XXH32_canonical_t)
#  define XXH64_canonical_t  XXH_IPREF(XXH64_canonical_t)
#  define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t)
#  define XXH32_state_s XXH_IPREF(XXH32_state_s)
#  define XXH32_state_t XXH_IPREF(XXH32_state_t)
#  define XXH64_state_s XXH_IPREF(XXH64_state_s)
#  define XXH64_state_t XXH_IPREF(XXH64_state_t)
#  define XXH3_state_s  XXH_IPREF(XXH3_state_s)
#  define XXH3_state_t  XXH_IPREF(XXH3_state_t)
#  define XXH128_hash_t XXH_IPREF(XXH128_hash_t)
   /* Ensure the header is parsed again, even if it was previously included */
#  undef XXHASH_H_5627135585666179
#  undef XXHASH_H_STATIC_13879238742
#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */



/* ****************************************************************
 *  Stable API
 *****************************************************************/
#ifndef XXHASH_H_5627135585666179
#define XXHASH_H_5627135585666179 1


/* specific declaration modes for Windows */
#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
#  if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
#    ifdef XXH_EXPORT
#      define XXH_PUBLIC_API __declspec(dllexport)
#    elif XXH_IMPORT
#      define XXH_PUBLIC_API __declspec(dllimport)
#    endif
#  else
#    define XXH_PUBLIC_API   /* do nothing */
#  endif
#endif

#ifdef XXH_DOXYGEN

#  define XXH_NAMESPACE /* YOUR NAME HERE */
#  undef XXH_NAMESPACE
#endif

#ifdef XXH_NAMESPACE
#  define XXH_CAT(A,B) A##B
#  define XXH_NAME2(A,B) XXH_CAT(A,B)
#  define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
/* XXH32 */
#  define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
#  define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
#  define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
#  define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
#  define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
#  define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
#  define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
#  define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
#  define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
/* XXH64 */
#  define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
#  define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
#  define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
#  define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
#  define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
#  define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
#  define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
#  define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
#  define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
/* XXH3_64bits */
#  define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
#  define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
#  define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
#  define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed)
#  define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
#  define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
#  define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
#  define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
#  define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
#  define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
#  define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed)
#  define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
#  define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
#  define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
#  define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed)
/* XXH3_128bits */
#  define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
#  define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
#  define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
#  define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
#  define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed)
#  define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
#  define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
#  define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
#  define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed)
#  define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
#  define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
#  define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
#  define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
#  define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
#  define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
#endif


/* *************************************
*  Version
***************************************/
#define XXH_VERSION_MAJOR    0
#define XXH_VERSION_MINOR    8
#define XXH_VERSION_RELEASE  1
#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)

XXH_PUBLIC_API unsigned XXH_versionNumber (void);


/* ****************************
*  Common basic types
******************************/
#include <stddef.h>   /* size_t */
typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;


/*-**********************************************************************
*  32-bit hash
************************************************************************/
#if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */

typedef uint32_t XXH32_hash_t;

#elif !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#  include <stdint.h>
   typedef uint32_t XXH32_hash_t;

#else
#  include <limits.h>
#  if UINT_MAX == 0xFFFFFFFFUL
     typedef unsigned int XXH32_hash_t;
#  else
#    if ULONG_MAX == 0xFFFFFFFFUL
       typedef unsigned long XXH32_hash_t;
#    else
#      error "unsupported platform: need a 32-bit type"
#    endif
#  endif
#endif

XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed);

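/*
 * Example: one-shot hashing with XXH32. This is an illustrative sketch, not
 * part of the original header; the message content and seed value are
 * arbitrary choices.
 *
 *   #include <string.h>
 *   #include "xxhash.h"
 *
 *   XXH32_hash_t hash_message(void)
 *   {
 *       const char* msg = "hello world";
 *       XXH32_hash_t const seed = 0;           // any 32-bit value works
 *       return XXH32(msg, strlen(msg), seed);  // whole input in one call
 *   }
 */
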
typedef struct XXH32_state_s XXH32_state_t;

XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
XXH_PUBLIC_API XXH_errorcode  XXH32_freeState(XXH32_state_t* statePtr);

XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);

XXH_PUBLIC_API XXH_errorcode XXH32_reset  (XXH32_state_t* statePtr, XXH32_hash_t seed);

XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);

XXH_PUBLIC_API XXH32_hash_t  XXH32_digest (const XXH32_state_t* statePtr);

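/*
 * Example: streaming the same hash over multiple updates. Illustrative
 * sketch; `chunk1`/`chunk2` stand in for data arriving incrementally. The
 * result is identical to hashing the concatenated input with XXH32() in one
 * shot.
 *
 *   XXH32_hash_t hash_two_chunks(const void* chunk1, size_t len1,
 *                                const void* chunk2, size_t len2)
 *   {
 *       XXH32_hash_t result = 0;
 *       XXH32_state_t* const state = XXH32_createState();
 *       if (state == NULL) return 0;                 // allocation failure
 *       if (XXH32_reset(state, 0) == XXH_OK
 *        && XXH32_update(state, chunk1, len1) == XXH_OK
 *        && XXH32_update(state, chunk2, len2) == XXH_OK) {
 *           result = XXH32_digest(state);            // state remains usable
 *       }
 *       XXH32_freeState(state);
 *       return result;
 *   }
 */
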
/******* Canonical representation *******/

/*
 * The default return values from XXH functions are unsigned 32- and 64-bit
 * integers.
 * This is the simplest and fastest format for further post-processing.
 *
 * However, this leaves open the question of byte-level ordering,
 * since little-endian and big-endian conventions will store the same number differently.
 *
 * The canonical representation settles this issue by mandating the big-endian
 * convention, the same convention as human-readable numbers (large digits first).
 *
 * When writing hash values to storage, sending them over a network, or printing
 * them, it's highly recommended to use the canonical representation to ensure
 * portability across a wider range of systems, present and future.
 *
 * The following functions allow transformation of hash values to and from
 * canonical format.
 */

typedef struct {
    unsigned char digest[4];
} XXH32_canonical_t;

XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);

XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);

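/*
 * Example: writing a hash in canonical (big-endian) form, then reading it
 * back. Illustrative sketch; `fp` is assumed to be an open FILE*.
 *
 *   #include <stdio.h>
 *
 *   void store_hash(FILE* fp, XXH32_hash_t hash)
 *   {
 *       XXH32_canonical_t canonical;
 *       XXH32_canonicalFromHash(&canonical, hash);   // endian-independent bytes
 *       fwrite(canonical.digest, 1, sizeof(canonical.digest), fp);
 *   }
 *
 *   XXH32_hash_t load_hash(FILE* fp)
 *   {
 *       XXH32_canonical_t canonical;
 *       if (fread(canonical.digest, 1, sizeof(canonical.digest), fp) != sizeof(canonical.digest))
 *           return 0;
 *       return XXH32_hashFromCanonical(&canonical);  // back to native integer
 *   }
 */
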
#ifdef __has_attribute
#  define XXH_HAS_ATTRIBUTE(x) __has_attribute(x)
#else
#  define XXH_HAS_ATTRIBUTE(x) 0
#endif

/* C-language Attributes are added in C23. */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute)
#  define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
#else
#  define XXH_HAS_C_ATTRIBUTE(x) 0
#endif

#if defined(__cplusplus) && defined(__has_cpp_attribute)
#  define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
#else
#  define XXH_HAS_CPP_ATTRIBUTE(x) 0
#endif

/*
 * Define the XXH_FALLTHROUGH macro for annotating switch cases with the
 * 'fallthrough' attribute introduced in C++17 and C23.
 * C++17: https://en.cppreference.com/w/cpp/language/attributes/fallthrough
 * C23  : https://en.cppreference.com/w/c/language/attributes/fallthrough
 */
#if XXH_HAS_C_ATTRIBUTE(fallthrough)
#  define XXH_FALLTHROUGH [[fallthrough]]
#elif XXH_HAS_CPP_ATTRIBUTE(fallthrough)
#  define XXH_FALLTHROUGH [[fallthrough]]
#elif XXH_HAS_ATTRIBUTE(__fallthrough__)
#  define XXH_FALLTHROUGH __attribute__ ((fallthrough))
#else
#  define XXH_FALLTHROUGH
#endif
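/*
 * Example: how XXH_FALLTHROUGH is meant to be used. Illustrative sketch; it
 * silences implicit-fallthrough warnings while keeping deliberate
 * fall-through behavior.
 *
 *   static int consume(int len)
 *   {
 *       int n = 0;
 *       switch (len & 3) {
 *       case 3: n++;
 *               XXH_FALLTHROUGH;   // deliberate: also run case 2 and 1
 *       case 2: n++;
 *               XXH_FALLTHROUGH;
 *       case 1: n++;
 *               XXH_FALLTHROUGH;
 *       case 0: break;
 *       }
 *       return n;
 *   }
 */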

#ifndef XXH_NO_LONG_LONG
/*-**********************************************************************
*  64-bit hash
************************************************************************/
#if defined(XXH_DOXYGEN) /* don't include <stdint.h> */

typedef uint64_t XXH64_hash_t;
#elif !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#  include <stdint.h>
   typedef uint64_t XXH64_hash_t;
#else
#  include <limits.h>
#  if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
     /* LP64 ABI says uint64_t is unsigned long */
     typedef unsigned long XXH64_hash_t;
#  else
     /* the following type must have a width of 64-bit */
     typedef unsigned long long XXH64_hash_t;
#  endif
#endif

XXH_PUBLIC_API XXH64_hash_t XXH64(const void* input, size_t length, XXH64_hash_t seed);

/******* Streaming *******/
typedef struct XXH64_state_s XXH64_state_t;   /* incomplete type */
XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);
XXH_PUBLIC_API XXH_errorcode  XXH64_freeState(XXH64_state_t* statePtr);
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state);

XXH_PUBLIC_API XXH_errorcode XXH64_reset  (XXH64_state_t* statePtr, XXH64_hash_t seed);
XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH64_hash_t  XXH64_digest (const XXH64_state_t* statePtr);

/******* Canonical representation *******/
typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t;
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);

/*-**********************************************************************
*  XXH3 64-bit variant
************************************************************************/

/* XXH3_64bits():
 * default 64-bit variant, using default secret and default seed of 0.
 * It's the fastest variant. */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* data, size_t len);

/*
 * XXH3_64bits_withSeed():
 * This variant generates a custom secret on the fly,
 * based on the default secret altered using the `seed` value.
 * While this operation is decently fast, note that it's not completely free.
 * Note: seed==0 produces the same results as XXH3_64bits().
 */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void* data, size_t len, XXH64_hash_t seed);

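/*
 * Example: the XXH3 64-bit one-shot variants side by side. Illustrative
 * sketch; `buf`/`size` stand for any input buffer, and the seed value is an
 * arbitrary choice.
 *
 *   XXH64_hash_t h_default = XXH3_64bits(buf, size);                 // seed 0, fastest
 *   XXH64_hash_t h_seeded  = XXH3_64bits_withSeed(buf, size, 1234);  // custom seed
 *   // h_default == XXH3_64bits_withSeed(buf, size, 0)
 */
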
#define XXH3_SECRET_SIZE_MIN 136

/*
 * XXH3_64bits_withSecret():
 * It's possible to provide any blob of bytes as a "secret" to generate the hash.
 * This makes it more difficult for an external actor to prepare an intentional collision.
 * The main condition is that secretSize *must* be large enough (>= XXH3_SECRET_SIZE_MIN).
 * However, the quality of the secret impacts the dispersion of the hash algorithm.
 * Therefore, the secret _must_ look like a bunch of random bytes.
 * Avoid "trivial" or structured data such as repeated sequences or a text document.
 * Whenever in doubt about the "randomness" of the blob of bytes,
 * consider employing "XXH3_generateSecret()" instead (see below).
 * It will generate a proper high-entropy secret derived from the blob of bytes.
 * Another advantage of using XXH3_generateSecret() is that
 * it guarantees that all bits within the initial blob of bytes
 * will impact every bit of the output.
 * This is not necessarily the case when using the blob of bytes directly
 * because, when hashing _small_ inputs, only a portion of the secret is employed.
 */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);


/******* Streaming *******/
/*
 * Streaming requires state maintenance.
 * This operation costs memory and CPU.
 * As a consequence, streaming is slower than one-shot hashing.
 * For better performance, prefer one-shot functions whenever applicable.
 */

typedef struct XXH3_state_s XXH3_state_t;
XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void);
XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr);
XXH_PUBLIC_API void XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state);

/*
 * XXH3_64bits_reset():
 * Initialize with default parameters.
 * The digest will be equivalent to `XXH3_64bits()`.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t* statePtr);
/*
 * XXH3_64bits_reset_withSeed():
 * Generate a custom secret from `seed`, and store it into `statePtr`.
 * The digest will be equivalent to `XXH3_64bits_withSeed()`.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
/*
 * XXH3_64bits_reset_withSecret():
 * `secret` is referenced, and it _must outlive_ the hash streaming session.
 * As in the one-shot API, `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`,
 * and the quality of the produced hash values depends on the secret's entropy
 * (the secret's content should look like a bunch of random bytes).
 * When in doubt about the randomness of a candidate `secret`,
 * consider employing `XXH3_generateSecret()` instead (see below).
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);

XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH64_hash_t  XXH3_64bits_digest (const XXH3_state_t* statePtr);

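/*
 * Example: XXH3 64-bit streaming. Illustrative sketch mirroring the XXH32
 * streaming example above; `read_more()` is a hypothetical input source.
 *
 *   XXH64_hash_t hash_stream(void)
 *   {
 *       XXH3_state_t* const state = XXH3_createState();
 *       XXH64_hash_t result = 0;
 *       char buf[4096];
 *       size_t n;
 *       if (state == NULL) return 0;
 *       if (XXH3_64bits_reset(state) == XXH_OK) {
 *           while ((n = read_more(buf, sizeof(buf))) > 0)
 *               XXH3_64bits_update(state, buf, n);
 *           result = XXH3_64bits_digest(state);
 *       }
 *       XXH3_freeState(state);
 *       return result;
 *   }
 */
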
/* note: the canonical representation of XXH3 is the same as XXH64,
 * since they both produce XXH64_hash_t values */


/*-**********************************************************************
*  XXH3 128-bit variant
************************************************************************/

typedef struct {
    XXH64_hash_t low64;
    XXH64_hash_t high64;
} XXH128_hash_t;

XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* data, size_t len);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void* data, size_t len, XXH64_hash_t seed);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);

/******* Streaming *******/
/*
 * Streaming requires state maintenance.
 * This operation costs memory and CPU.
 * As a consequence, streaming is slower than one-shot hashing.
 * For better performance, prefer one-shot functions whenever applicable.
 *
 * XXH3_128bits uses the same XXH3_state_t as XXH3_64bits().
 * Use the already declared XXH3_createState() and XXH3_freeState().
 *
 * All reset and streaming functions have the same meaning as their 64-bit counterpart.
 */

XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH3_state_t* statePtr);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);

XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* statePtr);

/* The following helper functions make it possible to compare XXH128_hash_t values.
 * Since XXH128_hash_t is a structure, this capability is not offered by the language.
 * Note: for better performance, these functions can be inlined using XXH_INLINE_ALL */

XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);

XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2);


/******* Canonical representation *******/
typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t;
XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash);
XXH_PUBLIC_API XXH128_hash_t XXH128_hashFromCanonical(const XXH128_canonical_t* src);

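/*
 * Example: comparing 128-bit hashes. Illustrative sketch; XXH128_cmp() takes
 * void pointers so it can double as a qsort()/bsearch() comparator.
 *
 *   int same = XXH128_isEqual(XXH3_128bits(a, lenA), XXH3_128bits(b, lenB));
 *
 *   // sorting an array of hashes:
 *   // qsort(hashes, count, sizeof(XXH128_hash_t), XXH128_cmp);
 */
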

#endif  /* XXH_NO_LONG_LONG */

#endif  /* XXHASH_H_5627135585666179 */



#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
#define XXHASH_H_STATIC_13879238742
/* ****************************************************************************
 * This section contains declarations which are not guaranteed to remain stable.
 * They may change in future versions, becoming incompatible with a different
 * version of the library.
 * These declarations should only be used with static linking.
 * Never use them in association with dynamic linking!
 ***************************************************************************** */

/*
 * These definitions are only present to allow static allocation
 * of XXH states, on stack or in a struct, for example.
 * Never **ever** access their members directly.
 */

struct XXH32_state_s {
   XXH32_hash_t total_len_32;
   XXH32_hash_t large_len;
   XXH32_hash_t v[4];
   XXH32_hash_t mem32[4];
   XXH32_hash_t memsize;
   XXH32_hash_t reserved;
};   /* typedef'd to XXH32_state_t */


#ifndef XXH_NO_LONG_LONG  /* defined when there is no 64-bit support */

struct XXH64_state_s {
   XXH64_hash_t total_len;
   XXH64_hash_t v[4];
   XXH64_hash_t mem64[4];
   XXH32_hash_t memsize;
   XXH32_hash_t reserved32;
   XXH64_hash_t reserved64;
};   /* typedef'd to XXH64_state_t */

#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* >= C11 */
#  include <stdalign.h>
#  define XXH_ALIGN(n)      alignas(n)
#elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */
/* In C++ alignas() is a keyword */
#  define XXH_ALIGN(n)      alignas(n)
#elif defined(__GNUC__)
#  define XXH_ALIGN(n)      __attribute__ ((aligned(n)))
#elif defined(_MSC_VER)
#  define XXH_ALIGN(n)      __declspec(align(n))
#else
#  define XXH_ALIGN(n)   /* disabled */
#endif

/* Old GCC versions only accept the attribute after the type in structures. */
#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L))   /* C11+ */ \
    && ! (defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \
    && defined(__GNUC__)
#  define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
#else
#  define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
#endif

#define XXH3_INTERNALBUFFER_SIZE 256

#define XXH3_SECRET_DEFAULT_SIZE 192

struct XXH3_state_s {
   XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
   XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
   XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
   XXH32_hash_t bufferedSize;
   XXH32_hash_t useSeed;
   size_t nbStripesSoFar;
   XXH64_hash_t totalLen;
   size_t nbStripesPerBlock;
   size_t secretLimit;
   XXH64_hash_t seed;
   XXH64_hash_t reserved64;
   const unsigned char* extSecret;
   /* note: there may be some padding at the end due to alignment on 64 bytes */
};   /* typedef'd to XXH3_state_t */

#undef XXH_ALIGN_MEMBER

#define XXH3_INITSTATE(XXH3_state_ptr)   { (XXH3_state_ptr)->seed = 0; }

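/*
 * Example: static allocation of an XXH3 state, which avoids
 * XXH3_createState()/XXH3_freeState(). Illustrative sketch; a stack-allocated
 * state must be initialized with XXH3_INITSTATE() before its first reset.
 *
 *   XXH64_hash_t hash_on_stack(const void* data, size_t len)
 *   {
 *       XXH3_state_t state;
 *       XXH3_INITSTATE(&state);              // required before first use
 *       XXH3_64bits_reset(&state);
 *       XXH3_64bits_update(&state, data, len);
 *       return XXH3_64bits_digest(&state);   // no free needed
 *   }
 */
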

/* XXH128() :
 * simple alias to pre-selected XXH3_128bits variant
 */
XXH_PUBLIC_API XXH128_hash_t XXH128(const void* data, size_t len, XXH64_hash_t seed);


/* ===   Experimental API   === */
/* Symbols defined below must be considered tied to a specific library version. */

/*
 * XXH3_generateSecret():
 *
 * Derive a high-entropy secret from any user-defined content, named customSeed.
 * The generated secret can be used in combination with `*_withSecret()` functions.
 * The `_withSecret()` variants are useful to provide a higher level of protection than a 64-bit seed,
 * as it becomes much more difficult for an external actor to guess how to impact the calculation logic.
 *
 * The function accepts as input a custom seed of any length and any content,
 * and derives from it a high-entropy secret of length @secretSize
 * into an already allocated buffer @secretBuffer.
 * @secretSize must be >= XXH3_SECRET_SIZE_MIN
 *
 * The generated secret can then be used with any `*_withSecret()` variant.
 * Functions `XXH3_128bits_withSecret()`, `XXH3_64bits_withSecret()`,
 * `XXH3_128bits_reset_withSecret()` and `XXH3_64bits_reset_withSecret()`
 * are part of this list. They all accept a `secret` parameter
 * which must be large enough for implementation reasons (>= XXH3_SECRET_SIZE_MIN)
 * _and_ feature very high entropy (consist of random-looking bytes).
 * These conditions can be a high bar to meet, so
 * XXH3_generateSecret() can be employed to ensure proper quality.
 *
 * customSeed can be anything. It can have any size, even small ones,
 * and its content can be anything, even "poor entropy" sources such as a bunch of zeroes.
 * The resulting `secret` will nonetheless provide all required qualities.
 *
 * When customSeedSize > 0, supplying NULL as customSeed is undefined behavior.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(void* secretBuffer, size_t secretSize, const void* customSeed, size_t customSeedSize);

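/*
 * Example: deriving a proper secret from low-entropy material, then hashing
 * with it. Illustrative sketch; `app_key` is a stand-in for any
 * application-specific byte string, and `data`/`len` for the input to hash.
 *
 *   unsigned char secret[XXH3_SECRET_SIZE_MIN];
 *   const char app_key[] = "my-application-key";   // arbitrary content
 *
 *   if (XXH3_generateSecret(secret, sizeof(secret), app_key, sizeof(app_key)) == XXH_OK) {
 *       XXH64_hash_t h = XXH3_64bits_withSecret(data, len, secret, sizeof(secret));
 *       (void)h;
 *   }
 */
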

/*
 * XXH3_generateSecret_fromSeed():
 *
 * Generate the same secret as the _withSeed() variants.
 *
 * The resulting secret has a length of XXH3_SECRET_DEFAULT_SIZE (necessarily).
 * @secretBuffer must be already allocated, of size at least XXH3_SECRET_DEFAULT_SIZE bytes.
 *
 * The generated secret can be used in combination with
 * `*_withSecret()` and `_withSecretandSeed()` variants.
 * This generator is notably useful in combination with `_withSecretandSeed()`,
 * as a way to emulate a faster `_withSeed()` variant.
 */
XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(void* secretBuffer, XXH64_hash_t seed);

/*
 * *_withSecretandSeed() :
 * These variants generate hash values using either
 * @seed for "short" keys (< XXH3_MIDSIZE_MAX = 240 bytes)
 * or @secret for "large" keys (>= XXH3_MIDSIZE_MAX).
 *
 * This generally benefits speed, compared to `_withSeed()` or `_withSecret()`.
 * `_withSeed()` has to generate the secret on the fly for "large" keys.
 * It's fast, but can be perceptible for "not so large" keys (< 1 KB).
 * `_withSecret()` has to generate the masks on the fly for "small" keys,
 * which requires more instructions than the _withSeed() variants.
 * Therefore, the _withSecretandSeed() variant combines the best of both worlds.
 *
 * When @secret has been generated by XXH3_generateSecret_fromSeed(),
 * this variant produces *exactly* the same results as the `_withSeed()` variant,
 * hence offering only a pure speed benefit on "large" inputs,
 * by skipping the need to regenerate the secret for every large input.
 *
 * Another usage scenario is to hash the secret to a 64-bit hash value,
 * for example with XXH3_64bits(), which then becomes the seed,
 * and then employ both the seed and the secret in _withSecretandSeed().
 * On top of speed, an added benefit is that each bit in the secret
 * has a 50% chance to swap each bit in the output,
 * via its impact on the seed.
 * This is not guaranteed when using the secret directly in "small data" scenarios,
 * because only portions of the secret are employed for small data.
 */
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSecretandSeed(const void* data, size_t len,
                              const void* secret, size_t secretSize,
                              XXH64_hash_t seed);

XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecretandSeed(const void* data, size_t len,
                               const void* secret, size_t secretSize,
                               XXH64_hash_t seed64);

XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecretandSeed(XXH3_state_t* statePtr,
                                    const void* secret, size_t secretSize,
                                    XXH64_hash_t seed64);

XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr,
                                     const void* secret, size_t secretSize,
                                     XXH64_hash_t seed64);

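/*
 * Example: emulating a faster _withSeed() using _withSecretandSeed().
 * Illustrative sketch; the secret is generated once and reused, so large
 * inputs skip the per-call secret generation that _withSeed() performs.
 * `data`/`len` stand for any input buffer.
 *
 *   static unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
 *   XXH64_hash_t const seed = 20220101;   // arbitrary
 *
 *   XXH3_generateSecret_fromSeed(secret, seed);   // once, at startup
 *
 *   // later, for each input; same result as XXH3_64bits_withSeed(data, len, seed):
 *   // XXH64_hash_t h = XXH3_64bits_withSecretandSeed(data, len, secret, sizeof(secret), seed);
 */
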

#endif  /* XXH_NO_LONG_LONG */
#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
#  define XXH_IMPLEMENTATION
#endif

#endif  /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */



/* ======================================================================== */
/* ======================================================================== */
/* ======================================================================== */


/*-**********************************************************************
 * xxHash implementation
 *-**********************************************************************
 * xxHash's implementation used to be hosted inside xxhash.c.
 *
 * However, inlining requires the implementation to be visible to the compiler,
 * hence be included alongside the header.
 * Previously, the implementation was hosted inside xxhash.c,
 * which was then #included when inlining was activated.
 * This construction created issues with a few build and install systems,
 * as it required xxhash.c to be stored in the /include directory.
 *
 * The xxHash implementation is now directly integrated within xxhash.h.
 * As a consequence, xxhash.c is no longer needed in /include.
 *
 * xxhash.c is still available and is still useful.
 * In a "normal" setup, when xxhash is not inlined,
 * xxhash.h only exposes the prototypes and public symbols,
 * while xxhash.c can be built into an object file xxhash.o
 * which can then be linked into the final binary.
 ************************************************************************/

#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
   || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
#  define XXH_IMPLEM_13a8737387

/* *************************************
*  Tuning parameters
***************************************/

#ifdef XXH_DOXYGEN

#  define XXH_NO_LONG_LONG
#  undef XXH_NO_LONG_LONG   /* don't actually */

#  define XXH_FORCE_MEMORY_ACCESS 0

#  define XXH_FORCE_ALIGN_CHECK 0

#  define XXH_NO_INLINE_HINTS 0

#  define XXH32_ENDJMP 0

#  define XXH_OLD_NAMES
#  undef XXH_OLD_NAMES   /* don't actually use, it is ugly. */
#endif /* XXH_DOXYGEN */

#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
   /* prefer __packed__ structures (method 1) for GCC on armv7+ and mips */
#  if !defined(__clang__) && \
( \
    (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
    ( \
        defined(__GNUC__) && ( \
            (defined(__ARM_ARCH) && __ARM_ARCH >= 7) || \
            ( \
                defined(__mips__) && \
                (__mips <= 5 || __mips_isa_rev < 6) && \
                (!defined(__mips16) || defined(__mips_mips16e2)) \
            ) \
        ) \
    ) \
)
#    define XXH_FORCE_MEMORY_ACCESS 1
#  endif
#endif

#ifndef XXH_FORCE_ALIGN_CHECK   /* can be defined externally */
#  if defined(__i386)  || defined(__x86_64__) || defined(__aarch64__) \
   || defined(_M_IX86) || defined(_M_X64)     || defined(_M_ARM64) /* visual */
#    define XXH_FORCE_ALIGN_CHECK 0
#  else
#    define XXH_FORCE_ALIGN_CHECK 1
#  endif
#endif

#ifndef XXH_NO_INLINE_HINTS
#  if defined(__OPTIMIZE_SIZE__) /* -Os, -Oz */ \
   || defined(__NO_INLINE__)     /* -O0, -fno-inline */
#    define XXH_NO_INLINE_HINTS 1
#  else
#    define XXH_NO_INLINE_HINTS 0
#  endif
#endif

#ifndef XXH32_ENDJMP
/* generally preferable for performance */
#  define XXH32_ENDJMP 0
#endif

/* *************************************
*  Includes & Memory related functions
***************************************/
/*
 * Modify the local functions below should you wish to use
 * different memory routines for malloc() and free()
 */
#include <stdlib.h>

static void* XXH_malloc(size_t s) { return malloc(s); }

static void XXH_free(void* p) { free(p); }

#include <string.h>

static void* XXH_memcpy(void* dest, const void* src, size_t size)
{
    return memcpy(dest,src,size);
}

#include <limits.h>   /* ULLONG_MAX */


/* *************************************
*  Compiler Specific Options
***************************************/
#ifdef _MSC_VER /* Visual Studio warning fix */
#  pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
#endif

#if XXH_NO_INLINE_HINTS   /* disable inlining hints */
#  if defined(__GNUC__) || defined(__clang__)
#    define XXH_FORCE_INLINE static __attribute__((unused))
#  else
#    define XXH_FORCE_INLINE static
#  endif
#  define XXH_NO_INLINE static
/* enable inlining hints */
#elif defined(__GNUC__) || defined(__clang__)
#  define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused))
#  define XXH_NO_INLINE static __attribute__((noinline))
#elif defined(_MSC_VER)   /* Visual Studio */
#  define XXH_FORCE_INLINE static __forceinline
#  define XXH_NO_INLINE static __declspec(noinline)
#elif defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L))   /* C99 */
#  define XXH_FORCE_INLINE static inline
#  define XXH_NO_INLINE static
#else
#  define XXH_FORCE_INLINE static
#  define XXH_NO_INLINE static
#endif



/* *************************************
*  Debug
***************************************/
#ifndef XXH_DEBUGLEVEL
#  ifdef DEBUGLEVEL   /* backwards compat */
#    define XXH_DEBUGLEVEL DEBUGLEVEL
#  else
#    define XXH_DEBUGLEVEL 0
#  endif
#endif

#if (XXH_DEBUGLEVEL>=1)
#  include <assert.h>   /* note: can still be disabled with NDEBUG */
#  define XXH_ASSERT(c)   assert(c)
#else
#  define XXH_ASSERT(c)   ((void)0)
#endif

/* note: use after variable declarations */
#ifndef XXH_STATIC_ASSERT
#  if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)   /* C11 */
#    include <assert.h>
#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
#  elif defined(__cplusplus) && (__cplusplus >= 201103L)           /* C++11 */
#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
#  else
#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0)
#  endif
#  define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c)
#endif

#if defined(__GNUC__) || defined(__clang__)
#  define XXH_COMPILER_GUARD(var) __asm__ __volatile__("" : "+r" (var))
#else
#  define XXH_COMPILER_GUARD(var) ((void)0)
#endif

/* *************************************
*  Basic Types
***************************************/
#if !defined (__VMS) \
 && (defined (__cplusplus) \
 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#  include <stdint.h>
   typedef uint8_t xxh_u8;
#else
   typedef unsigned char xxh_u8;
#endif
typedef XXH32_hash_t xxh_u32;

#ifdef XXH_OLD_NAMES
#  define BYTE xxh_u8
#  define U8   xxh_u8
#  define U32  xxh_u32
#endif

/* ***   Memory access   *** */

#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
/*
 * Manual byteshift. Best for old compilers which don't inline memcpy.
 * We actually directly use XXH_readLE32 and XXH_readBE32.
 */
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/*
 * Force direct memory access. Only works on CPUs which support unaligned memory
 * access in hardware.
 */
static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; }

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

/*
 * __pack instructions are safer but compiler specific, hence potentially
 * problematic for some compilers.
 *
 * Currently only defined for GCC and ICC.
 */
#ifdef XXH_OLD_NAMES
typedef union { xxh_u32 u32; } __attribute__((packed)) unalign;
#endif
static xxh_u32 XXH_read32(const void* ptr)
{
    typedef union { xxh_u32 u32; } __attribute__((packed)) xxh_unalign;
    return ((const xxh_unalign*)ptr)->u32;
}

#else

/*
 * Portable and safe solution. Generally efficient.
 * see: http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
 */
static xxh_u32 XXH_read32(const void* memPtr)
{
    xxh_u32 val;
    XXH_memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif   /* XXH_FORCE_DIRECT_MEMORY_ACCESS */


/* ***   Endianness   *** */

#ifndef XXH_CPU_LITTLE_ENDIAN
/*
 * Try to detect endianness automatically, to avoid the nonstandard behavior
 * in `XXH_isLittleEndian()`
 */
#  if defined(_WIN32) /* Windows is always little endian */ \
     || defined(__LITTLE_ENDIAN__) \
     || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#    define XXH_CPU_LITTLE_ENDIAN 1
#  elif defined(__BIG_ENDIAN__) \
     || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#    define XXH_CPU_LITTLE_ENDIAN 0
#  else

static int XXH_isLittleEndian(void)
{
    /*
     * Portable and well-defined behavior.
     * Don't use static: it is detrimental to performance.
     */
    const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 };
    return one.c[0];
}
#    define XXH_CPU_LITTLE_ENDIAN   XXH_isLittleEndian()
#  endif
#endif




/* ****************************************
*  Compiler-specific Functions and Macros
******************************************/
#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

#ifdef __has_builtin
#  define XXH_HAS_BUILTIN(x) __has_builtin(x)
#else
#  define XXH_HAS_BUILTIN(x) 0
#endif

#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
                               && XXH_HAS_BUILTIN(__builtin_rotateleft64)
#  define XXH_rotl32 __builtin_rotateleft32
#  define XXH_rotl64 __builtin_rotateleft64
/* Note: although _rotl exists for MinGW (GCC under Windows), performance seems poor */
#elif defined(_MSC_VER)
#  define XXH_rotl32(x,r) _rotl(x,r)
#  define XXH_rotl64(x,r) _rotl64(x,r)
#else
#  define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
#  define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
#endif

#if defined(_MSC_VER)     /* Visual Studio */
#  define XXH_swap32 _byteswap_ulong
#elif XXH_GCC_VERSION >= 403
#  define XXH_swap32 __builtin_bswap32
#else
static xxh_u32 XXH_swap32 (xxh_u32 x)
{
    return  ((x << 24) & 0xff000000 ) |
            ((x <<  8) & 0x00ff0000 ) |
            ((x >>  8) & 0x0000ff00 ) |
            ((x >> 24) & 0x000000ff );
}
#endif


/* ***************************
*  Memory reads
*****************************/

typedef enum {
    XXH_aligned,
    XXH_unaligned
} XXH_alignment;

/*
 * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
 *
 * This is ideal for older compilers which don't inline memcpy.
 */
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))

XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[0]
         | ((xxh_u32)bytePtr[1] << 8)
         | ((xxh_u32)bytePtr[2] << 16)
         | ((xxh_u32)bytePtr[3] << 24);
}

XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[3]
         | ((xxh_u32)bytePtr[2] << 8)
         | ((xxh_u32)bytePtr[1] << 16)
         | ((xxh_u32)bytePtr[0] << 24);
}

#else
XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
}

static xxh_u32 XXH_readBE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}
#endif

XXH_FORCE_INLINE xxh_u32
XXH_readLE32_align(const void* ptr, XXH_alignment align)
{
    if (align==XXH_unaligned) {
        return XXH_readLE32(ptr);
    } else {
        return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr);
    }
}


/* *************************************
*  Misc
***************************************/
XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }


/* *******************************************************************
*  32-bit hash functions
*********************************************************************/
/* #define instead of static const, to be used as initializers */
#define XXH_PRIME32_1  0x9E3779B1U
#define XXH_PRIME32_2  0x85EBCA77U
#define XXH_PRIME32_3  0xC2B2AE3DU
#define XXH_PRIME32_4  0x27D4EB2FU
#define XXH_PRIME32_5  0x165667B1U

#ifdef XXH_OLD_NAMES
#  define PRIME32_1 XXH_PRIME32_1
#  define PRIME32_2 XXH_PRIME32_2
#  define PRIME32_3 XXH_PRIME32_3
#  define PRIME32_4 XXH_PRIME32_4
#  define PRIME32_5 XXH_PRIME32_5
#endif

static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
{
    acc += input * XXH_PRIME32_2;
    acc  = XXH_rotl32(acc, 13);
    acc *= XXH_PRIME32_1;
#if (defined(__SSE4_1__) || defined(__aarch64__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
    /*
     * UGLY HACK:
     * A compiler fence is the only thing that prevents GCC and Clang from
     * autovectorizing the XXH32 loop (pragmas and attributes don't work for some
     * reason) without globally disabling SSE4.1.
     *
     * The reason we want to avoid vectorization is because despite working on
     * 4 integers at a time, there are multiple factors slowing XXH32 down on
     * SSE4:
     * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on
     *   newer chips!) making it slightly slower to multiply four integers at
     *   once compared to four integers independently. Even when pmulld was
     *   fastest, Sandy/Ivy Bridge, it is still not worth it to go into SSE
     *   just to multiply unless doing a long operation.
     *
     * - Four instructions are required to rotate,
     *      movdqa tmp, v    // not required with VEX encoding
     *      pslld  tmp, 13   // tmp <<= 13
     *      psrld  v,   19   // x >>= 19
     *      por    v,   tmp  // x |= tmp
     *   compared to one for scalar:
     *      roll   v, 13     // reliably fast across the board
     *      shldl  v, v, 13  // Sandy Bridge and later prefer this for some reason
     *
     * - Instruction level parallelism is actually more beneficial here because
     *   the SIMD actually serializes this operation: While v1 is rotating, v2
     *   can load data, while v3 can multiply. SSE forces them to operate
     *   together.
     *
     * This is also enabled on AArch64, as Clang autovectorizes it incorrectly
     * and it is pointless writing a NEON implementation that is basically the
     * same speed as scalar for XXH32.
     */
    XXH_COMPILER_GUARD(acc);
#endif
    return acc;
}

static xxh_u32 XXH32_avalanche(xxh_u32 h32)
{
    h32 ^= h32 >> 15;
    h32 *= XXH_PRIME32_2;
    h32 ^= h32 >> 13;
    h32 *= XXH_PRIME32_3;
    h32 ^= h32 >> 16;
    return h32;
}

#define XXH_get32bits(p) XXH_readLE32_align(p, align)

static xxh_u32
XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align)
{
#define XXH_PROCESS1 do {                             \
    h32 += (*ptr++) * XXH_PRIME32_5;                  \
    h32 = XXH_rotl32(h32, 11) * XXH_PRIME32_1;        \
} while (0)

#define XXH_PROCESS4 do {                             \
    h32 += XXH_get32bits(ptr) * XXH_PRIME32_3;        \
    ptr += 4;                                         \
    h32  = XXH_rotl32(h32, 17) * XXH_PRIME32_4;       \
} while (0)

    if (ptr==NULL) XXH_ASSERT(len == 0);

    /* Compact rerolled version; generally faster */
    if (!XXH32_ENDJMP) {
        len &= 15;
        while (len >= 4) {
            XXH_PROCESS4;
            len -= 4;
        }
        while (len > 0) {
            XXH_PROCESS1;
            --len;
        }
        return XXH32_avalanche(h32);
    } else {
         switch(len&15) /* or switch(bEnd - p) */ {
           case 12:      XXH_PROCESS4;
                         XXH_FALLTHROUGH;
           case 8:       XXH_PROCESS4;
                         XXH_FALLTHROUGH;
           case 4:       XXH_PROCESS4;
                         return XXH32_avalanche(h32);

           case 13:      XXH_PROCESS4;
                         XXH_FALLTHROUGH;
           case 9:       XXH_PROCESS4;
                         XXH_FALLTHROUGH;
           case 5:       XXH_PROCESS4;
                         XXH_PROCESS1;
                         return XXH32_avalanche(h32);

           case 14:      XXH_PROCESS4;
                         XXH_FALLTHROUGH;
           case 10:      XXH_PROCESS4;
                         XXH_FALLTHROUGH;
           case 6:       XXH_PROCESS4;
                         XXH_PROCESS1;
                         XXH_PROCESS1;
                         return XXH32_avalanche(h32);

           case 15:      XXH_PROCESS4;
                         XXH_FALLTHROUGH;
           case 11:      XXH_PROCESS4;
                         XXH_FALLTHROUGH;
           case 7:       XXH_PROCESS4;
                         XXH_FALLTHROUGH;
           case 3:       XXH_PROCESS1;
                         XXH_FALLTHROUGH;
           case 2:       XXH_PROCESS1;
                         XXH_FALLTHROUGH;
           case 1:       XXH_PROCESS1;
                         XXH_FALLTHROUGH;
           case 0:       return XXH32_avalanche(h32);
        }
        XXH_ASSERT(0);
        return h32;   /* reaching this point is deemed impossible */
    }
}

#ifdef XXH_OLD_NAMES
#  define PROCESS1 XXH_PROCESS1
#  define PROCESS4 XXH_PROCESS4
#else
#  undef XXH_PROCESS1
#  undef XXH_PROCESS4
#endif

XXH_FORCE_INLINE xxh_u32
XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
{
    xxh_u32 h32;

    if (input==NULL) XXH_ASSERT(len == 0);

    if (len>=16) {
        const xxh_u8* const bEnd = input + len;
        const xxh_u8* const limit = bEnd - 15;
        xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
        xxh_u32 v2 = seed + XXH_PRIME32_2;
        xxh_u32 v3 = seed + 0;
        xxh_u32 v4 = seed - XXH_PRIME32_1;

        do {
            v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4;
            v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4;
            v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4;
            v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4;
        } while (input < limit);

        h32 = XXH_rotl32(v1, 1)  + XXH_rotl32(v2, 7)
            + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
    } else {
        h32  = seed + XXH_PRIME32_5;
    }

    h32 += (xxh_u32)len;

    return XXH32_finalize(h32, input, len&15, align);
}

XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed)
{
#if 0
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH32_state_t state;
    XXH32_reset(&state, seed);
    XXH32_update(&state, (const xxh_u8*)input, len);
    return XXH32_digest(&state);
#else
    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 3) == 0) {   /* Input is 4-bytes aligned, leverage the speed benefit */
            return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
    }   }

    return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
#endif
}



/*******   Hash streaming   *******/
XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
{
    return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
}
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
{
    XXH_free(statePtr);
    return XXH_OK;
}

XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
{
    XXH_memcpy(dstState, srcState, sizeof(*dstState));
}

XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed)
{
    XXH32_state_t state;   /* use a local state to memcpy() in order to avoid strict-aliasing warnings */
    memset(&state, 0, sizeof(state));
    state.v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
    state.v[1] = seed + XXH_PRIME32_2;
    state.v[2] = seed + 0;
    state.v[3] = seed - XXH_PRIME32_1;
    /* do not write into reserved, planned to be removed in a future version */
    XXH_memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
    return XXH_OK;
}


XXH_PUBLIC_API XXH_errorcode
XXH32_update(XXH32_state_t* state, const void* input, size_t len)
{
    if (input==NULL) {
        XXH_ASSERT(len == 0);
        return XXH_OK;
    }

    {   const xxh_u8* p = (const xxh_u8*)input;
        const xxh_u8* const bEnd = p + len;

        state->total_len_32 += (XXH32_hash_t)len;
        state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));

        if (state->memsize + len < 16)  {   /* fill in tmp buffer */
            XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len);
            state->memsize += (XXH32_hash_t)len;
            return XXH_OK;
        }

        if (state->memsize) {   /* some data left from previous update */
            XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize);
            {   const xxh_u32* p32 = state->mem32;
                state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); p32++;
                state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); p32++;
                state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); p32++;
                state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32));
            }
            p += 16-state->memsize;
            state->memsize = 0;
        }

        if (p <= bEnd-16) {
            const xxh_u8* const limit = bEnd - 16;

            do {
                state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); p+=4;
                state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); p+=4;
                state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); p+=4;
                state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p+=4;
            } while (p<=limit);

        }

        if (p < bEnd) {
            XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
            state->memsize = (unsigned)(bEnd-p);
        }
    }

    return XXH_OK;
}


XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state)
{
    xxh_u32 h32;

    if (state->large_len) {
        h32 = XXH_rotl32(state->v[0], 1)
            + XXH_rotl32(state->v[1], 7)
            + XXH_rotl32(state->v[2], 12)
            + XXH_rotl32(state->v[3], 18);
    } else {
        h32 = state->v[2] /* == seed */ + XXH_PRIME32_5;
    }

    h32 += state->total_len_32;

    return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned);
}


/*******   Canonical representation   *******/

XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
    XXH_memcpy(dst, &hash, sizeof(*dst));
}

XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
{
    return XXH_readBE32(src);
}


#ifndef XXH_NO_LONG_LONG

/* *******************************************************************
*  64-bit hash functions
*********************************************************************/
/*******   Memory access   *******/

typedef XXH64_hash_t xxh_u64;

#ifdef XXH_OLD_NAMES
#  define U64 xxh_u64
#endif

#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
/*
 * Manual byteshift. Best for old compilers which don't inline memcpy.
 * We actually directly use XXH_readLE64 and XXH_readBE64.
 */
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
static xxh_u64 XXH_read64(const void* memPtr)
{
    return *(const xxh_u64*) memPtr;
}

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

/*
 * __pack instructions are safer, but compiler specific, hence potentially
 * problematic for some compilers.
 *
 * Currently only defined for GCC and ICC.
 */
#ifdef XXH_OLD_NAMES
typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64;
#endif
static xxh_u64 XXH_read64(const void* ptr)
{
    typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) xxh_unalign64;
    return ((const xxh_unalign64*)ptr)->u64;
}

#else

/*
 * Portable and safe solution. Generally efficient.
 * see: http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
 */
static xxh_u64 XXH_read64(const void* memPtr)
{
    xxh_u64 val;
    XXH_memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif   /* XXH_FORCE_DIRECT_MEMORY_ACCESS */

#if defined(_MSC_VER)     /* Visual Studio */
#  define XXH_swap64 _byteswap_uint64
#elif XXH_GCC_VERSION >= 403
#  define XXH_swap64 __builtin_bswap64
#else
static xxh_u64 XXH_swap64(xxh_u64 x)
{
    return  ((x << 56) & 0xff00000000000000ULL) |
            ((x << 40) & 0x00ff000000000000ULL) |
            ((x << 24) & 0x0000ff0000000000ULL) |
            ((x << 8)  & 0x000000ff00000000ULL) |
            ((x >> 8)  & 0x00000000ff000000ULL) |
            ((x >> 24) & 0x0000000000ff0000ULL) |
            ((x >> 40) & 0x000000000000ff00ULL) |
            ((x >> 56) & 0x00000000000000ffULL);
}
#endif

2358 
2359 /* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */
2360 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
2361 
2362 XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr)
2363 {
2364  const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
2365  return bytePtr[0]
2366  | ((xxh_u64)bytePtr[1] << 8)
2367  | ((xxh_u64)bytePtr[2] << 16)
2368  | ((xxh_u64)bytePtr[3] << 24)
2369  | ((xxh_u64)bytePtr[4] << 32)
2370  | ((xxh_u64)bytePtr[5] << 40)
2371  | ((xxh_u64)bytePtr[6] << 48)
2372  | ((xxh_u64)bytePtr[7] << 56);
2373 }
2374 
2375 XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr)
2376 {
2377  const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
2378  return bytePtr[7]
2379  | ((xxh_u64)bytePtr[6] << 8)
2380  | ((xxh_u64)bytePtr[5] << 16)
2381  | ((xxh_u64)bytePtr[4] << 24)
2382  | ((xxh_u64)bytePtr[3] << 32)
2383  | ((xxh_u64)bytePtr[2] << 40)
2384  | ((xxh_u64)bytePtr[1] << 48)
2385  | ((xxh_u64)bytePtr[0] << 56);
2386 }
2387 
2388 #else
2389 XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr)
2390 {
2391  return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
2392 }
2393 
2394 static xxh_u64 XXH_readBE64(const void* ptr)
2395 {
2396  return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
2397 }
2398 #endif
2399 
2400 XXH_FORCE_INLINE xxh_u64
2401 XXH_readLE64_align(const void* ptr, XXH_alignment align)
2402 {
2403  if (align==XXH_unaligned)
2404  return XXH_readLE64(ptr);
2405  else
2406  return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr);
2407 }
2408 
2409 
2410 /******* xxh64 *******/
2417 /* #define rather than static const, to be used as initializers */
2418 #define XXH_PRIME64_1 0x9E3779B185EBCA87ULL
2419 #define XXH_PRIME64_2 0xC2B2AE3D27D4EB4FULL
2420 #define XXH_PRIME64_3 0x165667B19E3779F9ULL
2421 #define XXH_PRIME64_4 0x85EBCA77C2B2AE63ULL
2422 #define XXH_PRIME64_5 0x27D4EB2F165667C5ULL
2424 #ifdef XXH_OLD_NAMES
2425 # define PRIME64_1 XXH_PRIME64_1
2426 # define PRIME64_2 XXH_PRIME64_2
2427 # define PRIME64_3 XXH_PRIME64_3
2428 # define PRIME64_4 XXH_PRIME64_4
2429 # define PRIME64_5 XXH_PRIME64_5
2430 #endif
2431 
2432 static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
2433 {
2434  acc += input * XXH_PRIME64_2;
2435  acc = XXH_rotl64(acc, 31);
2436  acc *= XXH_PRIME64_1;
2437  return acc;
2438 }
2439 
2440 static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
2441 {
2442  val = XXH64_round(0, val);
2443  acc ^= val;
2444  acc = acc * XXH_PRIME64_1 + XXH_PRIME64_4;
2445  return acc;
2446 }
2447 
2448 static xxh_u64 XXH64_avalanche(xxh_u64 h64)
2449 {
2450  h64 ^= h64 >> 33;
2451  h64 *= XXH_PRIME64_2;
2452  h64 ^= h64 >> 29;
2453  h64 *= XXH_PRIME64_3;
2454  h64 ^= h64 >> 32;
2455  return h64;
2456 }
2457 
2458 
2459 #define XXH_get64bits(p) XXH_readLE64_align(p, align)
2460 
2461 static xxh_u64
2462 XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align)
2463 {
2464  if (ptr==NULL) XXH_ASSERT(len == 0);
2465  len &= 31;
2466  while (len >= 8) {
2467  xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr));
2468  ptr += 8;
2469  h64 ^= k1;
2470  h64 = XXH_rotl64(h64,27) * XXH_PRIME64_1 + XXH_PRIME64_4;
2471  len -= 8;
2472  }
2473  if (len >= 4) {
2474  h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;
2475  ptr += 4;
2476  h64 = XXH_rotl64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
2477  len -= 4;
2478  }
2479  while (len > 0) {
2480  h64 ^= (*ptr++) * XXH_PRIME64_5;
2481  h64 = XXH_rotl64(h64, 11) * XXH_PRIME64_1;
2482  --len;
2483  }
2484  return XXH64_avalanche(h64);
2485 }
2486 
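/*
 * Worked trace for XXH64_finalize: with len = 13 bytes remaining, the code
 * above consumes one 8-byte lane (first while), then one 4-byte lane (the
 * if), then one final byte (second while), and ends with XXH64_avalanche.
 */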
2487 #ifdef XXH_OLD_NAMES
2488 # define PROCESS1_64 XXH_PROCESS1_64
2489 # define PROCESS4_64 XXH_PROCESS4_64
2490 # define PROCESS8_64 XXH_PROCESS8_64
2491 #else
2492 # undef XXH_PROCESS1_64
2493 # undef XXH_PROCESS4_64
2494 # undef XXH_PROCESS8_64
2495 #endif
2496 
2497 XXH_FORCE_INLINE xxh_u64
2498 XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
2499 {
2500  xxh_u64 h64;
2501  if (input==NULL) XXH_ASSERT(len == 0);
2502 
2503  if (len>=32) {
2504  const xxh_u8* const bEnd = input + len;
2505  const xxh_u8* const limit = bEnd - 31;
2506  xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
2507  xxh_u64 v2 = seed + XXH_PRIME64_2;
2508  xxh_u64 v3 = seed + 0;
2509  xxh_u64 v4 = seed - XXH_PRIME64_1;
2510 
2511  do {
2512  v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
2513  v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
2514  v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
2515  v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
2516  } while (input<limit);
2517 
2518  h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
2519  h64 = XXH64_mergeRound(h64, v1);
2520  h64 = XXH64_mergeRound(h64, v2);
2521  h64 = XXH64_mergeRound(h64, v3);
2522  h64 = XXH64_mergeRound(h64, v4);
2523 
2524  } else {
2525  h64 = seed + XXH_PRIME64_5;
2526  }
2527 
2528  h64 += (xxh_u64) len;
2529 
2530  return XXH64_finalize(h64, input, len, align);
2531 }
2532 
2533 
2535 XXH_PUBLIC_API XXH64_hash_t XXH64(const void* input, size_t len, XXH64_hash_t seed)
2536 {
2537 #if 0
2538  /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
2539  XXH64_state_t state;
2540  XXH64_reset(&state, seed);
2541  XXH64_update(&state, (const xxh_u8*)input, len);
2542  return XXH64_digest(&state);
2543 #else
2544  if (XXH_FORCE_ALIGN_CHECK) {
2545  if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */
2546  return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
2547  } }
2548 
2549  return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
2550 
2551 #endif
2552 }
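/*
 * One-shot usage sketch (`data` is a placeholder name):
 *
 *   const char data[] = "hello";
 *   XXH64_hash_t const hash = XXH64(data, sizeof(data)-1, 0);
 */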
2553 
2554 /******* Hash Streaming *******/
2555 
2557 XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
2558 {
2559  return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
2560 }
2562 XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
2563 {
2564  XXH_free(statePtr);
2565  return XXH_OK;
2566 }
2567 
2569 XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState)
2570 {
2571  XXH_memcpy(dstState, srcState, sizeof(*dstState));
2572 }
2573 
2575 XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, XXH64_hash_t seed)
2576 {
2577  XXH64_state_t state; /* use a local state to memcpy() in order to avoid strict-aliasing warnings */
2578  memset(&state, 0, sizeof(state));
2579  state.v[0] = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
2580  state.v[1] = seed + XXH_PRIME64_2;
2581  state.v[2] = seed + 0;
2582  state.v[3] = seed - XXH_PRIME64_1;
2583  /* do not write into reserved64, might be removed in a future version */
2584  XXH_memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved64));
2585  return XXH_OK;
2586 }
2587 
2589 XXH_PUBLIC_API XXH_errorcode
2590 XXH64_update (XXH64_state_t* state, const void* input, size_t len)
2591 {
2592  if (input==NULL) {
2593  XXH_ASSERT(len == 0);
2594  return XXH_OK;
2595  }
2596 
2597  { const xxh_u8* p = (const xxh_u8*)input;
2598  const xxh_u8* const bEnd = p + len;
2599 
2600  state->total_len += len;
2601 
2602  if (state->memsize + len < 32) { /* fill in tmp buffer */
2603  XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len);
2604  state->memsize += (xxh_u32)len;
2605  return XXH_OK;
2606  }
2607 
2608  if (state->memsize) { /* tmp buffer is full */
2609  XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize);
2610  state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64+0));
2611  state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64+1));
2612  state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64+2));
2613  state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64+3));
2614  p += 32 - state->memsize;
2615  state->memsize = 0;
2616  }
2617 
2618  if (p+32 <= bEnd) {
2619  const xxh_u8* const limit = bEnd - 32;
2620 
2621  do {
2622  state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); p+=8;
2623  state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); p+=8;
2624  state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); p+=8;
2625  state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p+=8;
2626  } while (p<=limit);
2627 
2628  }
2629 
2630  if (p < bEnd) {
2631  XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
2632  state->memsize = (unsigned)(bEnd-p);
2633  }
2634  }
2635 
2636  return XXH_OK;
2637 }
2638 
2639 
2641 XXH_PUBLIC_API XXH64_hash_t XXH64_digest(const XXH64_state_t* state)
2642 {
2643  xxh_u64 h64;
2644 
2645  if (state->total_len >= 32) {
2646  h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7) + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18);
2647  h64 = XXH64_mergeRound(h64, state->v[0]);
2648  h64 = XXH64_mergeRound(h64, state->v[1]);
2649  h64 = XXH64_mergeRound(h64, state->v[2]);
2650  h64 = XXH64_mergeRound(h64, state->v[3]);
2651  } else {
2652  h64 = state->v[2] /*seed*/ + XXH_PRIME64_5;
2653  }
2654 
2655  h64 += (xxh_u64) state->total_len;
2656 
2657  return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned);
2658 }
2659 
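/*
 * Streaming usage sketch: feeding the same bytes in arbitrary chunks yields
 * the same result as the one-shot XXH64(). `part1`/`part2` are placeholder
 * buffers; error checks omitted for brevity.
 *
 *   XXH64_state_t* const st = XXH64_createState();
 *   XXH64_reset(st, 0);
 *   XXH64_update(st, part1, part1Size);
 *   XXH64_update(st, part2, part2Size);
 *   XXH64_hash_t const hash = XXH64_digest(st);
 *   XXH64_freeState(st);
 */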
2660 
2661 /******* Canonical representation *******/
2662 
2664 XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
2665 {
2666  XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
2667  if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
2668  XXH_memcpy(dst, &hash, sizeof(*dst));
2669 }
2670 
2672 XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
2673 {
2674  return XXH_readBE64(src);
2675 }
2676 
2677 #ifndef XXH_NO_XXH3
2678 
2679 /* *********************************************************************
2680 * XXH3
2681 * New generation hash designed for speed on small keys and vectorization
2682 ************************************************************************ */
2690 /* === Compiler specifics === */
2691 
2692 #if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. Tested with GCC 5.5 */
2693 # define XXH_RESTRICT /* disable */
2694 #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* >= C99 */
2695 # define XXH_RESTRICT restrict
2696 #else
2697 /* Note: it might be useful to define __restrict or __restrict__ for some C++ compilers */
2698 # define XXH_RESTRICT /* disable */
2699 #endif
2700 
2701 #if (defined(__GNUC__) && (__GNUC__ >= 3)) \
2702  || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \
2703  || defined(__clang__)
2704 # define XXH_likely(x) __builtin_expect(x, 1)
2705 # define XXH_unlikely(x) __builtin_expect(x, 0)
2706 #else
2707 # define XXH_likely(x) (x)
2708 # define XXH_unlikely(x) (x)
2709 #endif
2710 
2711 #if defined(__GNUC__)
2712 # if defined(__AVX2__)
2713 # include <immintrin.h>
2714 # elif defined(__SSE2__)
2715 # include <emmintrin.h>
2716 # elif defined(__ARM_NEON__) || defined(__ARM_NEON)
2717 # define inline __inline__ /* circumvent a clang bug */
2718 # include <arm_neon.h>
2719 # undef inline
2720 # endif
2721 #elif defined(_MSC_VER)
2722 # include <intrin.h>
2723 #endif
2724 
2725 /*
2726  * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while
2727  * remaining a true 64-bit/128-bit hash function.
2728  *
2729  * This is done by prioritizing a subset of 64-bit operations that can be
2730  * emulated without too many steps on the average 32-bit machine.
2731  *
2732  * For example, these two lines seem similar, and run equally fast on 64-bit:
2733  *
2734  * xxh_u64 x;
2735  * x ^= (x >> 47); // good
2736  * x ^= (x >> 13); // bad
2737  *
2738  * However, to a 32-bit machine, there is a major difference.
2739  *
2740  * x ^= (x >> 47) looks like this:
2741  *
2742  * x.lo ^= (x.hi >> (47 - 32));
2743  *
2744  * while x ^= (x >> 13) looks like this:
2745  *
2746  * // note: funnel shifts are not usually cheap.
2747  * x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13));
2748  * x.hi ^= (x.hi >> 13);
2749  *
2750  * The first one is significantly faster than the second, simply because the
2751  * shift is larger than 32. This means:
2752  * - All the bits we need are in the upper 32 bits, so we can ignore the lower
2753  * 32 bits in the shift.
2754  * - The shift result will always fit in the lower 32 bits, and therefore,
2755  * we can ignore the upper 32 bits in the xor.
2756  *
2757  * Thanks to this optimization, XXH3 only requires these features to be efficient:
2758  *
2759  * - Usable unaligned access
2760  * - A 32-bit or 64-bit ALU
2761  * - If 32-bit, a decent ADC instruction
2762  * - A 32 or 64-bit multiply with a 64-bit result
2763  * - For the 128-bit variant, a decent byteswap helps short inputs.
2764  *
2765  * The first two are already required by XXH32, and almost all 32-bit and 64-bit
2766  * platforms which can run XXH32 can run XXH3 efficiently.
2767  *
2768  * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one
2769  * notable exception.
2770  *
2771  * First of all, Thumb-1 lacks support for the UMULL instruction which
2772  * performs the important long multiply. This means numerous __aeabi_lmul
2773  * calls.
2774  *
2775  * Second of all, the 8 functional registers are just not enough.
2776  * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need
2777  * Lo registers, and this shuffling results in thousands more MOVs than A32.
2778  *
2779  * A32 and T32 don't have this limitation. They can access all 14 registers,
2780  * do a 32->64 multiply with UMULL, and the flexible operand allowing free
2781  * shifts is helpful, too.
2782  *
2783  * Therefore, we do a quick sanity check.
2784  *
2785  * If compiling Thumb-1 for a target which supports ARM instructions, we will
2786  * emit a warning, as it is not a "sane" platform to compile for.
2787  *
2788  * Usually, if this happens, it is because of an accident and you probably need
2789  * to specify -march, as you likely meant to compile for a newer architecture.
2790  *
2791  * Credit: large sections of the vectorial and asm source code paths
2792  * have been contributed by @easyaspi314
2793  */
2794 #if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM)
2795 # warning "XXH3 is highly inefficient without ARM or Thumb-2."
2796 #endif
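/*
 * The 32-bit decomposition above, spelled out as a small C sketch (the
 * u64_halves type is hypothetical, standing in for what a 32-bit compiler
 * does internally):
 *
 *   typedef struct { xxh_u32 lo, hi; } u64_halves;
 *   static void xorshift47(u64_halves* x)
 *   {
 *       x->lo ^= (x->hi >> (47 - 32));  // single shift + xor, hi untouched
 *   }
 */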
2797 
2798 /* ==========================================
2799  * Vectorization detection
2800  * ========================================== */
2801 
2802 #ifdef XXH_DOXYGEN
2803 
2813 # define XXH_VECTOR XXH_SCALAR
2814 
2823 enum XXH_VECTOR_TYPE /* fake enum */ {
2824  XXH_SCALAR = 0,
2825  XXH_SSE2 = 1,
2831  XXH_AVX2 = 2,
2832  XXH_AVX512 = 3,
2833  XXH_NEON = 4,
2834  XXH_VSX = 5,
2835 };
2845 # define XXH_ACC_ALIGN 8
2846 #endif
2847 
2848 /* Actual definition */
2849 #ifndef XXH_DOXYGEN
2850 # define XXH_SCALAR 0
2851 # define XXH_SSE2 1
2852 # define XXH_AVX2 2
2853 # define XXH_AVX512 3
2854 # define XXH_NEON 4
2855 # define XXH_VSX 5
2856 #endif
2857 
2858 #ifndef XXH_VECTOR /* can be defined on command line */
2859 # if defined(__AVX512F__)
2860 # define XXH_VECTOR XXH_AVX512
2861 # elif defined(__AVX2__)
2862 # define XXH_VECTOR XXH_AVX2
2863 # elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
2864 # define XXH_VECTOR XXH_SSE2
2865 # elif ( \
2866  defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \
2867  || defined(_M_ARM64) || defined(_M_ARM_ARMV7VE) /* msvc */ \
2868  ) && ( \
2869  defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \
2870  || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
2871  )
2872 # define XXH_VECTOR XXH_NEON
2873 # elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \
2874  || (defined(__s390x__) && defined(__VEC__)) \
2875  && defined(__GNUC__) /* TODO: IBM XL */
2876 # define XXH_VECTOR XXH_VSX
2877 # else
2878 # define XXH_VECTOR XXH_SCALAR
2879 # endif
2880 #endif
2881 
2882 /*
2883  * Controls the alignment of the accumulator,
2884  * for compatibility with aligned vector loads, which are usually faster.
2885  */
2886 #ifndef XXH_ACC_ALIGN
2887 # if defined(XXH_X86DISPATCH)
2888 # define XXH_ACC_ALIGN 64 /* for compatibility with avx512 */
2889 # elif XXH_VECTOR == XXH_SCALAR /* scalar */
2890 # define XXH_ACC_ALIGN 8
2891 # elif XXH_VECTOR == XXH_SSE2 /* sse2 */
2892 # define XXH_ACC_ALIGN 16
2893 # elif XXH_VECTOR == XXH_AVX2 /* avx2 */
2894 # define XXH_ACC_ALIGN 32
2895 # elif XXH_VECTOR == XXH_NEON /* neon */
2896 # define XXH_ACC_ALIGN 16
2897 # elif XXH_VECTOR == XXH_VSX /* vsx */
2898 # define XXH_ACC_ALIGN 16
2899 # elif XXH_VECTOR == XXH_AVX512 /* avx512 */
2900 # define XXH_ACC_ALIGN 64
2901 # endif
2902 #endif
2903 
2904 #if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \
2905  || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
2906 # define XXH_SEC_ALIGN XXH_ACC_ALIGN
2907 #else
2908 # define XXH_SEC_ALIGN 8
2909 #endif
2910 
2911 /*
2912  * UGLY HACK:
2913  * GCC usually generates the best code with -O3 for xxHash.
2914  *
2915  * However, when targeting AVX2, it is overzealous in its unrolling resulting
2916  * in code roughly 3/4 the speed of Clang.
2917  *
2918  * There are other issues, such as GCC splitting _mm256_loadu_si256 into
2919  * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which
2920  * only applies to Sandy and Ivy Bridge... which don't even support AVX2.
2921  *
2922  * That is why when compiling the AVX2 version, it is recommended to use either
2923  * -O2 -mavx2 -march=haswell
2924  * or
2925  * -O2 -mavx2 -mno-avx256-split-unaligned-load
2926  * for decent performance, or to use Clang instead.
2927  *
2928  * Fortunately, we can control the first one with a pragma that forces GCC into
2929  * -O2, but the other one we can't control without "failed to inline always
2930  * inline function due to target mismatch" warnings.
2931  */
2932 #if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
2933  && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
2934  && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */
2935 # pragma GCC push_options
2936 # pragma GCC optimize("-O2")
2937 #endif
2938 
2939 
2940 #if XXH_VECTOR == XXH_NEON
2941 /*
2942  * NEON's setup for vmlal_u32 is a little more complicated than it is on
2943  * SSE2, AVX2, and VSX.
2944  *
2945  * While PMULUDQ and VMULEUW both perform a mask, VMLAL.U32 performs an upcast.
2946  *
2947  * To do the same operation, the 128-bit 'Q' register needs to be split into
2948  * two 64-bit 'D' registers, performing this operation::
2949  *
2950  * [ a | b ]
2951  * | '---------. .--------' |
2952  * | x |
2953  * | .---------' '--------. |
2954  * [ a & 0xFFFFFFFF | b & 0xFFFFFFFF ],[ a >> 32 | b >> 32 ]
2955  *
2956  * Due to significant changes in aarch64, the fastest method for aarch64 is
2957  * completely different than the fastest method for ARMv7-A.
2958  *
2959  * ARMv7-A treats D registers as unions overlaying Q registers, so modifying
2960  * D11 will modify the high half of Q5. This is similar to how modifying AH
2961  * will only affect bits 8-15 of AX on x86.
2962  *
2963  * VZIP takes two registers, and puts even lanes in one register and odd lanes
2964  * in the other.
2965  *
2966  * On ARMv7-A, this strangely modifies both parameters in place instead of
2967  * taking the usual 3-operand form.
2968  *
2969  * Therefore, if we want to do this, we can simply use a D-form VZIP.32 on the
2970  * lower and upper halves of the Q register to end up with the high and low
2971  * halves where we want - all in one instruction.
2972  *
2973  * vzip.32 d10, d11 @ d10 = { d10[0], d11[0] }; d11 = { d10[1], d11[1] }
2974  *
2975  * Unfortunately, we need inline assembly for this: instructions modifying two
2976  * registers at once are not expressible in GCC or Clang's IR, so they have to
2977  * create a copy.
2978  *
2979  * aarch64 requires a different approach.
2980  *
2981  * In order to make it easier to write a decent compiler for aarch64, many
2982  * quirks were removed, such as conditional execution.
2983  *
2984  * NEON was also affected by this.
2985  *
2986  * aarch64 cannot access the high bits of a Q-form register, and writes to a
2987  * D-form register zero the high bits, similar to how writes to W-form scalar
2988  * registers (or DWORD registers on x86_64) work.
2989  *
2990  * The formerly free vget_high intrinsics now require a vext (with a few
2991  * exceptions).
2992  *
2993  * Additionally, VZIP was replaced by ZIP1 and ZIP2, which are the equivalent
2994  * of PUNPCKL* and PUNPCKH* in SSE, respectively, in order to only modify one
2995  * operand.
2996  *
2997  * The equivalent of the VZIP.32 on the lower and upper halves would be this
2998  * mess:
2999  *
3000  * ext v2.4s, v0.4s, v0.4s, #2 // v2 = { v0[2], v0[3], v0[0], v0[1] }
3001  * zip1 v1.2s, v0.2s, v2.2s // v1 = { v0[0], v2[0] }
3002  * zip2 v0.2s, v0.2s, v1.2s // v0 = { v0[1], v2[1] }
3003  *
3004  * Instead, we use a literal downcast, vmovn_u64 (XTN), and vshrn_n_u64 (SHRN):
3005  *
3006  * shrn v1.2s, v0.2d, #32 // v1 = (uint32x2_t)(v0 >> 32);
3007  * xtn v0.2s, v0.2d // v0 = (uint32x2_t)(v0 & 0xFFFFFFFF);
3008  *
3009  * This is available on ARMv7-A, but is less efficient than a single VZIP.32.
3010  */
3011 
3021 # if !defined(XXH_NO_VZIP_HACK) /* define to disable */ \
3022  && defined(__GNUC__) \
3023  && !defined(__aarch64__) && !defined(__arm64__) && !defined(_M_ARM64)
3024 # define XXH_SPLIT_IN_PLACE(in, outLo, outHi) \
3025  do { \
3026  /* Undocumented GCC/Clang operand modifier: %e0 = lower D half, %f0 = upper D half */ \
3027  /* https://github.com/gcc-mirror/gcc/blob/38cf91e5/gcc/config/arm/arm.c#L22486 */ \
3028  /* https://github.com/llvm-mirror/llvm/blob/2c4ca683/lib/Target/ARM/ARMAsmPrinter.cpp#L399 */ \
3029  __asm__("vzip.32 %e0, %f0" : "+w" (in)); \
3030  (outLo) = vget_low_u32 (vreinterpretq_u32_u64(in)); \
3031  (outHi) = vget_high_u32(vreinterpretq_u32_u64(in)); \
3032  } while (0)
3033 # else
3034 # define XXH_SPLIT_IN_PLACE(in, outLo, outHi) \
3035  do { \
3036  (outLo) = vmovn_u64 (in); \
3037  (outHi) = vshrn_n_u64 ((in), 32); \
3038  } while (0)
3039 # endif
3040 #endif /* XXH_VECTOR == XXH_NEON */
3041 
3042 /*
3043  * VSX and Z Vector helpers.
3044  *
3045  * This is very messy, and any pull requests to clean this up are welcome.
3046  *
3047  * There are a lot of problems with supporting VSX and s390x, due to
3048  * inconsistent intrinsics, spotty coverage, and multiple endiannesses.
3049  */
3050 #if XXH_VECTOR == XXH_VSX
3051 # if defined(__s390x__)
3052 # include <s390intrin.h>
3053 # else
3054 /* gcc's altivec.h can have the unwanted consequence of unconditionally
3055  * #defining the bool, vector, and pixel keywords,
3056  * breaking programs which already use these keywords for other purposes.
3057  * The paragraph defining these macros is skipped when __APPLE_ALTIVEC__ is defined.
3058  * __APPLE_ALTIVEC__ is _generally_ defined automatically by the compiler,
3059  * but it seems that, in some cases, it isn't.
3060  * Force the build macro to be defined, so that keywords are not altered.
3061  */
3062 # if defined(__GNUC__) && !defined(__APPLE_ALTIVEC__)
3063 # define __APPLE_ALTIVEC__
3064 # endif
3065 # include <altivec.h>
3066 # endif
3067 
3068 typedef __vector unsigned long long xxh_u64x2;
3069 typedef __vector unsigned char xxh_u8x16;
3070 typedef __vector unsigned xxh_u32x4;
3071 
3072 # ifndef XXH_VSX_BE
3073 # if defined(__BIG_ENDIAN__) \
3074  || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
3075 # define XXH_VSX_BE 1
3076 # elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
3077 # warning "-maltivec=be is not recommended. Please use native endianness."
3078 # define XXH_VSX_BE 1
3079 # else
3080 # define XXH_VSX_BE 0
3081 # endif
3082 # endif /* !defined(XXH_VSX_BE) */
3083 
3084 # if XXH_VSX_BE
3085 # if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__))
3086 # define XXH_vec_revb vec_revb
3087 # else
3088 
3091 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val)
3092 {
3093  xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
3094  0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 };
3095  return vec_perm(val, val, vByteSwap);
3096 }
3097 # endif
3098 # endif /* XXH_VSX_BE */
3099 
3103 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr)
3104 {
3105  xxh_u64x2 ret;
3106  XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2));
3107 # if XXH_VSX_BE
3108  ret = XXH_vec_revb(ret);
3109 # endif
3110  return ret;
3111 }
3112 
3113 /*
3114  * vec_mulo and vec_mule are very problematic intrinsics on PowerPC
3115  *
3116  * These intrinsics weren't added until GCC 8, despite existing for a while,
3117  * and they are endian dependent. Also, their meaning swaps depending on version.
3118  */
3119 # if defined(__s390x__)
3120  /* s390x is always big endian, no issue on this platform */
3121 # define XXH_vec_mulo vec_mulo
3122 # define XXH_vec_mule vec_mule
3123 # elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw)
3124 /* Clang has a better way to control this, we can just use the builtin which doesn't swap. */
3125 # define XXH_vec_mulo __builtin_altivec_vmulouw
3126 # define XXH_vec_mule __builtin_altivec_vmuleuw
3127 # else
3128 /* gcc needs inline assembly */
3129 /* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */
3130 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b)
3131 {
3132  xxh_u64x2 result;
3133  __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
3134  return result;
3135 }
3136 XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b)
3137 {
3138  xxh_u64x2 result;
3139  __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
3140  return result;
3141 }
3142 # endif /* XXH_vec_mulo, XXH_vec_mule */
3143 #endif /* XXH_VECTOR == XXH_VSX */
3144 
3145 
3146 /* prefetch
3147  * can be disabled, by declaring XXH_NO_PREFETCH build macro */
3148 #if defined(XXH_NO_PREFETCH)
3149 # define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */
3150 #else
3151 # if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) /* _mm_prefetch() not defined outside of x86/x64 */
3152 # include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
3153 # define XXH_PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
3154 # elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
3155 # define XXH_PREFETCH(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
3156 # else
3157 # define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */
3158 # endif
3159 #endif /* XXH_NO_PREFETCH */
3160 
3161 
3162 /* ==========================================
3163  * XXH3 default settings
3164  * ========================================== */
3165 
3166 #define XXH_SECRET_DEFAULT_SIZE 192 /* minimum XXH3_SECRET_SIZE_MIN */
3167 
3168 #if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN)
3169 # error "default keyset is not large enough"
3170 #endif
3171 
3173 XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = {
3174  0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c,
3175  0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f,
3176  0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21,
3177  0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c,
3178  0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3,
3179  0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8,
3180  0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d,
3181  0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64,
3182  0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb,
3183  0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e,
3184  0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce,
3185  0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e,
3186 };
3187 
3188 
3189 #ifdef XXH_OLD_NAMES
3190 # define kSecret XXH3_kSecret
3191 #endif
3192 
3193 #ifdef XXH_DOXYGEN
3194 
3210 XXH_FORCE_INLINE xxh_u64
3211 XXH_mult32to64(xxh_u64 x, xxh_u64 y)
3212 {
3213  return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF);
3214 }
3215 #elif defined(_MSC_VER) && defined(_M_IX86)
3216 # include <intrin.h>
3217 # define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y))
3218 #else
3219 /*
3220  * Downcast + upcast is usually better than masking on older compilers like
3221  * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers.
3222  *
3223  * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands
3224  * and perform a full 64x64 multiply -- entirely redundant on 32-bit.
3225  */
3226 # define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y))
3227 #endif
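/*
 * Quick sanity example: both forms compute the full 64-bit product of the
 * low 32 bits of each operand, e.g.
 *   XXH_mult32to64(0xFFFFFFFF, 2) == 0x1FFFFFFFEULL
 */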
3228 
3238 static XXH128_hash_t
3239 XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs)
3240 {
3241  /*
3242  * GCC/Clang __uint128_t method.
3243  *
3244  * On most 64-bit targets, GCC and Clang define a __uint128_t type.
3245  * This is usually the best way, as it typically compiles to a native widening
3246  * 64-bit multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64.
3247  *
3248  * Usually.
3249  *
3250  * Even on some 32-bit platforms, Clang (and Emscripten) define this type
3251  * without having the hardware arithmetic for it. This results in a slow
3252  * compiler builtin call which calculates a full 128-bit multiply.
3253  * In that case it is best to use the portable one.
3254  * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677
3255  */
3256 #if defined(__GNUC__) && !defined(__wasm__) \
3257  && defined(__SIZEOF_INT128__) \
3258  || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
3259 
3260  __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs;
3261  XXH128_hash_t r128;
3262  r128.low64 = (xxh_u64)(product);
3263  r128.high64 = (xxh_u64)(product >> 64);
3264  return r128;
3265 
3266  /*
3267  * MSVC for x64's _umul128 method.
3268  *
3269  * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct);
3270  *
3271  * This compiles to single operand MUL on x64.
3272  */
3273 #elif defined(_M_X64) || defined(_M_IA64)
3274 
3275 #ifndef _MSC_VER
3276 # pragma intrinsic(_umul128)
3277 #endif
3278  xxh_u64 product_high;
3279  xxh_u64 const product_low = _umul128(lhs, rhs, &product_high);
3280  XXH128_hash_t r128;
3281  r128.low64 = product_low;
3282  r128.high64 = product_high;
3283  return r128;
3284 
3285  /*
3286  * MSVC for ARM64's __umulh method.
3287  *
3288  * This compiles to the same MUL + UMULH as GCC/Clang's __uint128_t method.
3289  */
3290 #elif defined(_M_ARM64)
3291 
3292 #ifndef _MSC_VER
3293 # pragma intrinsic(__umulh)
3294 #endif
3295  XXH128_hash_t r128;
3296  r128.low64 = lhs * rhs;
3297  r128.high64 = __umulh(lhs, rhs);
3298  return r128;
3299 
3300 #else
3301  /*
3302  * Portable scalar method. Optimized for 32-bit and 64-bit ALUs.
3303  *
3304  * This is a fast and simple grade school multiply, which is shown below
3305  * with base 10 arithmetic instead of base 0x100000000.
3306  *
3307  * 9 3 // D2 lhs = 93
3308  * x 7 5 // D2 rhs = 75
3309  * ----------
3310  * 1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15
3311  * 4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45
3312  * 2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21
3313  * + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63
3314  * ---------
3315  * 2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27
3316  * + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67
3317  * ---------
3318  * 6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975
3319  *
3320  * The reasons for adding the products like this are:
3321  * 1. It avoids manual carry tracking. Just like how
3322  * (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX.
3323  * This avoids a lot of complexity.
3324  *
3325  * 2. It hints for, and on Clang, compiles to, the powerful UMAAL
3326  * instruction available in ARM's Digital Signal Processing extension
3327  * in 32-bit ARMv6 and later, which is shown below:
3328  *
3329  * void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm)
3330  * {
3331  * xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm;
3332  * *RdLo = (xxh_u32)(product & 0xFFFFFFFF);
3333  * *RdHi = (xxh_u32)(product >> 32);
3334  * }
3335  *
3336  * This instruction was designed for efficient long multiplication, and
3337  * allows this to be calculated in only 4 instructions at speeds
3338  * comparable to some 64-bit ALUs.
3339  *
3340  * 3. It isn't terrible on other platforms. Usually this will be a couple
3341  * of 32-bit ADD/ADCs.
3342  */
3343 
3344  /* First calculate all of the cross products. */
3345  xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF);
3346  xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF);
3347  xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32);
3348  xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32);
3349 
3350  /* Now add the products together. These will never overflow. */
3351  xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
3352  xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi;
3353  xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);
3354 
3355  XXH128_hash_t r128;
3356  r128.low64 = lower;
3357  r128.high64 = upper;
3358  return r128;
3359 #endif
3360 }
3361 
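/*
 * Self-check sketch for the scalar path (assumes a compiler providing
 * __uint128_t; purely illustrative):
 *
 *   __uint128_t const ref = (__uint128_t)lhs * rhs;
 *   XXH128_hash_t const r = XXH_mult64to128(lhs, rhs);
 *   // r.low64 == (xxh_u64)ref, r.high64 == (xxh_u64)(ref >> 64)
 */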
3372 static xxh_u64
3373 XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs)
3374 {
3375  XXH128_hash_t product = XXH_mult64to128(lhs, rhs);
3376  return product.low64 ^ product.high64;
3377 }
3378 
3380 XXH_FORCE_INLINE xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift)
3381 {
3382  XXH_ASSERT(0 <= shift && shift < 64);
3383  return v64 ^ (v64 >> shift);
3384 }
3385 
3386 /*
3387  * This is a fast avalanche stage,
3388  * suitable when input bits are already partially mixed
3389  */
3390 static XXH64_hash_t XXH3_avalanche(xxh_u64 h64)
3391 {
3392  h64 = XXH_xorshift64(h64, 37);
3393  h64 *= 0x165667919E3779F9ULL;
3394  h64 = XXH_xorshift64(h64, 32);
3395  return h64;
3396 }
3397 
3398 /*
3399  * This is a stronger avalanche,
3400  * inspired by Pelle Evensen's rrmxmx
3401  * preferable when input has not been previously mixed
3402  */
3403 static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len)
3404 {
3405  /* this mix is inspired by Pelle Evensen's rrmxmx */
3406  h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24);
3407  h64 *= 0x9FB21C651E98DF25ULL;
3408  h64 ^= (h64 >> 35) + len ;
3409  h64 *= 0x9FB21C651E98DF25ULL;
3410  return XXH_xorshift64(h64, 28);
3411 }
3412 
3413 
3414 /* ==========================================
3415  * Short keys
3416  * ==========================================
3417  * One of the shortcomings of XXH32 and XXH64 was that their performance was
3418  * sub-optimal on short lengths. They used an iterative algorithm which strongly
3419  * favored lengths that were a multiple of 4 or 8.
3420  *
3421  * Instead of iterating over individual inputs, we use a set of single shot
3422  * functions which piece together a range of lengths and operate in constant time.
3423  *
3424  * Additionally, the number of multiplies has been significantly reduced. This
3425  * reduces latency, especially when emulating 64-bit multiplies on 32-bit.
3426  *
3427  * Depending on the platform, this may or may not be faster than XXH32, but it
3428  * is almost guaranteed to be faster than XXH64.
3429  */
3430 
3431 /*
3432  * At very short lengths, there isn't enough input to fully hide secrets, or use
3433  * the entire secret.
3434  *
3435  * There is also only a limited amount of mixing we can do before significantly
3436  * impacting performance.
3437  *
3438  * Therefore, we use different sections of the secret and always mix two secret
3439  * samples with an XOR. This should have no effect on performance on the
3440  * seedless or withSeed variants because everything _should_ be constant folded
3441  * by modern compilers.
3442  *
3443  * The XOR mixing hides individual parts of the secret and increases entropy.
3444  *
3445  * This adds an extra layer of strength for custom secrets.
3446  */
3447 XXH_FORCE_INLINE XXH64_hash_t
3448 XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
3449 {
3450  XXH_ASSERT(input != NULL);
3451  XXH_ASSERT(1 <= len && len <= 3);
3452  XXH_ASSERT(secret != NULL);
3453  /*
3454  * len = 1: combined = { input[0], 0x01, input[0], input[0] }
3455  * len = 2: combined = { input[1], 0x02, input[0], input[1] }
3456  * len = 3: combined = { input[2], 0x03, input[0], input[1] }
3457  */
3458  { xxh_u8 const c1 = input[0];
3459  xxh_u8 const c2 = input[len >> 1];
3460  xxh_u8 const c3 = input[len - 1];
3461  xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24)
3462  | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
3463  xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
3464  xxh_u64 const keyed = (xxh_u64)combined ^ bitflip;
3465  return XXH64_avalanche(keyed);
3466  }
3467 }
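/*
 * Worked example: for input = { 0xAB, 0xCD } (len = 2),
 * c1 = 0xAB, c2 = c3 = 0xCD, hence
 * combined = (0xAB << 16) | (0xCD << 24) | 0xCD | (2 << 8) = 0xCDAB02CD.
 */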
3468 
3469 XXH_FORCE_INLINE XXH64_hash_t
3470 XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
3471 {
3472  XXH_ASSERT(input != NULL);
3473  XXH_ASSERT(secret != NULL);
3474  XXH_ASSERT(4 <= len && len <= 8);
3475  seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
3476  { xxh_u32 const input1 = XXH_readLE32(input);
3477  xxh_u32 const input2 = XXH_readLE32(input + len - 4);
3478  xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed;
3479  xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32);
3480  xxh_u64 const keyed = input64 ^ bitflip;
3481  return XXH3_rrmxmx(keyed, len);
3482  }
3483 }
3484 
3485 XXH_FORCE_INLINE XXH64_hash_t
3486 XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
3487 {
3488  XXH_ASSERT(input != NULL);
3489  XXH_ASSERT(secret != NULL);
3490  XXH_ASSERT(9 <= len && len <= 16);
3491  { xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed;
3492  xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed;
3493  xxh_u64 const input_lo = XXH_readLE64(input) ^ bitflip1;
3494  xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2;
3495  xxh_u64 const acc = len
3496  + XXH_swap64(input_lo) + input_hi
3497  + XXH3_mul128_fold64(input_lo, input_hi);
3498  return XXH3_avalanche(acc);
3499  }
3500 }
3501 
3502 XXH_FORCE_INLINE XXH64_hash_t
3503 XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
3504 {
3505  XXH_ASSERT(len <= 16);
3506  { if (XXH_likely(len > 8)) return XXH3_len_9to16_64b(input, len, secret, seed);
3507  if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed);
3508  if (len) return XXH3_len_1to3_64b(input, len, secret, seed);
3509  return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64)));
3510  }
3511 }
3512 
3513 /*
3514  * DISCLAIMER: There are known *seed-dependent* multicollisions here due to
3515  * multiplication by zero, affecting hashes of lengths 17 to 240.
3516  *
3517  * However, they are very unlikely.
3518  *
3519  * Keep this in mind when using the unseeded XXH3_64bits() variant: As with all
3520  * unseeded non-cryptographic hashes, it does not attempt to defend itself
3521  * against specially crafted inputs, only random inputs.
3522  *
3523  * Compared to classic UMAC where a 1 in 2^31 chance of 4 consecutive bytes
3524  * cancelling out the secret is taken an arbitrary number of times (addressed
3525  * in XXH3_accumulate_512), this collision is very unlikely with random inputs
3526  * and/or proper seeding:
3527  *
3528  * This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a
3529  * function that is only called up to 16 times per hash with up to 240 bytes of
3530  * input.
3531  *
3532  * This is not too bad for a non-cryptographic hash function, especially with
3533  * only 64 bit outputs.
3534  *
3535  * The 128-bit variant (which trades some speed for strength) is NOT affected
3536  * by this, although it is always a good idea to use a proper seed if you care
3537  * about strength.
3538  */
3539 XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input,
3540  const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64)
3541 {
3542 #if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
3543  && defined(__i386__) && defined(__SSE2__) /* x86 + SSE2 */ \
3544  && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable like XXH32 hack */
3545  /*
3546  * UGLY HACK:
3547  * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in
3548  * slower code.
3549  *
3550  * By forcing seed64 into a register, we disrupt the cost model and
3551  * cause it to scalarize. See `XXH32_round()`
3552  *
3553  * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600,
3554  * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on
3555  * GCC 9.2, despite both emitting scalar code.
3556  *
3557  * GCC generates much better scalar code than Clang for the rest of XXH3,
3558  * which is why finding a more optimal codepath is an interest.
3559  */
3560  XXH_COMPILER_GUARD(seed64);
3561 #endif
3562  { xxh_u64 const input_lo = XXH_readLE64(input);
3563  xxh_u64 const input_hi = XXH_readLE64(input+8);
3564  return XXH3_mul128_fold64(
3565  input_lo ^ (XXH_readLE64(secret) + seed64),
3566  input_hi ^ (XXH_readLE64(secret+8) - seed64)
3567  );
3568  }
3569 }
3570 
3571 /* For mid-range keys, XXH3 uses a Mum-hash variant. */
3572 XXH_FORCE_INLINE XXH64_hash_t
3573 XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
3574  const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
3575  XXH64_hash_t seed)
3576 {
3577  XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
3578  XXH_ASSERT(16 < len && len <= 128);
3579 
3580  { xxh_u64 acc = len * XXH_PRIME64_1;
3581  if (len > 32) {
3582  if (len > 64) {
3583  if (len > 96) {
3584  acc += XXH3_mix16B(input+48, secret+96, seed);
3585  acc += XXH3_mix16B(input+len-64, secret+112, seed);
3586  }
3587  acc += XXH3_mix16B(input+32, secret+64, seed);
3588  acc += XXH3_mix16B(input+len-48, secret+80, seed);
3589  }
3590  acc += XXH3_mix16B(input+16, secret+32, seed);
3591  acc += XXH3_mix16B(input+len-32, secret+48, seed);
3592  }
3593  acc += XXH3_mix16B(input+0, secret+0, seed);
3594  acc += XXH3_mix16B(input+len-16, secret+16, seed);
3595 
3596  return XXH3_avalanche(acc);
3597  }
3598 }
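/*
 * Coverage example for the nesting above: with len = 100, all four pairs of
 * XXH3_mix16B run, covering input[0..63] from the front and input[36..99]
 * from the back; the two windows overlap, so every byte is mixed at least once.
 */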
3599 
3600 #define XXH3_MIDSIZE_MAX 240
3601 
3602 XXH_NO_INLINE XXH64_hash_t
3603 XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
3604  const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
3605  XXH64_hash_t seed)
3606 {
3607  XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
3608  XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
3609 
3610  #define XXH3_MIDSIZE_STARTOFFSET 3
3611  #define XXH3_MIDSIZE_LASTOFFSET 17
3612 
3613  { xxh_u64 acc = len * XXH_PRIME64_1;
3614  int const nbRounds = (int)len / 16;
3615  int i;
3616  for (i=0; i<8; i++) {
3617  acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed);
3618  }
3619  acc = XXH3_avalanche(acc);
3620  XXH_ASSERT(nbRounds >= 8);
3621 #if defined(__clang__) /* Clang */ \
3622  && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
3623  && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */
3624  /*
3625  * UGLY HACK:
3626  * Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86.
3627  * Everywhere else, it uses scalar code.
3628  *
3629  * For 64->128-bit multiplies, even if the NEON was 100% optimal, it
3630  * would still be slower than UMAAL (see XXH_mult64to128).
3631  *
3632  * Unfortunately, Clang doesn't handle the long multiplies properly and
3633  * converts them to the nonexistent "vmulq_u64" intrinsic, which is then
3634  * scalarized into an ugly mess of VMOV.32 instructions.
3635  *
3636  * This mess is difficult to avoid without turning autovectorization
3637  * off completely, but these issues are usually relatively minor and/or not
3638  * worth fixing.
3639  *
3640  * This loop is the easiest to fix, as unlike XXH32, this pragma
3641  * _actually works_ because it is a loop vectorization instead of an
3642  * SLP vectorization.
3643  */
3644  #pragma clang loop vectorize(disable)
3645 #endif
3646  for (i=8 ; i < nbRounds; i++) {
3647  acc += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed);
3648  }
3649  /* last bytes */
3650  acc += XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed);
3651  return XXH3_avalanche(acc);
3652  }
3653 }
3654 
3655 
3656 /* ======= Long Keys ======= */
3657 
3658 #define XXH_STRIPE_LEN 64
3659 #define XXH_SECRET_CONSUME_RATE 8 /* nb of secret bytes consumed at each accumulation */
3660 #define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64))
3661 
3662 #ifdef XXH_OLD_NAMES
3663 # define STRIPE_LEN XXH_STRIPE_LEN
3664 # define ACC_NB XXH_ACC_NB
3665 #endif
3666 
3667 XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64)
3668 {
3669  if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64);
3670  XXH_memcpy(dst, &v64, sizeof(v64));
3671 }
3672 
3673 /* Several intrinsic functions below are supposed to accept __int64 as argument,
3674  * as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ .
3675  * However, several environments do not define the __int64 type,
3676  * requiring a workaround.
3677  */
3678 #if !defined (__VMS) \
3679  && (defined (__cplusplus) \
3680  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
3681  typedef int64_t xxh_i64;
3682 #else
3683  /* the following type must have a width of 64-bit */
3684  typedef long long xxh_i64;
3685 #endif
3686 
3687 /*
3688  * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized.
3689  *
3690  * It is a hardened version of UMAC, based on FARSH's implementation.
3691  *
3692  * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD
3693  * implementations, and it is ridiculously fast.
3694  *
3695  * We harden it by mixing the original input into the accumulators as well as the product.
3696  *
3697  * This means that in the (relatively likely) case of a multiply by zero, the
3698  * original input is preserved.
3699  *
3700  * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve
3701  * cross-pollination, as otherwise the upper and lower halves would be
3702  * essentially independent.
3703  *
3704  * This doesn't matter on 64-bit hashes since they all get merged together in
3705  * the end, so we skip the extra step.
3706  *
3707  * Both XXH3_64bits and XXH3_128bits use this subroutine.
3708  */
3709 
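/*
 * For reference, one stripe of this accumulation in plain scalar C (a
 * sketch; the vectorized variants below compute the same thing lane-wise):
 *
 *   xxh_u64* const xacc = (xxh_u64*) acc;
 *   size_t i;
 *   for (i = 0; i < XXH_ACC_NB; i++) {
 *       xxh_u64 const data_val = XXH_readLE64((const xxh_u8*)input + 8*i);
 *       xxh_u64 const data_key = data_val ^ XXH_readLE64((const xxh_u8*)secret + 8*i);
 *       xacc[i ^ 1] += data_val;  // swap adjacent lanes, preserving the input
 *       xacc[i] += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32);
 *   }
 */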
3710 #if (XXH_VECTOR == XXH_AVX512) \
3711  || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0)
3712 
3713 #ifndef XXH_TARGET_AVX512
3714 # define XXH_TARGET_AVX512 /* disable attribute target */
3715 #endif
3716 
3717 XXH_FORCE_INLINE XXH_TARGET_AVX512 void
3718 XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc,
3719  const void* XXH_RESTRICT input,
3720  const void* XXH_RESTRICT secret)
3721 {
3722  __m512i* const xacc = (__m512i *) acc;
3723  XXH_ASSERT((((size_t)acc) & 63) == 0);
3724  XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
3725 
3726  {
3727  /* data_vec = input[0]; */
3728  __m512i const data_vec = _mm512_loadu_si512 (input);
3729  /* key_vec = secret[0]; */
3730  __m512i const key_vec = _mm512_loadu_si512 (secret);
3731  /* data_key = data_vec ^ key_vec; */
3732  __m512i const data_key = _mm512_xor_si512 (data_vec, key_vec);
3733  /* data_key_lo = data_key >> 32; */
3734  __m512i const data_key_lo = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
3735  /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
3736  __m512i const product = _mm512_mul_epu32 (data_key, data_key_lo);
3737  /* xacc[0] += swap(data_vec); */
3738  __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2));
3739  __m512i const sum = _mm512_add_epi64(*xacc, data_swap);
3740  /* xacc[0] += product; */
3741  *xacc = _mm512_add_epi64(product, sum);
3742  }
3743 }
3744 
3745 /*
3746  * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing.
3747  *
3748  * Multiplication isn't perfect, as explained by Google in HighwayHash:
3749  *
3750  * // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to
3751  * // varying degrees. In descending order of goodness, bytes
3752  * // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32.
3753  * // As expected, the upper and lower bytes are much worse.
3754  *
3755  * Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291
3756  *
3757  * Since our algorithm uses a pseudorandom secret to add some variance into the
3758  * mix, we don't need to (or want to) mix as often or as much as HighwayHash does.
3759  *
3760  * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid
3761  * extraction.
3762  *
3763  * Both XXH3_64bits and XXH3_128bits use this subroutine.
3764  */
3765 
3766 XXH_FORCE_INLINE XXH_TARGET_AVX512 void
3767 XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
3768 {
3769  XXH_ASSERT((((size_t)acc) & 63) == 0);
3770  XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
3771  { __m512i* const xacc = (__m512i*) acc;
3772  const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1);
3773 
3774  /* xacc[0] ^= (xacc[0] >> 47) */
3775  __m512i const acc_vec = *xacc;
3776  __m512i const shifted = _mm512_srli_epi64 (acc_vec, 47);
3777  __m512i const data_vec = _mm512_xor_si512 (acc_vec, shifted);
3778  /* xacc[0] ^= secret; */
3779  __m512i const key_vec = _mm512_loadu_si512 (secret);
3780  __m512i const data_key = _mm512_xor_si512 (data_vec, key_vec);
3781 
3782  /* xacc[0] *= XXH_PRIME32_1; */
3783  __m512i const data_key_hi = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1));
3784  __m512i const prod_lo = _mm512_mul_epu32 (data_key, prime32);
3785  __m512i const prod_hi = _mm512_mul_epu32 (data_key_hi, prime32);
3786  *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32));
3787  }
3788 }
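/*
 * The same scramble, per 64-bit lane, as a scalar C sketch:
 *
 *   xxh_u64 acc64 = xacc[i];
 *   acc64 ^= acc64 >> 47;                                 // xorshift
 *   acc64 ^= XXH_readLE64((const xxh_u8*)secret + 8*i);   // mix in secret
 *   acc64 *= XXH_PRIME32_1;                               // scramble with prime
 *   xacc[i] = acc64;
 */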
3789 
3790 XXH_FORCE_INLINE XXH_TARGET_AVX512 void
3791 XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
3792 {
3793  XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0);
3794  XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64);
3795  XXH_ASSERT(((size_t)customSecret & 63) == 0);
3796  (void)(&XXH_writeLE64);
3797  { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i);
3798  __m512i const seed = _mm512_mask_set1_epi64(_mm512_set1_epi64((xxh_i64)seed64), 0xAA, (xxh_i64)(0U - seed64));
3799 
3800  const __m512i* const src = (const __m512i*) ((const void*) XXH3_kSecret);
3801  __m512i* const dest = ( __m512i*) customSecret;
3802  int i;
3803  XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */
3804  XXH_ASSERT(((size_t)dest & 63) == 0);
3805  for (i=0; i < nbRounds; ++i) {
3806  /* GCC has a bug: _mm512_stream_load_si512 accepts 'void*', not 'void const*';
3807  * without the union, it would warn "discards 'const' qualifier". */
3808  union {
3809  const __m512i* cp;
3810  void* p;
3811  } remote_const_void;
3812  remote_const_void.cp = src + i;
3813  dest[i] = _mm512_add_epi64(_mm512_stream_load_si512(remote_const_void.p), seed);
3814  } }
3815 }
3816 
3817 #endif
3818 
3819 #if (XXH_VECTOR == XXH_AVX2) \
3820  || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0)
3821 
3822 #ifndef XXH_TARGET_AVX2
3823 # define XXH_TARGET_AVX2 /* disable attribute target */
3824 #endif
3825 
3826 XXH_FORCE_INLINE XXH_TARGET_AVX2 void
3827 XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc,
3828  const void* XXH_RESTRICT input,
3829  const void* XXH_RESTRICT secret)
3830 {
3831  XXH_ASSERT((((size_t)acc) & 31) == 0);
3832  { __m256i* const xacc = (__m256i *) acc;
3833  /* Unaligned. This is mainly for pointer arithmetic, and because
3834  * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
3835  const __m256i* const xinput = (const __m256i *) input;
3836  /* Unaligned. This is mainly for pointer arithmetic, and because
3837  * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
3838  const __m256i* const xsecret = (const __m256i *) secret;
3839 
3840  size_t i;
3841  for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
3842  /* data_vec = xinput[i]; */
3843  __m256i const data_vec = _mm256_loadu_si256 (xinput+i);
3844  /* key_vec = xsecret[i]; */
3845  __m256i const key_vec = _mm256_loadu_si256 (xsecret+i);
3846  /* data_key = data_vec ^ key_vec; */
3847  __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec);
3848  /* data_key_lo = data_key >> 32; */
3849  __m256i const data_key_lo = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
3850  /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
3851  __m256i const product = _mm256_mul_epu32 (data_key, data_key_lo);
3852  /* xacc[i] += swap(data_vec); */
3853  __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
3854  __m256i const sum = _mm256_add_epi64(xacc[i], data_swap);
3855  /* xacc[i] += product; */
3856  xacc[i] = _mm256_add_epi64(product, sum);
3857  } }
3858 }
3859 
3860 XXH_FORCE_INLINE XXH_TARGET_AVX2 void
3861 XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
3862 {
3863  XXH_ASSERT((((size_t)acc) & 31) == 0);
3864  { __m256i* const xacc = (__m256i*) acc;
3865  /* Unaligned. This is mainly for pointer arithmetic, and because
3866  * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
3867  const __m256i* const xsecret = (const __m256i *) secret;
3868  const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1);
3869 
3870  size_t i;
3871  for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
3872  /* xacc[i] ^= (xacc[i] >> 47) */
3873  __m256i const acc_vec = xacc[i];
3874  __m256i const shifted = _mm256_srli_epi64 (acc_vec, 47);
3875  __m256i const data_vec = _mm256_xor_si256 (acc_vec, shifted);
3876  /* xacc[i] ^= xsecret; */
3877  __m256i const key_vec = _mm256_loadu_si256 (xsecret+i);
3878  __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec);
3879 
3880  /* xacc[i] *= XXH_PRIME32_1; */
3881  __m256i const data_key_hi = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
3882  __m256i const prod_lo = _mm256_mul_epu32 (data_key, prime32);
3883  __m256i const prod_hi = _mm256_mul_epu32 (data_key_hi, prime32);
3884  xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32));
3885  }
3886  }
3887 }
3888 
3889 XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
3890 {
3891  XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0);
3892  XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6);
3893  XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64);
3894  (void)(&XXH_writeLE64);
3895  XXH_PREFETCH(customSecret);
3896  { __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64);
3897 
3898  const __m256i* const src = (const __m256i*) ((const void*) XXH3_kSecret);
3899  __m256i* dest = ( __m256i*) customSecret;
3900 
3901 # if defined(__GNUC__) || defined(__clang__)
3902  /*
3903  * On GCC & Clang, marking 'dest' as modified will cause the compiler to:
3904  * - not extract the secret from SSE registers in the internal loop
3905  * - use fewer registers, and avoid pushing these registers onto the stack
3906  */
3907  XXH_COMPILER_GUARD(dest);
3908 # endif
3909  XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */
3910  XXH_ASSERT(((size_t)dest & 31) == 0);
3911 
3912  /* GCC -O2 needs the loop unrolled manually */
3913  dest[0] = _mm256_add_epi64(_mm256_stream_load_si256(src+0), seed);
3914  dest[1] = _mm256_add_epi64(_mm256_stream_load_si256(src+1), seed);
3915  dest[2] = _mm256_add_epi64(_mm256_stream_load_si256(src+2), seed);
3916  dest[3] = _mm256_add_epi64(_mm256_stream_load_si256(src+3), seed);
3917  dest[4] = _mm256_add_epi64(_mm256_stream_load_si256(src+4), seed);
3918  dest[5] = _mm256_add_epi64(_mm256_stream_load_si256(src+5), seed);
3919  }
3920 }
3921 
3922 #endif
3923 
3924 /* x86dispatch always generates SSE2 */
3925 #if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH)
3926 
3927 #ifndef XXH_TARGET_SSE2
3928 # define XXH_TARGET_SSE2 /* disable attribute target */
3929 #endif
3930 
3931 XXH_FORCE_INLINE XXH_TARGET_SSE2 void
3932 XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc,
3933  const void* XXH_RESTRICT input,
3934  const void* XXH_RESTRICT secret)
3935 {
3936  /* SSE2 is just a half-scale version of the AVX2 version. */
3937  XXH_ASSERT((((size_t)acc) & 15) == 0);
3938  { __m128i* const xacc = (__m128i *) acc;
3939  /* Unaligned. This is mainly for pointer arithmetic, and because
3940  * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
3941  const __m128i* const xinput = (const __m128i *) input;
3942  /* Unaligned. This is mainly for pointer arithmetic, and because
3943  * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
3944  const __m128i* const xsecret = (const __m128i *) secret;
3945 
3946  size_t i;
3947  for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
3948  /* data_vec = xinput[i]; */
3949  __m128i const data_vec = _mm_loadu_si128 (xinput+i);
3950  /* key_vec = xsecret[i]; */
3951  __m128i const key_vec = _mm_loadu_si128 (xsecret+i);
3952  /* data_key = data_vec ^ key_vec; */
3953  __m128i const data_key = _mm_xor_si128 (data_vec, key_vec);
3954  /* data_key_lo = data_key >> 32; */
3955  __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
3956  /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
3957  __m128i const product = _mm_mul_epu32 (data_key, data_key_lo);
3958  /* xacc[i] += swap(data_vec); */
3959  __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2));
3960  __m128i const sum = _mm_add_epi64(xacc[i], data_swap);
3961  /* xacc[i] += product; */
3962  xacc[i] = _mm_add_epi64(product, sum);
3963  } }
3964 }
3965 
3966 XXH_FORCE_INLINE XXH_TARGET_SSE2 void
3967 XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
3968 {
3969  XXH_ASSERT((((size_t)acc) & 15) == 0);
3970  { __m128i* const xacc = (__m128i*) acc;
3971  /* Unaligned. This is mainly for pointer arithmetic, and because
3972  * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
3973  const __m128i* const xsecret = (const __m128i *) secret;
3974  const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1);
3975 
3976  size_t i;
3977  for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
3978  /* xacc[i] ^= (xacc[i] >> 47) */
3979  __m128i const acc_vec = xacc[i];
3980  __m128i const shifted = _mm_srli_epi64 (acc_vec, 47);
3981  __m128i const data_vec = _mm_xor_si128 (acc_vec, shifted);
3982  /* xacc[i] ^= xsecret[i]; */
3983  __m128i const key_vec = _mm_loadu_si128 (xsecret+i);
3984  __m128i const data_key = _mm_xor_si128 (data_vec, key_vec);
3985 
3986  /* xacc[i] *= XXH_PRIME32_1; */
3987  __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
3988  __m128i const prod_lo = _mm_mul_epu32 (data_key, prime32);
3989  __m128i const prod_hi = _mm_mul_epu32 (data_key_hi, prime32);
3990  xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32));
3991  }
3992  }
3993 }
3994 
3995 XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
3996 {
3997  XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
3998  (void)(&XXH_writeLE64);
3999  { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i);
4000 
4001 # if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
4002  /* MSVC 32bit mode does not support _mm_set_epi64x before 2015 */
4003  XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) };
4004  __m128i const seed = _mm_load_si128((__m128i const*)seed64x2);
4005 # else
4006  __m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64);
4007 # endif
4008  int i;
4009 
4010  const void* const src16 = XXH3_kSecret;
4011  __m128i* dst16 = (__m128i*) customSecret;
4012 # if defined(__GNUC__) || defined(__clang__)
4013  /*
4014  * On GCC & Clang, marking 'dst16' as modified causes the compiler to:
4015  * - not extract the secret from sse registers in the internal loop
4016  * - use fewer common registers, and avoid pushing these registers onto the stack
4017  */
4018  XXH_COMPILER_GUARD(dst16);
4019 # endif
4020  XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */
4021  XXH_ASSERT(((size_t)dst16 & 15) == 0);
4022 
4023  for (i=0; i < nbRounds; ++i) {
4024  dst16[i] = _mm_add_epi64(_mm_load_si128((const __m128i *)src16+i), seed);
4025  } }
4026 }
4027 
4028 #endif
4029 
4030 #if (XXH_VECTOR == XXH_NEON)
4031 
4032 XXH_FORCE_INLINE void
4033 XXH3_accumulate_512_neon( void* XXH_RESTRICT acc,
4034  const void* XXH_RESTRICT input,
4035  const void* XXH_RESTRICT secret)
4036 {
4037  XXH_ASSERT((((size_t)acc) & 15) == 0);
4038  {
4039  uint64x2_t* const xacc = (uint64x2_t *) acc;
4040  /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */
4041  uint8_t const* const xinput = (const uint8_t *) input;
4042  uint8_t const* const xsecret = (const uint8_t *) secret;
4043 
4044  size_t i;
4045  for (i=0; i < XXH_STRIPE_LEN / sizeof(uint64x2_t); i++) {
4046  /* data_vec = xinput[i]; */
4047  uint8x16_t data_vec = vld1q_u8(xinput + (i * 16));
4048  /* key_vec = xsecret[i]; */
4049  uint8x16_t key_vec = vld1q_u8(xsecret + (i * 16));
4050  uint64x2_t data_key;
4051  uint32x2_t data_key_lo, data_key_hi;
4052  /* xacc[i] += swap(data_vec); */
4053  uint64x2_t const data64 = vreinterpretq_u64_u8(data_vec);
4054  uint64x2_t const swapped = vextq_u64(data64, data64, 1);
4055  xacc[i] = vaddq_u64 (xacc[i], swapped);
4056  /* data_key = data_vec ^ key_vec; */
4057  data_key = vreinterpretq_u64_u8(veorq_u8(data_vec, key_vec));
4058  /* data_key_lo = (uint32x2_t) (data_key & 0xFFFFFFFF);
4059  * data_key_hi = (uint32x2_t) (data_key >> 32);
4060  * data_key = UNDEFINED; */
4061  XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
4062  /* xacc[i] += (uint64x2_t) data_key_lo * (uint64x2_t) data_key_hi; */
4063  xacc[i] = vmlal_u32 (xacc[i], data_key_lo, data_key_hi);
4064 
4065  }
4066  }
4067 }
4068 
4069 XXH_FORCE_INLINE void
4070 XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4071 {
4072  XXH_ASSERT((((size_t)acc) & 15) == 0);
4073 
4074  { uint64x2_t* xacc = (uint64x2_t*) acc;
4075  uint8_t const* xsecret = (uint8_t const*) secret;
4076  uint32x2_t prime = vdup_n_u32 (XXH_PRIME32_1);
4077 
4078  size_t i;
4079  for (i=0; i < XXH_STRIPE_LEN/sizeof(uint64x2_t); i++) {
4080  /* xacc[i] ^= (xacc[i] >> 47); */
4081  uint64x2_t acc_vec = xacc[i];
4082  uint64x2_t shifted = vshrq_n_u64 (acc_vec, 47);
4083  uint64x2_t data_vec = veorq_u64 (acc_vec, shifted);
4084 
4085  /* xacc[i] ^= xsecret[i]; */
4086  uint8x16_t key_vec = vld1q_u8 (xsecret + (i * 16));
4087  uint64x2_t data_key = veorq_u64 (data_vec, vreinterpretq_u64_u8(key_vec));
4088 
4089  /* xacc[i] *= XXH_PRIME32_1 */
4090  uint32x2_t data_key_lo, data_key_hi;
4091  /* data_key_lo = (uint32x2_t) (data_key & 0xFFFFFFFF);
4092  * data_key_hi = (uint32x2_t) (data_key >> 32);
4093  * data_key = UNDEFINED; */
4094  XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi);
4095  { /*
4096  * prod_hi = (data_key >> 32) * XXH_PRIME32_1;
4097  *
4098  * Avoid vmul_u32 + vshll_n_u32 since Clang 6 and 7 will
4099  * incorrectly "optimize" this:
4100  * tmp = vmul_u32(vmovn_u64(a), vmovn_u64(b));
4101  * shifted = vshll_n_u32(tmp, 32);
4102  * to this:
4103  * tmp = "vmulq_u64"(a, b); // no such thing!
4104  * shifted = vshlq_n_u64(tmp, 32);
4105  *
4106  * However, unlike SSE, Clang lacks a 64-bit multiply routine
4107  * for NEON, and it scalarizes two 64-bit multiplies instead.
4108  *
4109  * vmull_u32 has the same timing as vmul_u32, and it avoids
4110  * this bug completely.
4111  * See https://bugs.llvm.org/show_bug.cgi?id=39967
4112  */
4113  uint64x2_t prod_hi = vmull_u32 (data_key_hi, prime);
4114  /* xacc[i] = prod_hi << 32; */
4115  xacc[i] = vshlq_n_u64(prod_hi, 32);
4116  /* xacc[i] += (data_key & 0xFFFFFFFF) * XXH_PRIME32_1; */
4117  xacc[i] = vmlal_u32(xacc[i], data_key_lo, prime);
4118  }
4119  } }
4120 }
4121 
4122 #endif
4123 
4124 #if (XXH_VECTOR == XXH_VSX)
4125 
4126 XXH_FORCE_INLINE void
4127 XXH3_accumulate_512_vsx( void* XXH_RESTRICT acc,
4128  const void* XXH_RESTRICT input,
4129  const void* XXH_RESTRICT secret)
4130 {
4131  /* presumed aligned */
4132  unsigned long long* const xacc = (unsigned long long*) acc;
4133  xxh_u64x2 const* const xinput = (xxh_u64x2 const*) input; /* no alignment restriction */
4134  xxh_u64x2 const* const xsecret = (xxh_u64x2 const*) secret; /* no alignment restriction */
4135  xxh_u64x2 const v32 = { 32, 32 };
4136  size_t i;
4137  for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
4138  /* data_vec = xinput[i]; */
4139  xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + i);
4140  /* key_vec = xsecret[i]; */
4141  xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + i);
4142  xxh_u64x2 const data_key = data_vec ^ key_vec;
4143  /* shuffled = (data_key << 32) | (data_key >> 32); */
4144  xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32);
4145  /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */
4146  xxh_u64x2 const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled);
4147  /* acc_vec = xacc[i]; */
4148  xxh_u64x2 acc_vec = vec_xl(0, xacc + 2 * i);
4149  acc_vec += product;
4150 
4151  /* swap high and low halves */
4152 #ifdef __s390x__
4153  acc_vec += vec_permi(data_vec, data_vec, 2);
4154 #else
4155  acc_vec += vec_xxpermdi(data_vec, data_vec, 2);
4156 #endif
4157  /* xacc[i] = acc_vec; */
4158  vec_xst(acc_vec, 0, xacc + 2 * i);
4159  }
4160 }
4161 
4162 XXH_FORCE_INLINE void
4163 XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4164 {
4165  XXH_ASSERT((((size_t)acc) & 15) == 0);
4166 
4167  { xxh_u64x2* const xacc = (xxh_u64x2*) acc;
4168  const xxh_u64x2* const xsecret = (const xxh_u64x2*) secret;
4169  /* constants */
4170  xxh_u64x2 const v32 = { 32, 32 };
4171  xxh_u64x2 const v47 = { 47, 47 };
4172  xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 };
4173  size_t i;
4174  for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
4175  /* xacc[i] ^= (xacc[i] >> 47); */
4176  xxh_u64x2 const acc_vec = xacc[i];
4177  xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47);
4178 
4179  /* xacc[i] ^= xsecret[i]; */
4180  xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + i);
4181  xxh_u64x2 const data_key = data_vec ^ key_vec;
4182 
4183  /* xacc[i] *= XXH_PRIME32_1 */
4184  /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF); */
4185  xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime);
4186  /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32); */
4187  xxh_u64x2 const prod_odd = XXH_vec_mulo((xxh_u32x4)data_key, prime);
4188  xacc[i] = prod_odd + (prod_even << v32);
4189  } }
4190 }
4191 
4192 #endif
4193 
4194 /* scalar variants - universal */
4195 
4196 XXH_FORCE_INLINE void
4197 XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc,
4198  const void* XXH_RESTRICT input,
4199  const void* XXH_RESTRICT secret)
4200 {
4201  xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */
4202  const xxh_u8* const xinput = (const xxh_u8*) input; /* no alignment restriction */
4203  const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */
4204  size_t i;
4205  XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0);
4206  for (i=0; i < XXH_ACC_NB; i++) {
4207  xxh_u64 const data_val = XXH_readLE64(xinput + 8*i);
4208  xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + i*8);
4209  xacc[i ^ 1] += data_val; /* swap adjacent lanes */
4210  xacc[i] += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32);
4211  }
4212 }
4213 
4214 XXH_FORCE_INLINE void
4215 XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4216 {
4217  xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */
4218  const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */
4219  size_t i;
4220  XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0);
4221  for (i=0; i < XXH_ACC_NB; i++) {
4222  xxh_u64 const key64 = XXH_readLE64(xsecret + 8*i);
4223  xxh_u64 acc64 = xacc[i];
4224  acc64 = XXH_xorshift64(acc64, 47);
4225  acc64 ^= key64;
4226  acc64 *= XXH_PRIME32_1;
4227  xacc[i] = acc64;
4228  }
4229 }
4230 
4231 XXH_FORCE_INLINE void
4232 XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
4233 {
4234  /*
4235  * We need a separate pointer for the hack below,
4236  * which requires a non-const pointer.
4237  * Any decent compiler will optimize this out otherwise.
4238  */
4239  const xxh_u8* kSecretPtr = XXH3_kSecret;
4240  XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
4241 
4242 #if defined(__clang__) && defined(__aarch64__)
4243  /*
4244  * UGLY HACK:
4245  * Clang generates a bunch of MOV/MOVK pairs for aarch64, and they are
4246  * placed sequentially, in order, at the top of the unrolled loop.
4247  *
4248  * While MOVK is great for generating constants (2 cycles for a 64-bit
4249  * constant compared to 4 cycles for LDR), long MOVK chains stall the
4250  * integer pipelines:
4251  * I L S
4252  * MOVK
4253  * MOVK
4254  * MOVK
4255  * MOVK
4256  * ADD
4257  * SUB STR
4258  * STR
4259  * By forcing loads from memory (as the asm line causes Clang to assume
4260  * that kSecretPtr has been changed), the pipelines are used more
4261  * efficiently:
4262  * I L S
4263  * LDR
4264  * ADD LDR
4265  * SUB STR
4266  * STR
4267  * XXH3_64bits_withSeed, len == 256, Snapdragon 835
4268  * without hack: 2654.4 MB/s
4269  * with hack: 3202.9 MB/s
4270  */
4271  XXH_COMPILER_GUARD(kSecretPtr);
4272 #endif
4273  /*
4274  * Note: in debug mode, this overrides the asm optimization
4275  * and Clang will emit MOVK chains again.
4276  */
4277  XXH_ASSERT(kSecretPtr == XXH3_kSecret);
4278 
4279  { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
4280  int i;
4281  for (i=0; i < nbRounds; i++) {
4282  /*
4283  * The asm hack causes Clang to assume that kSecretPtr aliases with
4284  * customSecret, and on aarch64, this prevented LDP from merging two
4285  * loads together for free. Putting the loads together before the stores
4286  * properly generates LDP.
4287  */
4288  xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i) + seed64;
4289  xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64;
4290  XXH_writeLE64((xxh_u8*)customSecret + 16*i, lo);
4291  XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi);
4292  } }
4293 }
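/*
 * Editor's note (worked example, not from the original source): each 16-byte
 * pair of the derived secret is (kSecret_lo + seed64, kSecret_hi - seed64),
 * so the sum of the two words of any pair is seed-independent:
 * lo + hi == kSecret_lo + kSecret_hi (mod 2^64), whatever seed64 is.
 */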
4294 
4295 
4296 typedef void (*XXH3_f_accumulate_512)(void* XXH_RESTRICT, const void*, const void*);
4297 typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*);
4298 typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64);
4299 
4300 
4301 #if (XXH_VECTOR == XXH_AVX512)
4302 
4303 #define XXH3_accumulate_512 XXH3_accumulate_512_avx512
4304 #define XXH3_scrambleAcc XXH3_scrambleAcc_avx512
4305 #define XXH3_initCustomSecret XXH3_initCustomSecret_avx512
4306 
4307 #elif (XXH_VECTOR == XXH_AVX2)
4308 
4309 #define XXH3_accumulate_512 XXH3_accumulate_512_avx2
4310 #define XXH3_scrambleAcc XXH3_scrambleAcc_avx2
4311 #define XXH3_initCustomSecret XXH3_initCustomSecret_avx2
4312 
4313 #elif (XXH_VECTOR == XXH_SSE2)
4314 
4315 #define XXH3_accumulate_512 XXH3_accumulate_512_sse2
4316 #define XXH3_scrambleAcc XXH3_scrambleAcc_sse2
4317 #define XXH3_initCustomSecret XXH3_initCustomSecret_sse2
4318 
4319 #elif (XXH_VECTOR == XXH_NEON)
4320 
4321 #define XXH3_accumulate_512 XXH3_accumulate_512_neon
4322 #define XXH3_scrambleAcc XXH3_scrambleAcc_neon
4323 #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
4324 
4325 #elif (XXH_VECTOR == XXH_VSX)
4326 
4327 #define XXH3_accumulate_512 XXH3_accumulate_512_vsx
4328 #define XXH3_scrambleAcc XXH3_scrambleAcc_vsx
4329 #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
4330 
4331 #else /* scalar */
4332 
4333 #define XXH3_accumulate_512 XXH3_accumulate_512_scalar
4334 #define XXH3_scrambleAcc XXH3_scrambleAcc_scalar
4335 #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
4336 
4337 #endif
4338 
4339 
4340 
4341 #ifndef XXH_PREFETCH_DIST
4342 # ifdef __clang__
4343 # define XXH_PREFETCH_DIST 320
4344 # else
4345 # if (XXH_VECTOR == XXH_AVX512)
4346 # define XXH_PREFETCH_DIST 512
4347 # else
4348 # define XXH_PREFETCH_DIST 384
4349 # endif
4350 # endif /* __clang__ */
4351 #endif /* XXH_PREFETCH_DIST */
4352 
4353 /*
4354  * XXH3_accumulate()
4355  * Loops over XXH3_accumulate_512().
4356  * Assumption: nbStripes will not overflow the secret size
4357  */
4358 XXH_FORCE_INLINE void
4359 XXH3_accumulate( xxh_u64* XXH_RESTRICT acc,
4360  const xxh_u8* XXH_RESTRICT input,
4361  const xxh_u8* XXH_RESTRICT secret,
4362  size_t nbStripes,
4363  XXH3_f_accumulate_512 f_acc512)
4364 {
4365  size_t n;
4366  for (n = 0; n < nbStripes; n++ ) {
4367  const xxh_u8* const in = input + n*XXH_STRIPE_LEN;
4368  XXH_PREFETCH(in + XXH_PREFETCH_DIST);
4369  f_acc512(acc,
4370  in,
4371  secret + n*XXH_SECRET_CONSUME_RATE);
4372  }
4373 }
4374 
4375 XXH_FORCE_INLINE void
4376 XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc,
4377  const xxh_u8* XXH_RESTRICT input, size_t len,
4378  const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
4379  XXH3_f_accumulate_512 f_acc512,
4380  XXH3_f_scrambleAcc f_scramble)
4381 {
4382  size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE;
4383  size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock;
4384  size_t const nb_blocks = (len - 1) / block_len;
4385 
4386  size_t n;
4387 
4388  XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
4389 
4390  for (n = 0; n < nb_blocks; n++) {
4391  XXH3_accumulate(acc, input + n*block_len, secret, nbStripesPerBlock, f_acc512);
4392  f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN);
4393  }
4394 
4395  /* last partial block */
4396  XXH_ASSERT(len > XXH_STRIPE_LEN);
4397  { size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN;
4398  XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE));
4399  XXH3_accumulate(acc, input + nb_blocks*block_len, secret, nbStripes, f_acc512);
4400 
4401  /* last stripe */
4402  { const xxh_u8* const p = input + len - XXH_STRIPE_LEN;
4403 #define XXH_SECRET_LASTACC_START 7 /* not aligned on 8, last secret is different from acc & scrambler */
4404  f_acc512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START);
4405  } }
4406 }
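/*
 * Editor's note (worked example, not from the original source): with the
 * default 192-byte secret, nbStripesPerBlock = (192 - 64) / 8 = 16, hence
 * block_len = 16 * XXH_STRIPE_LEN = 1024 bytes: the accumulators are
 * scrambled once per KB of input, using the last 64 bytes of the secret.
 */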
4407 
4408 XXH_FORCE_INLINE xxh_u64
4409 XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret)
4410 {
4411  return XXH3_mul128_fold64(
4412  acc[0] ^ XXH_readLE64(secret),
4413  acc[1] ^ XXH_readLE64(secret+8) );
4414 }
4415 
4416 static XXH64_hash_t
4417 XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start)
4418 {
4419  xxh_u64 result64 = start;
4420  size_t i = 0;
4421 
4422  for (i = 0; i < 4; i++) {
4423  result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i);
4424 #if defined(__clang__) /* Clang */ \
4425  && (defined(__arm__) || defined(__thumb__)) /* ARMv7 */ \
4426  && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
4427  && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */
4428  /*
4429  * UGLY HACK:
4430  * Prevent autovectorization on Clang ARMv7-a. Exact same problem as
4431  * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b.
4432  * XXH3_64bits, len == 256, Snapdragon 835:
4433  * without hack: 2063.7 MB/s
4434  * with hack: 2560.7 MB/s
4435  */
4436  XXH_COMPILER_GUARD(result64);
4437 #endif
4438  }
4439 
4440  return XXH3_avalanche(result64);
4441 }
4442 
4443 #define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \
4444  XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 }
4445 
4446 XXH_FORCE_INLINE XXH64_hash_t
4447 XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len,
4448  const void* XXH_RESTRICT secret, size_t secretSize,
4449  XXH3_f_accumulate_512 f_acc512,
4450  XXH3_f_scrambleAcc f_scramble)
4451 {
4452  XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
4453 
4454  XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc512, f_scramble);
4455 
4456  /* converge into final hash */
4457  XXH_STATIC_ASSERT(sizeof(acc) == 64);
4458  /* do not align on 8, so that the secret is different from the accumulator */
4459 #define XXH_SECRET_MERGEACCS_START 11
4460  XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
4461  return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1);
4462 }
4463 
4464 /*
4465  * It's important for performance to transmit the secret's size (when it's static)
4466  * so that the compiler can properly optimize the vectorized loop.
4467  * This makes a big performance difference for "medium" keys (<1 KB) when using the AVX instruction set.
4468  */
4469 XXH_FORCE_INLINE XXH64_hash_t
4470 XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len,
4471  XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
4472 {
4473  (void)seed64;
4474  return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate_512, XXH3_scrambleAcc);
4475 }
4476 
4477 /*
4478  * It's preferable for performance that XXH3_hashLong is not inlined,
4479  * as it results in a smaller function for small data, easier on the instruction cache.
4480  * Note that inside this no_inline function, we do inline the internal loop,
4481  * and provide a statically defined secret size to allow optimization of the vector loop.
4482  */
4483 XXH_NO_INLINE XXH64_hash_t
4484 XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len,
4485  XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
4486 {
4487  (void)seed64; (void)secret; (void)secretLen;
4488  return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate_512, XXH3_scrambleAcc);
4489 }
4490 
4491 /*
4492  * XXH3_hashLong_64b_withSeed():
4493  * Generate a custom key by altering the default XXH3_kSecret with the seed,
4494  * and then use this key for long mode hashing.
4495  *
4496  * This operation is decently fast but nonetheless costs a little bit of time.
4497  * Try to avoid it whenever possible (typically when seed==0).
4498  *
4499  * It's important for performance that XXH3_hashLong is not inlined. Not sure
4500  * why (uop cache maybe?), but the difference is large and easily measurable.
4501  */
4502 XXH_FORCE_INLINE XXH64_hash_t
4503 XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len,
4504  XXH64_hash_t seed,
4505  XXH3_f_accumulate_512 f_acc512,
4506  XXH3_f_scrambleAcc f_scramble,
4507  XXH3_f_initCustomSecret f_initSec)
4508 {
4509  if (seed == 0)
4510  return XXH3_hashLong_64b_internal(input, len,
4511  XXH3_kSecret, sizeof(XXH3_kSecret),
4512  f_acc512, f_scramble);
4513  { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
4514  f_initSec(secret, seed);
4515  return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret),
4516  f_acc512, f_scramble);
4517  }
4518 }
4519 
4520 /*
4521  * It's important for performance that XXH3_hashLong is not inlined.
4522  */
4523 XXH_NO_INLINE XXH64_hash_t
4524 XXH3_hashLong_64b_withSeed(const void* input, size_t len,
4525  XXH64_hash_t seed, const xxh_u8* secret, size_t secretLen)
4526 {
4527  (void)secret; (void)secretLen;
4528  return XXH3_hashLong_64b_withSeed_internal(input, len, seed,
4529  XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret);
4530 }
4531 
4532 
4533 typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t,
4534  XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t);
4535 
4536 XXH_FORCE_INLINE XXH64_hash_t
4537 XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len,
4538  XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
4539  XXH3_hashLong64_f f_hashLong)
4540 {
4541  XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
4542  /*
4543  * If an action is to be taken if `secretLen` condition is not respected,
4544  * it should be done here.
4545  * For now, it's a contract pre-condition.
4546  * Adding a check and a branch here would cost performance at every hash.
4547  * Also, note that function signature doesn't offer room to return an error.
4548  */
4549  if (len <= 16)
4550  return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
4551  if (len <= 128)
4552  return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
4553  if (len <= XXH3_MIDSIZE_MAX)
4554  return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
4555  return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen);
4556 }
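/*
 * Editor's note (dispatch summary): with the default constants, the cutoffs
 * above are 0-16 bytes, 17-128 bytes, 129-240 bytes (XXH3_MIDSIZE_MAX == 240),
 * and anything longer goes through f_hashLong, i.e. the striped loop above.
 */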
4557 
4558 
4559 /* === Public entry point === */
4560 
4562 XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* input, size_t len)
4563 {
4564  return XXH3_64bits_internal(input, len, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default);
4565 }
4566 
4568 XXH_PUBLIC_API XXH64_hash_t
4569 XXH3_64bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize)
4570 {
4571  return XXH3_64bits_internal(input, len, 0, secret, secretSize, XXH3_hashLong_64b_withSecret);
4572 }
4573 
4575 XXH_PUBLIC_API XXH64_hash_t
4576 XXH3_64bits_withSeed(const void* input, size_t len, XXH64_hash_t seed)
4577 {
4578  return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed);
4579 }
4580 
4581 XXH_PUBLIC_API XXH64_hash_t
4582 XXH3_64bits_withSecretandSeed(const void* input, size_t len, const void* secret, size_t secretSize, XXH64_hash_t seed)
4583 {
4584  if (len <= XXH3_MIDSIZE_MAX)
4585  return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
4586  return XXH3_hashLong_64b_withSecret(input, len, seed, (const xxh_u8*)secret, secretSize);
4587 }
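/*
 * Editor's note: a minimal usage sketch of the one-shot entry points above.
 * The sample function name and inputs are illustrative, not part of the
 * library; the block is compiled out.
 */
#if 0   /* illustration only */
#include <stdio.h>

static void example_oneshot_64(void)
{
    const char data[] = "xxhash example input";
    /* seedless: fastest path, uses the built-in default secret */
    XXH64_hash_t const h0 = XXH3_64bits(data, sizeof(data) - 1);
    /* seeded: alters the default secret when len > XXH3_MIDSIZE_MAX */
    XXH64_hash_t const h1 = XXH3_64bits_withSeed(data, sizeof(data) - 1, 20120520);
    printf("%016llx %016llx\n", (unsigned long long)h0, (unsigned long long)h1);
}
#endif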
4588 
4589 
4590 /* === XXH3 streaming === */
4591 
4592 /*
4593  * Malloc's a pointer that is always aligned to align.
4594  *
4595  * This must be freed with `XXH_alignedFree()`.
4596  *
4597  * malloc typically guarantees 16 byte alignment on 64-bit systems and 8 byte
4598  * alignment on 32-bit. This isn't enough for the 32 byte aligned loads in AVX2,
4599  * nor, on 32-bit, for the 16 byte aligned loads in SSE2 and NEON.
4600  *
4601  * This underalignment previously caused a rather obvious crash which went
4602  * completely unnoticed due to XXH3_createState() not actually being tested.
4603  * Credit to RedSpah for noticing this bug.
4604  *
4605  * The alignment is done manually: Functions like posix_memalign or _mm_malloc
4606  * are avoided: To maintain portability, we would have to write a fallback
4607  * like this anyway, and besides, testing for the existence of library
4608  * functions without relying on external build tools is impossible.
4609  *
4610  * The method is simple: Overallocate, manually align, and store the offset
4611  * to the original behind the returned pointer.
4612  *
4613  * Align must be a power of 2 and 8 <= align <= 128.
4614  */
4615 static void* XXH_alignedMalloc(size_t s, size_t align)
4616 {
4617  XXH_ASSERT(align <= 128 && align >= 8); /* range check */
4618  XXH_ASSERT((align & (align-1)) == 0); /* power of 2 */
4619  XXH_ASSERT(s != 0 && s < (s + align)); /* empty/overflow */
4620  { /* Overallocate to make room for manual realignment and an offset byte */
4621  xxh_u8* base = (xxh_u8*)XXH_malloc(s + align);
4622  if (base != NULL) {
4623  /*
4624  * Get the offset needed to align this pointer.
4625  *
4626  * Even if the returned pointer is aligned, there will always be
4627  * at least one byte to store the offset to the original pointer.
4628  */
4629  size_t offset = align - ((size_t)base & (align - 1)); /* base % align */
4630  /* Add the offset for the now-aligned pointer */
4631  xxh_u8* ptr = base + offset;
4632 
4633  XXH_ASSERT((size_t)ptr % align == 0);
4634 
4635  /* Store the offset immediately before the returned pointer. */
4636  ptr[-1] = (xxh_u8)offset;
4637  return ptr;
4638  }
4639  return NULL;
4640  }
4641 }
4642 /*
4643  * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass
4644  * normal malloc'd pointers, XXH_alignedMalloc has a specific data layout.
4645  */
4646 static void XXH_alignedFree(void* p)
4647 {
4648  if (p != NULL) {
4649  xxh_u8* ptr = (xxh_u8*)p;
4650  /* Get the offset byte we added in XXH_alignedMalloc. */
4651  xxh_u8 offset = ptr[-1];
4652  /* Free the original malloc'd pointer */
4653  xxh_u8* base = ptr - offset;
4654  XXH_free(base);
4655  }
4656 }
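/*
 * Editor's note: a minimal sketch of the allocation contract above, with a
 * hypothetical caller. E.g. with align == 8 and base == 0x1003, offset is
 * 8 - 3 = 5, the returned pointer is 0x1008, and the byte at 0x1007 stores 5.
 * The block is compiled out.
 */
#if 0   /* illustration only */
static void example_aligned_alloc(void)
{
    void* const p = XXH_alignedMalloc(sizeof(XXH3_state_t), 64);
    if (p != NULL) {
        /* here ((size_t)p & 63) == 0 is guaranteed */
        XXH_alignedFree(p);   /* must not be passed to free() */
    }
}
#endif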
4658 XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void)
4659 {
4660  XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64);
4661  if (state==NULL) return NULL;
4662  XXH3_INITSTATE(state);
4663  return state;
4664 }
4665 
4667 XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr)
4668 {
4669  XXH_alignedFree(statePtr);
4670  return XXH_OK;
4671 }
4672 
4674 XXH_PUBLIC_API void
4675 XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state)
4676 {
4677  XXH_memcpy(dst_state, src_state, sizeof(*dst_state));
4678 }
4679 
4680 static void
4681 XXH3_reset_internal(XXH3_state_t* statePtr,
4682  XXH64_hash_t seed,
4683  const void* secret, size_t secretSize)
4684 {
4685  size_t const initStart = offsetof(XXH3_state_t, bufferedSize);
4686  size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart;
4687  XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart);
4688  XXH_ASSERT(statePtr != NULL);
4689  /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */
4690  memset((char*)statePtr + initStart, 0, initLength);
4691  statePtr->acc[0] = XXH_PRIME32_3;
4692  statePtr->acc[1] = XXH_PRIME64_1;
4693  statePtr->acc[2] = XXH_PRIME64_2;
4694  statePtr->acc[3] = XXH_PRIME64_3;
4695  statePtr->acc[4] = XXH_PRIME64_4;
4696  statePtr->acc[5] = XXH_PRIME32_2;
4697  statePtr->acc[6] = XXH_PRIME64_5;
4698  statePtr->acc[7] = XXH_PRIME32_1;
4699  statePtr->seed = seed;
4700  statePtr->useSeed = (seed != 0);
4701  statePtr->extSecret = (const unsigned char*)secret;
4702  XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
4703  statePtr->secretLimit = secretSize - XXH_STRIPE_LEN;
4704  statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE;
4705 }
4706 
4708 XXH_PUBLIC_API XXH_errorcode
4709 XXH3_64bits_reset(XXH3_state_t* statePtr)
4710 {
4711  if (statePtr == NULL) return XXH_ERROR;
4712  XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
4713  return XXH_OK;
4714 }
4715 
4717 XXH_PUBLIC_API XXH_errorcode
4718 XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize)
4719 {
4720  if (statePtr == NULL) return XXH_ERROR;
4721  XXH3_reset_internal(statePtr, 0, secret, secretSize);
4722  if (secret == NULL) return XXH_ERROR;
4723  if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
4724  return XXH_OK;
4725 }
4726 
4728 XXH_PUBLIC_API XXH_errorcode
4729 XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed)
4730 {
4731  if (statePtr == NULL) return XXH_ERROR;
4732  if (seed==0) return XXH3_64bits_reset(statePtr);
4733  if ((seed != statePtr->seed) || (statePtr->extSecret != NULL))
4734  XXH3_initCustomSecret(statePtr->customSecret, seed);
4735  XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
4736  return XXH_OK;
4737 }
4738 
4740 XXH_PUBLIC_API XXH_errorcode
4741 XXH3_64bits_reset_withSecretandSeed(XXH3_state_t* statePtr, const void* secret, size_t secretSize, XXH64_hash_t seed64)
4742 {
4743  if (statePtr == NULL) return XXH_ERROR;
4744  if (secret == NULL) return XXH_ERROR;
4745  if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
4746  XXH3_reset_internal(statePtr, seed64, secret, secretSize);
4747  statePtr->useSeed = 1; /* always, even if seed64==0 */
4748  return XXH_OK;
4749 }
4750 
4751 /* Note : when XXH3_consumeStripes() is invoked,
4752  * there must be a guarantee that at least one more byte will be consumed from input
4753  * so that the function can blindly consume all stripes using the "normal" secret segment */
4754 XXH_FORCE_INLINE void
4755 XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc,
4756  size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock,
4757  const xxh_u8* XXH_RESTRICT input, size_t nbStripes,
4758  const xxh_u8* XXH_RESTRICT secret, size_t secretLimit,
4759  XXH3_f_accumulate_512 f_acc512,
4760  XXH3_f_scrambleAcc f_scramble)
4761 {
4762  XXH_ASSERT(nbStripes <= nbStripesPerBlock); /* can handle max 1 scramble per invocation */
4763  XXH_ASSERT(*nbStripesSoFarPtr < nbStripesPerBlock);
4764  if (nbStripesPerBlock - *nbStripesSoFarPtr <= nbStripes) {
4765  /* need a scrambling operation */
4766  size_t const nbStripesToEndofBlock = nbStripesPerBlock - *nbStripesSoFarPtr;
4767  size_t const nbStripesAfterBlock = nbStripes - nbStripesToEndofBlock;
4768  XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripesToEndofBlock, f_acc512);
4769  f_scramble(acc, secret + secretLimit);
4770  XXH3_accumulate(acc, input + nbStripesToEndofBlock * XXH_STRIPE_LEN, secret, nbStripesAfterBlock, f_acc512);
4771  *nbStripesSoFarPtr = nbStripesAfterBlock;
4772  } else {
4773  XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripes, f_acc512);
4774  *nbStripesSoFarPtr += nbStripes;
4775  }
4776 }
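/*
 * Editor's note (worked example, not from the original source): with
 * nbStripesPerBlock == 16 and *nbStripesSoFarPtr == 10, a call with
 * nbStripes == 9 takes the scramble path: 6 stripes finish the block
 * (secret offset 10*8 = 80), the accumulators get scrambled, the remaining
 * 3 stripes restart from the secret's start, and *nbStripesSoFarPtr == 3.
 */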
4777 
4778 #ifndef XXH3_STREAM_USE_STACK
4779 # ifndef __clang__ /* clang doesn't need additional stack space */
4780 # define XXH3_STREAM_USE_STACK 1
4781 # endif
4782 #endif
4783 /*
4784  * Both XXH3_64bits_update and XXH3_128bits_update use this routine.
4785  */
4786 XXH_FORCE_INLINE XXH_errorcode
4787 XXH3_update(XXH3_state_t* XXH_RESTRICT const state,
4788  const xxh_u8* XXH_RESTRICT input, size_t len,
4789  XXH3_f_accumulate_512 f_acc512,
4790  XXH3_f_scrambleAcc f_scramble)
4791 {
4792  if (input==NULL) {
4793  XXH_ASSERT(len == 0);
4794  return XXH_OK;
4795  }
4796 
4797  XXH_ASSERT(state != NULL);
4798  { const xxh_u8* const bEnd = input + len;
4799  const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
4800 #if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
4801  /* For some reason, gcc and MSVC seem to suffer greatly
4802  * when operating on accumulators stored directly in the state.
4803  * Operating on stack space instead seems to enable proper optimization.
4804  * clang, on the other hand, doesn't seem to need this trick */
4805  XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8]; memcpy(acc, state->acc, sizeof(acc));
4806 #else
4807  xxh_u64* XXH_RESTRICT const acc = state->acc;
4808 #endif
4809  state->totalLen += len;
4810  XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE);
4811 
4812  /* small input : just fill in tmp buffer */
4813  if (state->bufferedSize + len <= XXH3_INTERNALBUFFER_SIZE) {
4814  XXH_memcpy(state->buffer + state->bufferedSize, input, len);
4815  state->bufferedSize += (XXH32_hash_t)len;
4816  return XXH_OK;
4817  }
4818 
4819  /* total input is now > XXH3_INTERNALBUFFER_SIZE */
4820  #define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN)
4821  XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0); /* clean multiple */
4822 
4823  /*
4824  * Internal buffer is partially filled (always, except at beginning)
4825  * Complete it, then consume it.
4826  */
4827  if (state->bufferedSize) {
4828  size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize;
4829  XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize);
4830  input += loadSize;
4831  XXH3_consumeStripes(acc,
4832  &state->nbStripesSoFar, state->nbStripesPerBlock,
4833  state->buffer, XXH3_INTERNALBUFFER_STRIPES,
4834  secret, state->secretLimit,
4835  f_acc512, f_scramble);
4836  state->bufferedSize = 0;
4837  }
4838  XXH_ASSERT(input < bEnd);
4839 
4840  /* large input to consume : ingest per full block */
4841  if ((size_t)(bEnd - input) > state->nbStripesPerBlock * XXH_STRIPE_LEN) {
4842  size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN;
4843  XXH_ASSERT(state->nbStripesPerBlock >= state->nbStripesSoFar);
4844  /* join to current block's end */
4845  { size_t const nbStripesToEnd = state->nbStripesPerBlock - state->nbStripesSoFar;
4846  XXH_ASSERT(nbStripesToEnd <= nbStripes);
4847  XXH3_accumulate(acc, input, secret + state->nbStripesSoFar * XXH_SECRET_CONSUME_RATE, nbStripesToEnd, f_acc512);
4848  f_scramble(acc, secret + state->secretLimit);
4849  state->nbStripesSoFar = 0;
4850  input += nbStripesToEnd * XXH_STRIPE_LEN;
4851  nbStripes -= nbStripesToEnd;
4852  }
4853  /* consume per entire blocks */
4854  while(nbStripes >= state->nbStripesPerBlock) {
4855  XXH3_accumulate(acc, input, secret, state->nbStripesPerBlock, f_acc512);
4856  f_scramble(acc, secret + state->secretLimit);
4857  input += state->nbStripesPerBlock * XXH_STRIPE_LEN;
4858  nbStripes -= state->nbStripesPerBlock;
4859  }
4860  /* consume last partial block */
4861  XXH3_accumulate(acc, input, secret, nbStripes, f_acc512);
4862  input += nbStripes * XXH_STRIPE_LEN;
4863  XXH_ASSERT(input < bEnd); /* at least some bytes left */
4864  state->nbStripesSoFar = nbStripes;
4865  /* buffer predecessor of last partial stripe */
4866  XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
4867  XXH_ASSERT(bEnd - input <= XXH_STRIPE_LEN);
4868  } else {
4869  /* content to consume <= block size */
4870  /* Consume input by a multiple of internal buffer size */
4871  if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) {
4872  const xxh_u8* const limit = bEnd - XXH3_INTERNALBUFFER_SIZE;
4873  do {
4874  XXH3_consumeStripes(acc,
4875  &state->nbStripesSoFar, state->nbStripesPerBlock,
4876  input, XXH3_INTERNALBUFFER_STRIPES,
4877  secret, state->secretLimit,
4878  f_acc512, f_scramble);
4879  input += XXH3_INTERNALBUFFER_SIZE;
4880  } while (input<limit);
4881  /* buffer predecessor of last partial stripe */
4882  XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
4883  }
4884  }
4885 
4886  /* Some remaining input (always) : buffer it */
4887  XXH_ASSERT(input < bEnd);
4888  XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE);
4889  XXH_ASSERT(state->bufferedSize == 0);
4890  XXH_memcpy(state->buffer, input, (size_t)(bEnd-input));
4891  state->bufferedSize = (XXH32_hash_t)(bEnd-input);
4892 #if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
4893  /* save stack accumulators into state */
4894  memcpy(state->acc, acc, sizeof(acc));
4895 #endif
4896  }
4897 
4898  return XXH_OK;
4899 }
4900 
4902 XXH_PUBLIC_API XXH_errorcode
4903 XXH3_64bits_update(XXH3_state_t* state, const void* input, size_t len)
4904 {
4905  return XXH3_update(state, (const xxh_u8*)input, len,
4906  XXH3_accumulate_512, XXH3_scrambleAcc);
4907 }
4908 
4909 
4910 XXH_FORCE_INLINE void
4911 XXH3_digest_long (XXH64_hash_t* acc,
4912  const XXH3_state_t* state,
4913  const unsigned char* secret)
4914 {
4915  /*
4916  * Digest on a local copy. This way, the state remains unaltered, and it can
4917  * continue ingesting more input afterwards.
4918  */
4919  XXH_memcpy(acc, state->acc, sizeof(state->acc));
4920  if (state->bufferedSize >= XXH_STRIPE_LEN) {
4921  size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN;
4922  size_t nbStripesSoFar = state->nbStripesSoFar;
4923  XXH3_consumeStripes(acc,
4924  &nbStripesSoFar, state->nbStripesPerBlock,
4925  state->buffer, nbStripes,
4926  secret, state->secretLimit,
4927  XXH3_accumulate_512, XXH3_scrambleAcc);
4928  /* last stripe */
4929  XXH3_accumulate_512(acc,
4930  state->buffer + state->bufferedSize - XXH_STRIPE_LEN,
4931  secret + state->secretLimit - XXH_SECRET_LASTACC_START);
4932  } else { /* bufferedSize < XXH_STRIPE_LEN */
4933  xxh_u8 lastStripe[XXH_STRIPE_LEN];
4934  size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize;
4935  XXH_ASSERT(state->bufferedSize > 0); /* there is always some input buffered */
4936  XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize);
4937  XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize);
4938  XXH3_accumulate_512(acc,
4939  lastStripe,
4940  secret + state->secretLimit - XXH_SECRET_LASTACC_START);
4941  }
4942 }
4943 
4945 XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* state)
4946 {
4947  const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
4948  if (state->totalLen > XXH3_MIDSIZE_MAX) {
4949  XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
4950  XXH3_digest_long(acc, state, secret);
4951  return XXH3_mergeAccs(acc,
4952  secret + XXH_SECRET_MERGEACCS_START,
4953  (xxh_u64)state->totalLen * XXH_PRIME64_1);
4954  }
4955  /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */
4956  if (state->useSeed)
4957  return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
4958  return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen),
4959  secret, state->secretLimit + XXH_STRIPE_LEN);
4960 }
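/*
 * Editor's note: a minimal streaming sketch using the API above; the sample
 * function and its two-chunk input are illustrative assumptions, and error
 * handling is abbreviated. The block is compiled out.
 */
#if 0   /* illustration only */
static XXH64_hash_t example_streaming_64(const void* chunk1, size_t len1,
                                         const void* chunk2, size_t len2)
{
    XXH64_hash_t hash = 0;
    XXH3_state_t* const state = XXH3_createState();
    if (state != NULL
     && XXH3_64bits_reset(state) == XXH_OK
     && XXH3_64bits_update(state, chunk1, len1) == XXH_OK
     && XXH3_64bits_update(state, chunk2, len2) == XXH_OK) {
        hash = XXH3_64bits_digest(state);  /* state can keep ingesting afterwards */
    }
    (void)XXH3_freeState(state);
    return hash;
}
#endif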
4961 
4962 
4963 
4964 /* ==========================================
4965  * XXH3 128 bits (a.k.a XXH128)
4966  * ==========================================
4967  * XXH3's 128-bit variant has better mixing and strength than the 64-bit variant,
4968  * even without counting the significantly larger output size.
4969  *
4970  * For example, extra steps are taken to avoid the seed-dependent collisions
4971  * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B).
4972  *
4973  * This strength naturally comes at the cost of some speed, especially on short
4974  * lengths. Note that hashing long inputs is about as fast as with the 64-bit
4975  * version, since it uses only a slight modification of the 64-bit loop.
4976  *
4977  * XXH128 is also more oriented towards 64-bit machines. It is still extremely
4978  * fast for a _128-bit_ hash on 32-bit (it usually clears XXH64).
4979  */
4980 
4981 XXH_FORCE_INLINE XXH128_hash_t
4982 XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
4983 {
4984  /* A doubled version of 1to3_64b with different constants. */
4985  XXH_ASSERT(input != NULL);
4986  XXH_ASSERT(1 <= len && len <= 3);
4987  XXH_ASSERT(secret != NULL);
4988  /*
4989  * len = 1: combinedl = { input[0], 0x01, input[0], input[0] }
4990  * len = 2: combinedl = { input[1], 0x02, input[0], input[1] }
4991  * len = 3: combinedl = { input[2], 0x03, input[0], input[1] }
4992  */
4993  { xxh_u8 const c1 = input[0];
4994  xxh_u8 const c2 = input[len >> 1];
4995  xxh_u8 const c3 = input[len - 1];
4996  xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24)
4997  | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
4998  xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13);
4999  xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
5000  xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed;
5001  xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl;
5002  xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph;
5003  XXH128_hash_t h128;
5004  h128.low64 = XXH64_avalanche(keyed_lo);
5005  h128.high64 = XXH64_avalanche(keyed_hi);
5006  return h128;
5007  }
5008 }
5009 
5010 XXH_FORCE_INLINE XXH128_hash_t
5011 XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
5012 {
5013  XXH_ASSERT(input != NULL);
5014  XXH_ASSERT(secret != NULL);
5015  XXH_ASSERT(4 <= len && len <= 8);
5016  seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
5017  { xxh_u32 const input_lo = XXH_readLE32(input);
5018  xxh_u32 const input_hi = XXH_readLE32(input + len - 4);
5019  xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32);
5020  xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed;
5021  xxh_u64 const keyed = input_64 ^ bitflip;
5022 
5023  /* Shift len left (keeping it even); XXH_PRIME64_1 + (len << 2) thus stays odd, avoiding an even multiplier. */
5024  XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2));
5025 
5026  m128.high64 += (m128.low64 << 1);
5027  m128.low64 ^= (m128.high64 >> 3);
5028 
5029  m128.low64 = XXH_xorshift64(m128.low64, 35);
5030  m128.low64 *= 0x9FB21C651E98DF25ULL;
5031  m128.low64 = XXH_xorshift64(m128.low64, 28);
5032  m128.high64 = XXH3_avalanche(m128.high64);
5033  return m128;
5034  }
5035 }
5036 
5037 XXH_FORCE_INLINE XXH128_hash_t
5038 XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
5039 {
5040  XXH_ASSERT(input != NULL);
5041  XXH_ASSERT(secret != NULL);
5042  XXH_ASSERT(9 <= len && len <= 16);
5043  { xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed;
5044  xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed;
5045  xxh_u64 const input_lo = XXH_readLE64(input);
5046  xxh_u64 input_hi = XXH_readLE64(input + len - 8);
5047  XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1);
5048  /*
5049  * Put len in the middle of m128 to ensure that the length gets mixed to
5050  * both the low and high bits in the 128x64 multiply below.
5051  */
5052  m128.low64 += (xxh_u64)(len - 1) << 54;
5053  input_hi ^= bitfliph;
5054  /*
5055  * Add the high 32 bits of input_hi to the high 32 bits of m128, then
5056  * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to
5057  * the high 64 bits of m128.
5058  *
5059  * The best approach to this operation is different on 32-bit and 64-bit.
5060  */
5061  if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */
5062  /*
5063  * 32-bit optimized version, which is more readable.
5064  *
5065  * On 32-bit, it removes an ADC and delays a dependency between the two
5066  * halves of m128.high64, but it generates an extra mask on 64-bit.
5067  */
5068  m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2);
5069  } else {
5070  /*
5071  * 64-bit optimized (albeit more confusing) version.
5072  *
5073  * Uses some properties of addition and multiplication to remove the mask:
5074  *
5075  * Let:
5076  * a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF)
5077  * b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000)
5078  * c = XXH_PRIME32_2
5079  *
5080  * a + (b * c)
5081  * Inverse Property: x + y - x == y
5082  * a + (b * (1 + c - 1))
5083  * Distributive Property: x * (y + z) == (x * y) + (x * z)
5084  * a + (b * 1) + (b * (c - 1))
5085  * Identity Property: x * 1 == x
5086  * a + b + (b * (c - 1))
5087  *
5088  * Substitute a, b, and c:
5089  * input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
5090  *
5091  * Since input_hi.hi + input_hi.lo == input_hi, we get this:
5092  * input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
5093  */
5094  m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1);
5095  }
5096  /* m128 ^= XXH_swap64(m128 >> 64); */
5097  m128.low64 ^= XXH_swap64(m128.high64);
5098 
5099  { /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */
5100  XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2);
5101  h128.high64 += m128.high64 * XXH_PRIME64_2;
5102 
5103  h128.low64 = XXH3_avalanche(h128.low64);
5104  h128.high64 = XXH3_avalanche(h128.high64);
5105  return h128;
5106  } }
5107 }
5108 
5109 /*
5110  * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN
5111  */
5112 XXH_FORCE_INLINE XXH128_hash_t
5113 XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
5114 {
5115  XXH_ASSERT(len <= 16);
5116  { if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed);
5117  if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed);
5118  if (len) return XXH3_len_1to3_128b(input, len, secret, seed);
5119  { XXH128_hash_t h128;
5120  xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72);
5121  xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88);
5122  h128.low64 = XXH64_avalanche(seed ^ bitflipl);
5123  h128.high64 = XXH64_avalanche( seed ^ bitfliph);
5124  return h128;
5125  } }
5126 }
5127 
5128 /*
5129  * A bit slower than XXH3_mix16B, but handles multiply by zero better.
5130  */
5131 XXH_FORCE_INLINE XXH128_hash_t
5132 XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2,
5133  const xxh_u8* secret, XXH64_hash_t seed)
5134 {
5135  acc.low64 += XXH3_mix16B (input_1, secret+0, seed);
5136  acc.low64 ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8);
5137  acc.high64 += XXH3_mix16B (input_2, secret+16, seed);
5138  acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8);
5139  return acc;
5140 }
5141 
5142 
5143 XXH_FORCE_INLINE XXH128_hash_t
5144 XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
5145  const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
5146  XXH64_hash_t seed)
5147 {
5148  XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
5149  XXH_ASSERT(16 < len && len <= 128);
5150 
5151  { XXH128_hash_t acc;
5152  acc.low64 = len * XXH_PRIME64_1;
5153  acc.high64 = 0;
5154  if (len > 32) {
5155  if (len > 64) {
5156  if (len > 96) {
5157  acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed);
5158  }
5159  acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed);
5160  }
5161  acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed);
5162  }
5163  acc = XXH128_mix32B(acc, input, input+len-16, secret, seed);
5164  { XXH128_hash_t h128;
5165  h128.low64 = acc.low64 + acc.high64;
5166  h128.high64 = (acc.low64 * XXH_PRIME64_1)
5167  + (acc.high64 * XXH_PRIME64_4)
5168  + ((len - seed) * XXH_PRIME64_2);
5169  h128.low64 = XXH3_avalanche(h128.low64);
5170  h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
5171  return h128;
5172  }
5173  }
5174 }
5175 
5176 XXH_NO_INLINE XXH128_hash_t
5177 XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
5178  const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
5179  XXH64_hash_t seed)
5180 {
5181  XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
5182  XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
5183 
5184  { XXH128_hash_t acc;
5185  int const nbRounds = (int)len / 32;
5186  int i;
5187  acc.low64 = len * XXH_PRIME64_1;
5188  acc.high64 = 0;
5189  for (i=0; i<4; i++) {
5190  acc = XXH128_mix32B(acc,
5191  input + (32 * i),
5192  input + (32 * i) + 16,
5193  secret + (32 * i),
5194  seed);
5195  }
5196  acc.low64 = XXH3_avalanche(acc.low64);
5197  acc.high64 = XXH3_avalanche(acc.high64);
5198  XXH_ASSERT(nbRounds >= 4);
5199  for (i=4 ; i < nbRounds; i++) {
5200  acc = XXH128_mix32B(acc,
5201  input + (32 * i),
5202  input + (32 * i) + 16,
5203  secret + XXH3_MIDSIZE_STARTOFFSET + (32 * (i - 4)),
5204  seed);
5205  }
5206  /* last bytes */
5207  acc = XXH128_mix32B(acc,
5208  input + len - 16,
5209  input + len - 32,
5210  secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
5211  0ULL - seed);
5212 
5213  { XXH128_hash_t h128;
5214  h128.low64 = acc.low64 + acc.high64;
5215  h128.high64 = (acc.low64 * XXH_PRIME64_1)
5216  + (acc.high64 * XXH_PRIME64_4)
5217  + ((len - seed) * XXH_PRIME64_2);
5218  h128.low64 = XXH3_avalanche(h128.low64);
5219  h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
5220  return h128;
5221  }
5222  }
5223 }
5224 
5225 XXH_FORCE_INLINE XXH128_hash_t
5226 XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len,
5227  const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
5228  XXH3_f_accumulate_512 f_acc512,
5229  XXH3_f_scrambleAcc f_scramble)
5230 {
5231  XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
5232 
5233  XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc512, f_scramble);
5234 
5235  /* converge into final hash */
5236  XXH_STATIC_ASSERT(sizeof(acc) == 64);
5237  XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
5238  { XXH128_hash_t h128;
5239  h128.low64 = XXH3_mergeAccs(acc,
5240  secret + XXH_SECRET_MERGEACCS_START,
5241  (xxh_u64)len * XXH_PRIME64_1);
5242  h128.high64 = XXH3_mergeAccs(acc,
5243  secret + secretSize
5244  - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
5245  ~((xxh_u64)len * XXH_PRIME64_2));
5246  return h128;
5247  }
5248 }
5249 
5250 /*
5251  * It's important for performance that XXH3_hashLong is not inlined.
5252  */
5253 XXH_NO_INLINE XXH128_hash_t
5254 XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len,
5255  XXH64_hash_t seed64,
5256  const void* XXH_RESTRICT secret, size_t secretLen)
5257 {
5258  (void)seed64; (void)secret; (void)secretLen;
5259  return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret),
5260  XXH3_accumulate_512, XXH3_scrambleAcc);
5261 }
5262 
5263 /*
5264  * It's important for performance to pass @secretLen (when it's static)
5265  * to the compiler, so that it can properly optimize the vectorized loop.
5266  */
5267 XXH_FORCE_INLINE XXH128_hash_t
5268 XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len,
5269  XXH64_hash_t seed64,
5270  const void* XXH_RESTRICT secret, size_t secretLen)
5271 {
5272  (void)seed64;
5273  return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen,
5274  XXH3_accumulate_512, XXH3_scrambleAcc);
5275 }
5276 
5277 XXH_FORCE_INLINE XXH128_hash_t
5278 XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len,
5279  XXH64_hash_t seed64,
5280  XXH3_f_accumulate_512 f_acc512,
5281  XXH3_f_scrambleAcc f_scramble,
5282  XXH3_f_initCustomSecret f_initSec)
5283 {
5284  if (seed64 == 0)
5285  return XXH3_hashLong_128b_internal(input, len,
5286  XXH3_kSecret, sizeof(XXH3_kSecret),
5287  f_acc512, f_scramble);
5288  { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
5289  f_initSec(secret, seed64);
5290  return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret),
5291  f_acc512, f_scramble);
5292  }
5293 }
5294 
5295 /*
5296  * It's important for performance that XXH3_hashLong is not inlined.
5297  */
5298 XXH_NO_INLINE XXH128_hash_t
5299 XXH3_hashLong_128b_withSeed(const void* input, size_t len,
5300  XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen)
5301 {
5302  (void)secret; (void)secretLen;
5303  return XXH3_hashLong_128b_withSeed_internal(input, len, seed64,
5304  XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret);
5305 }
5306 
5307 typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t,
5308  XXH64_hash_t, const void* XXH_RESTRICT, size_t);
5309 
5310 XXH_FORCE_INLINE XXH128_hash_t
5311 XXH3_128bits_internal(const void* input, size_t len,
5312  XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
5313  XXH3_hashLong128_f f_hl128)
5314 {
5315  XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
5316  /*
5317  * If an action is to be taken if `secret` conditions are not respected,
5318  * it should be done here.
5319  * For now, it's a contract pre-condition.
5320  * Adding a check and a branch here would cost performance at every hash.
5321  */
5322  if (len <= 16)
5323  return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
5324  if (len <= 128)
5325  return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
5326  if (len <= XXH3_MIDSIZE_MAX)
5327  return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
5328  return f_hl128(input, len, seed64, secret, secretLen);
5329 }
5330 
5331 
5332 /* === Public XXH128 API === */
5333 
5335 XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* input, size_t len)
5336 {
5337  return XXH3_128bits_internal(input, len, 0,
5338  XXH3_kSecret, sizeof(XXH3_kSecret),
5339  XXH3_hashLong_128b_default);
5340 }
5341 
5343 XXH_PUBLIC_API XXH128_hash_t
5344 XXH3_128bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize)
5345 {
5346  return XXH3_128bits_internal(input, len, 0,
5347  (const xxh_u8*)secret, secretSize,
5348  XXH3_hashLong_128b_withSecret);
5349 }
5350 
5352 XXH_PUBLIC_API XXH128_hash_t
5353 XXH3_128bits_withSeed(const void* input, size_t len, XXH64_hash_t seed)
5354 {
5355  return XXH3_128bits_internal(input, len, seed,
5356  XXH3_kSecret, sizeof(XXH3_kSecret),
5357  XXH3_hashLong_128b_withSeed);
5358 }
5359 
5361 XXH_PUBLIC_API XXH128_hash_t
5362 XXH3_128bits_withSecretandSeed(const void* input, size_t len, const void* secret, size_t secretSize, XXH64_hash_t seed)
5363 {
5364  if (len <= XXH3_MIDSIZE_MAX)
5365  return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
5366  return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize);
5367 }
5368 
5370 XXH_PUBLIC_API XXH128_hash_t
5371 XXH128(const void* input, size_t len, XXH64_hash_t seed)
5372 {
5373  return XXH3_128bits_withSeed(input, len, seed);
5374 }
5375 
5376 
5377 /* === XXH3 128-bit streaming === */
5378 
5379 /*
5380  * All initialization and update functions are identical to 64-bit streaming variant.
5381  * The only difference is the finalization routine.
5382  */
5383 
5385 XXH_PUBLIC_API XXH_errorcode
5386 XXH3_128bits_reset(XXH3_state_t* statePtr)
5387 {
5388  return XXH3_64bits_reset(statePtr);
5389 }
5390 
5392 XXH_PUBLIC_API XXH_errorcode
5393 XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize)
5394 {
5395  return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize);
5396 }
5397 
5399 XXH_PUBLIC_API XXH_errorcode
5400 XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed)
5401 {
5402  return XXH3_64bits_reset_withSeed(statePtr, seed);
5403 }
5404 
5406 XXH_PUBLIC_API XXH_errorcode
5407 XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr, const void* secret, size_t secretSize, XXH64_hash_t seed)
5408 {
5409  return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, seed);
5410 }
5411 
5413 XXH_PUBLIC_API XXH_errorcode
5414 XXH3_128bits_update(XXH3_state_t* state, const void* input, size_t len)
5415 {
5416  return XXH3_update(state, (const xxh_u8*)input, len,
5417  XXH3_accumulate_512, XXH3_scrambleAcc);
5418 }
5419 
5421 XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* state)
5422 {
5423  const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
5424  if (state->totalLen > XXH3_MIDSIZE_MAX) {
5425  XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
5426  XXH3_digest_long(acc, state, secret);
5427  XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
5428  { XXH128_hash_t h128;
5429  h128.low64 = XXH3_mergeAccs(acc,
5430  secret + XXH_SECRET_MERGEACCS_START,
5431  (xxh_u64)state->totalLen * XXH_PRIME64_1);
5432  h128.high64 = XXH3_mergeAccs(acc,
5433  secret + state->secretLimit + XXH_STRIPE_LEN
5434  - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
5435  ~((xxh_u64)state->totalLen * XXH_PRIME64_2));
5436  return h128;
5437  }
5438  }
5439  /* len <= XXH3_MIDSIZE_MAX : short code */
5440  if (state->seed)
5441  return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
5442  return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
5443  secret, state->secretLimit + XXH_STRIPE_LEN);
5444 }
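/* [Editor's note] A streaming usage sketch (not part of xxhash.h); the
 * function name and buffer size are arbitrary, and a real caller should check
 * every XXH_errorcode instead of discarding it.
 *
 *     #include <stdio.h>
 *     #include "xxhash.h"
 *
 *     XXH128_hash_t hash_stream_128(FILE* f)
 *     {
 *         char buf[4096];
 *         size_t n;
 *         XXH128_hash_t h = { 0, 0 };
 *         XXH3_state_t* const st = XXH3_createState();
 *         if (st == NULL) return h;
 *         (void)XXH3_128bits_reset(st);
 *         while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
 *             (void)XXH3_128bits_update(st, buf, n);
 *         h = XXH3_128bits_digest(st);
 *         XXH3_freeState(st);
 *         return h;
 *     }
 */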
5445 
5446 /* 128-bit utility functions */
5447 
5448 #include <string.h> /* memcmp, memcpy */
5449 
5450 /* return : 1 if equal, 0 if different */
5452 XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
5453 {
5454  /* note : XXH128_hash_t is compact; it has no padding bytes */
5455  return !(memcmp(&h1, &h2, sizeof(h1)));
5456 }
5457 
5458 /* This prototype is compatible with stdlib's qsort().
5459  * return : >0 if *h128_1 > *h128_2
5460  * <0 if *h128_1 < *h128_2
5461  * =0 if *h128_1 == *h128_2 */
5463 XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2)
5464 {
5465  XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1;
5466  XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2;
5467  int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
5468  /* note : this expects that, in most cases, hash values are different */
5469  if (hcmp) return hcmp;
5470  return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
5471 }
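/* [Editor's note] Because XXH128_cmp() has the qsort()-compatible prototype
 * documented above, an array of hashes can be sorted and binary-searched
 * directly; a short sketch (not part of xxhash.h):
 *
 *     #include <stdlib.h>
 *     #include "xxhash.h"
 *
 *     void sort_hash_table(XXH128_hash_t* table, size_t count)
 *     {
 *         qsort(table, count, sizeof(table[0]), XXH128_cmp);
 *     }
 *
 *     int contains_hash(const XXH128_hash_t* sortedTable, size_t count, XXH128_hash_t key)
 *     {
 *         return bsearch(&key, sortedTable, count, sizeof(sortedTable[0]), XXH128_cmp) != NULL;
 *     }
 */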
5472 
5473 
5474 /*====== Canonical representation ======*/
5476 XXH_PUBLIC_API void
5477 XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash)
5478 {
5479  XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
5480  if (XXH_CPU_LITTLE_ENDIAN) {
5481  hash.high64 = XXH_swap64(hash.high64);
5482  hash.low64 = XXH_swap64(hash.low64);
5483  }
5484  XXH_memcpy(dst, &hash.high64, sizeof(hash.high64));
5485  XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
5486 }
5487 
5489 XXH_PUBLIC_API XXH128_hash_t
5490 XXH128_hashFromCanonical(const XXH128_canonical_t* src)
5491 {
5492  XXH128_hash_t h;
5493  h.high64 = XXH_readBE64(src);
5494  h.low64 = XXH_readBE64(src->digest + 8);
5495  return h;
5496 }
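/* [Editor's note] A round-trip sketch (not part of xxhash.h): the canonical
 * form is a fixed big-endian byte sequence suitable for storage or network
 * transmission, and converting it back recovers the original value on any
 * platform, whatever its endianness.
 *
 *     #include "xxhash.h"
 *
 *     int canonical_roundtrip(XXH128_hash_t h)
 *     {
 *         XXH128_canonical_t c;
 *         XXH128_canonicalFromHash(&c, h);                         // serialize (big endian)
 *         return XXH128_isEqual(h, XXH128_hashFromCanonical(&c));  // expected: 1
 *     }
 */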
5497 
5498 
5499 
5500 /* ==========================================
5501  * Secret generators
5502  * ==========================================
5503  */
5504 #define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))
5505 
5506 static void XXH3_combine16(void* dst, XXH128_hash_t h128)
5507 {
5508  XXH_writeLE64( dst, XXH_readLE64(dst) ^ h128.low64 );
5509  XXH_writeLE64( (char*)dst+8, XXH_readLE64((char*)dst+8) ^ h128.high64 );
5510 }
5511 
5513 XXH_PUBLIC_API XXH_errorcode
5514 XXH3_generateSecret(void* secretBuffer, size_t secretSize, const void* customSeed, size_t customSeedSize)
5515 {
5516  XXH_ASSERT(secretBuffer != NULL);
5517  if (secretBuffer == NULL) return XXH_ERROR;
5518  XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
5519  if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
5520  if (customSeedSize == 0) {
5521  customSeed = XXH3_kSecret;
5522  customSeedSize = XXH_SECRET_DEFAULT_SIZE;
5523  }
5524  XXH_ASSERT(customSeed != NULL);
5525  if (customSeed == NULL) return XXH_ERROR;
5526 
5527  /* Fill secretBuffer with a copy of customSeed - repeat as needed */
5528  { size_t pos = 0;
5529  while (pos < secretSize) {
5530  size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize);
5531  memcpy((char*)secretBuffer + pos, customSeed, toCopy);
5532  pos += toCopy;
5533  } }
5534 
5535  { size_t const nbSeg16 = secretSize / 16;
5536  size_t n;
5537  XXH128_canonical_t scrambler;
5538  XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
5539  for (n=0; n<nbSeg16; n++) {
5540  XXH128_hash_t const h128 = XXH128(&scrambler, sizeof(scrambler), n);
5541  XXH3_combine16((char*)secretBuffer + n*16, h128);
5542  }
5543  /* last segment */
5544  XXH3_combine16((char*)secretBuffer + secretSize - 16, XXH128_hashFromCanonical(&scrambler));
5545  }
5546  return XXH_OK;
5547 }
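/* [Editor's note] A usage sketch for XXH3_generateSecret() (not part of
 * xxhash.h). The output buffer merely has to satisfy
 * secretSize >= XXH3_SECRET_SIZE_MIN; 192 bytes is chosen here to match the
 * library's default secret size.
 *
 *     #include "xxhash.h"
 *
 *     XXH128_hash_t hash_with_derived_secret(const void* data, size_t len,
 *                                            const void* seedMat, size_t seedLen)
 *     {
 *         unsigned char secret[192];
 *         XXH128_hash_t zero = { 0, 0 };
 *         if (XXH3_generateSecret(secret, sizeof(secret), seedMat, seedLen) != XXH_OK)
 *             return zero;    // NULL buffer or undersized secret
 *         return XXH3_128bits_withSecret(data, len, secret, sizeof(secret));
 *     }
 */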
5548 
5550 XXH_PUBLIC_API void
5551 XXH3_generateSecret_fromSeed(void* secretBuffer, XXH64_hash_t seed)
5552 {
5553  XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
5554  XXH3_initCustomSecret(secret, seed);
5555  XXH_ASSERT(secretBuffer != NULL);
5556  memcpy(secretBuffer, secret, XXH_SECRET_DEFAULT_SIZE);
5557 }
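/* [Editor's note] A sketch pairing the seed-only generator with the
 * `withSecretandSeed` entry point (not part of xxhash.h). Exactly
 * XXH_SECRET_DEFAULT_SIZE bytes are written; when the same seed is used for
 * both steps, the result is expected to match XXH3_128bits_withSeed() for
 * every input length.
 *
 *     XXH128_hash_t demo_secret_and_seed(const void* data, size_t len)
 *     {
 *         unsigned char secret[XXH_SECRET_DEFAULT_SIZE];  // 192 bytes by default
 *         XXH3_generateSecret_fromSeed(secret, 1234);
 *         return XXH3_128bits_withSecretandSeed(data, len,
 *                                               secret, sizeof(secret), 1234);
 *     }
 */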
5558 
5559 
5560 
5561 /* Pop our optimization override from above */
5562 #if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
5563  && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
5564  && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */
5565 # pragma GCC pop_options
5566 #endif
5567 
5568 #endif /* XXH_NO_LONG_LONG */
5569 
5570 #endif /* XXH_NO_XXH3 */
5571 
5575 #endif /* XXH_IMPLEMENTATION */
5576 
5577 
5578 #if defined (__cplusplus)
5579 }
5580 #endif