/*
 * xxHash - Extremely Fast Hash algorithm
 * Header File
 * Copyright (c) Yann Collet - Meta Platforms, Inc
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* Local adaptations for Zstandard */

#ifndef XXH_NO_XXH3
# define XXH_NO_XXH3
#endif

#ifndef XXH_NAMESPACE
# define XXH_NAMESPACE ZSTD_
#endif

/*!
 * @mainpage xxHash
 *
 * xxHash is an extremely fast non-cryptographic hash algorithm, working at RAM speed
 * limits.
 *
 * It is proposed in four flavors, in three families:
 * 1. @ref XXH32_family
 *   - Classic 32-bit hash function. Simple, compact, and runs on almost all
 *     32-bit and 64-bit systems.
 * 2. @ref XXH64_family
 *   - Classic 64-bit adaptation of XXH32. Just as simple, and runs well on most
 *     64-bit systems (but _not_ 32-bit systems).
 * 3. @ref XXH3_family
 *   - Modern 64-bit and 128-bit hash function family which features improved
 *     strength and performance across the board, especially on smaller data.
 *     It benefits greatly from SIMD and 64-bit without requiring it.
 *
 * Benchmarks
 * ---
 * The reference system uses an Intel i7-9700K CPU, and runs Ubuntu x64 20.04.
 * The open source benchmark program is compiled with clang v10.0 using the -O3 flag.
 *
 * | Hash Name            | ISA ext | Width | Large Data Speed | Small Data Velocity |
 * | -------------------- | ------- | ----: | ---------------: | ------------------: |
 * | XXH3_64bits()        | @b AVX2 |    64 |        59.4 GB/s |               133.1 |
 * | MeowHash             | AES-NI  |   128 |        58.2 GB/s |                52.5 |
 * | XXH3_128bits()       | @b AVX2 |   128 |        57.9 GB/s |               118.1 |
 * | CLHash               | PCLMUL  |    64 |        37.1 GB/s |                58.1 |
 * | XXH3_64bits()        | @b SSE2 |    64 |        31.5 GB/s |               133.1 |
 * | XXH3_128bits()       | @b SSE2 |   128 |        29.6 GB/s |               118.1 |
 * | RAM sequential read  |         |   N/A |        28.0 GB/s |                 N/A |
 * | ahash                | AES-NI  |    64 |        22.5 GB/s |               107.2 |
 * | City64               |         |    64 |        22.0 GB/s |                76.6 |
 * | T1ha2                |         |    64 |        22.0 GB/s |                99.0 |
 * | City128              |         |   128 |        21.7 GB/s |                57.7 |
 * | FarmHash             | AES-NI  |    64 |        21.3 GB/s |                71.9 |
 * | XXH64()              |         |    64 |        19.4 GB/s |                71.0 |
 * | SpookyHash           |         |    64 |        19.3 GB/s |                53.2 |
 * | Mum                  |         |    64 |        18.0 GB/s |                67.0 |
 * | CRC32C               | SSE4.2  |    32 |        13.0 GB/s |                57.9 |
 * | XXH32()              |         |    32 |         9.7 GB/s |                71.9 |
 * | City32               |         |    32 |         9.1 GB/s |                66.0 |
 * | Blake3*              | @b AVX2 |   256 |         4.4 GB/s |                 8.1 |
 * | Murmur3              |         |    32 |         3.9 GB/s |                56.1 |
 * | SipHash*             |         |    64 |         3.0 GB/s |                43.2 |
 * | Blake3*              | @b SSE2 |   256 |         2.4 GB/s |                 8.1 |
 * | HighwayHash          |         |    64 |         1.4 GB/s |                 6.0 |
 * | FNV64                |         |    64 |         1.2 GB/s |                62.7 |
 * | Blake2*              |         |   256 |         1.1 GB/s |                 5.1 |
 * | SHA1*                |         |   160 |         0.8 GB/s |                 5.6 |
 * | MD5*                 |         |   128 |         0.6 GB/s |                 7.8 |
 * @note
 *   - Hashes which require a specific ISA extension are noted. SSE2 is also noted,
 *     even though it is mandatory on x64.
 *   - Hashes with an asterisk are cryptographic. Note that MD5 is non-cryptographic
 *     by modern standards.
 *   - Small data velocity is a rough average of an algorithm's efficiency on small
 *     data. For more accurate information, see the wiki.
 *   - More benchmarks and strength tests are found on the wiki:
 *         https://github.com/Cyan4973/xxHash/wiki
 *
 * Usage
 * ------
 * All xxHash variants use a similar API. Changing the algorithm is a trivial
 * substitution.
 *
 * @pre
 *    For functions which take an input and length parameter, the following
 *    requirements are assumed:
 *    - The range from [`input`, `input + length`) is valid, readable memory.
 *      - The only exception is that `input` may be `NULL` when `length` is `0`.
 *    - For C++, the objects must have the *TriviallyCopyable* property, as the
 *      functions access bytes directly as if they were an array of `unsigned char`.
 *
 * @anchor single_shot_example
 * **Single Shot**
 *
 * These functions are stateless functions which hash a contiguous block of memory,
 * immediately returning the result. They are the easiest and usually the fastest
 * option.
 *
 * XXH32(), XXH64(), XXH3_64bits(), XXH3_128bits()
 *
 * @code{.c}
 *   #include <string.h>
 *   #include "xxhash.h"
 *
 *   // Example for a function which hashes a null terminated string with XXH32().
 *   XXH32_hash_t hash_string(const char* string, XXH32_hash_t seed)
 *   {
 *       // NULL pointers are only valid if the length is zero
 *       size_t length = (string == NULL) ? 0 : strlen(string);
 *       return XXH32(string, length, seed);
 *   }
 * @endcode
 *
 *
 * @anchor streaming_example
 * **Streaming**
 *
 * These groups of functions allow incremental hashing of unknown size, even
 * more than what would fit in a size_t.
 *
 * XXH32_reset(), XXH64_reset(), XXH3_64bits_reset(), XXH3_128bits_reset()
 *
 * @code{.c}
 *   #include <stdio.h>
 *   #include <assert.h>
 *   #include "xxhash.h"
 *   // Example for a function which hashes a FILE incrementally with XXH3_64bits().
 *   XXH64_hash_t hashFile(FILE* f)
 *   {
 *       // Allocate a state struct. Do not just use malloc() or new.
 *       XXH3_state_t* state = XXH3_createState();
 *       assert(state != NULL && "Out of memory!");
 *       // Reset the state to start a new hashing session.
 *       XXH3_64bits_reset(state);
 *       char buffer[4096];
 *       size_t count;
 *       // Read the file in chunks
 *       while ((count = fread(buffer, 1, sizeof(buffer), f)) != 0) {
 *           // Run update() as many times as necessary to process the data
 *           XXH3_64bits_update(state, buffer, count);
 *       }
 *       // Retrieve the finalized hash. This will not change the state.
 *       XXH64_hash_t result = XXH3_64bits_digest(state);
 *       // Free the state. Do not use free().
 *       XXH3_freeState(state);
 *       return result;
 *   }
 * @endcode
 *
 * Streaming functions generate the xxHash value from an incremental input.
 * This method is slower than single-call functions, due to state management.
 * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
 *
 * An XXH state must first be allocated using `XXH*_createState()`.
 *
 * Start a new hash by initializing the state with a seed using `XXH*_reset()`.
 *
 * Then, feed the hash state by calling `XXH*_update()` as many times as necessary.
 *
 * These functions return an error code, with 0 meaning OK, and any other value
 * meaning there is an error.
 *
 * Finally, a hash value can be produced anytime, by using `XXH*_digest()`.
 * This function returns the nn-bit hash value as an unsigned integer of the
 * matching width (e.g. @ref XXH32_hash_t or @ref XXH64_hash_t).
 *
 * It's still possible to continue inserting input into the hash state after a
 * digest, and generate new hash values later on by invoking `XXH*_digest()`.
 *
 * When done, release the state using `XXH*_freeState()`.
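 *
 * As an illustrative sketch (not part of the library API), the same lifecycle
 * with the XXH64 streaming variant, including error checks, might look like
 * this. The function and parameter names are placeholders:
 *
 * @code{.c}
 *   #include <stddef.h>
 *   #include "xxhash.h"
 *
 *   // Hashes two buffers as one continuous input.
 *   // Returns 0 and writes the hash to *result on success, or -1 on failure.
 *   int hashTwoParts(const void* part1, size_t len1,
 *                    const void* part2, size_t len2,
 *                    XXH64_hash_t seed, XXH64_hash_t* result)
 *   {
 *       XXH64_state_t* const state = XXH64_createState();
 *       if (state == NULL) return -1;
 *       if (XXH64_reset(state, seed) != XXH_OK
 *        || XXH64_update(state, part1, len1) != XXH_OK
 *        || XXH64_update(state, part2, len2) != XXH_OK) {
 *           XXH64_freeState(state);
 *           return -1;
 *       }
 *       *result = XXH64_digest(state);
 *       XXH64_freeState(state);
 *       return 0;
 *   }
 * @endcode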
 *
 *
 * @anchor canonical_representation_example
 * **Canonical Representation**
 *
 * The default return values from XXH functions are unsigned 32, 64 and 128 bit
 * integers.
 * This is the simplest and fastest format for further post-processing.
 *
 * However, this leaves open the question of byte-level ordering, since little-
 * and big-endian conventions will store the same number differently.
 *
 * The canonical representation settles this issue by mandating big-endian
 * convention, the same convention as human-readable numbers (large digits first).
 *
 * When writing hash values to storage, sending them over a network, or printing
 * them, it's highly recommended to use the canonical representation to ensure
 * portability across a wider range of systems, present and future.
 *
 * The following functions allow transformation of hash values to and from
 * canonical format.
 *
 * XXH32_canonicalFromHash(), XXH32_hashFromCanonical(),
 * XXH64_canonicalFromHash(), XXH64_hashFromCanonical(),
 * XXH128_canonicalFromHash(), XXH128_hashFromCanonical(),
 *
 * @code{.c}
 *   #include <stdio.h>
 *   #include "xxhash.h"
 *
 *   // Example for a function which prints XXH32_hash_t in human readable format
 *   void printXxh32(XXH32_hash_t hash)
 *   {
 *       XXH32_canonical_t cano;
 *       XXH32_canonicalFromHash(&cano, hash);
 *       size_t i;
 *       for(i = 0; i < sizeof(cano.digest); ++i) {
 *           printf("%02x", cano.digest[i]);
 *       }
 *       printf("\n");
 *   }
 *
 *   // Example for a function which converts XXH32_canonical_t to XXH32_hash_t
 *   XXH32_hash_t convertCanonicalToXxh32(XXH32_canonical_t cano)
 *   {
 *       XXH32_hash_t hash = XXH32_hashFromCanonical(&cano);
 *       return hash;
 *   }
 * @endcode
 *
 *
 * @file xxhash.h
 * xxHash prototypes and implementation
 */

#if defined (__cplusplus)
extern "C" {
#endif

/* ****************************
 *  INLINE mode
 ******************************/
/*!
 * @defgroup public Public API
 * Contains details on the public xxHash functions.
 * @{
 */
#ifdef XXH_DOXYGEN
/*!
 * @brief Gives access to internal state declaration, required for static allocation.
 *
 * Incompatible with dynamic linking, due to risks of ABI changes.
 *
 * Usage:
 * @code{.c}
 *     #define XXH_STATIC_LINKING_ONLY
 *     #include "xxhash.h"
 * @endcode
 */
#  define XXH_STATIC_LINKING_ONLY
/* Do not undef XXH_STATIC_LINKING_ONLY for Doxygen */

/*!
 * @brief Gives access to internal definitions.
 *
 * Usage:
 * @code{.c}
 *     #define XXH_STATIC_LINKING_ONLY
 *     #define XXH_IMPLEMENTATION
 *     #include "xxhash.h"
 * @endcode
 */
#  define XXH_IMPLEMENTATION
/* Do not undef XXH_IMPLEMENTATION for Doxygen */

/*!
 * @brief Exposes the implementation and marks all functions as `inline`.
 *
 * Use these build macros to inline xxhash into the target unit.
 * Inlining improves performance on small inputs, especially when the length is
 * expressed as a compile-time constant:
 *
 *  https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html
 *
 * It also keeps xxHash symbols private to the unit, so they are not exported.
 *
 * Usage:
 * @code{.c}
 *     #define XXH_INLINE_ALL
 *     #include "xxhash.h"
 * @endcode
 * Do not compile and link xxhash.o as a separate object, as it is not useful.
 */
#  define XXH_INLINE_ALL
#  undef XXH_INLINE_ALL
/*!
 * @brief Exposes the implementation without marking functions as inline.
 */
#  define XXH_PRIVATE_API
#  undef XXH_PRIVATE_API
/*!
 * @brief Emulate a namespace by transparently prefixing all symbols.
 *
 * If you want to include _and expose_ xxHash functions from within your own
 * library, but also want to avoid symbol collisions with other libraries which
 * may also include xxHash, you can use @ref XXH_NAMESPACE to automatically prefix
 * any public symbol from xxhash library with the value of @ref XXH_NAMESPACE
 * (therefore, avoid empty or numeric values).
 *
 * Note that no change is required within the calling program as long as it
 * includes `xxhash.h`: Regular symbol names will be automatically translated
 * by this header.
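 *
 * For illustration, a hypothetical library using the prefix `MYLIB_` (the name
 * is a placeholder, not part of xxHash) would build like this:
 * @code{.c}
 *     #define XXH_NAMESPACE MYLIB_
 *     #include "xxhash.h"
 *     // Calls to XXH64() now resolve to the exported symbol MYLIB_XXH64.
 * @endcode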
 */
#  define XXH_NAMESPACE /* YOUR NAME HERE */
#  undef XXH_NAMESPACE
#endif

#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \
    && !defined(XXH_INLINE_ALL_31684351384)
   /* this section should be traversed only once */
#  define XXH_INLINE_ALL_31684351384
   /* give access to the advanced API, required to compile implementations */
#  undef XXH_STATIC_LINKING_ONLY   /* avoid macro redef */
#  define XXH_STATIC_LINKING_ONLY
   /* make all functions private */
#  undef XXH_PUBLIC_API
#  if defined(__GNUC__)
#    define XXH_PUBLIC_API static __inline __attribute__((unused))
#  elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#    define XXH_PUBLIC_API static inline
#  elif defined(_MSC_VER)
#    define XXH_PUBLIC_API static __inline
#  else
     /* note: this version may generate warnings for unused static functions */
#    define XXH_PUBLIC_API static
#  endif

   /*
    * This part deals with the special case where a unit wants to inline xxHash,
    * but "xxhash.h" has previously been included without XXH_INLINE_ALL,
    * such as part of some previously included *.h header file.
    * Without further action, the new include would just be ignored,
    * and functions would effectively _not_ be inlined (silent failure).
    * The following macros solve this situation by prefixing all inlined names,
    * avoiding naming collision with previous inclusions.
    */
   /* Before that, we unconditionally #undef all symbols,
    * in case they were already defined with XXH_NAMESPACE.
    * They will then be redefined for XXH_INLINE_ALL
    */
#  undef XXH_versionNumber
    /* XXH32 */
#  undef XXH32
#  undef XXH32_createState
#  undef XXH32_freeState
#  undef XXH32_reset
#  undef XXH32_update
#  undef XXH32_digest
#  undef XXH32_copyState
#  undef XXH32_canonicalFromHash
#  undef XXH32_hashFromCanonical
    /* XXH64 */
#  undef XXH64
#  undef XXH64_createState
#  undef XXH64_freeState
#  undef XXH64_reset
#  undef XXH64_update
#  undef XXH64_digest
#  undef XXH64_copyState
#  undef XXH64_canonicalFromHash
#  undef XXH64_hashFromCanonical
    /* XXH3_64bits */
#  undef XXH3_64bits
#  undef XXH3_64bits_withSecret
#  undef XXH3_64bits_withSeed
#  undef XXH3_64bits_withSecretandSeed
#  undef XXH3_createState
#  undef XXH3_freeState
#  undef XXH3_copyState
#  undef XXH3_64bits_reset
#  undef XXH3_64bits_reset_withSeed
#  undef XXH3_64bits_reset_withSecret
#  undef XXH3_64bits_update
#  undef XXH3_64bits_digest
#  undef XXH3_generateSecret
    /* XXH3_128bits */
#  undef XXH128
#  undef XXH3_128bits
#  undef XXH3_128bits_withSeed
#  undef XXH3_128bits_withSecret
#  undef XXH3_128bits_reset
#  undef XXH3_128bits_reset_withSeed
#  undef XXH3_128bits_reset_withSecret
#  undef XXH3_128bits_reset_withSecretandSeed
#  undef XXH3_128bits_update
#  undef XXH3_128bits_digest
#  undef XXH128_isEqual
#  undef XXH128_cmp
#  undef XXH128_canonicalFromHash
#  undef XXH128_hashFromCanonical
    /* Finally, free the namespace itself */
#  undef XXH_NAMESPACE

    /* employ the namespace for XXH_INLINE_ALL */
#  define XXH_NAMESPACE XXH_INLINE_
   /*
    * Some identifiers (enums, type names) are not symbols,
    * but they must nonetheless be renamed to avoid redeclaration.
    * Alternative solution: do not redeclare them.
    * However, this requires some #ifdefs, and has a more dispersed impact.
    * Meanwhile, renaming can be achieved in a single place.
    */
#  define XXH_IPREF(Id)   XXH_NAMESPACE ## Id
#  define XXH_OK XXH_IPREF(XXH_OK)
#  define XXH_ERROR XXH_IPREF(XXH_ERROR)
#  define XXH_errorcode XXH_IPREF(XXH_errorcode)
#  define XXH32_canonical_t  XXH_IPREF(XXH32_canonical_t)
#  define XXH64_canonical_t  XXH_IPREF(XXH64_canonical_t)
#  define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t)
#  define XXH32_state_s XXH_IPREF(XXH32_state_s)
#  define XXH32_state_t XXH_IPREF(XXH32_state_t)
#  define XXH64_state_s XXH_IPREF(XXH64_state_s)
#  define XXH64_state_t XXH_IPREF(XXH64_state_t)
#  define XXH3_state_s  XXH_IPREF(XXH3_state_s)
#  define XXH3_state_t  XXH_IPREF(XXH3_state_t)
#  define XXH128_hash_t XXH_IPREF(XXH128_hash_t)
   /* Ensure the header is parsed again, even if it was previously included */
#  undef XXHASH_H_5627135585666179
#  undef XXHASH_H_STATIC_13879238742
#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */

/* ****************************************************************
 *  Stable API
 *****************************************************************/
#ifndef XXHASH_H_5627135585666179
#define XXHASH_H_5627135585666179 1

/*! @brief Marks a global symbol. */
#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
#  if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
#    ifdef XXH_EXPORT
#      define XXH_PUBLIC_API __declspec(dllexport)
#    elif XXH_IMPORT
#      define XXH_PUBLIC_API __declspec(dllimport)
#    endif
#  else
#    define XXH_PUBLIC_API   /* do nothing */
#  endif
#endif

#ifdef XXH_NAMESPACE
#  define XXH_CAT(A,B) A##B
#  define XXH_NAME2(A,B) XXH_CAT(A,B)
#  define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
/* XXH32 */
#  define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
#  define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
#  define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
#  define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
#  define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
#  define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
#  define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
#  define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
#  define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
/* XXH64 */
#  define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
#  define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
#  define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
#  define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
#  define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
#  define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
#  define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
#  define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
#  define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
/* XXH3_64bits */
#  define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
#  define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
#  define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
#  define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed)
#  define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
#  define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
#  define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
#  define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
#  define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
#  define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
#  define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed)
#  define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
#  define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
#  define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
#  define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed)
/* XXH3_128bits */
#  define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
#  define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
#  define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
#  define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
#  define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed)
#  define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
#  define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
#  define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
#  define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed)
#  define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
#  define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
#  define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
#  define XXH128_cmp     XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
#  define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
#  define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
#endif


/* *************************************
*  Compiler specifics
***************************************/

/* specific declaration modes for Windows */
#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
#  if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
#    ifdef XXH_EXPORT
#      define XXH_PUBLIC_API __declspec(dllexport)
#    elif XXH_IMPORT
#      define XXH_PUBLIC_API __declspec(dllimport)
#    endif
#  else
#    define XXH_PUBLIC_API   /* do nothing */
#  endif
#endif

#if defined (__GNUC__)
# define XXH_CONSTF  __attribute__((const))
# define XXH_PUREF   __attribute__((pure))
# define XXH_MALLOCF __attribute__((malloc))
#else
# define XXH_CONSTF  /* disable */
# define XXH_PUREF
# define XXH_MALLOCF
#endif

/* *************************************
*  Version
***************************************/
#define XXH_VERSION_MAJOR    0
#define XXH_VERSION_MINOR    8
#define XXH_VERSION_RELEASE  2
/*! @brief Version number, encoded as two digits each */
#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)

/*!
 * @brief Obtains the xxHash version.
 *
 * This is mostly useful when xxHash is compiled as a shared library,
 * since the returned value comes from the library, as opposed to the header file.
 *
 * @return @ref XXH_VERSION_NUMBER of the invoked library.
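 *
 * As a usage sketch, a program might verify that the library it linked against
 * matches the header it was compiled with (version 0.8.2 is encoded as 802):
 * @code{.c}
 *     if (XXH_versionNumber() != XXH_VERSION_NUMBER) {
 *         // header and library versions disagree
 *     }
 * @endcode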
 */
XXH_PUBLIC_API XXH_CONSTF unsigned XXH_versionNumber (void);


/* ****************************
*  Common basic types
******************************/
#include <stddef.h>   /* size_t */
/*!
 * @brief Exit code for the streaming API.
 */
typedef enum {
    XXH_OK = 0, /*!< OK */
    XXH_ERROR   /*!< Error */
} XXH_errorcode;


/*-**********************************************************************
*  32-bit hash
************************************************************************/
#if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */
/*!
 * @brief An unsigned 32-bit integer.
 *
 * Not necessarily defined to `uint32_t` but functionally equivalent.
 */
typedef uint32_t XXH32_hash_t;

#elif !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#   ifdef _AIX
#     include <inttypes.h>
#   else
#     include <stdint.h>
#   endif
    typedef uint32_t XXH32_hash_t;

#else
#   include <limits.h>
#   if UINT_MAX == 0xFFFFFFFFUL
      typedef unsigned int XXH32_hash_t;
#   elif ULONG_MAX == 0xFFFFFFFFUL
      typedef unsigned long XXH32_hash_t;
#   else
#     error "unsupported platform: need a 32-bit type"
#   endif
#endif

/*!
 * @}
 *
 * @defgroup XXH32_family XXH32 family
 * @ingroup public
 * Contains functions used in the classic 32-bit xxHash algorithm.
 *
 * @note
 *   XXH32 is useful for older platforms, with no or poor 64-bit performance.
 *   Note that the @ref XXH3_family provides competitive speed for both 32-bit
 *   and 64-bit systems, and offers true 64/128 bit hash results.
 *
 * @see @ref XXH64_family, @ref XXH3_family : Other xxHash families
 * @see @ref XXH32_impl for implementation details
 * @{
 */

/*!
 * @brief Calculates the 32-bit hash of @p input using xxHash32.
 *
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 * @param seed The 32-bit seed to alter the hash's output predictably.
 *
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return The calculated 32-bit xxHash32 value.
 *
 * @see @ref single_shot_example "Single Shot Example" for an example.
 */
XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed);

#ifndef XXH_NO_STREAM
/*!
 * @typedef struct XXH32_state_s XXH32_state_t
 * @brief The opaque state struct for the XXH32 streaming API.
 *
 * @see XXH32_state_s for details.
 */
typedef struct XXH32_state_s XXH32_state_t;

/*!
 * @brief Allocates an @ref XXH32_state_t.
 *
 * @return An allocated pointer of @ref XXH32_state_t on success.
 * @return `NULL` on failure.
 *
 * @note Must be freed with XXH32_freeState().
 */
XXH_PUBLIC_API XXH_MALLOCF XXH32_state_t* XXH32_createState(void);
/*!
 * @brief Frees an @ref XXH32_state_t.
 *
 * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref XXH32_createState().
 *
 * @return @ref XXH_OK.
 *
 * @note @p statePtr must be allocated with XXH32_createState().
 *
 */
XXH_PUBLIC_API XXH_errorcode  XXH32_freeState(XXH32_state_t* statePtr);
/*!
 * @brief Copies one @ref XXH32_state_t to another.
 *
 * @param dst_state The state to copy to.
 * @param src_state The state to copy from.
 * @pre
 *   @p dst_state and @p src_state must not be `NULL` and must not overlap.
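 *
 * @note As an illustrative sketch (not part of the library API), copying can
 *   snapshot a state so that a shared prefix is hashed only once; the names
 *   below are placeholders:
 * @code{.c}
 *     XXH32_state_t* fork = XXH32_createState();
 *     XXH32_copyState(fork, state); // fork resumes from state's current point
 * @endcode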
 */
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);

/*!
 * @brief Resets an @ref XXH32_state_t to begin a new hash.
 *
 * @param statePtr The state struct to reset.
 * @param seed The 32-bit seed to alter the hash result predictably.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return @ref XXH_OK on success.
 * @return @ref XXH_ERROR on failure.
 *
 * @note This function resets and seeds a state. Call it before @ref XXH32_update().
 */
XXH_PUBLIC_API XXH_errorcode XXH32_reset  (XXH32_state_t* statePtr, XXH32_hash_t seed);

/*!
 * @brief Consumes a block of @p input to an @ref XXH32_state_t.
 *
 * @param statePtr The state struct to update.
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return @ref XXH_OK on success.
 * @return @ref XXH_ERROR on failure.
 *
 * @note Call this to incrementally consume blocks of data.
 */
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);

/*!
 * @brief Returns the calculated hash value from an @ref XXH32_state_t.
 *
 * @param statePtr The state struct to calculate the hash from.
 *
 * @pre
 *  @p statePtr must not be `NULL`.
 *
 * @return The calculated 32-bit xxHash32 value from that state.
 *
 * @note
 *   Calling XXH32_digest() will not affect @p statePtr, so you can update,
 *   digest, and update again.
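 *
 * As an illustrative sketch (the names are placeholders), a running hash can
 * be observed mid-stream:
 * @code{.c}
 *     XXH32_update(state, part1, len1);
 *     XXH32_hash_t const h1 = XXH32_digest(state); // hash of part1 only
 *     XXH32_update(state, part2, len2);
 *     XXH32_hash_t const h2 = XXH32_digest(state); // hash of part1 + part2
 * @endcode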
 */
XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr);
#endif /* !XXH_NO_STREAM */

/*******   Canonical representation   *******/

/*!
 * @brief Canonical (big endian) representation of @ref XXH32_hash_t.
 */
typedef struct {
    unsigned char digest[4]; /*!< Hash bytes, big endian */
} XXH32_canonical_t;

/*!
 * @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t.
 *
 * @param dst  The @ref XXH32_canonical_t pointer to be stored to.
 * @param hash The @ref XXH32_hash_t to be converted.
 *
 * @pre
 *   @p dst must not be `NULL`.
 *
 * @see @ref canonical_representation_example "Canonical Representation Example"
 */
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);

/*!
 * @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t.
 *
 * @param src The @ref XXH32_canonical_t to convert.
 *
 * @pre
 *   @p src must not be `NULL`.
 *
 * @return The converted hash.
 *
 * @see @ref canonical_representation_example "Canonical Representation Example"
 */
XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);


/*! @cond Doxygen ignores this part */
#ifdef __has_attribute
# define XXH_HAS_ATTRIBUTE(x) __has_attribute(x)
#else
# define XXH_HAS_ATTRIBUTE(x) 0
#endif
/*! @endcond */

/*! @cond Doxygen ignores this part */
/*
 * C23 __STDC_VERSION__ number hasn't been specified yet. For now
 * leave as `201711L` (C17 + 1).
 * TODO: Update to the correct value once it has been specified.
 */
#define XXH_C23_VN 201711L
/*! @endcond */

/*! @cond Doxygen ignores this part */
/* C-language Attributes are added in C23. */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN) && defined(__has_c_attribute)
# define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
#else
# define XXH_HAS_C_ATTRIBUTE(x) 0
#endif
/*! @endcond */

/*! @cond Doxygen ignores this part */
#if defined(__cplusplus) && defined(__has_cpp_attribute)
# define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
#else
# define XXH_HAS_CPP_ATTRIBUTE(x) 0
#endif
/*! @endcond */

/*! @cond Doxygen ignores this part */
/*
 * Define XXH_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute
 * introduced in CPP17 and C23.
 * CPP17 : https://en.cppreference.com/w/cpp/language/attributes/fallthrough
 * C23   : https://en.cppreference.com/w/c/language/attributes/fallthrough
 */
#if XXH_HAS_C_ATTRIBUTE(fallthrough) || XXH_HAS_CPP_ATTRIBUTE(fallthrough)
# define XXH_FALLTHROUGH [[fallthrough]]
#elif XXH_HAS_ATTRIBUTE(__fallthrough__)
# define XXH_FALLTHROUGH __attribute__ ((__fallthrough__))
#else
# define XXH_FALLTHROUGH /* fallthrough */
#endif
/*! @endcond */
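
/*
 * Usage sketch (illustrative only, not part of the library): XXH_FALLTHROUGH
 * marks an intentional fall-through between switch cases, silencing compiler
 * warnings. `n`, `acc` and `step` are hypothetical placeholders:
 *
 *     switch (n % 3) {
 *         case 2: acc += step(2);
 *                 XXH_FALLTHROUGH; // intentional: also run the case below
 *         case 1: acc += step(1);
 *                 break;
 *         default: break;
 *     }
 */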
810*01826a49SYabin Cui 
811*01826a49SYabin Cui /*! @cond Doxygen ignores this part */
812*01826a49SYabin Cui /*
813*01826a49SYabin Cui  * Define XXH_NOESCAPE for annotated pointers in public API.
814*01826a49SYabin Cui  * https://clang.llvm.org/docs/AttributeReference.html#noescape
815*01826a49SYabin Cui  * As of writing this, only supported by clang.
816*01826a49SYabin Cui  */
817*01826a49SYabin Cui #if XXH_HAS_ATTRIBUTE(noescape)
818*01826a49SYabin Cui # define XXH_NOESCAPE __attribute__((noescape))
819*01826a49SYabin Cui #else
820*01826a49SYabin Cui # define XXH_NOESCAPE
821*01826a49SYabin Cui #endif
822*01826a49SYabin Cui /*! @endcond */
823*01826a49SYabin Cui 
824*01826a49SYabin Cui 
825*01826a49SYabin Cui /*!
826*01826a49SYabin Cui  * @}
827*01826a49SYabin Cui  * @ingroup public
828*01826a49SYabin Cui  * @{
829*01826a49SYabin Cui  */
830*01826a49SYabin Cui 
831*01826a49SYabin Cui #ifndef XXH_NO_LONG_LONG
832*01826a49SYabin Cui /*-**********************************************************************
833*01826a49SYabin Cui *  64-bit hash
834*01826a49SYabin Cui ************************************************************************/
835*01826a49SYabin Cui #if defined(XXH_DOXYGEN) /* don't include <stdint.h> */
836*01826a49SYabin Cui /*!
837*01826a49SYabin Cui  * @brief An unsigned 64-bit integer.
838*01826a49SYabin Cui  *
839*01826a49SYabin Cui  * Not necessarily defined to `uint64_t` but functionally equivalent.
840*01826a49SYabin Cui  */
841*01826a49SYabin Cui typedef uint64_t XXH64_hash_t;
842*01826a49SYabin Cui #elif !defined (__VMS) \
843*01826a49SYabin Cui   && (defined (__cplusplus) \
844*01826a49SYabin Cui   || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
845*01826a49SYabin Cui #   ifdef _AIX
846*01826a49SYabin Cui #     include <inttypes.h>
847*01826a49SYabin Cui #   else
848*01826a49SYabin Cui #     include <stdint.h>
849*01826a49SYabin Cui #   endif
850*01826a49SYabin Cui    typedef uint64_t XXH64_hash_t;
851*01826a49SYabin Cui #else
852*01826a49SYabin Cui #  include <limits.h>
853*01826a49SYabin Cui #  if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
854*01826a49SYabin Cui      /* LP64 ABI says uint64_t is unsigned long */
855*01826a49SYabin Cui      typedef unsigned long XXH64_hash_t;
856*01826a49SYabin Cui #  else
857*01826a49SYabin Cui      /* the following type must have a width of 64-bit */
858*01826a49SYabin Cui      typedef unsigned long long XXH64_hash_t;
859*01826a49SYabin Cui #  endif
860*01826a49SYabin Cui #endif
861*01826a49SYabin Cui 
862*01826a49SYabin Cui /*!
863*01826a49SYabin Cui  * @}
864*01826a49SYabin Cui  *
865*01826a49SYabin Cui  * @defgroup XXH64_family XXH64 family
866*01826a49SYabin Cui  * @ingroup public
867*01826a49SYabin Cui  * @{
868*01826a49SYabin Cui  * Contains functions used in the classic 64-bit xxHash algorithm.
869*01826a49SYabin Cui  *
870*01826a49SYabin Cui  * @note
871*01826a49SYabin Cui  *   XXH3 provides competitive speed for both 32-bit and 64-bit systems,
872*01826a49SYabin Cui  *   and offers true 64/128 bit hash results.
873*01826a49SYabin Cui  *   It provides better speed for systems with vector processing capabilities.
874*01826a49SYabin Cui  */
875*01826a49SYabin Cui 
876*01826a49SYabin Cui /*!
877*01826a49SYabin Cui  * @brief Calculates the 64-bit hash of @p input using xxHash64.
878*01826a49SYabin Cui  *
879*01826a49SYabin Cui  * @param input The block of data to be hashed, at least @p length bytes in size.
880*01826a49SYabin Cui  * @param length The length of @p input, in bytes.
881*01826a49SYabin Cui  * @param seed The 64-bit seed to alter the hash's output predictably.
882*01826a49SYabin Cui  *
883*01826a49SYabin Cui  * @pre
884*01826a49SYabin Cui  *   The memory between @p input and @p input + @p length must be valid,
885*01826a49SYabin Cui  *   readable, contiguous memory. However, if @p length is `0`, @p input may be
886*01826a49SYabin Cui  *   `NULL`. In C++, this also must be *TriviallyCopyable*.
887*01826a49SYabin Cui  *
888*01826a49SYabin Cui  * @return The calculated 64-bit xxHash64 value.
889*01826a49SYabin Cui  *
890*01826a49SYabin Cui  * @see @ref single_shot_example "Single Shot Example" for an example.
891*01826a49SYabin Cui  */
892*01826a49SYabin Cui XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed);
893*01826a49SYabin Cui 
894*01826a49SYabin Cui /*******   Streaming   *******/
895*01826a49SYabin Cui #ifndef XXH_NO_STREAM
896*01826a49SYabin Cui /*!
897*01826a49SYabin Cui  * @brief The opaque state struct for the XXH64 streaming API.
898*01826a49SYabin Cui  *
899*01826a49SYabin Cui  * @see XXH64_state_s for details.
900*01826a49SYabin Cui  */
901*01826a49SYabin Cui typedef struct XXH64_state_s XXH64_state_t;   /* incomplete type */
902*01826a49SYabin Cui 
903*01826a49SYabin Cui /*!
904*01826a49SYabin Cui  * @brief Allocates an @ref XXH64_state_t.
905*01826a49SYabin Cui  *
906*01826a49SYabin Cui  * @return An allocated pointer of @ref XXH64_state_t on success.
907*01826a49SYabin Cui  * @return `NULL` on failure.
908*01826a49SYabin Cui  *
909*01826a49SYabin Cui  * @note Must be freed with XXH64_freeState().
910*01826a49SYabin Cui  */
911*01826a49SYabin Cui XXH_PUBLIC_API XXH_MALLOCF XXH64_state_t* XXH64_createState(void);
912*01826a49SYabin Cui 
913*01826a49SYabin Cui /*!
914*01826a49SYabin Cui  * @brief Frees an @ref XXH64_state_t.
915*01826a49SYabin Cui  *
916*01826a49SYabin Cui  * @param statePtr A pointer to an @ref XXH64_state_t allocated with @ref XXH64_createState().
917*01826a49SYabin Cui  *
918*01826a49SYabin Cui  * @return @ref XXH_OK.
919*01826a49SYabin Cui  *
920*01826a49SYabin Cui  * @note @p statePtr must be allocated with XXH64_createState().
921*01826a49SYabin Cui  */
922*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode  XXH64_freeState(XXH64_state_t* statePtr);
923*01826a49SYabin Cui 
924*01826a49SYabin Cui /*!
925*01826a49SYabin Cui  * @brief Copies one @ref XXH64_state_t to another.
926*01826a49SYabin Cui  *
927*01826a49SYabin Cui  * @param dst_state The state to copy to.
928*01826a49SYabin Cui  * @param src_state The state to copy from.
929*01826a49SYabin Cui  * @pre
930*01826a49SYabin Cui  *   @p dst_state and @p src_state must not be `NULL` and must not overlap.
931*01826a49SYabin Cui  */
932*01826a49SYabin Cui XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dst_state, const XXH64_state_t* src_state);
933*01826a49SYabin Cui 
934*01826a49SYabin Cui /*!
935*01826a49SYabin Cui  * @brief Resets an @ref XXH64_state_t to begin a new hash.
936*01826a49SYabin Cui  *
937*01826a49SYabin Cui  * @param statePtr The state struct to reset.
938*01826a49SYabin Cui  * @param seed The 64-bit seed to alter the hash result predictably.
939*01826a49SYabin Cui  *
940*01826a49SYabin Cui  * @pre
941*01826a49SYabin Cui  *   @p statePtr must not be `NULL`.
942*01826a49SYabin Cui  *
943*01826a49SYabin Cui  * @return @ref XXH_OK on success.
944*01826a49SYabin Cui  * @return @ref XXH_ERROR on failure.
945*01826a49SYabin Cui  *
946*01826a49SYabin Cui  * @note This function resets and seeds a state. Call it before @ref XXH64_update().
947*01826a49SYabin Cui  */
948*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode XXH64_reset  (XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed);
949*01826a49SYabin Cui 
950*01826a49SYabin Cui /*!
951*01826a49SYabin Cui  * @brief Consumes a block of @p input to an @ref XXH64_state_t.
952*01826a49SYabin Cui  *
953*01826a49SYabin Cui  * @param statePtr The state struct to update.
954*01826a49SYabin Cui  * @param input The block of data to be hashed, at least @p length bytes in size.
955*01826a49SYabin Cui  * @param length The length of @p input, in bytes.
956*01826a49SYabin Cui  *
957*01826a49SYabin Cui  * @pre
958*01826a49SYabin Cui  *   @p statePtr must not be `NULL`.
959*01826a49SYabin Cui  * @pre
960*01826a49SYabin Cui  *   The memory between @p input and @p input + @p length must be valid,
961*01826a49SYabin Cui  *   readable, contiguous memory. However, if @p length is `0`, @p input may be
962*01826a49SYabin Cui  *   `NULL`. In C++, this also must be *TriviallyCopyable*.
963*01826a49SYabin Cui  *
964*01826a49SYabin Cui  * @return @ref XXH_OK on success.
965*01826a49SYabin Cui  * @return @ref XXH_ERROR on failure.
966*01826a49SYabin Cui  *
967*01826a49SYabin Cui  * @note Call this to incrementally consume blocks of data.
968*01826a49SYabin Cui  */
969*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH_NOESCAPE XXH64_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
970*01826a49SYabin Cui 
971*01826a49SYabin Cui /*!
972*01826a49SYabin Cui  * @brief Returns the calculated hash value from an @ref XXH64_state_t.
973*01826a49SYabin Cui  *
974*01826a49SYabin Cui  * @param statePtr The state struct to calculate the hash from.
975*01826a49SYabin Cui  *
976*01826a49SYabin Cui  * @pre
977*01826a49SYabin Cui  *  @p statePtr must not be `NULL`.
978*01826a49SYabin Cui  *
979*01826a49SYabin Cui  * @return The calculated 64-bit xxHash64 value from that state.
980*01826a49SYabin Cui  *
981*01826a49SYabin Cui  * @note
982*01826a49SYabin Cui  *   Calling XXH64_digest() will not affect @p statePtr, so you can update,
983*01826a49SYabin Cui  *   digest, and update again.
984*01826a49SYabin Cui  */
985*01826a49SYabin Cui XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_digest (XXH_NOESCAPE const XXH64_state_t* statePtr);
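/*
 * A minimal sketch of the streaming flow above (editor's illustration, not part
 * of the original header; the FILE* `f`, `buffer`, and `size` are hypothetical,
 * and <stdio.h>/<stdlib.h> are assumed for fread()/printf()/abort()):
 *
 *     unsigned char buffer[4096];
 *     size_t size;
 *     XXH64_state_t* const state = XXH64_createState();
 *     if (state == NULL) abort();                        // allocation failure
 *     if (XXH64_reset(state, 0) == XXH_ERROR) abort();   // seed = 0
 *     while ((size = fread(buffer, 1, sizeof(buffer), f)) != 0) {
 *         if (XXH64_update(state, buffer, size) == XXH_ERROR) abort();
 *     }
 *     {   XXH64_hash_t const hash = XXH64_digest(state); // state stays valid
 *         printf("%016llx\n", (unsigned long long) hash);
 *     }
 *     XXH64_freeState(state);
 */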
986*01826a49SYabin Cui #endif /* !XXH_NO_STREAM */
987*01826a49SYabin Cui /*******   Canonical representation   *******/
988*01826a49SYabin Cui 
989*01826a49SYabin Cui /*!
990*01826a49SYabin Cui  * @brief Canonical (big endian) representation of @ref XXH64_hash_t.
991*01826a49SYabin Cui  */
992*01826a49SYabin Cui typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t;
993*01826a49SYabin Cui 
994*01826a49SYabin Cui /*!
995*01826a49SYabin Cui  * @brief Converts an @ref XXH64_hash_t to a big endian @ref XXH64_canonical_t.
996*01826a49SYabin Cui  *
997*01826a49SYabin Cui  * @param dst The @ref XXH64_canonical_t pointer to be stored to.
998*01826a49SYabin Cui  * @param hash The @ref XXH64_hash_t to be converted.
999*01826a49SYabin Cui  *
1000*01826a49SYabin Cui  * @pre
1001*01826a49SYabin Cui  *   @p dst must not be `NULL`.
1002*01826a49SYabin Cui  *
1003*01826a49SYabin Cui  * @see @ref canonical_representation_example "Canonical Representation Example"
1004*01826a49SYabin Cui  */
1005*01826a49SYabin Cui XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash);
1006*01826a49SYabin Cui 
1007*01826a49SYabin Cui /*!
1008*01826a49SYabin Cui  * @brief Converts an @ref XXH64_canonical_t to a native @ref XXH64_hash_t.
1009*01826a49SYabin Cui  *
1010*01826a49SYabin Cui  * @param src The @ref XXH64_canonical_t to convert.
1011*01826a49SYabin Cui  *
1012*01826a49SYabin Cui  * @pre
1013*01826a49SYabin Cui  *   @p src must not be `NULL`.
1014*01826a49SYabin Cui  *
1015*01826a49SYabin Cui  * @return The converted hash.
1016*01826a49SYabin Cui  *
1017*01826a49SYabin Cui  * @see @ref canonical_representation_example "Canonical Representation Example"
1018*01826a49SYabin Cui  */
1019*01826a49SYabin Cui XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src);
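/*
 * Round-trip sketch through the canonical form (editor's illustration; `hash`
 * and the FILE* `fp` are hypothetical). The canonical form fixes the byte
 * order, so the digest can be stored or transmitted portably:
 *
 *     XXH64_canonical_t canonical;
 *     XXH64_canonicalFromHash(&canonical, hash);      // native -> big endian bytes
 *     fwrite(canonical.digest, 1, sizeof(canonical.digest), fp);
 *     // ... later, possibly on a machine with different endianness ...
 *     fread(canonical.digest, 1, sizeof(canonical.digest), fp);
 *     hash = XXH64_hashFromCanonical(&canonical);     // big endian bytes -> native
 */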
1020*01826a49SYabin Cui 
1021*01826a49SYabin Cui #ifndef XXH_NO_XXH3
1022*01826a49SYabin Cui 
1023*01826a49SYabin Cui /*!
1024*01826a49SYabin Cui  * @}
1025*01826a49SYabin Cui  * ************************************************************************
1026*01826a49SYabin Cui  * @defgroup XXH3_family XXH3 family
1027*01826a49SYabin Cui  * @ingroup public
1028*01826a49SYabin Cui  * @{
1029*01826a49SYabin Cui  *
1030*01826a49SYabin Cui  * XXH3 is a more recent hash algorithm featuring:
1031*01826a49SYabin Cui  *  - Improved speed for both small and large inputs
1032*01826a49SYabin Cui  *  - True 64-bit and 128-bit outputs
1033*01826a49SYabin Cui  *  - SIMD acceleration
1034*01826a49SYabin Cui  *  - Improved 32-bit viability
1035*01826a49SYabin Cui  *
1036*01826a49SYabin Cui  * Speed analysis methodology is explained here:
1037*01826a49SYabin Cui  *
1038*01826a49SYabin Cui  *    https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html
1039*01826a49SYabin Cui  *
1040*01826a49SYabin Cui  * Compared to XXH64, expect XXH3 to run approximately
1041*01826a49SYabin Cui  * 2x faster on large inputs and more than 3x faster on small ones;
1042*01826a49SYabin Cui  * exact differences vary depending on the platform.
1043*01826a49SYabin Cui  *
1044*01826a49SYabin Cui  * XXH3's speed benefits greatly from SIMD and 64-bit arithmetic,
1045*01826a49SYabin Cui  * but does not require it.
1046*01826a49SYabin Cui  * Most 32-bit and 64-bit targets that can run XXH32 smoothly can run XXH3
1047*01826a49SYabin Cui  * at competitive speeds, even without vector support. Further details are
1048*01826a49SYabin Cui  * explained in the implementation.
1049*01826a49SYabin Cui  *
1050*01826a49SYabin Cui  * XXH3 has a fast scalar implementation, but it also includes accelerated SIMD
1051*01826a49SYabin Cui  * implementations for many common platforms:
1052*01826a49SYabin Cui  *   - AVX512
1053*01826a49SYabin Cui  *   - AVX2
1054*01826a49SYabin Cui  *   - SSE2
1055*01826a49SYabin Cui  *   - ARM NEON
1056*01826a49SYabin Cui  *   - WebAssembly SIMD128
1057*01826a49SYabin Cui  *   - POWER8 VSX
1058*01826a49SYabin Cui  *   - s390x ZVector
1059*01826a49SYabin Cui  * This can be controlled via the @ref XXH_VECTOR macro, but it automatically
1060*01826a49SYabin Cui  * selects the best version according to predefined macros. For the x86 family, an
1061*01826a49SYabin Cui  * automatic runtime dispatcher is included separately in @ref xxh_x86dispatch.c.
1062*01826a49SYabin Cui  *
1063*01826a49SYabin Cui  * The XXH3 implementation is portable:
1064*01826a49SYabin Cui  * it has a generic C90 formulation that can be compiled on any platform,
1065*01826a49SYabin Cui  * and all implementations generate exactly the same hash value on all platforms.
1066*01826a49SYabin Cui  * Starting from v0.8.0, it's also labelled "stable", meaning that
1067*01826a49SYabin Cui  * any future version will also generate the same hash value.
1068*01826a49SYabin Cui  *
1069*01826a49SYabin Cui  * XXH3 offers 2 variants, _64bits and _128bits.
1070*01826a49SYabin Cui  *
1071*01826a49SYabin Cui  * When only 64 bits are needed, prefer invoking the _64bits variant, as it
1072*01826a49SYabin Cui  * reduces the amount of mixing, resulting in faster speed on small inputs.
1073*01826a49SYabin Cui  * It's also generally simpler to manipulate a scalar return type than a struct.
1074*01826a49SYabin Cui  *
1075*01826a49SYabin Cui  * The API supports one-shot hashing, streaming mode, and custom secrets.
1076*01826a49SYabin Cui  */
1077*01826a49SYabin Cui /*-**********************************************************************
1078*01826a49SYabin Cui *  XXH3 64-bit variant
1079*01826a49SYabin Cui ************************************************************************/
1080*01826a49SYabin Cui 
1081*01826a49SYabin Cui /*!
1082*01826a49SYabin Cui  * @brief Calculates 64-bit unseeded variant of XXH3 hash of @p input.
1083*01826a49SYabin Cui  *
1084*01826a49SYabin Cui  * @param input  The block of data to be hashed, at least @p length bytes in size.
1085*01826a49SYabin Cui  * @param length The length of @p input, in bytes.
1086*01826a49SYabin Cui  *
1087*01826a49SYabin Cui  * @pre
1088*01826a49SYabin Cui  *   The memory between @p input and @p input + @p length must be valid,
1089*01826a49SYabin Cui  *   readable, contiguous memory. However, if @p length is `0`, @p input may be
1090*01826a49SYabin Cui  *   `NULL`. In C++, this also must be *TriviallyCopyable*.
1091*01826a49SYabin Cui  *
1092*01826a49SYabin Cui  * @return The calculated 64-bit XXH3 hash value.
1093*01826a49SYabin Cui  *
1094*01826a49SYabin Cui  * @note
1095*01826a49SYabin Cui  *   This is equivalent to @ref XXH3_64bits_withSeed() with a seed of `0`; however,
1096*01826a49SYabin Cui  *   it may have slightly better performance due to constant propagation of the
1097*01826a49SYabin Cui  *   defaults.
1098*01826a49SYabin Cui  *
1099*01826a49SYabin Cui  * @see
1100*01826a49SYabin Cui  *    XXH3_64bits_withSeed(), XXH3_64bits_withSecret(): other seeding variants
1101*01826a49SYabin Cui  * @see @ref single_shot_example "Single Shot Example" for an example.
1102*01826a49SYabin Cui  */
1103*01826a49SYabin Cui XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length);
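/*
 * One-shot sketch (editor's illustration): hashing an in-memory buffer in a
 * single call.
 *
 *     const char msg[] = "hello world";
 *     XXH64_hash_t const h = XXH3_64bits(msg, sizeof(msg) - 1);
 */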
1104*01826a49SYabin Cui 
1105*01826a49SYabin Cui /*!
1106*01826a49SYabin Cui  * @brief Calculates 64-bit seeded variant of XXH3 hash of @p input.
1107*01826a49SYabin Cui  *
1108*01826a49SYabin Cui  * @param input  The block of data to be hashed, at least @p length bytes in size.
1109*01826a49SYabin Cui  * @param length The length of @p input, in bytes.
1110*01826a49SYabin Cui  * @param seed   The 64-bit seed to alter the hash result predictably.
1111*01826a49SYabin Cui  *
1112*01826a49SYabin Cui  * @pre
1113*01826a49SYabin Cui  *   The memory between @p input and @p input + @p length must be valid,
1114*01826a49SYabin Cui  *   readable, contiguous memory. However, if @p length is `0`, @p input may be
1115*01826a49SYabin Cui  *   `NULL`. In C++, this also must be *TriviallyCopyable*.
1116*01826a49SYabin Cui  *
1117*01826a49SYabin Cui  * @return The calculated 64-bit XXH3 hash value.
1118*01826a49SYabin Cui  *
1119*01826a49SYabin Cui  * @note
1120*01826a49SYabin Cui  *    seed == 0 produces the same results as @ref XXH3_64bits().
1121*01826a49SYabin Cui  *
1122*01826a49SYabin Cui  * This variant generates a custom secret on the fly based on the default secret
1123*01826a49SYabin Cui  * altered using the @p seed value.
1124*01826a49SYabin Cui  *
1125*01826a49SYabin Cui  * While this operation is decently fast, note that it's not completely free.
1126*01826a49SYabin Cui  *
1127*01826a49SYabin Cui  * @see @ref single_shot_example "Single Shot Example" for an example.
1128*01826a49SYabin Cui  */
1129*01826a49SYabin Cui XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed);
1130*01826a49SYabin Cui 
1131*01826a49SYabin Cui /*!
1132*01826a49SYabin Cui  * The bare minimum size for a custom secret.
1133*01826a49SYabin Cui  *
1134*01826a49SYabin Cui  * @see
1135*01826a49SYabin Cui  *  XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(),
1136*01826a49SYabin Cui  *  XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret().
1137*01826a49SYabin Cui  */
1138*01826a49SYabin Cui #define XXH3_SECRET_SIZE_MIN 136
1139*01826a49SYabin Cui 
1140*01826a49SYabin Cui /*!
1141*01826a49SYabin Cui  * @brief Calculates 64-bit variant of XXH3 with a custom "secret".
1142*01826a49SYabin Cui  *
1143*01826a49SYabin Cui  * @param data       The block of data to be hashed, at least @p len bytes in size.
1144*01826a49SYabin Cui  * @param len        The length of @p data, in bytes.
1145*01826a49SYabin Cui  * @param secret     The secret data.
1146*01826a49SYabin Cui  * @param secretSize The length of @p secret, in bytes.
1147*01826a49SYabin Cui  *
1148*01826a49SYabin Cui  * @return The calculated 64-bit XXH3 hash value.
1149*01826a49SYabin Cui  *
1150*01826a49SYabin Cui  * @pre
1151*01826a49SYabin Cui  *   The memory between @p data and @p data + @p len must be valid,
1152*01826a49SYabin Cui  *   readable, contiguous memory. However, if @p len is `0`, @p data may be
1153*01826a49SYabin Cui  *   `NULL`. In C++, this also must be *TriviallyCopyable*.
1154*01826a49SYabin Cui  *
1155*01826a49SYabin Cui  * It's possible to provide any blob of bytes as a "secret" to generate the hash.
1156*01826a49SYabin Cui  * This makes it more difficult for an external actor to prepare an intentional collision.
1157*01826a49SYabin Cui  * The main condition is that @p secretSize *must* be large enough (>= @ref XXH3_SECRET_SIZE_MIN).
1158*01826a49SYabin Cui  * However, the quality of the secret impacts the dispersion of the hash algorithm.
1159*01826a49SYabin Cui  * Therefore, the secret _must_ look like a bunch of random bytes.
1160*01826a49SYabin Cui  * Avoid "trivial" or structured data such as repeated sequences or a text document.
1161*01826a49SYabin Cui  * Whenever in doubt about the "randomness" of the blob of bytes,
1162*01826a49SYabin Cui  * consider employing @ref XXH3_generateSecret() instead (see below).
1163*01826a49SYabin Cui  * It will generate a proper high entropy secret derived from the blob of bytes.
1164*01826a49SYabin Cui  * Another advantage of using XXH3_generateSecret() is that
1165*01826a49SYabin Cui  * it guarantees that all bits within the initial blob of bytes
1166*01826a49SYabin Cui  * will impact every bit of the output.
1167*01826a49SYabin Cui  * This is not necessarily the case when using the blob of bytes directly
1168*01826a49SYabin Cui  * because, when hashing _small_ inputs, only a portion of the secret is employed.
1169*01826a49SYabin Cui  *
1170*01826a49SYabin Cui  * @see @ref single_shot_example "Single Shot Example" for an example.
1171*01826a49SYabin Cui  */
1172*01826a49SYabin Cui XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize);
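/*
 * A sketch of the secret workflow recommended above (editor's illustration;
 * `entropyBlob`, `entropySize`, `data`, and `len` are hypothetical). The blob
 * is first conditioned with XXH3_generateSecret() (declared further below, in
 * the static-linking-only section), then the derived secret is reused:
 *
 *     unsigned char secret[XXH3_SECRET_SIZE_MIN];
 *     if (XXH3_generateSecret(secret, sizeof(secret),
 *                             entropyBlob, entropySize) != XXH_OK) abort();
 *     XXH64_hash_t const h = XXH3_64bits_withSecret(data, len,
 *                                                   secret, sizeof(secret));
 */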
1173*01826a49SYabin Cui 
1174*01826a49SYabin Cui 
1175*01826a49SYabin Cui /*******   Streaming   *******/
1176*01826a49SYabin Cui #ifndef XXH_NO_STREAM
1177*01826a49SYabin Cui /*
1178*01826a49SYabin Cui  * Streaming requires state maintenance.
1179*01826a49SYabin Cui  * This operation costs memory and CPU.
1180*01826a49SYabin Cui  * As a consequence, streaming is slower than one-shot hashing.
1181*01826a49SYabin Cui  * For better performance, prefer one-shot functions whenever applicable.
1182*01826a49SYabin Cui  */
1183*01826a49SYabin Cui 
1184*01826a49SYabin Cui /*!
1185*01826a49SYabin Cui  * @brief The opaque state struct for the XXH3 streaming API.
1186*01826a49SYabin Cui  *
1187*01826a49SYabin Cui  * @see XXH3_state_s for details.
1188*01826a49SYabin Cui  */
1189*01826a49SYabin Cui typedef struct XXH3_state_s XXH3_state_t;
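/*!
 * @brief Allocates an @ref XXH3_state_t.
 *
 * @return An allocated pointer to an @ref XXH3_state_t on success.
 * @return `NULL` on failure.
 *
 * @note Must be freed with @ref XXH3_freeState().
 */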
1190*01826a49SYabin Cui XXH_PUBLIC_API XXH_MALLOCF XXH3_state_t* XXH3_createState(void);
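/*!
 * @brief Frees an @ref XXH3_state_t.
 *
 * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState().
 *
 * @return @ref XXH_OK.
 */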
1191*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr);
1192*01826a49SYabin Cui 
1193*01826a49SYabin Cui /*!
1194*01826a49SYabin Cui  * @brief Copies one @ref XXH3_state_t to another.
1195*01826a49SYabin Cui  *
1196*01826a49SYabin Cui  * @param dst_state The state to copy to.
1197*01826a49SYabin Cui  * @param src_state The state to copy from.
1198*01826a49SYabin Cui  * @pre
1199*01826a49SYabin Cui  *   @p dst_state and @p src_state must not be `NULL` and must not overlap.
1200*01826a49SYabin Cui  */
1201*01826a49SYabin Cui XXH_PUBLIC_API void XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state);
1202*01826a49SYabin Cui 
1203*01826a49SYabin Cui /*!
1204*01826a49SYabin Cui  * @brief Resets an @ref XXH3_state_t to begin a new hash.
1205*01826a49SYabin Cui  *
1206*01826a49SYabin Cui  * @param statePtr The state struct to reset.
1207*01826a49SYabin Cui  *
1208*01826a49SYabin Cui  * @pre
1209*01826a49SYabin Cui  *   @p statePtr must not be `NULL`.
1210*01826a49SYabin Cui  *
1211*01826a49SYabin Cui  * @return @ref XXH_OK on success.
1212*01826a49SYabin Cui  * @return @ref XXH_ERROR on failure.
1213*01826a49SYabin Cui  *
1214*01826a49SYabin Cui  * @note
1215*01826a49SYabin Cui  *   - This function resets `statePtr` and generates a secret with default parameters.
1216*01826a49SYabin Cui  *   - Call this function before @ref XXH3_64bits_update().
1217*01826a49SYabin Cui  *   - Digest will be equivalent to `XXH3_64bits()`.
1218*01826a49SYabin Cui  *
1219*01826a49SYabin Cui  */
1220*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr);
1221*01826a49SYabin Cui 
1222*01826a49SYabin Cui /*!
1223*01826a49SYabin Cui  * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash.
1224*01826a49SYabin Cui  *
1225*01826a49SYabin Cui  * @param statePtr The state struct to reset.
1226*01826a49SYabin Cui  * @param seed     The 64-bit seed to alter the hash result predictably.
1227*01826a49SYabin Cui  *
1228*01826a49SYabin Cui  * @pre
1229*01826a49SYabin Cui  *   @p statePtr must not be `NULL`.
1230*01826a49SYabin Cui  *
1231*01826a49SYabin Cui  * @return @ref XXH_OK on success.
1232*01826a49SYabin Cui  * @return @ref XXH_ERROR on failure.
1233*01826a49SYabin Cui  *
1234*01826a49SYabin Cui  * @note
1235*01826a49SYabin Cui  *   - This function resets `statePtr` and generates a secret from `seed`.
1236*01826a49SYabin Cui  *   - Call this function before @ref XXH3_64bits_update().
1237*01826a49SYabin Cui  *   - Digest will be equivalent to `XXH3_64bits_withSeed()`.
1238*01826a49SYabin Cui  *
1239*01826a49SYabin Cui  */
1240*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed);
1241*01826a49SYabin Cui 
1242*01826a49SYabin Cui /*!
1243*01826a49SYabin Cui  * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash.
1244*01826a49SYabin Cui  *
1245*01826a49SYabin Cui  * @param statePtr The state struct to reset.
1246*01826a49SYabin Cui  * @param secret     The secret data.
1247*01826a49SYabin Cui  * @param secretSize The length of @p secret, in bytes.
1248*01826a49SYabin Cui  *
1249*01826a49SYabin Cui  * @pre
1250*01826a49SYabin Cui  *   @p statePtr must not be `NULL`.
1251*01826a49SYabin Cui  *
1252*01826a49SYabin Cui  * @return @ref XXH_OK on success.
1253*01826a49SYabin Cui  * @return @ref XXH_ERROR on failure.
1254*01826a49SYabin Cui  *
1255*01826a49SYabin Cui  * @note
1256*01826a49SYabin Cui  *   `secret` is referenced, not copied; it _must outlive_ the hash streaming session.
1257*01826a49SYabin Cui  *
1258*01826a49SYabin Cui  * Similar to the one-shot API, `secretSize` must be >= @ref XXH3_SECRET_SIZE_MIN,
1259*01826a49SYabin Cui  * and the quality of produced hash values depends on the secret's entropy
1260*01826a49SYabin Cui  * (its content should look like a bunch of random bytes).
1261*01826a49SYabin Cui  * When in doubt about the randomness of a candidate `secret`,
1262*01826a49SYabin Cui  * consider employing `XXH3_generateSecret()` instead (see below).
1263*01826a49SYabin Cui  */
1264*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize);
1265*01826a49SYabin Cui 
1266*01826a49SYabin Cui /*!
1267*01826a49SYabin Cui  * @brief Consumes a block of @p input into an @ref XXH3_state_t.
1268*01826a49SYabin Cui  *
1269*01826a49SYabin Cui  * @param statePtr The state struct to update.
1270*01826a49SYabin Cui  * @param input The block of data to be hashed, at least @p length bytes in size.
1271*01826a49SYabin Cui  * @param length The length of @p input, in bytes.
1272*01826a49SYabin Cui  *
1273*01826a49SYabin Cui  * @pre
1274*01826a49SYabin Cui  *   @p statePtr must not be `NULL`.
1275*01826a49SYabin Cui  * @pre
1276*01826a49SYabin Cui  *   The memory between @p input and @p input + @p length must be valid,
1277*01826a49SYabin Cui  *   readable, contiguous memory. However, if @p length is `0`, @p input may be
1278*01826a49SYabin Cui  *   `NULL`. In C++, this also must be *TriviallyCopyable*.
1279*01826a49SYabin Cui  *
1280*01826a49SYabin Cui  * @return @ref XXH_OK on success.
1281*01826a49SYabin Cui  * @return @ref XXH_ERROR on failure.
1282*01826a49SYabin Cui  *
1283*01826a49SYabin Cui  * @note Call this to incrementally consume blocks of data.
1284*01826a49SYabin Cui  */
1285*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
1286*01826a49SYabin Cui 
1287*01826a49SYabin Cui /*!
1288*01826a49SYabin Cui  * @brief Returns the calculated XXH3 64-bit hash value from an @ref XXH3_state_t.
1289*01826a49SYabin Cui  *
1290*01826a49SYabin Cui  * @param statePtr The state struct to calculate the hash from.
1291*01826a49SYabin Cui  *
1292*01826a49SYabin Cui  * @pre
1293*01826a49SYabin Cui  *  @p statePtr must not be `NULL`.
1294*01826a49SYabin Cui  *
1295*01826a49SYabin Cui  * @return The calculated XXH3 64-bit hash value from that state.
1296*01826a49SYabin Cui  *
1297*01826a49SYabin Cui  * @note
1298*01826a49SYabin Cui  *   Calling XXH3_64bits_digest() will not affect @p statePtr, so you can update,
1299*01826a49SYabin Cui  *   digest, and update again.
1300*01826a49SYabin Cui  */
1301*01826a49SYabin Cui XXH_PUBLIC_API XXH_PUREF XXH64_hash_t  XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
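/*
 * Streaming sketch, mirroring the XXH64 flow (editor's illustration; the
 * FILE* `f`, `buffer`, and `size` are hypothetical):
 *
 *     XXH3_state_t* const state = XXH3_createState();
 *     if (state == NULL) abort();
 *     if (XXH3_64bits_reset(state) == XXH_ERROR) abort();
 *     while ((size = fread(buffer, 1, sizeof(buffer), f)) != 0) {
 *         if (XXH3_64bits_update(state, buffer, size) == XXH_ERROR) abort();
 *     }
 *     {   XXH64_hash_t const hash = XXH3_64bits_digest(state);
 *         printf("%016llx\n", (unsigned long long) hash);
 *     }
 *     XXH3_freeState(state);
 */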
1302*01826a49SYabin Cui #endif /* !XXH_NO_STREAM */
1303*01826a49SYabin Cui 
1304*01826a49SYabin Cui /* note : the canonical representation of XXH3_64bits() is the same as XXH64's,
1305*01826a49SYabin Cui  * since they both produce XXH64_hash_t values */
1306*01826a49SYabin Cui 
1307*01826a49SYabin Cui 
1308*01826a49SYabin Cui /*-**********************************************************************
1309*01826a49SYabin Cui *  XXH3 128-bit variant
1310*01826a49SYabin Cui ************************************************************************/
1311*01826a49SYabin Cui 
1312*01826a49SYabin Cui /*!
1313*01826a49SYabin Cui  * @brief The return value from 128-bit hashes.
1314*01826a49SYabin Cui  *
1315*01826a49SYabin Cui  * Stored in little endian order, although the fields themselves are in native
1316*01826a49SYabin Cui  * endianness.
1317*01826a49SYabin Cui  */
1318*01826a49SYabin Cui typedef struct {
1319*01826a49SYabin Cui     XXH64_hash_t low64;   /*!< `value & 0xFFFFFFFFFFFFFFFF` */
1320*01826a49SYabin Cui     XXH64_hash_t high64;  /*!< `value >> 64` */
1321*01826a49SYabin Cui } XXH128_hash_t;
1322*01826a49SYabin Cui 
1323*01826a49SYabin Cui /*!
1324*01826a49SYabin Cui  * @brief Calculates 128-bit unseeded variant of XXH3 of @p data.
1325*01826a49SYabin Cui  *
1326*01826a49SYabin Cui  * @param data The block of data to be hashed, at least @p len bytes in size.
1327*01826a49SYabin Cui  * @param len  The length of @p data, in bytes.
1328*01826a49SYabin Cui  *
1329*01826a49SYabin Cui  * @return The calculated 128-bit variant of XXH3 value.
1330*01826a49SYabin Cui  *
1331*01826a49SYabin Cui  * The 128-bit variant of XXH3 has more strength, but it has a bit of overhead
1332*01826a49SYabin Cui  * for shorter inputs.
1333*01826a49SYabin Cui  *
1334*01826a49SYabin Cui  * This is equivalent to @ref XXH3_128bits_withSeed() with a seed of `0`; however,
1335*01826a49SYabin Cui  * it may have slightly better performance due to constant propagation of the
1336*01826a49SYabin Cui  * defaults.
1337*01826a49SYabin Cui  *
1338*01826a49SYabin Cui  * @see XXH3_128bits_withSeed(), XXH3_128bits_withSecret(): other seeding variants
1339*01826a49SYabin Cui  * @see @ref single_shot_example "Single Shot Example" for an example.
1340*01826a49SYabin Cui  */
1341*01826a49SYabin Cui XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* data, size_t len);
1342*01826a49SYabin Cui /*! @brief Calculates 128-bit seeded variant of XXH3 hash of @p data.
1343*01826a49SYabin Cui  *
1344*01826a49SYabin Cui  * @param data The block of data to be hashed, at least @p len bytes in size.
1345*01826a49SYabin Cui  * @param len  The length of @p data, in bytes.
1346*01826a49SYabin Cui  * @param seed The 64-bit seed to alter the hash result predictably.
1347*01826a49SYabin Cui  *
1348*01826a49SYabin Cui  * @return The calculated 128-bit variant of XXH3 value.
1349*01826a49SYabin Cui  *
1350*01826a49SYabin Cui  * @note
1351*01826a49SYabin Cui  *    seed == 0 produces the same results as @ref XXH3_128bits().
1352*01826a49SYabin Cui  *
1353*01826a49SYabin Cui  * This variant generates a custom secret on the fly based on the default secret
1354*01826a49SYabin Cui  * altered using the @p seed value.
1355*01826a49SYabin Cui  *
1356*01826a49SYabin Cui  * While this operation is decently fast, note that it's not completely free.
1357*01826a49SYabin Cui  *
1358*01826a49SYabin Cui  * @see XXH3_128bits(), XXH3_128bits_withSecret(): other seeding variants
1359*01826a49SYabin Cui  * @see @ref single_shot_example "Single Shot Example" for an example.
1360*01826a49SYabin Cui  */
1361*01826a49SYabin Cui XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSeed(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);
1362*01826a49SYabin Cui /*!
1363*01826a49SYabin Cui  * @brief Calculates 128-bit variant of XXH3 with a custom "secret".
1364*01826a49SYabin Cui  *
1365*01826a49SYabin Cui  * @param data       The block of data to be hashed, at least @p len bytes in size.
1366*01826a49SYabin Cui  * @param len        The length of @p data, in bytes.
1367*01826a49SYabin Cui  * @param secret     The secret data.
1368*01826a49SYabin Cui  * @param secretSize The length of @p secret, in bytes.
1369*01826a49SYabin Cui  *
1370*01826a49SYabin Cui  * @return The calculated 128-bit variant of XXH3 value.
1371*01826a49SYabin Cui  *
1372*01826a49SYabin Cui  * It's possible to provide any blob of bytes as a "secret" to generate the hash.
1373*01826a49SYabin Cui  * This makes it more difficult for an external actor to prepare an intentional collision.
1374*01826a49SYabin Cui  * The main condition is that @p secretSize *must* be large enough (>= @ref XXH3_SECRET_SIZE_MIN).
1375*01826a49SYabin Cui  * However, the quality of the secret impacts the dispersion of the hash algorithm.
1376*01826a49SYabin Cui  * Therefore, the secret _must_ look like a bunch of random bytes.
1377*01826a49SYabin Cui  * Avoid "trivial" or structured data such as repeated sequences or a text document.
1378*01826a49SYabin Cui  * Whenever in doubt about the "randomness" of the blob of bytes,
1379*01826a49SYabin Cui  * consider employing @ref XXH3_generateSecret() instead (see below).
1380*01826a49SYabin Cui  * It will generate a proper high entropy secret derived from the blob of bytes.
1381*01826a49SYabin Cui  * Another advantage of using XXH3_generateSecret() is that
1382*01826a49SYabin Cui  * it guarantees that all bits within the initial blob of bytes
1383*01826a49SYabin Cui  * will impact every bit of the output.
1384*01826a49SYabin Cui  * This is not necessarily the case when using the blob of bytes directly
1385*01826a49SYabin Cui  * because, when hashing _small_ inputs, only a portion of the secret is employed.
1386*01826a49SYabin Cui  *
1387*01826a49SYabin Cui  * @see @ref single_shot_example "Single Shot Example" for an example.
1388*01826a49SYabin Cui  */
1389*01826a49SYabin Cui XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize);
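/*
 * 128-bit one-shot sketch (editor's illustration; `data` and `len` are
 * hypothetical). The two 64-bit halves are read through the documented
 * low64/high64 fields:
 *
 *     XXH128_hash_t const h = XXH3_128bits(data, len);
 *     printf("%016llx%016llx\n",
 *            (unsigned long long) h.high64,
 *            (unsigned long long) h.low64);
 */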
1390*01826a49SYabin Cui 
1391*01826a49SYabin Cui /*******   Streaming   *******/
1392*01826a49SYabin Cui #ifndef XXH_NO_STREAM
1393*01826a49SYabin Cui /*
1394*01826a49SYabin Cui  * Streaming requires state maintenance.
1395*01826a49SYabin Cui  * This operation costs memory and CPU.
1396*01826a49SYabin Cui  * As a consequence, streaming is slower than one-shot hashing.
1397*01826a49SYabin Cui  * For better performance, prefer one-shot functions whenever applicable.
1398*01826a49SYabin Cui  *
1399*01826a49SYabin Cui  * XXH3_128bits uses the same XXH3_state_t as XXH3_64bits().
1400*01826a49SYabin Cui  * Use the already declared XXH3_createState() and XXH3_freeState().
1401*01826a49SYabin Cui  *
1402*01826a49SYabin Cui  * All reset and streaming functions have the same meaning as their 64-bit counterparts.
1403*01826a49SYabin Cui  */
1404*01826a49SYabin Cui 
1405*01826a49SYabin Cui /*!
1406*01826a49SYabin Cui  * @brief Resets an @ref XXH3_state_t to begin a new hash.
1407*01826a49SYabin Cui  *
1408*01826a49SYabin Cui  * @param statePtr The state struct to reset.
1409*01826a49SYabin Cui  *
1410*01826a49SYabin Cui  * @pre
1411*01826a49SYabin Cui  *   @p statePtr must not be `NULL`.
1412*01826a49SYabin Cui  *
1413*01826a49SYabin Cui  * @return @ref XXH_OK on success.
1414*01826a49SYabin Cui  * @return @ref XXH_ERROR on failure.
1415*01826a49SYabin Cui  *
1416*01826a49SYabin Cui  *   - This function resets `statePtr` and generates a secret with default parameters.
1417*01826a49SYabin Cui  *   - This function resets `statePtr` and generate a secret with default parameters.
1418*01826a49SYabin Cui  *   - Call it before @ref XXH3_128bits_update().
1419*01826a49SYabin Cui  *   - Digest will be equivalent to `XXH3_128bits()`.
1420*01826a49SYabin Cui  */
1421*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr);
1422*01826a49SYabin Cui 
1423*01826a49SYabin Cui /*!
1424*01826a49SYabin Cui  * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash.
1425*01826a49SYabin Cui  *
1426*01826a49SYabin Cui  * @param statePtr The state struct to reset.
1427*01826a49SYabin Cui  * @param seed     The 64-bit seed to alter the hash result predictably.
1428*01826a49SYabin Cui  *
1429*01826a49SYabin Cui  * @pre
1430*01826a49SYabin Cui  *   @p statePtr must not be `NULL`.
1431*01826a49SYabin Cui  *
1432*01826a49SYabin Cui  * @return @ref XXH_OK on success.
1433*01826a49SYabin Cui  * @return @ref XXH_ERROR on failure.
1434*01826a49SYabin Cui  *
1435*01826a49SYabin Cui  * @note
1436*01826a49SYabin Cui  *   - This function resets `statePtr` and generates a secret from `seed`.
1437*01826a49SYabin Cui  *   - Call it before @ref XXH3_128bits_update().
1438*01826a49SYabin Cui  *   - Digest will be equivalent to `XXH3_128bits_withSeed()`.
1439*01826a49SYabin Cui  */
1440*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed);
1441*01826a49SYabin Cui /*!
1442*01826a49SYabin Cui  * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash.
1443*01826a49SYabin Cui  *
1444*01826a49SYabin Cui  * @param statePtr   The state struct to reset.
1445*01826a49SYabin Cui  * @param secret     The secret data.
1446*01826a49SYabin Cui  * @param secretSize The length of @p secret, in bytes.
1447*01826a49SYabin Cui  *
1448*01826a49SYabin Cui  * @pre
1449*01826a49SYabin Cui  *   @p statePtr must not be `NULL`.
1450*01826a49SYabin Cui  *
1451*01826a49SYabin Cui  * @return @ref XXH_OK on success.
1452*01826a49SYabin Cui  * @return @ref XXH_ERROR on failure.
1453*01826a49SYabin Cui  *
1454*01826a49SYabin Cui  * `secret` is referenced, not copied; it _must outlive_ the hash streaming session.
1455*01826a49SYabin Cui  * Similar to the one-shot API, `secretSize` must be >= @ref XXH3_SECRET_SIZE_MIN,
1456*01826a49SYabin Cui  * and the quality of produced hash values depends on the secret's entropy
1457*01826a49SYabin Cui  * (its content should look like a bunch of random bytes).
1458*01826a49SYabin Cui  * When in doubt about the randomness of a candidate `secret`,
1459*01826a49SYabin Cui  * consider employing `XXH3_generateSecret()` instead (see below).
1460*01826a49SYabin Cui  */
1461*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize);
1462*01826a49SYabin Cui 
1463*01826a49SYabin Cui /*!
1464*01826a49SYabin Cui  * @brief Consumes a block of @p input into an @ref XXH3_state_t.
1465*01826a49SYabin Cui  *
1466*01826a49SYabin Cui  * Call this to incrementally consume blocks of data.
1467*01826a49SYabin Cui  *
1468*01826a49SYabin Cui  * @param statePtr The state struct to update.
1469*01826a49SYabin Cui  * @param input The block of data to be hashed, at least @p length bytes in size.
1470*01826a49SYabin Cui  * @param length The length of @p input, in bytes.
1471*01826a49SYabin Cui  *
1472*01826a49SYabin Cui  * @pre
1473*01826a49SYabin Cui  *   @p statePtr must not be `NULL`.
1474*01826a49SYabin Cui  *
1475*01826a49SYabin Cui  * @return @ref XXH_OK on success.
1476*01826a49SYabin Cui  * @return @ref XXH_ERROR on failure.
1477*01826a49SYabin Cui  *
1478*01826a49SYabin Cui  * @note
1479*01826a49SYabin Cui  *   The memory between @p input and @p input + @p length must be valid,
1480*01826a49SYabin Cui  *   readable, contiguous memory. However, if @p length is `0`, @p input may be
1481*01826a49SYabin Cui  *   `NULL`. In C++, this also must be *TriviallyCopyable*.
1482*01826a49SYabin Cui  *
1483*01826a49SYabin Cui  */
1484*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
1485*01826a49SYabin Cui 
1486*01826a49SYabin Cui /*!
1487*01826a49SYabin Cui  * @brief Returns the calculated XXH3 128-bit hash value from an @ref XXH3_state_t.
1488*01826a49SYabin Cui  *
1489*01826a49SYabin Cui  * @param statePtr The state struct to calculate the hash from.
1490*01826a49SYabin Cui  *
1491*01826a49SYabin Cui  * @pre
1492*01826a49SYabin Cui  *  @p statePtr must not be `NULL`.
1493*01826a49SYabin Cui  *
1494*01826a49SYabin Cui  * @return The calculated XXH3 128-bit hash value from that state.
1495*01826a49SYabin Cui  *
1496*01826a49SYabin Cui  * @note
1497*01826a49SYabin Cui  *   Calling XXH3_128bits_digest() will not affect @p statePtr, so you can update,
1498*01826a49SYabin Cui  *   digest, and update again.
1499*01826a49SYabin Cui  *
1500*01826a49SYabin Cui  */
1501*01826a49SYabin Cui XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
1502*01826a49SYabin Cui #endif /* !XXH_NO_STREAM */
1503*01826a49SYabin Cui 
1504*01826a49SYabin Cui /* The following helper functions make it possible to compare XXH128_hash_t values.
1505*01826a49SYabin Cui  * Since XXH128_hash_t is a structure, this capability is not offered by the language.
1506*01826a49SYabin Cui  * Note: for better performance, these functions can be inlined using XXH_INLINE_ALL. */
1507*01826a49SYabin Cui 
1508*01826a49SYabin Cui /*!
1509*01826a49SYabin Cui  * @brief Check equality of two XXH128_hash_t values
1510*01826a49SYabin Cui  *
1511*01826a49SYabin Cui  * @param h1 The 128-bit hash value.
1512*01826a49SYabin Cui  * @param h2 Another 128-bit hash value.
1513*01826a49SYabin Cui  *
1514*01826a49SYabin Cui  * @return `1` if `h1` and `h2` are equal.
1515*01826a49SYabin Cui  * @return `0` if they are not.
1516*01826a49SYabin Cui  */
1517*01826a49SYabin Cui XXH_PUBLIC_API XXH_PUREF int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);
1518*01826a49SYabin Cui 
1519*01826a49SYabin Cui /*!
1520*01826a49SYabin Cui  * @brief Compares two @ref XXH128_hash_t
1521*01826a49SYabin Cui  *
1522*01826a49SYabin Cui  * This comparator is compatible with stdlib's `qsort()`/`bsearch()`.
1523*01826a49SYabin Cui  *
1524*01826a49SYabin Cui  * @param h128_1 Left-hand side value
1525*01826a49SYabin Cui  * @param h128_2 Right-hand side value
1526*01826a49SYabin Cui  *
1527*01826a49SYabin Cui  * @return >0 if @p h128_1  > @p h128_2
1528*01826a49SYabin Cui  * @return =0 if @p h128_1 == @p h128_2
1529*01826a49SYabin Cui  * @return <0 if @p h128_1  < @p h128_2
1530*01826a49SYabin Cui  */
1531*01826a49SYabin Cui XXH_PUBLIC_API XXH_PUREF int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2);
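/*
 * Sorting sketch (editor's illustration): since XXH128_cmp() follows the
 * qsort()/bsearch() comparator contract, an array of hashes can be ordered
 * directly (requires <stdlib.h>):
 *
 *     XXH128_hash_t hashes[16];    // hypothetical, filled elsewhere
 *     qsort(hashes, sizeof(hashes) / sizeof(hashes[0]),
 *           sizeof(hashes[0]), XXH128_cmp);
 *     int const same = XXH128_isEqual(hashes[0], hashes[1]);  // 1 if equal
 */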
1532*01826a49SYabin Cui 
1533*01826a49SYabin Cui 
1534*01826a49SYabin Cui /*******   Canonical representation   *******/
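/*!
 * @brief Canonical (big endian) representation of @ref XXH128_hash_t.
 */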
1535*01826a49SYabin Cui typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t;
1536*01826a49SYabin Cui 
1537*01826a49SYabin Cui 
1538*01826a49SYabin Cui /*!
1539*01826a49SYabin Cui  * @brief Converts an @ref XXH128_hash_t to a big endian @ref XXH128_canonical_t.
1540*01826a49SYabin Cui  *
1541*01826a49SYabin Cui  * @param dst  The @ref XXH128_canonical_t pointer to be stored to.
1542*01826a49SYabin Cui  * @param hash The @ref XXH128_hash_t to be converted.
1543*01826a49SYabin Cui  *
1544*01826a49SYabin Cui  * @pre
1545*01826a49SYabin Cui  *   @p dst must not be `NULL`.
1546*01826a49SYabin Cui  * @see @ref canonical_representation_example "Canonical Representation Example"
1547*01826a49SYabin Cui  */
1548*01826a49SYabin Cui XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash);
1549*01826a49SYabin Cui 
1550*01826a49SYabin Cui /*!
1551*01826a49SYabin Cui  * @brief Converts an @ref XXH128_canonical_t to a native @ref XXH128_hash_t.
1552*01826a49SYabin Cui  *
1553*01826a49SYabin Cui  * @param src The @ref XXH128_canonical_t to convert.
1554*01826a49SYabin Cui  *
1555*01826a49SYabin Cui  * @pre
1556*01826a49SYabin Cui  *   @p src must not be `NULL`.
1557*01826a49SYabin Cui  *
1558*01826a49SYabin Cui  * @return The converted hash.
1559*01826a49SYabin Cui  * @see @ref canonical_representation_example "Canonical Representation Example"
1560*01826a49SYabin Cui  */
1561*01826a49SYabin Cui XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src);
1562*01826a49SYabin Cui 
1563*01826a49SYabin Cui 
1564*01826a49SYabin Cui #endif  /* !XXH_NO_XXH3 */
1565*01826a49SYabin Cui #endif  /* XXH_NO_LONG_LONG */
1566*01826a49SYabin Cui 
1567*01826a49SYabin Cui /*!
1568*01826a49SYabin Cui  * @}
1569*01826a49SYabin Cui  */
1570*01826a49SYabin Cui #endif /* XXHASH_H_5627135585666179 */
1571*01826a49SYabin Cui 
1572*01826a49SYabin Cui 
1573*01826a49SYabin Cui 
1574*01826a49SYabin Cui #if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
1575*01826a49SYabin Cui #define XXHASH_H_STATIC_13879238742
1576*01826a49SYabin Cui /* ****************************************************************************
1577*01826a49SYabin Cui  * This section contains declarations which are not guaranteed to remain stable.
1578*01826a49SYabin Cui  * They may change in future versions, becoming incompatible with a different
1579*01826a49SYabin Cui  * version of the library.
1580*01826a49SYabin Cui  * These declarations should only be used with static linking.
1581*01826a49SYabin Cui  * Never use them in association with dynamic linking!
1582*01826a49SYabin Cui  ***************************************************************************** */
1583*01826a49SYabin Cui 
1584*01826a49SYabin Cui /*
1585*01826a49SYabin Cui  * These definitions are only present to allow static allocation
1586*01826a49SYabin Cui  * of XXH states, on stack or in a struct, for example.
1587*01826a49SYabin Cui  * Never **ever** access their members directly.
1588*01826a49SYabin Cui  */
1589*01826a49SYabin Cui 
1590*01826a49SYabin Cui /*!
1591*01826a49SYabin Cui  * @internal
1592*01826a49SYabin Cui  * @brief Structure for XXH32 streaming API.
1593*01826a49SYabin Cui  *
1594*01826a49SYabin Cui  * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
1595*01826a49SYabin Cui  * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
1596*01826a49SYabin Cui  * an opaque type. This allows fields to safely be changed.
1597*01826a49SYabin Cui  *
1598*01826a49SYabin Cui  * Typedef'd to @ref XXH32_state_t.
1599*01826a49SYabin Cui  * Do not access the members of this struct directly.
1600*01826a49SYabin Cui  * @see XXH64_state_s, XXH3_state_s
1601*01826a49SYabin Cui  */
1602*01826a49SYabin Cui struct XXH32_state_s {
1603*01826a49SYabin Cui    XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */
1604*01826a49SYabin Cui    XXH32_hash_t large_len;    /*!< Whether the input length is >= 16 (handles @ref total_len_32 overflow) */
1605*01826a49SYabin Cui    XXH32_hash_t v[4];         /*!< Accumulator lanes */
1606*01826a49SYabin Cui    XXH32_hash_t mem32[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */
1607*01826a49SYabin Cui    XXH32_hash_t memsize;      /*!< Amount of data in @ref mem32 */
1608*01826a49SYabin Cui    XXH32_hash_t reserved;     /*!< Reserved field. Do not read nor write to it. */
1609*01826a49SYabin Cui };   /* typedef'd to XXH32_state_t */
1610*01826a49SYabin Cui 
1611*01826a49SYabin Cui 
1612*01826a49SYabin Cui #ifndef XXH_NO_LONG_LONG  /* defined when there is no 64-bit support */
1613*01826a49SYabin Cui 
1614*01826a49SYabin Cui /*!
1615*01826a49SYabin Cui  * @internal
1616*01826a49SYabin Cui  * @brief Structure for XXH64 streaming API.
1617*01826a49SYabin Cui  *
1618*01826a49SYabin Cui  * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
1619*01826a49SYabin Cui  * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
1620*01826a49SYabin Cui  * an opaque type. This allows fields to safely be changed.
1621*01826a49SYabin Cui  *
1622*01826a49SYabin Cui  * Typedef'd to @ref XXH64_state_t.
1623*01826a49SYabin Cui  * Do not access the members of this struct directly.
1624*01826a49SYabin Cui  * @see XXH32_state_s, XXH3_state_s
1625*01826a49SYabin Cui  */
1626*01826a49SYabin Cui struct XXH64_state_s {
1627*01826a49SYabin Cui    XXH64_hash_t total_len;    /*!< Total length hashed. This is always 64-bit. */
1628*01826a49SYabin Cui    XXH64_hash_t v[4];         /*!< Accumulator lanes */
1629*01826a49SYabin Cui    XXH64_hash_t mem64[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */
1630*01826a49SYabin Cui    XXH32_hash_t memsize;      /*!< Amount of data in @ref mem64 */
1631*01826a49SYabin Cui    XXH32_hash_t reserved32;   /*!< Reserved field, needed for padding anyway. */
1632*01826a49SYabin Cui    XXH64_hash_t reserved64;   /*!< Reserved field. Do not read or write to it. */
1633*01826a49SYabin Cui };   /* typedef'd to XXH64_state_t */
1634*01826a49SYabin Cui 
1635*01826a49SYabin Cui #ifndef XXH_NO_XXH3
1636*01826a49SYabin Cui 
1637*01826a49SYabin Cui #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* >= C11 */
1638*01826a49SYabin Cui #  include <stdalign.h>
1639*01826a49SYabin Cui #  define XXH_ALIGN(n)      alignas(n)
1640*01826a49SYabin Cui #elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */
1641*01826a49SYabin Cui /* In C++ alignas() is a keyword */
1642*01826a49SYabin Cui #  define XXH_ALIGN(n)      alignas(n)
1643*01826a49SYabin Cui #elif defined(__GNUC__)
1644*01826a49SYabin Cui #  define XXH_ALIGN(n)      __attribute__ ((aligned(n)))
1645*01826a49SYabin Cui #elif defined(_MSC_VER)
1646*01826a49SYabin Cui #  define XXH_ALIGN(n)      __declspec(align(n))
1647*01826a49SYabin Cui #else
1648*01826a49SYabin Cui #  define XXH_ALIGN(n)   /* disabled */
1649*01826a49SYabin Cui #endif
1650*01826a49SYabin Cui 
1651*01826a49SYabin Cui /* Old GCC versions only accept the attribute after the type in structures. */
1652*01826a49SYabin Cui #if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L))   /* C11+ */ \
1653*01826a49SYabin Cui     && ! (defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \
1654*01826a49SYabin Cui     && defined(__GNUC__)
1655*01826a49SYabin Cui #   define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
1656*01826a49SYabin Cui #else
1657*01826a49SYabin Cui #   define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
1658*01826a49SYabin Cui #endif
1659*01826a49SYabin Cui 
1660*01826a49SYabin Cui /*!
1661*01826a49SYabin Cui  * @brief The size of the internal XXH3 buffer.
1662*01826a49SYabin Cui  *
1663*01826a49SYabin Cui  * This is the optimal update size for incremental hashing.
1664*01826a49SYabin Cui  *
1665*01826a49SYabin Cui  * @see XXH3_64bits_update(), XXH3_128bits_update().
1666*01826a49SYabin Cui  */
1667*01826a49SYabin Cui #define XXH3_INTERNALBUFFER_SIZE 256
1668*01826a49SYabin Cui 
1669*01826a49SYabin Cui /*!
1670*01826a49SYabin Cui  * @internal
1671*01826a49SYabin Cui  * @brief Default size of the secret buffer (and @ref XXH3_kSecret).
1672*01826a49SYabin Cui  *
1673*01826a49SYabin Cui  * This is the size used in @ref XXH3_kSecret and the seeded functions.
1674*01826a49SYabin Cui  *
1675*01826a49SYabin Cui  * Not to be confused with @ref XXH3_SECRET_SIZE_MIN.
1676*01826a49SYabin Cui  */
1677*01826a49SYabin Cui #define XXH3_SECRET_DEFAULT_SIZE 192
1678*01826a49SYabin Cui 
1679*01826a49SYabin Cui /*!
1680*01826a49SYabin Cui  * @internal
1681*01826a49SYabin Cui  * @brief Structure for XXH3 streaming API.
1682*01826a49SYabin Cui  *
1683*01826a49SYabin Cui  * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
1684*01826a49SYabin Cui  * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined.
1685*01826a49SYabin Cui  * Otherwise it is an opaque type.
1686*01826a49SYabin Cui  * Never use this definition in combination with a dynamic library.
1687*01826a49SYabin Cui  * This allows fields to safely be changed in the future.
1688*01826a49SYabin Cui  *
1689*01826a49SYabin Cui  * @note ** This structure has a strict alignment requirement of 64 bytes!! **
1690*01826a49SYabin Cui  * Do not allocate this with `malloc()` or `new`,
1691*01826a49SYabin Cui  * it will not be sufficiently aligned.
1692*01826a49SYabin Cui  * Use @ref XXH3_createState() and @ref XXH3_freeState(), or stack allocation.
1693*01826a49SYabin Cui  *
1694*01826a49SYabin Cui  * Typedef'd to @ref XXH3_state_t.
1695*01826a49SYabin Cui  * Never access the members of this struct directly.
1696*01826a49SYabin Cui  *
1697*01826a49SYabin Cui  * @see XXH3_INITSTATE() for stack initialization.
1698*01826a49SYabin Cui  * @see XXH3_createState(), XXH3_freeState().
1699*01826a49SYabin Cui  * @see XXH32_state_s, XXH64_state_s
1700*01826a49SYabin Cui  */
1701*01826a49SYabin Cui struct XXH3_state_s {
1702*01826a49SYabin Cui    XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
1703*01826a49SYabin Cui        /*!< The 8 accumulators. See @ref XXH32_state_s::v and @ref XXH64_state_s::v */
1704*01826a49SYabin Cui    XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
1705*01826a49SYabin Cui        /*!< Used to store a custom secret generated from a seed. */
1706*01826a49SYabin Cui    XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
1707*01826a49SYabin Cui        /*!< The internal buffer. @see XXH32_state_s::mem32 */
1708*01826a49SYabin Cui    XXH32_hash_t bufferedSize;
1709*01826a49SYabin Cui        /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */
1710*01826a49SYabin Cui    XXH32_hash_t useSeed;
1711*01826a49SYabin Cui        /*!< Reserved field. Needed for padding on 64-bit. */
1712*01826a49SYabin Cui    size_t nbStripesSoFar;
1713*01826a49SYabin Cui        /*!< Number of stripes processed. */
1714*01826a49SYabin Cui    XXH64_hash_t totalLen;
1715*01826a49SYabin Cui        /*!< Total length hashed. 64-bit even on 32-bit targets. */
1716*01826a49SYabin Cui    size_t nbStripesPerBlock;
1717*01826a49SYabin Cui        /*!< Number of stripes per block. */
1718*01826a49SYabin Cui    size_t secretLimit;
1719*01826a49SYabin Cui        /*!< Size of @ref customSecret or @ref extSecret */
1720*01826a49SYabin Cui    XXH64_hash_t seed;
1721*01826a49SYabin Cui        /*!< Seed for _withSeed variants. Must be zero otherwise. @see XXH3_INITSTATE() */
1722*01826a49SYabin Cui    XXH64_hash_t reserved64;
1723*01826a49SYabin Cui        /*!< Reserved field. */
1724*01826a49SYabin Cui    const unsigned char* extSecret;
1725*01826a49SYabin Cui        /*!< Reference to an external secret for the _withSecret variants, NULL
1726*01826a49SYabin Cui         *   for other variants. */
1727*01826a49SYabin Cui    /* note: there may be some padding at the end due to alignment on 64 bytes */
1728*01826a49SYabin Cui }; /* typedef'd to XXH3_state_t */
1729*01826a49SYabin Cui 
1730*01826a49SYabin Cui #undef XXH_ALIGN_MEMBER
1731*01826a49SYabin Cui 
1732*01826a49SYabin Cui /*!
1733*01826a49SYabin Cui  * @brief Initializes a stack-allocated `XXH3_state_s`.
1734*01826a49SYabin Cui  *
1735*01826a49SYabin Cui  * When an @ref XXH3_state_t structure is merely allocated on the stack,
1736*01826a49SYabin Cui  * it should be initialized with XXH3_INITSTATE() or a memset()
1737*01826a49SYabin Cui  * if its first reset uses XXH3_NNbits_reset_withSeed().
1738*01826a49SYabin Cui  * This init can be omitted if the first reset uses the default or _withSecret mode.
1739*01826a49SYabin Cui  * This operation isn't necessary when the state is created with XXH3_createState().
1740*01826a49SYabin Cui  * Note that this doesn't prepare the state for a streaming operation;
1741*01826a49SYabin Cui  * it's still necessary to use XXH3_NNbits_reset*() afterwards.
1742*01826a49SYabin Cui  */
1743*01826a49SYabin Cui #define XXH3_INITSTATE(XXH3_state_ptr)                       \
1744*01826a49SYabin Cui     do {                                                     \
1745*01826a49SYabin Cui         XXH3_state_t* tmp_xxh3_state_ptr = (XXH3_state_ptr); \
1746*01826a49SYabin Cui         tmp_xxh3_state_ptr->seed = 0;                        \
1747*01826a49SYabin Cui         tmp_xxh3_state_ptr->extSecret = NULL;                \
1748*01826a49SYabin Cui     } while(0)
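/*
 * Stack-allocation sketch (editor's illustration; static linking only, since
 * the struct definition above is required):
 *
 *     XXH3_state_t state;            // 64-byte aligned via XXH_ALIGN_MEMBER
 *     XXH3_INITSTATE(&state);        // required before a _withSeed reset
 *     XXH3_64bits_reset_withSeed(&state, 1234);
 *     // ... XXH3_64bits_update() / XXH3_64bits_digest() as usual ...
 */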
1749*01826a49SYabin Cui 
1750*01826a49SYabin Cui 
1751*01826a49SYabin Cui /*!
1752*01826a49SYabin Cui  * @brief Calculates the 128-bit hash of @p data using XXH3.
1753*01826a49SYabin Cui  *
1754*01826a49SYabin Cui  * @param data The block of data to be hashed, at least @p len bytes in size.
1755*01826a49SYabin Cui  * @param len  The length of @p data, in bytes.
1756*01826a49SYabin Cui  * @param seed The 64-bit seed to alter the hash's output predictably.
1757*01826a49SYabin Cui  *
1758*01826a49SYabin Cui  * @pre
1759*01826a49SYabin Cui  *   The memory between @p data and @p data + @p len must be valid,
1760*01826a49SYabin Cui  *   readable, contiguous memory. However, if @p len is `0`, @p data may be
1761*01826a49SYabin Cui  *   `NULL`. In C++, this also must be *TriviallyCopyable*.
1762*01826a49SYabin Cui  *
1763*01826a49SYabin Cui  * @return The calculated 128-bit XXH3 value.
1764*01826a49SYabin Cui  *
1765*01826a49SYabin Cui  * @see @ref single_shot_example "Single Shot Example" for an example.
1766*01826a49SYabin Cui  */
1767*01826a49SYabin Cui XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);
1768*01826a49SYabin Cui 
1769*01826a49SYabin Cui 
1770*01826a49SYabin Cui /* ===   Experimental API   === */
1771*01826a49SYabin Cui /* Symbols defined below must be considered tied to a specific library version. */
1772*01826a49SYabin Cui 
1773*01826a49SYabin Cui /*!
1774*01826a49SYabin Cui  * @brief Derives a high-entropy secret from any user-defined content, named customSeed.
1775*01826a49SYabin Cui  *
1776*01826a49SYabin Cui  * @param secretBuffer    A writable buffer for derived high-entropy secret data.
1777*01826a49SYabin Cui  * @param secretSize      Size of secretBuffer, in bytes.  Must be >= XXH3_SECRET_SIZE_MIN.
1778*01826a49SYabin Cui  * @param customSeed      The user-defined content to derive the secret from.
1779*01826a49SYabin Cui  * @param customSeedSize  Size of customSeed, in bytes.
1780*01826a49SYabin Cui  *
1781*01826a49SYabin Cui  * @return @ref XXH_OK on success.
1782*01826a49SYabin Cui  * @return @ref XXH_ERROR on failure.
1783*01826a49SYabin Cui  *
1784*01826a49SYabin Cui  * The generated secret can be used in combination with `*_withSecret()` functions.
1785*01826a49SYabin Cui  * The `_withSecret()` variants are useful to provide a higher level of protection
1786*01826a49SYabin Cui  * than a 64-bit seed, as it becomes much more difficult for an external actor to
1787*01826a49SYabin Cui  * guess how to impact the calculation logic.
1788*01826a49SYabin Cui  *
1789*01826a49SYabin Cui  * The function accepts as input a custom seed of any length and any content,
1790*01826a49SYabin Cui  * and derives from it a high-entropy secret of length @p secretSize into an
1791*01826a49SYabin Cui  * already allocated buffer @p secretBuffer.
1792*01826a49SYabin Cui  *
1793*01826a49SYabin Cui  * The generated secret can then be used with any `*_withSecret()` variant.
1794*01826a49SYabin Cui  * The functions @ref XXH3_128bits_withSecret(), @ref XXH3_64bits_withSecret(),
1795*01826a49SYabin Cui  * @ref XXH3_128bits_reset_withSecret() and @ref XXH3_64bits_reset_withSecret()
1796*01826a49SYabin Cui  * are part of this list. They all accept a `secret` parameter
1797*01826a49SYabin Cui  * which must be large enough for implementation reasons (>= @ref XXH3_SECRET_SIZE_MIN)
1798*01826a49SYabin Cui  * _and_ feature very high entropy (consist of random-looking bytes).
1799*01826a49SYabin Cui  * These conditions can be a high bar to meet, so @ref XXH3_generateSecret() can
1800*01826a49SYabin Cui  * be employed to ensure proper quality.
1801*01826a49SYabin Cui  *
1802*01826a49SYabin Cui  * @p customSeed can be anything. It can have any size, even small ones,
1803*01826a49SYabin Cui  * and its content can be anything, even "poor entropy" sources such as a bunch
1804*01826a49SYabin Cui  * of zeroes. The resulting `secret` will nonetheless provide all required qualities.
1805*01826a49SYabin Cui  *
1806*01826a49SYabin Cui  * @pre
1807*01826a49SYabin Cui  *   - @p secretSize must be >= @ref XXH3_SECRET_SIZE_MIN
1808*01826a49SYabin Cui  *   - When @p customSeedSize > 0, supplying NULL as customSeed is undefined behavior.
1809*01826a49SYabin Cui  *
1810*01826a49SYabin Cui  * Example code:
1811*01826a49SYabin Cui  * @code{.c}
1812*01826a49SYabin Cui  *    #include <stdio.h>
1813*01826a49SYabin Cui  *    #include <stdlib.h>
1814*01826a49SYabin Cui  *    #include <string.h>
1815*01826a49SYabin Cui  *    #define XXH_STATIC_LINKING_ONLY // expose unstable API
1816*01826a49SYabin Cui  *    #include "xxhash.h"
1817*01826a49SYabin Cui  *    // Hashes argv[2] using the entropy from argv[1].
1818*01826a49SYabin Cui  *    int main(int argc, char* argv[])
1819*01826a49SYabin Cui  *    {
1820*01826a49SYabin Cui  *        char secret[XXH3_SECRET_SIZE_MIN];
1821*01826a49SYabin Cui  *        if (argc != 3) { return 1; }
1822*01826a49SYabin Cui  *        XXH3_generateSecret(secret, sizeof(secret), argv[1], strlen(argv[1]));
1823*01826a49SYabin Cui  *        XXH64_hash_t h = XXH3_64bits_withSecret(
1824*01826a49SYabin Cui  *             argv[2], strlen(argv[2]),
1825*01826a49SYabin Cui  *             secret, sizeof(secret)
1826*01826a49SYabin Cui  *        );
1827*01826a49SYabin Cui  *        printf("%016llx\n", (unsigned long long) h);
1828*01826a49SYabin Cui  *    }
1829*01826a49SYabin Cui  * @endcode
1830*01826a49SYabin Cui  */
1831*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize);
1832*01826a49SYabin Cui 
1833*01826a49SYabin Cui /*!
1834*01826a49SYabin Cui  * @brief Generate the same secret as the _withSeed() variants.
1835*01826a49SYabin Cui  *
1836*01826a49SYabin Cui  * @param secretBuffer A writable buffer of @ref XXH3_SECRET_DEFAULT_SIZE bytes
1837*01826a49SYabin Cui  * @param seed         The 64-bit seed to alter the hash result predictably.
1838*01826a49SYabin Cui  *
1839*01826a49SYabin Cui  * The generated secret can be used in combination with
1840*01826a49SYabin Cui  * `*_withSecret()` and `_withSecretandSeed()` variants.
1841*01826a49SYabin Cui  *
1842*01826a49SYabin Cui  * Example C++ `std::string` hash class:
1843*01826a49SYabin Cui  * @code{.cpp}
1844*01826a49SYabin Cui  *    #include <string>
1845*01826a49SYabin Cui  *    #define XXH_STATIC_LINKING_ONLY // expose unstable API
1846*01826a49SYabin Cui  *    #include "xxhash.h"
1847*01826a49SYabin Cui  *    // Slow, seeds each time
1848*01826a49SYabin Cui  *    class HashSlow {
1849*01826a49SYabin Cui  *        XXH64_hash_t seed;
1850*01826a49SYabin Cui  *    public:
1851*01826a49SYabin Cui  *        HashSlow(XXH64_hash_t s) : seed{s} {}
1852*01826a49SYabin Cui  *        size_t operator()(const std::string& x) const {
1853*01826a49SYabin Cui  *            return size_t{XXH3_64bits_withSeed(x.c_str(), x.length(), seed)};
1854*01826a49SYabin Cui  *        }
1855*01826a49SYabin Cui  *    };
1856*01826a49SYabin Cui  *    // Fast, caches the seeded secret for future uses.
1857*01826a49SYabin Cui  *    class HashFast {
1858*01826a49SYabin Cui  *        unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
1859*01826a49SYabin Cui  *    public:
1860*01826a49SYabin Cui  *        HashFast(XXH64_hash_t s) {
1861*01826a49SYabin Cui  *            XXH3_generateSecret_fromSeed(secret, s);
1862*01826a49SYabin Cui  *        }
1863*01826a49SYabin Cui  *        size_t operator()(const std::string& x) const {
1864*01826a49SYabin Cui  *            return size_t{
1865*01826a49SYabin Cui  *                XXH3_64bits_withSecret(x.c_str(), x.length(), secret, sizeof(secret))
1866*01826a49SYabin Cui  *            };
1867*01826a49SYabin Cui  *        }
1868*01826a49SYabin Cui  *    };
1869*01826a49SYabin Cui  * @endcode
1870*01826a49SYabin Cui  */
1871*01826a49SYabin Cui XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed);
1872*01826a49SYabin Cui 
/*!
 * @brief Calculates the 64-bit seeded variant of the XXH3 hash of @p data.
 *
 * @param data       The block of data to be hashed, at least @p len bytes in size.
 * @param len        The length of @p data, in bytes.
 * @param secret     The secret data.
 * @param secretSize The length of @p secret, in bytes.
 * @param seed       The 64-bit seed to alter the hash result predictably.
 *
 * These variants generate hash values using either
 * @p seed for "short" keys (< @ref XXH3_MIDSIZE_MAX = 240 bytes)
 * or @p secret for "large" keys (>= @ref XXH3_MIDSIZE_MAX).
 *
 * This generally benefits speed, compared to `_withSeed()` or `_withSecret()`.
 * `_withSeed()` has to generate the secret on the fly for "large" keys.
 * That generation is fast, but its cost can be perceptible for "not so large" keys (< 1 KB).
 * `_withSecret()` has to generate the masks on the fly for "small" keys,
 * which requires more instructions than the `_withSeed()` variants.
 * Therefore, the `_withSecretandSeed()` variants combine the best of both worlds.
 *
 * When @p secret has been generated by XXH3_generateSecret_fromSeed(),
 * this variant produces *exactly* the same results as the `_withSeed()` variant,
 * hence offering a pure speed benefit on "large" inputs,
 * by skipping the need to regenerate the secret for every large input.
 *
 * Another usage scenario is to hash the secret to a 64-bit hash value,
 * for example with XXH3_64bits(), which then becomes the seed,
 * and then employ both the seed and the secret in `_withSecretandSeed()`.
 * On top of speed, an added benefit is that each bit in the secret
 * has a 50% chance to flip each bit in the output, via its impact on the seed.
 *
 * This is not guaranteed when using the secret directly in "small data" scenarios,
 * because only portions of the secret are employed for small data.
 */
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t
XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* data, size_t len,
                              XXH_NOESCAPE const void* secret, size_t secretSize,
                              XXH64_hash_t seed);
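
/*
 * Illustrative sketch (a hypothetical helper, not part of the API above):
 * hashing with a cached secret plus its originating seed. Assuming `secret`
 * holds the expansion of `seed` (see XXH3_generateSecret_fromSeed()), this
 * matches XXH3_64bits_withSeed() bit-for-bit while skipping secret
 * regeneration on "large" inputs. `secretSize` must be >= XXH3_SECRET_SIZE_MIN.
 *
 *    static XXH64_hash_t
 *    hash_with_cached_secret(const void* buf, size_t size,
 *                            const void* secret, size_t secretSize,
 *                            XXH64_hash_t seed)
 *    {
 *        return XXH3_64bits_withSecretandSeed(buf, size, secret, secretSize, seed);
 *    }
 */
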
/*!
 * @brief Calculates the 128-bit seeded variant of the XXH3 hash of @p input.
 *
 * @param input      The block of data to be hashed, at least @p length bytes in size.
 * @param length     The length of @p input, in bytes.
 * @param secret     The secret data.
 * @param secretSize The length of @p secret, in bytes.
 * @param seed64     The 64-bit seed to alter the hash result predictably.
 *
 * @return The calculated 128-bit XXH3 value.
 *
 * @see XXH3_64bits_withSecretandSeed()
 */
XXH_PUBLIC_API XXH_PUREF XXH128_hash_t
XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length,
                               XXH_NOESCAPE const void* secret, size_t secretSize,
                               XXH64_hash_t seed64);
#ifndef XXH_NO_STREAM
/*!
 * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash.
 *
 * @param statePtr   A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState().
 * @param secret     The secret data.
 * @param secretSize The length of @p secret, in bytes.
 * @param seed64     The 64-bit seed to alter the hash result predictably.
 *
 * @return @ref XXH_OK on success.
 * @return @ref XXH_ERROR on failure.
 *
 * @see XXH3_64bits_withSecretandSeed()
 */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
                                    XXH_NOESCAPE const void* secret, size_t secretSize,
                                    XXH64_hash_t seed64);
/*!
 * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash.
 *
 * @param statePtr   A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState().
 * @param secret     The secret data.
 * @param secretSize The length of @p secret, in bytes.
 * @param seed64     The 64-bit seed to alter the hash result predictably.
 *
 * @return @ref XXH_OK on success.
 * @return @ref XXH_ERROR on failure.
 *
 * @see XXH3_64bits_withSecretandSeed()
 */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
                                     XXH_NOESCAPE const void* secret, size_t secretSize,
                                     XXH64_hash_t seed64);
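
/*
 * Illustrative streaming sketch (a hypothetical helper, not part of xxHash):
 * hashing two buffers with a cached secret and seed. State creation can fail
 * and return NULL, and the reset call validates the secret size, so both are
 * checked; 0 is used as an arbitrary failure sentinel here.
 *
 *    static XXH64_hash_t
 *    hash_two_chunks(const void* c1, size_t n1, const void* c2, size_t n2,
 *                    const void* secret, size_t secretSize, XXH64_hash_t seed)
 *    {
 *        XXH64_hash_t h = 0;
 *        XXH3_state_t* const state = XXH3_createState();
 *        if (state == NULL) return 0;
 *        if (XXH3_64bits_reset_withSecretandSeed(state, secret, secretSize, seed) == XXH_OK) {
 *            XXH3_64bits_update(state, c1, n1);
 *            XXH3_64bits_update(state, c2, n2);
 *            h = XXH3_64bits_digest(state);
 *        }
 *        XXH3_freeState(state);
 *        return h;
 *    }
 */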
#endif /* !XXH_NO_STREAM */

#endif  /* !XXH_NO_XXH3 */
#endif  /* XXH_NO_LONG_LONG */
#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
#  define XXH_IMPLEMENTATION
#endif

#endif  /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */


/* ======================================================================== */
/* ======================================================================== */
/* ======================================================================== */


/*-**********************************************************************
 * xxHash implementation
 *-**********************************************************************
 * xxHash's implementation used to be hosted inside xxhash.c.
 *
 * However, inlining requires the implementation to be visible to the compiler,
 * hence included alongside the header.
 * Previously, xxhash.c was #included into the header whenever inlining was
 * activated. This construction created issues with a few build and install
 * systems, as it required xxhash.c to be stored in the /include directory.
 *
 * The xxHash implementation is now directly integrated within xxhash.h.
 * As a consequence, xxhash.c is no longer needed in /include.
 *
 * xxhash.c is still available and is still useful.
 * In a "normal" setup, when xxhash is not inlined,
 * xxhash.h only exposes the prototypes and public symbols,
 * while xxhash.c can be built into an object file xxhash.o
 * which can then be linked into the final binary.
 ************************************************************************/

#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
   || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
#  define XXH_IMPLEM_13a8737387

/* *************************************
*  Tuning parameters
***************************************/

/*!
 * @defgroup tuning Tuning parameters
 * @{
 *
 * Various macros to control xxHash's behavior.
 */
#ifdef XXH_DOXYGEN
/*!
 * @brief Define this to disable 64-bit code.
 *
 * Useful if only using the @ref XXH32_family and you have a strict C90 compiler.
 */
#  define XXH_NO_LONG_LONG
#  undef XXH_NO_LONG_LONG /* don't actually */
/*!
 * @brief Controls how unaligned memory is accessed.
 *
 * By default, access to unaligned memory is controlled by `memcpy()`, which is
 * safe and portable.
 *
 * Unfortunately, on some target/compiler combinations, the generated assembly
 * is sub-optimal.
 *
 * The switch below allows selection of a different access method
 * in the search for improved performance.
 *
 * @par Possible options:
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy`
 *   @par
 *     Use `memcpy()`. Safe and portable. Note that most modern compilers will
 *     eliminate the function call and treat it as an unaligned access.
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((aligned(1)))`
 *   @par
 *     Depends on compiler extensions and is therefore not portable.
 *     This method is safe _if_ your compiler supports it,
 *     and *generally* as fast or faster than `memcpy`.
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast
 *  @par
 *     Casts directly and dereferences. This method doesn't depend on the
 *     compiler, but it violates the C standard as it directly dereferences an
 *     unaligned pointer. It can generate buggy code on targets which do not
 *     support unaligned memory accesses, but in some circumstances, it's the
 *     only known way to get the most performance.
 *
 *  - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift
 *  @par
 *     Also portable. This can generate the best code on old compilers which don't
 *     inline small `memcpy()` calls, and it might also be faster on big-endian
 *     systems which lack a native byteswap instruction. However, some compilers
 *     will emit literal byteshifts even if the target supports unaligned access.
 *
 *
 * @warning
 *   Methods 1 and 2 rely on implementation-defined behavior. Use these with
 *   care, as what works on one compiler/platform/optimization level may cause
 *   another to read garbage data or even crash.
 *
 * See https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details.
 *
 * Prefer these methods in priority order (0 > 3 > 1 > 2).
 */
#  define XXH_FORCE_MEMORY_ACCESS 0
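
/*
 * For illustration (a sketch, not mandated by the library): a client that
 * compiles the implementation inline may pin a method before including this
 * header; the macro is more commonly set on the compiler command line, e.g.
 * -DXXH_FORCE_MEMORY_ACCESS=3.
 *
 *    #define XXH_FORCE_MEMORY_ACCESS 3  // force the byteshift method
 *    #define XXH_INLINE_ALL
 *    #include "xxhash.h"
 */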

/*!
 * @def XXH_SIZE_OPT
 * @brief Controls how much xxHash optimizes for size.
 *
 * xxHash, when compiled, tends to result in a rather large binary size. This
 * is mostly due to heavy usage of forced inlining and constant folding of the
 * @ref XXH3_family to increase performance.
 *
 * However, some developers prefer size over speed. This option can
 * significantly reduce the size of the generated code. When using the `-Os`
 * or `-Oz` options on GCC or Clang, this is defined to 1 by default,
 * otherwise it is defined to 0.
 *
 * Most of these size optimizations can be controlled manually.
 *
 * This is a number from 0-2.
 *  - `XXH_SIZE_OPT` == 0: Default. xxHash makes no size optimizations. Speed
 *    comes first.
 *  - `XXH_SIZE_OPT` == 1: Default for `-Os` and `-Oz`. xxHash is more
 *    conservative and disables hacks that increase code size. It implies the
 *    options @ref XXH_NO_INLINE_HINTS == 1, @ref XXH_FORCE_ALIGN_CHECK == 0,
 *    and @ref XXH3_NEON_LANES == 8 if they are not already defined.
 *  - `XXH_SIZE_OPT` == 2: xxHash tries to make itself as small as possible.
 *    Performance may cry. For example, the single shot functions just use the
 *    streaming API.
 */
#  define XXH_SIZE_OPT 0
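
/*
 * For illustration (a sketch under the same conventions as above): requesting
 * maximum size reduction when compiling the implementation.
 *
 *    #define XXH_SIZE_OPT 2      // smallest code, at a speed cost
 *    #define XXH_IMPLEMENTATION  // or XXH_INLINE_ALL
 *    #include "xxhash.h"
 */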

/*!
 * @def XXH_FORCE_ALIGN_CHECK
 * @brief If defined to non-zero, adds a special path for aligned inputs (XXH32()
 * and XXH64() only).
 *
 * This is an important performance trick for architectures without decent
 * unaligned memory access performance.
 *
 * It checks for input alignment, and when conditions are met, uses a "fast
 * path" employing direct 32-bit/64-bit reads, resulting in _dramatically
 * faster_ read speed.
 *
 * The check costs one initial branch per hash, which is generally negligible,
 * but not zero.
 *
 * Moreover, it's not useful to generate an additional code path if memory
 * access uses the same instruction for both aligned and unaligned
 * addresses (e.g. x86 and aarch64).
 *
 * In these cases, the alignment check can be removed by setting this macro to 0.
 * Then the code will always use unaligned memory access.
 * The alignment check is automatically disabled on x86, x64, ARM64, and some ARM
 * chips, platforms known to offer good unaligned memory access performance.
 *
 * It is also disabled by default when @ref XXH_SIZE_OPT >= 1.
 *
 * This option does not affect XXH3 (only XXH32 and XXH64).
 */
#  define XXH_FORCE_ALIGN_CHECK 0
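
/*
 * For illustration (a sketch; whether this helps is target-specific and should
 * be benchmarked): re-enabling the aligned fast path on a platform where the
 * automatic detection below turns it off.
 *
 *    #define XXH_FORCE_ALIGN_CHECK 1
 *    #define XXH_IMPLEMENTATION
 *    #include "xxhash.h"
 */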

/*!
 * @def XXH_NO_INLINE_HINTS
 * @brief When non-zero, sets all functions to `static`.
 *
 * By default, xxHash tries to force the compiler to inline almost all internal
 * functions.
 *
 * This can usually improve performance due to reduced jumping and improved
 * constant folding, but significantly increases the size of the binary, which
 * might not be favorable.
 *
 * Additionally, sometimes the forced inlining can be detrimental to performance,
 * depending on the architecture.
 *
 * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the
 * compiler full control on whether to inline or not.
 *
 * When not optimizing (-O0), using `-fno-inline` with GCC or Clang, or if
 * @ref XXH_SIZE_OPT >= 1, this will automatically be defined.
 */
#  define XXH_NO_INLINE_HINTS 0

/*!
 * @def XXH3_INLINE_SECRET
 * @brief Determines whether to inline the XXH3 withSecret code.
 *
 * When the secret size is known, the compiler can improve the performance
 * of XXH3_64bits_withSecret() and XXH3_128bits_withSecret().
 *
 * However, if the secret size is not known, it doesn't have any benefit. This
 * happens when xxHash is compiled into a global symbol. Therefore, if
 * @ref XXH_INLINE_ALL is *not* defined, this will be defined to 0.
 *
 * Additionally, this defaults to 0 on GCC 12+, which has an issue with function
 * pointers that are *sometimes* force-inlined on -Og, and it is impossible to
 * automatically detect this optimization level.
 */
#  define XXH3_INLINE_SECRET 0

/*!
 * @def XXH32_ENDJMP
 * @brief Whether to use a jump for `XXH32_finalize`.
 *
 * For performance, `XXH32_finalize` uses multiple branches in the finalizer.
 * This is generally preferable for performance,
 * but depending on the exact architecture, a jmp may be preferable.
 *
 * This setting is likely to make a measurable difference only for very small inputs.
 */
#  define XXH32_ENDJMP 0

/*!
 * @internal
 * @brief Redefines old internal names.
 *
 * For compatibility with code that uses xxHash's internals before the names
 * were changed to improve namespacing. There is no other reason to use this.
 */
#  define XXH_OLD_NAMES
#  undef XXH_OLD_NAMES /* don't actually use, it is ugly. */

/*!
 * @def XXH_NO_STREAM
 * @brief Disables the streaming API.
 *
 * When xxHash is not inlined and the streaming functions are not used, disabling
 * the streaming functions can reduce code size significantly, especially with
 * the @ref XXH3_family, which tends to make constant folded copies of itself.
 */
#  define XXH_NO_STREAM
#  undef XXH_NO_STREAM /* don't actually */
#endif /* XXH_DOXYGEN */
/*!
 * @}
 */

#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
   /* prefer __packed__ structures (method 1) for GCC,
    * except on < ARMv7 with unaligned access (e.g. Raspbian armhf):
    * those targets compile method 1 to byte shifting, so we use memcpy instead,
    * which for some reason performs unaligned loads. */
#  if defined(__GNUC__) && !(defined(__ARM_ARCH) && __ARM_ARCH < 7 && defined(__ARM_FEATURE_UNALIGNED))
#    define XXH_FORCE_MEMORY_ACCESS 1
#  endif
#endif

#ifndef XXH_SIZE_OPT
   /* default to 1 for -Os or -Oz */
#  if (defined(__GNUC__) || defined(__clang__)) && defined(__OPTIMIZE_SIZE__)
#    define XXH_SIZE_OPT 1
#  else
#    define XXH_SIZE_OPT 0
#  endif
#endif

#ifndef XXH_FORCE_ALIGN_CHECK  /* can be defined externally */
   /* don't check on sizeopt, x86, aarch64, or arm when unaligned access is available */
#  if XXH_SIZE_OPT >= 1 || \
      defined(__i386)  || defined(__x86_64__) || defined(__aarch64__) || defined(__ARM_FEATURE_UNALIGNED) \
   || defined(_M_IX86) || defined(_M_X64)     || defined(_M_ARM64)    || defined(_M_ARM) /* visual */
#    define XXH_FORCE_ALIGN_CHECK 0
#  else
#    define XXH_FORCE_ALIGN_CHECK 1
#  endif
#endif

#ifndef XXH_NO_INLINE_HINTS
#  if XXH_SIZE_OPT >= 1 || defined(__NO_INLINE__)  /* -O0, -fno-inline */
#    define XXH_NO_INLINE_HINTS 1
#  else
#    define XXH_NO_INLINE_HINTS 0
#  endif
#endif

#ifndef XXH3_INLINE_SECRET
#  if (defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 12) \
     || !defined(XXH_INLINE_ALL)
#    define XXH3_INLINE_SECRET 0
#  else
#    define XXH3_INLINE_SECRET 1
#  endif
#endif

#ifndef XXH32_ENDJMP
/* generally preferable for performance */
#  define XXH32_ENDJMP 0
#endif

/*!
 * @defgroup impl Implementation
 * @{
 */


/* *************************************
*  Includes & Memory related functions
***************************************/
#if defined(XXH_NO_STREAM)
/* nothing */
#elif defined(XXH_NO_STDLIB)

/* When requesting to disable any mention of stdlib,
 * the library loses the ability to invoke malloc() / free().
 * In practice, it means that functions like `XXH*_createState()`
 * will always fail, and return NULL.
 * This flag is useful in situations where
 * xxhash.h is integrated into some kernel, embedded or limited environment
 * without access to dynamic allocation.
 */

static XXH_CONSTF void* XXH_malloc(size_t s) { (void)s; return NULL; }
static void XXH_free(void* p) { (void)p; }
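
/*
 * Under XXH_NO_STDLIB, callers must therefore be prepared for NULL. A minimal
 * sketch (illustration only, assuming the streaming API is still enabled):
 *
 *    static XXH32_hash_t hash_buf(const void* buf, size_t size)
 *    {
 *        XXH32_hash_t h;
 *        XXH32_state_t* const s = XXH32_createState();
 *        if (s == NULL) {
 *            // no allocator available: fall back to the one-shot API
 *            return XXH32(buf, size, 0);
 *        }
 *        XXH32_reset(s, 0);
 *        XXH32_update(s, buf, size);
 *        h = XXH32_digest(s);
 *        XXH32_freeState(s);
 *        return h;
 *    }
 */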

#else

/*
 * Modify the local functions below should you wish to use
 * different memory routines for malloc() and free()
 */
#include <stdlib.h>

/*!
 * @internal
 * @brief Modify this function to use a different routine than malloc().
 */
static XXH_MALLOCF void* XXH_malloc(size_t s) { return malloc(s); }

/*!
 * @internal
 * @brief Modify this function to use a different routine than free().
 */
static void XXH_free(void* p) { free(p); }
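
/*
 * For instance (a hypothetical sketch; `my_arena_alloc` and `my_arena_release`
 * are placeholder names, not part of xxHash), the two functions above could be
 * rewritten to route state allocations through a user-supplied arena:
 *
 *    static XXH_MALLOCF void* XXH_malloc(size_t s) { return my_arena_alloc(s); }
 *    static void XXH_free(void* p) { my_arena_release(p); }
 */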

#endif  /* XXH_NO_STDLIB */

#include <string.h>

/*!
 * @internal
 * @brief Modify this function to use a different routine than memcpy().
 */
static void* XXH_memcpy(void* dest, const void* src, size_t size)
{
    return memcpy(dest,src,size);
}

#include <limits.h>   /* ULLONG_MAX */


/* *************************************
*  Compiler Specific Options
***************************************/
#ifdef _MSC_VER /* Visual Studio warning fix */
#  pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
#endif

#if XXH_NO_INLINE_HINTS  /* disable inlining hints */
#  if defined(__GNUC__) || defined(__clang__)
#    define XXH_FORCE_INLINE static __attribute__((unused))
#  else
#    define XXH_FORCE_INLINE static
#  endif
#  define XXH_NO_INLINE static
/* enable inlining hints */
#elif defined(__GNUC__) || defined(__clang__)
#  define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused))
#  define XXH_NO_INLINE static __attribute__((noinline))
#elif defined(_MSC_VER)  /* Visual Studio */
#  define XXH_FORCE_INLINE static __forceinline
#  define XXH_NO_INLINE static __declspec(noinline)
#elif defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L))   /* C99 */
#  define XXH_FORCE_INLINE static inline
#  define XXH_NO_INLINE static
#else
#  define XXH_FORCE_INLINE static
#  define XXH_NO_INLINE static
#endif

#if XXH3_INLINE_SECRET
#  define XXH3_WITH_SECRET_INLINE XXH_FORCE_INLINE
#else
#  define XXH3_WITH_SECRET_INLINE XXH_NO_INLINE
#endif


/* *************************************
*  Debug
***************************************/
/*!
 * @ingroup tuning
 * @def XXH_DEBUGLEVEL
 * @brief Sets the debugging level.
 *
 * XXH_DEBUGLEVEL is expected to be defined externally, typically via the
 * compiler's command line options. The value must be a number.
 */
#ifndef XXH_DEBUGLEVEL
#  ifdef DEBUGLEVEL /* backwards compat */
#    define XXH_DEBUGLEVEL DEBUGLEVEL
#  else
#    define XXH_DEBUGLEVEL 0
#  endif
#endif

#if (XXH_DEBUGLEVEL>=1)
#  include <assert.h>   /* note: can still be disabled with NDEBUG */
#  define XXH_ASSERT(c)   assert(c)
#else
#  if defined(__INTEL_COMPILER)
#    define XXH_ASSERT(c)   XXH_ASSUME((unsigned char) (c))
#  else
#    define XXH_ASSERT(c)   XXH_ASSUME(c)
#  endif
#endif

/* note: use after variable declarations */
#ifndef XXH_STATIC_ASSERT
#  if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)    /* C11 */
#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { _Static_assert((c),m); } while(0)
#  elif defined(__cplusplus) && (__cplusplus >= 201103L)            /* C++11 */
#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
#  else
#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0)
#  endif
#  define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c)
#endif

/*!
 * @internal
 * @def XXH_COMPILER_GUARD(var)
 * @brief Used to prevent unwanted optimizations for @p var.
 *
 * It uses an empty GCC inline assembly statement with a register constraint
 * which forces @p var into a general purpose register (e.g. eax, ebx, ecx
 * on x86) and marks it as modified.
 *
 * This is used in a few places to avoid unwanted autovectorization (e.g.
 * XXH32_round()). All vectorization we want is explicit via intrinsics,
 * and _usually_ isn't wanted elsewhere.
 *
 * We also use it to prevent unwanted constant folding for AArch64 in
 * XXH3_initCustomSecret_scalar().
 */
#if defined(__GNUC__) || defined(__clang__)
#  define XXH_COMPILER_GUARD(var) __asm__("" : "+r" (var))
#else
#  define XXH_COMPILER_GUARD(var) ((void)0)
#endif
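
/*
 * Illustrative sketch (a hypothetical function, not used by xxHash): how the
 * guard defeats autovectorization of a simple reduction. The empty asm with a
 * "+r" constraint forces `acc` into a scalar register on every iteration, so
 * GCC/Clang cannot prove the loop is vectorizable as a whole.
 *
 *    static unsigned mix_all(const unsigned* v, size_t n)
 *    {
 *        unsigned acc = 0;
 *        size_t i;
 *        for (i = 0; i < n; i++) {
 *            acc += v[i] * 2654435761U;
 *            XXH_COMPILER_GUARD(acc);  // keep acc scalar each round
 *        }
 *        return acc;
 *    }
 */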

/* Specifically for NEON vectors, which use the "w" constraint, on Clang. */
#if defined(__clang__) && defined(__ARM_ARCH) && !defined(__wasm__)
#  define XXH_COMPILER_GUARD_CLANG_NEON(var) __asm__("" : "+w" (var))
#else
#  define XXH_COMPILER_GUARD_CLANG_NEON(var) ((void)0)
#endif

/* *************************************
*  Basic Types
***************************************/
#if !defined (__VMS) \
 && (defined (__cplusplus) \
 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# ifdef _AIX
#   include <inttypes.h>
# else
#   include <stdint.h>
# endif
  typedef uint8_t xxh_u8;
#else
  typedef unsigned char xxh_u8;
#endif
typedef XXH32_hash_t xxh_u32;

#ifdef XXH_OLD_NAMES
#  warning "XXH_OLD_NAMES is planned to be removed starting v0.9. If the program depends on it, consider moving away from it by employing newer type names directly"
#  define BYTE xxh_u8
#  define U8   xxh_u8
#  define U32  xxh_u32
#endif

/* ***   Memory access   *** */

/*!
 * @internal
 * @fn xxh_u32 XXH_read32(const void* ptr)
 * @brief Reads an unaligned 32-bit integer from @p ptr in native endianness.
 *
 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
 *
 * @param ptr The pointer to read from.
 * @return The 32-bit native endian integer from the bytes at @p ptr.
 */

/*!
 * @internal
 * @fn xxh_u32 XXH_readLE32(const void* ptr)
 * @brief Reads an unaligned 32-bit little endian integer from @p ptr.
 *
 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
 *
 * @param ptr The pointer to read from.
 * @return The 32-bit little endian integer from the bytes at @p ptr.
 */

/*!
 * @internal
 * @fn xxh_u32 XXH_readBE32(const void* ptr)
 * @brief Reads an unaligned 32-bit big endian integer from @p ptr.
 *
 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
 *
 * @param ptr The pointer to read from.
 * @return The 32-bit big endian integer from the bytes at @p ptr.
 */

/*!
 * @internal
 * @fn xxh_u32 XXH_readLE32_align(const void* ptr, XXH_alignment align)
 * @brief Like @ref XXH_readLE32(), but has an option for aligned reads.
 *
 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
 * Note that when @ref XXH_FORCE_ALIGN_CHECK == 0, the @p align parameter is
 * always @ref XXH_alignment::XXH_unaligned.
 *
 * @param ptr The pointer to read from.
 * @param align Whether @p ptr is aligned.
 * @pre
 *   If @p align == @ref XXH_alignment::XXH_aligned, @p ptr must be 4 byte
 *   aligned.
 * @return The 32-bit little endian integer from the bytes at @p ptr.
 */

#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
/*
 * Manual byteshift. Best for old compilers which don't inline memcpy.
 * We actually directly use XXH_readLE32 and XXH_readBE32.
 */
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/*
 * Force direct memory access. Only works on CPUs which support unaligned memory
 * access in hardware.
 */
static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; }

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

/*
 * __attribute__((aligned(1))) is supported by gcc and clang. Originally the
 * documentation claimed that it only increased the alignment, but actually it
 * can decrease it on gcc, clang, and icc:
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502,
 * https://gcc.godbolt.org/z/xYez1j67Y.
 */
#ifdef XXH_OLD_NAMES
typedef union { xxh_u32 u32; } __attribute__((packed)) unalign;
#endif
static xxh_u32 XXH_read32(const void* ptr)
{
    typedef __attribute__((aligned(1))) xxh_u32 xxh_unalign32;
    return *((const xxh_unalign32*)ptr);
}

#else

/*
 * Portable and safe solution. Generally efficient.
 * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
 */
static xxh_u32 XXH_read32(const void* memPtr)
{
    xxh_u32 val;
    XXH_memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif   /* XXH_FORCE_MEMORY_ACCESS */


/* ***   Endianness   *** */

/*!
 * @ingroup tuning
 * @def XXH_CPU_LITTLE_ENDIAN
 * @brief Whether the target is little endian.
 *
 * Defined to 1 if the target is little endian, or 0 if it is big endian.
 * It can be defined externally, for example on the compiler command line.
 *
 * If it is not defined,
 * a runtime check (which is usually constant folded) is used instead.
 *
 * @note
 *   This is not necessarily defined to an integer constant.
 *
 * @see XXH_isLittleEndian() for the runtime check.
 */
#ifndef XXH_CPU_LITTLE_ENDIAN
/*
 * Try to detect endianness automatically, to avoid the nonstandard behavior
 * in `XXH_isLittleEndian()`
 */
#  if defined(_WIN32) /* Windows is always little endian */ \
     || defined(__LITTLE_ENDIAN__) \
     || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#    define XXH_CPU_LITTLE_ENDIAN 1
#  elif defined(__BIG_ENDIAN__) \
     || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#    define XXH_CPU_LITTLE_ENDIAN 0
#  else
/*!
 * @internal
 * @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN.
 *
 * Most compilers will constant fold this.
 */
static int XXH_isLittleEndian(void)
{
    /*
     * Portable and well-defined behavior.
     * Don't use static: it is detrimental to performance.
     */
    const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 };
    return one.c[0];
}
#   define XXH_CPU_LITTLE_ENDIAN   XXH_isLittleEndian()
#  endif
#endif




/* ****************************************
*  Compiler-specific Functions and Macros
******************************************/
#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

#ifdef __has_builtin
#  define XXH_HAS_BUILTIN(x) __has_builtin(x)
#else
#  define XXH_HAS_BUILTIN(x) 0
#endif



/*
 * C23 and future versions have standard "unreachable()".
 * Once it has been implemented reliably we can add it as an
 * additional case:
 *
 * ```
 * #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN)
 * #  include <stddef.h>
 * #  ifdef unreachable
 * #    define XXH_UNREACHABLE() unreachable()
 * #  endif
 * #endif
 * ```
 *
 * Note C++23 also has std::unreachable() which can be detected
 * as follows:
 * ```
 * #if defined(__cpp_lib_unreachable) && (__cpp_lib_unreachable >= 202202L)
 * #  include <utility>
 * #  define XXH_UNREACHABLE() std::unreachable()
 * #endif
 * ```
 * NB: `__cpp_lib_unreachable` is defined in the `<version>` header.
 * We don't use that, as including `<utility>` in `extern "C"` blocks
 * doesn't work on GCC 12.
 */

#if XXH_HAS_BUILTIN(__builtin_unreachable)
#  define XXH_UNREACHABLE() __builtin_unreachable()

#elif defined(_MSC_VER)
#  define XXH_UNREACHABLE() __assume(0)

#else
#  define XXH_UNREACHABLE()
#endif

#if XXH_HAS_BUILTIN(__builtin_assume)
#  define XXH_ASSUME(c) __builtin_assume(c)
#else
#  define XXH_ASSUME(c) if (!(c)) { XXH_UNREACHABLE(); }
#endif

/*!
 * @internal
 * @def XXH_rotl32(x,r)
 * @brief 32-bit rotate left.
 *
 * @param x The 32-bit integer to be rotated.
 * @param r The number of bits to rotate.
 * @pre
 *   @p r > 0 && @p r < 32
 * @note
 *   @p x and @p r may be evaluated multiple times.
 * @return The rotated result.
 */
#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
                               && XXH_HAS_BUILTIN(__builtin_rotateleft64)
#  define XXH_rotl32 __builtin_rotateleft32
#  define XXH_rotl64 __builtin_rotateleft64
/* Note: although _rotl exists for MinGW (GCC under Windows), performance seems poor */
#elif defined(_MSC_VER)
#  define XXH_rotl32(x,r) _rotl(x,r)
#  define XXH_rotl64(x,r) _rotl64(x,r)
#else
#  define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
#  define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
#endif

/*!
 * @internal
 * @fn xxh_u32 XXH_swap32(xxh_u32 x)
 * @brief A 32-bit byteswap.
 *
 * @param x The 32-bit integer to byteswap.
 * @return @p x, byteswapped.
 */
#if defined(_MSC_VER)     /* Visual Studio */
#  define XXH_swap32 _byteswap_ulong
#elif XXH_GCC_VERSION >= 403
#  define XXH_swap32 __builtin_bswap32
#else
static xxh_u32 XXH_swap32 (xxh_u32 x)
{
    return  ((x << 24) & 0xff000000 ) |
            ((x <<  8) & 0x00ff0000 ) |
            ((x >>  8) & 0x0000ff00 ) |
            ((x >> 24) & 0x000000ff );
}
#endif


/* ***************************
*  Memory reads
*****************************/

/*!
 * @internal
 * @brief Enum to indicate whether a pointer is aligned.
 */
typedef enum {
    XXH_aligned,  /*!< Aligned */
    XXH_unaligned /*!< Possibly unaligned */
} XXH_alignment;

/*
 * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
 *
 * This is ideal for older compilers which don't inline memcpy.
 */
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))

XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[0]
         | ((xxh_u32)bytePtr[1] << 8)
         | ((xxh_u32)bytePtr[2] << 16)
         | ((xxh_u32)bytePtr[3] << 24);
}

XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[3]
         | ((xxh_u32)bytePtr[2] << 8)
         | ((xxh_u32)bytePtr[1] << 16)
         | ((xxh_u32)bytePtr[0] << 24);
}

#else
XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
}

static xxh_u32 XXH_readBE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}
#endif

XXH_FORCE_INLINE xxh_u32
XXH_readLE32_align(const void* ptr, XXH_alignment align)
{
    if (align==XXH_unaligned) {
        return XXH_readLE32(ptr);
    } else {
        return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr);
    }
}


/* *************************************
*  Misc
***************************************/
/*! @ingroup public */
XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }


/* *******************************************************************
*  32-bit hash functions
*********************************************************************/
/*!
 * @}
 * @defgroup XXH32_impl XXH32 implementation
 * @ingroup impl
 *
 * Details on the XXH32 implementation.
 * @{
 */
 /* #define instead of static const, to be used as initializers */
#define XXH_PRIME32_1  0x9E3779B1U  /*!< 0b10011110001101110111100110110001 */
#define XXH_PRIME32_2  0x85EBCA77U  /*!< 0b10000101111010111100101001110111 */
#define XXH_PRIME32_3  0xC2B2AE3DU  /*!< 0b11000010101100101010111000111101 */
#define XXH_PRIME32_4  0x27D4EB2FU  /*!< 0b00100111110101001110101100101111 */
#define XXH_PRIME32_5  0x165667B1U  /*!< 0b00010110010101100110011110110001 */

#ifdef XXH_OLD_NAMES
#  define PRIME32_1 XXH_PRIME32_1
#  define PRIME32_2 XXH_PRIME32_2
#  define PRIME32_3 XXH_PRIME32_3
#  define PRIME32_4 XXH_PRIME32_4
#  define PRIME32_5 XXH_PRIME32_5
#endif

2806*01826a49SYabin Cui /*!
2807*01826a49SYabin Cui  * @internal
2808*01826a49SYabin Cui  * @brief Normal stripe processing routine.
2809*01826a49SYabin Cui  *
2810*01826a49SYabin Cui  * This shuffles the bits so that any bit from @p input impacts several bits in
2811*01826a49SYabin Cui  * @p acc.
2812*01826a49SYabin Cui  *
2813*01826a49SYabin Cui  * @param acc The accumulator lane.
2814*01826a49SYabin Cui  * @param input The stripe of input to mix.
2815*01826a49SYabin Cui  * @return The mixed accumulator lane.
2816*01826a49SYabin Cui  */
XXH32_round(xxh_u32 acc,xxh_u32 input)2817*01826a49SYabin Cui static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
2818*01826a49SYabin Cui {
2819*01826a49SYabin Cui     acc += input * XXH_PRIME32_2;
2820*01826a49SYabin Cui     acc  = XXH_rotl32(acc, 13);
2821*01826a49SYabin Cui     acc *= XXH_PRIME32_1;
2822*01826a49SYabin Cui #if (defined(__SSE4_1__) || defined(__aarch64__) || defined(__wasm_simd128__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
2823*01826a49SYabin Cui     /*
2824*01826a49SYabin Cui      * UGLY HACK:
2825*01826a49SYabin Cui      * A compiler fence is the only thing that prevents GCC and Clang from
2826*01826a49SYabin Cui      * autovectorizing the XXH32 loop (pragmas and attributes don't work for some
2827*01826a49SYabin Cui      * reason) without globally disabling SSE4.1.
2828*01826a49SYabin Cui      *
2829*01826a49SYabin Cui      * The reason we want to avoid vectorization is because despite working on
2830*01826a49SYabin Cui      * 4 integers at a time, there are multiple factors slowing XXH32 down on
2831*01826a49SYabin Cui      * SSE4:
2832*01826a49SYabin Cui      * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on
2833*01826a49SYabin Cui      *   newer chips!) making it slightly slower to multiply four integers at
2834*01826a49SYabin Cui      *   once compared to four integers independently. Even when pmulld was
2835*01826a49SYabin Cui      *   fastest, Sandy/Ivy Bridge, it is still not worth it to go into SSE
2836*01826a49SYabin Cui      *   just to multiply unless doing a long operation.
2837*01826a49SYabin Cui      *
2838*01826a49SYabin Cui      * - Four instructions are required to rotate,
2839*01826a49SYabin Cui      *      movqda tmp,  v // not required with VEX encoding
2840*01826a49SYabin Cui      *      pslld  tmp, 13 // tmp <<= 13
2841*01826a49SYabin Cui      *      psrld  v,   19 // x >>= 19
2842*01826a49SYabin Cui      *      por    v,  tmp // x |= tmp
2843*01826a49SYabin Cui      *   compared to one for scalar:
2844*01826a49SYabin Cui      *      roll   v, 13    // reliably fast across the board
2845*01826a49SYabin Cui      *      shldl  v, v, 13 // Sandy Bridge and later prefer this for some reason
2846*01826a49SYabin Cui      *
2847*01826a49SYabin Cui      * - Instruction level parallelism is actually more beneficial here because
2848*01826a49SYabin Cui      *   the SIMD actually serializes this operation: While v1 is rotating, v2
2849*01826a49SYabin Cui      *   can load data, while v3 can multiply. SSE forces them to operate
2850*01826a49SYabin Cui      *   together.
2851*01826a49SYabin Cui      *
2852*01826a49SYabin Cui      * This is also enabled on AArch64, as Clang is *very aggressive* in vectorizing
2853*01826a49SYabin Cui      * the loop. NEON is only faster on the A53; on newer cores, it runs at less
2854*01826a49SYabin Cui      * than half the scalar speed.
2855*01826a49SYabin Cui      *
2856*01826a49SYabin Cui      * Additionally, this is used on WASM SIMD128 because it JITs to the same
2857*01826a49SYabin Cui      * SIMD instructions and has the same issue.
2858*01826a49SYabin Cui      */
2859*01826a49SYabin Cui     XXH_COMPILER_GUARD(acc);
2860*01826a49SYabin Cui #endif
2861*01826a49SYabin Cui     return acc;
2862*01826a49SYabin Cui }
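/*
 * For illustration, a compiler fence of this kind is typically an empty
 * inline-asm statement that declares the accumulator as both read and
 * written, which makes the value opaque to the vectorizer while emitting
 * no instructions. A minimal sketch, assuming a GCC/Clang-style definition
 * (the real XXH_COMPILER_GUARD is defined earlier in this file):
 *
 *     #define COMPILER_GUARD_SKETCH(var) __asm__("" : "+r" (var))
 */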
2863*01826a49SYabin Cui 
2864*01826a49SYabin Cui /*!
2865*01826a49SYabin Cui  * @internal
2866*01826a49SYabin Cui  * @brief Mixes all bits to finalize the hash.
2867*01826a49SYabin Cui  *
2868*01826a49SYabin Cui  * The final mix ensures that all input bits have a chance to impact any bit in
2869*01826a49SYabin Cui  * the output digest, resulting in an unbiased distribution.
2870*01826a49SYabin Cui  *
2871*01826a49SYabin Cui  * @param hash The hash to avalanche.
2872*01826a49SYabin Cui  * @return The avalanched hash.
2873*01826a49SYabin Cui  */
2874*01826a49SYabin Cui static xxh_u32 XXH32_avalanche(xxh_u32 hash)
2875*01826a49SYabin Cui {
2876*01826a49SYabin Cui     hash ^= hash >> 15;
2877*01826a49SYabin Cui     hash *= XXH_PRIME32_2;
2878*01826a49SYabin Cui     hash ^= hash >> 13;
2879*01826a49SYabin Cui     hash *= XXH_PRIME32_3;
2880*01826a49SYabin Cui     hash ^= hash >> 16;
2881*01826a49SYabin Cui     return hash;
2882*01826a49SYabin Cui }
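/*
 * One way to observe the avalanche property is to hash two values that
 * differ in a single bit and count the output bits that change; roughly
 * half should flip. A minimal sketch (hypothetical test harness, not part
 * of the library; __builtin_popcount assumes GCC/Clang):
 *
 *     xxh_u32 const a = XXH32_avalanche(0x12345678);
 *     xxh_u32 const b = XXH32_avalanche(0x12345679);   // one bit flipped
 *     int const changed = __builtin_popcount(a ^ b);   // expect ~16
 */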
2883*01826a49SYabin Cui 
2884*01826a49SYabin Cui #define XXH_get32bits(p) XXH_readLE32_align(p, align)
2885*01826a49SYabin Cui 
2886*01826a49SYabin Cui /*!
2887*01826a49SYabin Cui  * @internal
2888*01826a49SYabin Cui  * @brief Processes the last 0-15 bytes of @p ptr.
2889*01826a49SYabin Cui  *
2890*01826a49SYabin Cui  * There may be up to 15 bytes remaining to consume from the input.
2891*01826a49SYabin Cui  * This final stage will digest them to ensure that all input bytes are present
2892*01826a49SYabin Cui  * in the final mix.
2893*01826a49SYabin Cui  *
2894*01826a49SYabin Cui  * @param hash The hash to finalize.
2895*01826a49SYabin Cui  * @param ptr The pointer to the remaining input.
2896*01826a49SYabin Cui  * @param len The remaining length, modulo 16.
2897*01826a49SYabin Cui  * @param align Whether @p ptr is aligned.
2898*01826a49SYabin Cui  * @return The finalized hash.
2899*01826a49SYabin Cui  * @see XXH64_finalize().
2900*01826a49SYabin Cui  */
2901*01826a49SYabin Cui static XXH_PUREF xxh_u32
2902*01826a49SYabin Cui XXH32_finalize(xxh_u32 hash, const xxh_u8* ptr, size_t len, XXH_alignment align)
2903*01826a49SYabin Cui {
2904*01826a49SYabin Cui #define XXH_PROCESS1 do {                             \
2905*01826a49SYabin Cui     hash += (*ptr++) * XXH_PRIME32_5;                 \
2906*01826a49SYabin Cui     hash = XXH_rotl32(hash, 11) * XXH_PRIME32_1;      \
2907*01826a49SYabin Cui } while (0)
2908*01826a49SYabin Cui 
2909*01826a49SYabin Cui #define XXH_PROCESS4 do {                             \
2910*01826a49SYabin Cui     hash += XXH_get32bits(ptr) * XXH_PRIME32_3;       \
2911*01826a49SYabin Cui     ptr += 4;                                         \
2912*01826a49SYabin Cui     hash  = XXH_rotl32(hash, 17) * XXH_PRIME32_4;     \
2913*01826a49SYabin Cui } while (0)
2914*01826a49SYabin Cui 
2915*01826a49SYabin Cui     if (ptr==NULL) XXH_ASSERT(len == 0);
2916*01826a49SYabin Cui 
2917*01826a49SYabin Cui     /* Compact rerolled version; generally faster */
2918*01826a49SYabin Cui     if (!XXH32_ENDJMP) {
2919*01826a49SYabin Cui         len &= 15;
2920*01826a49SYabin Cui         while (len >= 4) {
2921*01826a49SYabin Cui             XXH_PROCESS4;
2922*01826a49SYabin Cui             len -= 4;
2923*01826a49SYabin Cui         }
2924*01826a49SYabin Cui         while (len > 0) {
2925*01826a49SYabin Cui             XXH_PROCESS1;
2926*01826a49SYabin Cui             --len;
2927*01826a49SYabin Cui         }
2928*01826a49SYabin Cui         return XXH32_avalanche(hash);
2929*01826a49SYabin Cui     } else {
2930*01826a49SYabin Cui          switch(len&15) /* or switch(bEnd - p) */ {
2931*01826a49SYabin Cui            case 12:      XXH_PROCESS4;
2932*01826a49SYabin Cui                          XXH_FALLTHROUGH;  /* fallthrough */
2933*01826a49SYabin Cui            case 8:       XXH_PROCESS4;
2934*01826a49SYabin Cui                          XXH_FALLTHROUGH;  /* fallthrough */
2935*01826a49SYabin Cui            case 4:       XXH_PROCESS4;
2936*01826a49SYabin Cui                          return XXH32_avalanche(hash);
2937*01826a49SYabin Cui 
2938*01826a49SYabin Cui            case 13:      XXH_PROCESS4;
2939*01826a49SYabin Cui                          XXH_FALLTHROUGH;  /* fallthrough */
2940*01826a49SYabin Cui            case 9:       XXH_PROCESS4;
2941*01826a49SYabin Cui                          XXH_FALLTHROUGH;  /* fallthrough */
2942*01826a49SYabin Cui            case 5:       XXH_PROCESS4;
2943*01826a49SYabin Cui                          XXH_PROCESS1;
2944*01826a49SYabin Cui                          return XXH32_avalanche(hash);
2945*01826a49SYabin Cui 
2946*01826a49SYabin Cui            case 14:      XXH_PROCESS4;
2947*01826a49SYabin Cui                          XXH_FALLTHROUGH;  /* fallthrough */
2948*01826a49SYabin Cui            case 10:      XXH_PROCESS4;
2949*01826a49SYabin Cui                          XXH_FALLTHROUGH;  /* fallthrough */
2950*01826a49SYabin Cui            case 6:       XXH_PROCESS4;
2951*01826a49SYabin Cui                          XXH_PROCESS1;
2952*01826a49SYabin Cui                          XXH_PROCESS1;
2953*01826a49SYabin Cui                          return XXH32_avalanche(hash);
2954*01826a49SYabin Cui 
2955*01826a49SYabin Cui            case 15:      XXH_PROCESS4;
2956*01826a49SYabin Cui                          XXH_FALLTHROUGH;  /* fallthrough */
2957*01826a49SYabin Cui            case 11:      XXH_PROCESS4;
2958*01826a49SYabin Cui                          XXH_FALLTHROUGH;  /* fallthrough */
2959*01826a49SYabin Cui            case 7:       XXH_PROCESS4;
2960*01826a49SYabin Cui                          XXH_FALLTHROUGH;  /* fallthrough */
2961*01826a49SYabin Cui            case 3:       XXH_PROCESS1;
2962*01826a49SYabin Cui                          XXH_FALLTHROUGH;  /* fallthrough */
2963*01826a49SYabin Cui            case 2:       XXH_PROCESS1;
2964*01826a49SYabin Cui                          XXH_FALLTHROUGH;  /* fallthrough */
2965*01826a49SYabin Cui            case 1:       XXH_PROCESS1;
2966*01826a49SYabin Cui                          XXH_FALLTHROUGH;  /* fallthrough */
2967*01826a49SYabin Cui            case 0:       return XXH32_avalanche(hash);
2968*01826a49SYabin Cui         }
2969*01826a49SYabin Cui         XXH_ASSERT(0);
2970*01826a49SYabin Cui         return hash;   /* reaching this point is deemed impossible */
2971*01826a49SYabin Cui     }
2972*01826a49SYabin Cui }
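/*
 * For illustration, a 7-byte tail is consumed as one 4-byte step followed
 * by three 1-byte steps; both the rerolled loop and the `case 7` chain
 * above take exactly this path:
 *
 *     7 bytes -> XXH_PROCESS4; XXH_PROCESS1; XXH_PROCESS1; XXH_PROCESS1;
 *             -> return XXH32_avalanche(hash);
 */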
2973*01826a49SYabin Cui 
2974*01826a49SYabin Cui #ifdef XXH_OLD_NAMES
2975*01826a49SYabin Cui #  define PROCESS1 XXH_PROCESS1
2976*01826a49SYabin Cui #  define PROCESS4 XXH_PROCESS4
2977*01826a49SYabin Cui #else
2978*01826a49SYabin Cui #  undef XXH_PROCESS1
2979*01826a49SYabin Cui #  undef XXH_PROCESS4
2980*01826a49SYabin Cui #endif
2981*01826a49SYabin Cui 
2982*01826a49SYabin Cui /*!
2983*01826a49SYabin Cui  * @internal
2984*01826a49SYabin Cui  * @brief The implementation for @ref XXH32().
2985*01826a49SYabin Cui  *
2986*01826a49SYabin Cui  * @param input , len , seed Directly passed from @ref XXH32().
2987*01826a49SYabin Cui  * @param align Whether @p input is aligned.
2988*01826a49SYabin Cui  * @return The calculated hash.
2989*01826a49SYabin Cui  */
2990*01826a49SYabin Cui XXH_FORCE_INLINE XXH_PUREF xxh_u32
2991*01826a49SYabin Cui XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
2992*01826a49SYabin Cui {
2993*01826a49SYabin Cui     xxh_u32 h32;
2994*01826a49SYabin Cui 
2995*01826a49SYabin Cui     if (input==NULL) XXH_ASSERT(len == 0);
2996*01826a49SYabin Cui 
2997*01826a49SYabin Cui     if (len>=16) {
2998*01826a49SYabin Cui         const xxh_u8* const bEnd = input + len;
2999*01826a49SYabin Cui         const xxh_u8* const limit = bEnd - 15;
3000*01826a49SYabin Cui         xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
3001*01826a49SYabin Cui         xxh_u32 v2 = seed + XXH_PRIME32_2;
3002*01826a49SYabin Cui         xxh_u32 v3 = seed + 0;
3003*01826a49SYabin Cui         xxh_u32 v4 = seed - XXH_PRIME32_1;
3004*01826a49SYabin Cui 
3005*01826a49SYabin Cui         do {
3006*01826a49SYabin Cui             v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4;
3007*01826a49SYabin Cui             v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4;
3008*01826a49SYabin Cui             v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4;
3009*01826a49SYabin Cui             v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4;
3010*01826a49SYabin Cui         } while (input < limit);
3011*01826a49SYabin Cui 
3012*01826a49SYabin Cui         h32 = XXH_rotl32(v1, 1)  + XXH_rotl32(v2, 7)
3013*01826a49SYabin Cui             + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
3014*01826a49SYabin Cui     } else {
3015*01826a49SYabin Cui         h32  = seed + XXH_PRIME32_5;
3016*01826a49SYabin Cui     }
3017*01826a49SYabin Cui 
3018*01826a49SYabin Cui     h32 += (xxh_u32)len;
3019*01826a49SYabin Cui 
3020*01826a49SYabin Cui     return XXH32_finalize(h32, input, len&15, align);
3021*01826a49SYabin Cui }
3022*01826a49SYabin Cui 
3023*01826a49SYabin Cui /*! @ingroup XXH32_family */
3024*01826a49SYabin Cui XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed)
3025*01826a49SYabin Cui {
3026*01826a49SYabin Cui #if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2
3027*01826a49SYabin Cui     /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
3028*01826a49SYabin Cui     XXH32_state_t state;
3029*01826a49SYabin Cui     XXH32_reset(&state, seed);
3030*01826a49SYabin Cui     XXH32_update(&state, (const xxh_u8*)input, len);
3031*01826a49SYabin Cui     return XXH32_digest(&state);
3032*01826a49SYabin Cui #else
3033*01826a49SYabin Cui     if (XXH_FORCE_ALIGN_CHECK) {
3034*01826a49SYabin Cui         if ((((size_t)input) & 3) == 0) {   /* Input is 4-byte aligned, leverage the speed benefit */
3035*01826a49SYabin Cui             return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
3036*01826a49SYabin Cui     }   }
3037*01826a49SYabin Cui 
3038*01826a49SYabin Cui     return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
3039*01826a49SYabin Cui #endif
3040*01826a49SYabin Cui }
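/*
 * Minimal one-shot usage sketch (illustrative; data and seed are arbitrary):
 *
 *     const char data[] = "example";
 *     XXH32_hash_t const h = XXH32(data, sizeof(data) - 1, 0);
 */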
3041*01826a49SYabin Cui 
3042*01826a49SYabin Cui 
3043*01826a49SYabin Cui 
3044*01826a49SYabin Cui /*******   Hash streaming   *******/
3045*01826a49SYabin Cui #ifndef XXH_NO_STREAM
3046*01826a49SYabin Cui /*! @ingroup XXH32_family */
3047*01826a49SYabin Cui XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
3048*01826a49SYabin Cui {
3049*01826a49SYabin Cui     return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
3050*01826a49SYabin Cui }
3051*01826a49SYabin Cui /*! @ingroup XXH32_family */
3052*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
3053*01826a49SYabin Cui {
3054*01826a49SYabin Cui     XXH_free(statePtr);
3055*01826a49SYabin Cui     return XXH_OK;
3056*01826a49SYabin Cui }
3057*01826a49SYabin Cui 
3058*01826a49SYabin Cui /*! @ingroup XXH32_family */
3059*01826a49SYabin Cui XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
3060*01826a49SYabin Cui {
3061*01826a49SYabin Cui     XXH_memcpy(dstState, srcState, sizeof(*dstState));
3062*01826a49SYabin Cui }
3063*01826a49SYabin Cui 
3064*01826a49SYabin Cui /*! @ingroup XXH32_family */
3065*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed)
3066*01826a49SYabin Cui {
3067*01826a49SYabin Cui     XXH_ASSERT(statePtr != NULL);
3068*01826a49SYabin Cui     memset(statePtr, 0, sizeof(*statePtr));
3069*01826a49SYabin Cui     statePtr->v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
3070*01826a49SYabin Cui     statePtr->v[1] = seed + XXH_PRIME32_2;
3071*01826a49SYabin Cui     statePtr->v[2] = seed + 0;
3072*01826a49SYabin Cui     statePtr->v[3] = seed - XXH_PRIME32_1;
3073*01826a49SYabin Cui     return XXH_OK;
3074*01826a49SYabin Cui }
3075*01826a49SYabin Cui 
3076*01826a49SYabin Cui 
3077*01826a49SYabin Cui /*! @ingroup XXH32_family */
3078*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode
3079*01826a49SYabin Cui XXH32_update(XXH32_state_t* state, const void* input, size_t len)
3080*01826a49SYabin Cui {
3081*01826a49SYabin Cui     if (input==NULL) {
3082*01826a49SYabin Cui         XXH_ASSERT(len == 0);
3083*01826a49SYabin Cui         return XXH_OK;
3084*01826a49SYabin Cui     }
3085*01826a49SYabin Cui 
3086*01826a49SYabin Cui     {   const xxh_u8* p = (const xxh_u8*)input;
3087*01826a49SYabin Cui         const xxh_u8* const bEnd = p + len;
3088*01826a49SYabin Cui 
3089*01826a49SYabin Cui         state->total_len_32 += (XXH32_hash_t)len;
3090*01826a49SYabin Cui         state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));
3091*01826a49SYabin Cui 
3092*01826a49SYabin Cui         if (state->memsize + len < 16)  {   /* fill in tmp buffer */
3093*01826a49SYabin Cui             XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len);
3094*01826a49SYabin Cui             state->memsize += (XXH32_hash_t)len;
3095*01826a49SYabin Cui             return XXH_OK;
3096*01826a49SYabin Cui         }
3097*01826a49SYabin Cui 
3098*01826a49SYabin Cui         if (state->memsize) {   /* some data left from previous update */
3099*01826a49SYabin Cui             XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize);
3100*01826a49SYabin Cui             {   const xxh_u32* p32 = state->mem32;
3101*01826a49SYabin Cui                 state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); p32++;
3102*01826a49SYabin Cui                 state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); p32++;
3103*01826a49SYabin Cui                 state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); p32++;
3104*01826a49SYabin Cui                 state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32));
3105*01826a49SYabin Cui             }
3106*01826a49SYabin Cui             p += 16-state->memsize;
3107*01826a49SYabin Cui             state->memsize = 0;
3108*01826a49SYabin Cui         }
3109*01826a49SYabin Cui 
3110*01826a49SYabin Cui         if (p <= bEnd-16) {
3111*01826a49SYabin Cui             const xxh_u8* const limit = bEnd - 16;
3112*01826a49SYabin Cui 
3113*01826a49SYabin Cui             do {
3114*01826a49SYabin Cui                 state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); p+=4;
3115*01826a49SYabin Cui                 state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); p+=4;
3116*01826a49SYabin Cui                 state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); p+=4;
3117*01826a49SYabin Cui                 state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p+=4;
3118*01826a49SYabin Cui             } while (p<=limit);
3119*01826a49SYabin Cui 
3120*01826a49SYabin Cui         }
3121*01826a49SYabin Cui 
3122*01826a49SYabin Cui         if (p < bEnd) {
3123*01826a49SYabin Cui             XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
3124*01826a49SYabin Cui             state->memsize = (unsigned)(bEnd-p);
3125*01826a49SYabin Cui         }
3126*01826a49SYabin Cui     }
3127*01826a49SYabin Cui 
3128*01826a49SYabin Cui     return XXH_OK;
3129*01826a49SYabin Cui }
3130*01826a49SYabin Cui 
3131*01826a49SYabin Cui 
3132*01826a49SYabin Cui /*! @ingroup XXH32_family */
3133*01826a49SYabin Cui XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state)
3134*01826a49SYabin Cui {
3135*01826a49SYabin Cui     xxh_u32 h32;
3136*01826a49SYabin Cui 
3137*01826a49SYabin Cui     if (state->large_len) {
3138*01826a49SYabin Cui         h32 = XXH_rotl32(state->v[0], 1)
3139*01826a49SYabin Cui             + XXH_rotl32(state->v[1], 7)
3140*01826a49SYabin Cui             + XXH_rotl32(state->v[2], 12)
3141*01826a49SYabin Cui             + XXH_rotl32(state->v[3], 18);
3142*01826a49SYabin Cui     } else {
3143*01826a49SYabin Cui         h32 = state->v[2] /* == seed */ + XXH_PRIME32_5;
3144*01826a49SYabin Cui     }
3145*01826a49SYabin Cui 
3146*01826a49SYabin Cui     h32 += state->total_len_32;
3147*01826a49SYabin Cui 
3148*01826a49SYabin Cui     return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned);
3149*01826a49SYabin Cui }
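/*
 * Streaming usage sketch: feeding the same bytes in any chunking yields the
 * same digest as the one-shot XXH32(). Illustrative only; error handling
 * elided:
 *
 *     XXH32_state_t* const st = XXH32_createState();
 *     XXH32_reset(st, 0);
 *     XXH32_update(st, "exam", 4);
 *     XXH32_update(st, "ple", 3);
 *     XXH32_hash_t const h = XXH32_digest(st);   // == XXH32("example", 7, 0)
 *     XXH32_freeState(st);
 */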
3150*01826a49SYabin Cui #endif /* !XXH_NO_STREAM */
3151*01826a49SYabin Cui 
3152*01826a49SYabin Cui /*******   Canonical representation   *******/
3153*01826a49SYabin Cui 
3154*01826a49SYabin Cui /*! @ingroup XXH32_family */
3155*01826a49SYabin Cui XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
3156*01826a49SYabin Cui {
3157*01826a49SYabin Cui     XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
3158*01826a49SYabin Cui     if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
3159*01826a49SYabin Cui     XXH_memcpy(dst, &hash, sizeof(*dst));
3160*01826a49SYabin Cui }
3161*01826a49SYabin Cui /*! @ingroup XXH32_family */
3162*01826a49SYabin Cui XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
3163*01826a49SYabin Cui {
3164*01826a49SYabin Cui     return XXH_readBE32(src);
3165*01826a49SYabin Cui }
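/*
 * The canonical form is the big-endian byte layout, meant for storing or
 * transmitting digests. Round-trip sketch (illustrative; h is some digest):
 *
 *     XXH32_canonical_t c;
 *     XXH32_canonicalFromHash(&c, h);                // serialize, big endian
 *     XXH32_hash_t const h2 = XXH32_hashFromCanonical(&c);
 *     // h2 == h on any platform, regardless of native endianness
 */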
3166*01826a49SYabin Cui 
3167*01826a49SYabin Cui 
3168*01826a49SYabin Cui #ifndef XXH_NO_LONG_LONG
3169*01826a49SYabin Cui 
3170*01826a49SYabin Cui /* *******************************************************************
3171*01826a49SYabin Cui *  64-bit hash functions
3172*01826a49SYabin Cui *********************************************************************/
3173*01826a49SYabin Cui /*!
3174*01826a49SYabin Cui  * @}
3175*01826a49SYabin Cui  * @ingroup impl
3176*01826a49SYabin Cui  * @{
3177*01826a49SYabin Cui  */
3178*01826a49SYabin Cui /*******   Memory access   *******/
3179*01826a49SYabin Cui 
3180*01826a49SYabin Cui typedef XXH64_hash_t xxh_u64;
3181*01826a49SYabin Cui 
3182*01826a49SYabin Cui #ifdef XXH_OLD_NAMES
3183*01826a49SYabin Cui #  define U64 xxh_u64
3184*01826a49SYabin Cui #endif
3185*01826a49SYabin Cui 
3186*01826a49SYabin Cui #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
3187*01826a49SYabin Cui /*
3188*01826a49SYabin Cui  * Manual byteshift. Best for old compilers which don't inline memcpy.
3189*01826a49SYabin Cui  * We actually directly use XXH_readLE64 and XXH_readBE64.
3190*01826a49SYabin Cui  */
3191*01826a49SYabin Cui #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
3192*01826a49SYabin Cui 
3193*01826a49SYabin Cui /* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
3194*01826a49SYabin Cui static xxh_u64 XXH_read64(const void* memPtr)
3195*01826a49SYabin Cui {
3196*01826a49SYabin Cui     return *(const xxh_u64*) memPtr;
3197*01826a49SYabin Cui }
3198*01826a49SYabin Cui 
3199*01826a49SYabin Cui #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
3200*01826a49SYabin Cui 
3201*01826a49SYabin Cui /*
3202*01826a49SYabin Cui  * __attribute__((aligned(1))) is supported by gcc and clang. Originally the
3203*01826a49SYabin Cui  * documentation claimed that it only increased the alignment, but actually it
3204*01826a49SYabin Cui  * can decrease it on gcc, clang, and icc:
3205*01826a49SYabin Cui  * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502,
3206*01826a49SYabin Cui  * https://gcc.godbolt.org/z/xYez1j67Y.
3207*01826a49SYabin Cui  */
3208*01826a49SYabin Cui #ifdef XXH_OLD_NAMES
3209*01826a49SYabin Cui typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64;
3210*01826a49SYabin Cui #endif
3211*01826a49SYabin Cui static xxh_u64 XXH_read64(const void* ptr)
3212*01826a49SYabin Cui {
3213*01826a49SYabin Cui     typedef __attribute__((aligned(1))) xxh_u64 xxh_unalign64;
3214*01826a49SYabin Cui     return *((const xxh_unalign64*)ptr);
3215*01826a49SYabin Cui }
3216*01826a49SYabin Cui 
3217*01826a49SYabin Cui #else
3218*01826a49SYabin Cui 
3219*01826a49SYabin Cui /*
3220*01826a49SYabin Cui  * Portable and safe solution. Generally efficient.
3221*01826a49SYabin Cui  * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
3222*01826a49SYabin Cui  */
3223*01826a49SYabin Cui static xxh_u64 XXH_read64(const void* memPtr)
3224*01826a49SYabin Cui {
3225*01826a49SYabin Cui     xxh_u64 val;
3226*01826a49SYabin Cui     XXH_memcpy(&val, memPtr, sizeof(val));
3227*01826a49SYabin Cui     return val;
3228*01826a49SYabin Cui }
3229*01826a49SYabin Cui 
3230*01826a49SYabin Cui #endif   /* XXH_FORCE_MEMORY_ACCESS */
3231*01826a49SYabin Cui 
3232*01826a49SYabin Cui #if defined(_MSC_VER)     /* Visual Studio */
3233*01826a49SYabin Cui #  define XXH_swap64 _byteswap_uint64
3234*01826a49SYabin Cui #elif XXH_GCC_VERSION >= 403
3235*01826a49SYabin Cui #  define XXH_swap64 __builtin_bswap64
3236*01826a49SYabin Cui #else
3237*01826a49SYabin Cui static xxh_u64 XXH_swap64(xxh_u64 x)
3238*01826a49SYabin Cui {
3239*01826a49SYabin Cui     return  ((x << 56) & 0xff00000000000000ULL) |
3240*01826a49SYabin Cui             ((x << 40) & 0x00ff000000000000ULL) |
3241*01826a49SYabin Cui             ((x << 24) & 0x0000ff0000000000ULL) |
3242*01826a49SYabin Cui             ((x << 8)  & 0x000000ff00000000ULL) |
3243*01826a49SYabin Cui             ((x >> 8)  & 0x00000000ff000000ULL) |
3244*01826a49SYabin Cui             ((x >> 24) & 0x0000000000ff0000ULL) |
3245*01826a49SYabin Cui             ((x >> 40) & 0x000000000000ff00ULL) |
3246*01826a49SYabin Cui             ((x >> 56) & 0x00000000000000ffULL);
3247*01826a49SYabin Cui }
3248*01826a49SYabin Cui #endif
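/*
 * All three XXH_swap64 variants above perform the same full byte reversal,
 * e.g.:
 *
 *     XXH_swap64(0x0102030405060708ULL) == 0x0807060504030201ULL
 */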
3249*01826a49SYabin Cui 
3250*01826a49SYabin Cui 
3251*01826a49SYabin Cui /* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */
3252*01826a49SYabin Cui #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
3253*01826a49SYabin Cui 
3254*01826a49SYabin Cui XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr)
3255*01826a49SYabin Cui {
3256*01826a49SYabin Cui     const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
3257*01826a49SYabin Cui     return bytePtr[0]
3258*01826a49SYabin Cui          | ((xxh_u64)bytePtr[1] << 8)
3259*01826a49SYabin Cui          | ((xxh_u64)bytePtr[2] << 16)
3260*01826a49SYabin Cui          | ((xxh_u64)bytePtr[3] << 24)
3261*01826a49SYabin Cui          | ((xxh_u64)bytePtr[4] << 32)
3262*01826a49SYabin Cui          | ((xxh_u64)bytePtr[5] << 40)
3263*01826a49SYabin Cui          | ((xxh_u64)bytePtr[6] << 48)
3264*01826a49SYabin Cui          | ((xxh_u64)bytePtr[7] << 56);
3265*01826a49SYabin Cui }
3266*01826a49SYabin Cui 
3267*01826a49SYabin Cui XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr)
3268*01826a49SYabin Cui {
3269*01826a49SYabin Cui     const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
3270*01826a49SYabin Cui     return bytePtr[7]
3271*01826a49SYabin Cui          | ((xxh_u64)bytePtr[6] << 8)
3272*01826a49SYabin Cui          | ((xxh_u64)bytePtr[5] << 16)
3273*01826a49SYabin Cui          | ((xxh_u64)bytePtr[4] << 24)
3274*01826a49SYabin Cui          | ((xxh_u64)bytePtr[3] << 32)
3275*01826a49SYabin Cui          | ((xxh_u64)bytePtr[2] << 40)
3276*01826a49SYabin Cui          | ((xxh_u64)bytePtr[1] << 48)
3277*01826a49SYabin Cui          | ((xxh_u64)bytePtr[0] << 56);
3278*01826a49SYabin Cui }
3279*01826a49SYabin Cui 
3280*01826a49SYabin Cui #else
3281*01826a49SYabin Cui XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr)
3282*01826a49SYabin Cui {
3283*01826a49SYabin Cui     return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
3284*01826a49SYabin Cui }
3285*01826a49SYabin Cui 
3286*01826a49SYabin Cui static xxh_u64 XXH_readBE64(const void* ptr)
3287*01826a49SYabin Cui {
3288*01826a49SYabin Cui     return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
3289*01826a49SYabin Cui }
3290*01826a49SYabin Cui #endif
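/*
 * Both code paths implement the same contract: given the byte sequence
 * 01 02 03 04 05 06 07 08 at address p,
 *
 *     XXH_readLE64(p) == 0x0807060504030201ULL
 *     XXH_readBE64(p) == 0x0102030405060708ULL
 *
 * independently of the host's native endianness.
 */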
3291*01826a49SYabin Cui 
3292*01826a49SYabin Cui XXH_FORCE_INLINE xxh_u64
3293*01826a49SYabin Cui XXH_readLE64_align(const void* ptr, XXH_alignment align)
3294*01826a49SYabin Cui {
3295*01826a49SYabin Cui     if (align==XXH_unaligned)
3296*01826a49SYabin Cui         return XXH_readLE64(ptr);
3297*01826a49SYabin Cui     else
3298*01826a49SYabin Cui         return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr);
3299*01826a49SYabin Cui }
3300*01826a49SYabin Cui 
3301*01826a49SYabin Cui 
3302*01826a49SYabin Cui /*******   xxh64   *******/
3303*01826a49SYabin Cui /*!
3304*01826a49SYabin Cui  * @}
3305*01826a49SYabin Cui  * @defgroup XXH64_impl XXH64 implementation
3306*01826a49SYabin Cui  * @ingroup impl
3307*01826a49SYabin Cui  *
3308*01826a49SYabin Cui  * Details on the XXH64 implementation.
3309*01826a49SYabin Cui  * @{
3310*01826a49SYabin Cui  */
3311*01826a49SYabin Cui /* #define rather than static const, to be used as initializers */
3312*01826a49SYabin Cui #define XXH_PRIME64_1  0x9E3779B185EBCA87ULL  /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */
3313*01826a49SYabin Cui #define XXH_PRIME64_2  0xC2B2AE3D27D4EB4FULL  /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */
3314*01826a49SYabin Cui #define XXH_PRIME64_3  0x165667B19E3779F9ULL  /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */
3315*01826a49SYabin Cui #define XXH_PRIME64_4  0x85EBCA77C2B2AE63ULL  /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */
3316*01826a49SYabin Cui #define XXH_PRIME64_5  0x27D4EB2F165667C5ULL  /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */
3317*01826a49SYabin Cui 
3318*01826a49SYabin Cui #ifdef XXH_OLD_NAMES
3319*01826a49SYabin Cui #  define PRIME64_1 XXH_PRIME64_1
3320*01826a49SYabin Cui #  define PRIME64_2 XXH_PRIME64_2
3321*01826a49SYabin Cui #  define PRIME64_3 XXH_PRIME64_3
3322*01826a49SYabin Cui #  define PRIME64_4 XXH_PRIME64_4
3323*01826a49SYabin Cui #  define PRIME64_5 XXH_PRIME64_5
3324*01826a49SYabin Cui #endif
3325*01826a49SYabin Cui 
3326*01826a49SYabin Cui /*! @copydoc XXH32_round */
3327*01826a49SYabin Cui static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
3328*01826a49SYabin Cui {
3329*01826a49SYabin Cui     acc += input * XXH_PRIME64_2;
3330*01826a49SYabin Cui     acc  = XXH_rotl64(acc, 31);
3331*01826a49SYabin Cui     acc *= XXH_PRIME64_1;
3332*01826a49SYabin Cui #if (defined(__AVX512F__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
3333*01826a49SYabin Cui     /*
3334*01826a49SYabin Cui      * DISABLE AUTOVECTORIZATION:
3335*01826a49SYabin Cui      * A compiler fence is used to prevent GCC and Clang from
3336*01826a49SYabin Cui      * autovectorizing the XXH64 loop (pragmas and attributes don't work for some
3337*01826a49SYabin Cui      * reason) without globally disabling AVX512.
3338*01826a49SYabin Cui      *
3339*01826a49SYabin Cui      * Autovectorization of XXH64 tends to be detrimental,
3340*01826a49SYabin Cui      * though the exact outcome may change depending on the exact CPU and compiler version.
3341*01826a49SYabin Cui      * For reference, it has been reported as detrimental on Skylake-X,
3342*01826a49SYabin Cui      * but possibly beneficial on Zen4.
3343*01826a49SYabin Cui      *
3344*01826a49SYabin Cui      * The default is to disable auto-vectorization,
3345*01826a49SYabin Cui      * but it can be enabled instead by defining the `XXH_ENABLE_AUTOVECTORIZE` build macro.
3346*01826a49SYabin Cui      */
3347*01826a49SYabin Cui     XXH_COMPILER_GUARD(acc);
3348*01826a49SYabin Cui #endif
3349*01826a49SYabin Cui     return acc;
3350*01826a49SYabin Cui }
3351*01826a49SYabin Cui 
3352*01826a49SYabin Cui static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
3353*01826a49SYabin Cui {
3354*01826a49SYabin Cui     val  = XXH64_round(0, val);
3355*01826a49SYabin Cui     acc ^= val;
3356*01826a49SYabin Cui     acc  = acc * XXH_PRIME64_1 + XXH_PRIME64_4;
3357*01826a49SYabin Cui     return acc;
3358*01826a49SYabin Cui }
3359*01826a49SYabin Cui 
3360*01826a49SYabin Cui /*! @copydoc XXH32_avalanche */
3361*01826a49SYabin Cui static xxh_u64 XXH64_avalanche(xxh_u64 hash)
3362*01826a49SYabin Cui {
3363*01826a49SYabin Cui     hash ^= hash >> 33;
3364*01826a49SYabin Cui     hash *= XXH_PRIME64_2;
3365*01826a49SYabin Cui     hash ^= hash >> 29;
3366*01826a49SYabin Cui     hash *= XXH_PRIME64_3;
3367*01826a49SYabin Cui     hash ^= hash >> 32;
3368*01826a49SYabin Cui     return hash;
3369*01826a49SYabin Cui }
3370*01826a49SYabin Cui 
3371*01826a49SYabin Cui 
3372*01826a49SYabin Cui #define XXH_get64bits(p) XXH_readLE64_align(p, align)
3373*01826a49SYabin Cui 
3374*01826a49SYabin Cui /*!
3375*01826a49SYabin Cui  * @internal
3376*01826a49SYabin Cui  * @brief Processes the last 0-31 bytes of @p ptr.
3377*01826a49SYabin Cui  *
3378*01826a49SYabin Cui  * There may be up to 31 bytes remaining to consume from the input.
3379*01826a49SYabin Cui  * This final stage will digest them to ensure that all input bytes are present
3380*01826a49SYabin Cui  * in the final mix.
3381*01826a49SYabin Cui  *
3382*01826a49SYabin Cui  * @param hash The hash to finalize.
3383*01826a49SYabin Cui  * @param ptr The pointer to the remaining input.
3384*01826a49SYabin Cui  * @param len The remaining length, modulo 32.
3385*01826a49SYabin Cui  * @param align Whether @p ptr is aligned.
3386*01826a49SYabin Cui  * @return The finalized hash
3387*01826a49SYabin Cui  * @see XXH32_finalize().
3388*01826a49SYabin Cui  */
3389*01826a49SYabin Cui static XXH_PUREF xxh_u64
3390*01826a49SYabin Cui XXH64_finalize(xxh_u64 hash, const xxh_u8* ptr, size_t len, XXH_alignment align)
3391*01826a49SYabin Cui {
3392*01826a49SYabin Cui     if (ptr==NULL) XXH_ASSERT(len == 0);
3393*01826a49SYabin Cui     len &= 31;
3394*01826a49SYabin Cui     while (len >= 8) {
3395*01826a49SYabin Cui         xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr));
3396*01826a49SYabin Cui         ptr += 8;
3397*01826a49SYabin Cui         hash ^= k1;
3398*01826a49SYabin Cui         hash  = XXH_rotl64(hash,27) * XXH_PRIME64_1 + XXH_PRIME64_4;
3399*01826a49SYabin Cui         len -= 8;
3400*01826a49SYabin Cui     }
3401*01826a49SYabin Cui     if (len >= 4) {
3402*01826a49SYabin Cui         hash ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;
3403*01826a49SYabin Cui         ptr += 4;
3404*01826a49SYabin Cui         hash = XXH_rotl64(hash, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
3405*01826a49SYabin Cui         len -= 4;
3406*01826a49SYabin Cui     }
3407*01826a49SYabin Cui     while (len > 0) {
3408*01826a49SYabin Cui         hash ^= (*ptr++) * XXH_PRIME64_5;
3409*01826a49SYabin Cui         hash = XXH_rotl64(hash, 11) * XXH_PRIME64_1;
3410*01826a49SYabin Cui         --len;
3411*01826a49SYabin Cui     }
3412*01826a49SYabin Cui     return  XXH64_avalanche(hash);
3413*01826a49SYabin Cui }
3414*01826a49SYabin Cui 
3415*01826a49SYabin Cui #ifdef XXH_OLD_NAMES
3416*01826a49SYabin Cui #  define PROCESS1_64 XXH_PROCESS1_64
3417*01826a49SYabin Cui #  define PROCESS4_64 XXH_PROCESS4_64
3418*01826a49SYabin Cui #  define PROCESS8_64 XXH_PROCESS8_64
3419*01826a49SYabin Cui #else
3420*01826a49SYabin Cui #  undef XXH_PROCESS1_64
3421*01826a49SYabin Cui #  undef XXH_PROCESS4_64
3422*01826a49SYabin Cui #  undef XXH_PROCESS8_64
3423*01826a49SYabin Cui #endif
3424*01826a49SYabin Cui 
3425*01826a49SYabin Cui /*!
3426*01826a49SYabin Cui  * @internal
3427*01826a49SYabin Cui  * @brief The implementation for @ref XXH64().
3428*01826a49SYabin Cui  *
3429*01826a49SYabin Cui  * @param input , len , seed Directly passed from @ref XXH64().
3430*01826a49SYabin Cui  * @param align Whether @p input is aligned.
3431*01826a49SYabin Cui  * @return The calculated hash.
3432*01826a49SYabin Cui  */
3433*01826a49SYabin Cui XXH_FORCE_INLINE XXH_PUREF xxh_u64
3434*01826a49SYabin Cui XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
3435*01826a49SYabin Cui {
3436*01826a49SYabin Cui     xxh_u64 h64;
3437*01826a49SYabin Cui     if (input==NULL) XXH_ASSERT(len == 0);
3438*01826a49SYabin Cui 
3439*01826a49SYabin Cui     if (len>=32) {
3440*01826a49SYabin Cui         const xxh_u8* const bEnd = input + len;
3441*01826a49SYabin Cui         const xxh_u8* const limit = bEnd - 31;
3442*01826a49SYabin Cui         xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
3443*01826a49SYabin Cui         xxh_u64 v2 = seed + XXH_PRIME64_2;
3444*01826a49SYabin Cui         xxh_u64 v3 = seed + 0;
3445*01826a49SYabin Cui         xxh_u64 v4 = seed - XXH_PRIME64_1;
3446*01826a49SYabin Cui 
3447*01826a49SYabin Cui         do {
3448*01826a49SYabin Cui             v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
3449*01826a49SYabin Cui             v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
3450*01826a49SYabin Cui             v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
3451*01826a49SYabin Cui             v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
3452*01826a49SYabin Cui         } while (input<limit);
3453*01826a49SYabin Cui 
3454*01826a49SYabin Cui         h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
3455*01826a49SYabin Cui         h64 = XXH64_mergeRound(h64, v1);
3456*01826a49SYabin Cui         h64 = XXH64_mergeRound(h64, v2);
3457*01826a49SYabin Cui         h64 = XXH64_mergeRound(h64, v3);
3458*01826a49SYabin Cui         h64 = XXH64_mergeRound(h64, v4);
3459*01826a49SYabin Cui 
3460*01826a49SYabin Cui     } else {
3461*01826a49SYabin Cui         h64  = seed + XXH_PRIME64_5;
3462*01826a49SYabin Cui     }
3463*01826a49SYabin Cui 
3464*01826a49SYabin Cui     h64 += (xxh_u64) len;
3465*01826a49SYabin Cui 
3466*01826a49SYabin Cui     return XXH64_finalize(h64, input, len, align);
3467*01826a49SYabin Cui }
3468*01826a49SYabin Cui 
3469*01826a49SYabin Cui 
3470*01826a49SYabin Cui /*! @ingroup XXH64_family */
3471*01826a49SYabin Cui XXH_PUBLIC_API XXH64_hash_t XXH64 (XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
3472*01826a49SYabin Cui {
3473*01826a49SYabin Cui #if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2
3474*01826a49SYabin Cui     /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
3475*01826a49SYabin Cui     XXH64_state_t state;
3476*01826a49SYabin Cui     XXH64_reset(&state, seed);
3477*01826a49SYabin Cui     XXH64_update(&state, (const xxh_u8*)input, len);
3478*01826a49SYabin Cui     return XXH64_digest(&state);
3479*01826a49SYabin Cui #else
3480*01826a49SYabin Cui     if (XXH_FORCE_ALIGN_CHECK) {
3481*01826a49SYabin Cui         if ((((size_t)input) & 7) == 0) {  /* Input is 8-byte aligned, leverage the speed advantage */
3482*01826a49SYabin Cui             return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
3483*01826a49SYabin Cui     }   }
3484*01826a49SYabin Cui 
3485*01826a49SYabin Cui     return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
3486*01826a49SYabin Cui 
3487*01826a49SYabin Cui #endif
3488*01826a49SYabin Cui }
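/*
 * One-shot usage sketch (illustrative; buf and bufSize are hypothetical):
 * the seed selects a different hash function over the same data, so
 * distinct seeds produce unrelated digests for practical purposes:
 *
 *     XXH64_hash_t const h0 = XXH64(buf, bufSize, 0);
 *     XXH64_hash_t const h1 = XXH64(buf, bufSize, 1);   // unrelated to h0
 */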
3489*01826a49SYabin Cui 
3490*01826a49SYabin Cui /*******   Hash Streaming   *******/
3491*01826a49SYabin Cui #ifndef XXH_NO_STREAM
3492*01826a49SYabin Cui /*! @ingroup XXH64_family*/
3493*01826a49SYabin Cui XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
3494*01826a49SYabin Cui {
3495*01826a49SYabin Cui     return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
3496*01826a49SYabin Cui }
3497*01826a49SYabin Cui /*! @ingroup XXH64_family */
3498*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
3499*01826a49SYabin Cui {
3500*01826a49SYabin Cui     XXH_free(statePtr);
3501*01826a49SYabin Cui     return XXH_OK;
3502*01826a49SYabin Cui }
3503*01826a49SYabin Cui 
3504*01826a49SYabin Cui /*! @ingroup XXH64_family */
3505*01826a49SYabin Cui XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dstState, const XXH64_state_t* srcState)
3506*01826a49SYabin Cui {
3507*01826a49SYabin Cui     XXH_memcpy(dstState, srcState, sizeof(*dstState));
3508*01826a49SYabin Cui }
3509*01826a49SYabin Cui 
3510*01826a49SYabin Cui /*! @ingroup XXH64_family */
3511*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed)
3512*01826a49SYabin Cui {
3513*01826a49SYabin Cui     XXH_ASSERT(statePtr != NULL);
3514*01826a49SYabin Cui     memset(statePtr, 0, sizeof(*statePtr));
3515*01826a49SYabin Cui     statePtr->v[0] = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
3516*01826a49SYabin Cui     statePtr->v[1] = seed + XXH_PRIME64_2;
3517*01826a49SYabin Cui     statePtr->v[2] = seed + 0;
3518*01826a49SYabin Cui     statePtr->v[3] = seed - XXH_PRIME64_1;
3519*01826a49SYabin Cui     return XXH_OK;
3520*01826a49SYabin Cui }
3521*01826a49SYabin Cui 
3522*01826a49SYabin Cui /*! @ingroup XXH64_family */
3523*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode
3524*01826a49SYabin Cui XXH64_update (XXH_NOESCAPE XXH64_state_t* state, XXH_NOESCAPE const void* input, size_t len)
3525*01826a49SYabin Cui {
3526*01826a49SYabin Cui     if (input==NULL) {
3527*01826a49SYabin Cui         XXH_ASSERT(len == 0);
3528*01826a49SYabin Cui         return XXH_OK;
3529*01826a49SYabin Cui     }
3530*01826a49SYabin Cui 
3531*01826a49SYabin Cui     {   const xxh_u8* p = (const xxh_u8*)input;
3532*01826a49SYabin Cui         const xxh_u8* const bEnd = p + len;
3533*01826a49SYabin Cui 
3534*01826a49SYabin Cui         state->total_len += len;
3535*01826a49SYabin Cui 
3536*01826a49SYabin Cui         if (state->memsize + len < 32) {  /* fill in tmp buffer */
3537*01826a49SYabin Cui             XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len);
3538*01826a49SYabin Cui             state->memsize += (xxh_u32)len;
3539*01826a49SYabin Cui             return XXH_OK;
3540*01826a49SYabin Cui         }
3541*01826a49SYabin Cui 
3542*01826a49SYabin Cui         if (state->memsize) {   /* tmp buffer is full */
3543*01826a49SYabin Cui             XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize);
3544*01826a49SYabin Cui             state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64+0));
3545*01826a49SYabin Cui             state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64+1));
3546*01826a49SYabin Cui             state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64+2));
3547*01826a49SYabin Cui             state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64+3));
3548*01826a49SYabin Cui             p += 32 - state->memsize;
3549*01826a49SYabin Cui             state->memsize = 0;
3550*01826a49SYabin Cui         }
3551*01826a49SYabin Cui 
3552*01826a49SYabin Cui         if (p+32 <= bEnd) {
3553*01826a49SYabin Cui             const xxh_u8* const limit = bEnd - 32;
3554*01826a49SYabin Cui 
3555*01826a49SYabin Cui             do {
3556*01826a49SYabin Cui                 state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); p+=8;
3557*01826a49SYabin Cui                 state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); p+=8;
3558*01826a49SYabin Cui                 state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); p+=8;
3559*01826a49SYabin Cui                 state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p+=8;
3560*01826a49SYabin Cui             } while (p<=limit);
3561*01826a49SYabin Cui 
3562*01826a49SYabin Cui         }
3563*01826a49SYabin Cui 
3564*01826a49SYabin Cui         if (p < bEnd) {
3565*01826a49SYabin Cui             XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
3566*01826a49SYabin Cui             state->memsize = (unsigned)(bEnd-p);
3567*01826a49SYabin Cui         }
3568*01826a49SYabin Cui     }
3569*01826a49SYabin Cui 
3570*01826a49SYabin Cui     return XXH_OK;
3571*01826a49SYabin Cui }
3572*01826a49SYabin Cui 
3573*01826a49SYabin Cui 
3574*01826a49SYabin Cui /*! @ingroup XXH64_family */
3575*01826a49SYabin Cui XXH_PUBLIC_API XXH64_hash_t XXH64_digest(XXH_NOESCAPE const XXH64_state_t* state)
3576*01826a49SYabin Cui {
3577*01826a49SYabin Cui     xxh_u64 h64;
3578*01826a49SYabin Cui 
3579*01826a49SYabin Cui     if (state->total_len >= 32) {
3580*01826a49SYabin Cui         h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7) + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18);
3581*01826a49SYabin Cui         h64 = XXH64_mergeRound(h64, state->v[0]);
3582*01826a49SYabin Cui         h64 = XXH64_mergeRound(h64, state->v[1]);
3583*01826a49SYabin Cui         h64 = XXH64_mergeRound(h64, state->v[2]);
3584*01826a49SYabin Cui         h64 = XXH64_mergeRound(h64, state->v[3]);
3585*01826a49SYabin Cui     } else {
3586*01826a49SYabin Cui         h64  = state->v[2] /*seed*/ + XXH_PRIME64_5;
3587*01826a49SYabin Cui     }
3588*01826a49SYabin Cui 
3589*01826a49SYabin Cui     h64 += (xxh_u64) state->total_len;
3590*01826a49SYabin Cui 
3591*01826a49SYabin Cui     return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned);
3592*01826a49SYabin Cui }
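/*
 * As with XXH32, splitting the input across updates does not change the
 * result. Equivalence sketch (illustrative; buf, bufSize, n1 and seed are
 * hypothetical, with n1 <= bufSize; error handling elided):
 *
 *     XXH64_state_t* const st = XXH64_createState();
 *     XXH64_reset(st, seed);
 *     XXH64_update(st, buf, n1);
 *     XXH64_update(st, (const char*)buf + n1, bufSize - n1);
 *     XXH64_hash_t const h = XXH64_digest(st);   // == XXH64(buf, bufSize, seed)
 *     XXH64_freeState(st);
 */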
3593*01826a49SYabin Cui #endif /* !XXH_NO_STREAM */
3594*01826a49SYabin Cui 
3595*01826a49SYabin Cui /******* Canonical representation   *******/
3596*01826a49SYabin Cui 
3597*01826a49SYabin Cui /*! @ingroup XXH64_family */
3598*01826a49SYabin Cui XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash)
3599*01826a49SYabin Cui {
3600*01826a49SYabin Cui     XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
3601*01826a49SYabin Cui     if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
3602*01826a49SYabin Cui     XXH_memcpy(dst, &hash, sizeof(*dst));
3603*01826a49SYabin Cui }
3604*01826a49SYabin Cui 
3605*01826a49SYabin Cui /*! @ingroup XXH64_family */
3606*01826a49SYabin Cui XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src)
3607*01826a49SYabin Cui {
3608*01826a49SYabin Cui     return XXH_readBE64(src);
3609*01826a49SYabin Cui }
3610*01826a49SYabin Cui 
3611*01826a49SYabin Cui #ifndef XXH_NO_XXH3
3612*01826a49SYabin Cui 
3613*01826a49SYabin Cui /* *********************************************************************
3614*01826a49SYabin Cui *  XXH3
3615*01826a49SYabin Cui *  New generation hash designed for speed on small keys and vectorization
3616*01826a49SYabin Cui ************************************************************************ */
3617*01826a49SYabin Cui /*!
3618*01826a49SYabin Cui  * @}
3619*01826a49SYabin Cui  * @defgroup XXH3_impl XXH3 implementation
3620*01826a49SYabin Cui  * @ingroup impl
3621*01826a49SYabin Cui  * @{
3622*01826a49SYabin Cui  */
3623*01826a49SYabin Cui 
3624*01826a49SYabin Cui /* ===   Compiler specifics   === */
3625*01826a49SYabin Cui 
3626*01826a49SYabin Cui #if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. Tested with GCC 5.5 */
3627*01826a49SYabin Cui #  define XXH_RESTRICT   /* disable */
3628*01826a49SYabin Cui #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* >= C99 */
3629*01826a49SYabin Cui #  define XXH_RESTRICT   restrict
3630*01826a49SYabin Cui #elif (defined (__GNUC__) && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))) \
3631*01826a49SYabin Cui    || (defined (__clang__)) \
3632*01826a49SYabin Cui    || (defined (_MSC_VER) && (_MSC_VER >= 1400)) \
3633*01826a49SYabin Cui    || (defined (__INTEL_COMPILER) && (__INTEL_COMPILER >= 1300))
3634*01826a49SYabin Cui /*
3635*01826a49SYabin Cui  * There are a LOT more compilers that recognize __restrict but this
3636*01826a49SYabin Cui  * covers the major ones.
3637*01826a49SYabin Cui  */
3638*01826a49SYabin Cui #  define XXH_RESTRICT   __restrict
3639*01826a49SYabin Cui #else
3640*01826a49SYabin Cui #  define XXH_RESTRICT   /* disable */
3641*01826a49SYabin Cui #endif
3642*01826a49SYabin Cui 
3643*01826a49SYabin Cui #if (defined(__GNUC__) && (__GNUC__ >= 3))  \
3644*01826a49SYabin Cui   || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \
3645*01826a49SYabin Cui   || defined(__clang__)
3646*01826a49SYabin Cui #    define XXH_likely(x) __builtin_expect(x, 1)
3647*01826a49SYabin Cui #    define XXH_unlikely(x) __builtin_expect(x, 0)
3648*01826a49SYabin Cui #else
3649*01826a49SYabin Cui #    define XXH_likely(x) (x)
3650*01826a49SYabin Cui #    define XXH_unlikely(x) (x)
3651*01826a49SYabin Cui #endif
3652*01826a49SYabin Cui 
3653*01826a49SYabin Cui #ifndef XXH_HAS_INCLUDE
3654*01826a49SYabin Cui #  ifdef __has_include
3655*01826a49SYabin Cui /*
3656*01826a49SYabin Cui  * Not defined as XXH_HAS_INCLUDE(x) (function-like) because
3657*01826a49SYabin Cui  * this causes segfaults in Apple Clang 4.2 (on Mac OS X 10.7 Lion)
3658*01826a49SYabin Cui  */
3659*01826a49SYabin Cui #    define XXH_HAS_INCLUDE __has_include
3660*01826a49SYabin Cui #  else
3661*01826a49SYabin Cui #    define XXH_HAS_INCLUDE(x) 0
3662*01826a49SYabin Cui #  endif
3663*01826a49SYabin Cui #endif
3664*01826a49SYabin Cui 
3665*01826a49SYabin Cui #if defined(__GNUC__) || defined(__clang__)
3666*01826a49SYabin Cui #  if defined(__ARM_FEATURE_SVE)
3667*01826a49SYabin Cui #    include <arm_sve.h>
3668*01826a49SYabin Cui #  endif
3669*01826a49SYabin Cui #  if defined(__ARM_NEON__) || defined(__ARM_NEON) \
3670*01826a49SYabin Cui    || (defined(_M_ARM) && _M_ARM >= 7) \
3671*01826a49SYabin Cui    || defined(_M_ARM64) || defined(_M_ARM64EC) \
3672*01826a49SYabin Cui    || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* WASM SIMD128 via SIMDe */
3673*01826a49SYabin Cui #    define inline __inline__  /* circumvent a clang bug */
3674*01826a49SYabin Cui #    include <arm_neon.h>
3675*01826a49SYabin Cui #    undef inline
3676*01826a49SYabin Cui #  elif defined(__AVX2__)
3677*01826a49SYabin Cui #    include <immintrin.h>
3678*01826a49SYabin Cui #  elif defined(__SSE2__)
3679*01826a49SYabin Cui #    include <emmintrin.h>
3680*01826a49SYabin Cui #  endif
3681*01826a49SYabin Cui #endif
3682*01826a49SYabin Cui 
3683*01826a49SYabin Cui #if defined(_MSC_VER)
3684*01826a49SYabin Cui #  include <intrin.h>
3685*01826a49SYabin Cui #endif
3686*01826a49SYabin Cui 
3687*01826a49SYabin Cui /*
3688*01826a49SYabin Cui  * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while
3689*01826a49SYabin Cui  * remaining a true 64-bit/128-bit hash function.
3690*01826a49SYabin Cui  *
3691*01826a49SYabin Cui  * This is done by prioritizing a subset of 64-bit operations that can be
3692*01826a49SYabin Cui  * emulated without too many steps on the average 32-bit machine.
3693*01826a49SYabin Cui  *
3694*01826a49SYabin Cui  * For example, these two lines seem similar, and run equally fast on 64-bit:
3695*01826a49SYabin Cui  *
3696*01826a49SYabin Cui  *   xxh_u64 x;
3697*01826a49SYabin Cui  *   x ^= (x >> 47); // good
3698*01826a49SYabin Cui  *   x ^= (x >> 13); // bad
3699*01826a49SYabin Cui  *
3700*01826a49SYabin Cui  * However, to a 32-bit machine, there is a major difference.
3701*01826a49SYabin Cui  *
3702*01826a49SYabin Cui  * x ^= (x >> 47) looks like this:
3703*01826a49SYabin Cui  *
3704*01826a49SYabin Cui  *   x.lo ^= (x.hi >> (47 - 32));
3705*01826a49SYabin Cui  *
3706*01826a49SYabin Cui  * while x ^= (x >> 13) looks like this:
3707*01826a49SYabin Cui  *
3708*01826a49SYabin Cui  *   // note: funnel shifts are not usually cheap.
3709*01826a49SYabin Cui  *   x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13));
3710*01826a49SYabin Cui  *   x.hi ^= (x.hi >> 13);
3711*01826a49SYabin Cui  *
3712*01826a49SYabin Cui  * The first one is significantly faster than the second, simply because the
3713*01826a49SYabin Cui  * shift is larger than 32. This means:
3714*01826a49SYabin Cui  *  - All the bits we need are in the upper 32 bits, so we can ignore the lower
3715*01826a49SYabin Cui  *    32 bits in the shift.
3716*01826a49SYabin Cui  *  - The shift result will always fit in the lower 32 bits, and therefore,
3717*01826a49SYabin Cui  *    we can ignore the upper 32 bits in the xor.
3718*01826a49SYabin Cui  *
3719*01826a49SYabin Cui  * Thanks to this optimization, XXH3 only requires these features to be efficient:
3720*01826a49SYabin Cui  *
3721*01826a49SYabin Cui  *  - Usable unaligned access
3722*01826a49SYabin Cui  *  - A 32-bit or 64-bit ALU
3723*01826a49SYabin Cui  *      - If 32-bit, a decent ADC instruction
3724*01826a49SYabin Cui  *  - A 32 or 64-bit multiply with a 64-bit result
3725*01826a49SYabin Cui  *  - For the 128-bit variant, a decent byteswap helps short inputs.
3726*01826a49SYabin Cui  *
3727*01826a49SYabin Cui  * The first two are already required by XXH32, and almost all 32-bit and 64-bit
3728*01826a49SYabin Cui  * platforms which can run XXH32 can run XXH3 efficiently.
3729*01826a49SYabin Cui  *
3730*01826a49SYabin Cui  * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one
3731*01826a49SYabin Cui  * notable exception.
3732*01826a49SYabin Cui  *
3733*01826a49SYabin Cui  * First of all, Thumb-1 lacks support for the UMULL instruction which
3734*01826a49SYabin Cui  * performs the important long multiply. This means numerous __aeabi_lmul
3735*01826a49SYabin Cui  * calls.
3736*01826a49SYabin Cui  *
3737*01826a49SYabin Cui  * Second of all, the 8 functional registers are just not enough.
3738*01826a49SYabin Cui  * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need
3739*01826a49SYabin Cui  * Lo registers, and this shuffling results in thousands more MOVs than A32.
3740*01826a49SYabin Cui  *
3741*01826a49SYabin Cui  * A32 and T32 don't have this limitation. They can access all 14 registers,
3742*01826a49SYabin Cui  * do a 32->64 multiply with UMULL, and the flexible operand allowing free
3743*01826a49SYabin Cui  * shifts is helpful, too.
3744*01826a49SYabin Cui  *
3745*01826a49SYabin Cui  * Therefore, we do a quick sanity check.
3746*01826a49SYabin Cui  *
3747*01826a49SYabin Cui  * If compiling Thumb-1 for a target which supports ARM instructions, we will
3748*01826a49SYabin Cui  * emit a warning, as it is not a "sane" platform to compile for.
3749*01826a49SYabin Cui  *
3750*01826a49SYabin Cui  * Usually, if this happens, it is because of an accident and you probably need
3751*01826a49SYabin Cui  * to specify -march, as you likely meant to compile for a newer architecture.
3752*01826a49SYabin Cui  *
3753*01826a49SYabin Cui  * Credit: large sections of the vectorial and asm source code paths
3754*01826a49SYabin Cui  *         have been contributed by @easyaspi314
3755*01826a49SYabin Cui  */
3756*01826a49SYabin Cui #if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM)
3757*01826a49SYabin Cui #   warning "XXH3 is highly inefficient without ARM or Thumb-2."
3758*01826a49SYabin Cui #endif
3759*01826a49SYabin Cui 
3760*01826a49SYabin Cui /* ==========================================
3761*01826a49SYabin Cui  * Vectorization detection
3762*01826a49SYabin Cui  * ========================================== */
3763*01826a49SYabin Cui 
3764*01826a49SYabin Cui #ifdef XXH_DOXYGEN
3765*01826a49SYabin Cui /*!
3766*01826a49SYabin Cui  * @ingroup tuning
3767*01826a49SYabin Cui  * @brief Overrides the vectorization implementation chosen for XXH3.
3768*01826a49SYabin Cui  *
3769*01826a49SYabin Cui  * Can be defined to 0 to disable SIMD or any of the values mentioned in
3770*01826a49SYabin Cui  * @ref XXH_VECTOR_TYPE.
3771*01826a49SYabin Cui  *
3772*01826a49SYabin Cui  * If this is not defined, it uses predefined macros to determine the best
3773*01826a49SYabin Cui  * implementation.
3774*01826a49SYabin Cui  */
3775*01826a49SYabin Cui #  define XXH_VECTOR XXH_SCALAR
3776*01826a49SYabin Cui /*!
3777*01826a49SYabin Cui  * @ingroup tuning
3778*01826a49SYabin Cui  * @brief Possible values for @ref XXH_VECTOR.
3779*01826a49SYabin Cui  *
3780*01826a49SYabin Cui  * Note that these are actually implemented as macros.
3781*01826a49SYabin Cui  *
3782*01826a49SYabin Cui  * If this is not defined, it is detected automatically.
3783*01826a49SYabin Cui  * internal macro XXH_X86DISPATCH overrides this.
3784*01826a49SYabin Cui  */
3785*01826a49SYabin Cui enum XXH_VECTOR_TYPE /* fake enum */ {
3786*01826a49SYabin Cui     XXH_SCALAR = 0,  /*!< Portable scalar version */
3787*01826a49SYabin Cui     XXH_SSE2   = 1,  /*!<
3788*01826a49SYabin Cui                       * SSE2 for Pentium 4, Opteron, all x86_64.
3789*01826a49SYabin Cui                       *
3790*01826a49SYabin Cui                       * @note SSE2 is also guaranteed on Windows 10, macOS, and
3791*01826a49SYabin Cui                       * Android x86.
3792*01826a49SYabin Cui                       */
3793*01826a49SYabin Cui     XXH_AVX2   = 2,  /*!< AVX2 for Haswell and Bulldozer */
3794*01826a49SYabin Cui     XXH_AVX512 = 3,  /*!< AVX512 for Skylake and Icelake */
3795*01826a49SYabin Cui     XXH_NEON   = 4,  /*!<
3796*01826a49SYabin Cui                        * NEON for most ARMv7-A, all AArch64, and WASM SIMD128
3797*01826a49SYabin Cui                        * via the SIMDeverywhere polyfill provided with the
3798*01826a49SYabin Cui                        * Emscripten SDK.
3799*01826a49SYabin Cui                        */
3800*01826a49SYabin Cui     XXH_VSX    = 5,  /*!< VSX and ZVector for POWER8/z13 (64-bit) */
3801*01826a49SYabin Cui     XXH_SVE    = 6,  /*!< SVE for some ARMv8-A and ARMv9-A */
3802*01826a49SYabin Cui };
3803*01826a49SYabin Cui /*!
3804*01826a49SYabin Cui  * @ingroup tuning
3805*01826a49SYabin Cui  * @brief Selects the minimum alignment for XXH3's accumulators.
3806*01826a49SYabin Cui  *
3807*01826a49SYabin Cui  * When using SIMD, this should match the alignment required for said vector
3808*01826a49SYabin Cui  * type, so, for example, 32 for AVX2.
3809*01826a49SYabin Cui  *
3810*01826a49SYabin Cui  * Default: Auto detected.
3811*01826a49SYabin Cui  */
3812*01826a49SYabin Cui #  define XXH_ACC_ALIGN 8
3813*01826a49SYabin Cui #endif
3814*01826a49SYabin Cui 
3815*01826a49SYabin Cui /* Actual definition */
3816*01826a49SYabin Cui #ifndef XXH_DOXYGEN
3817*01826a49SYabin Cui #  define XXH_SCALAR 0
3818*01826a49SYabin Cui #  define XXH_SSE2   1
3819*01826a49SYabin Cui #  define XXH_AVX2   2
3820*01826a49SYabin Cui #  define XXH_AVX512 3
3821*01826a49SYabin Cui #  define XXH_NEON   4
3822*01826a49SYabin Cui #  define XXH_VSX    5
3823*01826a49SYabin Cui #  define XXH_SVE    6
3824*01826a49SYabin Cui #endif
3825*01826a49SYabin Cui 
#ifndef XXH_VECTOR    /* can be defined on command line */
#  if defined(__ARM_FEATURE_SVE)
#    define XXH_VECTOR XXH_SVE
#  elif ( \
        defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \
     || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) /* msvc */ \
     || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* wasm simd128 via SIMDe */ \
   ) && ( \
        defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \
    || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
   )
#    define XXH_VECTOR XXH_NEON
#  elif defined(__AVX512F__)
#    define XXH_VECTOR XXH_AVX512
#  elif defined(__AVX2__)
#    define XXH_VECTOR XXH_AVX2
#  elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
#    define XXH_VECTOR XXH_SSE2
#  elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \
     || (defined(__s390x__) && defined(__VEC__)) \
     && defined(__GNUC__) /* TODO: IBM XL */
#    define XXH_VECTOR XXH_VSX
#  else
#    define XXH_VECTOR XXH_SCALAR
#  endif
#endif
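
/*
 * Illustrative override (an example invocation, not part of the library):
 * since XXH_VECTOR can be set on the command line, a build that wants to
 * force the portable code path could compile with, e.g.:
 *
 *     cc -O3 -DXXH_VECTOR=XXH_SCALAR -c xxhash.c
 *
 * Any of the XXH_VECTOR_TYPE values above (or their numeric equivalents)
 * can be supplied the same way.
 */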

/* __ARM_FEATURE_SVE is only supported by GCC & Clang. */
#if (XXH_VECTOR == XXH_SVE) && !defined(__ARM_FEATURE_SVE)
#  ifdef _MSC_VER
#    pragma warning(once : 4606)
#  else
#    warning "__ARM_FEATURE_SVE isn't supported. Use SCALAR instead."
#  endif
#  undef XXH_VECTOR
#  define XXH_VECTOR XXH_SCALAR
#endif

/*
 * Controls the alignment of the accumulator,
 * for compatibility with aligned vector loads, which are usually faster.
 */
#ifndef XXH_ACC_ALIGN
#  if defined(XXH_X86DISPATCH)
#     define XXH_ACC_ALIGN 64  /* for compatibility with avx512 */
#  elif XXH_VECTOR == XXH_SCALAR  /* scalar */
#     define XXH_ACC_ALIGN 8
#  elif XXH_VECTOR == XXH_SSE2  /* sse2 */
#     define XXH_ACC_ALIGN 16
#  elif XXH_VECTOR == XXH_AVX2  /* avx2 */
#     define XXH_ACC_ALIGN 32
#  elif XXH_VECTOR == XXH_NEON  /* neon */
#     define XXH_ACC_ALIGN 16
#  elif XXH_VECTOR == XXH_VSX   /* vsx */
#     define XXH_ACC_ALIGN 16
#  elif XXH_VECTOR == XXH_AVX512  /* avx512 */
#     define XXH_ACC_ALIGN 64
#  elif XXH_VECTOR == XXH_SVE   /* sve */
#     define XXH_ACC_ALIGN 64
#  endif
#endif

#if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \
    || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
#  define XXH_SEC_ALIGN XXH_ACC_ALIGN
#elif XXH_VECTOR == XXH_SVE
#  define XXH_SEC_ALIGN XXH_ACC_ALIGN
#else
#  define XXH_SEC_ALIGN 8
#endif

#if defined(__GNUC__) || defined(__clang__)
#  define XXH_ALIASING __attribute__((may_alias))
#else
#  define XXH_ALIASING /* nothing */
#endif

/*
 * UGLY HACK:
 * GCC usually generates the best code with -O3 for xxHash.
 *
 * However, when targeting AVX2, it is overzealous in its unrolling, resulting
 * in code roughly 3/4 the speed of Clang.
 *
 * There are other issues, such as GCC splitting _mm256_loadu_si256 into
 * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which
 * only applies to Sandy and Ivy Bridge... which don't even support AVX2.
 *
 * That is why when compiling the AVX2 version, it is recommended to use either
 *   -O2 -mavx2 -march=haswell
 * or
 *   -O2 -mavx2 -mno-avx256-split-unaligned-load
 * for decent performance, or to use Clang instead.
 *
 * Fortunately, we can control the first one with a pragma that forces GCC into
 * -O2, but the other one we can't control without "failed to inline always
 * inline function due to target mismatch" warnings.
 */
#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
  && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
  && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */
#  pragma GCC push_options
#  pragma GCC optimize("-O2")
#endif

#if XXH_VECTOR == XXH_NEON

/*
 * UGLY HACK: While AArch64 GCC on Linux does not seem to care, on macOS, GCC -O3
 * optimizes out the entire hashLong loop because of the aliasing violation.
 *
 * However, GCC is also inefficient at load-store optimization with vld1q/vst1q,
 * so the only option is to mark it as aliasing.
 */
typedef uint64x2_t xxh_aliasing_uint64x2_t XXH_ALIASING;

/*!
 * @internal
 * @brief `vld1q_u64` but faster and alignment-safe.
 *
 * On AArch64, unaligned access is always safe, but on ARMv7-A, it is only
 * *conditionally* safe (`vld1` has an alignment bit like `movdq[ua]` in x86).
 *
 * GCC for AArch64 sees `vld1q_u8` as an intrinsic instead of a load, so it
 * prohibits load-store optimizations. Therefore, a direct dereference is used.
 *
 * Otherwise, `vld1q_u8` is used with `vreinterpretq_u64_u8` to do a safe
 * unaligned load.
 */
#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__)
XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr) /* silence -Wcast-align */
{
    return *(xxh_aliasing_uint64x2_t const *)ptr;
}
#else
XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr)
{
    return vreinterpretq_u64_u8(vld1q_u8((uint8_t const*)ptr));
}
#endif

/*!
 * @internal
 * @brief `vmlal_u32` on low and high halves of a vector.
 *
 * This is a workaround for AArch64 GCC < 11, which implemented arm_neon.h with
 * inline assembly and was therefore incapable of merging the `vget_{low, high}_u32`
 * with `vmlal_u32`.
 */
#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__) && __GNUC__ < 11
XXH_FORCE_INLINE uint64x2_t
XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
{
    /* Inline assembly is the only way */
    __asm__("umlal   %0.2d, %1.2s, %2.2s" : "+w" (acc) : "w" (lhs), "w" (rhs));
    return acc;
}
XXH_FORCE_INLINE uint64x2_t
XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
{
    /* This intrinsic works as expected */
    return vmlal_high_u32(acc, lhs, rhs);
}
#else
/* Portable intrinsic versions */
XXH_FORCE_INLINE uint64x2_t
XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
{
    return vmlal_u32(acc, vget_low_u32(lhs), vget_low_u32(rhs));
}
/*! @copydoc XXH_vmlal_low_u32
 * Assume the compiler converts this to vmlal_high_u32 on aarch64 */
XXH_FORCE_INLINE uint64x2_t
XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
{
    return vmlal_u32(acc, vget_high_u32(lhs), vget_high_u32(rhs));
}
#endif

/*!
 * @ingroup tuning
 * @brief Controls the NEON to scalar ratio for XXH3
 *
 * This can be set to 2, 4, 6, or 8.
 *
 * ARM Cortex CPUs are _very_ sensitive to how their pipelines are used.
 *
 * For example, the Cortex-A73 can dispatch 3 micro-ops per cycle, but only 2 of those
 * can be NEON. If you are only using NEON instructions, you are only using 2/3 of the CPU
 * bandwidth.
 *
 * This is even more noticeable on the more advanced cores like the Cortex-A76 which
 * can dispatch 8 micro-ops per cycle, but still only 2 NEON micro-ops at once.
 *
 * Therefore, to make the most out of the pipeline, it is beneficial to run 6 NEON lanes
 * and 2 scalar lanes, which is chosen by default.
 *
 * This does not apply to Apple processors or 32-bit processors, which run better with
 * full NEON. These will default to 8. Additionally, size-optimized builds run 8 lanes.
 *
 * This change benefits CPUs with large micro-op buffers without negatively affecting
 * most other CPUs:
 *
 *  | Chipset               | Dispatch type       | NEON only | 6:2 hybrid | Diff. |
 *  |:----------------------|:--------------------|----------:|-----------:|------:|
 *  | Snapdragon 730 (A76)  | 2 NEON/8 micro-ops  |  8.8 GB/s |  10.1 GB/s |  ~16% |
 *  | Snapdragon 835 (A73)  | 2 NEON/3 micro-ops  |  5.1 GB/s |   5.3 GB/s |   ~5% |
 *  | Marvell PXA1928 (A53) | In-order dual-issue |  1.9 GB/s |   1.9 GB/s |    0% |
 *  | Apple M1              | 4 NEON/8 micro-ops  | 37.3 GB/s |  36.1 GB/s |  ~-3% |
 *
 * It also seems to fix some bad codegen on GCC, making it almost as fast as clang.
 *
 * When using WASM SIMD128, if this is 2 or 6, SIMDe will scalarize 2 of the lanes,
 * meaning it effectively becomes a slower 4.
 *
 * @see XXH3_accumulate_512_neon()
 */
# ifndef XXH3_NEON_LANES
#  if (defined(__aarch64__) || defined(__arm64__) || defined(_M_ARM64) || defined(_M_ARM64EC)) \
   && !defined(__APPLE__) && XXH_SIZE_OPT <= 0
#   define XXH3_NEON_LANES 6
#  else
#   define XXH3_NEON_LANES XXH_ACC_NB
#  endif
# endif
#endif  /* XXH_VECTOR == XXH_NEON */
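
/*
 * Illustrative override (an example invocation, not part of the library):
 * like XXH_VECTOR, this macro can be set on the command line, so a build that
 * prefers full-width NEON on a non-Apple AArch64 target could compile with:
 *
 *     cc -O3 -DXXH3_NEON_LANES=8 -c xxhash.c
 */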

/*
 * VSX and Z Vector helpers.
 *
 * This is very messy, and any pull requests to clean this up are welcome.
 *
 * There are a lot of problems with supporting VSX and s390x, due to
 * inconsistent intrinsics, spotty coverage, and multiple endiannesses.
 */
#if XXH_VECTOR == XXH_VSX
/* Annoyingly, these headers _may_ define three macros: `bool`, `vector`,
 * and `pixel`. This is a problem for obvious reasons.
 *
 * These keywords are unnecessary; the spec literally says they are
 * equivalent to `__bool`, `__vector`, and `__pixel` and may be undef'd
 * after including the header.
 *
 * We use pragma push_macro/pop_macro to keep the namespace clean. */
#  pragma push_macro("bool")
#  pragma push_macro("vector")
#  pragma push_macro("pixel")
/* silence potential macro redefined warnings */
#  undef bool
#  undef vector
#  undef pixel

#  if defined(__s390x__)
#    include <s390intrin.h>
#  else
#    include <altivec.h>
#  endif

/* Restore the original macro values, if applicable. */
#  pragma pop_macro("pixel")
#  pragma pop_macro("vector")
#  pragma pop_macro("bool")

typedef __vector unsigned long long xxh_u64x2;
typedef __vector unsigned char xxh_u8x16;
typedef __vector unsigned xxh_u32x4;

/*
 * UGLY HACK: Similar to aarch64 macOS GCC, s390x GCC has the same aliasing issue.
 */
typedef xxh_u64x2 xxh_aliasing_u64x2 XXH_ALIASING;

# ifndef XXH_VSX_BE
#  if defined(__BIG_ENDIAN__) \
  || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#    define XXH_VSX_BE 1
#  elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
#    warning "-maltivec=be is not recommended. Please use native endianness."
#    define XXH_VSX_BE 1
#  else
#    define XXH_VSX_BE 0
#  endif
# endif /* !defined(XXH_VSX_BE) */

# if XXH_VSX_BE
#  if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__))
#    define XXH_vec_revb vec_revb
#  else
/*!
 * A polyfill for POWER9's vec_revb().
 */
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val)
{
    xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
                                  0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 };
    return vec_perm(val, val, vByteSwap);
}
#  endif
# endif /* XXH_VSX_BE */

/*!
 * Performs an unaligned vector load and byte swaps it on big endian.
 */
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr)
{
    xxh_u64x2 ret;
    XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2));
# if XXH_VSX_BE
    ret = XXH_vec_revb(ret);
# endif
    return ret;
}

/*
 * vec_mulo and vec_mule are very problematic intrinsics on PowerPC.
 *
 * These intrinsics weren't added until GCC 8, despite existing for a while,
 * and they are endian dependent. Also, their meanings swap depending on the
 * version.
 */
# if defined(__s390x__)
 /* s390x is always big endian, no issue on this platform */
#  define XXH_vec_mulo vec_mulo
#  define XXH_vec_mule vec_mule
# elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw) && !defined(__ibmxl__)
/* Clang has a better way to control this, we can just use the builtin which doesn't swap. */
 /* The IBM XL Compiler (which defines __clang__) only implements the vec_* operations */
#  define XXH_vec_mulo __builtin_altivec_vmulouw
#  define XXH_vec_mule __builtin_altivec_vmuleuw
# else
/* gcc needs inline assembly */
/* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b)
{
    xxh_u64x2 result;
    __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
    return result;
}
XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b)
{
    xxh_u64x2 result;
    __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
    return result;
}
# endif /* XXH_vec_mulo, XXH_vec_mule */
#endif /* XXH_VECTOR == XXH_VSX */

#if XXH_VECTOR == XXH_SVE
#define ACCRND(acc, offset) \
do { \
    svuint64_t input_vec = svld1_u64(mask, xinput + offset);         \
    svuint64_t secret_vec = svld1_u64(mask, xsecret + offset);       \
    svuint64_t mixed = sveor_u64_x(mask, secret_vec, input_vec);     \
    svuint64_t swapped = svtbl_u64(input_vec, kSwap);                \
    svuint64_t mixed_lo = svextw_u64_x(mask, mixed);                 \
    svuint64_t mixed_hi = svlsr_n_u64_x(mask, mixed, 32);            \
    svuint64_t mul = svmad_u64_x(mask, mixed_lo, mixed_hi, swapped); \
    acc = svadd_u64_x(mask, acc, mul);                               \
} while (0)
#endif /* XXH_VECTOR == XXH_SVE */
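
/*
 * A rough scalar sketch of what ACCRND computes per 64-bit lane i, assuming
 * kSwap holds the lane indices with bit 0 flipped (the names below are
 * illustrative, not part of the library):
 *
 *     xxh_u64 mixed = xinput[i] ^ xsecret[i];
 *     acc[i] += (mixed & 0xFFFFFFFF) * (mixed >> 32)  // 32x32->64 multiply
 *             + xinput[i ^ 1];                        // swapped neighbor lane
 */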

/* prefetch
 * can be disabled by defining the XXH_NO_PREFETCH build macro */
#if defined(XXH_NO_PREFETCH)
#  define XXH_PREFETCH(ptr)  (void)(ptr)  /* disabled */
#else
#  if XXH_SIZE_OPT >= 1
#    define XXH_PREFETCH(ptr) (void)(ptr)
#  elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))  /* _mm_prefetch() not defined outside of x86/x64 */
#    include <mmintrin.h>   /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
#    define XXH_PREFETCH(ptr)  _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
#  elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
#    define XXH_PREFETCH(ptr)  __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
#  else
#    define XXH_PREFETCH(ptr) (void)(ptr)  /* disabled */
#  endif
#endif  /* XXH_NO_PREFETCH */


/* ==========================================
 * XXH3 default settings
 * ========================================== */

#define XXH_SECRET_DEFAULT_SIZE 192   /* minimum XXH3_SECRET_SIZE_MIN */

#if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN)
#  error "default keyset is not large enough"
#endif

/*! Pseudorandom secret taken directly from FARSH. */
XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = {
    0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c,
    0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f,
    0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21,
    0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c,
    0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3,
    0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8,
    0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d,
    0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64,
    0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb,
    0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e,
    0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce,
    0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e,
};

static const xxh_u64 PRIME_MX1 = 0x165667919E3779F9ULL;  /*!< 0b0001011001010110011001111001000110011110001101110111100111111001 */
static const xxh_u64 PRIME_MX2 = 0x9FB21C651E98DF25ULL;  /*!< 0b1001111110110010000111000110010100011110100110001101111100100101 */

#ifdef XXH_OLD_NAMES
#  define kSecret XXH3_kSecret
#endif

#ifdef XXH_DOXYGEN
/*!
 * @brief Calculates a 32-bit to 64-bit long multiply.
 *
 * Implemented as a macro.
 *
 * Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it doesn't
 * need to (but it shouldn't need to anyway, it is about 7 instructions to do
 * a 64x64 multiply...). Since we know that this will _always_ emit `MULL`, we
 * use that instead of the normal method.
 *
 * If you are compiling for platforms like Thumb-1 and don't have a better option,
 * you may also want to write your own long multiply routine here.
 *
 * @param x, y Numbers to be multiplied
 * @return 64-bit product of the low 32 bits of @p x and @p y.
 */
XXH_FORCE_INLINE xxh_u64
XXH_mult32to64(xxh_u64 x, xxh_u64 y)
{
   return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF);
}
#elif defined(_MSC_VER) && defined(_M_IX86)
#    define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y))
#else
/*
 * Downcast + upcast is usually better than masking on older compilers like
 * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers.
 *
 * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands
 * and perform a full 64x64 multiply -- entirely redundant on 32-bit.
 */
#    define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y))
#endif
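
/*
 * For instance, XXH_mult32to64(0xFFFFFFFF, 2) yields 0x1FFFFFFFEULL: only the
 * low 32 bits of each argument participate, but the product keeps the full
 * 64 bits.
 */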

/*!
 * @brief Calculates a 64->128-bit long multiply.
 *
 * Uses `__uint128_t` and `_umul128` if available, otherwise uses a scalar
 * version.
 *
 * @param lhs , rhs The 64-bit integers to be multiplied
 * @return The 128-bit result represented in an @ref XXH128_hash_t.
 */
static XXH128_hash_t
XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs)
{
    /*
     * GCC/Clang __uint128_t method.
     *
     * On most 64-bit targets, GCC and Clang define a __uint128_t type.
     * This is usually the best way as it usually uses a native long 64-bit
     * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64.
     *
     * Usually.
     *
     * Clang (and Emscripten) also define this type on 32-bit wasm, despite
     * the target not having the arithmetic for it. This results in a slow
     * compiler-builtin call which calculates a full 128-bit multiply.
     * In that case it is best to use the portable one.
     * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677
     */
#if (defined(__GNUC__) || defined(__clang__)) && !defined(__wasm__) \
    && defined(__SIZEOF_INT128__) \
    || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)

    __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs;
    XXH128_hash_t r128;
    r128.low64  = (xxh_u64)(product);
    r128.high64 = (xxh_u64)(product >> 64);
    return r128;

    /*
     * MSVC for x64's _umul128 method.
     *
     * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct);
     *
     * This compiles to single operand MUL on x64.
     */
#elif (defined(_M_X64) || defined(_M_IA64)) && !defined(_M_ARM64EC)

#ifndef _MSC_VER
#   pragma intrinsic(_umul128)
#endif
    xxh_u64 product_high;
    xxh_u64 const product_low = _umul128(lhs, rhs, &product_high);
    XXH128_hash_t r128;
    r128.low64  = product_low;
    r128.high64 = product_high;
    return r128;

    /*
     * MSVC for ARM64's __umulh method.
     *
     * This compiles to the same MUL + UMULH as GCC/Clang's __uint128_t method.
     */
#elif defined(_M_ARM64) || defined(_M_ARM64EC)

#ifndef _MSC_VER
#   pragma intrinsic(__umulh)
#endif
    XXH128_hash_t r128;
    r128.low64  = lhs * rhs;
    r128.high64 = __umulh(lhs, rhs);
    return r128;

#else
    /*
     * Portable scalar method. Optimized for 32-bit and 64-bit ALUs.
     *
     * This is a fast and simple grade school multiply, which is shown below
     * with base 10 arithmetic instead of base 0x100000000.
     *
     *           9 3 // D2 lhs = 93
     *         x 7 5 // D2 rhs = 75
     *     ----------
     *           1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15
     *         4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45
     *         2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21
     *     + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63
     *     ---------
     *         2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27
     *     + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67
     *     ---------
     *       6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975
     *
     * The reasons for adding the products like this are:
     *  1. It avoids manual carry tracking. Just like how
     *     (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX.
     *     This avoids a lot of complexity.
     *
     *  2. It hints for, and on Clang, compiles to, the powerful UMAAL
     *     instruction available in ARM's Digital Signal Processing extension
     *     in 32-bit ARMv6 and later, which is shown below:
     *
     *         void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm)
     *         {
     *             xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm;
     *             *RdLo = (xxh_u32)(product & 0xFFFFFFFF);
     *             *RdHi = (xxh_u32)(product >> 32);
     *         }
     *
     *     This instruction was designed for efficient long multiplication, and
     *     allows this to be calculated in only 4 instructions at speeds
     *     comparable to some 64-bit ALUs.
     *
     *  3. It isn't terrible on other platforms. Usually this will be a couple
     *     of 32-bit ADD/ADCs.
     */

    /* First calculate all of the cross products. */
    xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF);
    xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32,        rhs & 0xFFFFFFFF);
    xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32);
    xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32,        rhs >> 32);

    /* Now add the products together. These will never overflow. */
    xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
    xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32)        + hi_hi;
    xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);

    XXH128_hash_t r128;
    r128.low64  = lower;
    r128.high64 = upper;
    return r128;
#endif
}
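
/*
 * For example, XXH_mult64to128(0xFFFFFFFFFFFFFFFFULL, 2) returns
 * { .low64 = 0xFFFFFFFFFFFFFFFEULL, .high64 = 1 }: the full 128-bit product,
 * split across the two 64-bit halves.
 */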

/*!
 * @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it.
 *
 * The reason for the separate function is to prevent passing too many structs
 * around by value. This will hopefully inline the multiply, but we don't force it.
 *
 * @param lhs , rhs The 64-bit integers to multiply
 * @return The low 64 bits of the product XOR'd by the high 64 bits.
 * @see XXH_mult64to128()
 */
static xxh_u64
XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs)
{
    XXH128_hash_t product = XXH_mult64to128(lhs, rhs);
    return product.low64 ^ product.high64;
}
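
/*
 * Continuing the example above: XXH3_mul128_fold64(0xFFFFFFFFFFFFFFFFULL, 2)
 * evaluates to 0xFFFFFFFFFFFFFFFEULL ^ 1 == 0xFFFFFFFFFFFFFFFFULL.
 */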

/*! Seems to produce slightly better code on GCC for some reason. */
XXH_FORCE_INLINE XXH_CONSTF xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift)
{
    XXH_ASSERT(0 <= shift && shift < 64);
    return v64 ^ (v64 >> shift);
}

/*
 * This is a fast avalanche stage,
 * suitable when input bits are already partially mixed.
 */
static XXH64_hash_t XXH3_avalanche(xxh_u64 h64)
{
    h64 = XXH_xorshift64(h64, 37);
    h64 *= PRIME_MX1;
    h64 = XXH_xorshift64(h64, 32);
    return h64;
}

/*
 * This is a stronger avalanche,
 * inspired by Pelle Evensen's rrmxmx,
 * preferable when input has not been previously mixed.
 */
static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len)
{
    /* this mix is inspired by Pelle Evensen's rrmxmx */
    h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24);
    h64 *= PRIME_MX2;
    h64 ^= (h64 >> 35) + len;
    h64 *= PRIME_MX2;
    return XXH_xorshift64(h64, 28);
}


/* ==========================================
 * Short keys
 * ==========================================
 * One of the shortcomings of XXH32 and XXH64 was that their performance was
 * sub-optimal on short lengths. They used an iterative algorithm which strongly
 * favored lengths that were a multiple of 4 or 8.
 *
 * Instead of iterating over individual inputs, we use a set of single-shot
 * functions which piece together a range of lengths and operate in constant time.
 *
 * Additionally, the number of multiplies has been significantly reduced. This
 * reduces latency, especially when emulating 64-bit multiplies on 32-bit.
 *
 * Depending on the platform, this may or may not be faster than XXH32, but it
 * is almost guaranteed to be faster than XXH64.
 */

/*
 * At very short lengths, there isn't enough input to fully hide secrets, or use
 * the entire secret.
 *
 * There is also only a limited amount of mixing we can do before significantly
 * impacting performance.
 *
 * Therefore, we use different sections of the secret and always mix two secret
 * samples with an XOR. This should have no effect on performance on the
 * seedless or withSeed variants because everything _should_ be constant folded
 * by modern compilers.
 *
 * The XOR mixing hides individual parts of the secret and increases entropy.
 *
 * This adds an extra layer of strength for custom secrets.
 */
XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(1 <= len && len <= 3);
    XXH_ASSERT(secret != NULL);
    /*
     * len = 1: combined = { input[0], 0x01, input[0], input[0] }
     * len = 2: combined = { input[1], 0x02, input[0], input[1] }
     * len = 3: combined = { input[2], 0x03, input[0], input[1] }
     */
    {   xxh_u8  const c1 = input[0];
        xxh_u8  const c2 = input[len >> 1];
        xxh_u8  const c3 = input[len - 1];
        xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2  << 24)
                               | ((xxh_u32)c3 <<  0) | ((xxh_u32)len << 8);
        xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
        xxh_u64 const keyed = (xxh_u64)combined ^ bitflip;
        return XXH64_avalanche(keyed);
    }
}
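
/*
 * Worked example (derived from the code above): for the single byte 0x42
 * (len = 1), c1 = c2 = c3 = 0x42, so combined == 0x42420142, i.e. the bytes
 * { 0x42, 0x01, 0x42, 0x42 } in little-endian order.
 */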

XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(4 <= len && len <= 8);
    seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
    {   xxh_u32 const input1 = XXH_readLE32(input);
        xxh_u32 const input2 = XXH_readLE32(input + len - 4);
        xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed;
        xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32);
        xxh_u64 const keyed = input64 ^ bitflip;
        return XXH3_rrmxmx(keyed, len);
    }
}

XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(9 <= len && len <= 16);
    {   xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed;
        xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed;
        xxh_u64 const input_lo = XXH_readLE64(input)           ^ bitflip1;
        xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2;
        xxh_u64 const acc = len
                          + XXH_swap64(input_lo) + input_hi
                          + XXH3_mul128_fold64(input_lo, input_hi);
        return XXH3_avalanche(acc);
    }
}

XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(len <= 16);
    {   if (XXH_likely(len >  8)) return XXH3_len_9to16_64b(input, len, secret, seed);
        if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed);
        if (len) return XXH3_len_1to3_64b(input, len, secret, seed);
        return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64)));
    }
}
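
/*
 * Note the len == 0 case above: the input pointer is never dereferenced, and
 * the result is an avalanche of the seed XORed with two secret words, which
 * is why hashing a NULL pointer with length 0 is well defined.
 */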

/*
 * DISCLAIMER: There are known *seed-dependent* multicollisions here due to
 * multiplication by zero, affecting hashes of lengths 17 to 240.
 *
 * However, they are very unlikely.
 *
 * Keep this in mind when using the unseeded XXH3_64bits() variant: As with all
 * unseeded non-cryptographic hashes, it does not attempt to defend itself
 * against specially crafted inputs, only random inputs.
 *
 * Compared to classic UMAC, where there is a 1 in 2^31 chance of 4 consecutive
 * bytes cancelling out the secret, a risk taken an arbitrary number of times
 * (addressed in XXH3_accumulate_512), this collision is very unlikely with
 * random inputs and/or proper seeding:
 *
 * This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a
 * function that is only called up to 16 times per hash with up to 240 bytes of
 * input.
 *
 * This is not too bad for a non-cryptographic hash function, especially with
 * only 64-bit outputs.
 *
 * The 128-bit variant (which trades some speed for strength) is NOT affected
 * by this, although it is always a good idea to use a proper seed if you care
 * about strength.
 */
XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input,
                                     const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64)
{
#if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
  && defined(__i386__) && defined(__SSE2__)  /* x86 + SSE2 */ \
  && !defined(XXH_ENABLE_AUTOVECTORIZE)      /* Define to disable like XXH32 hack */
    /*
     * UGLY HACK:
     * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in
     * slower code.
     *
     * By forcing seed64 into a register, we disrupt the cost model and
     * cause it to scalarize. See `XXH32_round()`
     *
     * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600,
     * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on
     * GCC 9.2, despite both emitting scalar code.
     *
     * GCC generates much better scalar code than Clang for the rest of XXH3,
     * which is why finding a more optimal codepath is of interest.
     */
    XXH_COMPILER_GUARD(seed64);
#endif
    {   xxh_u64 const input_lo = XXH_readLE64(input);
        xxh_u64 const input_hi = XXH_readLE64(input+8);
        return XXH3_mul128_fold64(
            input_lo ^ (XXH_readLE64(secret)   + seed64),
            input_hi ^ (XXH_readLE64(secret+8) - seed64)
        );
    }
}

/* For mid-range keys, XXH3 uses a Mum-hash variant. */
XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
                     const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                     XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(16 < len && len <= 128);

    {   xxh_u64 acc = len * XXH_PRIME64_1;
#if XXH_SIZE_OPT >= 1
        /* Smaller and cleaner, but slightly slower. */
        unsigned int i = (unsigned int)(len - 1) / 32;
        do {
            acc += XXH3_mix16B(input+16 * i, secret+32*i, seed);
            acc += XXH3_mix16B(input+len-16*(i+1), secret+32*i+16, seed);
        } while (i-- != 0);
#else
        if (len > 32) {
            if (len > 64) {
                if (len > 96) {
                    acc += XXH3_mix16B(input+48, secret+96, seed);
                    acc += XXH3_mix16B(input+len-64, secret+112, seed);
                }
                acc += XXH3_mix16B(input+32, secret+64, seed);
                acc += XXH3_mix16B(input+len-48, secret+80, seed);
            }
            acc += XXH3_mix16B(input+16, secret+32, seed);
            acc += XXH3_mix16B(input+len-32, secret+48, seed);
        }
        acc += XXH3_mix16B(input+0, secret+0, seed);
        acc += XXH3_mix16B(input+len-16, secret+16, seed);
#endif
        return XXH3_avalanche(acc);
    }
}
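
/*
 * For example, with len = 100 the #else branch above performs 8 XXH3_mix16B()
 * calls: four over the first 64 bytes (offsets 0, 16, 32, 48) and four over
 * the last 64 bytes (offsets len-64, len-48, len-32, len-16), so the two
 * halves overlap in the middle and every input byte is mixed at least once.
 */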

/*!
 * @brief Maximum size of "short" key in bytes.
 */
#define XXH3_MIDSIZE_MAX 240

XXH_NO_INLINE XXH_PUREF XXH64_hash_t
XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                      XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);

    #define XXH3_MIDSIZE_STARTOFFSET 3
    #define XXH3_MIDSIZE_LASTOFFSET  17

    {   xxh_u64 acc = len * XXH_PRIME64_1;
        xxh_u64 acc_end;
        unsigned int const nbRounds = (unsigned int)len / 16;
        unsigned int i;
        XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
        for (i=0; i<8; i++) {
            acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed);
        }
        /* last bytes */
        acc_end = XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed);
        XXH_ASSERT(nbRounds >= 8);
        acc = XXH3_avalanche(acc);
#if defined(__clang__)                                /* Clang */ \
    && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
    && !defined(XXH_ENABLE_AUTOVECTORIZE)             /* Define to disable */
        /*
         * UGLY HACK:
         * Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86.
         * Everywhere else, it uses scalar code.
         *
         * For 64->128-bit multiplies, even if the NEON were 100% optimal, it
         * would still be slower than UMAAL (see XXH_mult64to128).
         *
         * Unfortunately, Clang doesn't handle the long multiplies properly and
         * converts them to the nonexistent "vmulq_u64" intrinsic, which is then
         * scalarized into an ugly mess of VMOV.32 instructions.
         *
         * This mess is difficult to avoid without turning autovectorization
         * off completely, but the issues are usually relatively minor and/or
         * not worth it to fix.
         *
         * This loop is the easiest to fix, as unlike XXH32, this pragma
         * _actually works_ because it is a loop vectorization instead of an
         * SLP vectorization.
         */
        #pragma clang loop vectorize(disable)
#endif
        for (i=8 ; i < nbRounds; i++) {
            /*
             * Prevents Clang from unrolling the acc loop and interleaving with this one.
             */
            XXH_COMPILER_GUARD(acc);
            acc_end += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed);
        }
        return XXH3_avalanche(acc + acc_end);
    }
}
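
/*
 * For example, with len = 240 (XXH3_MIDSIZE_MAX), nbRounds == 15: the first
 * loop mixes input bytes 0..127 against secret bytes 0..127, the second loop
 * mixes input bytes 128..239 against secret bytes 3..114 (shifted by
 * XXH3_MIDSIZE_STARTOFFSET), and acc_end additionally mixes the last 16 input
 * bytes against secret bytes 119..134.
 */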


/* =======     Long Keys     ======= */

#define XXH_STRIPE_LEN 64
#define XXH_SECRET_CONSUME_RATE 8   /* nb of secret bytes consumed at each accumulation */
#define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64))

#ifdef XXH_OLD_NAMES
#  define STRIPE_LEN XXH_STRIPE_LEN
#  define ACC_NB XXH_ACC_NB
#endif

#ifndef XXH_PREFETCH_DIST
#  ifdef __clang__
#    define XXH_PREFETCH_DIST 320
#  else
#    if (XXH_VECTOR == XXH_AVX512)
#      define XXH_PREFETCH_DIST 512
#    else
#      define XXH_PREFETCH_DIST 384
#    endif
#  endif  /* __clang__ */
#endif  /* XXH_PREFETCH_DIST */
4738*01826a49SYabin Cui 
4739*01826a49SYabin Cui /*
4740*01826a49SYabin Cui  * These macros generate an XXH3_accumulate() function.
4741*01826a49SYabin Cui  * The name argument selects the suffix; the target attribute is applied at the instantiation site.
4742*01826a49SYabin Cui  *
4743*01826a49SYabin Cui  * The name of this symbol is XXH3_accumulate_<name>() and it calls
4744*01826a49SYabin Cui  * XXH3_accumulate_512_<name>().
4745*01826a49SYabin Cui  *
4746*01826a49SYabin Cui  * It may be useful to hand implement this function if the compiler fails to
4747*01826a49SYabin Cui  * optimize the inline function.
4748*01826a49SYabin Cui  */
4749*01826a49SYabin Cui #define XXH3_ACCUMULATE_TEMPLATE(name)                      \
4750*01826a49SYabin Cui void                                                        \
4751*01826a49SYabin Cui XXH3_accumulate_##name(xxh_u64* XXH_RESTRICT acc,           \
4752*01826a49SYabin Cui                        const xxh_u8* XXH_RESTRICT input,    \
4753*01826a49SYabin Cui                        const xxh_u8* XXH_RESTRICT secret,   \
4754*01826a49SYabin Cui                        size_t nbStripes)                    \
4755*01826a49SYabin Cui {                                                           \
4756*01826a49SYabin Cui     size_t n;                                               \
4757*01826a49SYabin Cui     for (n = 0; n < nbStripes; n++ ) {                      \
4758*01826a49SYabin Cui         const xxh_u8* const in = input + n*XXH_STRIPE_LEN;  \
4759*01826a49SYabin Cui         XXH_PREFETCH(in + XXH_PREFETCH_DIST);               \
4760*01826a49SYabin Cui         XXH3_accumulate_512_##name(                         \
4761*01826a49SYabin Cui                  acc,                                       \
4762*01826a49SYabin Cui                  in,                                        \
4763*01826a49SYabin Cui                  secret + n*XXH_SECRET_CONSUME_RATE);       \
4764*01826a49SYabin Cui     }                                                       \
4765*01826a49SYabin Cui }
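
/*
 * Editorial note (a sketch, not part of the library): each stripe consumes
 * XXH_STRIPE_LEN (64) bytes of input but advances the secret by only
 * XXH_SECRET_CONSUME_RATE (8) bytes, so consecutive stripes read overlapping
 * 64-byte windows of the secret. Over nbStripes stripes, the secret offsets
 * touched are [0, 64 + (nbStripes-1)*8), which bounds how many stripes can
 * run before the accumulators must be scrambled and the secret offset reset.
 */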
4766*01826a49SYabin Cui 
4767*01826a49SYabin Cui 
4768*01826a49SYabin Cui XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64)
4769*01826a49SYabin Cui {
4770*01826a49SYabin Cui     if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64);
4771*01826a49SYabin Cui     XXH_memcpy(dst, &v64, sizeof(v64));
4772*01826a49SYabin Cui }
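
/*
 * Editorial example: XXH_writeLE64() always emits little-endian bytes
 * regardless of host order. Writing v64 = 0x0102030405060708 stores the
 * byte sequence { 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01 } on both
 * little- and big-endian hosts, since big-endian hosts swap before the copy.
 */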
4773*01826a49SYabin Cui 
4774*01826a49SYabin Cui /* Several intrinsic functions below are supposed to accept __int64 as argument,
4775*01826a49SYabin Cui  * as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ .
4776*01826a49SYabin Cui  * However, several environments do not define __int64 type,
4777*01826a49SYabin Cui  * requiring a workaround.
4778*01826a49SYabin Cui  */
4779*01826a49SYabin Cui #if !defined (__VMS) \
4780*01826a49SYabin Cui   && (defined (__cplusplus) \
4781*01826a49SYabin Cui   || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
4782*01826a49SYabin Cui     typedef int64_t xxh_i64;
4783*01826a49SYabin Cui #else
4784*01826a49SYabin Cui     /* the following type must have a width of 64-bit */
4785*01826a49SYabin Cui     typedef long long xxh_i64;
4786*01826a49SYabin Cui #endif
4787*01826a49SYabin Cui 
4788*01826a49SYabin Cui 
4789*01826a49SYabin Cui /*
4790*01826a49SYabin Cui  * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized.
4791*01826a49SYabin Cui  *
4792*01826a49SYabin Cui  * It is a hardened version of UMAC, based on FARSH's implementation.
4793*01826a49SYabin Cui  *
4794*01826a49SYabin Cui  * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD
4795*01826a49SYabin Cui  * implementations, and it is ridiculously fast.
4796*01826a49SYabin Cui  *
4797*01826a49SYabin Cui  * We harden it by mixing the original input into the accumulators as well as the product.
4798*01826a49SYabin Cui  *
4799*01826a49SYabin Cui  * This means that in the (relatively likely) case of a multiply by zero, the
4800*01826a49SYabin Cui  * original input is preserved.
4801*01826a49SYabin Cui  *
4802*01826a49SYabin Cui  * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve
4803*01826a49SYabin Cui  * cross-pollination, as otherwise the upper and lower halves would be
4804*01826a49SYabin Cui  * essentially independent.
4805*01826a49SYabin Cui  *
4806*01826a49SYabin Cui  * This doesn't matter on 64-bit hashes since they all get merged together in
4807*01826a49SYabin Cui  * the end, so we skip the extra step.
4808*01826a49SYabin Cui  *
4809*01826a49SYabin Cui  * Both XXH3_64bits and XXH3_128bits use this subroutine.
4810*01826a49SYabin Cui  */
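
/*
 * Editorial sketch: in scalar terms, one stripe updates each 64-bit lane i
 * roughly as follows (this mirrors XXH3_scalarRound() further below, with
 * input and secret read as little-endian 64-bit words):
 *
 *   xxh_u64 const data     = XXH_readLE64(input  + 8*i);
 *   xxh_u64 const data_key = data ^ XXH_readLE64(secret + 8*i);
 *   acc[i ^ 1] += data;                                       // keep raw input
 *   acc[i]     += (data_key & 0xFFFFFFFF) * (data_key >> 32); // 32x32->64
 *
 * The (i ^ 1) swap preserves the original input in a neighboring lane,
 * which is the hardening step described above.
 */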
4811*01826a49SYabin Cui 
4812*01826a49SYabin Cui #if (XXH_VECTOR == XXH_AVX512) \
4813*01826a49SYabin Cui      || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0)
4814*01826a49SYabin Cui 
4815*01826a49SYabin Cui #ifndef XXH_TARGET_AVX512
4816*01826a49SYabin Cui # define XXH_TARGET_AVX512  /* disable attribute target */
4817*01826a49SYabin Cui #endif
4818*01826a49SYabin Cui 
4819*01826a49SYabin Cui XXH_FORCE_INLINE XXH_TARGET_AVX512 void
4820*01826a49SYabin Cui XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc,
4821*01826a49SYabin Cui                      const void* XXH_RESTRICT input,
4822*01826a49SYabin Cui                      const void* XXH_RESTRICT secret)
4823*01826a49SYabin Cui {
4824*01826a49SYabin Cui     __m512i* const xacc = (__m512i *) acc;
4825*01826a49SYabin Cui     XXH_ASSERT((((size_t)acc) & 63) == 0);
4826*01826a49SYabin Cui     XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
4827*01826a49SYabin Cui 
4828*01826a49SYabin Cui     {
4829*01826a49SYabin Cui         /* data_vec    = input[0]; */
4830*01826a49SYabin Cui         __m512i const data_vec    = _mm512_loadu_si512   (input);
4831*01826a49SYabin Cui         /* key_vec     = secret[0]; */
4832*01826a49SYabin Cui         __m512i const key_vec     = _mm512_loadu_si512   (secret);
4833*01826a49SYabin Cui         /* data_key    = data_vec ^ key_vec; */
4834*01826a49SYabin Cui         __m512i const data_key    = _mm512_xor_si512     (data_vec, key_vec);
4835*01826a49SYabin Cui         /* data_key_lo = data_key >> 32; */
4836*01826a49SYabin Cui         __m512i const data_key_lo = _mm512_srli_epi64 (data_key, 32);
4837*01826a49SYabin Cui         /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
4838*01826a49SYabin Cui         __m512i const product     = _mm512_mul_epu32     (data_key, data_key_lo);
4839*01826a49SYabin Cui         /* xacc[0] += swap(data_vec); */
4840*01826a49SYabin Cui         __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2));
4841*01826a49SYabin Cui         __m512i const sum       = _mm512_add_epi64(*xacc, data_swap);
4842*01826a49SYabin Cui         /* xacc[0] += product; */
4843*01826a49SYabin Cui         *xacc = _mm512_add_epi64(product, sum);
4844*01826a49SYabin Cui     }
4845*01826a49SYabin Cui }
4846*01826a49SYabin Cui XXH_FORCE_INLINE XXH_TARGET_AVX512 XXH3_ACCUMULATE_TEMPLATE(avx512)
4847*01826a49SYabin Cui 
4848*01826a49SYabin Cui /*
4849*01826a49SYabin Cui  * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing.
4850*01826a49SYabin Cui  *
4851*01826a49SYabin Cui  * Multiplication isn't perfect, as explained by Google in HighwayHash:
4852*01826a49SYabin Cui  *
4853*01826a49SYabin Cui  *  // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to
4854*01826a49SYabin Cui  *  // varying degrees. In descending order of goodness, bytes
4855*01826a49SYabin Cui  *  // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32.
4856*01826a49SYabin Cui  *  // As expected, the upper and lower bytes are much worse.
4857*01826a49SYabin Cui  *
4858*01826a49SYabin Cui  * Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291
4859*01826a49SYabin Cui  *
4860*01826a49SYabin Cui  * Since our algorithm uses a pseudorandom secret to add some variance into the
4861*01826a49SYabin Cui  * mix, we don't need to (or want to) mix as often or as much as HighwayHash does.
4862*01826a49SYabin Cui  *
4863*01826a49SYabin Cui  * This isn't as tight as XXH3_accumulate, but is still written in SIMD to avoid
4864*01826a49SYabin Cui  * extraction.
4865*01826a49SYabin Cui  *
4866*01826a49SYabin Cui  * Both XXH3_64bits and XXH3_128bits use this subroutine.
4867*01826a49SYabin Cui  */
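
/*
 * Editorial sketch: in scalar terms, the scramble updates each 64-bit lane
 * roughly as follows (this mirrors XXH3_scalarScrambleRound() further below):
 *
 *   acc[i] ^= acc[i] >> 47;                // xorshift folds high bits down
 *   acc[i] ^= XXH_readLE64(secret + 8*i);  // re-key with the secret
 *   acc[i] *= XXH_PRIME32_1;               // 64-bit multiply by a 32-bit prime
 */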
4868*01826a49SYabin Cui 
4869*01826a49SYabin Cui XXH_FORCE_INLINE XXH_TARGET_AVX512 void
4870*01826a49SYabin Cui XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4871*01826a49SYabin Cui {
4872*01826a49SYabin Cui     XXH_ASSERT((((size_t)acc) & 63) == 0);
4873*01826a49SYabin Cui     XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
4874*01826a49SYabin Cui     {   __m512i* const xacc = (__m512i*) acc;
4875*01826a49SYabin Cui         const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1);
4876*01826a49SYabin Cui 
4877*01826a49SYabin Cui         /* xacc[0] ^= (xacc[0] >> 47) */
4878*01826a49SYabin Cui         __m512i const acc_vec     = *xacc;
4879*01826a49SYabin Cui         __m512i const shifted     = _mm512_srli_epi64    (acc_vec, 47);
4880*01826a49SYabin Cui         /* xacc[0] ^= secret; */
4881*01826a49SYabin Cui         __m512i const key_vec     = _mm512_loadu_si512   (secret);
4882*01826a49SYabin Cui         __m512i const data_key    = _mm512_ternarylogic_epi32(key_vec, acc_vec, shifted, 0x96 /* key_vec ^ acc_vec ^ shifted */);
4883*01826a49SYabin Cui 
4884*01826a49SYabin Cui         /* xacc[0] *= XXH_PRIME32_1; */
4885*01826a49SYabin Cui         __m512i const data_key_hi = _mm512_srli_epi64 (data_key, 32);
4886*01826a49SYabin Cui         __m512i const prod_lo     = _mm512_mul_epu32     (data_key, prime32);
4887*01826a49SYabin Cui         __m512i const prod_hi     = _mm512_mul_epu32     (data_key_hi, prime32);
4888*01826a49SYabin Cui         *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32));
4889*01826a49SYabin Cui     }
4890*01826a49SYabin Cui }
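
/*
 * Editorial note on the 0x96 immediate above: _mm512_ternarylogic_epi32()
 * evaluates an arbitrary three-input boolean function described by an 8-bit
 * truth table. 0x96 == 0b10010110 is the truth table of A ^ B ^ C, so one
 * instruction computes key_vec ^ acc_vec ^ shifted, fusing both XORs of the
 * scramble step.
 */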
4891*01826a49SYabin Cui 
4892*01826a49SYabin Cui XXH_FORCE_INLINE XXH_TARGET_AVX512 void
4893*01826a49SYabin Cui XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
4894*01826a49SYabin Cui {
4895*01826a49SYabin Cui     XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0);
4896*01826a49SYabin Cui     XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64);
4897*01826a49SYabin Cui     XXH_ASSERT(((size_t)customSecret & 63) == 0);
4898*01826a49SYabin Cui     (void)(&XXH_writeLE64);
4899*01826a49SYabin Cui     {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i);
4900*01826a49SYabin Cui         __m512i const seed_pos = _mm512_set1_epi64((xxh_i64)seed64);
4901*01826a49SYabin Cui         __m512i const seed     = _mm512_mask_sub_epi64(seed_pos, 0xAA, _mm512_set1_epi8(0), seed_pos);
4902*01826a49SYabin Cui 
4903*01826a49SYabin Cui         const __m512i* const src  = (const __m512i*) ((const void*) XXH3_kSecret);
4904*01826a49SYabin Cui               __m512i* const dest = (      __m512i*) customSecret;
4905*01826a49SYabin Cui         int i;
4906*01826a49SYabin Cui         XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */
4907*01826a49SYabin Cui         XXH_ASSERT(((size_t)dest & 63) == 0);
4908*01826a49SYabin Cui         for (i=0; i < nbRounds; ++i) {
4909*01826a49SYabin Cui             dest[i] = _mm512_add_epi64(_mm512_load_si512(src + i), seed);
4910*01826a49SYabin Cui     }   }
4911*01826a49SYabin Cui }
4912*01826a49SYabin Cui 
4913*01826a49SYabin Cui #endif
4914*01826a49SYabin Cui 
4915*01826a49SYabin Cui #if (XXH_VECTOR == XXH_AVX2) \
4916*01826a49SYabin Cui     || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0)
4917*01826a49SYabin Cui 
4918*01826a49SYabin Cui #ifndef XXH_TARGET_AVX2
4919*01826a49SYabin Cui # define XXH_TARGET_AVX2  /* disable attribute target */
4920*01826a49SYabin Cui #endif
4921*01826a49SYabin Cui 
4922*01826a49SYabin Cui XXH_FORCE_INLINE XXH_TARGET_AVX2 void
4923*01826a49SYabin Cui XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc,
4924*01826a49SYabin Cui                     const void* XXH_RESTRICT input,
4925*01826a49SYabin Cui                     const void* XXH_RESTRICT secret)
4926*01826a49SYabin Cui {
4927*01826a49SYabin Cui     XXH_ASSERT((((size_t)acc) & 31) == 0);
4928*01826a49SYabin Cui     {   __m256i* const xacc    =       (__m256i *) acc;
4929*01826a49SYabin Cui         /* Unaligned. This is mainly for pointer arithmetic, and because
4930*01826a49SYabin Cui          * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
4931*01826a49SYabin Cui         const         __m256i* const xinput  = (const __m256i *) input;
4932*01826a49SYabin Cui         /* Unaligned. This is mainly for pointer arithmetic, and because
4933*01826a49SYabin Cui          * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
4934*01826a49SYabin Cui         const         __m256i* const xsecret = (const __m256i *) secret;
4935*01826a49SYabin Cui 
4936*01826a49SYabin Cui         size_t i;
4937*01826a49SYabin Cui         for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
4938*01826a49SYabin Cui             /* data_vec    = xinput[i]; */
4939*01826a49SYabin Cui             __m256i const data_vec    = _mm256_loadu_si256    (xinput+i);
4940*01826a49SYabin Cui             /* key_vec     = xsecret[i]; */
4941*01826a49SYabin Cui             __m256i const key_vec     = _mm256_loadu_si256   (xsecret+i);
4942*01826a49SYabin Cui             /* data_key    = data_vec ^ key_vec; */
4943*01826a49SYabin Cui             __m256i const data_key    = _mm256_xor_si256     (data_vec, key_vec);
4944*01826a49SYabin Cui             /* data_key_lo = data_key >> 32; */
4945*01826a49SYabin Cui             __m256i const data_key_lo = _mm256_srli_epi64 (data_key, 32);
4946*01826a49SYabin Cui             /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
4947*01826a49SYabin Cui             __m256i const product     = _mm256_mul_epu32     (data_key, data_key_lo);
4948*01826a49SYabin Cui             /* xacc[i] += swap(data_vec); */
4949*01826a49SYabin Cui             __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
4950*01826a49SYabin Cui             __m256i const sum       = _mm256_add_epi64(xacc[i], data_swap);
4951*01826a49SYabin Cui             /* xacc[i] += product; */
4952*01826a49SYabin Cui             xacc[i] = _mm256_add_epi64(product, sum);
4953*01826a49SYabin Cui     }   }
4954*01826a49SYabin Cui }
4955*01826a49SYabin Cui XXH_FORCE_INLINE XXH_TARGET_AVX2 XXH3_ACCUMULATE_TEMPLATE(avx2)
4956*01826a49SYabin Cui 
4957*01826a49SYabin Cui XXH_FORCE_INLINE XXH_TARGET_AVX2 void
4958*01826a49SYabin Cui XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4959*01826a49SYabin Cui {
4960*01826a49SYabin Cui     XXH_ASSERT((((size_t)acc) & 31) == 0);
4961*01826a49SYabin Cui     {   __m256i* const xacc = (__m256i*) acc;
4962*01826a49SYabin Cui         /* Unaligned. This is mainly for pointer arithmetic, and because
4963*01826a49SYabin Cui          * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
4964*01826a49SYabin Cui         const         __m256i* const xsecret = (const __m256i *) secret;
4965*01826a49SYabin Cui         const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1);
4966*01826a49SYabin Cui 
4967*01826a49SYabin Cui         size_t i;
4968*01826a49SYabin Cui         for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
4969*01826a49SYabin Cui             /* xacc[i] ^= (xacc[i] >> 47) */
4970*01826a49SYabin Cui             __m256i const acc_vec     = xacc[i];
4971*01826a49SYabin Cui             __m256i const shifted     = _mm256_srli_epi64    (acc_vec, 47);
4972*01826a49SYabin Cui             __m256i const data_vec    = _mm256_xor_si256     (acc_vec, shifted);
4973*01826a49SYabin Cui             /* xacc[i] ^= xsecret; */
4974*01826a49SYabin Cui             __m256i const key_vec     = _mm256_loadu_si256   (xsecret+i);
4975*01826a49SYabin Cui             __m256i const data_key    = _mm256_xor_si256     (data_vec, key_vec);
4976*01826a49SYabin Cui 
4977*01826a49SYabin Cui             /* xacc[i] *= XXH_PRIME32_1; */
4978*01826a49SYabin Cui             __m256i const data_key_hi = _mm256_srli_epi64 (data_key, 32);
4979*01826a49SYabin Cui             __m256i const prod_lo     = _mm256_mul_epu32     (data_key, prime32);
4980*01826a49SYabin Cui             __m256i const prod_hi     = _mm256_mul_epu32     (data_key_hi, prime32);
4981*01826a49SYabin Cui             xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32));
4982*01826a49SYabin Cui         }
4983*01826a49SYabin Cui     }
4984*01826a49SYabin Cui }
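
/*
 * Editorial note: SSE2/AVX2 have no 64x64-bit vector multiply, so the
 * multiply by the 32-bit prime is decomposed using the identity (mod 2^64):
 *
 *   x * p == (x & 0xFFFFFFFF) * p + (((x >> 32) * p) << 32)
 *
 * which only needs the 32x32->64 _mm256_mul_epu32()/_mm_mul_epu32(), one
 * shift, and one add -- exactly how prod_lo and prod_hi are combined above.
 */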
4985*01826a49SYabin Cui 
4986*01826a49SYabin Cui XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
4987*01826a49SYabin Cui {
4988*01826a49SYabin Cui     XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0);
4989*01826a49SYabin Cui     XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6);
4990*01826a49SYabin Cui     XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64);
4991*01826a49SYabin Cui     (void)(&XXH_writeLE64);
4992*01826a49SYabin Cui     XXH_PREFETCH(customSecret);
4993*01826a49SYabin Cui     {   __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64);
4994*01826a49SYabin Cui 
4995*01826a49SYabin Cui         const __m256i* const src  = (const __m256i*) ((const void*) XXH3_kSecret);
4996*01826a49SYabin Cui               __m256i*       dest = (      __m256i*) customSecret;
4997*01826a49SYabin Cui 
4998*01826a49SYabin Cui #       if defined(__GNUC__) || defined(__clang__)
4999*01826a49SYabin Cui         /*
5000*01826a49SYabin Cui          * On GCC & Clang, marking 'dest' as modified causes the compiler to:
5001*01826a49SYabin Cui          *   - not extract the secret from sse registers in the internal loop
5002*01826a49SYabin Cui          *   - use fewer common registers, and avoid pushing these registers onto the stack
5003*01826a49SYabin Cui          */
5004*01826a49SYabin Cui         XXH_COMPILER_GUARD(dest);
5005*01826a49SYabin Cui #       endif
5006*01826a49SYabin Cui         XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */
5007*01826a49SYabin Cui         XXH_ASSERT(((size_t)dest & 31) == 0);
5008*01826a49SYabin Cui 
5009*01826a49SYabin Cui         /* GCC -O2 needs the loop unrolled manually */
5010*01826a49SYabin Cui         dest[0] = _mm256_add_epi64(_mm256_load_si256(src+0), seed);
5011*01826a49SYabin Cui         dest[1] = _mm256_add_epi64(_mm256_load_si256(src+1), seed);
5012*01826a49SYabin Cui         dest[2] = _mm256_add_epi64(_mm256_load_si256(src+2), seed);
5013*01826a49SYabin Cui         dest[3] = _mm256_add_epi64(_mm256_load_si256(src+3), seed);
5014*01826a49SYabin Cui         dest[4] = _mm256_add_epi64(_mm256_load_si256(src+4), seed);
5015*01826a49SYabin Cui         dest[5] = _mm256_add_epi64(_mm256_load_si256(src+5), seed);
5016*01826a49SYabin Cui     }
5017*01826a49SYabin Cui }
5018*01826a49SYabin Cui 
5019*01826a49SYabin Cui #endif
5020*01826a49SYabin Cui 
5021*01826a49SYabin Cui /* x86dispatch always generates SSE2 */
5022*01826a49SYabin Cui #if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH)
5023*01826a49SYabin Cui 
5024*01826a49SYabin Cui #ifndef XXH_TARGET_SSE2
5025*01826a49SYabin Cui # define XXH_TARGET_SSE2  /* disable attribute target */
5026*01826a49SYabin Cui #endif
5027*01826a49SYabin Cui 
5028*01826a49SYabin Cui XXH_FORCE_INLINE XXH_TARGET_SSE2 void
5029*01826a49SYabin Cui XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc,
5030*01826a49SYabin Cui                     const void* XXH_RESTRICT input,
5031*01826a49SYabin Cui                     const void* XXH_RESTRICT secret)
5032*01826a49SYabin Cui {
5033*01826a49SYabin Cui     /* SSE2 is just a half-scale version of the AVX2 version. */
5034*01826a49SYabin Cui     XXH_ASSERT((((size_t)acc) & 15) == 0);
5035*01826a49SYabin Cui     {   __m128i* const xacc    =       (__m128i *) acc;
5036*01826a49SYabin Cui         /* Unaligned. This is mainly for pointer arithmetic, and because
5037*01826a49SYabin Cui          * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
5038*01826a49SYabin Cui         const         __m128i* const xinput  = (const __m128i *) input;
5039*01826a49SYabin Cui         /* Unaligned. This is mainly for pointer arithmetic, and because
5040*01826a49SYabin Cui          * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
5041*01826a49SYabin Cui         const         __m128i* const xsecret = (const __m128i *) secret;
5042*01826a49SYabin Cui 
5043*01826a49SYabin Cui         size_t i;
5044*01826a49SYabin Cui         for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
5045*01826a49SYabin Cui             /* data_vec    = xinput[i]; */
5046*01826a49SYabin Cui             __m128i const data_vec    = _mm_loadu_si128   (xinput+i);
5047*01826a49SYabin Cui             /* key_vec     = xsecret[i]; */
5048*01826a49SYabin Cui             __m128i const key_vec     = _mm_loadu_si128   (xsecret+i);
5049*01826a49SYabin Cui             /* data_key    = data_vec ^ key_vec; */
5050*01826a49SYabin Cui             __m128i const data_key    = _mm_xor_si128     (data_vec, key_vec);
5051*01826a49SYabin Cui             /* data_key_lo = data_key >> 32; */
5052*01826a49SYabin Cui             __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
5053*01826a49SYabin Cui             /* product     = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
5054*01826a49SYabin Cui             __m128i const product     = _mm_mul_epu32     (data_key, data_key_lo);
5055*01826a49SYabin Cui             /* xacc[i] += swap(data_vec); */
5056*01826a49SYabin Cui             __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2));
5057*01826a49SYabin Cui             __m128i const sum       = _mm_add_epi64(xacc[i], data_swap);
5058*01826a49SYabin Cui             /* xacc[i] += product; */
5059*01826a49SYabin Cui             xacc[i] = _mm_add_epi64(product, sum);
5060*01826a49SYabin Cui     }   }
5061*01826a49SYabin Cui }
5062*01826a49SYabin Cui XXH_FORCE_INLINE XXH_TARGET_SSE2 XXH3_ACCUMULATE_TEMPLATE(sse2)
5063*01826a49SYabin Cui 
5064*01826a49SYabin Cui XXH_FORCE_INLINE XXH_TARGET_SSE2 void
5065*01826a49SYabin Cui XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
5066*01826a49SYabin Cui {
5067*01826a49SYabin Cui     XXH_ASSERT((((size_t)acc) & 15) == 0);
5068*01826a49SYabin Cui     {   __m128i* const xacc = (__m128i*) acc;
5069*01826a49SYabin Cui         /* Unaligned. This is mainly for pointer arithmetic, and because
5070*01826a49SYabin Cui          * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
5071*01826a49SYabin Cui         const         __m128i* const xsecret = (const __m128i *) secret;
5072*01826a49SYabin Cui         const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1);
5073*01826a49SYabin Cui 
5074*01826a49SYabin Cui         size_t i;
5075*01826a49SYabin Cui         for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
5076*01826a49SYabin Cui             /* xacc[i] ^= (xacc[i] >> 47) */
5077*01826a49SYabin Cui             __m128i const acc_vec     = xacc[i];
5078*01826a49SYabin Cui             __m128i const shifted     = _mm_srli_epi64    (acc_vec, 47);
5079*01826a49SYabin Cui             __m128i const data_vec    = _mm_xor_si128     (acc_vec, shifted);
5080*01826a49SYabin Cui             /* xacc[i] ^= xsecret[i]; */
5081*01826a49SYabin Cui             __m128i const key_vec     = _mm_loadu_si128   (xsecret+i);
5082*01826a49SYabin Cui             __m128i const data_key    = _mm_xor_si128     (data_vec, key_vec);
5083*01826a49SYabin Cui 
5084*01826a49SYabin Cui             /* xacc[i] *= XXH_PRIME32_1; */
5085*01826a49SYabin Cui             __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
5086*01826a49SYabin Cui             __m128i const prod_lo     = _mm_mul_epu32     (data_key, prime32);
5087*01826a49SYabin Cui             __m128i const prod_hi     = _mm_mul_epu32     (data_key_hi, prime32);
5088*01826a49SYabin Cui             xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32));
5089*01826a49SYabin Cui         }
5090*01826a49SYabin Cui     }
5091*01826a49SYabin Cui }
5092*01826a49SYabin Cui 
5093*01826a49SYabin Cui XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
5094*01826a49SYabin Cui {
5095*01826a49SYabin Cui     XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
5096*01826a49SYabin Cui     (void)(&XXH_writeLE64);
5097*01826a49SYabin Cui     {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i);
5098*01826a49SYabin Cui 
5099*01826a49SYabin Cui #       if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
5100*01826a49SYabin Cui         /* MSVC 32bit mode does not support _mm_set_epi64x before 2015 */
5101*01826a49SYabin Cui         XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) };
5102*01826a49SYabin Cui         __m128i const seed = _mm_load_si128((__m128i const*)seed64x2);
5103*01826a49SYabin Cui #       else
5104*01826a49SYabin Cui         __m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64);
5105*01826a49SYabin Cui #       endif
5106*01826a49SYabin Cui         int i;
5107*01826a49SYabin Cui 
5108*01826a49SYabin Cui         const void* const src16 = XXH3_kSecret;
5109*01826a49SYabin Cui         __m128i* dst16 = (__m128i*) customSecret;
5110*01826a49SYabin Cui #       if defined(__GNUC__) || defined(__clang__)
5111*01826a49SYabin Cui         /*
5112*01826a49SYabin Cui          * On GCC & Clang, marking 'dst16' as modified causes the compiler to:
5113*01826a49SYabin Cui          *   - not extract the secret from sse registers in the internal loop
5114*01826a49SYabin Cui          *   - use fewer common registers, and avoid pushing these registers onto the stack
5115*01826a49SYabin Cui          */
5116*01826a49SYabin Cui         XXH_COMPILER_GUARD(dst16);
5117*01826a49SYabin Cui #       endif
5118*01826a49SYabin Cui         XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */
5119*01826a49SYabin Cui         XXH_ASSERT(((size_t)dst16 & 15) == 0);
5120*01826a49SYabin Cui 
5121*01826a49SYabin Cui         for (i=0; i < nbRounds; ++i) {
5122*01826a49SYabin Cui             dst16[i] = _mm_add_epi64(_mm_load_si128((const __m128i *)src16+i), seed);
5123*01826a49SYabin Cui     }   }
5124*01826a49SYabin Cui }
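
/*
 * Editorial sketch: in both the SSE2 and AVX2 variants above, the seed
 * vector alternates (+seed64, -seed64) across 64-bit lanes, so in scalar
 * terms (mod 2^64):
 *
 *   customSecret[2*i + 0] = kSecret[2*i + 0] + seed64;
 *   customSecret[2*i + 1] = kSecret[2*i + 1] - seed64;
 *
 * Adjacent words therefore shift in opposite directions, and the sum of each
 * pair of words is unchanged from the default secret.
 */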
5125*01826a49SYabin Cui 
5126*01826a49SYabin Cui #endif
5127*01826a49SYabin Cui 
5128*01826a49SYabin Cui #if (XXH_VECTOR == XXH_NEON)
5129*01826a49SYabin Cui 
5130*01826a49SYabin Cui /* forward declarations for the scalar routines */
5131*01826a49SYabin Cui XXH_FORCE_INLINE void
5132*01826a49SYabin Cui XXH3_scalarRound(void* XXH_RESTRICT acc, void const* XXH_RESTRICT input,
5133*01826a49SYabin Cui                  void const* XXH_RESTRICT secret, size_t lane);
5134*01826a49SYabin Cui 
5135*01826a49SYabin Cui XXH_FORCE_INLINE void
5136*01826a49SYabin Cui XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
5137*01826a49SYabin Cui                          void const* XXH_RESTRICT secret, size_t lane);
5138*01826a49SYabin Cui 
5139*01826a49SYabin Cui /*!
5140*01826a49SYabin Cui  * @internal
5141*01826a49SYabin Cui  * @brief The bulk processing loop for NEON and WASM SIMD128.
5142*01826a49SYabin Cui  *
5143*01826a49SYabin Cui  * The NEON code path is actually partially scalar when running on AArch64. This
5144*01826a49SYabin Cui  * is to optimize the pipelining and can yield up to a 15% speedup depending on the
5145*01826a49SYabin Cui  * CPU, and it also mitigates some GCC codegen issues.
5146*01826a49SYabin Cui  *
5147*01826a49SYabin Cui  * @see XXH3_NEON_LANES for configuring this and details about this optimization.
5148*01826a49SYabin Cui  *
5149*01826a49SYabin Cui  * NEON's 32-bit to 64-bit long multiply takes a half vector of 32-bit
5150*01826a49SYabin Cui  * integers, unlike the other platforms, which mask full 64-bit vectors,
5151*01826a49SYabin Cui  * so the setup is more complicated than just shifting right.
5152*01826a49SYabin Cui  *
5153*01826a49SYabin Cui  * Additionally, there is an optimization for 4 lanes at once noted below.
5154*01826a49SYabin Cui  *
5155*01826a49SYabin Cui  * Since, as stated, the optimal number of lanes for Cortexes is 6, the
5156*01826a49SYabin Cui  * accumulate operation needs *three* variants: 4 NEON lanes at a time,
5157*01826a49SYabin Cui  * 2 NEON lanes for the remainder, and scalar for the leftover lanes.
5158*01826a49SYabin Cui  *
5159*01826a49SYabin Cui  * WASM's SIMD128 uses SIMDe's arm_neon.h polyfill because the intrinsics overlap
5160*01826a49SYabin Cui  * nearly perfectly.
5161*01826a49SYabin Cui  */
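
/*
 * Editorial example: with XXH3_NEON_LANES == 6, the loops below handle
 * lanes 0-3 in the 4-lane (two vectors per iteration) loop, lanes 4-5 in
 * the 2-lane remainder loop, and lanes 6-7 via XXH3_scalarRound(), letting
 * the scalar and NEON pipelines work in parallel.
 */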
5162*01826a49SYabin Cui 
5163*01826a49SYabin Cui XXH_FORCE_INLINE void
5164*01826a49SYabin Cui XXH3_accumulate_512_neon( void* XXH_RESTRICT acc,
5165*01826a49SYabin Cui                     const void* XXH_RESTRICT input,
5166*01826a49SYabin Cui                     const void* XXH_RESTRICT secret)
5167*01826a49SYabin Cui {
5168*01826a49SYabin Cui     XXH_ASSERT((((size_t)acc) & 15) == 0);
5169*01826a49SYabin Cui     XXH_STATIC_ASSERT(XXH3_NEON_LANES > 0 && XXH3_NEON_LANES <= XXH_ACC_NB && XXH3_NEON_LANES % 2 == 0);
5170*01826a49SYabin Cui     {   /* GCC for darwin arm64 does not like aliasing here */
5171*01826a49SYabin Cui         xxh_aliasing_uint64x2_t* const xacc = (xxh_aliasing_uint64x2_t*) acc;
5172*01826a49SYabin Cui         /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */
5173*01826a49SYabin Cui         uint8_t const* xinput = (const uint8_t *) input;
5174*01826a49SYabin Cui         uint8_t const* xsecret  = (const uint8_t *) secret;
5175*01826a49SYabin Cui 
5176*01826a49SYabin Cui         size_t i;
5177*01826a49SYabin Cui #ifdef __wasm_simd128__
5178*01826a49SYabin Cui         /*
5179*01826a49SYabin Cui          * On WASM SIMD128, Clang emits direct address loads when XXH3_kSecret
5180*01826a49SYabin Cui          * is constant propagated, which results in it converting it to this
5181*01826a49SYabin Cui          * is constant propagated, which results in code like this inside the
5182*01826a49SYabin Cui          * loop:
5183*01826a49SYabin Cui          *    a = v128.load(XXH3_kSecret +  0 + $secret_offset, offset = 0)
5184*01826a49SYabin Cui          *    b = v128.load(XXH3_kSecret + 16 + $secret_offset, offset = 0)
5185*01826a49SYabin Cui          *    ...
5186*01826a49SYabin Cui          *
5187*01826a49SYabin Cui          * This requires a full 32-bit address immediate (and therefore a 6 byte
5188*01826a49SYabin Cui          * instruction) as well as an add for each offset.
5189*01826a49SYabin Cui          *
5190*01826a49SYabin Cui          * Putting an asm guard prevents it from folding (at the cost of losing
5191*01826a49SYabin Cui          * the alignment hint), and uses the free offset in `v128.load` instead
5192*01826a49SYabin Cui          * of adding secret_offset each time which overall reduces code size by
5193*01826a49SYabin Cui          * about a kilobyte and improves performance.
5194*01826a49SYabin Cui          */
5195*01826a49SYabin Cui         XXH_COMPILER_GUARD(xsecret);
5196*01826a49SYabin Cui #endif
5197*01826a49SYabin Cui         /* Scalar lanes use the normal scalarRound routine */
5198*01826a49SYabin Cui         for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
5199*01826a49SYabin Cui             XXH3_scalarRound(acc, input, secret, i);
5200*01826a49SYabin Cui         }
5201*01826a49SYabin Cui         i = 0;
5202*01826a49SYabin Cui         /* 4 NEON lanes at a time. */
5203*01826a49SYabin Cui         for (; i+1 < XXH3_NEON_LANES / 2; i+=2) {
5204*01826a49SYabin Cui             /* data_vec = xinput[i]; */
5205*01826a49SYabin Cui             uint64x2_t data_vec_1 = XXH_vld1q_u64(xinput  + (i * 16));
5206*01826a49SYabin Cui             uint64x2_t data_vec_2 = XXH_vld1q_u64(xinput  + ((i+1) * 16));
5207*01826a49SYabin Cui             /* key_vec  = xsecret[i];  */
5208*01826a49SYabin Cui             uint64x2_t key_vec_1  = XXH_vld1q_u64(xsecret + (i * 16));
5209*01826a49SYabin Cui             uint64x2_t key_vec_2  = XXH_vld1q_u64(xsecret + ((i+1) * 16));
5210*01826a49SYabin Cui             /* data_swap = swap(data_vec) */
5211*01826a49SYabin Cui             uint64x2_t data_swap_1 = vextq_u64(data_vec_1, data_vec_1, 1);
5212*01826a49SYabin Cui             uint64x2_t data_swap_2 = vextq_u64(data_vec_2, data_vec_2, 1);
5213*01826a49SYabin Cui             /* data_key = data_vec ^ key_vec; */
5214*01826a49SYabin Cui             uint64x2_t data_key_1 = veorq_u64(data_vec_1, key_vec_1);
5215*01826a49SYabin Cui             uint64x2_t data_key_2 = veorq_u64(data_vec_2, key_vec_2);
5216*01826a49SYabin Cui 
5217*01826a49SYabin Cui             /*
5218*01826a49SYabin Cui              * If we reinterpret the 64x2 vectors as 32x4 vectors, we can use a
5219*01826a49SYabin Cui              * de-interleave operation for 4 lanes in 1 step with `vuzpq_u32` to
5220*01826a49SYabin Cui              * get one vector with the low 32 bits of each lane, and one vector
5221*01826a49SYabin Cui              * with the high 32 bits of each lane.
5222*01826a49SYabin Cui              *
5223*01826a49SYabin Cui              * The intrinsic returns a double vector because the original ARMv7-a
5224*01826a49SYabin Cui              * instruction modified both arguments in place. AArch64 and SIMD128 emit
5225*01826a49SYabin Cui              * two instructions from this intrinsic.
5226*01826a49SYabin Cui              *
5227*01826a49SYabin Cui              *  [ dk11L | dk11H | dk12L | dk12H ] -> [ dk11L | dk12L | dk21L | dk22L ]
5228*01826a49SYabin Cui              *  [ dk21L | dk21H | dk22L | dk22H ] -> [ dk11H | dk12H | dk21H | dk22H ]
5229*01826a49SYabin Cui              */
5230*01826a49SYabin Cui             uint32x4x2_t unzipped = vuzpq_u32(
5231*01826a49SYabin Cui                 vreinterpretq_u32_u64(data_key_1),
5232*01826a49SYabin Cui                 vreinterpretq_u32_u64(data_key_2)
5233*01826a49SYabin Cui             );
5234*01826a49SYabin Cui             /* data_key_lo = data_key & 0xFFFFFFFF */
5235*01826a49SYabin Cui             uint32x4_t data_key_lo = unzipped.val[0];
5236*01826a49SYabin Cui             /* data_key_hi = data_key >> 32 */
5237*01826a49SYabin Cui             uint32x4_t data_key_hi = unzipped.val[1];
5238*01826a49SYabin Cui             /*
5239*01826a49SYabin Cui              * Then, we can split the vectors horizontally and multiply. Like most
5240*01826a49SYabin Cui              * widening intrinsics, the multiply has a variant that works on the high
5241*01826a49SYabin Cui              * half vectors for free on AArch64. A similar instruction is available on SIMD128.
5242*01826a49SYabin Cui              *
5243*01826a49SYabin Cui              * sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi
5244*01826a49SYabin Cui              */
5245*01826a49SYabin Cui             uint64x2_t sum_1 = XXH_vmlal_low_u32(data_swap_1, data_key_lo, data_key_hi);
5246*01826a49SYabin Cui             uint64x2_t sum_2 = XXH_vmlal_high_u32(data_swap_2, data_key_lo, data_key_hi);
5247*01826a49SYabin Cui             /*
5248*01826a49SYabin Cui              * Clang reorders
5249*01826a49SYabin Cui              *    a += b * c;     // umlal   swap.2d, dkl.2s, dkh.2s
5250*01826a49SYabin Cui              *    c += a;         // add     acc.2d, acc.2d, swap.2d
5251*01826a49SYabin Cui              * to
5252*01826a49SYabin Cui              *    c += a;         // add     acc.2d, acc.2d, swap.2d
5253*01826a49SYabin Cui              *    c += b * c;     // umlal   acc.2d, dkl.2s, dkh.2s
5254*01826a49SYabin Cui              *
5255*01826a49SYabin Cui              * While it would make sense in theory since the addition is faster,
5256*01826a49SYabin Cui              * for reasons likely related to umlal being limited to certain NEON
5257*01826a49SYabin Cui              * pipelines, this is worse. A compiler guard fixes this.
5258*01826a49SYabin Cui              */
5259*01826a49SYabin Cui             XXH_COMPILER_GUARD_CLANG_NEON(sum_1);
5260*01826a49SYabin Cui             XXH_COMPILER_GUARD_CLANG_NEON(sum_2);
5261*01826a49SYabin Cui             /* xacc[i] = acc_vec + sum; */
5262*01826a49SYabin Cui             xacc[i]   = vaddq_u64(xacc[i], sum_1);
5263*01826a49SYabin Cui             xacc[i+1] = vaddq_u64(xacc[i+1], sum_2);
5264*01826a49SYabin Cui         }
5265*01826a49SYabin Cui         /* Operate on the remaining NEON lanes 2 at a time. */
5266*01826a49SYabin Cui         for (; i < XXH3_NEON_LANES / 2; i++) {
5267*01826a49SYabin Cui             /* data_vec = xinput[i]; */
5268*01826a49SYabin Cui             uint64x2_t data_vec = XXH_vld1q_u64(xinput  + (i * 16));
5269*01826a49SYabin Cui             /* key_vec  = xsecret[i];  */
5270*01826a49SYabin Cui             uint64x2_t key_vec  = XXH_vld1q_u64(xsecret + (i * 16));
5271*01826a49SYabin Cui             /* acc_vec_2 = swap(data_vec) */
5272*01826a49SYabin Cui             uint64x2_t data_swap = vextq_u64(data_vec, data_vec, 1);
5273*01826a49SYabin Cui             /* data_key = data_vec ^ key_vec; */
5274*01826a49SYabin Cui             uint64x2_t data_key = veorq_u64(data_vec, key_vec);
5275*01826a49SYabin Cui             /* For two lanes, just use VMOVN and VSHRN. */
5276*01826a49SYabin Cui             /* data_key_lo = data_key & 0xFFFFFFFF; */
5277*01826a49SYabin Cui             uint32x2_t data_key_lo = vmovn_u64(data_key);
5278*01826a49SYabin Cui             /* data_key_hi = data_key >> 32; */
5279*01826a49SYabin Cui             uint32x2_t data_key_hi = vshrn_n_u64(data_key, 32);
5280*01826a49SYabin Cui             /* sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi; */
5281*01826a49SYabin Cui             uint64x2_t sum = vmlal_u32(data_swap, data_key_lo, data_key_hi);
5282*01826a49SYabin Cui             /* Same Clang workaround as before */
5283*01826a49SYabin Cui             XXH_COMPILER_GUARD_CLANG_NEON(sum);
5284*01826a49SYabin Cui             /* xacc[i] = acc_vec + sum; */
5285*01826a49SYabin Cui             xacc[i] = vaddq_u64 (xacc[i], sum);
5286*01826a49SYabin Cui         }
5287*01826a49SYabin Cui     }
5288*01826a49SYabin Cui }
5289*01826a49SYabin Cui XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(neon)
5290*01826a49SYabin Cui 
5291*01826a49SYabin Cui XXH_FORCE_INLINE void
5292*01826a49SYabin Cui XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
5293*01826a49SYabin Cui {
5294*01826a49SYabin Cui     XXH_ASSERT((((size_t)acc) & 15) == 0);
5295*01826a49SYabin Cui 
5296*01826a49SYabin Cui     {   xxh_aliasing_uint64x2_t* xacc       = (xxh_aliasing_uint64x2_t*) acc;
5297*01826a49SYabin Cui         uint8_t const* xsecret = (uint8_t const*) secret;
5298*01826a49SYabin Cui 
5299*01826a49SYabin Cui         size_t i;
5300*01826a49SYabin Cui         /* WASM uses operator overloads and doesn't need these. */
5301*01826a49SYabin Cui #ifndef __wasm_simd128__
5302*01826a49SYabin Cui         /* { prime32_1, prime32_1 } */
5303*01826a49SYabin Cui         uint32x2_t const kPrimeLo = vdup_n_u32(XXH_PRIME32_1);
5304*01826a49SYabin Cui         /* { 0, prime32_1, 0, prime32_1 } */
5305*01826a49SYabin Cui         uint32x4_t const kPrimeHi = vreinterpretq_u32_u64(vdupq_n_u64((xxh_u64)XXH_PRIME32_1 << 32));
5306*01826a49SYabin Cui #endif
5307*01826a49SYabin Cui 
5308*01826a49SYabin Cui         /* AArch64 uses both scalar and neon at the same time */
5309*01826a49SYabin Cui         for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
5310*01826a49SYabin Cui             XXH3_scalarScrambleRound(acc, secret, i);
5311*01826a49SYabin Cui         }
5312*01826a49SYabin Cui         for (i=0; i < XXH3_NEON_LANES / 2; i++) {
5313*01826a49SYabin Cui             /* xacc[i] ^= (xacc[i] >> 47); */
5314*01826a49SYabin Cui             uint64x2_t acc_vec  = xacc[i];
5315*01826a49SYabin Cui             uint64x2_t shifted  = vshrq_n_u64(acc_vec, 47);
5316*01826a49SYabin Cui             uint64x2_t data_vec = veorq_u64(acc_vec, shifted);
5317*01826a49SYabin Cui 
5318*01826a49SYabin Cui             /* xacc[i] ^= xsecret[i]; */
5319*01826a49SYabin Cui             uint64x2_t key_vec  = XXH_vld1q_u64(xsecret + (i * 16));
5320*01826a49SYabin Cui             uint64x2_t data_key = veorq_u64(data_vec, key_vec);
5321*01826a49SYabin Cui             /* xacc[i] *= XXH_PRIME32_1 */
5322*01826a49SYabin Cui #ifdef __wasm_simd128__
5323*01826a49SYabin Cui             /* SIMD128 has multiply by u64x2, use it instead of expanding and scalarizing */
5324*01826a49SYabin Cui             xacc[i] = data_key * XXH_PRIME32_1;
5325*01826a49SYabin Cui #else
5326*01826a49SYabin Cui             /*
5327*01826a49SYabin Cui              * Expanded version with portable NEON intrinsics
5328*01826a49SYabin Cui              *
5329*01826a49SYabin Cui              *    lo(x) * lo(y) + (hi(x) * lo(y) << 32)
5330*01826a49SYabin Cui              *
5331*01826a49SYabin Cui              * prod_hi = hi(data_key) * lo(prime) << 32
5332*01826a49SYabin Cui              *
5333*01826a49SYabin Cui              * Since we only need 32 bits of this multiply a trick can be used, reinterpreting the vector
5334*01826a49SYabin Cui              * as a uint32x4_t and multiplying by { 0, prime, 0, prime } to cancel out the unwanted bits
5335*01826a49SYabin Cui              * and avoid the shift.
5336*01826a49SYabin Cui              */
5337*01826a49SYabin Cui             uint32x4_t prod_hi = vmulq_u32 (vreinterpretq_u32_u64(data_key), kPrimeHi);
5338*01826a49SYabin Cui             /* Extract low bits for vmlal_u32  */
5339*01826a49SYabin Cui             uint32x2_t data_key_lo = vmovn_u64(data_key);
5340*01826a49SYabin Cui             /* xacc[i] = prod_hi + lo(data_key) * XXH_PRIME32_1; */
5341*01826a49SYabin Cui             xacc[i] = vmlal_u32(vreinterpretq_u64_u32(prod_hi), data_key_lo, kPrimeLo);
5342*01826a49SYabin Cui #endif
5343*01826a49SYabin Cui         }
5344*01826a49SYabin Cui     }
5345*01826a49SYabin Cui }
5346*01826a49SYabin Cui #endif
5347*01826a49SYabin Cui 
5348*01826a49SYabin Cui #if (XXH_VECTOR == XXH_VSX)
5349*01826a49SYabin Cui 
5350*01826a49SYabin Cui XXH_FORCE_INLINE void
5351*01826a49SYabin Cui XXH3_accumulate_512_vsx(  void* XXH_RESTRICT acc,
5352*01826a49SYabin Cui                     const void* XXH_RESTRICT input,
5353*01826a49SYabin Cui                     const void* XXH_RESTRICT secret)
5354*01826a49SYabin Cui {
5355*01826a49SYabin Cui     /* presumed aligned */
5356*01826a49SYabin Cui     xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc;
5357*01826a49SYabin Cui     xxh_u8 const* const xinput   = (xxh_u8 const*) input;   /* no alignment restriction */
5358*01826a49SYabin Cui     xxh_u8 const* const xsecret  = (xxh_u8 const*) secret;    /* no alignment restriction */
5359*01826a49SYabin Cui     xxh_u64x2 const v32 = { 32, 32 };
5360*01826a49SYabin Cui     size_t i;
5361*01826a49SYabin Cui     for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
5362*01826a49SYabin Cui         /* data_vec = xinput[i]; */
5363*01826a49SYabin Cui         xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + 16*i);
5364*01826a49SYabin Cui         /* key_vec = xsecret[i]; */
5365*01826a49SYabin Cui         xxh_u64x2 const key_vec  = XXH_vec_loadu(xsecret + 16*i);
5366*01826a49SYabin Cui         xxh_u64x2 const data_key = data_vec ^ key_vec;
5367*01826a49SYabin Cui         /* shuffled = (data_key << 32) | (data_key >> 32); */
5368*01826a49SYabin Cui         xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32);
5369*01826a49SYabin Cui         /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */
5370*01826a49SYabin Cui         xxh_u64x2 const product  = XXH_vec_mulo((xxh_u32x4)data_key, shuffled);
5371*01826a49SYabin Cui         /* acc_vec = xacc[i]; */
5372*01826a49SYabin Cui         xxh_u64x2 acc_vec        = xacc[i];
5373*01826a49SYabin Cui         acc_vec += product;
5374*01826a49SYabin Cui 
5375*01826a49SYabin Cui         /* swap high and low halves */
5376*01826a49SYabin Cui #ifdef __s390x__
5377*01826a49SYabin Cui         acc_vec += vec_permi(data_vec, data_vec, 2);
5378*01826a49SYabin Cui #else
5379*01826a49SYabin Cui         acc_vec += vec_xxpermdi(data_vec, data_vec, 2);
5380*01826a49SYabin Cui #endif
5381*01826a49SYabin Cui         xacc[i] = acc_vec;
5382*01826a49SYabin Cui     }
5383*01826a49SYabin Cui }
5384*01826a49SYabin Cui XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(vsx)
5385*01826a49SYabin Cui 
5386*01826a49SYabin Cui XXH_FORCE_INLINE void
5387*01826a49SYabin Cui XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
5388*01826a49SYabin Cui {
5389*01826a49SYabin Cui     XXH_ASSERT((((size_t)acc) & 15) == 0);
5390*01826a49SYabin Cui 
5391*01826a49SYabin Cui     {   xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc;
5392*01826a49SYabin Cui         const xxh_u8* const xsecret = (const xxh_u8*) secret;
5393*01826a49SYabin Cui         /* constants */
5394*01826a49SYabin Cui         xxh_u64x2 const v32  = { 32, 32 };
5395*01826a49SYabin Cui         xxh_u64x2 const v47 = { 47, 47 };
5396*01826a49SYabin Cui         xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 };
5397*01826a49SYabin Cui         size_t i;
5398*01826a49SYabin Cui         for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
5399*01826a49SYabin Cui             /* xacc[i] ^= (xacc[i] >> 47); */
5400*01826a49SYabin Cui             xxh_u64x2 const acc_vec  = xacc[i];
5401*01826a49SYabin Cui             xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47);
5402*01826a49SYabin Cui 
5403*01826a49SYabin Cui             /* xacc[i] ^= xsecret[i]; */
5404*01826a49SYabin Cui             xxh_u64x2 const key_vec  = XXH_vec_loadu(xsecret + 16*i);
5405*01826a49SYabin Cui             xxh_u64x2 const data_key = data_vec ^ key_vec;
5406*01826a49SYabin Cui 
5407*01826a49SYabin Cui             /* xacc[i] *= XXH_PRIME32_1 */
5408*01826a49SYabin Cui             /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF);  */
5409*01826a49SYabin Cui             xxh_u64x2 const prod_even  = XXH_vec_mule((xxh_u32x4)data_key, prime);
5410*01826a49SYabin Cui             /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32);  */
5411*01826a49SYabin Cui             xxh_u64x2 const prod_odd  = XXH_vec_mulo((xxh_u32x4)data_key, prime);
5412*01826a49SYabin Cui             xacc[i] = prod_odd + (prod_even << v32);
5413*01826a49SYabin Cui     }   }
5414*01826a49SYabin Cui }
5415*01826a49SYabin Cui 
5416*01826a49SYabin Cui #endif
5417*01826a49SYabin Cui 
5418*01826a49SYabin Cui #if (XXH_VECTOR == XXH_SVE)
5419*01826a49SYabin Cui 
5420*01826a49SYabin Cui XXH_FORCE_INLINE void
5421*01826a49SYabin Cui XXH3_accumulate_512_sve( void* XXH_RESTRICT acc,
5422*01826a49SYabin Cui                    const void* XXH_RESTRICT input,
5423*01826a49SYabin Cui                    const void* XXH_RESTRICT secret)
5424*01826a49SYabin Cui {
5425*01826a49SYabin Cui     uint64_t *xacc = (uint64_t *)acc;
5426*01826a49SYabin Cui     const uint64_t *xinput = (const uint64_t *)(const void *)input;
5427*01826a49SYabin Cui     const uint64_t *xsecret = (const uint64_t *)(const void *)secret;
5428*01826a49SYabin Cui     svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1);
5429*01826a49SYabin Cui     uint64_t element_count = svcntd();
5430*01826a49SYabin Cui     if (element_count >= 8) {
5431*01826a49SYabin Cui         svbool_t mask = svptrue_pat_b64(SV_VL8);
5432*01826a49SYabin Cui         svuint64_t vacc = svld1_u64(mask, xacc);
5433*01826a49SYabin Cui         ACCRND(vacc, 0);
5434*01826a49SYabin Cui         svst1_u64(mask, xacc, vacc);
5435*01826a49SYabin Cui     } else if (element_count == 2) {   /* sve128 */
5436*01826a49SYabin Cui         svbool_t mask = svptrue_pat_b64(SV_VL2);
5437*01826a49SYabin Cui         svuint64_t acc0 = svld1_u64(mask, xacc + 0);
5438*01826a49SYabin Cui         svuint64_t acc1 = svld1_u64(mask, xacc + 2);
5439*01826a49SYabin Cui         svuint64_t acc2 = svld1_u64(mask, xacc + 4);
5440*01826a49SYabin Cui         svuint64_t acc3 = svld1_u64(mask, xacc + 6);
5441*01826a49SYabin Cui         ACCRND(acc0, 0);
5442*01826a49SYabin Cui         ACCRND(acc1, 2);
5443*01826a49SYabin Cui         ACCRND(acc2, 4);
5444*01826a49SYabin Cui         ACCRND(acc3, 6);
5445*01826a49SYabin Cui         svst1_u64(mask, xacc + 0, acc0);
5446*01826a49SYabin Cui         svst1_u64(mask, xacc + 2, acc1);
5447*01826a49SYabin Cui         svst1_u64(mask, xacc + 4, acc2);
5448*01826a49SYabin Cui         svst1_u64(mask, xacc + 6, acc3);
5449*01826a49SYabin Cui     } else {
5450*01826a49SYabin Cui         svbool_t mask = svptrue_pat_b64(SV_VL4);
5451*01826a49SYabin Cui         svuint64_t acc0 = svld1_u64(mask, xacc + 0);
5452*01826a49SYabin Cui         svuint64_t acc1 = svld1_u64(mask, xacc + 4);
5453*01826a49SYabin Cui         ACCRND(acc0, 0);
5454*01826a49SYabin Cui         ACCRND(acc1, 4);
5455*01826a49SYabin Cui         svst1_u64(mask, xacc + 0, acc0);
5456*01826a49SYabin Cui         svst1_u64(mask, xacc + 4, acc1);
5457*01826a49SYabin Cui     }
5458*01826a49SYabin Cui }
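
/*
 * Editorial note: ACCRND() is defined earlier in this file alongside the SVE
 * support code. Semantically, each 64-bit lane receives the same update as
 * the scalar path: the lane-swapped raw input is added in, plus the
 * 32x32->64 multiply of the low and high halves of (input ^ secret). The
 * branches above merely tile the 8 accumulators over vectors of 8, 2, or 4
 * lanes (svcntd() returns the hardware's 64-bit lane count).
 */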
5459*01826a49SYabin Cui 
5460*01826a49SYabin Cui XXH_FORCE_INLINE void
5461*01826a49SYabin Cui XXH3_accumulate_sve(xxh_u64* XXH_RESTRICT acc,
5462*01826a49SYabin Cui                const xxh_u8* XXH_RESTRICT input,
5463*01826a49SYabin Cui                const xxh_u8* XXH_RESTRICT secret,
5464*01826a49SYabin Cui                size_t nbStripes)
5465*01826a49SYabin Cui {
5466*01826a49SYabin Cui     if (nbStripes != 0) {
5467*01826a49SYabin Cui         uint64_t *xacc = (uint64_t *)acc;
5468*01826a49SYabin Cui         const uint64_t *xinput = (const uint64_t *)(const void *)input;
5469*01826a49SYabin Cui         const uint64_t *xsecret = (const uint64_t *)(const void *)secret;
5470*01826a49SYabin Cui         svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1);
5471*01826a49SYabin Cui         uint64_t element_count = svcntd();
5472*01826a49SYabin Cui         if (element_count >= 8) {
5473*01826a49SYabin Cui             svbool_t mask = svptrue_pat_b64(SV_VL8);
5474*01826a49SYabin Cui             svuint64_t vacc = svld1_u64(mask, xacc + 0);
5475*01826a49SYabin Cui             do {
5476*01826a49SYabin Cui                 /* svprfd(svbool_t, void *, enum svfprop); */
5477*01826a49SYabin Cui                 /* svprfd(svbool_t, const void *, enum svprfop); */
5478*01826a49SYabin Cui                 ACCRND(vacc, 0);
5479*01826a49SYabin Cui                 xinput += 8;
5480*01826a49SYabin Cui                 xsecret += 1;
5481*01826a49SYabin Cui                 nbStripes--;
5482*01826a49SYabin Cui            } while (nbStripes != 0);
5483*01826a49SYabin Cui 
5484*01826a49SYabin Cui            svst1_u64(mask, xacc + 0, vacc);
5485*01826a49SYabin Cui         } else if (element_count == 2) { /* sve128 */
5486*01826a49SYabin Cui             svbool_t mask = svptrue_pat_b64(SV_VL2);
5487*01826a49SYabin Cui             svuint64_t acc0 = svld1_u64(mask, xacc + 0);
5488*01826a49SYabin Cui             svuint64_t acc1 = svld1_u64(mask, xacc + 2);
5489*01826a49SYabin Cui             svuint64_t acc2 = svld1_u64(mask, xacc + 4);
5490*01826a49SYabin Cui             svuint64_t acc3 = svld1_u64(mask, xacc + 6);
5491*01826a49SYabin Cui             do {
5492*01826a49SYabin Cui                 svprfd(mask, xinput + 128, SV_PLDL1STRM);
5493*01826a49SYabin Cui                 ACCRND(acc0, 0);
5494*01826a49SYabin Cui                 ACCRND(acc1, 2);
5495*01826a49SYabin Cui                 ACCRND(acc2, 4);
5496*01826a49SYabin Cui                 ACCRND(acc3, 6);
5497*01826a49SYabin Cui                 xinput += 8;
5498*01826a49SYabin Cui                 xsecret += 1;
5499*01826a49SYabin Cui                 nbStripes--;
5500*01826a49SYabin Cui            } while (nbStripes != 0);
5501*01826a49SYabin Cui 
5502*01826a49SYabin Cui            svst1_u64(mask, xacc + 0, acc0);
5503*01826a49SYabin Cui            svst1_u64(mask, xacc + 2, acc1);
5504*01826a49SYabin Cui            svst1_u64(mask, xacc + 4, acc2);
5505*01826a49SYabin Cui            svst1_u64(mask, xacc + 6, acc3);
5506*01826a49SYabin Cui         } else {
5507*01826a49SYabin Cui             svbool_t mask = svptrue_pat_b64(SV_VL4);
5508*01826a49SYabin Cui             svuint64_t acc0 = svld1_u64(mask, xacc + 0);
5509*01826a49SYabin Cui             svuint64_t acc1 = svld1_u64(mask, xacc + 4);
5510*01826a49SYabin Cui             do {
5511*01826a49SYabin Cui                 svprfd(mask, xinput + 128, SV_PLDL1STRM);
5512*01826a49SYabin Cui                 ACCRND(acc0, 0);
5513*01826a49SYabin Cui                 ACCRND(acc1, 4);
5514*01826a49SYabin Cui                 xinput += 8;
5515*01826a49SYabin Cui                 xsecret += 1;
5516*01826a49SYabin Cui                 nbStripes--;
5517*01826a49SYabin Cui            } while (nbStripes != 0);
5518*01826a49SYabin Cui 
5519*01826a49SYabin Cui            svst1_u64(mask, xacc + 0, acc0);
5520*01826a49SYabin Cui            svst1_u64(mask, xacc + 4, acc1);
5521*01826a49SYabin Cui        }
5522*01826a49SYabin Cui     }
5523*01826a49SYabin Cui }
5524*01826a49SYabin Cui 
5525*01826a49SYabin Cui #endif
5526*01826a49SYabin Cui 
5527*01826a49SYabin Cui /* scalar variants - universal */
5528*01826a49SYabin Cui 
5529*01826a49SYabin Cui #if defined(__aarch64__) && (defined(__GNUC__) || defined(__clang__))
5530*01826a49SYabin Cui /*
5531*01826a49SYabin Cui  * In XXH3_scalarRound(), GCC and Clang have a similar codegen issue, where they
5532*01826a49SYabin Cui  * emit an excess mask and a full 64-bit multiply-add (MADD X-form).
5533*01826a49SYabin Cui  *
5534*01826a49SYabin Cui  * While this might not seem like much, as AArch64 is a 64-bit architecture, only
5535*01826a49SYabin Cui  * big Cortex designs have a full 64-bit multiplier.
5536*01826a49SYabin Cui  *
5537*01826a49SYabin Cui  * On the little cores, the smaller 32-bit multiplier is used, and full 64-bit
5538*01826a49SYabin Cui  * multiplies expand to 2-3 multiplies in microcode. This has a major penalty
5539*01826a49SYabin Cui  * of up to 4 latency cycles and 2 stall cycles in the multiply pipeline.
5540*01826a49SYabin Cui  *
5541*01826a49SYabin Cui  * Thankfully, AArch64 still provides the 32-bit long multiply-add (UMADDL) which does
5542*01826a49SYabin Cui  * not have this penalty and does the mask automatically.
5543*01826a49SYabin Cui  */
5544*01826a49SYabin Cui XXH_FORCE_INLINE xxh_u64
5545*01826a49SYabin Cui XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc)
5546*01826a49SYabin Cui {
5547*01826a49SYabin Cui     xxh_u64 ret;
5548*01826a49SYabin Cui     /* note: %x = 64-bit register, %w = 32-bit register */
5549*01826a49SYabin Cui     __asm__("umaddl %x0, %w1, %w2, %x3" : "=r" (ret) : "r" (lhs), "r" (rhs), "r" (acc));
5550*01826a49SYabin Cui     return ret;
5551*01826a49SYabin Cui }
5552*01826a49SYabin Cui #else
5553*01826a49SYabin Cui XXH_FORCE_INLINE xxh_u64
5554*01826a49SYabin Cui XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc)
5555*01826a49SYabin Cui {
5556*01826a49SYabin Cui     return XXH_mult32to64((xxh_u32)lhs, (xxh_u32)rhs) + acc;
5557*01826a49SYabin Cui }
5558*01826a49SYabin Cui #endif
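/*
 * Editorial sketch, not part of the library: both XXH_mult32to64_add64
 * variants above compute the same value, a 32x32->64 multiply of the low
 * halves of lhs and rhs plus a 64-bit accumulator. A minimal equivalence
 * check (the helper name is hypothetical, for illustration only):
 */
#if 0  /* illustrative example, never compiled */
#include <assert.h>
static void XXH_example_mult32to64_add64(void)
{
    xxh_u64 const lhs = 0x123456789ABCDEF0ULL;  /* only the low 32 bits matter */
    xxh_u64 const rhs = 0x0FEDCBA987654321ULL;
    xxh_u64 const acc = 42;
    xxh_u64 const expected = (xxh_u64)(xxh_u32)lhs * (xxh_u64)(xxh_u32)rhs + acc;
    assert(XXH_mult32to64_add64(lhs, rhs, acc) == expected);
}
#endif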
5559*01826a49SYabin Cui 
5560*01826a49SYabin Cui /*!
5561*01826a49SYabin Cui  * @internal
5562*01826a49SYabin Cui  * @brief Scalar round for @ref XXH3_accumulate_512_scalar().
5563*01826a49SYabin Cui  *
5564*01826a49SYabin Cui  * This is extracted to its own function because the NEON path uses a combination
5565*01826a49SYabin Cui  * of NEON and scalar.
5566*01826a49SYabin Cui  */
5567*01826a49SYabin Cui XXH_FORCE_INLINE void
5568*01826a49SYabin Cui XXH3_scalarRound(void* XXH_RESTRICT acc,
5569*01826a49SYabin Cui                  void const* XXH_RESTRICT input,
5570*01826a49SYabin Cui                  void const* XXH_RESTRICT secret,
5571*01826a49SYabin Cui                  size_t lane)
5572*01826a49SYabin Cui {
5573*01826a49SYabin Cui     xxh_u64* xacc = (xxh_u64*) acc;
5574*01826a49SYabin Cui     xxh_u8 const* xinput  = (xxh_u8 const*) input;
5575*01826a49SYabin Cui     xxh_u8 const* xsecret = (xxh_u8 const*) secret;
5576*01826a49SYabin Cui     XXH_ASSERT(lane < XXH_ACC_NB);
5577*01826a49SYabin Cui     XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0);
5578*01826a49SYabin Cui     {
5579*01826a49SYabin Cui         xxh_u64 const data_val = XXH_readLE64(xinput + lane * 8);
5580*01826a49SYabin Cui         xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + lane * 8);
5581*01826a49SYabin Cui         xacc[lane ^ 1] += data_val; /* swap adjacent lanes */
5582*01826a49SYabin Cui         xacc[lane] = XXH_mult32to64_add64(data_key /* & 0xFFFFFFFF */, data_key >> 32, xacc[lane]);
5583*01826a49SYabin Cui     }
5584*01826a49SYabin Cui }
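/*
 * In equation form, with data and key the 8 input/secret bytes for this lane
 * and dk = data ^ key, one scalar round performs:
 *   acc[lane ^ 1] += data
 *   acc[lane]     += (dk & 0xFFFFFFFF) * (dk >> 32)
 * The lane swap keeps the raw input in a neighbouring accumulator, so input
 * bits survive even where the multiply loses information.
 */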
5585*01826a49SYabin Cui 
5586*01826a49SYabin Cui /*!
5587*01826a49SYabin Cui  * @internal
5588*01826a49SYabin Cui  * @brief Processes a 64 byte block of data using the scalar path.
5589*01826a49SYabin Cui  */
5590*01826a49SYabin Cui XXH_FORCE_INLINE void
5591*01826a49SYabin Cui XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc,
5592*01826a49SYabin Cui                      const void* XXH_RESTRICT input,
5593*01826a49SYabin Cui                      const void* XXH_RESTRICT secret)
5594*01826a49SYabin Cui {
5595*01826a49SYabin Cui     size_t i;
5596*01826a49SYabin Cui     /* ARM GCC refuses to unroll this loop, resulting in a 24% slowdown on ARMv6. */
5597*01826a49SYabin Cui #if defined(__GNUC__) && !defined(__clang__) \
5598*01826a49SYabin Cui   && (defined(__arm__) || defined(__thumb2__)) \
5599*01826a49SYabin Cui   && defined(__ARM_FEATURE_UNALIGNED) /* no unaligned access just wastes bytes */ \
5600*01826a49SYabin Cui   && XXH_SIZE_OPT <= 0
5601*01826a49SYabin Cui #  pragma GCC unroll 8
5602*01826a49SYabin Cui #endif
5603*01826a49SYabin Cui     for (i=0; i < XXH_ACC_NB; i++) {
5604*01826a49SYabin Cui         XXH3_scalarRound(acc, input, secret, i);
5605*01826a49SYabin Cui     }
5606*01826a49SYabin Cui }
5607*01826a49SYabin Cui XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(scalar)
5608*01826a49SYabin Cui 
5609*01826a49SYabin Cui /*!
5610*01826a49SYabin Cui  * @internal
5611*01826a49SYabin Cui  * @brief Scalar scramble step for @ref XXH3_scrambleAcc_scalar().
5612*01826a49SYabin Cui  *
5613*01826a49SYabin Cui  * This is extracted to its own function because the NEON path uses a combination
5614*01826a49SYabin Cui  * of NEON and scalar.
5615*01826a49SYabin Cui  */
5616*01826a49SYabin Cui XXH_FORCE_INLINE void
5617*01826a49SYabin Cui XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
5618*01826a49SYabin Cui                          void const* XXH_RESTRICT secret,
5619*01826a49SYabin Cui                          size_t lane)
5620*01826a49SYabin Cui {
5621*01826a49SYabin Cui     xxh_u64* const xacc = (xxh_u64*) acc;   /* presumed aligned */
5622*01826a49SYabin Cui     const xxh_u8* const xsecret = (const xxh_u8*) secret;   /* no alignment restriction */
5623*01826a49SYabin Cui     XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0);
5624*01826a49SYabin Cui     XXH_ASSERT(lane < XXH_ACC_NB);
5625*01826a49SYabin Cui     {
5626*01826a49SYabin Cui         xxh_u64 const key64 = XXH_readLE64(xsecret + lane * 8);
5627*01826a49SYabin Cui         xxh_u64 acc64 = xacc[lane];
5628*01826a49SYabin Cui         acc64 = XXH_xorshift64(acc64, 47);
5629*01826a49SYabin Cui         acc64 ^= key64;
5630*01826a49SYabin Cui         acc64 *= XXH_PRIME32_1;
5631*01826a49SYabin Cui         xacc[lane] = acc64;
5632*01826a49SYabin Cui     }
5633*01826a49SYabin Cui }
5634*01826a49SYabin Cui 
5635*01826a49SYabin Cui /*!
5636*01826a49SYabin Cui  * @internal
5637*01826a49SYabin Cui  * @brief Scrambles the accumulators after a large chunk has been read
5638*01826a49SYabin Cui  */
5639*01826a49SYabin Cui XXH_FORCE_INLINE void
5640*01826a49SYabin Cui XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
5641*01826a49SYabin Cui {
5642*01826a49SYabin Cui     size_t i;
5643*01826a49SYabin Cui     for (i=0; i < XXH_ACC_NB; i++) {
5644*01826a49SYabin Cui         XXH3_scalarScrambleRound(acc, secret, i);
5645*01826a49SYabin Cui     }
5646*01826a49SYabin Cui }
5647*01826a49SYabin Cui 
5648*01826a49SYabin Cui XXH_FORCE_INLINE void
5649*01826a49SYabin Cui XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
5650*01826a49SYabin Cui {
5651*01826a49SYabin Cui     /*
5652*01826a49SYabin Cui      * We need a separate pointer for the hack below,
5653*01826a49SYabin Cui      * which requires a non-const pointer.
5654*01826a49SYabin Cui      * Any decent compiler will optimize this out otherwise.
5655*01826a49SYabin Cui      */
5656*01826a49SYabin Cui     const xxh_u8* kSecretPtr = XXH3_kSecret;
5657*01826a49SYabin Cui     XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
5658*01826a49SYabin Cui 
5659*01826a49SYabin Cui #if defined(__GNUC__) && defined(__aarch64__)
5660*01826a49SYabin Cui     /*
5661*01826a49SYabin Cui      * UGLY HACK:
5662*01826a49SYabin Cui      * GCC and Clang generate a bunch of MOV/MOVK pairs for aarch64, and they are
5663*01826a49SYabin Cui      * placed sequentially, in order, at the top of the unrolled loop.
5664*01826a49SYabin Cui      *
5665*01826a49SYabin Cui      * While MOVK is great for generating constants (2 cycles for a 64-bit
5666*01826a49SYabin Cui      * constant compared to 4 cycles for LDR), it fights for bandwidth with
5667*01826a49SYabin Cui      * the arithmetic instructions.
5668*01826a49SYabin Cui      *
5669*01826a49SYabin Cui      *   I   L   S
5670*01826a49SYabin Cui      * MOVK
5671*01826a49SYabin Cui      * MOVK
5672*01826a49SYabin Cui      * MOVK
5673*01826a49SYabin Cui      * MOVK
5674*01826a49SYabin Cui      * ADD
5675*01826a49SYabin Cui      * SUB      STR
5676*01826a49SYabin Cui      *          STR
5677*01826a49SYabin Cui      * By forcing loads from memory (as the asm line causes the compiler to assume
5678*01826a49SYabin Cui      * that kSecretPtr has been changed), the pipelines are used more
5679*01826a49SYabin Cui      * efficiently:
5680*01826a49SYabin Cui      *   I   L   S
5681*01826a49SYabin Cui      *      LDR
5682*01826a49SYabin Cui      *  ADD LDR
5683*01826a49SYabin Cui      *  SUB     STR
5684*01826a49SYabin Cui      *          STR
5685*01826a49SYabin Cui      *
5686*01826a49SYabin Cui      * See XXH3_NEON_LANES for details on the pipeline.
5687*01826a49SYabin Cui      *
5688*01826a49SYabin Cui      * XXH3_64bits_withSeed, len == 256, Snapdragon 835
5689*01826a49SYabin Cui      *   without hack: 2654.4 MB/s
5690*01826a49SYabin Cui      *   with hack:    3202.9 MB/s
5691*01826a49SYabin Cui      */
5692*01826a49SYabin Cui     XXH_COMPILER_GUARD(kSecretPtr);
5693*01826a49SYabin Cui #endif
5694*01826a49SYabin Cui     {   int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
5695*01826a49SYabin Cui         int i;
5696*01826a49SYabin Cui         for (i=0; i < nbRounds; i++) {
5697*01826a49SYabin Cui             /*
5698*01826a49SYabin Cui              * The asm hack causes the compiler to assume that kSecretPtr aliases with
5699*01826a49SYabin Cui              * customSecret, and on aarch64, this prevented LDP from merging two
5700*01826a49SYabin Cui              * loads together for free. Putting the loads together before the stores
5701*01826a49SYabin Cui              * properly generates LDP.
5702*01826a49SYabin Cui              */
5703*01826a49SYabin Cui             xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i)     + seed64;
5704*01826a49SYabin Cui             xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64;
5705*01826a49SYabin Cui             XXH_writeLE64((xxh_u8*)customSecret + 16*i,     lo);
5706*01826a49SYabin Cui             XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi);
5707*01826a49SYabin Cui     }   }
5708*01826a49SYabin Cui }
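/*
 * Editorial sketch, not part of the library: with seed64 == 0 the derivation
 * above adds and subtracts nothing, so the custom secret is byte-identical to
 * XXH3_kSecret. A quick self-check (hypothetical helper name):
 */
#if 0  /* illustrative example, never compiled */
#include <string.h>
static void XXH_example_initCustomSecret_identity(void)
{
    XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 derived[XXH_SECRET_DEFAULT_SIZE];
    XXH3_initCustomSecret_scalar(derived, 0);
    XXH_ASSERT(memcmp(derived, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE) == 0);
}
#endif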
5709*01826a49SYabin Cui 
5710*01826a49SYabin Cui 
5711*01826a49SYabin Cui typedef void (*XXH3_f_accumulate)(xxh_u64* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, size_t);
5712*01826a49SYabin Cui typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*);
5713*01826a49SYabin Cui typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64);
5714*01826a49SYabin Cui 
5715*01826a49SYabin Cui 
5716*01826a49SYabin Cui #if (XXH_VECTOR == XXH_AVX512)
5717*01826a49SYabin Cui 
5718*01826a49SYabin Cui #define XXH3_accumulate_512 XXH3_accumulate_512_avx512
5719*01826a49SYabin Cui #define XXH3_accumulate     XXH3_accumulate_avx512
5720*01826a49SYabin Cui #define XXH3_scrambleAcc    XXH3_scrambleAcc_avx512
5721*01826a49SYabin Cui #define XXH3_initCustomSecret XXH3_initCustomSecret_avx512
5722*01826a49SYabin Cui 
5723*01826a49SYabin Cui #elif (XXH_VECTOR == XXH_AVX2)
5724*01826a49SYabin Cui 
5725*01826a49SYabin Cui #define XXH3_accumulate_512 XXH3_accumulate_512_avx2
5726*01826a49SYabin Cui #define XXH3_accumulate     XXH3_accumulate_avx2
5727*01826a49SYabin Cui #define XXH3_scrambleAcc    XXH3_scrambleAcc_avx2
5728*01826a49SYabin Cui #define XXH3_initCustomSecret XXH3_initCustomSecret_avx2
5729*01826a49SYabin Cui 
5730*01826a49SYabin Cui #elif (XXH_VECTOR == XXH_SSE2)
5731*01826a49SYabin Cui 
5732*01826a49SYabin Cui #define XXH3_accumulate_512 XXH3_accumulate_512_sse2
5733*01826a49SYabin Cui #define XXH3_accumulate     XXH3_accumulate_sse2
5734*01826a49SYabin Cui #define XXH3_scrambleAcc    XXH3_scrambleAcc_sse2
5735*01826a49SYabin Cui #define XXH3_initCustomSecret XXH3_initCustomSecret_sse2
5736*01826a49SYabin Cui 
5737*01826a49SYabin Cui #elif (XXH_VECTOR == XXH_NEON)
5738*01826a49SYabin Cui 
5739*01826a49SYabin Cui #define XXH3_accumulate_512 XXH3_accumulate_512_neon
5740*01826a49SYabin Cui #define XXH3_accumulate     XXH3_accumulate_neon
5741*01826a49SYabin Cui #define XXH3_scrambleAcc    XXH3_scrambleAcc_neon
5742*01826a49SYabin Cui #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
5743*01826a49SYabin Cui 
5744*01826a49SYabin Cui #elif (XXH_VECTOR == XXH_VSX)
5745*01826a49SYabin Cui 
5746*01826a49SYabin Cui #define XXH3_accumulate_512 XXH3_accumulate_512_vsx
5747*01826a49SYabin Cui #define XXH3_accumulate     XXH3_accumulate_vsx
5748*01826a49SYabin Cui #define XXH3_scrambleAcc    XXH3_scrambleAcc_vsx
5749*01826a49SYabin Cui #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
5750*01826a49SYabin Cui 
5751*01826a49SYabin Cui #elif (XXH_VECTOR == XXH_SVE)
5752*01826a49SYabin Cui #define XXH3_accumulate_512 XXH3_accumulate_512_sve
5753*01826a49SYabin Cui #define XXH3_accumulate     XXH3_accumulate_sve
5754*01826a49SYabin Cui #define XXH3_scrambleAcc    XXH3_scrambleAcc_scalar
5755*01826a49SYabin Cui #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
5756*01826a49SYabin Cui 
5757*01826a49SYabin Cui #else /* scalar */
5758*01826a49SYabin Cui 
5759*01826a49SYabin Cui #define XXH3_accumulate_512 XXH3_accumulate_512_scalar
5760*01826a49SYabin Cui #define XXH3_accumulate     XXH3_accumulate_scalar
5761*01826a49SYabin Cui #define XXH3_scrambleAcc    XXH3_scrambleAcc_scalar
5762*01826a49SYabin Cui #define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
5763*01826a49SYabin Cui 
5764*01826a49SYabin Cui #endif
5765*01826a49SYabin Cui 
5766*01826a49SYabin Cui #if XXH_SIZE_OPT >= 1 /* don't do SIMD for initialization */
5767*01826a49SYabin Cui #  undef XXH3_initCustomSecret
5768*01826a49SYabin Cui #  define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
5769*01826a49SYabin Cui #endif
5770*01826a49SYabin Cui 
5771*01826a49SYabin Cui XXH_FORCE_INLINE void
5772*01826a49SYabin Cui XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc,
5773*01826a49SYabin Cui                       const xxh_u8* XXH_RESTRICT input, size_t len,
5774*01826a49SYabin Cui                       const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
5775*01826a49SYabin Cui                             XXH3_f_accumulate f_acc,
5776*01826a49SYabin Cui                             XXH3_f_scrambleAcc f_scramble)
5777*01826a49SYabin Cui {
5778*01826a49SYabin Cui     size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE;
5779*01826a49SYabin Cui     size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock;
5780*01826a49SYabin Cui     size_t const nb_blocks = (len - 1) / block_len;
5781*01826a49SYabin Cui 
5782*01826a49SYabin Cui     size_t n;
5783*01826a49SYabin Cui 
5784*01826a49SYabin Cui     XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
5785*01826a49SYabin Cui 
5786*01826a49SYabin Cui     for (n = 0; n < nb_blocks; n++) {
5787*01826a49SYabin Cui         f_acc(acc, input + n*block_len, secret, nbStripesPerBlock);
5788*01826a49SYabin Cui         f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN);
5789*01826a49SYabin Cui     }
5790*01826a49SYabin Cui 
5791*01826a49SYabin Cui     /* last partial block */
5792*01826a49SYabin Cui     XXH_ASSERT(len > XXH_STRIPE_LEN);
5793*01826a49SYabin Cui     {   size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN;
5794*01826a49SYabin Cui         XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE));
5795*01826a49SYabin Cui         f_acc(acc, input + nb_blocks*block_len, secret, nbStripes);
5796*01826a49SYabin Cui 
5797*01826a49SYabin Cui         /* last stripe */
5798*01826a49SYabin Cui         {   const xxh_u8* const p = input + len - XXH_STRIPE_LEN;
5799*01826a49SYabin Cui #define XXH_SECRET_LASTACC_START 7  /* not aligned on 8, last secret is different from acc & scrambler */
5800*01826a49SYabin Cui             XXH3_accumulate_512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START);
5801*01826a49SYabin Cui     }   }
5802*01826a49SYabin Cui }
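/*
 * Worked example for the loop above, assuming the default secret
 * (XXH_SECRET_DEFAULT_SIZE = 192, XXH_STRIPE_LEN = 64, XXH_SECRET_CONSUME_RATE = 8):
 *   nbStripesPerBlock = (192 - 64) / 8 = 16
 *   block_len         = 64 * 16       = 1024 bytes
 * A 10240-byte input therefore runs 9 full blocks (9216 bytes), then a
 * partial block of (10239 - 9216) / 64 = 15 stripes, then the final stripe,
 * which may overlap stripes already processed.
 */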
5803*01826a49SYabin Cui 
5804*01826a49SYabin Cui XXH_FORCE_INLINE xxh_u64
5805*01826a49SYabin Cui XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret)
5806*01826a49SYabin Cui {
5807*01826a49SYabin Cui     return XXH3_mul128_fold64(
5808*01826a49SYabin Cui                acc[0] ^ XXH_readLE64(secret),
5809*01826a49SYabin Cui                acc[1] ^ XXH_readLE64(secret+8) );
5810*01826a49SYabin Cui }
5811*01826a49SYabin Cui 
5812*01826a49SYabin Cui static XXH64_hash_t
5813*01826a49SYabin Cui XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start)
5814*01826a49SYabin Cui {
5815*01826a49SYabin Cui     xxh_u64 result64 = start;
5816*01826a49SYabin Cui     size_t i = 0;
5817*01826a49SYabin Cui 
5818*01826a49SYabin Cui     for (i = 0; i < 4; i++) {
5819*01826a49SYabin Cui         result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i);
5820*01826a49SYabin Cui #if defined(__clang__)                                /* Clang */ \
5821*01826a49SYabin Cui     && (defined(__arm__) || defined(__thumb__))       /* ARMv7 */ \
5822*01826a49SYabin Cui     && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */  \
5823*01826a49SYabin Cui     && !defined(XXH_ENABLE_AUTOVECTORIZE)             /* Define to disable */
5824*01826a49SYabin Cui         /*
5825*01826a49SYabin Cui          * UGLY HACK:
5826*01826a49SYabin Cui          * Prevent autovectorization on Clang ARMv7-a. Exact same problem as
5827*01826a49SYabin Cui          * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b.
5828*01826a49SYabin Cui          * XXH3_64bits, len == 256, Snapdragon 835:
5829*01826a49SYabin Cui          *   without hack: 2063.7 MB/s
5830*01826a49SYabin Cui          *   with hack:    2560.7 MB/s
5831*01826a49SYabin Cui          */
5832*01826a49SYabin Cui         XXH_COMPILER_GUARD(result64);
5833*01826a49SYabin Cui #endif
5834*01826a49SYabin Cui     }
5835*01826a49SYabin Cui 
5836*01826a49SYabin Cui     return XXH3_avalanche(result64);
5837*01826a49SYabin Cui }
5838*01826a49SYabin Cui 
5839*01826a49SYabin Cui #define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \
5840*01826a49SYabin Cui                         XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 }
5841*01826a49SYabin Cui 
5842*01826a49SYabin Cui XXH_FORCE_INLINE XXH64_hash_t
5843*01826a49SYabin Cui XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len,
5844*01826a49SYabin Cui                            const void* XXH_RESTRICT secret, size_t secretSize,
5845*01826a49SYabin Cui                            XXH3_f_accumulate f_acc,
5846*01826a49SYabin Cui                            XXH3_f_scrambleAcc f_scramble)
5847*01826a49SYabin Cui {
5848*01826a49SYabin Cui     XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
5849*01826a49SYabin Cui 
5850*01826a49SYabin Cui     XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc, f_scramble);
5851*01826a49SYabin Cui 
5852*01826a49SYabin Cui     /* converge into final hash */
5853*01826a49SYabin Cui     XXH_STATIC_ASSERT(sizeof(acc) == 64);
5854*01826a49SYabin Cui     /* do not align on 8, so that the secret is different from the accumulator */
5855*01826a49SYabin Cui #define XXH_SECRET_MERGEACCS_START 11
5856*01826a49SYabin Cui     XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
5857*01826a49SYabin Cui     return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1);
5858*01826a49SYabin Cui }
5859*01826a49SYabin Cui 
5860*01826a49SYabin Cui /*
5861*01826a49SYabin Cui  * It's important for performance to transmit the secret's size (when it's static)
5862*01826a49SYabin Cui  * so that the compiler can properly optimize the vectorized loop.
5863*01826a49SYabin Cui  * This makes a big performance difference for "medium" keys (<1 KB) when using the AVX instruction set.
5864*01826a49SYabin Cui  * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE
5865*01826a49SYabin Cui  * breaks -Og, this is XXH_NO_INLINE.
5866*01826a49SYabin Cui  */
5867*01826a49SYabin Cui XXH3_WITH_SECRET_INLINE XXH64_hash_t
5868*01826a49SYabin Cui XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len,
5869*01826a49SYabin Cui                              XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
5870*01826a49SYabin Cui {
5871*01826a49SYabin Cui     (void)seed64;
5872*01826a49SYabin Cui     return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate, XXH3_scrambleAcc);
5873*01826a49SYabin Cui }
5874*01826a49SYabin Cui 
5875*01826a49SYabin Cui /*
5876*01826a49SYabin Cui  * It's preferable for performance that XXH3_hashLong is not inlined,
5877*01826a49SYabin Cui  * as it results in a smaller function for small data, which is easier on the instruction cache.
5878*01826a49SYabin Cui  * Note that inside this no_inline function, we do inline the internal loop,
5879*01826a49SYabin Cui  * and provide a statically defined secret size to allow optimization of vector loop.
5880*01826a49SYabin Cui  */
5881*01826a49SYabin Cui XXH_NO_INLINE XXH_PUREF XXH64_hash_t
5882*01826a49SYabin Cui XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len,
5883*01826a49SYabin Cui                           XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
5884*01826a49SYabin Cui {
5885*01826a49SYabin Cui     (void)seed64; (void)secret; (void)secretLen;
5886*01826a49SYabin Cui     return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate, XXH3_scrambleAcc);
5887*01826a49SYabin Cui }
5888*01826a49SYabin Cui 
5889*01826a49SYabin Cui /*
5890*01826a49SYabin Cui  * XXH3_hashLong_64b_withSeed():
5891*01826a49SYabin Cui  * Generate a custom key based on an alteration of the default XXH3_kSecret with the seed,
5892*01826a49SYabin Cui  * and then use this key for long mode hashing.
5893*01826a49SYabin Cui  *
5894*01826a49SYabin Cui  * This operation is decently fast but nonetheless costs a little bit of time.
5895*01826a49SYabin Cui  * Try to avoid it whenever possible (typically when seed==0).
5896*01826a49SYabin Cui  *
5897*01826a49SYabin Cui  * It's important for performance that XXH3_hashLong is not inlined. Not sure
5898*01826a49SYabin Cui  * why (uop cache maybe?), but the difference is large and easily measurable.
5899*01826a49SYabin Cui  */
5900*01826a49SYabin Cui XXH_FORCE_INLINE XXH64_hash_t
5901*01826a49SYabin Cui XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len,
5902*01826a49SYabin Cui                                     XXH64_hash_t seed,
5903*01826a49SYabin Cui                                     XXH3_f_accumulate f_acc,
5904*01826a49SYabin Cui                                     XXH3_f_scrambleAcc f_scramble,
5905*01826a49SYabin Cui                                     XXH3_f_initCustomSecret f_initSec)
5906*01826a49SYabin Cui {
5907*01826a49SYabin Cui #if XXH_SIZE_OPT <= 0
5908*01826a49SYabin Cui     if (seed == 0)
5909*01826a49SYabin Cui         return XXH3_hashLong_64b_internal(input, len,
5910*01826a49SYabin Cui                                           XXH3_kSecret, sizeof(XXH3_kSecret),
5911*01826a49SYabin Cui                                           f_acc, f_scramble);
5912*01826a49SYabin Cui #endif
5913*01826a49SYabin Cui     {   XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
5914*01826a49SYabin Cui         f_initSec(secret, seed);
5915*01826a49SYabin Cui         return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret),
5916*01826a49SYabin Cui                                           f_acc, f_scramble);
5917*01826a49SYabin Cui     }
5918*01826a49SYabin Cui }
5919*01826a49SYabin Cui 
5920*01826a49SYabin Cui /*
5921*01826a49SYabin Cui  * It's important for performance that XXH3_hashLong is not inlined.
5922*01826a49SYabin Cui  */
5923*01826a49SYabin Cui XXH_NO_INLINE XXH64_hash_t
5924*01826a49SYabin Cui XXH3_hashLong_64b_withSeed(const void* XXH_RESTRICT input, size_t len,
5925*01826a49SYabin Cui                            XXH64_hash_t seed, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
5926*01826a49SYabin Cui {
5927*01826a49SYabin Cui     (void)secret; (void)secretLen;
5928*01826a49SYabin Cui     return XXH3_hashLong_64b_withSeed_internal(input, len, seed,
5929*01826a49SYabin Cui                 XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret);
5930*01826a49SYabin Cui }
5931*01826a49SYabin Cui 
5932*01826a49SYabin Cui 
5933*01826a49SYabin Cui typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t,
5934*01826a49SYabin Cui                                           XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t);
5935*01826a49SYabin Cui 
5936*01826a49SYabin Cui XXH_FORCE_INLINE XXH64_hash_t
5937*01826a49SYabin Cui XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len,
5938*01826a49SYabin Cui                      XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
5939*01826a49SYabin Cui                      XXH3_hashLong64_f f_hashLong)
5940*01826a49SYabin Cui {
5941*01826a49SYabin Cui     XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
5942*01826a49SYabin Cui     /*
5943*01826a49SYabin Cui      * If an action is to be taken when the `secretLen` condition is not respected,
5944*01826a49SYabin Cui      * it should be done here.
5945*01826a49SYabin Cui      * For now, it's a contract pre-condition.
5946*01826a49SYabin Cui      * Adding a check and a branch here would cost performance at every hash.
5947*01826a49SYabin Cui      * Also, note that the function signature doesn't offer room to return an error.
5948*01826a49SYabin Cui      */
5949*01826a49SYabin Cui     if (len <= 16)
5950*01826a49SYabin Cui         return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
5951*01826a49SYabin Cui     if (len <= 128)
5952*01826a49SYabin Cui         return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
5953*01826a49SYabin Cui     if (len <= XXH3_MIDSIZE_MAX)
5954*01826a49SYabin Cui         return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
5955*01826a49SYabin Cui     return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen);
5956*01826a49SYabin Cui }
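/*
 * Routing summary for the dispatcher above (XXH3_MIDSIZE_MAX == 240):
 *   len <=  16  -> XXH3_len_0to16_64b
 *   len <= 128  -> XXH3_len_17to128_64b
 *   len <= 240  -> XXH3_len_129to240_64b
 *   len  > 240  -> f_hashLong (the block/stripe loop above)
 * e.g. a 100-byte input takes the 17-128 path and never touches the long-hash code.
 */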
5957*01826a49SYabin Cui 
5958*01826a49SYabin Cui 
5959*01826a49SYabin Cui /* ===   Public entry point   === */
5960*01826a49SYabin Cui 
5961*01826a49SYabin Cui /*! @ingroup XXH3_family */
5962*01826a49SYabin Cui XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length)
5963*01826a49SYabin Cui {
5964*01826a49SYabin Cui     return XXH3_64bits_internal(input, length, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default);
5965*01826a49SYabin Cui }
5966*01826a49SYabin Cui 
5967*01826a49SYabin Cui /*! @ingroup XXH3_family */
5968*01826a49SYabin Cui XXH_PUBLIC_API XXH64_hash_t
5969*01826a49SYabin Cui XXH3_64bits_withSecret(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize)
5970*01826a49SYabin Cui {
5971*01826a49SYabin Cui     return XXH3_64bits_internal(input, length, 0, secret, secretSize, XXH3_hashLong_64b_withSecret);
5972*01826a49SYabin Cui }
5973*01826a49SYabin Cui 
5974*01826a49SYabin Cui /*! @ingroup XXH3_family */
5975*01826a49SYabin Cui XXH_PUBLIC_API XXH64_hash_t
5976*01826a49SYabin Cui XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed)
5977*01826a49SYabin Cui {
5978*01826a49SYabin Cui     return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed);
5979*01826a49SYabin Cui }
5980*01826a49SYabin Cui 
5981*01826a49SYabin Cui XXH_PUBLIC_API XXH64_hash_t
5982*01826a49SYabin Cui XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
5983*01826a49SYabin Cui {
5984*01826a49SYabin Cui     if (length <= XXH3_MIDSIZE_MAX)
5985*01826a49SYabin Cui         return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
5986*01826a49SYabin Cui     return XXH3_hashLong_64b_withSecret(input, length, seed, (const xxh_u8*)secret, secretSize);
5987*01826a49SYabin Cui }
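/*
 * Editorial sketch, not part of the library: typical one-shot usage of the
 * entry points above. Note that this zstd copy prefixes the symbols via
 * XXH_NAMESPACE (ZSTD_) and disables XXH3 with XXH_NO_XXH3, so the snippet
 * only applies to a standalone xxHash build. The helper name is hypothetical.
 */
#if 0  /* illustrative example, never compiled */
#include <stdio.h>
static void XXH_example_oneshot(void)
{
    const char data[] = "hello world";
    XXH64_hash_t const h1 = XXH3_64bits(data, sizeof(data) - 1);
    XXH64_hash_t const h2 = XXH3_64bits_withSeed(data, sizeof(data) - 1, (XXH64_hash_t)0xCAFE);
    printf("plain: %016llx  seeded: %016llx\n",
           (unsigned long long)h1, (unsigned long long)h2);
}
#endif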
5988*01826a49SYabin Cui 
5989*01826a49SYabin Cui 
5990*01826a49SYabin Cui /* ===   XXH3 streaming   === */
5991*01826a49SYabin Cui #ifndef XXH_NO_STREAM
5992*01826a49SYabin Cui /*
5993*01826a49SYabin Cui  * Allocates a pointer that is always aligned to align.
5994*01826a49SYabin Cui  *
5995*01826a49SYabin Cui  * This must be freed with `XXH_alignedFree()`.
5996*01826a49SYabin Cui  *
5997*01826a49SYabin Cui  * malloc typically guarantees 16 byte alignment on 64-bit systems and 8 byte
5998*01826a49SYabin Cui  * alignment on 32-bit. This isn't enough for the 32 byte aligned loads in AVX2
5999*01826a49SYabin Cui  * or, on 32-bit, the 16 byte aligned loads in SSE2 and NEON.
6000*01826a49SYabin Cui  *
6001*01826a49SYabin Cui  * This underalignment previously caused a rather obvious crash which went
6002*01826a49SYabin Cui  * completely unnoticed due to XXH3_createState() not actually being tested.
6003*01826a49SYabin Cui  * Credit to RedSpah for noticing this bug.
6004*01826a49SYabin Cui  *
6005*01826a49SYabin Cui  * The alignment is done manually: functions like posix_memalign or _mm_malloc
6006*01826a49SYabin Cui  * are avoided because, to maintain portability, we would have to write a
6007*01826a49SYabin Cui  * fallback like this anyway, and testing for the existence of library
6008*01826a49SYabin Cui  * functions without relying on external build tools is impossible.
6009*01826a49SYabin Cui  *
6010*01826a49SYabin Cui  * The method is simple: Overallocate, manually align, and store the offset
6011*01826a49SYabin Cui  * to the original behind the returned pointer.
6012*01826a49SYabin Cui  *
6013*01826a49SYabin Cui  * Align must be a power of 2 and 8 <= align <= 128.
6014*01826a49SYabin Cui  */
6015*01826a49SYabin Cui static XXH_MALLOCF void* XXH_alignedMalloc(size_t s, size_t align)
6016*01826a49SYabin Cui {
6017*01826a49SYabin Cui     XXH_ASSERT(align <= 128 && align >= 8); /* range check */
6018*01826a49SYabin Cui     XXH_ASSERT((align & (align-1)) == 0);   /* power of 2 */
6019*01826a49SYabin Cui     XXH_ASSERT(s != 0 && s < (s + align));  /* empty/overflow */
6020*01826a49SYabin Cui     {   /* Overallocate to make room for manual realignment and an offset byte */
6021*01826a49SYabin Cui         xxh_u8* base = (xxh_u8*)XXH_malloc(s + align);
6022*01826a49SYabin Cui         if (base != NULL) {
6023*01826a49SYabin Cui             /*
6024*01826a49SYabin Cui              * Get the offset needed to align this pointer.
6025*01826a49SYabin Cui              *
6026*01826a49SYabin Cui              * Even if the returned pointer is aligned, there will always be
6027*01826a49SYabin Cui              * at least one byte to store the offset to the original pointer.
6028*01826a49SYabin Cui              */
6029*01826a49SYabin Cui             size_t offset = align - ((size_t)base & (align - 1)); /* base % align */
6030*01826a49SYabin Cui             /* Add the offset for the now-aligned pointer */
6031*01826a49SYabin Cui             xxh_u8* ptr = base + offset;
6032*01826a49SYabin Cui 
6033*01826a49SYabin Cui             XXH_ASSERT((size_t)ptr % align == 0);
6034*01826a49SYabin Cui 
6035*01826a49SYabin Cui             /* Store the offset immediately before the returned pointer. */
6036*01826a49SYabin Cui             ptr[-1] = (xxh_u8)offset;
6037*01826a49SYabin Cui             return ptr;
6038*01826a49SYabin Cui         }
6039*01826a49SYabin Cui         return NULL;
6040*01826a49SYabin Cui     }
6041*01826a49SYabin Cui }
6042*01826a49SYabin Cui /*
6043*01826a49SYabin Cui  * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass
6044*01826a49SYabin Cui  * normal malloc'd pointers, XXH_alignedMalloc has a specific data layout.
6045*01826a49SYabin Cui  */
6046*01826a49SYabin Cui static void XXH_alignedFree(void* p)
6047*01826a49SYabin Cui {
6048*01826a49SYabin Cui     if (p != NULL) {
6049*01826a49SYabin Cui         xxh_u8* ptr = (xxh_u8*)p;
6050*01826a49SYabin Cui         /* Get the offset byte we added in XXH_alignedMalloc. */
6051*01826a49SYabin Cui         xxh_u8 offset = ptr[-1];
6052*01826a49SYabin Cui         /* Free the original malloc'd pointer */
6053*01826a49SYabin Cui         xxh_u8* base = ptr - offset;
6054*01826a49SYabin Cui         XXH_free(base);
6055*01826a49SYabin Cui     }
6056*01826a49SYabin Cui }
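/*
 * Worked example of the scheme above, with align = 64 and a hypothetical
 * base pointer of 0x1003 returned by XXH_malloc:
 *   offset  = 64 - (0x1003 & 63) = 64 - 3 = 61
 *   ptr     = 0x1003 + 61        = 0x1040   (64-byte aligned)
 *   ptr[-1] = 61
 * XXH_alignedFree(0x1040) then reads 61 from ptr[-1] and frees 0x1040 - 61 = 0x1003.
 * An already-aligned base gets offset = align, which is why `align` extra bytes
 * (not align - 1) are over-allocated: ptr[-1] always exists.
 */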
6057*01826a49SYabin Cui /*! @ingroup XXH3_family */
6058*01826a49SYabin Cui /*!
6059*01826a49SYabin Cui  * @brief Allocate an @ref XXH3_state_t.
6060*01826a49SYabin Cui  *
6061*01826a49SYabin Cui  * @return An allocated pointer of @ref XXH3_state_t on success.
6062*01826a49SYabin Cui  * @return `NULL` on failure.
6063*01826a49SYabin Cui  *
6064*01826a49SYabin Cui  * @note Must be freed with XXH3_freeState().
6065*01826a49SYabin Cui  */
6066*01826a49SYabin Cui XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void)
6067*01826a49SYabin Cui {
6068*01826a49SYabin Cui     XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64);
6069*01826a49SYabin Cui     if (state==NULL) return NULL;
6070*01826a49SYabin Cui     XXH3_INITSTATE(state);
6071*01826a49SYabin Cui     return state;
6072*01826a49SYabin Cui }
6073*01826a49SYabin Cui 
6074*01826a49SYabin Cui /*! @ingroup XXH3_family */
6075*01826a49SYabin Cui /*!
6076*01826a49SYabin Cui  * @brief Frees an @ref XXH3_state_t.
6077*01826a49SYabin Cui  *
6078*01826a49SYabin Cui  * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState().
6079*01826a49SYabin Cui  *
6080*01826a49SYabin Cui  * @return @ref XXH_OK.
6081*01826a49SYabin Cui  *
6082*01826a49SYabin Cui  * @note Must be allocated with XXH3_createState().
6083*01826a49SYabin Cui  */
6084*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr)
6085*01826a49SYabin Cui {
6086*01826a49SYabin Cui     XXH_alignedFree(statePtr);
6087*01826a49SYabin Cui     return XXH_OK;
6088*01826a49SYabin Cui }
6089*01826a49SYabin Cui 
6090*01826a49SYabin Cui /*! @ingroup XXH3_family */
6091*01826a49SYabin Cui XXH_PUBLIC_API void
6092*01826a49SYabin Cui XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state)
6093*01826a49SYabin Cui {
6094*01826a49SYabin Cui     XXH_memcpy(dst_state, src_state, sizeof(*dst_state));
6095*01826a49SYabin Cui }
6096*01826a49SYabin Cui 
6097*01826a49SYabin Cui static void
6098*01826a49SYabin Cui XXH3_reset_internal(XXH3_state_t* statePtr,
6099*01826a49SYabin Cui                     XXH64_hash_t seed,
6100*01826a49SYabin Cui                     const void* secret, size_t secretSize)
6101*01826a49SYabin Cui {
6102*01826a49SYabin Cui     size_t const initStart = offsetof(XXH3_state_t, bufferedSize);
6103*01826a49SYabin Cui     size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart;
6104*01826a49SYabin Cui     XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart);
6105*01826a49SYabin Cui     XXH_ASSERT(statePtr != NULL);
6106*01826a49SYabin Cui     /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */
6107*01826a49SYabin Cui     memset((char*)statePtr + initStart, 0, initLength);
6108*01826a49SYabin Cui     statePtr->acc[0] = XXH_PRIME32_3;
6109*01826a49SYabin Cui     statePtr->acc[1] = XXH_PRIME64_1;
6110*01826a49SYabin Cui     statePtr->acc[2] = XXH_PRIME64_2;
6111*01826a49SYabin Cui     statePtr->acc[3] = XXH_PRIME64_3;
6112*01826a49SYabin Cui     statePtr->acc[4] = XXH_PRIME64_4;
6113*01826a49SYabin Cui     statePtr->acc[5] = XXH_PRIME32_2;
6114*01826a49SYabin Cui     statePtr->acc[6] = XXH_PRIME64_5;
6115*01826a49SYabin Cui     statePtr->acc[7] = XXH_PRIME32_1;
6116*01826a49SYabin Cui     statePtr->seed = seed;
6117*01826a49SYabin Cui     statePtr->useSeed = (seed != 0);
6118*01826a49SYabin Cui     statePtr->extSecret = (const unsigned char*)secret;
6119*01826a49SYabin Cui     XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
6120*01826a49SYabin Cui     statePtr->secretLimit = secretSize - XXH_STRIPE_LEN;
6121*01826a49SYabin Cui     statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE;
6122*01826a49SYabin Cui }
6123*01826a49SYabin Cui 
6124*01826a49SYabin Cui /*! @ingroup XXH3_family */
6125*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode
6126*01826a49SYabin Cui XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr)
6127*01826a49SYabin Cui {
6128*01826a49SYabin Cui     if (statePtr == NULL) return XXH_ERROR;
6129*01826a49SYabin Cui     XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
6130*01826a49SYabin Cui     return XXH_OK;
6131*01826a49SYabin Cui }
6132*01826a49SYabin Cui 
6133*01826a49SYabin Cui /*! @ingroup XXH3_family */
6134*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode
6135*01826a49SYabin Cui XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize)
6136*01826a49SYabin Cui {
6137*01826a49SYabin Cui     if (statePtr == NULL) return XXH_ERROR;
6138*01826a49SYabin Cui     if (secret == NULL) return XXH_ERROR;
6139*01826a49SYabin Cui     if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
6140*01826a49SYabin Cui     XXH3_reset_internal(statePtr, 0, secret, secretSize);  /* reset only after arguments are validated, so a bad secretSize cannot underflow secretLimit */
6141*01826a49SYabin Cui     return XXH_OK;
6142*01826a49SYabin Cui }
6143*01826a49SYabin Cui 
6144*01826a49SYabin Cui /*! @ingroup XXH3_family */
6145*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode
6146*01826a49SYabin Cui XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed)
6147*01826a49SYabin Cui {
6148*01826a49SYabin Cui     if (statePtr == NULL) return XXH_ERROR;
6149*01826a49SYabin Cui     if (seed==0) return XXH3_64bits_reset(statePtr);
6150*01826a49SYabin Cui     if ((seed != statePtr->seed) || (statePtr->extSecret != NULL))
6151*01826a49SYabin Cui         XXH3_initCustomSecret(statePtr->customSecret, seed);
6152*01826a49SYabin Cui     XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
6153*01826a49SYabin Cui     return XXH_OK;
6154*01826a49SYabin Cui }
6155*01826a49SYabin Cui 
6156*01826a49SYabin Cui /*! @ingroup XXH3_family */
6157*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode
6158*01826a49SYabin Cui XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed64)
6159*01826a49SYabin Cui {
6160*01826a49SYabin Cui     if (statePtr == NULL) return XXH_ERROR;
6161*01826a49SYabin Cui     if (secret == NULL) return XXH_ERROR;
6162*01826a49SYabin Cui     if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
6163*01826a49SYabin Cui     XXH3_reset_internal(statePtr, seed64, secret, secretSize);
6164*01826a49SYabin Cui     statePtr->useSeed = 1; /* always, even if seed64==0 */
6165*01826a49SYabin Cui     return XXH_OK;
6166*01826a49SYabin Cui }
6167*01826a49SYabin Cui 
6168*01826a49SYabin Cui /*!
6169*01826a49SYabin Cui  * @internal
6170*01826a49SYabin Cui  * @brief Processes a large input for XXH3_update() and XXH3_digest_long().
6171*01826a49SYabin Cui  *
6172*01826a49SYabin Cui  * Unlike XXH3_hashLong_internal_loop(), this can process data that overlaps a block.
6173*01826a49SYabin Cui  *
6174*01826a49SYabin Cui  * @param acc                Pointer to the 8 accumulator lanes
6175*01826a49SYabin Cui  * @param nbStripesSoFarPtr  In/out pointer to the number of leftover stripes in the block
6176*01826a49SYabin Cui  * @param nbStripesPerBlock  Number of stripes in a block
6177*01826a49SYabin Cui  * @param input              Input pointer
6178*01826a49SYabin Cui  * @param nbStripes          Number of stripes to process
6179*01826a49SYabin Cui  * @param secret             Secret pointer
6180*01826a49SYabin Cui  * @param secretLimit        Offset of the last block in @p secret
6181*01826a49SYabin Cui  * @param f_acc              Pointer to an XXH3_accumulate implementation
6182*01826a49SYabin Cui  * @param f_scramble         Pointer to an XXH3_scrambleAcc implementation
6183*01826a49SYabin Cui  * @return                   Pointer past the end of @p input after processing
6184*01826a49SYabin Cui  */
6185*01826a49SYabin Cui XXH_FORCE_INLINE const xxh_u8 *
6186*01826a49SYabin Cui XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc,
6187*01826a49SYabin Cui                     size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock,
6188*01826a49SYabin Cui                     const xxh_u8* XXH_RESTRICT input, size_t nbStripes,
6189*01826a49SYabin Cui                     const xxh_u8* XXH_RESTRICT secret, size_t secretLimit,
6190*01826a49SYabin Cui                     XXH3_f_accumulate f_acc,
6191*01826a49SYabin Cui                     XXH3_f_scrambleAcc f_scramble)
6192*01826a49SYabin Cui {
6193*01826a49SYabin Cui     const xxh_u8* initialSecret = secret + *nbStripesSoFarPtr * XXH_SECRET_CONSUME_RATE;
6194*01826a49SYabin Cui     /* Process full blocks */
6195*01826a49SYabin Cui     if (nbStripes >= (nbStripesPerBlock - *nbStripesSoFarPtr)) {
6196*01826a49SYabin Cui         /* Process the initial partial block... */
6197*01826a49SYabin Cui         size_t nbStripesThisIter = nbStripesPerBlock - *nbStripesSoFarPtr;
6198*01826a49SYabin Cui 
6199*01826a49SYabin Cui         do {
6200*01826a49SYabin Cui             /* Accumulate and scramble */
6201*01826a49SYabin Cui             f_acc(acc, input, initialSecret, nbStripesThisIter);
6202*01826a49SYabin Cui             f_scramble(acc, secret + secretLimit);
6203*01826a49SYabin Cui             input += nbStripesThisIter * XXH_STRIPE_LEN;
6204*01826a49SYabin Cui             nbStripes -= nbStripesThisIter;
6205*01826a49SYabin Cui             /* Then continue the loop with the full block size */
6206*01826a49SYabin Cui             nbStripesThisIter = nbStripesPerBlock;
6207*01826a49SYabin Cui             initialSecret = secret;
6208*01826a49SYabin Cui         } while (nbStripes >= nbStripesPerBlock);
6209*01826a49SYabin Cui         *nbStripesSoFarPtr = 0;
6210*01826a49SYabin Cui     }
6211*01826a49SYabin Cui     /* Process a partial block */
6212*01826a49SYabin Cui     if (nbStripes > 0) {
6213*01826a49SYabin Cui         f_acc(acc, input, initialSecret, nbStripes);
6214*01826a49SYabin Cui         input += nbStripes * XXH_STRIPE_LEN;
6215*01826a49SYabin Cui         *nbStripesSoFarPtr += nbStripes;
6216*01826a49SYabin Cui     }
6217*01826a49SYabin Cui     /* Return end pointer */
6218*01826a49SYabin Cui     return input;
6219*01826a49SYabin Cui }
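/*
 * Worked example for XXH3_consumeStripes, assuming the default secret
 * (nbStripesPerBlock = 16), *nbStripesSoFarPtr = 10 and nbStripes = 40:
 *   pass 1 : 6 stripes finish the current block, followed by a scramble;
 *   pass 2 : a full 16-stripe block, followed by a scramble;
 *   pass 3 : another full 16-stripe block, followed by a scramble;
 *   tail   : the remaining 2 stripes are accumulated without scrambling,
 *            leaving *nbStripesSoFarPtr = 2.
 * Total consumed: 6 + 16 + 16 + 2 = 40 stripes.
 */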
6220*01826a49SYabin Cui 
6221*01826a49SYabin Cui #ifndef XXH3_STREAM_USE_STACK
6222*01826a49SYabin Cui # if XXH_SIZE_OPT <= 0 && !defined(__clang__) /* clang doesn't need additional stack space */
6223*01826a49SYabin Cui #   define XXH3_STREAM_USE_STACK 1
6224*01826a49SYabin Cui # endif
6225*01826a49SYabin Cui #endif
6226*01826a49SYabin Cui /*
6227*01826a49SYabin Cui  * Both XXH3_64bits_update and XXH3_128bits_update use this routine.
6228*01826a49SYabin Cui  */
6229*01826a49SYabin Cui XXH_FORCE_INLINE XXH_errorcode
6230*01826a49SYabin Cui XXH3_update(XXH3_state_t* XXH_RESTRICT const state,
6231*01826a49SYabin Cui             const xxh_u8* XXH_RESTRICT input, size_t len,
6232*01826a49SYabin Cui             XXH3_f_accumulate f_acc,
6233*01826a49SYabin Cui             XXH3_f_scrambleAcc f_scramble)
6234*01826a49SYabin Cui {
6235*01826a49SYabin Cui     if (input==NULL) {
6236*01826a49SYabin Cui         XXH_ASSERT(len == 0);
6237*01826a49SYabin Cui         return XXH_OK;
6238*01826a49SYabin Cui     }
6239*01826a49SYabin Cui 
6240*01826a49SYabin Cui     XXH_ASSERT(state != NULL);
6241*01826a49SYabin Cui     {   const xxh_u8* const bEnd = input + len;
6242*01826a49SYabin Cui         const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
6243*01826a49SYabin Cui #if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
6244*01826a49SYabin Cui         /* For some reason, gcc and MSVC seem to suffer greatly
6245*01826a49SYabin Cui          * when operating on accumulators directly in the state.
6246*01826a49SYabin Cui          * Operating on stack space seems to enable proper optimization.
6247*01826a49SYabin Cui          * clang, on the other hand, doesn't seem to need this trick */
6248*01826a49SYabin Cui         XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8];
6249*01826a49SYabin Cui         XXH_memcpy(acc, state->acc, sizeof(acc));
6250*01826a49SYabin Cui #else
6251*01826a49SYabin Cui         xxh_u64* XXH_RESTRICT const acc = state->acc;
6252*01826a49SYabin Cui #endif
6253*01826a49SYabin Cui         state->totalLen += len;
6254*01826a49SYabin Cui         XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE);
6255*01826a49SYabin Cui 
6256*01826a49SYabin Cui         /* small input : just fill in tmp buffer */
6257*01826a49SYabin Cui         if (len <= XXH3_INTERNALBUFFER_SIZE - state->bufferedSize) {
6258*01826a49SYabin Cui             XXH_memcpy(state->buffer + state->bufferedSize, input, len);
6259*01826a49SYabin Cui             state->bufferedSize += (XXH32_hash_t)len;
6260*01826a49SYabin Cui             return XXH_OK;
6261*01826a49SYabin Cui         }
6262*01826a49SYabin Cui 
6263*01826a49SYabin Cui         /* total input is now > XXH3_INTERNALBUFFER_SIZE */
6264*01826a49SYabin Cui         #define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN)
6265*01826a49SYabin Cui         XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0);   /* clean multiple */
6266*01826a49SYabin Cui 
6267*01826a49SYabin Cui         /*
6268*01826a49SYabin Cui          * Internal buffer is partially filled (always, except at beginning)
6269*01826a49SYabin Cui          * Complete it, then consume it.
6270*01826a49SYabin Cui          */
6271*01826a49SYabin Cui         if (state->bufferedSize) {
6272*01826a49SYabin Cui             size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize;
6273*01826a49SYabin Cui             XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize);
6274*01826a49SYabin Cui             input += loadSize;
6275*01826a49SYabin Cui             XXH3_consumeStripes(acc,
6276*01826a49SYabin Cui                                &state->nbStripesSoFar, state->nbStripesPerBlock,
6277*01826a49SYabin Cui                                 state->buffer, XXH3_INTERNALBUFFER_STRIPES,
6278*01826a49SYabin Cui                                 secret, state->secretLimit,
6279*01826a49SYabin Cui                                 f_acc, f_scramble);
6280*01826a49SYabin Cui             state->bufferedSize = 0;
6281*01826a49SYabin Cui         }
6282*01826a49SYabin Cui         XXH_ASSERT(input < bEnd);
6283*01826a49SYabin Cui         if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) {
6284*01826a49SYabin Cui             size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN;
6285*01826a49SYabin Cui             input = XXH3_consumeStripes(acc,
6286*01826a49SYabin Cui                                        &state->nbStripesSoFar, state->nbStripesPerBlock,
6287*01826a49SYabin Cui                                        input, nbStripes,
6288*01826a49SYabin Cui                                        secret, state->secretLimit,
6289*01826a49SYabin Cui                                        f_acc, f_scramble);
6290*01826a49SYabin Cui             XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
6291*01826a49SYabin Cui 
6292*01826a49SYabin Cui         }
6293*01826a49SYabin Cui         /* Some remaining input (always) : buffer it */
6294*01826a49SYabin Cui         XXH_ASSERT(input < bEnd);
6295*01826a49SYabin Cui         XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE);
6296*01826a49SYabin Cui         XXH_ASSERT(state->bufferedSize == 0);
6297*01826a49SYabin Cui         XXH_memcpy(state->buffer, input, (size_t)(bEnd-input));
6298*01826a49SYabin Cui         state->bufferedSize = (XXH32_hash_t)(bEnd-input);
6299*01826a49SYabin Cui #if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
6300*01826a49SYabin Cui         /* save stack accumulators into state */
6301*01826a49SYabin Cui         XXH_memcpy(state->acc, acc, sizeof(acc));
6302*01826a49SYabin Cui #endif
6303*01826a49SYabin Cui     }
6304*01826a49SYabin Cui 
6305*01826a49SYabin Cui     return XXH_OK;
6306*01826a49SYabin Cui }
6307*01826a49SYabin Cui 
6308*01826a49SYabin Cui /*! @ingroup XXH3_family */
6309*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode
6310*01826a49SYabin Cui XXH3_64bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len)
6311*01826a49SYabin Cui {
6312*01826a49SYabin Cui     return XXH3_update(state, (const xxh_u8*)input, len,
6313*01826a49SYabin Cui                        XXH3_accumulate, XXH3_scrambleAcc);
6314*01826a49SYabin Cui }
6315*01826a49SYabin Cui 
6316*01826a49SYabin Cui 
6317*01826a49SYabin Cui XXH_FORCE_INLINE void
6318*01826a49SYabin Cui XXH3_digest_long (XXH64_hash_t* acc,
6319*01826a49SYabin Cui                   const XXH3_state_t* state,
6320*01826a49SYabin Cui                   const unsigned char* secret)
6321*01826a49SYabin Cui {
6322*01826a49SYabin Cui     xxh_u8 lastStripe[XXH_STRIPE_LEN];
6323*01826a49SYabin Cui     const xxh_u8* lastStripePtr;
6324*01826a49SYabin Cui 
6325*01826a49SYabin Cui     /*
6326*01826a49SYabin Cui      * Digest on a local copy. This way, the state remains unaltered, and it can
6327*01826a49SYabin Cui      * continue ingesting more input afterwards.
6328*01826a49SYabin Cui      */
6329*01826a49SYabin Cui     XXH_memcpy(acc, state->acc, sizeof(state->acc));
6330*01826a49SYabin Cui     if (state->bufferedSize >= XXH_STRIPE_LEN) {
6331*01826a49SYabin Cui         /* Consume remaining stripes then point to remaining data in buffer */
6332*01826a49SYabin Cui         size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN;
6333*01826a49SYabin Cui         size_t nbStripesSoFar = state->nbStripesSoFar;
6334*01826a49SYabin Cui         XXH3_consumeStripes(acc,
6335*01826a49SYabin Cui                            &nbStripesSoFar, state->nbStripesPerBlock,
6336*01826a49SYabin Cui                             state->buffer, nbStripes,
6337*01826a49SYabin Cui                             secret, state->secretLimit,
6338*01826a49SYabin Cui                             XXH3_accumulate, XXH3_scrambleAcc);
6339*01826a49SYabin Cui         lastStripePtr = state->buffer + state->bufferedSize - XXH_STRIPE_LEN;
6340*01826a49SYabin Cui     } else {  /* bufferedSize < XXH_STRIPE_LEN */
6341*01826a49SYabin Cui         /* Copy to temp buffer */
6342*01826a49SYabin Cui         size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize;
6343*01826a49SYabin Cui         XXH_ASSERT(state->bufferedSize > 0);  /* there is always some input buffered */
6344*01826a49SYabin Cui         XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize);
6345*01826a49SYabin Cui         XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize);
6346*01826a49SYabin Cui         lastStripePtr = lastStripe;
6347*01826a49SYabin Cui     }
6348*01826a49SYabin Cui     /* Last stripe */
6349*01826a49SYabin Cui     XXH3_accumulate_512(acc,
6350*01826a49SYabin Cui                         lastStripePtr,
6351*01826a49SYabin Cui                         secret + state->secretLimit - XXH_SECRET_LASTACC_START);
6352*01826a49SYabin Cui }
6353*01826a49SYabin Cui 
6354*01826a49SYabin Cui /*! @ingroup XXH3_family */
6355*01826a49SYabin Cui XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* state)
6356*01826a49SYabin Cui {
6357*01826a49SYabin Cui     const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
6358*01826a49SYabin Cui     if (state->totalLen > XXH3_MIDSIZE_MAX) {
6359*01826a49SYabin Cui         XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
6360*01826a49SYabin Cui         XXH3_digest_long(acc, state, secret);
6361*01826a49SYabin Cui         return XXH3_mergeAccs(acc,
6362*01826a49SYabin Cui                               secret + XXH_SECRET_MERGEACCS_START,
6363*01826a49SYabin Cui                               (xxh_u64)state->totalLen * XXH_PRIME64_1);
6364*01826a49SYabin Cui     }
6365*01826a49SYabin Cui     /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */
6366*01826a49SYabin Cui     if (state->useSeed)
6367*01826a49SYabin Cui         return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
6368*01826a49SYabin Cui     return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen),
6369*01826a49SYabin Cui                                   secret, state->secretLimit + XXH_STRIPE_LEN);
6370*01826a49SYabin Cui }
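/*
 * Editorial sketch, not part of the library: typical streaming usage of the
 * functions above, hashing a buffer in two chunks (error handling elided,
 * helper name hypothetical).
 */
#if 0  /* illustrative example, never compiled */
static XXH64_hash_t XXH_example_streaming(const void* data, size_t len)
{
    XXH3_state_t* const state = XXH3_createState();
    XXH64_hash_t hash;
    (void)XXH3_64bits_reset(state);
    (void)XXH3_64bits_update(state, data, len / 2);
    (void)XXH3_64bits_update(state, (const char*)data + len / 2, len - len / 2);
    hash = XXH3_64bits_digest(state);   /* the state stays valid for more updates */
    (void)XXH3_freeState(state);
    return hash;
}
#endif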
6371*01826a49SYabin Cui #endif /* !XXH_NO_STREAM */
6372*01826a49SYabin Cui 
6373*01826a49SYabin Cui 
6374*01826a49SYabin Cui /* ==========================================
6375*01826a49SYabin Cui  * XXH3 128 bits (a.k.a. XXH128)
6376*01826a49SYabin Cui  * ==========================================
6377*01826a49SYabin Cui  * XXH3's 128-bit variant has better mixing and strength than the 64-bit variant,
6378*01826a49SYabin Cui  * even without counting the significantly larger output size.
6379*01826a49SYabin Cui  *
6380*01826a49SYabin Cui  * For example, extra steps are taken to avoid the seed-dependent collisions
6381*01826a49SYabin Cui  * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B).
6382*01826a49SYabin Cui  *
6383*01826a49SYabin Cui  * This strength naturally comes at the cost of some speed, especially on short
6384*01826a49SYabin Cui  * lengths. Note that long inputs hash about as fast as with the 64-bit version,
6385*01826a49SYabin Cui  * since this variant uses only a slight modification of the 64-bit loop.
6386*01826a49SYabin Cui  *
6387*01826a49SYabin Cui  * XXH128 is also more oriented towards 64-bit machines. It is still extremely
6388*01826a49SYabin Cui  * fast for a _128-bit_ hash on 32-bit (it usually clears XXH64).
6389*01826a49SYabin Cui  */
6390*01826a49SYabin Cui 
6391*01826a49SYabin Cui XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
6392*01826a49SYabin Cui XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
6393*01826a49SYabin Cui {
6394*01826a49SYabin Cui     /* A doubled version of 1to3_64b with different constants. */
6395*01826a49SYabin Cui     XXH_ASSERT(input != NULL);
6396*01826a49SYabin Cui     XXH_ASSERT(1 <= len && len <= 3);
6397*01826a49SYabin Cui     XXH_ASSERT(secret != NULL);
6398*01826a49SYabin Cui     /*
6399*01826a49SYabin Cui      * len = 1: combinedl = { input[0], 0x01, input[0], input[0] }
6400*01826a49SYabin Cui      * len = 2: combinedl = { input[1], 0x02, input[0], input[1] }
6401*01826a49SYabin Cui      * len = 3: combinedl = { input[2], 0x03, input[0], input[1] }
6402*01826a49SYabin Cui      */
6403*01826a49SYabin Cui     {   xxh_u8 const c1 = input[0];
6404*01826a49SYabin Cui         xxh_u8 const c2 = input[len >> 1];
6405*01826a49SYabin Cui         xxh_u8 const c3 = input[len - 1];
6406*01826a49SYabin Cui         xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24)
6407*01826a49SYabin Cui                                 | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
6408*01826a49SYabin Cui         xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13);
6409*01826a49SYabin Cui         xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
6410*01826a49SYabin Cui         xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed;
6411*01826a49SYabin Cui         xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl;
6412*01826a49SYabin Cui         xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph;
6413*01826a49SYabin Cui         XXH128_hash_t h128;
6414*01826a49SYabin Cui         h128.low64  = XXH64_avalanche(keyed_lo);
6415*01826a49SYabin Cui         h128.high64 = XXH64_avalanche(keyed_hi);
6416*01826a49SYabin Cui         return h128;
6417*01826a49SYabin Cui     }
6418*01826a49SYabin Cui }
6419*01826a49SYabin Cui 
6420*01826a49SYabin Cui XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
6421*01826a49SYabin Cui XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
6422*01826a49SYabin Cui {
6423*01826a49SYabin Cui     XXH_ASSERT(input != NULL);
6424*01826a49SYabin Cui     XXH_ASSERT(secret != NULL);
6425*01826a49SYabin Cui     XXH_ASSERT(4 <= len && len <= 8);
6426*01826a49SYabin Cui     seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
6427*01826a49SYabin Cui     {   xxh_u32 const input_lo = XXH_readLE32(input);
6428*01826a49SYabin Cui         xxh_u32 const input_hi = XXH_readLE32(input + len - 4);
6429*01826a49SYabin Cui         xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32);
6430*01826a49SYabin Cui         xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed;
6431*01826a49SYabin Cui         xxh_u64 const keyed = input_64 ^ bitflip;
6432*01826a49SYabin Cui 
6433*01826a49SYabin Cui         /* Shift len left so the added term is even, keeping the odd prime multiplier odd: an even multiplier would discard low-order bits. */
6434*01826a49SYabin Cui         XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2));
6435*01826a49SYabin Cui 
6436*01826a49SYabin Cui         m128.high64 += (m128.low64 << 1);
6437*01826a49SYabin Cui         m128.low64  ^= (m128.high64 >> 3);
6438*01826a49SYabin Cui 
6439*01826a49SYabin Cui         m128.low64   = XXH_xorshift64(m128.low64, 35);
6440*01826a49SYabin Cui         m128.low64  *= PRIME_MX2;
6441*01826a49SYabin Cui         m128.low64   = XXH_xorshift64(m128.low64, 28);
6442*01826a49SYabin Cui         m128.high64  = XXH3_avalanche(m128.high64);
6443*01826a49SYabin Cui         return m128;
6444*01826a49SYabin Cui     }
6445*01826a49SYabin Cui }
6446*01826a49SYabin Cui 
6447*01826a49SYabin Cui XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
6448*01826a49SYabin Cui XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
6449*01826a49SYabin Cui {
6450*01826a49SYabin Cui     XXH_ASSERT(input != NULL);
6451*01826a49SYabin Cui     XXH_ASSERT(secret != NULL);
6452*01826a49SYabin Cui     XXH_ASSERT(9 <= len && len <= 16);
6453*01826a49SYabin Cui     {   xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed;
6454*01826a49SYabin Cui         xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed;
6455*01826a49SYabin Cui         xxh_u64 const input_lo = XXH_readLE64(input);
6456*01826a49SYabin Cui         xxh_u64       input_hi = XXH_readLE64(input + len - 8);
6457*01826a49SYabin Cui         XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1);
6458*01826a49SYabin Cui         /*
6459*01826a49SYabin Cui          * Put len in the middle of m128 to ensure that the length gets mixed to
6460*01826a49SYabin Cui          * both the low and high bits in the 128x64 multiply below.
6461*01826a49SYabin Cui          */
6462*01826a49SYabin Cui         m128.low64 += (xxh_u64)(len - 1) << 54;
6463*01826a49SYabin Cui         input_hi   ^= bitfliph;
6464*01826a49SYabin Cui         /*
6465*01826a49SYabin Cui          * Add the high 32 bits of input_hi to the high 32 bits of m128, then
6466*01826a49SYabin Cui          * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to
6467*01826a49SYabin Cui          * the high 64 bits of m128.
6468*01826a49SYabin Cui          *
6469*01826a49SYabin Cui          * The best approach to this operation is different on 32-bit and 64-bit.
6470*01826a49SYabin Cui          */
6471*01826a49SYabin Cui         if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */
6472*01826a49SYabin Cui             /*
6473*01826a49SYabin Cui              * 32-bit optimized version, which is more readable.
6474*01826a49SYabin Cui              *
6475*01826a49SYabin Cui              * On 32-bit, it removes an ADC and delays a dependency between the two
6476*01826a49SYabin Cui              * halves of m128.high64, but it generates an extra mask on 64-bit.
6477*01826a49SYabin Cui              */
6478*01826a49SYabin Cui             m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2);
6479*01826a49SYabin Cui         } else {
6480*01826a49SYabin Cui             /*
6481*01826a49SYabin Cui              * 64-bit optimized (albeit more confusing) version.
6482*01826a49SYabin Cui              *
6483*01826a49SYabin Cui              * Uses some properties of addition and multiplication to remove the mask:
6484*01826a49SYabin Cui              *
6485*01826a49SYabin Cui              * Let:
6486*01826a49SYabin Cui              *    a = input_hi.hi = (input_hi & 0xFFFFFFFF00000000)
6487*01826a49SYabin Cui              *    b = input_hi.lo = (input_hi & 0x00000000FFFFFFFF)
6488*01826a49SYabin Cui              *    c = XXH_PRIME32_2
6489*01826a49SYabin Cui              *
6490*01826a49SYabin Cui              *    a + (b * c)
6491*01826a49SYabin Cui              * Inverse Property: x + y - x == y
6492*01826a49SYabin Cui              *    a + (b * (1 + c - 1))
6493*01826a49SYabin Cui              * Distributive Property: x * (y + z) == (x * y) + (x * z)
6494*01826a49SYabin Cui              *    a + (b * 1) + (b * (c - 1))
6495*01826a49SYabin Cui              * Identity Property: x * 1 == x
6496*01826a49SYabin Cui              *    a + b + (b * (c - 1))
6497*01826a49SYabin Cui              *
6498*01826a49SYabin Cui              * Substitute a, b, and c:
6499*01826a49SYabin Cui              *    input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
6500*01826a49SYabin Cui              *
6501*01826a49SYabin Cui              * Since input_hi.hi + input_hi.lo == input_hi, we get this:
6502*01826a49SYabin Cui              *    input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
6503*01826a49SYabin Cui              */
6504*01826a49SYabin Cui             m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1);
6505*01826a49SYabin Cui         }
6506*01826a49SYabin Cui         /* m128 ^= XXH_swap64(m128 >> 64); */
6507*01826a49SYabin Cui         m128.low64  ^= XXH_swap64(m128.high64);
6508*01826a49SYabin Cui 
6509*01826a49SYabin Cui         {   /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */
6510*01826a49SYabin Cui             XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2);
6511*01826a49SYabin Cui             h128.high64 += m128.high64 * XXH_PRIME64_2;
6512*01826a49SYabin Cui 
6513*01826a49SYabin Cui             h128.low64   = XXH3_avalanche(h128.low64);
6514*01826a49SYabin Cui             h128.high64  = XXH3_avalanche(h128.high64);
6515*01826a49SYabin Cui             return h128;
6516*01826a49SYabin Cui     }   }
6517*01826a49SYabin Cui }
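
/*
 * Quick numeric check of the mask-removal identity above, using small
 * illustrative values (hi, lo and c are stand-ins, not xxHash names):
 *
 *   #include <assert.h>
 *   #include <stdint.h>
 *   uint64_t const hi = 0x100000000ULL, lo = 2, c = 5;
 *   // masked form:    hi + (lo * c)            == 0x10000000A
 *   // mask-free form: (hi + lo) + lo * (c - 1) == 0x10000000A
 *   assert(hi + lo * c == (hi + lo) + lo * (c - 1));
 */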
6518*01826a49SYabin Cui 
6519*01826a49SYabin Cui /*
6520*01826a49SYabin Cui  * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN
6521*01826a49SYabin Cui  */
6522*01826a49SYabin Cui XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
6523*01826a49SYabin Cui XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
6524*01826a49SYabin Cui {
6525*01826a49SYabin Cui     XXH_ASSERT(len <= 16);
6526*01826a49SYabin Cui     {   if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed);
6527*01826a49SYabin Cui         if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed);
6528*01826a49SYabin Cui         if (len) return XXH3_len_1to3_128b(input, len, secret, seed);
6529*01826a49SYabin Cui         {   XXH128_hash_t h128;
6530*01826a49SYabin Cui             xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72);
6531*01826a49SYabin Cui             xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88);
6532*01826a49SYabin Cui             h128.low64 = XXH64_avalanche(seed ^ bitflipl);
6533*01826a49SYabin Cui             h128.high64 = XXH64_avalanche( seed ^ bitfliph);
6534*01826a49SYabin Cui             return h128;
6535*01826a49SYabin Cui     }   }
6536*01826a49SYabin Cui }
6537*01826a49SYabin Cui 
6538*01826a49SYabin Cui /*
6539*01826a49SYabin Cui  * A bit slower than XXH3_mix16B, but handles multiply by zero better.
6540*01826a49SYabin Cui  */
6541*01826a49SYabin Cui XXH_FORCE_INLINE XXH128_hash_t
6542*01826a49SYabin Cui XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2,
6543*01826a49SYabin Cui               const xxh_u8* secret, XXH64_hash_t seed)
6544*01826a49SYabin Cui {
6545*01826a49SYabin Cui     acc.low64  += XXH3_mix16B (input_1, secret+0, seed);
6546*01826a49SYabin Cui     acc.low64  ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8);
6547*01826a49SYabin Cui     acc.high64 += XXH3_mix16B (input_2, secret+16, seed);
6548*01826a49SYabin Cui     acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8);
6549*01826a49SYabin Cui     return acc;
6550*01826a49SYabin Cui }
6551*01826a49SYabin Cui 
6552*01826a49SYabin Cui 
6553*01826a49SYabin Cui XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
6554*01826a49SYabin Cui XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
6555*01826a49SYabin Cui                       const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
6556*01826a49SYabin Cui                       XXH64_hash_t seed)
6557*01826a49SYabin Cui {
6558*01826a49SYabin Cui     XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
6559*01826a49SYabin Cui     XXH_ASSERT(16 < len && len <= 128);
6560*01826a49SYabin Cui 
6561*01826a49SYabin Cui     {   XXH128_hash_t acc;
6562*01826a49SYabin Cui         acc.low64 = len * XXH_PRIME64_1;
6563*01826a49SYabin Cui         acc.high64 = 0;
6564*01826a49SYabin Cui 
6565*01826a49SYabin Cui #if XXH_SIZE_OPT >= 1
6566*01826a49SYabin Cui         {
6567*01826a49SYabin Cui             /* Smaller, but slightly slower. */
6568*01826a49SYabin Cui             unsigned int i = (unsigned int)(len - 1) / 32;
6569*01826a49SYabin Cui             do {
6570*01826a49SYabin Cui                 acc = XXH128_mix32B(acc, input+16*i, input+len-16*(i+1), secret+32*i, seed);
6571*01826a49SYabin Cui             } while (i-- != 0);
6572*01826a49SYabin Cui         }
6573*01826a49SYabin Cui #else
6574*01826a49SYabin Cui         if (len > 32) {
6575*01826a49SYabin Cui             if (len > 64) {
6576*01826a49SYabin Cui                 if (len > 96) {
6577*01826a49SYabin Cui                     acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed);
6578*01826a49SYabin Cui                 }
6579*01826a49SYabin Cui                 acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed);
6580*01826a49SYabin Cui             }
6581*01826a49SYabin Cui             acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed);
6582*01826a49SYabin Cui         }
6583*01826a49SYabin Cui         acc = XXH128_mix32B(acc, input, input+len-16, secret, seed);
6584*01826a49SYabin Cui #endif
6585*01826a49SYabin Cui         {   XXH128_hash_t h128;
6586*01826a49SYabin Cui             h128.low64  = acc.low64 + acc.high64;
6587*01826a49SYabin Cui             h128.high64 = (acc.low64    * XXH_PRIME64_1)
6588*01826a49SYabin Cui                         + (acc.high64   * XXH_PRIME64_4)
6589*01826a49SYabin Cui                         + ((len - seed) * XXH_PRIME64_2);
6590*01826a49SYabin Cui             h128.low64  = XXH3_avalanche(h128.low64);
6591*01826a49SYabin Cui             h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
6592*01826a49SYabin Cui             return h128;
6593*01826a49SYabin Cui         }
6594*01826a49SYabin Cui     }
6595*01826a49SYabin Cui }
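
/*
 * Coverage sketch for the pairing scheme above, with a hypothetical
 * len = 100: the unrolled path pairs front blocks [0,16) [16,32) [32,48)
 * [48,64) with back blocks [84,100) [68,84) [52,68) [36,52). The first and
 * last 64 bytes are always mixed, and mid-range bytes overlap in the middle.
 */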
6596*01826a49SYabin Cui 
6597*01826a49SYabin Cui XXH_NO_INLINE XXH_PUREF XXH128_hash_t
6598*01826a49SYabin Cui XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
6599*01826a49SYabin Cui                        const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
6600*01826a49SYabin Cui                        XXH64_hash_t seed)
6601*01826a49SYabin Cui {
6602*01826a49SYabin Cui     XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
6603*01826a49SYabin Cui     XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
6604*01826a49SYabin Cui 
6605*01826a49SYabin Cui     {   XXH128_hash_t acc;
6606*01826a49SYabin Cui         unsigned i;
6607*01826a49SYabin Cui         acc.low64 = len * XXH_PRIME64_1;
6608*01826a49SYabin Cui         acc.high64 = 0;
6609*01826a49SYabin Cui         /*
6610*01826a49SYabin Cui          * We set `i` to offset + 32, so that the unchanged `len` can be
6611*01826a49SYabin Cui          * used as the upper bound. This reaches a sweet spot where both
6612*01826a49SYabin Cui          * x86 and aarch64 get simple address generation and good codegen
6613*01826a49SYabin Cui          * for the loop.
6614*01826a49SYabin Cui          */
6615*01826a49SYabin Cui         for (i = 32; i < 160; i += 32) {
6616*01826a49SYabin Cui             acc = XXH128_mix32B(acc,
6617*01826a49SYabin Cui                                 input  + i - 32,
6618*01826a49SYabin Cui                                 input  + i - 16,
6619*01826a49SYabin Cui                                 secret + i - 32,
6620*01826a49SYabin Cui                                 seed);
6621*01826a49SYabin Cui         }
6622*01826a49SYabin Cui         acc.low64 = XXH3_avalanche(acc.low64);
6623*01826a49SYabin Cui         acc.high64 = XXH3_avalanche(acc.high64);
6624*01826a49SYabin Cui         /*
6625*01826a49SYabin Cui          * NB: `i <= len` will duplicate the last 32 bytes when len % 32 == 0
6626*01826a49SYabin Cui          * (e.g. for len = 192, the final mix below re-reads bytes [160,192)).
6627*01826a49SYabin Cui          * This is an unfortunate necessity to keep the hash result stable.
6628*01826a49SYabin Cui          */
6629*01826a49SYabin Cui         for (i=160; i <= len; i += 32) {
6630*01826a49SYabin Cui             acc = XXH128_mix32B(acc,
6631*01826a49SYabin Cui                                 input + i - 32,
6632*01826a49SYabin Cui                                 input + i - 16,
6633*01826a49SYabin Cui                                 secret + XXH3_MIDSIZE_STARTOFFSET + i - 160,
6634*01826a49SYabin Cui                                 seed);
6635*01826a49SYabin Cui         }
6636*01826a49SYabin Cui         /* last bytes */
6637*01826a49SYabin Cui         acc = XXH128_mix32B(acc,
6638*01826a49SYabin Cui                             input + len - 16,
6639*01826a49SYabin Cui                             input + len - 32,
6640*01826a49SYabin Cui                             secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
6641*01826a49SYabin Cui                             (XXH64_hash_t)0 - seed);
6642*01826a49SYabin Cui 
6643*01826a49SYabin Cui         {   XXH128_hash_t h128;
6644*01826a49SYabin Cui             h128.low64  = acc.low64 + acc.high64;
6645*01826a49SYabin Cui             h128.high64 = (acc.low64    * XXH_PRIME64_1)
6646*01826a49SYabin Cui                         + (acc.high64   * XXH_PRIME64_4)
6647*01826a49SYabin Cui                         + ((len - seed) * XXH_PRIME64_2);
6648*01826a49SYabin Cui             h128.low64  = XXH3_avalanche(h128.low64);
6649*01826a49SYabin Cui             h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
6650*01826a49SYabin Cui             return h128;
6651*01826a49SYabin Cui         }
6652*01826a49SYabin Cui     }
6653*01826a49SYabin Cui }
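
/*
 * Shape of the 129-240 path above, for a hypothetical len = 200: the first
 * loop mixes bytes [0,128) with the start of the secret, the second loop
 * mixes [128,192) starting at secret+XXH3_MIDSIZE_STARTOFFSET, and the
 * final mix folds in the last 32 bytes, [168,200), with a negated seed.
 */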
6654*01826a49SYabin Cui 
6655*01826a49SYabin Cui XXH_FORCE_INLINE XXH128_hash_t
6656*01826a49SYabin Cui XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len,
6657*01826a49SYabin Cui                             const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
6658*01826a49SYabin Cui                             XXH3_f_accumulate f_acc,
6659*01826a49SYabin Cui                             XXH3_f_scrambleAcc f_scramble)
6660*01826a49SYabin Cui {
6661*01826a49SYabin Cui     XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
6662*01826a49SYabin Cui 
6663*01826a49SYabin Cui     XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc, f_scramble);
6664*01826a49SYabin Cui 
6665*01826a49SYabin Cui     /* converge into final hash */
6666*01826a49SYabin Cui     XXH_STATIC_ASSERT(sizeof(acc) == 64);
6667*01826a49SYabin Cui     XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
6668*01826a49SYabin Cui     {   XXH128_hash_t h128;
6669*01826a49SYabin Cui         h128.low64  = XXH3_mergeAccs(acc,
6670*01826a49SYabin Cui                                      secret + XXH_SECRET_MERGEACCS_START,
6671*01826a49SYabin Cui                                      (xxh_u64)len * XXH_PRIME64_1);
6672*01826a49SYabin Cui         h128.high64 = XXH3_mergeAccs(acc,
6673*01826a49SYabin Cui                                      secret + secretSize
6674*01826a49SYabin Cui                                             - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
6675*01826a49SYabin Cui                                      ~((xxh_u64)len * XXH_PRIME64_2));
6676*01826a49SYabin Cui         return h128;
6677*01826a49SYabin Cui     }
6678*01826a49SYabin Cui }
6679*01826a49SYabin Cui 
6680*01826a49SYabin Cui /*
6681*01826a49SYabin Cui  * It's important for performance that XXH3_hashLong() is not inlined.
6682*01826a49SYabin Cui  */
6683*01826a49SYabin Cui XXH_NO_INLINE XXH_PUREF XXH128_hash_t
6684*01826a49SYabin Cui XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len,
6685*01826a49SYabin Cui                            XXH64_hash_t seed64,
6686*01826a49SYabin Cui                            const void* XXH_RESTRICT secret, size_t secretLen)
6687*01826a49SYabin Cui {
6688*01826a49SYabin Cui     (void)seed64; (void)secret; (void)secretLen;
6689*01826a49SYabin Cui     return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret),
6690*01826a49SYabin Cui                                        XXH3_accumulate, XXH3_scrambleAcc);
6691*01826a49SYabin Cui }
6692*01826a49SYabin Cui 
6693*01826a49SYabin Cui /*
6694*01826a49SYabin Cui  * It's important for performance to pass @p secretLen (when it is statically
6695*01826a49SYabin Cui  * known) to the compiler, so that it can properly optimize the vectorized loop.
6696*01826a49SYabin Cui  *
6697*01826a49SYabin Cui  * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE
6698*01826a49SYabin Cui  * breaks -Og, this is XXH_NO_INLINE.
6699*01826a49SYabin Cui  */
6700*01826a49SYabin Cui XXH3_WITH_SECRET_INLINE XXH128_hash_t
6701*01826a49SYabin Cui XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len,
6702*01826a49SYabin Cui                               XXH64_hash_t seed64,
6703*01826a49SYabin Cui                               const void* XXH_RESTRICT secret, size_t secretLen)
6704*01826a49SYabin Cui {
6705*01826a49SYabin Cui     (void)seed64;
6706*01826a49SYabin Cui     return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen,
6707*01826a49SYabin Cui                                        XXH3_accumulate, XXH3_scrambleAcc);
6708*01826a49SYabin Cui }
6709*01826a49SYabin Cui 
6710*01826a49SYabin Cui XXH_FORCE_INLINE XXH128_hash_t
6711*01826a49SYabin Cui XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len,
6712*01826a49SYabin Cui                                 XXH64_hash_t seed64,
6713*01826a49SYabin Cui                                 XXH3_f_accumulate f_acc,
6714*01826a49SYabin Cui                                 XXH3_f_scrambleAcc f_scramble,
6715*01826a49SYabin Cui                                 XXH3_f_initCustomSecret f_initSec)
6716*01826a49SYabin Cui {
6717*01826a49SYabin Cui     if (seed64 == 0)
6718*01826a49SYabin Cui         return XXH3_hashLong_128b_internal(input, len,
6719*01826a49SYabin Cui                                            XXH3_kSecret, sizeof(XXH3_kSecret),
6720*01826a49SYabin Cui                                            f_acc, f_scramble);
6721*01826a49SYabin Cui     {   XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
6722*01826a49SYabin Cui         f_initSec(secret, seed64);
6723*01826a49SYabin Cui         return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret),
6724*01826a49SYabin Cui                                            f_acc, f_scramble);
6725*01826a49SYabin Cui     }
6726*01826a49SYabin Cui }
6727*01826a49SYabin Cui 
6728*01826a49SYabin Cui /*
6729*01826a49SYabin Cui  * It's important for performance that XXH3_hashLong is not inlined.
6730*01826a49SYabin Cui  */
6731*01826a49SYabin Cui XXH_NO_INLINE XXH128_hash_t
6732*01826a49SYabin Cui XXH3_hashLong_128b_withSeed(const void* input, size_t len,
6733*01826a49SYabin Cui                             XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen)
6734*01826a49SYabin Cui {
6735*01826a49SYabin Cui     (void)secret; (void)secretLen;
6736*01826a49SYabin Cui     return XXH3_hashLong_128b_withSeed_internal(input, len, seed64,
6737*01826a49SYabin Cui                 XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret);
6738*01826a49SYabin Cui }
6739*01826a49SYabin Cui 
6740*01826a49SYabin Cui typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t,
6741*01826a49SYabin Cui                                             XXH64_hash_t, const void* XXH_RESTRICT, size_t);
6742*01826a49SYabin Cui 
6743*01826a49SYabin Cui XXH_FORCE_INLINE XXH128_hash_t
6744*01826a49SYabin Cui XXH3_128bits_internal(const void* input, size_t len,
6745*01826a49SYabin Cui                       XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
6746*01826a49SYabin Cui                       XXH3_hashLong128_f f_hl128)
6747*01826a49SYabin Cui {
6748*01826a49SYabin Cui     XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
6749*01826a49SYabin Cui     /*
6750*01826a49SYabin Cui      * If an action is to be taken if `secret` conditions are not respected,
6751*01826a49SYabin Cui      * it should be done here.
6752*01826a49SYabin Cui      * For now, it's a contract pre-condition.
6753*01826a49SYabin Cui      * Adding a check and a branch here would cost performance at every hash.
6754*01826a49SYabin Cui      */
6755*01826a49SYabin Cui     if (len <= 16)
6756*01826a49SYabin Cui         return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
6757*01826a49SYabin Cui     if (len <= 128)
6758*01826a49SYabin Cui         return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
6759*01826a49SYabin Cui     if (len <= XXH3_MIDSIZE_MAX)
6760*01826a49SYabin Cui         return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
6761*01826a49SYabin Cui     return f_hl128(input, len, seed64, secret, secretLen);
6762*01826a49SYabin Cui }
6763*01826a49SYabin Cui 
6764*01826a49SYabin Cui 
6765*01826a49SYabin Cui /* ===   Public XXH128 API   === */
6766*01826a49SYabin Cui 
6767*01826a49SYabin Cui /*! @ingroup XXH3_family */
6768*01826a49SYabin Cui XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* input, size_t len)
6769*01826a49SYabin Cui {
6770*01826a49SYabin Cui     return XXH3_128bits_internal(input, len, 0,
6771*01826a49SYabin Cui                                  XXH3_kSecret, sizeof(XXH3_kSecret),
6772*01826a49SYabin Cui                                  XXH3_hashLong_128b_default);
6773*01826a49SYabin Cui }
6774*01826a49SYabin Cui 
6775*01826a49SYabin Cui /*! @ingroup XXH3_family */
6776*01826a49SYabin Cui XXH_PUBLIC_API XXH128_hash_t
6777*01826a49SYabin Cui XXH3_128bits_withSecret(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize)
6778*01826a49SYabin Cui {
6779*01826a49SYabin Cui     return XXH3_128bits_internal(input, len, 0,
6780*01826a49SYabin Cui                                  (const xxh_u8*)secret, secretSize,
6781*01826a49SYabin Cui                                  XXH3_hashLong_128b_withSecret);
6782*01826a49SYabin Cui }
6783*01826a49SYabin Cui 
6784*01826a49SYabin Cui /*! @ingroup XXH3_family */
6785*01826a49SYabin Cui XXH_PUBLIC_API XXH128_hash_t
6786*01826a49SYabin Cui XXH3_128bits_withSeed(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
6787*01826a49SYabin Cui {
6788*01826a49SYabin Cui     return XXH3_128bits_internal(input, len, seed,
6789*01826a49SYabin Cui                                  XXH3_kSecret, sizeof(XXH3_kSecret),
6790*01826a49SYabin Cui                                  XXH3_hashLong_128b_withSeed);
6791*01826a49SYabin Cui }
6792*01826a49SYabin Cui 
6793*01826a49SYabin Cui /*! @ingroup XXH3_family */
6794*01826a49SYabin Cui XXH_PUBLIC_API XXH128_hash_t
6795*01826a49SYabin Cui XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
6796*01826a49SYabin Cui {
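    /* For len <= XXH3_MIDSIZE_MAX, hashing uses the default secret combined
     * with the seed; the custom secret only applies to long inputs. The NULL
     * long-hash callback is therefore never invoked on that branch. */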
6797*01826a49SYabin Cui     if (len <= XXH3_MIDSIZE_MAX)
6798*01826a49SYabin Cui         return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
6799*01826a49SYabin Cui     return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize);
6800*01826a49SYabin Cui }
6801*01826a49SYabin Cui 
6802*01826a49SYabin Cui /*! @ingroup XXH3_family */
6803*01826a49SYabin Cui XXH_PUBLIC_API XXH128_hash_t
6804*01826a49SYabin Cui XXH128(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
6805*01826a49SYabin Cui {
6806*01826a49SYabin Cui     return XXH3_128bits_withSeed(input, len, seed);
6807*01826a49SYabin Cui }
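
/*
 * Minimal one-shot usage sketch (msg is an illustrative buffer):
 *
 *   #include <stdio.h>
 *   static const char msg[] = "hello world";
 *   XXH128_hash_t const h = XXH128(msg, sizeof(msg)-1, 0);
 *   printf("%016llx%016llx\n",
 *          (unsigned long long)h.high64, (unsigned long long)h.low64);
 */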
6808*01826a49SYabin Cui 
6809*01826a49SYabin Cui 
6810*01826a49SYabin Cui /* ===   XXH3 128-bit streaming   === */
6811*01826a49SYabin Cui #ifndef XXH_NO_STREAM
6812*01826a49SYabin Cui /*
6813*01826a49SYabin Cui  * All initialization and update functions are identical to the 64-bit streaming variant.
6814*01826a49SYabin Cui  * The only difference is the finalization routine.
6815*01826a49SYabin Cui  */
6816*01826a49SYabin Cui 
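/*
 * Streaming usage sketch (error handling elided; chunk1/chunk2 and their
 * sizes are illustrative caller-side names):
 *
 *   XXH3_state_t* const st = XXH3_createState();
 *   XXH3_128bits_reset(st);
 *   XXH3_128bits_update(st, chunk1, chunk1Size);
 *   XXH3_128bits_update(st, chunk2, chunk2Size);
 *   {   XXH128_hash_t const h = XXH3_128bits_digest(st);
 *       (void)h;
 *   }
 *   XXH3_freeState(st);
 */
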
6817*01826a49SYabin Cui /*! @ingroup XXH3_family */
6818*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode
6819*01826a49SYabin Cui XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr)
6820*01826a49SYabin Cui {
6821*01826a49SYabin Cui     return XXH3_64bits_reset(statePtr);
6822*01826a49SYabin Cui }
6823*01826a49SYabin Cui 
6824*01826a49SYabin Cui /*! @ingroup XXH3_family */
6825*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode
6826*01826a49SYabin Cui XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize)
6827*01826a49SYabin Cui {
6828*01826a49SYabin Cui     return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize);
6829*01826a49SYabin Cui }
6830*01826a49SYabin Cui 
6831*01826a49SYabin Cui /*! @ingroup XXH3_family */
6832*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode
6833*01826a49SYabin Cui XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed)
6834*01826a49SYabin Cui {
6835*01826a49SYabin Cui     return XXH3_64bits_reset_withSeed(statePtr, seed);
6836*01826a49SYabin Cui }
6837*01826a49SYabin Cui 
6838*01826a49SYabin Cui /*! @ingroup XXH3_family */
6839*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode
6840*01826a49SYabin Cui XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
6841*01826a49SYabin Cui {
6842*01826a49SYabin Cui     return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, seed);
6843*01826a49SYabin Cui }
6844*01826a49SYabin Cui 
6845*01826a49SYabin Cui /*! @ingroup XXH3_family */
6846*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode
6847*01826a49SYabin Cui XXH3_128bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len)
6848*01826a49SYabin Cui {
6849*01826a49SYabin Cui     return XXH3_64bits_update(state, input, len);
6850*01826a49SYabin Cui }
6851*01826a49SYabin Cui 
6852*01826a49SYabin Cui /*! @ingroup XXH3_family */
6853*01826a49SYabin Cui XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* state)
6854*01826a49SYabin Cui {
6855*01826a49SYabin Cui     const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
6856*01826a49SYabin Cui     if (state->totalLen > XXH3_MIDSIZE_MAX) {
6857*01826a49SYabin Cui         XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
6858*01826a49SYabin Cui         XXH3_digest_long(acc, state, secret);
6859*01826a49SYabin Cui         XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
6860*01826a49SYabin Cui         {   XXH128_hash_t h128;
6861*01826a49SYabin Cui             h128.low64  = XXH3_mergeAccs(acc,
6862*01826a49SYabin Cui                                          secret + XXH_SECRET_MERGEACCS_START,
6863*01826a49SYabin Cui                                          (xxh_u64)state->totalLen * XXH_PRIME64_1);
6864*01826a49SYabin Cui             h128.high64 = XXH3_mergeAccs(acc,
6865*01826a49SYabin Cui                                          secret + state->secretLimit + XXH_STRIPE_LEN
6866*01826a49SYabin Cui                                                 - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
6867*01826a49SYabin Cui                                          ~((xxh_u64)state->totalLen * XXH_PRIME64_2));
6868*01826a49SYabin Cui             return h128;
6869*01826a49SYabin Cui         }
6870*01826a49SYabin Cui     }
6871*01826a49SYabin Cui     /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */
6872*01826a49SYabin Cui     if (state->seed)
6873*01826a49SYabin Cui         return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
6874*01826a49SYabin Cui     return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
6875*01826a49SYabin Cui                                    secret, state->secretLimit + XXH_STRIPE_LEN);
6876*01826a49SYabin Cui }
6877*01826a49SYabin Cui #endif /* !XXH_NO_STREAM */
6878*01826a49SYabin Cui /* 128-bit utility functions */
6879*01826a49SYabin Cui 
6880*01826a49SYabin Cui #include <string.h>   /* memcmp, memcpy */
6881*01826a49SYabin Cui 
6882*01826a49SYabin Cui /* return : 1 if equal, 0 if different */
6883*01826a49SYabin Cui /*! @ingroup XXH3_family */
6884*01826a49SYabin Cui XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
6885*01826a49SYabin Cui {
6886*01826a49SYabin Cui     /* note : XXH128_hash_t is compact, it has no padding bytes */
6887*01826a49SYabin Cui     return !(memcmp(&h1, &h2, sizeof(h1)));
6888*01826a49SYabin Cui }
6889*01826a49SYabin Cui 
6890*01826a49SYabin Cui /* This prototype is compatible with stdlib's qsort().
6891*01826a49SYabin Cui  * @return : >0 if *h128_1  > *h128_2
6892*01826a49SYabin Cui  *           <0 if *h128_1  < *h128_2
6893*01826a49SYabin Cui  *           =0 if *h128_1 == *h128_2  */
6894*01826a49SYabin Cui /*! @ingroup XXH3_family */
6895*01826a49SYabin Cui XXH_PUBLIC_API int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2)
6896*01826a49SYabin Cui {
6897*01826a49SYabin Cui     XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1;
6898*01826a49SYabin Cui     XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2;
6899*01826a49SYabin Cui     int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
6900*01826a49SYabin Cui     /* note : assumes that, in most cases, hash values are different */
6901*01826a49SYabin Cui     if (hcmp) return hcmp;
6902*01826a49SYabin Cui     return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
6903*01826a49SYabin Cui }
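
/*
 * Since the prototype matches qsort()'s comparator, an array of hashes can
 * be sorted directly (hashes/nbHashes are illustrative):
 *
 *   #include <stdlib.h>
 *   XXH128_hash_t hashes[64];
 *   size_t const nbHashes = sizeof(hashes) / sizeof(hashes[0]);
 *   // ... fill hashes[] ...
 *   qsort(hashes, nbHashes, sizeof(hashes[0]), XXH128_cmp);
 */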
6904*01826a49SYabin Cui 
6905*01826a49SYabin Cui 
6906*01826a49SYabin Cui /*======   Canonical representation   ======*/
6907*01826a49SYabin Cui /*! @ingroup XXH3_family */
6908*01826a49SYabin Cui XXH_PUBLIC_API void
6909*01826a49SYabin Cui XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash)
6910*01826a49SYabin Cui {
6911*01826a49SYabin Cui     XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
6912*01826a49SYabin Cui     if (XXH_CPU_LITTLE_ENDIAN) {
6913*01826a49SYabin Cui         hash.high64 = XXH_swap64(hash.high64);
6914*01826a49SYabin Cui         hash.low64  = XXH_swap64(hash.low64);
6915*01826a49SYabin Cui     }
6916*01826a49SYabin Cui     XXH_memcpy(dst, &hash.high64, sizeof(hash.high64));
6917*01826a49SYabin Cui     XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
6918*01826a49SYabin Cui }
6919*01826a49SYabin Cui 
6920*01826a49SYabin Cui /*! @ingroup XXH3_family */
6921*01826a49SYabin Cui XXH_PUBLIC_API XXH128_hash_t
6922*01826a49SYabin Cui XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src)
6923*01826a49SYabin Cui {
6924*01826a49SYabin Cui     XXH128_hash_t h;
6925*01826a49SYabin Cui     h.high64 = XXH_readBE64(src);
6926*01826a49SYabin Cui     h.low64  = XXH_readBE64(src->digest + 8);
6927*01826a49SYabin Cui     return h;
6928*01826a49SYabin Cui }
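
/*
 * Round-trip sketch: the canonical form is the big-endian byte layout meant
 * for storage or transmission; reading it back restores the same value:
 *
 *   #include <assert.h>
 *   XXH128_hash_t const h = XXH128("abc", 3, 0);
 *   XXH128_canonical_t canon;
 *   XXH128_canonicalFromHash(&canon, h);
 *   assert(XXH128_isEqual(h, XXH128_hashFromCanonical(&canon)));
 */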
6929*01826a49SYabin Cui 
6930*01826a49SYabin Cui 
6931*01826a49SYabin Cui 
6932*01826a49SYabin Cui /* ==========================================
6933*01826a49SYabin Cui  * Secret generators
6934*01826a49SYabin Cui  * ==========================================
6935*01826a49SYabin Cui  */
6936*01826a49SYabin Cui #define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))
6937*01826a49SYabin Cui 
6938*01826a49SYabin Cui XXH_FORCE_INLINE void XXH3_combine16(void* dst, XXH128_hash_t h128)
6939*01826a49SYabin Cui {
6940*01826a49SYabin Cui     XXH_writeLE64( dst, XXH_readLE64(dst) ^ h128.low64 );
6941*01826a49SYabin Cui     XXH_writeLE64( (char*)dst+8, XXH_readLE64((char*)dst+8) ^ h128.high64 );
6942*01826a49SYabin Cui }
6943*01826a49SYabin Cui 
6944*01826a49SYabin Cui /*! @ingroup XXH3_family */
6945*01826a49SYabin Cui XXH_PUBLIC_API XXH_errorcode
6946*01826a49SYabin Cui XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize)
6947*01826a49SYabin Cui {
6948*01826a49SYabin Cui #if (XXH_DEBUGLEVEL >= 1)
6949*01826a49SYabin Cui     XXH_ASSERT(secretBuffer != NULL);
6950*01826a49SYabin Cui     XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
6951*01826a49SYabin Cui #else
6952*01826a49SYabin Cui     /* production mode: assert() is disabled */
6953*01826a49SYabin Cui     if (secretBuffer == NULL) return XXH_ERROR;
6954*01826a49SYabin Cui     if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
6955*01826a49SYabin Cui #endif
6956*01826a49SYabin Cui 
6957*01826a49SYabin Cui     if (customSeedSize == 0) {
6958*01826a49SYabin Cui         customSeed = XXH3_kSecret;
6959*01826a49SYabin Cui         customSeedSize = XXH_SECRET_DEFAULT_SIZE;
6960*01826a49SYabin Cui     }
6961*01826a49SYabin Cui #if (XXH_DEBUGLEVEL >= 1)
6962*01826a49SYabin Cui     XXH_ASSERT(customSeed != NULL);
6963*01826a49SYabin Cui #else
6964*01826a49SYabin Cui     if (customSeed == NULL) return XXH_ERROR;
6965*01826a49SYabin Cui #endif
6966*01826a49SYabin Cui 
6967*01826a49SYabin Cui     /* Fill secretBuffer with a copy of customSeed - repeat as needed */
6968*01826a49SYabin Cui     {   size_t pos = 0;
6969*01826a49SYabin Cui         while (pos < secretSize) {
6970*01826a49SYabin Cui             size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize);
6971*01826a49SYabin Cui             memcpy((char*)secretBuffer + pos, customSeed, toCopy);
6972*01826a49SYabin Cui             pos += toCopy;
6973*01826a49SYabin Cui     }   }
6974*01826a49SYabin Cui 
6975*01826a49SYabin Cui     {   size_t const nbSeg16 = secretSize / 16;
6976*01826a49SYabin Cui         size_t n;
6977*01826a49SYabin Cui         XXH128_canonical_t scrambler;
6978*01826a49SYabin Cui         XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
6979*01826a49SYabin Cui         for (n=0; n<nbSeg16; n++) {
6980*01826a49SYabin Cui             XXH128_hash_t const h128 = XXH128(&scrambler, sizeof(scrambler), n);
6981*01826a49SYabin Cui             XXH3_combine16((char*)secretBuffer + n*16, h128);
6982*01826a49SYabin Cui         }
6983*01826a49SYabin Cui         /* last segment */
6984*01826a49SYabin Cui         XXH3_combine16((char*)secretBuffer + secretSize - 16, XXH128_hashFromCanonical(&scrambler));
6985*01826a49SYabin Cui     }
6986*01826a49SYabin Cui     return XXH_OK;
6987*01826a49SYabin Cui }
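
/*
 * Usage sketch: derive a full-entropy secret from arbitrary seed material,
 * then hash with it (mySeedData/mySeedSize/data/dataSize are illustrative):
 *
 *   unsigned char secret[XXH3_SECRET_SIZE_MIN];
 *   XXH_errorcode const rc =
 *       XXH3_generateSecret(secret, sizeof(secret), mySeedData, mySeedSize);
 *   if (rc == XXH_OK) {
 *       XXH128_hash_t const h =
 *           XXH3_128bits_withSecret(data, dataSize, secret, sizeof(secret));
 *       (void)h;
 *   }
 */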
6988*01826a49SYabin Cui 
6989*01826a49SYabin Cui /*! @ingroup XXH3_family */
6990*01826a49SYabin Cui XXH_PUBLIC_API void
6991*01826a49SYabin Cui XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed)
6992*01826a49SYabin Cui {
6993*01826a49SYabin Cui     XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
6994*01826a49SYabin Cui     XXH3_initCustomSecret(secret, seed);
6995*01826a49SYabin Cui     XXH_ASSERT(secretBuffer != NULL);
6996*01826a49SYabin Cui     memcpy(secretBuffer, secret, XXH_SECRET_DEFAULT_SIZE);
6997*01826a49SYabin Cui }
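
/*
 * Sketch: a secret generated from a given seed can be paired with the
 * *_withSecretandSeed() variants, keeping short inputs (seeded default
 * secret) and long inputs (custom secret) consistent; data/dataSize are
 * illustrative, and XXH_SECRET_DEFAULT_SIZE is assumed visible here:
 *
 *   unsigned char secret[XXH_SECRET_DEFAULT_SIZE];
 *   XXH64_hash_t const seed = 42;
 *   XXH3_generateSecret_fromSeed(secret, seed);
 *   {   XXH128_hash_t const h = XXH3_128bits_withSecretandSeed(
 *               data, dataSize, secret, sizeof(secret), seed);
 *       (void)h;
 *   }
 */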
6998*01826a49SYabin Cui 
6999*01826a49SYabin Cui 
7000*01826a49SYabin Cui 
7001*01826a49SYabin Cui /* Pop our optimization override from above */
7002*01826a49SYabin Cui #if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
7003*01826a49SYabin Cui   && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
7004*01826a49SYabin Cui   && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */
7005*01826a49SYabin Cui #  pragma GCC pop_options
7006*01826a49SYabin Cui #endif
7007*01826a49SYabin Cui 
7008*01826a49SYabin Cui #endif  /* XXH_NO_LONG_LONG */
7009*01826a49SYabin Cui 
7010*01826a49SYabin Cui #endif  /* XXH_NO_XXH3 */
7011*01826a49SYabin Cui 
7012*01826a49SYabin Cui /*!
7013*01826a49SYabin Cui  * @}
7014*01826a49SYabin Cui  */
7015*01826a49SYabin Cui #endif  /* XXH_IMPLEMENTATION */
7016*01826a49SYabin Cui 
7017*01826a49SYabin Cui 
7018*01826a49SYabin Cui #if defined (__cplusplus)
7019*01826a49SYabin Cui } /* extern "C" */
7020*01826a49SYabin Cui #endif