/* xxhash.c — generated documentation listing of this file. */
00001 /*
00002 xxHash - Fast Hash algorithm
00003 Copyright (C) 2012-2014, Yann Collet.
00004 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
00005 
00006 Redistribution and use in source and binary forms, with or without
00007 modification, are permitted provided that the following conditions are
00008 met:
00009 
00010 * Redistributions of source code must retain the above copyright
00011 notice, this list of conditions and the following disclaimer.
00012 * Redistributions in binary form must reproduce the above
00013 copyright notice, this list of conditions and the following disclaimer
00014 in the documentation and/or other materials provided with the
00015 distribution.
00016 
00017 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
00018 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
00019 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
00020 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
00021 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
00022 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
00023 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
00024 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
00025 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
00026 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
00027 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00028 
00029 You can contact the author at :
00030 - xxHash source repository : http://code.google.com/p/xxhash/
00031 */
00032 
00033 
//**************************************
// Tuning parameters
//**************************************
// XXH_USE_UNALIGNED_ACCESS :
// Unaligned memory access is enabled automatically on "common" CPUs such as
// x86, where it is safe and fast. On other CPUs the compiler stays cautious
// and emits extra code to guarantee aligned access. If you know your target
// supports unaligned loads — or your input is always 4-byte aligned (U32
// boundaries) — define this manually to improve performance.
#if defined(__ARM_FEATURE_UNALIGNED) || defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
#  define XXH_USE_UNALIGNED_ACCESS 1
#endif

// XXH_ACCEPT_NULL_INPUT_POINTER :
// By default a NULL input pointer triggers a memory access error, since it is
// a bad pointer. When this option is enabled, a NULL input hashes like a
// zero-length input instead, at a very small performance cost (measurable
// only on small inputs). Disabled by default; uncomment below to enable :
//#define XXH_ACCEPT_NULL_INPUT_POINTER 1

// XXH_FORCE_NATIVE_FORMAT :
// By default the library produces endian-independent hash values based on the
// little-endian convention, so little- and big-endian CPUs agree on results.
// Big-endian CPUs pay a byte-swapping cost for this. If endian-independence
// does not matter to your application, set this to 1 to use native byte order
// (improves speed on big-endian CPUs; no impact on little-endian CPUs).
#define XXH_FORCE_NATIVE_FORMAT 0
00060 
00061 
00062 //**************************************
00063 // Compiler Specific Options
00064 //**************************************
00065 // Disable some Visual warning messages
00066 #ifdef _MSC_VER  // Visual Studio
00067 #  pragma warning(disable : 4127)      // disable: C4127: conditional expression is constant
00068 #endif
00069 
00070 #ifdef _MSC_VER    // Visual Studio
00071 #  define FORCE_INLINE static __forceinline
00072 #else 
00073 #  ifdef __GNUC__
00074 #    define FORCE_INLINE static inline __attribute__((always_inline))
00075 #  else
00076 #    define FORCE_INLINE static inline
00077 #  endif
00078 #endif
00079 
00080 
00081 //**************************************
00082 // Includes & Memory related functions
00083 //**************************************
00084 #include "xxhash.h"
00085 // Modify the local functions below should you wish to use some other memory related routines
00086 // for malloc(), free()
00087 #include <stdlib.h>
00088 FORCE_INLINE void* XXH_malloc(size_t s) { return malloc(s); }
00089 FORCE_INLINE void  XXH_free  (void* p)  { free(p); }
00090 // for memcpy()
00091 #include <string.h>
00092 FORCE_INLINE void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
00093 
00094 
//**************************************
// Basic Types
//**************************************
#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   // C99
# include <stdint.h>
  typedef uint8_t  BYTE;
  typedef uint16_t U16;
  typedef uint32_t U32;
  typedef  int32_t S32;
  typedef uint64_t U64;
#else
  typedef unsigned char      BYTE;
  typedef unsigned short     U16;
  typedef unsigned int       U32;
  typedef   signed int       S32;
  typedef unsigned long long U64;
#endif

// On GCC without guaranteed unaligned access, a packed struct tells the
// compiler to emit byte-wise (safe) loads for U32_S.
#if defined(__GNUC__)  && !defined(XXH_USE_UNALIGNED_ACCESS)
#  define _PACKED __attribute__ ((packed))
#else
#  define _PACKED
#endif

#if !defined(XXH_USE_UNALIGNED_ACCESS) && !defined(__GNUC__)
#  ifdef __IBMC__
#    pragma pack(1)
#  else
#    pragma pack(push, 1)
#  endif
#endif

typedef struct _U32_S { U32 v; } _PACKED U32_S;

#if !defined(XXH_USE_UNALIGNED_ACCESS) && !defined(__GNUC__)
#  pragma pack(pop)
#endif

// Read a (possibly unaligned) 32-bit value through the packed struct.
// NOTE(review): this cast-based access technically violates strict aliasing;
// kept as-is from upstream.
#define A32(x) (((U32_S *)(x))->v)


//***************************************
// Compiler-specific Functions and Macros
//***************************************
#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

// Note : although _rotl exists for minGW (GCC under windows), performance seems poor
#if defined(_MSC_VER)
#  define XXH_rotl32(x,r) _rotl(x,r)
#else
// BUGFIX: fully parenthesize the arguments. The original expanded x and r
// bare ((x << r) | (x >> (32 - r))), which mis-evaluates compound arguments
// such as XXH_rotl32(a|b, r) due to operator precedence.
#  define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
#endif

#if defined(_MSC_VER)     // Visual Studio
#  define XXH_swap32 _byteswap_ulong
#elif GCC_VERSION >= 403
#  define XXH_swap32 __builtin_bswap32
#else
// Portable fallback byte swap for compilers without an intrinsic.
static inline U32 XXH_swap32 (U32 x) {
    return  ((x << 24) & 0xff000000 ) |
        ((x <<  8) & 0x00ff0000 ) |
        ((x >>  8) & 0x0000ff00 ) |
        ((x >> 24) & 0x000000ff );}
#endif
00159 
00160 
//**************************************
// Constants
//**************************************
// The five 32-bit primes of the xxHash32 algorithm.
#define PRIME32_1   2654435761U
#define PRIME32_2   2246822519U
#define PRIME32_3   3266489917U
#define PRIME32_4    668265263U
#define PRIME32_5    374761393U


//**************************************
// Architecture Macros
//**************************************
typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;

// XXH_CPU_LITTLE_ENDIAN may also be defined externally (e.g. compiler switch).
// Runtime detection: the first byte of the int 1 is 1 only on little-endian.
#ifndef XXH_CPU_LITTLE_ENDIAN
    static const int one = 1;
#   define XXH_CPU_LITTLE_ENDIAN   (*(char*)(&one))
#endif


//**************************************
// Macros
//**************************************
// Compile-time assertion: divides by zero when the condition is false.
// Use only *after* variable declarations (it expands to a statement).
#define XXH_STATIC_ASSERT(c)   { enum { XXH_static_assert = 1/(!!(c)) }; }
00185 
00186 
00187 //****************************
00188 // Memory reads
00189 //****************************
00190 typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
00191 
00192 FORCE_INLINE U32 XXH_readLE32_align(const U32* ptr, XXH_endianess endian, XXH_alignment align)
00193 { 
00194     if (align==XXH_unaligned)
00195         return endian==XXH_littleEndian ? A32(ptr) : XXH_swap32(A32(ptr)); 
00196     else
00197         return endian==XXH_littleEndian ? *ptr : XXH_swap32(*ptr); 
00198 }
00199 
00200 FORCE_INLINE U32 XXH_readLE32(const U32* ptr, XXH_endianess endian) { return XXH_readLE32_align(ptr, endian, XXH_unaligned); }
00201 
00202 
00203 //****************************
00204 // Simple Hash Functions
00205 //****************************
00206 FORCE_INLINE U32 XXH32_endian_align(const void* input, int len, U32 seed, XXH_endianess endian, XXH_alignment align)
00207 {
00208     const BYTE* p = (const BYTE*)input;
00209     const BYTE* const bEnd = p + len;
00210     U32 h32;
00211 
00212 #ifdef XXH_ACCEPT_NULL_INPUT_POINTER
00213     if (p==NULL) { len=0; p=(const BYTE*)(size_t)16; }
00214 #endif
00215 
00216     if (len>=16)
00217     {
00218         const BYTE* const limit = bEnd - 16;
00219         U32 v1 = seed + PRIME32_1 + PRIME32_2;
00220         U32 v2 = seed + PRIME32_2;
00221         U32 v3 = seed + 0;
00222         U32 v4 = seed - PRIME32_1;
00223 
00224         do
00225         {
00226             v1 += XXH_readLE32_align((const U32*)p, endian, align) * PRIME32_2; v1 = XXH_rotl32(v1, 13); v1 *= PRIME32_1; p+=4;
00227             v2 += XXH_readLE32_align((const U32*)p, endian, align) * PRIME32_2; v2 = XXH_rotl32(v2, 13); v2 *= PRIME32_1; p+=4;
00228             v3 += XXH_readLE32_align((const U32*)p, endian, align) * PRIME32_2; v3 = XXH_rotl32(v3, 13); v3 *= PRIME32_1; p+=4;
00229             v4 += XXH_readLE32_align((const U32*)p, endian, align) * PRIME32_2; v4 = XXH_rotl32(v4, 13); v4 *= PRIME32_1; p+=4;
00230         } while (p<=limit);
00231 
00232         h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
00233     }
00234     else
00235     {
00236         h32  = seed + PRIME32_5;
00237     }
00238 
00239     h32 += (U32) len;
00240 
00241     while (p<=bEnd-4)
00242     {
00243         h32 += XXH_readLE32_align((const U32*)p, endian, align) * PRIME32_3;
00244         h32  = XXH_rotl32(h32, 17) * PRIME32_4 ;
00245         p+=4;
00246     }
00247 
00248     while (p<bEnd)
00249     {
00250         h32 += (*p) * PRIME32_5;
00251         h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
00252         p++;
00253     }
00254 
00255     h32 ^= h32 >> 15;
00256     h32 *= PRIME32_2;
00257     h32 ^= h32 >> 13;
00258     h32 *= PRIME32_3;
00259     h32 ^= h32 >> 16;
00260 
00261     return h32;
00262 }
00263 
00264 
00265 U32 XXH32(const void* input, int len, U32 seed)
00266 {
00267 #if 0
00268     // Simple version, good for code maintenance, but unfortunately slow for small inputs
00269     void* state = XXH32_init(seed);
00270     XXH32_update(state, input, len);
00271     return XXH32_digest(state);
00272 #else
00273     XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
00274 
00275 #  if !defined(XXH_USE_UNALIGNED_ACCESS)
00276     if ((((size_t)input) & 3))   // Input is aligned, let's leverage the speed advantage
00277     {
00278         if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
00279             return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
00280         else
00281             return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
00282     }
00283 #  endif
00284 
00285     if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
00286         return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
00287     else
00288         return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
00289 #endif
00290 }
00291 
00292 
00293 //****************************
00294 // Advanced Hash Functions
00295 //****************************
00296 
00297 struct XXH_state32_t
00298 {
00299     U64 total_len;
00300     U32 seed;
00301     U32 v1;
00302     U32 v2;
00303     U32 v3;
00304     U32 v4;
00305     int memsize;
00306     char memory[16];
00307 };
00308 
00309 
00310 int XXH32_sizeofState() 
00311 {
00312     XXH_STATIC_ASSERT(XXH32_SIZEOFSTATE >= sizeof(struct XXH_state32_t));   // A compilation error here means XXH32_SIZEOFSTATE is not large enough
00313     return sizeof(struct XXH_state32_t); 
00314 }
00315 
00316 
00317 XXH_errorcode XXH32_resetState(void* state_in, U32 seed)
00318 { 
00319     struct XXH_state32_t * state = (struct XXH_state32_t *) state_in;
00320     state->seed = seed;
00321     state->v1 = seed + PRIME32_1 + PRIME32_2;
00322     state->v2 = seed + PRIME32_2;
00323     state->v3 = seed + 0;
00324     state->v4 = seed - PRIME32_1;
00325     state->total_len = 0;
00326     state->memsize = 0;
00327     return XXH_OK;
00328 }
00329 
00330 
00331 void* XXH32_init (U32 seed)
00332 {
00333     void* state = XXH_malloc (sizeof(struct XXH_state32_t));
00334     XXH32_resetState(state, seed);
00335     return state;
00336 }
00337 
00338 
00339 FORCE_INLINE XXH_errorcode XXH32_update_endian (void* state_in, const void* input, int len, XXH_endianess endian)
00340 {
00341     struct XXH_state32_t * state = (struct XXH_state32_t *) state_in;
00342     const BYTE* p = (const BYTE*)input;
00343     const BYTE* const bEnd = p + len;
00344 
00345 #ifdef XXH_ACCEPT_NULL_INPUT_POINTER
00346     if (input==NULL) return XXH_ERROR;
00347 #endif
00348 
00349     state->total_len += len;
00350 
00351     if (state->memsize + len < 16)   // fill in tmp buffer
00352     {
00353         XXH_memcpy(state->memory + state->memsize, input, len);
00354         state->memsize +=  len;
00355         return XXH_OK;
00356     }
00357 
00358     if (state->memsize)   // some data left from previous update
00359     {
00360         XXH_memcpy(state->memory + state->memsize, input, 16-state->memsize);
00361         {
00362             const U32* p32 = (const U32*)state->memory;
00363             state->v1 += XXH_readLE32(p32, endian) * PRIME32_2; state->v1 = XXH_rotl32(state->v1, 13); state->v1 *= PRIME32_1; p32++;
00364             state->v2 += XXH_readLE32(p32, endian) * PRIME32_2; state->v2 = XXH_rotl32(state->v2, 13); state->v2 *= PRIME32_1; p32++; 
00365             state->v3 += XXH_readLE32(p32, endian) * PRIME32_2; state->v3 = XXH_rotl32(state->v3, 13); state->v3 *= PRIME32_1; p32++;
00366             state->v4 += XXH_readLE32(p32, endian) * PRIME32_2; state->v4 = XXH_rotl32(state->v4, 13); state->v4 *= PRIME32_1; p32++;
00367         }
00368         p += 16-state->memsize;
00369         state->memsize = 0;
00370     }
00371 
00372     if (p <= bEnd-16)
00373     {
00374         const BYTE* const limit = bEnd - 16;
00375         U32 v1 = state->v1;
00376         U32 v2 = state->v2;
00377         U32 v3 = state->v3;
00378         U32 v4 = state->v4;
00379 
00380         do
00381         {
00382             v1 += XXH_readLE32((const U32*)p, endian) * PRIME32_2; v1 = XXH_rotl32(v1, 13); v1 *= PRIME32_1; p+=4;
00383             v2 += XXH_readLE32((const U32*)p, endian) * PRIME32_2; v2 = XXH_rotl32(v2, 13); v2 *= PRIME32_1; p+=4;
00384             v3 += XXH_readLE32((const U32*)p, endian) * PRIME32_2; v3 = XXH_rotl32(v3, 13); v3 *= PRIME32_1; p+=4;
00385             v4 += XXH_readLE32((const U32*)p, endian) * PRIME32_2; v4 = XXH_rotl32(v4, 13); v4 *= PRIME32_1; p+=4;
00386         } while (p<=limit);
00387 
00388         state->v1 = v1;
00389         state->v2 = v2;
00390         state->v3 = v3;
00391         state->v4 = v4;
00392     }
00393 
00394     if (p < bEnd)
00395     {
00396         XXH_memcpy(state->memory, p, bEnd-p);
00397         state->memsize = (int)(bEnd-p);
00398     }
00399 
00400     return XXH_OK;
00401 }
00402 
00403 XXH_errorcode XXH32_update (void* state_in, const void* input, int len)
00404 {
00405     XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
00406     
00407     if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
00408         return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
00409     else
00410         return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
00411 }
00412 
00413 
00414 
00415 FORCE_INLINE U32 XXH32_intermediateDigest_endian (void* state_in, XXH_endianess endian)
00416 {
00417     struct XXH_state32_t * state = (struct XXH_state32_t *) state_in;
00418     const BYTE * p = (const BYTE*)state->memory;
00419     BYTE* bEnd = (BYTE*)state->memory + state->memsize;
00420     U32 h32;
00421 
00422     if (state->total_len >= 16)
00423     {
00424         h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
00425     }
00426     else
00427     {
00428         h32  = state->seed + PRIME32_5;
00429     }
00430 
00431     h32 += (U32) state->total_len;
00432 
00433     while (p<=bEnd-4)
00434     {
00435         h32 += XXH_readLE32((const U32*)p, endian) * PRIME32_3;
00436         h32  = XXH_rotl32(h32, 17) * PRIME32_4;
00437         p+=4;
00438     }
00439 
00440     while (p<bEnd)
00441     {
00442         h32 += (*p) * PRIME32_5;
00443         h32 = XXH_rotl32(h32, 11) * PRIME32_1;
00444         p++;
00445     }
00446 
00447     h32 ^= h32 >> 15;
00448     h32 *= PRIME32_2;
00449     h32 ^= h32 >> 13;
00450     h32 *= PRIME32_3;
00451     h32 ^= h32 >> 16;
00452 
00453     return h32;
00454 }
00455 
00456 
00457 U32 XXH32_intermediateDigest (void* state_in)
00458 {
00459     XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
00460     
00461     if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
00462         return XXH32_intermediateDigest_endian(state_in, XXH_littleEndian);
00463     else
00464         return XXH32_intermediateDigest_endian(state_in, XXH_bigEndian);
00465 }
00466 
00467 
00468 U32 XXH32_digest (void* state_in)
00469 {
00470     U32 h32 = XXH32_intermediateDigest(state_in);
00471 
00472     XXH_free(state_in);
00473 
00474     return h32;
00475 }


/* roslz4
   Author(s): Ben Charrow
   autogenerated on Thu Jun 6 2019 21:09:51 */