lz4.c
1 /*
2  LZ4 - Fast LZ compression algorithm
3  Copyright (C) 2011-2017, Yann Collet.
4 
5  BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
6 
7  Redistribution and use in source and binary forms, with or without
8  modification, are permitted provided that the following conditions are
9  met:
10 
11  * Redistributions of source code must retain the above copyright
12  notice, this list of conditions and the following disclaimer.
13  * Redistributions in binary form must reproduce the above
14  copyright notice, this list of conditions and the following disclaimer
15  in the documentation and/or other materials provided with the
16  distribution.
17 
18  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 
30  You can contact the author at :
31  - LZ4 homepage : http://www.lz4.org
32  - LZ4 source repository : https://github.com/lz4/lz4
33 */
34 
35 
36 /*-************************************
37 * Tuning parameters
38 **************************************/
39 /*
40  * LZ4_HEAPMODE :
41  * Select how the default compression functions will allocate memory for their hash table:
42  * on the stack (0: default, fastest), or on the heap (1: requires malloc()).
43  */
44 #ifndef LZ4_HEAPMODE
45 # define LZ4_HEAPMODE 0
46 #endif
47 
48 /*
49  * ACCELERATION_DEFAULT :
50  * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
51  */
52 #define ACCELERATION_DEFAULT 1
53 
54 
55 /*-************************************
56 * CPU Feature Detection
57 **************************************/
58 /* LZ4_FORCE_MEMORY_ACCESS
59  * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
60  * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
61  * The switch below allows selecting a different access method for improved performance.
62  * Method 0 (default) : use `memcpy()`. Safe and portable.
63  * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
64  * This method is safe if your compiler supports it, and is *generally* as fast as or faster than `memcpy`.
65  * Method 2 : direct access. This method is portable but violates the C standard.
66  * It can generate buggy code on targets whose assembly generation depends on alignment.
67  * But in some circumstances, it's the only known way to get the most performance (e.g. GCC + ARMv6).
68  * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
69  * Prefer these methods in priority order (0 > 1 > 2)
70  */
71 #ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */
72 # if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
73 # define LZ4_FORCE_MEMORY_ACCESS 2
74 # elif defined(__INTEL_COMPILER) || defined(__GNUC__)
75 # define LZ4_FORCE_MEMORY_ACCESS 1
76 # endif
77 #endif
78 
79 /*
80  * LZ4_FORCE_SW_BITCOUNT
81  * Define this parameter if your target system or compiler does not support hardware bit count
82  */
83 #if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for Windows CE does not support Hardware bit count */
84 # define LZ4_FORCE_SW_BITCOUNT
85 #endif
86 
87 
88 /*-************************************
89 * Dependency
90 **************************************/
91 #include "lz4.h"
92 /* see also "memory routines" below */
93 
94 
95 /*-************************************
96 * Compiler Options
97 **************************************/
98 #ifdef _MSC_VER /* Visual Studio */
99 # define FORCE_INLINE static __forceinline
100 # include <intrin.h>
101 # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
102 # pragma warning(disable : 4293) /* disable: C4293: too large shift (32-bits) */
103 #else
104 # if defined(__GNUC__) || defined(__clang__)
105 # define FORCE_INLINE static inline __attribute__((always_inline))
106 # elif defined(__cplusplus) || (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
107 # define FORCE_INLINE static inline
108 # else
109 # define FORCE_INLINE static
110 # endif
111 #endif /* _MSC_VER */
112 
113 #if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
114 # define expect(expr,value) (__builtin_expect ((expr),(value)) )
115 #else
116 # define expect(expr,value) (expr)
117 #endif
118 
119 #define likely(expr) expect((expr) != 0, 1)
120 #define unlikely(expr) expect((expr) != 0, 0)
121 
122 
123 /*-************************************
124 * Memory routines
125 **************************************/
126 #include <stdlib.h> /* malloc, calloc, free */
127 #define ALLOCATOR(n,s) calloc(n,s)
128 #define FREEMEM free
129 #include <string.h> /* memset, memcpy */
130 #define MEM_INIT memset
131 
132 
133 /*-************************************
134 * Basic Types
135 **************************************/
136 #if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
137 # include <stdint.h>
138  typedef uint8_t BYTE;
139  typedef uint16_t U16;
140  typedef uint32_t U32;
141  typedef int32_t S32;
142  typedef uint64_t U64;
143  typedef uintptr_t uptrval;
144 #else
145  typedef unsigned char BYTE;
146  typedef unsigned short U16;
147  typedef unsigned int U32;
148  typedef signed int S32;
149  typedef unsigned long long U64;
150  typedef size_t uptrval; /* generally true, except OpenVMS-64 */
151 #endif
152 
153 #if defined(__x86_64__)
154  typedef U64 reg_t; /* 64-bits in x32 mode */
155 #else
156  typedef size_t reg_t; /* 32-bits in x32 mode */
157 #endif
158 
159 /*-************************************
160 * Reading and writing into memory
161 **************************************/
162 static unsigned LZ4_isLittleEndian(void)
163 {
164  const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
165  return one.c[0];
166 }
167 
168 
169 #if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
170 /* lie to the compiler about data alignment; use with caution */
171 
172 static U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; }
173 static U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; }
174 static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; }
175 
176 static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
177 static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
178 
179 #elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)
180 
181 /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
182 /* currently only defined for gcc and icc */
183 typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) unalign;
184 
185 static U16 LZ4_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
186 static U32 LZ4_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
187 static reg_t LZ4_read_ARCH(const void* ptr) { return ((const unalign*)ptr)->uArch; }
188 
189 static void LZ4_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
190 static void LZ4_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; }
191 
192 #else /* safe and portable access through memcpy() */
193 
194 static U16 LZ4_read16(const void* memPtr)
195 {
196  U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
197 }
198 
199 static U32 LZ4_read32(const void* memPtr)
200 {
201  U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
202 }
203 
204 static reg_t LZ4_read_ARCH(const void* memPtr)
205 {
206  reg_t val; memcpy(&val, memPtr, sizeof(val)); return val;
207 }
208 
209 static void LZ4_write16(void* memPtr, U16 value)
210 {
211  memcpy(memPtr, &value, sizeof(value));
212 }
213 
214 static void LZ4_write32(void* memPtr, U32 value)
215 {
216  memcpy(memPtr, &value, sizeof(value));
217 }
218 
219 #endif /* LZ4_FORCE_MEMORY_ACCESS */
220 
221 
222 static U16 LZ4_readLE16(const void* memPtr)
223 {
224  if (LZ4_isLittleEndian()) {
225  return LZ4_read16(memPtr);
226  } else {
227  const BYTE* p = (const BYTE*)memPtr;
228  return (U16)((U16)p[0] + (p[1]<<8));
229  }
230 }
231 
232 static void LZ4_writeLE16(void* memPtr, U16 value)
233 {
234  if (LZ4_isLittleEndian()) {
235  LZ4_write16(memPtr, value);
236  } else {
237  BYTE* p = (BYTE*)memPtr;
238  p[0] = (BYTE) value;
239  p[1] = (BYTE)(value>>8);
240  }
241 }
242 
243 static void LZ4_copy8(void* dst, const void* src)
244 {
245  memcpy(dst,src,8);
246 }
247 
248 /* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
249 static void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd)
250 {
251  BYTE* d = (BYTE*)dstPtr;
252  const BYTE* s = (const BYTE*)srcPtr;
253  BYTE* const e = (BYTE*)dstEnd;
254 
255  do { LZ4_copy8(d,s); d+=8; s+=8; } while (d<e);
256 }
257 
258 
259 /*-************************************
260 * Common Constants
261 **************************************/
262 #define MINMATCH 4
263 
264 #define WILDCOPYLENGTH 8
265 #define LASTLITERALS 5
266 #define MFLIMIT (WILDCOPYLENGTH+MINMATCH)
267 static const int LZ4_minLength = (MFLIMIT+1);
268 
269 #define KB *(1 <<10)
270 #define MB *(1 <<20)
271 #define GB *(1U<<30)
272 
273 #define MAXD_LOG 16
274 #define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
275 
276 #define ML_BITS 4
277 #define ML_MASK ((1U<<ML_BITS)-1)
278 #define RUN_BITS (8-ML_BITS)
279 #define RUN_MASK ((1U<<RUN_BITS)-1)
280 
281 
282 /*-************************************
283 * Error detection
284 **************************************/
285 #define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
286 
287 #if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)
288 # include <stdio.h>
289 # define DEBUGLOG(l, ...) { \
290  if (l<=LZ4_DEBUG) { \
291  fprintf(stderr, __FILE__ ": "); \
292  fprintf(stderr, __VA_ARGS__); \
293  fprintf(stderr, " \n"); \
294  } }
295 #else
296 # define DEBUGLOG(l, ...) {} /* disabled */
297 #endif
298 
299 
300 /*-************************************
301 * Common functions
302 **************************************/
303 static unsigned LZ4_NbCommonBytes (register reg_t val)
304 {
305  if (LZ4_isLittleEndian()) {
306  if (sizeof(val)==8) {
307 # if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
308  unsigned long r = 0;
309  _BitScanForward64( &r, (U64)val );
310  return (int)(r>>3);
311 # elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
312  return (__builtin_ctzll((U64)val) >> 3);
313 # else
314  static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
315  return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
316 # endif
317  } else /* 32 bits */ {
318 # if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
319  unsigned long r;
320  _BitScanForward( &r, (U32)val );
321  return (int)(r>>3);
322 # elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
323  return (__builtin_ctz((U32)val) >> 3);
324 # else
325  static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
326  return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
327 # endif
328  }
329  } else /* Big Endian CPU */ {
330  if (sizeof(val)==8) {
331 # if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
332  unsigned long r = 0;
333  _BitScanReverse64( &r, val );
334  return (unsigned)(r>>3);
335 # elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
336  return (__builtin_clzll((U64)val) >> 3);
337 # else
338  unsigned r;
339  if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
340  if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
341  r += (!val);
342  return r;
343 # endif
344  } else /* 32 bits */ {
345 # if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
346  unsigned long r = 0;
347  _BitScanReverse( &r, (unsigned long)val );
348  return (unsigned)(r>>3);
349 # elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
350  return (__builtin_clz((U32)val) >> 3);
351 # else
352  unsigned r;
353  if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
354  r += (!val);
355  return r;
356 # endif
357  }
358  }
359 }
360 
361 #define STEPSIZE sizeof(reg_t)
362 static unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
363 {
364  const BYTE* const pStart = pIn;
365 
366  while (likely(pIn<pInLimit-(STEPSIZE-1))) {
367  reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
368  if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
369  pIn += LZ4_NbCommonBytes(diff);
370  return (unsigned)(pIn - pStart);
371  }
372 
373  if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
374  if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
375  if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
376  return (unsigned)(pIn - pStart);
377 }
378 
379 
380 #ifndef LZ4_COMMONDEFS_ONLY
381 /*-************************************
382 * Local Constants
383 **************************************/
384 static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));
385 static const U32 LZ4_skipTrigger = 6; /* Increasing this value ==> compression runs slower on incompressible data */
386 
387 
388 /*-************************************
389 * Local Structures and types
390 **************************************/
391 typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive;
392 typedef enum { byPtr, byU32, byU16 } tableType_t;
393 
394 typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
395 typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
396 
397 typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
398 typedef enum { full = 0, partial = 1 } earlyEnd_directive;
399 
400 
401 /*-************************************
402 * Local Utils
403 **************************************/
404 int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
405 const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }
406 int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }
408 
409 
410 /*-******************************
411 * Compression functions
412 ********************************/
413 static U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
414 {
415  if (tableType == byU16)
416  return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
417  else
418  return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
419 }
420 
421 static U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
422 {
423  static const U64 prime5bytes = 889523592379ULL;
424  static const U64 prime8bytes = 11400714785074694791ULL;
425  const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
426  if (LZ4_isLittleEndian())
427  return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
428  else
429  return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
430 }
431 
432 FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
433 {
434  if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
435  return LZ4_hash4(LZ4_read32(p), tableType);
436 }
437 
438 static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t const tableType, const BYTE* srcBase)
439 {
440  switch (tableType)
441  {
442  case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
443  case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
444  case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
445  }
446 }
447 
448 FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
449 {
450  U32 const h = LZ4_hashPosition(p, tableType);
451  LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
452 }
453 
454 static const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
455 {
456  if (tableType == byPtr) { const BYTE** hashTable = (const BYTE**) tableBase; return hashTable[h]; }
457  if (tableType == byU32) { const U32* const hashTable = (U32*) tableBase; return hashTable[h] + srcBase; }
458  { const U16* const hashTable = (U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */
459 }
460 
461 FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
462 {
463  U32 const h = LZ4_hashPosition(p, tableType);
464  return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
465 }
466 
467 
470 FORCE_INLINE int LZ4_compress_generic(
471  LZ4_stream_t_internal* const cctx,
472  const char* const source,
473  char* const dest,
474  const int inputSize,
475  const int maxOutputSize,
476  const limitedOutput_directive outputLimited,
477  const tableType_t tableType,
478  const dict_directive dict,
479  const dictIssue_directive dictIssue,
480  const U32 acceleration)
481 {
482  const BYTE* ip = (const BYTE*) source;
483  const BYTE* base;
484  const BYTE* lowLimit;
485  const BYTE* const lowRefLimit = ip - cctx->dictSize;
486  const BYTE* const dictionary = cctx->dictionary;
487  const BYTE* const dictEnd = dictionary + cctx->dictSize;
488  const ptrdiff_t dictDelta = dictEnd - (const BYTE*)source;
489  const BYTE* anchor = (const BYTE*) source;
490  const BYTE* const iend = ip + inputSize;
491  const BYTE* const mflimit = iend - MFLIMIT;
492  const BYTE* const matchlimit = iend - LASTLITERALS;
493 
494  BYTE* op = (BYTE*) dest;
495  BYTE* const olimit = op + maxOutputSize;
496 
497  U32 forwardH;
498 
499  /* Init conditions */
500  if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported inputSize, too large (or negative) */
501  switch(dict)
502  {
503  case noDict:
504  default:
505  base = (const BYTE*)source;
506  lowLimit = (const BYTE*)source;
507  break;
508  case withPrefix64k:
509  base = (const BYTE*)source - cctx->currentOffset;
510  lowLimit = (const BYTE*)source - cctx->dictSize;
511  break;
512  case usingExtDict:
513  base = (const BYTE*)source - cctx->currentOffset;
514  lowLimit = (const BYTE*)source;
515  break;
516  }
517  if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) return 0; /* Size too large (not within 64K limit) */
518  if (inputSize<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
519 
520  /* First Byte */
521  LZ4_putPosition(ip, cctx->hashTable, tableType, base);
522  ip++; forwardH = LZ4_hashPosition(ip, tableType);
523 
524  /* Main Loop */
525  for ( ; ; ) {
526  ptrdiff_t refDelta = 0;
527  const BYTE* match;
528  BYTE* token;
529 
530  /* Find a match */
531  { const BYTE* forwardIp = ip;
532  unsigned step = 1;
533  unsigned searchMatchNb = acceleration << LZ4_skipTrigger;
534  do {
535  U32 const h = forwardH;
536  ip = forwardIp;
537  forwardIp += step;
538  step = (searchMatchNb++ >> LZ4_skipTrigger);
539 
540  if (unlikely(forwardIp > mflimit)) goto _last_literals;
541 
542  match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
543  if (dict==usingExtDict) {
544  if (match < (const BYTE*)source) {
545  refDelta = dictDelta;
546  lowLimit = dictionary;
547  } else {
548  refDelta = 0;
549  lowLimit = (const BYTE*)source;
550  } }
551  forwardH = LZ4_hashPosition(forwardIp, tableType);
552  LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);
553 
554  } while ( ((dictIssue==dictSmall) ? (match < lowRefLimit) : 0)
555  || ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
556  || (LZ4_read32(match+refDelta) != LZ4_read32(ip)) );
557  }
558 
559  /* Catch up */
560  while (((ip>anchor) & (match+refDelta > lowLimit)) && (unlikely(ip[-1]==match[refDelta-1]))) { ip--; match--; }
561 
562  /* Encode Literals */
563  { unsigned const litLength = (unsigned)(ip - anchor);
564  token = op++;
565  if ((outputLimited) && /* Check output buffer overflow */
566  (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)))
567  return 0;
568  if (litLength >= RUN_MASK) {
569  int len = (int)litLength-RUN_MASK;
570  *token = (RUN_MASK<<ML_BITS);
571  for(; len >= 255 ; len-=255) *op++ = 255;
572  *op++ = (BYTE)len;
573  }
574  else *token = (BYTE)(litLength<<ML_BITS);
575 
576  /* Copy Literals */
577  LZ4_wildCopy(op, anchor, op+litLength);
578  op+=litLength;
579  }
580 
581 _next_match:
582  /* Encode Offset */
583  LZ4_writeLE16(op, (U16)(ip-match)); op+=2;
584 
585  /* Encode MatchLength */
586  { unsigned matchCode;
587 
588  if ((dict==usingExtDict) && (lowLimit==dictionary)) {
589  const BYTE* limit;
590  match += refDelta;
591  limit = ip + (dictEnd-match);
592  if (limit > matchlimit) limit = matchlimit;
593  matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
594  ip += MINMATCH + matchCode;
595  if (ip==limit) {
596  unsigned const more = LZ4_count(ip, (const BYTE*)source, matchlimit);
597  matchCode += more;
598  ip += more;
599  }
600  } else {
601  matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
602  ip += MINMATCH + matchCode;
603  }
604 
605  if ( outputLimited && /* Check output buffer overflow */
606  (unlikely(op + (1 + LASTLITERALS) + (matchCode>>8) > olimit)) )
607  return 0;
608  if (matchCode >= ML_MASK) {
609  *token += ML_MASK;
610  matchCode -= ML_MASK;
611  LZ4_write32(op, 0xFFFFFFFF);
612  while (matchCode >= 4*255) op+=4, LZ4_write32(op, 0xFFFFFFFF), matchCode -= 4*255;
613  op += matchCode / 255;
614  *op++ = (BYTE)(matchCode % 255);
615  } else
616  *token += (BYTE)(matchCode);
617  }
618 
619  anchor = ip;
620 
621  /* Test end of chunk */
622  if (ip > mflimit) break;
623 
624  /* Fill table */
625  LZ4_putPosition(ip-2, cctx->hashTable, tableType, base);
626 
627  /* Test next position */
628  match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
629  if (dict==usingExtDict) {
630  if (match < (const BYTE*)source) {
631  refDelta = dictDelta;
632  lowLimit = dictionary;
633  } else {
634  refDelta = 0;
635  lowLimit = (const BYTE*)source;
636  } }
637  LZ4_putPosition(ip, cctx->hashTable, tableType, base);
638  if ( ((dictIssue==dictSmall) ? (match>=lowRefLimit) : 1)
639  && (match+MAX_DISTANCE>=ip)
640  && (LZ4_read32(match+refDelta)==LZ4_read32(ip)) )
641  { token=op++; *token=0; goto _next_match; }
642 
643  /* Prepare next loop */
644  forwardH = LZ4_hashPosition(++ip, tableType);
645  }
646 
647 _last_literals:
648  /* Encode Last Literals */
649  { size_t const lastRun = (size_t)(iend - anchor);
650  if ( (outputLimited) && /* Check output buffer overflow */
651  ((op - (BYTE*)dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize) )
652  return 0;
653  if (lastRun >= RUN_MASK) {
654  size_t accumulator = lastRun - RUN_MASK;
655  *op++ = RUN_MASK << ML_BITS;
656  for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
657  *op++ = (BYTE) accumulator;
658  } else {
659  *op++ = (BYTE)(lastRun<<ML_BITS);
660  }
661  memcpy(op, anchor, lastRun);
662  op += lastRun;
663  }
664 
665  /* End */
666  return (int) (((char*)op)-dest);
667 }
668 
669 
670 int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
671 {
672  LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
673  LZ4_resetStream((LZ4_stream_t*)state);
674  if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
675 
676  if (maxOutputSize >= LZ4_compressBound(inputSize)) {
677  if (inputSize < LZ4_64Klimit)
678  return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
679  else
680  return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue, acceleration);
681  } else {
682  if (inputSize < LZ4_64Klimit)
683  return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
684  else
685  return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue, acceleration);
686  }
687 }
688 
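/* --- Illustrative usage sketch (not part of upstream lz4.c) ---
 * A caller can pass its own LZ4_stream_t as scratch state, avoiding the
 * allocation that LZ4_HEAPMODE would otherwise perform on every call.
 * The function and variable names below are examples only. */
static int example_compress_with_extState(const char* src, int srcSize,
                                          char* dst, int dstCapacity)
{
    LZ4_stream_t state;   /* scratch state; LZ4_compress_fast_extState() resets it before use */
    return LZ4_compress_fast_extState(&state, src, dst, srcSize, dstCapacity, 1);
}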
689 
690 int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
691 {
692 #if (LZ4_HEAPMODE)
693  void* ctxPtr = ALLOCATOR(1, sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
694 #else
695  LZ4_stream_t ctx;
696  void* const ctxPtr = &ctx;
697 #endif
698 
699  int const result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);
700 
701 #if (LZ4_HEAPMODE)
702  FREEMEM(ctxPtr);
703 #endif
704  return result;
705 }
706 
707 
708 int LZ4_compress_default(const char* source, char* dest, int inputSize, int maxOutputSize)
709 {
710  return LZ4_compress_fast(source, dest, inputSize, maxOutputSize, 1);
711 }
712 
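/* --- Illustrative usage sketch (not part of upstream lz4.c) ---
 * One-shot round-trip : LZ4_compressBound() gives the worst-case compressed
 * size, so compression into a buffer of that size cannot fail for lack of room.
 * The function and variable names below are examples only. */
static int example_roundtrip(const char* src, int srcSize, char* regen, int regenCapacity)
{
    int const bound = LZ4_compressBound(srcSize);
    char* const compressed = (char*)malloc((size_t)bound);
    int dSize = -1;
    if (compressed != NULL) {
        int const cSize = LZ4_compress_default(src, compressed, srcSize, bound);
        if (cSize > 0)
            dSize = LZ4_decompress_safe(compressed, regen, cSize, regenCapacity);
        free(compressed);
    }
    return dSize;   /* equals srcSize on success, negative on error */
}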
713 
714 /* hidden debug function */
715 /* strangely enough, gcc generates faster code when this function is uncommented, even if unused */
716 int LZ4_compress_fast_force(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
717 {
718  LZ4_stream_t ctx;
719  LZ4_resetStream(&ctx);
720 
721  if (inputSize < LZ4_64Klimit)
722  return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
723  else
724  return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, sizeof(void*)==8 ? byU32 : byPtr, noDict, noDictIssue, acceleration);
725 }
726 
727 
728 /*-******************************
729 * *_destSize() variant
730 ********************************/
731 
732 static int LZ4_compress_destSize_generic(
733  LZ4_stream_t_internal* const ctx,
734  const char* const src,
735  char* const dst,
736  int* const srcSizePtr,
737  const int targetDstSize,
738  const tableType_t tableType)
739 {
740  const BYTE* ip = (const BYTE*) src;
741  const BYTE* base = (const BYTE*) src;
742  const BYTE* lowLimit = (const BYTE*) src;
743  const BYTE* anchor = ip;
744  const BYTE* const iend = ip + *srcSizePtr;
745  const BYTE* const mflimit = iend - MFLIMIT;
746  const BYTE* const matchlimit = iend - LASTLITERALS;
747 
748  BYTE* op = (BYTE*) dst;
749  BYTE* const oend = op + targetDstSize;
750  BYTE* const oMaxLit = op + targetDstSize - 2 /* offset */ - 8 /* because 8+MINMATCH==MFLIMIT */ - 1 /* token */;
751  BYTE* const oMaxMatch = op + targetDstSize - (LASTLITERALS + 1 /* token */);
752  BYTE* const oMaxSeq = oMaxLit - 1 /* token */;
753 
754  U32 forwardH;
755 
756 
757  /* Init conditions */
758  if (targetDstSize < 1) return 0; /* Impossible to store anything */
759  if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size, too large (or negative) */
760  if ((tableType == byU16) && (*srcSizePtr>=LZ4_64Klimit)) return 0; /* Size too large (not within 64K limit) */
761  if (*srcSizePtr<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
762 
763  /* First Byte */
764  *srcSizePtr = 0;
765  LZ4_putPosition(ip, ctx->hashTable, tableType, base);
766  ip++; forwardH = LZ4_hashPosition(ip, tableType);
767 
768  /* Main Loop */
769  for ( ; ; ) {
770  const BYTE* match;
771  BYTE* token;
772 
773  /* Find a match */
774  { const BYTE* forwardIp = ip;
775  unsigned step = 1;
776  unsigned searchMatchNb = 1 << LZ4_skipTrigger;
777 
778  do {
779  U32 h = forwardH;
780  ip = forwardIp;
781  forwardIp += step;
782  step = (searchMatchNb++ >> LZ4_skipTrigger);
783 
784  if (unlikely(forwardIp > mflimit)) goto _last_literals;
785 
786  match = LZ4_getPositionOnHash(h, ctx->hashTable, tableType, base);
787  forwardH = LZ4_hashPosition(forwardIp, tableType);
788  LZ4_putPositionOnHash(ip, h, ctx->hashTable, tableType, base);
789 
790  } while ( ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
791  || (LZ4_read32(match) != LZ4_read32(ip)) );
792  }
793 
794  /* Catch up */
795  while ((ip>anchor) && (match > lowLimit) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }
796 
797  /* Encode Literal length */
798  { unsigned litLength = (unsigned)(ip - anchor);
799  token = op++;
800  if (op + ((litLength+240)/255) + litLength > oMaxLit) {
801  /* Not enough space for a last match */
802  op--;
803  goto _last_literals;
804  }
805  if (litLength>=RUN_MASK) {
806  unsigned len = litLength - RUN_MASK;
807  *token=(RUN_MASK<<ML_BITS);
808  for(; len >= 255 ; len-=255) *op++ = 255;
809  *op++ = (BYTE)len;
810  }
811  else *token = (BYTE)(litLength<<ML_BITS);
812 
813  /* Copy Literals */
814  LZ4_wildCopy(op, anchor, op+litLength);
815  op += litLength;
816  }
817 
818 _next_match:
819  /* Encode Offset */
820  LZ4_writeLE16(op, (U16)(ip-match)); op+=2;
821 
822  /* Encode MatchLength */
823  { size_t matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
824 
825  if (op + ((matchLength+240)/255) > oMaxMatch) {
826  /* Match description too long : reduce it */
827  matchLength = (15-1) + (oMaxMatch-op) * 255;
828  }
829  ip += MINMATCH + matchLength;
830 
831  if (matchLength>=ML_MASK) {
832  *token += ML_MASK;
833  matchLength -= ML_MASK;
834  while (matchLength >= 255) { matchLength-=255; *op++ = 255; }
835  *op++ = (BYTE)matchLength;
836  }
837  else *token += (BYTE)(matchLength);
838  }
839 
840  anchor = ip;
841 
842  /* Test end of block */
843  if (ip > mflimit) break;
844  if (op > oMaxSeq) break;
845 
846  /* Fill table */
847  LZ4_putPosition(ip-2, ctx->hashTable, tableType, base);
848 
849  /* Test next position */
850  match = LZ4_getPosition(ip, ctx->hashTable, tableType, base);
851  LZ4_putPosition(ip, ctx->hashTable, tableType, base);
852  if ( (match+MAX_DISTANCE>=ip)
853  && (LZ4_read32(match)==LZ4_read32(ip)) )
854  { token=op++; *token=0; goto _next_match; }
855 
856  /* Prepare next loop */
857  forwardH = LZ4_hashPosition(++ip, tableType);
858  }
859 
860 _last_literals:
861  /* Encode Last Literals */
862  { size_t lastRunSize = (size_t)(iend - anchor);
863  if (op + 1 /* token */ + ((lastRunSize+240)/255) /* litLength */ + lastRunSize /* literals */ > oend) {
864  /* adapt lastRunSize to fill 'dst' */
865  lastRunSize = (oend-op) - 1;
866  lastRunSize -= (lastRunSize+240)/255;
867  }
868  ip = anchor + lastRunSize;
869 
870  if (lastRunSize >= RUN_MASK) {
871  size_t accumulator = lastRunSize - RUN_MASK;
872  *op++ = RUN_MASK << ML_BITS;
873  for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
874  *op++ = (BYTE) accumulator;
875  } else {
876  *op++ = (BYTE)(lastRunSize<<ML_BITS);
877  }
878  memcpy(op, anchor, lastRunSize);
879  op += lastRunSize;
880  }
881 
882  /* End */
883  *srcSizePtr = (int) (((const char*)ip)-src);
884  return (int) (((char*)op)-dst);
885 }
886 
887 
888 static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
889 {
890  LZ4_resetStream(state);
891 
892  if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) { /* compression success is guaranteed */
893  return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
894  } else {
895  if (*srcSizePtr < LZ4_64Klimit)
896  return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, byU16);
897  else
898  return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, sizeof(void*)==8 ? byU32 : byPtr);
899  }
900 }
901 
902 
903 int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
904 {
905 #if (LZ4_HEAPMODE)
906  LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOCATOR(1, sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
907 #else
908  LZ4_stream_t ctxBody;
909  LZ4_stream_t* ctx = &ctxBody;
910 #endif
911 
912  int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);
913 
914 #if (LZ4_HEAPMODE)
915  FREEMEM(ctx);
916 #endif
917  return result;
918 }
919 
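/* --- Illustrative usage sketch (not part of upstream lz4.c) ---
 * LZ4_compress_destSize() fills a fixed-size destination with as much
 * compressed data as fits; on return, the updated source size tells how many
 * input bytes were actually consumed. The names below are examples only. */
static int example_fill_packet(const char* src, int srcSize,
                               char* packet, int packetSize,
                               int* consumedPtr)
{
    int srcConsumed = srcSize;
    int const written = LZ4_compress_destSize(src, packet, &srcConsumed, packetSize);
    *consumedPtr = srcConsumed;   /* a next call would start at src + srcConsumed */
    return written;               /* number of compressed bytes written into packet */
}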
920 
921 
922 /*-******************************
923 * Streaming functions
924 ********************************/
925 
926 LZ4_stream_t* LZ4_createStream(void)
927 {
928  LZ4_stream_t* lz4s = (LZ4_stream_t*)ALLOCATOR(8, LZ4_STREAMSIZE_U64);
929  LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal)); /* A compilation error here means LZ4_STREAMSIZE is not large enough */
930  LZ4_resetStream(lz4s);
931  return lz4s;
932 }
933 
934 void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
935 {
936  MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t));
937 }
938 
939 int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
940 {
941  FREEMEM(LZ4_stream);
942  return (0);
943 }
944 
945 
946 #define HASH_UNIT sizeof(reg_t)
947 int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
948 {
949  LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse;
950  const BYTE* p = (const BYTE*)dictionary;
951  const BYTE* const dictEnd = p + dictSize;
952  const BYTE* base;
953 
954  if ((dict->initCheck) || (dict->currentOffset > 1 GB)) /* Uninitialized structure, or reuse overflow */
955  LZ4_resetStream(LZ4_dict);
956 
957  if (dictSize < (int)HASH_UNIT) {
958  dict->dictionary = NULL;
959  dict->dictSize = 0;
960  return 0;
961  }
962 
963  if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
964  dict->currentOffset += 64 KB;
965  base = p - dict->currentOffset;
966  dict->dictionary = p;
967  dict->dictSize = (U32)(dictEnd - p);
968  dict->currentOffset += dict->dictSize;
969 
970  while (p <= dictEnd-HASH_UNIT) {
971  LZ4_putPosition(p, dict->hashTable, byU32, base);
972  p+=3;
973  }
974 
975  return dict->dictSize;
976 }
977 
978 
979 static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src)
980 {
981  if ((LZ4_dict->currentOffset > 0x80000000) ||
982  ((uptrval)LZ4_dict->currentOffset > (uptrval)src)) { /* address space overflow */
983  /* rescale hash table */
984  U32 const delta = LZ4_dict->currentOffset - 64 KB;
985  const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
986  int i;
987  for (i=0; i<LZ4_HASH_SIZE_U32; i++) {
988  if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
989  else LZ4_dict->hashTable[i] -= delta;
990  }
991  LZ4_dict->currentOffset = 64 KB;
992  if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
993  LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
994  }
995 }
996 
997 
998 int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
999 {
1000  LZ4_stream_t_internal* streamPtr = &LZ4_stream->internal_donotuse;
1001  const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;
1002 
1003  const BYTE* smallest = (const BYTE*) source;
1004  if (streamPtr->initCheck) return 0; /* Uninitialized structure detected */
1005  if ((streamPtr->dictSize>0) && (smallest>dictEnd)) smallest = dictEnd;
1006  LZ4_renormDictT(streamPtr, smallest);
1007  if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
1008 
1009  /* Check overlapping input/dictionary space */
1010  { const BYTE* sourceEnd = (const BYTE*) source + inputSize;
1011  if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) {
1012  streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
1013  if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
1014  if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
1015  streamPtr->dictionary = dictEnd - streamPtr->dictSize;
1016  }
1017  }
1018 
1019  /* prefix mode : source data follows dictionary */
1020  if (dictEnd == (const BYTE*)source) {
1021  int result;
1022  if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
1023  result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, dictSmall, acceleration);
1024  else
1025  result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, noDictIssue, acceleration);
1026  streamPtr->dictSize += (U32)inputSize;
1027  streamPtr->currentOffset += (U32)inputSize;
1028  return result;
1029  }
1030 
1031  /* external dictionary mode */
1032  { int result;
1033  if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
1034  result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, dictSmall, acceleration);
1035  else
1036  result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, noDictIssue, acceleration);
1037  streamPtr->dictionary = (const BYTE*)source;
1038  streamPtr->dictSize = (U32)inputSize;
1039  streamPtr->currentOffset += (U32)inputSize;
1040  return result;
1041  }
1042 }
1043 
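/* --- Illustrative usage sketch (not part of upstream lz4.c) ---
 * Streaming compression of consecutive chunks held in one contiguous buffer
 * (prefix mode) : each block may reference the previous ones, which improves
 * the ratio on small chunks. Chunk layout and names are examples only; real
 * usage also needs to transmit the per-block compressed sizes. */
static int example_stream_compress(const char* buffer, const int chunkSizes[], int nbChunks,
                                   char* dst, int dstCapacity)
{
    LZ4_stream_t stream;
    const char* chunkPtr = buffer;
    char* dstPtr = dst;
    int i;
    LZ4_resetStream(&stream);
    for (i = 0; i < nbChunks; i++) {
        int const cSize = LZ4_compress_fast_continue(&stream, chunkPtr, dstPtr,
                                                     chunkSizes[i],
                                                     dstCapacity - (int)(dstPtr - dst), 1);
        if (cSize <= 0) return -1;   /* not enough room left in dst */
        chunkPtr += chunkSizes[i];
        dstPtr   += cSize;
    }
    return (int)(dstPtr - dst);      /* total compressed size */
}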
1044 
1045 /* Hidden debug function, to force external dictionary mode */
1046 int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int inputSize)
1047 {
1048  LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse;
1049  int result;
1050  const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;
1051 
1052  const BYTE* smallest = dictEnd;
1053  if (smallest > (const BYTE*) source) smallest = (const BYTE*) source;
1054  LZ4_renormDictT(streamPtr, smallest);
1055 
1056  result = LZ4_compress_generic(streamPtr, source, dest, inputSize, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);
1057 
1058  streamPtr->dictionary = (const BYTE*)source;
1059  streamPtr->dictSize = (U32)inputSize;
1060  streamPtr->currentOffset += (U32)inputSize;
1061 
1062  return result;
1063 }
1064 
1065 
1073 int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
1074 {
1075  LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
1076  const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;
1077 
1078  if ((U32)dictSize > 64 KB) dictSize = 64 KB; /* useless to define a dictionary > 64 KB */
1079  if ((U32)dictSize > dict->dictSize) dictSize = dict->dictSize;
1080 
1081  memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
1082 
1083  dict->dictionary = (const BYTE*)safeBuffer;
1084  dict->dictSize = (U32)dictSize;
1085 
1086  return dictSize;
1087 }
1088 
1089 
1090 
1091 /*-*****************************
1092 * Decompression functions
1093 *******************************/
1100 FORCE_INLINE int LZ4_decompress_generic(
1101  const char* const source,
1102  char* const dest,
1103  int inputSize,
1104  int outputSize, /* If endOnInput==endOnInputSize, this value is the max size of Output Buffer. */
1105 
1106  int endOnInput, /* endOnOutputSize, endOnInputSize */
1107  int partialDecoding, /* full, partial */
1108  int targetOutputSize, /* only used if partialDecoding==partial */
1109  int dict, /* noDict, withPrefix64k, usingExtDict */
1110  const BYTE* const lowPrefix, /* == dest when no prefix */
1111  const BYTE* const dictStart, /* only if dict==usingExtDict */
1112  const size_t dictSize /* note : = 0 if noDict */
1113  )
1114 {
1115  /* Local Variables */
1116  const BYTE* ip = (const BYTE*) source;
1117  const BYTE* const iend = ip + inputSize;
1118 
1119  BYTE* op = (BYTE*) dest;
1120  BYTE* const oend = op + outputSize;
1121  BYTE* cpy;
1122  BYTE* oexit = op + targetOutputSize;
1123  const BYTE* const lowLimit = lowPrefix - dictSize;
1124 
1125  const BYTE* const dictEnd = (const BYTE*)dictStart + dictSize;
1126  const unsigned dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};
1127  const int dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
1128 
1129  const int safeDecode = (endOnInput==endOnInputSize);
1130  const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB)));
1131 
1132 
1133  /* Special cases */
1134  if ((partialDecoding) && (oexit > oend-MFLIMIT)) oexit = oend-MFLIMIT; /* targetOutputSize too high => decode everything */
1135  if ((endOnInput) && (unlikely(outputSize==0))) return ((inputSize==1) && (*ip==0)) ? 0 : -1; /* Empty output buffer */
1136  if ((!endOnInput) && (unlikely(outputSize==0))) return (*ip==0?1:-1);
1137 
1138  /* Main Loop : decode sequences */
1139  while (1) {
1140  size_t length;
1141  const BYTE* match;
1142  size_t offset;
1143 
1144  /* get literal length */
1145  unsigned const token = *ip++;
1146  if ((length=(token>>ML_BITS)) == RUN_MASK) {
1147  unsigned s;
1148  do {
1149  s = *ip++;
1150  length += s;
1151  } while ( likely(endOnInput ? ip<iend-RUN_MASK : 1) & (s==255) );
1152  if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) goto _output_error; /* overflow detection */
1153  if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) goto _output_error; /* overflow detection */
1154  }
1155 
1156  /* copy literals */
1157  cpy = op+length;
1158  if ( ((endOnInput) && ((cpy>(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) )
1159  || ((!endOnInput) && (cpy>oend-WILDCOPYLENGTH)) )
1160  {
1161  if (partialDecoding) {
1162  if (cpy > oend) goto _output_error; /* Error : write attempt beyond end of output buffer */
1163  if ((endOnInput) && (ip+length > iend)) goto _output_error; /* Error : read attempt beyond end of input buffer */
1164  } else {
1165  if ((!endOnInput) && (cpy != oend)) goto _output_error; /* Error : block decoding must stop exactly there */
1166  if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error; /* Error : input must be consumed */
1167  }
1168  memcpy(op, ip, length);
1169  ip += length;
1170  op += length;
1171  break; /* Necessarily EOF, due to parsing restrictions */
1172  }
1173  LZ4_wildCopy(op, ip, cpy);
1174  ip += length; op = cpy;
1175 
1176  /* get offset */
1177  offset = LZ4_readLE16(ip); ip+=2;
1178  match = op - offset;
1179  if ((checkOffset) && (unlikely(match < lowLimit))) goto _output_error; /* Error : offset outside buffers */
1180  LZ4_write32(op, (U32)offset); /* costs ~1%; silence an msan warning when offset==0 */
1181 
1182  /* get matchlength */
1183  length = token & ML_MASK;
1184  if (length == ML_MASK) {
1185  unsigned s;
1186  do {
1187  s = *ip++;
1188  if ((endOnInput) && (ip > iend-LASTLITERALS)) goto _output_error;
1189  length += s;
1190  } while (s==255);
1191  if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error; /* overflow detection */
1192  }
1193  length += MINMATCH;
1194 
1195  /* check external dictionary */
1196  if ((dict==usingExtDict) && (match < lowPrefix)) {
1197  if (unlikely(op+length > oend-LASTLITERALS)) goto _output_error; /* doesn't respect parsing restriction */
1198 
1199  if (length <= (size_t)(lowPrefix-match)) {
1200  /* match can be copied as a single segment from external dictionary */
1201  memmove(op, dictEnd - (lowPrefix-match), length);
1202  op += length;
1203  } else {
1204  /* match encompass external dictionary and current block */
1205  size_t const copySize = (size_t)(lowPrefix-match);
1206  size_t const restSize = length - copySize;
1207  memcpy(op, dictEnd - copySize, copySize);
1208  op += copySize;
1209  if (restSize > (size_t)(op-lowPrefix)) { /* overlap copy */
1210  BYTE* const endOfMatch = op + restSize;
1211  const BYTE* copyFrom = lowPrefix;
1212  while (op < endOfMatch) *op++ = *copyFrom++;
1213  } else {
1214  memcpy(op, lowPrefix, restSize);
1215  op += restSize;
1216  } }
1217  continue;
1218  }
1219 
1220  /* copy match within block */
1221  cpy = op + length;
1222  if (unlikely(offset<8)) {
1223  const int dec64 = dec64table[offset];
1224  op[0] = match[0];
1225  op[1] = match[1];
1226  op[2] = match[2];
1227  op[3] = match[3];
1228  match += dec32table[offset];
1229  memcpy(op+4, match, 4);
1230  match -= dec64;
1231  } else { LZ4_copy8(op, match); match+=8; }
1232  op += 8;
1233 
1234  if (unlikely(cpy>oend-12)) {
1235  BYTE* const oCopyLimit = oend-(WILDCOPYLENGTH-1);
1236  if (cpy > oend-LASTLITERALS) goto _output_error; /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
1237  if (op < oCopyLimit) {
1238  LZ4_wildCopy(op, match, oCopyLimit);
1239  match += oCopyLimit - op;
1240  op = oCopyLimit;
1241  }
1242  while (op<cpy) *op++ = *match++;
1243  } else {
1244  LZ4_copy8(op, match);
1245  if (length>16) LZ4_wildCopy(op+8, match+8, cpy);
1246  }
1247  op=cpy; /* correction */
1248  }
1249 
1250  /* end of decoding */
1251  if (endOnInput)
1252  return (int) (((char*)op)-dest); /* Nb of output bytes decoded */
1253  else
1254  return (int) (((const char*)ip)-source); /* Nb of input bytes read */
1255 
1256  /* Overflow error detected */
1257 _output_error:
1258  return (int) (-(((const char*)ip)-source))-1;
1259 }
1260 
1261 
1262 int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
1263 {
1264  return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, full, 0, noDict, (BYTE*)dest, NULL, 0);
1265 }
1266 
1267 int LZ4_decompress_safe_partial(const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize)
1268 {
1269  return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, partial, targetOutputSize, noDict, (BYTE*)dest, NULL, 0);
1270 }
1271 
1272 int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
1273 {
1274  return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)(dest - 64 KB), NULL, 64 KB);
1275 }
1276 
1277 
1278 /*===== streaming decompression functions =====*/
1279 
1280 /*
1281  * If you prefer dynamic allocation methods,
1282  * LZ4_createStreamDecode()
1283  * provides a pointer to a freshly initialized LZ4_streamDecode_t structure.
1284  */
1285 LZ4_streamDecode_t* LZ4_createStreamDecode(void)
1286 {
1287  LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOCATOR(1, sizeof(LZ4_streamDecode_t));
1288  return lz4s;
1289 }
1290 
1291 int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
1292 {
1293  FREEMEM(LZ4_stream);
1294  return 0;
1295 }
1296 
1304 int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)
1305 {
1306  LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
1307  lz4sd->prefixSize = (size_t) dictSize;
1308  lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize;
1309  lz4sd->externalDict = NULL;
1310  lz4sd->extDictSize = 0;
1311  return 1;
1312 }
1313 
1314 /*
1315 *_continue() :
1316  These decoding functions allow decompression of multiple blocks in "streaming" mode.
1317  Previously decoded blocks must still be available at the memory position where they were decoded.
1318  If it's not possible, save the relevant part of decoded data into a safe buffer,
1319  and indicate where it stands using LZ4_setStreamDecode()
1320 */
1321 int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
1322 {
1323  LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
1324  int result;
1325 
1326  if (lz4sd->prefixEnd == (BYTE*)dest) {
1327  result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
1328  endOnInputSize, full, 0,
1329  usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
1330  if (result <= 0) return result;
1331  lz4sd->prefixSize += result;
1332  lz4sd->prefixEnd += result;
1333  } else {
1334  lz4sd->extDictSize = lz4sd->prefixSize;
1335  lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
1336  result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
1337  endOnInputSize, full, 0,
1338  usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
1339  if (result <= 0) return result;
1340  lz4sd->prefixSize = result;
1341  lz4sd->prefixEnd = (BYTE*)dest + result;
1342  }
1343 
1344  return result;
1345 }
1346 
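/* --- Illustrative usage sketch (not part of upstream lz4.c) ---
 * Streaming decompression matching the prefix-mode compressor sketched above.
 * Decoded blocks are appended to one contiguous buffer, so earlier output stays
 * accessible as the "prefix". Block framing and names are examples only. */
static int example_stream_decompress(const char* src, const int blockSizes[], int nbBlocks,
                                     char* dst, int dstCapacity)
{
    LZ4_streamDecode_t streamDecode;
    const char* srcPtr = src;
    char* dstPtr = dst;
    int i;
    if (!LZ4_setStreamDecode(&streamDecode, NULL, 0)) return -1;
    for (i = 0; i < nbBlocks; i++) {
        int const dSize = LZ4_decompress_safe_continue(&streamDecode, srcPtr, dstPtr,
                                                       blockSizes[i],
                                                       dstCapacity - (int)(dstPtr - dst));
        if (dSize < 0) return -1;    /* corrupted block or not enough room left */
        srcPtr += blockSizes[i];
        dstPtr += dSize;
    }
    return (int)(dstPtr - dst);      /* total decompressed size */
}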
1347 int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize)
1348 {
1349  LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
1350  int result;
1351 
1352  if (lz4sd->prefixEnd == (BYTE*)dest) {
1353  result = LZ4_decompress_generic(source, dest, 0, originalSize,
1354  endOnOutputSize, full, 0,
1355  usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
1356  if (result <= 0) return result;
1357  lz4sd->prefixSize += originalSize;
1358  lz4sd->prefixEnd += originalSize;
1359  } else {
1360  lz4sd->extDictSize = lz4sd->prefixSize;
1361  lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
1362  result = LZ4_decompress_generic(source, dest, 0, originalSize,
1363  endOnOutputSize, full, 0,
1364  usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
1365  if (result <= 0) return result;
1366  lz4sd->prefixSize = originalSize;
1367  lz4sd->prefixEnd = (BYTE*)dest + originalSize;
1368  }
1369 
1370  return result;
1371 }
1372 
1373 
1374 /*
1375 Advanced decoding functions :
1376 *_usingDict() :
1377  These decoding functions work the same as the "_continue" ones;
1378  the dictionary must be explicitly provided within the parameters.
1379 */
1380 
1381 FORCE_INLINE int LZ4_decompress_usingDict_generic(const char* source, char* dest, int compressedSize, int maxOutputSize, int safe, const char* dictStart, int dictSize)
1382 {
1383  if (dictSize==0)
1384  return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest, NULL, 0);
1385  if (dictStart+dictSize == dest) {
1386  if (dictSize >= (int)(64 KB - 1))
1387  return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, withPrefix64k, (BYTE*)dest-64 KB, NULL, 0);
1388  return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest-dictSize, NULL, 0);
1389  }
1390  return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize);
1391 }
1392 
1393 int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
1394 {
1395  return LZ4_decompress_usingDict_generic(source, dest, compressedSize, maxOutputSize, 1, dictStart, dictSize);
1396 }
1397 
1398 int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
1399 {
1400  return LZ4_decompress_usingDict_generic(source, dest, 0, originalSize, 0, dictStart, dictSize);
1401 }
1402 
1403 /* debug function */
1404 int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
1405 {
1406  return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize);
1407 }
1408 
1409 
1410 /*=*************************************************
1411 * Obsolete Functions
1412 ***************************************************/
1413 /* obsolete compression functions */
1414 int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize) { return LZ4_compress_default(source, dest, inputSize, maxOutputSize); }
1415 int LZ4_compress(const char* source, char* dest, int inputSize) { return LZ4_compress_default(source, dest, inputSize, LZ4_compressBound(inputSize)); }
1416 int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1); }
1417 int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1); }
1418 int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, maxDstSize, 1); }
1419 int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize) { return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1); }
1420 
1421 /*
1422 These function names are deprecated and should no longer be used.
1423 They are only provided here for compatibility with older user programs.
1424 - LZ4_uncompress is totally equivalent to LZ4_decompress_fast
1425 - LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
1426 */
1427 int LZ4_uncompress (const char* source, char* dest, int outputSize) { return LZ4_decompress_fast(source, dest, outputSize); }
1428 int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) { return LZ4_decompress_safe(source, dest, isize, maxOutputSize); }
1429 
1430 
1431 /* Obsolete Streaming functions */
1432 
1434 
1435 static void LZ4_init(LZ4_stream_t* lz4ds, BYTE* base)
1436 {
1437  MEM_INIT(lz4ds, 0, sizeof(LZ4_stream_t));
1438  lz4ds->internal_donotuse.bufferStart = base;
1439 }
1440 
1441 int LZ4_resetStreamState(void* state, char* inputBuffer)
1442 {
1443  if ((((uptrval)state) & 3) != 0) return 1; /* Error : pointer is not aligned on 4-bytes boundary */
1444  LZ4_init((LZ4_stream_t*)state, (BYTE*)inputBuffer);
1445  return 0;
1446 }
1447 
1448 void* LZ4_create (char* inputBuffer)
1449 {
1450  LZ4_stream_t* lz4ds = (LZ4_stream_t*)ALLOCATOR(8, sizeof(LZ4_stream_t));
1451  LZ4_init (lz4ds, (BYTE*)inputBuffer);
1452  return lz4ds;
1453 }
1454 
1455 char* LZ4_slideInputBuffer (void* LZ4_Data)
1456 {
1457  LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)LZ4_Data)->internal_donotuse;
1458  int dictSize = LZ4_saveDict((LZ4_stream_t*)LZ4_Data, (char*)ctx->bufferStart, 64 KB);
1459  return (char*)(ctx->bufferStart + dictSize);
1460 }
1461 
1462 /* Obsolete streaming decompression functions */
1463 
1464 int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
1465 {
1466  return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
1467 }
1468 
1469 int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
1470 {
1471  return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
1472 }
1473 
1474 #endif /* LZ4_COMMONDEFS_ONLY */