/*
   LZ4 - Fast LZ compression algorithm
   Copyright (C) 2011-2015, Yann Collet.

   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

       * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You can contact the author at :
   - LZ4 source repository : https://github.com/Cyan4973/lz4
   - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
*/


/**************************************
*  Tuning parameters
**************************************/
/*
 * HEAPMODE :
 * Select how the default compression functions allocate memory for their hash table:
 * on the stack (0: default, fastest) or on the heap (1: requires malloc()).
 */
#define HEAPMODE 0

/*
 * ACCELERATION_DEFAULT :
 * Default "acceleration" used by LZ4_compress_fast() when the parameter value is <= 0
 */
#define ACCELERATION_DEFAULT 1
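
/*
 * Illustrative note (an assumption, not part of the original source): since
 * LZ4_compress_fast() replaces any acceleration value < 1 with
 * ACCELERATION_DEFAULT, these two calls behave identically:
 *
 *     LZ4_compress_fast(src, dst, srcSize, dstCapacity, 0);
 *     LZ4_compress_fast(src, dst, srcSize, dstCapacity, ACCELERATION_DEFAULT);
 *
 * Larger acceleration values trade compression ratio for speed.
 */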


/**************************************
*  CPU Feature Detection
**************************************/
/* LZ4_FORCE_MEMORY_ACCESS
 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
 * The switch below allows selecting a different access method for improved performance.
 * Method 0 (default) : use `memcpy()`. Safe and portable.
 * Method 1 : `__packed` statement. It depends on a compiler extension (i.e., not portable).
 *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
 * Method 2 : direct access. This method is portable but violates the C standard.
 *            It can generate buggy code on targets whose generated assembly depends on alignment.
 *            But in some circumstances, it's the only known way to get the best performance (e.g., GCC + ARMv6).
 * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
 * Prefer these methods in priority order (0 > 1 > 2)
 */
#ifndef LZ4_FORCE_MEMORY_ACCESS   /* can be defined externally, on the command line for example */
# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
#  define LZ4_FORCE_MEMORY_ACCESS 2
# elif defined(__INTEL_COMPILER) || \
    (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
#  define LZ4_FORCE_MEMORY_ACCESS 1
# endif
#endif
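
/*
 * Illustrative example (an assumption, not part of the original source):
 * the access method can also be forced from the build command line, e.g.
 *
 *     cc -O2 -DLZ4_FORCE_MEMORY_ACCESS=1 -c lz4.c
 *
 * Method 0 remains the safe default when in doubt.
 */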

/*
 * LZ4_FORCE_SW_BITCOUNT
 * Define this parameter if your target system or compiler does not support hardware bit count
 */
#if defined(_MSC_VER) && defined(_WIN32_WCE)   /* Visual Studio for Windows CE does not support hardware bit count */
# define LZ4_FORCE_SW_BITCOUNT
#endif


/**************************************
*  Includes
**************************************/
#include "lz4.h"


/**************************************
*  Compiler Options
**************************************/
#ifdef _MSC_VER    /* Visual Studio */
# define FORCE_INLINE static __forceinline
# include <intrin.h>
# pragma warning(disable : 4127)   /* disable: C4127: conditional expression is constant */
# pragma warning(disable : 4293)   /* disable: C4293: too large shift (32-bits) */
#else
# if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)   /* C99 */
#  if defined(__GNUC__) || defined(__clang__)
#   define FORCE_INLINE static inline __attribute__((always_inline))
#  else
#   define FORCE_INLINE static inline
#  endif
# else
#  define FORCE_INLINE static
# endif   /* __STDC_VERSION__ */
#endif   /* _MSC_VER */

/* LZ4_GCC_VERSION is defined in lz4.h */
#if (LZ4_GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
# define expect(expr,value)    (__builtin_expect ((expr),(value)) )
#else
# define expect(expr,value)    (expr)
#endif

#define likely(expr)     expect((expr) != 0, 1)
#define unlikely(expr)   expect((expr) != 0, 0)


/**************************************
*  Memory routines
**************************************/
#include <stdlib.h>   /* malloc, calloc, free */
#define ALLOCATOR(n,s) calloc(n,s)
#define FREEMEM        free
#include <string.h>   /* memset, memcpy */
#define MEM_INIT       memset


/**************************************
*  Basic Types
**************************************/
#if defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)   /* C99 */
# include <stdint.h>
  typedef uint8_t  BYTE;
  typedef uint16_t U16;
  typedef uint32_t U32;
  typedef int32_t  S32;
  typedef uint64_t U64;
#else
  typedef unsigned char       BYTE;
  typedef unsigned short      U16;
  typedef unsigned int        U32;
  typedef signed int          S32;
  typedef unsigned long long  U64;
#endif


/**************************************
*  Reading and writing into memory
**************************************/
#define STEPSIZE sizeof(size_t)

static unsigned LZ4_64bits(void) { return sizeof(void*)==8; }

static unsigned LZ4_isLittleEndian(void)
{
    const union { U32 i; BYTE c[4]; } one = { 1 };   /* don't use static : detrimental to performance */
    return one.c[0];
}


#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)

static U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; }
static U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; }
static size_t LZ4_read_ARCH(const void* memPtr) { return *(const size_t*) memPtr; }

static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }

#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)

/* __packed declarations are safer, but compiler-specific, hence potentially problematic for some compilers */
/* currently only defined for gcc and icc */
typedef union { U16 u16; U32 u32; size_t uArch; } __packed unalign;

static U16 LZ4_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
static U32 LZ4_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
static size_t LZ4_read_ARCH(const void* ptr) { return ((const unalign*)ptr)->uArch; }

static void LZ4_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }

#else

static U16 LZ4_read16(const void* memPtr)
{
    U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

static U32 LZ4_read32(const void* memPtr)
{
    U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
}

static size_t LZ4_read_ARCH(const void* memPtr)
{
    size_t val; memcpy(&val, memPtr, sizeof(val)); return val;
}

static void LZ4_write16(void* memPtr, U16 value)
{
    memcpy(memPtr, &value, sizeof(value));
}

#endif   /* LZ4_FORCE_MEMORY_ACCESS */


static U16 LZ4_readLE16(const void* memPtr)
{
    if (LZ4_isLittleEndian())
    {
        return LZ4_read16(memPtr);
    }
    else
    {
        const BYTE* p = (const BYTE*)memPtr;
        return (U16)((U16)p[0] + (p[1]<<8));
    }
}

static void LZ4_writeLE16(void* memPtr, U16 value)
{
    if (LZ4_isLittleEndian())
    {
        LZ4_write16(memPtr, value);
    }
    else
    {
        BYTE* p = (BYTE*)memPtr;
        p[0] = (BYTE) value;
        p[1] = (BYTE)(value>>8);
    }
}

static void LZ4_copy8(void* dst, const void* src)
{
    memcpy(dst,src,8);
}

/* customized variant of memcpy, which can overwrite up to 7 bytes beyond dstEnd */
static void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd)
{
    BYTE* d = (BYTE*)dstPtr;
    const BYTE* s = (const BYTE*)srcPtr;
    BYTE* const e = (BYTE*)dstEnd;

#if 0
    const size_t l2 = 8 - (((size_t)d) & (sizeof(void*)-1));
    LZ4_copy8(d,s); if (d>e-9) return;
    d+=l2; s+=l2;
#endif /* join to align */

    do { LZ4_copy8(d,s); d+=8; s+=8; } while (d<e);
}
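
/*
 * Illustrative note (an assumption, not part of the original source): because
 * LZ4_wildCopy() copies in 8-byte strides, callers must guarantee that at
 * least 7 bytes past dstEnd are writable; e.g. copying 10 bytes actually
 * writes two 8-byte chunks (16 bytes). The parsing restrictions below
 * (MFLIMIT, LASTLITERALS) are what make this safe in practice.
 */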


/**************************************
*  Common Constants
**************************************/
#define MINMATCH 4

#define WILDCOPYLENGTH 8
#define LASTLITERALS 5
#define MFLIMIT (WILDCOPYLENGTH+MINMATCH)
static const int LZ4_minLength = (MFLIMIT+1);

#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)

#define MAXD_LOG 16
#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)

#define ML_BITS  4
#define ML_MASK  ((1U<<ML_BITS)-1)
#define RUN_BITS (8-ML_BITS)
#define RUN_MASK ((1U<<RUN_BITS)-1)
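
/*
 * Illustrative note (an assumption, not part of the original source): every
 * LZ4 sequence starts with a 1-byte token. The upper RUN_BITS (4) bits hold
 * the literal length, the lower ML_BITS (4) bits hold the match length minus
 * MINMATCH, and a field value of 15 (RUN_MASK / ML_MASK) means "read extra
 * length bytes". For example, token 0x32 encodes 3 literals followed by a
 * match of 2 + MINMATCH = 6 bytes.
 */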


/**************************************
*  Common Utils
**************************************/
#define LZ4_STATIC_ASSERT(c)    { enum { LZ4_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */


/**************************************
*  Common functions
**************************************/
static unsigned LZ4_NbCommonBytes (register size_t val)
{
    if (LZ4_isLittleEndian())
    {
        if (LZ4_64bits())
        {
# if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
            unsigned long r = 0;
            _BitScanForward64( &r, (U64)val );
            return (unsigned)(r>>3);
# elif (defined(__clang__) || (LZ4_GCC_VERSION >= 304)) && !defined(LZ4_FORCE_SW_BITCOUNT)
            return (__builtin_ctzll((U64)val) >> 3);
# else
            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
# endif
        }
        else /* 32 bits */
        {
# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
            unsigned long r;
            _BitScanForward( &r, (U32)val );
            return (unsigned)(r>>3);
# elif (defined(__clang__) || (LZ4_GCC_VERSION >= 304)) && !defined(LZ4_FORCE_SW_BITCOUNT)
            return (__builtin_ctz((U32)val) >> 3);
# else
            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
# endif
        }
    }
    else   /* Big Endian CPU */
    {
        if (LZ4_64bits())
        {
# if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
            unsigned long r = 0;
            _BitScanReverse64( &r, val );
            return (unsigned)(r>>3);
# elif (defined(__clang__) || (LZ4_GCC_VERSION >= 304)) && !defined(LZ4_FORCE_SW_BITCOUNT)
            return (__builtin_clzll((U64)val) >> 3);
# else
            unsigned r;
            if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
            r += (!val);
            return r;
# endif
        }
        else /* 32 bits */
        {
# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
            unsigned long r = 0;
            _BitScanReverse( &r, (unsigned long)val );
            return (unsigned)(r>>3);
# elif (defined(__clang__) || (LZ4_GCC_VERSION >= 304)) && !defined(LZ4_FORCE_SW_BITCOUNT)
            return (__builtin_clz((U32)val) >> 3);
# else
            unsigned r;
            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
            r += (!val);
            return r;
# endif
        }
    }
}

static unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
{
    const BYTE* const pStart = pIn;

    while (likely(pIn<pInLimit-(STEPSIZE-1)))
    {
        size_t diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
        if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
        pIn += LZ4_NbCommonBytes(diff);
        return (unsigned)(pIn - pStart);
    }

    if (LZ4_64bits()) if ((pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
    if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
    if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
    return (unsigned)(pIn - pStart);
}


#ifndef LZ4_COMMONDEFS_ONLY
/**************************************
*  Local Constants
**************************************/
#define LZ4_HASHLOG   (LZ4_MEMORY_USAGE-2)
#define HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
#define HASH_SIZE_U32 (1 << LZ4_HASHLOG)   /* required as macro for static allocation */

static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));
static const U32 LZ4_skipTrigger = 6;   /* increasing this value makes compression run slower on incompressible data */


/**************************************
*  Local Structures and types
**************************************/
typedef struct {
    U32 hashTable[HASH_SIZE_U32];
    U32 currentOffset;
    U32 initCheck;
    const BYTE* dictionary;
    BYTE* bufferStart;   /* obsolete, used for slideInputBuffer */
    U32 dictSize;
} LZ4_stream_t_internal;

typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive;
typedef enum { byPtr, byU32, byU16 } tableType_t;

typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;

typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
typedef enum { full = 0, partial = 1 } earlyEnd_directive;


/**************************************
*  Local Utils
**************************************/
int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
int LZ4_compressBound(int isize)  { return LZ4_COMPRESSBOUND(isize); }
int LZ4_sizeofState() { return LZ4_STREAMSIZE; }



/********************************
*  Compression functions
********************************/

static U32 LZ4_hashSequence(U32 sequence, tableType_t const tableType)
{
    if (tableType == byU16)
        return (((sequence) * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
    else
        return (((sequence) * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
}
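
/*
 * Illustrative note (an assumption, not part of the original source):
 * 2654435761U is a prime close to 2^32 / golden-ratio, the classic Knuth
 * multiplicative-hashing constant; keeping only the top LZ4_HASHLOG bits of
 * the product spreads 4-byte sequences evenly across the hash table.
 */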

static const U64 prime5bytes = 889523592379ULL;
static U32 LZ4_hashSequence64(size_t sequence, tableType_t const tableType)
{
    const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
    const U32 hashMask = (1<<hashLog) - 1;
    return ((sequence * prime5bytes) >> (40 - hashLog)) & hashMask;
}

static U32 LZ4_hashSequenceT(size_t sequence, tableType_t const tableType)
{
    if (LZ4_64bits())
        return LZ4_hashSequence64(sequence, tableType);
    return LZ4_hashSequence((U32)sequence, tableType);
}

static U32 LZ4_hashPosition(const void* p, tableType_t tableType) { return LZ4_hashSequenceT(LZ4_read_ARCH(p), tableType); }

static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t const tableType, const BYTE* srcBase)
{
    switch (tableType)
    {
    case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
    case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
    case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
    }
}

static void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
    U32 h = LZ4_hashPosition(p, tableType);
    LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
}

static const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
    if (tableType == byPtr) { const BYTE** hashTable = (const BYTE**) tableBase; return hashTable[h]; }
    if (tableType == byU32) { U32* hashTable = (U32*) tableBase; return hashTable[h] + srcBase; }
    { U16* hashTable = (U16*) tableBase; return hashTable[h] + srcBase; }   /* default, to ensure a return */
}

static const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
{
    U32 h = LZ4_hashPosition(p, tableType);
    return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
}

FORCE_INLINE int LZ4_compress_generic(
                 void* const ctx,
                 const char* const source,
                 char* const dest,
                 const int inputSize,
                 const int maxOutputSize,
                 const limitedOutput_directive outputLimited,
                 const tableType_t tableType,
                 const dict_directive dict,
                 const dictIssue_directive dictIssue,
                 const U32 acceleration)
{
    LZ4_stream_t_internal* const dictPtr = (LZ4_stream_t_internal*)ctx;

    const BYTE* ip = (const BYTE*) source;
    const BYTE* base;
    const BYTE* lowLimit;
    const BYTE* const lowRefLimit = ip - dictPtr->dictSize;
    const BYTE* const dictionary = dictPtr->dictionary;
    const BYTE* const dictEnd = dictionary + dictPtr->dictSize;
    const size_t dictDelta = dictEnd - (const BYTE*)source;
    const BYTE* anchor = (const BYTE*) source;
    const BYTE* const iend = ip + inputSize;
    const BYTE* const mflimit = iend - MFLIMIT;
    const BYTE* const matchlimit = iend - LASTLITERALS;

    BYTE* op = (BYTE*) dest;
    BYTE* const olimit = op + maxOutputSize;

    U32 forwardH;
    size_t refDelta=0;

    /* Init conditions */
    if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0;   /* Unsupported input size, too large (or negative) */
    switch(dict)
    {
    case noDict:
    default:
        base = (const BYTE*)source;
        lowLimit = (const BYTE*)source;
        break;
    case withPrefix64k:
        base = (const BYTE*)source - dictPtr->currentOffset;
        lowLimit = (const BYTE*)source - dictPtr->dictSize;
        break;
    case usingExtDict:
        base = (const BYTE*)source - dictPtr->currentOffset;
        lowLimit = (const BYTE*)source;
        break;
    }
    if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) return 0;   /* Size too large (not within 64K limit) */
    if (inputSize<LZ4_minLength) goto _last_literals;   /* Input too small, no compression (all literals) */

    /* First Byte */
    LZ4_putPosition(ip, ctx, tableType, base);
    ip++; forwardH = LZ4_hashPosition(ip, tableType);

    /* Main Loop */
    for ( ; ; )
    {
        const BYTE* match;
        BYTE* token;
        {
            const BYTE* forwardIp = ip;
            unsigned step = 1;
            unsigned searchMatchNb = acceleration << LZ4_skipTrigger;

            /* Find a match */
            do {
                U32 h = forwardH;
                ip = forwardIp;
                forwardIp += step;
                step = (searchMatchNb++ >> LZ4_skipTrigger);

                if (unlikely(forwardIp > mflimit)) goto _last_literals;

                match = LZ4_getPositionOnHash(h, ctx, tableType, base);
                if (dict==usingExtDict)
                {
                    if (match<(const BYTE*)source)
                    {
                        refDelta = dictDelta;
                        lowLimit = dictionary;
                    }
                    else
                    {
                        refDelta = 0;
                        lowLimit = (const BYTE*)source;
                    }
                }
                forwardH = LZ4_hashPosition(forwardIp, tableType);
                LZ4_putPositionOnHash(ip, h, ctx, tableType, base);

            } while ( ((dictIssue==dictSmall) ? (match < lowRefLimit) : 0)
                || ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
                || (LZ4_read32(match+refDelta) != LZ4_read32(ip)) );
        }

        /* Catch up */
        while ((ip>anchor) && (match+refDelta > lowLimit) && (unlikely(ip[-1]==match[refDelta-1]))) { ip--; match--; }

        {
            /* Encode Literal length */
            unsigned litLength = (unsigned)(ip - anchor);
            token = op++;
            if ((outputLimited) && (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)))
                return 0;   /* Check output limit */
            if (litLength>=RUN_MASK)
            {
                int len = (int)litLength-RUN_MASK;
                *token=(RUN_MASK<<ML_BITS);
                for(; len >= 255 ; len-=255) *op++ = 255;
                *op++ = (BYTE)len;
            }
            else *token = (BYTE)(litLength<<ML_BITS);

            /* Copy Literals */
            LZ4_wildCopy(op, anchor, op+litLength);
            op+=litLength;
        }

_next_match:
        /* Encode Offset */
        LZ4_writeLE16(op, (U16)(ip-match)); op+=2;

        /* Encode MatchLength */
        {
            unsigned matchLength;

            if ((dict==usingExtDict) && (lowLimit==dictionary))
            {
                const BYTE* limit;
                match += refDelta;
                limit = ip + (dictEnd-match);
                if (limit > matchlimit) limit = matchlimit;
                matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
                ip += MINMATCH + matchLength;
                if (ip==limit)
                {
                    unsigned more = LZ4_count(ip, (const BYTE*)source, matchlimit);
                    matchLength += more;
                    ip += more;
                }
            }
            else
            {
                matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
                ip += MINMATCH + matchLength;
            }

            if ((outputLimited) && (unlikely(op + (1 + LASTLITERALS) + (matchLength>>8) > olimit)))
                return 0;   /* Check output limit */
            if (matchLength>=ML_MASK)
            {
                *token += ML_MASK;
                matchLength -= ML_MASK;
                for (; matchLength >= 510 ; matchLength-=510) { *op++ = 255; *op++ = 255; }
                if (matchLength >= 255) { matchLength-=255; *op++ = 255; }
                *op++ = (BYTE)matchLength;
            }
            else *token += (BYTE)(matchLength);
        }

        anchor = ip;

        /* Test end of chunk */
        if (ip > mflimit) break;

        /* Fill table */
        LZ4_putPosition(ip-2, ctx, tableType, base);

        /* Test next position */
        match = LZ4_getPosition(ip, ctx, tableType, base);
        if (dict==usingExtDict)
        {
            if (match<(const BYTE*)source)
            {
                refDelta = dictDelta;
                lowLimit = dictionary;
            }
            else
            {
                refDelta = 0;
                lowLimit = (const BYTE*)source;
            }
        }
        LZ4_putPosition(ip, ctx, tableType, base);
        if ( ((dictIssue==dictSmall) ? (match>=lowRefLimit) : 1)
            && (match+MAX_DISTANCE>=ip)
            && (LZ4_read32(match+refDelta)==LZ4_read32(ip)) )
        { token=op++; *token=0; goto _next_match; }

        /* Prepare next loop */
        forwardH = LZ4_hashPosition(++ip, tableType);
    }

_last_literals:
    /* Encode Last Literals */
    {
        const size_t lastRun = (size_t)(iend - anchor);
        if ((outputLimited) && ((op - (BYTE*)dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize))
            return 0;   /* Check output limit */
        if (lastRun >= RUN_MASK)
        {
            size_t accumulator = lastRun - RUN_MASK;
            *op++ = RUN_MASK << ML_BITS;
            for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
            *op++ = (BYTE) accumulator;
        }
        else
        {
            *op++ = (BYTE)(lastRun<<ML_BITS);
        }
        memcpy(op, anchor, lastRun);
        op += lastRun;
    }

    /* End */
    return (int) (((char*)op)-dest);
}


int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
    LZ4_resetStream((LZ4_stream_t*)state);
    if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;

    if (maxOutputSize >= LZ4_compressBound(inputSize))
    {
        if (inputSize < LZ4_64Klimit)
            return LZ4_compress_generic(state, source, dest, inputSize, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
        else
            return LZ4_compress_generic(state, source, dest, inputSize, 0, notLimited, LZ4_64bits() ? byU32 : byPtr, noDict, noDictIssue, acceleration);
    }
    else
    {
        if (inputSize < LZ4_64Klimit)
            return LZ4_compress_generic(state, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
        else
            return LZ4_compress_generic(state, source, dest, inputSize, maxOutputSize, limitedOutput, LZ4_64bits() ? byU32 : byPtr, noDict, noDictIssue, acceleration);
    }
}


int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
#if (HEAPMODE)
    void* ctxPtr = ALLOCATOR(1, sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
#else
    LZ4_stream_t ctx;
    void* ctxPtr = &ctx;
#endif

    int result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);

#if (HEAPMODE)
    FREEMEM(ctxPtr);
#endif
    return result;
}


int LZ4_compress_default(const char* source, char* dest, int inputSize, int maxOutputSize)
{
    return LZ4_compress_fast(source, dest, inputSize, maxOutputSize, 1);
}
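
/*
 * Usage sketch (illustrative, an assumption rather than part of the original
 * source): a minimal round-trip through the default entry points. Sizing the
 * destination with LZ4_compressBound() guarantees compression cannot fail
 * for inputs up to LZ4_MAX_INPUT_SIZE.
 *
 *     int srcSize = (int)strlen(text) + 1;
 *     int bound   = LZ4_compressBound(srcSize);
 *     char* comp  = (char*)malloc(bound);
 *     char* back  = (char*)malloc(srcSize);
 *     int cSize = LZ4_compress_default(text, comp, srcSize, bound);
 *     int dSize = LZ4_decompress_safe(comp, back, cSize, srcSize);
 *
 * On success: cSize > 0, dSize == srcSize, and memcmp(text, back, srcSize) == 0.
 */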


/* hidden debug function */
/* strangely enough, gcc generates faster code when this function is uncommented, even if unused */
int LZ4_compress_fast_force(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
    LZ4_stream_t ctx;

    LZ4_resetStream(&ctx);

    if (inputSize < LZ4_64Klimit)
        return LZ4_compress_generic(&ctx, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
    else
        return LZ4_compress_generic(&ctx, source, dest, inputSize, maxOutputSize, limitedOutput, LZ4_64bits() ? byU32 : byPtr, noDict, noDictIssue, acceleration);
}


/********************************
*  destSize variant
********************************/

static int LZ4_compress_destSize_generic(
       void* const ctx,
       const char* const src,
       char* const dst,
       int* const srcSizePtr,
       const int targetDstSize,
       const tableType_t tableType)
{
    const BYTE* ip = (const BYTE*) src;
    const BYTE* base = (const BYTE*) src;
    const BYTE* lowLimit = (const BYTE*) src;
    const BYTE* anchor = ip;
    const BYTE* const iend = ip + *srcSizePtr;
    const BYTE* const mflimit = iend - MFLIMIT;
    const BYTE* const matchlimit = iend - LASTLITERALS;

    BYTE* op = (BYTE*) dst;
    BYTE* const oend = op + targetDstSize;
    BYTE* const oMaxLit = op + targetDstSize - 2 /* offset */ - 8 /* because 8+MINMATCH==MFLIMIT */ - 1 /* token */;
    BYTE* const oMaxMatch = op + targetDstSize - (LASTLITERALS + 1 /* token */);
    BYTE* const oMaxSeq = oMaxLit - 1 /* token */;

    U32 forwardH;


    /* Init conditions */
    if (targetDstSize < 1) return 0;   /* Impossible to store anything */
    if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0;   /* Unsupported input size, too large (or negative) */
    if ((tableType == byU16) && (*srcSizePtr>=LZ4_64Klimit)) return 0;   /* Size too large (not within 64K limit) */
    if (*srcSizePtr<LZ4_minLength) goto _last_literals;   /* Input too small, no compression (all literals) */

    /* First Byte */
    *srcSizePtr = 0;
    LZ4_putPosition(ip, ctx, tableType, base);
    ip++; forwardH = LZ4_hashPosition(ip, tableType);

    /* Main Loop */
    for ( ; ; )
    {
        const BYTE* match;
        BYTE* token;
        {
            const BYTE* forwardIp = ip;
            unsigned step = 1;
            unsigned searchMatchNb = 1 << LZ4_skipTrigger;

            /* Find a match */
            do {
                U32 h = forwardH;
                ip = forwardIp;
                forwardIp += step;
                step = (searchMatchNb++ >> LZ4_skipTrigger);

                if (unlikely(forwardIp > mflimit))
                    goto _last_literals;

                match = LZ4_getPositionOnHash(h, ctx, tableType, base);
                forwardH = LZ4_hashPosition(forwardIp, tableType);
                LZ4_putPositionOnHash(ip, h, ctx, tableType, base);

            } while ( ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
                || (LZ4_read32(match) != LZ4_read32(ip)) );
        }

        /* Catch up */
        while ((ip>anchor) && (match > lowLimit) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }

        {
            /* Encode Literal length */
            unsigned litLength = (unsigned)(ip - anchor);
            token = op++;
            if (op + ((litLength+240)/255) + litLength > oMaxLit)
            {
                /* Not enough space for a last match */
                op--;
                goto _last_literals;
            }
            if (litLength>=RUN_MASK)
            {
                unsigned len = litLength - RUN_MASK;
                *token=(RUN_MASK<<ML_BITS);
                for(; len >= 255 ; len-=255) *op++ = 255;
                *op++ = (BYTE)len;
            }
            else *token = (BYTE)(litLength<<ML_BITS);

            /* Copy Literals */
            LZ4_wildCopy(op, anchor, op+litLength);
            op += litLength;
        }

_next_match:
        /* Encode Offset */
        LZ4_writeLE16(op, (U16)(ip-match)); op+=2;

        /* Encode MatchLength */
        {
            size_t matchLength;

            matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);

            if (op + ((matchLength+240)/255) > oMaxMatch)
            {
                /* Match description too long : reduce it */
                matchLength = (15-1) + (oMaxMatch-op) * 255;
            }
            ip += MINMATCH + matchLength;

            if (matchLength>=ML_MASK)
            {
                *token += ML_MASK;
                matchLength -= ML_MASK;
                while (matchLength >= 255) { matchLength-=255; *op++ = 255; }
                *op++ = (BYTE)matchLength;
            }
            else *token += (BYTE)(matchLength);
        }

        anchor = ip;

        /* Test end of block */
        if (ip > mflimit) break;
        if (op > oMaxSeq) break;

        /* Fill table */
        LZ4_putPosition(ip-2, ctx, tableType, base);

        /* Test next position */
        match = LZ4_getPosition(ip, ctx, tableType, base);
        LZ4_putPosition(ip, ctx, tableType, base);
        if ( (match+MAX_DISTANCE>=ip)
            && (LZ4_read32(match)==LZ4_read32(ip)) )
        { token=op++; *token=0; goto _next_match; }

        /* Prepare next loop */
        forwardH = LZ4_hashPosition(++ip, tableType);
    }

_last_literals:
    /* Encode Last Literals */
    {
        size_t lastRunSize = (size_t)(iend - anchor);
        if (op + 1 /* token */ + ((lastRunSize+240)/255) /* litLength */ + lastRunSize /* literals */ > oend)
        {
            /* adapt lastRunSize to fill 'dst' */
            lastRunSize = (oend-op) - 1;
            lastRunSize -= (lastRunSize+240)/255;
        }
        ip = anchor + lastRunSize;

        if (lastRunSize >= RUN_MASK)
        {
            size_t accumulator = lastRunSize - RUN_MASK;
            *op++ = RUN_MASK << ML_BITS;
            for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
            *op++ = (BYTE) accumulator;
        }
        else
        {
            *op++ = (BYTE)(lastRunSize<<ML_BITS);
        }
        memcpy(op, anchor, lastRunSize);
        op += lastRunSize;
    }

    /* End */
    *srcSizePtr = (int) (((const char*)ip)-src);
    return (int) (((char*)op)-dst);
}


static int LZ4_compress_destSize_extState (void* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
{
    LZ4_resetStream((LZ4_stream_t*)state);

    if (targetDstSize >= LZ4_compressBound(*srcSizePtr))   /* compression success is guaranteed */
    {
        return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
    }
    else
    {
        if (*srcSizePtr < LZ4_64Klimit)
            return LZ4_compress_destSize_generic(state, src, dst, srcSizePtr, targetDstSize, byU16);
        else
            return LZ4_compress_destSize_generic(state, src, dst, srcSizePtr, targetDstSize, LZ4_64bits() ? byU32 : byPtr);
    }
}


int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
{
#if (HEAPMODE)
    void* ctx = ALLOCATOR(1, sizeof(LZ4_stream_t));   /* malloc-calloc always properly aligned */
#else
    LZ4_stream_t ctxBody;
    void* ctx = &ctxBody;
#endif

    int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);

#if (HEAPMODE)
    FREEMEM(ctx);
#endif
    return result;
}
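
/*
 * Usage sketch (illustrative, an assumption rather than part of the original
 * source). LZ4_compress_destSize() reverses the usual contract: it fills a
 * fixed-size destination buffer and reports how much input it consumed.
 *
 *     char dst[4096];                  a hard output budget
 *     int consumed = srcSize;          in: bytes available, out: bytes consumed
 *     int written = LZ4_compress_destSize(src, dst, &consumed, (int)sizeof(dst));
 *
 * Unconsumed input starts at src + consumed; written never exceeds sizeof(dst).
 */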



/********************************
*  Streaming functions
********************************/

LZ4_stream_t* LZ4_createStream(void)
{
    LZ4_stream_t* lz4s = (LZ4_stream_t*)ALLOCATOR(8, LZ4_STREAMSIZE_U64);
    LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal));   /* A compilation error here means LZ4_STREAMSIZE is not large enough */
    LZ4_resetStream(lz4s);
    return lz4s;
}

void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
{
    MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t));
}

int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
{
    FREEMEM(LZ4_stream);
    return (0);
}


#define HASH_UNIT sizeof(size_t)
int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
{
    LZ4_stream_t_internal* dict = (LZ4_stream_t_internal*) LZ4_dict;
    const BYTE* p = (const BYTE*)dictionary;
    const BYTE* const dictEnd = p + dictSize;
    const BYTE* base;

    if ((dict->initCheck) || (dict->currentOffset > 1 GB))   /* Uninitialized structure, or reuse overflow */
        LZ4_resetStream(LZ4_dict);

    if (dictSize < (int)HASH_UNIT)
    {
        dict->dictionary = NULL;
        dict->dictSize = 0;
        return 0;
    }

    if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
    dict->currentOffset += 64 KB;
    base = p - dict->currentOffset;
    dict->dictionary = p;
    dict->dictSize = (U32)(dictEnd - p);
    dict->currentOffset += dict->dictSize;

    while (p <= dictEnd-HASH_UNIT)
    {
        LZ4_putPosition(p, dict->hashTable, byU32, base);
        p+=3;
    }

    return dict->dictSize;
}


static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src)
{
    if ((LZ4_dict->currentOffset > 0x80000000) ||
        ((size_t)LZ4_dict->currentOffset > (size_t)src))   /* address space overflow */
    {
        /* rescale hash table */
        U32 delta = LZ4_dict->currentOffset - 64 KB;
        const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
        int i;
        for (i=0; i<HASH_SIZE_U32; i++)
        {
            if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
            else LZ4_dict->hashTable[i] -= delta;
        }
        LZ4_dict->currentOffset = 64 KB;
        if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
        LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
    }
}


int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
{
    LZ4_stream_t_internal* streamPtr = (LZ4_stream_t_internal*)LZ4_stream;
    const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;

    const BYTE* smallest = (const BYTE*) source;
    if (streamPtr->initCheck) return 0;   /* Uninitialized structure detected */
    if ((streamPtr->dictSize>0) && (smallest>dictEnd)) smallest = dictEnd;
    LZ4_renormDictT(streamPtr, smallest);
    if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;

    /* Check overlapping input/dictionary space */
    {
        const BYTE* sourceEnd = (const BYTE*) source + inputSize;
        if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd))
        {
            streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
            if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
            if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
            streamPtr->dictionary = dictEnd - streamPtr->dictSize;
        }
    }

    /* prefix mode : source data follows dictionary */
    if (dictEnd == (const BYTE*)source)
    {
        int result;
        if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
            result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, dictSmall, acceleration);
        else
            result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, noDictIssue, acceleration);
        streamPtr->dictSize += (U32)inputSize;
        streamPtr->currentOffset += (U32)inputSize;
        return result;
    }

    /* external dictionary mode */
    {
        int result;
        if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
            result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, dictSmall, acceleration);
        else
            result = LZ4_compress_generic(LZ4_stream, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, noDictIssue, acceleration);
        streamPtr->dictionary = (const BYTE*)source;
        streamPtr->dictSize = (U32)inputSize;
        streamPtr->currentOffset += (U32)inputSize;
        return result;
    }
}


/* Hidden debug function, to force external dictionary mode */
int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int inputSize)
{
    LZ4_stream_t_internal* streamPtr = (LZ4_stream_t_internal*)LZ4_dict;
    int result;
    const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize;

    const BYTE* smallest = dictEnd;
    if (smallest > (const BYTE*) source) smallest = (const BYTE*) source;
    LZ4_renormDictT((LZ4_stream_t_internal*)LZ4_dict, smallest);

    result = LZ4_compress_generic(LZ4_dict, source, dest, inputSize, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);

    streamPtr->dictionary = (const BYTE*)source;
    streamPtr->dictSize = (U32)inputSize;
    streamPtr->currentOffset += (U32)inputSize;

    return result;
}


int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
{
    LZ4_stream_t_internal* dict = (LZ4_stream_t_internal*) LZ4_dict;
    const BYTE* previousDictEnd = dict->dictionary + dict->dictSize;

    if ((U32)dictSize > 64 KB) dictSize = 64 KB;   /* useless to define a dictionary > 64 KB */
    if ((U32)dictSize > dict->dictSize) dictSize = dict->dictSize;

    memmove(safeBuffer, previousDictEnd - dictSize, dictSize);

    dict->dictionary = (const BYTE*)safeBuffer;
    dict->dictSize = (U32)dictSize;

    return dictSize;
}
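
/*
 * Usage sketch (illustrative, an assumption rather than part of the original
 * source): chained-block compression with a double buffer, so each block can
 * reference the previous one as a dictionary.
 *
 *     LZ4_stream_t* s = LZ4_createStream();
 *     char inBuf[2][64 * 1024];
 *     char outBuf[LZ4_COMPRESSBOUND(64 * 1024)];
 *     int idx = 0;
 *     for (;;) {
 *         int n = readBlock(inBuf[idx], sizeof(inBuf[idx]));   hypothetical input source
 *         if (n <= 0) break;
 *         int cSize = LZ4_compress_fast_continue(s, inBuf[idx], outBuf, n, (int)sizeof(outBuf), 1);
 *         writeBlock(outBuf, cSize);                           hypothetical output sink
 *         idx ^= 1;   the previous buffer must stay valid: it is the next block's dictionary
 *     }
 *     LZ4_freeStream(s);
 *
 * When the previous data cannot be kept in place, LZ4_saveDict() copies the
 * last <= 64 KB into a safe buffer and re-points the stream at it.
 */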



/*******************************
*  Decompression functions
*******************************/
/*
 * This generic decompression function covers all use cases.
 * It shall be instantiated several times, using different sets of directives.
 * Note that it is essential that this generic function is really inlined,
 * in order to remove useless branches during compilation optimization.
 */
FORCE_INLINE int LZ4_decompress_generic(
                 const char* const source,
                 char* const dest,
                 int inputSize,
                 int outputSize,         /* If endOnInput==endOnInputSize, this value is the max size of Output Buffer. */

                 int endOnInput,         /* endOnOutputSize, endOnInputSize */
                 int partialDecoding,    /* full, partial */
                 int targetOutputSize,   /* only used if partialDecoding==partial */
                 int dict,               /* noDict, withPrefix64k, usingExtDict */
                 const BYTE* const lowPrefix,   /* == dest if dict == noDict */
                 const BYTE* const dictStart,   /* only if dict==usingExtDict */
                 const size_t dictSize          /* note : = 0 if noDict */
                 )
{
    /* Local Variables */
    const BYTE* ip = (const BYTE*) source;
    const BYTE* const iend = ip + inputSize;

    BYTE* op = (BYTE*) dest;
    BYTE* const oend = op + outputSize;
    BYTE* cpy;
    BYTE* oexit = op + targetOutputSize;
    const BYTE* const lowLimit = lowPrefix - dictSize;

    const BYTE* const dictEnd = (const BYTE*)dictStart + dictSize;
    const unsigned dec32table[] = {4, 1, 2, 1, 4, 4, 4, 4};
    const int dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};

    const int safeDecode = (endOnInput==endOnInputSize);
    const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB)));
    const int inPlaceDecode = ((ip >= op) && (ip < oend));


    /* Special cases */
    if ((partialDecoding) && (oexit> oend-MFLIMIT)) oexit = oend-MFLIMIT;   /* targetOutputSize too high => decode everything */
    if ((endOnInput) && (unlikely(outputSize==0))) return ((inputSize==1) && (*ip==0)) ? 0 : -1;   /* Empty output buffer */
    if ((!endOnInput) && (unlikely(outputSize==0))) return (*ip==0?1:-1);


    /* Main Loop */
    while (1)
    {
        unsigned token;
        size_t length;
        const BYTE* match;
        size_t offset;

        if (unlikely((inPlaceDecode) && (op + WILDCOPYLENGTH > ip))) goto _output_error;   /* output stream ran over input stream */

        /* get literal length */
        token = *ip++;
        if ((length=(token>>ML_BITS)) == RUN_MASK)
        {
            unsigned s;
            if ((endOnInput) && unlikely(ip>=iend-RUN_MASK)) goto _output_error;   /* overflow detection */
            do
            {
                s = *ip++;
                length += s;
            }
            while ( likely(endOnInput ? ip<iend-RUN_MASK : 1) && (s==255) );
            if ((safeDecode) && unlikely((size_t)(op+length)<(size_t)(op))) goto _output_error;   /* overflow detection */
            if ((safeDecode) && unlikely((size_t)(ip+length)<(size_t)(ip))) goto _output_error;   /* overflow detection */
        }

        /* copy literals */
        cpy = op+length;
        if (((endOnInput) && ((cpy>(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) )
            || ((!endOnInput) && (cpy>oend-WILDCOPYLENGTH)))
        {
            if (partialDecoding)
            {
                if (cpy > oend) goto _output_error;   /* Error : write attempt beyond end of output buffer */
                if ((endOnInput) && (ip+length > iend)) goto _output_error;   /* Error : read attempt beyond end of input buffer */
            }
            else
            {
                if ((!endOnInput) && (cpy != oend)) goto _output_error;   /* Error : block decoding must stop exactly there */
                if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error;   /* Error : input must be consumed */
            }
            memmove(op, ip, length);
            ip += length;
            op += length;
            break;   /* Necessarily EOF, due to parsing restrictions */
        }
        LZ4_wildCopy(op, ip, cpy);
        ip += length; op = cpy;

        /* get offset */
        offset = LZ4_readLE16(ip); ip+=2;
        match = op - offset;
        if ((checkOffset) && (unlikely(match < lowLimit))) goto _output_error;   /* Error : offset outside buffers */

        /* get matchlength */
        length = token & ML_MASK;
        if (length == ML_MASK)
        {
            unsigned s;
            do
            {
                if ((endOnInput) && (ip > iend-LASTLITERALS)) goto _output_error;
                s = *ip++;
                length += s;
            } while (s==255);
            if ((safeDecode) && unlikely((size_t)(op+length)<(size_t)op)) goto _output_error;   /* overflow detection */
        }
        length += MINMATCH;

        /* check external dictionary */
        if ((dict==usingExtDict) && (match < lowPrefix))
        {
            if (unlikely(op+length > oend-LASTLITERALS)) goto _output_error;   /* doesn't respect parsing restriction */

            if (length <= (size_t)(lowPrefix-match))
            {
                /* match can be copied as a single segment from external dictionary */
                match = dictEnd - (lowPrefix-match);
                memmove(op, match, length); op += length;
            }
            else
            {
                /* match encompasses external dictionary and current block */
                size_t copySize = (size_t)(lowPrefix-match);
                memcpy(op, dictEnd - copySize, copySize);
                op += copySize;
                copySize = length - copySize;
                if (copySize > (size_t)(op-lowPrefix))   /* overlap copy */
                {
                    BYTE* const endOfMatch = op + copySize;
                    const BYTE* copyFrom = lowPrefix;
                    while (op < endOfMatch) *op++ = *copyFrom++;
                }
                else
                {
                    memcpy(op, lowPrefix, copySize);
                    op += copySize;
                }
            }
            continue;
        }

        /* copy match within block */
        cpy = op + length;
        if (unlikely(offset<8))
        {
            const int dec64 = dec64table[offset];
            op[0] = match[0];
            op[1] = match[1];
            op[2] = match[2];
            op[3] = match[3];
            match += dec32table[offset];
            memcpy(op+4, match, 4);
            match -= dec64;
        } else { LZ4_copy8(op, match); match+=8; }
        op += 8;

        if (unlikely(cpy>oend-12))
        {
            BYTE* const oCopyLimit = oend-(WILDCOPYLENGTH-1);
            if (cpy > oend-LASTLITERALS) goto _output_error;   /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
            if (op < oCopyLimit)
            {
                LZ4_wildCopy(op, match, oCopyLimit);
                match += oCopyLimit - op;
                op = oCopyLimit;
            }
            while (op<cpy) *op++ = *match++;
        }
        else
            LZ4_wildCopy(op, match, cpy);
        op=cpy;   /* correction */
    }

    /* end of decoding */
    if (endOnInput)
        return (int) (((char*)op)-dest);           /* Nb of output bytes decoded */
    else
        return (int) (((const char*)ip)-source);   /* Nb of input bytes read */

    /* Overflow error detected */
_output_error:
    return (int) (-(((const char*)ip)-source))-1;
}


int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
{
    return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, full, 0, noDict, (BYTE*)dest, NULL, 0);
}

int LZ4_decompress_safe_partial(const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize)
{
    return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, partial, targetOutputSize, noDict, (BYTE*)dest, NULL, 0);
}

int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
{
    return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)(dest - 64 KB), NULL, 64 KB);
}
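
/*
 * Illustrative note (an assumption, not part of the original source):
 * LZ4_decompress_fast() trusts the caller-supplied originalSize and, in
 * endOnOutputSize mode, does not bound-check reads against the end of the
 * compressed input, so it should only be used on trusted data whose
 * decompressed size is known exactly. When in doubt, prefer:
 *
 *     int n = LZ4_decompress_safe(comp, out, cSize, (int)sizeof(out));
 *
 * where a negative n signals malformed input instead of undefined behaviour.
 */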


/* streaming decompression functions */

typedef struct
{
    const BYTE* externalDict;
    size_t extDictSize;
    const BYTE* prefixEnd;
    size_t prefixSize;
} LZ4_streamDecode_t_internal;

/*
 * If you prefer dynamic allocation methods,
 * LZ4_createStreamDecode()
 * provides a pointer towards an initialized LZ4_streamDecode_t structure.
 */
LZ4_streamDecode_t* LZ4_createStreamDecode(void)
{
    LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOCATOR(1, sizeof(LZ4_streamDecode_t));
    return lz4s;
}

int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
{
    FREEMEM(LZ4_stream);
    return 0;
}

/*
 * LZ4_setStreamDecode
 * Use this function to indicate where to find the dictionary.
 * This function is not necessary if previous data is still available where it was decoded.
 * Loading a size of 0 is allowed (same effect as no dictionary).
 * Return : 1 if OK, 0 if error
 */
int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)
{
    LZ4_streamDecode_t_internal* lz4sd = (LZ4_streamDecode_t_internal*) LZ4_streamDecode;
    lz4sd->prefixSize = (size_t) dictSize;
    lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize;
    lz4sd->externalDict = NULL;
    lz4sd->extDictSize = 0;
    return 1;
}

/*
*_continue() :
    These decoding functions allow decompression of multiple blocks in "streaming" mode.
    Previously decoded blocks must still be available at the memory position where they were decoded.
    If that's not possible, save the relevant part of the decoded data into a safe buffer,
    and indicate where it stands using LZ4_setStreamDecode()
*/
int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
{
    LZ4_streamDecode_t_internal* lz4sd = (LZ4_streamDecode_t_internal*) LZ4_streamDecode;
    int result;

    if (lz4sd->prefixEnd == (BYTE*)dest)
    {
        result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
                                        endOnInputSize, full, 0,
                                        usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize += result;
        lz4sd->prefixEnd  += result;
    }
    else
    {
        lz4sd->extDictSize = lz4sd->prefixSize;
        lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
        result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
                                        endOnInputSize, full, 0,
                                        usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize = result;
        lz4sd->prefixEnd  = (BYTE*)dest + result;
    }

    return result;
}
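
/*
 * Usage sketch (illustrative, an assumption rather than part of the original
 * source): streaming decompression mirroring the double-buffer compressor;
 * each decoded block stays in place so the next block can reference it.
 *
 *     LZ4_streamDecode_t* sd = LZ4_createStreamDecode();
 *     LZ4_setStreamDecode(sd, NULL, 0);
 *     char decBuf[2][64 * 1024];
 *     int idx = 0;
 *     while (haveBlock()) {                         hypothetical framing layer
 *         int cSize = nextBlockSize();              hypothetical framing layer
 *         int n = LZ4_decompress_safe_continue(sd, inPtr, decBuf[idx],
 *                                              cSize, (int)sizeof(decBuf[idx]));
 *         if (n < 0) break;                         malformed block
 *         consume(decBuf[idx], n);                  hypothetical output sink
 *         idx ^= 1;
 *     }
 *     LZ4_freeStreamDecode(sd);
 */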

int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize)
{
    LZ4_streamDecode_t_internal* lz4sd = (LZ4_streamDecode_t_internal*) LZ4_streamDecode;
    int result;

    if (lz4sd->prefixEnd == (BYTE*)dest)
    {
        result = LZ4_decompress_generic(source, dest, 0, originalSize,
                                        endOnOutputSize, full, 0,
                                        usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize += originalSize;
        lz4sd->prefixEnd  += originalSize;
    }
    else
    {
        lz4sd->extDictSize = lz4sd->prefixSize;
        lz4sd->externalDict = (BYTE*)dest - lz4sd->extDictSize;
        result = LZ4_decompress_generic(source, dest, 0, originalSize,
                                        endOnOutputSize, full, 0,
                                        usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize);
        if (result <= 0) return result;
        lz4sd->prefixSize = originalSize;
        lz4sd->prefixEnd  = (BYTE*)dest + originalSize;
    }

    return result;
}


/*
Advanced decoding functions :
*_usingDict() :
    These decoding functions work the same as the "_continue" ones,
    except that the dictionary must be provided explicitly as a parameter.
*/

FORCE_INLINE int LZ4_decompress_usingDict_generic(const char* source, char* dest, int compressedSize, int maxOutputSize, int safe, const char* dictStart, int dictSize)
{
    if (dictSize==0)
        return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest, NULL, 0);
    if (dictStart+dictSize == dest)
    {
        if (dictSize >= (int)(64 KB - 1))
            return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, withPrefix64k, (BYTE*)dest-64 KB, NULL, 0);
        return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest-dictSize, NULL, 0);
    }
    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize);
}

int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
{
    return LZ4_decompress_usingDict_generic(source, dest, compressedSize, maxOutputSize, 1, dictStart, dictSize);
}

int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
{
    return LZ4_decompress_usingDict_generic(source, dest, 0, originalSize, 0, dictStart, dictSize);
}

/* debug function */
int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
{
    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize);
}


/***************************************************
*  Obsolete Functions
***************************************************/
/* obsolete compression functions */
int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize) { return LZ4_compress_default(source, dest, inputSize, maxOutputSize); }
int LZ4_compress(const char* source, char* dest, int inputSize) { return LZ4_compress_default(source, dest, inputSize, LZ4_compressBound(inputSize)); }
int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1); }
int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1); }
int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, maxDstSize, 1); }
int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize) { return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1); }

/*
These function names are deprecated and should no longer be used.
They are only provided here for compatibility with older user programs.
- LZ4_uncompress is totally equivalent to LZ4_decompress_fast
- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
*/
int LZ4_uncompress (const char* source, char* dest, int outputSize) { return LZ4_decompress_fast(source, dest, outputSize); }
int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) { return LZ4_decompress_safe(source, dest, isize, maxOutputSize); }


/* Obsolete Streaming functions */

int LZ4_sizeofStreamState() { return LZ4_STREAMSIZE; }

static void LZ4_init(LZ4_stream_t_internal* lz4ds, BYTE* base)
{
    MEM_INIT(lz4ds, 0, LZ4_STREAMSIZE);
    lz4ds->bufferStart = base;
}

int LZ4_resetStreamState(void* state, char* inputBuffer)
{
    if ((((size_t)state) & 3) != 0) return 1;   /* Error : pointer is not aligned on a 4-byte boundary */
    LZ4_init((LZ4_stream_t_internal*)state, (BYTE*)inputBuffer);
    return 0;
}

void* LZ4_create (char* inputBuffer)
{
    void* lz4ds = ALLOCATOR(8, LZ4_STREAMSIZE_U64);
    LZ4_init ((LZ4_stream_t_internal*)lz4ds, (BYTE*)inputBuffer);
    return lz4ds;
}

char* LZ4_slideInputBuffer (void* LZ4_Data)
{
    LZ4_stream_t_internal* ctx = (LZ4_stream_t_internal*)LZ4_Data;
    int dictSize = LZ4_saveDict((LZ4_stream_t*)LZ4_Data, (char*)ctx->bufferStart, 64 KB);
    return (char*)(ctx->bufferStart + dictSize);
}

/* Obsolete streaming decompression functions */

int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
{
    return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
}

int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
{
    return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB);
}

#endif   /* LZ4_COMMONDEFS_ONLY */