1 /*
2 * LZ4 - Fast LZ compression algorithm
3 * Copyright (C) 2011 - 2016, Yann Collet.
 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above
11 * copyright notice, this list of conditions and the following disclaimer
12 * in the documentation and/or other materials provided with the
13 * distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
15 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
16 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
17 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
18 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
19 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
20 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 * You can contact the author at :
26 * - LZ4 homepage : http://www.lz4.org
27 * - LZ4 source repository : https://github.com/lz4/lz4
28 *
29 * Changed for kernel usage by:
30 * Sven Schmidt <[email protected]>
31 */
32
33 /*-************************************
34 * Dependencies
35 **************************************/
36 #include "lz4defs.h"
37 #include <linux/module.h>
38 #include <linux/kernel.h>
39 #include <linux/unaligned.h>
40
/* Inputs shorter than this are emitted as literals only (see the
 * "Input too small" check in LZ4_compress_generic).
 */
static const int LZ4_minLength = (MFLIMIT + 1);
/* Inputs at or above this size may not use the 16-bit (byU16) hash
 * table, since byU16 stores positions as 16-bit offsets.
 */
static const int LZ4_64Klimit = ((64 * KB) + (MFLIMIT - 1));
43
44 /*-******************************
45 * Compression functions
46 ********************************/
LZ4_hash4(U32 sequence,tableType_t const tableType)47 static FORCE_INLINE U32 LZ4_hash4(
48 U32 sequence,
49 tableType_t const tableType)
50 {
51 if (tableType == byU16)
52 return ((sequence * 2654435761U)
53 >> ((MINMATCH * 8) - (LZ4_HASHLOG + 1)));
54 else
55 return ((sequence * 2654435761U)
56 >> ((MINMATCH * 8) - LZ4_HASHLOG));
57 }
58
LZ4_hash5(U64 sequence,tableType_t const tableType)59 static FORCE_INLINE U32 LZ4_hash5(
60 U64 sequence,
61 tableType_t const tableType)
62 {
63 const U32 hashLog = (tableType == byU16)
64 ? LZ4_HASHLOG + 1
65 : LZ4_HASHLOG;
66
67 #if LZ4_LITTLE_ENDIAN
68 static const U64 prime5bytes = 889523592379ULL;
69
70 return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
71 #else
72 static const U64 prime8bytes = 11400714785074694791ULL;
73
74 return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
75 #endif
76 }
77
LZ4_hashPosition(const void * p,tableType_t const tableType)78 static FORCE_INLINE U32 LZ4_hashPosition(
79 const void *p,
80 tableType_t const tableType)
81 {
82 #if LZ4_ARCH64
83 if (tableType == byU32)
84 return LZ4_hash5(LZ4_read_ARCH(p), tableType);
85 #endif
86
87 return LZ4_hash4(LZ4_read32(p), tableType);
88 }
89
/*
 * Store position p into hash slot h, using the representation the
 * table type dictates: a raw pointer (byPtr), a 32-bit offset from
 * srcBase (byU32), or a 16-bit offset from srcBase (byU16).
 */
static void LZ4_putPositionOnHash(
	const BYTE *p,
	U32 h,
	void *tableBase,
	tableType_t const tableType,
	const BYTE *srcBase)
{
	if (tableType == byPtr) {
		const BYTE **hashTable = (const BYTE **)tableBase;

		hashTable[h] = p;
	} else if (tableType == byU32) {
		U32 *hashTable = (U32 *)tableBase;

		hashTable[h] = (U32)(p - srcBase);
	} else {
		U16 *hashTable = (U16 *)tableBase;

		hashTable[h] = (U16)(p - srcBase);
	}
}
121
LZ4_putPosition(const BYTE * p,void * tableBase,tableType_t tableType,const BYTE * srcBase)122 static FORCE_INLINE void LZ4_putPosition(
123 const BYTE *p,
124 void *tableBase,
125 tableType_t tableType,
126 const BYTE *srcBase)
127 {
128 U32 const h = LZ4_hashPosition(p, tableType);
129
130 LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
131 }
132
/*
 * Fetch the position stored in hash slot h, decoding it back to a
 * pointer according to the table type (mirror of
 * LZ4_putPositionOnHash()).
 */
static const BYTE *LZ4_getPositionOnHash(
	U32 h,
	void *tableBase,
	tableType_t tableType,
	const BYTE *srcBase)
{
	switch (tableType) {
	case byPtr: {
		const BYTE **hashTable = (const BYTE **)tableBase;

		return hashTable[h];
	}
	case byU32: {
		const U32 * const hashTable = (U32 *)tableBase;

		return hashTable[h] + srcBase;
	}
	case byU16:
	default: {
		/* default case, to ensure a return on every path */
		const U16 * const hashTable = (U16 *)tableBase;

		return hashTable[h] + srcBase;
	}
	}
}
158
LZ4_getPosition(const BYTE * p,void * tableBase,tableType_t tableType,const BYTE * srcBase)159 static FORCE_INLINE const BYTE *LZ4_getPosition(
160 const BYTE *p,
161 void *tableBase,
162 tableType_t tableType,
163 const BYTE *srcBase)
164 {
165 U32 const h = LZ4_hashPosition(p, tableType);
166
167 return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
168 }
169
170
/*
 * LZ4_compress_generic() :
 * inlined, to ensure branches are decided at compilation time.
 *
 * Core compression loop shared by all variants in this file. The
 * directive parameters (outputLimited, tableType, dict, dictIssue) are
 * compile-time constants at every call site, so the inliner strips the
 * unused branches.
 *
 * Returns the number of bytes written to 'dest', or 0 on failure:
 * input too large/negative, byU16 input over the 64K limit, or - when
 * outputLimited is set - output that would not fit in maxOutputSize.
 */
static FORCE_INLINE int LZ4_compress_generic(
	LZ4_stream_t_internal * const dictPtr,
	const char * const source,
	char * const dest,
	const int inputSize,
	const int maxOutputSize,
	const limitedOutput_directive outputLimited,
	const tableType_t tableType,
	const dict_directive dict,
	const dictIssue_directive dictIssue,
	const U32 acceleration)
{
	const BYTE *ip = (const BYTE *) source;
	const BYTE *base;
	const BYTE *lowLimit;
	/* with dictSmall, matches below this are stale and rejected */
	const BYTE * const lowRefLimit = ip - dictPtr->dictSize;
	const BYTE * const dictionary = dictPtr->dictionary;
	const BYTE * const dictEnd = dictionary + dictPtr->dictSize;
	/* added to a match pointer to translate it into dictionary space */
	const size_t dictDelta = dictEnd - (const BYTE *)source;
	const BYTE *anchor = (const BYTE *) source;
	const BYTE * const iend = ip + inputSize;
	const BYTE * const mflimit = iend - MFLIMIT;
	const BYTE * const matchlimit = iend - LASTLITERALS;

	BYTE *op = (BYTE *) dest;
	BYTE * const olimit = op + maxOutputSize;

	U32 forwardH;
	/* nonzero only while the current match lies in the ext dictionary */
	size_t refDelta = 0;

	/* Init conditions */
	if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) {
		/* Unsupported inputSize, too large (or negative) */
		return 0;
	}

	/* base maps table offsets back to pointers; lowLimit bounds the
	 * backward match extension below
	 */
	switch (dict) {
	case noDict:
	default:
		base = (const BYTE *)source;
		lowLimit = (const BYTE *)source;
		break;
	case withPrefix64k:
		base = (const BYTE *)source - dictPtr->currentOffset;
		lowLimit = (const BYTE *)source - dictPtr->dictSize;
		break;
	case usingExtDict:
		base = (const BYTE *)source - dictPtr->currentOffset;
		lowLimit = (const BYTE *)source;
		break;
	}

	if ((tableType == byU16)
		&& (inputSize >= LZ4_64Klimit)) {
		/* Size too large (not within 64K limit) */
		return 0;
	}

	if (inputSize < LZ4_minLength) {
		/* Input too small, no compression (all literals) */
		goto _last_literals;
	}

	/* First Byte */
	LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);
	ip++;
	forwardH = LZ4_hashPosition(ip, tableType);

	/* Main Loop */
	for ( ; ; ) {
		const BYTE *match;
		BYTE *token;

		/* Find a match */
		{
			const BYTE *forwardIp = ip;
			unsigned int step = 1;
			/* acceleration scales how fast 'step' grows, i.e.
			 * how aggressively positions are skipped
			 */
			unsigned int searchMatchNb = acceleration << LZ4_SKIPTRIGGER;

			do {
				U32 const h = forwardH;

				ip = forwardIp;
				forwardIp += step;
				step = (searchMatchNb++ >> LZ4_SKIPTRIGGER);

				if (unlikely(forwardIp > mflimit))
					goto _last_literals;

				match = LZ4_getPositionOnHash(h,
					dictPtr->hashTable,
					tableType, base);

				if (dict == usingExtDict) {
					if (match < (const BYTE *)source) {
						/* candidate is inside the
						 * external dictionary
						 */
						refDelta = dictDelta;
						lowLimit = dictionary;
					} else {
						refDelta = 0;
						lowLimit = (const BYTE *)source;
					}
				}

				forwardH = LZ4_hashPosition(forwardIp,
					tableType);

				LZ4_putPositionOnHash(ip, h, dictPtr->hashTable,
					tableType, base);
				/* loop until the candidate is fresh enough,
				 * within MAX_DISTANCE, and its first 4 bytes
				 * really match
				 */
			} while (((dictIssue == dictSmall)
					? (match < lowRefLimit)
					: 0)
				|| ((tableType == byU16)
					? 0
					: (match + MAX_DISTANCE < ip))
				|| (LZ4_read32(match + refDelta)
					!= LZ4_read32(ip)));
		}

		/* Catch up : extend the match backwards while the preceding
		 * bytes are equal too
		 */
		while (((ip > anchor) & (match + refDelta > lowLimit))
				&& (unlikely(ip[-1] == match[refDelta - 1]))) {
			ip--;
			match--;
		}

		/* Encode Literals */
		{
			unsigned const int litLength = (unsigned int)(ip - anchor);

			/* token: high 4 bits = literal run length,
			 * low 4 bits = match length (filled in later)
			 */
			token = op++;

			if ((outputLimited) &&
				/* Check output buffer overflow */
				(unlikely(op + litLength +
					(2 + 1 + LASTLITERALS) +
					(litLength / 255) > olimit)))
				return 0;

			if (litLength >= RUN_MASK) {
				/* run longer than 14 : emit 255-byte
				 * continuation bytes
				 */
				int len = (int)litLength - RUN_MASK;

				*token = (RUN_MASK << ML_BITS);

				for (; len >= 255; len -= 255)
					*op++ = 255;
				*op++ = (BYTE)len;
			} else
				*token = (BYTE)(litLength << ML_BITS);

			/* Copy Literals */
			LZ4_wildCopy(op, anchor, op + litLength);
			op += litLength;
		}

_next_match:
		/* Encode Offset */
		LZ4_writeLE16(op, (U16)(ip - match));
		op += 2;

		/* Encode MatchLength */
		{
			unsigned int matchCode;

			if ((dict == usingExtDict)
				&& (lowLimit == dictionary)) {
				/* match starts in the ext dictionary : count
				 * up to dictEnd, then possibly continue
				 * matching from the start of 'source'
				 */
				const BYTE *limit;

				match += refDelta;
				limit = ip + (dictEnd - match);

				if (limit > matchlimit)
					limit = matchlimit;

				matchCode = LZ4_count(ip + MINMATCH,
					match + MINMATCH, limit);

				ip += MINMATCH + matchCode;

				if (ip == limit) {
					unsigned const int more = LZ4_count(ip,
						(const BYTE *)source,
						matchlimit);

					matchCode += more;
					ip += more;
				}
			} else {
				matchCode = LZ4_count(ip + MINMATCH,
					match + MINMATCH, matchlimit);
				ip += MINMATCH + matchCode;
			}

			if (outputLimited &&
				/* Check output buffer overflow */
				(unlikely(op +
					(1 + LASTLITERALS) +
					(matchCode >> 8) > olimit)))
				return 0;

			if (matchCode >= ML_MASK) {
				/* long match : low nibble saturates to 15,
				 * remainder goes out as 255-continuations
				 */
				*token += ML_MASK;
				matchCode -= ML_MASK;
				LZ4_write32(op, 0xFFFFFFFF);

				while (matchCode >= 4 * 255) {
					op += 4;
					LZ4_write32(op, 0xFFFFFFFF);
					matchCode -= 4 * 255;
				}

				op += matchCode / 255;
				*op++ = (BYTE)(matchCode % 255);
			} else
				*token += (BYTE)(matchCode);
		}

		anchor = ip;

		/* Test end of chunk */
		if (ip > mflimit)
			break;

		/* Fill table */
		LZ4_putPosition(ip - 2, dictPtr->hashTable, tableType, base);

		/* Test next position : try to chain another match
		 * immediately, with zero intervening literals
		 */
		match = LZ4_getPosition(ip, dictPtr->hashTable,
			tableType, base);

		if (dict == usingExtDict) {
			if (match < (const BYTE *)source) {
				refDelta = dictDelta;
				lowLimit = dictionary;
			} else {
				refDelta = 0;
				lowLimit = (const BYTE *)source;
			}
		}

		LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);

		if (((dictIssue == dictSmall) ? (match >= lowRefLimit) : 1)
			&& (match + MAX_DISTANCE >= ip)
			&& (LZ4_read32(match + refDelta) == LZ4_read32(ip))) {
			/* token with zero literal length */
			token = op++;
			*token = 0;
			goto _next_match;
		}

		/* Prepare next loop */
		forwardH = LZ4_hashPosition(++ip, tableType);
	}

_last_literals:
	/* Encode Last Literals */
	{
		size_t const lastRun = (size_t)(iend - anchor);

		if ((outputLimited) &&
			/* Check output buffer overflow */
			((op - (BYTE *)dest) + lastRun + 1 +
			((lastRun + 255 - RUN_MASK) / 255) > (U32)maxOutputSize))
			return 0;

		if (lastRun >= RUN_MASK) {
			size_t accumulator = lastRun - RUN_MASK;
			*op++ = RUN_MASK << ML_BITS;
			for (; accumulator >= 255; accumulator -= 255)
				*op++ = 255;
			*op++ = (BYTE) accumulator;
		} else {
			*op++ = (BYTE)(lastRun << ML_BITS);
		}

		LZ4_memcpy(op, anchor, lastRun);

		op += lastRun;
	}

	/* End */
	return (int) (((char *)op) - dest);
}
456
LZ4_compress_fast_extState(void * state,const char * source,char * dest,int inputSize,int maxOutputSize,int acceleration)457 static int LZ4_compress_fast_extState(
458 void *state,
459 const char *source,
460 char *dest,
461 int inputSize,
462 int maxOutputSize,
463 int acceleration)
464 {
465 LZ4_stream_t_internal *ctx = &((LZ4_stream_t *)state)->internal_donotuse;
466 #if LZ4_ARCH64
467 const tableType_t tableType = byU32;
468 #else
469 const tableType_t tableType = byPtr;
470 #endif
471
472 LZ4_resetStream((LZ4_stream_t *)state);
473
474 if (acceleration < 1)
475 acceleration = LZ4_ACCELERATION_DEFAULT;
476
477 if (maxOutputSize >= LZ4_COMPRESSBOUND(inputSize)) {
478 if (inputSize < LZ4_64Klimit)
479 return LZ4_compress_generic(ctx, source,
480 dest, inputSize, 0,
481 noLimit, byU16, noDict,
482 noDictIssue, acceleration);
483 else
484 return LZ4_compress_generic(ctx, source,
485 dest, inputSize, 0,
486 noLimit, tableType, noDict,
487 noDictIssue, acceleration);
488 } else {
489 if (inputSize < LZ4_64Klimit)
490 return LZ4_compress_generic(ctx, source,
491 dest, inputSize,
492 maxOutputSize, limitedOutput, byU16, noDict,
493 noDictIssue, acceleration);
494 else
495 return LZ4_compress_generic(ctx, source,
496 dest, inputSize,
497 maxOutputSize, limitedOutput, tableType, noDict,
498 noDictIssue, acceleration);
499 }
500 }
501
/*
 * Public entry point : fast compression with selectable acceleration,
 * using 'wrkmem' as the scratch compression state.
 */
int LZ4_compress_fast(const char *source, char *dest, int inputSize,
	int maxOutputSize, int acceleration, void *wrkmem)
{
	return LZ4_compress_fast_extState(wrkmem, source, dest,
		inputSize, maxOutputSize, acceleration);
}
EXPORT_SYMBOL(LZ4_compress_fast);
509
LZ4_compress_default(const char * source,char * dest,int inputSize,int maxOutputSize,void * wrkmem)510 int LZ4_compress_default(const char *source, char *dest, int inputSize,
511 int maxOutputSize, void *wrkmem)
512 {
513 return LZ4_compress_fast(source, dest, inputSize,
514 maxOutputSize, LZ4_ACCELERATION_DEFAULT, wrkmem);
515 }
516 EXPORT_SYMBOL(LZ4_compress_default);
517
518 /*-******************************
519 * *_destSize() variant
520 ********************************/
/*
 * Compress as much of the input as will fit into exactly
 * 'targetDstSize' output bytes. On return, *srcSizePtr holds the
 * number of source bytes actually consumed; the return value is the
 * number of bytes written to 'dst' (0 on error).
 */
static int LZ4_compress_destSize_generic(
	LZ4_stream_t_internal * const ctx,
	const char * const src,
	char * const dst,
	int * const srcSizePtr,
	const int targetDstSize,
	const tableType_t tableType)
{
	const BYTE *ip = (const BYTE *) src;
	const BYTE *base = (const BYTE *) src;
	const BYTE *lowLimit = (const BYTE *) src;
	const BYTE *anchor = ip;
	const BYTE * const iend = ip + *srcSizePtr;
	const BYTE * const mflimit = iend - MFLIMIT;
	const BYTE * const matchlimit = iend - LASTLITERALS;

	BYTE *op = (BYTE *) dst;
	BYTE * const oend = op + targetDstSize;
	/* output watermarks : highest 'op' at which a literal run, a
	 * match description, or a whole new sequence may still start
	 */
	BYTE * const oMaxLit = op + targetDstSize - 2 /* offset */
		- 8 /* because 8 + MINMATCH == MFLIMIT */ - 1 /* token */;
	BYTE * const oMaxMatch = op + targetDstSize
		- (LASTLITERALS + 1 /* token */);
	BYTE * const oMaxSeq = oMaxLit - 1 /* token */;

	U32 forwardH;

	/* Init conditions */
	/* Impossible to store anything */
	if (targetDstSize < 1)
		return 0;
	/* Unsupported input size, too large (or negative) */
	if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE)
		return 0;
	/* Size too large (not within 64K limit) */
	if ((tableType == byU16) && (*srcSizePtr >= LZ4_64Klimit))
		return 0;
	/* Input too small, no compression (all literals) */
	if (*srcSizePtr < LZ4_minLength)
		goto _last_literals;

	/* First Byte */
	*srcSizePtr = 0;
	LZ4_putPosition(ip, ctx->hashTable, tableType, base);
	ip++; forwardH = LZ4_hashPosition(ip, tableType);

	/* Main Loop */
	for ( ; ; ) {
		const BYTE *match;
		BYTE *token;

		/* Find a match */
		{
			const BYTE *forwardIp = ip;
			unsigned int step = 1;
			/* fixed acceleration of 1 for this variant */
			unsigned int searchMatchNb = 1 << LZ4_SKIPTRIGGER;

			do {
				U32 h = forwardH;

				ip = forwardIp;
				forwardIp += step;
				step = (searchMatchNb++ >> LZ4_SKIPTRIGGER);

				if (unlikely(forwardIp > mflimit))
					goto _last_literals;

				match = LZ4_getPositionOnHash(h, ctx->hashTable,
					tableType, base);
				forwardH = LZ4_hashPosition(forwardIp,
					tableType);
				LZ4_putPositionOnHash(ip, h,
					ctx->hashTable, tableType,
					base);

				/* loop until the candidate is within
				 * MAX_DISTANCE and its first 4 bytes match
				 */
			} while (((tableType == byU16)
				? 0
				: (match + MAX_DISTANCE < ip))
				|| (LZ4_read32(match) != LZ4_read32(ip)));
		}

		/* Catch up : extend the match backwards */
		while ((ip > anchor)
			&& (match > lowLimit)
			&& (unlikely(ip[-1] == match[-1]))) {
			ip--;
			match--;
		}

		/* Encode Literal length */
		{
			unsigned int litLength = (unsigned int)(ip - anchor);

			token = op++;
			if (op + ((litLength + 240) / 255)
					+ litLength > oMaxLit) {
				/* Not enough space for a last match */
				op--;
				goto _last_literals;
			}
			if (litLength >= RUN_MASK) {
				unsigned int len = litLength - RUN_MASK;
				*token = (RUN_MASK<<ML_BITS);
				for (; len >= 255; len -= 255)
					*op++ = 255;
				*op++ = (BYTE)len;
			} else
				*token = (BYTE)(litLength << ML_BITS);

			/* Copy Literals */
			LZ4_wildCopy(op, anchor, op + litLength);
			op += litLength;
		}

_next_match:
		/* Encode Offset */
		LZ4_writeLE16(op, (U16)(ip - match)); op += 2;

		/* Encode MatchLength */
		{
			size_t matchLength = LZ4_count(ip + MINMATCH,
				match + MINMATCH, matchlimit);

			if (op + ((matchLength + 240)/255) > oMaxMatch) {
				/* Match description too long : reduce it so
				 * its encoding still fits in the output
				 */
				matchLength = (15 - 1) + (oMaxMatch - op) * 255;
			}
			ip += MINMATCH + matchLength;

			if (matchLength >= ML_MASK) {
				*token += ML_MASK;
				matchLength -= ML_MASK;
				while (matchLength >= 255) {
					matchLength -= 255;
					*op++ = 255;
				}
				*op++ = (BYTE)matchLength;
			} else
				*token += (BYTE)(matchLength);
		}

		anchor = ip;

		/* Test end of block */
		if (ip > mflimit)
			break;
		if (op > oMaxSeq)
			break;

		/* Fill table */
		LZ4_putPosition(ip - 2, ctx->hashTable, tableType, base);

		/* Test next position : chain a match with zero literals */
		match = LZ4_getPosition(ip, ctx->hashTable, tableType, base);
		LZ4_putPosition(ip, ctx->hashTable, tableType, base);

		if ((match + MAX_DISTANCE >= ip)
			&& (LZ4_read32(match) == LZ4_read32(ip))) {
			token = op++; *token = 0;
			goto _next_match;
		}

		/* Prepare next loop */
		forwardH = LZ4_hashPosition(++ip, tableType);
	}

_last_literals:
	/* Encode Last Literals */
	{
		size_t lastRunSize = (size_t)(iend - anchor);

		if (op + 1 /* token */
			+ ((lastRunSize + 240) / 255) /* litLength */
			+ lastRunSize /* literals */ > oend) {
			/* adapt lastRunSize to fill 'dst' */
			lastRunSize = (oend - op) - 1;
			lastRunSize -= (lastRunSize + 240) / 255;
		}
		/* ip marks how much source was consumed overall */
		ip = anchor + lastRunSize;

		if (lastRunSize >= RUN_MASK) {
			size_t accumulator = lastRunSize - RUN_MASK;

			*op++ = RUN_MASK << ML_BITS;
			for (; accumulator >= 255; accumulator -= 255)
				*op++ = 255;
			*op++ = (BYTE) accumulator;
		} else {
			*op++ = (BYTE)(lastRunSize<<ML_BITS);
		}
		LZ4_memcpy(op, anchor, lastRunSize);
		op += lastRunSize;
	}

	/* End */
	*srcSizePtr = (int) (((const char *)ip) - src);
	return (int) (((char *)op) - dst);
}
718
/*
 * destSize compression into caller-provided state. When the target
 * size already covers the worst case, plain fast compression is used
 * (success guaranteed); otherwise the destSize-bounded path runs with
 * a table type matching the input size.
 */
static int LZ4_compress_destSize_extState(
	LZ4_stream_t *state,
	const char *src,
	char *dst,
	int *srcSizePtr,
	int targetDstSize)
{
#if LZ4_ARCH64
	const tableType_t tableType = byU32;
#else
	const tableType_t tableType = byPtr;
#endif

	LZ4_resetStream(state);

	if (targetDstSize >= LZ4_COMPRESSBOUND(*srcSizePtr)) {
		/* compression success is guaranteed */
		return LZ4_compress_fast_extState(state, src, dst,
			*srcSizePtr, targetDstSize, 1);
	}

	if (*srcSizePtr < LZ4_64Klimit)
		return LZ4_compress_destSize_generic(
			&state->internal_donotuse,
			src, dst, srcSizePtr,
			targetDstSize, byU16);
	return LZ4_compress_destSize_generic(
		&state->internal_donotuse,
		src, dst, srcSizePtr,
		targetDstSize, tableType);
}
752
753
/*
 * Public entry point : compress into a fixed-size destination buffer,
 * consuming as much of the source as fits. *srcSizePtr is updated to
 * the number of bytes consumed.
 */
int LZ4_compress_destSize(
	const char *src,
	char *dst,
	int *srcSizePtr,
	int targetDstSize,
	void *wrkmem)
{
	return LZ4_compress_destSize_extState(wrkmem, src, dst,
		srcSizePtr, targetDstSize);
}
EXPORT_SYMBOL(LZ4_compress_destSize);
765
766 /*-******************************
767 * Streaming functions
768 ********************************/
/*
 * Zero a streaming compression context so it can be (re)used.
 */
void LZ4_resetStream(LZ4_stream_t *LZ4_stream)
{
	memset(LZ4_stream, 0, sizeof(*LZ4_stream));
}
773
/*
 * Load an external dictionary into the stream context. Only the
 * trailing 64 KB are kept; positions are hashed every 3 bytes into the
 * byU32 table. Returns the number of dictionary bytes retained.
 */
int LZ4_loadDict(LZ4_stream_t *LZ4_dict,
	const char *dictionary, int dictSize)
{
	LZ4_stream_t_internal *dict = &LZ4_dict->internal_donotuse;
	const BYTE *p = (const BYTE *)dictionary;
	const BYTE * const dictEnd = p + dictSize;
	const BYTE *base;

	/* Uninitialized structure, or reuse overflow */
	if (dict->initCheck || dict->currentOffset > 1 * GB)
		LZ4_resetStream(LZ4_dict);

	if (dictSize < (int)HASH_UNIT) {
		/* too small to hash even one position */
		dict->dictionary = NULL;
		dict->dictSize = 0;
		return 0;
	}

	/* keep only the last 64 KB of an oversized dictionary */
	if ((dictEnd - p) > 64 * KB)
		p = dictEnd - 64 * KB;
	dict->currentOffset += 64 * KB;
	base = p - dict->currentOffset;
	dict->dictionary = p;
	dict->dictSize = (U32)(dictEnd - p);
	dict->currentOffset += dict->dictSize;

	for (; p <= dictEnd - HASH_UNIT; p += 3)
		LZ4_putPosition(p, dict->hashTable, byU32, base);

	return dict->dictSize;
}
EXPORT_SYMBOL(LZ4_loadDict);
810
/*
 * Rescale the hash table when currentOffset approaches the 32-bit
 * limit (or exceeds the source address), so stored offsets stay valid
 * for subsequent compressions.
 */
static void LZ4_renormDictT(LZ4_stream_t_internal *LZ4_dict,
	const BYTE *src)
{
	U32 delta;
	const BYTE *dictEnd;
	int i;

	if ((LZ4_dict->currentOffset <= 0x80000000) &&
	    ((uptrval)LZ4_dict->currentOffset <= (uptrval)src))
		return;

	/* address space overflow : shift every table entry down */
	delta = LZ4_dict->currentOffset - 64 * KB;
	dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;

	for (i = 0; i < LZ4_HASH_SIZE_U32; i++) {
		U32 const entry = LZ4_dict->hashTable[i];

		LZ4_dict->hashTable[i] = (entry < delta) ? 0 : entry - delta;
	}

	LZ4_dict->currentOffset = 64 * KB;
	if (LZ4_dict->dictSize > 64 * KB)
		LZ4_dict->dictSize = 64 * KB;
	LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
}
834
/*
 * Copy the tail of the current dictionary into 'safeBuffer' so it
 * survives reuse of the source buffer, and point the context at the
 * saved copy. Returns the number of bytes saved.
 */
int LZ4_saveDict(LZ4_stream_t *LZ4_dict, char *safeBuffer, int dictSize)
{
	LZ4_stream_t_internal * const dict = &LZ4_dict->internal_donotuse;
	const BYTE * const previousDictEnd = dict->dictionary + dict->dictSize;
	int toSave = dictSize;

	/* useless to define a dictionary > 64 * KB */
	if ((U32)toSave > 64 * KB)
		toSave = 64 * KB;
	if ((U32)toSave > dict->dictSize)
		toSave = dict->dictSize;

	/* regions may overlap : memmove, not memcpy */
	memmove(safeBuffer, previousDictEnd - toSave, toSave);

	dict->dictionary = (const BYTE *)safeBuffer;
	dict->dictSize = (U32)toSave;

	return toSave;
}
EXPORT_SYMBOL(LZ4_saveDict);
855
/*
 * Streaming compression : compress 'source' while allowing matches
 * back into previously-compressed data (prefix mode) or a loaded
 * dictionary (external-dictionary mode). Returns the compressed size
 * written to 'dest', or 0 on error / uninitialized stream.
 */
int LZ4_compress_fast_continue(LZ4_stream_t *LZ4_stream, const char *source,
	char *dest, int inputSize, int maxOutputSize, int acceleration)
{
	LZ4_stream_t_internal *streamPtr = &LZ4_stream->internal_donotuse;
	const BYTE * const dictEnd = streamPtr->dictionary
		+ streamPtr->dictSize;

	const BYTE *smallest = (const BYTE *) source;

	if (streamPtr->initCheck) {
		/* Uninitialized structure detected */
		return 0;
	}

	/* renormalize against the lower of source and dictionary end */
	if ((streamPtr->dictSize > 0) && (smallest > dictEnd))
		smallest = dictEnd;

	LZ4_renormDictT(streamPtr, smallest);

	if (acceleration < 1)
		acceleration = LZ4_ACCELERATION_DEFAULT;

	/* Check overlapping input/dictionary space : if the new input
	 * overwrites part of the dictionary, shrink the dictionary to
	 * the non-overlapping tail
	 */
	{
		const BYTE *sourceEnd = (const BYTE *) source + inputSize;

		if ((sourceEnd > streamPtr->dictionary)
			&& (sourceEnd < dictEnd)) {
			streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
			if (streamPtr->dictSize > 64 * KB)
				streamPtr->dictSize = 64 * KB;
			if (streamPtr->dictSize < 4)
				streamPtr->dictSize = 0;
			streamPtr->dictionary = dictEnd - streamPtr->dictSize;
		}
	}

	/* prefix mode : source data follows dictionary */
	if (dictEnd == (const BYTE *)source) {
		int result;

		/* dictSmall adds a staleness check on match candidates */
		if ((streamPtr->dictSize < 64 * KB) &&
			(streamPtr->dictSize < streamPtr->currentOffset)) {
			result = LZ4_compress_generic(
				streamPtr, source, dest, inputSize,
				maxOutputSize, limitedOutput, byU32,
				withPrefix64k, dictSmall, acceleration);
		} else {
			result = LZ4_compress_generic(
				streamPtr, source, dest, inputSize,
				maxOutputSize, limitedOutput, byU32,
				withPrefix64k, noDictIssue, acceleration);
		}
		/* the just-compressed input extends the prefix dictionary */
		streamPtr->dictSize += (U32)inputSize;
		streamPtr->currentOffset += (U32)inputSize;
		return result;
	}

	/* external dictionary mode */
	{
		int result;

		if ((streamPtr->dictSize < 64 * KB) &&
			(streamPtr->dictSize < streamPtr->currentOffset)) {
			result = LZ4_compress_generic(
				streamPtr, source, dest, inputSize,
				maxOutputSize, limitedOutput, byU32,
				usingExtDict, dictSmall, acceleration);
		} else {
			result = LZ4_compress_generic(
				streamPtr, source, dest, inputSize,
				maxOutputSize, limitedOutput, byU32,
				usingExtDict, noDictIssue, acceleration);
		}
		/* the input just compressed becomes the new dictionary */
		streamPtr->dictionary = (const BYTE *)source;
		streamPtr->dictSize = (U32)inputSize;
		streamPtr->currentOffset += (U32)inputSize;
		return result;
	}
}
EXPORT_SYMBOL(LZ4_compress_fast_continue);
937
938 MODULE_LICENSE("Dual BSD/GPL");
939 MODULE_DESCRIPTION("LZ4 compressor");
940