1 /*
2 * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
3 * All rights reserved.
4 *
5 * This source code is licensed under both the BSD-style license (found in the
6 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
7 * in the COPYING file in the root directory of this source tree).
8 * You may select, at your option, one of the above-listed licenses.
9 */
10
11
12 #include <stddef.h> /* size_t, ptrdiff_t */
13 #include "zstd_v02.h"
14 #include "../common/error_private.h"
15
16
17 /******************************************
18 * Compiler-specific
19 ******************************************/
20 #if defined(_MSC_VER) /* Visual Studio */
21 # include <stdlib.h> /* _byteswap_ulong */
22 # include <intrin.h> /* _byteswap_* */
23 #endif
24
25
26 /* ******************************************************************
27 mem.h
28 low-level memory access routines
29 Copyright (C) 2013-2015, Yann Collet.
30
31 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
32
33 Redistribution and use in source and binary forms, with or without
34 modification, are permitted provided that the following conditions are
35 met:
36
37 * Redistributions of source code must retain the above copyright
38 notice, this list of conditions and the following disclaimer.
39 * Redistributions in binary form must reproduce the above
40 copyright notice, this list of conditions and the following disclaimer
41 in the documentation and/or other materials provided with the
42 distribution.
43
44 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
45 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
46 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
47 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
48 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
49 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
50 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
51 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
52 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
53 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
54 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
55
56 You can contact the author at :
57 - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
58 - Public forum : https://groups.google.com/forum/#!forum/lz4c
59 ****************************************************************** */
60 #ifndef MEM_H_MODULE
61 #define MEM_H_MODULE
62
63 #if defined (__cplusplus)
64 extern "C" {
65 #endif
66
67 /******************************************
68 * Includes
69 ******************************************/
70 #include <stddef.h> /* size_t, ptrdiff_t */
71 #include <string.h> /* memcpy */
72
73
74 /******************************************
75 * Compiler-specific
76 ******************************************/
77 #if defined(__GNUC__)
78 # define MEM_STATIC static __attribute__((unused))
79 #elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
80 # define MEM_STATIC static inline
81 #elif defined(_MSC_VER)
82 # define MEM_STATIC static __inline
83 #else
84 # define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */
85 #endif
86
87
88 /****************************************************************
89 * Basic Types
90 *****************************************************************/
91 #if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
92 # if defined(_AIX)
93 # include <inttypes.h>
94 # else
95 # include <stdint.h> /* intptr_t */
96 # endif
97 typedef uint8_t BYTE;
98 typedef uint16_t U16;
99 typedef int16_t S16;
100 typedef uint32_t U32;
101 typedef int32_t S32;
102 typedef uint64_t U64;
103 typedef int64_t S64;
104 #else
105 typedef unsigned char BYTE;
106 typedef unsigned short U16;
107 typedef signed short S16;
108 typedef unsigned int U32;
109 typedef signed int S32;
110 typedef unsigned long long U64;
111 typedef signed long long S64;
112 #endif
113
114
115 /****************************************************************
116 * Memory I/O
117 *****************************************************************/
118 /* MEM_FORCE_MEMORY_ACCESS
119 * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
120 * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
121  * The switch below allows selecting a different access method for improved performance.
122  * Method 0 (default) : use `memcpy()`. Safe and portable.
123  * Method 1 : `__packed` statement. It relies on a compiler extension (i.e., not portable).
124  *            This method is safe if your compiler supports it, and *generally* as fast as or faster than `memcpy`.
125  * Method 2 : direct access. This method is syntactically portable, but it violates the C standard.
126  *            It can generate buggy code on targets whose generated assembly depends on alignment.
127  *            However, in some circumstances, it's the only known way to get the best performance (e.g. GCC + ARMv6).
128 * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
129 * Prefer these methods in priority order (0 > 1 > 2)
130 */
131 #ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
132 # if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
133 # define MEM_FORCE_MEMORY_ACCESS 2
134 # elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
135 (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
136 # define MEM_FORCE_MEMORY_ACCESS 1
137 # endif
138 #endif
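/* Illustrative note (not part of the original file) : MEM_FORCE_MEMORY_ACCESS can also be forced
 * from the build command line, e.g. `cc -DMEM_FORCE_MEMORY_ACCESS=0 -c zstd_v02.c` to stay on the
 * portable `memcpy()` path, or `=1` / `=2` to opt into the faster, less portable methods above. */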
139
140 MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; }
141 MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; }
142
143 MEM_STATIC unsigned MEM_isLittleEndian(void)
144 {
145 const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
146 return one.c[0];
147 }
148
149 #if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2)
150
151 /* violates C standard on structure alignment.
152 Only use if no other choice to achieve best performance on target platform */
153 MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; }
154 MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; }
155 MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; }
156
157 MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
158
159 #elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)
160
161 /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
162 /* currently only defined for gcc and icc */
163 typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign;
164
165 MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
166 MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
167 MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
168
169 MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
170
171 #else
172
173 /* default method, safe and standard.
174 can sometimes prove slower */
175
176 MEM_STATIC U16 MEM_read16(const void* memPtr)
177 {
178 U16 val; memcpy(&val, memPtr, sizeof(val)); return val;
179 }
180
181 MEM_STATIC U32 MEM_read32(const void* memPtr)
182 {
183 U32 val; memcpy(&val, memPtr, sizeof(val)); return val;
184 }
185
186 MEM_STATIC U64 MEM_read64(const void* memPtr)
187 {
188 U64 val; memcpy(&val, memPtr, sizeof(val)); return val;
189 }
190
191 MEM_STATIC void MEM_write16(void* memPtr, U16 value)
192 {
193 memcpy(memPtr, &value, sizeof(value));
194 }
195
196 #endif /* MEM_FORCE_MEMORY_ACCESS */
197
198
199 MEM_STATIC U16 MEM_readLE16(const void* memPtr)
200 {
201 if (MEM_isLittleEndian())
202 return MEM_read16(memPtr);
203 else
204 {
205 const BYTE* p = (const BYTE*)memPtr;
206 return (U16)(p[0] + (p[1]<<8));
207 }
208 }
209
210 MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
211 {
212 if (MEM_isLittleEndian())
213 {
214 MEM_write16(memPtr, val);
215 }
216 else
217 {
218 BYTE* p = (BYTE*)memPtr;
219 p[0] = (BYTE)val;
220 p[1] = (BYTE)(val>>8);
221 }
222 }
223
224 MEM_STATIC U32 MEM_readLE24(const void* memPtr)
225 {
226 return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
227 }
228
229 MEM_STATIC U32 MEM_readLE32(const void* memPtr)
230 {
231 if (MEM_isLittleEndian())
232 return MEM_read32(memPtr);
233 else
234 {
235 const BYTE* p = (const BYTE*)memPtr;
236 return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24));
237 }
238 }
239
240
241 MEM_STATIC U64 MEM_readLE64(const void* memPtr)
242 {
243 if (MEM_isLittleEndian())
244 return MEM_read64(memPtr);
245 else
246 {
247 const BYTE* p = (const BYTE*)memPtr;
248 return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24)
249 + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56));
250 }
251 }
252
253
254 MEM_STATIC size_t MEM_readLEST(const void* memPtr)
255 {
256 if (MEM_32bits())
257 return (size_t)MEM_readLE32(memPtr);
258 else
259 return (size_t)MEM_readLE64(memPtr);
260 }
261
262 #if defined (__cplusplus)
263 }
264 #endif
265
266 #endif /* MEM_H_MODULE */
267
268
269 /* ******************************************************************
270 bitstream
271 Part of NewGen Entropy library
272 header file (to include)
273 Copyright (C) 2013-2015, Yann Collet.
274
275 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
276
277 Redistribution and use in source and binary forms, with or without
278 modification, are permitted provided that the following conditions are
279 met:
280
281 * Redistributions of source code must retain the above copyright
282 notice, this list of conditions and the following disclaimer.
283 * Redistributions in binary form must reproduce the above
284 copyright notice, this list of conditions and the following disclaimer
285 in the documentation and/or other materials provided with the
286 distribution.
287
288 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
289 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
290 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
291 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
292 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
293 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
294 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
295 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
296 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
297 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
298 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
299
300 You can contact the author at :
301 - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
302 - Public forum : https://groups.google.com/forum/#!forum/lz4c
303 ****************************************************************** */
304 #ifndef BITSTREAM_H_MODULE
305 #define BITSTREAM_H_MODULE
306
307 #if defined (__cplusplus)
308 extern "C" {
309 #endif
310
311
312 /*
313  * This API consists of small unitary functions, which benefit greatly from being inlined.
314  * Since link-time optimization is not available for all compilers,
315  * these functions are defined in a .h file, to be included where needed.
316 */
317
318
319 /**********************************************
320 * bitStream decompression API (read backward)
321 **********************************************/
322 typedef struct
323 {
324 size_t bitContainer;
325 unsigned bitsConsumed;
326 const char* ptr;
327 const char* start;
328 } BIT_DStream_t;
329
330 typedef enum { BIT_DStream_unfinished = 0,
331 BIT_DStream_endOfBuffer = 1,
332 BIT_DStream_completed = 2,
333 BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */
334 /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
335
336 MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
337 MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
338 MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
339 MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
340
341
342 /******************************************
343 * unsafe API
344 ******************************************/
345 MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
346 /* faster, but works only if nbBits >= 1 */
347
348
349
350 /****************************************************************
351 * Helper functions
352 ****************************************************************/
353 MEM_STATIC unsigned BIT_highbit32 (U32 val)
354 {
355 # if defined(_MSC_VER) /* Visual */
356 unsigned long r=0;
357 _BitScanReverse ( &r, val );
358 return (unsigned) r;
359 # elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
360 return __builtin_clz (val) ^ 31;
361 # else /* Software version */
362 static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
363 U32 v = val;
364 unsigned r;
365 v |= v >> 1;
366 v |= v >> 2;
367 v |= v >> 4;
368 v |= v >> 8;
369 v |= v >> 16;
370 r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
371 return r;
372 # endif
373 }
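/* Illustrative examples (added for clarity, not in the original) :
 * BIT_highbit32(1) == 0, BIT_highbit32(32) == 5, BIT_highbit32(0x80000000) == 31.
 * The argument is expected to be non-zero (BIT_highbit32(0) is undefined with the intrinsic code paths). */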
374
375
376
377 /**********************************************************
378 * bitStream decoding
379 **********************************************************/
380
381 /*!BIT_initDStream
382 * Initialize a BIT_DStream_t.
383 * @bitD : a pointer to an already allocated BIT_DStream_t structure
384 * @srcBuffer must point at the beginning of a bitStream
385 * @srcSize must be the exact size of the bitStream
386 * @result : size of stream (== srcSize) or an errorCode if a problem is detected
387 */
388 MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
389 {
390 if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
391
392 if (srcSize >= sizeof(size_t)) /* normal case */
393 {
394 U32 contain32;
395 bitD->start = (const char*)srcBuffer;
396 bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(size_t);
397 bitD->bitContainer = MEM_readLEST(bitD->ptr);
398 contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
399 if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */
400 bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
401 }
402 else
403 {
404 U32 contain32;
405 bitD->start = (const char*)srcBuffer;
406 bitD->ptr = bitD->start;
407 bitD->bitContainer = *(const BYTE*)(bitD->start);
408 switch(srcSize)
409 {
410 case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16);
411 /* fallthrough */
412 case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24);
413 /* fallthrough */
414 case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32);
415 /* fallthrough */
416 case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24;
417 /* fallthrough */
418 case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16;
419 /* fallthrough */
420 case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8;
421 /* fallthrough */
422 default:;
423 }
424 contain32 = ((const BYTE*)srcBuffer)[srcSize-1];
425 if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */
426 bitD->bitsConsumed = 8 - BIT_highbit32(contain32);
427 bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8;
428 }
429
430 return srcSize;
431 }
432
433 MEM_STATIC size_t BIT_lookBits(BIT_DStream_t* bitD, U32 nbBits)
434 {
435 const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
436 return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
437 }
438
439 /*! BIT_lookBitsFast :
440  * unsafe version; only works if nbBits >= 1 */
441 MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits)
442 {
443 const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1;
444 return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
445 }
446
447 MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
448 {
449 bitD->bitsConsumed += nbBits;
450 }
451
452 MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
453 {
454 size_t value = BIT_lookBits(bitD, nbBits);
455 BIT_skipBits(bitD, nbBits);
456 return value;
457 }
458
459 /*!BIT_readBitsFast :
460  * unsafe version; only works if nbBits >= 1 */
461 MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
462 {
463 size_t value = BIT_lookBitsFast(bitD, nbBits);
464 BIT_skipBits(bitD, nbBits);
465 return value;
466 }
467
468 MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
469 {
470 if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */
471 return BIT_DStream_overflow;
472
473 if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))
474 {
475 bitD->ptr -= bitD->bitsConsumed >> 3;
476 bitD->bitsConsumed &= 7;
477 bitD->bitContainer = MEM_readLEST(bitD->ptr);
478 return BIT_DStream_unfinished;
479 }
480 if (bitD->ptr == bitD->start)
481 {
482 if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
483 return BIT_DStream_completed;
484 }
485 {
486 U32 nbBytes = bitD->bitsConsumed >> 3;
487 BIT_DStream_status result = BIT_DStream_unfinished;
488 if (bitD->ptr - nbBytes < bitD->start)
489 {
490 nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
491 result = BIT_DStream_endOfBuffer;
492 }
493 bitD->ptr -= nbBytes;
494 bitD->bitsConsumed -= nbBytes*8;
495 bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */
496 return result;
497 }
498 }
499
500 /*! BIT_endOfDStream
501 * @return Tells if DStream has reached its exact end
502 */
503 MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
504 {
505 return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
506 }
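/* Illustrative usage sketch (added for exposition; not part of the original library).
 * It decodes `count` fixed-width symbols from a backward bitstream, in the same style as the
 * FSE/Huff0 decoders further below. The function name and signature are hypothetical. */
MEM_STATIC size_t BIT_exampleReadSymbols(BYTE* dst, size_t count,
                                         const void* src, size_t srcSize, unsigned nbBits)
{
    BIT_DStream_t bitD;
    size_t n;
    size_t const initResult = BIT_initDStream(&bitD, src, srcSize);
    if (initResult != srcSize) return initResult;   /* BIT_initDStream returned an error code */
    for (n = 0; n < count; n++)
    {
        dst[n] = (BYTE)BIT_readBits(&bitD, nbBits);                     /* consume nbBits from the container */
        if (BIT_reloadDStream(&bitD) > BIT_DStream_completed) break;    /* overflow => corrupted input */
    }
    return n;   /* number of symbols actually decoded */
}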
507
508 #if defined (__cplusplus)
509 }
510 #endif
511
512 #endif /* BITSTREAM_H_MODULE */
513 /* ******************************************************************
514 Error codes and messages
515 Copyright (C) 2013-2015, Yann Collet
516
517 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
518
519 Redistribution and use in source and binary forms, with or without
520 modification, are permitted provided that the following conditions are
521 met:
522
523 * Redistributions of source code must retain the above copyright
524 notice, this list of conditions and the following disclaimer.
525 * Redistributions in binary form must reproduce the above
526 copyright notice, this list of conditions and the following disclaimer
527 in the documentation and/or other materials provided with the
528 distribution.
529
530 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
531 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
532 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
533 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
534 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
535 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
536 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
537 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
538 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
539 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
540 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
541
542 You can contact the author at :
543 - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
544 - Public forum : https://groups.google.com/forum/#!forum/lz4c
545 ****************************************************************** */
546 #ifndef ERROR_H_MODULE
547 #define ERROR_H_MODULE
548
549 #if defined (__cplusplus)
550 extern "C" {
551 #endif
552
553
554 /******************************************
555 * Compiler-specific
556 ******************************************/
557 #if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
558 # define ERR_STATIC static inline
559 #elif defined(_MSC_VER)
560 # define ERR_STATIC static __inline
561 #elif defined(__GNUC__)
562 # define ERR_STATIC static __attribute__((unused))
563 #else
564 # define ERR_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */
565 #endif
566
567
568 /******************************************
569 * Error Management
570 ******************************************/
571 #define PREFIX(name) ZSTD_error_##name
572
573 #define ERROR(name) (size_t)-PREFIX(name)
574
575 #define ERROR_LIST(ITEM) \
576 ITEM(PREFIX(No_Error)) ITEM(PREFIX(GENERIC)) \
577 ITEM(PREFIX(dstSize_tooSmall)) ITEM(PREFIX(srcSize_wrong)) \
578 ITEM(PREFIX(prefix_unknown)) ITEM(PREFIX(corruption_detected)) \
579 ITEM(PREFIX(tableLog_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooSmall)) \
580 ITEM(PREFIX(maxCode))
581
582 #define ERROR_GENERATE_ENUM(ENUM) ENUM,
583 typedef enum { ERROR_LIST(ERROR_GENERATE_ENUM) } ERR_codes; /* enum is exposed, to detect & handle specific errors; compare function result to -enum value */
584
585 #define ERROR_CONVERTTOSTRING(STRING) #STRING,
586 #define ERROR_GENERATE_STRING(EXPR) ERROR_CONVERTTOSTRING(EXPR)
587 static const char* ERR_strings[] = { ERROR_LIST(ERROR_GENERATE_STRING) };
588
589 ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
590
591 ERR_STATIC const char* ERR_getErrorName(size_t code)
592 {
593 static const char* codeError = "Unspecified error code";
594 if (ERR_isError(code)) return ERR_strings[-(int)(code)];
595 return codeError;
596 }
597
598
599 #if defined (__cplusplus)
600 }
601 #endif
602
603 #endif /* ERROR_H_MODULE */
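/* Illustrative note (not in the original) : with the convention above, a failing function returns
 * (size_t)-PREFIX(name), i.e. a value in the topmost range of size_t, and ERR_isError() detects it
 * by comparing against ERROR(maxCode). A typical call site looks like :
 *
 *     size_t const r = FSE_decompress(dst, dstCapacity, src, srcSize);
 *     if (ERR_isError(r)) return r;      <- propagate the error code
 *     otherwise r is the number of bytes written into dst
 */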
604 /*
605 Constructor and Destructor of type FSE_CTable
606 Note that its size depends on 'tableLog' and 'maxSymbolValue' */
607 typedef unsigned FSE_CTable; /* don't allocate that. It's just a way to be more restrictive than void* */
608 typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
609
610
611 /* ******************************************************************
612 FSE : Finite State Entropy coder
613 header file for static linking (only)
614 Copyright (C) 2013-2015, Yann Collet
615
616 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
617
618 Redistribution and use in source and binary forms, with or without
619 modification, are permitted provided that the following conditions are
620 met:
621
622 * Redistributions of source code must retain the above copyright
623 notice, this list of conditions and the following disclaimer.
624 * Redistributions in binary form must reproduce the above
625 copyright notice, this list of conditions and the following disclaimer
626 in the documentation and/or other materials provided with the
627 distribution.
628
629 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
630 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
631 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
632 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
633 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
634 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
635 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
636 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
637 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
638 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
639 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
640
641 You can contact the author at :
642 - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
643 - Public forum : https://groups.google.com/forum/#!forum/lz4c
644 ****************************************************************** */
645 #if defined (__cplusplus)
646 extern "C" {
647 #endif
648
649
650 /******************************************
651 * Static allocation
652 ******************************************/
653 /* FSE buffer bounds */
654 #define FSE_NCOUNTBOUND 512
655 #define FSE_BLOCKBOUND(size) (size + (size>>7))
656 #define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
657
658 /* You can statically allocate FSE CTable/DTable as a table of unsigned using below macro */
659 #define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))
660 #define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<maxTableLog))
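/* Illustrative example (not in the original) : a statically allocated DTable able to hold any
 * table of log size <= 12 :
 *
 *     FSE_DTable dTable[FSE_DTABLE_SIZE_U32(12)];    <- 4097 unsigned cells : 1 header + 4096 entries
 */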
661
662
663 /******************************************
664 * FSE advanced API
665 ******************************************/
666 static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
667 /* build a fake FSE_DTable, designed to read an uncompressed bitstream where each symbol uses nbBits */
668
669 static size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
670 /* build a fake FSE_DTable, designed to always generate the same symbolValue */
671
672
673 /******************************************
674 * FSE symbol decompression API
675 ******************************************/
676 typedef struct
677 {
678 size_t state;
679 const void* table; /* precise table may vary, depending on U16 */
680 } FSE_DState_t;
681
682
683 static void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);
684
685 static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
686
687 static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);
688
689
690 /******************************************
691 * FSE unsafe API
692 ******************************************/
693 static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
694 /* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
695
696
697 /******************************************
698 * Implementation of inline functions
699 ******************************************/
700
701 /* decompression */
702
703 typedef struct {
704 U16 tableLog;
705 U16 fastMode;
706 } FSE_DTableHeader; /* sizeof U32 */
707
708 typedef struct
709 {
710 unsigned short newState;
711 unsigned char symbol;
712 unsigned char nbBits;
713 } FSE_decode_t; /* size == U32 */
714
715 MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
716 {
717 FSE_DTableHeader DTableH;
718 memcpy(&DTableH, dt, sizeof(DTableH));
719 DStatePtr->state = BIT_readBits(bitD, DTableH.tableLog);
720 BIT_reloadDStream(bitD);
721 DStatePtr->table = dt + 1;
722 }
723
724 MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
725 {
726 const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
727 const U32 nbBits = DInfo.nbBits;
728 BYTE symbol = DInfo.symbol;
729 size_t lowBits = BIT_readBits(bitD, nbBits);
730
731 DStatePtr->state = DInfo.newState + lowBits;
732 return symbol;
733 }
734
735 MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
736 {
737 const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
738 const U32 nbBits = DInfo.nbBits;
739 BYTE symbol = DInfo.symbol;
740 size_t lowBits = BIT_readBitsFast(bitD, nbBits);
741
742 DStatePtr->state = DInfo.newState + lowBits;
743 return symbol;
744 }
745
746 MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
747 {
748 return DStatePtr->state == 0;
749 }
750
751
752 #if defined (__cplusplus)
753 }
754 #endif
755 /* ******************************************************************
756 Huff0 : Huffman coder, part of New Generation Entropy library
757 header file for static linking (only)
758 Copyright (C) 2013-2015, Yann Collet
759
760 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
761
762 Redistribution and use in source and binary forms, with or without
763 modification, are permitted provided that the following conditions are
764 met:
765
766 * Redistributions of source code must retain the above copyright
767 notice, this list of conditions and the following disclaimer.
768 * Redistributions in binary form must reproduce the above
769 copyright notice, this list of conditions and the following disclaimer
770 in the documentation and/or other materials provided with the
771 distribution.
772
773 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
774 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
775 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
776 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
777 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
778 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
779 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
780 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
781 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
782 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
783 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
784
785 You can contact the author at :
786 - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
787 - Public forum : https://groups.google.com/forum/#!forum/lz4c
788 ****************************************************************** */
789
790 #if defined (__cplusplus)
791 extern "C" {
792 #endif
793
794 /******************************************
795 * Static allocation macros
796 ******************************************/
797 /* Huff0 buffer bounds */
798 #define HUF_CTABLEBOUND 129
799 #define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true if incompressible pre-filtered with fast heuristic */
800 #define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
801
802 /* static allocation of Huff0's DTable */
803 #define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<maxTableLog)) /* nb Cells; use unsigned short for X2, unsigned int for X4 */
804 #define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
805 unsigned short DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
806 #define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
807 unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog)] = { maxTableLog }
808 #define HUF_CREATE_STATIC_DTABLEX6(DTable, maxTableLog) \
809 unsigned int DTable[HUF_DTABLE_SIZE(maxTableLog) * 3 / 2] = { maxTableLog }
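/* Illustrative example (not in the original) : declaring a single-symbol (X2) table for tableLog 12 :
 *
 *     HUF_CREATE_STATIC_DTABLEX2(hufTable, 12);
 *     which expands to : unsigned short hufTable[1 + (1<<12)] = { 12 };
 */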
810
811
812 /******************************************
813 * Advanced functions
814 ******************************************/
815 static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
816 static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbols decoder */
817 static size_t HUF_decompress4X6 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* quad-symbols decoder */
818
819
820 #if defined (__cplusplus)
821 }
822 #endif
823
824 /*
825 zstd - standard compression library
826 Header File
827 Copyright (C) 2014-2015, Yann Collet.
828
829 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
830
831 Redistribution and use in source and binary forms, with or without
832 modification, are permitted provided that the following conditions are
833 met:
834 * Redistributions of source code must retain the above copyright
835 notice, this list of conditions and the following disclaimer.
836 * Redistributions in binary form must reproduce the above
837 copyright notice, this list of conditions and the following disclaimer
838 in the documentation and/or other materials provided with the
839 distribution.
840 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
841 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
842 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
843 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
844 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
845 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
846 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
847 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
848 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
849 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
850 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
851
852 You can contact the author at :
853 - zstd source repository : https://github.com/Cyan4973/zstd
854  - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
855 */
856
857 #if defined (__cplusplus)
858 extern "C" {
859 #endif
860
861 /* *************************************
862 * Includes
863 ***************************************/
864 #include <stddef.h> /* size_t */
865
866
867 /* *************************************
868 * Version
869 ***************************************/
870 #define ZSTD_VERSION_MAJOR 0 /* for breaking interface changes */
871 #define ZSTD_VERSION_MINOR 2 /* for new (non-breaking) interface capabilities */
872 #define ZSTD_VERSION_RELEASE 2 /* for tweaks, bug-fixes, or development */
873 #define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
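/* e.g. (illustrative) : this file corresponds to v0.2.2, i.e. ZSTD_VERSION_NUMBER == 0*10000 + 2*100 + 2 == 202 */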
874
875
876 /* *************************************
877 * Advanced functions
878 ***************************************/
879 typedef struct ZSTD_CCtx_s ZSTD_CCtx; /* incomplete type */
880
881 #if defined (__cplusplus)
882 }
883 #endif
884 /*
885 zstd - standard compression library
886 Header File for static linking only
887 Copyright (C) 2014-2015, Yann Collet.
888
889 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
890
891 Redistribution and use in source and binary forms, with or without
892 modification, are permitted provided that the following conditions are
893 met:
894 * Redistributions of source code must retain the above copyright
895 notice, this list of conditions and the following disclaimer.
896 * Redistributions in binary form must reproduce the above
897 copyright notice, this list of conditions and the following disclaimer
898 in the documentation and/or other materials provided with the
899 distribution.
900 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
901 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
902 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
903 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
904 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
905 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
906 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
907 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
908 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
909 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
910 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
911
912 You can contact the author at :
913 - zstd source repository : https://github.com/Cyan4973/zstd
914  - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
915 */
916
917 /* The objects defined in this file should be considered experimental.
918  * They are not labelled stable, as their prototypes may change in the future.
919  * You can use them for tests, to provide feedback, or if you can tolerate the risk of future changes.
920 */
921
922 #if defined (__cplusplus)
923 extern "C" {
924 #endif
925
926 /* *************************************
927 * Streaming functions
928 ***************************************/
929
930 typedef struct ZSTD_DCtx_s ZSTD_DCtx;
931
932 /*
933   Use the above functions alternately.
934   ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
935   ZSTD_decompressContinue() will use previously decoded data blocks as history, provided they are located immediately prior to the current block.
936 Result is the number of bytes regenerated within 'dst'.
937 It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header.
938 */
939
940 /* *************************************
941 * Prefix - version detection
942 ***************************************/
943 #define ZSTD_magicNumber 0xFD2FB522 /* v0.2 (current)*/
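/* Illustrative sketch (not part of the original section) : a v0.2 frame starts with the 4-byte
 * little-endian magic number above, so a minimal detection helper could look like the following
 * (the function name is hypothetical) : */
MEM_STATIC unsigned ZSTDv02_example_isFrame(const void* src, size_t srcSize)
{
    if (srcSize < 4) return 0;                        /* not enough data to hold the magic number */
    return MEM_readLE32(src) == ZSTD_magicNumber;     /* compare against the v0.2 magic number */
}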
944
945
946 #if defined (__cplusplus)
947 }
948 #endif
949 /* ******************************************************************
950 FSE : Finite State Entropy coder
951 Copyright (C) 2013-2015, Yann Collet.
952
953 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
954
955 Redistribution and use in source and binary forms, with or without
956 modification, are permitted provided that the following conditions are
957 met:
958
959 * Redistributions of source code must retain the above copyright
960 notice, this list of conditions and the following disclaimer.
961 * Redistributions in binary form must reproduce the above
962 copyright notice, this list of conditions and the following disclaimer
963 in the documentation and/or other materials provided with the
964 distribution.
965
966 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
967 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
968 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
969 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
970 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
971 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
972 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
973 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
974 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
975 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
976 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
977
978 You can contact the author at :
979 - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
980 - Public forum : https://groups.google.com/forum/#!forum/lz4c
981 ****************************************************************** */
982
983 #ifndef FSE_COMMONDEFS_ONLY
984
985 /****************************************************************
986 * Tuning parameters
987 ****************************************************************/
988 /* MEMORY_USAGE :
989 * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
990 * Increasing memory usage improves compression ratio
991 * Reduced memory usage can improve speed, due to cache effect
992 * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
993 #define FSE_MAX_MEMORY_USAGE 14
994 #define FSE_DEFAULT_MEMORY_USAGE 13
995
996 /* FSE_MAX_SYMBOL_VALUE :
997 * Maximum symbol value authorized.
998 * Required for proper stack allocation */
999 #define FSE_MAX_SYMBOL_VALUE 255
1000
1001
1002 /****************************************************************
1003 * template functions type & suffix
1004 ****************************************************************/
1005 #define FSE_FUNCTION_TYPE BYTE
1006 #define FSE_FUNCTION_EXTENSION
1007
1008
1009 /****************************************************************
1010 * Byte symbol type
1011 ****************************************************************/
1012 #endif /* !FSE_COMMONDEFS_ONLY */
1013
1014
1015 /****************************************************************
1016 * Compiler specifics
1017 ****************************************************************/
1018 #ifdef _MSC_VER /* Visual Studio */
1019 # define FORCE_INLINE static __forceinline
1020 # include <intrin.h> /* For Visual 2005 */
1021 # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
1022 # pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */
1023 #else
1024 # if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
1025 # ifdef __GNUC__
1026 # define FORCE_INLINE static inline __attribute__((always_inline))
1027 # else
1028 # define FORCE_INLINE static inline
1029 # endif
1030 # else
1031 # define FORCE_INLINE static
1032 # endif /* __STDC_VERSION__ */
1033 #endif
1034
1035
1036 /****************************************************************
1037 * Includes
1038 ****************************************************************/
1039 #include <stdlib.h> /* malloc, free, qsort */
1040 #include <string.h> /* memcpy, memset */
1041 #include <stdio.h> /* printf (debug) */
1042
1043 /****************************************************************
1044 * Constants
1045 *****************************************************************/
1046 #define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2)
1047 #define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
1048 #define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
1049 #define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
1050 #define FSE_MIN_TABLELOG 5
1051
1052 #define FSE_TABLELOG_ABSOLUTE_MAX 15
1053 #if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
1054 #error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
1055 #endif
1056
1057
1058 /****************************************************************
1059 * Error Management
1060 ****************************************************************/
1061 #define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
1062
1063
1064 /****************************************************************
1065 * Complex types
1066 ****************************************************************/
1067 typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
1068
1069
1070 /****************************************************************
1071 * Templates
1072 ****************************************************************/
1073 /*
1074 designed to be included
1075 for type-specific functions (template emulation in C)
1076 Objective is to write these functions only once, for improved maintenance
1077 */
1078
1079 /* safety checks */
1080 #ifndef FSE_FUNCTION_EXTENSION
1081 # error "FSE_FUNCTION_EXTENSION must be defined"
1082 #endif
1083 #ifndef FSE_FUNCTION_TYPE
1084 # error "FSE_FUNCTION_TYPE must be defined"
1085 #endif
1086
1087 /* Function names */
1088 #define FSE_CAT(X,Y) X##Y
1089 #define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
1090 #define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
1091
1092
1093 /* Function templates */
1094
1095 #define FSE_DECODE_TYPE FSE_decode_t
1096
1097 static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; }
1098
1099 static size_t FSE_buildDTable
1100 (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
1101 {
1102 void* ptr = dt+1;
1103 FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*)ptr;
1104 FSE_DTableHeader DTableH;
1105 const U32 tableSize = 1 << tableLog;
1106 const U32 tableMask = tableSize-1;
1107 const U32 step = FSE_tableStep(tableSize);
1108 U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];
1109 U32 position = 0;
1110 U32 highThreshold = tableSize-1;
1111 const S16 largeLimit= (S16)(1 << (tableLog-1));
1112 U32 noLarge = 1;
1113 U32 s;
1114
1115 /* Sanity Checks */
1116 if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
1117 if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
1118
1119 /* Init, lay down lowprob symbols */
1120 DTableH.tableLog = (U16)tableLog;
1121 for (s=0; s<=maxSymbolValue; s++)
1122 {
1123 if (normalizedCounter[s]==-1)
1124 {
1125 tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
1126 symbolNext[s] = 1;
1127 }
1128 else
1129 {
1130 if (normalizedCounter[s] >= largeLimit) noLarge=0;
1131 symbolNext[s] = normalizedCounter[s];
1132 }
1133 }
1134
1135 /* Spread symbols */
1136 for (s=0; s<=maxSymbolValue; s++)
1137 {
1138 int i;
1139 for (i=0; i<normalizedCounter[s]; i++)
1140 {
1141 tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
1142 position = (position + step) & tableMask;
1143 while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
1144 }
1145 }
1146
1147 if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
1148
1149 /* Build Decoding table */
1150 {
1151 U32 i;
1152 for (i=0; i<tableSize; i++)
1153 {
1154 FSE_FUNCTION_TYPE symbol = (FSE_FUNCTION_TYPE)(tableDecode[i].symbol);
1155 U16 nextState = symbolNext[symbol]++;
1156 tableDecode[i].nbBits = (BYTE) (tableLog - BIT_highbit32 ((U32)nextState) );
1157 tableDecode[i].newState = (U16) ( (nextState << tableDecode[i].nbBits) - tableSize);
1158 }
1159 }
1160
1161 DTableH.fastMode = (U16)noLarge;
1162 memcpy(dt, &DTableH, sizeof(DTableH)); /* memcpy(), to avoid strict aliasing warnings */
1163 return 0;
1164 }
1165
1166
1167 #ifndef FSE_COMMONDEFS_ONLY
1168 /******************************************
1169 * FSE helper functions
1170 ******************************************/
1171 static unsigned FSE_isError(size_t code) { return ERR_isError(code); }
1172
1173
1174 /****************************************************************
1175 * FSE NCount encoding-decoding
1176 ****************************************************************/
1177 static short FSE_abs(short a)
1178 {
1179 return (short)(a<0 ? -a : a);
1180 }
1181
1182 static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
1183 const void* headerBuffer, size_t hbSize)
1184 {
1185 const BYTE* const istart = (const BYTE*) headerBuffer;
1186 const BYTE* const iend = istart + hbSize;
1187 const BYTE* ip = istart;
1188 int nbBits;
1189 int remaining;
1190 int threshold;
1191 U32 bitStream;
1192 int bitCount;
1193 unsigned charnum = 0;
1194 int previous0 = 0;
1195
1196 if (hbSize < 4) return ERROR(srcSize_wrong);
1197 bitStream = MEM_readLE32(ip);
1198 nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */
1199 if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
1200 bitStream >>= 4;
1201 bitCount = 4;
1202 *tableLogPtr = nbBits;
1203 remaining = (1<<nbBits)+1;
1204 threshold = 1<<nbBits;
1205 nbBits++;
1206
1207 while ((remaining>1) && (charnum<=*maxSVPtr))
1208 {
1209 if (previous0)
1210 {
1211 unsigned n0 = charnum;
1212 while ((bitStream & 0xFFFF) == 0xFFFF)
1213 {
1214 n0+=24;
1215 if (ip < iend-5)
1216 {
1217 ip+=2;
1218 bitStream = MEM_readLE32(ip) >> bitCount;
1219 }
1220 else
1221 {
1222 bitStream >>= 16;
1223 bitCount+=16;
1224 }
1225 }
1226 while ((bitStream & 3) == 3)
1227 {
1228 n0+=3;
1229 bitStream>>=2;
1230 bitCount+=2;
1231 }
1232 n0 += bitStream & 3;
1233 bitCount += 2;
1234 if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
1235 while (charnum < n0) normalizedCounter[charnum++] = 0;
1236 if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
1237 {
1238 ip += bitCount>>3;
1239 bitCount &= 7;
1240 bitStream = MEM_readLE32(ip) >> bitCount;
1241 }
1242 else
1243 bitStream >>= 2;
1244 }
1245 {
1246 const short max = (short)((2*threshold-1)-remaining);
1247 short count;
1248
1249 if ((bitStream & (threshold-1)) < (U32)max)
1250 {
1251 count = (short)(bitStream & (threshold-1));
1252 bitCount += nbBits-1;
1253 }
1254 else
1255 {
1256 count = (short)(bitStream & (2*threshold-1));
1257 if (count >= threshold) count -= max;
1258 bitCount += nbBits;
1259 }
1260
1261 count--; /* extra accuracy */
1262 remaining -= FSE_abs(count);
1263 normalizedCounter[charnum++] = count;
1264 previous0 = !count;
1265 while (remaining < threshold)
1266 {
1267 nbBits--;
1268 threshold >>= 1;
1269 }
1270
1271 {
1272 if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4))
1273 {
1274 ip += bitCount>>3;
1275 bitCount &= 7;
1276 }
1277 else
1278 {
1279 bitCount -= (int)(8 * (iend - 4 - ip));
1280 ip = iend - 4;
1281 }
1282 bitStream = MEM_readLE32(ip) >> (bitCount & 31);
1283 }
1284 }
1285 }
1286 if (remaining != 1) return ERROR(GENERIC);
1287 *maxSVPtr = charnum-1;
1288
1289 ip += (bitCount+7)>>3;
1290 if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong);
1291 return ip-istart;
1292 }
1293
1294
1295 /*********************************************************
1296 * Decompression (Byte symbols)
1297 *********************************************************/
1298 static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
1299 {
1300 void* ptr = dt;
1301 FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
1302 FSE_decode_t* const cell = (FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */
1303
1304 DTableH->tableLog = 0;
1305 DTableH->fastMode = 0;
1306
1307 cell->newState = 0;
1308 cell->symbol = symbolValue;
1309 cell->nbBits = 0;
1310
1311 return 0;
1312 }
1313
1314
1315 static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
1316 {
1317 void* ptr = dt;
1318 FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
1319 FSE_decode_t* const dinfo = (FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */
1320 const unsigned tableSize = 1 << nbBits;
1321 const unsigned tableMask = tableSize - 1;
1322 const unsigned maxSymbolValue = tableMask;
1323 unsigned s;
1324
1325 /* Sanity checks */
1326 if (nbBits < 1) return ERROR(GENERIC); /* min size */
1327
1328 /* Build Decoding Table */
1329 DTableH->tableLog = (U16)nbBits;
1330 DTableH->fastMode = 1;
1331 for (s=0; s<=maxSymbolValue; s++)
1332 {
1333 dinfo[s].newState = 0;
1334 dinfo[s].symbol = (BYTE)s;
1335 dinfo[s].nbBits = (BYTE)nbBits;
1336 }
1337
1338 return 0;
1339 }
1340
1341 FORCE_INLINE size_t FSE_decompress_usingDTable_generic(
1342 void* dst, size_t maxDstSize,
1343 const void* cSrc, size_t cSrcSize,
1344 const FSE_DTable* dt, const unsigned fast)
1345 {
1346 BYTE* const ostart = (BYTE*) dst;
1347 BYTE* op = ostart;
1348 BYTE* const omax = op + maxDstSize;
1349 BYTE* const olimit = omax-3;
1350
1351 BIT_DStream_t bitD;
1352 FSE_DState_t state1;
1353 FSE_DState_t state2;
1354 size_t errorCode;
1355
1356 /* Init */
1357     errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);   /* replaced last arg by maxCompressedSize */
1358 if (FSE_isError(errorCode)) return errorCode;
1359
1360 FSE_initDState(&state1, &bitD, dt);
1361 FSE_initDState(&state2, &bitD, dt);
1362
1363 #define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
1364
1365 /* 4 symbols per loop */
1366 for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) && (op<olimit) ; op+=4)
1367 {
1368 op[0] = FSE_GETSYMBOL(&state1);
1369
1370 if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
1371 BIT_reloadDStream(&bitD);
1372
1373 op[1] = FSE_GETSYMBOL(&state2);
1374
1375 if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
1376 { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }
1377
1378 op[2] = FSE_GETSYMBOL(&state1);
1379
1380 if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
1381 BIT_reloadDStream(&bitD);
1382
1383 op[3] = FSE_GETSYMBOL(&state2);
1384 }
1385
1386 /* tail */
1387     /* note : BIT_reloadDStream(&bitD) >= BIT_DStream_endOfBuffer; ends at exactly BIT_DStream_completed */
1388 while (1)
1389 {
1390 if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) )
1391 break;
1392
1393 *op++ = FSE_GETSYMBOL(&state1);
1394
1395 if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) )
1396 break;
1397
1398 *op++ = FSE_GETSYMBOL(&state2);
1399 }
1400
1401 /* end ? */
1402 if (BIT_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2))
1403 return op-ostart;
1404
1405 if (op==omax) return ERROR(dstSize_tooSmall); /* dst buffer is full, but cSrc unfinished */
1406
1407 return ERROR(corruption_detected);
1408 }
1409
1410
1411 static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
1412 const void* cSrc, size_t cSrcSize,
1413 const FSE_DTable* dt)
1414 {
1415 FSE_DTableHeader DTableH;
1416 memcpy(&DTableH, dt, sizeof(DTableH));
1417
1418 /* select fast mode (static) */
1419 if (DTableH.fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
1420 return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
1421 }
1422
1423
1424 static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize)
1425 {
1426 const BYTE* const istart = (const BYTE*)cSrc;
1427 const BYTE* ip = istart;
1428 short counting[FSE_MAX_SYMBOL_VALUE+1];
1429 DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */
1430 unsigned tableLog;
1431 unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
1432 size_t errorCode;
1433
1434 if (cSrcSize<2) return ERROR(srcSize_wrong); /* too small input size */
1435
1436 /* normal FSE decoding mode */
1437 errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
1438 if (FSE_isError(errorCode)) return errorCode;
1439 if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size */
1440 ip += errorCode;
1441 cSrcSize -= errorCode;
1442
1443 errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog);
1444 if (FSE_isError(errorCode)) return errorCode;
1445
1446 /* always return, even if it is an error code */
1447 return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt);
1448 }
1449
1450
1451
1452 #endif /* FSE_COMMONDEFS_ONLY */
1453 /* ******************************************************************
1454 Huff0 : Huffman coder, part of New Generation Entropy library
1455 Copyright (C) 2013-2015, Yann Collet.
1456
1457 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
1458
1459 Redistribution and use in source and binary forms, with or without
1460 modification, are permitted provided that the following conditions are
1461 met:
1462
1463 * Redistributions of source code must retain the above copyright
1464 notice, this list of conditions and the following disclaimer.
1465 * Redistributions in binary form must reproduce the above
1466 copyright notice, this list of conditions and the following disclaimer
1467 in the documentation and/or other materials provided with the
1468 distribution.
1469
1470 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
1471 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
1472 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
1473 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
1474 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
1475 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
1476 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
1477 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
1478 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
1479 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
1480 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1481
1482 You can contact the author at :
1483 - FSE+Huff0 source repository : https://github.com/Cyan4973/FiniteStateEntropy
1484 - Public forum : https://groups.google.com/forum/#!forum/lz4c
1485 ****************************************************************** */
1486
1487 /****************************************************************
1488 * Compiler specifics
1489 ****************************************************************/
1490 #if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
1491 /* inline is defined */
1492 #elif defined(_MSC_VER)
1493 # define inline __inline
1494 #else
1495 # define inline /* disable inline */
1496 #endif
1497
1498
1499 #ifdef _MSC_VER /* Visual Studio */
1500 # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
1501 #endif
1502
1503
1504 /****************************************************************
1505 * Includes
1506 ****************************************************************/
1507 #include <stdlib.h> /* malloc, free, qsort */
1508 #include <string.h> /* memcpy, memset */
1509 #include <stdio.h> /* printf (debug) */
1510
1511 /****************************************************************
1512 * Error Management
1513 ****************************************************************/
1514 #define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
1515
1516
1517 /******************************************
1518 * Helper functions
1519 ******************************************/
1520 static unsigned HUF_isError(size_t code) { return ERR_isError(code); }
1521
1522 #define HUF_ABSOLUTEMAX_TABLELOG 16 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
1523 #define HUF_MAX_TABLELOG 12 /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
1524 #define HUF_DEFAULT_TABLELOG HUF_MAX_TABLELOG /* tableLog by default, when not specified */
1525 #define HUF_MAX_SYMBOL_VALUE 255
1526 #if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG)
1527 # error "HUF_MAX_TABLELOG is too large !"
1528 #endif
1529
1530
1531
1532 /*********************************************************
1533 * Huff0 : Huffman block decompression
1534 *********************************************************/
1535 typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2; /* single-symbol decoding */
1536
1537 typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4; /* double-symbols decoding */
1538
1539 typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
1540
1541 /*! HUF_readStats
1542 Read compact Huffman tree, saved by HUF_writeCTable
1543 @huffWeight : destination buffer
1544 @return : size read from `src`
1545 */
1546 static size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
1547 U32* nbSymbolsPtr, U32* tableLogPtr,
1548 const void* src, size_t srcSize)
1549 {
1550 U32 weightTotal;
1551 U32 tableLog;
1552 const BYTE* ip = (const BYTE*) src;
1553 size_t iSize;
1554 size_t oSize;
1555 U32 n;
1556
1557 if (!srcSize) return ERROR(srcSize_wrong);
1558 iSize = ip[0];
1559     //memset(huffWeight, 0, hwSize);   /* is not necessary, even though some analyzers complain ... */
1560
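    /* Header byte layout handled below (as implemented in this legacy decoder) :
     * - iSize >= 242 : RLE header, all weights are 1, count taken from table l[]
     * - 128 <= iSize < 242 : raw weights, (iSize-127) values packed as 4-bit nibbles,
     *   two per byte (e.g. iSize==130 means 3 weights stored in 2 bytes)
     * - iSize < 128 : iSize bytes of FSE-compressed weights follow
     * The last weight is not stored; it is reconstructed further down so that the
     * sum of 2^(weight-1) reaches a clean power of 2. */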
1561 if (iSize >= 128) /* special header */
1562 {
1563 if (iSize >= (242)) /* RLE */
1564 {
1565 static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 };
1566 oSize = l[iSize-242];
1567 memset(huffWeight, 1, hwSize);
1568 iSize = 0;
1569 }
1570 else /* Incompressible */
1571 {
1572 oSize = iSize - 127;
1573 iSize = ((oSize+1)/2);
1574 if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
1575 if (oSize >= hwSize) return ERROR(corruption_detected);
1576 ip += 1;
1577 for (n=0; n<oSize; n+=2)
1578 {
1579 huffWeight[n] = ip[n/2] >> 4;
1580 huffWeight[n+1] = ip[n/2] & 15;
1581 }
1582 }
1583 }
1584 else /* header compressed with FSE (normal case) */
1585 {
1586 if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
1587 oSize = FSE_decompress(huffWeight, hwSize-1, ip+1, iSize); /* max (hwSize-1) values decoded, as last one is implied */
1588 if (FSE_isError(oSize)) return oSize;
1589 }
1590
1591 /* collect weight stats */
1592 memset(rankStats, 0, (HUF_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32));
1593 weightTotal = 0;
1594 for (n=0; n<oSize; n++)
1595 {
1596 if (huffWeight[n] >= HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
1597 rankStats[huffWeight[n]]++;
1598 weightTotal += (1 << huffWeight[n]) >> 1;
1599 }
1600 if (weightTotal == 0) return ERROR(corruption_detected);
1601
1602 /* get last non-null symbol weight (implied, total must be 2^n) */
1603 tableLog = BIT_highbit32(weightTotal) + 1;
1604 if (tableLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected);
1605 {
1606 U32 total = 1 << tableLog;
1607 U32 rest = total - weightTotal;
1608 U32 verif = 1 << BIT_highbit32(rest);
1609 U32 lastWeight = BIT_highbit32(rest) + 1;
1610 if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */
1611 huffWeight[oSize] = (BYTE)lastWeight;
1612 rankStats[lastWeight]++;
1613 }
1614
1615 /* check tree construction validity */
1616 if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */
1617
1618 /* results */
1619 *nbSymbolsPtr = (U32)(oSize+1);
1620 *tableLogPtr = tableLog;
1621 return iSize+1;
1622 }
1623
1624
1625 /**************************/
1626 /* single-symbol decoding */
1627 /**************************/
1628
1629 static size_t HUF_readDTableX2 (U16* DTable, const void* src, size_t srcSize)
1630 {
1631 BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1];
1632 U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */
1633 U32 tableLog = 0;
1634 const BYTE* ip = (const BYTE*) src;
1635 size_t iSize = ip[0];
1636 U32 nbSymbols = 0;
1637 U32 n;
1638 U32 nextRankStart;
1639 void* ptr = DTable+1;
1640 HUF_DEltX2* const dt = (HUF_DEltX2*)ptr;
1641
1642 HUF_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U16)); /* if compilation fails here, assertion is false */
1643     //memset(huffWeight, 0, sizeof(huffWeight));   /* is not necessary, even though some analyzers complain ... */
1644
1645 iSize = HUF_readStats(huffWeight, HUF_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
1646 if (HUF_isError(iSize)) return iSize;
1647
1648 /* check result */
1649 if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge); /* DTable is too small */
1650 DTable[0] = (U16)tableLog; /* maybe should separate sizeof DTable, as allocated, from used size of DTable, in case of DTable re-use */
1651
1652 /* Prepare ranks */
1653 nextRankStart = 0;
1654 for (n=1; n<=tableLog; n++)
1655 {
1656 U32 current = nextRankStart;
1657 nextRankStart += (rankVal[n] << (n-1));
1658 rankVal[n] = current;
1659 }
1660
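    /* Single-symbol table fill : a symbol of weight w occupies 2^(w-1) consecutive
     * cells and is decoded by reading (tableLog+1-w) bits; rankVal[w] tracks the next
     * free cell for that weight. For instance, with tableLog==3 a weight-3 symbol
     * fills 4 cells and costs a single bit per occurrence. */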
1661 /* fill DTable */
1662 for (n=0; n<nbSymbols; n++)
1663 {
1664 const U32 w = huffWeight[n];
1665 const U32 length = (1 << w) >> 1;
1666 U32 i;
1667 HUF_DEltX2 D;
1668 D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
1669 for (i = rankVal[w]; i < rankVal[w] + length; i++)
1670 dt[i] = D;
1671 rankVal[w] += length;
1672 }
1673
1674 return iSize;
1675 }
1676
1677 static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)
1678 {
1679 const size_t val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
1680 const BYTE c = dt[val].byte;
1681 BIT_skipBits(Dstream, dt[val].nbBits);
1682 return c;
1683 }
1684
1685 #define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
1686 *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)
1687
1688 #define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
1689 if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
1690 HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
1691
1692 #define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
1693 if (MEM_64bits()) \
1694 HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
1695
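/* The three macro variants trade reload frequency for speed : _0 always decodes one
   symbol, _1 decodes only when the bit container is known at compile time to still
   hold enough bits (64-bit target, or HUF_MAX_TABLELOG <= 12), and _2 decodes only on
   64-bit targets. Combined with the reload in the loops below, this keeps the number
   of BIT_reloadDStream() calls to roughly one per 4 decoded symbols. */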
1696 static inline size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
1697 {
1698 BYTE* const pStart = p;
1699
1700 /* up to 4 symbols at a time */
1701 while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4))
1702 {
1703 HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
1704 HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
1705 HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
1706 HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
1707 }
1708
1709 /* closer to the end */
1710 while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))
1711 HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
1712
1713 /* no more data to retrieve from bitstream, hence no need to reload */
1714 while (p < pEnd)
1715 HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
1716
1717 return pEnd-pStart;
1718 }
1719
1720
1721 static size_t HUF_decompress4X2_usingDTable(
1722 void* dst, size_t dstSize,
1723 const void* cSrc, size_t cSrcSize,
1724 const U16* DTable)
1725 {
1726 if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
1727
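    /* Layout of a 4-stream Huffman block (as read below) : the payload starts with a
     * 6-byte jump table holding three little-endian 16-bit lengths; the 4th stream
     * length is whatever remains of cSrcSize. The output is cut into four segments of
     * (dstSize+3)/4 bytes, each decoded from its own bitstream so the four decoders
     * can run interleaved in the main loop. */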
1728 {
1729 const BYTE* const istart = (const BYTE*) cSrc;
1730 BYTE* const ostart = (BYTE*) dst;
1731 BYTE* const oend = ostart + dstSize;
1732
1733 const void* ptr = DTable;
1734 const HUF_DEltX2* const dt = ((const HUF_DEltX2*)ptr) +1;
1735 const U32 dtLog = DTable[0];
1736 size_t errorCode;
1737
1738 /* Init */
1739 BIT_DStream_t bitD1;
1740 BIT_DStream_t bitD2;
1741 BIT_DStream_t bitD3;
1742 BIT_DStream_t bitD4;
1743 const size_t length1 = MEM_readLE16(istart);
1744 const size_t length2 = MEM_readLE16(istart+2);
1745 const size_t length3 = MEM_readLE16(istart+4);
1746 size_t length4;
1747 const BYTE* const istart1 = istart + 6; /* jumpTable */
1748 const BYTE* const istart2 = istart1 + length1;
1749 const BYTE* const istart3 = istart2 + length2;
1750 const BYTE* const istart4 = istart3 + length3;
1751 const size_t segmentSize = (dstSize+3) / 4;
1752 BYTE* const opStart2 = ostart + segmentSize;
1753 BYTE* const opStart3 = opStart2 + segmentSize;
1754 BYTE* const opStart4 = opStart3 + segmentSize;
1755 BYTE* op1 = ostart;
1756 BYTE* op2 = opStart2;
1757 BYTE* op3 = opStart3;
1758 BYTE* op4 = opStart4;
1759 U32 endSignal;
1760
1761 length4 = cSrcSize - (length1 + length2 + length3 + 6);
1762 if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
1763 errorCode = BIT_initDStream(&bitD1, istart1, length1);
1764 if (HUF_isError(errorCode)) return errorCode;
1765 errorCode = BIT_initDStream(&bitD2, istart2, length2);
1766 if (HUF_isError(errorCode)) return errorCode;
1767 errorCode = BIT_initDStream(&bitD3, istart3, length3);
1768 if (HUF_isError(errorCode)) return errorCode;
1769 errorCode = BIT_initDStream(&bitD4, istart4, length4);
1770 if (HUF_isError(errorCode)) return errorCode;
1771
1772 /* 16-32 symbols per loop (4-8 symbols per stream) */
1773 endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
1774 for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
1775 {
1776 HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
1777 HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
1778 HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
1779 HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
1780 HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
1781 HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
1782 HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
1783 HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
1784 HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
1785 HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
1786 HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
1787 HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
1788 HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
1789 HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
1790 HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
1791 HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
1792
1793 endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
1794 }
1795
1796 /* check corruption */
1797 if (op1 > opStart2) return ERROR(corruption_detected);
1798 if (op2 > opStart3) return ERROR(corruption_detected);
1799 if (op3 > opStart4) return ERROR(corruption_detected);
1800         /* note : op4 already verified within main loop */
1801
1802 /* finish bitStreams one by one */
1803 HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
1804 HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
1805 HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
1806 HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
1807
1808 /* check */
1809 endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
1810 if (!endSignal) return ERROR(corruption_detected);
1811
1812 /* decoded size */
1813 return dstSize;
1814 }
1815 }
1816
1817
1818 static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
1819 {
1820 HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG);
1821 const BYTE* ip = (const BYTE*) cSrc;
1822 size_t errorCode;
1823
1824 errorCode = HUF_readDTableX2 (DTable, cSrc, cSrcSize);
1825 if (HUF_isError(errorCode)) return errorCode;
1826 if (errorCode >= cSrcSize) return ERROR(srcSize_wrong);
1827 ip += errorCode;
1828 cSrcSize -= errorCode;
1829
1830 return HUF_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
1831 }
1832
1833
1834 /***************************/
1835 /* double-symbols decoding */
1836 /***************************/
1837
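/* Double-symbol (X4) tables : each cell stores a 2-byte sequence plus the total number
   of bits consumed and how many of those bytes are valid (1 or 2). When a first symbol
   leaves enough room in the table (>= minBits), HUF_fillDTableX4Level2() completes the
   cell with every possible second symbol, so a single table lookup can emit two bytes. */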
1838 static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed,
1839 const U32* rankValOrigin, const int minWeight,
1840 const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
1841 U32 nbBitsBaseline, U16 baseSeq)
1842 {
1843 HUF_DEltX4 DElt;
1844 U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
1845 U32 s;
1846
1847 /* get pre-calculated rankVal */
1848 memcpy(rankVal, rankValOrigin, sizeof(rankVal));
1849
1850 /* fill skipped values */
1851 if (minWeight>1)
1852 {
1853 U32 i, skipSize = rankVal[minWeight];
1854 MEM_writeLE16(&(DElt.sequence), baseSeq);
1855 DElt.nbBits = (BYTE)(consumed);
1856 DElt.length = 1;
1857 for (i = 0; i < skipSize; i++)
1858 DTable[i] = DElt;
1859 }
1860
1861 /* fill DTable */
1862 for (s=0; s<sortedListSize; s++) /* note : sortedSymbols already skipped */
1863 {
1864 const U32 symbol = sortedSymbols[s].symbol;
1865 const U32 weight = sortedSymbols[s].weight;
1866 const U32 nbBits = nbBitsBaseline - weight;
1867 const U32 length = 1 << (sizeLog-nbBits);
1868 const U32 start = rankVal[weight];
1869 U32 i = start;
1870 const U32 end = start + length;
1871
1872 MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
1873 DElt.nbBits = (BYTE)(nbBits + consumed);
1874 DElt.length = 2;
1875 do { DTable[i++] = DElt; } while (i<end); /* since length >= 1 */
1876
1877 rankVal[weight] += length;
1878 }
1879 }
1880
1881 typedef U32 rankVal_t[HUF_ABSOLUTEMAX_TABLELOG][HUF_ABSOLUTEMAX_TABLELOG + 1];
1882
1883 static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,
1884 const sortedSymbol_t* sortedList, const U32 sortedListSize,
1885 const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
1886 const U32 nbBitsBaseline)
1887 {
1888 U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
1889 const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
1890 const U32 minBits = nbBitsBaseline - maxWeight;
1891 U32 s;
1892
1893 memcpy(rankVal, rankValOrigin, sizeof(rankVal));
1894
1895 /* fill DTable */
1896 for (s=0; s<sortedListSize; s++)
1897 {
1898 const U16 symbol = sortedList[s].symbol;
1899 const U32 weight = sortedList[s].weight;
1900 const U32 nbBits = nbBitsBaseline - weight;
1901 const U32 start = rankVal[weight];
1902 const U32 length = 1 << (targetLog-nbBits);
1903
1904 if (targetLog-nbBits >= minBits) /* enough room for a second symbol */
1905 {
1906 U32 sortedRank;
1907 int minWeight = nbBits + scaleLog;
1908 if (minWeight < 1) minWeight = 1;
1909 sortedRank = rankStart[minWeight];
1910 HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
1911 rankValOrigin[nbBits], minWeight,
1912 sortedList+sortedRank, sortedListSize-sortedRank,
1913 nbBitsBaseline, symbol);
1914 }
1915 else
1916 {
1917 U32 i;
1918 const U32 end = start + length;
1919 HUF_DEltX4 DElt;
1920
1921 MEM_writeLE16(&(DElt.sequence), symbol);
1922 DElt.nbBits = (BYTE)(nbBits);
1923 DElt.length = 1;
1924 for (i = start; i < end; i++)
1925 DTable[i] = DElt;
1926 }
1927 rankVal[weight] += length;
1928 }
1929 }
1930
1931 static size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
1932 {
1933 BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1];
1934 sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1];
1935 U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 };
1936 U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 };
1937 U32* const rankStart = rankStart0+1;
1938 rankVal_t rankVal;
1939 U32 tableLog, maxW, sizeOfSort, nbSymbols;
1940 const U32 memLog = DTable[0];
1941 const BYTE* ip = (const BYTE*) src;
1942 size_t iSize = ip[0];
1943 void* ptr = DTable;
1944 HUF_DEltX4* const dt = ((HUF_DEltX4*)ptr) + 1;
1945
1946 HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(U32)); /* if compilation fails here, assertion is false */
1947 if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
1948     //memset(weightList, 0, sizeof(weightList));   /* is not necessary, even though some analyzers complain ... */
1949
1950 iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
1951 if (HUF_isError(iSize)) return iSize;
1952
1953 /* check result */
1954 if (tableLog > memLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
1955
1956 /* find maxWeight */
1957 for (maxW = tableLog; rankStats[maxW]==0; maxW--)
1958         { if (!maxW) return ERROR(GENERIC); }  /* necessarily finds a solution before maxW==0 */
1959
1960 /* Get start index of each weight */
1961 {
1962 U32 w, nextRankStart = 0;
1963 for (w=1; w<=maxW; w++)
1964 {
1965 U32 current = nextRankStart;
1966 nextRankStart += rankStats[w];
1967 rankStart[w] = current;
1968 }
1969 rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/
1970 sizeOfSort = nextRankStart;
1971 }
1972
1973 /* sort symbols by weight */
1974 {
1975 U32 s;
1976 for (s=0; s<nbSymbols; s++)
1977 {
1978 U32 w = weightList[s];
1979 U32 r = rankStart[w]++;
1980 sortedSymbol[r].symbol = (BYTE)s;
1981 sortedSymbol[r].weight = (BYTE)w;
1982 }
1983 rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
1984 }
1985
1986 /* Build rankVal */
1987 {
1988 const U32 minBits = tableLog+1 - maxW;
1989 U32 nextRankVal = 0;
1990 U32 w, consumed;
1991 const int rescale = (memLog-tableLog) - 1; /* tableLog <= memLog */
1992 U32* rankVal0 = rankVal[0];
1993 for (w=1; w<=maxW; w++)
1994 {
1995 U32 current = nextRankVal;
1996 nextRankVal += rankStats[w] << (w+rescale);
1997 rankVal0[w] = current;
1998 }
1999 for (consumed = minBits; consumed <= memLog - minBits; consumed++)
2000 {
2001 U32* rankValPtr = rankVal[consumed];
2002 for (w = 1; w <= maxW; w++)
2003 {
2004 rankValPtr[w] = rankVal0[w] >> consumed;
2005 }
2006 }
2007 }
2008
2009 HUF_fillDTableX4(dt, memLog,
2010 sortedSymbol, sizeOfSort,
2011 rankStart0, rankVal, maxW,
2012 tableLog+1);
2013
2014 return iSize;
2015 }
2016
2017
2018 static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
2019 {
2020 const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
2021 memcpy(op, dt+val, 2);
2022 BIT_skipBits(DStream, dt[val].nbBits);
2023 return dt[val].length;
2024 }
2025
2026 static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
2027 {
2028 const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
2029 memcpy(op, dt+val, 1);
2030 if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
2031 else
2032 {
2033 if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8))
2034 {
2035 BIT_skipBits(DStream, dt[val].nbBits);
2036 if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
2037 DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
2038 }
2039 }
2040 return 1;
2041 }
2042
2043
2044 #define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
2045 ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
2046
2047 #define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
2048 if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
2049 ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
2050
2051 #define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
2052 if (MEM_64bits()) \
2053 ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
2054
2055 static inline size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog)
2056 {
2057 BYTE* const pStart = p;
2058
2059 /* up to 8 symbols at a time */
2060 while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd-7))
2061 {
2062 HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
2063 HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
2064 HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
2065 HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
2066 }
2067
2068 /* closer to the end */
2069 while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-2))
2070 HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
2071
2072 while (p <= pEnd-2)
2073 HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
2074
2075 if (p < pEnd)
2076 p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
2077
2078 return p-pStart;
2079 }
2080
2081
2082
2083 static size_t HUF_decompress4X4_usingDTable(
2084 void* dst, size_t dstSize,
2085 const void* cSrc, size_t cSrcSize,
2086 const U32* DTable)
2087 {
2088 if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
2089
2090 {
2091 const BYTE* const istart = (const BYTE*) cSrc;
2092 BYTE* const ostart = (BYTE*) dst;
2093 BYTE* const oend = ostart + dstSize;
2094
2095 const void* ptr = DTable;
2096 const HUF_DEltX4* const dt = ((const HUF_DEltX4*)ptr) +1;
2097 const U32 dtLog = DTable[0];
2098 size_t errorCode;
2099
2100 /* Init */
2101 BIT_DStream_t bitD1;
2102 BIT_DStream_t bitD2;
2103 BIT_DStream_t bitD3;
2104 BIT_DStream_t bitD4;
2105 const size_t length1 = MEM_readLE16(istart);
2106 const size_t length2 = MEM_readLE16(istart+2);
2107 const size_t length3 = MEM_readLE16(istart+4);
2108 size_t length4;
2109 const BYTE* const istart1 = istart + 6; /* jumpTable */
2110 const BYTE* const istart2 = istart1 + length1;
2111 const BYTE* const istart3 = istart2 + length2;
2112 const BYTE* const istart4 = istart3 + length3;
2113 const size_t segmentSize = (dstSize+3) / 4;
2114 BYTE* const opStart2 = ostart + segmentSize;
2115 BYTE* const opStart3 = opStart2 + segmentSize;
2116 BYTE* const opStart4 = opStart3 + segmentSize;
2117 BYTE* op1 = ostart;
2118 BYTE* op2 = opStart2;
2119 BYTE* op3 = opStart3;
2120 BYTE* op4 = opStart4;
2121 U32 endSignal;
2122
2123 length4 = cSrcSize - (length1 + length2 + length3 + 6);
2124 if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
2125 errorCode = BIT_initDStream(&bitD1, istart1, length1);
2126 if (HUF_isError(errorCode)) return errorCode;
2127 errorCode = BIT_initDStream(&bitD2, istart2, length2);
2128 if (HUF_isError(errorCode)) return errorCode;
2129 errorCode = BIT_initDStream(&bitD3, istart3, length3);
2130 if (HUF_isError(errorCode)) return errorCode;
2131 errorCode = BIT_initDStream(&bitD4, istart4, length4);
2132 if (HUF_isError(errorCode)) return errorCode;
2133
2134 /* 16-32 symbols per loop (4-8 symbols per stream) */
2135 endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
2136 for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; )
2137 {
2138 HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
2139 HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
2140 HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
2141 HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
2142 HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
2143 HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
2144 HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
2145 HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
2146 HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
2147 HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
2148 HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
2149 HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
2150 HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
2151 HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
2152 HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
2153 HUF_DECODE_SYMBOLX4_0(op4, &bitD4);
2154
2155 endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
2156 }
2157
2158 /* check corruption */
2159 if (op1 > opStart2) return ERROR(corruption_detected);
2160 if (op2 > opStart3) return ERROR(corruption_detected);
2161 if (op3 > opStart4) return ERROR(corruption_detected);
2162         /* note : op4 already verified within main loop */
2163
2164 /* finish bitStreams one by one */
2165 HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
2166 HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
2167 HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
2168 HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog);
2169
2170 /* check */
2171 endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
2172 if (!endSignal) return ERROR(corruption_detected);
2173
2174 /* decoded size */
2175 return dstSize;
2176 }
2177 }
2178
2179
2180 static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
2181 {
2182 HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_MAX_TABLELOG);
2183 const BYTE* ip = (const BYTE*) cSrc;
2184
2185 size_t hSize = HUF_readDTableX4 (DTable, cSrc, cSrcSize);
2186 if (HUF_isError(hSize)) return hSize;
2187 if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
2188 ip += hSize;
2189 cSrcSize -= hSize;
2190
2191 return HUF_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
2192 }
2193
2194
2195 /**********************************/
2196 /* quad-symbol decoding */
2197 /**********************************/
2198 typedef struct { BYTE nbBits; BYTE nbBytes; } HUF_DDescX6;
2199 typedef union { BYTE byte[4]; U32 sequence; } HUF_DSeqX6;
2200
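/* Quad-symbol (X6) tables use two parallel arrays inside DTable : a description array
   (HUF_DDescX6 : bits consumed + number of valid bytes) starting at DTable+1, and a
   sequence array (HUF_DSeqX6 : up to 4 decoded bytes) starting at DTable+1+2^(memLog-1).
   One lookup can therefore emit up to 4 bytes; the table is filled recursively, packing
   as many symbols into one cell as their combined bit cost allows. */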
2201 /* recursive, up to level 3; may benefit from <template>-like strategy to nest each level inline */
2202 static void HUF_fillDTableX6LevelN(HUF_DDescX6* DDescription, HUF_DSeqX6* DSequence, int sizeLog,
2203 const rankVal_t rankValOrigin, const U32 consumed, const int minWeight, const U32 maxWeight,
2204 const sortedSymbol_t* sortedSymbols, const U32 sortedListSize, const U32* rankStart,
2205 const U32 nbBitsBaseline, HUF_DSeqX6 baseSeq, HUF_DDescX6 DDesc)
2206 {
2207 const int scaleLog = nbBitsBaseline - sizeLog; /* note : targetLog >= (nbBitsBaseline-1), hence scaleLog <= 1 */
2208 const int minBits = nbBitsBaseline - maxWeight;
2209 const U32 level = DDesc.nbBytes;
2210 U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1];
2211 U32 symbolStartPos, s;
2212
2213 /* local rankVal, will be modified */
2214 memcpy(rankVal, rankValOrigin[consumed], sizeof(rankVal));
2215
2216 /* fill skipped values */
2217 if (minWeight>1)
2218 {
2219 U32 i;
2220 const U32 skipSize = rankVal[minWeight];
2221 for (i = 0; i < skipSize; i++)
2222 {
2223 DSequence[i] = baseSeq;
2224 DDescription[i] = DDesc;
2225 }
2226 }
2227
2228 /* fill DTable */
2229 DDesc.nbBytes++;
2230 symbolStartPos = rankStart[minWeight];
2231 for (s=symbolStartPos; s<sortedListSize; s++)
2232 {
2233 const BYTE symbol = sortedSymbols[s].symbol;
2234 const U32 weight = sortedSymbols[s].weight; /* >= 1 (sorted) */
2235 const int nbBits = nbBitsBaseline - weight; /* >= 1 (by construction) */
2236 const int totalBits = consumed+nbBits;
2237 const U32 start = rankVal[weight];
2238 const U32 length = 1 << (sizeLog-nbBits);
2239 baseSeq.byte[level] = symbol;
2240 DDesc.nbBits = (BYTE)totalBits;
2241
2242 if ((level<3) && (sizeLog-totalBits >= minBits)) /* enough room for another symbol */
2243 {
2244 int nextMinWeight = totalBits + scaleLog;
2245 if (nextMinWeight < 1) nextMinWeight = 1;
2246 HUF_fillDTableX6LevelN(DDescription+start, DSequence+start, sizeLog-nbBits,
2247 rankValOrigin, totalBits, nextMinWeight, maxWeight,
2248 sortedSymbols, sortedListSize, rankStart,
2249 nbBitsBaseline, baseSeq, DDesc); /* recursive (max : level 3) */
2250 }
2251 else
2252 {
2253 U32 i;
2254 const U32 end = start + length;
2255 for (i = start; i < end; i++)
2256 {
2257 DDescription[i] = DDesc;
2258 DSequence[i] = baseSeq;
2259 }
2260 }
2261 rankVal[weight] += length;
2262 }
2263 }
2264
2265
2266 /* note : same preparation as X4 */
2267 static size_t HUF_readDTableX6 (U32* DTable, const void* src, size_t srcSize)
2268 {
2269 BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1];
2270 sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1];
2271 U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 };
2272 U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 };
2273 U32* const rankStart = rankStart0+1;
2274 U32 tableLog, maxW, sizeOfSort, nbSymbols;
2275 rankVal_t rankVal;
2276 const U32 memLog = DTable[0];
2277 const BYTE* ip = (const BYTE*) src;
2278 size_t iSize = ip[0];
2279
2280 if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge);
2281     //memset(weightList, 0, sizeof(weightList));   /* is not necessary, even though some analyzers complain ... */
2282
2283 iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
2284 if (HUF_isError(iSize)) return iSize;
2285
2286 /* check result */
2287 if (tableLog > memLog) return ERROR(tableLog_tooLarge); /* DTable is too small */
2288
2289 /* find maxWeight */
2290 for (maxW = tableLog; rankStats[maxW]==0; maxW--)
2291 { if (!maxW) return ERROR(GENERIC); } /* necessarily finds a solution before maxW==0 */
2292
2293
2294 /* Get start index of each weight */
2295 {
2296 U32 w, nextRankStart = 0;
2297 for (w=1; w<=maxW; w++)
2298 {
2299 U32 current = nextRankStart;
2300 nextRankStart += rankStats[w];
2301 rankStart[w] = current;
2302 }
2303 rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/
2304 sizeOfSort = nextRankStart;
2305 }
2306
2307 /* sort symbols by weight */
2308 {
2309 U32 s;
2310 for (s=0; s<nbSymbols; s++)
2311 {
2312 U32 w = weightList[s];
2313 U32 r = rankStart[w]++;
2314 sortedSymbol[r].symbol = (BYTE)s;
2315 sortedSymbol[r].weight = (BYTE)w;
2316 }
2317 rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */
2318 }
2319
2320 /* Build rankVal */
2321 {
2322 const U32 minBits = tableLog+1 - maxW;
2323 U32 nextRankVal = 0;
2324 U32 w, consumed;
2325 const int rescale = (memLog-tableLog) - 1; /* tableLog <= memLog */
2326 U32* rankVal0 = rankVal[0];
2327 for (w=1; w<=maxW; w++)
2328 {
2329 U32 current = nextRankVal;
2330 nextRankVal += rankStats[w] << (w+rescale);
2331 rankVal0[w] = current;
2332 }
2333 for (consumed = minBits; consumed <= memLog - minBits; consumed++)
2334 {
2335 U32* rankValPtr = rankVal[consumed];
2336 for (w = 1; w <= maxW; w++)
2337 {
2338 rankValPtr[w] = rankVal0[w] >> consumed;
2339 }
2340 }
2341 }
2342
2343
2344 /* fill tables */
2345 {
2346 void* ptr = DTable+1;
2347 HUF_DDescX6* DDescription = (HUF_DDescX6*)(ptr);
2348 void* dSeqStart = DTable + 1 + ((size_t)1<<(memLog-1));
2349 HUF_DSeqX6* DSequence = (HUF_DSeqX6*)(dSeqStart);
2350 HUF_DSeqX6 DSeq;
2351 HUF_DDescX6 DDesc;
2352 DSeq.sequence = 0;
2353 DDesc.nbBits = 0;
2354 DDesc.nbBytes = 0;
2355 HUF_fillDTableX6LevelN(DDescription, DSequence, memLog,
2356 (const U32 (*)[HUF_ABSOLUTEMAX_TABLELOG + 1])rankVal, 0, 1, maxW,
2357 sortedSymbol, sizeOfSort, rankStart0,
2358 tableLog+1, DSeq, DDesc);
2359 }
2360
2361 return iSize;
2362 }
2363
2364
2365 static U32 HUF_decodeSymbolX6(void* op, BIT_DStream_t* DStream, const HUF_DDescX6* dd, const HUF_DSeqX6* ds, const U32 dtLog)
2366 {
2367 const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
2368 memcpy(op, ds+val, sizeof(HUF_DSeqX6));
2369 BIT_skipBits(DStream, dd[val].nbBits);
2370 return dd[val].nbBytes;
2371 }
2372
2373 static U32 HUF_decodeLastSymbolsX6(void* op, const U32 maxL, BIT_DStream_t* DStream,
2374 const HUF_DDescX6* dd, const HUF_DSeqX6* ds, const U32 dtLog)
2375 {
2376 const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
2377 U32 length = dd[val].nbBytes;
2378 if (length <= maxL)
2379 {
2380 memcpy(op, ds+val, length);
2381 BIT_skipBits(DStream, dd[val].nbBits);
2382 return length;
2383 }
2384 memcpy(op, ds+val, maxL);
2385 if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8))
2386 {
2387 BIT_skipBits(DStream, dd[val].nbBits);
2388 if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
2389 DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
2390 }
2391 return maxL;
2392 }
2393
2394
2395 #define HUF_DECODE_SYMBOLX6_0(ptr, DStreamPtr) \
2396 ptr += HUF_decodeSymbolX6(ptr, DStreamPtr, dd, ds, dtLog)
2397
2398 #define HUF_DECODE_SYMBOLX6_1(ptr, DStreamPtr) \
2399 if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \
2400 HUF_DECODE_SYMBOLX6_0(ptr, DStreamPtr)
2401
2402 #define HUF_DECODE_SYMBOLX6_2(ptr, DStreamPtr) \
2403 if (MEM_64bits()) \
2404 HUF_DECODE_SYMBOLX6_0(ptr, DStreamPtr)
2405
2406 static inline size_t HUF_decodeStreamX6(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const U32* DTable, const U32 dtLog)
2407 {
2408 const void* ddPtr = DTable+1;
2409 const HUF_DDescX6* dd = (const HUF_DDescX6*)(ddPtr);
2410 const void* dsPtr = DTable + 1 + ((size_t)1<<(dtLog-1));
2411 const HUF_DSeqX6* ds = (const HUF_DSeqX6*)(dsPtr);
2412 BYTE* const pStart = p;
2413
2414 /* up to 16 symbols at a time */
2415 while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-16))
2416 {
2417 HUF_DECODE_SYMBOLX6_2(p, bitDPtr);
2418 HUF_DECODE_SYMBOLX6_1(p, bitDPtr);
2419 HUF_DECODE_SYMBOLX6_2(p, bitDPtr);
2420 HUF_DECODE_SYMBOLX6_0(p, bitDPtr);
2421 }
2422
2423 /* closer to the end, up to 4 symbols at a time */
2424 while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4))
2425 HUF_DECODE_SYMBOLX6_0(p, bitDPtr);
2426
2427 while (p <= pEnd-4)
2428 HUF_DECODE_SYMBOLX6_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
2429
2430 while (p < pEnd)
2431 p += HUF_decodeLastSymbolsX6(p, (U32)(pEnd-p), bitDPtr, dd, ds, dtLog);
2432
2433 return p-pStart;
2434 }
2435
2436
2437
2438 static size_t HUF_decompress4X6_usingDTable(
2439 void* dst, size_t dstSize,
2440 const void* cSrc, size_t cSrcSize,
2441 const U32* DTable)
2442 {
2443 if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
2444
2445 {
2446 const BYTE* const istart = (const BYTE*) cSrc;
2447 BYTE* const ostart = (BYTE*) dst;
2448 BYTE* const oend = ostart + dstSize;
2449
2450 const U32 dtLog = DTable[0];
2451 const void* ddPtr = DTable+1;
2452 const HUF_DDescX6* dd = (const HUF_DDescX6*)(ddPtr);
2453 const void* dsPtr = DTable + 1 + ((size_t)1<<(dtLog-1));
2454 const HUF_DSeqX6* ds = (const HUF_DSeqX6*)(dsPtr);
2455 size_t errorCode;
2456
2457 /* Init */
2458 BIT_DStream_t bitD1;
2459 BIT_DStream_t bitD2;
2460 BIT_DStream_t bitD3;
2461 BIT_DStream_t bitD4;
2462 const size_t length1 = MEM_readLE16(istart);
2463 const size_t length2 = MEM_readLE16(istart+2);
2464 const size_t length3 = MEM_readLE16(istart+4);
2465 size_t length4;
2466 const BYTE* const istart1 = istart + 6; /* jumpTable */
2467 const BYTE* const istart2 = istart1 + length1;
2468 const BYTE* const istart3 = istart2 + length2;
2469 const BYTE* const istart4 = istart3 + length3;
2470 const size_t segmentSize = (dstSize+3) / 4;
2471 BYTE* const opStart2 = ostart + segmentSize;
2472 BYTE* const opStart3 = opStart2 + segmentSize;
2473 BYTE* const opStart4 = opStart3 + segmentSize;
2474 BYTE* op1 = ostart;
2475 BYTE* op2 = opStart2;
2476 BYTE* op3 = opStart3;
2477 BYTE* op4 = opStart4;
2478 U32 endSignal;
2479
2480 length4 = cSrcSize - (length1 + length2 + length3 + 6);
2481 if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
2482 errorCode = BIT_initDStream(&bitD1, istart1, length1);
2483 if (HUF_isError(errorCode)) return errorCode;
2484 errorCode = BIT_initDStream(&bitD2, istart2, length2);
2485 if (HUF_isError(errorCode)) return errorCode;
2486 errorCode = BIT_initDStream(&bitD3, istart3, length3);
2487 if (HUF_isError(errorCode)) return errorCode;
2488 errorCode = BIT_initDStream(&bitD4, istart4, length4);
2489 if (HUF_isError(errorCode)) return errorCode;
2490
2491 /* 16-64 symbols per loop (4-16 symbols per stream) */
2492 endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
2493 for ( ; (op3 <= opStart4) && (endSignal==BIT_DStream_unfinished) && (op4<=(oend-16)) ; )
2494 {
2495 HUF_DECODE_SYMBOLX6_2(op1, &bitD1);
2496 HUF_DECODE_SYMBOLX6_2(op2, &bitD2);
2497 HUF_DECODE_SYMBOLX6_2(op3, &bitD3);
2498 HUF_DECODE_SYMBOLX6_2(op4, &bitD4);
2499 HUF_DECODE_SYMBOLX6_1(op1, &bitD1);
2500 HUF_DECODE_SYMBOLX6_1(op2, &bitD2);
2501 HUF_DECODE_SYMBOLX6_1(op3, &bitD3);
2502 HUF_DECODE_SYMBOLX6_1(op4, &bitD4);
2503 HUF_DECODE_SYMBOLX6_2(op1, &bitD1);
2504 HUF_DECODE_SYMBOLX6_2(op2, &bitD2);
2505 HUF_DECODE_SYMBOLX6_2(op3, &bitD3);
2506 HUF_DECODE_SYMBOLX6_2(op4, &bitD4);
2507 HUF_DECODE_SYMBOLX6_0(op1, &bitD1);
2508 HUF_DECODE_SYMBOLX6_0(op2, &bitD2);
2509 HUF_DECODE_SYMBOLX6_0(op3, &bitD3);
2510 HUF_DECODE_SYMBOLX6_0(op4, &bitD4);
2511
2512 endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
2513 }
2514
2515 /* check corruption */
2516 if (op1 > opStart2) return ERROR(corruption_detected);
2517 if (op2 > opStart3) return ERROR(corruption_detected);
2518 if (op3 > opStart4) return ERROR(corruption_detected);
2519         /* note : op4 already verified within main loop */
2520
2521 /* finish bitStreams one by one */
2522 HUF_decodeStreamX6(op1, &bitD1, opStart2, DTable, dtLog);
2523 HUF_decodeStreamX6(op2, &bitD2, opStart3, DTable, dtLog);
2524 HUF_decodeStreamX6(op3, &bitD3, opStart4, DTable, dtLog);
2525 HUF_decodeStreamX6(op4, &bitD4, oend, DTable, dtLog);
2526
2527 /* check */
2528 endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
2529 if (!endSignal) return ERROR(corruption_detected);
2530
2531 /* decoded size */
2532 return dstSize;
2533 }
2534 }
2535
2536
2537 static size_t HUF_decompress4X6 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
2538 {
2539 HUF_CREATE_STATIC_DTABLEX6(DTable, HUF_MAX_TABLELOG);
2540 const BYTE* ip = (const BYTE*) cSrc;
2541
2542 size_t hSize = HUF_readDTableX6 (DTable, cSrc, cSrcSize);
2543 if (HUF_isError(hSize)) return hSize;
2544 if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
2545 ip += hSize;
2546 cSrcSize -= hSize;
2547
2548 return HUF_decompress4X6_usingDTable (dst, dstSize, ip, cSrcSize, DTable);
2549 }
2550
2551
2552 /**********************************/
2553 /* Generic decompression selector */
2554 /**********************************/
2555
2556 typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
2557 static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
2558 {
2559 /* single, double, quad */
2560 {{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */
2561 {{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */
2562 {{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */
2563 {{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */
2564 {{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */
2565 {{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */
2566 {{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */
2567 {{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */
2568 {{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */
2569 {{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */
2570 {{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */
2571 {{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */
2572 {{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */
2573 {{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */
2574 {{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */
2575 {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */
2576 };
2577
2578 typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
2579
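/* Worked example of the selection heuristic below, assuming cSrcSize==50 KB and
   dstSize==100 KB : Q = 50*16/100 = 8 and D256 = 102400>>8 = 400, giving estimated
   costs 926+128*400 = 52126 (single), 1613+75*400 = 31613 (double) and
   2730+75*400 = 32730 (quad). After the cache-size penalties (+1/16th and +1/8th)
   the double-symbol decoder wins, so HUF_decompress4X4 would be selected. */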
2580 static size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
2581 {
2582 static const decompressionAlgo decompress[3] = { HUF_decompress4X2, HUF_decompress4X4, HUF_decompress4X6 };
2583 /* estimate decompression time */
2584 U32 Q;
2585 const U32 D256 = (U32)(dstSize >> 8);
2586 U32 Dtime[3];
2587 U32 algoNb = 0;
2588 int n;
2589
2590 /* validation checks */
2591 if (dstSize == 0) return ERROR(dstSize_tooSmall);
2592 if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
2593 if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
2594 if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
2595
2596 /* decoder timing evaluation */
2597 Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */
2598 for (n=0; n<3; n++)
2599 Dtime[n] = algoTime[Q][n].tableTime + (algoTime[Q][n].decode256Time * D256);
2600
2601 Dtime[1] += Dtime[1] >> 4; Dtime[2] += Dtime[2] >> 3; /* advantage to algorithms using less memory, for cache eviction */
2602
2603 if (Dtime[1] < Dtime[0]) algoNb = 1;
2604 if (Dtime[2] < Dtime[algoNb]) algoNb = 2;
2605
2606 return decompress[algoNb](dst, dstSize, cSrc, cSrcSize);
2607
2608 //return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize); /* multi-streams single-symbol decoding */
2609 //return HUF_decompress4X4(dst, dstSize, cSrc, cSrcSize); /* multi-streams double-symbols decoding */
2610 //return HUF_decompress4X6(dst, dstSize, cSrc, cSrcSize); /* multi-streams quad-symbols decoding */
2611 }
2612 /*
2613 zstd - standard compression library
2614 Copyright (C) 2014-2015, Yann Collet.
2615
2616 BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
2617
2618 Redistribution and use in source and binary forms, with or without
2619 modification, are permitted provided that the following conditions are
2620 met:
2621 * Redistributions of source code must retain the above copyright
2622 notice, this list of conditions and the following disclaimer.
2623 * Redistributions in binary form must reproduce the above
2624 copyright notice, this list of conditions and the following disclaimer
2625 in the documentation and/or other materials provided with the
2626 distribution.
2627 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
2628 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
2629 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
2630 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
2631 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
2632 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
2633 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
2634 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
2635 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
2636 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
2637 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2638
2639 You can contact the author at :
2640 - zstd source repository : https://github.com/Cyan4973/zstd
2641    - zstd public forum : https://groups.google.com/forum/#!forum/lz4c
2642 */
2643
2644 /* ***************************************************************
2645 * Tuning parameters
2646 *****************************************************************/
2647 /*!
2648 * MEMORY_USAGE :
2649 * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
2650 * Increasing memory usage improves compression ratio
2651 * Reduced memory usage can improve speed, due to cache effect
2652 */
2653 #define ZSTD_MEMORY_USAGE 17
2654
2655 /*!
2656 * HEAPMODE :
2657 * Select how default compression functions will allocate memory for their hash table,
2658 * in memory stack (0, fastest), or in memory heap (1, requires malloc())
2659  * Note that the compression context is fairly large; as a consequence, heap memory is recommended.
2660 */
2661 #ifndef ZSTD_HEAPMODE
2662 # define ZSTD_HEAPMODE 1
2663 #endif /* ZSTD_HEAPMODE */
2664
2665 /*!
2666 * LEGACY_SUPPORT :
2667 * decompressor can decode older formats (starting from Zstd 0.1+)
2668 */
2669 #ifndef ZSTD_LEGACY_SUPPORT
2670 # define ZSTD_LEGACY_SUPPORT 1
2671 #endif
2672
2673
2674 /* *******************************************************
2675 * Includes
2676 *********************************************************/
2677 #include <stdlib.h> /* calloc */
2678 #include <string.h> /* memcpy, memmove */
2679 #include <stdio.h> /* debug : printf */
2680
2681
2682 /* *******************************************************
2683 * Compiler specifics
2684 *********************************************************/
2685 #ifdef __AVX2__
2686 # include <immintrin.h> /* AVX2 intrinsics */
2687 #endif
2688
2689 #ifdef _MSC_VER /* Visual Studio */
2690 # include <intrin.h> /* For Visual 2005 */
2691 # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
2692 # pragma warning(disable : 4324) /* disable: C4324: padded structure */
2693 #endif
2694
2695
2696 /* *******************************************************
2697 * Constants
2698 *********************************************************/
2699 #define HASH_LOG (ZSTD_MEMORY_USAGE - 2)
2700 #define HASH_TABLESIZE (1 << HASH_LOG)
2701 #define HASH_MASK (HASH_TABLESIZE - 1)
2702
2703 #define KNUTH 2654435761
2704
2705 #define BIT7 128
2706 #define BIT6 64
2707 #define BIT5 32
2708 #define BIT4 16
2709 #define BIT1 2
2710 #define BIT0 1
2711
2712 #define KB *(1 <<10)
2713 #define MB *(1 <<20)
2714 #define GB *(1U<<30)
2715
2716 #define BLOCKSIZE (128 KB) /* define, for static allocation */
2717 #define MIN_SEQUENCES_SIZE (2 /*seqNb*/ + 2 /*dumps*/ + 3 /*seqTables*/ + 1 /*bitStream*/)
2718 #define MIN_CBLOCK_SIZE (3 /*litCSize*/ + MIN_SEQUENCES_SIZE)
2719 #define IS_RAW BIT0
2720 #define IS_RLE BIT1
2721
2722 #define WORKPLACESIZE (BLOCKSIZE*3)
2723 #define MINMATCH 4
2724 #define MLbits 7
2725 #define LLbits 6
2726 #define Offbits 5
2727 #define MaxML ((1<<MLbits )-1)
2728 #define MaxLL ((1<<LLbits )-1)
2729 #define MaxOff 31
2730 #define LitFSELog 11
2731 #define MLFSELog 10
2732 #define LLFSELog 10
2733 #define OffFSELog 9
2734 #define MAX(a,b) ((a)<(b)?(b):(a))
2735 #define MaxSeq MAX(MaxLL, MaxML)
2736
2737 #define LITERAL_NOENTROPY 63
2738 #define COMMAND_NOENTROPY 7 /* to remove */
2739
2740 #define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
2741
2742 static const size_t ZSTD_blockHeaderSize = 3;
2743 static const size_t ZSTD_frameHeaderSize = 4;
2744
2745
2746 /* *******************************************************
2747 * Memory operations
2748 **********************************************************/
2749 static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
2750
2751 static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
2752
2753 #define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
2754
2755 /*! ZSTD_wildcopy : custom version of memcpy(), can copy up to 7-8 bytes too many */
2756 static void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
2757 {
2758 const BYTE* ip = (const BYTE*)src;
2759 BYTE* op = (BYTE*)dst;
2760 BYTE* const oend = op + length;
2761 do COPY8(op, ip) while (op < oend);
2762 }
2763
2764
2765 /* **************************************
2766 * Local structures
2767 ****************************************/
2768 typedef enum { bt_compressed, bt_raw, bt_rle, bt_end } blockType_t;
2769
2770 typedef struct
2771 {
2772 blockType_t blockType;
2773 U32 origSize;
2774 } blockProperties_t;
2775
2776 typedef struct {
2777 void* buffer;
2778 U32* offsetStart;
2779 U32* offset;
2780 BYTE* offCodeStart;
2781 BYTE* offCode;
2782 BYTE* litStart;
2783 BYTE* lit;
2784 BYTE* litLengthStart;
2785 BYTE* litLength;
2786 BYTE* matchLengthStart;
2787 BYTE* matchLength;
2788 BYTE* dumpsStart;
2789 BYTE* dumps;
2790 } seqStore_t;
2791
2792
2793 /* *************************************
2794 * Error Management
2795 ***************************************/
2796 /*! ZSTD_isError
2797 * tells if a return value is an error code */
2798 static unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
2799
2800
2801
2802 /* *************************************************************
2803 * Decompression section
2804 ***************************************************************/
2805 struct ZSTD_DCtx_s
2806 {
2807 U32 LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)];
2808 U32 OffTable[FSE_DTABLE_SIZE_U32(OffFSELog)];
2809 U32 MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)];
2810 void* previousDstEnd;
2811 void* base;
2812 size_t expected;
2813 blockType_t bType;
2814 U32 phase;
2815 const BYTE* litPtr;
2816 size_t litSize;
2817 BYTE litBuffer[BLOCKSIZE + 8 /* margin for wildcopy */];
2818 }; /* typedef'd to ZSTD_Dctx within "zstd_static.h" */
2819
2820
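/* Block header, as parsed below : 3 bytes, where the top 2 bits of the first byte give
   the block type and the low 3 bits plus the next two bytes form the compressed size.
   For example, header bytes {0x01, 0x02, 0x03} describe a bt_compressed block of
   0x10203 bytes. The function returns 0 for bt_end, 1 for bt_rle (origSize then holds
   the regenerated size), and cSize otherwise. */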
2821 static size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
2822 {
2823 const BYTE* const in = (const BYTE* const)src;
2824 BYTE headerFlags;
2825 U32 cSize;
2826
2827 if (srcSize < 3) return ERROR(srcSize_wrong);
2828
2829 headerFlags = *in;
2830 cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16);
2831
2832 bpPtr->blockType = (blockType_t)(headerFlags >> 6);
2833 bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0;
2834
2835 if (bpPtr->blockType == bt_end) return 0;
2836 if (bpPtr->blockType == bt_rle) return 1;
2837 return cSize;
2838 }
2839
2840 static size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
2841 {
2842 if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall);
2843 if (srcSize > 0) {
2844 memcpy(dst, src, srcSize);
2845 }
2846 return srcSize;
2847 }
2848
2849
2850 /** ZSTD_decompressLiterals
2851 @return : nb of bytes read from src, or an error code*/
2852 static size_t ZSTD_decompressLiterals(void* dst, size_t* maxDstSizePtr,
2853 const void* src, size_t srcSize)
2854 {
2855 const BYTE* ip = (const BYTE*)src;
2856
2857 const size_t litSize = (MEM_readLE32(src) & 0x1FFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
2858 const size_t litCSize = (MEM_readLE32(ip+2) & 0xFFFFFF) >> 5; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
2859
2860 if (litSize > *maxDstSizePtr) return ERROR(corruption_detected);
2861 if (litCSize + 5 > srcSize) return ERROR(corruption_detected);
2862
2863 if (HUF_isError(HUF_decompress(dst, litSize, ip+5, litCSize))) return ERROR(corruption_detected);
2864
2865 *maxDstSizePtr = litSize;
2866 return litCSize + 5;
2867 }
2868
2869
2870 /** ZSTD_decodeLiteralsBlock
2871 @return : nb of bytes read from src (< srcSize )*/
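/* The low 2 bits of the first literal byte select the mode handled in the switch below :
   0 means Huffman-compressed literals (decoded into litBuffer), IS_RAW means stored
   literals (referenced in place when far enough from the input end, copied otherwise),
   and IS_RLE means a single byte repeated litSize times. */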
2872 static size_t ZSTD_decodeLiteralsBlock(void* ctx,
2873 const void* src, size_t srcSize)
2874 {
2875 ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx;
2876 const BYTE* const istart = (const BYTE* const)src;
2877
2878 /* any compressed block with literals segment must be at least this size */
2879 if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
2880
2881 switch(*istart & 3)
2882 {
2883 default:
2884 case 0:
2885 {
2886 size_t litSize = BLOCKSIZE;
2887 const size_t readSize = ZSTD_decompressLiterals(dctx->litBuffer, &litSize, src, srcSize);
2888 dctx->litPtr = dctx->litBuffer;
2889 dctx->litSize = litSize;
2890 memset(dctx->litBuffer + dctx->litSize, 0, 8);
2891 return readSize; /* works if it's an error too */
2892 }
2893 case IS_RAW:
2894 {
2895 const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
2896 if (litSize > srcSize-11) /* risk of reading too far with wildcopy */
2897 {
2898 if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
2899 if (litSize > srcSize-3) return ERROR(corruption_detected);
2900 memcpy(dctx->litBuffer, istart, litSize);
2901 dctx->litPtr = dctx->litBuffer;
2902 dctx->litSize = litSize;
2903 memset(dctx->litBuffer + dctx->litSize, 0, 8);
2904 return litSize+3;
2905 }
2906 /* direct reference into compressed stream */
2907 dctx->litPtr = istart+3;
2908 dctx->litSize = litSize;
2909 return litSize+3;
2910 }
2911 case IS_RLE:
2912 {
2913 const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2; /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
2914 if (litSize > BLOCKSIZE) return ERROR(corruption_detected);
2915 memset(dctx->litBuffer, istart[3], litSize + 8);
2916 dctx->litPtr = dctx->litBuffer;
2917 dctx->litSize = litSize;
2918 return 4;
2919 }
2920 }
2921 }
2922
2923
ZSTD_decodeSeqHeaders(int * nbSeq,const BYTE ** dumpsPtr,size_t * dumpsLengthPtr,FSE_DTable * DTableLL,FSE_DTable * DTableML,FSE_DTable * DTableOffb,const void * src,size_t srcSize)2924 static size_t ZSTD_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr,
2925 FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb,
2926 const void* src, size_t srcSize)
2927 {
2928 const BYTE* const istart = (const BYTE* const)src;
2929 const BYTE* ip = istart;
2930 const BYTE* const iend = istart + srcSize;
2931 U32 LLtype, Offtype, MLtype;
2932 U32 LLlog, Offlog, MLlog;
2933 size_t dumpsLength;
2934
2935 /* check */
2936 if (srcSize < 5) return ERROR(srcSize_wrong);
2937
2938 /* SeqHead */
2939 *nbSeq = MEM_readLE16(ip); ip+=2;
2940 LLtype = *ip >> 6;
2941 Offtype = (*ip >> 4) & 3;
2942 MLtype = (*ip >> 2) & 3;
2943 if (*ip & 2)
2944 {
2945 dumpsLength = ip[2];
2946 dumpsLength += ip[1] << 8;
2947 ip += 3;
2948 }
2949 else
2950 {
2951 dumpsLength = ip[1];
2952 dumpsLength += (ip[0] & 1) << 8;
2953 ip += 2;
2954 }
2955 *dumpsPtr = ip;
2956 ip += dumpsLength;
2957 *dumpsLengthPtr = dumpsLength;
2958
2959 /* check */
2960 if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */
2961
2962 /* sequences */
2963 {
2964 S16 norm[MaxML+1]; /* assumption : MaxML >= MaxLL and MaxOff */
2965 size_t headerSize;
2966
2967 /* Build DTables */
2968 switch(LLtype)
2969 {
2970 case bt_rle :
2971 LLlog = 0;
2972 FSE_buildDTable_rle(DTableLL, *ip++); break;
2973 case bt_raw :
2974 LLlog = LLbits;
2975 FSE_buildDTable_raw(DTableLL, LLbits); break;
2976 default :
2977 { U32 max = MaxLL;
2978 headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip);
2979 if (FSE_isError(headerSize)) return ERROR(GENERIC);
2980 if (LLlog > LLFSELog) return ERROR(corruption_detected);
2981 ip += headerSize;
2982 FSE_buildDTable(DTableLL, norm, max, LLlog);
2983 } }
2984
2985 switch(Offtype)
2986 {
2987 case bt_rle :
2988 Offlog = 0;
2989 if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
2990 FSE_buildDTable_rle(DTableOffb, *ip++ & MaxOff); /* if *ip > MaxOff, data is corrupted */
2991 break;
2992 case bt_raw :
2993 Offlog = Offbits;
2994 FSE_buildDTable_raw(DTableOffb, Offbits); break;
2995 default :
2996 { U32 max = MaxOff;
2997 headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip);
2998 if (FSE_isError(headerSize)) return ERROR(GENERIC);
2999 if (Offlog > OffFSELog) return ERROR(corruption_detected);
3000 ip += headerSize;
3001 FSE_buildDTable(DTableOffb, norm, max, Offlog);
3002 } }
3003
3004 switch(MLtype)
3005 {
3006 case bt_rle :
3007 MLlog = 0;
3008 if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */
3009 FSE_buildDTable_rle(DTableML, *ip++); break;
3010 case bt_raw :
3011 MLlog = MLbits;
3012 FSE_buildDTable_raw(DTableML, MLbits); break;
3013 default :
3014 { U32 max = MaxML;
3015 headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip);
3016 if (FSE_isError(headerSize)) return ERROR(GENERIC);
3017 if (MLlog > MLFSELog) return ERROR(corruption_detected);
3018 ip += headerSize;
3019 FSE_buildDTable(DTableML, norm, max, MLlog);
3020 } } }
3021
3022 return ip-istart;
3023 }
3024
3025
3026 typedef struct {
3027 size_t litLength;
3028 size_t offset;
3029 size_t matchLength;
3030 } seq_t;
3031
3032 typedef struct {
3033 BIT_DStream_t DStream;
3034 FSE_DState_t stateLL;
3035 FSE_DState_t stateOffb;
3036 FSE_DState_t stateML;
3037 size_t prevOffset;
3038 const BYTE* dumps;
3039 const BYTE* dumpsEnd;
3040 } seqState_t;
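/* seq_t describes one decoded sequence : litLength literals copied verbatim, followed by a match of
   matchLength bytes taken `offset` bytes back in the output.
   seqState_t carries the decoding state shared across sequences : the backward bitstream, one FSE
   state per symbol type (literal length, offset, match length), the previously seen offset (re-used
   when the offset code is 0), and the "dumps" area holding lengths too large for their base symbol. */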


static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState)
{
    size_t litLength;
    size_t prevOffset;
    size_t offset;
    size_t matchLength;
    const BYTE* dumps = seqState->dumps;
    const BYTE* const de = seqState->dumpsEnd;

    /* Literal length */
    litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream));
    prevOffset = litLength ? seq->offset : seqState->prevOffset;
    seqState->prevOffset = seq->offset;
    if (litLength == MaxLL)
    {
        const U32 add = dumps<de ? *dumps++ : 0;
        if (add < 255) litLength += add;
        else if (dumps + 3 <= de)
        {
            litLength = MEM_readLE24(dumps);
            dumps += 3;
        }
        if (dumps >= de) dumps = de-1;   /* late correction, to avoid read overflow (data is now corrupted anyway) */
    }

    /* Offset */
    {
        static const size_t offsetPrefix[MaxOff+1] = {   /* note : size_t faster than U32 */
                1 /*fake*/, 1, 2, 4, 8, 16, 32, 64, 128, 256,
                512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144,
                524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, /*fake*/ 1, 1, 1, 1, 1 };
        U32 offsetCode, nbBits;
        offsetCode = FSE_decodeSymbol(&(seqState->stateOffb), &(seqState->DStream));   /* <= maxOff, by table construction */
        if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
        nbBits = offsetCode - 1;
        if (offsetCode==0) nbBits = 0;   /* cmove */
        offset = offsetPrefix[offsetCode] + BIT_readBits(&(seqState->DStream), nbBits);
        if (MEM_32bits()) BIT_reloadDStream(&(seqState->DStream));
        if (offsetCode==0) offset = prevOffset;   /* cmove */
    }

    /* MatchLength */
    matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream));
    if (matchLength == MaxML)
    {
        const U32 add = dumps<de ? *dumps++ : 0;
        if (add < 255) matchLength += add;
        else if (dumps + 3 <= de)
        {
            matchLength = MEM_readLE24(dumps);
            dumps += 3;
        }
        if (dumps >= de) dumps = de-1;   /* late correction, to avoid read overflow (data is now corrupted anyway) */
    }
    matchLength += MINMATCH;

    /* save result */
    seq->litLength = litLength;
    seq->offset = offset;
    seq->matchLength = matchLength;
    seqState->dumps = dumps;
}


static size_t ZSTD_execSequence(BYTE* op,
                                seq_t sequence,
                                const BYTE** litPtr, const BYTE* const litLimit,
                                BYTE* const base, BYTE* const oend)
{
    static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4};   /* added */
    static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11};   /* subtracted */
    const BYTE* const ostart = op;
    BYTE* const oLitEnd = op + sequence.litLength;
    BYTE* const oMatchEnd = op + sequence.litLength + sequence.matchLength;   /* risk : address space overflow (32-bits) */
    BYTE* const oend_8 = oend-8;
    const BYTE* const litEnd = *litPtr + sequence.litLength;

    /* checks */
    if (oLitEnd > oend_8) return ERROR(dstSize_tooSmall);   /* last match must start at a minimum distance of 8 from oend */
    if (oMatchEnd > oend) return ERROR(dstSize_tooSmall);   /* overwrite beyond dst buffer */
    if (litEnd > litLimit) return ERROR(corruption_detected);   /* overRead beyond lit buffer */

    /* copy Literals */
    ZSTD_wildcopy(op, *litPtr, sequence.litLength);   /* note : oLitEnd <= oend-8 : no risk of overwrite beyond oend */
    op = oLitEnd;
    *litPtr = litEnd;   /* update for next sequence */

    /* copy Match */
    {
        const BYTE* match = op - sequence.offset;

        /* check */
        if (sequence.offset > (size_t)op) return ERROR(corruption_detected);   /* address space overflow test (this test seems kept by clang optimizer) */
        //if (match > op) return ERROR(corruption_detected);   /* address space overflow test (is clang optimizer removing this test ?) */
        if (match < base) return ERROR(corruption_detected);

        /* close range match, overlap */
        if (sequence.offset < 8)
        {
            const int dec64 = dec64table[sequence.offset];
            op[0] = match[0];
            op[1] = match[1];
            op[2] = match[2];
            op[3] = match[3];
            match += dec32table[sequence.offset];
            ZSTD_copy4(op+4, match);
            match -= dec64;
        }
        else
        {
            ZSTD_copy8(op, match);
        }
        op += 8; match += 8;

        if (oMatchEnd > oend-(16-MINMATCH))
        {
            if (op < oend_8)
            {
                ZSTD_wildcopy(op, match, oend_8 - op);
                match += oend_8 - op;
                op = oend_8;
            }
            while (op < oMatchEnd) *op++ = *match++;
        }
        else
        {
            ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8);   /* works even if matchLength < 8 */
        }
    }

    return oMatchEnd - ostart;
}
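/* Note on the close-range path above : when offset < 8, a plain 8-byte copy would read bytes that
   have not been written yet. The first 4 bytes are therefore copied one at a time, `match` is then
   advanced by dec32table[offset] so the following 4-byte copy still reproduces the repeating pattern,
   and finally pulled back by dec64table[offset] so that the remaining wide copies read from at least
   8 bytes behind `op` while preserving the pattern's period. */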

static size_t ZSTD_decompressSequences(
                               void* ctx,
                               void* dst, size_t maxDstSize,
                         const void* seqStart, size_t seqSize)
{
    ZSTD_DCtx* dctx = (ZSTD_DCtx*)ctx;
    const BYTE* ip = (const BYTE*)seqStart;
    const BYTE* const iend = ip + seqSize;
    BYTE* const ostart = (BYTE* const)dst;
    BYTE* op = ostart;
    BYTE* const oend = ostart + maxDstSize;
    size_t errorCode, dumpsLength;
    const BYTE* litPtr = dctx->litPtr;
    const BYTE* const litEnd = litPtr + dctx->litSize;
    int nbSeq;
    const BYTE* dumps;
    U32* DTableLL = dctx->LLTable;
    U32* DTableML = dctx->MLTable;
    U32* DTableOffb = dctx->OffTable;
    BYTE* const base = (BYTE*) (dctx->base);

    /* Build Decoding Tables */
    errorCode = ZSTD_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength,
                                      DTableLL, DTableML, DTableOffb,
                                      ip, iend-ip);
    if (ZSTD_isError(errorCode)) return errorCode;
    ip += errorCode;

    /* Regen sequences */
    {
        seq_t sequence;
        seqState_t seqState;

        memset(&sequence, 0, sizeof(sequence));
        seqState.dumps = dumps;
        seqState.dumpsEnd = dumps + dumpsLength;
        seqState.prevOffset = 1;
        errorCode = BIT_initDStream(&(seqState.DStream), ip, iend-ip);
        if (ERR_isError(errorCode)) return ERROR(corruption_detected);
        FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL);
        FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb);
        FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML);

        for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (nbSeq>0) ; )
        {
            size_t oneSeqSize;
            nbSeq--;
            ZSTD_decodeSequence(&sequence, &seqState);
            oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litEnd, base, oend);
            if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
            op += oneSeqSize;
        }

        /* check if reached exact end */
        if ( !BIT_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected);   /* requested too much : data is corrupted */
        if (nbSeq<0) return ERROR(corruption_detected);   /* requested too many sequences : data is corrupted */

        /* last literal segment */
        {
            size_t lastLLSize = litEnd - litPtr;
            if (litPtr > litEnd) return ERROR(corruption_detected);
            if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall);
            if (lastLLSize > 0) {
                if (op != litPtr) memmove(op, litPtr, lastLLSize);
                op += lastLLSize;
            }
        }
    }

    return op-ostart;
}
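/* A compressed block is decoded in two steps (see ZSTD_decompressBlock() below) :
   1) ZSTD_decodeLiteralsBlock() regenerates the literals into dctx->litBuffer (or references them
      directly inside the source for raw literals) ;
   2) ZSTD_decompressSequences() rebuilds the FSE tables, then interleaves literal runs and
      back-references, and finally flushes the last literal segment. */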


static size_t ZSTD_decompressBlock(
                            void* ctx,
                            void* dst, size_t maxDstSize,
                      const void* src, size_t srcSize)
{
    /* blockType == blockCompressed */
    const BYTE* ip = (const BYTE*)src;

    /* Decode literals sub-block */
    size_t litCSize = ZSTD_decodeLiteralsBlock(ctx, src, srcSize);
    if (ZSTD_isError(litCSize)) return litCSize;
    ip += litCSize;
    srcSize -= litCSize;

    return ZSTD_decompressSequences(ctx, dst, maxDstSize, ip, srcSize);
}

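/* Frame layout, as parsed below : a small frame header carrying the magic number
   (ZSTD_frameHeaderSize bytes), followed by a series of blocks, each preceded by a 3-byte block
   header, and terminated by a bt_end block header announcing a size of 0. */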
static size_t ZSTD_decompressDCtx(void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
    const BYTE* ip = (const BYTE*)src;
    const BYTE* iend = ip + srcSize;
    BYTE* const ostart = (BYTE* const)dst;
    BYTE* op = ostart;
    BYTE* const oend = ostart + maxDstSize;
    size_t remainingSize = srcSize;
    U32 magicNumber;
    blockProperties_t blockProperties;

    /* Frame Header */
    if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
    magicNumber = MEM_readLE32(src);
    if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
    ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;

    /* Loop on each block */
    while (1)
    {
        size_t decodedSize=0;
        size_t cBlockSize = ZSTD_getcBlockSize(ip, iend-ip, &blockProperties);
        if (ZSTD_isError(cBlockSize)) return cBlockSize;

        ip += ZSTD_blockHeaderSize;
        remainingSize -= ZSTD_blockHeaderSize;
        if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);

        switch(blockProperties.blockType)
        {
        case bt_compressed:
            decodedSize = ZSTD_decompressBlock(ctx, op, oend-op, ip, cBlockSize);
            break;
        case bt_raw :
            decodedSize = ZSTD_copyUncompressedBlock(op, oend-op, ip, cBlockSize);
            break;
        case bt_rle :
            return ERROR(GENERIC);   /* not yet supported */
            break;
        case bt_end :
            /* end of frame */
            if (remainingSize) return ERROR(srcSize_wrong);
            break;
        default:
            return ERROR(GENERIC);   /* impossible */
        }
        if (cBlockSize == 0) break;   /* bt_end */

        if (ZSTD_isError(decodedSize)) return decodedSize;
        op += decodedSize;
        ip += cBlockSize;
        remainingSize -= cBlockSize;
    }

    return op-ostart;
}

static size_t ZSTD_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
    ZSTD_DCtx ctx;
    ctx.base = dst;
    return ZSTD_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize);
}

/* ZSTD_errorFrameSizeInfoLegacy() :
   assumes `cSize` and `dBound` are _not_ NULL */
static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret)
{
    *cSize = ret;
    *dBound = ZSTD_CONTENTSIZE_ERROR;
}

void ZSTDv02_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound)
{
    const BYTE* ip = (const BYTE*)src;
    size_t remainingSize = srcSize;
    size_t nbBlocks = 0;
    U32 magicNumber;
    blockProperties_t blockProperties;

    /* Frame Header */
    if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) {
        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
        return;
    }
    magicNumber = MEM_readLE32(src);
    if (magicNumber != ZSTD_magicNumber) {
        ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown));
        return;
    }
    ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize;

    /* Loop on each block */
    while (1)
    {
        size_t cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
        if (ZSTD_isError(cBlockSize)) {
            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, cBlockSize);
            return;
        }

        ip += ZSTD_blockHeaderSize;
        remainingSize -= ZSTD_blockHeaderSize;
        if (cBlockSize > remainingSize) {
            ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong));
            return;
        }

        if (cBlockSize == 0) break;   /* bt_end */

        ip += cBlockSize;
        remainingSize -= cBlockSize;
        nbBlocks++;
    }

    *cSize = ip - (const BYTE*)src;
    *dBound = nbBlocks * BLOCKSIZE;
}
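/* Note : the dBound reported above is an upper bound, not an exact size : each block regenerates
   at most BLOCKSIZE bytes, so nbBlocks * BLOCKSIZE always covers the decompressed frame. */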

/*******************************
*  Streaming Decompression API
*******************************/

static size_t ZSTD_resetDCtx(ZSTD_DCtx* dctx)
{
    dctx->expected = ZSTD_frameHeaderSize;
    dctx->phase = 0;
    dctx->previousDstEnd = NULL;
    dctx->base = NULL;
    return 0;
}

static ZSTD_DCtx* ZSTD_createDCtx(void)
{
    ZSTD_DCtx* dctx = (ZSTD_DCtx*)malloc(sizeof(ZSTD_DCtx));
    if (dctx==NULL) return NULL;
    ZSTD_resetDCtx(dctx);
    return dctx;
}

static size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
{
    free(dctx);
    return 0;
}

static size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx)
{
    return dctx->expected;
}

static size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
    /* Sanity check */
    if (srcSize != ctx->expected) return ERROR(srcSize_wrong);
    if (dst != ctx->previousDstEnd)   /* not contiguous */
        ctx->base = dst;

    /* Decompress : frame header */
    if (ctx->phase == 0)
    {
        /* Check frame magic header */
        U32 magicNumber = MEM_readLE32(src);
        if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown);
        ctx->phase = 1;
        ctx->expected = ZSTD_blockHeaderSize;
        return 0;
    }

    /* Decompress : block header */
    if (ctx->phase == 1)
    {
        blockProperties_t bp;
        size_t blockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
        if (ZSTD_isError(blockSize)) return blockSize;
        if (bp.blockType == bt_end)
        {
            ctx->expected = 0;
            ctx->phase = 0;
        }
        else
        {
            ctx->expected = blockSize;
            ctx->bType = bp.blockType;
            ctx->phase = 2;
        }

        return 0;
    }

    /* Decompress : block content */
    {
        size_t rSize;
        switch(ctx->bType)
        {
        case bt_compressed:
            rSize = ZSTD_decompressBlock(ctx, dst, maxDstSize, src, srcSize);
            break;
        case bt_raw :
            rSize = ZSTD_copyUncompressedBlock(dst, maxDstSize, src, srcSize);
            break;
        case bt_rle :
            return ERROR(GENERIC);   /* not yet handled */
            break;
        case bt_end :   /* should never happen (filtered at phase 1) */
            rSize = 0;
            break;
        default:
            return ERROR(GENERIC);
        }
        ctx->phase = 1;
        ctx->expected = ZSTD_blockHeaderSize;
        ctx->previousDstEnd = (void*)( ((char*)dst) + rSize);
        return rSize;
    }

}


/* wrapper layer */

unsigned ZSTDv02_isError(size_t code)
{
    return ZSTD_isError(code);
}

size_t ZSTDv02_decompress( void* dst, size_t maxOriginalSize,
                     const void* src, size_t compressedSize)
{
    return ZSTD_decompress(dst, maxOriginalSize, src, compressedSize);
}
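/* Usage sketch (illustrative only, not part of this file) : one-shot decoding through the wrapper
   above. The compressed buffer, its size, and the destination capacity (e.g. the dBound returned
   by ZSTDv02_findFrameSizeInfoLegacy()) are assumed to be provided by the caller.

       void* const dst = malloc(dstCapacity);
       size_t const dSize = ZSTDv02_decompress(dst, dstCapacity, compressed, compressedSize);
       if (ZSTDv02_isError(dSize)) { ... report corrupted or truncated input ... }
*/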

ZSTDv02_Dctx* ZSTDv02_createDCtx(void)
{
    return (ZSTDv02_Dctx*)ZSTD_createDCtx();
}

size_t ZSTDv02_freeDCtx(ZSTDv02_Dctx* dctx)
{
    return ZSTD_freeDCtx((ZSTD_DCtx*)dctx);
}

size_t ZSTDv02_resetDCtx(ZSTDv02_Dctx* dctx)
{
    return ZSTD_resetDCtx((ZSTD_DCtx*)dctx);
}

size_t ZSTDv02_nextSrcSizeToDecompress(ZSTDv02_Dctx* dctx)
{
    return ZSTD_nextSrcSizeToDecompress((ZSTD_DCtx*)dctx);
}

size_t ZSTDv02_decompressContinue(ZSTDv02_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
{
    return ZSTD_decompressContinue((ZSTD_DCtx*)dctx, dst, maxDstSize, src, srcSize);
}

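/* Usage sketch (illustrative only) : streaming decompression with the wrappers above. The caller
   repeatedly asks how many bytes the decoder expects, feeds exactly that many, and appends whatever
   comes back ; an expected size of 0 marks the end of the frame. readInput(), inBuff, outPtr and
   outEnd are caller-side assumptions, not part of this file.

       ZSTDv02_Dctx* const dctx = ZSTDv02_createDCtx();
       size_t toRead = ZSTDv02_nextSrcSizeToDecompress(dctx);
       while (toRead != 0)
       {
           size_t decoded;
           readInput(inBuff, toRead);                          // fill inBuff with exactly toRead bytes
           decoded = ZSTDv02_decompressContinue(dctx, outPtr, (size_t)(outEnd-outPtr), inBuff, toRead);
           if (ZSTDv02_isError(decoded)) break;                // corrupted or truncated input
           outPtr += decoded;
           toRead = ZSTDv02_nextSrcSizeToDecompress(dctx);
       }
       ZSTDv02_freeDCtx(dctx);
*/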