It has been a long while since the bundled lz4 library received an update.
This update pulls in a wide range of fixes and enhancements; some of the
changes fix compiler warnings and harden the code a bit as well.
Signed-off-by: David Sommerseth <davids@openvpn.net>
Acked-by: Arne Schwabe <arne@rfc2549.org>
Message-Id: <20201001154658.9798-1-davids@openvpn.net>
URL: https://www.mail-archive.com/search?l=mid&q=20201001154658.9798-1-davids@openvpn.net
Signed-off-by: Gert Doering <gert@greenie.muc.de>
(cherry picked from commit 0f44a9080530df70410106c244e9efc7f2d8a802)
@@ -1,5 +1,5 @@
 /* This file has been backported by dev-tools/lz4-rebaser.sh
- * from upstream lz4 commit 7bb64ff2b69a9f8367de (v1.7.5)
+ * from upstream lz4 commit fdf2ef5809ca875c4545 (v1.9.2)
 */
 #ifdef HAVE_CONFIG_H
 #include "config.h"
@@ -10,7 +10,7 @@
 #ifdef NEED_COMPAT_LZ4
 /*
    LZ4 - Fast LZ compression algorithm
-   Copyright (C) 2011-2016, Yann Collet.
+   Copyright (C) 2011-present, Yann Collet.
 
    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 
@@ -42,17 +42,16 @@
    - LZ4 source repository : https://github.com/lz4/lz4
 */
 
-
 /*-************************************
 *  Tuning parameters
 **************************************/
 /*
- * HEAPMODE :
+ * LZ4_HEAPMODE :
  * Select how default compression functions will allocate memory for their hash table,
  * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
  */
-#ifndef HEAPMODE
-#  define HEAPMODE 0
+#ifndef LZ4_HEAPMODE
+#  define LZ4_HEAPMODE 0
 #endif
 
 /*
@@ -73,16 +72,17 @@
  * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
  *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
  * Method 2 : direct access. This method is portable but violate C standard.
- *            It can generate buggy code on targets which generate assembly depending on alignment.
+ *            It can generate buggy code on targets which assembly generation depends on alignment.
  *            But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
  * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
  * Prefer these methods in priority order (0 > 1 > 2)
  */
-#ifndef LZ4_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
-#  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+#ifndef LZ4_FORCE_MEMORY_ACCESS   /* can be defined externally */
+#  if defined(__GNUC__) && \
+     ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \
+    || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
 #    define LZ4_FORCE_MEMORY_ACCESS 2
-#  elif defined(__INTEL_COMPILER) || \
-     (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
+#  elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__)
 #    define LZ4_FORCE_MEMORY_ACCESS 1
 #  endif
 #endif
@@ -91,14 +91,32 @@
  * LZ4_FORCE_SW_BITCOUNT
  * Define this parameter if your target system or compiler does not support hardware bit count
  */
-#if defined(_MSC_VER) && defined(_WIN32_WCE)   /* Visual Studio for Windows CE does not support Hardware bit count */
+#if defined(_MSC_VER) && defined(_WIN32_WCE)   /* Visual Studio for WinCE doesn't support Hardware bit count */
 #  define LZ4_FORCE_SW_BITCOUNT
 #endif
 
 
+
 /*-************************************
 *  Dependency
 **************************************/
+/*
+ * LZ4_SRC_INCLUDED:
+ * Amalgamation flag, whether lz4.c is included
+ */
+#ifndef LZ4_SRC_INCLUDED
+#  define LZ4_SRC_INCLUDED 1
+#endif
+
+#ifndef LZ4_STATIC_LINKING_ONLY
+#define LZ4_STATIC_LINKING_ONLY
+#endif
+
+#ifndef LZ4_DISABLE_DEPRECATE_WARNINGS
+#define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */
+#endif
+
+#define LZ4_STATIC_LINKING_ONLY  /* LZ4_DISTANCE_MAX */
 #include "compat-lz4.h"
 /* see also "memory routines" below */
 
@@ -107,42 +125,130 @@
 *  Compiler Options
 **************************************/
 #ifdef _MSC_VER    /* Visual Studio */
-#  define FORCE_INLINE static __forceinline
 #  include <intrin.h>
 #  pragma warning(disable : 4127)   /* disable: C4127: conditional expression is constant */
 #  pragma warning(disable : 4293)   /* disable: C4293: too large shift (32-bits) */
-#else
-#  if defined(__GNUC__) || defined(__clang__)
-#    define FORCE_INLINE static inline __attribute__((always_inline))
-#  elif defined(__cplusplus) || (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-#    define FORCE_INLINE static inline
-#  else
-#    define FORCE_INLINE static
-#  endif
 #endif  /* _MSC_VER */
 
+#ifndef LZ4_FORCE_INLINE
+#  ifdef _MSC_VER    /* Visual Studio */
+#    define LZ4_FORCE_INLINE static __forceinline
+#  else
+#    if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
+#      ifdef __GNUC__
+#        define LZ4_FORCE_INLINE static inline __attribute__((always_inline))
+#      else
+#        define LZ4_FORCE_INLINE static inline
+#      endif
+#    else
+#      define LZ4_FORCE_INLINE static
+#    endif /* __STDC_VERSION__ */
+#  endif  /* _MSC_VER */
+#endif /* LZ4_FORCE_INLINE */
+
+/* LZ4_FORCE_O2_GCC_PPC64LE and LZ4_FORCE_O2_INLINE_GCC_PPC64LE
+ * gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8,
+ * together with a simple 8-byte copy loop as a fall-back path.
+ * However, this optimization hurts the decompression speed by >30%,
+ * because the execution does not go to the optimized loop
+ * for typical compressible data, and all of the preamble checks
+ * before going to the fall-back path become useless overhead.
+ * This optimization happens only with the -O3 flag, and -O2 generates
+ * a simple 8-byte copy loop.
+ * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy8
+ * functions are annotated with __attribute__((optimize("O2"))),
+ * and also LZ4_wildCopy8 is forcibly inlined, so that the O2 attribute
+ * of LZ4_wildCopy8 does not affect the compression speed.
+ */
+#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && !defined(__clang__)
+#  define LZ4_FORCE_O2_GCC_PPC64LE __attribute__((optimize("O2")))
+#  define LZ4_FORCE_O2_INLINE_GCC_PPC64LE __attribute__((optimize("O2"))) LZ4_FORCE_INLINE
+#else
+#  define LZ4_FORCE_O2_GCC_PPC64LE
+#  define LZ4_FORCE_O2_INLINE_GCC_PPC64LE static
+#endif
+
 #if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
 #  define expect(expr,value)    (__builtin_expect ((expr),(value)) )
 #else
 #  define expect(expr,value)    (expr)
 #endif
 
+#ifndef likely
 #define likely(expr)     expect((expr) != 0, 1)
+#endif
+#ifndef unlikely
 #define unlikely(expr)   expect((expr) != 0, 0)
+#endif
 
 
 /*-************************************
 *  Memory routines
 **************************************/
 #include <stdlib.h>   /* malloc, calloc, free */
-#define ALLOCATOR(n,s) calloc(n,s)
-#define FREEMEM        free
+#define ALLOC(s)          malloc(s)
+#define ALLOC_AND_ZERO(s) calloc(1,s)
+#define FREEMEM(p)        free(p)
 #include <string.h>   /* memset, memcpy */
-#define MEM_INIT       memset
+#define MEM_INIT(p,v,s)   memset((p),(v),(s))
 
 
 /*-************************************
-*  Basic Types
+*  Common Constants
+**************************************/
+#define MINMATCH 4
+
+#define WILDCOPYLENGTH 8
+#define LASTLITERALS   5   /* see ../doc/lz4_Block_format.md#parsing-restrictions */
+#define MFLIMIT       12   /* see ../doc/lz4_Block_format.md#parsing-restrictions */
+#define MATCH_SAFEGUARD_DISTANCE  ((2*WILDCOPYLENGTH) - MINMATCH)   /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */
+#define FASTLOOP_SAFE_DISTANCE 64
+static const int LZ4_minLength = (MFLIMIT+1);
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define LZ4_DISTANCE_ABSOLUTE_MAX 65535
+#if (LZ4_DISTANCE_MAX > LZ4_DISTANCE_ABSOLUTE_MAX)   /* max supported by LZ4 format */
+#  error "LZ4_DISTANCE_MAX is too big : must be <= 65535"
+#endif
+
+#define ML_BITS  4
+#define ML_MASK  ((1U<<ML_BITS)-1)
+#define RUN_BITS (8-ML_BITS)
+#define RUN_MASK ((1U<<RUN_BITS)-1)
+
+
+/*-************************************
+*  Error detection
+**************************************/
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
+#  include <assert.h>
+#else
+#  ifndef assert
+#    define assert(condition) ((void)0)
+#  endif
+#endif
+
+#define LZ4_STATIC_ASSERT(c)   { enum { LZ4_static_assert = 1/(int)(!!(c)) }; }   /* use after variable declarations */
+
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)
+#  include <stdio.h>
+static int g_debuglog_enable = 1;
+#  define DEBUGLOG(l, ...) {                          \
+        if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) {  \
+            fprintf(stderr, __FILE__ ": ");           \
+            fprintf(stderr, __VA_ARGS__);             \
+            fprintf(stderr, " \n");                   \
+    }   }
+#else
+#  define DEBUGLOG(l, ...) {}    /* disabled */
+#endif
+
+
+/*-************************************
+*  Types
 **************************************/
 #if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
 # include <stdint.h>
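
The LZ4_STATIC_ASSERT macro added in this hunk turns a false condition into a
compile-time error: `1/(int)(!!(c))` divides by zero inside an enum initializer,
which is rejected as an invalid constant expression. A minimal standalone sketch
of the same trick (the names below are illustrative, not part of the patch):

```c
#include <stdio.h>

/* Compile-time assertion: if cond is false, 1/(int)(!!(cond)) divides by
 * zero inside a constant expression, so the translation unit cannot compile. */
#define STATIC_ASSERT(cond) { enum { static_assert_dummy = 1/(int)(!!(cond)) }; }

int main(void)
{
    STATIC_ASSERT(sizeof(int) >= 2);        /* compiles fine */
    /* STATIC_ASSERT(sizeof(int) == 1); */  /* would break the build */
    printf("ok\n");
    return 0;
}
```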
@@ -167,6 +273,13 @@
 typedef size_t reg_t;   /* 32-bits in x32 mode */
 #endif
 
+typedef enum {
+    notLimited = 0,
+    limitedOutput = 1,
+    fillOutput = 2
+} limitedOutput_directive;
+
+
 /*-************************************
 *  Reading and writing into memory
 **************************************/
@@ -200,7 +313,7 @@ static reg_t LZ4_read_ARCH(const void* ptr) { return ((const unalign*)ptr)->uArc
 static void LZ4_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
 static void LZ4_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; }
 
-#else  /* safe and portable access through memcpy() */
+#else  /* safe and portable access using memcpy() */
 
 static U16 LZ4_read16(const void* memPtr)
 {
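
The reworded comment concerns the strictly portable accessor path: reading a
word through memcpy() is defined for any alignment and any aliasing, and
optimizing compilers lower it to a single load where the target permits. A
small self-contained sketch of the pattern (not taken from the patch):

```c
#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Portable unaligned 32-bit read: memcpy is legal for any alignment,
 * and is typically compiled down to one plain load instruction. */
static uint32_t read32(const void* memPtr)
{
    uint32_t val;
    memcpy(&val, memPtr, sizeof(val));
    return val;
}

int main(void)
{
    unsigned char buf[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    printf("0x%08x\n", read32(buf + 1));   /* deliberately unaligned */
    return 0;
}
```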
@@ -251,55 +364,113 @@ static void LZ4_writeLE16(void* memPtr, U16 value)
     }
 }
 
-static void LZ4_copy8(void* dst, const void* src)
-{
-    memcpy(dst,src,8);
-}
-
 /* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
-static void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd)
+LZ4_FORCE_O2_INLINE_GCC_PPC64LE
+void LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd)
 {
     BYTE* d = (BYTE*)dstPtr;
     const BYTE* s = (const BYTE*)srcPtr;
     BYTE* const e = (BYTE*)dstEnd;
 
-    do { LZ4_copy8(d,s); d+=8; s+=8; } while (d<e);
+    do { memcpy(d,s,8); d+=8; s+=8; } while (d<e);
 }
 
+static const unsigned inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
+static const int      dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
 
-/*-************************************
-*  Common Constants
-**************************************/
-#define MINMATCH 4
 
-#define WILDCOPYLENGTH 8
-#define LASTLITERALS 5
-#define MFLIMIT (WILDCOPYLENGTH+MINMATCH)
-static const int LZ4_minLength = (MFLIMIT+1);
+#ifndef LZ4_FAST_DEC_LOOP
+#  if defined(__i386__) || defined(__x86_64__)
+#    define LZ4_FAST_DEC_LOOP 1
+#  elif defined(__aarch64__) && !defined(__clang__)
+     /* On aarch64, we disable this optimization for clang because on certain
+      * mobile chipsets and clang, it reduces performance. For more information
+      * refer to https://github.com/lz4/lz4/pull/707. */
+#    define LZ4_FAST_DEC_LOOP 1
+#  else
+#    define LZ4_FAST_DEC_LOOP 0
+#  endif
+#endif
 
-#define KB *(1 <<10)
-#define MB *(1 <<20)
-#define GB *(1U<<30)
+#if LZ4_FAST_DEC_LOOP
+
+LZ4_FORCE_O2_INLINE_GCC_PPC64LE void
+LZ4_memcpy_using_offset_base(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
+{
+    if (offset < 8) {
+        dstPtr[0] = srcPtr[0];
+        dstPtr[1] = srcPtr[1];
+        dstPtr[2] = srcPtr[2];
+        dstPtr[3] = srcPtr[3];
+        srcPtr += inc32table[offset];
+        memcpy(dstPtr+4, srcPtr, 4);
+        srcPtr -= dec64table[offset];
+        dstPtr += 8;
+    } else {
+        memcpy(dstPtr, srcPtr, 8);
+        dstPtr += 8;
+        srcPtr += 8;
+    }
 
-#define MAXD_LOG 16
-#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
+    LZ4_wildCopy8(dstPtr, srcPtr, dstEnd);
+}
 
-#define ML_BITS  4
-#define ML_MASK  ((1U<<ML_BITS)-1)
-#define RUN_BITS (8-ML_BITS)
-#define RUN_MASK ((1U<<RUN_BITS)-1)
+/* customized variant of memcpy, which can overwrite up to 32 bytes beyond dstEnd
+ * this version copies two times 16 bytes (instead of one time 32 bytes)
+ * because it must be compatible with offsets >= 16. */
+LZ4_FORCE_O2_INLINE_GCC_PPC64LE void
+LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)
+{
+    BYTE* d = (BYTE*)dstPtr;
+    const BYTE* s = (const BYTE*)srcPtr;
+    BYTE* const e = (BYTE*)dstEnd;
+
+    do { memcpy(d,s,16); memcpy(d+16,s+16,16); d+=32; s+=32; } while (d<e);
+}
 
+/* LZ4_memcpy_using_offset() presumes :
+ * - dstEnd >= dstPtr + MINMATCH
+ * - there is at least 8 bytes available to write after dstEnd */
+LZ4_FORCE_O2_INLINE_GCC_PPC64LE void
+LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
+{
+    BYTE v[8];
 
-/*-************************************
-*  Common Utils
-**************************************/
-#define LZ4_STATIC_ASSERT(c)    { enum { LZ4_static_assert = 1/(int)(!!(c)) }; }   /* use only *after* variable declarations */
+    assert(dstEnd >= dstPtr + MINMATCH);
+    LZ4_write32(dstPtr, 0);   /* silence an msan warning when offset==0 */
+
+    switch(offset) {
+    case 1:
+        memset(v, *srcPtr, 8);
+        break;
+    case 2:
+        memcpy(v, srcPtr, 2);
+        memcpy(&v[2], srcPtr, 2);
+        memcpy(&v[4], &v[0], 4);
+        break;
+    case 4:
+        memcpy(v, srcPtr, 4);
+        memcpy(&v[4], srcPtr, 4);
+        break;
+    default:
+        LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset);
+        return;
+    }
+
+    memcpy(dstPtr, v, 8);
+    dstPtr += 8;
+    while (dstPtr < dstEnd) {
+        memcpy(dstPtr, v, 8);
+        dstPtr += 8;
+    }
+}
+#endif
 
 
 /*-************************************
 *  Common functions
 **************************************/
-static unsigned LZ4_NbCommonBytes (register reg_t val)
+static unsigned LZ4_NbCommonBytes (reg_t val)
 {
     if (LZ4_isLittleEndian()) {
         if (sizeof(val)==8) {
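
LZ4_wildCopy8 keeps its defining property here: it copies in fixed 8-byte
chunks and may write up to 7 bytes past dstEnd, so every caller must reserve
slack space after the destination. A hedged standalone illustration of that
rounding-up behaviour (buffer sizes below are illustrative):

```c
#include <stdio.h>
#include <string.h>

/* Copies in 8-byte chunks and may overwrite up to 7 bytes beyond dstEnd;
 * the destination therefore needs WILDCOPYLENGTH bytes of slack. */
static void wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd)
{
    unsigned char* d = (unsigned char*)dstPtr;
    const unsigned char* s = (const unsigned char*)srcPtr;
    unsigned char* const e = (unsigned char*)dstEnd;
    do { memcpy(d, s, 8); d += 8; s += 8; } while (d < e);
}

int main(void)
{
    char src[24] = "0123456789abcdefghijklm";
    char dst[24] = {0};                  /* slack after the 13 wanted bytes */
    wildCopy8(dst, src, dst + 13);       /* actually writes 16 bytes */
    printf("%.13s\n", dst);
    return 0;
}
```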
@@ -308,9 +479,16 @@ static unsigned LZ4_NbCommonBytes (register reg_t val)
             _BitScanForward64( &r, (U64)val );
             return (int)(r>>3);
 #       elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
-            return (__builtin_ctzll((U64)val) >> 3);
+            return (unsigned)__builtin_ctzll((U64)val) >> 3;
 #       else
-            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
+            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
+                                                     0, 3, 1, 3, 1, 4, 2, 7,
+                                                     0, 2, 3, 6, 1, 5, 3, 5,
+                                                     1, 3, 4, 4, 2, 5, 6, 7,
+                                                     7, 0, 1, 2, 3, 3, 4, 6,
+                                                     2, 6, 5, 5, 3, 4, 5, 6,
+                                                     7, 1, 2, 4, 6, 4, 4, 5,
+                                                     7, 2, 6, 5, 7, 6, 7, 7 };
             return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
 #       endif
         } else /* 32 bits */ {
@@ -319,23 +497,29 @@ static unsigned LZ4_NbCommonBytes (register reg_t val)
             _BitScanForward( &r, (U32)val );
             return (int)(r>>3);
 #       elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
-            return (__builtin_ctz((U32)val) >> 3);
+            return (unsigned)__builtin_ctz((U32)val) >> 3;
 #       else
-            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
+            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
+                                                     3, 2, 2, 1, 3, 2, 0, 1,
+                                                     3, 3, 1, 2, 2, 2, 2, 0,
+                                                     3, 1, 2, 0, 1, 0, 1, 1 };
             return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
 #       endif
         }
     } else   /* Big Endian CPU */ {
-        if (sizeof(val)==8) {
+        if (sizeof(val)==8) {   /* 64-bits */
 #       if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
             unsigned long r = 0;
             _BitScanReverse64( &r, val );
             return (unsigned)(r>>3);
 #       elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
-            return (__builtin_clzll((U64)val) >> 3);
+            return (unsigned)__builtin_clzll((U64)val) >> 3;
 #       else
+            static const U32 by32 = sizeof(val)*4;   /* 32 on 64 bits (goal), 16 on 32 bits.
+                Just to avoid some static analyzer complaining about shift by 32 on 32-bits target.
+                Note that this code path is never triggered in 32-bits mode. */
             unsigned r;
-            if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
+            if (!(val>>by32)) { r=4; } else { r=0; val>>=by32; }
             if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
             r += (!val);
             return r;
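
The reflowed DeBruijn tables implement a portable count-trailing-zeros: the
expression `val & -val` isolates the lowest set bit, multiplying by a De Bruijn
constant places a unique pattern in the top bits, and the table maps that
pattern back to a byte index. A sketch of the 32-bit case, reusing the constant
and table from the hunk above:

```c
#include <stdio.h>
#include <stdint.h>

/* Index (0-3) of the lowest non-zero byte of a 32-bit value, software-only. */
static unsigned lowest_byte_index(uint32_t val)
{
    static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
                                             3, 2, 2, 1, 3, 2, 0, 1,
                                             3, 3, 1, 2, 2, 2, 2, 0,
                                             3, 1, 2, 0, 1, 0, 1, 1 };
    /* val & -val isolates the lowest set bit; the multiply spreads its
     * position into the top 5 bits, which index the lookup table. */
    return DeBruijnBytePos[((uint32_t)((val & -(int32_t)val) * 0x077CB531U)) >> 27];
}

int main(void)
{
    printf("%u\n", lowest_byte_index(0x00010000u));   /* bit 16 -> byte 2 */
    printf("%u\n", lowest_byte_index(0x00000080u));   /* bit 7  -> byte 0 */
    return 0;
}
```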
@@ -346,7 +530,7 @@ static unsigned LZ4_NbCommonBytes (register reg_t val)
             _BitScanReverse( &r, (unsigned long)val );
             return (unsigned)(r>>3);
 #       elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT)
-            return (__builtin_clz((U32)val) >> 3);
+            return (unsigned)__builtin_clz((U32)val) >> 3;
 #       else
             unsigned r;
             if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
@@ -358,11 +542,20 @@ static unsigned LZ4_NbCommonBytes (register reg_t val)
 }
 
 #define STEPSIZE sizeof(reg_t)
-static unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
+LZ4_FORCE_INLINE
+unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
 {
     const BYTE* const pStart = pIn;
 
-    while (likely(pIn<pInLimit-(STEPSIZE-1))) {
+    if (likely(pIn < pInLimit-(STEPSIZE-1))) {
+        reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
+        if (!diff) {
+            pIn+=STEPSIZE; pMatch+=STEPSIZE;
+        } else {
+            return LZ4_NbCommonBytes(diff);
+    }   }
+
+    while (likely(pIn < pInLimit-(STEPSIZE-1))) {
         reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
         if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
         pIn += LZ4_NbCommonBytes(diff);
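
The reshaped LZ4_count peels the first word comparison out of the loop so that
short matches, the common case, return without loop overhead. Its job is simply
to measure how far two byte sequences stay identical; a simplified byte-wise
sketch of the same contract (the word-at-a-time handling is omitted here):

```c
#include <stdio.h>

/* Number of leading bytes that a and b have in common, up to aLimit. */
static unsigned count_common_bytes(const unsigned char* a,
                                   const unsigned char* b,
                                   const unsigned char* aLimit)
{
    const unsigned char* const aStart = a;
    while (a < aLimit && *a == *b) { a++; b++; }
    return (unsigned)(a - aStart);
}

int main(void)
{
    const unsigned char x[] = "abcdefgh";
    const unsigned char y[] = "abcdXYZW";
    printf("%u\n", count_common_bytes(x, y, x + 8));   /* prints 4 */
    return 0;
}
```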
@@ -387,15 +580,34 @@ static const U32 LZ4_skipTrigger = 6;  /* Increase this value ==> compression ru
 /*-************************************
 *  Local Structures and types
 **************************************/
-typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive;
-typedef enum { byPtr, byU32, byU16 } tableType_t;
-
-typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
+typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;
+
+/**
+ * This enum distinguishes several different modes of accessing previous
+ * content in the stream.
+ *
+ * - noDict        : There is no preceding content.
+ * - withPrefix64k : Table entries up to ctx->dictSize before the current blob
+ *                   blob being compressed are valid and refer to the preceding
+ *                   content (of length ctx->dictSize), which is available
+ *                   contiguously preceding in memory the content currently
+ *                   being compressed.
+ * - usingExtDict  : Like withPrefix64k, but the preceding content is somewhere
+ *                   else in memory, starting at ctx->dictionary with length
+ *                   ctx->dictSize.
+ * - usingDictCtx  : Like usingExtDict, but everything concerning the preceding
+ *                   content is in a separate context, pointed to by
+ *                   ctx->dictCtx. ctx->dictionary, ctx->dictSize, and table
+ *                   entries in the current context that refer to positions
+ *                   preceding the beginning of the current compression are
+ *                   ignored. Instead, ctx->dictCtx->dictionary and ctx->dictCtx
+ *                   ->dictSize describe the location and size of the preceding
+ *                   content, and matches are found by looking in the ctx
+ *                   ->dictCtx->hashTable.
+ */
+typedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive;
 typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
 
-typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
-typedef enum { full = 0, partial = 1 } earlyEnd_directive;
-
 
 /*-************************************
 *  Local Utils
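
The new usingDictCtx mode is what backs dictionary compression through the
public streaming API. A hedged usage sketch of attaching an external dictionary
with the documented lz4 v1.9.x entry points (error handling omitted; assumes
linking against the upstream library):

```c
#include <stdio.h>
#include "lz4.h"   /* upstream public header */

int main(void)
{
    const char dict[] = "a common prefix shared by many messages: ";
    const char msg[]  = "a common prefix shared by many messages: hello";
    char dst[128];

    LZ4_stream_t stream;
    LZ4_initStream(&stream, sizeof(stream));             /* v1.9.x API */
    LZ4_loadDict(&stream, dict, (int)sizeof(dict) - 1);  /* preceding content */

    int n = LZ4_compress_fast_continue(&stream, msg, dst,
                                       (int)sizeof(msg) - 1,
                                       (int)sizeof(dst), 1);
    printf("compressed %d -> %d bytes\n", (int)sizeof(msg) - 1, n);
    return 0;
}
```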
@@ -406,6 +618,23 @@ int LZ4_compressBound(int isize)  { return LZ4_COMPRESSBOUND(isize); }
 int LZ4_sizeofState() { return LZ4_STREAMSIZE; }
 
 
+/*-************************************
+*  Internal Definitions used in Tests
+**************************************/
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize);
+
+int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
+                                     int compressedSize, int maxOutputSize,
+                                     const void* dictStart, size_t dictSize);
+
+#if defined (__cplusplus)
+}
+#endif
+
 /*-******************************
 *  Compression functions
 ********************************/
@@ -419,102 +648,225 @@ static U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
 
 static U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
 {
-    static const U64 prime5bytes = 889523592379ULL;
-    static const U64 prime8bytes = 11400714785074694791ULL;
     const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
-    if (LZ4_isLittleEndian())
+    if (LZ4_isLittleEndian()) {
+        const U64 prime5bytes = 889523592379ULL;
         return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
-    else
+    } else {
+        const U64 prime8bytes = 11400714785074694791ULL;
         return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
+    }
 }
 
-FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
+LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
 {
     if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
     return LZ4_hash4(LZ4_read32(p), tableType);
 }
 
-static void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t const tableType, const BYTE* srcBase)
+static void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType)
 {
     switch (tableType)
     {
+    default: /* fallthrough */
+    case clearedTable: { /* illegal! */ assert(0); return; }
+    case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = NULL; return; }
+    case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = 0; return; }
+    case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = 0; return; }
+    }
+}
+
+static void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType)
+{
+    switch (tableType)
+    {
+    default: /* fallthrough */
+    case clearedTable: /* fallthrough */
+    case byPtr: { /* illegal! */ assert(0); return; }
+    case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; }
+    case byU16: { U16* hashTable = (U16*) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; }
+    }
+}
+
+static void LZ4_putPositionOnHash(const BYTE* p, U32 h,
+                                  void* tableBase, tableType_t const tableType,
+                                  const BYTE* srcBase)
+{
+    switch (tableType)
+    {
+    case clearedTable: { /* illegal! */ assert(0); return; }
     case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
     case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
     case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
     }
 }
 
-FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
 {
     U32 const h = LZ4_hashPosition(p, tableType);
     LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
 }
 
-static const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+/* LZ4_getIndexOnHash() :
+ * Index of match position registered in hash table.
+ * hash position must be calculated by using base+index, or dictBase+index.
+ * Assumption 1 : only valid if tableType == byU32 or byU16.
+ * Assumption 2 : h is presumed valid (within limits of hash table)
+ */
+static U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType)
+{
+    LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2);
+    if (tableType == byU32) {
+        const U32* const hashTable = (const U32*) tableBase;
+        assert(h < (1U << (LZ4_MEMORY_USAGE-2)));
+        return hashTable[h];
+    }
+    if (tableType == byU16) {
+        const U16* const hashTable = (const U16*) tableBase;
+        assert(h < (1U << (LZ4_MEMORY_USAGE-1)));
+        return hashTable[h];
+    }
+    assert(0); return 0;   /* forbidden case */
+}
+
+static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType, const BYTE* srcBase)
 {
-    if (tableType == byPtr) { const BYTE** hashTable = (const BYTE**) tableBase; return hashTable[h]; }
-    if (tableType == byU32) { const U32* const hashTable = (U32*) tableBase; return hashTable[h] + srcBase; }
-    { const U16* const hashTable = (U16*) tableBase; return hashTable[h] + srcBase; }   /* default, to ensure a return */
+    if (tableType == byPtr) { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; }
+    if (tableType == byU32) { const U32* const hashTable = (const U32*) tableBase; return hashTable[h] + srcBase; }
+    { const U16* const hashTable = (const U16*) tableBase; return hashTable[h] + srcBase; }   /* default, to ensure a return */
 }
 
-FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+LZ4_FORCE_INLINE const BYTE*
+LZ4_getPosition(const BYTE* p,
+                const void* tableBase, tableType_t tableType,
+                const BYTE* srcBase)
 {
     U32 const h = LZ4_hashPosition(p, tableType);
     return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
 }
 
+LZ4_FORCE_INLINE void
+LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
+                 const int inputSize,
+                 const tableType_t tableType) {
+    /* If compression failed during the previous step, then the context
+     * is marked as dirty, therefore, it has to be fully reset.
+     */
+    if (cctx->dirty) {
+        DEBUGLOG(5, "LZ4_prepareTable: Full reset for %p", cctx);
+        MEM_INIT(cctx, 0, sizeof(LZ4_stream_t_internal));
+        return;
+    }
+
+    /* If the table hasn't been used, it's guaranteed to be zeroed out, and is
+     * therefore safe to use no matter what mode we're in. Otherwise, we figure
+     * out if it's safe to leave as is or whether it needs to be reset.
+     */
+    if (cctx->tableType != clearedTable) {
+        assert(inputSize >= 0);
+        if (cctx->tableType != tableType
+          || ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU)
+          || ((tableType == byU32) && cctx->currentOffset > 1 GB)
+          || tableType == byPtr
+          || inputSize >= 4 KB)
+        {
+            DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx);
+            MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
+            cctx->currentOffset = 0;
+            cctx->tableType = clearedTable;
+        } else {
+            DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)");
+        }
+    }
+
+    /* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back, is faster
+     * than compressing without a gap. However, compressing with
+     * currentOffset == 0 is faster still, so we preserve that case.
+     */
+    if (cctx->currentOffset != 0 && tableType == byU32) {
+        DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset");
+        cctx->currentOffset += 64 KB;
+    }
+
+    /* Finally, clear history */
+    cctx->dictCtx = NULL;
+    cctx->dictionary = NULL;
+    cctx->dictSize = 0;
+}
 
 /** LZ4_compress_generic() :
     inlined, to ensure branches are decided at compilation time */
-FORCE_INLINE int LZ4_compress_generic(
+LZ4_FORCE_INLINE int LZ4_compress_generic(
                  LZ4_stream_t_internal* const cctx,
                  const char* const source,
                  char* const dest,
                  const int inputSize,
+                 int *inputConsumed, /* only written when outputDirective == fillOutput */
                  const int maxOutputSize,
-                 const limitedOutput_directive outputLimited,
+                 const limitedOutput_directive outputDirective,
                  const tableType_t tableType,
-                 const dict_directive dict,
+                 const dict_directive dictDirective,
                  const dictIssue_directive dictIssue,
-                 const U32 acceleration)
+                 const int acceleration)
 {
+    int result;
     const BYTE* ip = (const BYTE*) source;
-    const BYTE* base;
+
+    U32 const startIndex = cctx->currentOffset;
+    const BYTE* base = (const BYTE*) source - startIndex;
     const BYTE* lowLimit;
-    const BYTE* const lowRefLimit = ip - cctx->dictSize;
-    const BYTE* const dictionary = cctx->dictionary;
-    const BYTE* const dictEnd = dictionary + cctx->dictSize;
-    const ptrdiff_t dictDelta = dictEnd - (const BYTE*)source;
+
+    const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx;
+    const BYTE* const dictionary =
+        dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;
+    const U32 dictSize =
+        dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;
+    const U32 dictDelta = (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0;   /* make indexes in dictCtx comparable with index in current context */
+
+    int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);
+    U32 const prefixIdxLimit = startIndex - dictSize;   /* used when dictDirective == dictSmall */
+    const BYTE* const dictEnd = dictionary + dictSize;
     const BYTE* anchor = (const BYTE*) source;
     const BYTE* const iend = ip + inputSize;
-    const BYTE* const mflimit = iend - MFLIMIT;
+    const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1;
    const BYTE* const matchlimit = iend - LASTLITERALS;
 
+    /* the dictCtx currentOffset is indexed on the start of the dictionary,
+     * while a dictionary in the current context precedes the currentOffset */
+    const BYTE* dictBase = (dictDirective == usingDictCtx) ?
+                            dictionary + dictSize - dictCtx->currentOffset :
+                            dictionary + dictSize - startIndex;
+
     BYTE* op = (BYTE*) dest;
     BYTE* const olimit = op + maxOutputSize;
 
+    U32 offset = 0;
     U32 forwardH;
 
-    /* Init conditions */
-    if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0;   /* Unsupported inputSize, too large (or negative) */
-    switch(dict)
-    {
-    case noDict:
-    default:
-        base = (const BYTE*)source;
-        lowLimit = (const BYTE*)source;
-        break;
-    case withPrefix64k:
-        base = (const BYTE*)source - cctx->currentOffset;
-        lowLimit = (const BYTE*)source - cctx->dictSize;
-        break;
-    case usingExtDict:
-        base = (const BYTE*)source - cctx->currentOffset;
-        lowLimit = (const BYTE*)source;
-        break;
+    DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, tableType=%u", inputSize, tableType);
+    /* If init conditions are not met, we don't have to mark stream
+     * as having dirty context, since no action was taken yet */
+    if (outputDirective == fillOutput && maxOutputSize < 1) { return 0; }   /* Impossible to store anything */
+    if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) { return 0; }             /* Unsupported inputSize, too large (or negative) */
+    if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) { return 0; }    /* Size too large (not within 64K limit) */
+    if (tableType==byPtr) assert(dictDirective==noDict);                    /* only supported use case with byPtr */
+    assert(acceleration >= 1);
+
+    lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0);
+
+    /* Update context state */
+    if (dictDirective == usingDictCtx) {
+        /* Subsequent linked blocks can't use the dictionary. */
+        /* Instead, they use the block we just compressed. */
+        cctx->dictCtx = NULL;
+        cctx->dictSize = (U32)inputSize;
+    } else {
+        cctx->dictSize += (U32)inputSize;
     }
-    if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) return 0;   /* Size too large (not within 64K limit) */
-    if (inputSize<LZ4_minLength) goto _last_literals;                  /* Input too small, no compression (all literals) */
+    cctx->currentOffset += (U32)inputSize;
+    cctx->tableType = (U16)tableType;
+
+    if (inputSize<LZ4_minLength) goto _last_literals;   /* Input too small, no compression (all literals) */
 
     /* First Byte */
     LZ4_putPosition(ip, cctx->hashTable, tableType, base);
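
The hash helpers reorganized in this hunk are multiplicative (Fibonacci-style)
hashes: multiply the next 4-5 input bytes by a large odd constant and keep the
top hashLog bits, which are the best-mixed ones. A minimal sketch of the 4-byte
variant, using the same constant as upstream lz4 (the table size is
illustrative):

```c
#include <stdio.h>
#include <stdint.h>

#define HASHLOG 12   /* illustrative: a 2^12-entry hash table */

/* Multiplicative hash of a 4-byte sequence; the top HASHLOG bits of the
 * product are well mixed, so they are kept as the table index. */
static uint32_t hash4(uint32_t sequence)
{
    return (sequence * 2654435761U) >> (32 - HASHLOG);
}

int main(void)
{
    printf("%u\n", hash4(0x64636261));   /* "abcd" read little-endian */
    printf("%u\n", hash4(0x65636261));   /* one byte differs -> far apart */
    return 0;
}
```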
@@ -522,50 +874,112 @@
 
     /* Main Loop */
     for ( ; ; ) {
-        ptrdiff_t refDelta = 0;
         const BYTE* match;
         BYTE* token;
+        const BYTE* filledIp;
 
         /* Find a match */
-        {   const BYTE* forwardIp = ip;
-            unsigned step = 1;
-            unsigned searchMatchNb = acceleration << LZ4_skipTrigger;
+        if (tableType == byPtr) {
+            const BYTE* forwardIp = ip;
+            int step = 1;
+            int searchMatchNb = acceleration << LZ4_skipTrigger;
             do {
                 U32 const h = forwardH;
                 ip = forwardIp;
                 forwardIp += step;
                 step = (searchMatchNb++ >> LZ4_skipTrigger);
 
-                if (unlikely(forwardIp > mflimit)) goto _last_literals;
+                if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
+                assert(ip < mflimitPlusOne);
 
                 match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
-                if (dict==usingExtDict) {
-                    if (match < (const BYTE*)source) {
-                        refDelta = dictDelta;
+                forwardH = LZ4_hashPosition(forwardIp, tableType);
+                LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);
+
+            } while ( (match+LZ4_DISTANCE_MAX < ip)
+                   || (LZ4_read32(match) != LZ4_read32(ip)) );
+
+        } else {   /* byU32, byU16 */
+
+            const BYTE* forwardIp = ip;
+            int step = 1;
+            int searchMatchNb = acceleration << LZ4_skipTrigger;
+            do {
+                U32 const h = forwardH;
+                U32 const current = (U32)(forwardIp - base);
+                U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
+                assert(matchIndex <= current);
+                assert(forwardIp - base < (ptrdiff_t)(2 GB - 1));
+                ip = forwardIp;
+                forwardIp += step;
+                step = (searchMatchNb++ >> LZ4_skipTrigger);
+
+                if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
+                assert(ip < mflimitPlusOne);
+
+                if (dictDirective == usingDictCtx) {
+                    if (matchIndex < startIndex) {
+                        /* there was no match, try the dictionary */
+                        assert(tableType == byU32);
+                        matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
+                        match = dictBase + matchIndex;
+                        matchIndex += dictDelta;   /* make dictCtx index comparable with current context */
                         lowLimit = dictionary;
                     } else {
-                        refDelta = 0;
+                        match = base + matchIndex;
                         lowLimit = (const BYTE*)source;
-                }   }
+                    }
+                } else if (dictDirective==usingExtDict) {
+                    if (matchIndex < startIndex) {
+                        DEBUGLOG(7, "extDict candidate: matchIndex=%5u  <  startIndex=%5u", matchIndex, startIndex);
+                        assert(startIndex - matchIndex >= MINMATCH);
+                        match = dictBase + matchIndex;
+                        lowLimit = dictionary;
+                    } else {
+                        match = base + matchIndex;
+                        lowLimit = (const BYTE*)source;
+                    }
+                } else {   /* single continuous memory segment */
+                    match = base + matchIndex;
+                }
                 forwardH = LZ4_hashPosition(forwardIp, tableType);
-                LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);
+                LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
+
+                DEBUGLOG(7, "candidate at pos=%u  (offset=%u \n", matchIndex, current - matchIndex);
+                if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) { continue; }   /* match outside of valid area */
+                assert(matchIndex < current);
+                if ( ((tableType != byU16) || (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX))
+                  && (matchIndex+LZ4_DISTANCE_MAX < current)) {
+                    continue;
+                }   /* too far */
+                assert((current - matchIndex) <= LZ4_DISTANCE_MAX);   /* match now expected within distance */
+
+                if (LZ4_read32(match) == LZ4_read32(ip)) {
+                    if (maybe_extMem) offset = current - matchIndex;
+                    break;   /* match found */
+                }
 
-            } while ( ((dictIssue==dictSmall) ? (match < lowRefLimit) : 0)
-                || ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip))
-                || (LZ4_read32(match+refDelta) != LZ4_read32(ip)) );
+            } while(1);
         }
 
         /* Catch up */
-        while (((ip>anchor) & (match+refDelta > lowLimit)) && (unlikely(ip[-1]==match[refDelta-1]))) { ip--; match--; }
+        filledIp = ip;
+        while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }
 
         /* Encode Literals */
         {   unsigned const litLength = (unsigned)(ip - anchor);
             token = op++;
-            if ((outputLimited) &&  /* Check output buffer overflow */
-                (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)))
-                return 0;
+            if ((outputDirective == limitedOutput) &&  /* Check output buffer overflow */
+                (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)) ) {
+                return 0;   /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
+            }
+            if ((outputDirective == fillOutput) &&
+                (unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) {
+                op--;
+                goto _last_literals;
+            }
             if (litLength >= RUN_MASK) {
-                int len = (int)litLength-RUN_MASK;
+                int len = (int)(litLength - RUN_MASK);
                 *token = (RUN_MASK<<ML_BITS);
                 for(; len >= 255 ; len-=255) *op++ = 255;
                 *op++ = (BYTE)len;
@@ -573,82 +987,183 @@ FORCE_INLINE int LZ4_compress_generic(
             else *token = (BYTE)(litLength<<ML_BITS);
 
             /* Copy Literals */
-            LZ4_wildCopy(op, anchor, op+litLength);
+            LZ4_wildCopy8(op, anchor, op+litLength);
             op+=litLength;
+            DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
+                        (int)(anchor-(const BYTE*)source), litLength, (int)(ip-(const BYTE*)source));
         }
 
 _next_match:
+        /* at this stage, the following variables must be correctly set :
+         * - ip : at start of LZ operation
+         * - match : at start of previous pattern occurence; can be within current prefix, or within extDict
+         * - offset : if maybe_ext_memSegment==1 (constant)
+         * - lowLimit : must be == dictionary to mean "match is within extDict"; must be == source otherwise
+         * - token and *token : position to write 4-bits for match length; higher 4-bits for literal length supposed already written
+         */
+
+        if ((outputDirective == fillOutput) &&
+            (op + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit)) {
+            /* the match was too close to the end, rewind and go to last literals */
+            op = token;
+            goto _last_literals;
+        }
+
         /* Encode Offset */
-        LZ4_writeLE16(op, (U16)(ip-match)); op+=2;
+        if (maybe_extMem) {   /* static test */
+            DEBUGLOG(6, "             with offset=%u  (ext if > %i)", offset, (int)(ip - (const BYTE*)source));
+            assert(offset <= LZ4_DISTANCE_MAX && offset > 0);
+            LZ4_writeLE16(op, (U16)offset); op+=2;
+        } else {
+            DEBUGLOG(6, "             with offset=%u  (same segment)", (U32)(ip - match));
+            assert(ip-match <= LZ4_DISTANCE_MAX);
+            LZ4_writeLE16(op, (U16)(ip - match)); op+=2;
+        }
 
         /* Encode MatchLength */
         {   unsigned matchCode;
 
-            if ((dict==usingExtDict) && (lowLimit==dictionary)) {
-                const BYTE* limit;
-                match += refDelta;
-                limit = ip + (dictEnd-match);
+            if ( (dictDirective==usingExtDict || dictDirective==usingDictCtx)
+              && (lowLimit==dictionary) /* match within extDict */ ) {
+                const BYTE* limit = ip + (dictEnd-match);
+                assert(dictEnd > match);
                 if (limit > matchlimit) limit = matchlimit;
                 matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
-                ip += MINMATCH + matchCode;
+                ip += (size_t)matchCode + MINMATCH;
                 if (ip==limit) {
-                    unsigned const more = LZ4_count(ip, (const BYTE*)source, matchlimit);
+                    unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit);
                     matchCode += more;
                     ip += more;
                 }
+                DEBUGLOG(6, "             with matchLength=%u starting in extDict", matchCode+MINMATCH);
             } else {
                 matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
-                ip += MINMATCH + matchCode;
+                ip += (size_t)matchCode + MINMATCH;
+                DEBUGLOG(6, "             with matchLength=%u", matchCode+MINMATCH);
             }
 
-            if ( outputLimited &&    /* Check output buffer overflow */
-                (unlikely(op + (1 + LASTLITERALS) + (matchCode>>8) > olimit)) )
-                return 0;
+            if ((outputDirective) &&    /* Check output buffer overflow */
+                (unlikely(op + (1 + LASTLITERALS) + (matchCode+240)/255 > olimit)) ) {
+                if (outputDirective == fillOutput) {
+                    /* Match description too long : reduce it */
+                    U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 1 - LASTLITERALS) * 255;
+                    ip -= matchCode - newMatchCode;
+                    assert(newMatchCode < matchCode);
+                    matchCode = newMatchCode;
+                    if (unlikely(ip <= filledIp)) {
+                        /* We have already filled up to filledIp so if ip ends up less than filledIp
+                         * we have positions in the hash table beyond the current position. This is
+                         * a problem if we reuse the hash table. So we have to remove these positions
+                         * from the hash table.
+                         */
+                        const BYTE* ptr;
+                        DEBUGLOG(5, "Clearing %u positions", (U32)(filledIp - ip));
+                        for (ptr = ip; ptr <= filledIp; ++ptr) {
+                            U32 const h = LZ4_hashPosition(ptr, tableType);
+                            LZ4_clearHash(h, cctx->hashTable, tableType);
+                        }
+                    }
+                } else {
+                    assert(outputDirective == limitedOutput);
+                    return 0;   /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
+                }
+            }
             if (matchCode >= ML_MASK) {
                 *token += ML_MASK;
                 matchCode -= ML_MASK;
                 LZ4_write32(op, 0xFFFFFFFF);
-                while (matchCode >= 4*255) op+=4, LZ4_write32(op, 0xFFFFFFFF), matchCode -= 4*255;
+                while (matchCode >= 4*255) {
+                    op+=4;
+                    LZ4_write32(op, 0xFFFFFFFF);
+                    matchCode -= 4*255;
+                }
                 op += matchCode / 255;
                 *op++ = (BYTE)(matchCode % 255);
             } else
                 *token += (BYTE)(matchCode);
         }
+        /* Ensure we have enough space for the last literals. */
+        assert(!(outputDirective == fillOutput && op + 1 + LASTLITERALS > olimit));
 
         anchor = ip;
 
         /* Test end of chunk */
-        if (ip > mflimit) break;
+        if (ip >= mflimitPlusOne) break;
 
         /* Fill table */
        LZ4_putPosition(ip-2, cctx->hashTable, tableType, base);
 
         /* Test next position */
-        match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
-        if (dict==usingExtDict) {
-            if (match < (const BYTE*)source) {
-                refDelta = dictDelta;
-                lowLimit = dictionary;
-            } else {
-                refDelta = 0;
-                lowLimit = (const BYTE*)source;
-        }   }
-        LZ4_putPosition(ip, cctx->hashTable, tableType, base);
-        if ( ((dictIssue==dictSmall) ? (match>=lowRefLimit) : 1)
-            && (match+MAX_DISTANCE>=ip)
-            && (LZ4_read32(match+refDelta)==LZ4_read32(ip)) )
-        { token=op++; *token=0; goto _next_match; }
+        if (tableType == byPtr) {
+
+            match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
+            LZ4_putPosition(ip, cctx->hashTable, tableType, base);
+            if ( (match+LZ4_DISTANCE_MAX >= ip)
+              && (LZ4_read32(match) == LZ4_read32(ip)) )
+            { token=op++; *token=0; goto _next_match; }
+
+        } else {   /* byU32, byU16 */
+
+            U32 const h = LZ4_hashPosition(ip, tableType);
+            U32 const current = (U32)(ip-base);
+            U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
+            assert(matchIndex < current);
+            if (dictDirective == usingDictCtx) {
+                if (matchIndex < startIndex) {
+                    /* there was no match, try the dictionary */
+                    matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
+                    match = dictBase + matchIndex;
+                    lowLimit = dictionary;   /* required for match length counter */
+                    matchIndex += dictDelta;
+                } else {
+                    match = base + matchIndex;
+                    lowLimit = (const BYTE*)source;   /* required for match length counter */
+                }
+            } else if (dictDirective==usingExtDict) {
+                if (matchIndex < startIndex) {
+                    match = dictBase + matchIndex;
+                    lowLimit = dictionary;   /* required for match length counter */
+                } else {
+                    match = base + matchIndex;
+                    lowLimit = (const BYTE*)source;   /* required for match length counter */
+                }
+            } else {   /* single memory segment */
+                match = base + matchIndex;
+            }
+            LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
+            assert(matchIndex < current);
+            if ( ((dictIssue==dictSmall) ? (matchIndex >= prefixIdxLimit) : 1)
+              && (((tableType==byU16) && (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX)) ? 1 : (matchIndex+LZ4_DISTANCE_MAX >= current))
+              && (LZ4_read32(match) == LZ4_read32(ip)) ) {
+                token=op++;
+                *token=0;
+                if (maybe_extMem) offset = current - matchIndex;
+                DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
+                            (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source));
+                goto _next_match;
+            }
+        }
 
         /* Prepare next loop */
         forwardH = LZ4_hashPosition(++ip, tableType);
+
     }
 
 _last_literals:
     /* Encode Last Literals */
-    {   size_t const lastRun = (size_t)(iend - anchor);
-        if ( (outputLimited) &&  /* Check output buffer overflow */
-            ((op - (BYTE*)dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize) )
-            return 0;
+    {   size_t lastRun = (size_t)(iend - anchor);
+        if ( (outputDirective) &&  /* Check output buffer overflow */
+            (op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) {
+            if (outputDirective == fillOutput) {
+                /* adapt lastRun to fill 'dst' */
+                assert(olimit >= op);
+                lastRun  = (size_t)(olimit-op) - 1;
+                lastRun -= (lastRun+240)/255;
+            } else {
+                assert(outputDirective == limitedOutput);
+                return 0;   /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
+            }
+        }
         if (lastRun >= RUN_MASK) {
             size_t accumulator = lastRun - RUN_MASK;
             *op++ = RUN_MASK << ML_BITS;
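
The last-literals arithmetic follows the LZ4 block format's length encoding:
a length below 15 fits in the token nibble; otherwise the nibble holds 15 and
the length continues as 255-valued bytes plus a final remainder byte. A small
sketch of that encoding, independent of the patch:

```c
#include <stdio.h>

#define RUN_MASK 15   /* 4-bit length nibble in the sequence token */

/* Encode an LZ4-style length: nibble in the token, then 255-run bytes. */
static int encode_length(unsigned char* out, unsigned len)
{
    int pos = 0;
    if (len < RUN_MASK) {
        out[pos++] = (unsigned char)(len << 4);       /* fits in the token */
    } else {
        out[pos++] = (unsigned char)(RUN_MASK << 4);
        len -= RUN_MASK;
        for (; len >= 255; len -= 255) out[pos++] = 255;
        out[pos++] = (unsigned char)len;              /* final remainder */
    }
    return pos;
}

int main(void)
{
    unsigned char buf[8];
    int n = encode_length(buf, 300);   /* 300 = 15 + 255 + 30 */
    printf("%d bytes: %u %u %u\n", n, buf[0], buf[1], buf[2]);
    return 0;
}
```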
... | ... |
@@ -658,251 +1173,154 @@ _last_literals: |
658 | 658 |
*op++ = (BYTE)(lastRun<<ML_BITS); |
659 | 659 |
} |
660 | 660 |
memcpy(op, anchor, lastRun); |
661 |
+ ip = anchor + lastRun; |
|
661 | 662 |
op += lastRun; |
662 | 663 |
} |
663 | 664 |
|
664 |
- /* End */ |
|
665 |
- return (int) (((char*)op)-dest); |
|
665 |
+ if (outputDirective == fillOutput) { |
|
666 |
+ *inputConsumed = (int) (((const char*)ip)-source); |
|
667 |
+ } |
|
668 |
+ DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes", inputSize, (int)(((char*)op) - dest)); |
|
669 |
+ result = (int)(((char*)op) - dest); |
|
670 |
+ assert(result > 0); |
|
671 |
+ return result; |
|
666 | 672 |
} |
667 | 673 |
|
668 | 674 |
|
669 | 675 |
int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration) |
670 | 676 |
{ |
677 |
+ LZ4_stream_t_internal* const ctx = & LZ4_initStream(state, sizeof(LZ4_stream_t)) -> internal_donotuse; |
|
678 |
+ assert(ctx != NULL); |
|
679 |
+ if (acceleration < 1) acceleration = ACCELERATION_DEFAULT; |
|
680 |
+ if (maxOutputSize >= LZ4_compressBound(inputSize)) { |
|
681 |
+ if (inputSize < LZ4_64Klimit) { |
|
682 |
+ return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, byU16, noDict, noDictIssue, acceleration); |
|
683 |
+ } else { |
|
684 |
+ const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32; |
|
685 |
+ return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration); |
|
686 |
+ } |
|
687 |
+ } else { |
|
688 |
+ if (inputSize < LZ4_64Klimit) { |
|
689 |
+ return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration); |
|
690 |
+ } else { |
|
691 |
+ const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32; |
|
692 |
+ return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration); |
|
693 |
+ } |
|
694 |
+ } |
|
695 |
+} |
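For reference, a minimal sketch of driving the reworked extState entry point with caller-owned memory; the buffer contents and sizes are illustrative, and all calls are public lz4.h API:

    #include <stdio.h>
    #include "lz4.h"

    int main(void)
    {
        LZ4_stream_t state;   /* caller-provided state: no allocation inside lz4 */
        const char src[] = "yes yes yes yes yes yes yes yes";
        char dst[LZ4_COMPRESSBOUND(sizeof(src))];

        /* extState (re)initializes `state` itself; no explicit reset needed */
        int const csize = LZ4_compress_fast_extState(&state, src, dst,
                                                     (int)sizeof(src),
                                                     (int)sizeof(dst),
                                                     1 /* acceleration */);
        if (csize <= 0) return 1;   /* 0 signals compression failure */
        printf("%d -> %d bytes\n", (int)sizeof(src), csize);
        return 0;
    }

Because dst is sized with LZ4_COMPRESSBOUND(), the notLimited branch above is taken and the call cannot fail for lack of output space.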
|
696 |
+ |
|
697 |
+/** |
|
698 |
+ * LZ4_compress_fast_extState_fastReset() : |
|
699 |
+ * A variant of LZ4_compress_fast_extState(). |
|
700 |
+ * |
|
701 |
+ * Using this variant avoids an expensive initialization step. It is only safe |
|
702 |
+ * to call if the state buffer is known to be correctly initialized already |
|
703 |
+ * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of |
|
704 |
+ * "correctly initialized"). |
|
705 |
+ */ |
|
706 |
+int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration) |
|
707 |
+{ |
|
671 | 708 |
LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse; |
672 |
- LZ4_resetStream((LZ4_stream_t*)state); |
|
673 | 709 |
if (acceleration < 1) acceleration = ACCELERATION_DEFAULT; |
674 | 710 |
|
675 |
- if (maxOutputSize >= LZ4_compressBound(inputSize)) { |
|
676 |
- if (inputSize < LZ4_64Klimit) |
|
677 |
- return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, byU16, noDict, noDictIssue, acceleration); |
|
678 |
- else |
|
679 |
- return LZ4_compress_generic(ctx, source, dest, inputSize, 0, notLimited, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue, acceleration); |
|
711 |
+ if (dstCapacity >= LZ4_compressBound(srcSize)) { |
|
712 |
+ if (srcSize < LZ4_64Klimit) { |
|
713 |
+ const tableType_t tableType = byU16; |
|
714 |
+ LZ4_prepareTable(ctx, srcSize, tableType); |
|
715 |
+ if (ctx->currentOffset) { |
|
716 |
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, dictSmall, acceleration); |
|
717 |
+ } else { |
|
718 |
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration); |
|
719 |
+ } |
|
720 |
+ } else { |
|
721 |
+ const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32; |
|
722 |
+ LZ4_prepareTable(ctx, srcSize, tableType); |
|
723 |
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration); |
|
724 |
+ } |
|
680 | 725 |
} else { |
681 |
- if (inputSize < LZ4_64Klimit) |
|
682 |
- return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration); |
|
683 |
- else |
|
684 |
- return LZ4_compress_generic(ctx, source, dest, inputSize, maxOutputSize, limitedOutput, (sizeof(void*)==8) ? byU32 : byPtr, noDict, noDictIssue, acceleration); |
|
726 |
+ if (srcSize < LZ4_64Klimit) { |
|
727 |
+ const tableType_t tableType = byU16; |
|
728 |
+ LZ4_prepareTable(ctx, srcSize, tableType); |
|
729 |
+ if (ctx->currentOffset) { |
|
730 |
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration); |
|
731 |
+ } else { |
|
732 |
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration); |
|
733 |
+ } |
|
734 |
+ } else { |
|
735 |
+ const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32; |
|
736 |
+ LZ4_prepareTable(ctx, srcSize, tableType); |
|
737 |
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration); |
|
738 |
+ } |
|
685 | 739 |
} |
686 | 740 |
} |
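A sketch of the init-once/reuse pattern this variant enables; the helper name and block layout are hypothetical, and the _fastReset() symbol is only exposed under LZ4_STATIC_LINKING_ONLY:

    #define LZ4_STATIC_LINKING_ONLY
    #include "lz4.h"

    /* Compress many independent blocks with one context, paying the
     * full table wipe only once. */
    static void compress_blocks(const char* const* srcs, const int* sizes, int n,
                                char* dst, int dstCapacity)
    {
        LZ4_stream_t ctx;
        LZ4_initStream(&ctx, sizeof(ctx));        /* expensive full init, once */
        for (int i = 0; i < n; i++) {
            int const csize = LZ4_compress_fast_extState_fastReset(
                    &ctx, srcs[i], dst, sizes[i], dstCapacity, 1);
            (void)csize;                          /* ...ship csize bytes... */
        }
    }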
687 | 741 |
|
688 | 742 |
|
689 | 743 |
int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration) |
690 | 744 |
{ |
691 |
-#if (HEAPMODE) |
|
692 |
- void* ctxPtr = ALLOCATOR(1, sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ |
|
745 |
+ int result; |
|
746 |
+#if (LZ4_HEAPMODE) |
|
747 |
+ LZ4_stream_t* ctxPtr = ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ |
|
748 |
+ if (ctxPtr == NULL) return 0; |
|
693 | 749 |
#else |
694 | 750 |
LZ4_stream_t ctx; |
695 |
- void* const ctxPtr = &ctx; |
|
751 |
+ LZ4_stream_t* const ctxPtr = &ctx; |
|
696 | 752 |
#endif |
753 |
+ result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration); |
|
697 | 754 |
|
698 |
- int const result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration); |
|
699 |
- |
|
700 |
-#if (HEAPMODE) |
|
755 |
+#if (LZ4_HEAPMODE) |
|
701 | 756 |
FREEMEM(ctxPtr); |
702 | 757 |
#endif |
703 | 758 |
return result; |
704 | 759 |
} |
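The LZ4_HEAPMODE toggle matters here because LZ4_stream_t is roughly 16 KB with the default LZ4_MEMORY_USAGE: with the default value 0 it lives on the stack, which is fastest but can be too much for small-stack environments. Such builds can flip the knob on the compiler command line (the file path is illustrative):

    cc -DLZ4_HEAPMODE=1 -c lz4.c

The only behavioral difference is the ALLOC()/FREEMEM() pair above, plus the new NULL check when allocation fails.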
705 | 760 |
|
706 | 761 |
|
707 |
-int LZ4_compress_default(const char* source, char* dest, int inputSize, int maxOutputSize) |
|
762 |
+int LZ4_compress_default(const char* src, char* dst, int srcSize, int maxOutputSize) |
|
708 | 763 |
{ |
709 |
- return LZ4_compress_fast(source, dest, inputSize, maxOutputSize, 1); |
|
764 |
+ return LZ4_compress_fast(src, dst, srcSize, maxOutputSize, 1); |
|
710 | 765 |
} |
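A self-contained round trip through the two public one-shot entry points, useful as a smoke test of the rebased code; all calls are standard lz4.h API:

    #include <assert.h>
    #include <string.h>
    #include "lz4.h"

    int main(void)
    {
        const char src[] = "LZ4 is a very fast lossless compression algorithm.";
        char compressed[LZ4_COMPRESSBOUND(sizeof(src))];
        char restored[sizeof(src)];

        int const csize = LZ4_compress_default(src, compressed,
                                               (int)sizeof(src),
                                               (int)sizeof(compressed));
        assert(csize > 0);          /* cannot fail: dst is compressBound-sized */

        int const dsize = LZ4_decompress_safe(compressed, restored,
                                              csize, (int)sizeof(restored));
        assert(dsize == (int)sizeof(src));
        assert(memcmp(src, restored, sizeof(src)) == 0);
        return 0;
    }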
711 | 766 |
|
712 | 767 |
|
713 | 768 |
/* hidden debug function */ |
714 | 769 |
/* strangely enough, gcc generates faster code when this function is uncommented, even if unused */ |
715 |
-int LZ4_compress_fast_force(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration) |
|
770 |
+int LZ4_compress_fast_force(const char* src, char* dst, int srcSize, int dstCapacity, int acceleration) |
|
716 | 771 |
{ |
717 | 772 |
LZ4_stream_t ctx; |
718 |
- LZ4_resetStream(&ctx); |
|
719 |
- |
|
720 |
- if (inputSize < LZ4_64Klimit) |
|
721 |
- return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration); |
|
722 |
- else |
|
723 |
- return LZ4_compress_generic(&ctx.internal_donotuse, source, dest, inputSize, maxOutputSize, limitedOutput, sizeof(void*)==8 ? byU32 : byPtr, noDict, noDictIssue, acceleration); |
|
724 |
-} |
|
725 |
- |
|
726 |
- |
|
727 |
-/*-****************************** |
|
728 |
-* *_destSize() variant |
|
729 |
-********************************/ |
|
773 |
+ LZ4_initStream(&ctx, sizeof(ctx)); |
|
730 | 774 |
|
731 |
-static int LZ4_compress_destSize_generic( |
|
732 |
- LZ4_stream_t_internal* const ctx, |
|
733 |
- const char* const src, |
|
734 |
- char* const dst, |
|
735 |
- int* const srcSizePtr, |
|
736 |
- const int targetDstSize, |
|
737 |
- const tableType_t tableType) |
|
738 |
-{ |
|
739 |
- const BYTE* ip = (const BYTE*) src; |
|
740 |
- const BYTE* base = (const BYTE*) src; |
|
741 |
- const BYTE* lowLimit = (const BYTE*) src; |
|
742 |
- const BYTE* anchor = ip; |
|
743 |
- const BYTE* const iend = ip + *srcSizePtr; |
|
744 |
- const BYTE* const mflimit = iend - MFLIMIT; |
|
745 |
- const BYTE* const matchlimit = iend - LASTLITERALS; |
|
746 |
- |
|
747 |
- BYTE* op = (BYTE*) dst; |
|
748 |
- BYTE* const oend = op + targetDstSize; |
|
749 |
- BYTE* const oMaxLit = op + targetDstSize - 2 /* offset */ - 8 /* because 8+MINMATCH==MFLIMIT */ - 1 /* token */; |
|
750 |
- BYTE* const oMaxMatch = op + targetDstSize - (LASTLITERALS + 1 /* token */); |
|
751 |
- BYTE* const oMaxSeq = oMaxLit - 1 /* token */; |
|
752 |
- |
|
753 |
- U32 forwardH; |
|
754 |
- |
|
755 |
- |
|
756 |
- /* Init conditions */ |
|
757 |
- if (targetDstSize < 1) return 0; /* Impossible to store anything */ |
|
758 |
- if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size, too large (or negative) */ |
|
759 |
- if ((tableType == byU16) && (*srcSizePtr>=LZ4_64Klimit)) return 0; /* Size too large (not within 64K limit) */ |
|
760 |
- if (*srcSizePtr<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */ |
|
761 |
- |
|
762 |
- /* First Byte */ |
|
763 |
- *srcSizePtr = 0; |
|
764 |
- LZ4_putPosition(ip, ctx->hashTable, tableType, base); |
|
765 |
- ip++; forwardH = LZ4_hashPosition(ip, tableType); |
|
766 |
- |
|
767 |
- /* Main Loop */ |
|
768 |
- for ( ; ; ) { |
|
769 |
- const BYTE* match; |
|
770 |
- BYTE* token; |
|
771 |
- |
|
772 |
- /* Find a match */ |
|
773 |
- { const BYTE* forwardIp = ip; |
|
774 |
- unsigned step = 1; |
|
775 |
- unsigned searchMatchNb = 1 << LZ4_skipTrigger; |
|
776 |
- |
|
777 |
- do { |
|
778 |
- U32 h = forwardH; |
|
779 |
- ip = forwardIp; |
|
780 |
- forwardIp += step; |
|
781 |
- step = (searchMatchNb++ >> LZ4_skipTrigger); |
|
782 |
- |
|
783 |
- if (unlikely(forwardIp > mflimit)) goto _last_literals; |
|
784 |
- |
|
785 |
- match = LZ4_getPositionOnHash(h, ctx->hashTable, tableType, base); |
|
786 |
- forwardH = LZ4_hashPosition(forwardIp, tableType); |
|
787 |
- LZ4_putPositionOnHash(ip, h, ctx->hashTable, tableType, base); |
|
788 |
- |
|
789 |
- } while ( ((tableType==byU16) ? 0 : (match + MAX_DISTANCE < ip)) |
|
790 |
- || (LZ4_read32(match) != LZ4_read32(ip)) ); |
|
791 |
- } |
|
792 |
- |
|
793 |
- /* Catch up */ |
|
794 |
- while ((ip>anchor) && (match > lowLimit) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; } |
|
795 |
- |
|
796 |
- /* Encode Literal length */ |
|
797 |
- { unsigned litLength = (unsigned)(ip - anchor); |
|
798 |
- token = op++; |
|
799 |
- if (op + ((litLength+240)/255) + litLength > oMaxLit) { |
|
800 |
- /* Not enough space for a last match */ |
|
801 |
- op--; |
|
802 |
- goto _last_literals; |
|
803 |
- } |
|
804 |
- if (litLength>=RUN_MASK) { |
|
805 |
- unsigned len = litLength - RUN_MASK; |
|
806 |
- *token=(RUN_MASK<<ML_BITS); |
|
807 |
- for(; len >= 255 ; len-=255) *op++ = 255; |
|
808 |
- *op++ = (BYTE)len; |
|
809 |
- } |
|
810 |
- else *token = (BYTE)(litLength<<ML_BITS); |
|
811 |
- |
|
812 |
- /* Copy Literals */ |
|
813 |
- LZ4_wildCopy(op, anchor, op+litLength); |
|
814 |
- op += litLength; |
|
815 |
- } |
|
816 |
- |
|
817 |
-_next_match: |
|
818 |
- /* Encode Offset */ |
|
819 |
- LZ4_writeLE16(op, (U16)(ip-match)); op+=2; |
|
820 |
- |
|
821 |
- /* Encode MatchLength */ |
|
822 |
- { size_t matchLength = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit); |
|
823 |
- |
|
824 |
- if (op + ((matchLength+240)/255) > oMaxMatch) { |
|
825 |
- /* Match description too long : reduce it */ |
|
826 |
- matchLength = (15-1) + (oMaxMatch-op) * 255; |
|
827 |
- } |
|
828 |
- ip += MINMATCH + matchLength; |
|
829 |
- |
|
830 |
- if (matchLength>=ML_MASK) { |
|
831 |
- *token += ML_MASK; |
|
832 |
- matchLength -= ML_MASK; |
|
833 |
- while (matchLength >= 255) { matchLength-=255; *op++ = 255; } |
|
834 |
- *op++ = (BYTE)matchLength; |
|
835 |
- } |
|
836 |
- else *token += (BYTE)(matchLength); |
|
837 |
- } |
|
838 |
- |
|
839 |
- anchor = ip; |
|
840 |
- |
|
841 |
- /* Test end of block */ |
|
842 |
- if (ip > mflimit) break; |
|
843 |
- if (op > oMaxSeq) break; |
|
844 |
- |
|
845 |
- /* Fill table */ |
|
846 |
- LZ4_putPosition(ip-2, ctx->hashTable, tableType, base); |
|
847 |
- |
|
848 |
- /* Test next position */ |
|
849 |
- match = LZ4_getPosition(ip, ctx->hashTable, tableType, base); |
|
850 |
- LZ4_putPosition(ip, ctx->hashTable, tableType, base); |
|
851 |
- if ( (match+MAX_DISTANCE>=ip) |
|
852 |
- && (LZ4_read32(match)==LZ4_read32(ip)) ) |
|
853 |
- { token=op++; *token=0; goto _next_match; } |
|
854 |
- |
|
855 |
- /* Prepare next loop */ |
|
856 |
- forwardH = LZ4_hashPosition(++ip, tableType); |
|
857 |
- } |
|
858 |
- |
|
859 |
-_last_literals: |
|
860 |
- /* Encode Last Literals */ |
|
861 |
- { size_t lastRunSize = (size_t)(iend - anchor); |
|
862 |
- if (op + 1 /* token */ + ((lastRunSize+240)/255) /* litLength */ + lastRunSize /* literals */ > oend) { |
|
863 |
- /* adapt lastRunSize to fill 'dst' */ |
|
864 |
- lastRunSize = (oend-op) - 1; |
|
865 |
- lastRunSize -= (lastRunSize+240)/255; |
|
866 |
- } |
|
867 |
- ip = anchor + lastRunSize; |
|
868 |
- |
|
869 |
- if (lastRunSize >= RUN_MASK) { |
|
870 |
- size_t accumulator = lastRunSize - RUN_MASK; |
|
871 |
- *op++ = RUN_MASK << ML_BITS; |
|
872 |
- for(; accumulator >= 255 ; accumulator-=255) *op++ = 255; |
|
873 |
- *op++ = (BYTE) accumulator; |
|
874 |
- } else { |
|
875 |
- *op++ = (BYTE)(lastRunSize<<ML_BITS); |
|
876 |
- } |
|
877 |
- memcpy(op, anchor, lastRunSize); |
|
878 |
- op += lastRunSize; |
|
775 |
+ if (srcSize < LZ4_64Klimit) { |
|
776 |
+ return LZ4_compress_generic(&ctx.internal_donotuse, src, dst, srcSize, NULL, dstCapacity, limitedOutput, byU16, noDict, noDictIssue, acceleration); |
|
777 |
+ } else { |
|
778 |
+ tableType_t const addrMode = (sizeof(void*) > 4) ? byU32 : byPtr; |
|
779 |
+ return LZ4_compress_generic(&ctx.internal_donotuse, src, dst, srcSize, NULL, dstCapacity, limitedOutput, addrMode, noDict, noDictIssue, acceleration); |
|
879 | 780 |
} |
880 |
- |
|
881 |
- /* End */ |
|
882 |
- *srcSizePtr = (int) (((const char*)ip)-src); |
|
883 |
- return (int) (((char*)op)-dst); |
|
884 | 781 |
} |
885 | 782 |
|
886 | 783 |
|
784 |
+/* Note! This function leaves the stream in an unclean/broken state!
|
785 |
+ * It is not safe to subsequently use the same state with a _fastReset() or |
|
786 |
+ * _continue() call without resetting it. */ |
|
887 | 787 |
static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize) |
888 | 788 |
{ |
889 |
- LZ4_resetStream(state); |
|
789 |
+ void* const s = LZ4_initStream(state, sizeof (*state)); |
|
790 |
+ assert(s != NULL); (void)s; |
|
890 | 791 |
|
891 | 792 |
if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) { /* compression success is guaranteed */ |
892 | 793 |
return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1); |
893 | 794 |
} else { |
894 |
- if (*srcSizePtr < LZ4_64Klimit) |
|
895 |
- return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, byU16); |
|
896 |
- else |
|
897 |
- return LZ4_compress_destSize_generic(&state->internal_donotuse, src, dst, srcSizePtr, targetDstSize, sizeof(void*)==8 ? byU32 : byPtr); |
|
898 |
- } |
|
795 |
+ if (*srcSizePtr < LZ4_64Klimit) { |
|
796 |
+ return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, 1); |
|
797 |
+ } else { |
|
798 |
+ tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32; |
|
799 |
+ return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, 1); |
|
800 |
+ } } |
|
899 | 801 |
} |
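The fillOutput direction reverses the usual contract: the destination size is fixed, and *srcSizePtr is both input (bytes available) and output (bytes actually consumed). A sketch against the public LZ4_compress_destSize() wrapper below, with the 512-byte slot size purely illustrative:

    #include <stdio.h>
    #include "lz4.h"

    static int fill_slot(const char* src, int srcSize, char out[512])
    {
        int consumed = srcSize;     /* in: available ; out: actually consumed */
        int const written = LZ4_compress_destSize(src, out, &consumed, 512);
        if (written > 0)
            printf("packed %d of %d input bytes into %d\n",
                   consumed, srcSize, written);
        return written;
    }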
900 | 802 |
|
901 | 803 |
|
902 | 804 |
int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize) |
903 | 805 |
{ |
904 |
-#if (HEAPMODE) |
|
905 |
- LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOCATOR(1, sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ |
|
806 |
+#if (LZ4_HEAPMODE) |
|
807 |
+ LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ |
|
808 |
+ if (ctx == NULL) return 0; |
|
906 | 809 |
#else |
907 | 810 |
LZ4_stream_t ctxBody; |
908 | 811 |
LZ4_stream_t* ctx = &ctxBody; |
... | ... |
@@ -910,7 +1328,7 @@ int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targe |
910 | 910 |
|
911 | 911 |
int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize); |
912 | 912 |
|
913 |
-#if (HEAPMODE) |
|
913 |
+#if (LZ4_HEAPMODE) |
|
914 | 914 |
FREEMEM(ctx); |
915 | 915 |
#endif |
916 | 916 |
return result; |
... | ... |
@@ -924,19 +1342,54 @@ int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targe |
924 | 924 |
|
925 | 925 |
LZ4_stream_t* LZ4_createStream(void) |
926 | 926 |
{ |
927 |
- LZ4_stream_t* lz4s = (LZ4_stream_t*)ALLOCATOR(8, LZ4_STREAMSIZE_U64); |
|
927 |
+ LZ4_stream_t* const lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); |
|
928 | 928 |
LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal)); /* A compilation error here means LZ4_STREAMSIZE is not large enough */ |
929 |
- LZ4_resetStream(lz4s); |
|
929 |
+ DEBUGLOG(4, "LZ4_createStream %p", lz4s); |
|
930 |
+ if (lz4s == NULL) return NULL; |
|
931 |
+ LZ4_initStream(lz4s, sizeof(*lz4s)); |
|
930 | 932 |
return lz4s; |
931 | 933 |
} |
932 | 934 |
|
935 |
+#ifndef _MSC_VER /* for some reason, Visual fails the alignment test on 32-bit x86 :
|
936 |
+ it reports an alignment of 8 bytes,
|
937 |
+ while actually aligning LZ4_stream_t on 4 bytes. */ |
|
938 |
+static size_t LZ4_stream_t_alignment(void) |
|
939 |
+{ |
|
940 |
+ struct { char c; LZ4_stream_t t; } t_a; |
|
941 |
+ return sizeof(t_a) - sizeof(t_a.t); |
|
942 |
+} |
|
943 |
+#endif |
|
944 |
+ |
|
945 |
+LZ4_stream_t* LZ4_initStream (void* buffer, size_t size) |
|
946 |
+{ |
|
947 |
+ DEBUGLOG(5, "LZ4_initStream"); |
|
948 |
+ if (buffer == NULL) { return NULL; } |
|
949 |
+ if (size < sizeof(LZ4_stream_t)) { return NULL; } |
|
950 |
+#ifndef _MSC_VER /* for some reason, Visual fails the alignment test on 32-bit x86 :
|
951 |
+ it reports an alignment of 8 bytes,
|
952 |
+ while actually aligning LZ4_stream_t on 4 bytes. */ |
|
953 |
+ if (((size_t)buffer) & (LZ4_stream_t_alignment() - 1)) { return NULL; } /* alignment check */ |
|
954 |
+#endif |
|
955 |
+ MEM_INIT(buffer, 0, sizeof(LZ4_stream_t)); |
|
956 |
+ return (LZ4_stream_t*)buffer; |
|
957 |
+} |
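Unlike the old LZ4_resetStream(), LZ4_initStream() validates the caller's buffer and returns NULL when it is too small or misaligned. A sketch of defensive use, with the helper name hypothetical:

    #include <stdlib.h>
    #include "lz4.h"

    static LZ4_stream_t* make_ctx(void)
    {
        void* const buf = malloc(sizeof(LZ4_stream_t)); /* malloc alignment suffices */
        if (buf == NULL) return NULL;
        LZ4_stream_t* const ctx = LZ4_initStream(buf, sizeof(LZ4_stream_t));
        if (ctx == NULL) free(buf);  /* rejected by the size/alignment checks above */
        return ctx;
    }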
|
958 |
+ |
|
959 |
+/* resetStream is now deprecated, |
|
960 |
+ * prefer initStream() which is more general */ |
|
933 | 961 |
void LZ4_resetStream (LZ4_stream_t* LZ4_stream) |
934 | 962 |
{ |
963 |
+ DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream); |
|
935 | 964 |
MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t)); |
936 | 965 |
} |
937 | 966 |
|
967 |
+void LZ4_resetStream_fast(LZ4_stream_t* ctx) { |
|
968 |
+ LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32); |
|
969 |
+} |
|
970 |
+ |
|
938 | 971 |
int LZ4_freeStream (LZ4_stream_t* LZ4_stream) |
939 | 972 |
{ |
973 |
+ if (!LZ4_stream) return 0; /* support free on NULL */ |
|
974 |
+ DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream); |
|
940 | 975 |
FREEMEM(LZ4_stream); |
941 | 976 |
return (0); |
942 | 977 |
} |
... | ... |
@@ -946,43 +1399,88 @@ int LZ4_freeStream (LZ4_stream_t* LZ4_stream) |
946 | 946 |
int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize) |
947 | 947 |
{ |
948 | 948 |
LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse; |
949 |
+ const tableType_t tableType = byU32; |
|
949 | 950 |
const BYTE* p = (const BYTE*)dictionary; |
950 | 951 |
const BYTE* const dictEnd = p + dictSize; |
951 | 952 |
const BYTE* base; |
952 | 953 |
|
953 |
- if ((dict->initCheck) || (dict->currentOffset > 1 GB)) /* Uninitialized structure, or reuse overflow */ |
|
954 |
- LZ4_resetStream(LZ4_dict); |
|
954 |
+ DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict); |
|
955 |
+ |
|
956 |
+ /* It's necessary to reset the context, |
|
957 |
+ * and not just continue it with prepareTable() |
|
958 |
+ * to avoid any risk of generating overflowing matchIndex |
|
959 |
+ * when compressing using this dictionary */ |
|
960 |
+ LZ4_resetStream(LZ4_dict); |
|
961 |
+ |
|
962 |
+ /* We always increment the offset by 64 KB, since, if the dict is longer, |
|
963 |
+ * we truncate it to the last 64k, and if it's shorter, we still want to |
|
964 |
+ * advance by a whole window length so we can provide the guarantee that |
|
965 |
+ * there are only valid offsets in the window, which allows an optimization |
|
966 |
+ * in LZ4_compress_fast_continue() where it uses noDictIssue even when the |
|
967 |
+ * dictionary isn't a full 64k. */ |
|
968 |
+ dict->currentOffset += 64 KB; |
|
955 | 969 |
|
956 | 970 |
if (dictSize < (int)HASH_UNIT) { |
957 |
- dict->dictionary = NULL; |
|
958 |
- dict->dictSize = 0; |
|
959 | 971 |
return 0; |
960 | 972 |
} |
961 | 973 |
|
962 | 974 |
if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB; |
963 |
- dict->currentOffset += 64 KB; |
|
964 |
- base = p - dict->currentOffset; |
|
975 |
+ base = dictEnd - dict->currentOffset; |
|
965 | 976 |
dict->dictionary = p; |
966 | 977 |
dict->dictSize = (U32)(dictEnd - p); |
967 |
- dict->currentOffset += dict->dictSize; |
|
978 |
+ dict->tableType = tableType; |
|
968 | 979 |
|
969 | 980 |
while (p <= dictEnd-HASH_UNIT) { |
970 |
- LZ4_putPosition(p, dict->hashTable, byU32, base); |
|
981 |
+ LZ4_putPosition(p, dict->hashTable, tableType, base); |
|
971 | 982 |
p+=3; |
972 | 983 |
} |
973 | 984 |
|
974 |
- return dict->dictSize; |
|
985 |
+ return (int)dict->dictSize; |
|
975 | 986 |
} |
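A sketch of priming a stream with a dictionary and compressing one block against it; names and sizes are illustrative, and the matching decode side would use LZ4_decompress_safe_usingDict():

    #include "lz4.h"

    static int compress_with_dict(const char* dict, int dictSize,
                                  const char* src, int srcSize,
                                  char* dst, int dstCapacity)
    {
        LZ4_stream_t ctx;
        LZ4_initStream(&ctx, sizeof(ctx));
        LZ4_loadDict(&ctx, dict, dictSize);  /* hashes dict, bumps offset by 64 KB */
        return LZ4_compress_fast_continue(&ctx, src, dst, srcSize, dstCapacity, 1);
    }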
976 | 987 |
|
988 |
+void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream) { |
|
989 |
+ const LZ4_stream_t_internal* dictCtx = dictionaryStream == NULL ? NULL : |
|
990 |
+ &(dictionaryStream->internal_donotuse); |
|
991 |
+ |
|
992 |
+ DEBUGLOG(4, "LZ4_attach_dictionary (%p, %p, size %u)", |
|
993 |
+ workingStream, dictionaryStream, |
|
994 |
+ dictCtx != NULL ? dictCtx->dictSize : 0); |
|
995 |
+ |
|
996 |
+ /* Calling LZ4_resetStream_fast() here makes sure that changes will not be |
|
997 |
+ * erased by subsequent calls to LZ4_resetStream_fast() in case the stream was
|
998 |
+ * marked as having a dirty context, i.e. requiring a full reset.
|
999 |
+ */ |
|
1000 |
+ LZ4_resetStream_fast(workingStream); |
|
1001 |
+ |
|
1002 |
+ if (dictCtx != NULL) { |
|
1003 |
+ /* If the current offset is zero, we will never look in the |
|
1004 |
+ * external dictionary context, since there is no value a table |
|
1005 |
+ * entry can take that indicates a miss. In that case, we need
|
1006 |
+ * to bump the offset to something non-zero. |
|
1007 |
+ */ |
|
1008 |
+ if (workingStream->internal_donotuse.currentOffset == 0) { |
|
1009 |
+ workingStream->internal_donotuse.currentOffset = 64 KB; |
|
1010 |
+ } |
|
977 | 1011 |
|
978 |
-static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src) |
|
1012 |
+ /* Don't actually attach an empty dictionary. |
|
1013 |
+ */ |
|
1014 |
+ if (dictCtx->dictSize == 0) { |
|
1015 |
+ dictCtx = NULL; |
|
1016 |
+ } |
|
1017 |
+ } |
|
1018 |
+ workingStream->internal_donotuse.dictCtx = dictCtx; |
|
1019 |
+} |
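The point of LZ4_attach_dictionary() is amortization: hash the dictionary once with LZ4_loadDict(), then reference it from any number of working streams at near-zero cost. A sketch, noting that this symbol also lives behind LZ4_STATIC_LINKING_ONLY:

    #define LZ4_STATIC_LINKING_ONLY
    #include "lz4.h"

    static void attach_example(const LZ4_stream_t* dictStream, /* preloaded once */
                               const char* src, int srcSize,
                               char* dst, int dstCapacity)
    {
        LZ4_stream_t work;
        LZ4_initStream(&work, sizeof(work));
        LZ4_attach_dictionary(&work, dictStream); /* cheap per-stream hookup */
        (void)LZ4_compress_fast_continue(&work, src, dst, srcSize, dstCapacity, 1);
    }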
|
1020 |
+ |
|
1021 |
+ |
|
1022 |
+static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize) |
|
979 | 1023 |
{ |
980 |
- if ((LZ4_dict->currentOffset > 0x80000000) || |
|
981 |
- ((uptrval)LZ4_dict->currentOffset > (uptrval)src)) { /* address space overflow */ |
|
1024 |
+ assert(nextSize >= 0); |
|
1025 |
+ if (LZ4_dict->currentOffset + (unsigned)nextSize > 0x80000000) { /* potential ptrdiff_t overflow (32-bits mode) */ |
|
982 | 1026 |
/* rescale hash table */ |
983 | 1027 |
U32 const delta = LZ4_dict->currentOffset - 64 KB; |
984 | 1028 |
const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize; |
985 | 1029 |
int i; |
1030 |
+ DEBUGLOG(4, "LZ4_renormDictT"); |
|
986 | 1031 |
for (i=0; i<LZ4_HASH_SIZE_U32; i++) { |
987 | 1032 |
if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0; |
988 | 1033 |
else LZ4_dict->hashTable[i] -= delta; |
... | ... |
@@ -994,17 +1492,30 @@ static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, const BYTE* src) |
994 | 994 |
} |
995 | 995 |
|
996 | 996 |
|
997 |
-int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration) |
|
997 |
+int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, |
|
998 |
+ const char* source, char* dest, |
|
999 |
+ int inputSize, int maxOutputSize, |
|
1000 |
+ int acceleration) |
|
998 | 1001 |
{ |
1002 |
+ const tableType_t tableType = byU32; |
|
999 | 1003 |
LZ4_stream_t_internal* streamPtr = &LZ4_stream->internal_donotuse; |
1000 |
- const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize; |
|
1004 |
+ const BYTE* dictEnd = streamPtr->dictionary + streamPtr->dictSize; |
|
1001 | 1005 |
|
1002 |
- const BYTE* smallest = (const BYTE*) source; |
|
1003 |
- if (streamPtr->initCheck) return 0; /* Uninitialized structure detected */ |
|
1004 |
- if ((streamPtr->dictSize>0) && (smallest>dictEnd)) smallest = dictEnd; |
|
1005 |
- LZ4_renormDictT(streamPtr, smallest); |
|
1006 |
+ DEBUGLOG(5, "LZ4_compress_fast_continue (inputSize=%i)", inputSize); |
|
1007 |
+ |
|
1008 |
+ if (streamPtr->dirty) { return 0; } /* Uninitialized structure detected */ |
|
1009 |
+ LZ4_renormDictT(streamPtr, inputSize); /* avoid index overflow */ |
|
1006 | 1010 |
if (acceleration < 1) acceleration = ACCELERATION_DEFAULT; |
1007 | 1011 |
|
1012 |
+ /* invalidate tiny dictionaries */ |
|
1013 |
+ if ( (streamPtr->dictSize-1 < 4-1) /* intentional underflow */ |
|
1014 |
+ && (dictEnd != (const BYTE*)source) ) { |
|
1015 |
+ DEBUGLOG(5, "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small", streamPtr->dictSize, streamPtr->dictionary); |
|
1016 |
+ streamPtr->dictSize = 0; |
|
1017 |
+ streamPtr->dictionary = (const BYTE*)source; |
|
1018 |
+ dictEnd = (const BYTE*)source; |
|
1019 |
+ } |
|
1020 |
+ |
|
1008 | 1021 |
/* Check overlapping input/dictionary space */ |
1009 | 1022 |
{ const BYTE* sourceEnd = (const BYTE*) source + inputSize; |
1010 | 1023 |
if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) { |
... | ... |
@@ -1017,46 +1528,61 @@ int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream, const char* source, ch |
1017 | 1017 |
|
1018 | 1018 |
/* prefix mode : source data follows dictionary */ |
1019 | 1019 |
if (dictEnd == (const BYTE*)source) { |
1020 |
- int result; |
|
1021 | 1020 |
if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) |
1022 |
- result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, dictSmall, acceleration); |
|
1021 |
+ return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration); |
|
1023 | 1022 |
else |
1024 |
- result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, withPrefix64k, noDictIssue, acceleration); |
|
1025 |
- streamPtr->dictSize += (U32)inputSize; |
|
1026 |
- streamPtr->currentOffset += (U32)inputSize; |
|
1027 |
- return result; |
|
1023 |
+ return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration); |
|
1028 | 1024 |
} |
1029 | 1025 |
|
1030 | 1026 |
/* external dictionary mode */ |
1031 | 1027 |
{ int result; |
1032 |
- if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) |
|
1033 |
- result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, dictSmall, acceleration); |
|
1034 |
- else |
|
1035 |
- result = LZ4_compress_generic(streamPtr, source, dest, inputSize, maxOutputSize, limitedOutput, byU32, usingExtDict, noDictIssue, acceleration); |
|
1028 |
+ if (streamPtr->dictCtx) { |
|
1029 |
+ /* We depend here on the fact that dictCtx'es (produced by |
|
1030 |
+ * LZ4_loadDict) guarantee that their tables contain no references |
|
1031 |
+ * to offsets between dictCtx->currentOffset - 64 KB and |
|
1032 |
+ * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe |
|
1033 |
+ * to use noDictIssue even when the dict isn't a full 64 KB. |
|
1034 |
+ */ |
|
1035 |
+ if (inputSize > 4 KB) { |
|
1036 |
+ /* For compressing large blobs, it is faster to pay the setup |
|
1037 |
+ * cost to copy the dictionary's tables into the active context, |
|
1038 |
+ * so that the compression loop is only looking into one table. |
|
1039 |
+ */ |
|
1040 |
+ memcpy(streamPtr, streamPtr->dictCtx, sizeof(LZ4_stream_t)); |
|
1041 |
+ result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration); |
|
1042 |
+ } else { |
|
1043 |
+ result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration); |
|
1044 |
+ } |
|
1045 |
+ } else { |
|
1046 |
+ if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) { |
|
1047 |
+ result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration); |
|
1048 |
+ } else { |
|
1049 |
+ result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration); |
|
1050 |
+ } |
|
1051 |
+ } |
|
1036 | 1052 |
streamPtr->dictionary = (const BYTE*)source; |
1037 | 1053 |
streamPtr->dictSize = (U32)inputSize; |
1038 |
- streamPtr->currentOffset += (U32)inputSize; |
|
1039 | 1054 |
return result; |
1040 | 1055 |
} |
1041 | 1056 |
} |
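The prefix/extDict branches above are what make chained streaming work. A double-buffer sketch where each block may reference the previous one; CHUNK and the framing of block sizes are left illustrative:

    #include <string.h>
    #include "lz4.h"

    enum { CHUNK = 4096 };

    static void stream_chunks(const char* in, int inSize, char* out, int outCap)
    {
        static char buf[2][CHUNK];
        LZ4_stream_t ctx;
        int idx = 0, pos = 0;
        LZ4_initStream(&ctx, sizeof(ctx));
        while (pos < inSize) {
            int const n = (inSize - pos < CHUNK) ? inSize - pos : CHUNK;
            memcpy(buf[idx], in + pos, (size_t)n);
            /* each compressed block must be framed with its size; omitted here */
            (void)LZ4_compress_fast_continue(&ctx, buf[idx], out, n, outCap, 1);
            pos += n;
            idx ^= 1;  /* alternate buffers so the previous block stays addressable */
        }
    }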
1042 | 1057 |
|
1043 | 1058 |
|
1044 |
-/* Hidden debug function, to force external dictionary mode */ |
|
1045 |
-int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int inputSize) |
|
1059 |
+/* Hidden debug function, to force-test external dictionary mode */ |
|
1060 |
+int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize) |
|
1046 | 1061 |
{ |
1047 | 1062 |
LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse; |
1048 | 1063 |
int result; |
1049 |
- const BYTE* const dictEnd = streamPtr->dictionary + streamPtr->dictSize; |
|
1050 | 1064 |
|
1051 |
- const BYTE* smallest = dictEnd; |
|
1052 |
- if (smallest > (const BYTE*) source) smallest = (const BYTE*) source; |
|
1053 |
- LZ4_renormDictT(streamPtr, smallest); |
|
1065 |
+ LZ4_renormDictT(streamPtr, srcSize); |
|
1054 | 1066 |
|
1055 |
- result = LZ4_compress_generic(streamPtr, source, dest, inputSize, 0, notLimited, byU32, usingExtDict, noDictIssue, 1); |
|
1067 |
+ if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) { |
|
1068 |
+ result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, dictSmall, 1); |
|
1069 |
+ } else { |
|
1070 |
+ result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, noDictIssue, 1); |
|
1071 |
+ } |
|
1056 | 1072 |
|
1057 | 1073 |
streamPtr->dictionary = (const BYTE*)source; |
1058 |
- streamPtr->dictSize = (U32)inputSize; |
|
1059 |
- streamPtr->currentOffset += (U32)inputSize; |
|
1074 |
+ streamPtr->dictSize = (U32)srcSize; |
|
1060 | 1075 |
|
1061 | 1076 |
return result; |
1062 | 1077 |
} |
... | ... |
@@ -1074,8 +1600,8 @@ int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize) |
1074 | 1074 |
LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse; |
1075 | 1075 |
const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize; |
1076 | 1076 |
|
1077 |
- if ((U32)dictSize > 64 KB) dictSize = 64 KB; /* useless to define a dictionary > 64 KB */ |
|
1078 |
- if ((U32)dictSize > dict->dictSize) dictSize = dict->dictSize; |
|
1077 |
+ if ((U32)dictSize > 64 KB) { dictSize = 64 KB; } /* useless to define a dictionary > 64 KB */ |
|
1078 |
+ if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; } |
|
1079 | 1079 |
|
1080 | 1080 |
memmove(safeBuffer, previousDictEnd - dictSize, dictSize); |
1081 | 1081 |
|
... | ... |
@@ -1087,218 +1613,587 @@ int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize) |
1087 | 1087 |
|
1088 | 1088 |
|
1089 | 1089 |
|
1090 |
-/*-***************************** |
|
1091 |
-* Decompression functions |
|
1092 |
-*******************************/ |
|
1090 |
+/*-******************************* |
|
1091 |
+ * Decompression functions |
|
1092 |
+ ********************************/ |
|
1093 |
+ |
|
1094 |
+typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive; |
|
1095 |
+typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive; |
|
1096 |
+ |
|
1097 |
+#undef MIN |
|
1098 |
+#define MIN(a,b) ( (a) < (b) ? (a) : (b) ) |
|
1099 |
+ |
|
1100 |
+/* Read the variable-length literal or match length. |
|
1101 |
+ * |
|
1102 |
+ * ip - pointer to use as input. |
|
1103 |
+ * lencheck - end ip. Return an error if ip advances >= lencheck. |
|
1104 |
+ * loop_check - check ip >= lencheck in body of loop. Returns loop_error if so. |
|
1105 |
+ * initial_check - check ip >= lencheck before start of loop. Returns initial_error if so. |
|
1106 |
+ * error (output) - error code. Should be set to 0 before call. |
|
1107 |
+ */ |
|
1108 |
+typedef enum { loop_error = -2, initial_error = -1, ok = 0 } variable_length_error; |
|
1109 |
+LZ4_FORCE_INLINE unsigned |
|
1110 |
+read_variable_length(const BYTE**ip, const BYTE* lencheck, int loop_check, int initial_check, variable_length_error* error) |
|
1111 |
+{ |
|
1112 |
+ unsigned length = 0; |
|
1113 |
+ unsigned s; |
|
1114 |
+ if (initial_check && unlikely((*ip) >= lencheck)) { /* overflow detection */ |
|
1115 |
+ *error = initial_error; |
|
1116 |
+ return length; |
|
1117 |
+ } |
|
1118 |
+ do { |
|
1119 |
+ s = **ip; |
|
1120 |
+ (*ip)++; |
|
1121 |
+ length += s; |
|
1122 |
+ if (loop_check && unlikely((*ip) >= lencheck)) { /* overflow detection */ |
|
1123 |
+ *error = loop_error; |
|
1124 |
+ return length; |
|
1125 |
+ } |
|
1126 |
+ } while (s==255); |
|
1127 |
+ |
|
1128 |
+ return length; |
|
1129 |
+} |
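For orientation: a token nibble of 15 (RUN_MASK/ML_MASK) means the length continues in the following bytes, as a run of 255-valued bytes plus one final remainder byte, which is exactly what read_variable_length() accumulates. A standalone mirror of that arithmetic (match lengths additionally get MINMATCH added afterwards):

    #include <assert.h>

    static unsigned decode_len(unsigned nibble, const unsigned char** p)
    {
        unsigned len = nibble;
        if (nibble == 15) {               /* length spills past the token */
            unsigned b;
            do { b = *(*p)++; len += b; } while (b == 255);
        }
        return len;
    }

    int main(void)
    {
        const unsigned char ext[] = { 255, 10 };
        const unsigned char* p = ext;
        assert(decode_len(15, &p) == 280);   /* 15 + 255 + 10 */
        return 0;
    }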
|
1130 |
+ |
|
1093 | 1131 |
/*! LZ4_decompress_generic() : |
1094 |
- * This generic decompression function cover all use cases. |
|
1095 |
- * It shall be instantiated several times, using different sets of directives |
|
1096 |
- * Note that it is important this generic function is really inlined, |
|
1132 |
+ * This generic decompression function covers all use cases. |
|
1133 |
+ * It shall be instantiated several times, using different sets of directives. |
|
1134 |
+ * Note that it is important for performance that this function really get inlined, |
|
1097 | 1135 |
* in order to remove useless branches during compilation optimization. |
1098 | 1136 |
*/ |
1099 |
-FORCE_INLINE int LZ4_decompress_generic( |
|
1100 |
- const char* const source, |
|
1101 |
- char* const dest, |
|
1102 |
- int inputSize, |
|
1103 |
- int outputSize, /* If endOnInput==endOnInputSize, this value is the max size of Output Buffer. */ |
|
1104 |
- |
|
1105 |
- int endOnInput, /* endOnOutputSize, endOnInputSize */ |
|
1106 |
- int partialDecoding, /* full, partial */ |
|
1107 |
- int targetOutputSize, /* only used if partialDecoding==partial */ |
|
1108 |
- int dict, /* noDict, withPrefix64k, usingExtDict */ |
|
1109 |
- const BYTE* const lowPrefix, /* == dest when no prefix */ |
|
1137 |
+LZ4_FORCE_INLINE int |
|
1138 |
+LZ4_decompress_generic( |
|
1139 |
+ const char* const src, |
|
1140 |
+ char* const dst, |
|
1141 |
+ int srcSize, |
|
1142 |
+ int outputSize, /* If endOnInput==endOnInputSize, this value is `dstCapacity` */ |
|
1143 |
+ |
|
1144 |
+ endCondition_directive endOnInput, /* endOnOutputSize, endOnInputSize */ |
|
1145 |
+ earlyEnd_directive partialDecoding, /* full, partial */ |
|
1146 |
+ dict_directive dict, /* noDict, withPrefix64k, usingExtDict */ |
|
1147 |
+ const BYTE* const lowPrefix, /* always <= dst, == dst when no prefix */ |
|
1110 | 1148 |
const BYTE* const dictStart, /* only if dict==usingExtDict */ |
1111 | 1149 |
const size_t dictSize /* note : = 0 if noDict */ |
1112 | 1150 |
) |
1113 | 1151 |
{ |
1114 |
- /* Local Variables */ |
|
1115 |
- const BYTE* ip = (const BYTE*) source; |
|
1116 |
- const BYTE* const iend = ip + inputSize; |
|
1152 |
+ if (src == NULL) { return -1; } |
|
1117 | 1153 |
|
1118 |
- BYTE* op = (BYTE*) dest; |
|
1119 |
- BYTE* const oend = op + outputSize; |
|
1120 |
- BYTE* cpy; |
|
1121 |
- BYTE* oexit = op + targetOutputSize; |
|
1122 |
- const BYTE* const lowLimit = lowPrefix - dictSize; |
|
1154 |
+ { const BYTE* ip = (const BYTE*) src; |
|
1155 |
+ const BYTE* const iend = ip + srcSize; |
|
1123 | 1156 |
|
1124 |
- const BYTE* const dictEnd = (const BYTE*)dictStart + dictSize; |
|
1125 |
- const unsigned dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; |
|
1126 |
- const int dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3}; |
|
1157 |
+ BYTE* op = (BYTE*) dst; |
|
1158 |
+ BYTE* const oend = op + outputSize; |
|
1159 |
+ BYTE* cpy; |
|
1127 | 1160 |
|
1128 |
- const int safeDecode = (endOnInput==endOnInputSize); |
|
1129 |
- const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB))); |
|
1161 |
+ const BYTE* const dictEnd = (dictStart == NULL) ? NULL : dictStart + dictSize; |
|
1130 | 1162 |
|
1163 |
+ const int safeDecode = (endOnInput==endOnInputSize); |
|
1164 |
+ const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB))); |
|
1131 | 1165 |
|
1132 |
- /* Special cases */ |
|
1133 |
- if ((partialDecoding) && (oexit > oend-MFLIMIT)) oexit = oend-MFLIMIT; /* targetOutputSize too high => decode everything */ |
|
1134 |
- if ((endOnInput) && (unlikely(outputSize==0))) return ((inputSize==1) && (*ip==0)) ? 0 : -1; /* Empty output buffer */ |
|
1135 |
- if ((!endOnInput) && (unlikely(outputSize==0))) return (*ip==0?1:-1); |
|
1136 | 1166 |
|
1137 |
- /* Main Loop : decode sequences */ |
|
1138 |
- while (1) { |
|
1139 |
- size_t length; |
|
1167 |
+ /* Set up the "end" pointers for the shortcut. */ |
|
1168 |
+ const BYTE* const shortiend = iend - (endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/; |
|
1169 |
+ const BYTE* const shortoend = oend - (endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/; |
|
1170 |
+ |
|
1140 | 1171 |
const BYTE* match; |
1141 | 1172 |
size_t offset; |
1173 |
+ unsigned token; |
|
1174 |
+ size_t length; |
|
1142 | 1175 |
|
1143 |
- /* get literal length */ |
|
1144 |
- unsigned const token = *ip++; |
|
1145 |
- if ((length=(token>>ML_BITS)) == RUN_MASK) { |
|
1146 |
- unsigned s; |
|
1147 |
- do { |
|
1148 |
- s = *ip++; |
|
1149 |
- length += s; |
|
1150 |
- } while ( likely(endOnInput ? ip<iend-RUN_MASK : 1) & (s==255) ); |
|
1151 |
- if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) goto _output_error; /* overflow detection */ |
|
1152 |
- if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) goto _output_error; /* overflow detection */ |
|
1176 |
+ |
|
1177 |
+ DEBUGLOG(5, "LZ4_decompress_generic (srcSize:%i, dstSize:%i)", srcSize, outputSize); |
|
1178 |
+ |
|
1179 |
+ /* Special cases */ |
|
1180 |
+ assert(lowPrefix <= op); |
|
1181 |
+ if ((endOnInput) && (unlikely(outputSize==0))) { |
|
1182 |
+ /* Empty output buffer */ |
|
1183 |
+ if (partialDecoding) return 0; |
|
1184 |
+ return ((srcSize==1) && (*ip==0)) ? 0 : -1; |
|
1185 |
+ } |
|
1186 |
+ if ((!endOnInput) && (unlikely(outputSize==0))) { return (*ip==0 ? 1 : -1); } |
|
1187 |
+ if ((endOnInput) && unlikely(srcSize==0)) { return -1; } |
|
1188 |
+ |
|
1189 |
+ /* Currently the fast loop shows a regression on Qualcomm ARM chips. */
|
1190 |
+#if LZ4_FAST_DEC_LOOP |
|
1191 |
+ if ((oend - op) < FASTLOOP_SAFE_DISTANCE) { |
|
1192 |
+ DEBUGLOG(6, "skip fast decode loop"); |
|
1193 |
+ goto safe_decode; |
|
1153 | 1194 |
} |
1154 | 1195 |
|
1155 |
- /* copy literals */ |
|
1156 |
- cpy = op+length; |
|
1157 |
- if ( ((endOnInput) && ((cpy>(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) ) |
|
1158 |
- || ((!endOnInput) && (cpy>oend-WILDCOPYLENGTH)) ) |
|
1159 |
- { |
|
1160 |
- if (partialDecoding) { |
|
1161 |
- if (cpy > oend) goto _output_error; /* Error : write attempt beyond end of output buffer */ |
|
1162 |
- if ((endOnInput) && (ip+length > iend)) goto _output_error; /* Error : read attempt beyond end of input buffer */ |
|
1196 |
+ /* Fast loop : decode sequences as long as output < iend-FASTLOOP_SAFE_DISTANCE */ |
|
1197 |
+ while (1) { |
|
1198 |
+ /* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */ |
|
1199 |
+ assert(oend - op >= FASTLOOP_SAFE_DISTANCE); |
|
1200 |
+ if (endOnInput) { assert(ip < iend); } |
|
1201 |
+ token = *ip++; |
|
1202 |
+ length = token >> ML_BITS; /* literal length */ |
|
1203 |
+ |
|
1204 |
+ assert(!endOnInput || ip <= iend); /* ip < iend before the increment */ |
|
1205 |
+ |
|
1206 |
+ /* decode literal length */ |
|
1207 |
+ if (length == RUN_MASK) { |
|
1208 |
+ variable_length_error error = ok; |
|
1209 |
+ length += read_variable_length(&ip, iend-RUN_MASK, endOnInput, endOnInput, &error); |
|
1210 |
+ if (error == initial_error) { goto _output_error; } |
|
1211 |
+ if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */ |
|
1212 |
+ if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */ |
|
1213 |
+ |
|
1214 |
+ /* copy literals */ |
|
1215 |
+ cpy = op+length; |
|
1216 |
+ LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH); |
|
1217 |
+ if (endOnInput) { /* LZ4_decompress_safe() */ |
|
1218 |
+ if ((cpy>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; } |
|
1219 |
+ LZ4_wildCopy32(op, ip, cpy); |
|
1220 |
+ } else { /* LZ4_decompress_fast() */ |
|
1221 |
+ if (cpy>oend-8) { goto safe_literal_copy; } |
|
1222 |
+ LZ4_wildCopy8(op, ip, cpy); /* LZ4_decompress_fast() cannot copy more than 8 bytes at a time : |
|
1223 |
+ * it doesn't know input length, and only relies on end-of-block properties */ |
|
1224 |
+ } |
|
1225 |
+ ip += length; op = cpy; |
|
1163 | 1226 |
} else { |
1164 |
- if ((!endOnInput) && (cpy != oend)) goto _output_error; /* Error : block decoding must stop exactly there */ |
|
1165 |
- if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error; /* Error : input must be consumed */ |
|
1227 |
+ cpy = op+length; |
|
1228 |
+ if (endOnInput) { /* LZ4_decompress_safe() */ |
|
1229 |
+ DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length); |
|
1230 |
+ /* We don't need to check oend, since we check it once for each loop below */ |
|
1231 |
+ if (ip > iend-(16 + 1/*max lit + offset + nextToken*/)) { goto safe_literal_copy; } |
|
1232 |
+ /* Literals can only be 14, but hope compilers optimize if we copy by a register size */ |
|
1233 |
+ memcpy(op, ip, 16); |
|
1234 |
+ } else { /* LZ4_decompress_fast() */ |
|
1235 |
+ /* LZ4_decompress_fast() cannot copy more than 8 bytes at a time : |
|
1236 |
+ * it doesn't know input length, and relies on end-of-block properties */ |
|
1237 |
+ memcpy(op, ip, 8); |
|
1238 |
+ if (length > 8) { memcpy(op+8, ip+8, 8); } |
|
1239 |
+ } |
|
1240 |
+ ip += length; op = cpy; |
|
1166 | 1241 |
} |
1167 |
- memcpy(op, ip, length); |
|
1168 |
- ip += length; |
|
1169 |
- op += length; |
|
1170 |
- break; /* Necessarily EOF, due to parsing restrictions */ |
|
1171 |
- } |
|
1172 |
- LZ4_wildCopy(op, ip, cpy); |
|
1173 |
- ip += length; op = cpy; |
|
1174 |
- |
|
1175 |
- /* get offset */ |
|
1176 |
- offset = LZ4_readLE16(ip); ip+=2; |
|
1177 |
- match = op - offset; |
|
1178 |
- if ((checkOffset) && (unlikely(match < lowLimit))) goto _output_error; /* Error : offset outside buffers */ |
|
1179 |
- LZ4_write32(op, (U32)offset); /* costs ~1%; silence an msan warning when offset==0 */ |
|
1180 |
- |
|
1181 |
- /* get matchlength */ |
|
1182 |
- length = token & ML_MASK; |
|
1183 |
- if (length == ML_MASK) { |
|
1184 |
- unsigned s; |
|
1185 |
- do { |
|
1186 |
- s = *ip++; |
|
1187 |
- if ((endOnInput) && (ip > iend-LASTLITERALS)) goto _output_error; |
|
1188 |
- length += s; |
|
1189 |
- } while (s==255); |
|
1190 |
- if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error; /* overflow detection */ |
|
1242 |
+ |
|
1243 |
+ /* get offset */ |
|
1244 |
+ offset = LZ4_readLE16(ip); ip+=2; |
|
1245 |
+ match = op - offset; |
|
1246 |
+ assert(match <= op); |
|
1247 |
+ |
|
1248 |
+ /* get matchlength */ |
|
1249 |
+ length = token & ML_MASK; |
|
1250 |
+ |
|
1251 |
+ if (length == ML_MASK) { |
|
1252 |
+ variable_length_error error = ok; |
|
1253 |
+ if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */ |
|
1254 |
+ length += read_variable_length(&ip, iend - LASTLITERALS + 1, endOnInput, 0, &error); |
|
1255 |
+ if (error != ok) { goto _output_error; } |
|
1256 |
+ if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */ |
|
1257 |
+ length += MINMATCH; |
|
1258 |
+ if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) { |
|
1259 |
+ goto safe_match_copy; |
|
1260 |
+ } |
|
1261 |
+ } else { |
|
1262 |
+ length += MINMATCH; |
|
1263 |
+ if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) { |
|
1264 |
+ goto safe_match_copy; |
|
1265 |
+ } |
|
1266 |
+ |
|
1267 |
+ /* Fastpath check: Avoids a branch in LZ4_wildCopy32 if true */ |
|
1268 |
+ if ((dict == withPrefix64k) || (match >= lowPrefix)) { |
|
1269 |
+ if (offset >= 8) { |
|
1270 |
+ assert(match >= lowPrefix); |
|
1271 |
+ assert(match <= op); |
|
1272 |
+ assert(op + 18 <= oend); |
|
1273 |
+ |
|
1274 |
+ memcpy(op, match, 8); |
|
1275 |
+ memcpy(op+8, match+8, 8); |
|
1276 |
+ memcpy(op+16, match+16, 2); |
|
1277 |
+ op += length; |
|
1278 |
+ continue; |
|
1279 |
+ } } } |
|
1280 |
+ |
|
1281 |
+ if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */ |
|
1282 |
+ /* match starting within external dictionary */ |
|
1283 |
+ if ((dict==usingExtDict) && (match < lowPrefix)) { |
|
1284 |
+ if (unlikely(op+length > oend-LASTLITERALS)) { |
|
1285 |
+ if (partialDecoding) { |
|
1286 |
+ length = MIN(length, (size_t)(oend-op)); /* reach end of buffer */ |
|
1287 |
+ } else { |
|
1288 |
+ goto _output_error; /* end-of-block condition violated */ |
|
1289 |
+ } } |
|
1290 |
+ |
|
1291 |
+ if (length <= (size_t)(lowPrefix-match)) { |
|
1292 |
+ /* match fits entirely within external dictionary : just copy */ |
|
1293 |
+ memmove(op, dictEnd - (lowPrefix-match), length); |
|
1294 |
+ op += length; |
|
1295 |
+ } else { |
|
1296 |
+ /* match stretches into both external dictionary and current block */ |
|
1297 |
+ size_t const copySize = (size_t)(lowPrefix - match); |
|
1298 |
+ size_t const restSize = length - copySize; |
|
1299 |
+ memcpy(op, dictEnd - copySize, copySize); |
|
1300 |
+ op += copySize; |
|
1301 |
+ if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */ |
|
1302 |
+ BYTE* const endOfMatch = op + restSize; |
|
1303 |
+ const BYTE* copyFrom = lowPrefix; |
|
1304 |
+ while (op < endOfMatch) { *op++ = *copyFrom++; } |
|
1305 |
+ } else { |
|
1306 |
+ memcpy(op, lowPrefix, restSize); |
|
1307 |
+ op += restSize; |
|
1308 |
+ } } |
|
1309 |
+ continue; |
|
1310 |
+ } |
|
1311 |
+ |
|
1312 |
+ /* copy match within block */ |
|
1313 |
+ cpy = op + length; |
|
1314 |
+ |
|
1315 |
+ assert((op <= oend) && (oend-op >= 32)); |
|
1316 |
+ if (unlikely(offset<16)) { |
|
1317 |
+ LZ4_memcpy_using_offset(op, match, cpy, offset); |
|
1318 |
+ } else { |
|
1319 |
+ LZ4_wildCopy32(op, match, cpy); |
|
1320 |
+ } |
|
1321 |
+ |
|
1322 |
+ op = cpy; /* wildcopy correction */ |
|
1191 | 1323 |
} |
1192 |
- length += MINMATCH; |
|
1324 |
+ safe_decode: |
|
1325 |
+#endif |
|
1326 |
+ |
|
1327 |
+ /* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */ |
|
1328 |
+ while (1) { |
|
1329 |
+ token = *ip++; |
|
1330 |
+ length = token >> ML_BITS; /* literal length */ |
|
1331 |
+ |
|
1332 |
+ assert(!endOnInput || ip <= iend); /* ip < iend before the increment */ |
|
1333 |
+ |
|
1334 |
+ /* A two-stage shortcut for the most common case: |
|
1335 |
+ * 1) If the literal length is 0..14, and there is enough space, |
|
1336 |
+ * enter the shortcut and copy 16 bytes on behalf of the literals |
|
1337 |
+ * (in the fast mode, only 8 bytes can be safely copied this way). |
|
1338 |
+ * 2) Further if the match length is 4..18, copy 18 bytes in a similar |
|
1339 |
+ * manner; but we ensure that there's enough space in the output for |
|
1340 |
+ * those 18 bytes earlier, upon entering the shortcut (in other words, |
|
1341 |
+ * there is a combined check for both stages). |
|
1342 |
+ */ |
|
1343 |
+ if ( (endOnInput ? length != RUN_MASK : length <= 8) |
|
1344 |
+ /* strictly "less than" on input, to re-enter the loop with at least one byte */ |
|
1345 |
+ && likely((endOnInput ? ip < shortiend : 1) & (op <= shortoend)) ) { |
|
1346 |
+ /* Copy the literals */ |
|
1347 |
+ memcpy(op, ip, endOnInput ? 16 : 8); |
|
1348 |
+ op += length; ip += length; |
|
1349 |
+ |
|
1350 |
+ /* The second stage: prepare for match copying, decode full info. |
|
1351 |
+ * If it doesn't work out, the info won't be wasted. */ |
|
1352 |
+ length = token & ML_MASK; /* match length */ |
|
1353 |
+ offset = LZ4_readLE16(ip); ip += 2; |
|
1354 |
+ match = op - offset; |
|
1355 |
+ assert(match <= op); /* check overflow */ |
|
1356 |
+ |
|
1357 |
+ /* Do not deal with overlapping matches. */ |
|
1358 |
+ if ( (length != ML_MASK) |
|
1359 |
+ && (offset >= 8) |
|
1360 |
+ && (dict==withPrefix64k || match >= lowPrefix) ) { |
|
1361 |
+ /* Copy the match. */ |
|
1362 |
+ memcpy(op + 0, match + 0, 8); |
|
1363 |
+ memcpy(op + 8, match + 8, 8); |
|
1364 |
+ memcpy(op +16, match +16, 2); |
|
1365 |
+ op += length + MINMATCH; |
|
1366 |
+ /* Both stages worked, load the next token. */ |
|
1367 |
+ continue; |
|
1368 |
+ } |
|
1369 |
+ |
|
1370 |
+ /* The second stage didn't work out, but the info is ready. |
|
1371 |
+ * Propel it right to the point of match copying. */ |
|
1372 |
+ goto _copy_match; |
|
1373 |
+ } |
|
1193 | 1374 |
|
1194 |
- /* check external dictionary */ |
|
1195 |
- if ((dict==usingExtDict) && (match < lowPrefix)) { |
|
1196 |
- if (unlikely(op+length > oend-LASTLITERALS)) goto _output_error; /* doesn't respect parsing restriction */ |
|
1375 |
+ /* decode literal length */ |
|
1376 |
+ if (length == RUN_MASK) { |
|
1377 |
+ variable_length_error error = ok; |
|
1378 |
+ length += read_variable_length(&ip, iend-RUN_MASK, endOnInput, endOnInput, &error); |
|
1379 |
+ if (error == initial_error) { goto _output_error; } |
|
1380 |
+ if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */ |
|
1381 |
+ if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */ |
|
1382 |
+ } |
|
1197 | 1383 |
|
1198 |
- if (length <= (size_t)(lowPrefix-match)) { |
|
1199 |
- /* match can be copied as a single segment from external dictionary */ |
|
1200 |
- memmove(op, dictEnd - (lowPrefix-match), length); |
|
1384 |
+ /* copy literals */ |
|
1385 |
+ cpy = op+length; |
|
1386 |
+#if LZ4_FAST_DEC_LOOP |
|
1387 |
+ safe_literal_copy: |
|
1388 |
+#endif |
|
1389 |
+ LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH); |
|
1390 |
+ if ( ((endOnInput) && ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) ) |
|
1391 |
+ || ((!endOnInput) && (cpy>oend-WILDCOPYLENGTH)) ) |
|
1392 |
+ { |
|
1393 |
+ /* We've either hit the input parsing restriction or the output parsing restriction. |
|
1394 |
+ * If we've hit the input parsing condition then this must be the last sequence. |
|
1395 |
+ * If we've hit the output parsing condition then we are either using partialDecoding |
|
1396 |
+ * or we've hit the output parsing condition. |
|
1397 |
+ */ |
|
1398 |
+ if (partialDecoding) { |
|
1399 |
+ /* Since we are partial decoding we may be in this block because of the output parsing |
|
1400 |
+ * restriction, which is not valid since the output buffer is allowed to be undersized. |
|
1401 |
+ */ |
|
1402 |
+ assert(endOnInput); |
|
1403 |
+ /* If we're in this block because of the input parsing condition, then we must be on the |
|
1404 |
+ * last sequence (or invalid), so we must check that we exactly consume the input. |
|
1405 |
+ */ |
|
1406 |
+ if ((ip+length>iend-(2+1+LASTLITERALS)) && (ip+length != iend)) { goto _output_error; } |
|
1407 |
+ assert(ip+length <= iend); |
|
1408 |
+ /* We are finishing in the middle of a literals segment. |
|
1409 |
+ * Break after the copy. |
|
1410 |
+ */ |
|
1411 |
+ if (cpy > oend) { |
|
1412 |
+ cpy = oend; |
|
1413 |
+ assert(op<=oend); |
|
1414 |
+ length = (size_t)(oend-op); |
|
1415 |
+ } |
|
1416 |
+ assert(ip+length <= iend); |
|
1417 |
+ } else { |
|
1418 |
+ /* We must be on the last sequence because of the parsing limitations so check |
|
1419 |
+ * that we exactly regenerate the original size (must be exact when !endOnInput). |
|
1420 |
+ */ |
|
1421 |
+ if ((!endOnInput) && (cpy != oend)) { goto _output_error; } |
|
1422 |
+ /* We must be on the last sequence (or invalid) because of the parsing limitations |
|
1423 |
+ * so check that we exactly consume the input and don't overrun the output buffer. |
|
1424 |
+ */ |
|
1425 |
+ if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) { goto _output_error; } |
|
1426 |
+ } |
|
1427 |
+ memmove(op, ip, length); /* supports overlapping memory regions, which only matters for in-place decompression scenarios */ |
|
1428 |
+ ip += length; |
|
1201 | 1429 |
op += length; |
1430 |
+ /* Necessarily EOF when !partialDecoding. When partialDecoding |
|
1431 |
+ * it is EOF if we've either filled the output buffer or hit |
|
1432 |
+ * the input parsing restriction. |
|
1433 |
+ */ |
|
1434 |
+ if (!partialDecoding || (cpy == oend) || (ip == iend)) { |
|
1435 |
+ break; |
|
1436 |
+ } |
|
1202 | 1437 |
} else { |
1203 |
- /* match encompass external dictionary and current block */ |
|
1204 |
- size_t const copySize = (size_t)(lowPrefix-match); |
|
1205 |
- size_t const restSize = length - copySize; |
|
1206 |
- memcpy(op, dictEnd - copySize, copySize); |
|
1207 |
- op += copySize; |
|
1208 |
- if (restSize > (size_t)(op-lowPrefix)) { /* overlap copy */ |
|
1209 |
- BYTE* const endOfMatch = op + restSize; |
|
1210 |
- const BYTE* copyFrom = lowPrefix; |
|
1211 |
- while (op < endOfMatch) *op++ = *copyFrom++; |
|
1438 |
+ LZ4_wildCopy8(op, ip, cpy); /* may overwrite up to WILDCOPYLENGTH beyond cpy */ |
|
1439 |
+ ip += length; op = cpy; |
|
1440 |
+ } |
|
1441 |
+ |
|
1442 |
+ /* get offset */ |
|
1443 |
+ offset = LZ4_readLE16(ip); ip+=2; |
|
1444 |
+ match = op - offset; |
|
1445 |
+ |
|
1446 |
+ /* get matchlength */ |
|
1447 |
+ length = token & ML_MASK; |
|
1448 |
+ |
|
1449 |
+ _copy_match: |
|
1450 |
+ if (length == ML_MASK) { |
|
1451 |
+ variable_length_error error = ok; |
|
1452 |
+ length += read_variable_length(&ip, iend - LASTLITERALS + 1, endOnInput, 0, &error); |
|
1453 |
+ if (error != ok) goto _output_error; |
|
1454 |
+ if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error; /* overflow detection */ |
|
1455 |
+ } |
|
1456 |
+ length += MINMATCH; |
|
1457 |
+ |
|
1458 |
+#if LZ4_FAST_DEC_LOOP |
|
1459 |
+ safe_match_copy: |
|
1460 |
+#endif |
|
1461 |
+ if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error; /* Error : offset outside buffers */ |
|
1462 |
+ /* match starting within external dictionary */ |
|
1463 |
+ if ((dict==usingExtDict) && (match < lowPrefix)) { |
|
1464 |
+ if (unlikely(op+length > oend-LASTLITERALS)) { |
|
1465 |
+ if (partialDecoding) length = MIN(length, (size_t)(oend-op)); |
|
1466 |
+ else goto _output_error; /* doesn't respect parsing restriction */ |
|
1467 |
+ } |
|
1468 |
+ |
|
1469 |
+ if (length <= (size_t)(lowPrefix-match)) { |
|
1470 |
+ /* match fits entirely within external dictionary : just copy */ |
|
1471 |
+ memmove(op, dictEnd - (lowPrefix-match), length); |
|
1472 |
+ op += length; |
|
1212 | 1473 |
} else { |
1213 |
- memcpy(op, lowPrefix, restSize); |
|
1214 |
- op += restSize; |
|
1215 |
- } } |
|
1216 |
- continue; |
|
1217 |
- } |
|
1474 |
+ /* match stretches into both external dictionary and current block */ |
|
1475 |
+ size_t const copySize = (size_t)(lowPrefix - match); |
|
1476 |
+ size_t const restSize = length - copySize; |
|
1477 |
+ memcpy(op, dictEnd - copySize, copySize); |
|
1478 |
+ op += copySize; |
|
1479 |
+ if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */ |
|
1480 |
+ BYTE* const endOfMatch = op + restSize; |
|
1481 |
+ const BYTE* copyFrom = lowPrefix; |
|
1482 |
+ while (op < endOfMatch) *op++ = *copyFrom++; |
|
1483 |
+ } else { |
|
1484 |
+ memcpy(op, lowPrefix, restSize); |
|
1485 |
+ op += restSize; |
|
1486 |
+ } } |
|
1487 |
+ continue; |
|
1488 |
+ } |
|
1489 |
+ assert(match >= lowPrefix); |
|
1490 |
+ |
|
1491 |
+ /* copy match within block */ |
|
1492 |
+ cpy = op + length; |
|
1493 |
+ |
|
1494 |
+ /* partialDecoding : may end anywhere within the block */ |
|
1495 |
+ assert(op<=oend); |
|
1496 |
+ if (partialDecoding && (cpy > oend-MATCH_SAFEGUARD_DISTANCE)) { |
|
1497 |
+ size_t const mlen = MIN(length, (size_t)(oend-op)); |
|
1498 |
+ const BYTE* const matchEnd = match + mlen; |
|
1499 |
+ BYTE* const copyEnd = op + mlen; |
|
1500 |
+ if (matchEnd > op) { /* overlap copy */ |
|
1501 |
+ while (op < copyEnd) { *op++ = *match++; } |
|
1502 |
+ } else { |
|
1503 |
+ memcpy(op, match, mlen); |
|
1504 |
+ } |
|
1505 |
+ op = copyEnd; |
|
1506 |
+ if (op == oend) { break; } |
|
1507 |
+ continue; |
|
1508 |
+ } |
|
1218 | 1509 |
|
1219 |
- /* copy match within block */ |
|
1220 |
- cpy = op + length; |
|
1221 |
- if (unlikely(offset<8)) { |
|
1222 |
- const int dec64 = dec64table[offset]; |
|
1223 |
- op[0] = match[0]; |
|
1224 |
- op[1] = match[1]; |
|
1225 |
- op[2] = match[2]; |
|
1226 |
- op[3] = match[3]; |
|
1227 |
- match += dec32table[offset]; |
|
1228 |
- memcpy(op+4, match, 4); |
|
1229 |
- match -= dec64; |
|
1230 |
- } else { LZ4_copy8(op, match); match+=8; } |
|
1231 |
- op += 8; |
|
1232 |
- |
|
1233 |
- if (unlikely(cpy>oend-12)) { |
|
1234 |
- BYTE* const oCopyLimit = oend-(WILDCOPYLENGTH-1); |
|
1235 |
- if (cpy > oend-LASTLITERALS) goto _output_error; /* Error : last LASTLITERALS bytes must be literals (uncompressed) */ |
|
1236 |
- if (op < oCopyLimit) { |
|
1237 |
- LZ4_wildCopy(op, match, oCopyLimit); |
|
1238 |
- match += oCopyLimit - op; |
|
1239 |
- op = oCopyLimit; |
|
1510 |
+ if (unlikely(offset<8)) { |
|
1511 |
+ LZ4_write32(op, 0); /* silence msan warning when offset==0 */ |
|
1512 |
+ op[0] = match[0]; |
|
1513 |
+ op[1] = match[1]; |
|
1514 |
+ op[2] = match[2]; |
|
1515 |
+ op[3] = match[3]; |
|
1516 |
+ match += inc32table[offset]; |
|
1517 |
+ memcpy(op+4, match, 4); |
|
1518 |
+ match -= dec64table[offset]; |
|
1519 |
+ } else { |
|
1520 |
+ memcpy(op, match, 8); |
|
1521 |
+ match += 8; |
|
1240 | 1522 |
} |
1241 |
- while (op<cpy) *op++ = *match++; |
|
1242 |
- } else { |
|
1243 |
- LZ4_copy8(op, match); |
|
1244 |
- if (length>16) LZ4_wildCopy(op+8, match+8, cpy); |
|
1523 |
+ op += 8; |
|
1524 |
+ |
|
1525 |
+ if (unlikely(cpy > oend-MATCH_SAFEGUARD_DISTANCE)) { |
|
1526 |
+ BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH-1); |
|
1527 |
+ if (cpy > oend-LASTLITERALS) { goto _output_error; } /* Error : last LASTLITERALS bytes must be literals (uncompressed) */ |
|
1528 |
+ if (op < oCopyLimit) { |
|
1529 |
+ LZ4_wildCopy8(op, match, oCopyLimit); |
|
1530 |
+ match += oCopyLimit - op; |
|
1531 |
+ op = oCopyLimit; |
|
1532 |
+ } |
|
1533 |
+ while (op < cpy) { *op++ = *match++; } |
|
1534 |
+ } else { |
|
1535 |
+ memcpy(op, match, 8); |
|
1536 |
+ if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); } |
|
1537 |
+ } |
|
1538 |
+ op = cpy; /* wildcopy correction */ |
|
1245 | 1539 |
} |
1246 |
- op=cpy; /* correction */ |
|
1247 |
- } |
|
1248 | 1540 |
|
1249 |
- /* end of decoding */ |
|
1250 |
- if (endOnInput) |
|
1251 |
- return (int) (((char*)op)-dest); /* Nb of output bytes decoded */ |
|
1252 |
- else |
|
1253 |
- return (int) (((const char*)ip)-source); /* Nb of input bytes read */ |
|
1541 |
+ /* end of decoding */ |
|
1542 |
+ if (endOnInput) { |
|
1543 |
+ return (int) (((char*)op)-dst); /* Nb of output bytes decoded */ |
|
1544 |
+ } else { |
|
1545 |
+ return (int) (((const char*)ip)-src); /* Nb of input bytes read */ |
|
1546 |
+ } |
|
1254 | 1547 |
|
1255 |
- /* Overflow error detected */ |
|
1256 |
-_output_error: |
|
1257 |
- return (int) (-(((const char*)ip)-source))-1; |
|
1548 |
+ /* Overflow error detected */ |
|
1549 |
+ _output_error: |
|
1550 |
+ return (int) (-(((const char*)ip)-src))-1; |
|
1551 |
+ } |
|
1258 | 1552 |
} |
1259 | 1553 |
|
1260 | 1554 |
|
1555 |
+/*===== Instantiate the API decoding functions. =====*/ |
|
1556 |
+ |
|
1557 |
+LZ4_FORCE_O2_GCC_PPC64LE |
|
1261 | 1558 |
int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize) |
1262 | 1559 |
{ |
1263 |
- return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, full, 0, noDict, (BYTE*)dest, NULL, 0); |
|
1560 |
+ return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, |
|
1561 |
+ endOnInputSize, decode_full_block, noDict, |
|
1562 |
+ (BYTE*)dest, NULL, 0); |
|
1264 | 1563 |
} |
1265 | 1564 |
|
1266 |
-int LZ4_decompress_safe_partial(const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize) |
|
1565 |
+LZ4_FORCE_O2_GCC_PPC64LE |
|
1566 |
+int LZ4_decompress_safe_partial(const char* src, char* dst, int compressedSize, int targetOutputSize, int dstCapacity) |
|
1267 | 1567 |
{ |
1268 |
- return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, endOnInputSize, partial, targetOutputSize, noDict, (BYTE*)dest, NULL, 0); |
|
1568 |
+ dstCapacity = MIN(targetOutputSize, dstCapacity); |
|
1569 |
+ return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity, |
|
1570 |
+ endOnInputSize, partial_decode, |
|
1571 |
+ noDict, (BYTE*)dst, NULL, 0); |
|
1269 | 1572 |
} |
1270 | 1573 |
|
1574 |
+LZ4_FORCE_O2_GCC_PPC64LE |
|
1271 | 1575 |
int LZ4_decompress_fast(const char* source, char* dest, int originalSize) |
1272 | 1576 |
{ |
1273 |
- return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)(dest - 64 KB), NULL, 64 KB); |
|
1577 |
+ return LZ4_decompress_generic(source, dest, 0, originalSize, |
|
1578 |
+ endOnOutputSize, decode_full_block, withPrefix64k, |
|
1579 |
+ (BYTE*)dest - 64 KB, NULL, 0); |
|
1274 | 1580 |
} |
1275 | 1581 |
|
1582 |
+/*===== Instantiate a few more decoding cases, used more than once. =====*/ |
|
1276 | 1583 |
|
1277 |
-/*===== streaming decompression functions =====*/ |
|
1584 |
+LZ4_FORCE_O2_GCC_PPC64LE /* Exported, an obsolete API function. */ |
|
1585 |
+int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize) |
|
1586 |
+{ |
|
1587 |
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, |
|
1588 |
+ endOnInputSize, decode_full_block, withPrefix64k, |
|
1589 |
+ (BYTE*)dest - 64 KB, NULL, 0); |
|
1590 |
+} |
|
1278 | 1591 |
|
1279 |
-/* |
|
1280 |
- * If you prefer dynamic allocation methods, |
|
1281 |
- * LZ4_createStreamDecode() |
|
1282 |
- * provides a pointer (void*) towards an initialized LZ4_streamDecode_t structure. |
|
1592 |
+/* Another obsolete API function, paired with the previous one. */ |
|
1593 |
+int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize) |
|
1594 |
+{ |
|
1595 |
+ /* LZ4_decompress_fast doesn't validate match offsets, |
|
1596 |
+ * and thus serves well with any prefixed dictionary. */ |
|
1597 |
+ return LZ4_decompress_fast(source, dest, originalSize); |
|
1598 |
+} |
|
1599 |
+ |
|
1600 |
+LZ4_FORCE_O2_GCC_PPC64LE |
|
1601 |
+static int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, int compressedSize, int maxOutputSize, |
|
1602 |
+ size_t prefixSize) |
|
1603 |
+{ |
|
1604 |
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, |
|
1605 |
+ endOnInputSize, decode_full_block, noDict, |
|
1606 |
+ (BYTE*)dest-prefixSize, NULL, 0); |
|
1607 |
+} |
|
1608 |
+ |
|
1609 |
+LZ4_FORCE_O2_GCC_PPC64LE |
|
1610 |
+int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, |
|
1611 |
+ int compressedSize, int maxOutputSize, |
|
1612 |
+ const void* dictStart, size_t dictSize) |
|
1613 |
+{ |
|
1614 |
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, |
|
1615 |
+ endOnInputSize, decode_full_block, usingExtDict, |
|
1616 |
+ (BYTE*)dest, (const BYTE*)dictStart, dictSize); |
|
1617 |
+} |
|
1618 |
+ |
|
1619 |
+LZ4_FORCE_O2_GCC_PPC64LE |
|
1620 |
+static int LZ4_decompress_fast_extDict(const char* source, char* dest, int originalSize, |
|
1621 |
+ const void* dictStart, size_t dictSize) |
|
1622 |
+{ |
|
1623 |
+ return LZ4_decompress_generic(source, dest, 0, originalSize, |
|
1624 |
+ endOnOutputSize, decode_full_block, usingExtDict, |
|
1625 |
+ (BYTE*)dest, (const BYTE*)dictStart, dictSize); |
|
1626 |
+} |
|
1627 |
+ |
|
1628 |
+/* The "double dictionary" mode, for use with e.g. ring buffers: the first part |
|
1629 |
+ * of the dictionary is passed as prefix, and the second via dictStart + dictSize. |
|
1630 |
+ * These routines are used only once, in LZ4_decompress_*_continue(). |
|
1283 | 1631 |
*/ |
1632 |
+LZ4_FORCE_INLINE |
|
1633 |
+int LZ4_decompress_safe_doubleDict(const char* source, char* dest, int compressedSize, int maxOutputSize, |
|
1634 |
+ size_t prefixSize, const void* dictStart, size_t dictSize) |
|
1635 |
+{ |
|
1636 |
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, |
|
1637 |
+ endOnInputSize, decode_full_block, usingExtDict, |
|
1638 |
+ (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize); |
|
1639 |
+} |
|
1640 |
+ |
|
1641 |
+LZ4_FORCE_INLINE |
|
1642 |
+int LZ4_decompress_fast_doubleDict(const char* source, char* dest, int originalSize, |
|
1643 |
+ size_t prefixSize, const void* dictStart, size_t dictSize) |
|
1644 |
+{ |
|
1645 |
+ return LZ4_decompress_generic(source, dest, 0, originalSize, |
|
1646 |
+ endOnOutputSize, decode_full_block, usingExtDict, |
|
1647 |
+ (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize); |
|
1648 |
+} |
|
1649 |
+ |
|
1650 |
+/*===== streaming decompression functions =====*/ |
|
1651 |
+ |
|
1284 | 1652 |
LZ4_streamDecode_t* LZ4_createStreamDecode(void) |
1285 | 1653 |
{ |
1286 |
- LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOCATOR(1, sizeof(LZ4_streamDecode_t)); |
|
1654 |
+ LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t)); |
|
1655 |
+ LZ4_STATIC_ASSERT(LZ4_STREAMDECODESIZE >= sizeof(LZ4_streamDecode_t_internal)); /* A compilation error here means LZ4_STREAMDECODESIZE is not large enough */ |
|
1287 | 1656 |
return lz4s; |
1288 | 1657 |
} |
1289 | 1658 |
|
1290 | 1659 |
int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream) |
1291 | 1660 |
{ |
1661 |
+ if (LZ4_stream == NULL) { return 0; } /* support free on NULL */ |
|
1292 | 1662 |
FREEMEM(LZ4_stream); |
1293 | 1663 |
return 0; |
1294 | 1664 |
} |
1295 | 1665 |
|
1296 |
-/*! |
|
1297 |
- * LZ4_setStreamDecode() : |
|
1298 |
- * Use this function to instruct where to find the dictionary. |
|
1299 |
- * This function is not necessary if previous data is still available where it was decoded. |
|
1300 |
- * Loading a size of 0 is allowed (same effect as no dictionary). |
|
1301 |
- * Return : 1 if OK, 0 if error |
|
1666 |
+/*! LZ4_setStreamDecode() : |
|
1667 |
+ * Use this function to instruct where to find the dictionary. |
|
1668 |
+ * This function is not necessary if previous data is still available where it was decoded. |
|
1669 |
+ * Loading a size of 0 is allowed (same effect as no dictionary). |
|
1670 |
+ * @return : 1 if OK, 0 if error |
|
1302 | 1671 |
*/ |
1303 | 1672 |
int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize) |
1304 | 1673 |
{ |
... | ... |
@@ -1310,6 +2205,25 @@ int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dicti |
1310 | 1310 |
return 1; |
1311 | 1311 |
} |
1312 | 1312 |
|
1313 |
+/*! LZ4_decoderRingBufferSize() : |
|
1314 |
+ * when setting a ring buffer for streaming decompression (optional scenario), |
|
1315 |
+ * provides the minimum size of this ring buffer |
|
1316 |
+ * to be compatible with any source respecting maxBlockSize condition. |
|
1317 |
+ * Note : in a ring buffer scenario, |
|
1318 |
+ * blocks are presumed decompressed next to each other. |
|
1319 |
+ * When not enough space remains for next block (remainingSize < maxBlockSize), |
|
1320 |
+ * decoding resumes from beginning of ring buffer. |
|
1321 |
+ * @return : minimum ring buffer size, |
|
1322 |
+ * or 0 if there is an error (invalid maxBlockSize). |
|
1323 |
+ */ |
|
1324 |
+int LZ4_decoderRingBufferSize(int maxBlockSize) |
|
1325 |
+{ |
|
1326 |
+ if (maxBlockSize < 0) return 0; |
|
1327 |
+ if (maxBlockSize > LZ4_MAX_INPUT_SIZE) return 0; |
|
1328 |
+ if (maxBlockSize < 16) maxBlockSize = 16; |
|
1329 |
+ return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize); |
|
1330 |
+} |
|
1331 |
+ |
|
1313 | 1332 |
/* |
1314 | 1333 |
*_continue() : |
1315 | 1334 |
These decoding functions allow decompression of multiple blocks in "streaming" mode. |
... | ... |
@@ -1317,52 +2231,75 @@ int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dicti |
1317 | 1317 |
If it's not possible, save the relevant part of decoded data into a safe buffer, |
1318 | 1318 |
and indicate where it stands using LZ4_setStreamDecode() |
1319 | 1319 |
*/ |
1320 |
+LZ4_FORCE_O2_GCC_PPC64LE |
|
1320 | 1321 |
int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize) |
1321 | 1322 |
{ |
1322 | 1323 |
LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse; |
1323 | 1324 |
int result; |
1324 | 1325 |
|
1325 |
- if (lz4sd->prefixEnd == (BYTE*)dest) { |
|
1326 |
- result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, |
|
1327 |
- endOnInputSize, full, 0, |
|
1328 |
- usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize); |
|
1326 |
+ if (lz4sd->prefixSize == 0) { |
|
1327 |
+ /* The first call, no dictionary yet. */ |
|
1328 |
+ assert(lz4sd->extDictSize == 0); |
|
1329 |
+ result = LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize); |
|
1330 |
+ if (result <= 0) return result; |
|
1331 |
+ lz4sd->prefixSize = (size_t)result; |
|
1332 |
+ lz4sd->prefixEnd = (BYTE*)dest + result; |
|
1333 |
+ } else if (lz4sd->prefixEnd == (BYTE*)dest) { |
|
1334 |
+ /* They're rolling the current segment. */ |
|
1335 |
+ if (lz4sd->prefixSize >= 64 KB - 1) |
|
1336 |
+ result = LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize); |
|
1337 |
+ else if (lz4sd->extDictSize == 0) |
|
1338 |
+ result = LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, |
|
1339 |
+ lz4sd->prefixSize); |
|
1340 |
+ else |
|
1341 |
+ result = LZ4_decompress_safe_doubleDict(source, dest, compressedSize, maxOutputSize, |
|
1342 |
+ lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize); |
|
1329 | 1343 |
if (result <= 0) return result; |
1330 |
- lz4sd->prefixSize += result; |
|
1344 |
+ lz4sd->prefixSize += (size_t)result; |
|
1331 | 1345 |
lz4sd->prefixEnd += result; |
1332 | 1346 |
} else { |
1347 |
+ /* The buffer wraps around, or they're switching to another buffer. */ |
|
1333 | 1348 |
lz4sd->extDictSize = lz4sd->prefixSize; |
1334 | 1349 |
lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; |
1335 |
- result = LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, |
|
1336 |
- endOnInputSize, full, 0, |
|
1337 |
- usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize); |
|
1350 |
+ result = LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, |
|
1351 |
+ lz4sd->externalDict, lz4sd->extDictSize); |
|
1338 | 1352 |
if (result <= 0) return result; |
1339 |
- lz4sd->prefixSize = result; |
|
1353 |
+ lz4sd->prefixSize = (size_t)result; |
|
1340 | 1354 |
lz4sd->prefixEnd = (BYTE*)dest + result; |
1341 | 1355 |
} |
1342 | 1356 |
|
1343 | 1357 |
return result; |
1344 | 1358 |
} |
1345 | 1359 |
|
1360 |
+LZ4_FORCE_O2_GCC_PPC64LE |
|
1346 | 1361 |
int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize) |
1347 | 1362 |
{ |
1348 | 1363 |
LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse; |
1349 | 1364 |
int result; |
1365 |
+ assert(originalSize >= 0); |
|
1350 | 1366 |
|
1351 |
- if (lz4sd->prefixEnd == (BYTE*)dest) { |
|
1352 |
- result = LZ4_decompress_generic(source, dest, 0, originalSize, |
|
1353 |
- endOnOutputSize, full, 0, |
|
1354 |
- usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize); |
|
1367 |
+ if (lz4sd->prefixSize == 0) { |
|
1368 |
+ assert(lz4sd->extDictSize == 0); |
|
1369 |
+ result = LZ4_decompress_fast(source, dest, originalSize); |
|
1370 |
+ if (result <= 0) return result; |
|
1371 |
+ lz4sd->prefixSize = (size_t)originalSize; |
|
1372 |
+ lz4sd->prefixEnd = (BYTE*)dest + originalSize; |
|
1373 |
+ } else if (lz4sd->prefixEnd == (BYTE*)dest) { |
|
1374 |
+ if (lz4sd->prefixSize >= 64 KB - 1 || lz4sd->extDictSize == 0) |
|
1375 |
+ result = LZ4_decompress_fast(source, dest, originalSize); |
|
1376 |
+ else |
|
1377 |
+ result = LZ4_decompress_fast_doubleDict(source, dest, originalSize, |
|
1378 |
+ lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize); |
|
1355 | 1379 |
if (result <= 0) return result; |
1356 |
- lz4sd->prefixSize += originalSize; |
|
1380 |
+ lz4sd->prefixSize += (size_t)originalSize; |
|
1357 | 1381 |
lz4sd->prefixEnd += originalSize; |
1358 | 1382 |
} else { |
1359 | 1383 |
lz4sd->extDictSize = lz4sd->prefixSize; |
1360 | 1384 |
lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; |
1361 |
- result = LZ4_decompress_generic(source, dest, 0, originalSize, |
|
1362 |
- endOnOutputSize, full, 0, |
|
1363 |
- usingExtDict, (BYTE*)dest, lz4sd->externalDict, lz4sd->extDictSize); |
|
1385 |
+ result = LZ4_decompress_fast_extDict(source, dest, originalSize, |
|
1386 |
+ lz4sd->externalDict, lz4sd->extDictSize); |
|
1364 | 1387 |
if (result <= 0) return result; |
1365 |
- lz4sd->prefixSize = originalSize; |
|
1388 |
+ lz4sd->prefixSize = (size_t)originalSize; |
|
1366 | 1389 |
lz4sd->prefixEnd = (BYTE*)dest + originalSize; |
1367 | 1390 |
} |
1368 | 1391 |
|
... | ... |
@@ -1377,32 +2314,27 @@ Advanced decoding functions : |
1377 | 1377 |
the dictionary must be explicitly provided within parameters |
1378 | 1378 |
*/ |
1379 | 1379 |
|
1380 |
-FORCE_INLINE int LZ4_decompress_usingDict_generic(const char* source, char* dest, int compressedSize, int maxOutputSize, int safe, const char* dictStart, int dictSize) |
|
1380 |
+int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize) |
|
1381 | 1381 |
{ |
1382 | 1382 |
if (dictSize==0) |
1383 |
- return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest, NULL, 0); |
|
1383 |
+ return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize); |
|
1384 | 1384 |
if (dictStart+dictSize == dest) { |
1385 |
- if (dictSize >= (int)(64 KB - 1)) |
|
1386 |
- return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, withPrefix64k, (BYTE*)dest-64 KB, NULL, 0); |
|
1387 |
- return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, noDict, (BYTE*)dest-dictSize, NULL, 0); |
|
1385 |
+ if (dictSize >= 64 KB - 1) { |
|
1386 |
+ return LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize); |
|
1387 |
+ } |
|
1388 |
+ assert(dictSize >= 0); |
|
1389 |
+ return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, (size_t)dictSize); |
|
1388 | 1390 |
} |
1389 |
- return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, safe, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize); |
|
1390 |
-} |
|
1391 |
- |
|
1392 |
-int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize) |
|
1393 |
-{ |
|
1394 |
- return LZ4_decompress_usingDict_generic(source, dest, compressedSize, maxOutputSize, 1, dictStart, dictSize); |
|
1391 |
+ assert(dictSize >= 0); |
|
1392 |
+ return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, dictStart, (size_t)dictSize); |
|
1395 | 1393 |
} |
1396 | 1394 |
|
1397 | 1395 |
int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize) |
1398 | 1396 |
{ |
1399 |
- return LZ4_decompress_usingDict_generic(source, dest, 0, originalSize, 0, dictStart, dictSize); |
|
1400 |
-} |
|
1401 |
- |
|
1402 |
-/* debug function */ |
|
1403 |
-int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize) |
|
1404 |
-{ |
|
1405 |
- return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, usingExtDict, (BYTE*)dest, (const BYTE*)dictStart, dictSize); |
|
1397 |
+ if (dictSize==0 || dictStart+dictSize == dest) |
|
1398 |
+ return LZ4_decompress_fast(source, dest, originalSize); |
|
1399 |
+ assert(dictSize >= 0); |
|
1400 |
+ return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, (size_t)dictSize); |
|
1406 | 1401 |
} |
1407 | 1402 |
|
1408 | 1403 |
|
... | ... |
@@ -1410,64 +2342,67 @@ int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, int compres |
1410 | 1410 |
* Obsolete Functions |
1411 | 1411 |
***************************************************/ |
1412 | 1412 |
/* obsolete compression functions */ |
1413 |
-int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize) { return LZ4_compress_default(source, dest, inputSize, maxOutputSize); } |
|
1414 |
-int LZ4_compress(const char* source, char* dest, int inputSize) { return LZ4_compress_default(source, dest, inputSize, LZ4_compressBound(inputSize)); } |
|
1415 |
-int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1); } |
|
1416 |
-int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1); } |
|
1417 |
-int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, maxDstSize, 1); } |
|
1418 |
-int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize) { return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1); } |
|
1413 |
+int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize) |
|
1414 |
+{ |
|
1415 |
+ return LZ4_compress_default(source, dest, inputSize, maxOutputSize); |
|
1416 |
+} |
|
1417 |
+int LZ4_compress(const char* src, char* dest, int srcSize) |
|
1418 |
+{ |
|
1419 |
+ return LZ4_compress_default(src, dest, srcSize, LZ4_compressBound(srcSize)); |
|
1420 |
+} |
|
1421 |
+int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize) |
|
1422 |
+{ |
|
1423 |
+ return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1); |
|
1424 |
+} |
|
1425 |
+int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize) |
|
1426 |
+{ |
|
1427 |
+ return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1); |
|
1428 |
+} |
|
1429 |
+int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int dstCapacity) |
|
1430 |
+{ |
|
1431 |
+ return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, dstCapacity, 1); |
|
1432 |
+} |
|
1433 |
+int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize) |
|
1434 |
+{ |
|
1435 |
+ return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1); |
|
1436 |
+} |
|
1419 | 1437 |
|
1420 | 1438 |
/* |
1421 |
-These function names are deprecated and should no longer be used. |
|
1439 |
+These decompression functions are deprecated and should no longer be used. |
|
1422 | 1440 |
They are only provided here for compatibility with older user programs. |
1423 | 1441 |
- LZ4_uncompress is totally equivalent to LZ4_decompress_fast |
1424 | 1442 |
- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe |
1425 | 1443 |
*/ |
1426 |
-int LZ4_uncompress (const char* source, char* dest, int outputSize) { return LZ4_decompress_fast(source, dest, outputSize); } |
|
1427 |
-int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) { return LZ4_decompress_safe(source, dest, isize, maxOutputSize); } |
|
1428 |
- |
|
1444 |
+int LZ4_uncompress (const char* source, char* dest, int outputSize) |
|
1445 |
+{ |
|
1446 |
+ return LZ4_decompress_fast(source, dest, outputSize); |
|
1447 |
+} |
|
1448 |
+int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) |
|
1449 |
+{ |
|
1450 |
+ return LZ4_decompress_safe(source, dest, isize, maxOutputSize); |
|
1451 |
+} |
|
1429 | 1452 |
|
1430 | 1453 |
/* Obsolete Streaming functions */ |
1431 | 1454 |
|
1432 | 1455 |
int LZ4_sizeofStreamState() { return LZ4_STREAMSIZE; } |
1433 | 1456 |
|
1434 |
-static void LZ4_init(LZ4_stream_t* lz4ds, BYTE* base) |
|
1435 |
-{ |
|
1436 |
- MEM_INIT(lz4ds, 0, sizeof(LZ4_stream_t)); |
|
1437 |
- lz4ds->internal_donotuse.bufferStart = base; |
|
1438 |
-} |
|
1439 |
- |
|
1440 | 1457 |
int LZ4_resetStreamState(void* state, char* inputBuffer) |
1441 | 1458 |
{ |
1442 |
- if ((((uptrval)state) & 3) != 0) return 1; /* Error : pointer is not aligned on 4-bytes boundary */ |
|
1443 |
- LZ4_init((LZ4_stream_t*)state, (BYTE*)inputBuffer); |
|
1459 |
+ (void)inputBuffer; |
|
1460 |
+ LZ4_resetStream((LZ4_stream_t*)state); |
|
1444 | 1461 |
return 0; |
1445 | 1462 |
} |
1446 | 1463 |
|
1447 | 1464 |
void* LZ4_create (char* inputBuffer) |
1448 | 1465 |
{ |
1449 |
- LZ4_stream_t* lz4ds = (LZ4_stream_t*)ALLOCATOR(8, sizeof(LZ4_stream_t)); |
|
1450 |
- LZ4_init (lz4ds, (BYTE*)inputBuffer); |
|
1451 |
- return lz4ds; |
|
1466 |
+ (void)inputBuffer; |
|
1467 |
+ return LZ4_createStream(); |
|
1452 | 1468 |
} |
1453 | 1469 |
|
1454 |
-char* LZ4_slideInputBuffer (void* LZ4_Data) |
|
1455 |
-{ |
|
1456 |
- LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)LZ4_Data)->internal_donotuse; |
|
1457 |
- int dictSize = LZ4_saveDict((LZ4_stream_t*)LZ4_Data, (char*)ctx->bufferStart, 64 KB); |
|
1458 |
- return (char*)(ctx->bufferStart + dictSize); |
|
1459 |
-} |
|
1460 |
- |
|
1461 |
-/* Obsolete streaming decompression functions */ |
|
1462 |
- |
|
1463 |
-int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize) |
|
1464 |
-{ |
|
1465 |
- return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, endOnInputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB); |
|
1466 |
-} |
|
1467 |
- |
|
1468 |
-int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize) |
|
1470 |
+char* LZ4_slideInputBuffer (void* state) |
|
1469 | 1471 |
{ |
1470 |
- return LZ4_decompress_generic(source, dest, 0, originalSize, endOnOutputSize, full, 0, withPrefix64k, (BYTE*)dest - 64 KB, NULL, 64 KB); |
|
1472 |
+ /* avoid const char * -> char * conversion warning */ |
|
1473 |
+ return (char *)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary; |
|
1471 | 1474 |
} |
1472 | 1475 |
|
1473 | 1476 |
#endif /* LZ4_COMMONDEFS_ONLY */ |
... | ... |
@@ -1,7 +1,7 @@ |
1 | 1 |
/* |
2 | 2 |
* LZ4 - Fast LZ compression algorithm |
3 | 3 |
* Header File |
4 |
- * Copyright (C) 2011-2016, Yann Collet. |
|
4 |
+ * Copyright (C) 2011-present, Yann Collet. |
|
5 | 5 |
|
6 | 6 |
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) |
7 | 7 |
|
... | ... |
@@ -32,13 +32,13 @@ |
32 | 32 |
- LZ4 homepage : http://www.lz4.org |
33 | 33 |
- LZ4 source repository : https://github.com/lz4/lz4 |
34 | 34 |
*/ |
35 |
-#ifndef LZ4_H_2983827168210 |
|
36 |
-#define LZ4_H_2983827168210 |
|
37 |
- |
|
38 | 35 |
#if defined (__cplusplus) |
39 | 36 |
extern "C" { |
40 | 37 |
#endif |
41 | 38 |
|
39 |
+#ifndef LZ4_H_2983827168210 |
|
40 |
+#define LZ4_H_2983827168210 |
|
41 |
+ |
|
42 | 42 |
/* --- Dependency --- */ |
43 | 43 |
#include <stddef.h> /* size_t */ |
44 | 44 |
|
... | ... |
@@ -46,24 +46,31 @@ extern "C" { |
46 | 46 |
/** |
47 | 47 |
Introduction |
48 | 48 |
|
49 |
- LZ4 is lossless compression algorithm, providing compression speed at 400 MB/s per core, |
|
49 |
+ LZ4 is lossless compression algorithm, providing compression speed >500 MB/s per core, |
|
50 | 50 |
scalable with multi-cores CPU. It features an extremely fast decoder, with speed in |
51 | 51 |
multiple GB/s per core, typically reaching RAM speed limits on multi-core systems. |
52 | 52 |
|
53 | 53 |
The LZ4 compression library provides in-memory compression and decompression functions. |
54 |
+ It gives full buffer control to user. |
|
54 | 55 |
Compression can be done in: |
55 | 56 |
- a single step (described as Simple Functions) |
56 | 57 |
- a single step, reusing a context (described in Advanced Functions) |
57 | 58 |
- unbounded multiple steps (described as Streaming compression) |
58 | 59 |
|
59 |
- lz4.h provides block compression functions. It gives full buffer control to user. |
|
60 |
- Decompressing an lz4-compressed block also requires metadata (such as compressed size). |
|
61 |
- Each application is free to encode such metadata in whichever way it wants. |
|
60 |
+ lz4.h generates and decodes LZ4-compressed blocks (doc/lz4_Block_format.md). |
|
61 |
+ Decompressing such a compressed block requires additional metadata. |
|
62 |
+ Exact metadata depends on exact decompression function. |
|
63 |
+ For the typical case of LZ4_decompress_safe(), |
|
64 |
+ metadata includes block's compressed size, and maximum bound of decompressed size. |
|
65 |
+ Each application is free to encode and pass such metadata in whichever way it wants. |
|
66 |
+ |
|
67 |
+ lz4.h only handle blocks, it can not generate Frames. |
|
62 | 68 |
|
63 |
- An additional format, called LZ4 frame specification (doc/lz4_Frame_format.md), |
|
64 |
- take care of encoding standard metadata alongside LZ4-compressed blocks. |
|
65 |
- If your application requires interoperability, it's recommended to use it. |
|
66 |
- A library is provided to take care of it, see lz4frame.h. |
|
69 |
+ Blocks are different from Frames (doc/lz4_Frame_format.md). |
|
70 |
+ Frames bundle both blocks and metadata in a specified manner. |
|
71 |
+ Embedding metadata is required for compressed data to be self-contained and portable. |
|
72 |
+ Frame format is delivered through a companion API, declared in lz4frame.h. |
|
73 |
+ The `lz4` CLI can only manage frames. |
|
67 | 74 |
*/ |
68 | 75 |
|
69 | 76 |
/*^*************************************************************** |
... | ... |
@@ -72,20 +79,28 @@ extern "C" { |
72 | 72 |
/* |
73 | 73 |
* LZ4_DLL_EXPORT : |
74 | 74 |
* Enable exporting of functions when building a Windows DLL |
75 |
+* LZ4LIB_VISIBILITY : |
|
76 |
+* Control library symbols visibility. |
|
75 | 77 |
*/ |
78 |
+#ifndef LZ4LIB_VISIBILITY |
|
79 |
+# if defined(__GNUC__) && (__GNUC__ >= 4) |
|
80 |
+# define LZ4LIB_VISIBILITY __attribute__ ((visibility ("default"))) |
|
81 |
+# else |
|
82 |
+# define LZ4LIB_VISIBILITY |
|
83 |
+# endif |
|
84 |
+#endif |
|
76 | 85 |
#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1) |
77 |
-# define LZ4LIB_API __declspec(dllexport) |
|
86 |
+# define LZ4LIB_API __declspec(dllexport) LZ4LIB_VISIBILITY |
|
78 | 87 |
#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1) |
79 |
-# define LZ4LIB_API __declspec(dllimport) /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ |
|
88 |
+# define LZ4LIB_API __declspec(dllimport) LZ4LIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ |
|
80 | 89 |
#else |
81 |
-# define LZ4LIB_API |
|
90 |
+# define LZ4LIB_API LZ4LIB_VISIBILITY |
|
82 | 91 |
#endif |
83 | 92 |
|
84 |
- |
|
85 |
-/*========== Version =========== */ |
|
93 |
+/*------ Version ------*/ |
|
86 | 94 |
#define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */ |
87 |
-#define LZ4_VERSION_MINOR 7 /* for new (non-breaking) interface capabilities */ |
|
88 |
-#define LZ4_VERSION_RELEASE 5 /* for tweaks, bug-fixes, or development */ |
|
95 |
+#define LZ4_VERSION_MINOR 9 /* for new (non-breaking) interface capabilities */ |
|
96 |
+#define LZ4_VERSION_RELEASE 2 /* for tweaks, bug-fixes, or development */ |
|
89 | 97 |
|
90 | 98 |
#define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE) |
91 | 99 |
|
... | ... |
@@ -94,8 +109,8 @@ extern "C" { |
94 | 94 |
#define LZ4_EXPAND_AND_QUOTE(str) LZ4_QUOTE(str) |
95 | 95 |
#define LZ4_VERSION_STRING LZ4_EXPAND_AND_QUOTE(LZ4_LIB_VERSION) |
96 | 96 |
|
97 |
-LZ4LIB_API int LZ4_versionNumber (void); |
|
98 |
-LZ4LIB_API const char* LZ4_versionString (void); |
|
97 |
+LZ4LIB_API int LZ4_versionNumber (void); /**< library version number; useful to check dll version */ |
|
98 |
+LZ4LIB_API const char* LZ4_versionString (void); /**< library version string; useful to check dll version */ |
|
99 | 99 |
|
100 | 100 |
|
101 | 101 |
/*-************************************ |
... | ... |
@@ -104,41 +119,49 @@ LZ4LIB_API const char* LZ4_versionString (void); |
104 | 104 |
/*! |
105 | 105 |
* LZ4_MEMORY_USAGE : |
106 | 106 |
* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) |
107 |
- * Increasing memory usage improves compression ratio |
|
108 |
- * Reduced memory usage can improve speed, due to cache effect |
|
107 |
+ * Increasing memory usage improves compression ratio. |
|
108 |
+ * Reduced memory usage may improve speed, thanks to better cache locality. |
|
109 | 109 |
* Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache |
110 | 110 |
*/ |
111 |
-#define LZ4_MEMORY_USAGE 14 |
|
111 |
+#ifndef LZ4_MEMORY_USAGE |
|
112 |
+# define LZ4_MEMORY_USAGE 14 |
|
113 |
+#endif |
|
112 | 114 |
|
113 | 115 |
|
114 | 116 |
/*-************************************ |
115 | 117 |
* Simple Functions |
116 | 118 |
**************************************/ |
117 | 119 |
/*! LZ4_compress_default() : |
118 |
- Compresses 'sourceSize' bytes from buffer 'source' |
|
119 |
- into already allocated 'dest' buffer of size 'maxDestSize'. |
|
120 |
- Compression is guaranteed to succeed if 'maxDestSize' >= LZ4_compressBound(sourceSize). |
|
121 |
- It also runs faster, so it's a recommended setting. |
|
122 |
- If the function cannot compress 'source' into a more limited 'dest' budget, |
|
123 |
- compression stops *immediately*, and the function result is zero. |
|
124 |
- As a consequence, 'dest' content is not valid. |
|
125 |
- This function never writes outside 'dest' buffer, nor read outside 'source' buffer. |
|
126 |
- sourceSize : Max supported value is LZ4_MAX_INPUT_VALUE |
|
127 |
- maxDestSize : full or partial size of buffer 'dest' (which must be already allocated) |
|
128 |
- return : the number of bytes written into buffer 'dest' (necessarily <= maxOutputSize) |
|
129 |
- or 0 if compression fails */ |
|
130 |
-LZ4LIB_API int LZ4_compress_default(const char* source, char* dest, int sourceSize, int maxDestSize); |
|
120 |
+ * Compresses 'srcSize' bytes from buffer 'src' |
|
121 |
+ * into already allocated 'dst' buffer of size 'dstCapacity'. |
|
122 |
+ * Compression is guaranteed to succeed if 'dstCapacity' >= LZ4_compressBound(srcSize). |
|
123 |
+ * It also runs faster, so it's a recommended setting. |
|
124 |
+ * If the function cannot compress 'src' into a more limited 'dst' budget, |
|
125 |
+ * compression stops *immediately*, and the function result is zero. |
|
126 |
+ * In which case, 'dst' content is undefined (invalid). |
|
127 |
+ * srcSize : max supported value is LZ4_MAX_INPUT_SIZE. |
|
128 |
+ * dstCapacity : size of buffer 'dst' (which must be already allocated) |
|
129 |
+ * @return : the number of bytes written into buffer 'dst' (necessarily <= dstCapacity) |
|
130 |
+ * or 0 if compression fails |
|
131 |
+ * Note : This function is protected against buffer overflow scenarios (never writes outside 'dst' buffer, nor read outside 'source' buffer). |
|
132 |
+ */ |
|
133 |
+LZ4LIB_API int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity); |
|
131 | 134 |
|
132 | 135 |
/*! LZ4_decompress_safe() : |
133 |
- compressedSize : is the precise full size of the compressed block. |
|
134 |
- maxDecompressedSize : is the size of destination buffer, which must be already allocated. |
|
135 |
- return : the number of bytes decompressed into destination buffer (necessarily <= maxDecompressedSize) |
|
136 |
- If destination buffer is not large enough, decoding will stop and output an error code (<0). |
|
137 |
- If the source stream is detected malformed, the function will stop decoding and return a negative result. |
|
138 |
- This function is protected against buffer overflow exploits, including malicious data packets. |
|
139 |
- It never writes outside output buffer, nor reads outside input buffer. |
|
140 |
-*/ |
|
141 |
-LZ4LIB_API int LZ4_decompress_safe (const char* source, char* dest, int compressedSize, int maxDecompressedSize); |
|
136 |
+ * compressedSize : is the exact complete size of the compressed block. |
|
137 |
+ * dstCapacity : is the size of destination buffer (which must be already allocated), presumed an upper bound of decompressed size. |
|
138 |
+ * @return : the number of bytes decompressed into destination buffer (necessarily <= dstCapacity) |
|
139 |
+ * If destination buffer is not large enough, decoding will stop and output an error code (negative value). |
|
140 |
+ * If the source stream is detected malformed, the function will stop decoding and return a negative result. |
|
141 |
+ * Note 1 : This function is protected against malicious data packets : |
|
142 |
+ * it will never writes outside 'dst' buffer, nor read outside 'source' buffer, |
|
143 |
+ * even if the compressed block is maliciously modified to order the decoder to do these actions. |
|
144 |
+ * In such case, the decoder stops immediately, and considers the compressed block malformed. |
|
145 |
+ * Note 2 : compressedSize and dstCapacity must be provided to the function, the compressed block does not contain them. |
|
146 |
+ * The implementation is free to send / store / derive this information in whichever way is most beneficial. |
|
147 |
+ * If there is a need for a different format which bundles together both compressed data and its metadata, consider looking at lz4frame.h instead. |
|
148 |
+ */ |
|
149 |
+LZ4LIB_API int LZ4_decompress_safe (const char* src, char* dst, int compressedSize, int dstCapacity); |
|
142 | 150 |
|
143 | 151 |
|
144 | 152 |
/*-************************************ |
... | ... |
@@ -147,184 +170,389 @@ LZ4LIB_API int LZ4_decompress_safe (const char* source, char* dest, int compress |
147 | 147 |
#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */ |
148 | 148 |
#define LZ4_COMPRESSBOUND(isize) ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16) |
149 | 149 |
|
150 |
-/*! |
|
151 |
-LZ4_compressBound() : |
|
150 |
+/*! LZ4_compressBound() : |
|
152 | 151 |
Provides the maximum size that LZ4 compression may output in a "worst case" scenario (input data not compressible) |
153 | 152 |
This function is primarily useful for memory allocation purposes (destination buffer size). |
154 | 153 |
Macro LZ4_COMPRESSBOUND() is also provided for compilation-time evaluation (stack memory allocation for example). |
155 |
- Note that LZ4_compress_default() compress faster when dest buffer size is >= LZ4_compressBound(srcSize) |
|
154 |
+ Note that LZ4_compress_default() compresses faster when dstCapacity is >= LZ4_compressBound(srcSize) |
|
156 | 155 |
inputSize : max supported value is LZ4_MAX_INPUT_SIZE |
157 | 156 |
return : maximum output size in a "worst case" scenario |
158 |
- or 0, if input size is too large ( > LZ4_MAX_INPUT_SIZE) |
|
157 |
+ or 0, if input size is incorrect (too large or negative) |
|
159 | 158 |
*/ |
160 | 159 |
LZ4LIB_API int LZ4_compressBound(int inputSize); |
161 | 160 |
|
162 |
-/*! |
|
163 |
-LZ4_compress_fast() : |
|
164 |
- Same as LZ4_compress_default(), but allows to select an "acceleration" factor. |
|
161 |
+/*! LZ4_compress_fast() : |
|
162 |
+ Same as LZ4_compress_default(), but allows selection of "acceleration" factor. |
|
165 | 163 |
The larger the acceleration value, the faster the algorithm, but also the lesser the compression. |
166 | 164 |
It's a trade-off. It can be fine tuned, with each successive value providing roughly +~3% to speed. |
167 | 165 |
An acceleration value of "1" is the same as regular LZ4_compress_default() |
168 |
- Values <= 0 will be replaced by ACCELERATION_DEFAULT (see lz4.c), which is 1. |
|
166 |
+ Values <= 0 will be replaced by ACCELERATION_DEFAULT (currently == 1, see lz4.c). |
|
169 | 167 |
*/ |
170 |
-LZ4LIB_API int LZ4_compress_fast (const char* source, char* dest, int sourceSize, int maxDestSize, int acceleration); |
|
168 |
+LZ4LIB_API int LZ4_compress_fast (const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); |
|
171 | 169 |
|
172 | 170 |
|
173 |
-/*! |
|
174 |
-LZ4_compress_fast_extState() : |
|
175 |
- Same compression function, just using an externally allocated memory space to store compression state. |
|
176 |
- Use LZ4_sizeofState() to know how much memory must be allocated, |
|
177 |
- and allocate it on 8-bytes boundaries (using malloc() typically). |
|
178 |
- Then, provide it as 'void* state' to compression function. |
|
179 |
-*/ |
|
171 |
+/*! LZ4_compress_fast_extState() : |
|
172 |
+ * Same as LZ4_compress_fast(), using an externally allocated memory space for its state. |
|
173 |
+ * Use LZ4_sizeofState() to know how much memory must be allocated, |
|
174 |
+ * and allocate it on 8-bytes boundaries (using `malloc()` typically). |
|
175 |
+ * Then, provide this buffer as `void* state` to compression function. |
|
176 |
+ */ |
|
180 | 177 |
LZ4LIB_API int LZ4_sizeofState(void); |
181 |
-LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* source, char* dest, int inputSize, int maxDestSize, int acceleration); |
|
182 |
- |
|
183 |
- |
|
184 |
-/*! |
|
185 |
-LZ4_compress_destSize() : |
|
186 |
- Reverse the logic, by compressing as much data as possible from 'source' buffer |
|
187 |
- into already allocated buffer 'dest' of size 'targetDestSize'. |
|
188 |
- This function either compresses the entire 'source' content into 'dest' if it's large enough, |
|
189 |
- or fill 'dest' buffer completely with as much data as possible from 'source'. |
|
190 |
- *sourceSizePtr : will be modified to indicate how many bytes where read from 'source' to fill 'dest'. |
|
191 |
- New value is necessarily <= old value. |
|
192 |
- return : Nb bytes written into 'dest' (necessarily <= targetDestSize) |
|
193 |
- or 0 if compression fails |
|
194 |
-*/ |
|
195 |
-LZ4LIB_API int LZ4_compress_destSize (const char* source, char* dest, int* sourceSizePtr, int targetDestSize); |
|
196 |
- |
|
197 |
- |
|
198 |
-/*! |
|
199 |
-LZ4_decompress_fast() : |
|
200 |
- originalSize : is the original and therefore uncompressed size |
|
201 |
- return : the number of bytes read from the source buffer (in other words, the compressed size) |
|
202 |
- If the source stream is detected malformed, the function will stop decoding and return a negative result. |
|
203 |
- Destination buffer must be already allocated. Its size must be a minimum of 'originalSize' bytes. |
|
204 |
- note : This function fully respect memory boundaries for properly formed compressed data. |
|
205 |
- It is a bit faster than LZ4_decompress_safe(). |
|
206 |
- However, it does not provide any protection against intentionally modified data stream (malicious input). |
|
207 |
- Use this function in trusted environment only (data to decode comes from a trusted source). |
|
208 |
-*/ |
|
209 |
-LZ4LIB_API int LZ4_decompress_fast (const char* source, char* dest, int originalSize); |
|
210 |
- |
|
211 |
-/*! |
|
212 |
-LZ4_decompress_safe_partial() : |
|
213 |
- This function decompress a compressed block of size 'compressedSize' at position 'source' |
|
214 |
- into destination buffer 'dest' of size 'maxDecompressedSize'. |
|
215 |
- The function tries to stop decompressing operation as soon as 'targetOutputSize' has been reached, |
|
216 |
- reducing decompression time. |
|
217 |
- return : the number of bytes decoded in the destination buffer (necessarily <= maxDecompressedSize) |
|
218 |
- Note : this number can be < 'targetOutputSize' should the compressed block to decode be smaller. |
|
219 |
- Always control how many bytes were decoded. |
|
220 |
- If the source stream is detected malformed, the function will stop decoding and return a negative result. |
|
221 |
- This function never writes outside of output buffer, and never reads outside of input buffer. It is therefore protected against malicious data packets |
|
178 |
+LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); |
|
179 |
+ |
|
180 |
+ |
|
181 |
+/*! LZ4_compress_destSize() : |
|
182 |
+ * Reverse the logic : compresses as much data as possible from 'src' buffer |
|
183 |
+ * into already allocated buffer 'dst', of size >= 'targetDestSize'. |
|
184 |
+ * This function either compresses the entire 'src' content into 'dst' if it's large enough, |
|
185 |
+ * or fill 'dst' buffer completely with as much data as possible from 'src'. |
|
186 |
+ * note: acceleration parameter is fixed to "default". |
|
187 |
+ * |
|
188 |
+ * *srcSizePtr : will be modified to indicate how many bytes where read from 'src' to fill 'dst'. |
|
189 |
+ * New value is necessarily <= input value. |
|
190 |
+ * @return : Nb bytes written into 'dst' (necessarily <= targetDestSize) |
|
191 |
+ * or 0 if compression fails. |
|
222 | 192 |
*/ |
223 |
-LZ4LIB_API int LZ4_decompress_safe_partial (const char* source, char* dest, int compressedSize, int targetOutputSize, int maxDecompressedSize); |
|
193 |
+LZ4LIB_API int LZ4_compress_destSize (const char* src, char* dst, int* srcSizePtr, int targetDstSize); |
|
194 |
+ |
|
195 |
+ |
|
196 |
+/*! LZ4_decompress_safe_partial() : |
|
197 |
+ * Decompress an LZ4 compressed block, of size 'srcSize' at position 'src', |
|
198 |
+ * into destination buffer 'dst' of size 'dstCapacity'. |
|
199 |
+ * Up to 'targetOutputSize' bytes will be decoded. |
|
200 |
+ * The function stops decoding on reaching this objective, |
|
201 |
+ * which can boost performance when only the beginning of a block is required. |
|
202 |
+ * |
|
203 |
+ * @return : the number of bytes decoded in `dst` (necessarily <= dstCapacity) |
|
204 |
+ * If source stream is detected malformed, function returns a negative result. |
|
205 |
+ * |
|
206 |
+ * Note : @return can be < targetOutputSize, if compressed block contains less data. |
|
207 |
+ * |
|
208 |
+ * Note 2 : this function features 2 parameters, targetOutputSize and dstCapacity, |
|
209 |
+ * and expects targetOutputSize <= dstCapacity. |
|
210 |
+ * It effectively stops decoding on reaching targetOutputSize, |
|
211 |
+ * so dstCapacity is kind of redundant. |
|
212 |
+ * This is because in a previous version of this function, |
|
213 |
+ * decoding operation would not "break" a sequence in the middle. |
|
214 |
+ * As a consequence, there was no guarantee that decoding would stop at exactly targetOutputSize, |
|
215 |
+ * it could write more bytes, though only up to dstCapacity. |
|
216 |
+ * Some "margin" used to be required for this operation to work properly. |
|
217 |
+ * This is no longer necessary. |
|
218 |
+ * The function nonetheless keeps its signature, in an effort to not break API. |
|
219 |
+ */ |
|
220 |
+LZ4LIB_API int LZ4_decompress_safe_partial (const char* src, char* dst, int srcSize, int targetOutputSize, int dstCapacity); |
|
224 | 221 |
|
225 | 222 |
|
226 | 223 |
/*-********************************************* |
227 | 224 |
* Streaming Compression Functions |
228 | 225 |
***********************************************/ |
229 |
-typedef union LZ4_stream_u LZ4_stream_t; /* incomplete type (defined later) */ |
|
226 |
+typedef union LZ4_stream_u LZ4_stream_t; /* incomplete type (defined later) */ |
|
230 | 227 |
|
231 |
-/*! LZ4_createStream() and LZ4_freeStream() : |
|
232 |
- * LZ4_createStream() will allocate and initialize an `LZ4_stream_t` structure. |
|
233 |
- * LZ4_freeStream() releases its memory. |
|
234 |
- */ |
|
235 | 228 |
LZ4LIB_API LZ4_stream_t* LZ4_createStream(void); |
236 | 229 |
LZ4LIB_API int LZ4_freeStream (LZ4_stream_t* streamPtr); |
237 | 230 |
|
238 |
-/*! LZ4_resetStream() : |
|
239 |
- * An LZ4_stream_t structure can be allocated once and re-used multiple times. |
|
240 |
- * Use this function to init an allocated `LZ4_stream_t` structure and start a new compression. |
|
231 |
+/*! LZ4_resetStream_fast() : v1.9.0+ |
|
232 |
+ * Use this to prepare an LZ4_stream_t for a new chain of dependent blocks |
|
233 |
+ * (e.g., LZ4_compress_fast_continue()). |
|
234 |
+ * |
|
235 |
+ * An LZ4_stream_t must be initialized once before usage. |
|
236 |
+ * This is automatically done when created by LZ4_createStream(). |
|
237 |
+ * However, should the LZ4_stream_t be simply declared on stack (for example), |
|
238 |
+ * it's necessary to initialize it first, using LZ4_initStream(). |
|
239 |
+ * |
|
240 |
+ * After init, start any new stream with LZ4_resetStream_fast(). |
|
241 |
+ * A same LZ4_stream_t can be re-used multiple times consecutively |
|
242 |
+ * and compress multiple streams, |
|
243 |
+ * provided that it starts each new stream with LZ4_resetStream_fast(). |
|
244 |
+ * |
|
245 |
+ * LZ4_resetStream_fast() is much faster than LZ4_initStream(), |
|
246 |
+ * but is not compatible with memory regions containing garbage data. |
|
247 |
+ * |
|
248 |
+ * Note: it's only useful to call LZ4_resetStream_fast() |
|
249 |
+ * in the context of streaming compression. |
|
250 |
+ * The *extState* functions perform their own resets. |
|
251 |
+ * Invoking LZ4_resetStream_fast() before is redundant, and even counterproductive. |
|
241 | 252 |
*/ |
242 |
-LZ4LIB_API void LZ4_resetStream (LZ4_stream_t* streamPtr); |
|
253 |
+LZ4LIB_API void LZ4_resetStream_fast (LZ4_stream_t* streamPtr); |
|
243 | 254 |
|
244 | 255 |
/*! LZ4_loadDict() : |
245 |
- * Use this function to load a static dictionary into LZ4_stream. |
|
246 |
- * Any previous data will be forgotten, only 'dictionary' will remain in memory. |
|
247 |
- * Loading a size of 0 is allowed. |
|
248 |
- * Return : dictionary size, in bytes (necessarily <= 64 KB) |
|
256 |
+ * Use this function to reference a static dictionary into LZ4_stream_t. |
|
257 |
+ * The dictionary must remain available during compression. |
|
258 |
+ * LZ4_loadDict() triggers a reset, so any previous data will be forgotten. |
|
259 |
+ * The same dictionary will have to be loaded on decompression side for successful decoding. |
|
260 |
+ * Dictionary are useful for better compression of small data (KB range). |
|
261 |
+ * While LZ4 accept any input as dictionary, |
|
262 |
+ * results are generally better when using Zstandard's Dictionary Builder. |
|
263 |
+ * Loading a size of 0 is allowed, and is the same as reset. |
|
264 |
+ * @return : loaded dictionary size, in bytes (necessarily <= 64 KB) |
|
249 | 265 |
*/ |
250 | 266 |
LZ4LIB_API int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, int dictSize); |
251 | 267 |
|
252 | 268 |
/*! LZ4_compress_fast_continue() : |
253 |
- * Compress buffer content 'src', using data from previously compressed blocks as dictionary to improve compression ratio. |
|
254 |
- * Important : Previous data blocks are assumed to still be present and unmodified ! |
|
255 |
- * 'dst' buffer must be already allocated. |
|
256 |
- * If maxDstSize >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster. |
|
257 |
- * If not, and if compressed data cannot fit into 'dst' buffer size, compression stops, and function returns a zero. |
|
269 |
+ * Compress 'src' content using data from previously compressed blocks, for better compression ratio. |
|
270 |
+ * 'dst' buffer must be already allocated. |
|
271 |
+ * If dstCapacity >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster. |
|
272 |
+ * |
|
273 |
+ * @return : size of compressed block |
|
274 |
+ * or 0 if there is an error (typically, cannot fit into 'dst'). |
|
275 |
+ * |
|
276 |
+ * Note 1 : Each invocation to LZ4_compress_fast_continue() generates a new block. |
|
277 |
+ * Each block has precise boundaries. |
|
278 |
+ * Each block must be decompressed separately, calling LZ4_decompress_*() with relevant metadata. |
|
279 |
+ * It's not possible to append blocks together and expect a single invocation of LZ4_decompress_*() to decompress them together. |
|
280 |
+ * |
|
281 |
+ * Note 2 : The previous 64KB of source data is __assumed__ to remain present, unmodified, at same address in memory ! |
|
282 |
+ * |
|
283 |
+ * Note 3 : When input is structured as a double-buffer, each buffer can have any size, including < 64 KB. |
|
284 |
+ * Make sure that buffers are separated, by at least one byte. |
|
285 |
+ * This construction ensures that each block only depends on previous block. |
|
286 |
+ * |
|
287 |
+ * Note 4 : If input buffer is a ring-buffer, it can have any size, including < 64 KB. |
|
288 |
+ * |
|
289 |
+ * Note 5 : After an error, the stream status is undefined (invalid), it can only be reset or freed. |
|
258 | 290 |
*/ |
259 |
-LZ4LIB_API int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int maxDstSize, int acceleration); |
|
291 |
+LZ4LIB_API int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); |
|
260 | 292 |
|
261 | 293 |
/*! LZ4_saveDict() : |
262 |
- * If previously compressed data block is not guaranteed to remain available at its memory location, |
|
294 |
+ * If last 64KB data cannot be guaranteed to remain available at its current memory location, |
|
263 | 295 |
* save it into a safer place (char* safeBuffer). |
264 |
- * Note : you don't need to call LZ4_loadDict() afterwards, |
|
265 |
- * dictionary is immediately usable, you can therefore call LZ4_compress_fast_continue(). |
|
266 |
- * Return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error. |
|
296 |
+ * This is schematically equivalent to a memcpy() followed by LZ4_loadDict(), |
|
297 |
+ * but is much faster, because LZ4_saveDict() doesn't need to rebuild tables. |
|
298 |
+ * @return : saved dictionary size in bytes (necessarily <= maxDictSize), or 0 if error. |
|
267 | 299 |
*/ |
268 |
-LZ4LIB_API int LZ4_saveDict (LZ4_stream_t* streamPtr, char* safeBuffer, int dictSize); |
|
300 |
+LZ4LIB_API int LZ4_saveDict (LZ4_stream_t* streamPtr, char* safeBuffer, int maxDictSize); |
|
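A sketch of the save step under those constraints; the helper name and the caller-provided 'stream' are assumptions:

    /* sketch: the window 'stream' currently references is about to be
     * overwritten, so move the last 64 KB somewhere stable first */
    static char safeBuf[64 * 1024];

    static int save_window_sketch(LZ4_stream_t* stream)
    {
        int const saved = LZ4_saveDict(stream, safeBuf, (int) sizeof(safeBuf));
        /* the stream now references safeBuf; LZ4_compress_fast_continue()
         * may be called right away, no LZ4_loadDict() needed */
        return saved;   /* 0 means error */
    }
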
269 | 301 |
|
270 | 302 |
|
271 | 303 |
/*-********************************************** |
272 | 304 |
* Streaming Decompression Functions |
273 | 305 |
* Bufferless synchronous API |
274 | 306 |
************************************************/ |
275 |
-typedef union LZ4_streamDecode_u LZ4_streamDecode_t; /* incomplete type (defined later) */ |
|
307 |
+typedef union LZ4_streamDecode_u LZ4_streamDecode_t; /* tracking context */ |
|
276 | 308 |
|
277 |
-/* creation / destruction of streaming decompression tracking structure */ |
|
309 |
+/*! LZ4_createStreamDecode() and LZ4_freeStreamDecode() : |
|
310 |
+ * creation / destruction of streaming decompression tracking context. |
|
311 |
+ * A tracking context can be re-used multiple times. |
|
312 |
+ */ |
|
278 | 313 |
LZ4LIB_API LZ4_streamDecode_t* LZ4_createStreamDecode(void); |
279 | 314 |
LZ4LIB_API int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream); |
280 | 315 |
|
281 | 316 |
/*! LZ4_setStreamDecode() : |
282 |
- * Use this function to instruct where to find the dictionary. |
|
283 |
- * Setting a size of 0 is allowed (same effect as reset). |
|
284 |
- * @return : 1 if OK, 0 if error |
|
317 |
+ * An LZ4_streamDecode_t context can be allocated once and re-used multiple times. |
|
318 |
+ * Use this function to start decompression of a new stream of blocks. |
|
319 |
+ * A dictionary can optionally be set. Use NULL or size 0 for a reset order. |
|
320 |
+ * Dictionary is presumed stable : it must remain accessible and unmodified during the next decompression.
|
321 |
+ * @return : 1 if OK, 0 if error |
|
285 | 322 |
*/ |
286 | 323 |
LZ4LIB_API int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize); |
287 | 324 |
|
288 |
-/*! |
|
289 |
-LZ4_decompress_*_continue() : |
|
290 |
- These decoding functions allow decompression of multiple blocks in "streaming" mode. |
|
291 |
- Previously decoded blocks *must* remain available at the memory position where they were decoded (up to 64 KB) |
|
292 |
- In the case of a ring buffers, decoding buffer must be either : |
|
293 |
- - Exactly same size as encoding buffer, with same update rule (block boundaries at same positions) |
|
294 |
- In which case, the decoding & encoding ring buffer can have any size, including very small ones ( < 64 KB). |
|
295 |
- - Larger than encoding buffer, by a minimum of maxBlockSize more bytes. |
|
296 |
- maxBlockSize is implementation dependent. It's the maximum size you intend to compress into a single block. |
|
297 |
- In which case, encoding and decoding buffers do not need to be synchronized, |
|
298 |
- and encoding ring buffer can have any size, including small ones ( < 64 KB). |
|
299 |
- - _At least_ 64 KB + 8 bytes + maxBlockSize. |
|
300 |
- In which case, encoding and decoding buffers do not need to be synchronized, |
|
301 |
- and encoding ring buffer can have any size, including larger than decoding buffer. |
|
302 |
- Whenever these conditions are not possible, save the last 64KB of decoded data into a safe buffer, |
|
303 |
- and indicate where it is saved using LZ4_setStreamDecode() |
|
325 |
+/*! LZ4_decoderRingBufferSize() : v1.8.2+ |
|
326 |
+ * Note : in a ring buffer scenario (optional), |
|
327 |
+ * blocks are presumed to be decompressed next to each other

328 
+ * up to the moment there is not enough remaining space for the next block (remainingSize < maxBlockSize),

329 
+ * at which stage it resumes from the beginning of the ring buffer.
|
330 |
+ * When setting such a ring buffer for streaming decompression, |
|
331 |
+ * provide the minimum size of this ring buffer

332 
+ * to be compatible with any source respecting the maxBlockSize condition.
|
333 |
+ * @return : minimum ring buffer size, |
|
334 |
+ * or 0 if there is an error (invalid maxBlockSize). |
|
335 |
+ */ |
|
336 |
+LZ4LIB_API int LZ4_decoderRingBufferSize(int maxBlockSize); |
|
337 |
+#define LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize) (65536 + 14 + (maxBlockSize)) /* for static allocation; maxBlockSize presumed valid */ |
|
338 |
+ |
|
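For example, sizing a decoder ring buffer for blocks of at most 4 KB (the 4 KB figure is an illustrative assumption):

    #include "lz4.h"

    #define MAX_BLOCK_SIZE (4 * 1024)

    /* compile-time sizing, for static allocation */
    static char ringBuffer[LZ4_DECODER_RING_BUFFER_SIZE(MAX_BLOCK_SIZE)];

    /* run-time equivalent, e.g. to size a malloc()'d buffer */
    static int ring_size(void)
    {
        return LZ4_decoderRingBufferSize(MAX_BLOCK_SIZE);
    }
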
339 |
+/*! LZ4_decompress_*_continue() : |
|
340 |
+ * These decoding functions allow decompression of consecutive blocks in "streaming" mode. |
|
341 |
+ * A block is an unsplittable entity; it must be presented entirely to a decompression function.
|
342 |
+ * Decompression functions only accept one block at a time.

343 
+ * The last 64KB of previously decoded data *must* remain available and unmodified at the memory position where it was decoded.
|
344 |
+ * If less than 64KB of data has been decoded, all the data must be present. |
|
345 |
+ * |
|
346 |
+ * Special : if decompression side sets a ring buffer, it must respect one of the following conditions : |
|
347 |
+ * - Decompression buffer size is _at least_ LZ4_decoderRingBufferSize(maxBlockSize). |
|
348 |
+ * maxBlockSize is the maximum size of any single block. It can have any value > 16 bytes. |
|
349 |
+ * In which case, encoding and decoding buffers do not need to be synchronized. |
|
350 |
+ * Actually, data can be produced by any source compliant with the LZ4 format specification, and respecting maxBlockSize.
|
351 |
+ * - Synchronized mode : |
|
352 |
+ * Decompression buffer size is _exactly_ the same as compression buffer size, |
|
353 |
+ * and follows exactly the same update rule (block boundaries at the same positions),

354 
+ * and the decoding function is provided with the exact decompressed size of each block (except for the last block of the stream),
|
355 |
+ * _then_ decoding & encoding ring buffer can have any size, including small ones ( < 64 KB). |
|
356 |
+ * - Decompression buffer is larger than encoding buffer, by a minimum of maxBlockSize more bytes. |
|
357 |
+ * In which case, encoding and decoding buffers do not need to be synchronized, |
|
358 |
+ * and encoding ring buffer can have any size, including small ones ( < 64 KB). |
|
359 |
+ * |
|
360 |
+ * Whenever these conditions are not possible, |
|
361 |
+ * save the last 64KB of decoded data into a safe buffer where it can't be modified during decompression, |
|
362 |
+ * then indicate where this data is saved using LZ4_setStreamDecode(), before decompressing the next block.
|
304 | 363 |
*/ |
305 |
-LZ4LIB_API int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxDecompressedSize); |
|
306 |
-LZ4LIB_API int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize); |
|
364 |
+LZ4LIB_API int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int srcSize, int dstCapacity); |
|
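A sketch of ring-buffer decoding under the first condition above, reusing MAX_BLOCK_SIZE from the sizing sketch; read_block() and consume() stand in for container I/O and are assumptions, not lz4 API. It also shows LZ4_setStreamDecode() starting a new stream:

    #include "lz4.h"

    /* hypothetical container helpers, not part of lz4 */
    int  read_block(char* dst, int dstCapacity);   /* returns compressed size, 0 at EOF */
    void consume(const char* data, int size);

    static void decompress_stream_sketch(void)
    {
        static char ring[LZ4_DECODER_RING_BUFFER_SIZE(MAX_BLOCK_SIZE)];
        char cBuf[LZ4_COMPRESSBOUND(MAX_BLOCK_SIZE)];
        LZ4_streamDecode_t* const sd = LZ4_createStreamDecode();
        int offset = 0;
        int cSize;
        if (sd == NULL) return;
        (void) LZ4_setStreamDecode(sd, NULL, 0);     /* start of a new stream */
        while ((cSize = read_block(cBuf, (int) sizeof(cBuf))) > 0) {
            if (offset + MAX_BLOCK_SIZE > (int) sizeof(ring))
                offset = 0;                          /* wrap, as described above */
            int const dSize = LZ4_decompress_safe_continue(sd, cBuf,
                                  ring + offset, cSize, MAX_BLOCK_SIZE);
            if (dSize < 0) break;                    /* malformed input */
            consume(ring + offset, dSize);
            offset += dSize;
        }
        LZ4_freeStreamDecode(sd);
    }
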
307 | 365 |
|
308 | 366 |
|
309 | 367 |
/*! LZ4_decompress_*_usingDict() : |
310 | 368 |
* These decoding functions work the same as |
311 | 369 |
* a combination of LZ4_setStreamDecode() followed by LZ4_decompress_*_continue() |
312 | 370 |
* They are stand-alone, and don't need an LZ4_streamDecode_t structure. |
371 |
+ * Dictionary is presumed stable : it must remain accessible and unmodified during decompression. |
|
372 |
+ * Performance tip : Decompression speed can be substantially increased |
|
373 |
+ * when dst == dictStart + dictSize. |
|
313 | 374 |
*/ |
314 |
-LZ4LIB_API int LZ4_decompress_safe_usingDict (const char* source, char* dest, int compressedSize, int maxDecompressedSize, const char* dictStart, int dictSize); |
|
315 |
-LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* source, char* dest, int originalSize, const char* dictStart, int dictSize); |
|
375 |
+LZ4LIB_API int LZ4_decompress_safe_usingDict (const char* src, char* dst, int srcSize, int dstCapacity, const char* dictStart, int dictSize);
|
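For instance, a thin wrapper (name and parameters are illustrative):

    /* sketch: one-shot decompression with an external dictionary */
    static int decode_with_dict_sketch(const char* cBuf, int cSize,
                                       char* dst, int dstCap,
                                       const char* dictBuf, int dictLen)
    {
        int const dSize = LZ4_decompress_safe_usingDict(cBuf, dst, cSize,
                                                        dstCap, dictBuf, dictLen);
        return dSize;   /* negative on malformed or truncated input */
    }
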
316 | 376 |
|
377 |
+#endif /* LZ4_H_2983827168210 */ |
|
317 | 378 |
|
318 |
-/*^********************************************** |
|
379 |
+ |
|
380 |
+/*^************************************* |
|
319 | 381 |
* !!!!!! STATIC LINKING ONLY !!!!!! |
320 |
- ***********************************************/ |
|
321 |
-/*-************************************ |
|
322 |
- * Private definitions |
|
323 |
- ************************************** |
|
324 |
- * Do not use these definitions. |
|
325 |
- * They are exposed to allow static allocation of `LZ4_stream_t` and `LZ4_streamDecode_t`. |
|
326 |
- * Using these definitions will expose code to API and/or ABI break in future versions of the library. |
|
327 |
- **************************************/ |
|
382 |
+ ***************************************/ |
|
383 |
+ |
|
384 |
+/*-**************************************************************************** |
|
385 |
+ * Experimental section |
|
386 |
+ * |
|
387 |
+ * Symbols declared in this section must be considered unstable. Their |
|
388 |
+ * signatures or semantics may change, or they may be removed altogether in the |
|
389 |
+ * future. They are therefore only safe to depend on when the caller is |
|
390 |
+ * statically linked against the library. |
|
391 |
+ * |
|
392 |
+ * To protect against unsafe usage, not only are the declarations guarded, |
|
393 |
+ * the definitions are hidden by default |
|
394 |
+ * when building LZ4 as a shared/dynamic library. |
|
395 |
+ * |
|
396 |
+ * In order to access these declarations, |
|
397 |
+ * define LZ4_STATIC_LINKING_ONLY in your application |
|
398 |
+ * before including LZ4's headers. |
|
399 |
+ * |
|
400 |
+ * In order to make their implementations accessible dynamically, you must |
|
401 |
+ * define LZ4_PUBLISH_STATIC_FUNCTIONS when building the LZ4 library. |
|
402 |
+ ******************************************************************************/ |
|
403 |
+ |
|
404 |
+#ifdef LZ4_STATIC_LINKING_ONLY |
|
405 |
+ |
|
406 |
+#ifndef LZ4_STATIC_3504398509 |
|
407 |
+#define LZ4_STATIC_3504398509 |
|
408 |
+ |
|
409 |
+#ifdef LZ4_PUBLISH_STATIC_FUNCTIONS |
|
410 |
+#define LZ4LIB_STATIC_API LZ4LIB_API |
|
411 |
+#else |
|
412 |
+#define LZ4LIB_STATIC_API |
|
413 |
+#endif |
|
414 |
+ |
|
415 |
+ |
|
416 |
+/*! LZ4_compress_fast_extState_fastReset() : |
|
417 |
+ * A variant of LZ4_compress_fast_extState(). |
|
418 |
+ * |
|
419 |
+ * Using this variant avoids an expensive initialization step. |
|
420 |
+ * It is only safe to call if the state buffer is known to be correctly initialized already |
|
421 |
+ * (see above comment on LZ4_resetStream_fast() for a definition of "correctly initialized"). |
|
422 |
+ * From a high level, the difference is that |
|
423 |
+ * this function initializes the provided state with a call to something like LZ4_resetStream_fast() |
|
424 |
+ * while LZ4_compress_fast_extState() starts with a call to LZ4_resetStream(). |
|
425 |
+ */ |
|
426 |
+LZ4LIB_STATIC_API int LZ4_compress_fast_extState_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); |
|
427 |
+ |
|
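A sketch of state reuse across independent compressions; the chunk arrays and the helper name are assumptions:

    static void compress_chunks_sketch(const char* const* src, const int* srcSize,
                                       char* dst, int dstCap, int nbChunks)
    {
        LZ4_stream_t state;                        /* guaranteed size/alignment */
        if (LZ4_initStream(&state, sizeof(state)) == NULL) return;  /* full init, once */
        for (int i = 0; i < nbChunks; i++) {
            int const cSize = LZ4_compress_fast_extState_fastReset(&state,
                                  src[i], dst, srcSize[i], dstCap, 1);
            if (cSize == 0) return;                /* dst too small */
            /* ... ship dst[0..cSize-1] somewhere ... */
        }
    }
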
428 |
+/*! LZ4_attach_dictionary() : |
|
429 |
+ * This is an experimental API that allows |
|
430 |
+ * efficient use of a static dictionary many times. |
|
431 |
+ * |
|
432 |
+ * Rather than re-loading the dictionary buffer into a working context before |
|
433 |
+ * each compression, or copying a pre-loaded dictionary's LZ4_stream_t into a |
|
434 |
+ * working LZ4_stream_t, this function introduces a no-copy setup mechanism, |
|
435 |
+ * in which the working stream references the dictionary stream in-place. |
|
436 |
+ * |
|
437 |
+ * Several assumptions are made about the state of the dictionary stream. |
|
438 |
+ * Currently, only streams which have been prepared by LZ4_loadDict() should |
|
439 |
+ * be expected to work. |
|
440 |
+ * |
|
441 |
+ * Alternatively, the provided dictionaryStream may be NULL, |
|
442 |
+ * in which case any existing dictionary stream is unset. |
|
443 |
+ * |
|
444 |
+ * If a dictionary is provided, it replaces any pre-existing stream history. |
|
445 |
+ * The dictionary contents are the only history that can be referenced and |
|
446 |
+ * logically immediately precede the data compressed in the first subsequent |
|
447 |
+ * compression call. |
|
448 |
+ * |
|
449 |
+ * The dictionary will only remain attached to the working stream through the |
|
450 |
+ * first compression call, at the end of which it is cleared. The dictionary |
|
451 |
+ * stream (and source buffer) must remain in-place / accessible / unchanged |
|
452 |
+ * through the completion of the first compression call on the stream. |
|
453 |
+ */ |
|
454 |
+LZ4LIB_STATIC_API void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream); |
|
455 |
+ |
|
456 |
+ |
|
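A sketch of the intended workflow; the helper name and message arrays are assumptions, and LZ4_resetStream_fast() is the v1.9.0+ reset declared elsewhere in this header:

    /* sketch: prepare a dictionary once, attach it per message */
    static void compress_msgs_sketch(const char* dictBuf, int dictLen,
                                     const char* const* msg, const int* msgSize,
                                     char* dst, int dstCap, int nbMsgs)
    {
        LZ4_stream_t* const dictStream = LZ4_createStream();
        LZ4_stream_t* const work = LZ4_createStream();
        if (dictStream != NULL && work != NULL) {
            (void) LZ4_loadDict(dictStream, dictBuf, dictLen);   /* once */
            for (int i = 0; i < nbMsgs; i++) {
                LZ4_resetStream_fast(work);                /* cheap re-init */
                LZ4_attach_dictionary(work, dictStream);   /* no copy */
                (void) LZ4_compress_fast_continue(work, msg[i], dst,
                                                  msgSize[i], dstCap, 1);
                /* the attachment is cleared after this first compression call */
            }
        }
        LZ4_freeStream(work);
        LZ4_freeStream(dictStream);
    }
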
457 |
+/*! In-place compression and decompression |
|
458 |
+ * |
|
459 |
+ * It's possible to have input and output sharing the same buffer, |
|
460 |
+ * for highly constrained memory environments.

461 
+ * In both cases, it requires the input to lie at the end of the buffer,

462 
+ * and decompression to start at the beginning of the buffer.

463 
+ * Buffer size must include some margin, and hence be larger than the final size.
|
464 |
+ * |
|
465 |
+ * |<------------------------buffer--------------------------------->| |
|
466 |
+ * |<-----------compressed data--------->| |
|
467 |
+ * |<-----------decompressed size------------------>| |
|
468 |
+ * |<----margin---->| |
|
469 |
+ * |
|
470 |
+ * This technique is more useful for decompression, |
|
471 |
+ * since decompressed size is typically larger, |
|
472 |
+ * and margin is short. |
|
473 |
+ * |
|
474 |
+ * In-place decompression will work inside any buffer |
|
475 |
+ * whose size is >= LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize).
|
476 |
+ * This presumes that decompressedSize > compressedSize. |
|
477 |
+ * Otherwise, it means compression actually expanded data, |
|
478 |
+ * and it would be more efficient to store such data with a flag indicating it's not compressed. |
|
479 |
+ * This can happen when data is not compressible (already compressed, or encrypted). |
|
480 |
+ * |
|
481 |
+ * For in-place compression, margin is larger, as it must be able to cope with both |
|
482 |
+ * history preservation, requiring input data to remain unmodified up to LZ4_DISTANCE_MAX, |
|
483 |
+ * and data expansion, which can happen when input is not compressible. |
|
484 |
+ * As a consequence, buffer size requirements are much higher, |
|
485 |
+ * and memory savings offered by in-place compression are more limited. |
|
486 |
+ * |
|
487 |
+ * There are ways to limit this cost for compression : |
|
488 |
+ * - Reduce history size, by modifying LZ4_DISTANCE_MAX. |
|
489 |
+ * Note that it is a compile-time constant, so all compressions will apply this limit. |
|
490 |
+ * Lower values will reduce compression ratio, except when input_size < LZ4_DISTANCE_MAX, |
|
491 |
+ * so it's a reasonable trick when inputs are known to be small. |
|
492 |
+ * - Require the compressor to deliver a "maximum compressed size". |
|
493 |
+ * This is the `dstCapacity` parameter in `LZ4_compress*()`. |
|
494 |
+ * When this size is < LZ4_COMPRESSBOUND(inputSize), then compression can fail, |
|
495 |
+ * in which case, the return code will be 0 (zero). |
|
496 |
+ * The caller must be ready for these cases to happen, |
|
497 |
+ * and typically design a backup scheme to send data uncompressed. |
|
498 |
+ * The combination of both techniques can significantly reduce |
|
499 |
+ * the amount of margin required for in-place compression. |
|
500 |
+ * |
|
501 |
+ * In-place compression can work in any buffer |
|
502 |
+ * whose size is >= (maxCompressedSize)
|
503 |
+ * with maxCompressedSize == LZ4_COMPRESSBOUND(srcSize) for guaranteed compression success. |
|
504 |
+ * LZ4_COMPRESS_INPLACE_BUFFER_SIZE() depends on both maxCompressedSize and LZ4_DISTANCE_MAX, |
|
505 |
+ * so it's possible to reduce memory requirements by playing with them. |
|
506 |
+ */ |
|
507 |
+ |
|
508 |
+#define LZ4_DECOMPRESS_INPLACE_MARGIN(compressedSize) (((compressedSize) >> 8) + 32) |
|
509 |
+#define LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize) ((decompressedSize) + LZ4_DECOMPRESS_INPLACE_MARGIN(decompressedSize)) /**< note: presumes that compressedSize < decompressedSize. note2: margin is overestimated a bit, since it could use compressedSize instead */ |
|
510 |
+ |
|
511 |
+#ifndef LZ4_DISTANCE_MAX /* history window size; can be user-defined at compile time */ |
|
512 |
+# define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */ |
|
513 |
+#endif |
|
514 |
+ |
|
515 |
+#define LZ4_COMPRESS_INPLACE_MARGIN (LZ4_DISTANCE_MAX + 32) /* LZ4_DISTANCE_MAX can be safely replaced by srcSize when it's smaller */ |
|
516 |
+#define LZ4_COMPRESS_INPLACE_BUFFER_SIZE(maxCompressedSize) ((maxCompressedSize) + LZ4_COMPRESS_INPLACE_MARGIN) /**< maxCompressedSize is generally LZ4_COMPRESSBOUND(inputSize), but can be set to any lower value, with the risk that compression can fail (return code 0(zero)) */ |
|
517 |
+ |
|
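A sketch of in-place decompression sizing; cSize/dSize are assumed known from the container, and LZ4_decompress_safe() is the regular one-shot decoder declared earlier in this header:

    #include <stdlib.h>
    #include "lz4.h"

    /* cSize = compressed size, dSize = known decompressed size */
    static char* decompress_inplace_sketch(size_t cSize, size_t dSize)
    {
        size_t const bufSize = LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(dSize);
        char* const buf = (char*) malloc(bufSize);
        if (buf == NULL) return NULL;
        {   char* const cStart = buf + bufSize - cSize;  /* input at buffer's end */
            /* ... load the cSize compressed bytes at cStart here ... */
            int const r = LZ4_decompress_safe(cStart, buf, (int) cSize, (int) dSize);
            if (r != (int) dSize) { free(buf); return NULL; }
        }
        return buf;   /* decompressed data starts at buf */
    }
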
518 |
+#endif /* LZ4_STATIC_3504398509 */ |
|
519 |
+#endif /* LZ4_STATIC_LINKING_ONLY */ |
|
520 |
+ |
|
521 |
+ |
|
522 |
+ |
|
523 |
+#ifndef LZ4_H_98237428734687 |
|
524 |
+#define LZ4_H_98237428734687 |
|
525 |
+ |
|
526 |
+/*-************************************************************ |
|
527 |
+ * PRIVATE DEFINITIONS |
|
528 |
+ ************************************************************** |
|
529 |
+ * Do not use these definitions directly. |
|
530 |
+ * They are only exposed to allow static allocation of `LZ4_stream_t` and `LZ4_streamDecode_t`. |
|
531 |
+ * Accessing members will expose code to API and/or ABI break in future versions of the library. |
|
532 |
+ **************************************************************/ |
|
328 | 533 |
#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2) |
329 | 534 |
#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE) |
330 | 535 |
#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG) /* required as macro for static allocation */ |
... | ... |
@@ -332,14 +560,16 @@ LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* source, char* dest, in |
332 | 332 |
#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) |
333 | 333 |
#include <stdint.h> |
334 | 334 |
|
335 |
-typedef struct { |
|
335 |
+typedef struct LZ4_stream_t_internal LZ4_stream_t_internal; |
|
336 |
+struct LZ4_stream_t_internal { |
|
336 | 337 |
uint32_t hashTable[LZ4_HASH_SIZE_U32]; |
337 | 338 |
uint32_t currentOffset; |
338 |
- uint32_t initCheck; |
|
339 |
+ uint16_t dirty; |
|
340 |
+ uint16_t tableType; |
|
339 | 341 |
const uint8_t* dictionary; |
340 |
- uint8_t* bufferStart; /* obsolete, used for slideInputBuffer */ |
|
342 |
+ const LZ4_stream_t_internal* dictCtx; |
|
341 | 343 |
uint32_t dictSize; |
342 |
-} LZ4_stream_t_internal; |
|
344 |
+}; |
|
343 | 345 |
|
344 | 346 |
typedef struct { |
345 | 347 |
const uint8_t* externalDict; |
... | ... |
@@ -350,49 +580,67 @@ typedef struct { |
350 | 350 |
|
351 | 351 |
#else |
352 | 352 |
|
353 |
-typedef struct { |
|
353 |
+typedef struct LZ4_stream_t_internal LZ4_stream_t_internal; |
|
354 |
+struct LZ4_stream_t_internal { |
|
354 | 355 |
unsigned int hashTable[LZ4_HASH_SIZE_U32]; |
355 | 356 |
unsigned int currentOffset; |
356 |
- unsigned int initCheck; |
|
357 |
+ unsigned short dirty; |
|
358 |
+ unsigned short tableType; |
|
357 | 359 |
const unsigned char* dictionary; |
358 |
- unsigned char* bufferStart; /* obsolete, used for slideInputBuffer */ |
|
360 |
+ const LZ4_stream_t_internal* dictCtx; |
|
359 | 361 |
unsigned int dictSize; |
360 |
-} LZ4_stream_t_internal; |
|
362 |
+}; |
|
361 | 363 |
|
362 | 364 |
typedef struct { |
363 | 365 |
const unsigned char* externalDict; |
364 |
- size_t extDictSize; |
|
365 | 366 |
const unsigned char* prefixEnd; |
367 |
+ size_t extDictSize; |
|
366 | 368 |
size_t prefixSize; |
367 | 369 |
} LZ4_streamDecode_t_internal; |
368 | 370 |
|
369 | 371 |
#endif |
370 | 372 |
|
371 |
-/*! |
|
372 |
- * LZ4_stream_t : |
|
373 |
- * information structure to track an LZ4 stream. |
|
374 |
- * init this structure before first use. |
|
375 |
- * note : only use in association with static linking ! |
|
376 |
- * this definition is not API/ABI safe, |
|
377 |
- * and may change in a future version ! |
|
373 |
+/*! LZ4_stream_t : |
|
374 |
+ * information structure to track an LZ4 stream. |
|
375 |
+ * LZ4_stream_t can also be created using LZ4_createStream(), which is recommended. |
|
376 |
+ * The structure definition can be convenient for static allocation |
|
377 |
+ * (on stack, or as part of larger structure). |
|
378 |
+ * Init this structure with LZ4_initStream() before first use. |
|
379 |
+ * note : only use this definition in association with static linking ! |
|
380 |
+ * this definition is not API/ABI safe, and may change in a future version. |
|
378 | 381 |
*/ |
379 |
-#define LZ4_STREAMSIZE_U64 ((1 << (LZ4_MEMORY_USAGE-3)) + 4) |
|
382 |
+#define LZ4_STREAMSIZE_U64 ((1 << (LZ4_MEMORY_USAGE-3)) + 4 + ((sizeof(void*)==16) ? 4 : 0) /*AS-400*/ ) |
|
380 | 383 |
#define LZ4_STREAMSIZE (LZ4_STREAMSIZE_U64 * sizeof(unsigned long long)) |
381 | 384 |
union LZ4_stream_u { |
382 | 385 |
unsigned long long table[LZ4_STREAMSIZE_U64]; |
383 | 386 |
LZ4_stream_t_internal internal_donotuse; |
384 | 387 |
} ; /* previously typedef'd to LZ4_stream_t */ |
385 | 388 |
|
389 |
+/*! LZ4_initStream() : v1.9.0+ |
|
390 |
+ * An LZ4_stream_t structure must be initialized at least once. |
|
391 |
+ * This is automatically done when invoking LZ4_createStream(), |
|
392 |
+ * but it is not when the structure is simply declared on the stack (for example).
|
393 |
+ * |
|
394 |
+ * Use LZ4_initStream() to properly initialize a newly declared LZ4_stream_t. |
|
395 |
+ * It can also initialize any arbitrary buffer of sufficient size, |
|
396 |
+ * and will @return a pointer of proper type upon initialization. |
|
397 |
+ * |
|
398 |
+ * Note : initialization fails if size and alignment conditions are not respected. |
|
399 |
+ * In which case, the function will @return NULL. |
|
400 |
+ * Note2: An LZ4_stream_t structure guarantees correct alignment and size. |
|
401 |
+ * Note3: Before v1.9.0, use LZ4_resetStream() instead.
|
402 |
+ */ |
|
403 |
+LZ4LIB_API LZ4_stream_t* LZ4_initStream (void* buffer, size_t size); |
|
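For example, a stack-declared context (helper name illustrative):

    /* sketch: initialize a stack-declared context before first use */
    static void stack_stream_sketch(void)
    {
        LZ4_stream_t ctx;   /* the structure guarantees size and alignment */
        LZ4_stream_t* const s = LZ4_initStream(&ctx, sizeof(ctx));
        if (s != NULL) {
            /* s is ready for LZ4_loadDict(), LZ4_compress_fast_continue(), ... */
        }
    }
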
386 | 404 |
|
387 |
-/*! |
|
388 |
- * LZ4_streamDecode_t : |
|
389 |
- * information structure to track an LZ4 stream during decompression. |
|
390 |
- * init this structure using LZ4_setStreamDecode (or memset()) before first use |
|
391 |
- * note : only use in association with static linking ! |
|
392 |
- * this definition is not API/ABI safe, |
|
393 |
- * and may change in a future version ! |
|
405 |
+ |
|
406 |
+/*! LZ4_streamDecode_t : |
|
407 |
+ * information structure to track an LZ4 stream during decompression. |
|
408 |
+ * init this structure using LZ4_setStreamDecode() before first use. |
|
409 |
+ * note : only use in association with static linking ! |
|
410 |
+ * this definition is not API/ABI safe, |
|
411 |
+ * and may change in a future version ! |
|
394 | 412 |
*/ |
395 |
-#define LZ4_STREAMDECODESIZE_U64 4 |
|
413 |
+#define LZ4_STREAMDECODESIZE_U64 (4 + ((sizeof(void*)==16) ? 2 : 0) /*AS-400*/ ) |
|
396 | 414 |
#define LZ4_STREAMDECODESIZE (LZ4_STREAMDECODESIZE_U64 * sizeof(unsigned long long)) |
397 | 415 |
union LZ4_streamDecode_u { |
398 | 416 |
unsigned long long table[LZ4_STREAMDECODESIZE_U64]; |
... | ... |
@@ -400,15 +648,22 @@ union LZ4_streamDecode_u { |
400 | 400 |
} ; /* previously typedef'd to LZ4_streamDecode_t */ |
401 | 401 |
|
402 | 402 |
|
403 |
-/*=************************************ |
|
403 |
+ |
|
404 |
+/*-************************************ |
|
404 | 405 |
* Obsolete Functions |
405 | 406 |
**************************************/ |
406 |
-/* Deprecation warnings */ |
|
407 |
-/* Should these warnings be a problem, |
|
408 |
- it is generally possible to disable them, |
|
409 |
- typically with -Wno-deprecated-declarations for gcc |
|
410 |
- or _CRT_SECURE_NO_WARNINGS in Visual. |
|
411 |
- Otherwise, it's also possible to define LZ4_DISABLE_DEPRECATE_WARNINGS */ |
|
407 |
+ |
|
408 |
+/*! Deprecation warnings |
|
409 |
+ * |
|
410 |
+ * Deprecated functions make the compiler generate a warning when invoked. |
|
411 |
+ * This is meant to invite users to update their source code. |
|
412 |
+ * Should deprecation warnings be a problem, it is generally possible to disable them, |
|
413 |
+ * typically with -Wno-deprecated-declarations for gcc |
|
414 |
+ * or _CRT_SECURE_NO_WARNINGS in Visual. |
|
415 |
+ * |
|
416 |
+ * Another method is to define LZ4_DISABLE_DEPRECATE_WARNINGS |
|
417 |
+ * before including the header file. |
|
418 |
+ */ |
|
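For instance, to silence them from the source side rather than with compiler flags:

    #define LZ4_DISABLE_DEPRECATE_WARNINGS
    #include "lz4.h"
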
412 | 419 |
#ifdef LZ4_DISABLE_DEPRECATE_WARNINGS |
413 | 420 |
# define LZ4_DEPRECATED(message) /* disable deprecation warnings */ |
414 | 421 |
#else |
... | ... |
@@ -428,36 +683,82 @@ union LZ4_streamDecode_u { |
428 | 428 |
#endif /* LZ4_DISABLE_DEPRECATE_WARNINGS */ |
429 | 429 |
|
430 | 430 |
/* Obsolete compression functions */ |
431 |
-LZ4_DEPRECATED("use LZ4_compress_default() instead") int LZ4_compress (const char* source, char* dest, int sourceSize); |
|
432 |
-LZ4_DEPRECATED("use LZ4_compress_default() instead") int LZ4_compress_limitedOutput (const char* source, char* dest, int sourceSize, int maxOutputSize); |
|
433 |
-LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize); |
|
434 |
-LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize); |
|
435 |
-LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") int LZ4_compress_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize); |
|
436 |
-LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize); |
|
431 |
+LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress (const char* src, char* dest, int srcSize); |
|
432 |
+LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress_limitedOutput (const char* src, char* dest, int srcSize, int maxOutputSize); |
|
433 |
+LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize); |
|
434 |
+LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize); |
|
435 |
+LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize); |
|
436 |
+LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize); |
|
437 | 437 |
|
438 | 438 |
/* Obsolete decompression functions */ |
439 |
-/* These function names are completely deprecated and must no longer be used. |
|
440 |
- They are only provided in lz4.c for compatibility with older programs. |
|
441 |
- - LZ4_uncompress is the same as LZ4_decompress_fast |
|
442 |
- - LZ4_uncompress_unknownOutputSize is the same as LZ4_decompress_safe |
|
443 |
- These function prototypes are now disabled; uncomment them only if you really need them. |
|
444 |
- It is highly recommended to stop using these prototypes and migrate to maintained ones */ |
|
445 |
-/* int LZ4_uncompress (const char* source, char* dest, int outputSize); */ |
|
446 |
-/* int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize); */ |
|
447 |
- |
|
448 |
-/* Obsolete streaming functions; use new streaming interface whenever possible */ |
|
449 |
-LZ4_DEPRECATED("use LZ4_createStream() instead") void* LZ4_create (char* inputBuffer); |
|
450 |
-LZ4_DEPRECATED("use LZ4_createStream() instead") int LZ4_sizeofStreamState(void); |
|
451 |
-LZ4_DEPRECATED("use LZ4_resetStream() instead") int LZ4_resetStreamState(void* state, char* inputBuffer); |
|
452 |
-LZ4_DEPRECATED("use LZ4_saveDict() instead") char* LZ4_slideInputBuffer (void* state); |
|
439 |
+LZ4_DEPRECATED("use LZ4_decompress_fast() instead") LZ4LIB_API int LZ4_uncompress (const char* source, char* dest, int outputSize); |
|
440 |
+LZ4_DEPRECATED("use LZ4_decompress_safe() instead") LZ4LIB_API int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize); |
|
441 |
+ |
|
442 |
+/* Obsolete streaming functions; degraded functionality; do not use! |
|
443 |
+ * |
|
444 |
+ * In order to perform streaming compression, these functions depended on data |
|
445 |
+ * that is no longer tracked in the state. They have been preserved as well as |
|
446 |
+ * possible: using them will still produce a correct output. However, they don't |
|
447 |
+ * actually retain any history between compression calls. The compression ratio |
|
448 |
+ * achieved will therefore be no better than compressing each chunk |
|
449 |
+ * independently. |
|
450 |
+ */ |
|
451 |
+LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API void* LZ4_create (char* inputBuffer); |
|
452 |
+LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API int LZ4_sizeofStreamState(void); |
|
453 |
+LZ4_DEPRECATED("Use LZ4_resetStream() instead") LZ4LIB_API int LZ4_resetStreamState(void* state, char* inputBuffer); |
|
454 |
+LZ4_DEPRECATED("Use LZ4_saveDict() instead") LZ4LIB_API char* LZ4_slideInputBuffer (void* state); |
|
453 | 455 |
|
454 | 456 |
/* Obsolete streaming decoding functions */ |
455 |
-LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize); |
|
456 |
-LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize); |
|
457 |
+LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") LZ4LIB_API int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize); |
|
458 |
+LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") LZ4LIB_API int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize); |
|
459 |
+ |
|
460 |
+/*! LZ4_decompress_fast() : **unsafe!** |
|
461 |
+ * These functions used to be faster than LZ4_decompress_safe(), |
|
462 |
+ * but that has changed, and they are now slower than LZ4_decompress_safe().
|
463 |
+ * This is because LZ4_decompress_fast() doesn't know the input size, |
|
464 |
+ * and therefore must progress more cautiously in the input buffer to not read beyond the end of the block.
|
465 |
+ * On top of that, `LZ4_decompress_fast()` is not protected against malformed or malicious inputs, making it a security liability.
|
466 |
+ * As a consequence, LZ4_decompress_fast() is strongly discouraged, and deprecated. |
|
467 |
+ * |
|
468 |
+ * The last remaining LZ4_decompress_fast() specificity is that |
|
469 |
+ * it can decompress a block without knowing its compressed size. |
|
470 |
+ * Such functionality could be achieved in a more secure manner, |
|
471 |
+ * by also providing the maximum size of input buffer, |
|
472 |
+ * but it would require new prototypes, and adaptation of the implementation to this new use case. |
|
473 |
+ * |
|
474 |
+ * Parameters: |
|
475 |
+ * originalSize : the uncompressed size to regenerate.
|
476 |
+ * `dst` must be already allocated, its size must be >= 'originalSize' bytes. |
|
477 |
+ * @return : number of bytes read from source buffer (== compressed size). |
|
478 |
+ * The function expects to finish at block's end exactly. |
|
479 |
+ * If the source stream is detected malformed, the function stops decoding and returns a negative result. |
|
480 |
+ * note : LZ4_decompress_fast*() requires originalSize. Thanks to this information, it never writes past the output buffer. |
|
481 |
+ * However, since it doesn't know its 'src' size, it may read an unknown amount of input, past input buffer bounds. |
|
482 |
+ * Also, since match offsets are not validated, match reads from 'src' may underflow too. |
|
483 |
+ * These issues never happen if input (compressed) data is correct. |
|
484 |
+ * But they may happen if input data is invalid (error or intentional tampering). |
|
485 |
+ * As a consequence, use these functions in trusted environments with trusted data **only**. |
|
486 |
+ */ |
|
487 |
+ |
|
488 |
+LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe() instead") |
|
489 |
+LZ4LIB_API int LZ4_decompress_fast (const char* src, char* dst, int originalSize); |
|
490 |
+LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_continue() instead") |
|
491 |
+LZ4LIB_API int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int originalSize); |
|
492 |
+LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_usingDict() instead") |
|
493 |
+LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* src, char* dst, int originalSize, const char* dictStart, int dictSize); |
|
494 |
+ |
|
495 |
+/*! LZ4_resetStream() : |
|
496 |
+ * An LZ4_stream_t structure must be initialized at least once. |
|
497 |
+ * This is done with LZ4_initStream(), or LZ4_resetStream(). |
|
498 |
+ * Consider switching to LZ4_initStream(), |
|
499 |
+ * invoking LZ4_resetStream() will trigger deprecation warnings in the future. |
|
500 |
+ */ |
|
501 |
+LZ4LIB_API void LZ4_resetStream (LZ4_stream_t* streamPtr); |
|
502 |
+ |
|
503 |
+ |
|
504 |
+#endif /* LZ4_H_98237428734687 */ |
|
457 | 505 |
|
458 | 506 |
|
459 | 507 |
#if defined (__cplusplus) |
460 | 508 |
} |
461 | 509 |
#endif |
462 |
- |
|
463 |
-#endif /* LZ4_H_2983827168210 */ |