Browse code

Provide LZ4 sources in src/compat/ and use if no system lz4 library found.

Bundle lz4.c and lz4.h from http://code.google.com/p/lz4/ (r109) as
src/compat/compat-lz4.[ch], and use that (via #define NEED_COMPAT_LZ4)
if autoconf cannot find lz4.h or -llz4 in the system.

Signed-off-by: Gert Doering <gert@greenie.muc.de>
Acked-by: Arne Schwabe <arne@rfc2549.org>
Message-Id: <1388613479-22377-2-git-send-email-gert@greenie.muc.de>
URL: http://article.gmane.org/gmane.network.openvpn.devel/8154

Gert Doering authored on 2014/01/02 06:57:59
Showing 5 changed files
... ...
@@ -970,8 +970,9 @@ if test "$enable_lz4" = "yes" && test "$enable_comp_stub" = "no"; then
970 970
        ])
971 971
 
972 972
     if test $havelz4lib = 0 ; then
973
-	AC_MSG_RESULT([LZ4 library available from http://code.google.com/p/lz4/])
974
-        AC_MSG_ERROR([Or try ./configure --disable-lz4 OR ./configure --enable-comp-stub])
973
+	AC_MSG_RESULT([LZ4 library or header not found, using version in src/compat/compat-lz4.*])
974
+	AC_DEFINE([NEED_COMPAT_LZ4], [1], [use copy of LZ4 source in compat/])
975
+	LZ4_LIBS=""
975 976
     fi
976 977
     OPTIONAL_LZ4_CFLAGS="${LZ4_CFLAGS}"
977 978
     OPTIONAL_LZ4_LIBS="${LZ4_LIBS}"
... ...
@@ -26,4 +26,5 @@ libcompat_la_SOURCES = \
26 26
 	compat-gettimeofday.c \
27 27
 	compat-daemon.c \
28 28
 	compat-inet_ntop.c \
29
-	compat-inet_pton.c
29
+	compat-inet_pton.c \
30
+	compat-lz4.c compat-lz4.h
30 31
new file mode 100644
... ...
@@ -0,0 +1,830 @@
0
+/*
1
+   LZ4 - Fast LZ compression algorithm
2
+   Copyright (C) 2011-2013, Yann Collet.
3
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
4
+
5
+   Redistribution and use in source and binary forms, with or without
6
+   modification, are permitted provided that the following conditions are
7
+   met:
8
+
9
+       * Redistributions of source code must retain the above copyright
10
+   notice, this list of conditions and the following disclaimer.
11
+       * Redistributions in binary form must reproduce the above
12
+   copyright notice, this list of conditions and the following disclaimer
13
+   in the documentation and/or other materials provided with the
14
+   distribution.
15
+
16
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
+
28
+   You can contact the author at :
29
+   - LZ4 source repository : http://code.google.com/p/lz4/
30
+   - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
31
+*/
32
+
33
+#ifdef HAVE_CONFIG_H
34
+#include "config.h"
35
+#elif defined(_MSC_VER)
36
+#include "config-msvc.h"
37
+#endif
38
+
39
+#ifdef NEED_COMPAT_LZ4
40
+
41
+//**************************************
42
+// Tuning parameters
43
+//**************************************
44
+// MEMORY_USAGE :
45
+// Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
46
+// Increasing memory usage improves compression ratio
47
+// Reduced memory usage can improve speed, due to cache effect
48
+// Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
49
+#define MEMORY_USAGE 14
50
+
51
+// HEAPMODE :
52
+// Select how default compression functions will allocate memory for their hash table,
53
+// in memory stack (0:default, fastest), or in memory heap (1:requires memory allocation (malloc)).
54
+#define HEAPMODE 0
55
+
56
+
57
+//**************************************
58
+// CPU Feature Detection
59
+//**************************************
60
+// 32 or 64 bits ?
61
+#if (defined(__x86_64__) || defined(_M_X64) || defined(_WIN64) \
62
+  || defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__) \
63
+  || defined(__64BIT__) || defined(_LP64) || defined(__LP64__) \
64
+  || defined(__ia64) || defined(__itanium__) || defined(_M_IA64) )   // Detects 64 bits mode
65
+#  define LZ4_ARCH64 1
66
+#else
67
+#  define LZ4_ARCH64 0
68
+#endif
69
+
70
+// Little Endian or Big Endian ?
71
+// Overwrite the #define below if you know your architecture endianess
72
+#if defined (__GLIBC__)
73
+#  include <endian.h>
74
+#  if (__BYTE_ORDER == __BIG_ENDIAN)
75
+#     define LZ4_BIG_ENDIAN 1
76
+#  endif
77
+#elif (defined(__BIG_ENDIAN__) || defined(__BIG_ENDIAN) || defined(_BIG_ENDIAN)) && !(defined(__LITTLE_ENDIAN__) || defined(__LITTLE_ENDIAN) || defined(_LITTLE_ENDIAN))
78
+#  define LZ4_BIG_ENDIAN 1
79
+#elif defined(__sparc) || defined(__sparc__) \
80
+   || defined(__powerpc__) || defined(__ppc__) || defined(__PPC__) \
81
+   || defined(__hpux)  || defined(__hppa) \
82
+   || defined(_MIPSEB) || defined(__s390__)
83
+#  define LZ4_BIG_ENDIAN 1
84
+#else
85
+// Little Endian assumed. PDP Endian and other very rare endian format are unsupported.
86
+#endif
87
+
88
+// Unaligned memory access is automatically enabled for "common" CPU, such as x86.
89
+// For others CPU, such as ARM, the compiler may be more cautious, inserting unnecessary extra code to ensure aligned access property
90
+// If you know your target CPU supports unaligned memory access, you want to force this option manually to improve performance
91
+#if defined(__ARM_FEATURE_UNALIGNED)
92
+#  define LZ4_FORCE_UNALIGNED_ACCESS 1
93
+#endif
94
+
95
+// Define this parameter if your target system or compiler does not support hardware bit count
96
+#if defined(_MSC_VER) && defined(_WIN32_WCE)            // Visual Studio for Windows CE does not support Hardware bit count
97
+#  define LZ4_FORCE_SW_BITCOUNT
98
+#endif
99
+
100
+// BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE :
101
+// This option may provide a small boost to performance for some big endian cpu, although probably modest.
102
+// You may set this option to 1 if data will remain within closed environment.
103
+// This option is useless on Little_Endian CPU (such as x86)
104
+//#define BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE 1
105
+
106
+
107
+//**************************************
108
+// Compiler Options
109
+//**************************************
110
+#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)   // C99
111
+/* "restrict" is a known keyword */
112
+#else
113
+#  define restrict // Disable restrict
114
+#endif
115
+
116
+#ifdef _MSC_VER    // Visual Studio
117
+#  define FORCE_INLINE static __forceinline
118
+#  include <intrin.h>                    // For Visual 2005
119
+#  if LZ4_ARCH64   // 64-bits
120
+#    pragma intrinsic(_BitScanForward64) // For Visual 2005
121
+#    pragma intrinsic(_BitScanReverse64) // For Visual 2005
122
+#  else            // 32-bits
123
+#    pragma intrinsic(_BitScanForward)   // For Visual 2005
124
+#    pragma intrinsic(_BitScanReverse)   // For Visual 2005
125
+#  endif
126
+#  pragma warning(disable : 4127)        // disable: C4127: conditional expression is constant
127
+#else
128
+#  ifdef __GNUC__
129
+#    define FORCE_INLINE static inline __attribute__((always_inline))
130
+#  else
131
+#    define FORCE_INLINE static inline
132
+#  endif
133
+#endif
134
+
135
+#ifdef _MSC_VER
136
+#  define lz4_bswap16(x) _byteswap_ushort(x)
137
+#else
138
+#  define lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8)))
139
+#endif
140
+
141
+#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
142
+
143
+#if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
144
+#  define expect(expr,value)    (__builtin_expect ((expr),(value)) )
145
+#else
146
+#  define expect(expr,value)    (expr)
147
+#endif
148
+
149
+#define likely(expr)     expect((expr) != 0, 1)
150
+#define unlikely(expr)   expect((expr) != 0, 0)
151
+
152
+
153
+//**************************************
154
+// Memory routines
155
+//**************************************
156
+#include <stdlib.h>   // malloc, calloc, free
157
+#define ALLOCATOR(n,s) calloc(n,s)
158
+#define FREEMEM        free
159
+#include <string.h>   // memset, memcpy
160
+#define MEM_INIT       memset
161
+
162
+
163
+//**************************************
164
+// Includes
165
+//**************************************
166
+#include "compat-lz4.h"
167
+
168
+
169
+//**************************************
170
+// Basic Types
171
+//**************************************
172
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   // C99
173
+# include <stdint.h>
174
+  typedef  uint8_t BYTE;
175
+  typedef uint16_t U16;
176
+  typedef uint32_t U32;
177
+  typedef  int32_t S32;
178
+  typedef uint64_t U64;
179
+#else
180
+  typedef unsigned char       BYTE;
181
+  typedef unsigned short      U16;
182
+  typedef unsigned int        U32;
183
+  typedef   signed int        S32;
184
+  typedef unsigned long long  U64;
185
+#endif
186
+
187
+#if defined(__GNUC__)  && !defined(LZ4_FORCE_UNALIGNED_ACCESS)
188
+#  define _PACKED __attribute__ ((packed))
189
+#else
190
+#  define _PACKED
191
+#endif
192
+
193
+#if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
194
+#  if defined(__IBMC__) || defined(__SUNPRO_C) || defined(__SUNPRO_CC)
195
+#    pragma pack(1)
196
+#  else
197
+#    pragma pack(push, 1)
198
+#  endif
199
+#endif
200
+
201
+typedef struct { U16 v; }  _PACKED U16_S;
202
+typedef struct { U32 v; }  _PACKED U32_S;
203
+typedef struct { U64 v; }  _PACKED U64_S;
204
+typedef struct {size_t v;} _PACKED size_t_S;
205
+
206
+#if !defined(LZ4_FORCE_UNALIGNED_ACCESS) && !defined(__GNUC__)
207
+#  if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
208
+#    pragma pack(0)
209
+#  else
210
+#    pragma pack(pop)
211
+#  endif
212
+#endif
213
+
214
+#define A16(x)   (((U16_S *)(x))->v)
215
+#define A32(x)   (((U32_S *)(x))->v)
216
+#define A64(x)   (((U64_S *)(x))->v)
217
+#define AARCH(x) (((size_t_S *)(x))->v)
218
+
219
+
220
+//**************************************
221
+// Constants
222
+//**************************************
223
+#define LZ4_HASHLOG   (MEMORY_USAGE-2)
224
+#define HASHTABLESIZE (1 << MEMORY_USAGE)
225
+#define HASHNBCELLS4  (1 << LZ4_HASHLOG)
226
+
227
+#define MINMATCH 4
228
+
229
+#define COPYLENGTH 8
230
+#define LASTLITERALS 5
231
+#define MFLIMIT (COPYLENGTH+MINMATCH)
232
+const int LZ4_minLength = (MFLIMIT+1);
233
+
234
+#define LZ4_64KLIMIT ((1<<16) + (MFLIMIT-1))
235
+#define SKIPSTRENGTH 6     // Increasing this value will make the compression run slower on incompressible data
236
+
237
+#define MAXD_LOG 16
238
+#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
239
+
240
+#define ML_BITS  4
241
+#define ML_MASK  ((1U<<ML_BITS)-1)
242
+#define RUN_BITS (8-ML_BITS)
243
+#define RUN_MASK ((1U<<RUN_BITS)-1)
244
+
245
+#define KB *(1U<<10)
246
+#define MB *(1U<<20)
247
+#define GB *(1U<<30)
248
+
249
+
250
+//**************************************
251
+// Structures and local types
252
+//**************************************
253
+
254
// Streaming compression state, created by LZ4_create() and advanced by
// LZ4_compress_continue() / LZ4_slideInputBuffer().
typedef struct {
    U32 hashTable[HASHNBCELLS4];   // match-search table; streaming mode (byU32) stores offsets relative to 'base'
    const BYTE* bufferStart;       // start of the caller-provided input buffer
    const BYTE* base;              // virtual origin for the offsets kept in hashTable
    const BYTE* nextBlock;         // where the next input block is expected to start
} LZ4_Data_Structure;

// Compile-time directives: passed as constants into the force-inlined
// generic routines so the compiler removes the dead branches.
typedef enum { notLimited = 0, limited = 1 } limitedOutput_directive;   // bound writes to maxOutputSize?
typedef enum { byPtr, byU32, byU16 } tableType_t;                       // hash-cell representation

typedef enum { noPrefix = 0, withPrefix = 1 } prefix64k_directive;      // may matches reach into the previous block?

typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
typedef enum { full = 0, partial = 1 } earlyEnd_directive;              // partial: stop once targetOutputSize reached
268
+
269
+
270
+//**************************************
271
+// Architecture-specific macros
272
+//**************************************
273
+#define STEPSIZE                  sizeof(size_t)
274
+#define LZ4_COPYSTEP(d,s)         { AARCH(d) = AARCH(s); d+=STEPSIZE; s+=STEPSIZE; }
275
+#define LZ4_COPY8(d,s)            { LZ4_COPYSTEP(d,s); if (STEPSIZE<8) LZ4_COPYSTEP(d,s); }
276
+#define LZ4_SECURECOPY(d,s,e)     { if ((STEPSIZE==4)||(d<e)) LZ4_WILDCOPY(d,s,e); }
277
+
278
+#if LZ4_ARCH64   // 64-bit
279
+#  define HTYPE                   U32
280
+#  define INITBASE(base)          const BYTE* const base = ip
281
+#else            // 32-bit
282
+#  define HTYPE                   const BYTE*
283
+#  define INITBASE(base)          const int base = 0
284
+#endif
285
+
286
+#if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
287
+#  define LZ4_READ_LITTLEENDIAN_16(d,s,p) { U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
288
+#  define LZ4_WRITE_LITTLEENDIAN_16(p,i)  { U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p+=2; }
289
+#else      // Little Endian
290
+#  define LZ4_READ_LITTLEENDIAN_16(d,s,p) { d = (s) - A16(p); }
291
+#  define LZ4_WRITE_LITTLEENDIAN_16(p,v)  { A16(p) = v; p+=2; }
292
+#endif
293
+
294
+
295
+//**************************************
296
+// Macros
297
+//**************************************
298
+#define LZ4_WILDCOPY(d,s,e)     { do { LZ4_COPY8(d,s) } while (d<e); }           // at the end, d>=e;
299
+
300
+
301
+//****************************
302
+// Private functions
303
+//****************************
304
// LZ4_NbCommonBytes() :
// Given the XOR of two machine words (assumed non-zero), return how many
// leading bytes in memory order the two words had in common.  On little
// endian this is the number of trailing zero bytes, on big endian the
// number of leading zero bytes.  Hardware bit-scan intrinsics are used
// when available, with portable fallbacks otherwise.
#if LZ4_ARCH64

FORCE_INLINE int LZ4_NbCommonBytes (register U64 val)
{
# if defined(LZ4_BIG_ENDIAN)
#   if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
    unsigned long r = 0;
    _BitScanReverse64( &r, val );
    return (int)(r>>3);   // bit index -> byte index
#   elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
    return (__builtin_clzll(val) >> 3);
#   else
    // Portable fallback: binary search for the highest non-zero byte.
    int r;
    if (!(val>>32)) { r=4; } else { r=0; val>>=32; }
    if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
    r += (!val);
    return r;
#   endif
# else
#   if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
    unsigned long r = 0;
    _BitScanForward64( &r, val );
    return (int)(r>>3);
#   elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
    return (__builtin_ctzll(val) >> 3);
#   else
    // De Bruijn multiplication: isolates the lowest set bit, then maps it
    // to a unique 6-bit index whose table entry is the byte position.
    static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
    return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
#   endif
# endif
}

#else

FORCE_INLINE int LZ4_NbCommonBytes (register U32 val)
{
# if defined(LZ4_BIG_ENDIAN)
#   if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
    unsigned long r = 0;
    _BitScanReverse( &r, val );
    return (int)(r>>3);
#   elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
    return (__builtin_clz(val) >> 3);
#   else
    // Portable fallback: binary search for the highest non-zero byte.
    int r;
    if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
    r += (!val);
    return r;
#   endif
# else
#   if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT)
    unsigned long r;
    _BitScanForward( &r, val );
    return (int)(r>>3);
#   elif defined(__GNUC__) && (GCC_VERSION >= 304) && !defined(LZ4_FORCE_SW_BITCOUNT)
    return (__builtin_ctz(val) >> 3);
#   else
    // 32-bit De Bruijn variant of the trick above.
    static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
    return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
#   endif
# endif
}

#endif
368
+
369
+
370
+//****************************
371
+// Compression functions
372
+//****************************
373
+FORCE_INLINE int LZ4_hashSequence(U32 sequence, tableType_t tableType)
374
+{
375
+    if (tableType == byU16)
376
+        return (((sequence) * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
377
+    else
378
+        return (((sequence) * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
379
+}
380
+
381
+FORCE_INLINE int LZ4_hashPosition(const BYTE* p, tableType_t tableType) { return LZ4_hashSequence(A32(p), tableType); }
382
+
383
+FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
384
+{
385
+    switch (tableType)
386
+    {
387
+    case byPtr: { const BYTE** hashTable = (const BYTE**) tableBase; hashTable[h] = p; break; }
388
+    case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); break; }
389
+    case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); break; }
390
+    }
391
+}
392
+
393
+FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
394
+{
395
+    U32 h = LZ4_hashPosition(p, tableType);
396
+    LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
397
+}
398
+
399
+FORCE_INLINE const BYTE* LZ4_getPositionOnHash(U32 h, void* tableBase, tableType_t tableType, const BYTE* srcBase)
400
+{
401
+    if (tableType == byPtr) { const BYTE** hashTable = (const BYTE**) tableBase; return hashTable[h]; }
402
+    if (tableType == byU32) { U32* hashTable = (U32*) tableBase; return hashTable[h] + srcBase; }
403
+    { U16* hashTable = (U16*) tableBase; return hashTable[h] + srcBase; }   // default, to ensure a return
404
+}
405
+
406
+FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
407
+{
408
+    U32 h = LZ4_hashPosition(p, tableType);
409
+    return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
410
+}
411
+
412
+
413
// LZ4_compress_generic() :
// Single generic compression routine; it is force-inlined so that the
// constant directive arguments specialise it at compile time for each
// public entry point:
//   limitedOutput : return 0 rather than write past dest+maxOutputSize
//   tableType     : hash-cell representation (byPtr / byU32 / byU16)
//   prefix        : withPrefix lets matches reference the previous block
// Returns the number of bytes written to dest, or 0 on failure.
FORCE_INLINE int LZ4_compress_generic(
                 void* ctx,
                 const char* source,
                 char* dest,
                 int inputSize,
                 int maxOutputSize,

                 limitedOutput_directive limitedOutput,
                 tableType_t tableType,
                 prefix64k_directive prefix)
{
    const BYTE* ip = (const BYTE*) source;
    const BYTE* const base = (prefix==withPrefix) ? ((LZ4_Data_Structure*)ctx)->base : (const BYTE*) source;
    const BYTE* const lowLimit = ((prefix==withPrefix) ? ((LZ4_Data_Structure*)ctx)->bufferStart : (const BYTE*)source);
    const BYTE* anchor = (const BYTE*) source;                 // start of not-yet-emitted literals
    const BYTE* const iend = ip + inputSize;
    const BYTE* const mflimit = iend - MFLIMIT;                // last position where a match may start
    const BYTE* const matchlimit = iend - LASTLITERALS;        // matches must not consume the last literals

    BYTE* op = (BYTE*) dest;
    BYTE* const oend = op + maxOutputSize;

    int length;
    const int skipStrength = SKIPSTRENGTH;
    U32 forwardH;

    // Init conditions
    if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) return 0;                                // Unsupported input size, too large (or negative)
    if ((prefix==withPrefix) && (ip != ((LZ4_Data_Structure*)ctx)->nextBlock)) return 0;   // must continue from end of previous block
    if (prefix==withPrefix) ((LZ4_Data_Structure*)ctx)->nextBlock=iend;                    // do it now, due to potential early exit
    if ((tableType == byU16) && (inputSize>=LZ4_64KLIMIT)) return 0;                       // Size too large (not within 64K limit)
    if (inputSize<LZ4_minLength) goto _last_literals;                                      // Input too small, no compression (all literals)

    // First Byte
    LZ4_putPosition(ip, ctx, tableType, base);
    ip++; forwardH = LZ4_hashPosition(ip, tableType);

    // Main Loop
    for ( ; ; )
    {
        int findMatchAttempts = (1U << skipStrength) + 3;
        const BYTE* forwardIp = ip;
        const BYTE* ref;
        BYTE* token;

        // Find a match; the skip step grows as attempts fail, so
        // incompressible data is scanned progressively faster.
        do {
            U32 h = forwardH;
            int step = findMatchAttempts++ >> skipStrength;
            ip = forwardIp;
            forwardIp = ip + step;

            if unlikely(forwardIp > mflimit) { goto _last_literals; }

            forwardH = LZ4_hashPosition(forwardIp, tableType);
            ref = LZ4_getPositionOnHash(h, ctx, tableType, base);
            LZ4_putPositionOnHash(ip, h, ctx, tableType, base);

        } while ((ref + MAX_DISTANCE < ip) || (A32(ref) != A32(ip)));

        // Catch up: extend the match backwards over pending literals
        while ((ip>anchor) && (ref > lowLimit) && unlikely(ip[-1]==ref[-1])) { ip--; ref--; }

        // Encode Literal length
        length = (int)(ip - anchor);
        token = op++;
        if ((limitedOutput) && unlikely(op + length + (2 + 1 + LASTLITERALS) + (length/255) > oend)) return 0;   // Check output limit
        if (length>=(int)RUN_MASK)
        {
            int len = length-RUN_MASK;
            *token=(RUN_MASK<<ML_BITS);
            for(; len >= 255 ; len-=255) *op++ = 255;
            *op++ = (BYTE)len;
        }
        else *token = (BYTE)(length<<ML_BITS);

        // Copy Literals
        { BYTE* end=(op)+(length); LZ4_WILDCOPY(op,anchor,end); op=end; }

_next_match:
        // Encode Offset
        LZ4_WRITE_LITTLEENDIAN_16(op,(U16)(ip-ref));

        // Start Counting
        ip+=MINMATCH; ref+=MINMATCH;    // MinMatch already verified
        anchor = ip;
        while likely(ip<matchlimit-(STEPSIZE-1))
        {
            size_t diff = AARCH(ref) ^ AARCH(ip);
            if (!diff) { ip+=STEPSIZE; ref+=STEPSIZE; continue; }
            ip += LZ4_NbCommonBytes(diff);
            goto _endCount;
        }
        if (LZ4_ARCH64) if ((ip<(matchlimit-3)) && (A32(ref) == A32(ip))) { ip+=4; ref+=4; }
        if ((ip<(matchlimit-1)) && (A16(ref) == A16(ip))) { ip+=2; ref+=2; }
        if ((ip<matchlimit) && (*ref == *ip)) ip++;
_endCount:

        // Encode MatchLength
        length = (int)(ip - anchor);
        if ((limitedOutput) && unlikely(op + (1 + LASTLITERALS) + (length>>8) > oend)) return 0;    // Check output limit
        if (length>=(int)ML_MASK)
        {
            *token += ML_MASK;
            length -= ML_MASK;
            for (; length > 509 ; length-=510) { *op++ = 255; *op++ = 255; }
            if (length >= 255) { length-=255; *op++ = 255; }
            *op++ = (BYTE)length;
        }
        else *token += (BYTE)(length);

        // Test end of chunk
        if (ip > mflimit) { anchor = ip;  break; }

        // Fill table
        LZ4_putPosition(ip-2, ctx, tableType, base);

        // Test next position
        ref = LZ4_getPosition(ip, ctx, tableType, base);
        LZ4_putPosition(ip, ctx, tableType, base);
        if ((ref + MAX_DISTANCE >= ip) && (A32(ref) == A32(ip))) { token = op++; *token=0; goto _next_match; }

        // Prepare next loop
        anchor = ip++;
        forwardH = LZ4_hashPosition(ip, tableType);
    }

_last_literals:
    // Encode Last Literals
    {
        int lastRun = (int)(iend - anchor);
        if ((limitedOutput) && (((char*)op - dest) + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > (U32)maxOutputSize)) return 0;   // Check output limit
        if (lastRun>=(int)RUN_MASK) { *op++=(RUN_MASK<<ML_BITS); lastRun-=RUN_MASK; for(; lastRun >= 255 ; lastRun-=255) *op++ = 255; *op++ = (BYTE) lastRun; }
        else *op++ = (BYTE)(lastRun<<ML_BITS);
        memcpy(op, anchor, iend - anchor);
        op += iend-anchor;
    }

    // End
    return (int) (((char*)op)-dest);
}
554
+
555
+
556
+int LZ4_compress(const char* source, char* dest, int inputSize)
557
+{
558
+#if (HEAPMODE)
559
+    void* ctx = ALLOCATOR(HASHNBCELLS4, 4);   // Aligned on 4-bytes boundaries
560
+#else
561
+    U32 ctx[1U<<(MEMORY_USAGE-2)] = {0};           // Ensure data is aligned on 4-bytes boundaries
562
+#endif
563
+    int result;
564
+
565
+    if (inputSize < (int)LZ4_64KLIMIT)
566
+        result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, 0, notLimited, byU16, noPrefix);
567
+    else
568
+        result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, 0, notLimited, (sizeof(void*)==8) ? byU32 : byPtr, noPrefix);
569
+
570
+#if (HEAPMODE)
571
+    FREEMEM(ctx);
572
+#endif
573
+    return result;
574
+}
575
+
576
+int LZ4_compress_continue (void* LZ4_Data, const char* source, char* dest, int inputSize)
577
+{
578
+    return LZ4_compress_generic(LZ4_Data, source, dest, inputSize, 0, notLimited, byU32, withPrefix);
579
+}
580
+
581
+
582
+int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
583
+{
584
+#if (HEAPMODE)
585
+    void* ctx = ALLOCATOR(HASHNBCELLS4, 4);   // Aligned on 4-bytes boundaries
586
+#else
587
+    U32 ctx[1U<<(MEMORY_USAGE-2)] = {0};           // Ensure data is aligned on 4-bytes boundaries
588
+#endif
589
+    int result;
590
+
591
+    if (inputSize < (int)LZ4_64KLIMIT)
592
+        result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, maxOutputSize, limited, byU16, noPrefix);
593
+    else
594
+        result = LZ4_compress_generic((void*)ctx, source, dest, inputSize, maxOutputSize, limited, (sizeof(void*)==8) ? byU32 : byPtr, noPrefix);
595
+
596
+#if (HEAPMODE)
597
+    FREEMEM(ctx);
598
+#endif
599
+    return result;
600
+}
601
+
602
+int LZ4_compress_limitedOutput_continue (void* LZ4_Data, const char* source, char* dest, int inputSize, int maxOutputSize)
603
+{
604
+    return LZ4_compress_generic(LZ4_Data, source, dest, inputSize, maxOutputSize, limited, byU32, withPrefix);
605
+}
606
+
607
+
608
+//****************************
609
+// Stream functions
610
+//****************************
611
+
612
+FORCE_INLINE void LZ4_init(LZ4_Data_Structure* lz4ds, const BYTE* base)
613
+{
614
+    MEM_INIT(lz4ds->hashTable, 0, sizeof(lz4ds->hashTable));
615
+    lz4ds->bufferStart = base;
616
+    lz4ds->base = base;
617
+    lz4ds->nextBlock = base;
618
+}
619
+
620
+
621
+void* LZ4_create (const char* inputBuffer)
622
+{
623
+    void* lz4ds = ALLOCATOR(1, sizeof(LZ4_Data_Structure));
624
+    LZ4_init ((LZ4_Data_Structure*)lz4ds, (const BYTE*)inputBuffer);
625
+    return lz4ds;
626
+}
627
+
628
+
629
+int LZ4_free (void* LZ4_Data)
630
+{
631
+    FREEMEM(LZ4_Data);
632
+    return (0);
633
+}
634
+
635
+
636
// LZ4_slideInputBuffer() :
// When the streaming input buffer is exhausted, copy the last 64 KB of
// history back to the start of the buffer so compression can continue,
// and adjust the virtual base pointers (or, when that would underflow or
// approach the 32-bit offset limit, prune/rebase the hash table instead).
// Returns the position where the next input block should be written.
char* LZ4_slideInputBuffer (void* LZ4_Data)
{
    LZ4_Data_Structure* lz4ds = (LZ4_Data_Structure*)LZ4_Data;
    size_t delta = lz4ds->nextBlock - (lz4ds->bufferStart + 64 KB);

    if ( (lz4ds->base - delta > lz4ds->base)                          // underflow control
       || ((size_t)(lz4ds->nextBlock - lz4ds->base) > 0xE0000000) )   // close to 32-bits limit
    {
        // Cannot simply shift 'base': rewrite every hash-table entry,
        // zeroing those that would fall out of the retained window.
        size_t deltaLimit = (lz4ds->nextBlock - 64 KB) - lz4ds->base;
        int nH;

        for (nH=0; nH < HASHNBCELLS4; nH++)
        {
            if ((size_t)(lz4ds->hashTable[nH]) < deltaLimit) lz4ds->hashTable[nH] = 0;
            else lz4ds->hashTable[nH] -= (U32)deltaLimit;
        }
        memcpy((void*)(lz4ds->bufferStart), (const void*)(lz4ds->nextBlock - 64 KB), 64 KB);
        lz4ds->base = lz4ds->bufferStart;
        lz4ds->nextBlock = lz4ds->base + 64 KB;
    }
    else
    {
        // Common case: slide the window and shift the virtual pointers back.
        memcpy((void*)(lz4ds->bufferStart), (const void*)(lz4ds->nextBlock - 64 KB), 64 KB);
        lz4ds->nextBlock -= delta;
        lz4ds->base -= delta;
    }

    return (char*)(lz4ds->nextBlock);
}
665
+
666
+
667
+//****************************
668
+// Decompression functions
669
+//****************************
670
+
671
+// This generic decompression function cover all use cases.
672
+// It shall be instanciated several times, using different sets of directives
673
+// Note that it is essential this generic function is really inlined,
674
+// in order to remove useless branches during compilation optimisation.
675
// This generic decompression function cover all use cases.
// It shall be instanciated several times, using different sets of directives
// Note that it is essential this generic function is really inlined,
// in order to remove useless branches during compilation optimisation.
// On success it returns the number of bytes written (endOnInput) or read
// (!endOnInput); on malformed input it returns a negative value encoding
// the input position of the error.
FORCE_INLINE int LZ4_decompress_generic(
                 const char* source,
                 char* dest,
                 int inputSize,          //
                 int outputSize,         // If endOnInput==endOnInputSize, this value is the max size of Output Buffer.

                 int endOnInput,         // endOnOutputSize, endOnInputSize
                 int prefix64k,          // noPrefix, withPrefix
                 int partialDecoding,    // full, partial
                 int targetOutputSize    // only used if partialDecoding==partial
                 )
{
    // Local Variables
    const BYTE* restrict ip = (const BYTE*) source;
    const BYTE* ref;
    const BYTE* const iend = ip + inputSize;

    BYTE* op = (BYTE*) dest;
    BYTE* const oend = op + outputSize;
    BYTE* cpy;
    BYTE* oexit = op + targetOutputSize;

    const size_t dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};   // static reduces speed for LZ4_decompress_safe() on GCC64
    static const size_t dec64table[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3};


    // Special cases
    if ((partialDecoding) && (oexit> oend-MFLIMIT)) oexit = oend-MFLIMIT;                        // targetOutputSize too high => decode everything
    if ((endOnInput) && unlikely(outputSize==0)) return ((inputSize==1) && (*ip==0)) ? 0 : -1;   // Empty output buffer
    if ((!endOnInput) && unlikely(outputSize==0)) return (*ip==0?1:-1);


    // Main Loop : one iteration per (literal run, match) sequence
    while (1)
    {
        unsigned token;
        size_t length;

        // get runlength (255-bytes continue the length encoding)
        token = *ip++;
        if ((length=(token>>ML_BITS)) == RUN_MASK)
        {
            unsigned s=255;
            while (((endOnInput)?ip<iend:1) && (s==255))
            {
                s = *ip++;
                length += s;
            }
        }

        // copy literals
        cpy = op+length;
        if (((endOnInput) && ((cpy>(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) )
            || ((!endOnInput) && (cpy>oend-COPYLENGTH)))
        {
            // Last (or over-long) literal run: copy exactly and stop.
            if (partialDecoding)
            {
                if (cpy > oend) goto _output_error;                           // Error : write attempt beyond end of output buffer
                if ((endOnInput) && (ip+length > iend)) goto _output_error;   // Error : read attempt beyond end of input buffer
            }
            else
            {
                if ((!endOnInput) && (cpy != oend)) goto _output_error;       // Error : block decoding must stop exactly there
                if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error;   // Error : input must be consumed
            }
            memcpy(op, ip, length);
            ip += length;
            op += length;
            break;                                       // Necessarily EOF, due to parsing restrictions
        }
        LZ4_WILDCOPY(op, ip, cpy); ip -= (op-cpy); op = cpy;

        // get offset
        LZ4_READ_LITTLEENDIAN_16(ref,cpy,ip); ip+=2;
        if ((prefix64k==noPrefix) && unlikely(ref < (BYTE* const)dest)) goto _output_error;   // Error : offset outside destination buffer

        // get matchlength
        if ((length=(token&ML_MASK)) == ML_MASK)
        {
            while ((!endOnInput) || (ip<iend-(LASTLITERALS+1)))   // Ensure enough bytes remain for LASTLITERALS + token
            {
                unsigned s = *ip++;
                length += s;
                if (s==255) continue;
                break;
            }
        }

        // copy repeated sequence; overlapping copies (offset < STEPSIZE)
        // need the dec32/dec64 correction tables
        if unlikely((op-ref)<(int)STEPSIZE)
        {
            const size_t dec64 = dec64table[(sizeof(void*)==4) ? 0 : op-ref];
            op[0] = ref[0];
            op[1] = ref[1];
            op[2] = ref[2];
            op[3] = ref[3];
            op += 4, ref += 4; ref -= dec32table[op-ref];
            A32(op) = A32(ref);
            op += STEPSIZE-4; ref -= dec64;
        } else { LZ4_COPYSTEP(op,ref); }
        cpy = op + length - (STEPSIZE-4);

        if unlikely(cpy>oend-COPYLENGTH-(STEPSIZE-4))
        {
            if (cpy > oend-LASTLITERALS) goto _output_error;    // Error : last 5 bytes must be literals
            LZ4_SECURECOPY(op, ref, (oend-COPYLENGTH));
            while(op<cpy) *op++=*ref++;
            op=cpy;
            continue;
        }
        LZ4_WILDCOPY(op, ref, cpy);
        op=cpy;   // correction
    }

    // end of decoding
    if (endOnInput)
       return (int) (((char*)op)-dest);     // Nb of output bytes decoded
    else
       return (int) (((char*)ip)-source);   // Nb of input bytes read

    // Overflow error detected
_output_error:
    return (int) (-(((char*)ip)-source))-1;
}
799
+
800
+
801
+int LZ4_decompress_safe(const char* source, char* dest, int inputSize, int maxOutputSize)
802
+{
803
+    return LZ4_decompress_generic(source, dest, inputSize, maxOutputSize, endOnInputSize, noPrefix, full, 0);
804
+}
805
+
806
+int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int inputSize, int maxOutputSize)
807
+{
808
+    return LZ4_decompress_generic(source, dest, inputSize, maxOutputSize, endOnInputSize, withPrefix, full, 0);
809
+}
810
+
811
+int LZ4_decompress_safe_partial(const char* source, char* dest, int inputSize, int targetOutputSize, int maxOutputSize)
812
+{
813
+    return LZ4_decompress_generic(source, dest, inputSize, maxOutputSize, endOnInputSize, noPrefix, partial, targetOutputSize);
814
+}
815
+
816
+int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int outputSize)
817
+{
818
+    return LZ4_decompress_generic(source, dest, 0, outputSize, endOnOutputSize, withPrefix, full, 0);
819
+}
820
+
821
+int LZ4_decompress_fast(const char* source, char* dest, int outputSize)
822
+{
823
+#ifdef _MSC_VER   // This version is faster with Visual
824
+    return LZ4_decompress_generic(source, dest, 0, outputSize, endOnOutputSize, noPrefix, full, 0);
825
+#else
826
+    return LZ4_decompress_generic(source, dest, 0, outputSize, endOnOutputSize, withPrefix, full, 0);
827
+#endif
828
+}
829
+#endif
0 830
new file mode 100644
... ...
@@ -0,0 +1,205 @@
0
+/*
1
+   LZ4 - Fast LZ compression algorithm
2
+   Header File
3
+   Copyright (C) 2011-2013, Yann Collet.
4
+   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
5
+
6
+   Redistribution and use in source and binary forms, with or without
7
+   modification, are permitted provided that the following conditions are
8
+   met:
9
+
10
+       * Redistributions of source code must retain the above copyright
11
+   notice, this list of conditions and the following disclaimer.
12
+       * Redistributions in binary form must reproduce the above
13
+   copyright notice, this list of conditions and the following disclaimer
14
+   in the documentation and/or other materials provided with the
15
+   distribution.
16
+
17
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28
+
29
+   You can contact the author at :
30
+   - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
31
+   - LZ4 source repository : http://code.google.com/p/lz4/
32
+*/
33
+#pragma once
34
+
35
+#if defined (__cplusplus)
36
+extern "C" {
37
+#endif
38
+
39
+
40
+//**************************************
41
+// Compiler Options
42
+//**************************************
43
+#if defined(_MSC_VER) && !defined(__cplusplus)   // Visual Studio
44
+#  define inline __inline           // Visual C is not C99, but supports some kind of inline
45
+#endif
46
+
47
+
48
+//****************************
49
+// Simple Functions
50
+//****************************
51
+
52
+int LZ4_compress        (const char* source, char* dest, int inputSize);
53
+int LZ4_decompress_safe (const char* source, char* dest, int inputSize, int maxOutputSize);
54
+
55
+/*
56
+LZ4_compress() :
57
+    Compresses 'inputSize' bytes from 'source' into 'dest'.
58
+    Destination buffer must be already allocated,
59
+    and must be sized to handle worst cases situations (input data not compressible)
60
+    Worst case size evaluation is provided by function LZ4_compressBound()
61
+    inputSize : Max supported value is LZ4_MAX_INPUT_SIZE
62
+    return : the number of bytes written in buffer dest
63
+             or 0 if the compression fails
64
+
65
+LZ4_decompress_safe() :
66
+    maxOutputSize : is the size of the destination buffer (which must be already allocated)
67
+    return : the number of bytes decoded in the destination buffer (necessarily <= maxOutputSize)
68
+             If the source stream is detected malformed, the function will stop decoding and return a negative result.
69
+             This function is protected against buffer overflow exploits (never writes outside of output buffer, and never reads outside of input buffer). Therefore, it is protected against malicious data packets
70
+*/
71
+
72
+
73
+//****************************
74
+// Advanced Functions
75
+//****************************
76
/* Largest input size LZ4 accepts: 2 113 929 216 bytes. */
#define LZ4_MAX_INPUT_SIZE        0x7E000000   // 2 113 929 216 bytes
/* Worst-case compressed size for 'isize' input bytes, or 0 if 'isize'
 * exceeds LZ4_MAX_INPUT_SIZE.  Macro form so the bound can be evaluated at
 * compile time (e.g. for stack buffer sizing). */
#define LZ4_COMPRESSBOUND(isize)  ((unsigned int)(isize) > (unsigned int)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16)
/* Inline-function form of LZ4_COMPRESSBOUND, preferred for general use. */
static inline int LZ4_compressBound(int isize)
{
    return LZ4_COMPRESSBOUND(isize);
}
79
+
80
+/*
81
+LZ4_compressBound() :
82
+    Provides the maximum size that LZ4 may output in a "worst case" scenario (input data not compressible)
83
+    primarily useful for memory allocation of output buffer.
84
+    inline function is recommended for the general case,
85
+    macro is also provided when result needs to be evaluated at compilation (such as stack memory allocation).
86
+
87
+    isize  : is the input size. Max supported value is LZ4_MAX_INPUT_SIZE
88
+    return : maximum output size in a "worst case" scenario
89
+             or 0, if input size is too large ( > LZ4_MAX_INPUT_SIZE)
90
+*/
91
+
92
+
93
+int LZ4_compress_limitedOutput (const char* source, char* dest, int inputSize, int maxOutputSize);
94
+
95
+/*
96
+LZ4_compress_limitedOutput() :
97
+    Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'.
98
+    If it cannot achieve it, compression will stop, and result of the function will be zero.
99
+    This function never writes outside of provided output buffer.
100
+
101
+    inputSize  : Max supported value is LZ4_MAX_INPUT_SIZE
102
+    maxOutputSize : is the size of the destination buffer (which must be already allocated)
103
+    return : the number of bytes written in buffer 'dest'
104
+             or 0 if the compression fails
105
+*/
106
+
107
+
108
+int LZ4_decompress_fast (const char* source, char* dest, int outputSize);
109
+
110
+/*
111
+LZ4_decompress_fast() :
112
+    outputSize : is the original (uncompressed) size
113
+    return : the number of bytes read from the source buffer (in other words, the compressed size)
114
+             If the source stream is malformed, the function will stop decoding and return a negative result.
115
+    note : This function is a bit faster than LZ4_decompress_safe()
116
+           This function never writes outside of output buffers, but may read beyond input buffer in case of malicious data packet.
117
+           Use this function preferably into a trusted environment (data to decode comes from a trusted source).
118
+           Destination buffer must be already allocated. Its size must be a minimum of 'outputSize' bytes.
119
+*/
120
+
121
+int LZ4_decompress_safe_partial (const char* source, char* dest, int inputSize, int targetOutputSize, int maxOutputSize);
122
+
123
+/*
124
+LZ4_decompress_safe_partial() :
125
+    This function decompresses a compressed block of size 'inputSize' at position 'source'
126
+    into output buffer 'dest' of size 'maxOutputSize'.
127
+    The function tries to stop decompressing operation as soon as 'targetOutputSize' has been reached,
128
+    reducing decompression time.
129
+    return : the number of bytes decoded in the destination buffer (necessarily <= maxOutputSize)
130
+       Note : this number can be < 'targetOutputSize' should the compressed block to decode be smaller.
131
+             Always control how many bytes were decoded.
132
+             If the source stream is detected malformed, the function will stop decoding and return a negative result.
133
+             This function never writes outside of output buffer, and never reads outside of input buffer. It is therefore protected against malicious data packets
134
+*/
135
+
136
+
137
+//****************************
138
+// Stream Functions
139
+//****************************
140
+
141
+void* LZ4_create (const char* inputBuffer);
142
+int   LZ4_compress_continue (void* LZ4_Data, const char* source, char* dest, int inputSize);
143
+int   LZ4_compress_limitedOutput_continue (void* LZ4_Data, const char* source, char* dest, int inputSize, int maxOutputSize);
144
+char* LZ4_slideInputBuffer (void* LZ4_Data);
145
+int   LZ4_free (void* LZ4_Data);
146
+
147
+/*
148
+These functions allow the compression of dependent blocks, where each block benefits from prior 64 KB within preceding blocks.
149
+In order to achieve this, it is necessary to start creating the LZ4 Data Structure, thanks to the function :
150
+
151
+void* LZ4_create (const char* inputBuffer);
152
+The result of the function is the (void*) pointer on the LZ4 Data Structure.
153
+This pointer will be needed in all other functions.
154
+If the pointer returned is NULL, then the allocation has failed, and compression must be aborted.
155
+The only parameter 'const char* inputBuffer' must, obviously, point at the beginning of input buffer.
156
+The input buffer must be already allocated, and size at least 192KB.
157
+'inputBuffer' will also be the 'const char* source' of the first block.
158
+
159
+All blocks are expected to lay next to each other within the input buffer, starting from 'inputBuffer'.
160
+To compress each block, use either LZ4_compress_continue() or LZ4_compress_limitedOutput_continue().
161
+Their behavior is identical to LZ4_compress() or LZ4_compress_limitedOutput(),
162
+but require the LZ4 Data Structure as their first argument, and check that each block starts right after the previous one.
163
+If next block does not begin immediately after the previous one, the compression will fail (return 0).
164
+
165
+When it's no longer possible to lay the next block after the previous one (not enough space left into input buffer), a call to :
166
+char* LZ4_slideInputBuffer(void* LZ4_Data);
167
+must be performed. It will typically copy the latest 64KB of input at the beginning of input buffer.
168
+Note that, for this function to work properly, minimum size of an input buffer must be 192KB.
169
+==> The memory position where the next input data block must start is provided as the result of the function.
170
+
171
+Compression can then resume, using LZ4_compress_continue() or LZ4_compress_limitedOutput_continue(), as usual.
172
+
173
+When compression is completed, a call to LZ4_free() will release the memory used by the LZ4 Data Structure.
174
+*/
175
+
176
+
177
+int LZ4_decompress_safe_withPrefix64k (const char* source, char* dest, int inputSize, int maxOutputSize);
178
+int LZ4_decompress_fast_withPrefix64k (const char* source, char* dest, int outputSize);
179
+
180
+/*
181
+*_withPrefix64k() :
182
+    These decoding functions work the same as their "normal name" versions,
183
+    but can use up to 64KB of data in front of 'char* dest'.
184
+    These functions are necessary to decode inter-dependent blocks.
185
+*/
186
+
187
+
188
+//****************************
189
+// Obsolete Functions
190
+//****************************
191
+
192
/* Obsolete: retained only for source compatibility with older user code.
 * New code should call LZ4_decompress_fast() directly. */
static inline int LZ4_uncompress(const char* source, char* dest, int outputSize)
{
    return LZ4_decompress_fast(source, dest, outputSize);
}

/* Obsolete: retained only for source compatibility with older user code.
 * New code should call LZ4_decompress_safe() directly. */
static inline int LZ4_uncompress_unknownOutputSize(const char* source, char* dest, int isize, int maxOutputSize)
{
    return LZ4_decompress_safe(source, dest, isize, maxOutputSize);
}
194
+
195
+/*
196
+These functions are deprecated and should no longer be used.
197
+They are provided here for compatibility with existing user programs.
198
+*/
199
+
200
+
201
+
202
+#if defined (__cplusplus)
203
+}
204
+#endif
... ...
@@ -33,7 +33,11 @@
33 33
 
34 34
 #if defined(ENABLE_LZ4)
35 35
 
36
+#if defined(NEED_COMPAT_LZ4)
37
+#include "compat-lz4.h"
38
+#else
36 39
 #include "lz4.h"
40
+#endif
37 41
 
38 42
 #include "comp.h"
39 43
 #include "error.h"