/*
 * LZ4 - Fast LZ compression algorithm
 * Header File
 * Copyright (C) 2011-2013, Yann Collet.
 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You can contact the author at :
 * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
 * - LZ4 source repository : http://code.google.com/p/lz4/
 *
 * $FreeBSD$
 */
int lz4_decompress(void *, void *, size_t, size_t, int);

static int LZ4_uncompress_unknownOutputSize(const char *source, char *dest,
					    int isize, int maxOutputSize);

/* ARGSUSED */
int
lz4_decompress(void *s_start, void *d_start, size_t s_len, size_t d_len, int dummy __unused)
{
	const uint8_t *src = s_start;
	uint32_t bufsiz = htonl(*(uint32_t *)src);

	/* invalid compressed buffer size encoded at start */
	if (bufsiz + 4 > s_len)
		return (1);

	/*
	 * Returns 0 on success (decompression function returned non-negative)
	 * and non-zero on failure (decompression function returned negative).
	 */
	return (LZ4_uncompress_unknownOutputSize((const char *)s_start + 4, d_start, bufsiz,
	    d_len) < 0);
}
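
/*
 * A minimal usage sketch (illustrative only; the buffer names and sizes
 * below are hypothetical).  The source buffer starts with a 4-byte
 * big-endian length word followed by the LZ4 stream; the destination
 * must be large enough for the fully decompressed data.
 */
#if 0
	static char dst[128 * 1024];	/* must hold the decompressed data */

	if (lz4_decompress(src, dst, src_len, sizeof (dst), 0) != 0)
		return (EIO);		/* corrupt or truncated stream */
#endif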

/*
 * CPU Feature Detection
 */

/* 32 or 64 bits? */
#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) || \
	defined(__amd64) || defined(__ppc64__) || defined(_WIN64) || \
	defined(__LP64__) || defined(_LP64))
#define	LZ4_ARCH64	1
#else
#define	LZ4_ARCH64	0
#endif

/*
 * Little Endian or Big Endian?
 * Note: overwrite the below #define if you know your architecture's
 * endianness.
 */
#if BYTE_ORDER == BIG_ENDIAN
#define	LZ4_BIG_ENDIAN	1
#else
	/*
	 * Little Endian assumed. PDP Endian and other very rare endian
	 * formats are unsupported.
	 */
#endif

/*
 * Unaligned memory access is automatically enabled for "common" CPUs,
 * such as x86. For other CPUs, the compiler will be more cautious and
 * insert extra code to ensure aligned access is respected. If you know
 * your target CPU supports unaligned memory access, you may want to
 * force this option manually to improve performance.
 */
#if defined(__ARM_FEATURE_UNALIGNED)
#define	LZ4_FORCE_UNALIGNED_ACCESS 1
#endif

/*
 * Compiler Options
 */
#if __STDC_VERSION__ >= 199901L	/* C99 */
/* "restrict" is a known keyword */
#else
/* Disable restrict */
#define	restrict
#endif

#define	GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

#define	lz4_bswap16(x) ((unsigned short int) ((((x) >> 8) & 0xffu) \
	| (((x) & 0xffu) << 8)))
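
/*
 * lz4_bswap16() swaps the two bytes of a 16-bit value, e.g.
 * lz4_bswap16(0x1234) yields 0x3412; it is used below to read and
 * write the little-endian match offsets on big-endian hosts.
 */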

#if (GCC_VERSION >= 302) || (__INTEL_COMPILER >= 800) || defined(__clang__)
#define	expect(expr, value)    (__builtin_expect((expr), (value)))
#else
#define	expect(expr, value)    (expr)
#endif

#define	likely(expr)	expect((expr) != 0, 1)
#define	unlikely(expr)	expect((expr) != 0, 0)

/* Basic types */
#define	BYTE	uint8_t
#define	U16	uint16_t
#define	U32	uint32_t
#define	S32	int32_t
#define	U64	uint64_t

#ifndef LZ4_FORCE_UNALIGNED_ACCESS
#pragma pack(1)
#endif

typedef struct _U16_S {
	U16 v;
} U16_S;
typedef struct _U32_S {
	U32 v;
} U32_S;
typedef struct _U64_S {
	U64 v;
} U64_S;

#ifndef LZ4_FORCE_UNALIGNED_ACCESS
#pragma pack()
#endif

#define	A64(x)	(((U64_S *)(x))->v)
#define	A32(x)	(((U32_S *)(x))->v)
#define	A16(x)	(((U16_S *)(x))->v)
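
/*
 * A64/A32/A16 load or store a word through a pointer of arbitrary
 * alignment.  Without LZ4_FORCE_UNALIGNED_ACCESS the wrapper structs
 * above are packed, so the compiler assumes the pointer may be
 * misaligned and emits safe (possibly slower) access sequences on
 * strict-alignment targets.
 */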

/*
 * Constants
 */
#define	MINMATCH 4

#define	COPYLENGTH 8
#define	LASTLITERALS 5

#define	ML_BITS 4
#define	ML_MASK ((1U<<ML_BITS)-1)
#define	RUN_BITS (8-ML_BITS)
#define	RUN_MASK ((1U<<RUN_BITS)-1)
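
/*
 * Each LZ4 sequence begins with a one-byte token: the high RUN_BITS
 * encode the literal run length, the low ML_BITS encode the match
 * length, and a field equal to its mask means extra length bytes
 * follow.  A minimal illustrative sketch of the split, with a made-up
 * token value:
 */
#if 0
	BYTE token = 0x4F;			/* hypothetical token */
	int lit_len = token >> ML_BITS;		/* 0x4: four literals */
	int match_len = token & ML_MASK;	/* 0xF == ML_MASK: extended */
#endif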

/*
 * Architecture-specific macros
 */
#if LZ4_ARCH64
#define	STEPSIZE 8
#define	UARCH U64
#define	AARCH A64
#define	LZ4_COPYSTEP(s, d)	A64(d) = A64(s); d += 8; s += 8;
#define	LZ4_COPYPACKET(s, d)	LZ4_COPYSTEP(s, d)
#define	LZ4_SECURECOPY(s, d, e)	if (d < e) LZ4_WILDCOPY(s, d, e)
#define	HTYPE U32
#define	INITBASE(base)		const BYTE* const base = ip
#else
#define	STEPSIZE 4
#define	UARCH U32
#define	AARCH A32
#define	LZ4_COPYSTEP(s, d)	A32(d) = A32(s); d += 4; s += 4;
#define	LZ4_COPYPACKET(s, d)	LZ4_COPYSTEP(s, d); LZ4_COPYSTEP(s, d);
#define	LZ4_SECURECOPY		LZ4_WILDCOPY
#define	HTYPE const BYTE*
#define	INITBASE(base)		const int base = 0
#endif

#if (defined(LZ4_BIG_ENDIAN) && !defined(BIG_ENDIAN_NATIVE_BUT_INCOMPATIBLE))
#define	LZ4_READ_LITTLEENDIAN_16(d, s, p) \
	{ U16 v = A16(p); v = lz4_bswap16(v); d = (s) - v; }
#define	LZ4_WRITE_LITTLEENDIAN_16(p, i) \
	{ U16 v = (U16)(i); v = lz4_bswap16(v); A16(p) = v; p += 2; }
#else
#define	LZ4_READ_LITTLEENDIAN_16(d, s, p) { d = (s) - A16(p); }
#define	LZ4_WRITE_LITTLEENDIAN_16(p, v)  { A16(p) = v; p += 2; }
#endif

/* Macros */
#define	LZ4_WILDCOPY(s, d, e) do { LZ4_COPYPACKET(s, d) } while (d < e);
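
/*
 * LZ4_WILDCOPY copies whole packets in a do/while loop, so it always
 * copies at least once and may write up to COPYLENGTH - 1 bytes past
 * 'e'; callers keep COPYLENGTH bytes of slack, hence the
 * "oend - COPYLENGTH" bounds checks below.  On 64-bit builds
 * LZ4_SECURECOPY adds an explicit "d < e" guard so the do/while cannot
 * overshoot when d is already at or past e.
 */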

/* Decompression functions */

static int
LZ4_uncompress_unknownOutputSize(const char *source,
    char *dest, int isize, int maxOutputSize)
{
	/* Local Variables */
	const BYTE *restrict ip = (const BYTE *) source;
	const BYTE *const iend = ip + isize;
	const BYTE *restrict ref;

	BYTE *restrict op = (BYTE *) dest;
	BYTE *const oend = op + maxOutputSize;
	BYTE *cpy;

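	/*
	 * dec[] corrects overlapping matches with offset 1-3: after the
	 * four byte-at-a-time copies further down, 'ref' is stepped back
	 * by dec[offset] so that subsequent word copies keep replicating
	 * the short repeating pattern.
	 */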
	size_t dec[] = { 0, 3, 2, 3, 0, 0, 0, 0 };

	/* Main Loop */
	while (ip < iend) {
		BYTE token;
		int length;

		/* get runlength */
		token = *ip++;
		if ((length = (token >> ML_BITS)) == RUN_MASK) {
			int s = 255;
			while ((ip < iend) && (s == 255)) {
				s = *ip++;
				length += s;
			}
		}
		/* copy literals */
		cpy = op + length;
		if ((cpy > oend - COPYLENGTH) ||
		    (ip + length > iend - COPYLENGTH)) {
			if (cpy > oend)
				/*
				 * Error: request to write beyond destination
				 * buffer.
				 */
				goto _output_error;
			if (ip + length > iend)
				/*
				 * Error: request to read beyond source
				 * buffer.
				 */
				goto _output_error;
			memcpy(op, ip, length);
			op += length;
			ip += length;
			if (ip < iend)
				/* Error: LZ4 format violation */
				goto _output_error;
			/* Necessarily EOF, due to parsing restrictions. */
			break;
		}
		LZ4_WILDCOPY(ip, op, cpy);
		ip -= (op - cpy);
		op = cpy;

		/* get offset */
		LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);
		ip += 2;
		if (ref < (BYTE * const) dest)
			/*
			 * Error: offset creates reference outside of
			 * destination buffer.
			 */
			goto _output_error;

		/* get matchlength */
		if ((length = (token & ML_MASK)) == ML_MASK) {
			while (ip < iend) {
				int s = *ip++;
				length += s;
				if (s == 255)
					continue;
				break;
			}
		}
		/* copy repeated sequence */
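		/*
		 * When the match offset is smaller than STEPSIZE the
		 * source and destination of the copy overlap, so a plain
		 * word copy would read bytes not yet written.  The branch
		 * below copies the first four bytes one at a time, then
		 * realigns 'ref' (via dec/dec2) so the remaining word
		 * copies reproduce the repeating pattern.
		 */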
		if unlikely(op - ref < STEPSIZE) {
#if LZ4_ARCH64
			size_t dec2table[] = { 0, 0, 0, -1, 0, 1, 2, 3 };
			size_t dec2 = dec2table[op - ref];
#else
			const int dec2 = 0;
#endif
			*op++ = *ref++;
			*op++ = *ref++;
			*op++ = *ref++;
			*op++ = *ref++;
			ref -= dec[op - ref];
			A32(op) = A32(ref);
			op += STEPSIZE - 4;
			ref -= dec2;
		} else {
			LZ4_COPYSTEP(ref, op);
		}
		cpy = op + length - (STEPSIZE - 4);
		if (cpy > oend - COPYLENGTH) {
			if (cpy > oend)
				/*
				 * Error: request to write outside of
				 * destination buffer.
				 */
				goto _output_error;
			LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
			while (op < cpy)
				*op++ = *ref++;
			op = cpy;
			if (op == oend)
				/*
				 * Check EOF (should never happen, since last
				 * 5 bytes are supposed to be literals).
				 */
				break;
			continue;
		}
		LZ4_SECURECOPY(ref, op, cpy);
		op = cpy;	/* correction */
	}

	/* end of decoding */
	return (int)(((char *)op) - dest);

	/* decoding error detected (buffer overflow or corrupt input) */
	_output_error:
	return (int)(-(((char *)ip) - source));
}