// SPDX-License-Identifier: GPL-2.0-only
/*
 *  LZO1X Compressor from LZO
 *
 *  Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com>
 *
 *  The full LZO package can be found at:
 *  http://www.oberhumer.com/opensource/lzo/
 *
 *  Changed for Linux kernel use by:
 *  Nitin Gupta <nitingupta910@gmail.com>
 *  Richard Purdie <rpurdie@openedhand.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/unaligned.h>
#include <linux/lzo.h>
#include "lzodefs.h"

#undef LZO_UNSAFE

#ifndef LZO_SAFE
#define LZO_UNSAFE 1
#define LZO_SAFE(name) name
#define HAVE_OP(x) 1
#endif

#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun

static noinline int
LZO_SAFE(lzo1x_1_do_compress)(const unsigned char *in, size_t in_len,
			      unsigned char **out, unsigned char *op_end,
			      size_t *tp, void *wrkmem,
			      signed char *state_offset,
			      const unsigned char bitstream_version)
{
	const unsigned char *ip;
	unsigned char *op;
	const unsigned char * const in_end = in + in_len;
	const unsigned char * const ip_end = in + in_len - 20;
	const unsigned char *ii;
	lzo_dict_t * const dict = (lzo_dict_t *) wrkmem;
	size_t ti = *tp;

	op = *out;
	ip = in;
	ii = ip;
	ip += ti < 4 ? 4 - ti : 0;

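	/*
	 * Main loop: look for a match at ip (or, under lzo-rle, a run of
	 * zero bytes), emit the literals gathered since the last
	 * instruction, then emit the match or run instruction itself.
	 */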
	for (;;) {
		const unsigned char *m_pos = NULL;
		size_t t, m_len, m_off;
		u32 dv;
		u32 run_length = 0;
literal:
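		/*
		 * No match was found at the previous position: advance ip,
		 * stepping further the longer we have gone without emitting
		 * anything, so incompressible data is skipped quickly.
		 */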
		ip += 1 + ((ip - ii) >> 5);
next:
		if (unlikely(ip >= ip_end))
			break;
		dv = get_unaligned_le32(ip);

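		/*
		 * Four zero bytes at ip and an lzo-rle bitstream: measure the
		 * zero run (capped at MAX_ZERO_RUN_LENGTH) so it can be
		 * emitted below as a single run instruction instead of going
		 * through the dictionary.
		 */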
		if (dv == 0 && bitstream_version) {
			const unsigned char *ir = ip + 4;
			const unsigned char *limit = min(ip_end, ip + MAX_ZERO_RUN_LENGTH + 1);
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && \
	defined(LZO_FAST_64BIT_MEMORY_ACCESS)
			u64 dv64;

			for (; (ir + 32) <= limit; ir += 32) {
				dv64 = get_unaligned((u64 *)ir);
				dv64 |= get_unaligned((u64 *)ir + 1);
				dv64 |= get_unaligned((u64 *)ir + 2);
				dv64 |= get_unaligned((u64 *)ir + 3);
				if (dv64)
					break;
			}
			for (; (ir + 8) <= limit; ir += 8) {
				dv64 = get_unaligned((u64 *)ir);
				if (dv64) {
#  if defined(__LITTLE_ENDIAN)
					ir += __builtin_ctzll(dv64) >> 3;
#  elif defined(__BIG_ENDIAN)
					ir += __builtin_clzll(dv64) >> 3;
#  else
#    error "missing endian definition"
#  endif
					break;
				}
			}
#else
			while ((ir < (const unsigned char *)
					ALIGN((uintptr_t)ir, 4)) &&
					(ir < limit) && (*ir == 0))
				ir++;
			if (IS_ALIGNED((uintptr_t)ir, 4)) {
				for (; (ir + 4) <= limit; ir += 4) {
					dv = *((u32 *)ir);
					if (dv) {
#  if defined(__LITTLE_ENDIAN)
						ir += __builtin_ctz(dv) >> 3;
#  elif defined(__BIG_ENDIAN)
						ir += __builtin_clz(dv) >> 3;
#  else
#    error "missing endian definition"
#  endif
						break;
					}
				}
			}
#endif
			while (likely(ir < limit) && unlikely(*ir == 0))
				ir++;
			run_length = ir - ip;
			if (run_length > MAX_ZERO_RUN_LENGTH)
				run_length = MAX_ZERO_RUN_LENGTH;
		} else {
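			/*
			 * Hash the four bytes at ip into the dictionary and
			 * record the current position.  The candidate match
			 * at m_pos may be stale or a hash collision, so the
			 * four bytes are re-checked before it is used.
			 */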
			t = ((dv * 0x1824429d) >> (32 - D_BITS)) & D_MASK;
			m_pos = in + dict[t];
			dict[t] = (lzo_dict_t) (ip - in);
			if (unlikely(dv != get_unaligned_le32(m_pos)))
				goto literal;
		}

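		/*
		 * A match or zero run starts at ip: first emit the literals
		 * accumulated since the last instruction (including any
		 * carried over in ti).  Runs of up to 3 literals are folded
		 * into the low bits of the previous instruction via
		 * *state_offset; longer runs get an explicit length, with
		 * lengths above 18 spilled into zero-prefixed extension
		 * bytes.
		 */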
		ii -= ti;
		ti = 0;
		t = ip - ii;
		if (t != 0) {
			if (t <= 3) {
				op[*state_offset] |= t;
				NEED_OP(4);
				COPY4(op, ii);
				op += t;
			} else if (t <= 16) {
				NEED_OP(17);
				*op++ = (t - 3);
				COPY8(op, ii);
				COPY8(op + 8, ii + 8);
				op += t;
			} else {
				if (t <= 18) {
					NEED_OP(1);
					*op++ = (t - 3);
				} else {
					size_t tt = t - 18;
					NEED_OP(1);
					*op++ = 0;
					while (unlikely(tt > 255)) {
						tt -= 255;
						NEED_OP(1);
						*op++ = 0;
					}
					NEED_OP(1);
					*op++ = tt;
				}
				NEED_OP(t);
				do {
					COPY8(op, ii);
					COPY8(op + 8, ii + 8);
					op += 16;
					ii += 16;
					t -= 16;
				} while (t >= 16);
				if (t > 0) do {
					*op++ = *ii++;
				} while (--t > 0);
			}
		}

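		/*
		 * Zero run: emit a single four-byte run instruction.  The
		 * stored length is biased by MIN_ZERO_RUN_LENGTH and split
		 * between the low three bits of the first byte and the top
		 * byte of the little-endian word.
		 */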
		if (unlikely(run_length)) {
			ip += run_length;
			run_length -= MIN_ZERO_RUN_LENGTH;
			NEED_OP(4);
			put_unaligned_le32((run_length << 21) | 0xfffc18
					   | (run_length & 0x7), op);
			op += 4;
			run_length = 0;
			*state_offset = -3;
			goto finished_writing_instruction;
		}

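		/*
		 * Extend the match: the four bytes at m_pos are already known
		 * to equal those at ip, so compare beyond them 8, 4 or 1
		 * byte(s) at a time depending on what the platform supports.
		 */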
		m_len = 4;
		{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ64)
		u64 v;
		v = get_unaligned((const u64 *) (ip + m_len)) ^
		    get_unaligned((const u64 *) (m_pos + m_len));
		if (unlikely(v == 0)) {
			do {
				m_len += 8;
				v = get_unaligned((const u64 *) (ip + m_len)) ^
				    get_unaligned((const u64 *) (m_pos + m_len));
				if (unlikely(ip + m_len >= ip_end))
					goto m_len_done;
			} while (v == 0);
		}
#  if defined(__LITTLE_ENDIAN)
		m_len += (unsigned) __builtin_ctzll(v) / 8;
#  elif defined(__BIG_ENDIAN)
		m_len += (unsigned) __builtin_clzll(v) / 8;
#  else
#    error "missing endian definition"
#  endif
#elif defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ32)
		u32 v;
		v = get_unaligned((const u32 *) (ip + m_len)) ^
		    get_unaligned((const u32 *) (m_pos + m_len));
		if (unlikely(v == 0)) {
			do {
				m_len += 4;
				v = get_unaligned((const u32 *) (ip + m_len)) ^
				    get_unaligned((const u32 *) (m_pos + m_len));
				if (v != 0)
					break;
				m_len += 4;
				v = get_unaligned((const u32 *) (ip + m_len)) ^
				    get_unaligned((const u32 *) (m_pos + m_len));
				if (unlikely(ip + m_len >= ip_end))
					goto m_len_done;
			} while (v == 0);
		}
#  if defined(__LITTLE_ENDIAN)
		m_len += (unsigned) __builtin_ctz(v) / 8;
#  elif defined(__BIG_ENDIAN)
		m_len += (unsigned) __builtin_clz(v) / 8;
#  else
#    error "missing endian definition"
#  endif
#else
		if (unlikely(ip[m_len] == m_pos[m_len])) {
			do {
				m_len += 1;
				if (ip[m_len] != m_pos[m_len])
					break;
				m_len += 1;
				if (ip[m_len] != m_pos[m_len])
					break;
				m_len += 1;
				if (ip[m_len] != m_pos[m_len])
					break;
				m_len += 1;
				if (ip[m_len] != m_pos[m_len])
					break;
				m_len += 1;
				if (ip[m_len] != m_pos[m_len])
					break;
				m_len += 1;
				if (ip[m_len] != m_pos[m_len])
					break;
				m_len += 1;
				if (ip[m_len] != m_pos[m_len])
					break;
				m_len += 1;
				if (unlikely(ip + m_len >= ip_end))
					goto m_len_done;
			} while (ip[m_len] == m_pos[m_len]);
		}
#endif
		}
m_len_done:

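		/*
		 * Encode the match as an M2, M3 or M4 instruction depending
		 * on offset and length; lengths that do not fit in the
		 * opcode are spilled into zero-prefixed extension bytes.
		 */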
		m_off = ip - m_pos;
		ip += m_len;
		if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) {
			m_off -= 1;
			NEED_OP(2);
			*op++ = (((m_len - 1) << 5) | ((m_off & 7) << 2));
			*op++ = (m_off >> 3);
		} else if (m_off <= M3_MAX_OFFSET) {
			m_off -= 1;
			NEED_OP(1);
			if (m_len <= M3_MAX_LEN)
				*op++ = (M3_MARKER | (m_len - 2));
			else {
				m_len -= M3_MAX_LEN;
				*op++ = M3_MARKER | 0;
				while (unlikely(m_len > 255)) {
					m_len -= 255;
					NEED_OP(1);
					*op++ = 0;
				}
				NEED_OP(1);
				*op++ = (m_len);
			}
			NEED_OP(2);
			*op++ = (m_off << 2);
			*op++ = (m_off >> 6);
		} else {
			m_off -= 0x4000;
			NEED_OP(1);
			if (m_len <= M4_MAX_LEN)
				*op++ = (M4_MARKER | ((m_off >> 11) & 8)
						| (m_len - 2));
			else {
				if (unlikely(((m_off & 0x403f) == 0x403f)
						&& (m_len >= 261)
						&& (m_len <= 264))
						&& likely(bitstream_version)) {
					// Under lzo-rle, block copies
					// for 261 <= length <= 264 and
					// (distance & 0x803f) == 0x803f
					// can result in ambiguous
					// output. Adjust length
					// to 260 to prevent ambiguity.
					ip -= m_len - 260;
					m_len = 260;
				}
				m_len -= M4_MAX_LEN;
				*op++ = (M4_MARKER | ((m_off >> 11) & 8));
				while (unlikely(m_len > 255)) {
					NEED_OP(1);
					m_len -= 255;
					*op++ = 0;
				}
				NEED_OP(1);
				*op++ = (m_len);
			}
			NEED_OP(2);
			*op++ = (m_off << 2);
			*op++ = (m_off >> 6);
		}
		*state_offset = -2;
finished_writing_instruction:
		ii = ip;
		goto next;
	}
	*out = op;
	*tp = in_end - (ii - ti);
	return LZO_E_OK;

output_overrun:
	return LZO_E_OUTPUT_OVERRUN;
}

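/*
 * Common driver for both bitstream flavours: bitstream_version == 0 produces
 * plain LZO1X-1 output, while a non-zero version emits the two-byte
 * {17, version} header and enables the zero-run-length encoding above
 * (lzo-rle).
 */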
static int LZO_SAFE(lzogeneric1x_1_compress)(
	const unsigned char *in, size_t in_len,
	unsigned char *out, size_t *out_len,
	void *wrkmem, const unsigned char bitstream_version)
{
	unsigned char * const op_end = out + *out_len;
	const unsigned char *ip = in;
	unsigned char *op = out;
	unsigned char *data_start;
	size_t l = in_len;
	size_t t = 0;
	signed char state_offset = -2;
	unsigned int m4_max_offset;

	// LZO v0 will never write 17 as first byte (except for zero-length
	// input), so this is used to version the bitstream
	if (bitstream_version > 0) {
		*op++ = 17;
		*op++ = bitstream_version;
		m4_max_offset = M4_MAX_OFFSET_V1;
	} else {
		m4_max_offset = M4_MAX_OFFSET_V0;
	}

	data_start = op;

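	/*
	 * Compress the input in chunks of at most m4_max_offset + 1 bytes so
	 * every match offset stays representable; the dictionary is cleared
	 * for each chunk.  The ll_end check guards against pointer overflow
	 * from the skip heuristic in the inner loop.
	 */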
	while (l > 20) {
		size_t ll = min_t(size_t, l, m4_max_offset + 1);
		uintptr_t ll_end = (uintptr_t) ip + ll;
		int err;

		if ((ll_end + ((t + ll) >> 5)) <= ll_end)
			break;
		BUILD_BUG_ON(D_SIZE * sizeof(lzo_dict_t) > LZO1X_1_MEM_COMPRESS);
		memset(wrkmem, 0, D_SIZE * sizeof(lzo_dict_t));
		err = LZO_SAFE(lzo1x_1_do_compress)(
			ip, ll, &op, op_end, &t, wrkmem,
			&state_offset, bitstream_version);
		if (err != LZO_E_OK)
			return err;
		ip += ll;
		l  -= ll;
	}
	t += l;

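	/*
	 * Flush the trailing literals: whatever the last chunk left pending
	 * in t plus the final remainder that was never fed to the match
	 * loop.
	 */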
	if (t > 0) {
		const unsigned char *ii = in + in_len - t;

		if (op == data_start && t <= 238) {
			NEED_OP(1);
			*op++ = (17 + t);
		} else if (t <= 3) {
			op[state_offset] |= t;
		} else if (t <= 18) {
			NEED_OP(1);
			*op++ = (t - 3);
		} else {
			size_t tt = t - 18;
			NEED_OP(1);
			*op++ = 0;
			while (tt > 255) {
				tt -= 255;
				NEED_OP(1);
				*op++ = 0;
			}
			NEED_OP(1);
			*op++ = tt;
		}
		NEED_OP(t);
		if (t >= 16) do {
			COPY8(op, ii);
			COPY8(op + 8, ii + 8);
			op += 16;
			ii += 16;
			t -= 16;
		} while (t >= 16);
		if (t > 0) do {
			*op++ = *ii++;
		} while (--t > 0);
	}

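	/*
	 * Terminate the stream with the end-of-stream marker: an M4
	 * instruction with length bits 1 and two zero distance bytes.
	 */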
	NEED_OP(3);
	*op++ = M4_MARKER | 1;
	*op++ = 0;
	*op++ = 0;

	*out_len = op - out;
	return LZO_E_OK;

output_overrun:
	return LZO_E_OUTPUT_OVERRUN;
}

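/*
 * Exported entry points.  wrkmem must provide LZO1X_1_MEM_COMPRESS bytes and
 * *out_len returns the compressed size.  When HAVE_OP() is the constant 1
 * (plain build), no output bounds checking is done and the caller must size
 * the output buffer for the worst case (lzo1x_worst_compress() in
 * <linux/lzo.h>); an LZO_SAFE build checks against out + *out_len and
 * returns LZO_E_OUTPUT_OVERRUN instead of overrunning.
 */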
int LZO_SAFE(lzo1x_1_compress)(const unsigned char *in, size_t in_len,
			       unsigned char *out, size_t *out_len,
			       void *wrkmem)
{
	return LZO_SAFE(lzogeneric1x_1_compress)(
		in, in_len, out, out_len, wrkmem, 0);
}

int LZO_SAFE(lzorle1x_1_compress)(const unsigned char *in, size_t in_len,
				  unsigned char *out, size_t *out_len,
				  void *wrkmem)
{
	return LZO_SAFE(lzogeneric1x_1_compress)(
		in, in_len, out, out_len, wrkmem, LZO_VERSION);
}

EXPORT_SYMBOL_GPL(LZO_SAFE(lzo1x_1_compress));
EXPORT_SYMBOL_GPL(LZO_SAFE(lzorle1x_1_compress));

#ifndef LZO_UNSAFE
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LZO1X-1 Compressor");
#endif