// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2024 Alibaba Cloud
 */
#include "compress.h"
#include <linux/lz4.h>

#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)

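/*
 * Worked example (assuming 4 KiB pages): LZ4_DISTANCE_MAX is 65535 bytes,
 * so DIV_ROUND_UP(65535, 4096) = 16 pages span the largest possible match
 * distance, plus one more page since a match may start at an arbitrary
 * in-page offset, giving LZ4_MAX_DISTANCE_PAGES = 17.
 */
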
static int z_erofs_load_lz4_config(struct super_block *sb,
			    struct erofs_super_block *dsb, void *data, int size)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct z_erofs_lz4_cfgs *lz4 = data;
	u16 distance;

	if (lz4) {
		if (size < sizeof(struct z_erofs_lz4_cfgs)) {
			erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
			return -EINVAL;
		}
		distance = le16_to_cpu(lz4->max_distance);

		sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
		if (!sbi->lz4.max_pclusterblks) {
			sbi->lz4.max_pclusterblks = 1;	/* reserved case */
		} else if (sbi->lz4.max_pclusterblks >
			   erofs_blknr(sb, Z_EROFS_PCLUSTER_MAX_SIZE)) {
			erofs_err(sb, "too large lz4 pclusterblks %u",
				  sbi->lz4.max_pclusterblks);
			return -EINVAL;
		}
	} else {
		distance = le16_to_cpu(dsb->u1.lz4_max_distance);
		if (!distance && !erofs_sb_has_lz4_0padding(sbi))
			return 0;
		sbi->lz4.max_pclusterblks = 1;
		sbi->available_compr_algs = 1 << Z_EROFS_COMPRESSION_LZ4;
	}

	sbi->lz4.max_distance_pages = distance ?
					DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
					LZ4_MAX_DISTANCE_PAGES;
	return z_erofs_gbuf_growsize(sbi->lz4.max_pclusterblks);
}

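/*
 * For reference, the on-disk blob parsed above is tiny (see erofs_fs.h for
 * the authoritative definition):
 *
 *	struct z_erofs_lz4_cfgs {
 *		__le16 max_distance;		(max LZ4 match distance, bytes)
 *		__le16 max_pclusterblks;	(max pcluster size, in fs blocks)
 *		u8 reserved[10];
 *	} __packed;
 *
 * e.g. with 4 KiB blocks, max_pclusterblks == 512 would describe 2 MiB
 * physical clusters, which exceeds Z_EROFS_PCLUSTER_MAX_SIZE (1 MiB) and is
 * therefore rejected above.
 */
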
/*
 * Fill all gaps with bounce pages if it's a sparse page list. Also check if
 * all physical pages are consecutive, which is common for moderate
 * compression ratios.
 */
static int z_erofs_lz4_prepare_dstpages(struct z_erofs_decompress_req *rq,
					struct page **pagepool)
{
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	unsigned int lz4_max_distance_pages =
				EROFS_SB(rq->sb)->lz4.max_distance_pages;
	void *kaddr = NULL;
	unsigned int i, j, top;

	top = 0;
	for (i = j = 0; i < rq->outpages; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= lz4_max_distance_pages)
			j = 0;

		/* 'valid' bounced can only be tested after a complete round */
		if (!rq->fillgaps && test_bit(j, bounced)) {
			DBG_BUGON(i < lz4_max_distance_pages);
			DBG_BUGON(top >= lz4_max_distance_pages);
			availables[top++] = rq->out[i - lz4_max_distance_pages];
		}

		if (page) {
			__clear_bit(j, bounced);
			if (!PageHighMem(page)) {
				if (!i) {
					kaddr = page_address(page);
					continue;
				}
				if (kaddr &&
				    kaddr + PAGE_SIZE == page_address(page)) {
					kaddr += PAGE_SIZE;
					continue;
				}
			}
			kaddr = NULL;
			continue;
		}
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			victim = availables[--top];
		} else {
			victim = __erofs_allocpage(pagepool, rq->gfp, true);
			if (!victim)
				return -ENOMEM;
			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
		}
		rq->out[i] = victim;
	}
	return kaddr ? 1 : 0;
}

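/*
 * Illustrative walk-through of the recycling above: with
 * lz4_max_distance_pages == 17, once the scan has advanced 17 pages past a
 * bounce page, no LZ4 match can reference it any longer, so that page
 * (rq->out[i - 17]) is pushed onto availables[] and reused for the next gap
 * instead of allocating a fresh short-lived page.  The return value is 1
 * iff all output pages are consecutive lowmem pages, i.e. the destination
 * is already virtually contiguous.
 */
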
static void *z_erofs_lz4_handle_overlap(const struct z_erofs_decompress_req *rq,
			void *inpage, void *out, unsigned int *inputmargin,
			int *maptype, bool may_inplace)
{
	unsigned int oend, omargin, cnt, i;
	struct page **in;
	void *src;

	/*
	 * If in-place I/O isn't used (for example, when the bounce compressed
	 * cache holds the data for incomplete read requests), just map the
	 * compressed buffer as a whole and decompress from it directly.
	 */
	if (!rq->inplace_io) {
		if (rq->inpages <= 1) {
			*maptype = 0;
			return inpage;
		}
		kunmap_local(inpage);
		src = erofs_vm_map_ram(rq->in, rq->inpages);
		if (!src)
			return ERR_PTR(-ENOMEM);
		*maptype = 1;
		return src;
	}
	/*
	 * Then, deal with in-place I/Os. In-place I/O is useful because:
	 * (1) it minimizes the memory footprint during I/O submission, which
	 * helps slow storage (including network devices and low-end
	 * HDDs/eMMCs) with many in-flight I/Os; (2) if in-place decompression
	 * can be applied too, the unique buffer is reused so that no extra
	 * CPU D-cache is polluted with temporary compressed data, for extreme
	 * performance.
	 */
	oend = rq->pageofs_out + rq->outputsize;
	omargin = PAGE_ALIGN(oend) - oend;
	if (!rq->partial_decoding && may_inplace &&
	    omargin >= LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize)) {
		for (i = 0; i < rq->inpages; ++i)
			if (rq->out[rq->outpages - rq->inpages + i] !=
			    rq->in[i])
				break;
		if (i >= rq->inpages) {
			kunmap_local(inpage);
			*maptype = 3;
			return out + ((rq->outpages - rq->inpages) << PAGE_SHIFT);
		}
	}
	/*
	 * If in-place decompression can't be applied, copy compressed data that
	 * may potentially overlap during decompression to a per-CPU buffer.
	 */
	src = z_erofs_get_gbuf(rq->inpages);
	if (!src) {
		DBG_BUGON(1);
		kunmap_local(inpage);
		return ERR_PTR(-EFAULT);
	}

	for (i = 0, in = rq->in; i < rq->inputsize; i += cnt, ++in) {
		cnt = min_t(u32, rq->inputsize - i, PAGE_SIZE - *inputmargin);
		if (!inpage)
			inpage = kmap_local_page(*in);
		memcpy(src + i, inpage + *inputmargin, cnt);
		kunmap_local(inpage);
		inpage = NULL;
		*inputmargin = 0;
	}
	*maptype = 2;
	return src;
}

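/*
 * maptype contract shared with the caller:
 *	0 - single input page, still kmapped;
 *	1 - mapped contiguously via erofs_vm_map_ram();
 *	2 - copied into a per-CPU gbuf;
 *	3 - decompressed in place at the tail of the output buffer.
 *
 * Worked example for the in-place margin check (assuming the
 * LZ4_DECOMPRESS_INPLACE_MARGIN() definition in <linux/lz4.h>): for
 * rq->inputsize == 16384, the margin is (16384 >> 8) + 32 == 96 bytes, so
 * in-place decompression is attempted only if at least 96 bytes of slack
 * remain between the decompressed end and the end of the last page.
 */
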
/*
 * Get the exact on-disk size of the compressed data:
 *  - For LZ4, it applies only if the zero_padding feature is on (Linux 5.3+);
 *  - For the other algorithms, zero_padding is enabled all the time.
 */
const char *z_erofs_fixup_insize(struct z_erofs_decompress_req *rq,
				 const char *padbuf, unsigned int padbufsize)
{
	const char *padend;

	padend = memchr_inv(padbuf, 0, padbufsize);
	if (!padend)
		return "compressed data start not found";
	rq->inputsize -= padend - padbuf;
	rq->pageofs_in += padend - padbuf;
	return NULL;
}

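/*
 * Worked example: with 4 KiB blocks, a pcluster whose compressed stream is
 * 3000 bytes long is stored right-aligned in its block, preceded by 1096
 * zero bytes.  memchr_inv() above finds the first non-zero byte at offset
 * 1096, so inputsize shrinks by 1096 and pageofs_in advances by the same
 * amount, leaving exactly the compressed extent.
 */
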
static const char *__z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
					    u8 *dst)
{
	bool may_inplace = false;
	unsigned int inputmargin;
	u8 *out, *headpage, *src;
	const char *reason;
	int ret, maptype;

	headpage = kmap_local_page(*rq->in);
	reason = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
			min_t(unsigned int, rq->inputsize,
			      rq->sb->s_blocksize - rq->pageofs_in));
	if (reason) {
		kunmap_local(headpage);
		return reason;
	}
	may_inplace = !((rq->pageofs_in + rq->inputsize) &
			(rq->sb->s_blocksize - 1));

	inputmargin = rq->pageofs_in;
	src = z_erofs_lz4_handle_overlap(rq, headpage, dst, &inputmargin,
					 &maptype, may_inplace);
	if (IS_ERR(src))
		return ERR_CAST(src);

	out = dst + rq->pageofs_out;
	if (rq->partial_decoding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
				rq->inputsize, rq->outputsize, rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  rq->inputsize, rq->outputsize);
	if (ret == rq->outputsize)
		reason = NULL;
	else if (ret < 0)
		reason = "corrupted compressed data";
	else
		reason = "unexpected end of stream";

	if (!maptype) {
		kunmap_local(headpage);
	} else if (maptype == 1) {
		vm_unmap_ram(src, rq->inpages);
	} else if (maptype == 2) {
		z_erofs_put_gbuf(src);
	} else if (maptype != 3) {
		DBG_BUGON(1);
		return ERR_PTR(-EFAULT);
	}
	return reason;
}

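/*
 * Note on the two LZ4 entry points used above: both return the number of
 * bytes decoded, or a negative value on malformed input.
 * LZ4_decompress_safe() must decode the whole stream and fill the output
 * buffer exactly, while LZ4_decompress_safe_partial() may stop as soon as
 * the requested target size has been produced, which is what
 * rq->partial_decoding relies on.
 */
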
static const char *z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
					  struct page **pagepool)
{
	unsigned int dst_maptype;
	const char *reason;
	void *dst;
	int ret;

	/* only one optimized fast path for non-bigpcluster cases yet */
	if (rq->inpages == 1 && rq->outpages == 1 && !rq->inplace_io) {
		DBG_BUGON(!*rq->out);
		dst = kmap_local_page(*rq->out);
		dst_maptype = 0;
	} else {
		/* general decoding path which can be used for all cases */
		ret = z_erofs_lz4_prepare_dstpages(rq, pagepool);
		if (ret < 0)
			return ERR_PTR(ret);
		if (ret > 0) {
			dst = page_address(*rq->out);
			dst_maptype = 1;
		} else {
			dst = erofs_vm_map_ram(rq->out, rq->outpages);
			if (!dst)
				return ERR_PTR(-ENOMEM);
			dst_maptype = 2;
		}
	}
	reason = __z_erofs_lz4_decompress(rq, dst);
	if (!dst_maptype)
		kunmap_local(dst);
	else if (dst_maptype == 2)
		vm_unmap_ram(dst, rq->outpages);
	return reason;
}

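/*
 * Like maptype on the input side, dst_maptype above records how the output
 * buffer was mapped: 0 - single page kmap_local_page(); 1 - physically
 * consecutive lowmem pages addressed directly via page_address();
 * 2 - stitched together with erofs_vm_map_ram().
 */
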
static const char *z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
					   struct page **pagepool)
{
	const unsigned int nrpages_in = rq->inpages, nrpages_out = rq->outpages;
	const unsigned int bs = rq->sb->s_blocksize;
	unsigned int cur = 0, ni = 0, no, pi, po, insz, cnt;
	u8 *kin;

	if (rq->outputsize > rq->inputsize)
		return ERR_PTR(-EOPNOTSUPP);
	if (rq->alg == Z_EROFS_COMPRESSION_INTERLACED) {
		cur = bs - (rq->pageofs_out & (bs - 1));
		pi = (rq->pageofs_in + rq->inputsize - cur) & ~PAGE_MASK;
		cur = min(cur, rq->outputsize);
		if (cur && rq->out[0]) {
			kin = kmap_local_page(rq->in[nrpages_in - 1]);
			if (rq->out[0] == rq->in[nrpages_in - 1])
				memmove(kin + rq->pageofs_out, kin + pi, cur);
			else
				memcpy_to_page(rq->out[0], rq->pageofs_out,
					       kin + pi, cur);
			kunmap_local(kin);
		}
		rq->outputsize -= cur;
	}

	for (; rq->outputsize; rq->pageofs_in = 0, cur += insz, ni++) {
		insz = min(PAGE_SIZE - rq->pageofs_in, rq->outputsize);
		rq->outputsize -= insz;
		if (!rq->in[ni])
			continue;
		kin = kmap_local_page(rq->in[ni]);
		pi = 0;
		do {
			no = (rq->pageofs_out + cur + pi) >> PAGE_SHIFT;
			po = (rq->pageofs_out + cur + pi) & ~PAGE_MASK;
			DBG_BUGON(no >= nrpages_out);
			cnt = min(insz - pi, PAGE_SIZE - po);
			if (rq->out[no] == rq->in[ni])
				memmove(kin + po,
					kin + rq->pageofs_in + pi, cnt);
			else if (rq->out[no])
				memcpy_to_page(rq->out[no], po,
					       kin + rq->pageofs_in + pi, cnt);
			pi += cnt;
		} while (pi < insz);
		kunmap_local(kin);
	}
	DBG_BUGON(ni > nrpages_in);
	return NULL;
}

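/*
 * Interlaced example (4 KiB blocks, rq->pageofs_out == 3000): the first
 * 1096 output bytes (bs - 3000) are stored at the very end of the on-disk
 * block and the remaining bytes follow from its beginning, so most of the
 * data is already in place when the block is reused as-is.  The loop above
 * then relocates data at page granularity, switching to memmove() whenever
 * an output page aliases its input page.
 */
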
const char *z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx,
				void **dst, void **src, struct page **pgpl)
{
	struct z_erofs_decompress_req *rq = dctx->rq;
	struct page **pgo, *tmppage;
	unsigned int j;

	if (!dctx->avail_out) {
		if (++dctx->no >= rq->outpages || !rq->outputsize)
			return "insufficient space for decompressed data";

		if (dctx->kout)
			kunmap_local(dctx->kout);
		dctx->avail_out = min(rq->outputsize, PAGE_SIZE - rq->pageofs_out);
		rq->outputsize -= dctx->avail_out;
		pgo = &rq->out[dctx->no];
		if (!*pgo && rq->fillgaps) {		/* deduped */
			*pgo = erofs_allocpage(pgpl, rq->gfp);
			if (!*pgo) {
				dctx->kout = NULL;
				return ERR_PTR(-ENOMEM);
			}
			set_page_private(*pgo, Z_EROFS_SHORTLIVED_PAGE);
		}
		if (*pgo) {
			dctx->kout = kmap_local_page(*pgo);
			*dst = dctx->kout + rq->pageofs_out;
		} else {
			*dst = dctx->kout = NULL;
		}
		rq->pageofs_out = 0;
	}

	if (dctx->inbuf_pos == dctx->inbuf_sz && rq->inputsize) {
		if (++dctx->ni >= rq->inpages)
			return "invalid compressed data";
		if (dctx->kout) /* unlike kmap(), unmap in reverse mapping order */
			kunmap_local(dctx->kout);
		kunmap_local(dctx->kin);

		dctx->inbuf_sz = min_t(u32, rq->inputsize, PAGE_SIZE);
		rq->inputsize -= dctx->inbuf_sz;
		dctx->kin = kmap_local_page(rq->in[dctx->ni]);
		*src = dctx->kin;
		dctx->bounced = false;
		if (dctx->kout) {
			j = (u8 *)*dst - dctx->kout;
			dctx->kout = kmap_local_page(rq->out[dctx->no]);
			*dst = dctx->kout + j;
		}
		dctx->inbuf_pos = 0;
	}

	/*
	 * Handle overlapping: use the given bounce buffer if the input data is
	 * being processed; otherwise, utilize short-lived pages from the
	 * on-stack page pool, which are shared within the same request.  Note
	 * that only a few in-place I/O pages need to be doubled.
	 */
	if (!dctx->bounced && rq->out[dctx->no] == rq->in[dctx->ni]) {
		memcpy(dctx->bounce, *src, dctx->inbuf_sz);
		*src = dctx->bounce;
		dctx->bounced = true;
	}

	for (j = dctx->ni + 1; j < rq->inpages; ++j) {
		if (rq->out[dctx->no] != rq->in[j])
			continue;
		tmppage = erofs_allocpage(pgpl, rq->gfp);
		if (!tmppage)
			return ERR_PTR(-ENOMEM);
		set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
		copy_highpage(tmppage, rq->in[j]);
		rq->in[j] = tmppage;
	}
	return NULL;
}

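/*
 * A minimal sketch of how a streaming backend is expected to drive the
 * helper above (loosely modeled on the LZMA/DEFLATE/ZSTD callers;
 * decode_some() below is purely illustrative):
 *
 *	struct z_erofs_stream_dctx dctx = { .rq = rq, .no = -1, .ni = 0 };
 *	void *dst = NULL, *src = NULL;
 *	const char *reason;
 *
 *	do {
 *		reason = z_erofs_stream_switch_bufs(&dctx, &dst, &src, pgpl);
 *		if (reason)
 *			break;
 *		// consume src[dctx.inbuf_pos..dctx.inbuf_sz) and emit up to
 *		// dctx.avail_out bytes at dst, updating both counters
 *		decode_some(&dctx, &dst, &src);
 *	} while (rq->outputsize || dctx.avail_out);
 */
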
const struct z_erofs_decompressor *z_erofs_decomp[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = &(const struct z_erofs_decompressor) {
		.decompress = z_erofs_transform_plain,
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_INTERLACED] = &(const struct z_erofs_decompressor) {
		.decompress = z_erofs_transform_plain,
		.name = "interlaced"
	},
	[Z_EROFS_COMPRESSION_LZ4] = &(const struct z_erofs_decompressor) {
		.config = z_erofs_load_lz4_config,
		.decompress = z_erofs_lz4_decompress,
		.init = z_erofs_gbuf_init,
		.exit = z_erofs_gbuf_exit,
		.name = "lz4"
	},
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
	[Z_EROFS_COMPRESSION_LZMA] = &z_erofs_lzma_decomp,
#endif
#ifdef CONFIG_EROFS_FS_ZIP_DEFLATE
	[Z_EROFS_COMPRESSION_DEFLATE] = &z_erofs_deflate_decomp,
#endif
#ifdef CONFIG_EROFS_FS_ZIP_ZSTD
	[Z_EROFS_COMPRESSION_ZSTD] = &z_erofs_zstd_decomp,
#endif
};

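/*
 * The table is indexed by algorithm id: the on-disk ids
 * (Z_EROFS_COMPRESSION_LZ4 and friends) come first, while
 * SHIFTED/INTERLACED are runtime-only pseudo-algorithms placed past
 * Z_EROFS_COMPRESSION_MAX (see compress.h), so the init/exit loops below,
 * which stop at Z_EROFS_COMPRESSION_MAX, never touch them.  A slot
 * disabled by Kconfig is simply left NULL.
 */
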
int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	unsigned long algs, alg;
	erofs_off_t offset;
	int size, ret = 0;

	if (!erofs_sb_has_compr_cfgs(sbi))
		return z_erofs_load_lz4_config(sb, dsb, NULL, 0);

	algs = le16_to_cpu(dsb->u1.available_compr_algs);
	sbi->available_compr_algs = algs;
	if (algs & ~Z_EROFS_ALL_COMPR_ALGS) {
		erofs_err(sb, "unidentified algorithms %lx, please upgrade kernel",
			  algs & ~Z_EROFS_ALL_COMPR_ALGS);
		return -EOPNOTSUPP;
	}

	(void)erofs_init_metabuf(&buf, sb, false);
	offset = EROFS_SUPER_OFFSET + sbi->sb_size;
	for_each_set_bit(alg, &algs, Z_EROFS_COMPRESSION_MAX) {
		const struct z_erofs_decompressor *dec = z_erofs_decomp[alg];
		void *data;

		data = erofs_read_metadata(sb, &buf, &offset, &size);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			break;
		}

		if (dec && dec->config) {
			ret = dec->config(sb, dsb, data, size);
		} else {
			erofs_err(sb, "algorithm %ld isn't enabled on this kernel",
				  alg);
			ret = -EOPNOTSUPP;
		}
		kfree(data);
		if (ret)
			break;
	}
	erofs_put_metabuf(&buf);
	return ret;
}

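/*
 * On-disk note: the per-algorithm configs parsed above are stored as a
 * packed run of variable-sized records starting right after the superblock
 * (EROFS_SUPER_OFFSET + sb_size), one record per bit set in
 * available_compr_algs, in ascending algorithm order;
 * erofs_read_metadata() hands back each record's payload and size while
 * advancing @offset.
 */
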
int __init z_erofs_init_decompressor(void)
{
	int i, err;

	for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i) {
		err = z_erofs_decomp[i] ? z_erofs_decomp[i]->init() : 0;
		if (err) {
			while (i--)
				if (z_erofs_decomp[i])
					z_erofs_decomp[i]->exit();
			return err;
		}
	}
	return 0;
}

void z_erofs_exit_decompressor(void)
{
	int i;

	for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i)
		if (z_erofs_decomp[i])
			z_erofs_decomp[i]->exit();
}