// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2024 Alibaba Cloud
 */
#include "compress.h"
#include <linux/lz4.h>

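/*
 * LZ4 matches may reference data up to LZ4_DISTANCE_MAX (64 KiB - 1) bytes
 * back.  Since that window is generally not page-aligned, one extra page is
 * reserved on top of the page count covering the maximal distance.
 */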
#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)

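/*
 * Parse the on-disk LZ4 configuration (if any) and derive the per-mount
 * limits: the maximal physical cluster size in blocks and the number of
 * pages the LZ4 match window may span.
 */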
static int z_erofs_load_lz4_config(struct super_block *sb,
			    struct erofs_super_block *dsb, void *data, int size)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct z_erofs_lz4_cfgs *lz4 = data;
	u16 distance;

	if (lz4) {
		if (size < sizeof(struct z_erofs_lz4_cfgs)) {
			erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
			return -EINVAL;
		}
		distance = le16_to_cpu(lz4->max_distance);

		sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
		if (!sbi->lz4.max_pclusterblks) {
			sbi->lz4.max_pclusterblks = 1;	/* reserved case */
		} else if (sbi->lz4.max_pclusterblks >
			   erofs_blknr(sb, Z_EROFS_PCLUSTER_MAX_SIZE)) {
			erofs_err(sb, "too large lz4 pclusterblks %u",
				  sbi->lz4.max_pclusterblks);
			return -EINVAL;
		}
	} else {
		distance = le16_to_cpu(dsb->u1.lz4_max_distance);
		sbi->lz4.max_pclusterblks = 1;
	}

	sbi->lz4.max_distance_pages = distance ?
					DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
					LZ4_MAX_DISTANCE_PAGES;
	return z_erofs_gbuf_growsize(sbi->lz4.max_pclusterblks);
}

/*
 * Fill all gaps with bounce pages if it's a sparse page list.  Also check if
 * all physical pages are consecutive, which is common for moderate
 * compression ratios.
 */
static int z_erofs_lz4_prepare_dstpages(struct z_erofs_decompress_req *rq,
					struct page **pagepool)
{
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	unsigned int lz4_max_distance_pages =
				EROFS_SB(rq->sb)->lz4.max_distance_pages;
	void *kaddr = NULL;
	unsigned int i, j, top;

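	/*
	 * Walk the outputs with a window of lz4_max_distance_pages slots:
	 * once a bounce page falls behind the maximal match distance (and
	 * gaps need not be preserved, i.e. !fillgaps), no LZ4 match can
	 * reference it anymore, so the same physical page can safely back
	 * a later gap as well.
	 */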
	top = 0;
	for (i = j = 0; i < rq->outpages; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= lz4_max_distance_pages)
			j = 0;

		/* 'bounced' bits are only valid after a complete window round */
		if (!rq->fillgaps && test_bit(j, bounced)) {
			DBG_BUGON(i < lz4_max_distance_pages);
			DBG_BUGON(top >= lz4_max_distance_pages);
			availables[top++] = rq->out[i - lz4_max_distance_pages];
		}

		if (page) {
			__clear_bit(j, bounced);
			if (!PageHighMem(page)) {
				if (!i) {
					kaddr = page_address(page);
					continue;
				}
				if (kaddr &&
				    kaddr + PAGE_SIZE == page_address(page)) {
					kaddr += PAGE_SIZE;
					continue;
				}
			}
			kaddr = NULL;
			continue;
		}
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			victim = availables[--top];
		} else {
			victim = __erofs_allocpage(pagepool, rq->gfp, true);
			if (!victim)
				return -ENOMEM;
			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
		}
		rq->out[i] = victim;
	}
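	/* 1 if all output pages are physically consecutive in low memory */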
	return kaddr ? 1 : 0;
}

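/*
 * Map (or copy) the compressed input for decompression.  *maptype tells the
 * caller how to undo it: 0 - single kmap'ped page, 1 - vm_map_ram() area,
 * 2 - per-CPU gbuf copy, 3 - decompression in place (no extra mapping).
 */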
static void *z_erofs_lz4_handle_overlap(struct z_erofs_decompress_req *rq,
			void *inpage, void *out, unsigned int *inputmargin,
			int *maptype, bool may_inplace)
{
	unsigned int oend, omargin, total, i;
	struct page **in;
	void *src, *tmp;

	if (rq->inplace_io) {
		oend = rq->pageofs_out + rq->outputsize;
		omargin = PAGE_ALIGN(oend) - oend;
		if (rq->partial_decoding || !may_inplace ||
		    omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
			goto docopy;

		for (i = 0; i < rq->inpages; ++i)
			if (rq->out[rq->outpages - rq->inpages + i] !=
			    rq->in[i])
				goto docopy;
		kunmap_local(inpage);
		*maptype = 3;
		return out + ((rq->outpages - rq->inpages) << PAGE_SHIFT);
	}

	if (rq->inpages <= 1) {
		*maptype = 0;
		return inpage;
	}
	kunmap_local(inpage);
	src = erofs_vm_map_ram(rq->in, rq->inpages);
	if (!src)
		return ERR_PTR(-ENOMEM);
	*maptype = 1;
	return src;

docopy:
	/*
	 * Otherwise, copy the compressed data, which may overlap with the
	 * output buffer, into a per-CPU buffer.
	 */
	in = rq->in;
	src = z_erofs_get_gbuf(rq->inpages);
	if (!src) {
		DBG_BUGON(1);
		kunmap_local(inpage);
		return ERR_PTR(-EFAULT);
	}

	tmp = src;
	total = rq->inputsize;
	while (total) {
		unsigned int page_copycnt =
			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);

		if (!inpage)
			inpage = kmap_local_page(*in);
		memcpy(tmp, inpage + *inputmargin, page_copycnt);
		kunmap_local(inpage);
		inpage = NULL;
		tmp += page_copycnt;
		total -= page_copycnt;
		++in;
		*inputmargin = 0;
	}
	*maptype = 2;
	return src;
}

/*
 * Get the exact inputsize with the zero_padding feature.
 *  - For LZ4, it works as long as the zero_padding feature is on (5.3+);
 *  - For MicroLZMA, it is enabled all the time.
 */
int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
			 unsigned int padbufsize)
{
	const char *padend;

	padend = memchr_inv(padbuf, 0, padbufsize);
	if (!padend)
		return -EFSCORRUPTED;
	rq->inputsize -= padend - padbuf;
	rq->pageofs_in += padend - padbuf;
	return 0;
}

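/* Decompress a single LZ4 pcluster into the virtually contiguous buffer @dst */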
static int z_erofs_lz4_decompress_mem(struct z_erofs_decompress_req *rq, u8 *dst)
{
	bool support_0padding = false, may_inplace = false;
	unsigned int inputmargin;
	u8 *out, *headpage, *src;
	int ret, maptype;

	DBG_BUGON(*rq->in == NULL);
	headpage = kmap_local_page(*rq->in);

	/* LZ4 in-place decompression is only safe if zero_padding is enabled */
	if (erofs_sb_has_zero_padding(EROFS_SB(rq->sb))) {
		support_0padding = true;
		ret = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
				min_t(unsigned int, rq->inputsize,
				      rq->sb->s_blocksize - rq->pageofs_in));
		if (ret) {
			kunmap_local(headpage);
			return ret;
		}
		may_inplace = !((rq->pageofs_in + rq->inputsize) &
				(rq->sb->s_blocksize - 1));
	}

	inputmargin = rq->pageofs_in;
	src = z_erofs_lz4_handle_overlap(rq, headpage, dst, &inputmargin,
					 &maptype, may_inplace);
	if (IS_ERR(src))
		return PTR_ERR(src);

	out = dst + rq->pageofs_out;
	/* the legacy format could compress extra data in a pcluster. */
	if (rq->partial_decoding || !support_0padding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
				rq->inputsize, rq->outputsize, rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  rq->inputsize, rq->outputsize);

	if (ret != rq->outputsize) {
		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
			  ret, rq->inputsize, inputmargin, rq->outputsize);
		if (ret >= 0)
			memset(out + ret, 0, rq->outputsize - ret);
		ret = -EFSCORRUPTED;
	} else {
		ret = 0;
	}

	if (maptype == 0) {
		kunmap_local(headpage);
	} else if (maptype == 1) {
		vm_unmap_ram(src, rq->inpages);
	} else if (maptype == 2) {
		z_erofs_put_gbuf(src);
	} else if (maptype != 3) {
		DBG_BUGON(1);
		return -EFAULT;
	}
	return ret;
}

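/*
 * LZ4 entry point: map the destination pages either directly (single-page
 * or physically consecutive output) or through vm_map_ram(), then decode
 * the whole pcluster in one shot.
 */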
static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
				  struct page **pagepool)
{
	unsigned int dst_maptype;
	void *dst;
	int ret;

	/* one optimized fast path only for non-bigpcluster cases yet */
	if (rq->inpages == 1 && rq->outpages == 1 && !rq->inplace_io) {
		DBG_BUGON(!*rq->out);
		dst = kmap_local_page(*rq->out);
		dst_maptype = 0;
	} else {
		/* general decoding path which can be used for all cases */
		ret = z_erofs_lz4_prepare_dstpages(rq, pagepool);
		if (ret < 0)
			return ret;
		if (ret > 0) {
			dst = page_address(*rq->out);
			dst_maptype = 1;
		} else {
			dst = erofs_vm_map_ram(rq->out, rq->outpages);
			if (!dst)
				return -ENOMEM;
			dst_maptype = 2;
		}
	}
	ret = z_erofs_lz4_decompress_mem(rq, dst);
	if (!dst_maptype)
		kunmap_local(dst);
	else if (dst_maptype == 2)
		vm_unmap_ram(dst, rq->outpages);
	return ret;
}

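/*
 * Handle uncompressed ("shifted"/"interlaced") pclusters: no decoding is
 * needed, only copying (or memmove()ing for inplace I/O) the plain data
 * into its final position.
 */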
static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
				   struct page **pagepool)
{
	const unsigned int nrpages_in = rq->inpages, nrpages_out = rq->outpages;
	const unsigned int bs = rq->sb->s_blocksize;
	unsigned int cur = 0, ni = 0, no, pi, po, insz, cnt;
	u8 *kin;

	if (rq->outputsize > rq->inputsize)
		return -EOPNOTSUPP;
	if (rq->alg == Z_EROFS_COMPRESSION_INTERLACED) {
		cur = bs - (rq->pageofs_out & (bs - 1));
		pi = (rq->pageofs_in + rq->inputsize - cur) & ~PAGE_MASK;
		cur = min(cur, rq->outputsize);
		if (cur && rq->out[0]) {
			kin = kmap_local_page(rq->in[nrpages_in - 1]);
			if (rq->out[0] == rq->in[nrpages_in - 1])
				memmove(kin + rq->pageofs_out, kin + pi, cur);
			else
				memcpy_to_page(rq->out[0], rq->pageofs_out,
					       kin + pi, cur);
			kunmap_local(kin);
		}
		rq->outputsize -= cur;
	}

	for (; rq->outputsize; rq->pageofs_in = 0, cur += insz, ni++) {
		insz = min(PAGE_SIZE - rq->pageofs_in, rq->outputsize);
		rq->outputsize -= insz;
		if (!rq->in[ni])
			continue;
		kin = kmap_local_page(rq->in[ni]);
		pi = 0;
		do {
			no = (rq->pageofs_out + cur + pi) >> PAGE_SHIFT;
			po = (rq->pageofs_out + cur + pi) & ~PAGE_MASK;
			DBG_BUGON(no >= nrpages_out);
			cnt = min(insz - pi, PAGE_SIZE - po);
			if (rq->out[no] == rq->in[ni])
				memmove(kin + po,
					kin + rq->pageofs_in + pi, cnt);
			else if (rq->out[no])
				memcpy_to_page(rq->out[no], po,
					       kin + rq->pageofs_in + pi, cnt);
			pi += cnt;
		} while (pi < insz);
		kunmap_local(kin);
	}
	DBG_BUGON(ni > nrpages_in);
	return 0;
}

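/*
 * Shared helper for the streaming decompressors (LZMA, DEFLATE, Zstd):
 * advance to the next output/input page once the current one is exhausted
 * and double inplace I/O pages that would otherwise be overwritten before
 * being fully consumed.
 */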
int z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx, void **dst,
			       void **src, struct page **pgpl)
{
	struct z_erofs_decompress_req *rq = dctx->rq;
	struct super_block *sb = rq->sb;
	struct page **pgo, *tmppage;
	unsigned int j;

	if (!dctx->avail_out) {
		if (++dctx->no >= rq->outpages || !rq->outputsize) {
			erofs_err(sb, "insufficient space for decompressed data");
			return -EFSCORRUPTED;
		}

		if (dctx->kout)
			kunmap_local(dctx->kout);
		dctx->avail_out = min(rq->outputsize, PAGE_SIZE - rq->pageofs_out);
		rq->outputsize -= dctx->avail_out;
		pgo = &rq->out[dctx->no];
		if (!*pgo && rq->fillgaps) {		/* deduped */
			*pgo = erofs_allocpage(pgpl, rq->gfp);
			if (!*pgo) {
				dctx->kout = NULL;
				return -ENOMEM;
			}
			set_page_private(*pgo, Z_EROFS_SHORTLIVED_PAGE);
		}
		if (*pgo) {
			dctx->kout = kmap_local_page(*pgo);
			*dst = dctx->kout + rq->pageofs_out;
		} else {
			*dst = dctx->kout = NULL;
		}
		rq->pageofs_out = 0;
	}

	if (dctx->inbuf_pos == dctx->inbuf_sz && rq->inputsize) {
		if (++dctx->ni >= rq->inpages) {
			erofs_err(sb, "invalid compressed data");
			return -EFSCORRUPTED;
		}
		/* unlike kmap(), kmap_local mappings must be released in LIFO order */
		if (dctx->kout)
			kunmap_local(dctx->kout);
		kunmap_local(dctx->kin);

		dctx->inbuf_sz = min_t(u32, rq->inputsize, PAGE_SIZE);
		rq->inputsize -= dctx->inbuf_sz;
		dctx->kin = kmap_local_page(rq->in[dctx->ni]);
		*src = dctx->kin;
		dctx->bounced = false;
		if (dctx->kout) {
			j = (u8 *)*dst - dctx->kout;
			dctx->kout = kmap_local_page(rq->out[dctx->no]);
			*dst = dctx->kout + j;
		}
		dctx->inbuf_pos = 0;
	}

	/*
	 * Handle overlapping: use the given bounce buffer if the input data is
	 * still being processed; otherwise, utilize short-lived pages from the
	 * on-stack page pool, which are shared within the same request.  Note
	 * that only a few inplace I/O pages need to be doubled.
	 */
	if (!dctx->bounced && rq->out[dctx->no] == rq->in[dctx->ni]) {
		memcpy(dctx->bounce, *src, dctx->inbuf_sz);
		*src = dctx->bounce;
		dctx->bounced = true;
	}

	for (j = dctx->ni + 1; j < rq->inpages; ++j) {
		if (rq->out[dctx->no] != rq->in[j])
			continue;
		tmppage = erofs_allocpage(pgpl, rq->gfp);
		if (!tmppage)
			return -ENOMEM;
		set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
		copy_highpage(tmppage, rq->in[j]);
		rq->in[j] = tmppage;
	}
	return 0;
}

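/* Built-in decompressors, indexed by algorithm id */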
const struct z_erofs_decompressor *z_erofs_decomp[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = &(const struct z_erofs_decompressor) {
		.decompress = z_erofs_transform_plain,
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_INTERLACED] = &(const struct z_erofs_decompressor) {
		.decompress = z_erofs_transform_plain,
		.name = "interlaced"
	},
	[Z_EROFS_COMPRESSION_LZ4] = &(const struct z_erofs_decompressor) {
		.config = z_erofs_load_lz4_config,
		.decompress = z_erofs_lz4_decompress,
		.init = z_erofs_gbuf_init,
		.exit = z_erofs_gbuf_exit,
		.name = "lz4"
	},
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
	[Z_EROFS_COMPRESSION_LZMA] = &z_erofs_lzma_decomp,
#endif
#ifdef CONFIG_EROFS_FS_ZIP_DEFLATE
	[Z_EROFS_COMPRESSION_DEFLATE] = &z_erofs_deflate_decomp,
#endif
#ifdef CONFIG_EROFS_FS_ZIP_ZSTD
	[Z_EROFS_COMPRESSION_ZSTD] = &z_erofs_zstd_decomp,
#endif
};

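/*
 * Load the per-algorithm compression configurations from the superblock
 * area: either the legacy inline LZ4 fields or, with the compr_cfgs
 * feature, one variable-sized config record per advertised algorithm.
 */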
int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	unsigned int algs, alg;
	erofs_off_t offset;
	int size, ret = 0;

	if (!erofs_sb_has_compr_cfgs(sbi)) {
		sbi->available_compr_algs = 1 << Z_EROFS_COMPRESSION_LZ4;
		return z_erofs_load_lz4_config(sb, dsb, NULL, 0);
	}

	sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
	if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
		erofs_err(sb, "unidentified algorithms %x, please upgrade kernel",
			  sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
		return -EOPNOTSUPP;
	}

	erofs_init_metabuf(&buf, sb);
	offset = EROFS_SUPER_OFFSET + sbi->sb_size;
	alg = 0;
	for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
		const struct z_erofs_decompressor *dec = z_erofs_decomp[alg];
		void *data;

		if (!(algs & 1))
			continue;

		data = erofs_read_metadata(sb, &buf, &offset, &size);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			break;
		}

		if (alg < Z_EROFS_COMPRESSION_MAX && dec && dec->config) {
			ret = dec->config(sb, dsb, data, size);
		} else {
			erofs_err(sb, "algorithm %d isn't enabled on this kernel",
				  alg);
			ret = -EOPNOTSUPP;
		}
		kfree(data);
		if (ret)
			break;
	}
	erofs_put_metabuf(&buf);
	return ret;
}

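/* Initialize all built-in decompressors, unwinding the earlier ones on failure */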
int __init z_erofs_init_decompressor(void)
{
	int i, err;

	for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i) {
		err = z_erofs_decomp[i] ? z_erofs_decomp[i]->init() : 0;
		if (err) {
			while (i--)
				if (z_erofs_decomp[i])
					z_erofs_decomp[i]->exit();
			return err;
		}
	}
	return 0;
}

void z_erofs_exit_decompressor(void)
{
	int i;

	for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i)
		if (z_erofs_decomp[i])
			z_erofs_decomp[i]->exit();
}