xref: /linux/fs/erofs/decompressor.c (revision 447e140e66fd226350b3ce86cffc965eaae4c856)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "compress.h"
#include <linux/lz4.h>

#ifndef LZ4_DISTANCE_MAX	/* history window size */
#define LZ4_DISTANCE_MAX 65535	/* set to maximum value by default */
#endif

#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)  (((srcsize) >> 8) + 32)
#endif

struct z_erofs_lz4_decompress_ctx {
	struct z_erofs_decompress_req *rq;
	/* # of encoded, decoded pages */
	unsigned int inpages, outpages;
	/* decoded block total length (used for in-place decompression) */
	unsigned int oend;
};

static int z_erofs_load_lz4_config(struct super_block *sb,
			    struct erofs_super_block *dsb, void *data, int size)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct z_erofs_lz4_cfgs *lz4 = data;
	u16 distance;

	if (lz4) {
		if (size < sizeof(struct z_erofs_lz4_cfgs)) {
			erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
			return -EINVAL;
		}
		distance = le16_to_cpu(lz4->max_distance);

		sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
		if (!sbi->lz4.max_pclusterblks) {
			sbi->lz4.max_pclusterblks = 1;	/* reserved case */
		} else if (sbi->lz4.max_pclusterblks >
			   erofs_blknr(sb, Z_EROFS_PCLUSTER_MAX_SIZE)) {
			erofs_err(sb, "too large lz4 pclusterblks %u",
				  sbi->lz4.max_pclusterblks);
			return -EINVAL;
		}
	} else {
		distance = le16_to_cpu(dsb->u1.lz4_max_distance);
		sbi->lz4.max_pclusterblks = 1;
	}

	sbi->lz4.max_distance_pages = distance ?
					DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
					LZ4_MAX_DISTANCE_PAGES;
	return z_erofs_gbuf_growsize(sbi->lz4.max_pclusterblks);
}

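/*
 * Worked example (illustrative, assuming 4KiB pages): with the default
 * LZ4_DISTANCE_MAX of 65535 bytes, DIV_ROUND_UP(65535, 4096) = 16, so
 * max_distance_pages = 17; the extra page covers a match window that
 * straddles a page boundary.
 */
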
/*
 * Fill all gaps with bounce pages if it's a sparse page list. Also check if
 * all physical pages are consecutive, which is common at moderate
 * compression ratios.
 */
static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
					struct page **pagepool)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	unsigned int lz4_max_distance_pages =
				EROFS_SB(rq->sb)->lz4.max_distance_pages;
	void *kaddr = NULL;
	unsigned int i, j, top;

	top = 0;
	for (i = j = 0; i < ctx->outpages; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= lz4_max_distance_pages)
			j = 0;

		/* 'valid' bounced can only be tested after a complete round */
		if (!rq->fillgaps && test_bit(j, bounced)) {
			DBG_BUGON(i < lz4_max_distance_pages);
			DBG_BUGON(top >= lz4_max_distance_pages);
			availables[top++] = rq->out[i - lz4_max_distance_pages];
		}

		if (page) {
			__clear_bit(j, bounced);
			if (!PageHighMem(page)) {
				if (!i) {
					kaddr = page_address(page);
					continue;
				}
				if (kaddr &&
				    kaddr + PAGE_SIZE == page_address(page)) {
					kaddr += PAGE_SIZE;
					continue;
				}
			}
			kaddr = NULL;
			continue;
		}
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			victim = availables[--top];
			get_page(victim);
		} else {
			victim = __erofs_allocpage(pagepool, rq->gfp, true);
			if (!victim)
				return -ENOMEM;
			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
		}
		rq->out[i] = victim;
	}
	return kaddr ? 1 : 0;
}

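/*
 * Return convention above: a negative errno on allocation failure, 1 if
 * every output page turned out physically consecutive (kaddr survived the
 * whole walk, so the caller may use page_address() directly), 0 otherwise
 * (the caller falls back to vm_map_ram()). Bounce pages are recycled once
 * they drop out of the lz4 match window, i.e. one full round of `j`, which
 * bounds the number of short-lived allocations per request.
 */
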
static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
			void *inpage, void *out, unsigned int *inputmargin,
			int *maptype, bool may_inplace)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	unsigned int omargin, total, i;
	struct page **in;
	void *src, *tmp;

	if (rq->inplace_io) {
		omargin = PAGE_ALIGN(ctx->oend) - ctx->oend;
		if (rq->partial_decoding || !may_inplace ||
		    omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
			goto docopy;

		for (i = 0; i < ctx->inpages; ++i)
			if (rq->out[ctx->outpages - ctx->inpages + i] !=
			    rq->in[i])
				goto docopy;
		kunmap_local(inpage);
		*maptype = 3;
		return out + ((ctx->outpages - ctx->inpages) << PAGE_SHIFT);
	}

	if (ctx->inpages <= 1) {
		*maptype = 0;
		return inpage;
	}
	kunmap_local(inpage);
	src = erofs_vm_map_ram(rq->in, ctx->inpages);
	if (!src)
		return ERR_PTR(-ENOMEM);
	*maptype = 1;
	return src;

docopy:
	/*
	 * Otherwise, copy the compressed data, which may overlap with the
	 * output, into a per-CPU buffer.
	 */
	in = rq->in;
	src = z_erofs_get_gbuf(ctx->inpages);
	if (!src) {
		DBG_BUGON(1);
		kunmap_local(inpage);
		return ERR_PTR(-EFAULT);
	}

	tmp = src;
	total = rq->inputsize;
	while (total) {
		unsigned int page_copycnt =
			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);

		if (!inpage)
			inpage = kmap_local_page(*in);
		memcpy(tmp, inpage + *inputmargin, page_copycnt);
		kunmap_local(inpage);
		inpage = NULL;
		tmp += page_copycnt;
		total -= page_copycnt;
		++in;
		*inputmargin = 0;
	}
	*maptype = 2;
	return src;
}

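/*
 * maptype values set above: 0 - single input page, still kmapped and
 * returned as-is; 1 - multiple input pages mapped contiguously with
 * erofs_vm_map_ram(); 2 - compressed data copied into a per-CPU global
 * buffer; 3 - true in-place decoding inside the output buffer itself.
 * z_erofs_lz4_decompress_mem() picks the matching teardown from this.
 */
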
/*
 * Get the exact inputsize with the zero_padding feature.
 *  - For LZ4, it works as long as the zero_padding feature is on (5.3+);
 *  - For MicroLZMA, it's always enabled.
 */
int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
			 unsigned int padbufsize)
{
	const char *padend;

	padend = memchr_inv(padbuf, 0, padbufsize);
	if (!padend)
		return -EFSCORRUPTED;
	rq->inputsize -= padend - padbuf;
	rq->pageofs_in += padend - padbuf;
	return 0;
}

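/*
 * Worked example (illustrative): with zero_padding, compressed data is
 * aligned to the end of its block and the head is zero-filled, so a 4KiB
 * block holding 4000 bytes of compressed data starts with 96 zero bytes.
 * memchr_inv() finds the first non-zero byte; inputsize then shrinks by 96
 * and pageofs_in advances by 96. An all-zero padbuf indicates corruption,
 * since a valid compressed stream cannot be empty.
 */
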
static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
				      u8 *dst)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	bool support_0padding = false, may_inplace = false;
	unsigned int inputmargin;
	u8 *out, *headpage, *src;
	int ret, maptype;

	DBG_BUGON(*rq->in == NULL);
	headpage = kmap_local_page(*rq->in);

	/* LZ4 in-place decompression is only safe if zero_padding is enabled */
	if (erofs_sb_has_zero_padding(EROFS_SB(rq->sb))) {
		support_0padding = true;
		ret = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
				min_t(unsigned int, rq->inputsize,
				      rq->sb->s_blocksize - rq->pageofs_in));
		if (ret) {
			kunmap_local(headpage);
			return ret;
		}
		may_inplace = !((rq->pageofs_in + rq->inputsize) &
				(rq->sb->s_blocksize - 1));
	}

	inputmargin = rq->pageofs_in;
	src = z_erofs_lz4_handle_overlap(ctx, headpage, dst, &inputmargin,
					 &maptype, may_inplace);
	if (IS_ERR(src))
		return PTR_ERR(src);

	out = dst + rq->pageofs_out;
	/* legacy format could compress extra data in a pcluster. */
	if (rq->partial_decoding || !support_0padding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
				rq->inputsize, rq->outputsize, rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  rq->inputsize, rq->outputsize);

	if (ret != rq->outputsize) {
		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
			  ret, rq->inputsize, inputmargin, rq->outputsize);
		if (ret >= 0)
			memset(out + ret, 0, rq->outputsize - ret);
		ret = -EFSCORRUPTED;
	} else {
		ret = 0;
	}

	if (maptype == 0) {
		kunmap_local(headpage);
	} else if (maptype == 1) {
		vm_unmap_ram(src, ctx->inpages);
	} else if (maptype == 2) {
		z_erofs_put_gbuf(src);
	} else if (maptype != 3) {
		DBG_BUGON(1);
		return -EFAULT;
	}
	return ret;
}

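/*
 * Note on the two decode calls above: LZ4_decompress_safe() requires the
 * exact compressed size, which zero_padding makes recoverable. Without it
 * (legacy images), or when only a prefix is wanted (partial decoding), the
 * pcluster may carry more compressed data than needed, so the _safe_partial
 * variant is used and told to stop after rq->outputsize bytes.
 */
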
static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
				  struct page **pagepool)
{
	struct z_erofs_lz4_decompress_ctx ctx;
	unsigned int dst_maptype;
	void *dst;
	int ret;

	ctx.rq = rq;
	ctx.oend = rq->pageofs_out + rq->outputsize;
	ctx.outpages = PAGE_ALIGN(ctx.oend) >> PAGE_SHIFT;
	ctx.inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;

	/* an optimized fast path for non-bigpcluster cases only, for now */
	if (ctx.inpages == 1 && ctx.outpages == 1 && !rq->inplace_io) {
		DBG_BUGON(!*rq->out);
		dst = kmap_local_page(*rq->out);
		dst_maptype = 0;
		goto dstmap_out;
	}

	/* general decoding path which can be used for all cases */
	ret = z_erofs_lz4_prepare_dstpages(&ctx, pagepool);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		dst = page_address(*rq->out);
		dst_maptype = 1;
	} else {
		dst = erofs_vm_map_ram(rq->out, ctx.outpages);
		if (!dst)
			return -ENOMEM;
		dst_maptype = 2;
	}

dstmap_out:
	ret = z_erofs_lz4_decompress_mem(&ctx, dst);
	if (!dst_maptype)
		kunmap_local(dst);
	else if (dst_maptype == 2)
		vm_unmap_ram(dst, ctx.outpages);
	return ret;
}

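/*
 * dst_maptype mirrors the input-side maptype: 0 - a single kmapped output
 * page (fast path); 1 - physically consecutive pages addressed via
 * page_address(), no teardown needed; 2 - a vm_map_ram() virtual mapping.
 */
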
static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
				   struct page **pagepool)
{
	const unsigned int nrpages_in =
		PAGE_ALIGN(rq->pageofs_in + rq->inputsize) >> PAGE_SHIFT;
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const unsigned int bs = rq->sb->s_blocksize;
	unsigned int cur = 0, ni = 0, no, pi, po, insz, cnt;
	u8 *kin;

	if (rq->outputsize > rq->inputsize)
		return -EOPNOTSUPP;
	if (rq->alg == Z_EROFS_COMPRESSION_INTERLACED) {
		cur = bs - (rq->pageofs_out & (bs - 1));
		pi = (rq->pageofs_in + rq->inputsize - cur) & ~PAGE_MASK;
		cur = min(cur, rq->outputsize);
		if (cur && rq->out[0]) {
			kin = kmap_local_page(rq->in[nrpages_in - 1]);
			if (rq->out[0] == rq->in[nrpages_in - 1]) {
				memmove(kin + rq->pageofs_out, kin + pi, cur);
				flush_dcache_page(rq->out[0]);
			} else {
				memcpy_to_page(rq->out[0], rq->pageofs_out,
					       kin + pi, cur);
			}
			kunmap_local(kin);
		}
		rq->outputsize -= cur;
	}

	for (; rq->outputsize; rq->pageofs_in = 0, cur += PAGE_SIZE, ni++) {
		insz = min(PAGE_SIZE - rq->pageofs_in, rq->outputsize);
		rq->outputsize -= insz;
		if (!rq->in[ni])
			continue;
		kin = kmap_local_page(rq->in[ni]);
		pi = 0;
		do {
			no = (rq->pageofs_out + cur + pi) >> PAGE_SHIFT;
			po = (rq->pageofs_out + cur + pi) & ~PAGE_MASK;
			DBG_BUGON(no >= nrpages_out);
			cnt = min(insz - pi, PAGE_SIZE - po);
			if (rq->out[no] == rq->in[ni]) {
				memmove(kin + po,
					kin + rq->pageofs_in + pi, cnt);
				flush_dcache_page(rq->out[no]);
			} else if (rq->out[no]) {
				memcpy_to_page(rq->out[no], po,
					       kin + rq->pageofs_in + pi, cnt);
			}
			pi += cnt;
		} while (pi < insz);
		kunmap_local(kin);
	}
	DBG_BUGON(ni > nrpages_in);
	return 0;
}

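/*
 * Both uncompressed ("plain") layouts are handled above: "shifted" stores
 * the data linearly at an offset, while "interlaced" rotates it within the
 * block so that the bytes forming the head of the output sit at the tail of
 * the input; the first branch moves that wrapped head back into place before
 * the straight page-by-page copy. memmove() is used whenever an output page
 * aliases its input page (in-place I/O).
 */
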
const struct z_erofs_decompressor erofs_decompressors[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = {
		.decompress = z_erofs_transform_plain,
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_INTERLACED] = {
		.decompress = z_erofs_transform_plain,
		.name = "interlaced"
	},
	[Z_EROFS_COMPRESSION_LZ4] = {
		.config = z_erofs_load_lz4_config,
		.decompress = z_erofs_lz4_decompress,
		.name = "lz4"
	},
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
	[Z_EROFS_COMPRESSION_LZMA] = {
		.config = z_erofs_load_lzma_config,
		.decompress = z_erofs_lzma_decompress,
		.name = "lzma"
	},
#endif
#ifdef CONFIG_EROFS_FS_ZIP_DEFLATE
	[Z_EROFS_COMPRESSION_DEFLATE] = {
		.config = z_erofs_load_deflate_config,
		.decompress = z_erofs_deflate_decompress,
		.name = "deflate"
	},
#endif
#ifdef CONFIG_EROFS_FS_ZIP_ZSTD
	[Z_EROFS_COMPRESSION_ZSTD] = {
		.config = z_erofs_load_zstd_config,
		.decompress = z_erofs_zstd_decompress,
		.name = "zstd"
	},
#endif
};

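/*
 * The table is indexed by the on-disk algorithm id; entries for algorithms
 * compiled out are left zero-initialized, which z_erofs_parse_cfgs() below
 * detects via the NULL .config hook and rejects as unsupported.
 */
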
int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	unsigned int algs, alg;
	erofs_off_t offset;
	int size, ret = 0;

	if (!erofs_sb_has_compr_cfgs(sbi)) {
		sbi->available_compr_algs = 1 << Z_EROFS_COMPRESSION_LZ4;
		return z_erofs_load_lz4_config(sb, dsb, NULL, 0);
	}

	sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
	if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
		erofs_err(sb, "unidentified algorithms %x, please upgrade kernel",
			  sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
		return -EOPNOTSUPP;
	}

	erofs_init_metabuf(&buf, sb);
	offset = EROFS_SUPER_OFFSET + sbi->sb_size;
	alg = 0;
	for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
		void *data;

		if (!(algs & 1))
			continue;

		data = erofs_read_metadata(sb, &buf, &offset, &size);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			break;
		}

		if (alg >= ARRAY_SIZE(erofs_decompressors) ||
		    !erofs_decompressors[alg].config) {
			erofs_err(sb, "algorithm %d isn't enabled on this kernel",
				  alg);
			ret = -EOPNOTSUPP;
		} else {
			ret = erofs_decompressors[alg].config(sb,
					dsb, data, size);
		}

		kfree(data);
		if (ret)
			break;
	}
	erofs_put_metabuf(&buf);
	return ret;
}
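
/*
 * On-disk layout note (as read by the loop above): when the compr_cfgs
 * feature is set, a variable-sized config blob is stored for each algorithm
 * bit present in available_compr_algs, packed back-to-back starting right
 * past the superblock (EROFS_SUPER_OFFSET + sb_size); erofs_read_metadata()
 * walks them in ascending bit order, advancing `offset` as it goes.
 */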
463