// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#include "compress.h"
#include <linux/lz4.h>

#ifndef LZ4_DISTANCE_MAX	/* history window size */
#define LZ4_DISTANCE_MAX 65535	/* set to maximum value by default */
#endif

#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)  (((srcsize) >> 8) + 32)
#endif

struct z_erofs_lz4_decompress_ctx {
	struct z_erofs_decompress_req *rq;
	/* # of encoded, decoded pages */
	unsigned int inpages, outpages;
	/* decoded block total length (used for in-place decompression) */
	unsigned int oend;
};

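/*
 * Parse the on-disk LZ4 configuration (either the full cfgs payload or the
 * legacy superblock field), validate the maximum pcluster size, derive the
 * match window size in pages and grow the per-CPU buffers accordingly.
 */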
static int z_erofs_load_lz4_config(struct super_block *sb,
			    struct erofs_super_block *dsb, void *data, int size)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct z_erofs_lz4_cfgs *lz4 = data;
	u16 distance;

	if (lz4) {
		if (size < sizeof(struct z_erofs_lz4_cfgs)) {
			erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
			return -EINVAL;
		}
		distance = le16_to_cpu(lz4->max_distance);

		sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
		if (!sbi->lz4.max_pclusterblks) {
			sbi->lz4.max_pclusterblks = 1;	/* reserved case */
		} else if (sbi->lz4.max_pclusterblks >
			   erofs_blknr(sb, Z_EROFS_PCLUSTER_MAX_SIZE)) {
			erofs_err(sb, "too large lz4 pclusterblks %u",
				  sbi->lz4.max_pclusterblks);
			return -EINVAL;
		}
	} else {
		distance = le16_to_cpu(dsb->u1.lz4_max_distance);
		sbi->lz4.max_pclusterblks = 1;
	}

	sbi->lz4.max_distance_pages = distance ?
					DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
					LZ4_MAX_DISTANCE_PAGES;
	return erofs_pcpubuf_growsize(sbi->lz4.max_pclusterblks);
}

/*
 * Fill all gaps with bounce pages if it's a sparse page list. Also check
 * whether all physical pages are consecutive, which is common at moderate
 * compression ratios; returns 1 if all output pages turn out to be
 * consecutive in lowmem, 0 if vmapped access is needed instead.
 */
static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
					struct page **pagepool)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	unsigned int lz4_max_distance_pages =
				EROFS_SB(rq->sb)->lz4.max_distance_pages;
	void *kaddr = NULL;
	unsigned int i, j, top;

	top = 0;
	for (i = j = 0; i < ctx->outpages; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= lz4_max_distance_pages)
			j = 0;

		/* 'valid' bounced can only be tested after a complete round */
		if (!rq->fillgaps && test_bit(j, bounced)) {
			DBG_BUGON(i < lz4_max_distance_pages);
			DBG_BUGON(top >= lz4_max_distance_pages);
			availables[top++] = rq->out[i - lz4_max_distance_pages];
		}

		if (page) {
			__clear_bit(j, bounced);
			if (!PageHighMem(page)) {
				if (!i) {
					kaddr = page_address(page);
					continue;
				}
				if (kaddr &&
				    kaddr + PAGE_SIZE == page_address(page)) {
					kaddr += PAGE_SIZE;
					continue;
				}
			}
			kaddr = NULL;
			continue;
		}
		kaddr = NULL;
		__set_bit(j, bounced);

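		/*
		 * The slot is a hole: reuse a bounce page that has already
		 * slid out of the LZ4 match window if one is available,
		 * otherwise allocate a new short-lived page.
		 */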
		if (top) {
			victim = availables[--top];
			get_page(victim);
		} else {
			victim = erofs_allocpage(pagepool,
						 GFP_KERNEL | __GFP_NOFAIL);
			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
		}
		rq->out[i] = victim;
	}
	return kaddr ? 1 : 0;
}

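/*
 * Prepare the compressed data for decompression and record how it was
 * mapped in *maptype: 0 - the single kmapped input page is used as-is,
 * 1 - the input pages are vm_map_ram()ed, 2 - the (potentially overlapping)
 * data has been copied into a per-CPU buffer.
 */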
static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
			void *inpage, unsigned int *inputmargin, int *maptype,
			bool may_inplace)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	unsigned int omargin, total, i, j;
	struct page **in;
	void *src, *tmp;

	if (rq->inplace_io) {
		omargin = PAGE_ALIGN(ctx->oend) - ctx->oend;
		if (rq->partial_decoding || !may_inplace ||
		    omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
			goto docopy;

		for (i = 0; i < ctx->inpages; ++i) {
			DBG_BUGON(rq->in[i] == NULL);
			for (j = 0; j < ctx->outpages - ctx->inpages + i; ++j)
				if (rq->out[j] == rq->in[i])
					goto docopy;
		}
	}

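	/* no overlap conflicts: the input pages can be used directly */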
	if (ctx->inpages <= 1) {
		*maptype = 0;
		return inpage;
	}
	kunmap_local(inpage);
	might_sleep();
	src = erofs_vm_map_ram(rq->in, ctx->inpages);
	if (!src)
		return ERR_PTR(-ENOMEM);
	*maptype = 1;
	return src;

docopy:
	/* Otherwise, copy the compressed data that may overlap the output into a per-CPU buffer */
	in = rq->in;
	src = erofs_get_pcpubuf(ctx->inpages);
	if (!src) {
		DBG_BUGON(1);
		kunmap_local(inpage);
		return ERR_PTR(-EFAULT);
	}

	tmp = src;
	total = rq->inputsize;
	while (total) {
		unsigned int page_copycnt =
			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);

		if (!inpage)
			inpage = kmap_local_page(*in);
		memcpy(tmp, inpage + *inputmargin, page_copycnt);
		kunmap_local(inpage);
		inpage = NULL;
		tmp += page_copycnt;
		total -= page_copycnt;
		++in;
		*inputmargin = 0;
	}
	*maptype = 2;
	return src;
}

/*
 * Get the exact inputsize with the zero_padding feature.
 *  - For LZ4, it works when the zero_padding feature is enabled (5.3+);
 *  - For MicroLZMA, it's always enabled.
 */
int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
			 unsigned int padbufsize)
{
	const char *padend;

	padend = memchr_inv(padbuf, 0, padbufsize);
	if (!padend)
		return -EFSCORRUPTED;
	rq->inputsize -= padend - padbuf;
	rq->pageofs_in += padend - padbuf;
	return 0;
}

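/*
 * Decompress one pcluster into the contiguous buffer @out: fix up the exact
 * input size when zero_padding is in use, map the (possibly in-place)
 * compressed data, then feed it to the in-kernel LZ4 decompressor.
 */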
static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
				      u8 *out)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	bool support_0padding = false, may_inplace = false;
	unsigned int inputmargin;
	u8 *headpage, *src;
	int ret, maptype;

	DBG_BUGON(*rq->in == NULL);
	headpage = kmap_local_page(*rq->in);

	/* LZ4 in-place decompression is only safe if zero_padding is enabled */
	if (erofs_sb_has_zero_padding(EROFS_SB(rq->sb))) {
		support_0padding = true;
		ret = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
				min_t(unsigned int, rq->inputsize,
				      rq->sb->s_blocksize - rq->pageofs_in));
		if (ret) {
			kunmap_local(headpage);
			return ret;
		}
		may_inplace = !((rq->pageofs_in + rq->inputsize) &
				(rq->sb->s_blocksize - 1));
	}

	inputmargin = rq->pageofs_in;
	src = z_erofs_lz4_handle_overlap(ctx, headpage, &inputmargin,
					 &maptype, may_inplace);
	if (IS_ERR(src))
		return PTR_ERR(src);

	/* legacy format could compress extra data in a pcluster. */
	if (rq->partial_decoding || !support_0padding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
				rq->inputsize, rq->outputsize, rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  rq->inputsize, rq->outputsize);

	if (ret != rq->outputsize) {
		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
			  ret, rq->inputsize, inputmargin, rq->outputsize);

		print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
			       16, 1, src + inputmargin, rq->inputsize, true);
		print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
			       16, 1, out, rq->outputsize, true);

		if (ret >= 0)
			memset(out + ret, 0, rq->outputsize - ret);
		ret = -EIO;
	} else {
		ret = 0;
	}

	if (maptype == 0) {
		kunmap_local(headpage);
	} else if (maptype == 1) {
		vm_unmap_ram(src, ctx->inpages);
	} else if (maptype == 2) {
		erofs_put_pcpubuf(src);
	} else {
		DBG_BUGON(1);
		return -EFAULT;
	}
	return ret;
}

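/*
 * LZ4 entry point: map the output with a fast path for the single-page
 * case, directly via page_address() when the destination pages are
 * physically consecutive, or through vm_map_ram() otherwise.
 */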
static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
				  struct page **pagepool)
{
	struct z_erofs_lz4_decompress_ctx ctx;
	unsigned int dst_maptype;
	void *dst;
	int ret;

	ctx.rq = rq;
	ctx.oend = rq->pageofs_out + rq->outputsize;
	ctx.outpages = PAGE_ALIGN(ctx.oend) >> PAGE_SHIFT;
	ctx.inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;

	/* an optimized fast path, currently for non-bigpcluster cases only */
	if (ctx.inpages == 1 && ctx.outpages == 1 && !rq->inplace_io) {
		DBG_BUGON(!*rq->out);
		dst = kmap_local_page(*rq->out);
		dst_maptype = 0;
		goto dstmap_out;
	}

	/* general decoding path which can be used for all cases */
	ret = z_erofs_lz4_prepare_dstpages(&ctx, pagepool);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		dst = page_address(*rq->out);
		dst_maptype = 1;
	} else {
		dst = erofs_vm_map_ram(rq->out, ctx.outpages);
		if (!dst)
			return -ENOMEM;
		dst_maptype = 2;
	}

dstmap_out:
	ret = z_erofs_lz4_decompress_mem(&ctx, dst + rq->pageofs_out);
	if (!dst_maptype)
		kunmap_local(dst);
	else if (dst_maptype == 2)
		vm_unmap_ram(dst, ctx.outpages);
	return ret;
}

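/*
 * Copy out uncompressed ("plain") pclusters: SHIFTED data starts at the
 * beginning of the input block, INTERLACED data sits at the same in-block
 * offset as in the output, so a copy may be split around the first page
 * boundary.
 */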
static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
				   struct page **pagepool)
{
	const unsigned int inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;
	const unsigned int outpages =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const unsigned int righthalf = min_t(unsigned int, rq->outputsize,
					     PAGE_SIZE - rq->pageofs_out);
	const unsigned int lefthalf = rq->outputsize - righthalf;
	const unsigned int interlaced_offset =
		rq->alg == Z_EROFS_COMPRESSION_SHIFTED ? 0 : rq->pageofs_out;
	u8 *src;

	if (outpages > 2 && rq->alg == Z_EROFS_COMPRESSION_SHIFTED) {
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	if (rq->out[0] == *rq->in) {
		DBG_BUGON(rq->pageofs_out);
		return 0;
	}

	src = kmap_local_page(rq->in[inpages - 1]) + rq->pageofs_in;
	if (rq->out[0])
		memcpy_to_page(rq->out[0], rq->pageofs_out,
			       src + interlaced_offset, righthalf);

	if (outpages > inpages) {
		DBG_BUGON(!rq->out[outpages - 1]);
		if (rq->out[outpages - 1] != rq->in[inpages - 1]) {
			memcpy_to_page(rq->out[outpages - 1], 0, src +
					(interlaced_offset ? 0 : righthalf),
				       lefthalf);
		} else if (!interlaced_offset) {
			memmove(src, src + righthalf, lefthalf);
			flush_dcache_page(rq->in[inpages - 1]);
		}
	}
	kunmap_local(src);
	return 0;
}

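/* the decompressor table, indexed by on-disk Z_EROFS_COMPRESSION_* values */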
const struct z_erofs_decompressor erofs_decompressors[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = {
		.decompress = z_erofs_transform_plain,
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_INTERLACED] = {
		.decompress = z_erofs_transform_plain,
		.name = "interlaced"
	},
	[Z_EROFS_COMPRESSION_LZ4] = {
		.config = z_erofs_load_lz4_config,
		.decompress = z_erofs_lz4_decompress,
		.name = "lz4"
	},
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
	[Z_EROFS_COMPRESSION_LZMA] = {
		.config = z_erofs_load_lzma_config,
		.decompress = z_erofs_lzma_decompress,
		.name = "lzma"
	},
#endif
#ifdef CONFIG_EROFS_FS_ZIP_DEFLATE
	[Z_EROFS_COMPRESSION_DEFLATE] = {
		.config = z_erofs_load_deflate_config,
		.decompress = z_erofs_deflate_decompress,
		.name = "deflate"
	},
#endif
};

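/*
 * Parse the per-algorithm compression configurations that follow the
 * superblock, or fall back to the legacy LZ4-only settings when the
 * compr_cfgs feature is absent.
 */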
int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	unsigned int algs, alg;
	erofs_off_t offset;
	int size, ret = 0;

	if (!erofs_sb_has_compr_cfgs(sbi)) {
		sbi->available_compr_algs = Z_EROFS_COMPRESSION_LZ4;
		return z_erofs_load_lz4_config(sb, dsb, NULL, 0);
	}

	sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
	if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
		erofs_err(sb, "unidentified algorithms %x, please upgrade kernel",
			  sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
		return -EOPNOTSUPP;
	}

	erofs_init_metabuf(&buf, sb);
	offset = EROFS_SUPER_OFFSET + sbi->sb_size;
	alg = 0;
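	/* config payloads are stored consecutively in algorithm-bit order */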
	for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
		void *data;

		if (!(algs & 1))
			continue;

		data = erofs_read_metadata(sb, &buf, &offset, &size);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			break;
		}

		if (alg >= ARRAY_SIZE(erofs_decompressors) ||
		    !erofs_decompressors[alg].config) {
			erofs_err(sb, "algorithm %d isn't enabled on this kernel",
				  alg);
			ret = -EOPNOTSUPP;
		} else {
			ret = erofs_decompressors[alg].config(sb,
					dsb, data, size);
		}

		kfree(data);
		if (ret)
			break;
	}
	erofs_put_metabuf(&buf);
	return ret;
}
444