// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2024 Alibaba Cloud
 */
#include "compress.h"
#include <linux/lz4.h>

#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)

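/*
 * Load the on-disk LZ4 configuration: @data points to a z_erofs_lz4_cfgs
 * record if the compr_cfgs feature is enabled, otherwise it is NULL and the
 * legacy lz4_max_distance field of the super block is used.  Record the
 * maximum pcluster size and match distance (in pages) and make sure the
 * global buffers can hold such pclusters.
 */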
static int z_erofs_load_lz4_config(struct super_block *sb,
			    struct erofs_super_block *dsb, void *data, int size)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct z_erofs_lz4_cfgs *lz4 = data;
	u16 distance;

	if (lz4) {
		if (size < sizeof(struct z_erofs_lz4_cfgs)) {
			erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
			return -EINVAL;
		}
		distance = le16_to_cpu(lz4->max_distance);

		sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
		if (!sbi->lz4.max_pclusterblks) {
			sbi->lz4.max_pclusterblks = 1;	/* reserved case */
		} else if (sbi->lz4.max_pclusterblks >
			   erofs_blknr(sb, Z_EROFS_PCLUSTER_MAX_SIZE)) {
			erofs_err(sb, "too large lz4 pclusterblks %u",
				  sbi->lz4.max_pclusterblks);
			return -EINVAL;
		}
	} else {
		distance = le16_to_cpu(dsb->u1.lz4_max_distance);
		sbi->lz4.max_pclusterblks = 1;
	}

	sbi->lz4.max_distance_pages = distance ?
					DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
					LZ4_MAX_DISTANCE_PAGES;
	return z_erofs_gbuf_growsize(sbi->lz4.max_pclusterblks);
}

/*
 * Fill all gaps with bounce pages if it's a sparse page list. Also check if
 * all physical pages are consecutive, which can be seen for moderate CR.
 */
static int z_erofs_lz4_prepare_dstpages(struct z_erofs_decompress_req *rq,
					struct page **pagepool)
{
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	unsigned int lz4_max_distance_pages =
				EROFS_SB(rq->sb)->lz4.max_distance_pages;
	void *kaddr = NULL;
	unsigned int i, j, top;

	top = 0;
	for (i = j = 0; i < rq->outpages; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= lz4_max_distance_pages)
			j = 0;

		/* 'valid' bounced can only be tested after a complete round */
		if (!rq->fillgaps && test_bit(j, bounced)) {
			DBG_BUGON(i < lz4_max_distance_pages);
			DBG_BUGON(top >= lz4_max_distance_pages);
			availables[top++] = rq->out[i - lz4_max_distance_pages];
		}

		if (page) {
			__clear_bit(j, bounced);
			if (!PageHighMem(page)) {
				if (!i) {
					kaddr = page_address(page);
					continue;
				}
				if (kaddr &&
				    kaddr + PAGE_SIZE == page_address(page)) {
					kaddr += PAGE_SIZE;
					continue;
				}
			}
			kaddr = NULL;
			continue;
		}
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			victim = availables[--top];
		} else {
			victim = __erofs_allocpage(pagepool, rq->gfp, true);
			if (!victim)
				return -ENOMEM;
			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
		}
		rq->out[i] = victim;
	}
	return kaddr ? 1 : 0;
}

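/*
 * Decide how the compressed input should be mapped for in-memory LZ4
 * decompression and return the virtual address of the compressed data:
 *   *maptype == 0: use the already-kmapped single input page;
 *   *maptype == 1: the input pages were vmapped contiguously;
 *   *maptype == 2: the (possibly overlapped) input was copied into a
 *                  global per-CPU buffer;
 *   *maptype == 3: true in-place decompression within the output buffer.
 */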
static void *z_erofs_lz4_handle_overlap(struct z_erofs_decompress_req *rq,
			void *inpage, void *out, unsigned int *inputmargin,
			int *maptype, bool may_inplace)
{
	unsigned int oend, omargin, total, i;
	struct page **in;
	void *src, *tmp;

	if (rq->inplace_io) {
		oend = rq->pageofs_out + rq->outputsize;
		omargin = PAGE_ALIGN(oend) - oend;
		if (rq->partial_decoding || !may_inplace ||
		    omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
			goto docopy;

		for (i = 0; i < rq->inpages; ++i)
			if (rq->out[rq->outpages - rq->inpages + i] !=
			    rq->in[i])
				goto docopy;
		kunmap_local(inpage);
		*maptype = 3;
		return out + ((rq->outpages - rq->inpages) << PAGE_SHIFT);
	}

	if (rq->inpages <= 1) {
		*maptype = 0;
		return inpage;
	}
	kunmap_local(inpage);
	src = erofs_vm_map_ram(rq->in, rq->inpages);
	if (!src)
		return ERR_PTR(-ENOMEM);
	*maptype = 1;
	return src;

docopy:
	/* Or copy compressed data which can be overlapped to per-CPU buffer */
	in = rq->in;
	src = z_erofs_get_gbuf(rq->inpages);
	if (!src) {
		DBG_BUGON(1);
		kunmap_local(inpage);
		return ERR_PTR(-EFAULT);
	}

	tmp = src;
	total = rq->inputsize;
	while (total) {
		unsigned int page_copycnt =
			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);

		if (!inpage)
			inpage = kmap_local_page(*in);
		memcpy(tmp, inpage + *inputmargin, page_copycnt);
		kunmap_local(inpage);
		inpage = NULL;
		tmp += page_copycnt;
		total -= page_copycnt;
		++in;
		*inputmargin = 0;
	}
	*maptype = 2;
	return src;
}

/*
 * Get the exact inputsize with zero_padding feature.
 * - For LZ4, it should work if zero_padding feature is on (5.3+);
 * - For MicroLZMA, it'd be enabled all the time.
 */
int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
			 unsigned int padbufsize)
{
	const char *padend;

	padend = memchr_inv(padbuf, 0, padbufsize);
	if (!padend)
		return -EFSCORRUPTED;
	rq->inputsize -= padend - padbuf;
	rq->pageofs_in += padend - padbuf;
	return 0;
}

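/*
 * Decompress an LZ4 pcluster into the virtually contiguous buffer @dst.
 * With the zero_padding feature, the exact compressed length is recovered
 * first by skipping the zero padding in front of the compressed data.
 */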
static int z_erofs_lz4_decompress_mem(struct z_erofs_decompress_req *rq, u8 *dst)
{
	bool support_0padding = false, may_inplace = false;
	unsigned int inputmargin;
	u8 *out, *headpage, *src;
	int ret, maptype;

	DBG_BUGON(*rq->in == NULL);
	headpage = kmap_local_page(*rq->in);

	/* LZ4 decompression inplace is only safe if zero_padding is enabled */
	if (erofs_sb_has_zero_padding(EROFS_SB(rq->sb))) {
		support_0padding = true;
		ret = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
				min_t(unsigned int, rq->inputsize,
				      rq->sb->s_blocksize - rq->pageofs_in));
		if (ret) {
			kunmap_local(headpage);
			return ret;
		}
		may_inplace = !((rq->pageofs_in + rq->inputsize) &
				(rq->sb->s_blocksize - 1));
	}

	inputmargin = rq->pageofs_in;
	src = z_erofs_lz4_handle_overlap(rq, headpage, dst, &inputmargin,
					 &maptype, may_inplace);
	if (IS_ERR(src))
		return PTR_ERR(src);

	out = dst + rq->pageofs_out;
	/* legacy format could compress extra data in a pcluster. */
	if (rq->partial_decoding || !support_0padding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
				rq->inputsize, rq->outputsize, rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  rq->inputsize, rq->outputsize);

	if (ret != rq->outputsize) {
		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
			  ret, rq->inputsize, inputmargin, rq->outputsize);
		if (ret >= 0)
			memset(out + ret, 0, rq->outputsize - ret);
		ret = -EFSCORRUPTED;
	} else {
		ret = 0;
	}

	if (maptype == 0) {
		kunmap_local(headpage);
	} else if (maptype == 1) {
		vm_unmap_ram(src, rq->inpages);
	} else if (maptype == 2) {
		z_erofs_put_gbuf(src);
	} else if (maptype != 3) {
		DBG_BUGON(1);
		return -EFAULT;
	}
	return ret;
}

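/*
 * LZ4 decompression frontend: map the output pages into a virtually
 * contiguous buffer (kmap for the single-page fast path, page_address()
 * if the pages turn out to be physically consecutive, or vm_map_ram()
 * otherwise) and then decompress into it.
 */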
static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
				  struct page **pagepool)
{
	unsigned int dst_maptype;
	void *dst;
	int ret;

	/* one optimized fast path only for non bigpcluster cases yet */
	if (rq->inpages == 1 && rq->outpages == 1 && !rq->inplace_io) {
		DBG_BUGON(!*rq->out);
		dst = kmap_local_page(*rq->out);
		dst_maptype = 0;
	} else {
		/* general decoding path which can be used for all cases */
		ret = z_erofs_lz4_prepare_dstpages(rq, pagepool);
		if (ret < 0)
			return ret;
		if (ret > 0) {
			dst = page_address(*rq->out);
			dst_maptype = 1;
		} else {
			dst = erofs_vm_map_ram(rq->out, rq->outpages);
			if (!dst)
				return -ENOMEM;
			dst_maptype = 2;
		}
	}
	ret = z_erofs_lz4_decompress_mem(rq, dst);
	if (!dst_maptype)
		kunmap_local(dst);
	else if (dst_maptype == 2)
		vm_unmap_ram(dst, rq->outpages);
	return ret;
}

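/*
 * Handle uncompressed (shifted / interlaced) pclusters: data is copied
 * (or memmove()d for in-place I/O) from the input pages to the output
 * pages; no real decompression is involved.
 */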
static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
				   struct page **pagepool)
{
	const unsigned int nrpages_in = rq->inpages, nrpages_out = rq->outpages;
	const unsigned int bs = rq->sb->s_blocksize;
	unsigned int cur = 0, ni = 0, no, pi, po, insz, cnt;
	u8 *kin;

	if (rq->outputsize > rq->inputsize)
		return -EOPNOTSUPP;
	if (rq->alg == Z_EROFS_COMPRESSION_INTERLACED) {
		cur = bs - (rq->pageofs_out & (bs - 1));
		pi = (rq->pageofs_in + rq->inputsize - cur) & ~PAGE_MASK;
		cur = min(cur, rq->outputsize);
		if (cur && rq->out[0]) {
			kin = kmap_local_page(rq->in[nrpages_in - 1]);
			if (rq->out[0] == rq->in[nrpages_in - 1]) {
				memmove(kin + rq->pageofs_out, kin + pi, cur);
				flush_dcache_page(rq->out[0]);
			} else {
				memcpy_to_page(rq->out[0], rq->pageofs_out,
					       kin + pi, cur);
			}
			kunmap_local(kin);
		}
		rq->outputsize -= cur;
	}

	for (; rq->outputsize; rq->pageofs_in = 0, cur += insz, ni++) {
		insz = min(PAGE_SIZE - rq->pageofs_in, rq->outputsize);
		rq->outputsize -= insz;
		if (!rq->in[ni])
			continue;
		kin = kmap_local_page(rq->in[ni]);
		pi = 0;
		do {
			no = (rq->pageofs_out + cur + pi) >> PAGE_SHIFT;
			po = (rq->pageofs_out + cur + pi) & ~PAGE_MASK;
			DBG_BUGON(no >= nrpages_out);
			cnt = min(insz - pi, PAGE_SIZE - po);
			if (rq->out[no] == rq->in[ni]) {
				memmove(kin + po,
					kin + rq->pageofs_in + pi, cnt);
				flush_dcache_page(rq->out[no]);
			} else if (rq->out[no]) {
				memcpy_to_page(rq->out[no], po,
					       kin + rq->pageofs_in + pi, cnt);
			}
			pi += cnt;
		} while (pi < insz);
		kunmap_local(kin);
	}
	DBG_BUGON(ni > nrpages_in);
	return 0;
}

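/*
 * Switch the output and/or input buffers for the streaming decompressors
 * (LZMA, DEFLATE, ZSTD): map the next output page once the current one is
 * filled, map the next input page once it has been fully consumed, and
 * handle in-place I/O overlaps via the bounce buffer or short-lived pages.
 */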
int z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx, void **dst,
			       void **src, struct page **pgpl)
{
	struct z_erofs_decompress_req *rq = dctx->rq;
	struct super_block *sb = rq->sb;
	struct page **pgo, *tmppage;
	unsigned int j;

	if (!dctx->avail_out) {
		if (++dctx->no >= rq->outpages || !rq->outputsize) {
			erofs_err(sb, "insufficient space for decompressed data");
			return -EFSCORRUPTED;
		}

		if (dctx->kout)
			kunmap_local(dctx->kout);
		dctx->avail_out = min(rq->outputsize, PAGE_SIZE - rq->pageofs_out);
		rq->outputsize -= dctx->avail_out;
		pgo = &rq->out[dctx->no];
		if (!*pgo && rq->fillgaps) {		/* deduped */
			*pgo = erofs_allocpage(pgpl, rq->gfp);
			if (!*pgo) {
				dctx->kout = NULL;
				return -ENOMEM;
			}
			set_page_private(*pgo, Z_EROFS_SHORTLIVED_PAGE);
		}
		if (*pgo) {
			dctx->kout = kmap_local_page(*pgo);
			*dst = dctx->kout + rq->pageofs_out;
		} else {
			*dst = dctx->kout = NULL;
		}
		rq->pageofs_out = 0;
	}

	if (dctx->inbuf_pos == dctx->inbuf_sz && rq->inputsize) {
		if (++dctx->ni >= rq->inpages) {
			erofs_err(sb, "invalid compressed data");
			return -EFSCORRUPTED;
		}
		if (dctx->kout) /* unlike kmap(), take care of the orders */
			kunmap_local(dctx->kout);
		kunmap_local(dctx->kin);

		dctx->inbuf_sz = min_t(u32, rq->inputsize, PAGE_SIZE);
		rq->inputsize -= dctx->inbuf_sz;
		dctx->kin = kmap_local_page(rq->in[dctx->ni]);
		*src = dctx->kin;
		dctx->bounced = false;
		if (dctx->kout) {
			j = (u8 *)*dst - dctx->kout;
			dctx->kout = kmap_local_page(rq->out[dctx->no]);
			*dst = dctx->kout + j;
		}
		dctx->inbuf_pos = 0;
	}

	/*
	 * Handle overlapping: Use the given bounce buffer if the input data is
	 * under processing; Or utilize short-lived pages from the on-stack page
	 * pool, where pages are shared among the same request.  Note that only
	 * a few inplace I/O pages need to be doubled.
	 */
	if (!dctx->bounced && rq->out[dctx->no] == rq->in[dctx->ni]) {
		memcpy(dctx->bounce, *src, dctx->inbuf_sz);
		*src = dctx->bounce;
		dctx->bounced = true;
	}

	for (j = dctx->ni + 1; j < rq->inpages; ++j) {
		if (rq->out[dctx->no] != rq->in[j])
			continue;
		tmppage = erofs_allocpage(pgpl, rq->gfp);
		if (!tmppage)
			return -ENOMEM;
		set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
		copy_highpage(tmppage, rq->in[j]);
		rq->in[j] = tmppage;
	}
	return 0;
}

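/* built-in decompressor handlers indexed by algorithm id */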
const struct z_erofs_decompressor *z_erofs_decomp[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = &(const struct z_erofs_decompressor) {
		.decompress = z_erofs_transform_plain,
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_INTERLACED] = &(const struct z_erofs_decompressor) {
		.decompress = z_erofs_transform_plain,
		.name = "interlaced"
	},
	[Z_EROFS_COMPRESSION_LZ4] = &(const struct z_erofs_decompressor) {
		.config = z_erofs_load_lz4_config,
		.decompress = z_erofs_lz4_decompress,
		.init = z_erofs_gbuf_init,
		.exit = z_erofs_gbuf_exit,
		.name = "lz4"
	},
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
	[Z_EROFS_COMPRESSION_LZMA] = &z_erofs_lzma_decomp,
#endif
#ifdef CONFIG_EROFS_FS_ZIP_DEFLATE
	[Z_EROFS_COMPRESSION_DEFLATE] = &z_erofs_deflate_decomp,
#endif
#ifdef CONFIG_EROFS_FS_ZIP_ZSTD
	[Z_EROFS_COMPRESSION_ZSTD] = &z_erofs_zstd_decomp,
#endif
};

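/*
 * Parse the per-algorithm compression configurations stored right after
 * the on-disk super block; without the compr_cfgs feature, fall back to
 * the legacy LZ4-only setup.
 */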
int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	unsigned int algs, alg;
	erofs_off_t offset;
	int size, ret = 0;

	if (!erofs_sb_has_compr_cfgs(sbi)) {
		sbi->available_compr_algs = 1 << Z_EROFS_COMPRESSION_LZ4;
		return z_erofs_load_lz4_config(sb, dsb, NULL, 0);
	}

	sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
	if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
		erofs_err(sb, "unidentified algorithms %x, please upgrade kernel",
			  sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
		return -EOPNOTSUPP;
	}

	erofs_init_metabuf(&buf, sb);
	offset = EROFS_SUPER_OFFSET + sbi->sb_size;
	alg = 0;
	for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
		const struct z_erofs_decompressor *dec = z_erofs_decomp[alg];
		void *data;

		if (!(algs & 1))
			continue;

		data = erofs_read_metadata(sb, &buf, &offset, &size);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			break;
		}

		if (alg < Z_EROFS_COMPRESSION_MAX && dec && dec->config) {
			ret = dec->config(sb, dsb, data, size);
		} else {
			erofs_err(sb, "algorithm %d isn't enabled on this kernel",
				  alg);
			ret = -EOPNOTSUPP;
		}
		kfree(data);
		if (ret)
			break;
	}
	erofs_put_metabuf(&buf);
	return ret;
}

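/*
 * Initialize all built-in decompressors; on failure, tear down the ones
 * that were already initialized.
 */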
int __init z_erofs_init_decompressor(void)
{
	int i, err;

	for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i) {
		err = z_erofs_decomp[i] ? z_erofs_decomp[i]->init() : 0;
		if (err) {
			while (i--)
				if (z_erofs_decomp[i])
					z_erofs_decomp[i]->exit();
			return err;
		}
	}
	return 0;
}

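/* tear down all built-in decompressors */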
void z_erofs_exit_decompressor(void)
{
	int i;

	for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i)
		if (z_erofs_decomp[i])
			z_erofs_decomp[i]->exit();
}