// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2024 Alibaba Cloud
 */
#include "compress.h"
#include <linux/lz4.h>

#ifndef LZ4_DISTANCE_MAX	/* history window size */
#define LZ4_DISTANCE_MAX	65535	/* set to maximum value by default */
#endif

#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)	(((srcsize) >> 8) + 32)
#endif

struct z_erofs_lz4_decompress_ctx {
	struct z_erofs_decompress_req *rq;
	/* # of encoded, decoded pages */
	unsigned int inpages, outpages;
	/* decoded block total length (used for in-place decompression) */
	unsigned int oend;
};

static int z_erofs_load_lz4_config(struct super_block *sb,
		struct erofs_super_block *dsb, void *data, int size)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct z_erofs_lz4_cfgs *lz4 = data;
	u16 distance;

	if (lz4) {
		if (size < sizeof(struct z_erofs_lz4_cfgs)) {
			erofs_err(sb, "invalid lz4 cfgs, size=%u", size);
			return -EINVAL;
		}
		distance = le16_to_cpu(lz4->max_distance);

		sbi->lz4.max_pclusterblks = le16_to_cpu(lz4->max_pclusterblks);
		if (!sbi->lz4.max_pclusterblks) {
			sbi->lz4.max_pclusterblks = 1;	/* reserved case */
		} else if (sbi->lz4.max_pclusterblks >
			   erofs_blknr(sb, Z_EROFS_PCLUSTER_MAX_SIZE)) {
			erofs_err(sb, "too large lz4 pclusterblks %u",
				  sbi->lz4.max_pclusterblks);
			return -EINVAL;
		}
	} else {
		distance = le16_to_cpu(dsb->u1.lz4_max_distance);
		sbi->lz4.max_pclusterblks = 1;
	}

	sbi->lz4.max_distance_pages = distance ?
					DIV_ROUND_UP(distance, PAGE_SIZE) + 1 :
					LZ4_MAX_DISTANCE_PAGES;
	return z_erofs_gbuf_growsize(sbi->lz4.max_pclusterblks);
}

/*
 * Fill all gaps with bounce pages if it's a sparse page list. Also check if
 * all physical pages are consecutive, which can be seen for moderate CR.
 */
static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
					struct page **pagepool)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
					   BITS_PER_LONG)] = { 0 };
	unsigned int lz4_max_distance_pages =
				EROFS_SB(rq->sb)->lz4.max_distance_pages;
	void *kaddr = NULL;
	unsigned int i, j, top;

	top = 0;
	for (i = j = 0; i < ctx->outpages; ++i, ++j) {
		struct page *const page = rq->out[i];
		struct page *victim;

		if (j >= lz4_max_distance_pages)
			j = 0;

		/* 'valid' bounced can only be tested after a complete round */
		if (!rq->fillgaps && test_bit(j, bounced)) {
			DBG_BUGON(i < lz4_max_distance_pages);
			DBG_BUGON(top >= lz4_max_distance_pages);
			availables[top++] = rq->out[i - lz4_max_distance_pages];
		}

		if (page) {
			__clear_bit(j, bounced);
			if (!PageHighMem(page)) {
				if (!i) {
					kaddr = page_address(page);
					continue;
				}
				if (kaddr &&
				    kaddr + PAGE_SIZE == page_address(page)) {
					kaddr += PAGE_SIZE;
					continue;
				}
			}
			kaddr = NULL;
			continue;
		}
		kaddr = NULL;
		__set_bit(j, bounced);

		if (top) {
			victim = availables[--top];
		} else {
			victim = __erofs_allocpage(pagepool, rq->gfp, true);
			if (!victim)
				return -ENOMEM;
			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
		}
		rq->out[i] = victim;
	}
	return kaddr ? 1 : 0;
}

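/*
 * Prepare the compressed source for decompression: reuse the in-place I/O
 * pages directly when it's safe, keep a single mapped page as-is, vmap
 * multiple input pages, or fall back to copying the (possibly overlapped)
 * compressed data into a per-CPU global buffer.  *maptype records which
 * method was chosen so the caller can release the mapping accordingly.
 */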
static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
			void *inpage, void *out, unsigned int *inputmargin,
			int *maptype, bool may_inplace)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	unsigned int omargin, total, i;
	struct page **in;
	void *src, *tmp;

	if (rq->inplace_io) {
		omargin = PAGE_ALIGN(ctx->oend) - ctx->oend;
		if (rq->partial_decoding || !may_inplace ||
		    omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
			goto docopy;

		for (i = 0; i < ctx->inpages; ++i)
			if (rq->out[ctx->outpages - ctx->inpages + i] !=
			    rq->in[i])
				goto docopy;
		kunmap_local(inpage);
		*maptype = 3;
		return out + ((ctx->outpages - ctx->inpages) << PAGE_SHIFT);
	}

	if (ctx->inpages <= 1) {
		*maptype = 0;
		return inpage;
	}
	kunmap_local(inpage);
	src = erofs_vm_map_ram(rq->in, ctx->inpages);
	if (!src)
		return ERR_PTR(-ENOMEM);
	*maptype = 1;
	return src;

docopy:
	/* Or copy compressed data which can be overlapped to per-CPU buffer */
	in = rq->in;
	src = z_erofs_get_gbuf(ctx->inpages);
	if (!src) {
		DBG_BUGON(1);
		kunmap_local(inpage);
		return ERR_PTR(-EFAULT);
	}

	tmp = src;
	total = rq->inputsize;
	while (total) {
		unsigned int page_copycnt =
			min_t(unsigned int, total, PAGE_SIZE - *inputmargin);

		if (!inpage)
			inpage = kmap_local_page(*in);
		memcpy(tmp, inpage + *inputmargin, page_copycnt);
		kunmap_local(inpage);
		inpage = NULL;
		tmp += page_copycnt;
		total -= page_copycnt;
		++in;
		*inputmargin = 0;
	}
	*maptype = 2;
	return src;
}

/*
 * Get the exact inputsize with zero_padding feature.
 * - For LZ4, it should work if zero_padding feature is on (5.3+);
 * - For MicroLZMA, it'd be enabled all the time.
 */
int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
			 unsigned int padbufsize)
{
	const char *padend;

	padend = memchr_inv(padbuf, 0, padbufsize);
	if (!padend)
		return -EFSCORRUPTED;
	rq->inputsize -= padend - padbuf;
	rq->pageofs_in += padend - padbuf;
	return 0;
}

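/*
 * Decompress the whole pcluster into the virtually contiguous buffer @dst.
 * The mapping chosen by z_erofs_lz4_handle_overlap() is released here
 * according to the recorded maptype.
 */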
static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
				      u8 *dst)
{
	struct z_erofs_decompress_req *rq = ctx->rq;
	bool support_0padding = false, may_inplace = false;
	unsigned int inputmargin;
	u8 *out, *headpage, *src;
	int ret, maptype;

	DBG_BUGON(*rq->in == NULL);
	headpage = kmap_local_page(*rq->in);

	/* LZ4 decompression inplace is only safe if zero_padding is enabled */
	if (erofs_sb_has_zero_padding(EROFS_SB(rq->sb))) {
		support_0padding = true;
		ret = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
				min_t(unsigned int, rq->inputsize,
				      rq->sb->s_blocksize - rq->pageofs_in));
		if (ret) {
			kunmap_local(headpage);
			return ret;
		}
		may_inplace = !((rq->pageofs_in + rq->inputsize) &
				(rq->sb->s_blocksize - 1));
	}

	inputmargin = rq->pageofs_in;
	src = z_erofs_lz4_handle_overlap(ctx, headpage, dst, &inputmargin,
					 &maptype, may_inplace);
	if (IS_ERR(src))
		return PTR_ERR(src);

	out = dst + rq->pageofs_out;
	/* legacy format could compress extra data in a pcluster. */
	if (rq->partial_decoding || !support_0padding)
		ret = LZ4_decompress_safe_partial(src + inputmargin, out,
				rq->inputsize, rq->outputsize, rq->outputsize);
	else
		ret = LZ4_decompress_safe(src + inputmargin, out,
					  rq->inputsize, rq->outputsize);

	if (ret != rq->outputsize) {
		erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]",
			  ret, rq->inputsize, inputmargin, rq->outputsize);
		if (ret >= 0)
			memset(out + ret, 0, rq->outputsize - ret);
		ret = -EFSCORRUPTED;
	} else {
		ret = 0;
	}

	if (maptype == 0) {
		kunmap_local(headpage);
	} else if (maptype == 1) {
		vm_unmap_ram(src, ctx->inpages);
	} else if (maptype == 2) {
		z_erofs_put_gbuf(src);
	} else if (maptype != 3) {
		DBG_BUGON(1);
		return -EFAULT;
	}
	return ret;
}

static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq,
				  struct page **pagepool)
{
	struct z_erofs_lz4_decompress_ctx ctx;
	unsigned int dst_maptype;
	void *dst;
	int ret;

	ctx.rq = rq;
	ctx.oend = rq->pageofs_out + rq->outputsize;
	ctx.outpages = PAGE_ALIGN(ctx.oend) >> PAGE_SHIFT;
	ctx.inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;

	/* one optimized fast path only for non bigpcluster cases yet */
	if (ctx.inpages == 1 && ctx.outpages == 1 && !rq->inplace_io) {
		DBG_BUGON(!*rq->out);
		dst = kmap_local_page(*rq->out);
		dst_maptype = 0;
		goto dstmap_out;
	}

	/* general decoding path which can be used for all cases */
	ret = z_erofs_lz4_prepare_dstpages(&ctx, pagepool);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		dst = page_address(*rq->out);
		dst_maptype = 1;
	} else {
		dst = erofs_vm_map_ram(rq->out, ctx.outpages);
		if (!dst)
			return -ENOMEM;
		dst_maptype = 2;
	}

dstmap_out:
	ret = z_erofs_lz4_decompress_mem(&ctx, dst);
	if (!dst_maptype)
		kunmap_local(dst);
	else if (dst_maptype == 2)
		vm_unmap_ram(dst, ctx.outpages);
	return ret;
}

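/*
 * Handle uncompressed (shifted/interlaced) pclusters: move or copy the plain
 * data from the input pages to the output pages, coping with pages that are
 * shared between input and output due to in-place I/O.
 */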
static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
				   struct page **pagepool)
{
	const unsigned int nrpages_in =
		PAGE_ALIGN(rq->pageofs_in + rq->inputsize) >> PAGE_SHIFT;
	const unsigned int nrpages_out =
		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
	const unsigned int bs = rq->sb->s_blocksize;
	unsigned int cur = 0, ni = 0, no, pi, po, insz, cnt;
	u8 *kin;

	if (rq->outputsize > rq->inputsize)
		return -EOPNOTSUPP;
	if (rq->alg == Z_EROFS_COMPRESSION_INTERLACED) {
		cur = bs - (rq->pageofs_out & (bs - 1));
		pi = (rq->pageofs_in + rq->inputsize - cur) & ~PAGE_MASK;
		cur = min(cur, rq->outputsize);
		if (cur && rq->out[0]) {
			kin = kmap_local_page(rq->in[nrpages_in - 1]);
			if (rq->out[0] == rq->in[nrpages_in - 1]) {
				memmove(kin + rq->pageofs_out, kin + pi, cur);
				flush_dcache_page(rq->out[0]);
			} else {
				memcpy_to_page(rq->out[0], rq->pageofs_out,
					       kin + pi, cur);
			}
			kunmap_local(kin);
		}
		rq->outputsize -= cur;
	}

	for (; rq->outputsize; rq->pageofs_in = 0, cur += PAGE_SIZE, ni++) {
		insz = min(PAGE_SIZE - rq->pageofs_in, rq->outputsize);
		rq->outputsize -= insz;
		if (!rq->in[ni])
			continue;
		kin = kmap_local_page(rq->in[ni]);
		pi = 0;
		do {
			no = (rq->pageofs_out + cur + pi) >> PAGE_SHIFT;
			po = (rq->pageofs_out + cur + pi) & ~PAGE_MASK;
			DBG_BUGON(no >= nrpages_out);
			cnt = min(insz - pi, PAGE_SIZE - po);
			if (rq->out[no] == rq->in[ni]) {
				memmove(kin + po,
					kin + rq->pageofs_in + pi, cnt);
				flush_dcache_page(rq->out[no]);
			} else if (rq->out[no]) {
				memcpy_to_page(rq->out[no], po,
					       kin + rq->pageofs_in + pi, cnt);
			}
			pi += cnt;
		} while (pi < insz);
		kunmap_local(kin);
	}
	DBG_BUGON(ni > nrpages_in);
	return 0;
}

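/*
 * Advance the output/input windows for streaming decompressors (LZMA,
 * DEFLATE, ZSTD): map the next output page once the current one is full,
 * feed the next input page once the current one is consumed, and double
 * in-place I/O pages that would otherwise be overwritten while still in use.
 */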
int z_erofs_stream_switch_bufs(struct z_erofs_stream_dctx *dctx, void **dst,
			       void **src, struct page **pgpl)
{
	struct z_erofs_decompress_req *rq = dctx->rq;
	struct super_block *sb = rq->sb;
	struct page **pgo, *tmppage;
	unsigned int j;

	if (!dctx->avail_out) {
		if (++dctx->no >= dctx->outpages || !rq->outputsize) {
			erofs_err(sb, "insufficient space for decompressed data");
			return -EFSCORRUPTED;
		}

		if (dctx->kout)
			kunmap_local(dctx->kout);
		dctx->avail_out = min(rq->outputsize, PAGE_SIZE - rq->pageofs_out);
		rq->outputsize -= dctx->avail_out;
		pgo = &rq->out[dctx->no];
		if (!*pgo && rq->fillgaps) {	/* deduped */
			*pgo = erofs_allocpage(pgpl, rq->gfp);
			if (!*pgo) {
				dctx->kout = NULL;
				return -ENOMEM;
			}
			set_page_private(*pgo, Z_EROFS_SHORTLIVED_PAGE);
		}
		if (*pgo) {
			dctx->kout = kmap_local_page(*pgo);
			*dst = dctx->kout + rq->pageofs_out;
		} else {
			*dst = dctx->kout = NULL;
		}
		rq->pageofs_out = 0;
	}

	if (dctx->inbuf_pos == dctx->inbuf_sz && rq->inputsize) {
		if (++dctx->ni >= dctx->inpages) {
			erofs_err(sb, "invalid compressed data");
			return -EFSCORRUPTED;
		}
		if (dctx->kout) /* unlike kmap(), take care of the orders */
			kunmap_local(dctx->kout);
		kunmap_local(dctx->kin);

		dctx->inbuf_sz = min_t(u32, rq->inputsize, PAGE_SIZE);
		rq->inputsize -= dctx->inbuf_sz;
		dctx->kin = kmap_local_page(rq->in[dctx->ni]);
		*src = dctx->kin;
		dctx->bounced = false;
		if (dctx->kout) {
			j = (u8 *)*dst - dctx->kout;
			dctx->kout = kmap_local_page(rq->out[dctx->no]);
			*dst = dctx->kout + j;
		}
		dctx->inbuf_pos = 0;
	}

	/*
	 * Handle overlapping: Use the given bounce buffer if the input data is
	 * under processing; Or utilize short-lived pages from the on-stack page
	 * pool, where pages are shared among the same request.  Note that only
	 * a few inplace I/O pages need to be doubled.
	 */
	if (!dctx->bounced && rq->out[dctx->no] == rq->in[dctx->ni]) {
		memcpy(dctx->bounce, *src, dctx->inbuf_sz);
		*src = dctx->bounce;
		dctx->bounced = true;
	}

	for (j = dctx->ni + 1; j < dctx->inpages; ++j) {
		if (rq->out[dctx->no] != rq->in[j])
			continue;
		tmppage = erofs_allocpage(pgpl, rq->gfp);
		if (!tmppage)
			return -ENOMEM;
		set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
		copy_highpage(tmppage, rq->in[j]);
		rq->in[j] = tmppage;
	}
	return 0;
}

const struct z_erofs_decompressor *z_erofs_decomp[] = {
	[Z_EROFS_COMPRESSION_SHIFTED] = &(const struct z_erofs_decompressor) {
		.decompress = z_erofs_transform_plain,
		.name = "shifted"
	},
	[Z_EROFS_COMPRESSION_INTERLACED] = &(const struct z_erofs_decompressor) {
		.decompress = z_erofs_transform_plain,
		.name = "interlaced"
	},
	[Z_EROFS_COMPRESSION_LZ4] = &(const struct z_erofs_decompressor) {
		.config = z_erofs_load_lz4_config,
		.decompress = z_erofs_lz4_decompress,
		.init = z_erofs_gbuf_init,
		.exit = z_erofs_gbuf_exit,
		.name = "lz4"
	},
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
	[Z_EROFS_COMPRESSION_LZMA] = &z_erofs_lzma_decomp,
#endif
#ifdef CONFIG_EROFS_FS_ZIP_DEFLATE
	[Z_EROFS_COMPRESSION_DEFLATE] = &z_erofs_deflate_decomp,
#endif
#ifdef CONFIG_EROFS_FS_ZIP_ZSTD
	[Z_EROFS_COMPRESSION_ZSTD] = &z_erofs_zstd_decomp,
#endif
};

int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	unsigned int algs, alg;
	erofs_off_t offset;
	int size, ret = 0;

	if (!erofs_sb_has_compr_cfgs(sbi)) {
		sbi->available_compr_algs = 1 << Z_EROFS_COMPRESSION_LZ4;
		return z_erofs_load_lz4_config(sb, dsb, NULL, 0);
	}

	sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
	if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
		erofs_err(sb, "unidentified algorithms %x, please upgrade kernel",
			  sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
		return -EOPNOTSUPP;
	}

	erofs_init_metabuf(&buf, sb);
	offset = EROFS_SUPER_OFFSET + sbi->sb_size;
	alg = 0;
	for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
		const struct z_erofs_decompressor *dec = z_erofs_decomp[alg];
		void *data;

		if (!(algs & 1))
			continue;

		data = erofs_read_metadata(sb, &buf, &offset, &size);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			break;
		}

		if (alg < Z_EROFS_COMPRESSION_MAX && dec && dec->config) {
			ret = dec->config(sb, dsb, data, size);
		} else {
			erofs_err(sb, "algorithm %d isn't enabled on this kernel",
				  alg);
			ret = -EOPNOTSUPP;
		}
		kfree(data);
		if (ret)
			break;
	}
	erofs_put_metabuf(&buf);
	return ret;
}

int __init z_erofs_init_decompressor(void)
{
	int i, err;

	for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i) {
		err = z_erofs_decomp[i] ? z_erofs_decomp[i]->init() : 0;
		if (err) {
			while (i--)
				if (z_erofs_decomp[i])
					z_erofs_decomp[i]->exit();
			return err;
		}
	}
	return 0;
}

void z_erofs_exit_decompressor(void)
{
	int i;

	for (i = 0; i < Z_EROFS_COMPRESSION_MAX; ++i)
		if (z_erofs_decomp[i])
			z_erofs_decomp[i]->exit();
}