// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "checksum.h"
#include "compress.h"
#include "error.h"
#include "extents.h"
#include "opts.h"
#include "super-io.h"

#include <linux/lz4.h>
#include <linux/zlib.h>
#include <linux/zstd.h>

static inline enum bch_compression_opts bch2_compression_type_to_opt(enum bch_compression_type type)
{
	switch (type) {
	case BCH_COMPRESSION_TYPE_none:
	case BCH_COMPRESSION_TYPE_incompressible:
		return BCH_COMPRESSION_OPT_none;
	case BCH_COMPRESSION_TYPE_lz4_old:
	case BCH_COMPRESSION_TYPE_lz4:
		return BCH_COMPRESSION_OPT_lz4;
	case BCH_COMPRESSION_TYPE_gzip:
		return BCH_COMPRESSION_OPT_gzip;
	case BCH_COMPRESSION_TYPE_zstd:
		return BCH_COMPRESSION_OPT_zstd;
	default:
		BUG();
	}
}

/* Bounce buffer: */
struct bbuf {
	void *b;
	enum {
		BB_NONE,
		BB_VMAP,
		BB_KMALLOC,
		BB_MEMPOOL,
	} type;
	int rw;
};

static struct bbuf __bounce_alloc(struct bch_fs *c, unsigned size, int rw)
{
	void *b;

	BUG_ON(size > c->opts.encoded_extent_max);

	b = kmalloc(size, GFP_NOFS|__GFP_NOWARN);
	if (b)
		return (struct bbuf) { .b = b, .type = BB_KMALLOC, .rw = rw };

	b = mempool_alloc(&c->compression_bounce[rw], GFP_NOFS);
	if (b)
		return (struct bbuf) { .b = b, .type = BB_MEMPOOL, .rw = rw };

	BUG();
}

static bool bio_phys_contig(struct bio *bio, struct bvec_iter start)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	void *expected_start = NULL;

	__bio_for_each_bvec(bv, bio, iter, start) {
		if (expected_start &&
		    expected_start != page_address(bv.bv_page) + bv.bv_offset)
			return false;

		expected_start = page_address(bv.bv_page) +
			bv.bv_offset + bv.bv_len;
	}

	return true;
}
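
/*
 * Map the range of @bio described by @start into a single virtually
 * contiguous buffer: use the pages directly when they're physically
 * contiguous (and not highmem), fall back to vmap() when the segments
 * line up on page boundaries, and otherwise bounce - copying the bio
 * contents in first when this is the read side.
 */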
static struct bbuf __bio_map_or_bounce(struct bch_fs *c, struct bio *bio,
				       struct bvec_iter start, int rw)
{
	struct bbuf ret;
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned nr_pages = 0;
	struct page *stack_pages[16];
	struct page **pages = NULL;
	void *data;

	BUG_ON(start.bi_size > c->opts.encoded_extent_max);

	if (!PageHighMem(bio_iter_page(bio, start)) &&
	    bio_phys_contig(bio, start))
		return (struct bbuf) {
			.b = page_address(bio_iter_page(bio, start)) +
				bio_iter_offset(bio, start),
			.type = BB_NONE, .rw = rw
		};

	/* check if we can map the pages contiguously: */
	__bio_for_each_segment(bv, bio, iter, start) {
		if (iter.bi_size != start.bi_size &&
		    bv.bv_offset)
			goto bounce;

		if (bv.bv_len < iter.bi_size &&
		    bv.bv_offset + bv.bv_len < PAGE_SIZE)
			goto bounce;

		nr_pages++;
	}

	BUG_ON(DIV_ROUND_UP(start.bi_size, PAGE_SIZE) > nr_pages);

	pages = nr_pages > ARRAY_SIZE(stack_pages)
		? kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS)
		: stack_pages;
	if (!pages)
		goto bounce;

	nr_pages = 0;
	__bio_for_each_segment(bv, bio, iter, start)
		pages[nr_pages++] = bv.bv_page;

	data = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (pages != stack_pages)
		kfree(pages);

	if (data)
		return (struct bbuf) {
			.b = data + bio_iter_offset(bio, start),
			.type = BB_VMAP, .rw = rw
		};
bounce:
	ret = __bounce_alloc(c, start.bi_size, rw);

	if (rw == READ)
		memcpy_from_bio(ret.b, bio, start);

	return ret;
}

static struct bbuf bio_map_or_bounce(struct bch_fs *c, struct bio *bio, int rw)
{
	return __bio_map_or_bounce(c, bio, bio->bi_iter, rw);
}

static void bio_unmap_or_unbounce(struct bch_fs *c, struct bbuf buf)
{
	switch (buf.type) {
	case BB_NONE:
		break;
	case BB_VMAP:
		vunmap((void *) ((unsigned long) buf.b & PAGE_MASK));
		break;
	case BB_KMALLOC:
		kfree(buf.b);
		break;
	case BB_MEMPOOL:
		mempool_free(buf.b, &c->compression_bounce[buf.rw]);
		break;
	}
}

static inline void zlib_set_workspace(z_stream *strm, void *workspace)
{
#ifdef __KERNEL__
	strm->workspace = workspace;
#endif
}

static int __bio_uncompress(struct bch_fs *c, struct bio *src,
			    void *dst_data, struct bch_extent_crc_unpacked crc)
{
	struct bbuf src_data = { NULL };
	size_t src_len = src->bi_iter.bi_size;
	size_t dst_len = crc.uncompressed_size << 9;
	void *workspace;
	int ret;

	enum bch_compression_opts opt = bch2_compression_type_to_opt(crc.compression_type);
	mempool_t *workspace_pool = &c->compress_workspace[opt];
	if (unlikely(!mempool_initialized(workspace_pool))) {
		if (fsck_err(c, compression_type_not_marked_in_sb,
			     "compression type %s set but not marked in superblock",
			     __bch2_compression_types[crc.compression_type]))
			ret = bch2_check_set_has_compressed_data(c, opt);
		else
			ret = -BCH_ERR_compression_workspace_not_initialized;
		if (ret)
			goto out;
	}

	src_data = bio_map_or_bounce(c, src, READ);

	switch (crc.compression_type) {
	case BCH_COMPRESSION_TYPE_lz4_old:
	case BCH_COMPRESSION_TYPE_lz4:
		ret = LZ4_decompress_safe_partial(src_data.b, dst_data,
						  src_len, dst_len, dst_len);
		if (ret != dst_len)
			goto err;
		break;
	case BCH_COMPRESSION_TYPE_gzip: {
		z_stream strm = {
			.next_in = src_data.b,
			.avail_in = src_len,
			.next_out = dst_data,
			.avail_out = dst_len,
		};

		workspace = mempool_alloc(workspace_pool, GFP_NOFS);

		zlib_set_workspace(&strm, workspace);
		zlib_inflateInit2(&strm, -MAX_WBITS);
		ret = zlib_inflate(&strm, Z_FINISH);

		mempool_free(workspace, workspace_pool);

		if (ret != Z_STREAM_END)
			goto err;
		break;
	}
	case BCH_COMPRESSION_TYPE_zstd: {
		ZSTD_DCtx *ctx;
		size_t real_src_len = le32_to_cpup(src_data.b);

		if (real_src_len > src_len - 4)
			goto err;

		workspace = mempool_alloc(workspace_pool, GFP_NOFS);
		ctx = zstd_init_dctx(workspace, zstd_dctx_workspace_bound());

		ret = zstd_decompress_dctx(ctx,
					   dst_data, dst_len,
					   src_data.b + 4, real_src_len);

		mempool_free(workspace, workspace_pool);

		if (ret != dst_len)
			goto err;
		break;
	}
	default:
		BUG();
	}
	ret = 0;
fsck_err:
out:
	bio_unmap_or_unbounce(c, src_data);
	return ret;
err:
	ret = -EIO;
	goto out;
}
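
/*
 * Decompress an extent into the bio's own pages, for rewriting existing
 * data in place; on success @crc is updated to describe the now
 * uncompressed, checksum-less extent.
 */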
int bch2_bio_uncompress_inplace(struct bch_fs *c, struct bio *bio,
				struct bch_extent_crc_unpacked *crc)
{
	struct bbuf data = { NULL };
	size_t dst_len = crc->uncompressed_size << 9;

	/* bio must own its pages: */
	BUG_ON(!bio->bi_vcnt);
	BUG_ON(DIV_ROUND_UP(crc->live_size, PAGE_SECTORS) > bio->bi_max_vecs);

	if (crc->uncompressed_size << 9 > c->opts.encoded_extent_max ||
	    crc->compressed_size << 9 > c->opts.encoded_extent_max) {
		bch_err(c, "error rewriting existing data: extent too big");
		return -EIO;
	}

	data = __bounce_alloc(c, dst_len, WRITE);

	if (__bio_uncompress(c, bio, data.b, *crc)) {
		if (!c->opts.no_data_io)
			bch_err(c, "error rewriting existing data: decompression error");
		bio_unmap_or_unbounce(c, data);
		return -EIO;
	}

	/*
	 * XXX: don't have a good way to assert that the bio was allocated with
	 * enough space, we depend on bch2_move_extent doing the right thing
	 */
	bio->bi_iter.bi_size = crc->live_size << 9;

	memcpy_to_bio(bio, bio->bi_iter, data.b + (crc->offset << 9));

	crc->csum_type = 0;
	crc->compression_type = 0;
	crc->compressed_size = crc->live_size;
	crc->uncompressed_size = crc->live_size;
	crc->offset = 0;
	crc->csum = (struct bch_csum) { 0, 0 };

	bio_unmap_or_unbounce(c, data);
	return 0;
}

int bch2_bio_uncompress(struct bch_fs *c, struct bio *src,
			struct bio *dst, struct bvec_iter dst_iter,
			struct bch_extent_crc_unpacked crc)
{
	struct bbuf dst_data = { NULL };
	size_t dst_len = crc.uncompressed_size << 9;
	int ret;

	if (crc.uncompressed_size << 9 > c->opts.encoded_extent_max ||
	    crc.compressed_size << 9 > c->opts.encoded_extent_max)
		return -EIO;

	dst_data = dst_len == dst_iter.bi_size
		? __bio_map_or_bounce(c, dst, dst_iter, WRITE)
		: __bounce_alloc(c, dst_len, WRITE);

	ret = __bio_uncompress(c, src, dst_data.b, crc);
	if (ret)
		goto err;

	if (dst_data.type != BB_NONE &&
	    dst_data.type != BB_VMAP)
		memcpy_to_bio(dst, dst_iter, dst_data.b + (crc.offset << 9));
err:
	bio_unmap_or_unbounce(c, dst_data);
	return ret;
}
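
/*
 * Compress @src_len bytes of @src into @dst. Returns the compressed size
 * on success, and a value <= 0 when the output didn't fit in @dst_len;
 * for lz4, a negative return is a hint as to how much input would have
 * fit (see the resizing loop in __bio_compress()).
 */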
static int attempt_compress(struct bch_fs *c,
			    void *workspace,
			    void *dst, size_t dst_len,
			    void *src, size_t src_len,
			    struct bch_compression_opt compression)
{
	enum bch_compression_type compression_type =
		__bch2_compression_opt_to_type[compression.type];

	switch (compression_type) {
	case BCH_COMPRESSION_TYPE_lz4:
		if (compression.level < LZ4HC_MIN_CLEVEL) {
			int len = src_len;
			int ret = LZ4_compress_destSize(
					src, dst,
					&len, dst_len,
					workspace);
			if (len < src_len)
				return -len;

			return ret;
		} else {
			int ret = LZ4_compress_HC(
					src, dst,
					src_len, dst_len,
					compression.level,
					workspace);

			return ret ?: -1;
		}
	case BCH_COMPRESSION_TYPE_gzip: {
		z_stream strm = {
			.next_in = src,
			.avail_in = src_len,
			.next_out = dst,
			.avail_out = dst_len,
		};

		zlib_set_workspace(&strm, workspace);
		zlib_deflateInit2(&strm,
				  compression.level
				  ? clamp_t(unsigned, compression.level,
					    Z_BEST_SPEED, Z_BEST_COMPRESSION)
				  : Z_DEFAULT_COMPRESSION,
				  Z_DEFLATED, -MAX_WBITS, DEF_MEM_LEVEL,
				  Z_DEFAULT_STRATEGY);

		if (zlib_deflate(&strm, Z_FINISH) != Z_STREAM_END)
			return 0;

		if (zlib_deflateEnd(&strm) != Z_OK)
			return 0;

		return strm.total_out;
	}
	case BCH_COMPRESSION_TYPE_zstd: {
		/*
		 * rescale:
		 * zstd max compression level is 22, our max level is 15
		 */
		unsigned level = min((compression.level * 3) / 2, zstd_max_clevel());
		ZSTD_parameters params = zstd_get_params(level, c->opts.encoded_extent_max);
		ZSTD_CCtx *ctx = zstd_init_cctx(workspace, c->zstd_workspace_size);

		/*
		 * ZSTD requires that when we decompress we pass in the exact
		 * compressed size - rounding it up to the nearest sector
		 * doesn't work, so we use the first 4 bytes of the buffer for
		 * that.
		 *
		 * Additionally, the ZSTD code seems to have a bug where it will
		 * write just past the end of the buffer - so subtract a fudge
		 * factor (7 bytes) from the dst buffer size to account for
		 * that.
		 */
		size_t len = zstd_compress_cctx(ctx,
				dst + 4, dst_len - 4 - 7,
				src, src_len,
				&params);
		if (zstd_is_error(len))
			return 0;

		*((__le32 *) dst) = cpu_to_le32(len);
		return len + 4;
	}
	default:
		BUG();
	}
}
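
/*
 * Compress as much of @src as will fit in @dst, shrinking the input in
 * units of block_bytes(c) and retrying until it fits. Returns the
 * compression type used, BCH_COMPRESSION_TYPE_incompressible if the data
 * didn't shrink by at least a block, or 0 (uncompressed) if the
 * workspace mempool couldn't be set up; *src_len and *dst_len are set to
 * the bytes consumed and produced.
 */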
static unsigned __bio_compress(struct bch_fs *c,
			       struct bio *dst, size_t *dst_len,
			       struct bio *src, size_t *src_len,
			       struct bch_compression_opt compression)
{
	struct bbuf src_data = { NULL }, dst_data = { NULL };
	void *workspace;
	enum bch_compression_type compression_type =
		__bch2_compression_opt_to_type[compression.type];
	unsigned pad;
	int ret = 0;

	/* bch2_compression_decode catches unknown compression types: */
	BUG_ON(compression.type >= BCH_COMPRESSION_OPT_NR);

	mempool_t *workspace_pool = &c->compress_workspace[compression.type];
	if (unlikely(!mempool_initialized(workspace_pool))) {
		if (fsck_err(c, compression_opt_not_marked_in_sb,
			     "compression opt %s set but not marked in superblock",
			     bch2_compression_opts[compression.type])) {
			ret = bch2_check_set_has_compressed_data(c, compression.type);
			if (ret) /* memory allocation failure, don't compress */
				return 0;
		} else {
			return 0;
		}
	}

	/* If it's only one block, don't bother trying to compress: */
	if (src->bi_iter.bi_size <= c->opts.block_size)
		return BCH_COMPRESSION_TYPE_incompressible;

	dst_data = bio_map_or_bounce(c, dst, WRITE);
	src_data = bio_map_or_bounce(c, src, READ);

	workspace = mempool_alloc(workspace_pool, GFP_NOFS);

	*src_len = src->bi_iter.bi_size;
	*dst_len = dst->bi_iter.bi_size;

	/*
	 * XXX: this algorithm sucks when the compression code doesn't tell us
	 * how much would fit, like LZ4 does:
	 */
	while (1) {
		if (*src_len <= block_bytes(c)) {
			ret = -1;
			break;
		}

		ret = attempt_compress(c, workspace,
				       dst_data.b, *dst_len,
				       src_data.b, *src_len,
				       compression);
		if (ret > 0) {
			*dst_len = ret;
			ret = 0;
			break;
		}

		/* Didn't fit: should we retry with a smaller amount? */
		if (*src_len <= *dst_len) {
			ret = -1;
			break;
		}

		/*
		 * If ret is negative, it's a hint as to how much data would fit
		 */
		BUG_ON(-ret >= *src_len);

		if (ret < 0)
			*src_len = -ret;
		else
			*src_len -= (*src_len - *dst_len) / 2;
		*src_len = round_down(*src_len, block_bytes(c));
	}

	mempool_free(workspace, workspace_pool);

	if (ret)
		goto err;

	/* Didn't get smaller: */
	if (round_up(*dst_len, block_bytes(c)) >= *src_len)
		goto err;

	pad = round_up(*dst_len, block_bytes(c)) - *dst_len;

	memset(dst_data.b + *dst_len, 0, pad);
	*dst_len += pad;

	if (dst_data.type != BB_NONE &&
	    dst_data.type != BB_VMAP)
		memcpy_to_bio(dst, dst->bi_iter, dst_data.b);

	BUG_ON(!*dst_len || *dst_len > dst->bi_iter.bi_size);
	BUG_ON(!*src_len || *src_len > src->bi_iter.bi_size);
	BUG_ON(*dst_len & (block_bytes(c) - 1));
	BUG_ON(*src_len & (block_bytes(c) - 1));
	ret = compression_type;
out:
	bio_unmap_or_unbounce(c, src_data);
	bio_unmap_or_unbounce(c, dst_data);
	return ret;
err:
	ret = BCH_COMPRESSION_TYPE_incompressible;
	goto out;
fsck_err:
	ret = 0;
	goto out;
}

unsigned bch2_bio_compress(struct bch_fs *c,
			   struct bio *dst, size_t *dst_len,
			   struct bio *src, size_t *src_len,
			   unsigned compression_opt)
{
	unsigned orig_dst = dst->bi_iter.bi_size;
	unsigned orig_src = src->bi_iter.bi_size;
	unsigned compression_type;

	/* Don't consume more than BCH_ENCODED_EXTENT_MAX from @src: */
	src->bi_iter.bi_size = min_t(unsigned, src->bi_iter.bi_size,
				     c->opts.encoded_extent_max);
	/* Don't generate a bigger output than input: */
	dst->bi_iter.bi_size = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);

	compression_type =
		__bio_compress(c, dst, dst_len, src, src_len,
			       bch2_compression_decode(compression_opt));

	dst->bi_iter.bi_size = orig_dst;
	src->bi_iter.bi_size = orig_src;
	return compression_type;
}
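
/*
 * A compression type may only be used once the corresponding feature bit
 * has been set in the superblock. The table below maps each compression
 * option to its feature bit; BCH_COMPRESSION_OPT_none has no feature
 * bit, so it's temporarily defined to 0 for the x-macro expansion.
 */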
static int __bch2_fs_compress_init(struct bch_fs *, u64);

#define BCH_FEATURE_none 0

static const unsigned bch2_compression_opt_to_feature[] = {
#define x(t, n) [BCH_COMPRESSION_OPT_##t] = BCH_FEATURE_##t,
	BCH_COMPRESSION_OPTS()
#undef x
};

#undef BCH_FEATURE_none

static int __bch2_check_set_has_compressed_data(struct bch_fs *c, u64 f)
{
	int ret = 0;

	if ((c->sb.features & f) == f)
		return 0;

	mutex_lock(&c->sb_lock);

	if ((c->sb.features & f) == f) {
		mutex_unlock(&c->sb_lock);
		return 0;
	}

	ret = __bch2_fs_compress_init(c, c->sb.features|f);
	if (ret) {
		mutex_unlock(&c->sb_lock);
		return ret;
	}

	c->disk_sb.sb->features[0] |= cpu_to_le64(f);
	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return 0;
}

int bch2_check_set_has_compressed_data(struct bch_fs *c,
				       unsigned compression_opt)
{
	unsigned compression_type = bch2_compression_decode(compression_opt).type;

	BUG_ON(compression_type >= ARRAY_SIZE(bch2_compression_opt_to_feature));

	return compression_type
		? __bch2_check_set_has_compressed_data(c,
				1ULL << bch2_compression_opt_to_feature[compression_type])
		: 0;
}

void bch2_fs_compress_exit(struct bch_fs *c)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->compress_workspace); i++)
		mempool_exit(&c->compress_workspace[i]);
	mempool_exit(&c->compression_bounce[WRITE]);
	mempool_exit(&c->compression_bounce[READ]);
}

static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
{
	ZSTD_parameters params = zstd_get_params(zstd_max_clevel(),
						 c->opts.encoded_extent_max);

	c->zstd_workspace_size = zstd_cctx_workspace_bound(&params.cParams);

	struct {
		unsigned feature;
		enum bch_compression_opts type;
		size_t compress_workspace;
	} compression_types[] = {
		{ BCH_FEATURE_lz4, BCH_COMPRESSION_OPT_lz4,
			max_t(size_t, LZ4_MEM_COMPRESS, LZ4HC_MEM_COMPRESS) },
		{ BCH_FEATURE_gzip, BCH_COMPRESSION_OPT_gzip,
			max(zlib_deflate_workspacesize(MAX_WBITS, DEF_MEM_LEVEL),
			    zlib_inflate_workspacesize()) },
		{ BCH_FEATURE_zstd, BCH_COMPRESSION_OPT_zstd,
			max(c->zstd_workspace_size,
			    zstd_dctx_workspace_bound()) },
	}, *i;
	bool have_compressed = false;

	for (i = compression_types;
	     i < compression_types + ARRAY_SIZE(compression_types);
	     i++)
		have_compressed |= (features & (1 << i->feature)) != 0;

	if (!have_compressed)
		return 0;

	if (!mempool_initialized(&c->compression_bounce[READ]) &&
	    mempool_init_kvmalloc_pool(&c->compression_bounce[READ],
				       1, c->opts.encoded_extent_max))
		return -BCH_ERR_ENOMEM_compression_bounce_read_init;

	if (!mempool_initialized(&c->compression_bounce[WRITE]) &&
	    mempool_init_kvmalloc_pool(&c->compression_bounce[WRITE],
				       1, c->opts.encoded_extent_max))
		return -BCH_ERR_ENOMEM_compression_bounce_write_init;

	for (i = compression_types;
	     i < compression_types + ARRAY_SIZE(compression_types);
	     i++) {
		if (!(features & (1 << i->feature)))
			continue;

		if (mempool_initialized(&c->compress_workspace[i->type]))
			continue;

		if (mempool_init_kvmalloc_pool(
				&c->compress_workspace[i->type],
				1, i->compress_workspace))
			return -BCH_ERR_ENOMEM_compression_workspace_init;
	}

	return 0;
}

static u64 compression_opt_to_feature(unsigned v)
{
	unsigned type = bch2_compression_decode(v).type;

	return BIT_ULL(bch2_compression_opt_to_feature[type]);
}

int bch2_fs_compress_init(struct bch_fs *c)
{
	u64 f = c->sb.features;

	f |= compression_opt_to_feature(c->opts.compression);
	f |= compression_opt_to_feature(c->opts.background_compression);

	return __bch2_fs_compress_init(c, f);
}
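
/*
 * Parse a compression option string of the form "type" or "type:level",
 * e.g. "lz4" or "zstd:15": level may be at most 15, and a nonzero level
 * is only accepted for an actual compression type (not "none").
 */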
int bch2_opt_compression_parse(struct bch_fs *c, const char *_val, u64 *res,
			       struct printbuf *err)
{
	char *val = kstrdup(_val, GFP_KERNEL);
	char *p = val, *type_str, *level_str;
	struct bch_compression_opt opt = { 0 };
	int ret;

	if (!val)
		return -ENOMEM;

	type_str = strsep(&p, ":");
	level_str = p;

	ret = match_string(bch2_compression_opts, -1, type_str);
	if (ret < 0 && err)
		prt_str(err, "invalid compression type");
	if (ret < 0)
		goto err;

	opt.type = ret;
	ret = 0;

	if (level_str) {
		unsigned level;

		ret = kstrtouint(level_str, 10, &level);
		if (!ret && !opt.type && level)
			ret = -EINVAL;
		if (!ret && level > 15)
			ret = -EINVAL;
		if (ret < 0 && err)
			prt_str(err, "invalid compression level");
		if (ret < 0)
			goto err;

		opt.level = level;
	}

	*res = bch2_compression_encode(opt);
err:
	kfree(val);
	return ret;
}

void bch2_compression_opt_to_text(struct printbuf *out, u64 v)
{
	struct bch_compression_opt opt = bch2_compression_decode(v);

	if (opt.type < BCH_COMPRESSION_OPT_NR)
		prt_str(out, bch2_compression_opts[opt.type]);
	else
		prt_printf(out, "(unknown compression opt %u)", opt.type);
	if (opt.level)
		prt_printf(out, ":%u", opt.level);
}

void bch2_opt_compression_to_text(struct printbuf *out,
				  struct bch_fs *c,
				  struct bch_sb *sb,
				  u64 v)
{
	return bch2_compression_opt_to_text(out, v);
}

int bch2_opt_compression_validate(u64 v, struct printbuf *err)
{
	if (!bch2_compression_opt_valid(v)) {
		prt_printf(err, "invalid compression opt %llu", v);
		return -BCH_ERR_invalid_sb_opt_compression;
	}

	return 0;
}