1 // SPDX-License-Identifier: GPL-2.0 2 #include "bcachefs.h" 3 #include "checksum.h" 4 #include "compress.h" 5 #include "error.h" 6 #include "extents.h" 7 #include "io_write.h" 8 #include "opts.h" 9 #include "super-io.h" 10 11 #include <linux/lz4.h> 12 #include <linux/zlib.h> 13 #include <linux/zstd.h> 14 15 static inline enum bch_compression_opts bch2_compression_type_to_opt(enum bch_compression_type type) 16 { 17 switch (type) { 18 case BCH_COMPRESSION_TYPE_none: 19 case BCH_COMPRESSION_TYPE_incompressible: 20 return BCH_COMPRESSION_OPT_none; 21 case BCH_COMPRESSION_TYPE_lz4_old: 22 case BCH_COMPRESSION_TYPE_lz4: 23 return BCH_COMPRESSION_OPT_lz4; 24 case BCH_COMPRESSION_TYPE_gzip: 25 return BCH_COMPRESSION_OPT_gzip; 26 case BCH_COMPRESSION_TYPE_zstd: 27 return BCH_COMPRESSION_OPT_zstd; 28 default: 29 BUG(); 30 } 31 } 32 33 /* Bounce buffer: */ 34 struct bbuf { 35 void *b; 36 enum { 37 BB_NONE, 38 BB_VMAP, 39 BB_KMALLOC, 40 BB_MEMPOOL, 41 } type; 42 int rw; 43 }; 44 45 static struct bbuf __bounce_alloc(struct bch_fs *c, unsigned size, int rw) 46 { 47 void *b; 48 49 BUG_ON(size > c->opts.encoded_extent_max); 50 51 b = kmalloc(size, GFP_NOFS|__GFP_NOWARN); 52 if (b) 53 return (struct bbuf) { .b = b, .type = BB_KMALLOC, .rw = rw }; 54 55 b = mempool_alloc(&c->compression_bounce[rw], GFP_NOFS); 56 if (b) 57 return (struct bbuf) { .b = b, .type = BB_MEMPOOL, .rw = rw }; 58 59 BUG(); 60 } 61 62 static bool bio_phys_contig(struct bio *bio, struct bvec_iter start) 63 { 64 struct bio_vec bv; 65 struct bvec_iter iter; 66 void *expected_start = NULL; 67 68 __bio_for_each_bvec(bv, bio, iter, start) { 69 if (expected_start && 70 expected_start != page_address(bv.bv_page) + bv.bv_offset) 71 return false; 72 73 expected_start = page_address(bv.bv_page) + 74 bv.bv_offset + bv.bv_len; 75 } 76 77 return true; 78 } 79 80 static struct bbuf __bio_map_or_bounce(struct bch_fs *c, struct bio *bio, 81 struct bvec_iter start, int rw) 82 { 83 struct bbuf ret; 84 struct bio_vec bv; 85 struct bvec_iter iter; 86 unsigned nr_pages = 0; 87 struct page *stack_pages[16]; 88 struct page **pages = NULL; 89 void *data; 90 91 BUG_ON(start.bi_size > c->opts.encoded_extent_max); 92 93 if (!PageHighMem(bio_iter_page(bio, start)) && 94 bio_phys_contig(bio, start)) 95 return (struct bbuf) { 96 .b = page_address(bio_iter_page(bio, start)) + 97 bio_iter_offset(bio, start), 98 .type = BB_NONE, .rw = rw 99 }; 100 101 /* check if we can map the pages contiguously: */ 102 __bio_for_each_segment(bv, bio, iter, start) { 103 if (iter.bi_size != start.bi_size && 104 bv.bv_offset) 105 goto bounce; 106 107 if (bv.bv_len < iter.bi_size && 108 bv.bv_offset + bv.bv_len < PAGE_SIZE) 109 goto bounce; 110 111 nr_pages++; 112 } 113 114 BUG_ON(DIV_ROUND_UP(start.bi_size, PAGE_SIZE) > nr_pages); 115 116 pages = nr_pages > ARRAY_SIZE(stack_pages) 117 ? kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS) 118 : stack_pages; 119 if (!pages) 120 goto bounce; 121 122 nr_pages = 0; 123 __bio_for_each_segment(bv, bio, iter, start) 124 pages[nr_pages++] = bv.bv_page; 125 126 data = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL); 127 if (pages != stack_pages) 128 kfree(pages); 129 130 if (data) 131 return (struct bbuf) { 132 .b = data + bio_iter_offset(bio, start), 133 .type = BB_VMAP, .rw = rw 134 }; 135 bounce: 136 ret = __bounce_alloc(c, start.bi_size, rw); 137 138 if (rw == READ) 139 memcpy_from_bio(ret.b, bio, start); 140 141 return ret; 142 } 143 144 static struct bbuf bio_map_or_bounce(struct bch_fs *c, struct bio *bio, int rw) 145 { 146 return __bio_map_or_bounce(c, bio, bio->bi_iter, rw); 147 } 148 149 static void bio_unmap_or_unbounce(struct bch_fs *c, struct bbuf buf) 150 { 151 switch (buf.type) { 152 case BB_NONE: 153 break; 154 case BB_VMAP: 155 vunmap((void *) ((unsigned long) buf.b & PAGE_MASK)); 156 break; 157 case BB_KMALLOC: 158 kfree(buf.b); 159 break; 160 case BB_MEMPOOL: 161 mempool_free(buf.b, &c->compression_bounce[buf.rw]); 162 break; 163 } 164 } 165 166 static inline void zlib_set_workspace(z_stream *strm, void *workspace) 167 { 168 #ifdef __KERNEL__ 169 strm->workspace = workspace; 170 #endif 171 } 172 173 static int __bio_uncompress(struct bch_fs *c, struct bio *src, 174 void *dst_data, struct bch_extent_crc_unpacked crc) 175 { 176 struct bbuf src_data = { NULL }; 177 size_t src_len = src->bi_iter.bi_size; 178 size_t dst_len = crc.uncompressed_size << 9; 179 void *workspace; 180 int ret = 0, ret2; 181 182 enum bch_compression_opts opt = bch2_compression_type_to_opt(crc.compression_type); 183 mempool_t *workspace_pool = &c->compress_workspace[opt]; 184 if (unlikely(!mempool_initialized(workspace_pool))) { 185 if (fsck_err(c, compression_type_not_marked_in_sb, 186 "compression type %s set but not marked in superblock", 187 __bch2_compression_types[crc.compression_type])) 188 ret = bch2_check_set_has_compressed_data(c, opt); 189 else 190 ret = -BCH_ERR_compression_workspace_not_initialized; 191 if (ret) 192 goto err; 193 } 194 195 src_data = bio_map_or_bounce(c, src, READ); 196 197 switch (crc.compression_type) { 198 case BCH_COMPRESSION_TYPE_lz4_old: 199 case BCH_COMPRESSION_TYPE_lz4: 200 ret2 = LZ4_decompress_safe_partial(src_data.b, dst_data, 201 src_len, dst_len, dst_len); 202 if (ret2 != dst_len) 203 ret = -BCH_ERR_decompress_lz4; 204 break; 205 case BCH_COMPRESSION_TYPE_gzip: { 206 z_stream strm = { 207 .next_in = src_data.b, 208 .avail_in = src_len, 209 .next_out = dst_data, 210 .avail_out = dst_len, 211 }; 212 213 workspace = mempool_alloc(workspace_pool, GFP_NOFS); 214 215 zlib_set_workspace(&strm, workspace); 216 zlib_inflateInit2(&strm, -MAX_WBITS); 217 ret2 = zlib_inflate(&strm, Z_FINISH); 218 219 mempool_free(workspace, workspace_pool); 220 221 if (ret2 != Z_STREAM_END) 222 ret = -BCH_ERR_decompress_gzip; 223 break; 224 } 225 case BCH_COMPRESSION_TYPE_zstd: { 226 ZSTD_DCtx *ctx; 227 size_t real_src_len = le32_to_cpup(src_data.b); 228 229 if (real_src_len > src_len - 4) { 230 ret = -BCH_ERR_decompress_zstd_src_len_bad; 231 goto err; 232 } 233 234 workspace = mempool_alloc(workspace_pool, GFP_NOFS); 235 ctx = zstd_init_dctx(workspace, zstd_dctx_workspace_bound()); 236 237 ret2 = zstd_decompress_dctx(ctx, 238 dst_data, dst_len, 239 src_data.b + 4, real_src_len); 240 241 mempool_free(workspace, workspace_pool); 242 243 if (ret2 != dst_len) 244 ret = -BCH_ERR_decompress_zstd; 245 break; 246 } 247 default: 248 BUG(); 249 } 250 err: 251 fsck_err: 252 bio_unmap_or_unbounce(c, src_data); 253 return ret; 254 } 255 256 int bch2_bio_uncompress_inplace(struct bch_write_op *op, 257 struct bio *bio) 258 { 259 struct bch_fs *c = op->c; 260 struct bch_extent_crc_unpacked *crc = &op->crc; 261 struct bbuf data = { NULL }; 262 size_t dst_len = crc->uncompressed_size << 9; 263 int ret = 0; 264 265 /* bio must own its pages: */ 266 BUG_ON(!bio->bi_vcnt); 267 BUG_ON(DIV_ROUND_UP(crc->live_size, PAGE_SECTORS) > bio->bi_max_vecs); 268 269 if (crc->uncompressed_size << 9 > c->opts.encoded_extent_max) { 270 bch2_write_op_error(op, op->pos.offset, 271 "extent too big to decompress (%u > %u)", 272 crc->uncompressed_size << 9, c->opts.encoded_extent_max); 273 return -BCH_ERR_decompress_exceeded_max_encoded_extent; 274 } 275 276 data = __bounce_alloc(c, dst_len, WRITE); 277 278 ret = __bio_uncompress(c, bio, data.b, *crc); 279 280 if (c->opts.no_data_io) 281 ret = 0; 282 283 if (ret) { 284 bch2_write_op_error(op, op->pos.offset, "%s", bch2_err_str(ret)); 285 goto err; 286 } 287 288 /* 289 * XXX: don't have a good way to assert that the bio was allocated with 290 * enough space, we depend on bch2_move_extent doing the right thing 291 */ 292 bio->bi_iter.bi_size = crc->live_size << 9; 293 294 memcpy_to_bio(bio, bio->bi_iter, data.b + (crc->offset << 9)); 295 296 crc->csum_type = 0; 297 crc->compression_type = 0; 298 crc->compressed_size = crc->live_size; 299 crc->uncompressed_size = crc->live_size; 300 crc->offset = 0; 301 crc->csum = (struct bch_csum) { 0, 0 }; 302 err: 303 bio_unmap_or_unbounce(c, data); 304 return ret; 305 } 306 307 int bch2_bio_uncompress(struct bch_fs *c, struct bio *src, 308 struct bio *dst, struct bvec_iter dst_iter, 309 struct bch_extent_crc_unpacked crc) 310 { 311 struct bbuf dst_data = { NULL }; 312 size_t dst_len = crc.uncompressed_size << 9; 313 int ret; 314 315 if (crc.uncompressed_size << 9 > c->opts.encoded_extent_max || 316 crc.compressed_size << 9 > c->opts.encoded_extent_max) 317 return -BCH_ERR_decompress_exceeded_max_encoded_extent; 318 319 dst_data = dst_len == dst_iter.bi_size 320 ? __bio_map_or_bounce(c, dst, dst_iter, WRITE) 321 : __bounce_alloc(c, dst_len, WRITE); 322 323 ret = __bio_uncompress(c, src, dst_data.b, crc); 324 if (ret) 325 goto err; 326 327 if (dst_data.type != BB_NONE && 328 dst_data.type != BB_VMAP) 329 memcpy_to_bio(dst, dst_iter, dst_data.b + (crc.offset << 9)); 330 err: 331 bio_unmap_or_unbounce(c, dst_data); 332 return ret; 333 } 334 335 static int attempt_compress(struct bch_fs *c, 336 void *workspace, 337 void *dst, size_t dst_len, 338 void *src, size_t src_len, 339 struct bch_compression_opt compression) 340 { 341 enum bch_compression_type compression_type = 342 __bch2_compression_opt_to_type[compression.type]; 343 344 switch (compression_type) { 345 case BCH_COMPRESSION_TYPE_lz4: 346 if (compression.level < LZ4HC_MIN_CLEVEL) { 347 int len = src_len; 348 int ret = LZ4_compress_destSize( 349 src, dst, 350 &len, dst_len, 351 workspace); 352 if (len < src_len) 353 return -len; 354 355 return ret; 356 } else { 357 int ret = LZ4_compress_HC( 358 src, dst, 359 src_len, dst_len, 360 compression.level, 361 workspace); 362 363 return ret ?: -1; 364 } 365 case BCH_COMPRESSION_TYPE_gzip: { 366 z_stream strm = { 367 .next_in = src, 368 .avail_in = src_len, 369 .next_out = dst, 370 .avail_out = dst_len, 371 }; 372 373 zlib_set_workspace(&strm, workspace); 374 if (zlib_deflateInit2(&strm, 375 compression.level 376 ? clamp_t(unsigned, compression.level, 377 Z_BEST_SPEED, Z_BEST_COMPRESSION) 378 : Z_DEFAULT_COMPRESSION, 379 Z_DEFLATED, -MAX_WBITS, DEF_MEM_LEVEL, 380 Z_DEFAULT_STRATEGY) != Z_OK) 381 return 0; 382 383 if (zlib_deflate(&strm, Z_FINISH) != Z_STREAM_END) 384 return 0; 385 386 if (zlib_deflateEnd(&strm) != Z_OK) 387 return 0; 388 389 return strm.total_out; 390 } 391 case BCH_COMPRESSION_TYPE_zstd: { 392 /* 393 * rescale: 394 * zstd max compression level is 22, our max level is 15 395 */ 396 unsigned level = min((compression.level * 3) / 2, zstd_max_clevel()); 397 ZSTD_parameters params = zstd_get_params(level, c->opts.encoded_extent_max); 398 ZSTD_CCtx *ctx = zstd_init_cctx(workspace, c->zstd_workspace_size); 399 400 /* 401 * ZSTD requires that when we decompress we pass in the exact 402 * compressed size - rounding it up to the nearest sector 403 * doesn't work, so we use the first 4 bytes of the buffer for 404 * that. 405 * 406 * Additionally, the ZSTD code seems to have a bug where it will 407 * write just past the end of the buffer - so subtract a fudge 408 * factor (7 bytes) from the dst buffer size to account for 409 * that. 410 */ 411 size_t len = zstd_compress_cctx(ctx, 412 dst + 4, dst_len - 4 - 7, 413 src, src_len, 414 ¶ms); 415 if (zstd_is_error(len)) 416 return 0; 417 418 *((__le32 *) dst) = cpu_to_le32(len); 419 return len + 4; 420 } 421 default: 422 BUG(); 423 } 424 } 425 426 static unsigned __bio_compress(struct bch_fs *c, 427 struct bio *dst, size_t *dst_len, 428 struct bio *src, size_t *src_len, 429 struct bch_compression_opt compression) 430 { 431 struct bbuf src_data = { NULL }, dst_data = { NULL }; 432 void *workspace; 433 enum bch_compression_type compression_type = 434 __bch2_compression_opt_to_type[compression.type]; 435 unsigned pad; 436 int ret = 0; 437 438 /* bch2_compression_decode catches unknown compression types: */ 439 BUG_ON(compression.type >= BCH_COMPRESSION_OPT_NR); 440 441 mempool_t *workspace_pool = &c->compress_workspace[compression.type]; 442 if (unlikely(!mempool_initialized(workspace_pool))) { 443 if (fsck_err(c, compression_opt_not_marked_in_sb, 444 "compression opt %s set but not marked in superblock", 445 bch2_compression_opts[compression.type])) { 446 ret = bch2_check_set_has_compressed_data(c, compression.type); 447 if (ret) /* memory allocation failure, don't compress */ 448 return 0; 449 } else { 450 return 0; 451 } 452 } 453 454 /* If it's only one block, don't bother trying to compress: */ 455 if (src->bi_iter.bi_size <= c->opts.block_size) 456 return BCH_COMPRESSION_TYPE_incompressible; 457 458 dst_data = bio_map_or_bounce(c, dst, WRITE); 459 src_data = bio_map_or_bounce(c, src, READ); 460 461 workspace = mempool_alloc(workspace_pool, GFP_NOFS); 462 463 *src_len = src->bi_iter.bi_size; 464 *dst_len = dst->bi_iter.bi_size; 465 466 /* 467 * XXX: this algorithm sucks when the compression code doesn't tell us 468 * how much would fit, like LZ4 does: 469 */ 470 while (1) { 471 if (*src_len <= block_bytes(c)) { 472 ret = -1; 473 break; 474 } 475 476 ret = attempt_compress(c, workspace, 477 dst_data.b, *dst_len, 478 src_data.b, *src_len, 479 compression); 480 if (ret > 0) { 481 *dst_len = ret; 482 ret = 0; 483 break; 484 } 485 486 /* Didn't fit: should we retry with a smaller amount? */ 487 if (*src_len <= *dst_len) { 488 ret = -1; 489 break; 490 } 491 492 /* 493 * If ret is negative, it's a hint as to how much data would fit 494 */ 495 BUG_ON(-ret >= *src_len); 496 497 if (ret < 0) 498 *src_len = -ret; 499 else 500 *src_len -= (*src_len - *dst_len) / 2; 501 *src_len = round_down(*src_len, block_bytes(c)); 502 } 503 504 mempool_free(workspace, workspace_pool); 505 506 if (ret) 507 goto err; 508 509 /* Didn't get smaller: */ 510 if (round_up(*dst_len, block_bytes(c)) >= *src_len) 511 goto err; 512 513 pad = round_up(*dst_len, block_bytes(c)) - *dst_len; 514 515 memset(dst_data.b + *dst_len, 0, pad); 516 *dst_len += pad; 517 518 if (dst_data.type != BB_NONE && 519 dst_data.type != BB_VMAP) 520 memcpy_to_bio(dst, dst->bi_iter, dst_data.b); 521 522 BUG_ON(!*dst_len || *dst_len > dst->bi_iter.bi_size); 523 BUG_ON(!*src_len || *src_len > src->bi_iter.bi_size); 524 BUG_ON(*dst_len & (block_bytes(c) - 1)); 525 BUG_ON(*src_len & (block_bytes(c) - 1)); 526 ret = compression_type; 527 out: 528 bio_unmap_or_unbounce(c, src_data); 529 bio_unmap_or_unbounce(c, dst_data); 530 return ret; 531 err: 532 ret = BCH_COMPRESSION_TYPE_incompressible; 533 goto out; 534 fsck_err: 535 ret = 0; 536 goto out; 537 } 538 539 unsigned bch2_bio_compress(struct bch_fs *c, 540 struct bio *dst, size_t *dst_len, 541 struct bio *src, size_t *src_len, 542 unsigned compression_opt) 543 { 544 unsigned orig_dst = dst->bi_iter.bi_size; 545 unsigned orig_src = src->bi_iter.bi_size; 546 unsigned compression_type; 547 548 /* Don't consume more than BCH_ENCODED_EXTENT_MAX from @src: */ 549 src->bi_iter.bi_size = min_t(unsigned, src->bi_iter.bi_size, 550 c->opts.encoded_extent_max); 551 /* Don't generate a bigger output than input: */ 552 dst->bi_iter.bi_size = min(dst->bi_iter.bi_size, src->bi_iter.bi_size); 553 554 compression_type = 555 __bio_compress(c, dst, dst_len, src, src_len, 556 bch2_compression_decode(compression_opt)); 557 558 dst->bi_iter.bi_size = orig_dst; 559 src->bi_iter.bi_size = orig_src; 560 return compression_type; 561 } 562 563 static int __bch2_fs_compress_init(struct bch_fs *, u64); 564 565 #define BCH_FEATURE_none 0 566 567 static const unsigned bch2_compression_opt_to_feature[] = { 568 #define x(t, n) [BCH_COMPRESSION_OPT_##t] = BCH_FEATURE_##t, 569 BCH_COMPRESSION_OPTS() 570 #undef x 571 }; 572 573 #undef BCH_FEATURE_none 574 575 static int __bch2_check_set_has_compressed_data(struct bch_fs *c, u64 f) 576 { 577 int ret = 0; 578 579 if ((c->sb.features & f) == f) 580 return 0; 581 582 mutex_lock(&c->sb_lock); 583 584 if ((c->sb.features & f) == f) { 585 mutex_unlock(&c->sb_lock); 586 return 0; 587 } 588 589 ret = __bch2_fs_compress_init(c, c->sb.features|f); 590 if (ret) { 591 mutex_unlock(&c->sb_lock); 592 return ret; 593 } 594 595 c->disk_sb.sb->features[0] |= cpu_to_le64(f); 596 bch2_write_super(c); 597 mutex_unlock(&c->sb_lock); 598 599 return 0; 600 } 601 602 int bch2_check_set_has_compressed_data(struct bch_fs *c, 603 unsigned compression_opt) 604 { 605 unsigned compression_type = bch2_compression_decode(compression_opt).type; 606 607 BUG_ON(compression_type >= ARRAY_SIZE(bch2_compression_opt_to_feature)); 608 609 return compression_type 610 ? __bch2_check_set_has_compressed_data(c, 611 1ULL << bch2_compression_opt_to_feature[compression_type]) 612 : 0; 613 } 614 615 void bch2_fs_compress_exit(struct bch_fs *c) 616 { 617 unsigned i; 618 619 for (i = 0; i < ARRAY_SIZE(c->compress_workspace); i++) 620 mempool_exit(&c->compress_workspace[i]); 621 mempool_exit(&c->compression_bounce[WRITE]); 622 mempool_exit(&c->compression_bounce[READ]); 623 } 624 625 static int __bch2_fs_compress_init(struct bch_fs *c, u64 features) 626 { 627 ZSTD_parameters params = zstd_get_params(zstd_max_clevel(), 628 c->opts.encoded_extent_max); 629 630 c->zstd_workspace_size = zstd_cctx_workspace_bound(¶ms.cParams); 631 632 struct { 633 unsigned feature; 634 enum bch_compression_opts type; 635 size_t compress_workspace; 636 } compression_types[] = { 637 { BCH_FEATURE_lz4, BCH_COMPRESSION_OPT_lz4, 638 max_t(size_t, LZ4_MEM_COMPRESS, LZ4HC_MEM_COMPRESS) }, 639 { BCH_FEATURE_gzip, BCH_COMPRESSION_OPT_gzip, 640 max(zlib_deflate_workspacesize(MAX_WBITS, DEF_MEM_LEVEL), 641 zlib_inflate_workspacesize()) }, 642 { BCH_FEATURE_zstd, BCH_COMPRESSION_OPT_zstd, 643 max(c->zstd_workspace_size, 644 zstd_dctx_workspace_bound()) }, 645 }, *i; 646 bool have_compressed = false; 647 648 for (i = compression_types; 649 i < compression_types + ARRAY_SIZE(compression_types); 650 i++) 651 have_compressed |= (features & (1 << i->feature)) != 0; 652 653 if (!have_compressed) 654 return 0; 655 656 if (!mempool_initialized(&c->compression_bounce[READ]) && 657 mempool_init_kvmalloc_pool(&c->compression_bounce[READ], 658 1, c->opts.encoded_extent_max)) 659 return -BCH_ERR_ENOMEM_compression_bounce_read_init; 660 661 if (!mempool_initialized(&c->compression_bounce[WRITE]) && 662 mempool_init_kvmalloc_pool(&c->compression_bounce[WRITE], 663 1, c->opts.encoded_extent_max)) 664 return -BCH_ERR_ENOMEM_compression_bounce_write_init; 665 666 for (i = compression_types; 667 i < compression_types + ARRAY_SIZE(compression_types); 668 i++) { 669 if (!(features & (1 << i->feature))) 670 continue; 671 672 if (mempool_initialized(&c->compress_workspace[i->type])) 673 continue; 674 675 if (mempool_init_kvmalloc_pool( 676 &c->compress_workspace[i->type], 677 1, i->compress_workspace)) 678 return -BCH_ERR_ENOMEM_compression_workspace_init; 679 } 680 681 return 0; 682 } 683 684 static u64 compression_opt_to_feature(unsigned v) 685 { 686 unsigned type = bch2_compression_decode(v).type; 687 688 return BIT_ULL(bch2_compression_opt_to_feature[type]); 689 } 690 691 int bch2_fs_compress_init(struct bch_fs *c) 692 { 693 u64 f = c->sb.features; 694 695 f |= compression_opt_to_feature(c->opts.compression); 696 f |= compression_opt_to_feature(c->opts.background_compression); 697 698 return __bch2_fs_compress_init(c, f); 699 } 700 701 int bch2_opt_compression_parse(struct bch_fs *c, const char *_val, u64 *res, 702 struct printbuf *err) 703 { 704 char *val = kstrdup(_val, GFP_KERNEL); 705 char *p = val, *type_str, *level_str; 706 struct bch_compression_opt opt = { 0 }; 707 int ret; 708 709 if (!val) 710 return -ENOMEM; 711 712 type_str = strsep(&p, ":"); 713 level_str = p; 714 715 ret = match_string(bch2_compression_opts, -1, type_str); 716 if (ret < 0 && err) 717 prt_str(err, "invalid compression type"); 718 if (ret < 0) 719 goto err; 720 721 opt.type = ret; 722 723 if (level_str) { 724 unsigned level; 725 726 ret = kstrtouint(level_str, 10, &level); 727 if (!ret && !opt.type && level) 728 ret = -EINVAL; 729 if (!ret && level > 15) 730 ret = -EINVAL; 731 if (ret < 0 && err) 732 prt_str(err, "invalid compression level"); 733 if (ret < 0) 734 goto err; 735 736 opt.level = level; 737 } 738 739 *res = bch2_compression_encode(opt); 740 err: 741 kfree(val); 742 return ret; 743 } 744 745 void bch2_compression_opt_to_text(struct printbuf *out, u64 v) 746 { 747 struct bch_compression_opt opt = bch2_compression_decode(v); 748 749 if (opt.type < BCH_COMPRESSION_OPT_NR) 750 prt_str(out, bch2_compression_opts[opt.type]); 751 else 752 prt_printf(out, "(unknown compression opt %u)", opt.type); 753 if (opt.level) 754 prt_printf(out, ":%u", opt.level); 755 } 756 757 void bch2_opt_compression_to_text(struct printbuf *out, 758 struct bch_fs *c, 759 struct bch_sb *sb, 760 u64 v) 761 { 762 return bch2_compression_opt_to_text(out, v); 763 } 764 765 int bch2_opt_compression_validate(u64 v, struct printbuf *err) 766 { 767 if (!bch2_compression_opt_valid(v)) { 768 prt_printf(err, "invalid compression opt %llu", v); 769 return -BCH_ERR_invalid_sb_opt_compression; 770 } 771 772 return 0; 773 } 774