// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "checksum.h"
#include "compress.h"
#include "extents.h"
#include "super-io.h"

#include <linux/lz4.h>
#include <linux/zlib.h>
#include <linux/zstd.h>

/* Bounce buffer: */
struct bbuf {
	void		*b;
	enum {
		BB_NONE,
		BB_VMAP,
		BB_KMALLOC,
		BB_MEMPOOL,
	}		type;
	int		rw;
};

static struct bbuf __bounce_alloc(struct bch_fs *c, unsigned size, int rw)
{
	void *b;

	BUG_ON(size > c->opts.encoded_extent_max);

	b = kmalloc(size, GFP_NOFS|__GFP_NOWARN);
	if (b)
		return (struct bbuf) { .b = b, .type = BB_KMALLOC, .rw = rw };

	b = mempool_alloc(&c->compression_bounce[rw], GFP_NOFS);
	if (b)
		return (struct bbuf) { .b = b, .type = BB_MEMPOOL, .rw = rw };

	BUG();
}

static bool bio_phys_contig(struct bio *bio, struct bvec_iter start)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	void *expected_start = NULL;

	__bio_for_each_bvec(bv, bio, iter, start) {
		if (expected_start &&
		    expected_start != page_address(bv.bv_page) + bv.bv_offset)
			return false;

		expected_start = page_address(bv.bv_page) +
			bv.bv_offset + bv.bv_len;
	}

	return true;
}

static struct bbuf __bio_map_or_bounce(struct bch_fs *c, struct bio *bio,
				       struct bvec_iter start, int rw)
{
	struct bbuf ret;
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned nr_pages = 0;
	struct page *stack_pages[16];
	struct page **pages = NULL;
	void *data;

	BUG_ON(start.bi_size > c->opts.encoded_extent_max);

	if (!PageHighMem(bio_iter_page(bio, start)) &&
	    bio_phys_contig(bio, start))
		return (struct bbuf) {
			.b = page_address(bio_iter_page(bio, start)) +
				bio_iter_offset(bio, start),
			.type = BB_NONE, .rw = rw
		};

	/* check if we can map the pages contiguously: */
	__bio_for_each_segment(bv, bio, iter, start) {
		if (iter.bi_size != start.bi_size &&
		    bv.bv_offset)
			goto bounce;

		if (bv.bv_len < iter.bi_size &&
		    bv.bv_offset + bv.bv_len < PAGE_SIZE)
			goto bounce;

		nr_pages++;
	}

	BUG_ON(DIV_ROUND_UP(start.bi_size, PAGE_SIZE) > nr_pages);

	pages = nr_pages > ARRAY_SIZE(stack_pages)
		? kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS)
		: stack_pages;
	if (!pages)
		goto bounce;

	nr_pages = 0;
	__bio_for_each_segment(bv, bio, iter, start)
		pages[nr_pages++] = bv.bv_page;

	data = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (pages != stack_pages)
		kfree(pages);

	if (data)
		return (struct bbuf) {
			.b = data + bio_iter_offset(bio, start),
			.type = BB_VMAP, .rw = rw
		};
bounce:
	ret = __bounce_alloc(c, start.bi_size, rw);

	if (rw == READ)
		memcpy_from_bio(ret.b, bio, start);

	return ret;
}

static struct bbuf bio_map_or_bounce(struct bch_fs *c, struct bio *bio, int rw)
{
	return __bio_map_or_bounce(c, bio, bio->bi_iter, rw);
}

static void bio_unmap_or_unbounce(struct bch_fs *c, struct bbuf buf)
{
	switch (buf.type) {
	case BB_NONE:
		break;
	case BB_VMAP:
		vunmap((void *) ((unsigned long) buf.b & PAGE_MASK));
		break;
	case BB_KMALLOC:
		kfree(buf.b);
		break;
	case BB_MEMPOOL:
		mempool_free(buf.b, &c->compression_bounce[buf.rw]);
		break;
	}
}
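/*
 * Usage sketch (illustrative only, mirroring the callers below): every
 * bio_map_or_bounce() must be paired with a bio_unmap_or_unbounce(),
 * whichever BB_* strategy was chosen:
 *
 *	struct bbuf buf = bio_map_or_bounce(c, bio, READ);
 *	// buf.b is a contiguous view of bio->bi_iter.bi_size bytes;
 *	// for READ, bounced data has already been copied in
 *	bio_unmap_or_unbounce(c, buf);
 *
 * For rw == WRITE the caller fills buf.b and, when the buffer was bounced
 * rather than mapped (type is neither BB_NONE nor BB_VMAP), copies the
 * result out with memcpy_to_bio() - as __bio_compress() and
 * bch2_bio_uncompress() below do.
 */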
static inline void zlib_set_workspace(z_stream *strm, void *workspace)
{
#ifdef __KERNEL__
	strm->workspace = workspace;
#endif
}

static int __bio_uncompress(struct bch_fs *c, struct bio *src,
			    void *dst_data, struct bch_extent_crc_unpacked crc)
{
	struct bbuf src_data = { NULL };
	size_t src_len = src->bi_iter.bi_size;
	size_t dst_len = crc.uncompressed_size << 9;
	void *workspace;
	int ret;

	src_data = bio_map_or_bounce(c, src, READ);

	switch (crc.compression_type) {
	case BCH_COMPRESSION_TYPE_lz4_old:
	case BCH_COMPRESSION_TYPE_lz4:
		ret = LZ4_decompress_safe_partial(src_data.b, dst_data,
						  src_len, dst_len, dst_len);
		if (ret != dst_len)
			goto err;
		break;
	case BCH_COMPRESSION_TYPE_gzip: {
		z_stream strm = {
			.next_in	= src_data.b,
			.avail_in	= src_len,
			.next_out	= dst_data,
			.avail_out	= dst_len,
		};

		workspace = mempool_alloc(&c->decompress_workspace, GFP_NOFS);

		zlib_set_workspace(&strm, workspace);
		zlib_inflateInit2(&strm, -MAX_WBITS);
		ret = zlib_inflate(&strm, Z_FINISH);

		mempool_free(workspace, &c->decompress_workspace);

		if (ret != Z_STREAM_END)
			goto err;
		break;
	}
	case BCH_COMPRESSION_TYPE_zstd: {
		ZSTD_DCtx *ctx;
		size_t real_src_len = le32_to_cpup(src_data.b);

		if (real_src_len > src_len - 4)
			goto err;

		workspace = mempool_alloc(&c->decompress_workspace, GFP_NOFS);
		ctx = zstd_init_dctx(workspace, zstd_dctx_workspace_bound());

		ret = zstd_decompress_dctx(ctx,
					   dst_data, dst_len,
					   src_data.b + 4, real_src_len);

		mempool_free(workspace, &c->decompress_workspace);

		if (ret != dst_len)
			goto err;
		break;
	}
	default:
		BUG();
	}
	ret = 0;
out:
	bio_unmap_or_unbounce(c, src_data);
	return ret;
err:
	ret = -EIO;
	goto out;
}
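/*
 * Note on the zstd case above: bcachefs stores the exact compressed size in
 * the first 4 bytes of the encoded extent (little endian), because extents
 * are padded out to block granularity while zstd_decompress_dctx() needs the
 * exact frame length. A hypothetical standalone reader of that framing:
 *
 *	size_t frame_len = le32_to_cpup(buf);	// exact compressed size
 *	void *frame = buf + 4;			// zstd frame follows the header
 *
 * attempt_compress() below writes this header on the compression side.
 */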
int bch2_bio_uncompress_inplace(struct bch_fs *c, struct bio *bio,
				struct bch_extent_crc_unpacked *crc)
{
	struct bbuf data = { NULL };
	size_t dst_len = crc->uncompressed_size << 9;

	/* bio must own its pages: */
	BUG_ON(!bio->bi_vcnt);
	BUG_ON(DIV_ROUND_UP(crc->live_size, PAGE_SECTORS) > bio->bi_max_vecs);

	if (crc->uncompressed_size << 9 > c->opts.encoded_extent_max ||
	    crc->compressed_size << 9 > c->opts.encoded_extent_max) {
		bch_err(c, "error rewriting existing data: extent too big");
		return -EIO;
	}

	data = __bounce_alloc(c, dst_len, WRITE);

	if (__bio_uncompress(c, bio, data.b, *crc)) {
		if (!c->opts.no_data_io)
			bch_err(c, "error rewriting existing data: decompression error");
		bio_unmap_or_unbounce(c, data);
		return -EIO;
	}

	/*
	 * XXX: don't have a good way to assert that the bio was allocated with
	 * enough space, we depend on bch2_move_extent doing the right thing
	 */
	bio->bi_iter.bi_size = crc->live_size << 9;

	memcpy_to_bio(bio, bio->bi_iter, data.b + (crc->offset << 9));

	crc->csum_type		= 0;
	crc->compression_type	= 0;
	crc->compressed_size	= crc->live_size;
	crc->uncompressed_size	= crc->live_size;
	crc->offset		= 0;
	crc->csum		= (struct bch_csum) { 0, 0 };

	bio_unmap_or_unbounce(c, data);
	return 0;
}

int bch2_bio_uncompress(struct bch_fs *c, struct bio *src,
			struct bio *dst, struct bvec_iter dst_iter,
			struct bch_extent_crc_unpacked crc)
{
	struct bbuf dst_data = { NULL };
	size_t dst_len = crc.uncompressed_size << 9;
	int ret;

	if (crc.uncompressed_size << 9 > c->opts.encoded_extent_max ||
	    crc.compressed_size << 9 > c->opts.encoded_extent_max)
		return -EIO;

	dst_data = dst_len == dst_iter.bi_size
		? __bio_map_or_bounce(c, dst, dst_iter, WRITE)
		: __bounce_alloc(c, dst_len, WRITE);

	ret = __bio_uncompress(c, src, dst_data.b, crc);
	if (ret)
		goto err;

	if (dst_data.type != BB_NONE &&
	    dst_data.type != BB_VMAP)
		memcpy_to_bio(dst, dst_iter, dst_data.b + (crc.offset << 9));
err:
	bio_unmap_or_unbounce(c, dst_data);
	return ret;
}

static int attempt_compress(struct bch_fs *c,
			    void *workspace,
			    void *dst, size_t dst_len,
			    void *src, size_t src_len,
			    struct bch_compression_opt compression)
{
	enum bch_compression_type compression_type =
		__bch2_compression_opt_to_type[compression.type];

	switch (compression_type) {
	case BCH_COMPRESSION_TYPE_lz4:
		if (compression.level < LZ4HC_MIN_CLEVEL) {
			int len = src_len;
			int ret = LZ4_compress_destSize(
					src, dst,
					&len, dst_len,
					workspace);
			if (len < src_len)
				return -len;

			return ret;
		} else {
			int ret = LZ4_compress_HC(
					src, dst,
					src_len, dst_len,
					compression.level,
					workspace);

			return ret ?: -1;
		}
	case BCH_COMPRESSION_TYPE_gzip: {
		z_stream strm = {
			.next_in	= src,
			.avail_in	= src_len,
			.next_out	= dst,
			.avail_out	= dst_len,
		};

		zlib_set_workspace(&strm, workspace);
		zlib_deflateInit2(&strm,
				  compression.level
				  ? clamp_t(unsigned, compression.level,
					    Z_BEST_SPEED, Z_BEST_COMPRESSION)
				  : Z_DEFAULT_COMPRESSION,
				  Z_DEFLATED, -MAX_WBITS, DEF_MEM_LEVEL,
				  Z_DEFAULT_STRATEGY);

		if (zlib_deflate(&strm, Z_FINISH) != Z_STREAM_END)
			return 0;

		if (zlib_deflateEnd(&strm) != Z_OK)
			return 0;

		return strm.total_out;
	}
	case BCH_COMPRESSION_TYPE_zstd: {
		/*
		 * rescale:
		 * zstd max compression level is 22, our max level is 15
		 */
		unsigned level = min((compression.level * 3) / 2, zstd_max_clevel());
		ZSTD_parameters params = zstd_get_params(level, c->opts.encoded_extent_max);
		ZSTD_CCtx *ctx = zstd_init_cctx(workspace,
			zstd_cctx_workspace_bound(&params.cParams));

		/*
		 * ZSTD requires that when we decompress we pass in the exact
		 * compressed size - rounding it up to the nearest sector
		 * doesn't work, so we use the first 4 bytes of the buffer for
		 * that.
		 *
		 * Additionally, the ZSTD code seems to have a bug where it will
		 * write just past the end of the buffer - so subtract a fudge
		 * factor (7 bytes) from the dst buffer size to account for
		 * that.
		 */
		size_t len = zstd_compress_cctx(ctx,
				dst + 4, dst_len - 4 - 7,
				src, src_len,
				&c->zstd_params);
		if (zstd_is_error(len))
			return 0;

		*((__le32 *) dst) = cpu_to_le32(len);
		return len + 4;
	}
	default:
		BUG();
	}
}
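/*
 * attempt_compress() return convention, summarized here since the three
 * branches differ: a positive return is the number of compressed bytes
 * produced, 0 means the output didn't fit (or the compressor failed), and a
 * negative return is a hint that only -ret bytes of input would fit -
 * LZ4_compress_destSize() reports this exactly, while LZ4_compress_HC()
 * falls back to a bare -1. __bio_compress() uses the hint to shrink src_len
 * precisely instead of guessing.
 */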
static unsigned __bio_compress(struct bch_fs *c,
			       struct bio *dst, size_t *dst_len,
			       struct bio *src, size_t *src_len,
			       struct bch_compression_opt compression)
{
	struct bbuf src_data = { NULL }, dst_data = { NULL };
	void *workspace;
	enum bch_compression_type compression_type =
		__bch2_compression_opt_to_type[compression.type];
	unsigned pad;
	int ret = 0;

	BUG_ON(compression_type >= BCH_COMPRESSION_TYPE_NR);
	BUG_ON(!mempool_initialized(&c->compress_workspace[compression_type]));

	/* If it's only one block, don't bother trying to compress: */
	if (src->bi_iter.bi_size <= c->opts.block_size)
		return BCH_COMPRESSION_TYPE_incompressible;

	dst_data = bio_map_or_bounce(c, dst, WRITE);
	src_data = bio_map_or_bounce(c, src, READ);

	workspace = mempool_alloc(&c->compress_workspace[compression_type], GFP_NOFS);

	*src_len = src->bi_iter.bi_size;
	*dst_len = dst->bi_iter.bi_size;

	/*
	 * XXX: this algorithm sucks when the compression code doesn't tell us
	 * how much would fit, like LZ4 does:
	 */
	while (1) {
		if (*src_len <= block_bytes(c)) {
			ret = -1;
			break;
		}

		ret = attempt_compress(c, workspace,
				       dst_data.b, *dst_len,
				       src_data.b, *src_len,
				       compression);
		if (ret > 0) {
			*dst_len = ret;
			ret = 0;
			break;
		}

		/* Didn't fit: should we retry with a smaller amount? */
		if (*src_len <= *dst_len) {
			ret = -1;
			break;
		}

		/*
		 * If ret is negative, it's a hint as to how much data would fit
		 */
		BUG_ON(-ret >= *src_len);

		if (ret < 0)
			*src_len = -ret;
		else
			*src_len -= (*src_len - *dst_len) / 2;
		*src_len = round_down(*src_len, block_bytes(c));
	}

	mempool_free(workspace, &c->compress_workspace[compression_type]);

	if (ret)
		goto err;

	/* Didn't get smaller: */
	if (round_up(*dst_len, block_bytes(c)) >= *src_len)
		goto err;

	pad = round_up(*dst_len, block_bytes(c)) - *dst_len;

	memset(dst_data.b + *dst_len, 0, pad);
	*dst_len += pad;

	if (dst_data.type != BB_NONE &&
	    dst_data.type != BB_VMAP)
		memcpy_to_bio(dst, dst->bi_iter, dst_data.b);

	BUG_ON(!*dst_len || *dst_len > dst->bi_iter.bi_size);
	BUG_ON(!*src_len || *src_len > src->bi_iter.bi_size);
	BUG_ON(*dst_len & (block_bytes(c) - 1));
	BUG_ON(*src_len & (block_bytes(c) - 1));
	ret = compression_type;
out:
	bio_unmap_or_unbounce(c, src_data);
	bio_unmap_or_unbounce(c, dst_data);
	return ret;
err:
	ret = BCH_COMPRESSION_TYPE_incompressible;
	goto out;
}
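/*
 * Worked example of the shrink loop above, with illustrative numbers: at
 * *src_len == 128k and *dst_len == 64k, a failed attempt that returns no
 * hint shrinks src_len by half the difference each iteration - 96k, 80k,
 * 72k, ... - each value rounded down to block size. The loop ends when the
 * output fits, or gives up (incompressible) once src_len is no larger than
 * dst_len or only a single block remains.
 */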
unsigned bch2_bio_compress(struct bch_fs *c,
			   struct bio *dst, size_t *dst_len,
			   struct bio *src, size_t *src_len,
			   unsigned compression_opt)
{
	unsigned orig_dst = dst->bi_iter.bi_size;
	unsigned orig_src = src->bi_iter.bi_size;
	unsigned compression_type;

	/* Don't consume more than BCH_ENCODED_EXTENT_MAX from @src: */
	src->bi_iter.bi_size = min_t(unsigned, src->bi_iter.bi_size,
				     c->opts.encoded_extent_max);
	/* Don't generate a bigger output than input: */
	dst->bi_iter.bi_size = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);

	compression_type =
		__bio_compress(c, dst, dst_len, src, src_len,
			       bch2_compression_decode(compression_opt));

	dst->bi_iter.bi_size = orig_dst;
	src->bi_iter.bi_size = orig_src;
	return compression_type;
}

static int __bch2_fs_compress_init(struct bch_fs *, u64);

#define BCH_FEATURE_none	0

static const unsigned bch2_compression_opt_to_feature[] = {
#define x(t, n) [BCH_COMPRESSION_OPT_##t] = BCH_FEATURE_##t,
	BCH_COMPRESSION_OPTS()
#undef x
};

#undef BCH_FEATURE_none
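/*
 * For reference, the x-macro table above expands (roughly) to:
 *
 *	[BCH_COMPRESSION_OPT_none]	= 0,	(via the temporary BCH_FEATURE_none)
 *	[BCH_COMPRESSION_OPT_lz4]	= BCH_FEATURE_lz4,
 *	[BCH_COMPRESSION_OPT_gzip]	= BCH_FEATURE_gzip,
 *	[BCH_COMPRESSION_OPT_zstd]	= BCH_FEATURE_zstd,
 *
 * i.e. each compression option maps to the superblock feature bit that must
 * be set before data compressed with that algorithm may be written.
 */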
static int __bch2_check_set_has_compressed_data(struct bch_fs *c, u64 f)
{
	int ret = 0;

	if ((c->sb.features & f) == f)
		return 0;

	mutex_lock(&c->sb_lock);

	if ((c->sb.features & f) == f) {
		mutex_unlock(&c->sb_lock);
		return 0;
	}

	ret = __bch2_fs_compress_init(c, c->sb.features|f);
	if (ret) {
		mutex_unlock(&c->sb_lock);
		return ret;
	}

	c->disk_sb.sb->features[0] |= cpu_to_le64(f);
	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return 0;
}

int bch2_check_set_has_compressed_data(struct bch_fs *c,
				       unsigned compression_opt)
{
	unsigned compression_type = bch2_compression_decode(compression_opt).type;

	BUG_ON(compression_type >= ARRAY_SIZE(bch2_compression_opt_to_feature));

	return compression_type
		? __bch2_check_set_has_compressed_data(c,
				1ULL << bch2_compression_opt_to_feature[compression_type])
		: 0;
}

void bch2_fs_compress_exit(struct bch_fs *c)
{
	unsigned i;

	mempool_exit(&c->decompress_workspace);
	for (i = 0; i < ARRAY_SIZE(c->compress_workspace); i++)
		mempool_exit(&c->compress_workspace[i]);
	mempool_exit(&c->compression_bounce[WRITE]);
	mempool_exit(&c->compression_bounce[READ]);
}

static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
{
	size_t decompress_workspace_size = 0;
	ZSTD_parameters params = zstd_get_params(zstd_max_clevel(),
						 c->opts.encoded_extent_max);
	struct {
		unsigned			feature;
		enum bch_compression_type	type;
		size_t				compress_workspace;
		size_t				decompress_workspace;
	} compression_types[] = {
		{ BCH_FEATURE_lz4, BCH_COMPRESSION_TYPE_lz4,
			max_t(size_t, LZ4_MEM_COMPRESS, LZ4HC_MEM_COMPRESS),
			0 },
		{ BCH_FEATURE_gzip, BCH_COMPRESSION_TYPE_gzip,
			zlib_deflate_workspacesize(MAX_WBITS, DEF_MEM_LEVEL),
			zlib_inflate_workspacesize(), },
		{ BCH_FEATURE_zstd, BCH_COMPRESSION_TYPE_zstd,
			zstd_cctx_workspace_bound(&params.cParams),
			zstd_dctx_workspace_bound() },
	}, *i;
	bool have_compressed = false;

	c->zstd_params = params;

	for (i = compression_types;
	     i < compression_types + ARRAY_SIZE(compression_types);
	     i++)
		have_compressed |= (features & (1 << i->feature)) != 0;

	if (!have_compressed)
		return 0;

	if (!mempool_initialized(&c->compression_bounce[READ]) &&
	    mempool_init_kvpmalloc_pool(&c->compression_bounce[READ],
					1, c->opts.encoded_extent_max))
		return -BCH_ERR_ENOMEM_compression_bounce_read_init;

	if (!mempool_initialized(&c->compression_bounce[WRITE]) &&
	    mempool_init_kvpmalloc_pool(&c->compression_bounce[WRITE],
					1, c->opts.encoded_extent_max))
		return -BCH_ERR_ENOMEM_compression_bounce_write_init;

	for (i = compression_types;
	     i < compression_types + ARRAY_SIZE(compression_types);
	     i++) {
		decompress_workspace_size =
			max(decompress_workspace_size, i->decompress_workspace);

		if (!(features & (1 << i->feature)))
			continue;

		if (mempool_initialized(&c->compress_workspace[i->type]))
			continue;

		if (mempool_init_kvpmalloc_pool(
				&c->compress_workspace[i->type],
				1, i->compress_workspace))
			return -BCH_ERR_ENOMEM_compression_workspace_init;
	}

	if (!mempool_initialized(&c->decompress_workspace) &&
	    mempool_init_kvpmalloc_pool(&c->decompress_workspace,
					1, decompress_workspace_size))
		return -BCH_ERR_ENOMEM_decompression_workspace_init;

	return 0;
}

static u64 compression_opt_to_feature(unsigned v)
{
	unsigned type = bch2_compression_decode(v).type;

	return BIT_ULL(bch2_compression_opt_to_feature[type]);
}

int bch2_fs_compress_init(struct bch_fs *c)
{
	u64 f = c->sb.features;

	f |= compression_opt_to_feature(c->opts.compression);
	f |= compression_opt_to_feature(c->opts.background_compression);

	return __bch2_fs_compress_init(c, f);
}
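/*
 * Initialization flow, for orientation: bch2_fs_compress_init() runs at
 * filesystem startup and sizes the mempools for every algorithm implied by
 * the superblock feature bits plus the compression/background_compression
 * options. If an option later requires an algorithm whose feature bit isn't
 * set yet, bch2_check_set_has_compressed_data() initializes the missing
 * pools and persists the new feature bit under sb_lock, so compressed data
 * is never written without the matching feature bit on disk.
 */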
int bch2_opt_compression_parse(struct bch_fs *c, const char *_val, u64 *res,
			       struct printbuf *err)
{
	char *val = kstrdup(_val, GFP_KERNEL);
	char *p = val, *type_str, *level_str;
	struct bch_compression_opt opt = { 0 };
	int ret;

	if (!val)
		return -ENOMEM;

	type_str = strsep(&p, ":");
	level_str = p;

	ret = match_string(bch2_compression_opts, -1, type_str);
	if (ret < 0 && err)
		prt_str(err, "invalid compression type");
	if (ret < 0)
		goto err;

	opt.type = ret;

	if (level_str) {
		unsigned level;

		ret = kstrtouint(level_str, 10, &level);
		if (!ret && !opt.type && level)
			ret = -EINVAL;
		if (!ret && level > 15)
			ret = -EINVAL;
		if (ret < 0 && err)
			prt_str(err, "invalid compression level");
		if (ret < 0)
			goto err;

		opt.level = level;
	}

	*res = bch2_compression_encode(opt);
err:
	kfree(val);
	return ret;
}

void bch2_compression_opt_to_text(struct printbuf *out, u64 v)
{
	struct bch_compression_opt opt = bch2_compression_decode(v);

	if (opt.type < BCH_COMPRESSION_OPT_NR)
		prt_str(out, bch2_compression_opts[opt.type]);
	else
		prt_printf(out, "(unknown compression opt %u)", opt.type);
	if (opt.level)
		prt_printf(out, ":%u", opt.level);
}

void bch2_opt_compression_to_text(struct printbuf *out,
				  struct bch_fs *c,
				  struct bch_sb *sb,
				  u64 v)
{
	return bch2_compression_opt_to_text(out, v);
}

int bch2_opt_compression_validate(u64 v, struct printbuf *err)
{
	if (!bch2_compression_opt_valid(v)) {
		prt_printf(err, "invalid compression opt %llu", v);
		return -BCH_ERR_invalid_sb_opt_compression;
	}

	return 0;
}
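/*
 * Option round-trip example (hypothetical caller, e.g. a unit test): for
 * valid input, parsing and printing are inverses:
 *
 *	u64 v;
 *	struct printbuf err = PRINTBUF;
 *
 *	if (!bch2_opt_compression_parse(c, "zstd:7", &v, &err)) {
 *		struct printbuf out = PRINTBUF;
 *
 *		bch2_compression_opt_to_text(&out, v);	// prints "zstd:7"
 *		printbuf_exit(&out);
 *	}
 *	printbuf_exit(&err);
 *
 * Levels above 15 are rejected (the encoded opt stores the level in four
 * bits), as is a nonzero level with type "none".
 */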