1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2017-2018 HUAWEI, Inc. 4 * https://www.huawei.com/ 5 * Copyright (C) 2021, Alibaba Cloud 6 */ 7 #include <linux/module.h> 8 #include <linux/buffer_head.h> 9 #include <linux/statfs.h> 10 #include <linux/parser.h> 11 #include <linux/seq_file.h> 12 #include <linux/crc32c.h> 13 #include <linux/fs_context.h> 14 #include <linux/fs_parser.h> 15 #include <linux/dax.h> 16 #include "xattr.h" 17 18 #define CREATE_TRACE_POINTS 19 #include <trace/events/erofs.h> 20 21 static struct kmem_cache *erofs_inode_cachep __read_mostly; 22 23 void _erofs_err(struct super_block *sb, const char *function, 24 const char *fmt, ...) 25 { 26 struct va_format vaf; 27 va_list args; 28 29 va_start(args, fmt); 30 31 vaf.fmt = fmt; 32 vaf.va = &args; 33 34 pr_err("(device %s): %s: %pV", sb->s_id, function, &vaf); 35 va_end(args); 36 } 37 38 void _erofs_info(struct super_block *sb, const char *function, 39 const char *fmt, ...) 40 { 41 struct va_format vaf; 42 va_list args; 43 44 va_start(args, fmt); 45 46 vaf.fmt = fmt; 47 vaf.va = &args; 48 49 pr_info("(device %s): %pV", sb->s_id, &vaf); 50 va_end(args); 51 } 52 53 static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata) 54 { 55 struct erofs_super_block *dsb; 56 u32 expected_crc, crc; 57 58 dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET, 59 EROFS_BLKSIZ - EROFS_SUPER_OFFSET, GFP_KERNEL); 60 if (!dsb) 61 return -ENOMEM; 62 63 expected_crc = le32_to_cpu(dsb->checksum); 64 dsb->checksum = 0; 65 /* to allow for x86 boot sectors and other oddities. */ 66 crc = crc32c(~0, dsb, EROFS_BLKSIZ - EROFS_SUPER_OFFSET); 67 kfree(dsb); 68 69 if (crc != expected_crc) { 70 erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected", 71 crc, expected_crc); 72 return -EBADMSG; 73 } 74 return 0; 75 } 76 77 static void erofs_inode_init_once(void *ptr) 78 { 79 struct erofs_inode *vi = ptr; 80 81 inode_init_once(&vi->vfs_inode); 82 } 83 84 static struct inode *erofs_alloc_inode(struct super_block *sb) 85 { 86 struct erofs_inode *vi = 87 alloc_inode_sb(sb, erofs_inode_cachep, GFP_KERNEL); 88 89 if (!vi) 90 return NULL; 91 92 /* zero out everything except vfs_inode */ 93 memset(vi, 0, offsetof(struct erofs_inode, vfs_inode)); 94 return &vi->vfs_inode; 95 } 96 97 static void erofs_free_inode(struct inode *inode) 98 { 99 struct erofs_inode *vi = EROFS_I(inode); 100 101 /* be careful of RCU symlink path */ 102 if (inode->i_op == &erofs_fast_symlink_iops) 103 kfree(inode->i_link); 104 kfree(vi->xattr_shared_xattrs); 105 106 kmem_cache_free(erofs_inode_cachep, vi); 107 } 108 109 static bool check_layout_compatibility(struct super_block *sb, 110 struct erofs_super_block *dsb) 111 { 112 const unsigned int feature = le32_to_cpu(dsb->feature_incompat); 113 114 EROFS_SB(sb)->feature_incompat = feature; 115 116 /* check if current kernel meets all mandatory requirements */ 117 if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) { 118 erofs_err(sb, 119 "unidentified incompatible feature %x, please upgrade kernel version", 120 feature & ~EROFS_ALL_FEATURE_INCOMPAT); 121 return false; 122 } 123 return true; 124 } 125 126 #ifdef CONFIG_EROFS_FS_ZIP 127 /* read variable-sized metadata, offset will be aligned by 4-byte */ 128 static void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf, 129 erofs_off_t *offset, int *lengthp) 130 { 131 u8 *buffer, *ptr; 132 int len, i, cnt; 133 134 *offset = round_up(*offset, 4); 135 ptr = erofs_read_metabuf(buf, sb, erofs_blknr(*offset), EROFS_KMAP); 136 if (IS_ERR(ptr)) 137 return ptr; 138 139 len = le16_to_cpu(*(__le16 *)&ptr[erofs_blkoff(*offset)]); 140 if (!len) 141 len = U16_MAX + 1; 142 buffer = kmalloc(len, GFP_KERNEL); 143 if (!buffer) 144 return ERR_PTR(-ENOMEM); 145 *offset += sizeof(__le16); 146 *lengthp = len; 147 148 for (i = 0; i < len; i += cnt) { 149 cnt = min(EROFS_BLKSIZ - (int)erofs_blkoff(*offset), len - i); 150 ptr = erofs_read_metabuf(buf, sb, erofs_blknr(*offset), 151 EROFS_KMAP); 152 if (IS_ERR(ptr)) { 153 kfree(buffer); 154 return ptr; 155 } 156 memcpy(buffer + i, ptr + erofs_blkoff(*offset), cnt); 157 *offset += cnt; 158 } 159 return buffer; 160 } 161 162 static int erofs_load_compr_cfgs(struct super_block *sb, 163 struct erofs_super_block *dsb) 164 { 165 struct erofs_sb_info *sbi = EROFS_SB(sb); 166 struct erofs_buf buf = __EROFS_BUF_INITIALIZER; 167 unsigned int algs, alg; 168 erofs_off_t offset; 169 int size, ret = 0; 170 171 sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs); 172 if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) { 173 erofs_err(sb, "try to load compressed fs with unsupported algorithms %x", 174 sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS); 175 return -EINVAL; 176 } 177 178 offset = EROFS_SUPER_OFFSET + sbi->sb_size; 179 alg = 0; 180 for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) { 181 void *data; 182 183 if (!(algs & 1)) 184 continue; 185 186 data = erofs_read_metadata(sb, &buf, &offset, &size); 187 if (IS_ERR(data)) { 188 ret = PTR_ERR(data); 189 break; 190 } 191 192 switch (alg) { 193 case Z_EROFS_COMPRESSION_LZ4: 194 ret = z_erofs_load_lz4_config(sb, dsb, data, size); 195 break; 196 case Z_EROFS_COMPRESSION_LZMA: 197 ret = z_erofs_load_lzma_config(sb, dsb, data, size); 198 break; 199 default: 200 DBG_BUGON(1); 201 ret = -EFAULT; 202 } 203 kfree(data); 204 if (ret) 205 break; 206 } 207 erofs_put_metabuf(&buf); 208 return ret; 209 } 210 #else 211 static int erofs_load_compr_cfgs(struct super_block *sb, 212 struct erofs_super_block *dsb) 213 { 214 if (dsb->u1.available_compr_algs) { 215 erofs_err(sb, "try to load compressed fs when compression is disabled"); 216 return -EINVAL; 217 } 218 return 0; 219 } 220 #endif 221 222 static int erofs_init_devices(struct super_block *sb, 223 struct erofs_super_block *dsb) 224 { 225 struct erofs_sb_info *sbi = EROFS_SB(sb); 226 unsigned int ondisk_extradevs; 227 erofs_off_t pos; 228 struct erofs_buf buf = __EROFS_BUF_INITIALIZER; 229 struct erofs_device_info *dif; 230 struct erofs_deviceslot *dis; 231 void *ptr; 232 int id, err = 0; 233 234 sbi->total_blocks = sbi->primarydevice_blocks; 235 if (!erofs_sb_has_device_table(sbi)) 236 ondisk_extradevs = 0; 237 else 238 ondisk_extradevs = le16_to_cpu(dsb->extra_devices); 239 240 if (ondisk_extradevs != sbi->devs->extra_devices) { 241 erofs_err(sb, "extra devices don't match (ondisk %u, given %u)", 242 ondisk_extradevs, sbi->devs->extra_devices); 243 return -EINVAL; 244 } 245 if (!ondisk_extradevs) 246 return 0; 247 248 sbi->device_id_mask = roundup_pow_of_two(ondisk_extradevs + 1) - 1; 249 pos = le16_to_cpu(dsb->devt_slotoff) * EROFS_DEVT_SLOT_SIZE; 250 down_read(&sbi->devs->rwsem); 251 idr_for_each_entry(&sbi->devs->tree, dif, id) { 252 struct block_device *bdev; 253 254 ptr = erofs_read_metabuf(&buf, sb, erofs_blknr(pos), 255 EROFS_KMAP); 256 if (IS_ERR(ptr)) { 257 err = PTR_ERR(ptr); 258 break; 259 } 260 dis = ptr + erofs_blkoff(pos); 261 262 bdev = blkdev_get_by_path(dif->path, 263 FMODE_READ | FMODE_EXCL, 264 sb->s_type); 265 if (IS_ERR(bdev)) { 266 err = PTR_ERR(bdev); 267 break; 268 } 269 dif->bdev = bdev; 270 dif->dax_dev = fs_dax_get_by_bdev(bdev, &dif->dax_part_off); 271 dif->blocks = le32_to_cpu(dis->blocks); 272 dif->mapped_blkaddr = le32_to_cpu(dis->mapped_blkaddr); 273 sbi->total_blocks += dif->blocks; 274 pos += EROFS_DEVT_SLOT_SIZE; 275 } 276 up_read(&sbi->devs->rwsem); 277 erofs_put_metabuf(&buf); 278 return err; 279 } 280 281 static int erofs_read_superblock(struct super_block *sb) 282 { 283 struct erofs_sb_info *sbi; 284 struct erofs_buf buf = __EROFS_BUF_INITIALIZER; 285 struct erofs_super_block *dsb; 286 unsigned int blkszbits; 287 void *data; 288 int ret; 289 290 data = erofs_read_metabuf(&buf, sb, 0, EROFS_KMAP); 291 if (IS_ERR(data)) { 292 erofs_err(sb, "cannot read erofs superblock"); 293 return PTR_ERR(data); 294 } 295 296 sbi = EROFS_SB(sb); 297 dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET); 298 299 ret = -EINVAL; 300 if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) { 301 erofs_err(sb, "cannot find valid erofs superblock"); 302 goto out; 303 } 304 305 sbi->feature_compat = le32_to_cpu(dsb->feature_compat); 306 if (erofs_sb_has_sb_chksum(sbi)) { 307 ret = erofs_superblock_csum_verify(sb, data); 308 if (ret) 309 goto out; 310 } 311 312 ret = -EINVAL; 313 blkszbits = dsb->blkszbits; 314 /* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */ 315 if (blkszbits != LOG_BLOCK_SIZE) { 316 erofs_err(sb, "blkszbits %u isn't supported on this platform", 317 blkszbits); 318 goto out; 319 } 320 321 if (!check_layout_compatibility(sb, dsb)) 322 goto out; 323 324 sbi->sb_size = 128 + dsb->sb_extslots * EROFS_SB_EXTSLOT_SIZE; 325 if (sbi->sb_size > EROFS_BLKSIZ) { 326 erofs_err(sb, "invalid sb_extslots %u (more than a fs block)", 327 sbi->sb_size); 328 goto out; 329 } 330 sbi->primarydevice_blocks = le32_to_cpu(dsb->blocks); 331 sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr); 332 #ifdef CONFIG_EROFS_FS_XATTR 333 sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr); 334 #endif 335 sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact)); 336 sbi->root_nid = le16_to_cpu(dsb->root_nid); 337 sbi->inos = le64_to_cpu(dsb->inos); 338 339 sbi->build_time = le64_to_cpu(dsb->build_time); 340 sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec); 341 342 memcpy(&sb->s_uuid, dsb->uuid, sizeof(dsb->uuid)); 343 344 ret = strscpy(sbi->volume_name, dsb->volume_name, 345 sizeof(dsb->volume_name)); 346 if (ret < 0) { /* -E2BIG */ 347 erofs_err(sb, "bad volume name without NIL terminator"); 348 ret = -EFSCORRUPTED; 349 goto out; 350 } 351 352 /* parse on-disk compression configurations */ 353 if (erofs_sb_has_compr_cfgs(sbi)) 354 ret = erofs_load_compr_cfgs(sb, dsb); 355 else 356 ret = z_erofs_load_lz4_config(sb, dsb, NULL, 0); 357 if (ret < 0) 358 goto out; 359 360 /* handle multiple devices */ 361 ret = erofs_init_devices(sb, dsb); 362 363 if (erofs_sb_has_ztailpacking(sbi)) 364 erofs_info(sb, "EXPERIMENTAL compressed inline data feature in use. Use at your own risk!"); 365 out: 366 erofs_put_metabuf(&buf); 367 return ret; 368 } 369 370 /* set up default EROFS parameters */ 371 static void erofs_default_options(struct erofs_fs_context *ctx) 372 { 373 #ifdef CONFIG_EROFS_FS_ZIP 374 ctx->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND; 375 ctx->opt.max_sync_decompress_pages = 3; 376 ctx->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO; 377 #endif 378 #ifdef CONFIG_EROFS_FS_XATTR 379 set_opt(&ctx->opt, XATTR_USER); 380 #endif 381 #ifdef CONFIG_EROFS_FS_POSIX_ACL 382 set_opt(&ctx->opt, POSIX_ACL); 383 #endif 384 } 385 386 enum { 387 Opt_user_xattr, 388 Opt_acl, 389 Opt_cache_strategy, 390 Opt_dax, 391 Opt_dax_enum, 392 Opt_device, 393 Opt_err 394 }; 395 396 static const struct constant_table erofs_param_cache_strategy[] = { 397 {"disabled", EROFS_ZIP_CACHE_DISABLED}, 398 {"readahead", EROFS_ZIP_CACHE_READAHEAD}, 399 {"readaround", EROFS_ZIP_CACHE_READAROUND}, 400 {} 401 }; 402 403 static const struct constant_table erofs_dax_param_enums[] = { 404 {"always", EROFS_MOUNT_DAX_ALWAYS}, 405 {"never", EROFS_MOUNT_DAX_NEVER}, 406 {} 407 }; 408 409 static const struct fs_parameter_spec erofs_fs_parameters[] = { 410 fsparam_flag_no("user_xattr", Opt_user_xattr), 411 fsparam_flag_no("acl", Opt_acl), 412 fsparam_enum("cache_strategy", Opt_cache_strategy, 413 erofs_param_cache_strategy), 414 fsparam_flag("dax", Opt_dax), 415 fsparam_enum("dax", Opt_dax_enum, erofs_dax_param_enums), 416 fsparam_string("device", Opt_device), 417 {} 418 }; 419 420 static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode) 421 { 422 #ifdef CONFIG_FS_DAX 423 struct erofs_fs_context *ctx = fc->fs_private; 424 425 switch (mode) { 426 case EROFS_MOUNT_DAX_ALWAYS: 427 warnfc(fc, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk"); 428 set_opt(&ctx->opt, DAX_ALWAYS); 429 clear_opt(&ctx->opt, DAX_NEVER); 430 return true; 431 case EROFS_MOUNT_DAX_NEVER: 432 set_opt(&ctx->opt, DAX_NEVER); 433 clear_opt(&ctx->opt, DAX_ALWAYS); 434 return true; 435 default: 436 DBG_BUGON(1); 437 return false; 438 } 439 #else 440 errorfc(fc, "dax options not supported"); 441 return false; 442 #endif 443 } 444 445 static int erofs_fc_parse_param(struct fs_context *fc, 446 struct fs_parameter *param) 447 { 448 struct erofs_fs_context *ctx = fc->fs_private; 449 struct fs_parse_result result; 450 struct erofs_device_info *dif; 451 int opt, ret; 452 453 opt = fs_parse(fc, erofs_fs_parameters, param, &result); 454 if (opt < 0) 455 return opt; 456 457 switch (opt) { 458 case Opt_user_xattr: 459 #ifdef CONFIG_EROFS_FS_XATTR 460 if (result.boolean) 461 set_opt(&ctx->opt, XATTR_USER); 462 else 463 clear_opt(&ctx->opt, XATTR_USER); 464 #else 465 errorfc(fc, "{,no}user_xattr options not supported"); 466 #endif 467 break; 468 case Opt_acl: 469 #ifdef CONFIG_EROFS_FS_POSIX_ACL 470 if (result.boolean) 471 set_opt(&ctx->opt, POSIX_ACL); 472 else 473 clear_opt(&ctx->opt, POSIX_ACL); 474 #else 475 errorfc(fc, "{,no}acl options not supported"); 476 #endif 477 break; 478 case Opt_cache_strategy: 479 #ifdef CONFIG_EROFS_FS_ZIP 480 ctx->opt.cache_strategy = result.uint_32; 481 #else 482 errorfc(fc, "compression not supported, cache_strategy ignored"); 483 #endif 484 break; 485 case Opt_dax: 486 if (!erofs_fc_set_dax_mode(fc, EROFS_MOUNT_DAX_ALWAYS)) 487 return -EINVAL; 488 break; 489 case Opt_dax_enum: 490 if (!erofs_fc_set_dax_mode(fc, result.uint_32)) 491 return -EINVAL; 492 break; 493 case Opt_device: 494 dif = kzalloc(sizeof(*dif), GFP_KERNEL); 495 if (!dif) 496 return -ENOMEM; 497 dif->path = kstrdup(param->string, GFP_KERNEL); 498 if (!dif->path) { 499 kfree(dif); 500 return -ENOMEM; 501 } 502 down_write(&ctx->devs->rwsem); 503 ret = idr_alloc(&ctx->devs->tree, dif, 0, 0, GFP_KERNEL); 504 up_write(&ctx->devs->rwsem); 505 if (ret < 0) { 506 kfree(dif->path); 507 kfree(dif); 508 return ret; 509 } 510 ++ctx->devs->extra_devices; 511 break; 512 default: 513 return -ENOPARAM; 514 } 515 return 0; 516 } 517 518 #ifdef CONFIG_EROFS_FS_ZIP 519 static const struct address_space_operations managed_cache_aops; 520 521 static int erofs_managed_cache_releasepage(struct page *page, gfp_t gfp_mask) 522 { 523 int ret = 1; /* 0 - busy */ 524 struct address_space *const mapping = page->mapping; 525 526 DBG_BUGON(!PageLocked(page)); 527 DBG_BUGON(mapping->a_ops != &managed_cache_aops); 528 529 if (PagePrivate(page)) 530 ret = erofs_try_to_free_cached_page(page); 531 532 return ret; 533 } 534 535 /* 536 * It will be called only on inode eviction. In case that there are still some 537 * decompression requests in progress, wait with rescheduling for a bit here. 538 * We could introduce an extra locking instead but it seems unnecessary. 539 */ 540 static void erofs_managed_cache_invalidate_folio(struct folio *folio, 541 size_t offset, size_t length) 542 { 543 const size_t stop = length + offset; 544 545 DBG_BUGON(!folio_test_locked(folio)); 546 547 /* Check for potential overflow in debug mode */ 548 DBG_BUGON(stop > folio_size(folio) || stop < length); 549 550 if (offset == 0 && stop == folio_size(folio)) 551 while (!erofs_managed_cache_releasepage(&folio->page, GFP_NOFS)) 552 cond_resched(); 553 } 554 555 static const struct address_space_operations managed_cache_aops = { 556 .releasepage = erofs_managed_cache_releasepage, 557 .invalidate_folio = erofs_managed_cache_invalidate_folio, 558 }; 559 560 static int erofs_init_managed_cache(struct super_block *sb) 561 { 562 struct erofs_sb_info *const sbi = EROFS_SB(sb); 563 struct inode *const inode = new_inode(sb); 564 565 if (!inode) 566 return -ENOMEM; 567 568 set_nlink(inode, 1); 569 inode->i_size = OFFSET_MAX; 570 571 inode->i_mapping->a_ops = &managed_cache_aops; 572 mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); 573 sbi->managed_cache = inode; 574 return 0; 575 } 576 #else 577 static int erofs_init_managed_cache(struct super_block *sb) { return 0; } 578 #endif 579 580 static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) 581 { 582 struct inode *inode; 583 struct erofs_sb_info *sbi; 584 struct erofs_fs_context *ctx = fc->fs_private; 585 int err; 586 587 sb->s_magic = EROFS_SUPER_MAGIC; 588 589 if (!sb_set_blocksize(sb, EROFS_BLKSIZ)) { 590 erofs_err(sb, "failed to set erofs blksize"); 591 return -EINVAL; 592 } 593 594 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); 595 if (!sbi) 596 return -ENOMEM; 597 598 sb->s_fs_info = sbi; 599 sbi->opt = ctx->opt; 600 sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->dax_part_off); 601 sbi->devs = ctx->devs; 602 ctx->devs = NULL; 603 604 err = erofs_read_superblock(sb); 605 if (err) 606 return err; 607 608 if (test_opt(&sbi->opt, DAX_ALWAYS)) { 609 BUILD_BUG_ON(EROFS_BLKSIZ != PAGE_SIZE); 610 611 if (!sbi->dax_dev) { 612 errorfc(fc, "DAX unsupported by block device. Turning off DAX."); 613 clear_opt(&sbi->opt, DAX_ALWAYS); 614 } 615 } 616 sb->s_flags |= SB_RDONLY | SB_NOATIME; 617 sb->s_maxbytes = MAX_LFS_FILESIZE; 618 sb->s_time_gran = 1; 619 620 sb->s_op = &erofs_sops; 621 sb->s_xattr = erofs_xattr_handlers; 622 623 if (test_opt(&sbi->opt, POSIX_ACL)) 624 sb->s_flags |= SB_POSIXACL; 625 else 626 sb->s_flags &= ~SB_POSIXACL; 627 628 #ifdef CONFIG_EROFS_FS_ZIP 629 xa_init(&sbi->managed_pslots); 630 #endif 631 632 /* get the root inode */ 633 inode = erofs_iget(sb, ROOT_NID(sbi), true); 634 if (IS_ERR(inode)) 635 return PTR_ERR(inode); 636 637 if (!S_ISDIR(inode->i_mode)) { 638 erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)", 639 ROOT_NID(sbi), inode->i_mode); 640 iput(inode); 641 return -EINVAL; 642 } 643 644 sb->s_root = d_make_root(inode); 645 if (!sb->s_root) 646 return -ENOMEM; 647 648 erofs_shrinker_register(sb); 649 /* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */ 650 err = erofs_init_managed_cache(sb); 651 if (err) 652 return err; 653 654 err = erofs_register_sysfs(sb); 655 if (err) 656 return err; 657 658 erofs_info(sb, "mounted with root inode @ nid %llu.", ROOT_NID(sbi)); 659 return 0; 660 } 661 662 static int erofs_fc_get_tree(struct fs_context *fc) 663 { 664 return get_tree_bdev(fc, erofs_fc_fill_super); 665 } 666 667 static int erofs_fc_reconfigure(struct fs_context *fc) 668 { 669 struct super_block *sb = fc->root->d_sb; 670 struct erofs_sb_info *sbi = EROFS_SB(sb); 671 struct erofs_fs_context *ctx = fc->fs_private; 672 673 DBG_BUGON(!sb_rdonly(sb)); 674 675 if (test_opt(&ctx->opt, POSIX_ACL)) 676 fc->sb_flags |= SB_POSIXACL; 677 else 678 fc->sb_flags &= ~SB_POSIXACL; 679 680 sbi->opt = ctx->opt; 681 682 fc->sb_flags |= SB_RDONLY; 683 return 0; 684 } 685 686 static int erofs_release_device_info(int id, void *ptr, void *data) 687 { 688 struct erofs_device_info *dif = ptr; 689 690 fs_put_dax(dif->dax_dev); 691 if (dif->bdev) 692 blkdev_put(dif->bdev, FMODE_READ | FMODE_EXCL); 693 kfree(dif->path); 694 kfree(dif); 695 return 0; 696 } 697 698 static void erofs_free_dev_context(struct erofs_dev_context *devs) 699 { 700 if (!devs) 701 return; 702 idr_for_each(&devs->tree, &erofs_release_device_info, NULL); 703 idr_destroy(&devs->tree); 704 kfree(devs); 705 } 706 707 static void erofs_fc_free(struct fs_context *fc) 708 { 709 struct erofs_fs_context *ctx = fc->fs_private; 710 711 erofs_free_dev_context(ctx->devs); 712 kfree(ctx); 713 } 714 715 static const struct fs_context_operations erofs_context_ops = { 716 .parse_param = erofs_fc_parse_param, 717 .get_tree = erofs_fc_get_tree, 718 .reconfigure = erofs_fc_reconfigure, 719 .free = erofs_fc_free, 720 }; 721 722 static int erofs_init_fs_context(struct fs_context *fc) 723 { 724 struct erofs_fs_context *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 725 726 if (!ctx) 727 return -ENOMEM; 728 ctx->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL); 729 if (!ctx->devs) { 730 kfree(ctx); 731 return -ENOMEM; 732 } 733 fc->fs_private = ctx; 734 735 idr_init(&ctx->devs->tree); 736 init_rwsem(&ctx->devs->rwsem); 737 erofs_default_options(ctx); 738 fc->ops = &erofs_context_ops; 739 return 0; 740 } 741 742 /* 743 * could be triggered after deactivate_locked_super() 744 * is called, thus including umount and failed to initialize. 745 */ 746 static void erofs_kill_sb(struct super_block *sb) 747 { 748 struct erofs_sb_info *sbi; 749 750 WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC); 751 752 kill_block_super(sb); 753 754 sbi = EROFS_SB(sb); 755 if (!sbi) 756 return; 757 758 erofs_free_dev_context(sbi->devs); 759 fs_put_dax(sbi->dax_dev); 760 kfree(sbi); 761 sb->s_fs_info = NULL; 762 } 763 764 /* called when ->s_root is non-NULL */ 765 static void erofs_put_super(struct super_block *sb) 766 { 767 struct erofs_sb_info *const sbi = EROFS_SB(sb); 768 769 DBG_BUGON(!sbi); 770 771 erofs_unregister_sysfs(sb); 772 erofs_shrinker_unregister(sb); 773 #ifdef CONFIG_EROFS_FS_ZIP 774 iput(sbi->managed_cache); 775 sbi->managed_cache = NULL; 776 #endif 777 } 778 779 static struct file_system_type erofs_fs_type = { 780 .owner = THIS_MODULE, 781 .name = "erofs", 782 .init_fs_context = erofs_init_fs_context, 783 .kill_sb = erofs_kill_sb, 784 .fs_flags = FS_REQUIRES_DEV, 785 }; 786 MODULE_ALIAS_FS("erofs"); 787 788 static int __init erofs_module_init(void) 789 { 790 int err; 791 792 erofs_check_ondisk_layout_definitions(); 793 794 erofs_inode_cachep = kmem_cache_create("erofs_inode", 795 sizeof(struct erofs_inode), 0, 796 SLAB_RECLAIM_ACCOUNT, 797 erofs_inode_init_once); 798 if (!erofs_inode_cachep) { 799 err = -ENOMEM; 800 goto icache_err; 801 } 802 803 err = erofs_init_shrinker(); 804 if (err) 805 goto shrinker_err; 806 807 err = z_erofs_lzma_init(); 808 if (err) 809 goto lzma_err; 810 811 erofs_pcpubuf_init(); 812 err = z_erofs_init_zip_subsystem(); 813 if (err) 814 goto zip_err; 815 816 err = erofs_init_sysfs(); 817 if (err) 818 goto sysfs_err; 819 820 err = register_filesystem(&erofs_fs_type); 821 if (err) 822 goto fs_err; 823 824 return 0; 825 826 fs_err: 827 erofs_exit_sysfs(); 828 sysfs_err: 829 z_erofs_exit_zip_subsystem(); 830 zip_err: 831 z_erofs_lzma_exit(); 832 lzma_err: 833 erofs_exit_shrinker(); 834 shrinker_err: 835 kmem_cache_destroy(erofs_inode_cachep); 836 icache_err: 837 return err; 838 } 839 840 static void __exit erofs_module_exit(void) 841 { 842 unregister_filesystem(&erofs_fs_type); 843 844 /* Ensure all RCU free inodes / pclusters are safe to be destroyed. */ 845 rcu_barrier(); 846 847 erofs_exit_sysfs(); 848 z_erofs_exit_zip_subsystem(); 849 z_erofs_lzma_exit(); 850 erofs_exit_shrinker(); 851 kmem_cache_destroy(erofs_inode_cachep); 852 erofs_pcpubuf_exit(); 853 } 854 855 /* get filesystem statistics */ 856 static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf) 857 { 858 struct super_block *sb = dentry->d_sb; 859 struct erofs_sb_info *sbi = EROFS_SB(sb); 860 u64 id = huge_encode_dev(sb->s_bdev->bd_dev); 861 862 buf->f_type = sb->s_magic; 863 buf->f_bsize = EROFS_BLKSIZ; 864 buf->f_blocks = sbi->total_blocks; 865 buf->f_bfree = buf->f_bavail = 0; 866 867 buf->f_files = ULLONG_MAX; 868 buf->f_ffree = ULLONG_MAX - sbi->inos; 869 870 buf->f_namelen = EROFS_NAME_LEN; 871 872 buf->f_fsid = u64_to_fsid(id); 873 return 0; 874 } 875 876 static int erofs_show_options(struct seq_file *seq, struct dentry *root) 877 { 878 struct erofs_sb_info *sbi = EROFS_SB(root->d_sb); 879 struct erofs_mount_opts *opt = &sbi->opt; 880 881 #ifdef CONFIG_EROFS_FS_XATTR 882 if (test_opt(opt, XATTR_USER)) 883 seq_puts(seq, ",user_xattr"); 884 else 885 seq_puts(seq, ",nouser_xattr"); 886 #endif 887 #ifdef CONFIG_EROFS_FS_POSIX_ACL 888 if (test_opt(opt, POSIX_ACL)) 889 seq_puts(seq, ",acl"); 890 else 891 seq_puts(seq, ",noacl"); 892 #endif 893 #ifdef CONFIG_EROFS_FS_ZIP 894 if (opt->cache_strategy == EROFS_ZIP_CACHE_DISABLED) 895 seq_puts(seq, ",cache_strategy=disabled"); 896 else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAHEAD) 897 seq_puts(seq, ",cache_strategy=readahead"); 898 else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAROUND) 899 seq_puts(seq, ",cache_strategy=readaround"); 900 #endif 901 if (test_opt(opt, DAX_ALWAYS)) 902 seq_puts(seq, ",dax=always"); 903 if (test_opt(opt, DAX_NEVER)) 904 seq_puts(seq, ",dax=never"); 905 return 0; 906 } 907 908 const struct super_operations erofs_sops = { 909 .put_super = erofs_put_super, 910 .alloc_inode = erofs_alloc_inode, 911 .free_inode = erofs_free_inode, 912 .statfs = erofs_statfs, 913 .show_options = erofs_show_options, 914 }; 915 916 module_init(erofs_module_init); 917 module_exit(erofs_module_exit); 918 919 MODULE_DESCRIPTION("Enhanced ROM File System"); 920 MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc."); 921 MODULE_LICENSE("GPL"); 922