// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include <linux/statfs.h>
#include <linux/seq_file.h>
#include <linux/crc32c.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/exportfs.h>
#include "xattr.h"

#define CREATE_TRACE_POINTS
#include <trace/events/erofs.h>

static struct kmem_cache *erofs_inode_cachep __read_mostly;

void _erofs_err(struct super_block *sb, const char *func, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (sb)
		pr_err("(device %s): %s: %pV", sb->s_id, func, &vaf);
	else
		pr_err("%s: %pV", func, &vaf);
	va_end(args);
}

void _erofs_info(struct super_block *sb, const char *func, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (sb)
		pr_info("(device %s): %pV", sb->s_id, &vaf);
	else
		pr_info("%pV", &vaf);
	va_end(args);
}

static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
{
	size_t len = 1 << EROFS_SB(sb)->blkszbits;
	struct erofs_super_block *dsb;
	u32 expected_crc, crc;

	if (len > EROFS_SUPER_OFFSET)
		len -= EROFS_SUPER_OFFSET;

	dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET, len, GFP_KERNEL);
	if (!dsb)
		return -ENOMEM;

	expected_crc = le32_to_cpu(dsb->checksum);
	dsb->checksum = 0;
	/* to allow for x86 boot sectors and other oddities. */
	crc = crc32c(~0, dsb, len);
	kfree(dsb);

	if (crc != expected_crc) {
		erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
			  crc, expected_crc);
		return -EBADMSG;
	}
	return 0;
}

static void erofs_inode_init_once(void *ptr)
{
	struct erofs_inode *vi = ptr;

	inode_init_once(&vi->vfs_inode);
}

static struct inode *erofs_alloc_inode(struct super_block *sb)
{
	struct erofs_inode *vi =
		alloc_inode_sb(sb, erofs_inode_cachep, GFP_KERNEL);

	if (!vi)
		return NULL;

	/* zero out everything except vfs_inode */
	memset(vi, 0, offsetof(struct erofs_inode, vfs_inode));
	return &vi->vfs_inode;
}

static void erofs_free_inode(struct inode *inode)
{
	struct erofs_inode *vi = EROFS_I(inode);

	if (inode->i_op == &erofs_fast_symlink_iops)
		kfree(inode->i_link);
	kfree(vi->xattr_shared_xattrs);
	kmem_cache_free(erofs_inode_cachep, vi);
}

static bool check_layout_compatibility(struct super_block *sb,
				       struct erofs_super_block *dsb)
{
	const unsigned int feature = le32_to_cpu(dsb->feature_incompat);

	EROFS_SB(sb)->feature_incompat = feature;

	/* check if current kernel meets all mandatory requirements */
	if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) {
		erofs_err(sb, "unidentified incompatible feature %x, please upgrade kernel",
			  feature & ~EROFS_ALL_FEATURE_INCOMPAT);
		return false;
	}
	return true;
}

/* read variable-sized metadata, offset will be aligned by 4-byte */
void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
			  erofs_off_t *offset, int *lengthp)
{
	u8 *buffer, *ptr;
	int len, i, cnt;

	*offset = round_up(*offset, 4);
	ptr = erofs_bread(buf, *offset, EROFS_KMAP);
	if (IS_ERR(ptr))
		return ptr;

	len = le16_to_cpu(*(__le16 *)ptr);
	if (!len)
		len = U16_MAX + 1;
	buffer = kmalloc(len, GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);
	*offset += sizeof(__le16);
	*lengthp = len;

	for (i = 0; i < len; i += cnt) {
		cnt = min_t(int, sb->s_blocksize - erofs_blkoff(sb, *offset),
			    len - i);
		ptr = erofs_bread(buf, *offset, EROFS_KMAP);
		if (IS_ERR(ptr)) {
			kfree(buffer);
			return ptr;
		}
		memcpy(buffer + i, ptr, cnt);
		*offset += cnt;
	}
	return buffer;
}

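/*
 * Stub for !CONFIG_EROFS_FS_ZIP kernels: images carrying on-disk
 * compression configurations cannot be mounted without ZIP support.
 */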
#ifndef CONFIG_EROFS_FS_ZIP
static int z_erofs_parse_cfgs(struct super_block *sb,
			      struct erofs_super_block *dsb)
{
	if (!dsb->u1.available_compr_algs)
		return 0;

	erofs_err(sb, "compression disabled, unable to mount compressed EROFS");
	return -EOPNOTSUPP;
}
#endif

static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
			     struct erofs_device_info *dif, erofs_off_t *pos)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_fscache *fscache;
	struct erofs_deviceslot *dis;
	struct file *bdev_file;

	dis = erofs_read_metabuf(buf, sb, *pos, EROFS_KMAP);
	if (IS_ERR(dis))
		return PTR_ERR(dis);

	if (!sbi->devs->flatdev && !dif->path) {
		if (!dis->tag[0]) {
			erofs_err(sb, "empty device tag @ pos %llu", *pos);
			return -EINVAL;
		}
		dif->path = kmemdup_nul(dis->tag, sizeof(dis->tag), GFP_KERNEL);
		if (!dif->path)
			return -ENOMEM;
	}

	if (erofs_is_fscache_mode(sb)) {
		fscache = erofs_fscache_register_cookie(sb, dif->path, 0);
		if (IS_ERR(fscache))
			return PTR_ERR(fscache);
		dif->fscache = fscache;
	} else if (!sbi->devs->flatdev) {
		bdev_file = bdev_file_open_by_path(dif->path, BLK_OPEN_READ,
						   sb->s_type, NULL);
		if (IS_ERR(bdev_file))
			return PTR_ERR(bdev_file);
		dif->bdev_file = bdev_file;
		dif->dax_dev = fs_dax_get_by_bdev(file_bdev(bdev_file),
						  &dif->dax_part_off, NULL, NULL);
	}

	dif->blocks = le32_to_cpu(dis->blocks);
	dif->mapped_blkaddr = le32_to_cpu(dis->mapped_blkaddr);
	sbi->total_blocks += dif->blocks;
	*pos += EROFS_DEVT_SLOT_SIZE;
	return 0;
}

static int erofs_scan_devices(struct super_block *sb,
			      struct erofs_super_block *dsb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	unsigned int ondisk_extradevs;
	erofs_off_t pos;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	struct erofs_device_info *dif;
	int id, err = 0;

	sbi->total_blocks = sbi->primarydevice_blocks;
	if (!erofs_sb_has_device_table(sbi))
		ondisk_extradevs = 0;
	else
		ondisk_extradevs = le16_to_cpu(dsb->extra_devices);

	if (sbi->devs->extra_devices &&
	    ondisk_extradevs != sbi->devs->extra_devices) {
		erofs_err(sb, "extra devices don't match (ondisk %u, given %u)",
			  ondisk_extradevs, sbi->devs->extra_devices);
		return -EINVAL;
	}
	if (!ondisk_extradevs)
		return 0;

	if (!sbi->devs->extra_devices && !erofs_is_fscache_mode(sb))
		sbi->devs->flatdev = true;

	sbi->device_id_mask = roundup_pow_of_two(ondisk_extradevs + 1) - 1;
	pos = le16_to_cpu(dsb->devt_slotoff) * EROFS_DEVT_SLOT_SIZE;
	down_read(&sbi->devs->rwsem);
	if (sbi->devs->extra_devices) {
		idr_for_each_entry(&sbi->devs->tree, dif, id) {
			err = erofs_init_device(&buf, sb, dif, &pos);
			if (err)
				break;
		}
	} else {
		for (id = 0; id < ondisk_extradevs; id++) {
			dif = kzalloc(sizeof(*dif), GFP_KERNEL);
			if (!dif) {
				err = -ENOMEM;
				break;
			}

			err = idr_alloc(&sbi->devs->tree, dif, 0, 0, GFP_KERNEL);
			if (err < 0) {
				kfree(dif);
				break;
			}
			++sbi->devs->extra_devices;

			err = erofs_init_device(&buf, sb, dif, &pos);
			if (err)
				break;
		}
	}
	up_read(&sbi->devs->rwsem);
	erofs_put_metabuf(&buf);
	return err;
}

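/*
 * Parse the on-disk super block at EROFS_SUPER_OFFSET: verify the magic,
 * the optional checksum and the incompat feature bits, then pull the
 * global filesystem parameters (block size, root nid, xattr and device
 * tables) out of it.
 */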
static int erofs_read_superblock(struct super_block *sb)
{
	struct erofs_sb_info *sbi;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	struct erofs_super_block *dsb;
	void *data;
	int ret;

	data = erofs_read_metabuf(&buf, sb, 0, EROFS_KMAP);
	if (IS_ERR(data)) {
		erofs_err(sb, "cannot read erofs superblock");
		return PTR_ERR(data);
	}

	sbi = EROFS_SB(sb);
	dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET);

	ret = -EINVAL;
	if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) {
		erofs_err(sb, "cannot find valid erofs superblock");
		goto out;
	}

	sbi->blkszbits = dsb->blkszbits;
	if (sbi->blkszbits < 9 || sbi->blkszbits > PAGE_SHIFT) {
		erofs_err(sb, "blkszbits %u isn't supported", sbi->blkszbits);
		goto out;
	}
	if (dsb->dirblkbits) {
		erofs_err(sb, "dirblkbits %u isn't supported", dsb->dirblkbits);
		goto out;
	}

	sbi->feature_compat = le32_to_cpu(dsb->feature_compat);
	if (erofs_sb_has_sb_chksum(sbi)) {
		ret = erofs_superblock_csum_verify(sb, data);
		if (ret)
			goto out;
	}

	ret = -EINVAL;
	if (!check_layout_compatibility(sb, dsb))
		goto out;

	sbi->sb_size = 128 + dsb->sb_extslots * EROFS_SB_EXTSLOT_SIZE;
	if (sbi->sb_size > PAGE_SIZE - EROFS_SUPER_OFFSET) {
		erofs_err(sb, "invalid sb_extslots %u (more than a fs block)",
			  sbi->sb_size);
		goto out;
	}
	sbi->primarydevice_blocks = le32_to_cpu(dsb->blocks);
	sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
#ifdef CONFIG_EROFS_FS_XATTR
	sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
	sbi->xattr_prefix_start = le32_to_cpu(dsb->xattr_prefix_start);
	sbi->xattr_prefix_count = dsb->xattr_prefix_count;
	sbi->xattr_filter_reserved = dsb->xattr_filter_reserved;
#endif
	sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact));
	sbi->root_nid = le16_to_cpu(dsb->root_nid);
	sbi->packed_nid = le64_to_cpu(dsb->packed_nid);
	sbi->inos = le64_to_cpu(dsb->inos);

	sbi->build_time = le64_to_cpu(dsb->build_time);
	sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec);

	memcpy(&sb->s_uuid, dsb->uuid, sizeof(dsb->uuid));

	ret = strscpy(sbi->volume_name, dsb->volume_name,
		      sizeof(dsb->volume_name));
	if (ret < 0) {	/* -E2BIG */
		erofs_err(sb, "bad volume name without NIL terminator");
		ret = -EFSCORRUPTED;
		goto out;
	}

	/* parse on-disk compression configurations */
	ret = z_erofs_parse_cfgs(sb, dsb);
	if (ret < 0)
		goto out;

	/* handle multiple devices */
	ret = erofs_scan_devices(sb, dsb);

	if (erofs_is_fscache_mode(sb))
		erofs_info(sb, "EXPERIMENTAL fscache-based on-demand read feature in use. Use at your own risk!");
out:
	erofs_put_metabuf(&buf);
	return ret;
}

static void erofs_default_options(struct erofs_sb_info *sbi)
{
#ifdef CONFIG_EROFS_FS_ZIP
	sbi->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND;
	sbi->opt.max_sync_decompress_pages = 3;
	sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO;
#endif
#ifdef CONFIG_EROFS_FS_XATTR
	set_opt(&sbi->opt, XATTR_USER);
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	set_opt(&sbi->opt, POSIX_ACL);
#endif
}

enum {
	Opt_user_xattr,
	Opt_acl,
	Opt_cache_strategy,
	Opt_dax,
	Opt_dax_enum,
	Opt_device,
	Opt_fsid,
	Opt_domain_id,
	Opt_err
};

static const struct constant_table erofs_param_cache_strategy[] = {
	{"disabled", EROFS_ZIP_CACHE_DISABLED},
	{"readahead", EROFS_ZIP_CACHE_READAHEAD},
	{"readaround", EROFS_ZIP_CACHE_READAROUND},
	{}
};

static const struct constant_table erofs_dax_param_enums[] = {
	{"always", EROFS_MOUNT_DAX_ALWAYS},
	{"never", EROFS_MOUNT_DAX_NEVER},
	{}
};

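/*
 * Mount option table. A typical invocation (device paths are
 * illustrative only) looks like:
 *   mount -t erofs -o user_xattr,cache_strategy=readaround /dev/sdXN /mnt
 */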
static const struct fs_parameter_spec erofs_fs_parameters[] = {
	fsparam_flag_no("user_xattr", Opt_user_xattr),
	fsparam_flag_no("acl", Opt_acl),
	fsparam_enum("cache_strategy", Opt_cache_strategy,
		     erofs_param_cache_strategy),
	fsparam_flag("dax", Opt_dax),
	fsparam_enum("dax", Opt_dax_enum, erofs_dax_param_enums),
	fsparam_string("device", Opt_device),
	fsparam_string("fsid", Opt_fsid),
	fsparam_string("domain_id", Opt_domain_id),
	{}
};

static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
{
#ifdef CONFIG_FS_DAX
	struct erofs_sb_info *sbi = fc->s_fs_info;

	switch (mode) {
	case EROFS_MOUNT_DAX_ALWAYS:
		set_opt(&sbi->opt, DAX_ALWAYS);
		clear_opt(&sbi->opt, DAX_NEVER);
		return true;
	case EROFS_MOUNT_DAX_NEVER:
		set_opt(&sbi->opt, DAX_NEVER);
		clear_opt(&sbi->opt, DAX_ALWAYS);
		return true;
	default:
		DBG_BUGON(1);
		return false;
	}
#else
	errorfc(fc, "dax options not supported");
	return false;
#endif
}

static int erofs_fc_parse_param(struct fs_context *fc,
				struct fs_parameter *param)
{
	struct erofs_sb_info *sbi = fc->s_fs_info;
	struct fs_parse_result result;
	struct erofs_device_info *dif;
	int opt, ret;

	opt = fs_parse(fc, erofs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_user_xattr:
#ifdef CONFIG_EROFS_FS_XATTR
		if (result.boolean)
			set_opt(&sbi->opt, XATTR_USER);
		else
			clear_opt(&sbi->opt, XATTR_USER);
#else
		errorfc(fc, "{,no}user_xattr options not supported");
#endif
		break;
	case Opt_acl:
#ifdef CONFIG_EROFS_FS_POSIX_ACL
		if (result.boolean)
			set_opt(&sbi->opt, POSIX_ACL);
		else
			clear_opt(&sbi->opt, POSIX_ACL);
#else
		errorfc(fc, "{,no}acl options not supported");
#endif
		break;
	case Opt_cache_strategy:
#ifdef CONFIG_EROFS_FS_ZIP
		sbi->opt.cache_strategy = result.uint_32;
#else
		errorfc(fc, "compression not supported, cache_strategy ignored");
#endif
		break;
	case Opt_dax:
		if (!erofs_fc_set_dax_mode(fc, EROFS_MOUNT_DAX_ALWAYS))
			return -EINVAL;
		break;
	case Opt_dax_enum:
		if (!erofs_fc_set_dax_mode(fc, result.uint_32))
			return -EINVAL;
		break;
	case Opt_device:
		dif = kzalloc(sizeof(*dif), GFP_KERNEL);
		if (!dif)
			return -ENOMEM;
		dif->path = kstrdup(param->string, GFP_KERNEL);
		if (!dif->path) {
			kfree(dif);
			return -ENOMEM;
		}
		down_write(&sbi->devs->rwsem);
		ret = idr_alloc(&sbi->devs->tree, dif, 0, 0, GFP_KERNEL);
		up_write(&sbi->devs->rwsem);
		if (ret < 0) {
			kfree(dif->path);
			kfree(dif);
			return ret;
		}
		++sbi->devs->extra_devices;
		break;
#ifdef CONFIG_EROFS_FS_ONDEMAND
	case Opt_fsid:
		kfree(sbi->fsid);
		sbi->fsid = kstrdup(param->string, GFP_KERNEL);
		if (!sbi->fsid)
			return -ENOMEM;
		break;
	case Opt_domain_id:
		kfree(sbi->domain_id);
		sbi->domain_id = kstrdup(param->string, GFP_KERNEL);
		if (!sbi->domain_id)
			return -ENOMEM;
		break;
#else
	case Opt_fsid:
	case Opt_domain_id:
		errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
		break;
#endif
	default:
		return -ENOPARAM;
	}
	return 0;
}

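/*
 * NFS export support: file handles are decoded back into inodes via
 * erofs_iget(), and get_parent resolves ".." with a directory lookup.
 */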
static struct inode *erofs_nfs_get_inode(struct super_block *sb,
					 u64 ino, u32 generation)
{
	return erofs_iget(sb, ino);
}

static struct dentry *erofs_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    erofs_nfs_get_inode);
}

static struct dentry *erofs_fh_to_parent(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    erofs_nfs_get_inode);
}

static struct dentry *erofs_get_parent(struct dentry *child)
{
	erofs_nid_t nid;
	unsigned int d_type;
	int err;

	err = erofs_namei(d_inode(child), &dotdot_name, &nid, &d_type);
	if (err)
		return ERR_PTR(err);
	return d_obtain_alias(erofs_iget(child->d_sb, nid));
}

static const struct export_operations erofs_export_ops = {
	.encode_fh = generic_encode_ino32_fh,
	.fh_to_dentry = erofs_fh_to_dentry,
	.fh_to_parent = erofs_fh_to_parent,
	.get_parent = erofs_get_parent,
};

static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct inode *inode;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	int err;

	sb->s_magic = EROFS_SUPER_MAGIC;
	sb->s_flags |= SB_RDONLY | SB_NOATIME;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_op = &erofs_sops;

	sbi->blkszbits = PAGE_SHIFT;
	if (erofs_is_fscache_mode(sb)) {
		sb->s_blocksize = PAGE_SIZE;
		sb->s_blocksize_bits = PAGE_SHIFT;

		err = erofs_fscache_register_fs(sb);
		if (err)
			return err;

		err = super_setup_bdi(sb);
		if (err)
			return err;
	} else {
		if (!sb_set_blocksize(sb, PAGE_SIZE)) {
			errorfc(fc, "failed to set initial blksize");
			return -EINVAL;
		}

		sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev,
						  &sbi->dax_part_off,
						  NULL, NULL);
	}

	err = erofs_read_superblock(sb);
	if (err)
		return err;

	if (sb->s_blocksize_bits != sbi->blkszbits) {
		if (erofs_is_fscache_mode(sb)) {
			errorfc(fc, "unsupported blksize for fscache mode");
			return -EINVAL;
		}
		if (!sb_set_blocksize(sb, 1 << sbi->blkszbits)) {
			errorfc(fc, "failed to set erofs blksize");
			return -EINVAL;
		}
	}

	if (test_opt(&sbi->opt, DAX_ALWAYS)) {
		if (!sbi->dax_dev) {
			errorfc(fc, "DAX unsupported by block device. Turning off DAX.");
			clear_opt(&sbi->opt, DAX_ALWAYS);
		} else if (sbi->blkszbits != PAGE_SHIFT) {
			errorfc(fc, "unsupported blocksize for DAX");
			clear_opt(&sbi->opt, DAX_ALWAYS);
		}
	}

	sb->s_time_gran = 1;
	sb->s_xattr = erofs_xattr_handlers;
	sb->s_export_op = &erofs_export_ops;

	if (test_opt(&sbi->opt, POSIX_ACL))
		sb->s_flags |= SB_POSIXACL;
	else
		sb->s_flags &= ~SB_POSIXACL;

#ifdef CONFIG_EROFS_FS_ZIP
	xa_init(&sbi->managed_pslots);
#endif

	inode = erofs_iget(sb, sbi->root_nid);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	if (!S_ISDIR(inode->i_mode)) {
		erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)",
			  sbi->root_nid, inode->i_mode);
		iput(inode);
		return -EINVAL;
	}

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;

	erofs_shrinker_register(sb);
	if (erofs_sb_has_fragments(sbi) && sbi->packed_nid) {
		sbi->packed_inode = erofs_iget(sb, sbi->packed_nid);
		if (IS_ERR(sbi->packed_inode)) {
			err = PTR_ERR(sbi->packed_inode);
			sbi->packed_inode = NULL;
			return err;
		}
	}
	err = erofs_init_managed_cache(sb);
	if (err)
		return err;

	err = erofs_xattr_prefixes_init(sb);
	if (err)
		return err;

	err = erofs_register_sysfs(sb);
	if (err)
		return err;

	erofs_info(sb, "mounted with root inode @ nid %llu.", sbi->root_nid);
	return 0;
}

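/*
 * fscache-based mounts (an fsid was given) don't sit on a block device
 * and take the nodev path; all other mounts require one.
 */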
static int erofs_fc_get_tree(struct fs_context *fc)
{
	struct erofs_sb_info *sbi = fc->s_fs_info;

	if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid)
		return get_tree_nodev(fc, erofs_fc_fill_super);

	return get_tree_bdev(fc, erofs_fc_fill_super);
}

static int erofs_fc_reconfigure(struct fs_context *fc)
{
	struct super_block *sb = fc->root->d_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_sb_info *new_sbi = fc->s_fs_info;

	DBG_BUGON(!sb_rdonly(sb));

	if (new_sbi->fsid || new_sbi->domain_id)
		erofs_info(sb, "ignoring reconfiguration for fsid|domain_id.");

	if (test_opt(&new_sbi->opt, POSIX_ACL))
		fc->sb_flags |= SB_POSIXACL;
	else
		fc->sb_flags &= ~SB_POSIXACL;

	sbi->opt = new_sbi->opt;

	fc->sb_flags |= SB_RDONLY;
	return 0;
}

static int erofs_release_device_info(int id, void *ptr, void *data)
{
	struct erofs_device_info *dif = ptr;

	fs_put_dax(dif->dax_dev, NULL);
	if (dif->bdev_file)
		fput(dif->bdev_file);
	erofs_fscache_unregister_cookie(dif->fscache);
	dif->fscache = NULL;
	kfree(dif->path);
	kfree(dif);
	return 0;
}

static void erofs_free_dev_context(struct erofs_dev_context *devs)
{
	if (!devs)
		return;
	idr_for_each(&devs->tree, &erofs_release_device_info, NULL);
	idr_destroy(&devs->tree);
	kfree(devs);
}

static void erofs_fc_free(struct fs_context *fc)
{
	struct erofs_sb_info *sbi = fc->s_fs_info;

	if (!sbi)
		return;

	erofs_free_dev_context(sbi->devs);
	kfree(sbi->fsid);
	kfree(sbi->domain_id);
	kfree(sbi);
}

static const struct fs_context_operations erofs_context_ops = {
	.parse_param = erofs_fc_parse_param,
	.get_tree = erofs_fc_get_tree,
	.reconfigure = erofs_fc_reconfigure,
	.free = erofs_fc_free,
};

static int erofs_init_fs_context(struct fs_context *fc)
{
	struct erofs_sb_info *sbi;

	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL);
	if (!sbi->devs) {
		kfree(sbi);
		return -ENOMEM;
	}
	fc->s_fs_info = sbi;

	idr_init(&sbi->devs->tree);
	init_rwsem(&sbi->devs->rwsem);
	erofs_default_options(sbi);
	fc->ops = &erofs_context_ops;
	return 0;
}

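/*
 * Called for both regular umounts and failed mounts, so each teardown
 * step below must tolerate partially initialized state.
 */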
static void erofs_kill_sb(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid)
		kill_anon_super(sb);
	else
		kill_block_super(sb);

	erofs_free_dev_context(sbi->devs);
	fs_put_dax(sbi->dax_dev, NULL);
	erofs_fscache_unregister_fs(sb);
	kfree(sbi->fsid);
	kfree(sbi->domain_id);
	kfree(sbi);
	sb->s_fs_info = NULL;
}

static void erofs_put_super(struct super_block *sb)
{
	struct erofs_sb_info *const sbi = EROFS_SB(sb);

	DBG_BUGON(!sbi);

	erofs_unregister_sysfs(sb);
	erofs_shrinker_unregister(sb);
	erofs_xattr_prefixes_cleanup(sb);
#ifdef CONFIG_EROFS_FS_ZIP
	iput(sbi->managed_cache);
	sbi->managed_cache = NULL;
#endif
	iput(sbi->packed_inode);
	sbi->packed_inode = NULL;
	erofs_free_dev_context(sbi->devs);
	sbi->devs = NULL;
	erofs_fscache_unregister_fs(sb);
}

static struct file_system_type erofs_fs_type = {
	.owner = THIS_MODULE,
	.name = "erofs",
	.init_fs_context = erofs_init_fs_context,
	.kill_sb = erofs_kill_sb,
	.fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("erofs");

static int __init erofs_module_init(void)
{
	int err;

	erofs_check_ondisk_layout_definitions();

	erofs_inode_cachep = kmem_cache_create("erofs_inode",
			sizeof(struct erofs_inode), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
			erofs_inode_init_once);
	if (!erofs_inode_cachep)
		return -ENOMEM;

	err = erofs_init_shrinker();
	if (err)
		goto shrinker_err;

	err = z_erofs_lzma_init();
	if (err)
		goto lzma_err;

	err = z_erofs_deflate_init();
	if (err)
		goto deflate_err;

	err = z_erofs_zstd_init();
	if (err)
		goto zstd_err;

	err = z_erofs_gbuf_init();
	if (err)
		goto gbuf_err;

	err = z_erofs_init_zip_subsystem();
	if (err)
		goto zip_err;

	err = erofs_init_sysfs();
	if (err)
		goto sysfs_err;

	err = register_filesystem(&erofs_fs_type);
	if (err)
		goto fs_err;

	return 0;

fs_err:
	erofs_exit_sysfs();
sysfs_err:
	z_erofs_exit_zip_subsystem();
zip_err:
	z_erofs_gbuf_exit();
gbuf_err:
	z_erofs_zstd_exit();
zstd_err:
	z_erofs_deflate_exit();
deflate_err:
	z_erofs_lzma_exit();
lzma_err:
	erofs_exit_shrinker();
shrinker_err:
	kmem_cache_destroy(erofs_inode_cachep);
	return err;
}

static void __exit erofs_module_exit(void)
{
	unregister_filesystem(&erofs_fs_type);

	/* Ensure all RCU free inodes / pclusters are safe to be destroyed. */
	rcu_barrier();

	erofs_exit_sysfs();
	z_erofs_exit_zip_subsystem();
	z_erofs_zstd_exit();
	z_erofs_deflate_exit();
	z_erofs_lzma_exit();
	erofs_exit_shrinker();
	kmem_cache_destroy(erofs_inode_cachep);
	z_erofs_gbuf_exit();
}

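/*
 * EROFS is read-only: no free/available blocks are ever reported, and
 * the fsid is derived from the superblock UUID whenever one is set.
 */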
static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	buf->f_type = sb->s_magic;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = sbi->total_blocks;
	buf->f_bfree = buf->f_bavail = 0;
	buf->f_files = ULLONG_MAX;
	buf->f_ffree = ULLONG_MAX - sbi->inos;
	buf->f_namelen = EROFS_NAME_LEN;

	if (uuid_is_null(&sb->s_uuid))
		buf->f_fsid = u64_to_fsid(erofs_is_fscache_mode(sb) ? 0 :
				huge_encode_dev(sb->s_bdev->bd_dev));
	else
		buf->f_fsid = uuid_to_fsid(sb->s_uuid.b);
	return 0;
}

static int erofs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct erofs_sb_info *sbi = EROFS_SB(root->d_sb);
	struct erofs_mount_opts *opt = &sbi->opt;

	if (IS_ENABLED(CONFIG_EROFS_FS_XATTR))
		seq_puts(seq, test_opt(opt, XATTR_USER) ?
				",user_xattr" : ",nouser_xattr");
	if (IS_ENABLED(CONFIG_EROFS_FS_POSIX_ACL))
		seq_puts(seq, test_opt(opt, POSIX_ACL) ? ",acl" : ",noacl");
	if (IS_ENABLED(CONFIG_EROFS_FS_ZIP))
		seq_printf(seq, ",cache_strategy=%s",
			   erofs_param_cache_strategy[opt->cache_strategy].name);
	if (test_opt(opt, DAX_ALWAYS))
		seq_puts(seq, ",dax=always");
	if (test_opt(opt, DAX_NEVER))
		seq_puts(seq, ",dax=never");
#ifdef CONFIG_EROFS_FS_ONDEMAND
	if (sbi->fsid)
		seq_printf(seq, ",fsid=%s", sbi->fsid);
	if (sbi->domain_id)
		seq_printf(seq, ",domain_id=%s", sbi->domain_id);
#endif
	return 0;
}

const struct super_operations erofs_sops = {
	.put_super = erofs_put_super,
	.alloc_inode = erofs_alloc_inode,
	.free_inode = erofs_free_inode,
	.statfs = erofs_statfs,
	.show_options = erofs_show_options,
};

module_init(erofs_module_init);
module_exit(erofs_module_exit);

MODULE_DESCRIPTION("Enhanced ROM File System");
MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc.");
MODULE_LICENSE("GPL");