1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2017-2018 HUAWEI, Inc. 4 * https://www.huawei.com/ 5 * Copyright (C) 2021, Alibaba Cloud 6 */ 7 #include <linux/statfs.h> 8 #include <linux/seq_file.h> 9 #include <linux/crc32c.h> 10 #include <linux/fs_context.h> 11 #include <linux/fs_parser.h> 12 #include <linux/exportfs.h> 13 #include "xattr.h" 14 15 #define CREATE_TRACE_POINTS 16 #include <trace/events/erofs.h> 17 18 static struct kmem_cache *erofs_inode_cachep __read_mostly; 19 20 void _erofs_err(struct super_block *sb, const char *func, const char *fmt, ...) 21 { 22 struct va_format vaf; 23 va_list args; 24 25 va_start(args, fmt); 26 27 vaf.fmt = fmt; 28 vaf.va = &args; 29 30 if (sb) 31 pr_err("(device %s): %s: %pV", sb->s_id, func, &vaf); 32 else 33 pr_err("%s: %pV", func, &vaf); 34 va_end(args); 35 } 36 37 void _erofs_info(struct super_block *sb, const char *func, const char *fmt, ...) 38 { 39 struct va_format vaf; 40 va_list args; 41 42 va_start(args, fmt); 43 44 vaf.fmt = fmt; 45 vaf.va = &args; 46 47 if (sb) 48 pr_info("(device %s): %pV", sb->s_id, &vaf); 49 else 50 pr_info("%pV", &vaf); 51 va_end(args); 52 } 53 54 static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata) 55 { 56 size_t len = 1 << EROFS_SB(sb)->blkszbits; 57 struct erofs_super_block *dsb; 58 u32 expected_crc, crc; 59 60 if (len > EROFS_SUPER_OFFSET) 61 len -= EROFS_SUPER_OFFSET; 62 63 dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET, len, GFP_KERNEL); 64 if (!dsb) 65 return -ENOMEM; 66 67 expected_crc = le32_to_cpu(dsb->checksum); 68 dsb->checksum = 0; 69 /* to allow for x86 boot sectors and other oddities. */ 70 crc = crc32c(~0, dsb, len); 71 kfree(dsb); 72 73 if (crc != expected_crc) { 74 erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected", 75 crc, expected_crc); 76 return -EBADMSG; 77 } 78 return 0; 79 } 80 81 static void erofs_inode_init_once(void *ptr) 82 { 83 struct erofs_inode *vi = ptr; 84 85 inode_init_once(&vi->vfs_inode); 86 } 87 88 static struct inode *erofs_alloc_inode(struct super_block *sb) 89 { 90 struct erofs_inode *vi = 91 alloc_inode_sb(sb, erofs_inode_cachep, GFP_KERNEL); 92 93 if (!vi) 94 return NULL; 95 96 /* zero out everything except vfs_inode */ 97 memset(vi, 0, offsetof(struct erofs_inode, vfs_inode)); 98 return &vi->vfs_inode; 99 } 100 101 static void erofs_free_inode(struct inode *inode) 102 { 103 struct erofs_inode *vi = EROFS_I(inode); 104 105 if (inode->i_op == &erofs_fast_symlink_iops) 106 kfree(inode->i_link); 107 kfree(vi->xattr_shared_xattrs); 108 kmem_cache_free(erofs_inode_cachep, vi); 109 } 110 111 static bool check_layout_compatibility(struct super_block *sb, 112 struct erofs_super_block *dsb) 113 { 114 const unsigned int feature = le32_to_cpu(dsb->feature_incompat); 115 116 EROFS_SB(sb)->feature_incompat = feature; 117 118 /* check if current kernel meets all mandatory requirements */ 119 if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) { 120 erofs_err(sb, "unidentified incompatible feature %x, please upgrade kernel", 121 feature & ~EROFS_ALL_FEATURE_INCOMPAT); 122 return false; 123 } 124 return true; 125 } 126 127 /* read variable-sized metadata, offset will be aligned by 4-byte */ 128 void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf, 129 erofs_off_t *offset, int *lengthp) 130 { 131 u8 *buffer, *ptr; 132 int len, i, cnt; 133 134 *offset = round_up(*offset, 4); 135 ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP); 136 if (IS_ERR(ptr)) 137 return ptr; 138 139 len = le16_to_cpu(*(__le16 *)&ptr[erofs_blkoff(sb, *offset)]); 140 if (!len) 141 len = U16_MAX + 1; 142 buffer = kmalloc(len, GFP_KERNEL); 143 if (!buffer) 144 return ERR_PTR(-ENOMEM); 145 *offset += sizeof(__le16); 146 *lengthp = len; 147 148 for (i = 0; i < len; i += cnt) { 149 cnt = min_t(int, sb->s_blocksize - erofs_blkoff(sb, *offset), 150 len - i); 151 ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP); 152 if (IS_ERR(ptr)) { 153 kfree(buffer); 154 return ptr; 155 } 156 memcpy(buffer + i, ptr + erofs_blkoff(sb, *offset), cnt); 157 *offset += cnt; 158 } 159 return buffer; 160 } 161 162 #ifndef CONFIG_EROFS_FS_ZIP 163 static int z_erofs_parse_cfgs(struct super_block *sb, 164 struct erofs_super_block *dsb) 165 { 166 if (!dsb->u1.available_compr_algs) 167 return 0; 168 169 erofs_err(sb, "compression disabled, unable to mount compressed EROFS"); 170 return -EOPNOTSUPP; 171 } 172 #endif 173 174 static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb, 175 struct erofs_device_info *dif, erofs_off_t *pos) 176 { 177 struct erofs_sb_info *sbi = EROFS_SB(sb); 178 struct erofs_fscache *fscache; 179 struct erofs_deviceslot *dis; 180 struct file *bdev_file; 181 void *ptr; 182 183 ptr = erofs_read_metabuf(buf, sb, erofs_blknr(sb, *pos), EROFS_KMAP); 184 if (IS_ERR(ptr)) 185 return PTR_ERR(ptr); 186 dis = ptr + erofs_blkoff(sb, *pos); 187 188 if (!sbi->devs->flatdev && !dif->path) { 189 if (!dis->tag[0]) { 190 erofs_err(sb, "empty device tag @ pos %llu", *pos); 191 return -EINVAL; 192 } 193 dif->path = kmemdup_nul(dis->tag, sizeof(dis->tag), GFP_KERNEL); 194 if (!dif->path) 195 return -ENOMEM; 196 } 197 198 if (erofs_is_fscache_mode(sb)) { 199 fscache = erofs_fscache_register_cookie(sb, dif->path, 0); 200 if (IS_ERR(fscache)) 201 return PTR_ERR(fscache); 202 dif->fscache = fscache; 203 } else if (!sbi->devs->flatdev) { 204 bdev_file = bdev_file_open_by_path(dif->path, BLK_OPEN_READ, 205 sb->s_type, NULL); 206 if (IS_ERR(bdev_file)) 207 return PTR_ERR(bdev_file); 208 dif->bdev_file = bdev_file; 209 dif->dax_dev = fs_dax_get_by_bdev(file_bdev(bdev_file), 210 &dif->dax_part_off, NULL, NULL); 211 } 212 213 dif->blocks = le32_to_cpu(dis->blocks); 214 dif->mapped_blkaddr = le32_to_cpu(dis->mapped_blkaddr); 215 sbi->total_blocks += dif->blocks; 216 *pos += EROFS_DEVT_SLOT_SIZE; 217 return 0; 218 } 219 220 static int erofs_scan_devices(struct super_block *sb, 221 struct erofs_super_block *dsb) 222 { 223 struct erofs_sb_info *sbi = EROFS_SB(sb); 224 unsigned int ondisk_extradevs; 225 erofs_off_t pos; 226 struct erofs_buf buf = __EROFS_BUF_INITIALIZER; 227 struct erofs_device_info *dif; 228 int id, err = 0; 229 230 sbi->total_blocks = sbi->primarydevice_blocks; 231 if (!erofs_sb_has_device_table(sbi)) 232 ondisk_extradevs = 0; 233 else 234 ondisk_extradevs = le16_to_cpu(dsb->extra_devices); 235 236 if (sbi->devs->extra_devices && 237 ondisk_extradevs != sbi->devs->extra_devices) { 238 erofs_err(sb, "extra devices don't match (ondisk %u, given %u)", 239 ondisk_extradevs, sbi->devs->extra_devices); 240 return -EINVAL; 241 } 242 if (!ondisk_extradevs) 243 return 0; 244 245 if (!sbi->devs->extra_devices && !erofs_is_fscache_mode(sb)) 246 sbi->devs->flatdev = true; 247 248 sbi->device_id_mask = roundup_pow_of_two(ondisk_extradevs + 1) - 1; 249 pos = le16_to_cpu(dsb->devt_slotoff) * EROFS_DEVT_SLOT_SIZE; 250 down_read(&sbi->devs->rwsem); 251 if (sbi->devs->extra_devices) { 252 idr_for_each_entry(&sbi->devs->tree, dif, id) { 253 err = erofs_init_device(&buf, sb, dif, &pos); 254 if (err) 255 break; 256 } 257 } else { 258 for (id = 0; id < ondisk_extradevs; id++) { 259 dif = kzalloc(sizeof(*dif), GFP_KERNEL); 260 if (!dif) { 261 err = -ENOMEM; 262 break; 263 } 264 265 err = idr_alloc(&sbi->devs->tree, dif, 0, 0, GFP_KERNEL); 266 if (err < 0) { 267 kfree(dif); 268 break; 269 } 270 ++sbi->devs->extra_devices; 271 272 err = erofs_init_device(&buf, sb, dif, &pos); 273 if (err) 274 break; 275 } 276 } 277 up_read(&sbi->devs->rwsem); 278 erofs_put_metabuf(&buf); 279 return err; 280 } 281 282 static int erofs_read_superblock(struct super_block *sb) 283 { 284 struct erofs_sb_info *sbi; 285 struct erofs_buf buf = __EROFS_BUF_INITIALIZER; 286 struct erofs_super_block *dsb; 287 void *data; 288 int ret; 289 290 data = erofs_read_metabuf(&buf, sb, 0, EROFS_KMAP); 291 if (IS_ERR(data)) { 292 erofs_err(sb, "cannot read erofs superblock"); 293 return PTR_ERR(data); 294 } 295 296 sbi = EROFS_SB(sb); 297 dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET); 298 299 ret = -EINVAL; 300 if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) { 301 erofs_err(sb, "cannot find valid erofs superblock"); 302 goto out; 303 } 304 305 sbi->blkszbits = dsb->blkszbits; 306 if (sbi->blkszbits < 9 || sbi->blkszbits > PAGE_SHIFT) { 307 erofs_err(sb, "blkszbits %u isn't supported", sbi->blkszbits); 308 goto out; 309 } 310 if (dsb->dirblkbits) { 311 erofs_err(sb, "dirblkbits %u isn't supported", dsb->dirblkbits); 312 goto out; 313 } 314 315 sbi->feature_compat = le32_to_cpu(dsb->feature_compat); 316 if (erofs_sb_has_sb_chksum(sbi)) { 317 ret = erofs_superblock_csum_verify(sb, data); 318 if (ret) 319 goto out; 320 } 321 322 ret = -EINVAL; 323 if (!check_layout_compatibility(sb, dsb)) 324 goto out; 325 326 sbi->sb_size = 128 + dsb->sb_extslots * EROFS_SB_EXTSLOT_SIZE; 327 if (sbi->sb_size > PAGE_SIZE - EROFS_SUPER_OFFSET) { 328 erofs_err(sb, "invalid sb_extslots %u (more than a fs block)", 329 sbi->sb_size); 330 goto out; 331 } 332 sbi->primarydevice_blocks = le32_to_cpu(dsb->blocks); 333 sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr); 334 #ifdef CONFIG_EROFS_FS_XATTR 335 sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr); 336 sbi->xattr_prefix_start = le32_to_cpu(dsb->xattr_prefix_start); 337 sbi->xattr_prefix_count = dsb->xattr_prefix_count; 338 sbi->xattr_filter_reserved = dsb->xattr_filter_reserved; 339 #endif 340 sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact)); 341 sbi->root_nid = le16_to_cpu(dsb->root_nid); 342 sbi->packed_nid = le64_to_cpu(dsb->packed_nid); 343 sbi->inos = le64_to_cpu(dsb->inos); 344 345 sbi->build_time = le64_to_cpu(dsb->build_time); 346 sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec); 347 348 memcpy(&sb->s_uuid, dsb->uuid, sizeof(dsb->uuid)); 349 350 ret = strscpy(sbi->volume_name, dsb->volume_name, 351 sizeof(dsb->volume_name)); 352 if (ret < 0) { /* -E2BIG */ 353 erofs_err(sb, "bad volume name without NIL terminator"); 354 ret = -EFSCORRUPTED; 355 goto out; 356 } 357 358 /* parse on-disk compression configurations */ 359 ret = z_erofs_parse_cfgs(sb, dsb); 360 if (ret < 0) 361 goto out; 362 363 /* handle multiple devices */ 364 ret = erofs_scan_devices(sb, dsb); 365 366 if (erofs_is_fscache_mode(sb)) 367 erofs_info(sb, "EXPERIMENTAL fscache-based on-demand read feature in use. Use at your own risk!"); 368 out: 369 erofs_put_metabuf(&buf); 370 return ret; 371 } 372 373 static void erofs_default_options(struct erofs_fs_context *ctx) 374 { 375 #ifdef CONFIG_EROFS_FS_ZIP 376 ctx->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND; 377 ctx->opt.max_sync_decompress_pages = 3; 378 ctx->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO; 379 #endif 380 #ifdef CONFIG_EROFS_FS_XATTR 381 set_opt(&ctx->opt, XATTR_USER); 382 #endif 383 #ifdef CONFIG_EROFS_FS_POSIX_ACL 384 set_opt(&ctx->opt, POSIX_ACL); 385 #endif 386 } 387 388 enum { 389 Opt_user_xattr, 390 Opt_acl, 391 Opt_cache_strategy, 392 Opt_dax, 393 Opt_dax_enum, 394 Opt_device, 395 Opt_fsid, 396 Opt_domain_id, 397 Opt_err 398 }; 399 400 static const struct constant_table erofs_param_cache_strategy[] = { 401 {"disabled", EROFS_ZIP_CACHE_DISABLED}, 402 {"readahead", EROFS_ZIP_CACHE_READAHEAD}, 403 {"readaround", EROFS_ZIP_CACHE_READAROUND}, 404 {} 405 }; 406 407 static const struct constant_table erofs_dax_param_enums[] = { 408 {"always", EROFS_MOUNT_DAX_ALWAYS}, 409 {"never", EROFS_MOUNT_DAX_NEVER}, 410 {} 411 }; 412 413 static const struct fs_parameter_spec erofs_fs_parameters[] = { 414 fsparam_flag_no("user_xattr", Opt_user_xattr), 415 fsparam_flag_no("acl", Opt_acl), 416 fsparam_enum("cache_strategy", Opt_cache_strategy, 417 erofs_param_cache_strategy), 418 fsparam_flag("dax", Opt_dax), 419 fsparam_enum("dax", Opt_dax_enum, erofs_dax_param_enums), 420 fsparam_string("device", Opt_device), 421 fsparam_string("fsid", Opt_fsid), 422 fsparam_string("domain_id", Opt_domain_id), 423 {} 424 }; 425 426 static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode) 427 { 428 #ifdef CONFIG_FS_DAX 429 struct erofs_fs_context *ctx = fc->fs_private; 430 431 switch (mode) { 432 case EROFS_MOUNT_DAX_ALWAYS: 433 warnfc(fc, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk"); 434 set_opt(&ctx->opt, DAX_ALWAYS); 435 clear_opt(&ctx->opt, DAX_NEVER); 436 return true; 437 case EROFS_MOUNT_DAX_NEVER: 438 set_opt(&ctx->opt, DAX_NEVER); 439 clear_opt(&ctx->opt, DAX_ALWAYS); 440 return true; 441 default: 442 DBG_BUGON(1); 443 return false; 444 } 445 #else 446 errorfc(fc, "dax options not supported"); 447 return false; 448 #endif 449 } 450 451 static int erofs_fc_parse_param(struct fs_context *fc, 452 struct fs_parameter *param) 453 { 454 struct erofs_fs_context *ctx = fc->fs_private; 455 struct fs_parse_result result; 456 struct erofs_device_info *dif; 457 int opt, ret; 458 459 opt = fs_parse(fc, erofs_fs_parameters, param, &result); 460 if (opt < 0) 461 return opt; 462 463 switch (opt) { 464 case Opt_user_xattr: 465 #ifdef CONFIG_EROFS_FS_XATTR 466 if (result.boolean) 467 set_opt(&ctx->opt, XATTR_USER); 468 else 469 clear_opt(&ctx->opt, XATTR_USER); 470 #else 471 errorfc(fc, "{,no}user_xattr options not supported"); 472 #endif 473 break; 474 case Opt_acl: 475 #ifdef CONFIG_EROFS_FS_POSIX_ACL 476 if (result.boolean) 477 set_opt(&ctx->opt, POSIX_ACL); 478 else 479 clear_opt(&ctx->opt, POSIX_ACL); 480 #else 481 errorfc(fc, "{,no}acl options not supported"); 482 #endif 483 break; 484 case Opt_cache_strategy: 485 #ifdef CONFIG_EROFS_FS_ZIP 486 ctx->opt.cache_strategy = result.uint_32; 487 #else 488 errorfc(fc, "compression not supported, cache_strategy ignored"); 489 #endif 490 break; 491 case Opt_dax: 492 if (!erofs_fc_set_dax_mode(fc, EROFS_MOUNT_DAX_ALWAYS)) 493 return -EINVAL; 494 break; 495 case Opt_dax_enum: 496 if (!erofs_fc_set_dax_mode(fc, result.uint_32)) 497 return -EINVAL; 498 break; 499 case Opt_device: 500 dif = kzalloc(sizeof(*dif), GFP_KERNEL); 501 if (!dif) 502 return -ENOMEM; 503 dif->path = kstrdup(param->string, GFP_KERNEL); 504 if (!dif->path) { 505 kfree(dif); 506 return -ENOMEM; 507 } 508 down_write(&ctx->devs->rwsem); 509 ret = idr_alloc(&ctx->devs->tree, dif, 0, 0, GFP_KERNEL); 510 up_write(&ctx->devs->rwsem); 511 if (ret < 0) { 512 kfree(dif->path); 513 kfree(dif); 514 return ret; 515 } 516 ++ctx->devs->extra_devices; 517 break; 518 #ifdef CONFIG_EROFS_FS_ONDEMAND 519 case Opt_fsid: 520 kfree(ctx->fsid); 521 ctx->fsid = kstrdup(param->string, GFP_KERNEL); 522 if (!ctx->fsid) 523 return -ENOMEM; 524 break; 525 case Opt_domain_id: 526 kfree(ctx->domain_id); 527 ctx->domain_id = kstrdup(param->string, GFP_KERNEL); 528 if (!ctx->domain_id) 529 return -ENOMEM; 530 break; 531 #else 532 case Opt_fsid: 533 case Opt_domain_id: 534 errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name); 535 break; 536 #endif 537 default: 538 return -ENOPARAM; 539 } 540 return 0; 541 } 542 543 static struct inode *erofs_nfs_get_inode(struct super_block *sb, 544 u64 ino, u32 generation) 545 { 546 return erofs_iget(sb, ino); 547 } 548 549 static struct dentry *erofs_fh_to_dentry(struct super_block *sb, 550 struct fid *fid, int fh_len, int fh_type) 551 { 552 return generic_fh_to_dentry(sb, fid, fh_len, fh_type, 553 erofs_nfs_get_inode); 554 } 555 556 static struct dentry *erofs_fh_to_parent(struct super_block *sb, 557 struct fid *fid, int fh_len, int fh_type) 558 { 559 return generic_fh_to_parent(sb, fid, fh_len, fh_type, 560 erofs_nfs_get_inode); 561 } 562 563 static struct dentry *erofs_get_parent(struct dentry *child) 564 { 565 erofs_nid_t nid; 566 unsigned int d_type; 567 int err; 568 569 err = erofs_namei(d_inode(child), &dotdot_name, &nid, &d_type); 570 if (err) 571 return ERR_PTR(err); 572 return d_obtain_alias(erofs_iget(child->d_sb, nid)); 573 } 574 575 static const struct export_operations erofs_export_ops = { 576 .encode_fh = generic_encode_ino32_fh, 577 .fh_to_dentry = erofs_fh_to_dentry, 578 .fh_to_parent = erofs_fh_to_parent, 579 .get_parent = erofs_get_parent, 580 }; 581 582 static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) 583 { 584 struct inode *inode; 585 struct erofs_sb_info *sbi; 586 struct erofs_fs_context *ctx = fc->fs_private; 587 int err; 588 589 sb->s_magic = EROFS_SUPER_MAGIC; 590 sb->s_flags |= SB_RDONLY | SB_NOATIME; 591 sb->s_maxbytes = MAX_LFS_FILESIZE; 592 sb->s_op = &erofs_sops; 593 594 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); 595 if (!sbi) 596 return -ENOMEM; 597 598 sb->s_fs_info = sbi; 599 sbi->opt = ctx->opt; 600 sbi->devs = ctx->devs; 601 ctx->devs = NULL; 602 sbi->fsid = ctx->fsid; 603 ctx->fsid = NULL; 604 sbi->domain_id = ctx->domain_id; 605 ctx->domain_id = NULL; 606 607 sbi->blkszbits = PAGE_SHIFT; 608 if (erofs_is_fscache_mode(sb)) { 609 sb->s_blocksize = PAGE_SIZE; 610 sb->s_blocksize_bits = PAGE_SHIFT; 611 612 err = erofs_fscache_register_fs(sb); 613 if (err) 614 return err; 615 616 err = super_setup_bdi(sb); 617 if (err) 618 return err; 619 } else { 620 if (!sb_set_blocksize(sb, PAGE_SIZE)) { 621 errorfc(fc, "failed to set initial blksize"); 622 return -EINVAL; 623 } 624 625 sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev, 626 &sbi->dax_part_off, 627 NULL, NULL); 628 } 629 630 err = erofs_read_superblock(sb); 631 if (err) 632 return err; 633 634 if (sb->s_blocksize_bits != sbi->blkszbits) { 635 if (erofs_is_fscache_mode(sb)) { 636 errorfc(fc, "unsupported blksize for fscache mode"); 637 return -EINVAL; 638 } 639 if (!sb_set_blocksize(sb, 1 << sbi->blkszbits)) { 640 errorfc(fc, "failed to set erofs blksize"); 641 return -EINVAL; 642 } 643 } 644 645 if (test_opt(&sbi->opt, DAX_ALWAYS)) { 646 if (!sbi->dax_dev) { 647 errorfc(fc, "DAX unsupported by block device. Turning off DAX."); 648 clear_opt(&sbi->opt, DAX_ALWAYS); 649 } else if (sbi->blkszbits != PAGE_SHIFT) { 650 errorfc(fc, "unsupported blocksize for DAX"); 651 clear_opt(&sbi->opt, DAX_ALWAYS); 652 } 653 } 654 655 sb->s_time_gran = 1; 656 sb->s_xattr = erofs_xattr_handlers; 657 sb->s_export_op = &erofs_export_ops; 658 659 if (test_opt(&sbi->opt, POSIX_ACL)) 660 sb->s_flags |= SB_POSIXACL; 661 else 662 sb->s_flags &= ~SB_POSIXACL; 663 664 #ifdef CONFIG_EROFS_FS_ZIP 665 xa_init(&sbi->managed_pslots); 666 #endif 667 668 inode = erofs_iget(sb, sbi->root_nid); 669 if (IS_ERR(inode)) 670 return PTR_ERR(inode); 671 672 if (!S_ISDIR(inode->i_mode)) { 673 erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)", 674 sbi->root_nid, inode->i_mode); 675 iput(inode); 676 return -EINVAL; 677 } 678 679 sb->s_root = d_make_root(inode); 680 if (!sb->s_root) 681 return -ENOMEM; 682 683 erofs_shrinker_register(sb); 684 if (erofs_sb_has_fragments(sbi) && sbi->packed_nid) { 685 sbi->packed_inode = erofs_iget(sb, sbi->packed_nid); 686 if (IS_ERR(sbi->packed_inode)) { 687 err = PTR_ERR(sbi->packed_inode); 688 sbi->packed_inode = NULL; 689 return err; 690 } 691 } 692 err = erofs_init_managed_cache(sb); 693 if (err) 694 return err; 695 696 err = erofs_xattr_prefixes_init(sb); 697 if (err) 698 return err; 699 700 err = erofs_register_sysfs(sb); 701 if (err) 702 return err; 703 704 erofs_info(sb, "mounted with root inode @ nid %llu.", sbi->root_nid); 705 return 0; 706 } 707 708 static int erofs_fc_get_tree(struct fs_context *fc) 709 { 710 struct erofs_fs_context *ctx = fc->fs_private; 711 712 if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && ctx->fsid) 713 return get_tree_nodev(fc, erofs_fc_fill_super); 714 715 return get_tree_bdev(fc, erofs_fc_fill_super); 716 } 717 718 static int erofs_fc_reconfigure(struct fs_context *fc) 719 { 720 struct super_block *sb = fc->root->d_sb; 721 struct erofs_sb_info *sbi = EROFS_SB(sb); 722 struct erofs_fs_context *ctx = fc->fs_private; 723 724 DBG_BUGON(!sb_rdonly(sb)); 725 726 if (ctx->fsid || ctx->domain_id) 727 erofs_info(sb, "ignoring reconfiguration for fsid|domain_id."); 728 729 if (test_opt(&ctx->opt, POSIX_ACL)) 730 fc->sb_flags |= SB_POSIXACL; 731 else 732 fc->sb_flags &= ~SB_POSIXACL; 733 734 sbi->opt = ctx->opt; 735 736 fc->sb_flags |= SB_RDONLY; 737 return 0; 738 } 739 740 static int erofs_release_device_info(int id, void *ptr, void *data) 741 { 742 struct erofs_device_info *dif = ptr; 743 744 fs_put_dax(dif->dax_dev, NULL); 745 if (dif->bdev_file) 746 fput(dif->bdev_file); 747 erofs_fscache_unregister_cookie(dif->fscache); 748 dif->fscache = NULL; 749 kfree(dif->path); 750 kfree(dif); 751 return 0; 752 } 753 754 static void erofs_free_dev_context(struct erofs_dev_context *devs) 755 { 756 if (!devs) 757 return; 758 idr_for_each(&devs->tree, &erofs_release_device_info, NULL); 759 idr_destroy(&devs->tree); 760 kfree(devs); 761 } 762 763 static void erofs_fc_free(struct fs_context *fc) 764 { 765 struct erofs_fs_context *ctx = fc->fs_private; 766 767 erofs_free_dev_context(ctx->devs); 768 kfree(ctx->fsid); 769 kfree(ctx->domain_id); 770 kfree(ctx); 771 } 772 773 static const struct fs_context_operations erofs_context_ops = { 774 .parse_param = erofs_fc_parse_param, 775 .get_tree = erofs_fc_get_tree, 776 .reconfigure = erofs_fc_reconfigure, 777 .free = erofs_fc_free, 778 }; 779 780 static int erofs_init_fs_context(struct fs_context *fc) 781 { 782 struct erofs_fs_context *ctx; 783 784 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 785 if (!ctx) 786 return -ENOMEM; 787 ctx->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL); 788 if (!ctx->devs) { 789 kfree(ctx); 790 return -ENOMEM; 791 } 792 fc->fs_private = ctx; 793 794 idr_init(&ctx->devs->tree); 795 init_rwsem(&ctx->devs->rwsem); 796 erofs_default_options(ctx); 797 fc->ops = &erofs_context_ops; 798 return 0; 799 } 800 801 static void erofs_kill_sb(struct super_block *sb) 802 { 803 struct erofs_sb_info *sbi; 804 805 if (erofs_is_fscache_mode(sb)) 806 kill_anon_super(sb); 807 else 808 kill_block_super(sb); 809 810 sbi = EROFS_SB(sb); 811 if (!sbi) 812 return; 813 814 erofs_free_dev_context(sbi->devs); 815 fs_put_dax(sbi->dax_dev, NULL); 816 erofs_fscache_unregister_fs(sb); 817 kfree(sbi->fsid); 818 kfree(sbi->domain_id); 819 kfree(sbi); 820 sb->s_fs_info = NULL; 821 } 822 823 static void erofs_put_super(struct super_block *sb) 824 { 825 struct erofs_sb_info *const sbi = EROFS_SB(sb); 826 827 DBG_BUGON(!sbi); 828 829 erofs_unregister_sysfs(sb); 830 erofs_shrinker_unregister(sb); 831 erofs_xattr_prefixes_cleanup(sb); 832 #ifdef CONFIG_EROFS_FS_ZIP 833 iput(sbi->managed_cache); 834 sbi->managed_cache = NULL; 835 #endif 836 iput(sbi->packed_inode); 837 sbi->packed_inode = NULL; 838 erofs_free_dev_context(sbi->devs); 839 sbi->devs = NULL; 840 erofs_fscache_unregister_fs(sb); 841 } 842 843 static struct file_system_type erofs_fs_type = { 844 .owner = THIS_MODULE, 845 .name = "erofs", 846 .init_fs_context = erofs_init_fs_context, 847 .kill_sb = erofs_kill_sb, 848 .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP, 849 }; 850 MODULE_ALIAS_FS("erofs"); 851 852 static int __init erofs_module_init(void) 853 { 854 int err; 855 856 erofs_check_ondisk_layout_definitions(); 857 858 erofs_inode_cachep = kmem_cache_create("erofs_inode", 859 sizeof(struct erofs_inode), 0, 860 SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT, 861 erofs_inode_init_once); 862 if (!erofs_inode_cachep) 863 return -ENOMEM; 864 865 err = erofs_init_shrinker(); 866 if (err) 867 goto shrinker_err; 868 869 err = z_erofs_lzma_init(); 870 if (err) 871 goto lzma_err; 872 873 err = z_erofs_deflate_init(); 874 if (err) 875 goto deflate_err; 876 877 erofs_pcpubuf_init(); 878 err = z_erofs_init_zip_subsystem(); 879 if (err) 880 goto zip_err; 881 882 err = erofs_init_sysfs(); 883 if (err) 884 goto sysfs_err; 885 886 err = register_filesystem(&erofs_fs_type); 887 if (err) 888 goto fs_err; 889 890 return 0; 891 892 fs_err: 893 erofs_exit_sysfs(); 894 sysfs_err: 895 z_erofs_exit_zip_subsystem(); 896 zip_err: 897 z_erofs_deflate_exit(); 898 deflate_err: 899 z_erofs_lzma_exit(); 900 lzma_err: 901 erofs_exit_shrinker(); 902 shrinker_err: 903 kmem_cache_destroy(erofs_inode_cachep); 904 return err; 905 } 906 907 static void __exit erofs_module_exit(void) 908 { 909 unregister_filesystem(&erofs_fs_type); 910 911 /* Ensure all RCU free inodes / pclusters are safe to be destroyed. */ 912 rcu_barrier(); 913 914 erofs_exit_sysfs(); 915 z_erofs_exit_zip_subsystem(); 916 z_erofs_deflate_exit(); 917 z_erofs_lzma_exit(); 918 erofs_exit_shrinker(); 919 kmem_cache_destroy(erofs_inode_cachep); 920 erofs_pcpubuf_exit(); 921 } 922 923 static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf) 924 { 925 struct super_block *sb = dentry->d_sb; 926 struct erofs_sb_info *sbi = EROFS_SB(sb); 927 u64 id = 0; 928 929 if (!erofs_is_fscache_mode(sb)) 930 id = huge_encode_dev(sb->s_bdev->bd_dev); 931 932 buf->f_type = sb->s_magic; 933 buf->f_bsize = sb->s_blocksize; 934 buf->f_blocks = sbi->total_blocks; 935 buf->f_bfree = buf->f_bavail = 0; 936 937 buf->f_files = ULLONG_MAX; 938 buf->f_ffree = ULLONG_MAX - sbi->inos; 939 940 buf->f_namelen = EROFS_NAME_LEN; 941 942 buf->f_fsid = u64_to_fsid(id); 943 return 0; 944 } 945 946 static int erofs_show_options(struct seq_file *seq, struct dentry *root) 947 { 948 struct erofs_sb_info *sbi = EROFS_SB(root->d_sb); 949 struct erofs_mount_opts *opt = &sbi->opt; 950 951 #ifdef CONFIG_EROFS_FS_XATTR 952 if (test_opt(opt, XATTR_USER)) 953 seq_puts(seq, ",user_xattr"); 954 else 955 seq_puts(seq, ",nouser_xattr"); 956 #endif 957 #ifdef CONFIG_EROFS_FS_POSIX_ACL 958 if (test_opt(opt, POSIX_ACL)) 959 seq_puts(seq, ",acl"); 960 else 961 seq_puts(seq, ",noacl"); 962 #endif 963 #ifdef CONFIG_EROFS_FS_ZIP 964 if (opt->cache_strategy == EROFS_ZIP_CACHE_DISABLED) 965 seq_puts(seq, ",cache_strategy=disabled"); 966 else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAHEAD) 967 seq_puts(seq, ",cache_strategy=readahead"); 968 else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAROUND) 969 seq_puts(seq, ",cache_strategy=readaround"); 970 #endif 971 if (test_opt(opt, DAX_ALWAYS)) 972 seq_puts(seq, ",dax=always"); 973 if (test_opt(opt, DAX_NEVER)) 974 seq_puts(seq, ",dax=never"); 975 #ifdef CONFIG_EROFS_FS_ONDEMAND 976 if (sbi->fsid) 977 seq_printf(seq, ",fsid=%s", sbi->fsid); 978 if (sbi->domain_id) 979 seq_printf(seq, ",domain_id=%s", sbi->domain_id); 980 #endif 981 return 0; 982 } 983 984 const struct super_operations erofs_sops = { 985 .put_super = erofs_put_super, 986 .alloc_inode = erofs_alloc_inode, 987 .free_inode = erofs_free_inode, 988 .statfs = erofs_statfs, 989 .show_options = erofs_show_options, 990 }; 991 992 module_init(erofs_module_init); 993 module_exit(erofs_module_exit); 994 995 MODULE_DESCRIPTION("Enhanced ROM File System"); 996 MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc."); 997 MODULE_LICENSE("GPL"); 998