// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/hfsplus/super.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/slab.h>
#include <linux/vfs.h>
#include <linux/nls.h>

static struct inode *hfsplus_alloc_inode(struct super_block *sb);
static void hfsplus_free_inode(struct inode *inode);

#include "hfsplus_fs.h"
#include "xattr.h"

/*
 * Initialize one of the fixed system (metadata) inodes from the fork
 * data embedded in the volume header.  Only the five well-known system
 * CNIDs are valid here; any other CNID is treated as corruption (-EIO).
 * The b-tree backed files (extents, catalog, attributes) get the b-tree
 * address_space ops; the allocation file uses the regular aops.
 */
static int hfsplus_system_read_inode(struct inode *inode)
{
	struct hfsplus_vh *vhdr = HFSPLUS_SB(inode->i_sb)->s_vhdr;

	switch (inode->i_ino) {
	case HFSPLUS_EXT_CNID:
		hfsplus_inode_read_fork(inode, &vhdr->ext_file);
		inode->i_mapping->a_ops = &hfsplus_btree_aops;
		break;
	case HFSPLUS_CAT_CNID:
		hfsplus_inode_read_fork(inode, &vhdr->cat_file);
		inode->i_mapping->a_ops = &hfsplus_btree_aops;
		break;
	case HFSPLUS_ALLOC_CNID:
		hfsplus_inode_read_fork(inode, &vhdr->alloc_file);
		inode->i_mapping->a_ops = &hfsplus_aops;
		break;
	case HFSPLUS_START_CNID:
		hfsplus_inode_read_fork(inode, &vhdr->start_file);
		break;
	case HFSPLUS_ATTR_CNID:
		hfsplus_inode_read_fork(inode, &vhdr->attr_file);
		inode->i_mapping->a_ops = &hfsplus_btree_aops;
		break;
	default:
		return -EIO;
	}

	/*
	 * Assign a dummy file type, for may_open() requires that
	 * an inode has a valid file type.
	 */
	inode->i_mode = S_IFREG;

	return 0;
}

/*
 * Look up (or instantiate) the inode for @ino.  A freshly created inode
 * has its hfsplus private fields reset here, then is filled either from
 * the catalog tree (user files and the root directory) or from the
 * volume header (system inodes) before being unlocked.
 */
struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino)
{
	struct hfs_find_data fd;
	struct inode *inode;
	int err;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode_state_read_once(inode) & I_NEW))
		return inode;

	atomic_set(&HFSPLUS_I(inode)->opencnt, 0);
	HFSPLUS_I(inode)->first_blocks = 0;
	HFSPLUS_I(inode)->clump_blocks = 0;
	HFSPLUS_I(inode)->alloc_blocks = 0;
	/* U32_MAX marks the cached extent range as invalid/empty */
	HFSPLUS_I(inode)->cached_start = U32_MAX;
	HFSPLUS_I(inode)->cached_blocks = 0;
	memset(HFSPLUS_I(inode)->first_extents, 0, sizeof(hfsplus_extent_rec));
	memset(HFSPLUS_I(inode)->cached_extents, 0, sizeof(hfsplus_extent_rec));
	HFSPLUS_I(inode)->extent_state = 0;
	mutex_init(&HFSPLUS_I(inode)->extents_lock);
	HFSPLUS_I(inode)->rsrc_inode = NULL;
	HFSPLUS_I(inode)->create_date = 0;
	HFSPLUS_I(inode)->linkid = 0;
	HFSPLUS_I(inode)->flags = 0;
	HFSPLUS_I(inode)->fs_blocks = 0;
	HFSPLUS_I(inode)->userflags = 0;
	HFSPLUS_I(inode)->subfolders = 0;
	INIT_LIST_HEAD(&HFSPLUS_I(inode)->open_dir_list);
	spin_lock_init(&HFSPLUS_I(inode)->open_dir_lock);
	HFSPLUS_I(inode)->phys_size = 0;

	if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
	    inode->i_ino == HFSPLUS_ROOT_CNID) {
		err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
		if (!err) {
			err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
			if (!err)
				err = hfsplus_cat_read_inode(inode, &fd);
			hfs_find_exit(&fd);
		}
	} else {
		err = hfsplus_system_read_inode(inode);
	}

	if (err) {
		iget_failed(inode);
		return ERR_PTR(err);
	}

	unlock_new_inode(inode);
	return inode;
}

/*
 * Write a system inode back: update its fork record in the in-core
 * volume header and, for b-tree backed inodes, flush the b-tree.  If
 * the fork size changed, also flag the backup volume header for
 * rewrite and schedule a delayed superblock commit.
 */
static int hfsplus_system_write_inode(struct inode *inode)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
	struct hfsplus_vh *vhdr = sbi->s_vhdr;
	struct hfsplus_fork_raw *fork;
	struct hfs_btree *tree = NULL;

	switch (inode->i_ino) {
	case HFSPLUS_EXT_CNID:
		fork = &vhdr->ext_file;
		tree = sbi->ext_tree;
		break;
	case HFSPLUS_CAT_CNID:
		fork = &vhdr->cat_file;
		tree = sbi->cat_tree;
		break;
	case HFSPLUS_ALLOC_CNID:
		fork = &vhdr->alloc_file;
		break;
	case HFSPLUS_START_CNID:
		fork = &vhdr->start_file;
		break;
	case HFSPLUS_ATTR_CNID:
		fork = &vhdr->attr_file;
		tree = sbi->attr_tree;
		break;
	default:
		return -EIO;
	}

	if (fork->total_size != cpu_to_be64(inode->i_size)) {
		set_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags);
		hfsplus_mark_mdb_dirty(inode->i_sb);
	}
	hfsplus_inode_write_fork(inode, fork);
	if (tree) {
		mutex_lock_nested(&tree->tree_lock,
				  hfsplus_btree_lock_class(tree));
		int err = hfs_btree_write(tree);
		mutex_unlock(&tree->tree_lock);

		if (err) {
			/*
			 * NOTE(review): i_ino is unsigned long but printed
			 * with %llu here and in the hfs_dbg calls below —
			 * fine on 64-bit, verify for 32-bit builds.
			 */
			pr_err("b-tree write err: %d, ino %llu\n",
			       err, inode->i_ino);
			return err;
		}
	}
	return 0;
}

/*
 * ->write_inode: flush any cached extent record first, then write the
 * inode either through the catalog tree (user/root inodes) or into the
 * volume header (system inodes).
 */
static int hfsplus_write_inode(struct inode *inode,
			       struct writeback_control *wbc)
{
	int err;

	hfs_dbg("ino %llu\n", inode->i_ino);

	err = hfsplus_ext_write_extent(inode);
	if (err)
		return err;

	if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
	    inode->i_ino == HFSPLUS_ROOT_CNID)
		return hfsplus_cat_write_inode(inode);
	else
		return hfsplus_system_write_inode(inode);
}

/*
 * ->evict_inode: drop the page cache and, if this is a resource-fork
 * inode, detach it from its owning data inode and drop that reference.
 */
static void hfsplus_evict_inode(struct inode *inode)
{
	hfs_dbg("ino %llu\n", inode->i_ino);
	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
	if (HFSPLUS_IS_RSRC(inode)) {
		HFSPLUS_I(HFSPLUS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
		iput(HFSPLUS_I(inode)->rsrc_inode);
	}
}

/*
 * Write the primary volume header (and the backup copy near the end of
 * the device, when HFSPLUS_SB_WRITEBACKUP is set) back to disk.  Both
 * vh_mutex and alloc_mutex are held, in that order, while the in-core
 * counters are copied into the header and the bios are submitted.
 */
int hfsplus_commit_superblock(struct super_block *sb)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_vh *vhdr = sbi->s_vhdr;
	int write_backup = 0;
	int error = 0, error2;

	hfs_dbg("starting...\n");

	mutex_lock(&sbi->vh_mutex);
	mutex_lock(&sbi->alloc_mutex);
	vhdr->free_blocks = cpu_to_be32(sbi->free_blocks);
	vhdr->next_cnid = cpu_to_be32(sbi->next_cnid);
	vhdr->folder_count = cpu_to_be32(sbi->folder_count);
	vhdr->file_count = cpu_to_be32(sbi->file_count);

	hfs_dbg("free_blocks %u, next_cnid %u, folder_count %u, file_count %u\n",
		sbi->free_blocks, sbi->next_cnid,
		sbi->folder_count, sbi->file_count);

	if (test_and_clear_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags)) {
		memcpy(sbi->s_backup_vhdr, sbi->s_vhdr, sizeof(*sbi->s_vhdr));
		write_backup = 1;
	}

	error2 = hfsplus_submit_bio(sb,
				    sbi->part_start + HFSPLUS_VOLHEAD_SECTOR,
				    sbi->s_vhdr_buf, NULL, REQ_OP_WRITE);
	if (!error)
		error = error2;
	if (!write_backup)
		goto out;

	/* backup volume header lives in the second-to-last sector */
	error2 = hfsplus_submit_bio(sb,
				    sbi->part_start + sbi->sect_count - 2,
				    sbi->s_backup_vhdr_buf, NULL, REQ_OP_WRITE);
	if (!error)
		error = error2;
out:
	mutex_unlock(&sbi->alloc_mutex);
	mutex_unlock(&sbi->vh_mutex);

	hfs_dbg("finished: err %d\n", error);

	return error;
}

/*
 * ->sync_fs: flush the special metadata inodes, commit the volume
 * header, and (unless "nobarrier" is set) issue a disk cache flush.
 * Returns the first error encountered, but attempts every step.
 */
static int hfsplus_sync_fs(struct super_block *sb, int wait)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	int error, error2;

	if (!wait)
		return 0;

	hfs_dbg("starting...\n");

	/*
	 * Explicitly write out the special metadata inodes.
	 *
	 * While these special inodes are marked as hashed and written
	 * out periodically by the flusher threads we redirty them
	 * during writeout of normal inodes, and thus the livelock
	 * prevents us from getting the latest state to disk.
	 */
	error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping);
	error2 = filemap_write_and_wait(sbi->ext_tree->inode->i_mapping);
	if (!error)
		error = error2;
	if (sbi->attr_tree) {
		error2 =
		    filemap_write_and_wait(sbi->attr_tree->inode->i_mapping);
		if (!error)
			error = error2;
	}
	error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping);
	if (!error)
		error = error2;

	error2 = hfsplus_commit_superblock(sb);
	if (!error)
		error = error2;

	if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
		blkdev_issue_flush(sb->s_bdev);

	hfs_dbg("finished: err %d\n", error);

	return error;
}

/* Delayed-work callback: perform the sync queued by hfsplus_mark_mdb_dirty() */
static void delayed_sync_fs(struct work_struct *work)
{
	int err;
	struct hfsplus_sb_info *sbi;

	sbi = container_of(work, struct hfsplus_sb_info, sync_work.work);

	spin_lock(&sbi->work_lock);
	sbi->work_queued = 0;
	spin_unlock(&sbi->work_lock);

	err = hfsplus_sync_fs(sbi->alloc_file->i_sb, 1);
	if (err)
		pr_err("delayed sync fs err %d\n", err);
}

/*
 * Mark the in-core volume header dirty and schedule a delayed sync,
 * unless the filesystem is read-only or a sync is already queued.
 */
void hfsplus_mark_mdb_dirty(struct super_block *sb)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	unsigned long delay;

	if (sb_rdonly(sb))
		return;

	spin_lock(&sbi->work_lock);
	if (!sbi->work_queued) {
		delay = msecs_to_jiffies(dirty_writeback_interval * 10);
		queue_delayed_work(system_long_wq, &sbi->sync_work, delay);
		sbi->work_queued = 1;
	}
	spin_unlock(&sbi->work_lock);
}

/* RCU callback: free the sb_info after the grace period (see kill_sb) */
static void delayed_free(struct rcu_head *p)
{
	struct hfsplus_sb_info *sbi = container_of(p, struct hfsplus_sb_info, rcu);

	unload_nls(sbi->nls);
	kfree(sbi);
}

/*
 * ->put_super: on a writable mount, mark the volume as cleanly
 * unmounted and sync one last time, then release the trees, special
 * inodes, and volume-header buffers.
 */
static void hfsplus_put_super(struct super_block *sb)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);

	hfs_dbg("starting...\n");

	cancel_delayed_work_sync(&sbi->sync_work);

	if (!sb_rdonly(sb) && sbi->s_vhdr) {
		struct hfsplus_vh *vhdr = sbi->s_vhdr;

		vhdr->modify_date = hfsp_now2mt();
		vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_UNMNT);
		vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_INCNSTNT);

		hfsplus_sync_fs(sb, 1);
	}

	iput(sbi->alloc_file);
	iput(sbi->hidden_dir);
	hfs_btree_close(sbi->attr_tree);
	hfs_btree_close(sbi->cat_tree);
	hfs_btree_close(sbi->ext_tree);
	kfree(sbi->s_vhdr_buf);
	kfree(sbi->s_backup_vhdr_buf);
	hfs_dbg("finished\n");
}

/*
 * ->statfs: report sizes in units of sb->s_blocksize; fs_shift converts
 * from allocation blocks.  HFS+ has no real inode limit, so f_files /
 * f_ffree are synthesized from the 32-bit CNID space.
 */
static int hfsplus_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);

	buf->f_type = HFSPLUS_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = sbi->total_blocks << sbi->fs_shift;
	buf->f_bfree = sbi->free_blocks << sbi->fs_shift;
	buf->f_bavail = buf->f_bfree;
	buf->f_files = 0xFFFFFFFF;
	buf->f_ffree = 0xFFFFFFFF - sbi->next_cnid;
	buf->f_fsid = u64_to_fsid(id);
	buf->f_namelen = HFSPLUS_MAX_STRLEN;

	return 0;
}

/*
 * ->reconfigure (remount): a ro->rw transition is refused (the mount
 * stays read-only) when the volume is not cleanly unmounted, is
 * software-locked, or is journaled — unless the "force" option is set.
 */
static int hfsplus_reconfigure(struct fs_context *fc)
{
	struct super_block *sb = fc->root->d_sb;

	sync_filesystem(sb);
	if ((bool)(fc->sb_flags & SB_RDONLY) == sb_rdonly(sb))
		return 0;
	if (!(fc->sb_flags & SB_RDONLY)) {
		struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
		struct hfsplus_vh *vhdr = sbi->s_vhdr;

		if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
			pr_warn("filesystem was not cleanly unmounted, running fsck.hfsplus is recommended. leaving read-only.\n");
			sb->s_flags |= SB_RDONLY;
			fc->sb_flags |= SB_RDONLY;
		} else if (test_bit(HFSPLUS_SB_FORCE, &sbi->flags)) {
			/* nothing */
		} else if (vhdr->attributes &
			   cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
			pr_warn("filesystem is marked locked, leaving read-only.\n");
			sb->s_flags |= SB_RDONLY;
			fc->sb_flags |= SB_RDONLY;
		} else if (vhdr->attributes &
			   cpu_to_be32(HFSPLUS_VOL_JOURNALED)) {
			pr_warn("filesystem is marked journaled, leaving read-only.\n");
			sb->s_flags |= SB_RDONLY;
			fc->sb_flags |= SB_RDONLY;
		}
	}
	return 0;
}

static const struct super_operations hfsplus_sops = {
	.alloc_inode	= hfsplus_alloc_inode,
	.free_inode	= hfsplus_free_inode,
	.write_inode	= hfsplus_write_inode,
	.evict_inode	= hfsplus_evict_inode,
	.put_super	= hfsplus_put_super,
	.sync_fs	= hfsplus_sync_fs,
	.statfs		= hfsplus_statfs,
	.show_options	= hfsplus_show_options,
};

/*
 * Stamp the volume header for a writable mount: record this driver's
 * mount version, bump the write counter, and mark the volume as
 * mounted (clears UNMNT, sets INCNSTNT until a clean unmount).
 */
void hfsplus_prepare_volume_header_for_commit(struct hfsplus_vh *vhdr)
{
	vhdr->last_mount_vers = cpu_to_be32(HFSP_MOUNT_VERSION);
	vhdr->modify_date = hfsp_now2mt();
	be32_add_cpu(&vhdr->write_count, 1);
	vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT);
	vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT);
}

/*
 * Look up the catalog record for @str (the hidden/private directory)
 * directly under the root directory.  Any lookup failure is collapsed
 * to -ENOENT; hfs_find_init/key-build errors are returned as-is.
 */
static inline int hfsplus_get_hidden_dir_entry(struct super_block *sb,
					       const struct qstr *str,
					       hfsplus_cat_entry *entry)
{
	struct hfs_find_data fd;
	int err;

	err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
	if (unlikely(err))
		return err;

	err = hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, str);
	if (unlikely(err))
		goto free_fd;

	err = hfsplus_brec_read_cat(&fd, entry);
	if (err)
		err = -ENOENT;

free_fd:
	hfs_find_exit(&fd);
	return err;
}

/*
 * Fill the superblock at mount time: read and validate the volume
 * header, open the metadata b-trees and the allocation file, load the
 * root directory, find or (on rw mounts) create the hidden "private
 * data" directory, and finally restore the user-requested NLS table
 * (a temporary utf8 table is used during the hidden-dir lookup).
 */
static int hfsplus_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct hfsplus_vh *vhdr;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	hfsplus_cat_entry entry;
	struct inode *root, *inode;
	struct qstr str;
	struct nls_table *nls;
	u64 last_fs_block, last_fs_page;
	int silent = fc->sb_flags & SB_SILENT;
	int err;

	mutex_init(&sbi->alloc_mutex);
	mutex_init(&sbi->vh_mutex);
	spin_lock_init(&sbi->work_lock);
	INIT_DELAYED_WORK(&sbi->sync_work, delayed_sync_fs);

	err = -EINVAL;
	if (!sbi->nls) {
		/* try utf8 first, as this is the old default behaviour */
		sbi->nls = load_nls("utf8");
		if (!sbi->nls)
			sbi->nls = load_nls_default();
	}

	/* temporarily use utf8 to correctly find the hidden dir below */
	nls = sbi->nls;
	sbi->nls = load_nls("utf8");
	if (!sbi->nls) {
		pr_err("unable to load nls for utf8\n");
		goto out_unload_nls;
	}

	/* Grab the volume header */
	if (hfsplus_read_wrapper(sb)) {
		if (!silent)
			pr_warn("unable to find HFS+ superblock\n");
		goto out_unload_nls;
	}
	vhdr = sbi->s_vhdr;

	/* Copy parts of the volume header into the superblock */
	sb->s_magic = HFSPLUS_VOLHEAD_SIG;
	if (be16_to_cpu(vhdr->version) < HFSPLUS_MIN_VERSION ||
	    be16_to_cpu(vhdr->version) > HFSPLUS_CURRENT_VERSION) {
		pr_err("wrong filesystem version\n");
		goto out_free_vhdr;
	}
	sbi->total_blocks = be32_to_cpu(vhdr->total_blocks);
	sbi->free_blocks = be32_to_cpu(vhdr->free_blocks);
	sbi->next_cnid = be32_to_cpu(vhdr->next_cnid);
	sbi->file_count = be32_to_cpu(vhdr->file_count);
	sbi->folder_count = be32_to_cpu(vhdr->folder_count);
	sbi->data_clump_blocks =
		be32_to_cpu(vhdr->data_clump_sz) >> sbi->alloc_blksz_shift;
	if (!sbi->data_clump_blocks)
		sbi->data_clump_blocks = 1;
	sbi->rsrc_clump_blocks =
		be32_to_cpu(vhdr->rsrc_clump_sz) >> sbi->alloc_blksz_shift;
	if (!sbi->rsrc_clump_blocks)
		sbi->rsrc_clump_blocks = 1;

	/* reject volumes whose size overflows sector_t or pgoff_t */
	err = -EFBIG;
	last_fs_block = sbi->total_blocks - 1;
	last_fs_page = (last_fs_block << sbi->alloc_blksz_shift) >>
			PAGE_SHIFT;

	if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) ||
	    (last_fs_page > (pgoff_t)(~0ULL))) {
		pr_err("filesystem size too large\n");
		goto out_free_vhdr;
	}

	/* Set up operations so we can load metadata */
	sb->s_op = &hfsplus_sops;
	sb->s_maxbytes = MAX_LFS_FILESIZE;

	if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
		pr_warn("Filesystem was not cleanly unmounted, running fsck.hfsplus is recommended.  mounting read-only.\n");
		sb->s_flags |= SB_RDONLY;
	} else if (test_and_clear_bit(HFSPLUS_SB_FORCE, &sbi->flags)) {
		/* nothing */
	} else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
		pr_warn("Filesystem is marked locked, mounting read-only.\n");
		sb->s_flags |= SB_RDONLY;
	} else if ((vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_JOURNALED)) &&
			!sb_rdonly(sb)) {
		pr_warn("write access to a journaled filesystem is not supported, use the force option at your own risk, mounting read-only.\n");
		sb->s_flags |= SB_RDONLY;
	}

	err = -EINVAL;

	/* Load metadata objects (B*Trees) */
	sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
	if (!sbi->ext_tree) {
		pr_err("failed to load extents file\n");
		goto out_free_vhdr;
	}
	sbi->cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID);
	if (!sbi->cat_tree) {
		pr_err("failed to load catalog file\n");
		goto out_close_ext_tree;
	}
	atomic_set(&sbi->attr_tree_state, HFSPLUS_EMPTY_ATTR_TREE);
	if (vhdr->attr_file.total_blocks != 0) {
		sbi->attr_tree = hfs_btree_open(sb, HFSPLUS_ATTR_CNID);
		if (!sbi->attr_tree) {
			pr_err("failed to load attributes file\n");
			goto out_close_cat_tree;
		}
		atomic_set(&sbi->attr_tree_state, HFSPLUS_VALID_ATTR_TREE);
	}
	sb->s_xattr = hfsplus_xattr_handlers;

	inode = hfsplus_iget(sb, HFSPLUS_ALLOC_CNID);
	if (IS_ERR(inode)) {
		pr_err("failed to load allocation file\n");
		err = PTR_ERR(inode);
		goto out_close_attr_tree;
	}
	sbi->alloc_file = inode;

	/* Load the root directory */
	root = hfsplus_iget(sb, HFSPLUS_ROOT_CNID);
	if (IS_ERR(root)) {
		pr_err("failed to load root directory\n");
		err = PTR_ERR(root);
		goto out_put_alloc_file;
	}

	set_default_d_op(sb, &hfsplus_dentry_operations);
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		err = -ENOMEM;
		goto out_put_alloc_file;
	}

	str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1;
	str.name = HFSP_HIDDENDIR_NAME;
	err = hfsplus_get_hidden_dir_entry(sb, &str, &entry);
	if (err == -ENOENT) {
		/*
		 * Hidden directory is absent or it cannot be read.
		 * That is fine here; it is created below on rw mounts.
		 */
	} else if (unlikely(err)) {
		goto out_put_root;
	} else {
		if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) {
			err = -EIO;
			goto out_put_root;
		}
		inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id));
		if (IS_ERR(inode)) {
			err = PTR_ERR(inode);
			goto out_put_root;
		}
		sbi->hidden_dir = inode;
	}

	if (!sb_rdonly(sb)) {
		/*
		 * H+LX == hfsplusutils, H+Lx == this driver, H+lx is unused
		 * all three are registered with Apple for our use
		 */
		hfsplus_prepare_volume_header_for_commit(vhdr);
		hfsplus_sync_fs(sb, 1);

		if (!sbi->hidden_dir) {
			mutex_lock(&sbi->vh_mutex);
			sbi->hidden_dir = hfsplus_new_inode(sb, root, S_IFDIR);
			if (!sbi->hidden_dir) {
				mutex_unlock(&sbi->vh_mutex);
				err = -ENOMEM;
				goto out_put_root;
			}
			err = hfsplus_create_cat(sbi->hidden_dir->i_ino, root,
						 &str, sbi->hidden_dir);
			if (err) {
				mutex_unlock(&sbi->vh_mutex);
				goto out_put_hidden_dir;
			}

			err = hfsplus_init_security(sbi->hidden_dir,
						    root, &str);
			if (err == -EOPNOTSUPP)
				err = 0; /* Operation is not supported. */
			else if (err) {
				/*
				 * Try to delete anyway without
				 * error analysis.
				 */
				hfsplus_delete_cat(sbi->hidden_dir->i_ino,
						   root, &str);
				mutex_unlock(&sbi->vh_mutex);
				goto out_put_hidden_dir;
			}

			mutex_unlock(&sbi->vh_mutex);
			hfsplus_mark_inode_dirty(HFSPLUS_CAT_TREE_I(sb),
						 HFSPLUS_I_CAT_DIRTY);
			hfsplus_mark_inode_dirty(sbi->hidden_dir,
						 HFSPLUS_I_CAT_DIRTY);
		}
	}

	/* drop the temporary utf8 table, restore the user-selected one */
	unload_nls(sbi->nls);
	sbi->nls = nls;
	return 0;

out_put_hidden_dir:
	cancel_delayed_work_sync(&sbi->sync_work);
	iput(sbi->hidden_dir);
out_put_root:
	dput(sb->s_root);
	sb->s_root = NULL;
out_put_alloc_file:
	iput(sbi->alloc_file);
out_close_attr_tree:
	hfs_btree_close(sbi->attr_tree);
out_close_cat_tree:
	hfs_btree_close(sbi->cat_tree);
out_close_ext_tree:
	hfs_btree_close(sbi->ext_tree);
out_free_vhdr:
	kfree(sbi->s_vhdr_buf);
	kfree(sbi->s_backup_vhdr_buf);
out_unload_nls:
	/* sbi->nls (if loaded) is released later via delayed_free() */
	unload_nls(nls);
	return err;
}

MODULE_AUTHOR("Brad Boyer");
MODULE_DESCRIPTION("Extended Macintosh Filesystem");
MODULE_LICENSE("GPL");

static struct kmem_cache *hfsplus_inode_cachep;

/* ->alloc_inode: carve an hfsplus_inode_info out of the inode cache */
static struct inode *hfsplus_alloc_inode(struct super_block *sb)
{
	struct hfsplus_inode_info *i;

	i = alloc_inode_sb(sb, hfsplus_inode_cachep, GFP_KERNEL);
	return i ? &i->vfs_inode : NULL;
}

/* ->free_inode: return the container to the inode cache */
static void hfsplus_free_inode(struct inode *inode)
{
	kmem_cache_free(hfsplus_inode_cachep, HFSPLUS_I(inode));
}

#define HFSPLUS_INODE_SIZE	sizeof(struct hfsplus_inode_info)

static int hfsplus_get_tree(struct fs_context *fc)
{
	return get_tree_bdev(fc, hfsplus_fill_super);
}

static void hfsplus_free_fc(struct fs_context *fc)
{
	kfree(fc->s_fs_info);
}

static const struct fs_context_operations hfsplus_context_ops = {
	.parse_param	= hfsplus_parse_param,
	.get_tree	= hfsplus_get_tree,
	.reconfigure	= hfsplus_reconfigure,
	.free		= hfsplus_free_fc,
};

/*
 * Allocate the per-superblock info and attach it to the fs_context.
 * Defaults are only filled for a fresh mount, not a reconfigure.
 */
static int hfsplus_init_fs_context(struct fs_context *fc)
{
	struct hfsplus_sb_info *sbi;

	sbi = kzalloc_obj(struct hfsplus_sb_info);
	if (!sbi)
		return -ENOMEM;

	if (fc->purpose != FS_CONTEXT_FOR_RECONFIGURE)
		hfsplus_fill_defaults(sbi);

	fc->s_fs_info = sbi;
	fc->ops = &hfsplus_context_ops;

	return 0;
}

/*
 * ->kill_sb: tear down the block superblock, then free the sb_info
 * after an RCU grace period (lockless path walking may still be
 * inspecting it).
 */
static void hfsplus_kill_super(struct super_block *sb)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);

	kill_block_super(sb);
	call_rcu(&sbi->rcu, delayed_free);
}

static struct file_system_type hfsplus_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "hfsplus",
	.kill_sb	= hfsplus_kill_super,
	.fs_flags	= FS_REQUIRES_DEV,
	.init_fs_context = hfsplus_init_fs_context,
};
MODULE_ALIAS_FS("hfsplus");

/* slab constructor: runs once per cached object, not per allocation */
static void hfsplus_init_once(void *p)
{
	struct hfsplus_inode_info *i = p;

	inode_init_once(&i->vfs_inode);
}

static int __init init_hfsplus_fs(void)
{
	int err;

	hfsplus_inode_cachep = kmem_cache_create("hfsplus_icache",
		HFSPLUS_INODE_SIZE, 0, SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT,
		hfsplus_init_once);
	if (!hfsplus_inode_cachep)
		return -ENOMEM;
	err = hfsplus_create_attr_tree_cache();
	if (err)
		goto destroy_inode_cache;
	err = register_filesystem(&hfsplus_fs_type);
	if (err)
		goto destroy_attr_tree_cache;
	return 0;

destroy_attr_tree_cache:
	hfsplus_destroy_attr_tree_cache();

destroy_inode_cache:
	kmem_cache_destroy(hfsplus_inode_cachep);

	return err;
}

static void __exit exit_hfsplus_fs(void)
{
	unregister_filesystem(&hfsplus_fs_type);

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	hfsplus_destroy_attr_tree_cache();
	kmem_cache_destroy(hfsplus_inode_cachep);
}

module_init(init_hfsplus_fs)
module_exit(exit_hfsplus_fs)