// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/hfsplus/super.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/vfs.h>
#include <linux/nls.h>

static struct inode *hfsplus_alloc_inode(struct super_block *sb);
static void hfsplus_free_inode(struct inode *inode);

#include "hfsplus_fs.h"
#include "xattr.h"

static int hfsplus_system_read_inode(struct inode *inode)
{
	struct hfsplus_vh *vhdr = HFSPLUS_SB(inode->i_sb)->s_vhdr;

	switch (inode->i_ino) {
	case HFSPLUS_EXT_CNID:
		hfsplus_inode_read_fork(inode, &vhdr->ext_file);
		inode->i_mapping->a_ops = &hfsplus_btree_aops;
		break;
	case HFSPLUS_CAT_CNID:
		hfsplus_inode_read_fork(inode, &vhdr->cat_file);
		inode->i_mapping->a_ops = &hfsplus_btree_aops;
		break;
	case HFSPLUS_ALLOC_CNID:
		hfsplus_inode_read_fork(inode, &vhdr->alloc_file);
		inode->i_mapping->a_ops = &hfsplus_aops;
		break;
	case HFSPLUS_START_CNID:
		hfsplus_inode_read_fork(inode, &vhdr->start_file);
		break;
	case HFSPLUS_ATTR_CNID:
		hfsplus_inode_read_fork(inode, &vhdr->attr_file);
		inode->i_mapping->a_ops = &hfsplus_btree_aops;
		break;
	default:
		return -EIO;
	}

	return 0;
}

struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino)
{
	struct hfs_find_data fd;
	struct inode *inode;
	int err;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	INIT_LIST_HEAD(&HFSPLUS_I(inode)->open_dir_list);
	spin_lock_init(&HFSPLUS_I(inode)->open_dir_lock);
	mutex_init(&HFSPLUS_I(inode)->extents_lock);
	HFSPLUS_I(inode)->flags = 0;
	HFSPLUS_I(inode)->extent_state = 0;
	HFSPLUS_I(inode)->rsrc_inode = NULL;
	atomic_set(&HFSPLUS_I(inode)->opencnt, 0);

	if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
	    inode->i_ino == HFSPLUS_ROOT_CNID) {
		err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
		if (!err) {
			err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
			if (!err)
				err = hfsplus_cat_read_inode(inode, &fd);
			hfs_find_exit(&fd);
		}
	} else {
		err = hfsplus_system_read_inode(inode);
	}

	if (err) {
		iget_failed(inode);
		return ERR_PTR(err);
	}

	unlock_new_inode(inode);
	return inode;
}

static int hfsplus_system_write_inode(struct inode *inode)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
	struct hfsplus_vh *vhdr = sbi->s_vhdr;
	struct hfsplus_fork_raw *fork;
	struct hfs_btree *tree = NULL;

	switch (inode->i_ino) {
	case HFSPLUS_EXT_CNID:
		fork = &vhdr->ext_file;
		tree = sbi->ext_tree;
		break;
	case HFSPLUS_CAT_CNID:
		fork = &vhdr->cat_file;
		tree = sbi->cat_tree;
		break;
	case HFSPLUS_ALLOC_CNID:
		fork = &vhdr->alloc_file;
		break;
	case HFSPLUS_START_CNID:
		fork = &vhdr->start_file;
		break;
	case HFSPLUS_ATTR_CNID:
		fork = &vhdr->attr_file;
		tree = sbi->attr_tree;
		break;
	default:
		return -EIO;
	}

	if (fork->total_size != cpu_to_be64(inode->i_size)) {
		set_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags);
		hfsplus_mark_mdb_dirty(inode->i_sb);
	}
	hfsplus_inode_write_fork(inode, fork);
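	/* System files backed by a B-tree also need their tree header written back. */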
	if (tree) {
		int err = hfs_btree_write(tree);

		if (err) {
			pr_err("b-tree write err: %d, ino %lu\n",
			       err, inode->i_ino);
			return err;
		}
	}
	return 0;
}

static int hfsplus_write_inode(struct inode *inode,
		struct writeback_control *wbc)
{
	int err;

	hfs_dbg(INODE, "hfsplus_write_inode: %lu\n", inode->i_ino);

	err = hfsplus_ext_write_extent(inode);
	if (err)
		return err;

	if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
	    inode->i_ino == HFSPLUS_ROOT_CNID)
		return hfsplus_cat_write_inode(inode);
	else
		return hfsplus_system_write_inode(inode);
}

static void hfsplus_evict_inode(struct inode *inode)
{
	hfs_dbg(INODE, "hfsplus_evict_inode: %lu\n", inode->i_ino);
	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
	if (HFSPLUS_IS_RSRC(inode)) {
		HFSPLUS_I(HFSPLUS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
		iput(HFSPLUS_I(inode)->rsrc_inode);
	}
}

static int hfsplus_sync_fs(struct super_block *sb, int wait)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_vh *vhdr = sbi->s_vhdr;
	int write_backup = 0;
	int error, error2;

	if (!wait)
		return 0;

	hfs_dbg(SUPER, "hfsplus_sync_fs\n");

	/*
	 * Explicitly write out the special metadata inodes.
	 *
	 * While these special inodes are marked as hashed and written
	 * out periodically by the flusher threads, we redirty them
	 * during writeout of normal inodes, and thus this livelock
	 * prevents us from getting the latest state to disk.
	 */
	error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping);
	error2 = filemap_write_and_wait(sbi->ext_tree->inode->i_mapping);
	if (!error)
		error = error2;
	if (sbi->attr_tree) {
		error2 =
		    filemap_write_and_wait(sbi->attr_tree->inode->i_mapping);
		if (!error)
			error = error2;
	}
	error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping);
	if (!error)
		error = error2;

	mutex_lock(&sbi->vh_mutex);
	mutex_lock(&sbi->alloc_mutex);
	vhdr->free_blocks = cpu_to_be32(sbi->free_blocks);
	vhdr->next_cnid = cpu_to_be32(sbi->next_cnid);
	vhdr->folder_count = cpu_to_be32(sbi->folder_count);
	vhdr->file_count = cpu_to_be32(sbi->file_count);

	if (test_and_clear_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags)) {
		memcpy(sbi->s_backup_vhdr, sbi->s_vhdr, sizeof(*sbi->s_vhdr));
		write_backup = 1;
	}

	error2 = hfsplus_submit_bio(sb,
				    sbi->part_start + HFSPLUS_VOLHEAD_SECTOR,
				    sbi->s_vhdr_buf, NULL, REQ_OP_WRITE |
				    REQ_SYNC);
	if (!error)
		error = error2;
	if (!write_backup)
		goto out;

	error2 = hfsplus_submit_bio(sb,
				    sbi->part_start + sbi->sect_count - 2,
				    sbi->s_backup_vhdr_buf, NULL, REQ_OP_WRITE |
				    REQ_SYNC);
	if (!error)
		error = error2;
out:
	mutex_unlock(&sbi->alloc_mutex);
	mutex_unlock(&sbi->vh_mutex);

	if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
		blkdev_issue_flush(sb->s_bdev);

	return error;
}

static void delayed_sync_fs(struct work_struct *work)
{
	int err;
	struct hfsplus_sb_info *sbi;

	sbi = container_of(work, struct hfsplus_sb_info, sync_work.work);

	spin_lock(&sbi->work_lock);
	sbi->work_queued = 0;
	spin_unlock(&sbi->work_lock);

	err = hfsplus_sync_fs(sbi->alloc_file->i_sb, 1);
	if (err)
		pr_err("delayed sync fs err %d\n", err);
}
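
/*
 * Schedule a delayed writeback of the volume header.  Updates are batched:
 * the first caller after an idle period queues delayed_sync_fs() on
 * system_long_wq; later callers find work_queued set and do nothing.
 */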
void hfsplus_mark_mdb_dirty(struct super_block *sb)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	unsigned long delay;

	if (sb_rdonly(sb))
		return;

	spin_lock(&sbi->work_lock);
	if (!sbi->work_queued) {
		delay = msecs_to_jiffies(dirty_writeback_interval * 10);
		queue_delayed_work(system_long_wq, &sbi->sync_work, delay);
		sbi->work_queued = 1;
	}
	spin_unlock(&sbi->work_lock);
}

static void delayed_free(struct rcu_head *p)
{
	struct hfsplus_sb_info *sbi = container_of(p, struct hfsplus_sb_info, rcu);

	unload_nls(sbi->nls);
	kfree(sbi);
}

static void hfsplus_put_super(struct super_block *sb)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);

	hfs_dbg(SUPER, "hfsplus_put_super\n");

	cancel_delayed_work_sync(&sbi->sync_work);

	if (!sb_rdonly(sb) && sbi->s_vhdr) {
		struct hfsplus_vh *vhdr = sbi->s_vhdr;

		vhdr->modify_date = hfsp_now2mt();
		vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_UNMNT);
		vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_INCNSTNT);

		hfsplus_sync_fs(sb, 1);
	}

	iput(sbi->alloc_file);
	iput(sbi->hidden_dir);
	hfs_btree_close(sbi->attr_tree);
	hfs_btree_close(sbi->cat_tree);
	hfs_btree_close(sbi->ext_tree);
	kfree(sbi->s_vhdr_buf);
	kfree(sbi->s_backup_vhdr_buf);
	call_rcu(&sbi->rcu, delayed_free);
}

static int hfsplus_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);

	buf->f_type = HFSPLUS_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = sbi->total_blocks << sbi->fs_shift;
	buf->f_bfree = sbi->free_blocks << sbi->fs_shift;
	buf->f_bavail = buf->f_bfree;
	buf->f_files = 0xFFFFFFFF;
	buf->f_ffree = 0xFFFFFFFF - sbi->next_cnid;
	buf->f_fsid = u64_to_fsid(id);
	buf->f_namelen = HFSPLUS_MAX_STRLEN;

	return 0;
}

static int hfsplus_remount(struct super_block *sb, int *flags, char *data)
{
	sync_filesystem(sb);
	if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
		return 0;
	if (!(*flags & SB_RDONLY)) {
		struct hfsplus_vh *vhdr = HFSPLUS_SB(sb)->s_vhdr;
		int force = 0;

		if (!hfsplus_parse_options_remount(data, &force))
			return -EINVAL;
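
		/*
		 * Going read-write: refuse unless the volume was cleanly
		 * unmounted and is neither soft-locked nor journaled
		 * (unless the force option overrides this).
		 */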
		if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
			pr_warn("filesystem was not cleanly unmounted, running fsck.hfsplus is recommended. leaving read-only.\n");
			sb->s_flags |= SB_RDONLY;
			*flags |= SB_RDONLY;
		} else if (force) {
			/* nothing */
		} else if (vhdr->attributes &
				cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
			pr_warn("filesystem is marked locked, leaving read-only.\n");
			sb->s_flags |= SB_RDONLY;
			*flags |= SB_RDONLY;
		} else if (vhdr->attributes &
				cpu_to_be32(HFSPLUS_VOL_JOURNALED)) {
			pr_warn("filesystem is marked journaled, leaving read-only.\n");
			sb->s_flags |= SB_RDONLY;
			*flags |= SB_RDONLY;
		}
	}
	return 0;
}

static const struct super_operations hfsplus_sops = {
	.alloc_inode	= hfsplus_alloc_inode,
	.free_inode	= hfsplus_free_inode,
	.write_inode	= hfsplus_write_inode,
	.evict_inode	= hfsplus_evict_inode,
	.put_super	= hfsplus_put_super,
	.sync_fs	= hfsplus_sync_fs,
	.statfs		= hfsplus_statfs,
	.remount_fs	= hfsplus_remount,
	.show_options	= hfsplus_show_options,
};

static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
{
	struct hfsplus_vh *vhdr;
	struct hfsplus_sb_info *sbi;
	hfsplus_cat_entry entry;
	struct hfs_find_data fd;
	struct inode *root, *inode;
	struct qstr str;
	struct nls_table *nls = NULL;
	u64 last_fs_block, last_fs_page;
	int err;

	err = -ENOMEM;
	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	if (!sbi)
		goto out;

	sb->s_fs_info = sbi;
	mutex_init(&sbi->alloc_mutex);
	mutex_init(&sbi->vh_mutex);
	spin_lock_init(&sbi->work_lock);
	INIT_DELAYED_WORK(&sbi->sync_work, delayed_sync_fs);
	hfsplus_fill_defaults(sbi);

	err = -EINVAL;
	if (!hfsplus_parse_options(data, sbi)) {
		pr_err("unable to parse mount options\n");
		goto out_unload_nls;
	}

	/* temporarily use utf8 to correctly find the hidden dir below */
	nls = sbi->nls;
	sbi->nls = load_nls("utf8");
	if (!sbi->nls) {
		pr_err("unable to load nls for utf8\n");
		goto out_unload_nls;
	}

	/* Grab the volume header */
	if (hfsplus_read_wrapper(sb)) {
		if (!silent)
			pr_warn("unable to find HFS+ superblock\n");
		goto out_unload_nls;
	}
	vhdr = sbi->s_vhdr;

	/* Copy parts of the volume header into the superblock */
	sb->s_magic = HFSPLUS_VOLHEAD_SIG;
	if (be16_to_cpu(vhdr->version) < HFSPLUS_MIN_VERSION ||
	    be16_to_cpu(vhdr->version) > HFSPLUS_CURRENT_VERSION) {
		pr_err("wrong filesystem version\n");
		goto out_free_vhdr;
	}
	sbi->total_blocks = be32_to_cpu(vhdr->total_blocks);
	sbi->free_blocks = be32_to_cpu(vhdr->free_blocks);
	sbi->next_cnid = be32_to_cpu(vhdr->next_cnid);
	sbi->file_count = be32_to_cpu(vhdr->file_count);
	sbi->folder_count = be32_to_cpu(vhdr->folder_count);
	sbi->data_clump_blocks =
		be32_to_cpu(vhdr->data_clump_sz) >> sbi->alloc_blksz_shift;
	if (!sbi->data_clump_blocks)
		sbi->data_clump_blocks = 1;
	sbi->rsrc_clump_blocks =
		be32_to_cpu(vhdr->rsrc_clump_sz) >> sbi->alloc_blksz_shift;
	if (!sbi->rsrc_clump_blocks)
		sbi->rsrc_clump_blocks = 1;

	err = -EFBIG;
	last_fs_block = sbi->total_blocks - 1;
	last_fs_page = (last_fs_block << sbi->alloc_blksz_shift) >>
			PAGE_SHIFT;

	if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) ||
	    (last_fs_page > (pgoff_t)(~0ULL))) {
		pr_err("filesystem size too large\n");
		goto out_free_vhdr;
	}

	/* Set up operations so we can load metadata */
	sb->s_op = &hfsplus_sops;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
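
	/*
	 * Same policy as hfsplus_remount(): an unclean, soft-locked or
	 * journaled volume is mounted read-only unless forced.
	 */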
	if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
		pr_warn("Filesystem was not cleanly unmounted, running fsck.hfsplus is recommended. mounting read-only.\n");
		sb->s_flags |= SB_RDONLY;
	} else if (test_and_clear_bit(HFSPLUS_SB_FORCE, &sbi->flags)) {
		/* nothing */
	} else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
		pr_warn("Filesystem is marked locked, mounting read-only.\n");
		sb->s_flags |= SB_RDONLY;
	} else if ((vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_JOURNALED)) &&
			!sb_rdonly(sb)) {
		pr_warn("write access to a journaled filesystem is not supported, use the force option at your own risk, mounting read-only.\n");
		sb->s_flags |= SB_RDONLY;
	}

	err = -EINVAL;

	/* Load metadata objects (B*Trees) */
	sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
	if (!sbi->ext_tree) {
		pr_err("failed to load extents file\n");
		goto out_free_vhdr;
	}
	sbi->cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID);
	if (!sbi->cat_tree) {
		pr_err("failed to load catalog file\n");
		goto out_close_ext_tree;
	}
	atomic_set(&sbi->attr_tree_state, HFSPLUS_EMPTY_ATTR_TREE);
	if (vhdr->attr_file.total_blocks != 0) {
		sbi->attr_tree = hfs_btree_open(sb, HFSPLUS_ATTR_CNID);
		if (!sbi->attr_tree) {
			pr_err("failed to load attributes file\n");
			goto out_close_cat_tree;
		}
		atomic_set(&sbi->attr_tree_state, HFSPLUS_VALID_ATTR_TREE);
	}
	sb->s_xattr = hfsplus_xattr_handlers;

	inode = hfsplus_iget(sb, HFSPLUS_ALLOC_CNID);
	if (IS_ERR(inode)) {
		pr_err("failed to load allocation file\n");
		err = PTR_ERR(inode);
		goto out_close_attr_tree;
	}
	sbi->alloc_file = inode;

	/* Load the root directory */
	root = hfsplus_iget(sb, HFSPLUS_ROOT_CNID);
	if (IS_ERR(root)) {
		pr_err("failed to load root directory\n");
		err = PTR_ERR(root);
		goto out_put_alloc_file;
	}

	sb->s_d_op = &hfsplus_dentry_operations;
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		err = -ENOMEM;
		goto out_put_alloc_file;
	}

	str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1;
	str.name = HFSP_HIDDENDIR_NAME;
	err = hfs_find_init(sbi->cat_tree, &fd);
	if (err)
		goto out_put_root;
	err = hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str);
	if (unlikely(err < 0))
		goto out_put_root;
	if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
		hfs_find_exit(&fd);
		if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) {
			err = -EINVAL;
			goto out_put_root;
		}
		inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id));
		if (IS_ERR(inode)) {
			err = PTR_ERR(inode);
			goto out_put_root;
		}
		sbi->hidden_dir = inode;
	} else
		hfs_find_exit(&fd);

	if (!sb_rdonly(sb)) {
		/*
		 * H+LX == hfsplusutils, H+Lx == this driver, H+lx is unused
		 * all three are registered with Apple for our use
		 */
		vhdr->last_mount_vers = cpu_to_be32(HFSP_MOUNT_VERSION);
		vhdr->modify_date = hfsp_now2mt();
		be32_add_cpu(&vhdr->write_count, 1);
		vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT);
		vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT);
		hfsplus_sync_fs(sb, 1);
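
		/*
		 * If the private metadata directory was not found above,
		 * create it now that we have write access.
		 */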
		if (!sbi->hidden_dir) {
			mutex_lock(&sbi->vh_mutex);
			sbi->hidden_dir = hfsplus_new_inode(sb, root, S_IFDIR);
			if (!sbi->hidden_dir) {
				mutex_unlock(&sbi->vh_mutex);
				err = -ENOMEM;
				goto out_put_root;
			}
			err = hfsplus_create_cat(sbi->hidden_dir->i_ino, root,
						 &str, sbi->hidden_dir);
			if (err) {
				mutex_unlock(&sbi->vh_mutex);
				goto out_put_hidden_dir;
			}

			err = hfsplus_init_security(sbi->hidden_dir,
							root, &str);
			if (err == -EOPNOTSUPP)
				err = 0; /* Operation is not supported. */
			else if (err) {
				/*
				 * Try to delete anyway without
				 * error analysis.
				 */
				hfsplus_delete_cat(sbi->hidden_dir->i_ino,
							root, &str);
				mutex_unlock(&sbi->vh_mutex);
				goto out_put_hidden_dir;
			}

			mutex_unlock(&sbi->vh_mutex);
			hfsplus_mark_inode_dirty(sbi->hidden_dir,
						 HFSPLUS_I_CAT_DIRTY);
		}
	}

	unload_nls(sbi->nls);
	sbi->nls = nls;
	return 0;

out_put_hidden_dir:
	cancel_delayed_work_sync(&sbi->sync_work);
	iput(sbi->hidden_dir);
out_put_root:
	dput(sb->s_root);
	sb->s_root = NULL;
out_put_alloc_file:
	iput(sbi->alloc_file);
out_close_attr_tree:
	hfs_btree_close(sbi->attr_tree);
out_close_cat_tree:
	hfs_btree_close(sbi->cat_tree);
out_close_ext_tree:
	hfs_btree_close(sbi->ext_tree);
out_free_vhdr:
	kfree(sbi->s_vhdr_buf);
	kfree(sbi->s_backup_vhdr_buf);
out_unload_nls:
	unload_nls(sbi->nls);
	unload_nls(nls);
	kfree(sbi);
out:
	return err;
}

MODULE_AUTHOR("Brad Boyer");
MODULE_DESCRIPTION("Extended Macintosh Filesystem");
MODULE_LICENSE("GPL");

static struct kmem_cache *hfsplus_inode_cachep;

static struct inode *hfsplus_alloc_inode(struct super_block *sb)
{
	struct hfsplus_inode_info *i;

	i = alloc_inode_sb(sb, hfsplus_inode_cachep, GFP_KERNEL);
	return i ? &i->vfs_inode : NULL;
}

static void hfsplus_free_inode(struct inode *inode)
{
	kmem_cache_free(hfsplus_inode_cachep, HFSPLUS_I(inode));
}

#define HFSPLUS_INODE_SIZE	sizeof(struct hfsplus_inode_info)

static struct dentry *hfsplus_mount(struct file_system_type *fs_type,
			  int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, hfsplus_fill_super);
}

static struct file_system_type hfsplus_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "hfsplus",
	.mount		= hfsplus_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("hfsplus");

static void hfsplus_init_once(void *p)
{
	struct hfsplus_inode_info *i = p;

	inode_init_once(&i->vfs_inode);
}

static int __init init_hfsplus_fs(void)
{
	int err;

	hfsplus_inode_cachep = kmem_cache_create("hfsplus_icache",
		HFSPLUS_INODE_SIZE, 0, SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT,
		hfsplus_init_once);
	if (!hfsplus_inode_cachep)
		return -ENOMEM;
	err = hfsplus_create_attr_tree_cache();
	if (err)
		goto destroy_inode_cache;
	err = register_filesystem(&hfsplus_fs_type);
	if (err)
		goto destroy_attr_tree_cache;
	return 0;

destroy_attr_tree_cache:
	hfsplus_destroy_attr_tree_cache();

destroy_inode_cache:
	kmem_cache_destroy(hfsplus_inode_cachep);

	return err;
}

static void __exit exit_hfsplus_fs(void)
{
	unregister_filesystem(&hfsplus_fs_type);

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	hfsplus_destroy_attr_tree_cache();
	kmem_cache_destroy(hfsplus_inode_cachep);
}

module_init(init_hfsplus_fs)
module_exit(exit_hfsplus_fs)