xref: /linux/fs/hfsplus/super.c (revision 69050f8d6d075dc01af7a5f2f550a8067510366f)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/fs/hfsplus/super.c
4  *
5  * Copyright (C) 2001
6  * Brad Boyer (flar@allandria.com)
7  * (C) 2003 Ardis Technologies <roman@ardistech.com>
8  *
9  */
10 
11 #include <linux/module.h>
12 #include <linux/init.h>
13 #include <linux/pagemap.h>
14 #include <linux/blkdev.h>
15 #include <linux/backing-dev.h>
16 #include <linux/fs.h>
17 #include <linux/fs_context.h>
18 #include <linux/slab.h>
19 #include <linux/vfs.h>
20 #include <linux/nls.h>
21 
22 static struct inode *hfsplus_alloc_inode(struct super_block *sb);
23 static void hfsplus_free_inode(struct inode *inode);
24 
25 #include "hfsplus_fs.h"
26 #include "xattr.h"
27 
28 static int hfsplus_system_read_inode(struct inode *inode)
29 {
30 	struct hfsplus_vh *vhdr = HFSPLUS_SB(inode->i_sb)->s_vhdr;
31 
32 	switch (inode->i_ino) {
33 	case HFSPLUS_EXT_CNID:
34 		hfsplus_inode_read_fork(inode, &vhdr->ext_file);
35 		inode->i_mapping->a_ops = &hfsplus_btree_aops;
36 		break;
37 	case HFSPLUS_CAT_CNID:
38 		hfsplus_inode_read_fork(inode, &vhdr->cat_file);
39 		inode->i_mapping->a_ops = &hfsplus_btree_aops;
40 		break;
41 	case HFSPLUS_ALLOC_CNID:
42 		hfsplus_inode_read_fork(inode, &vhdr->alloc_file);
43 		inode->i_mapping->a_ops = &hfsplus_aops;
44 		break;
45 	case HFSPLUS_START_CNID:
46 		hfsplus_inode_read_fork(inode, &vhdr->start_file);
47 		break;
48 	case HFSPLUS_ATTR_CNID:
49 		hfsplus_inode_read_fork(inode, &vhdr->attr_file);
50 		inode->i_mapping->a_ops = &hfsplus_btree_aops;
51 		break;
52 	default:
53 		return -EIO;
54 	}
55 
56 	/*
57 	 * Assign a dummy file type, for may_open() requires that
58 	 * an inode has a valid file type.
59 	 */
60 	inode->i_mode = S_IFREG;
61 
62 	return 0;
63 }
64 
65 struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino)
66 {
67 	struct hfs_find_data fd;
68 	struct inode *inode;
69 	int err;
70 
71 	inode = iget_locked(sb, ino);
72 	if (!inode)
73 		return ERR_PTR(-ENOMEM);
74 	if (!(inode_state_read_once(inode) & I_NEW))
75 		return inode;
76 
77 	atomic_set(&HFSPLUS_I(inode)->opencnt, 0);
78 	HFSPLUS_I(inode)->first_blocks = 0;
79 	HFSPLUS_I(inode)->clump_blocks = 0;
80 	HFSPLUS_I(inode)->alloc_blocks = 0;
81 	HFSPLUS_I(inode)->cached_start = U32_MAX;
82 	HFSPLUS_I(inode)->cached_blocks = 0;
83 	memset(HFSPLUS_I(inode)->first_extents, 0, sizeof(hfsplus_extent_rec));
84 	memset(HFSPLUS_I(inode)->cached_extents, 0, sizeof(hfsplus_extent_rec));
85 	HFSPLUS_I(inode)->extent_state = 0;
86 	mutex_init(&HFSPLUS_I(inode)->extents_lock);
87 	HFSPLUS_I(inode)->rsrc_inode = NULL;
88 	HFSPLUS_I(inode)->create_date = 0;
89 	HFSPLUS_I(inode)->linkid = 0;
90 	HFSPLUS_I(inode)->flags = 0;
91 	HFSPLUS_I(inode)->fs_blocks = 0;
92 	HFSPLUS_I(inode)->userflags = 0;
93 	HFSPLUS_I(inode)->subfolders = 0;
94 	INIT_LIST_HEAD(&HFSPLUS_I(inode)->open_dir_list);
95 	spin_lock_init(&HFSPLUS_I(inode)->open_dir_lock);
96 	HFSPLUS_I(inode)->phys_size = 0;
97 
98 	if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
99 	    inode->i_ino == HFSPLUS_ROOT_CNID) {
100 		err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
101 		if (!err) {
102 			err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
103 			if (!err)
104 				err = hfsplus_cat_read_inode(inode, &fd);
105 			hfs_find_exit(&fd);
106 		}
107 	} else {
108 		err = hfsplus_system_read_inode(inode);
109 	}
110 
111 	if (err) {
112 		iget_failed(inode);
113 		return ERR_PTR(err);
114 	}
115 
116 	unlock_new_inode(inode);
117 	return inode;
118 }
119 
/*
 * Write back one of the special metadata inodes: update its fork
 * record in the in-core volume header and, for B-tree backed files,
 * flush the tree's header as well.
 *
 * Returns 0 on success, -EIO for an unknown inode number, or the
 * error from hfs_btree_write().
 */
static int hfsplus_system_write_inode(struct inode *inode)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
	struct hfsplus_vh *vhdr = sbi->s_vhdr;
	struct hfsplus_fork_raw *fork;
	struct hfs_btree *tree = NULL;

	switch (inode->i_ino) {
	case HFSPLUS_EXT_CNID:
		fork = &vhdr->ext_file;
		tree = sbi->ext_tree;
		break;
	case HFSPLUS_CAT_CNID:
		fork = &vhdr->cat_file;
		tree = sbi->cat_tree;
		break;
	case HFSPLUS_ALLOC_CNID:
		/* Allocation and startup files have no B-tree to flush. */
		fork = &vhdr->alloc_file;
		break;
	case HFSPLUS_START_CNID:
		fork = &vhdr->start_file;
		break;
	case HFSPLUS_ATTR_CNID:
		fork = &vhdr->attr_file;
		tree = sbi->attr_tree;
		break;
	default:
		return -EIO;
	}

	/*
	 * A metadata-file size change must reach the backup volume
	 * header too, so flag it for the next superblock commit.
	 */
	if (fork->total_size != cpu_to_be64(inode->i_size)) {
		set_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags);
		hfsplus_mark_mdb_dirty(inode->i_sb);
	}
	hfsplus_inode_write_fork(inode, fork);
	if (tree) {
		int err = hfs_btree_write(tree);

		if (err) {
			pr_err("b-tree write err: %d, ino %lu\n",
			       err, inode->i_ino);
			return err;
		}
	}
	return 0;
}
166 
167 static int hfsplus_write_inode(struct inode *inode,
168 		struct writeback_control *wbc)
169 {
170 	int err;
171 
172 	hfs_dbg("ino %lu\n", inode->i_ino);
173 
174 	err = hfsplus_ext_write_extent(inode);
175 	if (err)
176 		return err;
177 
178 	if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
179 	    inode->i_ino == HFSPLUS_ROOT_CNID)
180 		return hfsplus_cat_write_inode(inode);
181 	else
182 		return hfsplus_system_write_inode(inode);
183 }
184 
185 static void hfsplus_evict_inode(struct inode *inode)
186 {
187 	hfs_dbg("ino %lu\n", inode->i_ino);
188 	truncate_inode_pages_final(&inode->i_data);
189 	clear_inode(inode);
190 	if (HFSPLUS_IS_RSRC(inode)) {
191 		HFSPLUS_I(HFSPLUS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
192 		iput(HFSPLUS_I(inode)->rsrc_inode);
193 	}
194 }
195 
/*
 * Write the primary volume header back to disk and, when the
 * WRITEBACKUP flag was set, also refresh the backup copy kept in the
 * second-to-last sector.  Takes vh_mutex then alloc_mutex so the
 * counters copied out of sbi are consistent.  Returns the first
 * submission error, if any.
 */
int hfsplus_commit_superblock(struct super_block *sb)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_vh *vhdr = sbi->s_vhdr;
	int write_backup = 0;
	int error = 0, error2;

	hfs_dbg("starting...\n");

	mutex_lock(&sbi->vh_mutex);
	mutex_lock(&sbi->alloc_mutex);
	/* Refresh the on-disk header from the in-core counters. */
	vhdr->free_blocks = cpu_to_be32(sbi->free_blocks);
	vhdr->next_cnid = cpu_to_be32(sbi->next_cnid);
	vhdr->folder_count = cpu_to_be32(sbi->folder_count);
	vhdr->file_count = cpu_to_be32(sbi->file_count);

	hfs_dbg("free_blocks %u, next_cnid %u, folder_count %u, file_count %u\n",
		sbi->free_blocks, sbi->next_cnid,
		sbi->folder_count, sbi->file_count);

	if (test_and_clear_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags)) {
		memcpy(sbi->s_backup_vhdr, sbi->s_vhdr, sizeof(*sbi->s_vhdr));
		write_backup = 1;
	}

	error2 = hfsplus_submit_bio(sb,
				   sbi->part_start + HFSPLUS_VOLHEAD_SECTOR,
				   sbi->s_vhdr_buf, NULL, REQ_OP_WRITE);
	if (!error)
		error = error2;
	if (!write_backup)
		goto out;

	/* The backup volume header lives two sectors from the end. */
	error2 = hfsplus_submit_bio(sb,
				  sbi->part_start + sbi->sect_count - 2,
				  sbi->s_backup_vhdr_buf, NULL, REQ_OP_WRITE);
	if (!error)
		error = error2;
out:
	mutex_unlock(&sbi->alloc_mutex);
	mutex_unlock(&sbi->vh_mutex);

	hfs_dbg("finished: err %d\n", error);

	return error;
}
242 
/*
 * ->sync_fs: flush the metadata inodes (catalog, extents, optional
 * attributes tree, allocation file) and then commit the volume
 * header.  A no-op unless @wait is set.  Returns the first error seen
 * while still attempting all of the remaining flushes.
 */
static int hfsplus_sync_fs(struct super_block *sb, int wait)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	int error, error2;

	if (!wait)
		return 0;

	hfs_dbg("starting...\n");

	/*
	 * Explicitly write out the special metadata inodes.
	 *
	 * While these special inodes are marked as hashed and written
	 * out periodically by the flusher threads we redirty them
	 * during writeout of normal inodes, and thus the livelock
	 * prevents us from getting the latest state to disk.
	 */
	error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping);
	error2 = filemap_write_and_wait(sbi->ext_tree->inode->i_mapping);
	if (!error)
		error = error2;
	/* The attributes tree is optional on HFS+ volumes. */
	if (sbi->attr_tree) {
		error2 =
		    filemap_write_and_wait(sbi->attr_tree->inode->i_mapping);
		if (!error)
			error = error2;
	}
	error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping);
	if (!error)
		error = error2;

	error2 = hfsplus_commit_superblock(sb);
	if (!error)
		error = error2;

	/* Honor the "nobarrier" mount option. */
	if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
		blkdev_issue_flush(sb->s_bdev);

	hfs_dbg("finished: err %d\n", error);

	return error;
}
286 
287 static void delayed_sync_fs(struct work_struct *work)
288 {
289 	int err;
290 	struct hfsplus_sb_info *sbi;
291 
292 	sbi = container_of(work, struct hfsplus_sb_info, sync_work.work);
293 
294 	spin_lock(&sbi->work_lock);
295 	sbi->work_queued = 0;
296 	spin_unlock(&sbi->work_lock);
297 
298 	err = hfsplus_sync_fs(sbi->alloc_file->i_sb, 1);
299 	if (err)
300 		pr_err("delayed sync fs err %d\n", err);
301 }
302 
303 void hfsplus_mark_mdb_dirty(struct super_block *sb)
304 {
305 	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
306 	unsigned long delay;
307 
308 	if (sb_rdonly(sb))
309 		return;
310 
311 	spin_lock(&sbi->work_lock);
312 	if (!sbi->work_queued) {
313 		delay = msecs_to_jiffies(dirty_writeback_interval * 10);
314 		queue_delayed_work(system_long_wq, &sbi->sync_work, delay);
315 		sbi->work_queued = 1;
316 	}
317 	spin_unlock(&sbi->work_lock);
318 }
319 
320 static void delayed_free(struct rcu_head *p)
321 {
322 	struct hfsplus_sb_info *sbi = container_of(p, struct hfsplus_sb_info, rcu);
323 
324 	unload_nls(sbi->nls);
325 	kfree(sbi);
326 }
327 
/*
 * ->put_super: final unmount.  Cancel the delayed sync work, mark a
 * writable volume cleanly unmounted (set VOL_UNMNT, clear
 * VOL_INCNSTNT) and sync it one last time, then release the special
 * inodes, close the B-trees and free the volume header buffers.
 */
static void hfsplus_put_super(struct super_block *sb)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);

	hfs_dbg("starting...\n");

	/* No further delayed syncs may run once teardown starts. */
	cancel_delayed_work_sync(&sbi->sync_work);

	if (!sb_rdonly(sb) && sbi->s_vhdr) {
		struct hfsplus_vh *vhdr = sbi->s_vhdr;

		vhdr->modify_date = hfsp_now2mt();
		vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_UNMNT);
		vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_INCNSTNT);

		hfsplus_sync_fs(sb, 1);
	}

	iput(sbi->alloc_file);
	iput(sbi->hidden_dir);
	hfs_btree_close(sbi->attr_tree);
	hfs_btree_close(sbi->cat_tree);
	hfs_btree_close(sbi->ext_tree);
	kfree(sbi->s_vhdr_buf);
	kfree(sbi->s_backup_vhdr_buf);
	hfs_dbg("finished\n");
}
355 
356 static int hfsplus_statfs(struct dentry *dentry, struct kstatfs *buf)
357 {
358 	struct super_block *sb = dentry->d_sb;
359 	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
360 	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
361 
362 	buf->f_type = HFSPLUS_SUPER_MAGIC;
363 	buf->f_bsize = sb->s_blocksize;
364 	buf->f_blocks = sbi->total_blocks << sbi->fs_shift;
365 	buf->f_bfree = sbi->free_blocks << sbi->fs_shift;
366 	buf->f_bavail = buf->f_bfree;
367 	buf->f_files = 0xFFFFFFFF;
368 	buf->f_ffree = 0xFFFFFFFF - sbi->next_cnid;
369 	buf->f_fsid = u64_to_fsid(id);
370 	buf->f_namelen = HFSPLUS_MAX_STRLEN;
371 
372 	return 0;
373 }
374 
/*
 * ->reconfigure: handle remount.  Staying in the same ro/rw state (or
 * going read-only) always succeeds; a transition to read-write is
 * refused — by forcing SB_RDONLY back on — when the volume is dirty,
 * soft-locked or journaled, unless the "force" option is set.
 */
static int hfsplus_reconfigure(struct fs_context *fc)
{
	struct super_block *sb = fc->root->d_sb;

	sync_filesystem(sb);
	/* No change in ro/rw state: nothing to do. */
	if ((bool)(fc->sb_flags & SB_RDONLY) == sb_rdonly(sb))
		return 0;
	if (!(fc->sb_flags & SB_RDONLY)) {
		struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
		struct hfsplus_vh *vhdr = sbi->s_vhdr;

		if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
			pr_warn("filesystem was not cleanly unmounted, running fsck.hfsplus is recommended.  leaving read-only.\n");
			sb->s_flags |= SB_RDONLY;
			fc->sb_flags |= SB_RDONLY;
		} else if (test_bit(HFSPLUS_SB_FORCE, &sbi->flags)) {
			/* nothing */
		} else if (vhdr->attributes &
				cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
			pr_warn("filesystem is marked locked, leaving read-only.\n");
			sb->s_flags |= SB_RDONLY;
			fc->sb_flags |= SB_RDONLY;
		} else if (vhdr->attributes &
				cpu_to_be32(HFSPLUS_VOL_JOURNALED)) {
			pr_warn("filesystem is marked journaled, leaving read-only.\n");
			sb->s_flags |= SB_RDONLY;
			fc->sb_flags |= SB_RDONLY;
		}
	}
	return 0;
}
406 
/* Superblock operations, installed into sb->s_op by hfsplus_fill_super(). */
static const struct super_operations hfsplus_sops = {
	.alloc_inode	= hfsplus_alloc_inode,
	.free_inode	= hfsplus_free_inode,
	.write_inode	= hfsplus_write_inode,
	.evict_inode	= hfsplus_evict_inode,
	.put_super	= hfsplus_put_super,
	.sync_fs	= hfsplus_sync_fs,
	.statfs		= hfsplus_statfs,
	.show_options	= hfsplus_show_options,
};
417 
/*
 * Stamp the volume header for a read-write mount: record our mount
 * version and the current time, bump the write count, and flip the
 * flags to "mounted dirty" (clear VOL_UNMNT, set VOL_INCNSTNT).
 * hfsplus_put_super() reverses the flag change on a clean unmount.
 */
void hfsplus_prepare_volume_header_for_commit(struct hfsplus_vh *vhdr)
{
	vhdr->last_mount_vers = cpu_to_be32(HFSP_MOUNT_VERSION);
	vhdr->modify_date = hfsp_now2mt();
	be32_add_cpu(&vhdr->write_count, 1);
	vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT);
	vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT);
}
426 
427 static int hfsplus_fill_super(struct super_block *sb, struct fs_context *fc)
428 {
429 	struct hfsplus_vh *vhdr;
430 	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
431 	hfsplus_cat_entry entry;
432 	struct hfs_find_data fd;
433 	struct inode *root, *inode;
434 	struct qstr str;
435 	struct nls_table *nls;
436 	u64 last_fs_block, last_fs_page;
437 	int silent = fc->sb_flags & SB_SILENT;
438 	int err;
439 
440 	mutex_init(&sbi->alloc_mutex);
441 	mutex_init(&sbi->vh_mutex);
442 	spin_lock_init(&sbi->work_lock);
443 	INIT_DELAYED_WORK(&sbi->sync_work, delayed_sync_fs);
444 
445 	err = -EINVAL;
446 	if (!sbi->nls) {
447 		/* try utf8 first, as this is the old default behaviour */
448 		sbi->nls = load_nls("utf8");
449 		if (!sbi->nls)
450 			sbi->nls = load_nls_default();
451 	}
452 
453 	/* temporarily use utf8 to correctly find the hidden dir below */
454 	nls = sbi->nls;
455 	sbi->nls = load_nls("utf8");
456 	if (!sbi->nls) {
457 		pr_err("unable to load nls for utf8\n");
458 		goto out_unload_nls;
459 	}
460 
461 	/* Grab the volume header */
462 	if (hfsplus_read_wrapper(sb)) {
463 		if (!silent)
464 			pr_warn("unable to find HFS+ superblock\n");
465 		goto out_unload_nls;
466 	}
467 	vhdr = sbi->s_vhdr;
468 
469 	/* Copy parts of the volume header into the superblock */
470 	sb->s_magic = HFSPLUS_VOLHEAD_SIG;
471 	if (be16_to_cpu(vhdr->version) < HFSPLUS_MIN_VERSION ||
472 	    be16_to_cpu(vhdr->version) > HFSPLUS_CURRENT_VERSION) {
473 		pr_err("wrong filesystem version\n");
474 		goto out_free_vhdr;
475 	}
476 	sbi->total_blocks = be32_to_cpu(vhdr->total_blocks);
477 	sbi->free_blocks = be32_to_cpu(vhdr->free_blocks);
478 	sbi->next_cnid = be32_to_cpu(vhdr->next_cnid);
479 	sbi->file_count = be32_to_cpu(vhdr->file_count);
480 	sbi->folder_count = be32_to_cpu(vhdr->folder_count);
481 	sbi->data_clump_blocks =
482 		be32_to_cpu(vhdr->data_clump_sz) >> sbi->alloc_blksz_shift;
483 	if (!sbi->data_clump_blocks)
484 		sbi->data_clump_blocks = 1;
485 	sbi->rsrc_clump_blocks =
486 		be32_to_cpu(vhdr->rsrc_clump_sz) >> sbi->alloc_blksz_shift;
487 	if (!sbi->rsrc_clump_blocks)
488 		sbi->rsrc_clump_blocks = 1;
489 
490 	err = -EFBIG;
491 	last_fs_block = sbi->total_blocks - 1;
492 	last_fs_page = (last_fs_block << sbi->alloc_blksz_shift) >>
493 			PAGE_SHIFT;
494 
495 	if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) ||
496 	    (last_fs_page > (pgoff_t)(~0ULL))) {
497 		pr_err("filesystem size too large\n");
498 		goto out_free_vhdr;
499 	}
500 
501 	/* Set up operations so we can load metadata */
502 	sb->s_op = &hfsplus_sops;
503 	sb->s_maxbytes = MAX_LFS_FILESIZE;
504 
505 	if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
506 		pr_warn("Filesystem was not cleanly unmounted, running fsck.hfsplus is recommended.  mounting read-only.\n");
507 		sb->s_flags |= SB_RDONLY;
508 	} else if (test_and_clear_bit(HFSPLUS_SB_FORCE, &sbi->flags)) {
509 		/* nothing */
510 	} else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
511 		pr_warn("Filesystem is marked locked, mounting read-only.\n");
512 		sb->s_flags |= SB_RDONLY;
513 	} else if ((vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_JOURNALED)) &&
514 			!sb_rdonly(sb)) {
515 		pr_warn("write access to a journaled filesystem is not supported, use the force option at your own risk, mounting read-only.\n");
516 		sb->s_flags |= SB_RDONLY;
517 	}
518 
519 	err = -EINVAL;
520 
521 	/* Load metadata objects (B*Trees) */
522 	sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
523 	if (!sbi->ext_tree) {
524 		pr_err("failed to load extents file\n");
525 		goto out_free_vhdr;
526 	}
527 	sbi->cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID);
528 	if (!sbi->cat_tree) {
529 		pr_err("failed to load catalog file\n");
530 		goto out_close_ext_tree;
531 	}
532 	atomic_set(&sbi->attr_tree_state, HFSPLUS_EMPTY_ATTR_TREE);
533 	if (vhdr->attr_file.total_blocks != 0) {
534 		sbi->attr_tree = hfs_btree_open(sb, HFSPLUS_ATTR_CNID);
535 		if (!sbi->attr_tree) {
536 			pr_err("failed to load attributes file\n");
537 			goto out_close_cat_tree;
538 		}
539 		atomic_set(&sbi->attr_tree_state, HFSPLUS_VALID_ATTR_TREE);
540 	}
541 	sb->s_xattr = hfsplus_xattr_handlers;
542 
543 	inode = hfsplus_iget(sb, HFSPLUS_ALLOC_CNID);
544 	if (IS_ERR(inode)) {
545 		pr_err("failed to load allocation file\n");
546 		err = PTR_ERR(inode);
547 		goto out_close_attr_tree;
548 	}
549 	sbi->alloc_file = inode;
550 
551 	/* Load the root directory */
552 	root = hfsplus_iget(sb, HFSPLUS_ROOT_CNID);
553 	if (IS_ERR(root)) {
554 		pr_err("failed to load root directory\n");
555 		err = PTR_ERR(root);
556 		goto out_put_alloc_file;
557 	}
558 
559 	set_default_d_op(sb, &hfsplus_dentry_operations);
560 	sb->s_root = d_make_root(root);
561 	if (!sb->s_root) {
562 		err = -ENOMEM;
563 		goto out_put_alloc_file;
564 	}
565 
566 	str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1;
567 	str.name = HFSP_HIDDENDIR_NAME;
568 	err = hfs_find_init(sbi->cat_tree, &fd);
569 	if (err)
570 		goto out_put_root;
571 	err = hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str);
572 	if (unlikely(err < 0))
573 		goto out_put_root;
574 	if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
575 		hfs_find_exit(&fd);
576 		if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) {
577 			err = -EIO;
578 			goto out_put_root;
579 		}
580 		inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id));
581 		if (IS_ERR(inode)) {
582 			err = PTR_ERR(inode);
583 			goto out_put_root;
584 		}
585 		sbi->hidden_dir = inode;
586 	} else
587 		hfs_find_exit(&fd);
588 
589 	if (!sb_rdonly(sb)) {
590 		/*
591 		 * H+LX == hfsplusutils, H+Lx == this driver, H+lx is unused
592 		 * all three are registered with Apple for our use
593 		 */
594 		hfsplus_prepare_volume_header_for_commit(vhdr);
595 		hfsplus_sync_fs(sb, 1);
596 
597 		if (!sbi->hidden_dir) {
598 			mutex_lock(&sbi->vh_mutex);
599 			sbi->hidden_dir = hfsplus_new_inode(sb, root, S_IFDIR);
600 			if (!sbi->hidden_dir) {
601 				mutex_unlock(&sbi->vh_mutex);
602 				err = -ENOMEM;
603 				goto out_put_root;
604 			}
605 			err = hfsplus_create_cat(sbi->hidden_dir->i_ino, root,
606 						 &str, sbi->hidden_dir);
607 			if (err) {
608 				mutex_unlock(&sbi->vh_mutex);
609 				goto out_put_hidden_dir;
610 			}
611 
612 			err = hfsplus_init_security(sbi->hidden_dir,
613 							root, &str);
614 			if (err == -EOPNOTSUPP)
615 				err = 0; /* Operation is not supported. */
616 			else if (err) {
617 				/*
618 				 * Try to delete anyway without
619 				 * error analysis.
620 				 */
621 				hfsplus_delete_cat(sbi->hidden_dir->i_ino,
622 							root, &str);
623 				mutex_unlock(&sbi->vh_mutex);
624 				goto out_put_hidden_dir;
625 			}
626 
627 			mutex_unlock(&sbi->vh_mutex);
628 			hfsplus_mark_inode_dirty(sbi->hidden_dir,
629 						 HFSPLUS_I_CAT_DIRTY);
630 		}
631 	}
632 
633 	unload_nls(sbi->nls);
634 	sbi->nls = nls;
635 	return 0;
636 
637 out_put_hidden_dir:
638 	cancel_delayed_work_sync(&sbi->sync_work);
639 	iput(sbi->hidden_dir);
640 out_put_root:
641 	dput(sb->s_root);
642 	sb->s_root = NULL;
643 out_put_alloc_file:
644 	iput(sbi->alloc_file);
645 out_close_attr_tree:
646 	hfs_btree_close(sbi->attr_tree);
647 out_close_cat_tree:
648 	hfs_btree_close(sbi->cat_tree);
649 out_close_ext_tree:
650 	hfs_btree_close(sbi->ext_tree);
651 out_free_vhdr:
652 	kfree(sbi->s_vhdr_buf);
653 	kfree(sbi->s_backup_vhdr_buf);
654 out_unload_nls:
655 	unload_nls(nls);
656 	return err;
657 }
658 
659 MODULE_AUTHOR("Brad Boyer");
660 MODULE_DESCRIPTION("Extended Macintosh Filesystem");
661 MODULE_LICENSE("GPL");
662 
663 static struct kmem_cache *hfsplus_inode_cachep;
664 
665 static struct inode *hfsplus_alloc_inode(struct super_block *sb)
666 {
667 	struct hfsplus_inode_info *i;
668 
669 	i = alloc_inode_sb(sb, hfsplus_inode_cachep, GFP_KERNEL);
670 	return i ? &i->vfs_inode : NULL;
671 }
672 
/* ->free_inode: return the inode's container to the slab cache. */
static void hfsplus_free_inode(struct inode *inode)
{
	kmem_cache_free(hfsplus_inode_cachep, HFSPLUS_I(inode));
}
677 
678 #define HFSPLUS_INODE_SIZE	sizeof(struct hfsplus_inode_info)
679 
/* ->get_tree: mount on a block device via hfsplus_fill_super(). */
static int hfsplus_get_tree(struct fs_context *fc)
{
	return get_tree_bdev(fc, hfsplus_fill_super);
}
684 
/*
 * ->free: discard the sb-info still attached to a mount context
 * (kfree(NULL) is a no-op, so this is safe if ownership has already
 * moved elsewhere).
 */
static void hfsplus_free_fc(struct fs_context *fc)
{
	kfree(fc->s_fs_info);
}
689 
/* Mount-context operations installed by hfsplus_init_fs_context(). */
static const struct fs_context_operations hfsplus_context_ops = {
	.parse_param	= hfsplus_parse_param,
	.get_tree	= hfsplus_get_tree,
	.reconfigure	= hfsplus_reconfigure,
	.free		= hfsplus_free_fc,
};
696 
697 static int hfsplus_init_fs_context(struct fs_context *fc)
698 {
699 	struct hfsplus_sb_info *sbi;
700 
701 	sbi = kzalloc_obj(struct hfsplus_sb_info, GFP_KERNEL);
702 	if (!sbi)
703 		return -ENOMEM;
704 
705 	if (fc->purpose != FS_CONTEXT_FOR_RECONFIGURE)
706 		hfsplus_fill_defaults(sbi);
707 
708 	fc->s_fs_info = sbi;
709 	fc->ops = &hfsplus_context_ops;
710 
711 	return 0;
712 }
713 
/*
 * ->kill_sb: run the generic block-device teardown, then free the
 * sb-info only after an RCU grace period via delayed_free() (which
 * also drops the NLS reference).  NOTE(review): the RCU deferral
 * presumably protects lockless readers of sb->s_fs_info — confirm
 * against the users of the rcu head in hfsplus_fs.h.
 */
static void hfsplus_kill_super(struct super_block *sb)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);

	kill_block_super(sb);
	call_rcu(&sbi->rcu, delayed_free);
}
721 
/* Registration record for the "hfsplus" block-device filesystem. */
static struct file_system_type hfsplus_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "hfsplus",
	.kill_sb	= hfsplus_kill_super,
	.fs_flags	= FS_REQUIRES_DEV,
	.init_fs_context = hfsplus_init_fs_context,
};
729 MODULE_ALIAS_FS("hfsplus");
730 
/* Slab constructor: set up the embedded VFS inode once per object. */
static void hfsplus_init_once(void *p)
{
	struct hfsplus_inode_info *i = p;

	inode_init_once(&i->vfs_inode);
}
737 
738 static int __init init_hfsplus_fs(void)
739 {
740 	int err;
741 
742 	hfsplus_inode_cachep = kmem_cache_create("hfsplus_icache",
743 		HFSPLUS_INODE_SIZE, 0, SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT,
744 		hfsplus_init_once);
745 	if (!hfsplus_inode_cachep)
746 		return -ENOMEM;
747 	err = hfsplus_create_attr_tree_cache();
748 	if (err)
749 		goto destroy_inode_cache;
750 	err = register_filesystem(&hfsplus_fs_type);
751 	if (err)
752 		goto destroy_attr_tree_cache;
753 	return 0;
754 
755 destroy_attr_tree_cache:
756 	hfsplus_destroy_attr_tree_cache();
757 
758 destroy_inode_cache:
759 	kmem_cache_destroy(hfsplus_inode_cachep);
760 
761 	return err;
762 }
763 
/* Module exit: unregister the filesystem and tear down the caches. */
static void __exit exit_hfsplus_fs(void)
{
	unregister_filesystem(&hfsplus_fs_type);

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	hfsplus_destroy_attr_tree_cache();
	kmem_cache_destroy(hfsplus_inode_cachep);
}
776 
777 module_init(init_hfsplus_fs)
778 module_exit(exit_hfsplus_fs)
779