/*
 *  linux/fs/hfs/super.c
 *
 * Copyright (C) 1995-1997  Paul H. Hargrove
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 * This file may be distributed under the terms of the GNU General Public License.
 *
 * This file contains hfs_fill_super(), some of the super_ops and
 * init_hfs_fs() and exit_hfs_fs().  The remaining super_ops are in
 * inode.c since they deal with inodes.
 *
 * Based on the minix file system code, (C) 1991, 1992 by Linus Torvalds
 */

#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/init.h>
#include <linux/nls.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vfs.h>

#include "hfs_fs.h"
#include "btree.h"

static struct kmem_cache *hfs_inode_cachep;

MODULE_LICENSE("GPL");

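/*
 * hfs_sync_fs()
 *
 * sync_fs() entry in super_operations: write the in-memory MDB (Master
 * Directory Block) out via hfs_mdb_commit().  The wait argument is not
 * used.
 */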
static int hfs_sync_fs(struct super_block *sb, int wait)
{
	hfs_mdb_commit(sb);
	return 0;
}

/*
 * hfs_put_super()
 *
 * This is the put_super() entry in the super_operations structure for
 * HFS filesystems.  The purpose is to release the resources
 * associated with the superblock sb.
 */
static void hfs_put_super(struct super_block *sb)
{
	cancel_delayed_work_sync(&HFS_SB(sb)->mdb_work);
	hfs_mdb_close(sb);
	/* release the MDB's resources */
	hfs_mdb_put(sb);
}

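/*
 * flush_mdb()
 *
 * Delayed-work handler armed by hfs_mark_mdb_dirty(): clear the
 * work_queued flag and write the in-memory MDB back to disk.
 */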
static void flush_mdb(struct work_struct *work)
{
	struct hfs_sb_info *sbi;
	struct super_block *sb;

	sbi = container_of(work, struct hfs_sb_info, mdb_work.work);
	sb = sbi->sb;

	spin_lock(&sbi->work_lock);
	sbi->work_queued = 0;
	spin_unlock(&sbi->work_lock);

	hfs_mdb_commit(sb);
}

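/*
 * hfs_mark_mdb_dirty()
 *
 * Schedule a deferred MDB flush on the system_long workqueue unless the
 * filesystem is read-only or a flush is already pending; the delay
 * follows the global dirty_writeback_interval.
 */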
void hfs_mark_mdb_dirty(struct super_block *sb)
{
	struct hfs_sb_info *sbi = HFS_SB(sb);
	unsigned long delay;

	if (sb_rdonly(sb))
		return;

	spin_lock(&sbi->work_lock);
	if (!sbi->work_queued) {
		delay = msecs_to_jiffies(dirty_writeback_interval * 10);
		queue_delayed_work(system_long_wq, &sbi->mdb_work, delay);
		sbi->work_queued = 1;
	}
	spin_unlock(&sbi->work_lock);
}

/*
 * hfs_statfs()
 *
 * This is the statfs() entry in the super_operations structure for
 * HFS filesystems.  The purpose is to return various data about the
 * filesystem.
 *
 * f_files/f_ffree are reported as fs_ablocks/free_ablocks.
 */
static int hfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);

	buf->f_type = HFS_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = (u32)HFS_SB(sb)->fs_ablocks * HFS_SB(sb)->fs_div;
	buf->f_bfree = (u32)HFS_SB(sb)->free_ablocks * HFS_SB(sb)->fs_div;
	buf->f_bavail = buf->f_bfree;
	buf->f_files = HFS_SB(sb)->fs_ablocks;
	buf->f_ffree = HFS_SB(sb)->free_ablocks;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);
	buf->f_namelen = HFS_NAMELEN;

	return 0;
}

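/*
 * hfs_remount()
 *
 * remount_fs() entry in super_operations: sync the filesystem, keep
 * SB_NODIRATIME set, and refuse a read-write remount if the volume was
 * not cleanly unmounted or is software-locked.
 */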
static int hfs_remount(struct super_block *sb, int *flags, char *data)
{
	sync_filesystem(sb);
	*flags |= SB_NODIRATIME;
	if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
		return 0;
	if (!(*flags & SB_RDONLY)) {
		if (!(HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) {
			pr_warn("filesystem was not cleanly unmounted, running fsck.hfs is recommended.  leaving read-only.\n");
			sb->s_flags |= SB_RDONLY;
			*flags |= SB_RDONLY;
		} else if (HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_SLOCK)) {
			pr_warn("filesystem is marked locked, leaving read-only.\n");
			sb->s_flags |= SB_RDONLY;
			*flags |= SB_RDONLY;
		}
	}
	return 0;
}

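/*
 * hfs_show_options()
 *
 * show_options() entry in super_operations: uid/gid are always emitted;
 * the remaining options are only shown when they differ from the
 * built-in defaults ('????' creator/type, 0133/0022 umasks, etc.).
 */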
static int hfs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct hfs_sb_info *sbi = HFS_SB(root->d_sb);

	if (sbi->s_creator != cpu_to_be32(0x3f3f3f3f))
		seq_show_option_n(seq, "creator", (char *)&sbi->s_creator, 4);
	if (sbi->s_type != cpu_to_be32(0x3f3f3f3f))
		seq_show_option_n(seq, "type", (char *)&sbi->s_type, 4);
	seq_printf(seq, ",uid=%u,gid=%u",
			from_kuid_munged(&init_user_ns, sbi->s_uid),
			from_kgid_munged(&init_user_ns, sbi->s_gid));
	if (sbi->s_file_umask != 0133)
		seq_printf(seq, ",file_umask=%o", sbi->s_file_umask);
	if (sbi->s_dir_umask != 0022)
		seq_printf(seq, ",dir_umask=%o", sbi->s_dir_umask);
	if (sbi->part >= 0)
		seq_printf(seq, ",part=%u", sbi->part);
	if (sbi->session >= 0)
		seq_printf(seq, ",session=%u", sbi->session);
	if (sbi->nls_disk)
		seq_printf(seq, ",codepage=%s", sbi->nls_disk->charset);
	if (sbi->nls_io)
		seq_printf(seq, ",iocharset=%s", sbi->nls_io->charset);
	if (sbi->s_quiet)
		seq_printf(seq, ",quiet");
	return 0;
}

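/*
 * Inode allocation and teardown: HFS inodes live in a dedicated slab
 * cache (hfs_inode_cachep) and are freed through an RCU callback, so
 * the actual kmem_cache_free() is deferred past a grace period.
 */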
static struct inode *hfs_alloc_inode(struct super_block *sb)
{
	struct hfs_inode_info *i;

	i = kmem_cache_alloc(hfs_inode_cachep, GFP_KERNEL);
	return i ? &i->vfs_inode : NULL;
}

static void hfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(hfs_inode_cachep, HFS_I(inode));
}

static void hfs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, hfs_i_callback);
}

static const struct super_operations hfs_super_operations = {
	.alloc_inode	= hfs_alloc_inode,
	.destroy_inode	= hfs_destroy_inode,
	.write_inode	= hfs_write_inode,
	.evict_inode	= hfs_evict_inode,
	.put_super	= hfs_put_super,
	.sync_fs	= hfs_sync_fs,
	.statfs		= hfs_statfs,
	.remount_fs	= hfs_remount,
	.show_options	= hfs_show_options,
};

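/*
 * Mount option tokens and their match patterns; parse_options() below
 * walks the option string with match_token() against this table.
 */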
enum {
	opt_uid, opt_gid, opt_umask, opt_file_umask, opt_dir_umask,
	opt_part, opt_session, opt_type, opt_creator, opt_quiet,
	opt_codepage, opt_iocharset,
	opt_err
};

static const match_table_t tokens = {
	{ opt_uid, "uid=%u" },
	{ opt_gid, "gid=%u" },
	{ opt_umask, "umask=%o" },
	{ opt_file_umask, "file_umask=%o" },
	{ opt_dir_umask, "dir_umask=%o" },
	{ opt_part, "part=%u" },
	{ opt_session, "session=%u" },
	{ opt_type, "type=%s" },
	{ opt_creator, "creator=%s" },
	{ opt_quiet, "quiet" },
	{ opt_codepage, "codepage=%s" },
	{ opt_iocharset, "iocharset=%s" },
	{ opt_err, NULL }
};

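/*
 * match_fourchar()
 *
 * Parse a four-character Mac OS type/creator code (e.g. "TEXT") from a
 * mount option argument; the four bytes are copied verbatim into
 * *result, and anything that is not exactly four bytes is rejected.
 */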
static inline int match_fourchar(substring_t *arg, u32 *result)
{
	if (arg->to - arg->from != 4)
		return -EINVAL;
	memcpy(result, arg->from, 4);
	return 0;
}

/*
 * parse_options()
 *
 * adapted from linux/fs/msdos/inode.c written 1992,93 by Werner Almesberger
 * This function is called by hfs_fill_super() to parse the mount options.
 */
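/*
 * Illustrative option string (device, mount point and charset choices
 * are hypothetical):
 *
 *	mount -t hfs -o uid=1000,gid=1000,codepage=cp437,iocharset=utf8 \
 *		/dev/sdb2 /mnt/mac
 *
 * Unrecognised options or malformed values make parse_options() return
 * failure, which aborts the mount.
 */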
static int parse_options(char *options, struct hfs_sb_info *hsb)
{
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int tmp, token;

	/* initialize the sb with defaults */
	hsb->s_uid = current_uid();
	hsb->s_gid = current_gid();
	hsb->s_file_umask = 0133;
	hsb->s_dir_umask = 0022;
	hsb->s_type = hsb->s_creator = cpu_to_be32(0x3f3f3f3f);	/* == '????' */
	hsb->s_quiet = 0;
	hsb->part = -1;
	hsb->session = -1;

	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case opt_uid:
			if (match_int(&args[0], &tmp)) {
				pr_err("uid requires an argument\n");
				return 0;
			}
			hsb->s_uid = make_kuid(current_user_ns(), (uid_t)tmp);
			if (!uid_valid(hsb->s_uid)) {
				pr_err("invalid uid %d\n", tmp);
				return 0;
			}
			break;
		case opt_gid:
			if (match_int(&args[0], &tmp)) {
				pr_err("gid requires an argument\n");
				return 0;
			}
			hsb->s_gid = make_kgid(current_user_ns(), (gid_t)tmp);
			if (!gid_valid(hsb->s_gid)) {
				pr_err("invalid gid %d\n", tmp);
				return 0;
			}
			break;
		case opt_umask:
			if (match_octal(&args[0], &tmp)) {
				pr_err("umask requires a value\n");
				return 0;
			}
			hsb->s_file_umask = (umode_t)tmp;
			hsb->s_dir_umask = (umode_t)tmp;
			break;
		case opt_file_umask:
			if (match_octal(&args[0], &tmp)) {
				pr_err("file_umask requires a value\n");
				return 0;
			}
			hsb->s_file_umask = (umode_t)tmp;
			break;
		case opt_dir_umask:
			if (match_octal(&args[0], &tmp)) {
				pr_err("dir_umask requires a value\n");
				return 0;
			}
			hsb->s_dir_umask = (umode_t)tmp;
			break;
		case opt_part:
			if (match_int(&args[0], &hsb->part)) {
				pr_err("part requires an argument\n");
				return 0;
			}
			break;
		case opt_session:
			if (match_int(&args[0], &hsb->session)) {
				pr_err("session requires an argument\n");
				return 0;
			}
			break;
		case opt_type:
			if (match_fourchar(&args[0], &hsb->s_type)) {
				pr_err("type requires a 4 character value\n");
				return 0;
			}
			break;
		case opt_creator:
			if (match_fourchar(&args[0], &hsb->s_creator)) {
				pr_err("creator requires a 4 character value\n");
				return 0;
			}
			break;
		case opt_quiet:
			hsb->s_quiet = 1;
			break;
		case opt_codepage:
			if (hsb->nls_disk) {
				pr_err("unable to change codepage\n");
				return 0;
			}
			p = match_strdup(&args[0]);
			if (p)
				hsb->nls_disk = load_nls(p);
			if (!hsb->nls_disk) {
				pr_err("unable to load codepage \"%s\"\n", p);
				kfree(p);
				return 0;
			}
			kfree(p);
			break;
		case opt_iocharset:
			if (hsb->nls_io) {
				pr_err("unable to change iocharset\n");
				return 0;
			}
			p = match_strdup(&args[0]);
			if (p)
				hsb->nls_io = load_nls(p);
			if (!hsb->nls_io) {
				pr_err("unable to load iocharset \"%s\"\n", p);
				kfree(p);
				return 0;
			}
			kfree(p);
			break;
		default:
			return 0;
		}
	}

	if (hsb->nls_disk && !hsb->nls_io) {
		hsb->nls_io = load_nls_default();
		if (!hsb->nls_io) {
			pr_err("unable to load default iocharset\n");
			return 0;
		}
	}
	hsb->s_dir_umask &= 0777;
	hsb->s_file_umask &= 0577;

	return 1;
}

/*
 * hfs_fill_super()
 *
 * This is the function that is responsible for mounting an HFS
 * filesystem.  It performs all the tasks necessary to get enough data
 * from the disk to read the root inode.  This includes parsing the
 * mount options, dealing with Macintosh partitions, reading the
 * superblock and the allocation bitmap blocks, calling
 * hfs_btree_init() to get the necessary data about the extents and
 * catalog B-trees and, finally, reading the root inode into memory.
 */
static int hfs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct hfs_sb_info *sbi;
	struct hfs_find_data fd;
	hfs_cat_rec rec;
	struct inode *root_inode;
	int res;

	sbi = kzalloc(sizeof(struct hfs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->sb = sb;
	sb->s_fs_info = sbi;
	spin_lock_init(&sbi->work_lock);
	INIT_DELAYED_WORK(&sbi->mdb_work, flush_mdb);

	res = -EINVAL;
	if (!parse_options((char *)data, sbi)) {
		pr_err("unable to parse mount options\n");
		goto bail;
	}

	sb->s_op = &hfs_super_operations;
	sb->s_xattr = hfs_xattr_handlers;
	sb->s_flags |= SB_NODIRATIME;
	mutex_init(&sbi->bitmap_lock);

	res = hfs_mdb_get(sb);
	if (res) {
		if (!silent)
			pr_warn("can't find an HFS filesystem on dev %s\n",
				hfs_mdb_name(sb));
		res = -EINVAL;
		goto bail;
	}

	/* try to get the root inode */
	res = hfs_find_init(HFS_SB(sb)->cat_tree, &fd);
	if (res)
		goto bail_no_root;
	res = hfs_cat_find_brec(sb, HFS_ROOT_CNID, &fd);
	if (!res) {
		if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
			res = -EIO;
			hfs_find_exit(&fd);
			goto bail;
		}
		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
	}
	if (res) {
		hfs_find_exit(&fd);
		goto bail_no_root;
	}
	res = -EINVAL;
	root_inode = hfs_iget(sb, &fd.search_key->cat, &rec);
	hfs_find_exit(&fd);
	if (!root_inode)
		goto bail_no_root;

	sb->s_d_op = &hfs_dentry_operations;
	res = -ENOMEM;
	sb->s_root = d_make_root(root_inode);
	if (!sb->s_root)
		goto bail_no_root;

	/* everything's okay */
	return 0;

bail_no_root:
	pr_err("get root inode failed\n");
bail:
	hfs_mdb_put(sb);
	return res;
}

static struct dentry *hfs_mount(struct file_system_type *fs_type,
		      int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, hfs_fill_super);
}

static struct file_system_type hfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "hfs",
	.mount		= hfs_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("hfs");

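/*
 * hfs_init_once()
 *
 * Slab constructor: runs once per cache object to initialise the
 * embedded VFS inode.
 */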
static void hfs_init_once(void *p)
{
	struct hfs_inode_info *i = p;

	inode_init_once(&i->vfs_inode);
}

static int __init init_hfs_fs(void)
{
	int err;

	hfs_inode_cachep = kmem_cache_create("hfs_inode_cache",
		sizeof(struct hfs_inode_info), 0,
		SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, hfs_init_once);
	if (!hfs_inode_cachep)
		return -ENOMEM;
	err = register_filesystem(&hfs_fs_type);
	if (err)
		kmem_cache_destroy(hfs_inode_cachep);
	return err;
}

static void __exit exit_hfs_fs(void)
{
	unregister_filesystem(&hfs_fs_type);

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(hfs_inode_cachep);
}

module_init(init_hfs_fs)
module_exit(exit_hfs_fs)