xref: /linux/fs/xfs/xfs_super.c (revision f783529bee39c3fa1451728007eb4890a94f2638)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_dir2.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_icreate_item.h"
#include "xfs_filestream.h"
#include "xfs_quota.h"
#include "xfs_sysfs.h"
#include "xfs_ondisk.h"
#include "xfs_rmap_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"
#include "xfs_pwork.h"
#include "xfs_ag.h"
#include "xfs_defer.h"
#include "xfs_attr_item.h"
#include "xfs_xattr.h"
#include "xfs_iunlink_item.h"
#include "xfs_dahash_test.h"
#include "xfs_rtbitmap.h"
#include "xfs_exchmaps_item.h"
#include "scrub/stats.h"
#include "scrub/rcbag_btree.h"

#include <linux/magic.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

static const struct super_operations xfs_super_operations;

static struct dentry *xfs_debugfs;	/* top-level xfs debugfs dir */
static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
#ifdef DEBUG
static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
#endif

enum xfs_dax_mode {
	XFS_DAX_INODE = 0,
	XFS_DAX_ALWAYS = 1,
	XFS_DAX_NEVER = 2,
};

static void
xfs_mount_set_dax_mode(
	struct xfs_mount	*mp,
	enum xfs_dax_mode	mode)
{
	switch (mode) {
	case XFS_DAX_INODE:
		mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER);
		break;
	case XFS_DAX_ALWAYS:
		mp->m_features |= XFS_FEAT_DAX_ALWAYS;
		mp->m_features &= ~XFS_FEAT_DAX_NEVER;
		break;
	case XFS_DAX_NEVER:
		mp->m_features |= XFS_FEAT_DAX_NEVER;
		mp->m_features &= ~XFS_FEAT_DAX_ALWAYS;
		break;
	}
}

static const struct constant_table dax_param_enums[] = {
	{"inode",	XFS_DAX_INODE },
	{"always",	XFS_DAX_ALWAYS },
	{"never",	XFS_DAX_NEVER },
	{}
};
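
/*
 * Illustrative mapping (an example, not an exhaustive spec): a bare "-o dax"
 * is parsed as Opt_dax and behaves like "-o dax=always"; "-o dax=inode"
 * clears both feature bits above so the per-inode FS_XFLAG_DAX flag decides;
 * "-o dax=never" forces DAX off for every inode.
 */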

/*
 * Table driven mount option parser.
 */
enum {
	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
};

static const struct fs_parameter_spec xfs_fs_parameters[] = {
	fsparam_u32("logbufs",		Opt_logbufs),
	fsparam_string("logbsize",	Opt_logbsize),
	fsparam_string("logdev",	Opt_logdev),
	fsparam_string("rtdev",		Opt_rtdev),
	fsparam_flag("wsync",		Opt_wsync),
	fsparam_flag("noalign",		Opt_noalign),
	fsparam_flag("swalloc",		Opt_swalloc),
	fsparam_u32("sunit",		Opt_sunit),
	fsparam_u32("swidth",		Opt_swidth),
	fsparam_flag("nouuid",		Opt_nouuid),
	fsparam_flag("grpid",		Opt_grpid),
	fsparam_flag("nogrpid",		Opt_nogrpid),
	fsparam_flag("bsdgroups",	Opt_bsdgroups),
	fsparam_flag("sysvgroups",	Opt_sysvgroups),
	fsparam_string("allocsize",	Opt_allocsize),
	fsparam_flag("norecovery",	Opt_norecovery),
	fsparam_flag("inode64",		Opt_inode64),
	fsparam_flag("inode32",		Opt_inode32),
	fsparam_flag("ikeep",		Opt_ikeep),
	fsparam_flag("noikeep",		Opt_noikeep),
	fsparam_flag("largeio",		Opt_largeio),
	fsparam_flag("nolargeio",	Opt_nolargeio),
	fsparam_flag("attr2",		Opt_attr2),
	fsparam_flag("noattr2",		Opt_noattr2),
	fsparam_flag("filestreams",	Opt_filestreams),
	fsparam_flag("quota",		Opt_quota),
	fsparam_flag("noquota",		Opt_noquota),
	fsparam_flag("usrquota",	Opt_usrquota),
	fsparam_flag("grpquota",	Opt_grpquota),
	fsparam_flag("prjquota",	Opt_prjquota),
	fsparam_flag("uquota",		Opt_uquota),
	fsparam_flag("gquota",		Opt_gquota),
	fsparam_flag("pquota",		Opt_pquota),
	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
	fsparam_flag("qnoenforce",	Opt_qnoenforce),
	fsparam_flag("discard",		Opt_discard),
	fsparam_flag("nodiscard",	Opt_nodiscard),
	fsparam_flag("dax",		Opt_dax),
	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
	{}
};

struct proc_xfs_info {
	uint64_t	flag;
	char		*str;
};

static int
xfs_fs_show_options(
	struct seq_file		*m,
	struct dentry		*root)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_FEAT_IKEEP,		",ikeep" },
		{ XFS_FEAT_WSYNC,		",wsync" },
		{ XFS_FEAT_NOALIGN,		",noalign" },
		{ XFS_FEAT_SWALLOC,		",swalloc" },
		{ XFS_FEAT_NOUUID,		",nouuid" },
		{ XFS_FEAT_NORECOVERY,		",norecovery" },
		{ XFS_FEAT_ATTR2,		",attr2" },
		{ XFS_FEAT_FILESTREAMS,		",filestreams" },
		{ XFS_FEAT_GRPID,		",grpid" },
		{ XFS_FEAT_DISCARD,		",discard" },
		{ XFS_FEAT_LARGE_IOSIZE,	",largeio" },
		{ XFS_FEAT_DAX_ALWAYS,		",dax=always" },
		{ XFS_FEAT_DAX_NEVER,		",dax=never" },
		{ 0, NULL }
	};
	struct xfs_mount	*mp = XFS_M(root->d_sb);
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_features & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}

	seq_printf(m, ",inode%d", xfs_has_small_inums(mp) ? 32 : 64);

	if (xfs_has_allocsize(mp))
		seq_printf(m, ",allocsize=%dk",
			   (1 << mp->m_allocsize_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);

	if (mp->m_logname)
		seq_show_option(m, "logdev", mp->m_logname);
	if (mp->m_rtname)
		seq_show_option(m, "rtdev", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, ",sunit=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, ",swidth=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	if (mp->m_qflags & XFS_UQUOTA_ENFD)
		seq_puts(m, ",usrquota");
	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
		seq_puts(m, ",uqnoenforce");

	if (mp->m_qflags & XFS_PQUOTA_ENFD)
		seq_puts(m, ",prjquota");
	else if (mp->m_qflags & XFS_PQUOTA_ACCT)
		seq_puts(m, ",pqnoenforce");

	if (mp->m_qflags & XFS_GQUOTA_ENFD)
		seq_puts(m, ",grpquota");
	else if (mp->m_qflags & XFS_GQUOTA_ACCT)
		seq_puts(m, ",gqnoenforce");

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, ",noquota");

	return 0;
}
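
/*
 * Illustrative /proc/mounts line produced by the function above (the exact
 * set of options depends on the mount; "rw" and friends are printed by the
 * VFS, this function only appends the XFS-specific part):
 *
 *   /dev/sda1 /mnt xfs rw,attr2,inode64,logbufs=8,logbsize=32k,noquota 0 0
 */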

static bool
xfs_set_inode_alloc_perag(
	struct xfs_perag	*pag,
	xfs_ino_t		ino,
	xfs_agnumber_t		max_metadata)
{
	if (!xfs_is_inode32(pag->pag_mount)) {
		set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
		return false;
	}

	if (ino > XFS_MAXINUMBER_32) {
		clear_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
		return false;
	}

	set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
	if (pag->pag_agno < max_metadata)
		set_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
	else
		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
	return true;
}

/*
 * Set parameters for inode allocation heuristics, taking into account
 * filesystem size and inode32/inode64 mount options; i.e. specifically
 * whether or not XFS_FEAT_SMALL_INUMS is set.
 *
 * Inode allocation patterns are altered only if inode32 is requested
 * (XFS_FEAT_SMALL_INUMS), and the filesystem is sufficiently large.
 * If altered, XFS_OPSTATE_INODE32 is set as well.
 *
 * An agcount independent of that in the mount structure is provided
 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 * to the potentially higher ag count.
 *
 * Returns the maximum AG index which may contain inodes.
 */
xfs_agnumber_t
xfs_set_inode_alloc(
	struct xfs_mount *mp,
	xfs_agnumber_t	agcount)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	maxagi = 0;
	xfs_sb_t	*sbp = &mp->m_sb;
	xfs_agnumber_t	max_metadata;
	xfs_agino_t	agino;
	xfs_ino_t	ino;

	/*
	 * Calculate how much should be reserved for inodes to meet
	 * the max inode percentage.  Used only for inode32.
	 */
	if (M_IGEO(mp)->maxicount) {
		uint64_t	icount;

		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		icount += sbp->sb_agblocks - 1;
		do_div(icount, sbp->sb_agblocks);
		max_metadata = icount;
	} else {
		max_metadata = agcount;
	}

	/* Get the last possible inode in the filesystem */
	agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/*
	 * If user asked for no more than 32-bit inodes, and the fs is
	 * sufficiently large, set XFS_OPSTATE_INODE32 if we must alter
	 * the allocator to accommodate the request.
	 */
	if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32)
		set_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
	else
		clear_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);

	for (index = 0; index < agcount; index++) {
		struct xfs_perag	*pag;

		ino = XFS_AGINO_TO_INO(mp, index, agino);

		pag = xfs_perag_get(mp, index);
		if (xfs_set_inode_alloc_perag(pag, ino, max_metadata))
			maxagi++;
		xfs_perag_put(pag);
	}

	return xfs_is_inode32(mp) ? maxagi : agcount;
}
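
/*
 * Worked example for the inode32 logic above (illustrative geometry): with
 * 4 KiB blocks and 512-byte inodes, an inode number encodes the AG number,
 * the block within the AG, and the inode within the block.  Since
 * XFS_MAXINUMBER_32 is 2^32 - 1, once the last possible inode number
 * computed above exceeds that -- around the low-TiB range for such a
 * geometry -- an inode32 mount flips XFS_OPSTATE_INODE32 and confines inode
 * allocation to the lower AGs via the per-AG opstate bits.
 */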

static int
xfs_setup_dax_always(
	struct xfs_mount	*mp)
{
	if (!mp->m_ddev_targp->bt_daxdev &&
	    (!mp->m_rtdev_targp || !mp->m_rtdev_targp->bt_daxdev)) {
		xfs_alert(mp,
			"DAX unsupported by block device. Turning off DAX.");
		goto disable_dax;
	}

	if (mp->m_super->s_blocksize != PAGE_SIZE) {
		xfs_alert(mp,
			"DAX not supported for blocksize. Turning off DAX.");
		goto disable_dax;
	}

	if (xfs_has_reflink(mp) &&
	    bdev_is_partition(mp->m_ddev_targp->bt_bdev)) {
		xfs_alert(mp,
			"DAX and reflink cannot work with multi-partitions!");
		return -EINVAL;
	}

	return 0;

disable_dax:
	xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
	return 0;
}
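
/*
 * Example outcome (illustrative): asking for "-o dax=always" on a device
 * with no DAX capability does not fail the mount; the checks above log an
 * alert and quietly fall back to dax=never.  Only the reflink plus
 * partitioned-bdev combination above is a hard -EINVAL failure.
 */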

STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct file		**bdev_filep)
{
	int			error = 0;

	*bdev_filep = bdev_file_open_by_path(name,
		BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_RESTRICT_WRITES,
		mp->m_super, &fs_holder_ops);
	if (IS_ERR(*bdev_filep)) {
		error = PTR_ERR(*bdev_filep);
		*bdev_filep = NULL;
		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
	}

	return error;
}

STATIC void
xfs_shutdown_devices(
	struct xfs_mount	*mp)
{
	/*
	 * Udev is triggered whenever anyone closes a block device or unmounts
	 * a file system on a block device.
	 * The default udev rules invoke blkid to read the fs super and create
	 * symlinks to the bdev under /dev/disk.  For this, it uses buffered
	 * reads through the page cache.
	 *
	 * xfs_db also uses buffered reads to examine metadata.  There is no
	 * coordination between xfs_db and udev, which means that they can run
	 * concurrently.  Note there is no coordination between the kernel and
	 * blkid either.
	 *
	 * On a system with 64k pages, the page cache can cache the superblock
	 * and the root inode (and hence the root directory) with the same 64k
	 * page.  If udev spawns blkid after the mkfs and the system is busy
	 * enough that it is still running when xfs_db starts up, they'll both
	 * read from the same page in the pagecache.
	 *
	 * The unmount writes updated inode metadata to disk directly.  The XFS
	 * buffer cache does not use the bdev pagecache, so it needs to
	 * invalidate that pagecache on unmount.  If the above scenario occurs,
	 * the pagecache no longer reflects what's on disk, xfs_db reads the
	 * stale metadata, and fails to find /a.  Most of the time this succeeds
	 * because closing a bdev invalidates the page cache, but when processes
	 * race, everyone loses.
	 */
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		blkdev_issue_flush(mp->m_logdev_targp->bt_bdev);
		invalidate_bdev(mp->m_logdev_targp->bt_bdev);
	}
	if (mp->m_rtdev_targp) {
		blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
		invalidate_bdev(mp->m_rtdev_targp->bt_bdev);
	}
	blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
	invalidate_bdev(mp->m_ddev_targp->bt_bdev);
}

/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
	struct xfs_mount	*mp)
{
	struct super_block	*sb = mp->m_super;
	struct block_device	*ddev = sb->s_bdev;
	struct file		*logdev_file = NULL, *rtdev_file = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev_file);
		if (error)
			return error;
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev_file);
		if (error)
			goto out_close_logdev;

		if (file_bdev(rtdev_file) == ddev ||
		    (logdev_file &&
		     file_bdev(rtdev_file) == file_bdev(logdev_file))) {
			xfs_warn(mp,
	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = -EINVAL;
			goto out_close_rtdev;
		}
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = -ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, sb->s_bdev_file);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev_file) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev_file);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev_file && file_bdev(logdev_file) != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev_file);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		mp->m_logdev_targp = mp->m_ddev_targp;
		/* Handle won't be used, drop it */
		if (logdev_file)
			bdev_fput(logdev_file);
	}

	return 0;

 out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp);
 out_free_ddev_targ:
	xfs_free_buftarg(mp->m_ddev_targp);
 out_close_rtdev:
	if (rtdev_file)
		bdev_fput(rtdev_file);
 out_close_logdev:
	if (logdev_file)
		bdev_fput(logdev_file);
	return error;
}

/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		if (xfs_has_sector(mp))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}

STATIC int
xfs_init_mount_workqueues(
	struct xfs_mount	*mp)
{
	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			1, mp->m_super->s_id);
	if (!mp->m_buf_workqueue)
		goto out;

	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_unwritten_workqueue)
		goto out_destroy_buf;

	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_reclaim_workqueue)
		goto out_destroy_unwritten;

	mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
			XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_blockgc_wq)
		goto out_destroy_reclaim;

	mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			1, mp->m_super->s_id);
	if (!mp->m_inodegc_wq)
		goto out_destroy_blockgc;

	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
			XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
	if (!mp->m_sync_workqueue)
		goto out_destroy_inodegc;

	return 0;

out_destroy_inodegc:
	destroy_workqueue(mp->m_inodegc_wq);
out_destroy_blockgc:
	destroy_workqueue(mp->m_blockgc_wq);
out_destroy_reclaim:
	destroy_workqueue(mp->m_reclaim_workqueue);
out_destroy_unwritten:
	destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_buf:
	destroy_workqueue(mp->m_buf_workqueue);
out:
	return -ENOMEM;
}
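
/*
 * The "%s" in each name above is the superblock's s_id, so for a mount of
 * /dev/sda1 (illustrative device) these pools show up to the administrator
 * as "xfs-buf/sda1", "xfs-conv/sda1", and so on.
 */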

STATIC void
xfs_destroy_mount_workqueues(
	struct xfs_mount	*mp)
{
	destroy_workqueue(mp->m_sync_workqueue);
	destroy_workqueue(mp->m_blockgc_wq);
	destroy_workqueue(mp->m_inodegc_wq);
	destroy_workqueue(mp->m_reclaim_workqueue);
	destroy_workqueue(mp->m_unwritten_workqueue);
	destroy_workqueue(mp->m_buf_workqueue);
}

static void
xfs_flush_inodes_worker(
	struct work_struct	*work)
{
	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
						   m_flush_inodes_work);
	struct super_block	*sb = mp->m_super;

	if (down_read_trylock(&sb->s_umount)) {
		sync_inodes_sb(sb);
		up_read(&sb->s_umount);
	}
}

/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 * for IO to complete so that we effectively throttle multiple callers to the
 * rate at which IO is completing.
 */
void
xfs_flush_inodes(
	struct xfs_mount	*mp)
{
	/*
	 * If flush_work() returns true then that means we waited for a flush
	 * which was already in progress.  Don't bother running another scan.
	 */
	if (flush_work(&mp->m_flush_inodes_work))
		return;

	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
	flush_work(&mp->m_flush_inodes_work);
}
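
/*
 * Sketch of the throttling described above (illustrative): if callers A, B
 * and C all need a flush at nearly the same time, A queues the work and
 * waits for it; B and C ride along on A's flush via the first flush_work()
 * and return without queueing a second scan.
 */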

/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	XFS_STATS_INC(ip->i_mount, vn_rele);
	XFS_STATS_INC(ip->i_mount, vn_remove);
	xfs_inode_mark_reclaimable(ip);
}

static void
xfs_fs_dirty_inode(
	struct inode			*inode,
	int				flags)
{
	struct xfs_inode		*ip = XFS_I(inode);
	struct xfs_mount		*mp = ip->i_mount;
	struct xfs_trans		*tp;

	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
		return;

	/*
	 * Only do the timestamp update if the inode is dirty (I_DIRTY_SYNC)
	 * and has dirty timestamp (I_DIRTY_TIME). I_DIRTY_TIME can be passed
	 * in flags possibly together with I_DIRTY_SYNC.
	 */
	if ((flags & ~I_DIRTY_TIME) != I_DIRTY_SYNC || !(flags & I_DIRTY_TIME))
		return;

	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
		return;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
	xfs_trans_commit(tp);
}

/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);
	init_rwsem(&ip->i_lock);
}

/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every inode
 * we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	/*
	 * If this unlinked inode is in the middle of recovery, don't
	 * drop the inode just yet; log recovery will take care of
	 * that.  See the comment for this inode flag.
	 */
	if (ip->i_flags & XFS_IRECOVERY) {
		ASSERT(xlog_recovery_needed(ip->i_mount->m_log));
		return 0;
	}

	return generic_drop_inode(inode);
}

static void
xfs_mount_free(
	struct xfs_mount	*mp)
{
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_free_buftarg(mp->m_logdev_targp);
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp);
	if (mp->m_ddev_targp)
		xfs_free_buftarg(mp->m_ddev_targp);

	debugfs_remove(mp->m_debugfs);
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
	kfree(mp);
}

STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);
	int			error;

	trace_xfs_fs_sync_fs(mp, __return_address);

	/*
	 * Doing anything during the async pass would be counterproductive.
	 */
	if (!wait)
		return 0;

	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return error;

	if (laptop_mode) {
		/*
		 * The disk must be active because we're syncing.
		 * We schedule log work now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		flush_delayed_work(&mp->m_log->l_work);
	}

	/*
	 * If we are called with page faults frozen out, it means we are about
	 * to freeze the transaction subsystem. Take the opportunity to shut
	 * down inodegc because once SB_FREEZE_FS is set it's too late to
	 * prevent inactivation races with freeze. The fs doesn't get called
	 * again by the freezing process until after SB_FREEZE_FS has been set,
	 * so it's now or never.  Same logic applies to speculative allocation
	 * garbage collection.
	 *
	 * We don't care if this is a normal syncfs call that does this or
	 * freeze that does this - we can run this multiple times without issue
	 * and we won't race with a restart because a restart can only occur
	 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
	 */
	if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
		xfs_inodegc_stop(mp);
		xfs_blockgc_stop(mp);
	}

	return 0;
}

STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	uint64_t		fakeinos, id;
	uint64_t		icount;
	uint64_t		ifree;
	uint64_t		fdblocks;
	xfs_extlen_t		lsize;
	int64_t			ffree;

	/*
	 * Expedite background inodegc but don't wait. We do not want to block
	 * here waiting hours for a billion extent file to be truncated.
	 */
	xfs_inodegc_push(mp);

	statp->f_type = XFS_SUPER_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid = u64_to_fsid(id);

	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	spin_unlock(&mp->m_sb_lock);

	/* make sure statp->f_bfree does not underflow */
	statp->f_bfree = max_t(int64_t, 0,
				fdblocks - xfs_fdblocks_unavailable(mp));
	statp->f_bavail = statp->f_bfree;

	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
	if (M_IGEO(mp)->maxicount)
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
					M_IGEO(mp)->maxicount);

	/* If sb_icount overshot maxicount, report actual allocation */
	statp->f_files = max_t(typeof(statp->f_files),
					statp->f_files,
					sbp->sb_icount);

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (icount - ifree);
	statp->f_ffree = max_t(int64_t, ffree, 0);

	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
	    (mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD)) ==
			    (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);

	if (XFS_IS_REALTIME_MOUNT(mp) &&
	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
		s64	freertx;

		statp->f_blocks = sbp->sb_rblocks;
		freertx = percpu_counter_sum_positive(&mp->m_frextents);
		statp->f_bavail = statp->f_bfree = xfs_rtx_to_rtb(mp, freertx);
	}

	return 0;
}
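
/*
 * Illustrative mapping to userspace (example figures, not exact): statfs(2)
 * reports the values computed above, so "df" on a filesystem with an
 * internal log shows sb_dblocks minus sb_logblocks as the block count, with
 * free space further reduced by the reserve pool accounted in
 * xfs_fdblocks_unavailable().
 */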

STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, 0);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks;

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, resblks);
}

/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done sync the superblock
 * to the log to dirty it in case of a crash while frozen. This ensures that we
 * will recover the unlinked inode lists on the next mount.
 */
STATIC int
xfs_fs_freeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);
	unsigned int		flags;
	int			ret;

	/*
	 * The filesystem is now frozen far enough that memory reclaim
	 * cannot safely operate on the filesystem. Hence we need to
	 * set a GFP_NOFS context here to avoid recursion deadlocks.
	 */
	flags = memalloc_nofs_save();
	xfs_save_resvblks(mp);
	ret = xfs_log_quiesce(mp);
	memalloc_nofs_restore(flags);

	/*
	 * For read-write filesystems, we need to restart the inodegc on error
	 * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not
	 * going to be run to restart it now.  We are at SB_FREEZE_FS level
	 * here, so we can restart safely without racing with a stop in
	 * xfs_fs_sync_fs().
	 */
	if (ret && !xfs_is_readonly(mp)) {
		xfs_blockgc_start(mp);
		xfs_inodegc_start(mp);
	}

	return ret;
}

STATIC int
xfs_fs_unfreeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);

	/*
	 * Don't reactivate the inodegc worker on a readonly filesystem because
	 * inodes are sent directly to reclaim.  Don't reactivate the blockgc
	 * worker because there are no speculative preallocations on a readonly
	 * filesystem.
	 */
	if (!xfs_is_readonly(mp)) {
		xfs_blockgc_start(mp);
		xfs_inodegc_start(mp);
	}

	return 0;
}

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount	*mp)
{
	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_has_logv2(mp)) {
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			xfs_warn(mp,
		"logbuf size must be greater than or equal to log stripe size");
			return -EINVAL;
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			xfs_warn(mp,
		"logbuf size for version 1 logs must be 16K or 32K");
			return -EINVAL;
		}
	}

	/*
	 * V5 filesystems always use attr2 format for attributes.
	 */
	if (xfs_has_crc(mp) && xfs_has_noattr2(mp)) {
		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
			     "attr2 is always enabled for V5 filesystems.");
		return -EINVAL;
	}

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !xfs_is_readonly(mp)) {
		xfs_warn(mp,
			"cannot mount a read-only filesystem as read-write");
		return -EROFS;
	}

	if ((mp->m_qflags & XFS_GQUOTA_ACCT) &&
	    (mp->m_qflags & XFS_PQUOTA_ACCT) &&
	    !xfs_has_pquotino(mp)) {
		xfs_warn(mp,
		  "Super block does not support project and group quota together");
		return -EINVAL;
	}

	return 0;
}

static int
xfs_init_percpu_counters(
	struct xfs_mount	*mp)
{
	int		error;

	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
	if (error)
		return -ENOMEM;

	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
	if (error)
		goto free_icount;

	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
	if (error)
		goto free_ifree;

	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
	if (error)
		goto free_fdblocks;

	error = percpu_counter_init(&mp->m_frextents, 0, GFP_KERNEL);
	if (error)
		goto free_delalloc;

	return 0;

free_delalloc:
	percpu_counter_destroy(&mp->m_delalloc_blks);
free_fdblocks:
	percpu_counter_destroy(&mp->m_fdblocks);
free_ifree:
	percpu_counter_destroy(&mp->m_ifree);
free_icount:
	percpu_counter_destroy(&mp->m_icount);
	return -ENOMEM;
}

void
xfs_reinit_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
	percpu_counter_set(&mp->m_frextents, mp->m_sb.sb_frextents);
}

static void
xfs_destroy_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_destroy(&mp->m_icount);
	percpu_counter_destroy(&mp->m_ifree);
	percpu_counter_destroy(&mp->m_fdblocks);
	ASSERT(xfs_is_shutdown(mp) ||
	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
	percpu_counter_destroy(&mp->m_delalloc_blks);
	percpu_counter_destroy(&mp->m_frextents);
}

static int
xfs_inodegc_init_percpu(
	struct xfs_mount	*mp)
{
	struct xfs_inodegc	*gc;
	int			cpu;

	mp->m_inodegc = alloc_percpu(struct xfs_inodegc);
	if (!mp->m_inodegc)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		gc->cpu = cpu;
		gc->mp = mp;
		init_llist_head(&gc->list);
		gc->items = 0;
		gc->error = 0;
		INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
	}
	return 0;
}

static void
xfs_inodegc_free_percpu(
	struct xfs_mount	*mp)
{
	if (!mp->m_inodegc)
		return;
	free_percpu(mp->m_inodegc);
}

static void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_notice(mp, "Unmounting Filesystem %pU", &mp->m_sb.sb_uuid);
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);

	xfs_freesb(mp);
	xchk_mount_stats_free(mp);
	free_percpu(mp->m_stats.xs_stats);
	xfs_inodegc_free_percpu(mp);
	xfs_destroy_percpu_counters(mp);
	xfs_destroy_mount_workqueues(mp);
	xfs_shutdown_devices(mp);
}

static long
xfs_fs_nr_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	/* Paranoia: catch incorrect calls during mount setup or teardown */
	if (WARN_ON_ONCE(!sb->s_fs_info))
		return 0;
	return xfs_reclaim_inodes_count(XFS_M(sb));
}

static long
xfs_fs_free_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}

static void
xfs_fs_shutdown(
	struct super_block	*sb)
{
	xfs_force_shutdown(XFS_M(sb), SHUTDOWN_DEVICE_REMOVED);
}

static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.dirty_inode		= xfs_fs_dirty_inode,
	.drop_inode		= xfs_fs_drop_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.show_options		= xfs_fs_show_options,
	.nr_cached_objects	= xfs_fs_nr_cached_objects,
	.free_cached_objects	= xfs_fs_free_cached_objects,
	.shutdown		= xfs_fs_shutdown,
};

static int
suffix_kstrtoint(
	const char	*s,
	unsigned int	base,
	int		*res)
{
	int		last, shift_left_factor = 0, _res;
	char		*value;
	int		ret = 0;

	value = kstrdup(s, GFP_KERNEL);
	if (!value)
		return -ENOMEM;

	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}

	if (kstrtoint(value, base, &_res))
		ret = -EINVAL;
	kfree(value);
	*res = _res << shift_left_factor;
	return ret;
}
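
/*
 * Examples (illustrative): suffix_kstrtoint("32k", 10, &res) stores
 * 32 << 10 = 32768, "64m" stores 64 << 20 = 67108864, and a bare "4096" is
 * passed through unshifted.  The suffix match is case-insensitive.
 */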

static inline void
xfs_fs_warn_deprecated(
	struct fs_context	*fc,
	struct fs_parameter	*param,
	uint64_t		flag,
	bool			value)
{
	/*
	 * Don't print the warning if reconfiguring and current mount point
	 * already had the flag set.
	 */
	if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
	    !!(XFS_M(fc->root->d_sb)->m_features & flag) == value)
		return;
	xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
}

/*
 * Set mount state from a mount option.
 *
 * NOTE: mp->m_super is NULL here!
 */
static int
xfs_fs_parse_param(
	struct fs_context	*fc,
	struct fs_parameter	*param)
{
	struct xfs_mount	*parsing_mp = fc->s_fs_info;
	struct fs_parse_result	result;
	int			size = 0;
	int			opt;

	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_logbufs:
		parsing_mp->m_logbufs = result.uint_32;
		return 0;
	case Opt_logbsize:
		if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
			return -EINVAL;
		return 0;
	case Opt_logdev:
		kfree(parsing_mp->m_logname);
		parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_logname)
			return -ENOMEM;
		return 0;
	case Opt_rtdev:
		kfree(parsing_mp->m_rtname);
		parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_rtname)
			return -ENOMEM;
		return 0;
	case Opt_allocsize:
		if (suffix_kstrtoint(param->string, 10, &size))
			return -EINVAL;
		parsing_mp->m_allocsize_log = ffs(size) - 1;
		parsing_mp->m_features |= XFS_FEAT_ALLOCSIZE;
		return 0;
	case Opt_grpid:
	case Opt_bsdgroups:
		parsing_mp->m_features |= XFS_FEAT_GRPID;
		return 0;
	case Opt_nogrpid:
	case Opt_sysvgroups:
		parsing_mp->m_features &= ~XFS_FEAT_GRPID;
		return 0;
	case Opt_wsync:
		parsing_mp->m_features |= XFS_FEAT_WSYNC;
		return 0;
	case Opt_norecovery:
		parsing_mp->m_features |= XFS_FEAT_NORECOVERY;
		return 0;
	case Opt_noalign:
		parsing_mp->m_features |= XFS_FEAT_NOALIGN;
		return 0;
	case Opt_swalloc:
		parsing_mp->m_features |= XFS_FEAT_SWALLOC;
		return 0;
	case Opt_sunit:
		parsing_mp->m_dalign = result.uint_32;
		return 0;
	case Opt_swidth:
		parsing_mp->m_swidth = result.uint_32;
		return 0;
	case Opt_inode32:
		parsing_mp->m_features |= XFS_FEAT_SMALL_INUMS;
		return 0;
	case Opt_inode64:
		parsing_mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
		return 0;
	case Opt_nouuid:
		parsing_mp->m_features |= XFS_FEAT_NOUUID;
		return 0;
	case Opt_largeio:
		parsing_mp->m_features |= XFS_FEAT_LARGE_IOSIZE;
		return 0;
	case Opt_nolargeio:
		parsing_mp->m_features &= ~XFS_FEAT_LARGE_IOSIZE;
		return 0;
	case Opt_filestreams:
		parsing_mp->m_features |= XFS_FEAT_FILESTREAMS;
		return 0;
	case Opt_noquota:
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
		return 0;
	case Opt_quota:
	case Opt_uquota:
	case Opt_usrquota:
		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD);
		return 0;
	case Opt_qnoenforce:
	case Opt_uqnoenforce:
		parsing_mp->m_qflags |= XFS_UQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
		return 0;
	case Opt_pquota:
	case Opt_prjquota:
		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD);
		return 0;
	case Opt_pqnoenforce:
		parsing_mp->m_qflags |= XFS_PQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
		return 0;
	case Opt_gquota:
	case Opt_grpquota:
		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
		return 0;
	case Opt_gqnoenforce:
		parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
		return 0;
	case Opt_discard:
		parsing_mp->m_features |= XFS_FEAT_DISCARD;
		return 0;
	case Opt_nodiscard:
		parsing_mp->m_features &= ~XFS_FEAT_DISCARD;
		return 0;
#ifdef CONFIG_FS_DAX
	case Opt_dax:
		xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
		return 0;
	case Opt_dax_enum:
		xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
		return 0;
#endif
	/* Following mount options will be removed in September 2025 */
	case Opt_ikeep:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true);
		parsing_mp->m_features |= XFS_FEAT_IKEEP;
		return 0;
	case Opt_noikeep:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false);
		parsing_mp->m_features &= ~XFS_FEAT_IKEEP;
		return 0;
	case Opt_attr2:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true);
		parsing_mp->m_features |= XFS_FEAT_ATTR2;
		return 0;
	case Opt_noattr2:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true);
		parsing_mp->m_features |= XFS_FEAT_NOATTR2;
		return 0;
	default:
		xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
		return -EINVAL;
	}

	return 0;
}
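
/*
 * Illustrative walk-through: "mount -o logbsize=64k,usrquota ..." invokes
 * this parser once per option -- Opt_logbsize stores 65536 in m_logbsize,
 * Opt_usrquota sets XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD in m_qflags -- and the
 * combination is cross-checked later in xfs_fs_validate_params().
 */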

static int
xfs_fs_validate_params(
	struct xfs_mount	*mp)
{
	/* No recovery flag requires a read-only mount */
	if (xfs_has_norecovery(mp) && !xfs_is_readonly(mp)) {
		xfs_warn(mp, "no-recovery mounts must be read-only.");
		return -EINVAL;
	}

	/*
	 * We have not read the superblock at this point, so only the attr2
	 * mount option can set the attr2 feature by this stage.
	 */
	if (xfs_has_attr2(mp) && xfs_has_noattr2(mp)) {
		xfs_warn(mp, "attr2 and noattr2 cannot both be specified.");
		return -EINVAL;
	}

	if (xfs_has_noalign(mp) && (mp->m_dalign || mp->m_swidth)) {
		xfs_warn(mp,
	"sunit and swidth options incompatible with the noalign option");
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
		xfs_warn(mp, "quota support not available in this kernel.");
		return -EINVAL;
	}

	if ((mp->m_dalign && !mp->m_swidth) ||
	    (!mp->m_dalign && mp->m_swidth)) {
		xfs_warn(mp, "sunit and swidth must be specified together");
		return -EINVAL;
	}

	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
		xfs_warn(mp,
	"stripe width (%d) must be a multiple of the stripe unit (%d)",
			mp->m_swidth, mp->m_dalign);
		return -EINVAL;
	}

	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return -EINVAL;
	}

	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize !=  0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		xfs_warn(mp,
			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
			mp->m_logbsize);
		return -EINVAL;
	}

	if (xfs_has_allocsize(mp) &&
	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
		return -EINVAL;
	}

	return 0;
}
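
/*
 * Examples of option combinations rejected above (illustrative):
 * "-o norecovery" without "-o ro"; "-o sunit=128" without a matching
 * swidth; "-o logbsize=12k", which is below the 16k minimum and not a
 * power of two.
 */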

struct dentry *
xfs_debugfs_mkdir(
	const char	*name,
	struct dentry	*parent)
{
	struct dentry	*child;

	/* Apparently we're expected to ignore error returns?? */
	child = debugfs_create_dir(name, parent);
	if (IS_ERR(child))
		return NULL;

	return child;
}
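
/*
 * With debugfs mounted in its usual place, the directory created for a
 * mount of /dev/sda1 (illustrative device) appears as
 * /sys/kernel/debug/xfs/sda1; see the xfs_debugfs_mkdir() call in
 * xfs_fs_fill_super() below.
 */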

static int
xfs_fs_fill_super(
	struct super_block	*sb,
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = sb->s_fs_info;
	struct inode		*root;
	int			flags = 0, error;

	mp->m_super = sb;

	/*
	 * Copy VFS mount flags from the context now that all parameter parsing
	 * is guaranteed to have been completed by either the old mount API or
	 * the newer fsopen/fsconfig API.
	 */
	if (fc->sb_flags & SB_RDONLY)
		set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
	if (fc->sb_flags & SB_DIRSYNC)
		mp->m_features |= XFS_FEAT_DIRSYNC;
	if (fc->sb_flags & SB_SYNCHRONOUS)
		mp->m_features |= XFS_FEAT_WSYNC;

	error = xfs_fs_validate_params(mp);
	if (error)
		return error;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	sb->s_op = &xfs_super_operations;

	/*
	 * Delay mount work if the debug hook is set. This is debug
	 * instrumentation to coordinate simulation of xfs mount failures with
	 * VFS superblock operations.
	 */
	if (xfs_globals.mount_delay) {
		xfs_notice(mp, "Delaying mount for %d seconds.",
			xfs_globals.mount_delay);
		msleep(xfs_globals.mount_delay * 1000);
	}

	if (fc->sb_flags & SB_SILENT)
		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
	if (error)
		return error;

	if (xfs_debugfs) {
		mp->m_debugfs = xfs_debugfs_mkdir(mp->m_super->s_id,
						  xfs_debugfs);
	} else {
		mp->m_debugfs = NULL;
	}

	error = xfs_init_mount_workqueues(mp);
	if (error)
		goto out_shutdown_devices;

	error = xfs_init_percpu_counters(mp);
	if (error)
		goto out_destroy_workqueues;

	error = xfs_inodegc_init_percpu(mp);
	if (error)
		goto out_destroy_counters;

	/* Allocate stats memory before we do operations that might use it */
	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
	if (!mp->m_stats.xs_stats) {
		error = -ENOMEM;
		goto out_destroy_inodegc;
	}

	error = xchk_mount_stats_alloc(mp);
	if (error)
		goto out_free_stats;

	error = xfs_readsb(mp, flags);
	if (error)
		goto out_free_scrub_stats;

	error = xfs_finish_flags(mp);
	if (error)
		goto out_free_sb;

	error = xfs_setup_devices(mp);
	if (error)
		goto out_free_sb;

	/* V4 support is undergoing deprecation. */
	if (!xfs_has_crc(mp)) {
#ifdef CONFIG_XFS_SUPPORT_V4
		xfs_warn_once(mp,
	"Deprecated V4 format (crc=0) will not be supported after September 2030.");
#else
		xfs_warn(mp,
	"Deprecated V4 format (crc=0) not supported by kernel.");
		error = -EINVAL;
		goto out_free_sb;
#endif
	}

	/* ASCII case insensitivity is undergoing deprecation. */
	if (xfs_has_asciici(mp)) {
#ifdef CONFIG_XFS_SUPPORT_ASCII_CI
		xfs_warn_once(mp,
	"Deprecated ASCII case-insensitivity feature (ascii-ci=1) will not be supported after September 2030.");
#else
		xfs_warn(mp,
	"Deprecated ASCII case-insensitivity feature (ascii-ci=1) not supported by kernel.");
		error = -EINVAL;
		goto out_free_sb;
#endif
	}

	/* Filesystem claims it needs repair, so refuse the mount. */
	if (xfs_has_needsrepair(mp)) {
		xfs_warn(mp, "Filesystem needs repair.  Please run xfs_repair.");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

	/*
	 * Don't touch the filesystem if a user tool thinks it owns the primary
	 * superblock.  mkfs doesn't clear the flag from secondary supers, so
	 * we don't check them at all.
	 */
	if (mp->m_sb.sb_inprogress) {
		xfs_warn(mp, "Offline file system operation in progress!");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

	/*
	 * Until this is fixed only page-sized or smaller data blocks work.
	 */
	if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
		xfs_warn(mp,
		"File system with blocksize %d bytes. "
		"Only pagesize (%ld) or less will currently work.",
				mp->m_sb.sb_blocksize, PAGE_SIZE);
		error = -ENOSYS;
		goto out_free_sb;
	}

	/* Ensure this filesystem fits in the page cache limits */
	if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
	    xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
		xfs_warn(mp,
		"file system too large to be mounted on this system.");
		error = -EFBIG;
		goto out_free_sb;
	}

	/*
	 * XFS block mappings use 54 bits to store the logical block offset.
	 * This should suffice to handle the maximum file size that the VFS
	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
	 * to check this assertion.
	 *
	 * Avoid integer overflow by comparing the maximum bmbt offset to the
	 * maximum pagecache offset in units of fs blocks.
	 */
	if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
		xfs_warn(mp,
"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
			 XFS_MAX_FILEOFF);
		error = -EINVAL;
		goto out_free_sb;
	}

	error = xfs_filestream_mount(mp);
	if (error)
		goto out_free_sb;

	/*
	 * We must configure the block size in the superblock before we run the
	 * full mount process as the mount process can lookup and cache inodes.
	 */
	sb->s_magic = XFS_SUPER_MAGIC;
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_max_links = XFS_MAXLINK;
	sb->s_time_gran = 1;
	if (xfs_has_bigtime(mp)) {
		sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
		sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
	} else {
		sb->s_time_min = XFS_LEGACY_TIME_MIN;
		sb->s_time_max = XFS_LEGACY_TIME_MAX;
	}
	trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
	sb->s_iflags |= SB_I_CGROUPWB;

	set_posix_acl_flag(sb);

	/* version 5 superblocks support inode version counters. */
	if (xfs_has_crc(mp))
		sb->s_flags |= SB_I_VERSION;

	if (xfs_has_dax_always(mp)) {
		error = xfs_setup_dax_always(mp);
		if (error)
			goto out_filestream_unmount;
	}

	if (xfs_has_discard(mp) && !bdev_max_discard_sectors(sb->s_bdev)) {
		xfs_warn(mp,
	"mounting with \"discard\" option, but the device does not support discard");
		mp->m_features &= ~XFS_FEAT_DISCARD;
	}

	if (xfs_has_reflink(mp)) {
		if (mp->m_sb.sb_rblocks) {
			xfs_alert(mp,
	"reflink not compatible with realtime device!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}

		if (xfs_globals.always_cow) {
			xfs_info(mp, "using DEBUG-only always_cow mode.");
			mp->m_always_cow = true;
		}
	}

	if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) {
		xfs_alert(mp,
	"reverse mapping btree not compatible with realtime device!");
		error = -EINVAL;
		goto out_filestream_unmount;
	}

	if (xfs_has_exchange_range(mp))
		xfs_warn(mp,
	"EXPERIMENTAL exchange-range feature enabled. Use at your own risk!");

	error = xfs_mountfs(mp);
	if (error)
		goto out_filestream_unmount;

	root = igrab(VFS_I(mp->m_rootip));
	if (!root) {
		error = -ENOENT;
		goto out_unmount;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		error = -ENOMEM;
		goto out_unmount;
	}

	return 0;

 out_filestream_unmount:
	xfs_filestream_unmount(mp);
 out_free_sb:
	xfs_freesb(mp);
 out_free_scrub_stats:
	xchk_mount_stats_free(mp);
 out_free_stats:
	free_percpu(mp->m_stats.xs_stats);
 out_destroy_inodegc:
	xfs_inodegc_free_percpu(mp);
 out_destroy_counters:
	xfs_destroy_percpu_counters(mp);
 out_destroy_workqueues:
	xfs_destroy_mount_workqueues(mp);
 out_shutdown_devices:
	xfs_shutdown_devices(mp);
	return error;

 out_unmount:
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);
	goto out_free_sb;
}
1775 
1776 static int
1777 xfs_fs_get_tree(
1778 	struct fs_context	*fc)
1779 {
1780 	return get_tree_bdev(fc, xfs_fs_fill_super);
1781 }
1782 
1783 static int
1784 xfs_remount_rw(
1785 	struct xfs_mount	*mp)
1786 {
1787 	struct xfs_sb		*sbp = &mp->m_sb;
1788 	int error;
1789 
1790 	if (xfs_has_norecovery(mp)) {
1791 		xfs_warn(mp,
1792 			"ro->rw transition prohibited on norecovery mount");
1793 		return -EINVAL;
1794 	}
1795 
1796 	if (xfs_sb_is_v5(sbp) &&
1797 	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
1798 		xfs_warn(mp,
1799 	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
1800 			(sbp->sb_features_ro_compat &
1801 				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
1802 		return -EINVAL;
1803 	}
1804 
1805 	clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
1806 
1807 	/*
1808 	 * If this is the first remount to writeable state we might have some
1809 	 * superblock changes to update.
1810 	 */
1811 	if (mp->m_update_sb) {
1812 		error = xfs_sync_sb(mp, false);
1813 		if (error) {
1814 			xfs_warn(mp, "failed to write sb changes");
1815 			return error;
1816 		}
1817 		mp->m_update_sb = false;
1818 	}
1819 
1820 	/*
1821 	 * Fill out the reserve pool if it is empty. Use the stashed value if
1822 	 * it is non-zero, otherwise go with the default.
1823 	 */
1824 	xfs_restore_resvblks(mp);
1825 	xfs_log_work_queue(mp);
1826 	xfs_blockgc_start(mp);
1827 
1828 	/* Create the per-AG metadata reservation pool .*/
1829 	error = xfs_fs_reserve_ag_blocks(mp);
1830 	if (error && error != -ENOSPC)
1831 		return error;
1832 
1833 	/* Re-enable the background inode inactivation worker. */
1834 	xfs_inodegc_start(mp);
1835 
1836 	return 0;
1837 }
1838 
1839 static int
1840 xfs_remount_ro(
1841 	struct xfs_mount	*mp)
1842 {
1843 	struct xfs_icwalk	icw = {
1844 		.icw_flags	= XFS_ICWALK_FLAG_SYNC,
1845 	};
1846 	int			error;
1847 
1848 	/* Flush all the dirty data to disk. */
1849 	error = sync_filesystem(mp->m_super);
1850 	if (error)
1851 		return error;
1852 
1853 	/*
1854 	 * Cancel background eofb scanning so it cannot race with the final
1855 	 * log force+buftarg wait and deadlock the remount.
1856 	 */
1857 	xfs_blockgc_stop(mp);
1858 
1859 	/*
1860 	 * Clear out all remaining COW staging extents and speculative post-EOF
1861 	 * preallocations so that we don't leave inodes requiring inactivation
1862 	 * cleanups during reclaim on a read-only mount.  We must process every
1863 	 * cached inode, so this requires a synchronous cache scan.
1864 	 */
1865 	error = xfs_blockgc_free_space(mp, &icw);
1866 	if (error) {
1867 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1868 		return error;
1869 	}
1870 
1871 	/*
1872 	 * Stop the inodegc background worker.  xfs_fs_reconfigure already
1873 	 * flushed all pending inodegc work when it sync'd the filesystem.
1874 	 * The VFS holds s_umount, so we know that inodes cannot enter
1875 	 * xfs_fs_destroy_inode during a remount operation.  In readonly mode
1876 	 * we send inodes straight to reclaim, so no inodes will be queued.
1877 	 */
1878 	xfs_inodegc_stop(mp);
1879 
1880 	/* Free the per-AG metadata reservation pool. */
1881 	error = xfs_fs_unreserve_ag_blocks(mp);
1882 	if (error) {
1883 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1884 		return error;
1885 	}
1886 
1887 	/*
1888 	 * Before we sync the metadata, we need to free up the reserve block
1889 	 * pool so that the used block count in the superblock on disk is
1890 	 * correct at the end of the remount. Stash the current reserve pool
1891 	 * size so that if we get remounted rw, we can return it to the same
1892 	 * size.
1893 	 */
1894 	xfs_save_resvblks(mp);
1895 
1896 	xfs_log_clean(mp);
1897 	set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
1898 
1899 	return 0;
1900 }
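
/*
 * Once xfs_remount_ro() returns, xfs_log_clean() has covered the log and
 * written an unmount record, so the on-disk log is clean and the block
 * counts in the superblock are accurate; the block device can then be
 * snapshotted or inspected without needing log recovery.
 */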
1901 
1902 /*
1903  * Logically we would return an error here to prevent users from believing
1904  * they might have changed mount options using remount which can't be changed.
1905  *
1906  * But unfortunately mount(8) adds all options from mtab and fstab to the
1907  * mount arguments in some cases, so we can't blindly reject options; we have
1908  * to check each specified option against the currently set value and reject
1909  * it only if it actually differs.
1910  *
1911  * Until that is implemented we return success for every remount request, and
1912  * silently ignore all options that we can't actually change.
1913  */
1914 static int
1915 xfs_fs_reconfigure(
1916 	struct fs_context *fc)
1917 {
1918 	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
1919 	struct xfs_mount	*new_mp = fc->s_fs_info;
1920 	int			flags = fc->sb_flags;
1921 	int			error;
1922 
1923 	/* version 5 superblocks always support version counters. */
1924 	if (xfs_has_crc(mp))
1925 		fc->sb_flags |= SB_I_VERSION;
1926 
1927 	error = xfs_fs_validate_params(new_mp);
1928 	if (error)
1929 		return error;
1930 
1931 	/* inode32 -> inode64 */
1932 	if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) {
1933 		mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
1934 		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
1935 	}
1936 
1937 	/* inode64 -> inode32 */
1938 	if (!xfs_has_small_inums(mp) && xfs_has_small_inums(new_mp)) {
1939 		mp->m_features |= XFS_FEAT_SMALL_INUMS;
1940 		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
1941 	}
1942 
1943 	/* ro -> rw */
1944 	if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) {
1945 		error = xfs_remount_rw(mp);
1946 		if (error)
1947 			return error;
1948 	}
1949 
1950 	/* rw -> ro */
1951 	if (!xfs_is_readonly(mp) && (flags & SB_RDONLY)) {
1952 		error = xfs_remount_ro(mp);
1953 		if (error)
1954 			return error;
1955 	}
1956 
1957 	return 0;
1958 }
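
/*
 * Only the transitions handled above take effect on reconfigure, e.g.:
 *
 *	mount -o remount,inode64 /mnt	(lifts the small-inums restriction)
 *	mount -o remount,ro /mnt	(quiesces and marks the fs read-only)
 *
 * Any other option differences are, per the comment above, silently
 * ignored for now.
 */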
1959 
1960 static void
1961 xfs_fs_free(
1962 	struct fs_context	*fc)
1963 {
1964 	struct xfs_mount	*mp = fc->s_fs_info;
1965 
1966 	/*
1967 	 * mp is stored in the fs_context when it is initialized.
1968 	 * mp is transferred to the superblock on a successful mount,
1969 	 * but if an error occurs before the transfer we have to free
1970 	 * it here.
1971 	 */
1972 	if (mp)
1973 		xfs_mount_free(mp);
1974 }
1975 
1976 static const struct fs_context_operations xfs_context_ops = {
1977 	.parse_param = xfs_fs_parse_param,
1978 	.get_tree    = xfs_fs_get_tree,
1979 	.reconfigure = xfs_fs_reconfigure,
1980 	.free        = xfs_fs_free,
1981 };
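
/*
 * These ops are driven by the new mount API; xfs_fs_reconfigure() is also
 * reached from the legacy mount(2) MS_REMOUNT path, or from fspick(2) plus
 * FSCONFIG_CMD_RECONFIGURE.  A userspace sketch of the create sequence
 * (assumes a libc exposing the fsopen(2) family, e.g. glibc >= 2.36;
 * error handling elided):
 */
#if 0
static void ex_mount_xfs(void)	/* userspace sketch, not kernel code */
{
	int fsfd, mntfd;

	fsfd = fsopen("xfs", FSOPEN_CLOEXEC);	/* -> xfs_init_fs_context() */
	fsconfig(fsfd, FSCONFIG_SET_STRING, "source", "/dev/sda1", 0);
	fsconfig(fsfd, FSCONFIG_SET_FLAG, "noalign", NULL, 0);
						/* -> xfs_fs_parse_param() */
	fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
						/* -> xfs_fs_get_tree() */
	mntfd = fsmount(fsfd, FSMOUNT_CLOEXEC, 0);
	move_mount(mntfd, "", AT_FDCWD, "/mnt", MOVE_MOUNT_F_EMPTY_PATH);
}
#endif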
1982 
1983 /*
1984  * WARNING: do not initialise any parameters in this function that depend on
1985  * mount option parsing having already been performed, as this can be called
1986  * from fsopen() before any parameters have been set.
1987  */
1988 static int xfs_init_fs_context(
1989 	struct fs_context	*fc)
1990 {
1991 	struct xfs_mount	*mp;
1992 
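	/*
	 * __GFP_NOFAIL means this allocation retries internally rather than
	 * returning NULL, so the check below is purely defensive.
	 */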
1993 	mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL | __GFP_NOFAIL);
1994 	if (!mp)
1995 		return -ENOMEM;
1996 
1997 	spin_lock_init(&mp->m_sb_lock);
1998 	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
1999 	spin_lock_init(&mp->m_perag_lock);
2000 	mutex_init(&mp->m_growlock);
2001 	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
2002 	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
2003 	mp->m_kobj.kobject.kset = xfs_kset;
2004 	/*
2005 	 * We don't create the finobt per-ag space reservation until after log
2006 	 * recovery, so we must set this to true so that an ifree transaction
2007 	 * started during log recovery will not depend on space reservations
2008 	 * for finobt expansion.
2009 	 */
2010 	mp->m_finobt_nores = true;
2011 
2012 	/*
2013 	 * These can be overridden by the mount option parsing.
2014 	 */
2015 	mp->m_logbufs = -1;
2016 	mp->m_logbsize = -1;
2017 	mp->m_allocsize_log = 16; /* 64k */
2018 
2019 	xfs_hooks_init(&mp->m_dir_update_hooks);
2020 
2021 	fc->s_fs_info = mp;
2022 	fc->ops = &xfs_context_ops;
2023 
2024 	return 0;
2025 }
2026 
2027 static void
2028 xfs_kill_sb(
2029 	struct super_block		*sb)
2030 {
2031 	kill_block_super(sb);
2032 	xfs_mount_free(XFS_M(sb));
2033 }
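
/*
 * kill_block_super() shuts down the VFS side (evicting inodes and calling
 * ->put_super) and releases the block device; only then is it safe to free
 * the xfs_mount that was handed over from the fs_context at mount time.
 * This is the counterpart of the error path in xfs_fs_free() above.
 */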
2034 
2035 static struct file_system_type xfs_fs_type = {
2036 	.owner			= THIS_MODULE,
2037 	.name			= "xfs",
2038 	.init_fs_context	= xfs_init_fs_context,
2039 	.parameters		= xfs_fs_parameters,
2040 	.kill_sb		= xfs_kill_sb,
2041 	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
2042 };
2043 MODULE_ALIAS_FS("xfs");
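
/*
 * MODULE_ALIAS_FS("xfs") lets mount(2) autoload this module: an attempt to
 * mount an "xfs" filesystem triggers a request_module("fs-xfs"), which
 * resolves to this alias.
 */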
2044 
2045 STATIC int __init
2046 xfs_init_caches(void)
2047 {
2048 	int		error;
2049 
2050 	xfs_buf_cache = kmem_cache_create("xfs_buf", sizeof(struct xfs_buf), 0,
2051 					 SLAB_HWCACHE_ALIGN |
2052 					 SLAB_RECLAIM_ACCOUNT,
2053 					 NULL);
2054 	if (!xfs_buf_cache)
2055 		goto out;
2056 
2057 	xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket",
2058 						sizeof(struct xlog_ticket),
2059 						0, 0, NULL);
2060 	if (!xfs_log_ticket_cache)
2061 		goto out_destroy_buf_cache;
2062 
2063 	error = xfs_btree_init_cur_caches();
2064 	if (error)
2065 		goto out_destroy_log_ticket_cache;
2066 
2067 	error = rcbagbt_init_cur_cache();
2068 	if (error)
2069 		goto out_destroy_btree_cur_cache;
2070 
2071 	error = xfs_defer_init_item_caches();
2072 	if (error)
2073 		goto out_destroy_rcbagbt_cur_cache;
2074 
2075 	xfs_da_state_cache = kmem_cache_create("xfs_da_state",
2076 					      sizeof(struct xfs_da_state),
2077 					      0, 0, NULL);
2078 	if (!xfs_da_state_cache)
2079 		goto out_destroy_defer_item_cache;
2080 
2081 	xfs_ifork_cache = kmem_cache_create("xfs_ifork",
2082 					   sizeof(struct xfs_ifork),
2083 					   0, 0, NULL);
2084 	if (!xfs_ifork_cache)
2085 		goto out_destroy_da_state_cache;
2086 
2087 	xfs_trans_cache = kmem_cache_create("xfs_trans",
2088 					   sizeof(struct xfs_trans),
2089 					   0, 0, NULL);
2090 	if (!xfs_trans_cache)
2091 		goto out_destroy_ifork_cache;
2092 
2094 	/*
2095 	 * The size of the cache-allocated buf log item is the maximum
2096 	 * size possible under XFS.  This wastes a little bit of memory,
2097 	 * but it is much faster.
2098 	 */
2099 	xfs_buf_item_cache = kmem_cache_create("xfs_buf_item",
2100 					      sizeof(struct xfs_buf_log_item),
2101 					      0, 0, NULL);
2102 	if (!xfs_buf_item_cache)
2103 		goto out_destroy_trans_cache;
2104 
2105 	xfs_efd_cache = kmem_cache_create("xfs_efd_item",
2106 			xfs_efd_log_item_sizeof(XFS_EFD_MAX_FAST_EXTENTS),
2107 			0, 0, NULL);
2108 	if (!xfs_efd_cache)
2109 		goto out_destroy_buf_item_cache;
2110 
2111 	xfs_efi_cache = kmem_cache_create("xfs_efi_item",
2112 			xfs_efi_log_item_sizeof(XFS_EFI_MAX_FAST_EXTENTS),
2113 			0, 0, NULL);
2114 	if (!xfs_efi_cache)
2115 		goto out_destroy_efd_cache;
2116 
2117 	xfs_inode_cache = kmem_cache_create("xfs_inode",
2118 					   sizeof(struct xfs_inode), 0,
2119 					   (SLAB_HWCACHE_ALIGN |
2120 					    SLAB_RECLAIM_ACCOUNT |
2121 					    SLAB_ACCOUNT),
2122 					   xfs_fs_inode_init_once);
2123 	if (!xfs_inode_cache)
2124 		goto out_destroy_efi_cache;
2125 
2126 	xfs_ili_cache = kmem_cache_create("xfs_ili",
2127 					 sizeof(struct xfs_inode_log_item), 0,
2128 					 SLAB_RECLAIM_ACCOUNT,
2129 					 NULL);
2130 	if (!xfs_ili_cache)
2131 		goto out_destroy_inode_cache;
2132 
2133 	xfs_icreate_cache = kmem_cache_create("xfs_icr",
2134 					     sizeof(struct xfs_icreate_item),
2135 					     0, 0, NULL);
2136 	if (!xfs_icreate_cache)
2137 		goto out_destroy_ili_cache;
2138 
2139 	xfs_rud_cache = kmem_cache_create("xfs_rud_item",
2140 					 sizeof(struct xfs_rud_log_item),
2141 					 0, 0, NULL);
2142 	if (!xfs_rud_cache)
2143 		goto out_destroy_icreate_cache;
2144 
2145 	xfs_rui_cache = kmem_cache_create("xfs_rui_item",
2146 			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
2147 			0, 0, NULL);
2148 	if (!xfs_rui_cache)
2149 		goto out_destroy_rud_cache;
2150 
2151 	xfs_cud_cache = kmem_cache_create("xfs_cud_item",
2152 					 sizeof(struct xfs_cud_log_item),
2153 					 0, 0, NULL);
2154 	if (!xfs_cud_cache)
2155 		goto out_destroy_rui_cache;
2156 
2157 	xfs_cui_cache = kmem_cache_create("xfs_cui_item",
2158 			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
2159 			0, 0, NULL);
2160 	if (!xfs_cui_cache)
2161 		goto out_destroy_cud_cache;
2162 
2163 	xfs_bud_cache = kmem_cache_create("xfs_bud_item",
2164 					 sizeof(struct xfs_bud_log_item),
2165 					 0, 0, NULL);
2166 	if (!xfs_bud_cache)
2167 		goto out_destroy_cui_cache;
2168 
2169 	xfs_bui_cache = kmem_cache_create("xfs_bui_item",
2170 			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
2171 			0, 0, NULL);
2172 	if (!xfs_bui_cache)
2173 		goto out_destroy_bud_cache;
2174 
2175 	xfs_attrd_cache = kmem_cache_create("xfs_attrd_item",
2176 					    sizeof(struct xfs_attrd_log_item),
2177 					    0, 0, NULL);
2178 	if (!xfs_attrd_cache)
2179 		goto out_destroy_bui_cache;
2180 
2181 	xfs_attri_cache = kmem_cache_create("xfs_attri_item",
2182 					    sizeof(struct xfs_attri_log_item),
2183 					    0, 0, NULL);
2184 	if (!xfs_attri_cache)
2185 		goto out_destroy_attrd_cache;
2186 
2187 	xfs_iunlink_cache = kmem_cache_create("xfs_iul_item",
2188 					     sizeof(struct xfs_iunlink_item),
2189 					     0, 0, NULL);
2190 	if (!xfs_iunlink_cache)
2191 		goto out_destroy_attri_cache;
2192 
2193 	xfs_xmd_cache = kmem_cache_create("xfs_xmd_item",
2194 					 sizeof(struct xfs_xmd_log_item),
2195 					 0, 0, NULL);
2196 	if (!xfs_xmd_cache)
2197 		goto out_destroy_iul_cache;
2198 
2199 	xfs_xmi_cache = kmem_cache_create("xfs_xmi_item",
2200 					 sizeof(struct xfs_xmi_log_item),
2201 					 0, 0, NULL);
2202 	if (!xfs_xmi_cache)
2203 		goto out_destroy_xmd_cache;
2204 
2205 	return 0;
2206 
2207  out_destroy_xmd_cache:
2208 	kmem_cache_destroy(xfs_xmd_cache);
2209  out_destroy_iul_cache:
2210 	kmem_cache_destroy(xfs_iunlink_cache);
2211  out_destroy_attri_cache:
2212 	kmem_cache_destroy(xfs_attri_cache);
2213  out_destroy_attrd_cache:
2214 	kmem_cache_destroy(xfs_attrd_cache);
2215  out_destroy_bui_cache:
2216 	kmem_cache_destroy(xfs_bui_cache);
2217  out_destroy_bud_cache:
2218 	kmem_cache_destroy(xfs_bud_cache);
2219  out_destroy_cui_cache:
2220 	kmem_cache_destroy(xfs_cui_cache);
2221  out_destroy_cud_cache:
2222 	kmem_cache_destroy(xfs_cud_cache);
2223  out_destroy_rui_cache:
2224 	kmem_cache_destroy(xfs_rui_cache);
2225  out_destroy_rud_cache:
2226 	kmem_cache_destroy(xfs_rud_cache);
2227  out_destroy_icreate_cache:
2228 	kmem_cache_destroy(xfs_icreate_cache);
2229  out_destroy_ili_cache:
2230 	kmem_cache_destroy(xfs_ili_cache);
2231  out_destroy_inode_cache:
2232 	kmem_cache_destroy(xfs_inode_cache);
2233  out_destroy_efi_cache:
2234 	kmem_cache_destroy(xfs_efi_cache);
2235  out_destroy_efd_cache:
2236 	kmem_cache_destroy(xfs_efd_cache);
2237  out_destroy_buf_item_cache:
2238 	kmem_cache_destroy(xfs_buf_item_cache);
2239  out_destroy_trans_cache:
2240 	kmem_cache_destroy(xfs_trans_cache);
2241  out_destroy_ifork_cache:
2242 	kmem_cache_destroy(xfs_ifork_cache);
2243  out_destroy_da_state_cache:
2244 	kmem_cache_destroy(xfs_da_state_cache);
2245  out_destroy_defer_item_cache:
2246 	xfs_defer_destroy_item_caches();
2247  out_destroy_rcbagbt_cur_cache:
2248 	rcbagbt_destroy_cur_cache();
2249  out_destroy_btree_cur_cache:
2250 	xfs_btree_destroy_cur_caches();
2251  out_destroy_log_ticket_cache:
2252 	kmem_cache_destroy(xfs_log_ticket_cache);
2253  out_destroy_buf_cache:
2254 	kmem_cache_destroy(xfs_buf_cache);
2255  out:
2256 	return -ENOMEM;
2257 }
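
/*
 * Note the kmem_cache_create() signature used throughout:
 *
 *	kmem_cache_create(name, object_size, align, flags, ctor)
 *
 * It returns NULL on failure, which is why every branch above funnels into
 * a label that unwinds the caches created so far and returns -ENOMEM.
 */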
2258 
2259 STATIC void
2260 xfs_destroy_caches(void)
2261 {
2262 	/*
2263 	 * Make sure all delayed RCU frees are flushed before we
2264 	 * destroy caches.
2265 	 */
2266 	rcu_barrier();
2267 	kmem_cache_destroy(xfs_xmd_cache);
2268 	kmem_cache_destroy(xfs_xmi_cache);
2269 	kmem_cache_destroy(xfs_iunlink_cache);
2270 	kmem_cache_destroy(xfs_attri_cache);
2271 	kmem_cache_destroy(xfs_attrd_cache);
2272 	kmem_cache_destroy(xfs_bui_cache);
2273 	kmem_cache_destroy(xfs_bud_cache);
2274 	kmem_cache_destroy(xfs_cui_cache);
2275 	kmem_cache_destroy(xfs_cud_cache);
2276 	kmem_cache_destroy(xfs_rui_cache);
2277 	kmem_cache_destroy(xfs_rud_cache);
2278 	kmem_cache_destroy(xfs_icreate_cache);
2279 	kmem_cache_destroy(xfs_ili_cache);
2280 	kmem_cache_destroy(xfs_inode_cache);
2281 	kmem_cache_destroy(xfs_efi_cache);
2282 	kmem_cache_destroy(xfs_efd_cache);
2283 	kmem_cache_destroy(xfs_buf_item_cache);
2284 	kmem_cache_destroy(xfs_trans_cache);
2285 	kmem_cache_destroy(xfs_ifork_cache);
2286 	kmem_cache_destroy(xfs_da_state_cache);
2287 	xfs_defer_destroy_item_caches();
2288 	rcbagbt_destroy_cur_cache();
2289 	xfs_btree_destroy_cur_caches();
2290 	kmem_cache_destroy(xfs_log_ticket_cache);
2291 	kmem_cache_destroy(xfs_buf_cache);
2292 }
2293 
2294 STATIC int __init
2295 xfs_init_workqueues(void)
2296 {
2297 	/*
2298 	 * The allocation workqueue can be used in memory reclaim situations
2299 	 * (writepage path), and parallelism is only limited by the number of
2300 	 * AGs in all the filesystems mounted. Hence use the default large
2301 	 * max_active value for this workqueue.
2302 	 */
2303 	xfs_alloc_wq = alloc_workqueue("xfsalloc",
2304 			XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
2305 	if (!xfs_alloc_wq)
2306 		return -ENOMEM;
2307 
2308 	xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
2309 			0);
2310 	if (!xfs_discard_wq)
2311 		goto out_free_alloc_wq;
2312 
2313 	return 0;
2314 out_free_alloc_wq:
2315 	destroy_workqueue(xfs_alloc_wq);
2316 	return -ENOMEM;
2317 }
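
/*
 * WQ_MEM_RECLAIM guarantees a rescuer thread, so xfsalloc can always make
 * forward progress under memory pressure; passing 0 for max_active selects
 * the default workqueue concurrency limit.
 */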
2318 
2319 STATIC void
2320 xfs_destroy_workqueues(void)
2321 {
2322 	destroy_workqueue(xfs_discard_wq);
2323 	destroy_workqueue(xfs_alloc_wq);
2324 }
2325 
2326 STATIC int __init
2327 init_xfs_fs(void)
2328 {
2329 	int			error;
2330 
2331 	xfs_check_ondisk_structs();
2332 
2333 	error = xfs_dahash_test();
2334 	if (error)
2335 		return error;
2336 
2337 	printk(KERN_INFO XFS_VERSION_STRING " with "
2338 			 XFS_BUILD_OPTIONS " enabled\n");
2339 
2340 	xfs_dir_startup();
2341 
2342 	error = xfs_init_caches();
2343 	if (error)
2344 		goto out;
2345 
2346 	error = xfs_init_workqueues();
2347 	if (error)
2348 		goto out_destroy_caches;
2349 
2350 	error = xfs_mru_cache_init();
2351 	if (error)
2352 		goto out_destroy_wq;
2353 
2354 	error = xfs_init_procfs();
2355 	if (error)
2356 		goto out_mru_cache_uninit;
2357 
2358 	error = xfs_sysctl_register();
2359 	if (error)
2360 		goto out_cleanup_procfs;
2361 
2362 	xfs_debugfs = xfs_debugfs_mkdir("xfs", NULL);
2363 
2364 	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
2365 	if (!xfs_kset) {
2366 		error = -ENOMEM;
2367 		goto out_debugfs_unregister;
2368 	}
2369 
2370 	xfsstats.xs_kobj.kobject.kset = xfs_kset;
2371 
2372 	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
2373 	if (!xfsstats.xs_stats) {
2374 		error = -ENOMEM;
2375 		goto out_kset_unregister;
2376 	}
2377 
2378 	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
2379 			       "stats");
2380 	if (error)
2381 		goto out_free_stats;
2382 
2383 	error = xchk_global_stats_setup(xfs_debugfs);
2384 	if (error)
2385 		goto out_remove_stats_kobj;
2386 
2387 #ifdef DEBUG
2388 	xfs_dbg_kobj.kobject.kset = xfs_kset;
2389 	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
2390 	if (error)
2391 		goto out_remove_scrub_stats;
2392 #endif
2393 
2394 	error = xfs_qm_init();
2395 	if (error)
2396 		goto out_remove_dbg_kobj;
2397 
2398 	error = register_filesystem(&xfs_fs_type);
2399 	if (error)
2400 		goto out_qm_exit;
2401 	return 0;
2402 
2403  out_qm_exit:
2404 	xfs_qm_exit();
2405  out_remove_dbg_kobj:
2406 #ifdef DEBUG
2407 	xfs_sysfs_del(&xfs_dbg_kobj);
2408  out_remove_scrub_stats:
2409 #endif
2410 	xchk_global_stats_teardown();
2411  out_remove_stats_kobj:
2412 	xfs_sysfs_del(&xfsstats.xs_kobj);
2413  out_free_stats:
2414 	free_percpu(xfsstats.xs_stats);
2415  out_kset_unregister:
2416 	kset_unregister(xfs_kset);
2417  out_debugfs_unregister:
2418 	debugfs_remove(xfs_debugfs);
2419 	xfs_sysctl_unregister();
2420  out_cleanup_procfs:
2421 	xfs_cleanup_procfs();
2422  out_mru_cache_uninit:
2423 	xfs_mru_cache_uninit();
2424  out_destroy_wq:
2425 	xfs_destroy_workqueues();
2426  out_destroy_caches:
2427 	xfs_destroy_caches();
2428  out:
2429 	return error;
2430 }
2431 
2432 STATIC void __exit
2433 exit_xfs_fs(void)
2434 {
2435 	xfs_qm_exit();
2436 	unregister_filesystem(&xfs_fs_type);
2437 #ifdef DEBUG
2438 	xfs_sysfs_del(&xfs_dbg_kobj);
2439 #endif
2440 	xchk_global_stats_teardown();
2441 	xfs_sysfs_del(&xfsstats.xs_kobj);
2442 	free_percpu(xfsstats.xs_stats);
2443 	kset_unregister(xfs_kset);
2444 	debugfs_remove(xfs_debugfs);
2445 	xfs_sysctl_unregister();
2446 	xfs_cleanup_procfs();
2447 	xfs_mru_cache_uninit();
2448 	xfs_destroy_workqueues();
2449 	xfs_destroy_caches();
2450 	xfs_uuid_table_free();
2451 }
2452 
2453 module_init(init_xfs_fs);
2454 module_exit(exit_xfs_fs);
2455 
2456 MODULE_AUTHOR("Silicon Graphics, Inc.");
2457 MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
2458 MODULE_LICENSE("GPL");
2459