// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_dir2.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_icreate_item.h"
#include "xfs_filestream.h"
#include "xfs_quota.h"
#include "xfs_sysfs.h"
#include "xfs_ondisk.h"
#include "xfs_rmap_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"

#include <linux/magic.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

static const struct super_operations xfs_super_operations;

static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
#ifdef DEBUG
static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
#endif

/*
 * Table driven mount option parser.
 */
enum {
	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
	Opt_discard, Opt_nodiscard, Opt_dax,
};

static const struct fs_parameter_spec xfs_param_specs[] = {
	fsparam_u32("logbufs",		Opt_logbufs),
	fsparam_string("logbsize",	Opt_logbsize),
	fsparam_string("logdev",	Opt_logdev),
	fsparam_string("rtdev",		Opt_rtdev),
	fsparam_flag("wsync",		Opt_wsync),
	fsparam_flag("noalign",		Opt_noalign),
	fsparam_flag("swalloc",		Opt_swalloc),
	fsparam_u32("sunit",		Opt_sunit),
	fsparam_u32("swidth",		Opt_swidth),
	fsparam_flag("nouuid",		Opt_nouuid),
	fsparam_flag("grpid",		Opt_grpid),
	fsparam_flag("nogrpid",		Opt_nogrpid),
	fsparam_flag("bsdgroups",	Opt_bsdgroups),
	fsparam_flag("sysvgroups",	Opt_sysvgroups),
	fsparam_string("allocsize",	Opt_allocsize),
	fsparam_flag("norecovery",	Opt_norecovery),
	fsparam_flag("inode64",		Opt_inode64),
	fsparam_flag("inode32",		Opt_inode32),
	fsparam_flag("ikeep",		Opt_ikeep),
	fsparam_flag("noikeep",		Opt_noikeep),
	fsparam_flag("largeio",		Opt_largeio),
	fsparam_flag("nolargeio",	Opt_nolargeio),
	fsparam_flag("attr2",		Opt_attr2),
	fsparam_flag("noattr2",		Opt_noattr2),
	fsparam_flag("filestreams",	Opt_filestreams),
	fsparam_flag("quota",		Opt_quota),
	fsparam_flag("noquota",		Opt_noquota),
	fsparam_flag("usrquota",	Opt_usrquota),
	fsparam_flag("grpquota",	Opt_grpquota),
	fsparam_flag("prjquota",	Opt_prjquota),
	fsparam_flag("uquota",		Opt_uquota),
	fsparam_flag("gquota",		Opt_gquota),
	fsparam_flag("pquota",		Opt_pquota),
	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
	fsparam_flag("qnoenforce",	Opt_qnoenforce),
	fsparam_flag("discard",		Opt_discard),
	fsparam_flag("nodiscard",	Opt_nodiscard),
	fsparam_flag("dax",		Opt_dax),
	{}
};

static const struct fs_parameter_description xfs_fs_parameters = {
	.name		= "xfs",
	.specs		= xfs_param_specs,
};
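
/*
 * Illustrative note (not in the original source): with the table above,
 * a mount option string such as "logbufs=8,logbsize=64k" is handled one
 * parameter at a time by fs_parse() -- "logbufs" matches the
 * fsparam_u32() entry and reaches xfs_fc_parse_param() as Opt_logbufs
 * with result.uint_32 == 8, while "logbsize" matches a fsparam_string()
 * entry and its value is converted from param->string by
 * suffix_kstrtoint() further below.
 */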

struct proc_xfs_info {
	uint64_t	flag;
	char		*str;
};

static int
xfs_fs_show_options(
	struct seq_file		*m,
	struct dentry		*root)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_IKEEP,		",ikeep" },
		{ XFS_MOUNT_WSYNC,		",wsync" },
		{ XFS_MOUNT_NOALIGN,		",noalign" },
		{ XFS_MOUNT_SWALLOC,		",swalloc" },
		{ XFS_MOUNT_NOUUID,		",nouuid" },
		{ XFS_MOUNT_NORECOVERY,		",norecovery" },
		{ XFS_MOUNT_ATTR2,		",attr2" },
		{ XFS_MOUNT_FILESTREAMS,	",filestreams" },
		{ XFS_MOUNT_GRPID,		",grpid" },
		{ XFS_MOUNT_DISCARD,		",discard" },
		{ XFS_MOUNT_LARGEIO,		",largeio" },
		{ XFS_MOUNT_DAX,		",dax" },
		{ 0, NULL }
	};
	struct xfs_mount	*mp = XFS_M(root->d_sb);
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_flags & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}

	seq_printf(m, ",inode%d",
		(mp->m_flags & XFS_MOUNT_SMALL_INUMS) ? 32 : 64);

	if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
		seq_printf(m, ",allocsize=%dk",
			   (1 << mp->m_allocsize_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);

	if (mp->m_logname)
		seq_show_option(m, "logdev", mp->m_logname);
	if (mp->m_rtname)
		seq_show_option(m, "rtdev", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, ",sunit=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, ",swidth=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
		seq_puts(m, ",usrquota");
	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
		seq_puts(m, ",uqnoenforce");

	if (mp->m_qflags & XFS_PQUOTA_ACCT) {
		if (mp->m_qflags & XFS_PQUOTA_ENFD)
			seq_puts(m, ",prjquota");
		else
			seq_puts(m, ",pqnoenforce");
	}
	if (mp->m_qflags & XFS_GQUOTA_ACCT) {
		if (mp->m_qflags & XFS_GQUOTA_ENFD)
			seq_puts(m, ",grpquota");
		else
			seq_puts(m, ",gqnoenforce");
	}

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, ",noquota");

	return 0;
}
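
/*
 * Illustrative output (assumed, not guaranteed): for a default mount of
 * a V5 filesystem, /proc/mounts typically shows something like
 *
 *	/dev/sda1 /mnt xfs rw,relatime,attr2,inode64,noquota 0 0
 *
 * where the generic "rw,relatime" part comes from the VFS and the rest
 * is appended by xfs_fs_show_options() above.
 */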

/*
 * Set parameters for inode allocation heuristics, taking into account
 * filesystem size and inode32/inode64 mount options; i.e. specifically
 * whether or not XFS_MOUNT_SMALL_INUMS is set.
 *
 * Inode allocation patterns are altered only if inode32 is requested
 * (XFS_MOUNT_SMALL_INUMS), and the filesystem is sufficiently large.
 * If altered, XFS_MOUNT_32BITINODES is set as well.
 *
 * An agcount independent of that in the mount structure is provided
 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 * to the potentially higher ag count.
 *
 * Returns the maximum AG index which may contain inodes.
 */
xfs_agnumber_t
xfs_set_inode_alloc(
	struct xfs_mount *mp,
	xfs_agnumber_t	agcount)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	maxagi = 0;
	xfs_sb_t	*sbp = &mp->m_sb;
	xfs_agnumber_t	max_metadata;
	xfs_agino_t	agino;
	xfs_ino_t	ino;

	/*
	 * Calculate how much should be reserved for inodes to meet
	 * the max inode percentage.  Used only for inode32.
	 */
	if (M_IGEO(mp)->maxicount) {
		uint64_t	icount;

		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		icount += sbp->sb_agblocks - 1;
		do_div(icount, sbp->sb_agblocks);
		max_metadata = icount;
	} else {
		max_metadata = agcount;
	}

	/* Get the last possible inode in the filesystem */
	agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/*
	 * If user asked for no more than 32-bit inodes, and the fs is
	 * sufficiently large, set XFS_MOUNT_32BITINODES if we must alter
	 * the allocator to accommodate the request.
	 */
	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
		mp->m_flags |= XFS_MOUNT_32BITINODES;
	else
		mp->m_flags &= ~XFS_MOUNT_32BITINODES;

	for (index = 0; index < agcount; index++) {
		struct xfs_perag	*pag;

		ino = XFS_AGINO_TO_INO(mp, index, agino);

		pag = xfs_perag_get(mp, index);

		if (mp->m_flags & XFS_MOUNT_32BITINODES) {
			if (ino > XFS_MAXINUMBER_32) {
				pag->pagi_inodeok = 0;
				pag->pagf_metadata = 0;
			} else {
				pag->pagi_inodeok = 1;
				maxagi++;
				if (index < max_metadata)
					pag->pagf_metadata = 1;
				else
					pag->pagf_metadata = 0;
			}
		} else {
			pag->pagi_inodeok = 1;
			pag->pagf_metadata = 0;
		}

		xfs_perag_put(pag);
	}

	return (mp->m_flags & XFS_MOUNT_32BITINODES) ? maxagi : agcount;
}
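
/*
 * Worked example (illustrative): on a filesystem big enough that the
 * last possible inode number exceeds XFS_MAXINUMBER_32, an inode32
 * mount sets XFS_MOUNT_32BITINODES and the loop above allows inodes
 * only in the low AGs whose highest inode number still fits in 32
 * bits; the first max_metadata of those are also marked pagf_metadata,
 * so file data is steered towards the higher AGs.
 */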

STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				    mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
	}

	return error;
}

STATIC void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

void
xfs_blkdev_issue_flush(
	xfs_buftarg_t		*buftarg)
{
	blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS, NULL);
}

STATIC void
xfs_close_devices(
	struct xfs_mount	*mp)
{
	struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
		struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_logdev_targp);
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
	if (mp->m_rtdev_targp) {
		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
		struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_rtdev_targp);
		xfs_blkdev_put(rtdev);
		fs_put_dax(dax_rtdev);
	}
	xfs_free_buftarg(mp->m_ddev_targp);
	fs_put_dax(dax_ddev);
}

/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
	struct xfs_mount	*mp)
{
	struct block_device	*ddev = mp->m_super->s_bdev;
	struct dax_device	*dax_ddev = fs_dax_get_by_bdev(ddev);
	struct dax_device	*dax_logdev = NULL, *dax_rtdev = NULL;
	struct block_device	*logdev = NULL, *rtdev = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
		if (error)
			goto out;
		dax_logdev = fs_dax_get_by_bdev(logdev);
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
		if (error)
			goto out_close_logdev;

		if (rtdev == ddev || rtdev == logdev) {
			xfs_warn(mp,
	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = -EINVAL;
			goto out_close_rtdev;
		}
		dax_rtdev = fs_dax_get_by_bdev(rtdev);
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = -ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev && logdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		mp->m_logdev_targp = mp->m_ddev_targp;
	}

	return 0;

 out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp);
 out_free_ddev_targ:
	xfs_free_buftarg(mp->m_ddev_targp);
 out_close_rtdev:
	xfs_blkdev_put(rtdev);
	fs_put_dax(dax_rtdev);
 out_close_logdev:
	if (logdev && logdev != ddev) {
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
 out:
	fs_put_dax(dax_ddev);
	return error;
}

/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		if (xfs_sb_version_hassector(&mp->m_sb))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}

STATIC int
xfs_init_mount_workqueues(
	struct xfs_mount	*mp)
{
	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 1, mp->m_super->s_id);
	if (!mp->m_buf_workqueue)
		goto out;

	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
	if (!mp->m_unwritten_workqueue)
		goto out_destroy_buf;

	mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
			WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND,
			0, mp->m_super->s_id);
	if (!mp->m_cil_workqueue)
		goto out_destroy_unwritten;

	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
	if (!mp->m_reclaim_workqueue)
		goto out_destroy_cil;

	mp->m_eofblocks_workqueue = alloc_workqueue("xfs-eofblocks/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
	if (!mp->m_eofblocks_workqueue)
		goto out_destroy_reclaim;

	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", WQ_FREEZABLE, 0,
					       mp->m_super->s_id);
	if (!mp->m_sync_workqueue)
		goto out_destroy_eofb;

	return 0;

out_destroy_eofb:
	destroy_workqueue(mp->m_eofblocks_workqueue);
out_destroy_reclaim:
	destroy_workqueue(mp->m_reclaim_workqueue);
out_destroy_cil:
	destroy_workqueue(mp->m_cil_workqueue);
out_destroy_unwritten:
	destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_buf:
	destroy_workqueue(mp->m_buf_workqueue);
out:
	return -ENOMEM;
}
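
/*
 * Naming note (illustrative): alloc_workqueue() expands the "%s" in the
 * names above with mp->m_super->s_id, so a mount of /dev/sda1 ends up
 * with workqueues called "xfs-buf/sda1", "xfs-conv/sda1" and so on,
 * which is how they appear in workqueue tracepoints and debug output.
 */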

STATIC void
xfs_destroy_mount_workqueues(
	struct xfs_mount	*mp)
{
	destroy_workqueue(mp->m_sync_workqueue);
	destroy_workqueue(mp->m_eofblocks_workqueue);
	destroy_workqueue(mp->m_reclaim_workqueue);
	destroy_workqueue(mp->m_cil_workqueue);
	destroy_workqueue(mp->m_unwritten_workqueue);
	destroy_workqueue(mp->m_buf_workqueue);
}

/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 * for IO to complete so that we effectively throttle multiple callers to the
 * rate at which IO is completing.
 */
void
xfs_flush_inodes(
	struct xfs_mount	*mp)
{
	struct super_block	*sb = mp->m_super;

	if (down_read_trylock(&sb->s_umount)) {
		sync_inodes_sb(sb);
		up_read(&sb->s_umount);
	}
}

/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}

#ifdef DEBUG
static void
xfs_check_delalloc(
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;

	if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
		return;
	do {
		if (isnullstartblock(got.br_startblock)) {
			xfs_warn(ip->i_mount,
	"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
				ip->i_ino,
				whichfork == XFS_DATA_FORK ? "data" : "cow",
				got.br_startoff, got.br_blockcount);
		}
	} while (xfs_iext_next_extent(ifp, &icur, &got));
}
#else
#define xfs_check_delalloc(ip, whichfork)	do { } while (0)
#endif

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	XFS_STATS_INC(ip->i_mount, vn_rele);
	XFS_STATS_INC(ip->i_mount, vn_remove);

	xfs_inactive(ip);

	if (!XFS_FORCED_SHUTDOWN(ip->i_mount) && ip->i_delayed_blks) {
		xfs_check_delalloc(ip, XFS_DATA_FORK);
		xfs_check_delalloc(ip, XFS_COW_FORK);
		ASSERT(0);
	}

	XFS_STATS_INC(ip->i_mount, vn_reclaim);

	/*
	 * We should never get here with one of the reclaim flags already set.
	 */
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));

	/*
	 * We always use background reclaim here because even if the
	 * inode is clean, it still may be under IO and hence we have
	 * to take the flush lock. The background reclaim path handles
	 * this more efficiently than we can here, so simply let background
	 * reclaim tear down all inodes.
	 */
	xfs_inode_set_reclaim_tag(ip);
}

static void
xfs_fs_dirty_inode(
	struct inode			*inode,
	int				flag)
{
	struct xfs_inode		*ip = XFS_I(inode);
	struct xfs_mount		*mp = ip->i_mount;
	struct xfs_trans		*tp;

	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
		return;
	if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
		return;

	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
		return;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
	xfs_trans_commit(tp);
}
/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when the inode is freed.
 */
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);

	mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
}

/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not
 * race with us, and it avoids the need to grab a spinlock here for every
 * inode we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	/*
	 * If this unlinked inode is in the middle of recovery, don't
	 * drop the inode just yet; log recovery will take care of
	 * that.  See the comment for this inode flag.
	 */
	if (ip->i_flags & XFS_IRECOVERY) {
		ASSERT(ip->i_mount->m_log->l_flags & XLOG_RECOVERY_NEEDED);
		return 0;
	}

	return generic_drop_inode(inode) || (ip->i_flags & XFS_IDONTCACHE);
}

static void
xfs_mount_free(
	struct xfs_mount	*mp)
{
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
	kmem_free(mp);
}

STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/*
	 * Doing anything during the async pass would be counterproductive.
	 */
	if (!wait)
		return 0;

	xfs_log_force(mp, XFS_LOG_SYNC);
	if (laptop_mode) {
		/*
		 * The disk must be active because we're syncing.
		 * We schedule log work now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		flush_delayed_work(&mp->m_log->l_work);
	}

	return 0;
}

STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	uint64_t		fakeinos, id;
	uint64_t		icount;
	uint64_t		ifree;
	uint64_t		fdblocks;
	xfs_extlen_t		lsize;
	int64_t			ffree;

	statp->f_type = XFS_SUPER_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid.val[0] = (u32)id;
	statp->f_fsid.val[1] = (u32)(id >> 32);

	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	spin_unlock(&mp->m_sb_lock);

	statp->f_bfree = fdblocks - mp->m_alloc_set_aside;
	statp->f_bavail = statp->f_bfree;

	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
	if (M_IGEO(mp)->maxicount)
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
					M_IGEO(mp)->maxicount);

	/* If sb_icount overshot maxicount, report actual allocation */
	statp->f_files = max_t(typeof(statp->f_files),
					statp->f_files,
					sbp->sb_icount);

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (icount - ifree);
	statp->f_ffree = max_t(int64_t, ffree, 0);

	if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
	    (mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD)) ==
			    (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);

	if (XFS_IS_REALTIME_MOUNT(mp) &&
	    (ip->i_d.di_flags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
		statp->f_blocks = sbp->sb_rblocks;
		statp->f_bavail = statp->f_bfree =
			sbp->sb_frextents * sbp->sb_rextsize;
	}

	return 0;
}
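
/*
 * Worked example (illustrative): with icount = 1000 inodes allocated,
 * ifree = 100 of them unused, and enough free blocks for fakeinos =
 * 1000000 more, f_files is reported as 1001000 (subject to the
 * maxicount/sb_icount clamps above) and f_ffree as 1001000 - 900 =
 * 1000100, i.e. free space is counted as potential new inodes.
 */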

STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks = 0;

	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks;

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, &resblks, NULL);
}

/*
 * Trigger writeback of all the dirty metadata in the file system.
 *
 * This ensures that the metadata is written to its location on disk rather
 * than just existing in transactions in the log. This means after a quiesce
 * there is no log replay required to write the inodes to disk - this is the
 * primary difference between a sync and a quiesce.
 *
 * Note: xfs_log_quiesce() stops background log work - the callers must ensure
 * it is started again when appropriate.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* force the log to unpin objects from the now complete transactions */
	xfs_log_force(mp, XFS_LOG_SYNC);

	/* reclaim inodes to do any IO before the freeze completes */
	xfs_reclaim_inodes(mp, 0);
	xfs_reclaim_inodes(mp, SYNC_WAIT);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp);
	if (error)
		xfs_warn(mp, "xfs_quiesce_attr: failed to log sb changes. "
				"Frozen image may not be consistent.");
	/*
	 * Just warn here until the VFS can correctly support
	 * read-only remount without racing.
	 */
	WARN_ON(atomic_read(&mp->m_active_trans) != 0);

	xfs_log_quiesce(mp);
}

/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done sync the superblock
 * to the log to dirty it in case of a crash while frozen. This ensures that we
 * will recover the unlinked inode lists on the next mount.
 */
STATIC int
xfs_fs_freeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_stop_block_reaping(mp);
	xfs_save_resvblks(mp);
	xfs_quiesce_attr(mp);
	return xfs_sync_sb(mp, true);
}

STATIC int
xfs_fs_unfreeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);
	xfs_start_block_reaping(mp);
	return 0;
}

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount	*mp)
{
	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);

	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_sb_version_haslogv2(&mp->m_sb)) {
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			xfs_warn(mp,
		"logbuf size must be greater than or equal to log stripe size");
			return -EINVAL;
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			xfs_warn(mp,
		"logbuf size for version 1 logs must be 16K or 32K");
			return -EINVAL;
		}
	}

	/*
	 * V5 filesystems always use attr2 format for attributes.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb) &&
	    (mp->m_flags & XFS_MOUNT_NOATTR2)) {
		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
			     "attr2 is always enabled for V5 filesystems.");
		return -EINVAL;
	}

	/*
	 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
	 * told by noattr2 to turn it off
	 */
	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_NOATTR2))
		mp->m_flags |= XFS_MOUNT_ATTR2;

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
		xfs_warn(mp,
			"cannot mount a read-only filesystem as read-write");
		return -EROFS;
	}

	if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
	    (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE)) &&
	    !xfs_sb_version_has_pquotino(&mp->m_sb)) {
		xfs_warn(mp,
		  "Super block does not support project and group quota together");
		return -EINVAL;
	}

	return 0;
}

static int
xfs_init_percpu_counters(
	struct xfs_mount	*mp)
{
	int		error;

	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
	if (error)
		return -ENOMEM;

	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
	if (error)
		goto free_icount;

	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
	if (error)
		goto free_ifree;

	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
	if (error)
		goto free_fdblocks;

	return 0;

free_fdblocks:
	percpu_counter_destroy(&mp->m_fdblocks);
free_ifree:
	percpu_counter_destroy(&mp->m_ifree);
free_icount:
	percpu_counter_destroy(&mp->m_icount);
	return -ENOMEM;
}

void
xfs_reinit_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
}

static void
xfs_destroy_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_destroy(&mp->m_icount);
	percpu_counter_destroy(&mp->m_ifree);
	percpu_counter_destroy(&mp->m_fdblocks);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
	percpu_counter_destroy(&mp->m_delalloc_blks);
}

static void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/* if ->fill_super failed, we have no mount to tear down */
	if (!sb->s_fs_info)
		return;

	xfs_notice(mp, "Unmounting Filesystem");
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);

	xfs_freesb(mp);
	free_percpu(mp->m_stats.xs_stats);
	xfs_destroy_percpu_counters(mp);
	xfs_destroy_mount_workqueues(mp);
	xfs_close_devices(mp);

	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
}

static long
xfs_fs_nr_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	/* Paranoia: catch incorrect calls during mount setup or teardown */
	if (WARN_ON_ONCE(!sb->s_fs_info))
		return 0;
	return xfs_reclaim_inodes_count(XFS_M(sb));
}

static long
xfs_fs_free_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}

static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.dirty_inode		= xfs_fs_dirty_inode,
	.drop_inode		= xfs_fs_drop_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.show_options		= xfs_fs_show_options,
	.nr_cached_objects	= xfs_fs_nr_cached_objects,
	.free_cached_objects	= xfs_fs_free_cached_objects,
};

static int
suffix_kstrtoint(
	const char	*s,
	unsigned int	base,
	int		*res)
{
	int		last, shift_left_factor = 0, _res;
	char		*value;
	int		ret = 0;

	value = kstrdup(s, GFP_KERNEL);
	if (!value)
		return -ENOMEM;

	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}

	if (kstrtoint(value, base, &_res))
		ret = -EINVAL;
	else
		*res = _res << shift_left_factor;
	kfree(value);
	return ret;
}
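
/*
 * Usage example (illustrative): suffix_kstrtoint("64k", 10, &v) strips
 * the suffix, parses "64" and stores 64 << 10 == 65536 in v; a plain
 * "65536" yields the same value, while a malformed string such as
 * "64q" makes kstrtoint() fail and -EINVAL is returned.
 */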

/*
 * Set mount state from a mount option.
 *
 * NOTE: mp->m_super is NULL here!
 */
static int
xfs_fc_parse_param(
	struct fs_context	*fc,
	struct fs_parameter	*param)
{
	struct xfs_mount	*mp = fc->s_fs_info;
	struct fs_parse_result	result;
	int			size = 0;
	int			opt;

	opt = fs_parse(fc, &xfs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_logbufs:
		mp->m_logbufs = result.uint_32;
		return 0;
	case Opt_logbsize:
		if (suffix_kstrtoint(param->string, 10, &mp->m_logbsize))
			return -EINVAL;
		return 0;
	case Opt_logdev:
		kfree(mp->m_logname);
		mp->m_logname = kstrdup(param->string, GFP_KERNEL);
		if (!mp->m_logname)
			return -ENOMEM;
		return 0;
	case Opt_rtdev:
		kfree(mp->m_rtname);
		mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
		if (!mp->m_rtname)
			return -ENOMEM;
		return 0;
	case Opt_allocsize:
		if (suffix_kstrtoint(param->string, 10, &size))
			return -EINVAL;
		mp->m_allocsize_log = ffs(size) - 1;
		mp->m_flags |= XFS_MOUNT_ALLOCSIZE;
		return 0;
	case Opt_grpid:
	case Opt_bsdgroups:
		mp->m_flags |= XFS_MOUNT_GRPID;
		return 0;
	case Opt_nogrpid:
	case Opt_sysvgroups:
		mp->m_flags &= ~XFS_MOUNT_GRPID;
		return 0;
	case Opt_wsync:
		mp->m_flags |= XFS_MOUNT_WSYNC;
		return 0;
	case Opt_norecovery:
		mp->m_flags |= XFS_MOUNT_NORECOVERY;
		return 0;
	case Opt_noalign:
		mp->m_flags |= XFS_MOUNT_NOALIGN;
		return 0;
	case Opt_swalloc:
		mp->m_flags |= XFS_MOUNT_SWALLOC;
		return 0;
	case Opt_sunit:
		mp->m_dalign = result.uint_32;
		return 0;
	case Opt_swidth:
		mp->m_swidth = result.uint_32;
		return 0;
	case Opt_inode32:
		mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
		return 0;
	case Opt_inode64:
		mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
		return 0;
	case Opt_nouuid:
		mp->m_flags |= XFS_MOUNT_NOUUID;
		return 0;
	case Opt_ikeep:
		mp->m_flags |= XFS_MOUNT_IKEEP;
		return 0;
	case Opt_noikeep:
		mp->m_flags &= ~XFS_MOUNT_IKEEP;
		return 0;
	case Opt_largeio:
		mp->m_flags |= XFS_MOUNT_LARGEIO;
		return 0;
	case Opt_nolargeio:
		mp->m_flags &= ~XFS_MOUNT_LARGEIO;
		return 0;
	case Opt_attr2:
		mp->m_flags |= XFS_MOUNT_ATTR2;
		return 0;
	case Opt_noattr2:
		mp->m_flags &= ~XFS_MOUNT_ATTR2;
		mp->m_flags |= XFS_MOUNT_NOATTR2;
		return 0;
	case Opt_filestreams:
		mp->m_flags |= XFS_MOUNT_FILESTREAMS;
		return 0;
	case Opt_noquota:
		mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
		mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
		mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
		return 0;
	case Opt_quota:
	case Opt_uquota:
	case Opt_usrquota:
		mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
				 XFS_UQUOTA_ENFD);
		return 0;
	case Opt_qnoenforce:
	case Opt_uqnoenforce:
		mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
		mp->m_qflags &= ~XFS_UQUOTA_ENFD;
		return 0;
	case Opt_pquota:
	case Opt_prjquota:
		mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
				 XFS_PQUOTA_ENFD);
		return 0;
	case Opt_pqnoenforce:
		mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
		mp->m_qflags &= ~XFS_PQUOTA_ENFD;
		return 0;
	case Opt_gquota:
	case Opt_grpquota:
		mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
				 XFS_GQUOTA_ENFD);
		return 0;
	case Opt_gqnoenforce:
		mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
		mp->m_qflags &= ~XFS_GQUOTA_ENFD;
		return 0;
	case Opt_discard:
		mp->m_flags |= XFS_MOUNT_DISCARD;
		return 0;
	case Opt_nodiscard:
		mp->m_flags &= ~XFS_MOUNT_DISCARD;
		return 0;
#ifdef CONFIG_FS_DAX
	case Opt_dax:
		mp->m_flags |= XFS_MOUNT_DAX;
		return 0;
#endif
	default:
		xfs_warn(mp, "unknown mount option [%s].", param->key);
		return -EINVAL;
	}

	return 0;
}

static int
xfs_fc_validate_params(
	struct xfs_mount	*mp)
{
	/*
	 * no recovery flag requires a read-only mount
	 */
	if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_warn(mp, "no-recovery mounts must be read-only.");
		return -EINVAL;
	}

	if ((mp->m_flags & XFS_MOUNT_NOALIGN) &&
	    (mp->m_dalign || mp->m_swidth)) {
		xfs_warn(mp,
	"sunit and swidth options incompatible with the noalign option");
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
		xfs_warn(mp, "quota support not available in this kernel.");
		return -EINVAL;
	}

	if ((mp->m_dalign && !mp->m_swidth) ||
	    (!mp->m_dalign && mp->m_swidth)) {
		xfs_warn(mp, "sunit and swidth must be specified together");
		return -EINVAL;
	}

	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
		xfs_warn(mp,
	"stripe width (%d) must be a multiple of the stripe unit (%d)",
			mp->m_swidth, mp->m_dalign);
		return -EINVAL;
	}

	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return -EINVAL;
	}

	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize !=  0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		xfs_warn(mp,
			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
			mp->m_logbsize);
		return -EINVAL;
	}

	if ((mp->m_flags & XFS_MOUNT_ALLOCSIZE) &&
	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
		return -EINVAL;
	}

	return 0;
}
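
/*
 * Illustrative rejects (assumed examples): "noalign,sunit=128",
 * "sunit=128" without swidth, "sunit=64,swidth=100" (width not a
 * multiple of the unit), "logbufs=1" (below XLOG_MIN_ICLOGS) and
 * "logbsize=48k" (not a power of two) all fail above with -EINVAL
 * before the mount proper starts.
 */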

static int
xfs_fc_fill_super(
	struct super_block	*sb,
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = sb->s_fs_info;
	struct inode		*root;
	int			flags = 0, error;

	mp->m_super = sb;

	error = xfs_fc_validate_params(mp);
	if (error)
		goto out_free_names;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	sb->s_op = &xfs_super_operations;

	/*
	 * Delay mount work if the debug hook is set. This is debug
	 * instrumentation to coordinate simulation of xfs mount failures with
	 * VFS superblock operations.
	 */
	if (xfs_globals.mount_delay) {
		xfs_notice(mp, "Delaying mount for %d seconds.",
			xfs_globals.mount_delay);
		msleep(xfs_globals.mount_delay * 1000);
	}

	if (fc->sb_flags & SB_SILENT)
		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
	if (error)
		goto out_free_names;

	error = xfs_init_mount_workqueues(mp);
	if (error)
		goto out_close_devices;

	error = xfs_init_percpu_counters(mp);
	if (error)
		goto out_destroy_workqueues;

	/* Allocate stats memory before we do operations that might use it */
	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
	if (!mp->m_stats.xs_stats) {
		error = -ENOMEM;
		goto out_destroy_counters;
	}

	error = xfs_readsb(mp, flags);
	if (error)
		goto out_free_stats;

	error = xfs_finish_flags(mp);
	if (error)
		goto out_free_sb;

	error = xfs_setup_devices(mp);
	if (error)
		goto out_free_sb;

	/*
	 * XFS block mappings use 54 bits to store the logical block offset.
	 * This should suffice to handle the maximum file size that the VFS
	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
	 * to check this assertion.
	 *
	 * Avoid integer overflow by comparing the maximum bmbt offset to the
	 * maximum pagecache offset in units of fs blocks.
	 */
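	/*
	 * Worked numbers (illustrative): with 4096-byte blocks on a
	 * 64-bit kernel, MAX_LFS_FILESIZE is 2^63 - 1 bytes, so the
	 * conversion below yields roughly 2^51 blocks -- comfortably
	 * under the 2^54 - 1 limit of the on-disk extent format -- and
	 * the check passes.
	 */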
	if (XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE) > XFS_MAX_FILEOFF) {
		xfs_warn(mp,
"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
			 XFS_MAX_FILEOFF);
		error = -EINVAL;
		goto out_free_sb;
	}

	error = xfs_filestream_mount(mp);
	if (error)
		goto out_free_sb;

	/*
	 * we must configure the block size in the superblock before we run the
	 * full mount process as the mount process can lookup and cache inodes.
	 */
	sb->s_magic = XFS_SUPER_MAGIC;
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_max_links = XFS_MAXLINK;
	sb->s_time_gran = 1;
	sb->s_time_min = S32_MIN;
	sb->s_time_max = S32_MAX;
	sb->s_iflags |= SB_I_CGROUPWB;

	set_posix_acl_flag(sb);

	/* version 5 superblocks support inode version counters. */
	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
		sb->s_flags |= SB_I_VERSION;

	if (mp->m_flags & XFS_MOUNT_DAX) {
		bool rtdev_is_dax = false, datadev_is_dax;

		xfs_warn(mp,
		"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");

		datadev_is_dax = bdev_dax_supported(mp->m_ddev_targp->bt_bdev,
			sb->s_blocksize);
		if (mp->m_rtdev_targp)
			rtdev_is_dax = bdev_dax_supported(
				mp->m_rtdev_targp->bt_bdev, sb->s_blocksize);
		if (!rtdev_is_dax && !datadev_is_dax) {
			xfs_alert(mp,
			"DAX unsupported by block device. Turning off DAX.");
			mp->m_flags &= ~XFS_MOUNT_DAX;
		}
		if (xfs_sb_version_hasreflink(&mp->m_sb)) {
			xfs_alert(mp,
		"DAX and reflink cannot be used together!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}
	}

	if (mp->m_flags & XFS_MOUNT_DISCARD) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);

		if (!blk_queue_discard(q)) {
			xfs_warn(mp, "mounting with \"discard\" option, but "
					"the device does not support discard");
			mp->m_flags &= ~XFS_MOUNT_DISCARD;
		}
	}

	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		if (mp->m_sb.sb_rblocks) {
			xfs_alert(mp,
	"reflink not compatible with realtime device!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}

		if (xfs_globals.always_cow) {
			xfs_info(mp, "using DEBUG-only always_cow mode.");
			mp->m_always_cow = true;
		}
	}

	if (xfs_sb_version_hasrmapbt(&mp->m_sb) && mp->m_sb.sb_rblocks) {
		xfs_alert(mp,
	"reverse mapping btree not compatible with realtime device!");
		error = -EINVAL;
		goto out_filestream_unmount;
	}

	error = xfs_mountfs(mp);
	if (error)
		goto out_filestream_unmount;

	root = igrab(VFS_I(mp->m_rootip));
	if (!root) {
		error = -ENOENT;
		goto out_unmount;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		error = -ENOMEM;
		goto out_unmount;
	}

	return 0;

 out_filestream_unmount:
	xfs_filestream_unmount(mp);
 out_free_sb:
	xfs_freesb(mp);
 out_free_stats:
	free_percpu(mp->m_stats.xs_stats);
 out_destroy_counters:
	xfs_destroy_percpu_counters(mp);
 out_destroy_workqueues:
	xfs_destroy_mount_workqueues(mp);
 out_close_devices:
	xfs_close_devices(mp);
 out_free_names:
	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
	return error;

 out_unmount:
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);
	goto out_free_sb;
}

static int
xfs_fc_get_tree(
	struct fs_context	*fc)
{
	return get_tree_bdev(fc, xfs_fc_fill_super);
}

static int
xfs_remount_rw(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	int error;

	if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
		xfs_warn(mp,
			"ro->rw transition prohibited on norecovery mount");
		return -EINVAL;
	}

	if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
		xfs_warn(mp,
	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
			(sbp->sb_features_ro_compat &
				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
		return -EINVAL;
	}

	mp->m_flags &= ~XFS_MOUNT_RDONLY;

	/*
	 * If this is the first remount to writeable state we might have some
	 * superblock changes to update.
	 */
	if (mp->m_update_sb) {
		error = xfs_sync_sb(mp, false);
		if (error) {
			xfs_warn(mp, "failed to write sb changes");
			return error;
		}
		mp->m_update_sb = false;
	}

	/*
	 * Fill out the reserve pool if it is empty. Use the stashed value if
	 * it is non-zero, otherwise go with the default.
	 */
	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);

	/* Recover any CoW blocks that never got remapped. */
	error = xfs_reflink_recover_cow(mp);
	if (error) {
		xfs_err(mp,
			"Error %d recovering leftover CoW allocations.", error);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}
	xfs_start_block_reaping(mp);

	/* Create the per-AG metadata reservation pool. */
	error = xfs_fs_reserve_ag_blocks(mp);
	if (error && error != -ENOSPC)
		return error;

	return 0;
}

static int
xfs_remount_ro(
	struct xfs_mount	*mp)
{
	int error;

	/*
	 * Cancel background eofb scanning so it cannot race with the final
	 * log force+buftarg wait and deadlock the remount.
	 */
	xfs_stop_block_reaping(mp);

	/* Get rid of any leftover CoW reservations... */
	error = xfs_icache_free_cowblocks(mp, NULL);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/* Free the per-AG metadata reservation pool. */
	error = xfs_fs_unreserve_ag_blocks(mp);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/*
	 * Before we sync the metadata, we need to free up the reserve block
	 * pool so that the used block count in the superblock on disk is
	 * correct at the end of the remount. Stash the current reserve pool
	 * size so that if we get remounted rw, we can return it to the same
	 * size.
	 */
	xfs_save_resvblks(mp);

	xfs_quiesce_attr(mp);
	mp->m_flags |= XFS_MOUNT_RDONLY;

	return 0;
}

/*
 * Logically we would return an error here to prevent users from believing
 * they might have changed mount options using remount which can't be changed.
 *
 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
 * arguments in some cases so we can't blindly reject options, but have to
 * check for each specified option if it actually differs from the currently
 * set option and only reject it if that's the case.
 *
 * Until that is implemented we return success for every remount request, and
 * silently ignore all options that we can't actually change.
 */
static int
xfs_fc_reconfigure(
	struct fs_context *fc)
{
	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
	struct xfs_mount	*new_mp = fc->s_fs_info;
	xfs_sb_t		*sbp = &mp->m_sb;
	int			flags = fc->sb_flags;
	int			error;

	error = xfs_fc_validate_params(new_mp);
	if (error)
		return error;

	sync_filesystem(mp->m_super);

	/* inode32 -> inode64 */
	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
	    !(new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
		mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
	}

	/* inode64 -> inode32 */
	if (!(mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
	    (new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
		mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
	}

	/* ro -> rw */
	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(flags & SB_RDONLY)) {
		error = xfs_remount_rw(mp);
		if (error)
			return error;
	}

	/* rw -> ro */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (flags & SB_RDONLY)) {
		error = xfs_remount_ro(mp);
		if (error)
			return error;
	}

	return 0;
}

static void xfs_fc_free(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = fc->s_fs_info;

	/*
	 * mp is stored in the fs_context when it is initialized.
	 * mp is transferred to the superblock on a successful mount,
	 * but if an error occurs before the transfer we have to free
	 * it here.
	 */
	if (mp)
		xfs_mount_free(mp);
}

static const struct fs_context_operations xfs_context_ops = {
	.parse_param = xfs_fc_parse_param,
	.get_tree    = xfs_fc_get_tree,
	.reconfigure = xfs_fc_reconfigure,
	.free        = xfs_fc_free,
};
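
/*
 * These operations are driven by the (new) mount API. A rough
 * user-space equivalent (illustrative, assuming the fsopen(2) family
 * of syscalls) of what triggers them:
 *
 *	fd = fsopen("xfs", 0);
 *	fsconfig(fd, FSCONFIG_SET_STRING, "source", "/dev/sda1", 0);
 *	fsconfig(fd, FSCONFIG_SET_STRING, "logbufs", "8", 0);
 *	fsconfig(fd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *	mfd = fsmount(fd, 0, 0);
 *
 * Each fsconfig() string lands in .parse_param, FSCONFIG_CMD_CREATE
 * leads to .get_tree, and a classic mount(2) is routed through the
 * same hooks by the VFS compatibility layer.
 */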

static int xfs_init_fs_context(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp;

	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
	if (!mp)
		return -ENOMEM;

	spin_lock_init(&mp->m_sb_lock);
	spin_lock_init(&mp->m_agirotor_lock);
	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
	spin_lock_init(&mp->m_perag_lock);
	mutex_init(&mp->m_growlock);
	atomic_set(&mp->m_active_trans, 0);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
	INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
	INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
	mp->m_kobj.kobject.kset = xfs_kset;
	/*
	 * We don't create the finobt per-ag space reservation until after log
	 * recovery, so we must set this to true so that an ifree transaction
	 * started during log recovery will not depend on space reservations
	 * for finobt expansion.
	 */
	mp->m_finobt_nores = true;

	/*
	 * These can be overridden by the mount option parsing.
	 */
	mp->m_logbufs = -1;
	mp->m_logbsize = -1;
	mp->m_allocsize_log = 16; /* 64k */

	/*
	 * Copy binary VFS mount flags we are interested in.
	 */
	if (fc->sb_flags & SB_RDONLY)
		mp->m_flags |= XFS_MOUNT_RDONLY;
	if (fc->sb_flags & SB_DIRSYNC)
		mp->m_flags |= XFS_MOUNT_DIRSYNC;
	if (fc->sb_flags & SB_SYNCHRONOUS)
		mp->m_flags |= XFS_MOUNT_WSYNC;

	fc->s_fs_info = mp;
	fc->ops = &xfs_context_ops;

	return 0;
}

static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.init_fs_context	= xfs_init_fs_context,
	.parameters		= &xfs_fs_parameters,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("xfs");

STATIC int __init
xfs_init_zones(void)
{
	xfs_log_ticket_zone = kmem_cache_create("xfs_log_ticket",
						sizeof(struct xlog_ticket),
						0, 0, NULL);
	if (!xfs_log_ticket_zone)
		goto out;

	xfs_bmap_free_item_zone = kmem_cache_create("xfs_bmap_free_item",
					sizeof(struct xfs_extent_free_item),
					0, 0, NULL);
	if (!xfs_bmap_free_item_zone)
		goto out_destroy_log_ticket_zone;

	xfs_btree_cur_zone = kmem_cache_create("xfs_btree_cur",
					       sizeof(struct xfs_btree_cur),
					       0, 0, NULL);
	if (!xfs_btree_cur_zone)
		goto out_destroy_bmap_free_item_zone;

	xfs_da_state_zone = kmem_cache_create("xfs_da_state",
					      sizeof(struct xfs_da_state),
					      0, 0, NULL);
	if (!xfs_da_state_zone)
		goto out_destroy_btree_cur_zone;

	xfs_ifork_zone = kmem_cache_create("xfs_ifork",
					   sizeof(struct xfs_ifork),
					   0, 0, NULL);
	if (!xfs_ifork_zone)
		goto out_destroy_da_state_zone;

	xfs_trans_zone = kmem_cache_create("xfs_trans",
					   sizeof(struct xfs_trans),
					   0, 0, NULL);
	if (!xfs_trans_zone)
		goto out_destroy_ifork_zone;

	/*
	 * The size of the zone allocated buf log item is the maximum
	 * size possible under XFS.  This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_zone = kmem_cache_create("xfs_buf_item",
					      sizeof(struct xfs_buf_log_item),
					      0, 0, NULL);
	if (!xfs_buf_item_zone)
		goto out_destroy_trans_zone;

	xfs_efd_zone = kmem_cache_create("xfs_efd_item",
					(sizeof(struct xfs_efd_log_item) +
					(XFS_EFD_MAX_FAST_EXTENTS - 1) *
					sizeof(struct xfs_extent)),
					0, 0, NULL);
	if (!xfs_efd_zone)
		goto out_destroy_buf_item_zone;

	xfs_efi_zone = kmem_cache_create("xfs_efi_item",
					 (sizeof(struct xfs_efi_log_item) +
					 (XFS_EFI_MAX_FAST_EXTENTS - 1) *
					 sizeof(struct xfs_extent)),
					 0, 0, NULL);
	if (!xfs_efi_zone)
		goto out_destroy_efd_zone;

	xfs_inode_zone = kmem_cache_create("xfs_inode",
					   sizeof(struct xfs_inode), 0,
					   (SLAB_HWCACHE_ALIGN |
					    SLAB_RECLAIM_ACCOUNT |
					    SLAB_MEM_SPREAD | SLAB_ACCOUNT),
					   xfs_fs_inode_init_once);
	if (!xfs_inode_zone)
		goto out_destroy_efi_zone;

	xfs_ili_zone = kmem_cache_create("xfs_ili",
					 sizeof(struct xfs_inode_log_item), 0,
					 SLAB_MEM_SPREAD, NULL);
	if (!xfs_ili_zone)
		goto out_destroy_inode_zone;

	xfs_icreate_zone = kmem_cache_create("xfs_icr",
					     sizeof(struct xfs_icreate_item),
					     0, 0, NULL);
	if (!xfs_icreate_zone)
		goto out_destroy_ili_zone;

	xfs_rud_zone = kmem_cache_create("xfs_rud_item",
					 sizeof(struct xfs_rud_log_item),
					 0, 0, NULL);
	if (!xfs_rud_zone)
		goto out_destroy_icreate_zone;

	xfs_rui_zone = kmem_cache_create("xfs_rui_item",
			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_rui_zone)
		goto out_destroy_rud_zone;

	xfs_cud_zone = kmem_cache_create("xfs_cud_item",
					 sizeof(struct xfs_cud_log_item),
					 0, 0, NULL);
	if (!xfs_cud_zone)
		goto out_destroy_rui_zone;

	xfs_cui_zone = kmem_cache_create("xfs_cui_item",
			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_cui_zone)
		goto out_destroy_cud_zone;

	xfs_bud_zone = kmem_cache_create("xfs_bud_item",
					 sizeof(struct xfs_bud_log_item),
					 0, 0, NULL);
	if (!xfs_bud_zone)
		goto out_destroy_cui_zone;

	xfs_bui_zone = kmem_cache_create("xfs_bui_item",
			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_bui_zone)
		goto out_destroy_bud_zone;

	return 0;

 out_destroy_bud_zone:
	kmem_cache_destroy(xfs_bud_zone);
 out_destroy_cui_zone:
	kmem_cache_destroy(xfs_cui_zone);
 out_destroy_cud_zone:
	kmem_cache_destroy(xfs_cud_zone);
 out_destroy_rui_zone:
	kmem_cache_destroy(xfs_rui_zone);
 out_destroy_rud_zone:
	kmem_cache_destroy(xfs_rud_zone);
 out_destroy_icreate_zone:
	kmem_cache_destroy(xfs_icreate_zone);
 out_destroy_ili_zone:
	kmem_cache_destroy(xfs_ili_zone);
 out_destroy_inode_zone:
	kmem_cache_destroy(xfs_inode_zone);
 out_destroy_efi_zone:
	kmem_cache_destroy(xfs_efi_zone);
 out_destroy_efd_zone:
	kmem_cache_destroy(xfs_efd_zone);
 out_destroy_buf_item_zone:
	kmem_cache_destroy(xfs_buf_item_zone);
 out_destroy_trans_zone:
	kmem_cache_destroy(xfs_trans_zone);
 out_destroy_ifork_zone:
	kmem_cache_destroy(xfs_ifork_zone);
 out_destroy_da_state_zone:
	kmem_cache_destroy(xfs_da_state_zone);
 out_destroy_btree_cur_zone:
	kmem_cache_destroy(xfs_btree_cur_zone);
 out_destroy_bmap_free_item_zone:
	kmem_cache_destroy(xfs_bmap_free_item_zone);
 out_destroy_log_ticket_zone:
	kmem_cache_destroy(xfs_log_ticket_zone);
 out:
	return -ENOMEM;
}

STATIC void
xfs_destroy_zones(void)
{
	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(xfs_bui_zone);
	kmem_cache_destroy(xfs_bud_zone);
	kmem_cache_destroy(xfs_cui_zone);
	kmem_cache_destroy(xfs_cud_zone);
	kmem_cache_destroy(xfs_rui_zone);
	kmem_cache_destroy(xfs_rud_zone);
	kmem_cache_destroy(xfs_icreate_zone);
	kmem_cache_destroy(xfs_ili_zone);
	kmem_cache_destroy(xfs_inode_zone);
	kmem_cache_destroy(xfs_efi_zone);
	kmem_cache_destroy(xfs_efd_zone);
	kmem_cache_destroy(xfs_buf_item_zone);
	kmem_cache_destroy(xfs_trans_zone);
	kmem_cache_destroy(xfs_ifork_zone);
	kmem_cache_destroy(xfs_da_state_zone);
	kmem_cache_destroy(xfs_btree_cur_zone);
	kmem_cache_destroy(xfs_bmap_free_item_zone);
	kmem_cache_destroy(xfs_log_ticket_zone);
}

STATIC int __init
xfs_init_workqueues(void)
{
	/*
	 * The allocation workqueue can be used in memory reclaim situations
	 * (writepage path), and parallelism is only limited by the number of
	 * AGs in all the filesystems mounted. Hence use the default large
	 * max_active value for this workqueue.
	 */
	xfs_alloc_wq = alloc_workqueue("xfsalloc",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0);
	if (!xfs_alloc_wq)
		return -ENOMEM;

	xfs_discard_wq = alloc_workqueue("xfsdiscard", WQ_UNBOUND, 0);
	if (!xfs_discard_wq)
		goto out_free_alloc_wq;

	return 0;
out_free_alloc_wq:
	destroy_workqueue(xfs_alloc_wq);
	return -ENOMEM;
}

STATIC void
xfs_destroy_workqueues(void)
{
	destroy_workqueue(xfs_discard_wq);
	destroy_workqueue(xfs_alloc_wq);
}

STATIC int __init
init_xfs_fs(void)
{
	int			error;

	xfs_check_ondisk_structs();

	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");

	xfs_dir_startup();

	error = xfs_init_zones();
	if (error)
		goto out;

	error = xfs_init_workqueues();
	if (error)
		goto out_destroy_zones;

	error = xfs_mru_cache_init();
	if (error)
		goto out_destroy_wq;

	error = xfs_buf_init();
	if (error)
		goto out_mru_cache_uninit;

	error = xfs_init_procfs();
	if (error)
		goto out_buf_terminate;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;

	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
	if (!xfs_kset) {
		error = -ENOMEM;
		goto out_sysctl_unregister;
	}

	xfsstats.xs_kobj.kobject.kset = xfs_kset;

	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
	if (!xfsstats.xs_stats) {
		error = -ENOMEM;
		goto out_kset_unregister;
	}

	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
			       "stats");
	if (error)
		goto out_free_stats;

#ifdef DEBUG
	xfs_dbg_kobj.kobject.kset = xfs_kset;
	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
	if (error)
		goto out_remove_stats_kobj;
#endif

	error = xfs_qm_init();
	if (error)
		goto out_remove_dbg_kobj;

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto out_qm_exit;
	return 0;

 out_qm_exit:
	xfs_qm_exit();
 out_remove_dbg_kobj:
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
 out_remove_stats_kobj:
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
 out_free_stats:
	free_percpu(xfsstats.xs_stats);
 out_kset_unregister:
	kset_unregister(xfs_kset);
 out_sysctl_unregister:
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_buf_terminate:
	xfs_buf_terminate();
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
 out_destroy_wq:
	xfs_destroy_workqueues();
 out_destroy_zones:
	xfs_destroy_zones();
 out:
	return error;
}

STATIC void __exit
exit_xfs_fs(void)
{
	xfs_qm_exit();
	unregister_filesystem(&xfs_fs_type);
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
	free_percpu(xfsstats.xs_stats);
	kset_unregister(xfs_kset);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_buf_terminate();
	xfs_mru_cache_uninit();
	xfs_destroy_workqueues();
	xfs_destroy_zones();
	xfs_uuid_table_free();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");