xref: /linux/fs/xfs/xfs_super.c (revision 132db93572821ec2fdf81e354cc40f558faf7e4f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 
7 #include "xfs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_sb.h"
13 #include "xfs_mount.h"
14 #include "xfs_inode.h"
15 #include "xfs_btree.h"
16 #include "xfs_bmap.h"
17 #include "xfs_alloc.h"
18 #include "xfs_fsops.h"
19 #include "xfs_trans.h"
20 #include "xfs_buf_item.h"
21 #include "xfs_log.h"
22 #include "xfs_log_priv.h"
23 #include "xfs_dir2.h"
24 #include "xfs_extfree_item.h"
25 #include "xfs_mru_cache.h"
26 #include "xfs_inode_item.h"
27 #include "xfs_icache.h"
28 #include "xfs_trace.h"
29 #include "xfs_icreate_item.h"
30 #include "xfs_filestream.h"
31 #include "xfs_quota.h"
32 #include "xfs_sysfs.h"
33 #include "xfs_ondisk.h"
34 #include "xfs_rmap_item.h"
35 #include "xfs_refcount_item.h"
36 #include "xfs_bmap_item.h"
37 #include "xfs_reflink.h"
38 
39 #include <linux/magic.h>
40 #include <linux/fs_context.h>
41 #include <linux/fs_parser.h>
42 
43 static const struct super_operations xfs_super_operations;
44 
45 static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
46 #ifdef DEBUG
47 static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
48 #endif
49 
50 enum xfs_dax_mode {
51 	XFS_DAX_INODE = 0,
52 	XFS_DAX_ALWAYS = 1,
53 	XFS_DAX_NEVER = 2,
54 };
55 
56 static void
57 xfs_mount_set_dax_mode(
58 	struct xfs_mount	*mp,
59 	enum xfs_dax_mode	mode)
60 {
61 	switch (mode) {
62 	case XFS_DAX_INODE:
63 		mp->m_flags &= ~(XFS_MOUNT_DAX_ALWAYS | XFS_MOUNT_DAX_NEVER);
64 		break;
65 	case XFS_DAX_ALWAYS:
66 		mp->m_flags |= XFS_MOUNT_DAX_ALWAYS;
67 		mp->m_flags &= ~XFS_MOUNT_DAX_NEVER;
68 		break;
69 	case XFS_DAX_NEVER:
70 		mp->m_flags |= XFS_MOUNT_DAX_NEVER;
71 		mp->m_flags &= ~XFS_MOUNT_DAX_ALWAYS;
72 		break;
73 	}
74 }
75 
76 static const struct constant_table dax_param_enums[] = {
77 	{"inode",	XFS_DAX_INODE },
78 	{"always",	XFS_DAX_ALWAYS },
79 	{"never",	XFS_DAX_NEVER },
80 	{}
81 };
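
/*
 * Illustrative mapping (a summary of the three modes; the per-inode DAX
 * handling itself lives outside this file): a bare "-o dax" behaves like
 * dax=always, "-o dax=inode" defers to the per-inode DAX flag, and
 * "-o dax=never" disables DAX regardless of inode flags.  See
 * xfs_mount_set_dax_mode() above and the Opt_dax/Opt_dax_enum cases in
 * xfs_fc_parse_param() below.
 */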
82 
83 /*
84  * Table driven mount option parser.
85  */
86 enum {
87 	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
88 	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
89 	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
90 	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
91 	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
92 	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
93 	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
94 	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
95 	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
96 };
97 
98 static const struct fs_parameter_spec xfs_fs_parameters[] = {
99 	fsparam_u32("logbufs",		Opt_logbufs),
100 	fsparam_string("logbsize",	Opt_logbsize),
101 	fsparam_string("logdev",	Opt_logdev),
102 	fsparam_string("rtdev",		Opt_rtdev),
103 	fsparam_flag("wsync",		Opt_wsync),
104 	fsparam_flag("noalign",		Opt_noalign),
105 	fsparam_flag("swalloc",		Opt_swalloc),
106 	fsparam_u32("sunit",		Opt_sunit),
107 	fsparam_u32("swidth",		Opt_swidth),
108 	fsparam_flag("nouuid",		Opt_nouuid),
109 	fsparam_flag("grpid",		Opt_grpid),
110 	fsparam_flag("nogrpid",		Opt_nogrpid),
111 	fsparam_flag("bsdgroups",	Opt_bsdgroups),
112 	fsparam_flag("sysvgroups",	Opt_sysvgroups),
113 	fsparam_string("allocsize",	Opt_allocsize),
114 	fsparam_flag("norecovery",	Opt_norecovery),
115 	fsparam_flag("inode64",		Opt_inode64),
116 	fsparam_flag("inode32",		Opt_inode32),
117 	fsparam_flag("ikeep",		Opt_ikeep),
118 	fsparam_flag("noikeep",		Opt_noikeep),
119 	fsparam_flag("largeio",		Opt_largeio),
120 	fsparam_flag("nolargeio",	Opt_nolargeio),
121 	fsparam_flag("attr2",		Opt_attr2),
122 	fsparam_flag("noattr2",		Opt_noattr2),
123 	fsparam_flag("filestreams",	Opt_filestreams),
124 	fsparam_flag("quota",		Opt_quota),
125 	fsparam_flag("noquota",		Opt_noquota),
126 	fsparam_flag("usrquota",	Opt_usrquota),
127 	fsparam_flag("grpquota",	Opt_grpquota),
128 	fsparam_flag("prjquota",	Opt_prjquota),
129 	fsparam_flag("uquota",		Opt_uquota),
130 	fsparam_flag("gquota",		Opt_gquota),
131 	fsparam_flag("pquota",		Opt_pquota),
132 	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
133 	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
134 	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
135 	fsparam_flag("qnoenforce",	Opt_qnoenforce),
136 	fsparam_flag("discard",		Opt_discard),
137 	fsparam_flag("nodiscard",	Opt_nodiscard),
138 	fsparam_flag("dax",		Opt_dax),
139 	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
140 	{}
141 };
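
/*
 * Illustrative example (device and mount point are hypothetical): a mount
 * invocation such as
 *
 *	mount -t xfs -o logbufs=8,logbsize=256k,noalign,dax=never /dev/sdb1 /mnt
 *
 * is split into individual parameters by the VFS; fs_parse() matches each
 * one against this table and xfs_fc_parse_param() then handles the
 * resulting Opt_logbufs, Opt_logbsize, Opt_noalign and Opt_dax_enum values.
 */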
142 
143 struct proc_xfs_info {
144 	uint64_t	flag;
145 	char		*str;
146 };
147 
148 static int
149 xfs_fs_show_options(
150 	struct seq_file		*m,
151 	struct dentry		*root)
152 {
153 	static struct proc_xfs_info xfs_info_set[] = {
154 		/* the few simple ones we can get from the mount struct */
155 		{ XFS_MOUNT_IKEEP,		",ikeep" },
156 		{ XFS_MOUNT_WSYNC,		",wsync" },
157 		{ XFS_MOUNT_NOALIGN,		",noalign" },
158 		{ XFS_MOUNT_SWALLOC,		",swalloc" },
159 		{ XFS_MOUNT_NOUUID,		",nouuid" },
160 		{ XFS_MOUNT_NORECOVERY,		",norecovery" },
161 		{ XFS_MOUNT_ATTR2,		",attr2" },
162 		{ XFS_MOUNT_FILESTREAMS,	",filestreams" },
163 		{ XFS_MOUNT_GRPID,		",grpid" },
164 		{ XFS_MOUNT_DISCARD,		",discard" },
165 		{ XFS_MOUNT_LARGEIO,		",largeio" },
166 		{ XFS_MOUNT_DAX_ALWAYS,		",dax=always" },
167 		{ XFS_MOUNT_DAX_NEVER,		",dax=never" },
168 		{ 0, NULL }
169 	};
170 	struct xfs_mount	*mp = XFS_M(root->d_sb);
171 	struct proc_xfs_info	*xfs_infop;
172 
173 	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
174 		if (mp->m_flags & xfs_infop->flag)
175 			seq_puts(m, xfs_infop->str);
176 	}
177 
178 	seq_printf(m, ",inode%d",
179 		(mp->m_flags & XFS_MOUNT_SMALL_INUMS) ? 32 : 64);
180 
181 	if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
182 		seq_printf(m, ",allocsize=%dk",
183 			   (1 << mp->m_allocsize_log) >> 10);
184 
185 	if (mp->m_logbufs > 0)
186 		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
187 	if (mp->m_logbsize > 0)
188 		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);
189 
190 	if (mp->m_logname)
191 		seq_show_option(m, "logdev", mp->m_logname);
192 	if (mp->m_rtname)
193 		seq_show_option(m, "rtdev", mp->m_rtname);
194 
195 	if (mp->m_dalign > 0)
196 		seq_printf(m, ",sunit=%d",
197 				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
198 	if (mp->m_swidth > 0)
199 		seq_printf(m, ",swidth=%d",
200 				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));
201 
202 	if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
203 		seq_puts(m, ",usrquota");
204 	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
205 		seq_puts(m, ",uqnoenforce");
206 
207 	if (mp->m_qflags & XFS_PQUOTA_ACCT) {
208 		if (mp->m_qflags & XFS_PQUOTA_ENFD)
209 			seq_puts(m, ",prjquota");
210 		else
211 			seq_puts(m, ",pqnoenforce");
212 	}
213 	if (mp->m_qflags & XFS_GQUOTA_ACCT) {
214 		if (mp->m_qflags & XFS_GQUOTA_ENFD)
215 			seq_puts(m, ",grpquota");
216 		else
217 			seq_puts(m, ",gqnoenforce");
218 	}
219 
220 	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
221 		seq_puts(m, ",noquota");
222 
223 	return 0;
224 }
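
/*
 * Illustrative example (device name hypothetical): for a filesystem mounted
 * with explicit log buffer options, an external log device and enforced
 * user quotas, the XFS portion of the options string in /proc/mounts might
 * read
 *
 *	attr2,inode64,logbufs=8,logbsize=32k,logdev=/dev/sdc1,usrquota
 *
 * built from the flag loop and the seq_printf()/seq_show_option() calls
 * above.
 */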
225 
226 /*
227  * Set parameters for inode allocation heuristics, taking into account
228  * filesystem size and inode32/inode64 mount options; i.e. specifically
229  * whether or not XFS_MOUNT_SMALL_INUMS is set.
230  *
231  * Inode allocation patterns are altered only if inode32 is requested
232  * (XFS_MOUNT_SMALL_INUMS), and the filesystem is sufficiently large.
233  * If altered, XFS_MOUNT_32BITINODES is set as well.
234  *
235  * An agcount independent of that in the mount structure is provided
236  * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
237  * to the potentially higher ag count.
238  *
239  * Returns the maximum AG index which may contain inodes.
240  */
241 xfs_agnumber_t
242 xfs_set_inode_alloc(
243 	struct xfs_mount *mp,
244 	xfs_agnumber_t	agcount)
245 {
246 	xfs_agnumber_t	index;
247 	xfs_agnumber_t	maxagi = 0;
248 	xfs_sb_t	*sbp = &mp->m_sb;
249 	xfs_agnumber_t	max_metadata;
250 	xfs_agino_t	agino;
251 	xfs_ino_t	ino;
252 
253 	/*
254 	 * Calculate how much should be reserved for inodes to meet
255 	 * the max inode percentage.  Used only for inode32.
256 	 */
257 	if (M_IGEO(mp)->maxicount) {
258 		uint64_t	icount;
259 
260 		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
261 		do_div(icount, 100);
262 		icount += sbp->sb_agblocks - 1;
263 		do_div(icount, sbp->sb_agblocks);
264 		max_metadata = icount;
265 	} else {
266 		max_metadata = agcount;
267 	}
268 
269 	/* Get the last possible inode in the filesystem */
270 	agino =	XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
271 	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
272 
273 	/*
274 	 * If user asked for no more than 32-bit inodes, and the fs is
275 	 * sufficiently large, set XFS_MOUNT_32BITINODES if we must alter
276 	 * the allocator to accommodate the request.
277 	 */
278 	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
279 		mp->m_flags |= XFS_MOUNT_32BITINODES;
280 	else
281 		mp->m_flags &= ~XFS_MOUNT_32BITINODES;
282 
283 	for (index = 0; index < agcount; index++) {
284 		struct xfs_perag	*pag;
285 
286 		ino = XFS_AGINO_TO_INO(mp, index, agino);
287 
288 		pag = xfs_perag_get(mp, index);
289 
290 		if (mp->m_flags & XFS_MOUNT_32BITINODES) {
291 			if (ino > XFS_MAXINUMBER_32) {
292 				pag->pagi_inodeok = 0;
293 				pag->pagf_metadata = 0;
294 			} else {
295 				pag->pagi_inodeok = 1;
296 				maxagi++;
297 				if (index < max_metadata)
298 					pag->pagf_metadata = 1;
299 				else
300 					pag->pagf_metadata = 0;
301 			}
302 		} else {
303 			pag->pagi_inodeok = 1;
304 			pag->pagf_metadata = 0;
305 		}
306 
307 		xfs_perag_put(pag);
308 	}
309 
310 	return (mp->m_flags & XFS_MOUNT_32BITINODES) ? maxagi : agcount;
311 }
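
/*
 * Worked example (AG counts are hypothetical): on an inode32
 * (XFS_MOUNT_SMALL_INUMS) mount that is large enough for
 * XFS_MOUNT_32BITINODES to be set, suppose the highest possible inode
 * number still fits in 32 bits for AGs 0-7 of a 16-AG filesystem but not
 * for AGs 8-15.  The loop above then leaves pagi_inodeok set only for
 * AGs 0-7, sets pagf_metadata on those below max_metadata, and returns
 * maxagi = 8; AGs 8-15 will not be used for new inodes.
 */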
312 
313 STATIC int
314 xfs_blkdev_get(
315 	xfs_mount_t		*mp,
316 	const char		*name,
317 	struct block_device	**bdevp)
318 {
319 	int			error = 0;
320 
321 	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
322 				    mp);
323 	if (IS_ERR(*bdevp)) {
324 		error = PTR_ERR(*bdevp);
325 		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
326 	}
327 
328 	return error;
329 }
330 
331 STATIC void
332 xfs_blkdev_put(
333 	struct block_device	*bdev)
334 {
335 	if (bdev)
336 		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
337 }
338 
339 void
340 xfs_blkdev_issue_flush(
341 	xfs_buftarg_t		*buftarg)
342 {
343 	blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS);
344 }
345 
346 STATIC void
347 xfs_close_devices(
348 	struct xfs_mount	*mp)
349 {
350 	struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;
351 
352 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
353 		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
354 		struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;
355 
356 		xfs_free_buftarg(mp->m_logdev_targp);
357 		xfs_blkdev_put(logdev);
358 		fs_put_dax(dax_logdev);
359 	}
360 	if (mp->m_rtdev_targp) {
361 		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
362 		struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;
363 
364 		xfs_free_buftarg(mp->m_rtdev_targp);
365 		xfs_blkdev_put(rtdev);
366 		fs_put_dax(dax_rtdev);
367 	}
368 	xfs_free_buftarg(mp->m_ddev_targp);
369 	fs_put_dax(dax_ddev);
370 }
371 
372 /*
373  * The file system configurations are:
374  *	(1) device (partition) with data and internal log
375  *	(2) logical volume with data and log subvolumes.
376  *	(3) logical volume with data, log, and realtime subvolumes.
377  *
378  * We only have to handle opening the log and realtime volumes here if
379  * they are present.  The data subvolume has already been opened by
380  * get_sb_bdev() and is stored in sb->s_bdev.
381  */
382 STATIC int
383 xfs_open_devices(
384 	struct xfs_mount	*mp)
385 {
386 	struct block_device	*ddev = mp->m_super->s_bdev;
387 	struct dax_device	*dax_ddev = fs_dax_get_by_bdev(ddev);
388 	struct dax_device	*dax_logdev = NULL, *dax_rtdev = NULL;
389 	struct block_device	*logdev = NULL, *rtdev = NULL;
390 	int			error;
391 
392 	/*
393 	 * Open real time and log devices - order is important.
394 	 */
395 	if (mp->m_logname) {
396 		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
397 		if (error)
398 			goto out;
399 		dax_logdev = fs_dax_get_by_bdev(logdev);
400 	}
401 
402 	if (mp->m_rtname) {
403 		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
404 		if (error)
405 			goto out_close_logdev;
406 
407 		if (rtdev == ddev || rtdev == logdev) {
408 			xfs_warn(mp,
409 	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
410 			error = -EINVAL;
411 			goto out_close_rtdev;
412 		}
413 		dax_rtdev = fs_dax_get_by_bdev(rtdev);
414 	}
415 
416 	/*
417 	 * Setup xfs_mount buffer target pointers
418 	 */
419 	error = -ENOMEM;
420 	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
421 	if (!mp->m_ddev_targp)
422 		goto out_close_rtdev;
423 
424 	if (rtdev) {
425 		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
426 		if (!mp->m_rtdev_targp)
427 			goto out_free_ddev_targ;
428 	}
429 
430 	if (logdev && logdev != ddev) {
431 		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
432 		if (!mp->m_logdev_targp)
433 			goto out_free_rtdev_targ;
434 	} else {
435 		mp->m_logdev_targp = mp->m_ddev_targp;
436 	}
437 
438 	return 0;
439 
440  out_free_rtdev_targ:
441 	if (mp->m_rtdev_targp)
442 		xfs_free_buftarg(mp->m_rtdev_targp);
443  out_free_ddev_targ:
444 	xfs_free_buftarg(mp->m_ddev_targp);
445  out_close_rtdev:
446 	xfs_blkdev_put(rtdev);
447 	fs_put_dax(dax_rtdev);
448  out_close_logdev:
449 	if (logdev && logdev != ddev) {
450 		xfs_blkdev_put(logdev);
451 		fs_put_dax(dax_logdev);
452 	}
453  out:
454 	fs_put_dax(dax_ddev);
455 	return error;
456 }
457 
458 /*
459  * Setup xfs_mount buffer target pointers based on superblock
460  */
461 STATIC int
462 xfs_setup_devices(
463 	struct xfs_mount	*mp)
464 {
465 	int			error;
466 
467 	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
468 	if (error)
469 		return error;
470 
471 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
472 		unsigned int	log_sector_size = BBSIZE;
473 
474 		if (xfs_sb_version_hassector(&mp->m_sb))
475 			log_sector_size = mp->m_sb.sb_logsectsize;
476 		error = xfs_setsize_buftarg(mp->m_logdev_targp,
477 					    log_sector_size);
478 		if (error)
479 			return error;
480 	}
481 	if (mp->m_rtdev_targp) {
482 		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
483 					    mp->m_sb.sb_sectsize);
484 		if (error)
485 			return error;
486 	}
487 
488 	return 0;
489 }
490 
491 STATIC int
492 xfs_init_mount_workqueues(
493 	struct xfs_mount	*mp)
494 {
495 	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
496 			WQ_MEM_RECLAIM|WQ_FREEZABLE, 1, mp->m_super->s_id);
497 	if (!mp->m_buf_workqueue)
498 		goto out;
499 
500 	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
501 			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
502 	if (!mp->m_unwritten_workqueue)
503 		goto out_destroy_buf;
504 
505 	mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
506 			WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND,
507 			0, mp->m_super->s_id);
508 	if (!mp->m_cil_workqueue)
509 		goto out_destroy_unwritten;
510 
511 	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
512 			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
513 	if (!mp->m_reclaim_workqueue)
514 		goto out_destroy_cil;
515 
516 	mp->m_eofblocks_workqueue = alloc_workqueue("xfs-eofblocks/%s",
517 			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
518 	if (!mp->m_eofblocks_workqueue)
519 		goto out_destroy_reclaim;
520 
521 	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", WQ_FREEZABLE, 0,
522 					       mp->m_super->s_id);
523 	if (!mp->m_sync_workqueue)
524 		goto out_destroy_eofb;
525 
526 	return 0;
527 
528 out_destroy_eofb:
529 	destroy_workqueue(mp->m_eofblocks_workqueue);
530 out_destroy_reclaim:
531 	destroy_workqueue(mp->m_reclaim_workqueue);
532 out_destroy_cil:
533 	destroy_workqueue(mp->m_cil_workqueue);
534 out_destroy_unwritten:
535 	destroy_workqueue(mp->m_unwritten_workqueue);
536 out_destroy_buf:
537 	destroy_workqueue(mp->m_buf_workqueue);
538 out:
539 	return -ENOMEM;
540 }
541 
542 STATIC void
543 xfs_destroy_mount_workqueues(
544 	struct xfs_mount	*mp)
545 {
546 	destroy_workqueue(mp->m_sync_workqueue);
547 	destroy_workqueue(mp->m_eofblocks_workqueue);
548 	destroy_workqueue(mp->m_reclaim_workqueue);
549 	destroy_workqueue(mp->m_cil_workqueue);
550 	destroy_workqueue(mp->m_unwritten_workqueue);
551 	destroy_workqueue(mp->m_buf_workqueue);
552 }
553 
554 static void
555 xfs_flush_inodes_worker(
556 	struct work_struct	*work)
557 {
558 	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
559 						   m_flush_inodes_work);
560 	struct super_block	*sb = mp->m_super;
561 
562 	if (down_read_trylock(&sb->s_umount)) {
563 		sync_inodes_sb(sb);
564 		up_read(&sb->s_umount);
565 	}
566 }
567 
568 /*
569  * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
570  * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
571  * for IO to complete so that we effectively throttle multiple callers to the
572  * rate at which IO is completing.
573  */
574 void
575 xfs_flush_inodes(
576 	struct xfs_mount	*mp)
577 {
578 	/*
579 	 * If flush_work() returns true then that means we waited for a flush
580 	 * which was already in progress.  Don't bother running another scan.
581 	 */
582 	if (flush_work(&mp->m_flush_inodes_work))
583 		return;
584 
585 	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
586 	flush_work(&mp->m_flush_inodes_work);
587 }
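
/*
 * Illustrative scenario: if one task has already queued
 * m_flush_inodes_work and a second task calls xfs_flush_inodes() while
 * that pass is still pending or running, the second caller's first
 * flush_work() returns true once the pass completes and it bails out,
 * piggybacking on the writeback already done rather than queueing another
 * sync_inodes_sb() scan.
 */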
588 
589 /* Catch misguided souls that try to use this interface on XFS */
590 STATIC struct inode *
591 xfs_fs_alloc_inode(
592 	struct super_block	*sb)
593 {
594 	BUG();
595 	return NULL;
596 }
597 
598 #ifdef DEBUG
599 static void
600 xfs_check_delalloc(
601 	struct xfs_inode	*ip,
602 	int			whichfork)
603 {
604 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
605 	struct xfs_bmbt_irec	got;
606 	struct xfs_iext_cursor	icur;
607 
608 	if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
609 		return;
610 	do {
611 		if (isnullstartblock(got.br_startblock)) {
612 			xfs_warn(ip->i_mount,
613 	"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
614 				ip->i_ino,
615 				whichfork == XFS_DATA_FORK ? "data" : "cow",
616 				got.br_startoff, got.br_blockcount);
617 		}
618 	} while (xfs_iext_next_extent(ifp, &icur, &got));
619 }
620 #else
621 #define xfs_check_delalloc(ip, whichfork)	do { } while (0)
622 #endif
623 
624 /*
625  * Now that the generic code is guaranteed not to be accessing
626  * the linux inode, we can inactivate and reclaim the inode.
627  */
628 STATIC void
629 xfs_fs_destroy_inode(
630 	struct inode		*inode)
631 {
632 	struct xfs_inode	*ip = XFS_I(inode);
633 
634 	trace_xfs_destroy_inode(ip);
635 
636 	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
637 	XFS_STATS_INC(ip->i_mount, vn_rele);
638 	XFS_STATS_INC(ip->i_mount, vn_remove);
639 
640 	xfs_inactive(ip);
641 
642 	if (!XFS_FORCED_SHUTDOWN(ip->i_mount) && ip->i_delayed_blks) {
643 		xfs_check_delalloc(ip, XFS_DATA_FORK);
644 		xfs_check_delalloc(ip, XFS_COW_FORK);
645 		ASSERT(0);
646 	}
647 
648 	XFS_STATS_INC(ip->i_mount, vn_reclaim);
649 
650 	/*
651 	 * We should never get here with one of the reclaim flags already set.
652 	 */
653 	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
654 	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));
655 
656 	/*
657 	 * We always use background reclaim here because even if the
658 	 * inode is clean, it still may be under IO and hence we have
659 	 * to take the flush lock. The background reclaim path handles
660 	 * this more efficiently than we can here, so simply let background
661 	 * reclaim tear down all inodes.
662 	 */
663 	xfs_inode_set_reclaim_tag(ip);
664 }
665 
666 static void
667 xfs_fs_dirty_inode(
668 	struct inode			*inode,
669 	int				flag)
670 {
671 	struct xfs_inode		*ip = XFS_I(inode);
672 	struct xfs_mount		*mp = ip->i_mount;
673 	struct xfs_trans		*tp;
674 
675 	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
676 		return;
677 	if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
678 		return;
679 
680 	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
681 		return;
682 	xfs_ilock(ip, XFS_ILOCK_EXCL);
683 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
684 	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
685 	xfs_trans_commit(tp);
686 }
687 
688 /*
689  * Slab object creation initialisation for the XFS inode.
690  * This covers only the idempotent fields in the XFS inode;
691  * all other fields need to be initialised on allocation
692  * from the slab. This avoids the need to repeatedly initialise
693  * fields in the xfs inode that are left in the initialised state
694  * when freeing the inode.
695  */
696 STATIC void
697 xfs_fs_inode_init_once(
698 	void			*inode)
699 {
700 	struct xfs_inode	*ip = inode;
701 
702 	memset(ip, 0, sizeof(struct xfs_inode));
703 
704 	/* vfs inode */
705 	inode_init_once(VFS_I(ip));
706 
707 	/* xfs inode */
708 	atomic_set(&ip->i_pincount, 0);
709 	spin_lock_init(&ip->i_flags_lock);
710 
711 	mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
712 		     "xfsino", ip->i_ino);
713 	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
714 		     "xfsino", ip->i_ino);
715 }
716 
717 /*
718  * We do an unlocked check for XFS_IDONTCACHE here because we are already
719  * serialised against cache hits via the inode->i_lock and igrab() in
720  * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
721  * racing with us, and it avoids needing to grab a spinlock here for every inode
722  * we drop the final reference on.
723  */
724 STATIC int
725 xfs_fs_drop_inode(
726 	struct inode		*inode)
727 {
728 	struct xfs_inode	*ip = XFS_I(inode);
729 
730 	/*
731 	 * If this unlinked inode is in the middle of recovery, don't
732 	 * drop the inode just yet; log recovery will take care of
733 	 * that.  See the comment for this inode flag.
734 	 */
735 	if (ip->i_flags & XFS_IRECOVERY) {
736 		ASSERT(ip->i_mount->m_log->l_flags & XLOG_RECOVERY_NEEDED);
737 		return 0;
738 	}
739 
740 	return generic_drop_inode(inode);
741 }
742 
743 static void
744 xfs_mount_free(
745 	struct xfs_mount	*mp)
746 {
747 	kfree(mp->m_rtname);
748 	kfree(mp->m_logname);
749 	kmem_free(mp);
750 }
751 
752 STATIC int
753 xfs_fs_sync_fs(
754 	struct super_block	*sb,
755 	int			wait)
756 {
757 	struct xfs_mount	*mp = XFS_M(sb);
758 
759 	/*
760 	 * Doing anything during the async pass would be counterproductive.
761 	 */
762 	if (!wait)
763 		return 0;
764 
765 	xfs_log_force(mp, XFS_LOG_SYNC);
766 	if (laptop_mode) {
767 		/*
768 		 * The disk must be active because we're syncing.
769 		 * We schedule log work now (now that the disk is
770 		 * active) instead of later (when it might not be).
771 		 */
772 		flush_delayed_work(&mp->m_log->l_work);
773 	}
774 
775 	return 0;
776 }
777 
778 STATIC int
779 xfs_fs_statfs(
780 	struct dentry		*dentry,
781 	struct kstatfs		*statp)
782 {
783 	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
784 	xfs_sb_t		*sbp = &mp->m_sb;
785 	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
786 	uint64_t		fakeinos, id;
787 	uint64_t		icount;
788 	uint64_t		ifree;
789 	uint64_t		fdblocks;
790 	xfs_extlen_t		lsize;
791 	int64_t			ffree;
792 
793 	statp->f_type = XFS_SUPER_MAGIC;
794 	statp->f_namelen = MAXNAMELEN - 1;
795 
796 	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
797 	statp->f_fsid.val[0] = (u32)id;
798 	statp->f_fsid.val[1] = (u32)(id >> 32);
799 
800 	icount = percpu_counter_sum(&mp->m_icount);
801 	ifree = percpu_counter_sum(&mp->m_ifree);
802 	fdblocks = percpu_counter_sum(&mp->m_fdblocks);
803 
804 	spin_lock(&mp->m_sb_lock);
805 	statp->f_bsize = sbp->sb_blocksize;
806 	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
807 	statp->f_blocks = sbp->sb_dblocks - lsize;
808 	spin_unlock(&mp->m_sb_lock);
809 
810 	/* make sure statp->f_bfree does not underflow */
811 	statp->f_bfree = max_t(int64_t, fdblocks - mp->m_alloc_set_aside, 0);
812 	statp->f_bavail = statp->f_bfree;
813 
814 	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
815 	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
816 	if (M_IGEO(mp)->maxicount)
817 		statp->f_files = min_t(typeof(statp->f_files),
818 					statp->f_files,
819 					M_IGEO(mp)->maxicount);
820 
821 	/* If sb_icount overshot maxicount, report actual allocation */
822 	statp->f_files = max_t(typeof(statp->f_files),
823 					statp->f_files,
824 					sbp->sb_icount);
825 
826 	/* make sure statp->f_ffree does not underflow */
827 	ffree = statp->f_files - (icount - ifree);
828 	statp->f_ffree = max_t(int64_t, ffree, 0);
829 
830 
831 	if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
832 	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
833 			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
834 		xfs_qm_statvfs(ip, statp);
835 
836 	if (XFS_IS_REALTIME_MOUNT(mp) &&
837 	    (ip->i_d.di_flags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
838 		statp->f_blocks = sbp->sb_rblocks;
839 		statp->f_bavail = statp->f_bfree =
840 			sbp->sb_frextents * sbp->sb_rextsize;
841 	}
842 
843 	return 0;
844 }
845 
846 STATIC void
847 xfs_save_resvblks(struct xfs_mount *mp)
848 {
849 	uint64_t resblks = 0;
850 
851 	mp->m_resblks_save = mp->m_resblks;
852 	xfs_reserve_blocks(mp, &resblks, NULL);
853 }
854 
855 STATIC void
856 xfs_restore_resvblks(struct xfs_mount *mp)
857 {
858 	uint64_t resblks;
859 
860 	if (mp->m_resblks_save) {
861 		resblks = mp->m_resblks_save;
862 		mp->m_resblks_save = 0;
863 	} else
864 		resblks = xfs_default_resblks(mp);
865 
866 	xfs_reserve_blocks(mp, &resblks, NULL);
867 }
868 
869 /*
870  * Trigger writeback of all the dirty metadata in the file system.
871  *
872  * This ensures that the metadata is written to its location on disk rather
873  * than just existing in transactions in the log. This means after a quiesce
874  * there is no log replay required to write the inodes to disk - this is the
875  * primary difference between a sync and a quiesce.
876  *
877  * We cancel log work early here to ensure all transactions the log worker may
878  * run have finished before we clean up and log the superblock and write an
879  * unmount record. The unfreeze process is responsible for restarting the log
880  * worker correctly.
881  */
882 void
883 xfs_quiesce_attr(
884 	struct xfs_mount	*mp)
885 {
886 	int	error = 0;
887 
888 	cancel_delayed_work_sync(&mp->m_log->l_work);
889 
890 	/* force the log to unpin objects from the now complete transactions */
891 	xfs_log_force(mp, XFS_LOG_SYNC);
892 
893 	/* reclaim inodes to do any IO before the freeze completes */
894 	xfs_reclaim_inodes(mp, 0);
895 	xfs_reclaim_inodes(mp, SYNC_WAIT);
896 
897 	/* Push the superblock and write an unmount record */
898 	error = xfs_log_sbcount(mp);
899 	if (error)
900 		xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
901 				"Frozen image may not be consistent.");
902 	xfs_log_quiesce(mp);
903 }
904 
905 /*
906  * Second stage of a freeze. The data is already frozen so we only
907  * need to take care of the metadata. Once that's done sync the superblock
908  * to the log to dirty it in case of a crash while frozen. This ensures that we
909  * will recover the unlinked inode lists on the next mount.
910  */
911 STATIC int
912 xfs_fs_freeze(
913 	struct super_block	*sb)
914 {
915 	struct xfs_mount	*mp = XFS_M(sb);
916 
917 	xfs_stop_block_reaping(mp);
918 	xfs_save_resvblks(mp);
919 	xfs_quiesce_attr(mp);
920 	return xfs_sync_sb(mp, true);
921 }
922 
923 STATIC int
924 xfs_fs_unfreeze(
925 	struct super_block	*sb)
926 {
927 	struct xfs_mount	*mp = XFS_M(sb);
928 
929 	xfs_restore_resvblks(mp);
930 	xfs_log_work_queue(mp);
931 	xfs_start_block_reaping(mp);
932 	return 0;
933 }
934 
935 /*
936  * This function fills in xfs_mount_t fields based on mount args.
937  * Note: the superblock _has_ now been read in.
938  */
939 STATIC int
940 xfs_finish_flags(
941 	struct xfs_mount	*mp)
942 {
943 	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);
944 
945 	/* Fail a mount where the logbuf is smaller than the log stripe */
946 	if (xfs_sb_version_haslogv2(&mp->m_sb)) {
947 		if (mp->m_logbsize <= 0 &&
948 		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
949 			mp->m_logbsize = mp->m_sb.sb_logsunit;
950 		} else if (mp->m_logbsize > 0 &&
951 			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
952 			xfs_warn(mp,
953 		"logbuf size must be greater than or equal to log stripe size");
954 			return -EINVAL;
955 		}
956 	} else {
957 		/* Fail a mount if the logbuf is larger than 32K */
958 		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
959 			xfs_warn(mp,
960 		"logbuf size for version 1 logs must be 16K or 32K");
961 			return -EINVAL;
962 		}
963 	}
964 
965 	/*
966 	 * V5 filesystems always use attr2 format for attributes.
967 	 */
968 	if (xfs_sb_version_hascrc(&mp->m_sb) &&
969 	    (mp->m_flags & XFS_MOUNT_NOATTR2)) {
970 		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
971 			     "attr2 is always enabled for V5 filesystems.");
972 		return -EINVAL;
973 	}
974 
975 	/*
976 	 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
977 	 * told by noattr2 to turn it off
978 	 */
979 	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
980 	    !(mp->m_flags & XFS_MOUNT_NOATTR2))
981 		mp->m_flags |= XFS_MOUNT_ATTR2;
982 
983 	/*
984 	 * prohibit r/w mounts of read-only filesystems
985 	 */
986 	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
987 		xfs_warn(mp,
988 			"cannot mount a read-only filesystem as read-write");
989 		return -EROFS;
990 	}
991 
992 	if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
993 	    (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE)) &&
994 	    !xfs_sb_version_has_pquotino(&mp->m_sb)) {
995 		xfs_warn(mp,
996 		  "Super block does not support project and group quota together");
997 		return -EINVAL;
998 	}
999 
1000 	return 0;
1001 }
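
/*
 * Illustrative example (sizes are hypothetical): on a version 2 log with a
 * 256k log stripe unit (sb_logsunit), mounting without a logbsize= option
 * bumps m_logbsize up to 256k, whereas an explicit logbsize=64k is smaller
 * than the stripe unit and the mount fails with -EINVAL.
 */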
1002 
1003 static int
1004 xfs_init_percpu_counters(
1005 	struct xfs_mount	*mp)
1006 {
1007 	int		error;
1008 
1009 	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
1010 	if (error)
1011 		return -ENOMEM;
1012 
1013 	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
1014 	if (error)
1015 		goto free_icount;
1016 
1017 	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
1018 	if (error)
1019 		goto free_ifree;
1020 
1021 	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
1022 	if (error)
1023 		goto free_fdblocks;
1024 
1025 	return 0;
1026 
1027 free_fdblocks:
1028 	percpu_counter_destroy(&mp->m_fdblocks);
1029 free_ifree:
1030 	percpu_counter_destroy(&mp->m_ifree);
1031 free_icount:
1032 	percpu_counter_destroy(&mp->m_icount);
1033 	return -ENOMEM;
1034 }
1035 
1036 void
1037 xfs_reinit_percpu_counters(
1038 	struct xfs_mount	*mp)
1039 {
1040 	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
1041 	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
1042 	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
1043 }
1044 
1045 static void
1046 xfs_destroy_percpu_counters(
1047 	struct xfs_mount	*mp)
1048 {
1049 	percpu_counter_destroy(&mp->m_icount);
1050 	percpu_counter_destroy(&mp->m_ifree);
1051 	percpu_counter_destroy(&mp->m_fdblocks);
1052 	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
1053 	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
1054 	percpu_counter_destroy(&mp->m_delalloc_blks);
1055 }
1056 
1057 static void
1058 xfs_fs_put_super(
1059 	struct super_block	*sb)
1060 {
1061 	struct xfs_mount	*mp = XFS_M(sb);
1062 
1063 	/* if ->fill_super failed, we have no mount to tear down */
1064 	if (!sb->s_fs_info)
1065 		return;
1066 
1067 	xfs_notice(mp, "Unmounting Filesystem");
1068 	xfs_filestream_unmount(mp);
1069 	xfs_unmountfs(mp);
1070 
1071 	xfs_freesb(mp);
1072 	free_percpu(mp->m_stats.xs_stats);
1073 	xfs_destroy_percpu_counters(mp);
1074 	xfs_destroy_mount_workqueues(mp);
1075 	xfs_close_devices(mp);
1076 
1077 	sb->s_fs_info = NULL;
1078 	xfs_mount_free(mp);
1079 }
1080 
1081 static long
1082 xfs_fs_nr_cached_objects(
1083 	struct super_block	*sb,
1084 	struct shrink_control	*sc)
1085 {
1086 	/* Paranoia: catch incorrect calls during mount setup or teardown */
1087 	if (WARN_ON_ONCE(!sb->s_fs_info))
1088 		return 0;
1089 	return xfs_reclaim_inodes_count(XFS_M(sb));
1090 }
1091 
1092 static long
1093 xfs_fs_free_cached_objects(
1094 	struct super_block	*sb,
1095 	struct shrink_control	*sc)
1096 {
1097 	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
1098 }
1099 
1100 static const struct super_operations xfs_super_operations = {
1101 	.alloc_inode		= xfs_fs_alloc_inode,
1102 	.destroy_inode		= xfs_fs_destroy_inode,
1103 	.dirty_inode		= xfs_fs_dirty_inode,
1104 	.drop_inode		= xfs_fs_drop_inode,
1105 	.put_super		= xfs_fs_put_super,
1106 	.sync_fs		= xfs_fs_sync_fs,
1107 	.freeze_fs		= xfs_fs_freeze,
1108 	.unfreeze_fs		= xfs_fs_unfreeze,
1109 	.statfs			= xfs_fs_statfs,
1110 	.show_options		= xfs_fs_show_options,
1111 	.nr_cached_objects	= xfs_fs_nr_cached_objects,
1112 	.free_cached_objects	= xfs_fs_free_cached_objects,
1113 };
1114 
1115 static int
1116 suffix_kstrtoint(
1117 	const char	*s,
1118 	unsigned int	base,
1119 	int		*res)
1120 {
1121 	int		last, shift_left_factor = 0, _res;
1122 	char		*value;
1123 	int		ret = 0;
1124 
1125 	value = kstrdup(s, GFP_KERNEL);
1126 	if (!value)
1127 		return -ENOMEM;
1128 
1129 	last = strlen(value) - 1;
1130 	if (value[last] == 'K' || value[last] == 'k') {
1131 		shift_left_factor = 10;
1132 		value[last] = '\0';
1133 	}
1134 	if (value[last] == 'M' || value[last] == 'm') {
1135 		shift_left_factor = 20;
1136 		value[last] = '\0';
1137 	}
1138 	if (value[last] == 'G' || value[last] == 'g') {
1139 		shift_left_factor = 30;
1140 		value[last] = '\0';
1141 	}
1142 
1143 	if (kstrtoint(value, base, &_res))
1144 		ret = -EINVAL;
1145 	kfree(value);
1146 	*res = _res << shift_left_factor;
1147 	return ret;
1148 }
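
/*
 * Illustrative conversions performed by suffix_kstrtoint():
 *
 *	suffix_kstrtoint("64k", 10, &res)  ->  res = 64 << 10 = 65536
 *	suffix_kstrtoint("1m",  10, &res)  ->  res = 1 << 20  = 1048576
 *	suffix_kstrtoint("512", 10, &res)  ->  res = 512 (no suffix, no shift)
 *
 * Only a single trailing K/M/G (upper or lower case) suffix is recognised;
 * any string kstrtoint() cannot parse yields -EINVAL.
 */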
1149 
1150 /*
1151  * Set mount state from a mount option.
1152  *
1153  * NOTE: mp->m_super is NULL here!
1154  */
1155 static int
1156 xfs_fc_parse_param(
1157 	struct fs_context	*fc,
1158 	struct fs_parameter	*param)
1159 {
1160 	struct xfs_mount	*mp = fc->s_fs_info;
1161 	struct fs_parse_result	result;
1162 	int			size = 0;
1163 	int			opt;
1164 
1165 	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
1166 	if (opt < 0)
1167 		return opt;
1168 
1169 	switch (opt) {
1170 	case Opt_logbufs:
1171 		mp->m_logbufs = result.uint_32;
1172 		return 0;
1173 	case Opt_logbsize:
1174 		if (suffix_kstrtoint(param->string, 10, &mp->m_logbsize))
1175 			return -EINVAL;
1176 		return 0;
1177 	case Opt_logdev:
1178 		kfree(mp->m_logname);
1179 		mp->m_logname = kstrdup(param->string, GFP_KERNEL);
1180 		if (!mp->m_logname)
1181 			return -ENOMEM;
1182 		return 0;
1183 	case Opt_rtdev:
1184 		kfree(mp->m_rtname);
1185 		mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
1186 		if (!mp->m_rtname)
1187 			return -ENOMEM;
1188 		return 0;
1189 	case Opt_allocsize:
1190 		if (suffix_kstrtoint(param->string, 10, &size))
1191 			return -EINVAL;
1192 		mp->m_allocsize_log = ffs(size) - 1;
1193 		mp->m_flags |= XFS_MOUNT_ALLOCSIZE;
1194 		return 0;
1195 	case Opt_grpid:
1196 	case Opt_bsdgroups:
1197 		mp->m_flags |= XFS_MOUNT_GRPID;
1198 		return 0;
1199 	case Opt_nogrpid:
1200 	case Opt_sysvgroups:
1201 		mp->m_flags &= ~XFS_MOUNT_GRPID;
1202 		return 0;
1203 	case Opt_wsync:
1204 		mp->m_flags |= XFS_MOUNT_WSYNC;
1205 		return 0;
1206 	case Opt_norecovery:
1207 		mp->m_flags |= XFS_MOUNT_NORECOVERY;
1208 		return 0;
1209 	case Opt_noalign:
1210 		mp->m_flags |= XFS_MOUNT_NOALIGN;
1211 		return 0;
1212 	case Opt_swalloc:
1213 		mp->m_flags |= XFS_MOUNT_SWALLOC;
1214 		return 0;
1215 	case Opt_sunit:
1216 		mp->m_dalign = result.uint_32;
1217 		return 0;
1218 	case Opt_swidth:
1219 		mp->m_swidth = result.uint_32;
1220 		return 0;
1221 	case Opt_inode32:
1222 		mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
1223 		return 0;
1224 	case Opt_inode64:
1225 		mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
1226 		return 0;
1227 	case Opt_nouuid:
1228 		mp->m_flags |= XFS_MOUNT_NOUUID;
1229 		return 0;
1230 	case Opt_ikeep:
1231 		mp->m_flags |= XFS_MOUNT_IKEEP;
1232 		return 0;
1233 	case Opt_noikeep:
1234 		mp->m_flags &= ~XFS_MOUNT_IKEEP;
1235 		return 0;
1236 	case Opt_largeio:
1237 		mp->m_flags |= XFS_MOUNT_LARGEIO;
1238 		return 0;
1239 	case Opt_nolargeio:
1240 		mp->m_flags &= ~XFS_MOUNT_LARGEIO;
1241 		return 0;
1242 	case Opt_attr2:
1243 		mp->m_flags |= XFS_MOUNT_ATTR2;
1244 		return 0;
1245 	case Opt_noattr2:
1246 		mp->m_flags &= ~XFS_MOUNT_ATTR2;
1247 		mp->m_flags |= XFS_MOUNT_NOATTR2;
1248 		return 0;
1249 	case Opt_filestreams:
1250 		mp->m_flags |= XFS_MOUNT_FILESTREAMS;
1251 		return 0;
1252 	case Opt_noquota:
1253 		mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
1254 		mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
1255 		mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
1256 		return 0;
1257 	case Opt_quota:
1258 	case Opt_uquota:
1259 	case Opt_usrquota:
1260 		mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
1261 				 XFS_UQUOTA_ENFD);
1262 		return 0;
1263 	case Opt_qnoenforce:
1264 	case Opt_uqnoenforce:
1265 		mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
1266 		mp->m_qflags &= ~XFS_UQUOTA_ENFD;
1267 		return 0;
1268 	case Opt_pquota:
1269 	case Opt_prjquota:
1270 		mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
1271 				 XFS_PQUOTA_ENFD);
1272 		return 0;
1273 	case Opt_pqnoenforce:
1274 		mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
1275 		mp->m_qflags &= ~XFS_PQUOTA_ENFD;
1276 		return 0;
1277 	case Opt_gquota:
1278 	case Opt_grpquota:
1279 		mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
1280 				 XFS_GQUOTA_ENFD);
1281 		return 0;
1282 	case Opt_gqnoenforce:
1283 		mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
1284 		mp->m_qflags &= ~XFS_GQUOTA_ENFD;
1285 		return 0;
1286 	case Opt_discard:
1287 		mp->m_flags |= XFS_MOUNT_DISCARD;
1288 		return 0;
1289 	case Opt_nodiscard:
1290 		mp->m_flags &= ~XFS_MOUNT_DISCARD;
1291 		return 0;
1292 #ifdef CONFIG_FS_DAX
1293 	case Opt_dax:
1294 		xfs_mount_set_dax_mode(mp, XFS_DAX_ALWAYS);
1295 		return 0;
1296 	case Opt_dax_enum:
1297 		xfs_mount_set_dax_mode(mp, result.uint_32);
1298 		return 0;
1299 #endif
1300 	default:
1301 		xfs_warn(mp, "unknown mount option [%s].", param->key);
1302 		return -EINVAL;
1303 	}
1304 
1305 	return 0;
1306 }
1307 
1308 static int
1309 xfs_fc_validate_params(
1310 	struct xfs_mount	*mp)
1311 {
1312 	/*
1313 	 * no recovery flag requires a read-only mount
1314 	 */
1315 	if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
1316 	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
1317 		xfs_warn(mp, "no-recovery mounts must be read-only.");
1318 		return -EINVAL;
1319 	}
1320 
1321 	if ((mp->m_flags & XFS_MOUNT_NOALIGN) &&
1322 	    (mp->m_dalign || mp->m_swidth)) {
1323 		xfs_warn(mp,
1324 	"sunit and swidth options incompatible with the noalign option");
1325 		return -EINVAL;
1326 	}
1327 
1328 	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
1329 		xfs_warn(mp, "quota support not available in this kernel.");
1330 		return -EINVAL;
1331 	}
1332 
1333 	if ((mp->m_dalign && !mp->m_swidth) ||
1334 	    (!mp->m_dalign && mp->m_swidth)) {
1335 		xfs_warn(mp, "sunit and swidth must be specified together");
1336 		return -EINVAL;
1337 	}
1338 
1339 	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
1340 		xfs_warn(mp,
1341 	"stripe width (%d) must be a multiple of the stripe unit (%d)",
1342 			mp->m_swidth, mp->m_dalign);
1343 		return -EINVAL;
1344 	}
1345 
1346 	if (mp->m_logbufs != -1 &&
1347 	    mp->m_logbufs != 0 &&
1348 	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
1349 	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
1350 		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
1351 			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
1352 		return -EINVAL;
1353 	}
1354 
1355 	if (mp->m_logbsize != -1 &&
1356 	    mp->m_logbsize !=  0 &&
1357 	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
1358 	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
1359 	     !is_power_of_2(mp->m_logbsize))) {
1360 		xfs_warn(mp,
1361 			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
1362 			mp->m_logbsize);
1363 		return -EINVAL;
1364 	}
1365 
1366 	if ((mp->m_flags & XFS_MOUNT_ALLOCSIZE) &&
1367 	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
1368 	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
1369 		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
1370 			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
1371 		return -EINVAL;
1372 	}
1373 
1374 	return 0;
1375 }
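
/*
 * Illustrative example (values are hypothetical): "-o sunit=128,swidth=512"
 * passes the geometry checks above (both options given and 512 % 128 == 0),
 * while "-o sunit=128" alone or "-o sunit=128,swidth=200" is rejected with
 * -EINVAL before the mount proceeds.
 */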
1376 
1377 static int
1378 xfs_fc_fill_super(
1379 	struct super_block	*sb,
1380 	struct fs_context	*fc)
1381 {
1382 	struct xfs_mount	*mp = sb->s_fs_info;
1383 	struct inode		*root;
1384 	int			flags = 0, error;
1385 
1386 	mp->m_super = sb;
1387 
1388 	error = xfs_fc_validate_params(mp);
1389 	if (error)
1390 		goto out_free_names;
1391 
1392 	sb_min_blocksize(sb, BBSIZE);
1393 	sb->s_xattr = xfs_xattr_handlers;
1394 	sb->s_export_op = &xfs_export_operations;
1395 #ifdef CONFIG_XFS_QUOTA
1396 	sb->s_qcop = &xfs_quotactl_operations;
1397 	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
1398 #endif
1399 	sb->s_op = &xfs_super_operations;
1400 
1401 	/*
1402 	 * Delay mount work if the debug hook is set. This is debug
1403 	 * instrumentation to coordinate simulation of xfs mount failures with
1404 	 * VFS superblock operations.
1405 	 */
1406 	if (xfs_globals.mount_delay) {
1407 		xfs_notice(mp, "Delaying mount for %d seconds.",
1408 			xfs_globals.mount_delay);
1409 		msleep(xfs_globals.mount_delay * 1000);
1410 	}
1411 
1412 	if (fc->sb_flags & SB_SILENT)
1413 		flags |= XFS_MFSI_QUIET;
1414 
1415 	error = xfs_open_devices(mp);
1416 	if (error)
1417 		goto out_free_names;
1418 
1419 	error = xfs_init_mount_workqueues(mp);
1420 	if (error)
1421 		goto out_close_devices;
1422 
1423 	error = xfs_init_percpu_counters(mp);
1424 	if (error)
1425 		goto out_destroy_workqueues;
1426 
1427 	/* Allocate stats memory before we do operations that might use it */
1428 	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
1429 	if (!mp->m_stats.xs_stats) {
1430 		error = -ENOMEM;
1431 		goto out_destroy_counters;
1432 	}
1433 
1434 	error = xfs_readsb(mp, flags);
1435 	if (error)
1436 		goto out_free_stats;
1437 
1438 	error = xfs_finish_flags(mp);
1439 	if (error)
1440 		goto out_free_sb;
1441 
1442 	error = xfs_setup_devices(mp);
1443 	if (error)
1444 		goto out_free_sb;
1445 
1446 	/*
1447 	 * XFS block mappings use 54 bits to store the logical block offset.
1448 	 * This should suffice to handle the maximum file size that the VFS
1449 	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
1450 	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
1451 	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
1452 	 * to check this assertion.
1453 	 *
1454 	 * Avoid integer overflow by comparing the maximum bmbt offset to the
1455 	 * maximum pagecache offset in units of fs blocks.
1456 	 */
1457 	if (XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE) > XFS_MAX_FILEOFF) {
1458 		xfs_warn(mp,
1459 "MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
1460 			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
1461 			 XFS_MAX_FILEOFF);
1462 		error = -EINVAL;
1463 		goto out_free_sb;
1464 	}
1465 
1466 	error = xfs_filestream_mount(mp);
1467 	if (error)
1468 		goto out_free_sb;
1469 
1470 	/*
1471 	 * we must configure the block size in the superblock before we run the
1472 	 * full mount process as the mount process can lookup and cache inodes.
1473 	 */
1474 	sb->s_magic = XFS_SUPER_MAGIC;
1475 	sb->s_blocksize = mp->m_sb.sb_blocksize;
1476 	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1477 	sb->s_maxbytes = MAX_LFS_FILESIZE;
1478 	sb->s_max_links = XFS_MAXLINK;
1479 	sb->s_time_gran = 1;
1480 	sb->s_time_min = S32_MIN;
1481 	sb->s_time_max = S32_MAX;
1482 	sb->s_iflags |= SB_I_CGROUPWB;
1483 
1484 	set_posix_acl_flag(sb);
1485 
1486 	/* version 5 superblocks support inode version counters. */
1487 	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
1488 		sb->s_flags |= SB_I_VERSION;
1489 
1490 	if (mp->m_flags & XFS_MOUNT_DAX_ALWAYS) {
1491 		bool rtdev_is_dax = false, datadev_is_dax;
1492 
1493 		xfs_warn(mp,
1494 		"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
1495 
1496 		datadev_is_dax = bdev_dax_supported(mp->m_ddev_targp->bt_bdev,
1497 			sb->s_blocksize);
1498 		if (mp->m_rtdev_targp)
1499 			rtdev_is_dax = bdev_dax_supported(
1500 				mp->m_rtdev_targp->bt_bdev, sb->s_blocksize);
1501 		if (!rtdev_is_dax && !datadev_is_dax) {
1502 			xfs_alert(mp,
1503 			"DAX unsupported by block device. Turning off DAX.");
1504 			xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
1505 		}
1506 		if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1507 			xfs_alert(mp,
1508 		"DAX and reflink cannot be used together!");
1509 			error = -EINVAL;
1510 			goto out_filestream_unmount;
1511 		}
1512 	}
1513 
1514 	if (mp->m_flags & XFS_MOUNT_DISCARD) {
1515 		struct request_queue *q = bdev_get_queue(sb->s_bdev);
1516 
1517 		if (!blk_queue_discard(q)) {
1518 			xfs_warn(mp, "mounting with \"discard\" option, but "
1519 					"the device does not support discard");
1520 			mp->m_flags &= ~XFS_MOUNT_DISCARD;
1521 		}
1522 	}
1523 
1524 	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1525 		if (mp->m_sb.sb_rblocks) {
1526 			xfs_alert(mp,
1527 	"reflink not compatible with realtime device!");
1528 			error = -EINVAL;
1529 			goto out_filestream_unmount;
1530 		}
1531 
1532 		if (xfs_globals.always_cow) {
1533 			xfs_info(mp, "using DEBUG-only always_cow mode.");
1534 			mp->m_always_cow = true;
1535 		}
1536 	}
1537 
1538 	if (xfs_sb_version_hasrmapbt(&mp->m_sb) && mp->m_sb.sb_rblocks) {
1539 		xfs_alert(mp,
1540 	"reverse mapping btree not compatible with realtime device!");
1541 		error = -EINVAL;
1542 		goto out_filestream_unmount;
1543 	}
1544 
1545 	error = xfs_mountfs(mp);
1546 	if (error)
1547 		goto out_filestream_unmount;
1548 
1549 	root = igrab(VFS_I(mp->m_rootip));
1550 	if (!root) {
1551 		error = -ENOENT;
1552 		goto out_unmount;
1553 	}
1554 	sb->s_root = d_make_root(root);
1555 	if (!sb->s_root) {
1556 		error = -ENOMEM;
1557 		goto out_unmount;
1558 	}
1559 
1560 	return 0;
1561 
1562  out_filestream_unmount:
1563 	xfs_filestream_unmount(mp);
1564  out_free_sb:
1565 	xfs_freesb(mp);
1566  out_free_stats:
1567 	free_percpu(mp->m_stats.xs_stats);
1568  out_destroy_counters:
1569 	xfs_destroy_percpu_counters(mp);
1570  out_destroy_workqueues:
1571 	xfs_destroy_mount_workqueues(mp);
1572  out_close_devices:
1573 	xfs_close_devices(mp);
1574  out_free_names:
1575 	sb->s_fs_info = NULL;
1576 	xfs_mount_free(mp);
1577 	return error;
1578 
1579  out_unmount:
1580 	xfs_filestream_unmount(mp);
1581 	xfs_unmountfs(mp);
1582 	goto out_free_sb;
1583 }
1584 
1585 static int
1586 xfs_fc_get_tree(
1587 	struct fs_context	*fc)
1588 {
1589 	return get_tree_bdev(fc, xfs_fc_fill_super);
1590 }
1591 
1592 static int
1593 xfs_remount_rw(
1594 	struct xfs_mount	*mp)
1595 {
1596 	struct xfs_sb		*sbp = &mp->m_sb;
1597 	int error;
1598 
1599 	if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
1600 		xfs_warn(mp,
1601 			"ro->rw transition prohibited on norecovery mount");
1602 		return -EINVAL;
1603 	}
1604 
1605 	if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
1606 	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
1607 		xfs_warn(mp,
1608 	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
1609 			(sbp->sb_features_ro_compat &
1610 				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
1611 		return -EINVAL;
1612 	}
1613 
1614 	mp->m_flags &= ~XFS_MOUNT_RDONLY;
1615 
1616 	/*
1617 	 * If this is the first remount to writeable state we might have some
1618 	 * superblock changes to update.
1619 	 */
1620 	if (mp->m_update_sb) {
1621 		error = xfs_sync_sb(mp, false);
1622 		if (error) {
1623 			xfs_warn(mp, "failed to write sb changes");
1624 			return error;
1625 		}
1626 		mp->m_update_sb = false;
1627 	}
1628 
1629 	/*
1630 	 * Fill out the reserve pool if it is empty. Use the stashed value if
1631 	 * it is non-zero, otherwise go with the default.
1632 	 */
1633 	xfs_restore_resvblks(mp);
1634 	xfs_log_work_queue(mp);
1635 
1636 	/* Recover any CoW blocks that never got remapped. */
1637 	error = xfs_reflink_recover_cow(mp);
1638 	if (error) {
1639 		xfs_err(mp,
1640 			"Error %d recovering leftover CoW allocations.", error);
1641 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1642 		return error;
1643 	}
1644 	xfs_start_block_reaping(mp);
1645 
1646 	/* Create the per-AG metadata reservation pool. */
1647 	error = xfs_fs_reserve_ag_blocks(mp);
1648 	if (error && error != -ENOSPC)
1649 		return error;
1650 
1651 	return 0;
1652 }
1653 
1654 static int
1655 xfs_remount_ro(
1656 	struct xfs_mount	*mp)
1657 {
1658 	int error;
1659 
1660 	/*
1661 	 * Cancel background eofb scanning so it cannot race with the final
1662 	 * log force+buftarg wait and deadlock the remount.
1663 	 */
1664 	xfs_stop_block_reaping(mp);
1665 
1666 	/* Get rid of any leftover CoW reservations... */
1667 	error = xfs_icache_free_cowblocks(mp, NULL);
1668 	if (error) {
1669 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1670 		return error;
1671 	}
1672 
1673 	/* Free the per-AG metadata reservation pool. */
1674 	error = xfs_fs_unreserve_ag_blocks(mp);
1675 	if (error) {
1676 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1677 		return error;
1678 	}
1679 
1680 	/*
1681 	 * Before we sync the metadata, we need to free up the reserve block
1682 	 * pool so that the used block count in the superblock on disk is
1683 	 * correct at the end of the remount. Stash the current reserve pool
1684 	 * size so that if we get remounted rw, we can return it to the same
1685 	 * size.
1686 	 */
1687 	xfs_save_resvblks(mp);
1688 
1689 	xfs_quiesce_attr(mp);
1690 	mp->m_flags |= XFS_MOUNT_RDONLY;
1691 
1692 	return 0;
1693 }
1694 
1695 /*
1696  * Logically we would return an error here to prevent users from believing
1697  * they might have changed mount options using remount which can't be changed.
1698  *
1699  * But unfortunately mount(8) adds all options from mtab and fstab to the mount
1700  * arguments in some cases so we can't blindly reject options, but have to
1701  * check for each specified option if it actually differs from the currently
1702  * set option and only reject it if that's the case.
1703  *
1704  * Until that is implemented we return success for every remount request, and
1705  * silently ignore all options that we can't actually change.
1706  */
1707 static int
1708 xfs_fc_reconfigure(
1709 	struct fs_context *fc)
1710 {
1711 	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
1712 	struct xfs_mount        *new_mp = fc->s_fs_info;
1713 	xfs_sb_t		*sbp = &mp->m_sb;
1714 	int			flags = fc->sb_flags;
1715 	int			error;
1716 
1717 	error = xfs_fc_validate_params(new_mp);
1718 	if (error)
1719 		return error;
1720 
1721 	sync_filesystem(mp->m_super);
1722 
1723 	/* inode32 -> inode64 */
1724 	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
1725 	    !(new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
1726 		mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
1727 		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
1728 	}
1729 
1730 	/* inode64 -> inode32 */
1731 	if (!(mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
1732 	    (new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
1733 		mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
1734 		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
1735 	}
1736 
1737 	/* ro -> rw */
1738 	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(flags & SB_RDONLY)) {
1739 		error = xfs_remount_rw(mp);
1740 		if (error)
1741 			return error;
1742 	}
1743 
1744 	/* rw -> ro */
1745 	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (flags & SB_RDONLY)) {
1746 		error = xfs_remount_ro(mp);
1747 		if (error)
1748 			return error;
1749 	}
1750 
1751 	return 0;
1752 }
1753 
1754 static void xfs_fc_free(
1755 	struct fs_context	*fc)
1756 {
1757 	struct xfs_mount	*mp = fc->s_fs_info;
1758 
1759 	/*
1760 	 * mp is stored in the fs_context when it is initialized.
1761 	 * mp is transferred to the superblock on a successful mount,
1762 	 * but if an error occurs before the transfer we have to free
1763 	 * it here.
1764 	 */
1765 	if (mp)
1766 		xfs_mount_free(mp);
1767 }
1768 
1769 static const struct fs_context_operations xfs_context_ops = {
1770 	.parse_param = xfs_fc_parse_param,
1771 	.get_tree    = xfs_fc_get_tree,
1772 	.reconfigure = xfs_fc_reconfigure,
1773 	.free        = xfs_fc_free,
1774 };
1775 
1776 static int xfs_init_fs_context(
1777 	struct fs_context	*fc)
1778 {
1779 	struct xfs_mount	*mp;
1780 
1781 	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
1782 	if (!mp)
1783 		return -ENOMEM;
1784 
1785 	spin_lock_init(&mp->m_sb_lock);
1786 	spin_lock_init(&mp->m_agirotor_lock);
1787 	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
1788 	spin_lock_init(&mp->m_perag_lock);
1789 	mutex_init(&mp->m_growlock);
1790 	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
1791 	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
1792 	INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
1793 	INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
1794 	mp->m_kobj.kobject.kset = xfs_kset;
1795 	/*
1796 	 * We don't create the finobt per-ag space reservation until after log
1797 	 * recovery, so we must set this to true so that an ifree transaction
1798 	 * started during log recovery will not depend on space reservations
1799 	 * for finobt expansion.
1800 	 */
1801 	mp->m_finobt_nores = true;
1802 
1803 	/*
1804 	 * These can be overridden by the mount option parsing.
1805 	 */
1806 	mp->m_logbufs = -1;
1807 	mp->m_logbsize = -1;
1808 	mp->m_allocsize_log = 16; /* 64k */
1809 
1810 	/*
1811 	 * Copy binary VFS mount flags we are interested in.
1812 	 */
1813 	if (fc->sb_flags & SB_RDONLY)
1814 		mp->m_flags |= XFS_MOUNT_RDONLY;
1815 	if (fc->sb_flags & SB_DIRSYNC)
1816 		mp->m_flags |= XFS_MOUNT_DIRSYNC;
1817 	if (fc->sb_flags & SB_SYNCHRONOUS)
1818 		mp->m_flags |= XFS_MOUNT_WSYNC;
1819 
1820 	fc->s_fs_info = mp;
1821 	fc->ops = &xfs_context_ops;
1822 
1823 	return 0;
1824 }
1825 
1826 static struct file_system_type xfs_fs_type = {
1827 	.owner			= THIS_MODULE,
1828 	.name			= "xfs",
1829 	.init_fs_context	= xfs_init_fs_context,
1830 	.parameters		= xfs_fs_parameters,
1831 	.kill_sb		= kill_block_super,
1832 	.fs_flags		= FS_REQUIRES_DEV,
1833 };
1834 MODULE_ALIAS_FS("xfs");
1835 
1836 STATIC int __init
1837 xfs_init_zones(void)
1838 {
1839 	xfs_log_ticket_zone = kmem_cache_create("xfs_log_ticket",
1840 						sizeof(struct xlog_ticket),
1841 						0, 0, NULL);
1842 	if (!xfs_log_ticket_zone)
1843 		goto out;
1844 
1845 	xfs_bmap_free_item_zone = kmem_cache_create("xfs_bmap_free_item",
1846 					sizeof(struct xfs_extent_free_item),
1847 					0, 0, NULL);
1848 	if (!xfs_bmap_free_item_zone)
1849 		goto out_destroy_log_ticket_zone;
1850 
1851 	xfs_btree_cur_zone = kmem_cache_create("xfs_btree_cur",
1852 					       sizeof(struct xfs_btree_cur),
1853 					       0, 0, NULL);
1854 	if (!xfs_btree_cur_zone)
1855 		goto out_destroy_bmap_free_item_zone;
1856 
1857 	xfs_da_state_zone = kmem_cache_create("xfs_da_state",
1858 					      sizeof(struct xfs_da_state),
1859 					      0, 0, NULL);
1860 	if (!xfs_da_state_zone)
1861 		goto out_destroy_btree_cur_zone;
1862 
1863 	xfs_ifork_zone = kmem_cache_create("xfs_ifork",
1864 					   sizeof(struct xfs_ifork),
1865 					   0, 0, NULL);
1866 	if (!xfs_ifork_zone)
1867 		goto out_destroy_da_state_zone;
1868 
1869 	xfs_trans_zone = kmem_cache_create("xf_trans",
1870 					   sizeof(struct xfs_trans),
1871 					   0, 0, NULL);
1872 	if (!xfs_trans_zone)
1873 		goto out_destroy_ifork_zone;
1874 
1875 
1876 	/*
1877 	 * The size of the zone allocated buf log item is the maximum
1878 	 * size possible under XFS.  This wastes a little bit of memory,
1879 	 * but it is much faster.
1880 	 */
1881 	xfs_buf_item_zone = kmem_cache_create("xfs_buf_item",
1882 					      sizeof(struct xfs_buf_log_item),
1883 					      0, 0, NULL);
1884 	if (!xfs_buf_item_zone)
1885 		goto out_destroy_trans_zone;
1886 
1887 	xfs_efd_zone = kmem_cache_create("xfs_efd_item",
1888 					(sizeof(struct xfs_efd_log_item) +
1889 					(XFS_EFD_MAX_FAST_EXTENTS - 1) *
1890 					sizeof(struct xfs_extent)),
1891 					0, 0, NULL);
1892 	if (!xfs_efd_zone)
1893 		goto out_destroy_buf_item_zone;
1894 
1895 	xfs_efi_zone = kmem_cache_create("xfs_efi_item",
1896 					 (sizeof(struct xfs_efi_log_item) +
1897 					 (XFS_EFI_MAX_FAST_EXTENTS - 1) *
1898 					 sizeof(struct xfs_extent)),
1899 					 0, 0, NULL);
1900 	if (!xfs_efi_zone)
1901 		goto out_destroy_efd_zone;
1902 
1903 	xfs_inode_zone = kmem_cache_create("xfs_inode",
1904 					   sizeof(struct xfs_inode), 0,
1905 					   (SLAB_HWCACHE_ALIGN |
1906 					    SLAB_RECLAIM_ACCOUNT |
1907 					    SLAB_MEM_SPREAD | SLAB_ACCOUNT),
1908 					   xfs_fs_inode_init_once);
1909 	if (!xfs_inode_zone)
1910 		goto out_destroy_efi_zone;
1911 
1912 	xfs_ili_zone = kmem_cache_create("xfs_ili",
1913 					 sizeof(struct xfs_inode_log_item), 0,
1914 					 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
1915 					 NULL);
1916 	if (!xfs_ili_zone)
1917 		goto out_destroy_inode_zone;
1918 
1919 	xfs_icreate_zone = kmem_cache_create("xfs_icr",
1920 					     sizeof(struct xfs_icreate_item),
1921 					     0, 0, NULL);
1922 	if (!xfs_icreate_zone)
1923 		goto out_destroy_ili_zone;
1924 
1925 	xfs_rud_zone = kmem_cache_create("xfs_rud_item",
1926 					 sizeof(struct xfs_rud_log_item),
1927 					 0, 0, NULL);
1928 	if (!xfs_rud_zone)
1929 		goto out_destroy_icreate_zone;
1930 
1931 	xfs_rui_zone = kmem_cache_create("xfs_rui_item",
1932 			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
1933 			0, 0, NULL);
1934 	if (!xfs_rui_zone)
1935 		goto out_destroy_rud_zone;
1936 
1937 	xfs_cud_zone = kmem_cache_create("xfs_cud_item",
1938 					 sizeof(struct xfs_cud_log_item),
1939 					 0, 0, NULL);
1940 	if (!xfs_cud_zone)
1941 		goto out_destroy_rui_zone;
1942 
1943 	xfs_cui_zone = kmem_cache_create("xfs_cui_item",
1944 			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
1945 			0, 0, NULL);
1946 	if (!xfs_cui_zone)
1947 		goto out_destroy_cud_zone;
1948 
1949 	xfs_bud_zone = kmem_cache_create("xfs_bud_item",
1950 					 sizeof(struct xfs_bud_log_item),
1951 					 0, 0, NULL);
1952 	if (!xfs_bud_zone)
1953 		goto out_destroy_cui_zone;
1954 
1955 	xfs_bui_zone = kmem_cache_create("xfs_bui_item",
1956 			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
1957 			0, 0, NULL);
1958 	if (!xfs_bui_zone)
1959 		goto out_destroy_bud_zone;
1960 
1961 	return 0;
1962 
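	/* Error unwinding: destroy the caches in reverse order of creation. */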
1963  out_destroy_bud_zone:
1964 	kmem_cache_destroy(xfs_bud_zone);
1965  out_destroy_cui_zone:
1966 	kmem_cache_destroy(xfs_cui_zone);
1967  out_destroy_cud_zone:
1968 	kmem_cache_destroy(xfs_cud_zone);
1969  out_destroy_rui_zone:
1970 	kmem_cache_destroy(xfs_rui_zone);
1971  out_destroy_rud_zone:
1972 	kmem_cache_destroy(xfs_rud_zone);
1973  out_destroy_icreate_zone:
1974 	kmem_cache_destroy(xfs_icreate_zone);
1975  out_destroy_ili_zone:
1976 	kmem_cache_destroy(xfs_ili_zone);
1977  out_destroy_inode_zone:
1978 	kmem_cache_destroy(xfs_inode_zone);
1979  out_destroy_efi_zone:
1980 	kmem_cache_destroy(xfs_efi_zone);
1981  out_destroy_efd_zone:
1982 	kmem_cache_destroy(xfs_efd_zone);
1983  out_destroy_buf_item_zone:
1984 	kmem_cache_destroy(xfs_buf_item_zone);
1985  out_destroy_trans_zone:
1986 	kmem_cache_destroy(xfs_trans_zone);
1987  out_destroy_ifork_zone:
1988 	kmem_cache_destroy(xfs_ifork_zone);
1989  out_destroy_da_state_zone:
1990 	kmem_cache_destroy(xfs_da_state_zone);
1991  out_destroy_btree_cur_zone:
1992 	kmem_cache_destroy(xfs_btree_cur_zone);
1993  out_destroy_bmap_free_item_zone:
1994 	kmem_cache_destroy(xfs_bmap_free_item_zone);
1995  out_destroy_log_ticket_zone:
1996 	kmem_cache_destroy(xfs_log_ticket_zone);
1997  out:
1998 	return -ENOMEM;
1999 }
2000 
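/*
 * Tear down every cache created by xfs_init_zones().
 */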
2001 STATIC void
2002 xfs_destroy_zones(void)
2003 {
2004 	/*
2005 	 * Make sure all delayed RCU free callbacks are flushed before we
2006 	 * destroy the caches.
2007 	 */
2008 	rcu_barrier();
2009 	kmem_cache_destroy(xfs_bui_zone);
2010 	kmem_cache_destroy(xfs_bud_zone);
2011 	kmem_cache_destroy(xfs_cui_zone);
2012 	kmem_cache_destroy(xfs_cud_zone);
2013 	kmem_cache_destroy(xfs_rui_zone);
2014 	kmem_cache_destroy(xfs_rud_zone);
2015 	kmem_cache_destroy(xfs_icreate_zone);
2016 	kmem_cache_destroy(xfs_ili_zone);
2017 	kmem_cache_destroy(xfs_inode_zone);
2018 	kmem_cache_destroy(xfs_efi_zone);
2019 	kmem_cache_destroy(xfs_efd_zone);
2020 	kmem_cache_destroy(xfs_buf_item_zone);
2021 	kmem_cache_destroy(xfs_trans_zone);
2022 	kmem_cache_destroy(xfs_ifork_zone);
2023 	kmem_cache_destroy(xfs_da_state_zone);
2024 	kmem_cache_destroy(xfs_btree_cur_zone);
2025 	kmem_cache_destroy(xfs_bmap_free_item_zone);
2026 	kmem_cache_destroy(xfs_log_ticket_zone);
2027 }
2028 
2029 STATIC int __init
2030 xfs_init_workqueues(void)
2031 {
2032 	/*
2033 	 * The allocation workqueue can be used in memory reclaim situations
2034 	 * (writepage path), and parallelism is only limited by the number of
2035 	 * AGs in all the filesystems mounted. Hence use the default large
2036 	 * max_active value for this workqueue.
2037 	 */
2038 	xfs_alloc_wq = alloc_workqueue("xfsalloc",
2039 			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0);
2040 	if (!xfs_alloc_wq)
2041 		return -ENOMEM;
2042 
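	/*
	 * The discard workqueue services online discard of freed extents;
	 * WQ_UNBOUND is presumably used so these potentially slow requests
	 * are not pinned to the submitting CPU.
	 */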
2043 	xfs_discard_wq = alloc_workqueue("xfsdiscard", WQ_UNBOUND, 0);
2044 	if (!xfs_discard_wq)
2045 		goto out_free_alloc_wq;
2046 
2047 	return 0;
2048 out_free_alloc_wq:
2049 	destroy_workqueue(xfs_alloc_wq);
2050 	return -ENOMEM;
2051 }
2052 
2053 STATIC void
2054 xfs_destroy_workqueues(void)
2055 {
2056 	destroy_workqueue(xfs_discard_wq);
2057 	destroy_workqueue(xfs_alloc_wq);
2058 }
2059 
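/*
 * Module initialisation: verify on-disk structure sizes, then bring up the
 * internal subsystems (slab caches, workqueues, MRU cache, buffer cache,
 * procfs/sysctl/sysfs interfaces, quota support) before registering the
 * filesystem type.  Every step unwinds all previous steps on failure.
 */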
2060 STATIC int __init
2061 init_xfs_fs(void)
2062 {
2063 	int			error;
2064 
2065 	xfs_check_ondisk_structs();
2066 
2067 	printk(KERN_INFO XFS_VERSION_STRING " with "
2068 			 XFS_BUILD_OPTIONS " enabled\n");
2069 
2070 	xfs_dir_startup();
2071 
2072 	error = xfs_init_zones();
2073 	if (error)
2074 		goto out;
2075 
2076 	error = xfs_init_workqueues();
2077 	if (error)
2078 		goto out_destroy_zones;
2079 
2080 	error = xfs_mru_cache_init();
2081 	if (error)
2082 		goto out_destroy_wq;
2083 
2084 	error = xfs_buf_init();
2085 	if (error)
2086 		goto out_mru_cache_uninit;
2087 
2088 	error = xfs_init_procfs();
2089 	if (error)
2090 		goto out_buf_terminate;
2091 
2092 	error = xfs_sysctl_register();
2093 	if (error)
2094 		goto out_cleanup_procfs;
2095 
2096 	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
2097 	if (!xfs_kset) {
2098 		error = -ENOMEM;
2099 		goto out_sysctl_unregister;
2100 	}
2101 
2102 	xfsstats.xs_kobj.kobject.kset = xfs_kset;
2103 
2104 	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
2105 	if (!xfsstats.xs_stats) {
2106 		error = -ENOMEM;
2107 		goto out_kset_unregister;
2108 	}
2109 
2110 	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
2111 			       "stats");
2112 	if (error)
2113 		goto out_free_stats;
2114 
2115 #ifdef DEBUG
2116 	xfs_dbg_kobj.kobject.kset = xfs_kset;
2117 	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
2118 	if (error)
2119 		goto out_remove_stats_kobj;
2120 #endif
2121 
2122 	error = xfs_qm_init();
2123 	if (error)
2124 		goto out_remove_dbg_kobj;
2125 
2126 	error = register_filesystem(&xfs_fs_type);
2127 	if (error)
2128 		goto out_qm_exit;
2129 	return 0;
2130 
2131  out_qm_exit:
2132 	xfs_qm_exit();
2133  out_remove_dbg_kobj:
2134 #ifdef DEBUG
2135 	xfs_sysfs_del(&xfs_dbg_kobj);
2136  out_remove_stats_kobj:
2137 #endif
2138 	xfs_sysfs_del(&xfsstats.xs_kobj);
2139  out_free_stats:
2140 	free_percpu(xfsstats.xs_stats);
2141  out_kset_unregister:
2142 	kset_unregister(xfs_kset);
2143  out_sysctl_unregister:
2144 	xfs_sysctl_unregister();
2145  out_cleanup_procfs:
2146 	xfs_cleanup_procfs();
2147  out_buf_terminate:
2148 	xfs_buf_terminate();
2149  out_mru_cache_uninit:
2150 	xfs_mru_cache_uninit();
2151  out_destroy_wq:
2152 	xfs_destroy_workqueues();
2153  out_destroy_zones:
2154 	xfs_destroy_zones();
2155  out:
2156 	return error;
2157 }
2158 
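/*
 * Module teardown: undo what init_xfs_fs() set up, in roughly the reverse
 * order, and finally release the UUID table populated by mounts.
 */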
2159 STATIC void __exit
2160 exit_xfs_fs(void)
2161 {
2162 	xfs_qm_exit();
2163 	unregister_filesystem(&xfs_fs_type);
2164 #ifdef DEBUG
2165 	xfs_sysfs_del(&xfs_dbg_kobj);
2166 #endif
2167 	xfs_sysfs_del(&xfsstats.xs_kobj);
2168 	free_percpu(xfsstats.xs_stats);
2169 	kset_unregister(xfs_kset);
2170 	xfs_sysctl_unregister();
2171 	xfs_cleanup_procfs();
2172 	xfs_buf_terminate();
2173 	xfs_mru_cache_uninit();
2174 	xfs_destroy_workqueues();
2175 	xfs_destroy_zones();
2176 	xfs_uuid_table_free();
2177 }
2178 
2179 module_init(init_xfs_fs);
2180 module_exit(exit_xfs_fs);
2181 
2182 MODULE_AUTHOR("Silicon Graphics, Inc.");
2183 MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
2184 MODULE_LICENSE("GPL");
2185