xref: /linux/fs/xfs/xfs_super.c (revision 34f7c6e7d4396090692a09789db231e12cb4762b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 
7 #include "xfs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_sb.h"
13 #include "xfs_mount.h"
14 #include "xfs_inode.h"
15 #include "xfs_btree.h"
16 #include "xfs_bmap.h"
17 #include "xfs_alloc.h"
18 #include "xfs_fsops.h"
19 #include "xfs_trans.h"
20 #include "xfs_buf_item.h"
21 #include "xfs_log.h"
22 #include "xfs_log_priv.h"
23 #include "xfs_dir2.h"
24 #include "xfs_extfree_item.h"
25 #include "xfs_mru_cache.h"
26 #include "xfs_inode_item.h"
27 #include "xfs_icache.h"
28 #include "xfs_trace.h"
29 #include "xfs_icreate_item.h"
30 #include "xfs_filestream.h"
31 #include "xfs_quota.h"
32 #include "xfs_sysfs.h"
33 #include "xfs_ondisk.h"
34 #include "xfs_rmap_item.h"
35 #include "xfs_refcount_item.h"
36 #include "xfs_bmap_item.h"
37 #include "xfs_reflink.h"
38 #include "xfs_pwork.h"
39 #include "xfs_ag.h"
40 #include "xfs_defer.h"
41 
42 #include <linux/magic.h>
43 #include <linux/fs_context.h>
44 #include <linux/fs_parser.h>
45 
46 static const struct super_operations xfs_super_operations;
47 
48 static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
49 #ifdef DEBUG
50 static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
51 #endif
52 
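/*
 * Global list of live mounts, walked by the CPU-dead hotplug handler so
 * that per-cpu data structures tied to each mount can be cleaned up when
 * a CPU goes offline.
 */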
53 #ifdef CONFIG_HOTPLUG_CPU
54 static LIST_HEAD(xfs_mount_list);
55 static DEFINE_SPINLOCK(xfs_mount_list_lock);
56 
57 static inline void xfs_mount_list_add(struct xfs_mount *mp)
58 {
59 	spin_lock(&xfs_mount_list_lock);
60 	list_add(&mp->m_mount_list, &xfs_mount_list);
61 	spin_unlock(&xfs_mount_list_lock);
62 }
63 
64 static inline void xfs_mount_list_del(struct xfs_mount *mp)
65 {
66 	spin_lock(&xfs_mount_list_lock);
67 	list_del(&mp->m_mount_list);
68 	spin_unlock(&xfs_mount_list_lock);
69 }
70 #else /* !CONFIG_HOTPLUG_CPU */
71 static inline void xfs_mount_list_add(struct xfs_mount *mp) {}
72 static inline void xfs_mount_list_del(struct xfs_mount *mp) {}
73 #endif
74 
75 enum xfs_dax_mode {
76 	XFS_DAX_INODE = 0,
77 	XFS_DAX_ALWAYS = 1,
78 	XFS_DAX_NEVER = 2,
79 };
80 
81 static void
82 xfs_mount_set_dax_mode(
83 	struct xfs_mount	*mp,
84 	enum xfs_dax_mode	mode)
85 {
86 	switch (mode) {
87 	case XFS_DAX_INODE:
88 		mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER);
89 		break;
90 	case XFS_DAX_ALWAYS:
91 		mp->m_features |= XFS_FEAT_DAX_ALWAYS;
92 		mp->m_features &= ~XFS_FEAT_DAX_NEVER;
93 		break;
94 	case XFS_DAX_NEVER:
95 		mp->m_features |= XFS_FEAT_DAX_NEVER;
96 		mp->m_features &= ~XFS_FEAT_DAX_ALWAYS;
97 		break;
98 	}
99 }
100 
101 static const struct constant_table dax_param_enums[] = {
102 	{"inode",	XFS_DAX_INODE },
103 	{"always",	XFS_DAX_ALWAYS },
104 	{"never",	XFS_DAX_NEVER },
105 	{}
106 };
107 
108 /*
109  * Table-driven mount option parser.
110  */
111 enum {
112 	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
113 	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
114 	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
115 	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
116 	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
117 	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
118 	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
119 	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
120 	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
121 };
122 
123 static const struct fs_parameter_spec xfs_fs_parameters[] = {
124 	fsparam_u32("logbufs",		Opt_logbufs),
125 	fsparam_string("logbsize",	Opt_logbsize),
126 	fsparam_string("logdev",	Opt_logdev),
127 	fsparam_string("rtdev",		Opt_rtdev),
128 	fsparam_flag("wsync",		Opt_wsync),
129 	fsparam_flag("noalign",		Opt_noalign),
130 	fsparam_flag("swalloc",		Opt_swalloc),
131 	fsparam_u32("sunit",		Opt_sunit),
132 	fsparam_u32("swidth",		Opt_swidth),
133 	fsparam_flag("nouuid",		Opt_nouuid),
134 	fsparam_flag("grpid",		Opt_grpid),
135 	fsparam_flag("nogrpid",		Opt_nogrpid),
136 	fsparam_flag("bsdgroups",	Opt_bsdgroups),
137 	fsparam_flag("sysvgroups",	Opt_sysvgroups),
138 	fsparam_string("allocsize",	Opt_allocsize),
139 	fsparam_flag("norecovery",	Opt_norecovery),
140 	fsparam_flag("inode64",		Opt_inode64),
141 	fsparam_flag("inode32",		Opt_inode32),
142 	fsparam_flag("ikeep",		Opt_ikeep),
143 	fsparam_flag("noikeep",		Opt_noikeep),
144 	fsparam_flag("largeio",		Opt_largeio),
145 	fsparam_flag("nolargeio",	Opt_nolargeio),
146 	fsparam_flag("attr2",		Opt_attr2),
147 	fsparam_flag("noattr2",		Opt_noattr2),
148 	fsparam_flag("filestreams",	Opt_filestreams),
149 	fsparam_flag("quota",		Opt_quota),
150 	fsparam_flag("noquota",		Opt_noquota),
151 	fsparam_flag("usrquota",	Opt_usrquota),
152 	fsparam_flag("grpquota",	Opt_grpquota),
153 	fsparam_flag("prjquota",	Opt_prjquota),
154 	fsparam_flag("uquota",		Opt_uquota),
155 	fsparam_flag("gquota",		Opt_gquota),
156 	fsparam_flag("pquota",		Opt_pquota),
157 	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
158 	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
159 	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
160 	fsparam_flag("qnoenforce",	Opt_qnoenforce),
161 	fsparam_flag("discard",		Opt_discard),
162 	fsparam_flag("nodiscard",	Opt_nodiscard),
163 	fsparam_flag("dax",		Opt_dax),
164 	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
165 	{}
166 };
167 
168 struct proc_xfs_info {
169 	uint64_t	flag;
170 	char		*str;
171 };
172 
173 static int
174 xfs_fs_show_options(
175 	struct seq_file		*m,
176 	struct dentry		*root)
177 {
178 	static struct proc_xfs_info xfs_info_set[] = {
179 		/* the few simple ones we can get from the mount struct */
180 		{ XFS_FEAT_IKEEP,		",ikeep" },
181 		{ XFS_FEAT_WSYNC,		",wsync" },
182 		{ XFS_FEAT_NOALIGN,		",noalign" },
183 		{ XFS_FEAT_SWALLOC,		",swalloc" },
184 		{ XFS_FEAT_NOUUID,		",nouuid" },
185 		{ XFS_FEAT_NORECOVERY,		",norecovery" },
186 		{ XFS_FEAT_ATTR2,		",attr2" },
187 		{ XFS_FEAT_FILESTREAMS,		",filestreams" },
188 		{ XFS_FEAT_GRPID,		",grpid" },
189 		{ XFS_FEAT_DISCARD,		",discard" },
190 		{ XFS_FEAT_LARGE_IOSIZE,	",largeio" },
191 		{ XFS_FEAT_DAX_ALWAYS,		",dax=always" },
192 		{ XFS_FEAT_DAX_NEVER,		",dax=never" },
193 		{ 0, NULL }
194 	};
195 	struct xfs_mount	*mp = XFS_M(root->d_sb);
196 	struct proc_xfs_info	*xfs_infop;
197 
198 	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
199 		if (mp->m_features & xfs_infop->flag)
200 			seq_puts(m, xfs_infop->str);
201 	}
202 
203 	seq_printf(m, ",inode%d", xfs_has_small_inums(mp) ? 32 : 64);
204 
205 	if (xfs_has_allocsize(mp))
206 		seq_printf(m, ",allocsize=%dk",
207 			   (1 << mp->m_allocsize_log) >> 10);
208 
209 	if (mp->m_logbufs > 0)
210 		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
211 	if (mp->m_logbsize > 0)
212 		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);
213 
214 	if (mp->m_logname)
215 		seq_show_option(m, "logdev", mp->m_logname);
216 	if (mp->m_rtname)
217 		seq_show_option(m, "rtdev", mp->m_rtname);
218 
219 	if (mp->m_dalign > 0)
220 		seq_printf(m, ",sunit=%d",
221 				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
222 	if (mp->m_swidth > 0)
223 		seq_printf(m, ",swidth=%d",
224 				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));
225 
226 	if (mp->m_qflags & XFS_UQUOTA_ENFD)
227 		seq_puts(m, ",usrquota");
228 	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
229 		seq_puts(m, ",uqnoenforce");
230 
231 	if (mp->m_qflags & XFS_PQUOTA_ENFD)
232 		seq_puts(m, ",prjquota");
233 	else if (mp->m_qflags & XFS_PQUOTA_ACCT)
234 		seq_puts(m, ",pqnoenforce");
235 
236 	if (mp->m_qflags & XFS_GQUOTA_ENFD)
237 		seq_puts(m, ",grpquota");
238 	else if (mp->m_qflags & XFS_GQUOTA_ACCT)
239 		seq_puts(m, ",gqnoenforce");
240 
241 	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
242 		seq_puts(m, ",noquota");
243 
244 	return 0;
245 }
246 
247 /*
248  * Set parameters for inode allocation heuristics, taking into account
249  * filesystem size and inode32/inode64 mount options; i.e. specifically
250  * whether or not XFS_FEAT_SMALL_INUMS is set.
251  *
252  * Inode allocation patterns are altered only if inode32 is requested
253  * (XFS_FEAT_SMALL_INUMS), and the filesystem is sufficiently large.
254  * If altered, XFS_OPSTATE_INODE32 is set as well.
255  *
256  * An agcount independent of that in the mount structure is provided
257  * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
258  * to the potentially higher ag count.
259  *
260  * Returns the maximum AG index which may contain inodes.
261  */
262 xfs_agnumber_t
263 xfs_set_inode_alloc(
264 	struct xfs_mount *mp,
265 	xfs_agnumber_t	agcount)
266 {
267 	xfs_agnumber_t	index;
268 	xfs_agnumber_t	maxagi = 0;
269 	xfs_sb_t	*sbp = &mp->m_sb;
270 	xfs_agnumber_t	max_metadata;
271 	xfs_agino_t	agino;
272 	xfs_ino_t	ino;
273 
274 	/*
275 	 * Calculate how much should be reserved for inodes to meet
276 	 * the max inode percentage.  Used only for inode32.
277 	 */
278 	if (M_IGEO(mp)->maxicount) {
279 		uint64_t	icount;
280 
281 		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
282 		do_div(icount, 100);
283 		icount += sbp->sb_agblocks - 1;
284 		do_div(icount, sbp->sb_agblocks);
285 		max_metadata = icount;
286 	} else {
287 		max_metadata = agcount;
288 	}
289 
290 	/* Get the last possible inode in the filesystem */
291 	agino =	XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
292 	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
293 
294 	/*
295 	 * If user asked for no more than 32-bit inodes, and the fs is
296 	 * sufficiently large, set XFS_OPSTATE_INODE32 if we must alter
297 	 * the allocator to accommodate the request.
298 	 */
299 	if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32)
300 		set_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
301 	else
302 		clear_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
303 
304 	for (index = 0; index < agcount; index++) {
305 		struct xfs_perag	*pag;
306 
307 		ino = XFS_AGINO_TO_INO(mp, index, agino);
308 
309 		pag = xfs_perag_get(mp, index);
310 
311 		if (xfs_is_inode32(mp)) {
312 			if (ino > XFS_MAXINUMBER_32) {
313 				pag->pagi_inodeok = 0;
314 				pag->pagf_metadata = 0;
315 			} else {
316 				pag->pagi_inodeok = 1;
317 				maxagi++;
318 				if (index < max_metadata)
319 					pag->pagf_metadata = 1;
320 				else
321 					pag->pagf_metadata = 0;
322 			}
323 		} else {
324 			pag->pagi_inodeok = 1;
325 			pag->pagf_metadata = 0;
326 		}
327 
328 		xfs_perag_put(pag);
329 	}
330 
331 	return xfs_is_inode32(mp) ? maxagi : agcount;
332 }
333 
334 static int
335 xfs_setup_dax_always(
336 	struct xfs_mount	*mp)
337 {
338 	if (!mp->m_ddev_targp->bt_daxdev &&
339 	    (!mp->m_rtdev_targp || !mp->m_rtdev_targp->bt_daxdev)) {
340 		xfs_alert(mp,
341 			"DAX unsupported by block device. Turning off DAX.");
342 		goto disable_dax;
343 	}
344 
345 	if (mp->m_super->s_blocksize != PAGE_SIZE) {
346 		xfs_alert(mp,
347 			"DAX not supported for blocksize. Turning off DAX.");
348 		goto disable_dax;
349 	}
350 
351 	if (xfs_has_reflink(mp)) {
352 		xfs_alert(mp, "DAX and reflink cannot be used together!");
353 		return -EINVAL;
354 	}
355 
356 	xfs_warn(mp, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
357 	return 0;
358 
359 disable_dax:
360 	xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
361 	return 0;
362 }
363 
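/*
 * Open an external (log or realtime) block device by path, claiming it
 * exclusively for this mount.
 */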
364 STATIC int
365 xfs_blkdev_get(
366 	xfs_mount_t		*mp,
367 	const char		*name,
368 	struct block_device	**bdevp)
369 {
370 	int			error = 0;
371 
372 	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
373 				    mp);
374 	if (IS_ERR(*bdevp)) {
375 		error = PTR_ERR(*bdevp);
376 		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
377 	}
378 
379 	return error;
380 }
381 
382 STATIC void
383 xfs_blkdev_put(
384 	struct block_device	*bdev)
385 {
386 	if (bdev)
387 		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
388 }
389 
390 STATIC void
391 xfs_close_devices(
392 	struct xfs_mount	*mp)
393 {
394 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
395 		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
396 
397 		xfs_free_buftarg(mp->m_logdev_targp);
398 		xfs_blkdev_put(logdev);
399 	}
400 	if (mp->m_rtdev_targp) {
401 		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
402 
403 		xfs_free_buftarg(mp->m_rtdev_targp);
404 		xfs_blkdev_put(rtdev);
405 	}
406 	xfs_free_buftarg(mp->m_ddev_targp);
407 }
408 
409 /*
410  * The file system configurations are:
411  *	(1) device (partition) with data and internal log
412  *	(2) logical volume with data and log subvolumes.
413  *	(3) logical volume with data, log, and realtime subvolumes.
414  *
415  * We only have to handle opening the log and realtime volumes here if
416  * they are present.  The data subvolume has already been opened by
417  * get_sb_bdev() and is stored in sb->s_bdev.
418  */
419 STATIC int
420 xfs_open_devices(
421 	struct xfs_mount	*mp)
422 {
423 	struct block_device	*ddev = mp->m_super->s_bdev;
424 	struct block_device	*logdev = NULL, *rtdev = NULL;
425 	int			error;
426 
427 	/*
428 	 * Open real time and log devices - order is important.
429 	 */
430 	if (mp->m_logname) {
431 		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
432 		if (error)
433 			return error;
434 	}
435 
436 	if (mp->m_rtname) {
437 		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
438 		if (error)
439 			goto out_close_logdev;
440 
441 		if (rtdev == ddev || rtdev == logdev) {
442 			xfs_warn(mp,
443 	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
444 			error = -EINVAL;
445 			goto out_close_rtdev;
446 		}
447 	}
448 
449 	/*
450 	 * Setup xfs_mount buffer target pointers
451 	 */
452 	error = -ENOMEM;
453 	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev);
454 	if (!mp->m_ddev_targp)
455 		goto out_close_rtdev;
456 
457 	if (rtdev) {
458 		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev);
459 		if (!mp->m_rtdev_targp)
460 			goto out_free_ddev_targ;
461 	}
462 
463 	if (logdev && logdev != ddev) {
464 		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev);
465 		if (!mp->m_logdev_targp)
466 			goto out_free_rtdev_targ;
467 	} else {
468 		mp->m_logdev_targp = mp->m_ddev_targp;
469 	}
470 
471 	return 0;
472 
473  out_free_rtdev_targ:
474 	if (mp->m_rtdev_targp)
475 		xfs_free_buftarg(mp->m_rtdev_targp);
476  out_free_ddev_targ:
477 	xfs_free_buftarg(mp->m_ddev_targp);
478  out_close_rtdev:
479 	xfs_blkdev_put(rtdev);
480  out_close_logdev:
481 	if (logdev && logdev != ddev)
482 		xfs_blkdev_put(logdev);
483 	return error;
484 }
485 
486 /*
487  * Setup xfs_mount buffer target pointers based on superblock
488  */
489 STATIC int
490 xfs_setup_devices(
491 	struct xfs_mount	*mp)
492 {
493 	int			error;
494 
495 	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
496 	if (error)
497 		return error;
498 
499 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
500 		unsigned int	log_sector_size = BBSIZE;
501 
502 		if (xfs_has_sector(mp))
503 			log_sector_size = mp->m_sb.sb_logsectsize;
504 		error = xfs_setsize_buftarg(mp->m_logdev_targp,
505 					    log_sector_size);
506 		if (error)
507 			return error;
508 	}
509 	if (mp->m_rtdev_targp) {
510 		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
511 					    mp->m_sb.sb_sectsize);
512 		if (error)
513 			return error;
514 	}
515 
516 	return 0;
517 }
518 
519 STATIC int
520 xfs_init_mount_workqueues(
521 	struct xfs_mount	*mp)
522 {
523 	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
524 			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
525 			1, mp->m_super->s_id);
526 	if (!mp->m_buf_workqueue)
527 		goto out;
528 
529 	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
530 			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
531 			0, mp->m_super->s_id);
532 	if (!mp->m_unwritten_workqueue)
533 		goto out_destroy_buf;
534 
535 	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
536 			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
537 			0, mp->m_super->s_id);
538 	if (!mp->m_reclaim_workqueue)
539 		goto out_destroy_unwritten;
540 
541 	mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
542 			XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
543 			0, mp->m_super->s_id);
544 	if (!mp->m_blockgc_wq)
545 		goto out_destroy_reclaim;
546 
547 	mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s",
548 			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
549 			1, mp->m_super->s_id);
550 	if (!mp->m_inodegc_wq)
551 		goto out_destroy_blockgc;
552 
553 	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
554 			XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
555 	if (!mp->m_sync_workqueue)
556 		goto out_destroy_inodegc;
557 
558 	return 0;
559 
560 out_destroy_inodegc:
561 	destroy_workqueue(mp->m_inodegc_wq);
562 out_destroy_blockgc:
563 	destroy_workqueue(mp->m_blockgc_wq);
564 out_destroy_reclaim:
565 	destroy_workqueue(mp->m_reclaim_workqueue);
566 out_destroy_unwritten:
567 	destroy_workqueue(mp->m_unwritten_workqueue);
568 out_destroy_buf:
569 	destroy_workqueue(mp->m_buf_workqueue);
570 out:
571 	return -ENOMEM;
572 }
573 
574 STATIC void
575 xfs_destroy_mount_workqueues(
576 	struct xfs_mount	*mp)
577 {
578 	destroy_workqueue(mp->m_sync_workqueue);
579 	destroy_workqueue(mp->m_blockgc_wq);
580 	destroy_workqueue(mp->m_inodegc_wq);
581 	destroy_workqueue(mp->m_reclaim_workqueue);
582 	destroy_workqueue(mp->m_unwritten_workqueue);
583 	destroy_workqueue(mp->m_buf_workqueue);
584 }
585 
586 static void
587 xfs_flush_inodes_worker(
588 	struct work_struct	*work)
589 {
590 	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
591 						   m_flush_inodes_work);
592 	struct super_block	*sb = mp->m_super;
593 
594 	if (down_read_trylock(&sb->s_umount)) {
595 		sync_inodes_sb(sb);
596 		up_read(&sb->s_umount);
597 	}
598 }
599 
600 /*
601  * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
602  * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
603  * for IO to complete so that we effectively throttle multiple callers to the
604  * rate at which IO is completing.
605  */
606 void
607 xfs_flush_inodes(
608 	struct xfs_mount	*mp)
609 {
610 	/*
611 	 * If flush_work() returns true then that means we waited for a flush
612 	 * which was already in progress.  Don't bother running another scan.
613 	 */
614 	if (flush_work(&mp->m_flush_inodes_work))
615 		return;
616 
617 	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
618 	flush_work(&mp->m_flush_inodes_work);
619 }
620 
621 /* Catch misguided souls that try to use this interface on XFS */
622 STATIC struct inode *
623 xfs_fs_alloc_inode(
624 	struct super_block	*sb)
625 {
626 	BUG();
627 	return NULL;
628 }
629 
630 /*
631  * Now that the generic code is guaranteed not to be accessing
632  * the linux inode, we can inactivate and reclaim the inode.
633  */
634 STATIC void
635 xfs_fs_destroy_inode(
636 	struct inode		*inode)
637 {
638 	struct xfs_inode	*ip = XFS_I(inode);
639 
640 	trace_xfs_destroy_inode(ip);
641 
642 	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
643 	XFS_STATS_INC(ip->i_mount, vn_rele);
644 	XFS_STATS_INC(ip->i_mount, vn_remove);
645 	xfs_inode_mark_reclaimable(ip);
646 }
647 
648 static void
649 xfs_fs_dirty_inode(
650 	struct inode			*inode,
651 	int				flag)
652 {
653 	struct xfs_inode		*ip = XFS_I(inode);
654 	struct xfs_mount		*mp = ip->i_mount;
655 	struct xfs_trans		*tp;
656 
657 	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
658 		return;
659 	if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
660 		return;
661 
662 	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
663 		return;
664 	xfs_ilock(ip, XFS_ILOCK_EXCL);
665 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
666 	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
667 	xfs_trans_commit(tp);
668 }
669 
670 /*
671  * Slab object creation initialisation for the XFS inode.
672  * This covers only the idempotent fields in the XFS inode;
673  * all other fields need to be initialised on allocation
674  * from the slab. This avoids the need to repeatedly initialise
675  * fields in the xfs inode that are left in the initialised state
676  * when freeing the inode.
677  */
678 STATIC void
679 xfs_fs_inode_init_once(
680 	void			*inode)
681 {
682 	struct xfs_inode	*ip = inode;
683 
684 	memset(ip, 0, sizeof(struct xfs_inode));
685 
686 	/* vfs inode */
687 	inode_init_once(VFS_I(ip));
688 
689 	/* xfs inode */
690 	atomic_set(&ip->i_pincount, 0);
691 	spin_lock_init(&ip->i_flags_lock);
692 
693 	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
694 		     "xfsino", ip->i_ino);
695 }
696 
697 /*
698  * We do an unlocked check for XFS_IDONTCACHE here because we are already
699  * serialised against cache hits via the inode->i_lock and igrab() in
700  * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
701  * racing with us, and it avoids needing to grab a spinlock here for every inode
702  * we drop the final reference on.
703  */
704 STATIC int
705 xfs_fs_drop_inode(
706 	struct inode		*inode)
707 {
708 	struct xfs_inode	*ip = XFS_I(inode);
709 
710 	/*
711 	 * If this unlinked inode is in the middle of recovery, don't
712 	 * drop the inode just yet; log recovery will take care of
713 	 * that.  See the comment for this inode flag.
714 	 */
715 	if (ip->i_flags & XFS_IRECOVERY) {
716 		ASSERT(xlog_recovery_needed(ip->i_mount->m_log));
717 		return 0;
718 	}
719 
720 	return generic_drop_inode(inode);
721 }
722 
723 static void
724 xfs_mount_free(
725 	struct xfs_mount	*mp)
726 {
727 	kfree(mp->m_rtname);
728 	kfree(mp->m_logname);
729 	kmem_free(mp);
730 }
731 
732 STATIC int
733 xfs_fs_sync_fs(
734 	struct super_block	*sb,
735 	int			wait)
736 {
737 	struct xfs_mount	*mp = XFS_M(sb);
738 	int			error;
739 
740 	trace_xfs_fs_sync_fs(mp, __return_address);
741 
742 	/*
743 	 * Doing anything during the async pass would be counterproductive.
744 	 */
745 	if (!wait)
746 		return 0;
747 
748 	error = xfs_log_force(mp, XFS_LOG_SYNC);
749 	if (error)
750 		return error;
751 
752 	if (laptop_mode) {
753 		/*
754 		 * The disk must be active because we're syncing.
755 		 * We schedule log work now (now that the disk is
756 		 * active) instead of later (when it might not be).
757 		 */
758 		flush_delayed_work(&mp->m_log->l_work);
759 	}
760 
761 	/*
762 	 * If we are called with page faults frozen out, it means we are about
763 	 * to freeze the transaction subsystem. Take the opportunity to shut
764 	 * down inodegc because once SB_FREEZE_FS is set it's too late to
765 	 * prevent inactivation races with freeze. The fs doesn't get called
766 	 * again by the freezing process until after SB_FREEZE_FS has been set,
767 	 * so it's now or never.  Same logic applies to speculative allocation
768 	 * garbage collection.
769 	 *
770 	 * We don't care if this is a normal syncfs call that does this or
771 	 * freeze that does this - we can run this multiple times without issue
772 	 * and we won't race with a restart because a restart can only occur
773 	 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
774 	 */
775 	if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
776 		xfs_inodegc_stop(mp);
777 		xfs_blockgc_stop(mp);
778 	}
779 
780 	return 0;
781 }
782 
783 STATIC int
784 xfs_fs_statfs(
785 	struct dentry		*dentry,
786 	struct kstatfs		*statp)
787 {
788 	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
789 	xfs_sb_t		*sbp = &mp->m_sb;
790 	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
791 	uint64_t		fakeinos, id;
792 	uint64_t		icount;
793 	uint64_t		ifree;
794 	uint64_t		fdblocks;
795 	xfs_extlen_t		lsize;
796 	int64_t			ffree;
797 
798 	/* Wait for whatever inactivations are in progress. */
799 	xfs_inodegc_flush(mp);
800 
801 	statp->f_type = XFS_SUPER_MAGIC;
802 	statp->f_namelen = MAXNAMELEN - 1;
803 
804 	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
805 	statp->f_fsid = u64_to_fsid(id);
806 
807 	icount = percpu_counter_sum(&mp->m_icount);
808 	ifree = percpu_counter_sum(&mp->m_ifree);
809 	fdblocks = percpu_counter_sum(&mp->m_fdblocks);
810 
811 	spin_lock(&mp->m_sb_lock);
812 	statp->f_bsize = sbp->sb_blocksize;
813 	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
814 	statp->f_blocks = sbp->sb_dblocks - lsize;
815 	spin_unlock(&mp->m_sb_lock);
816 
817 	/* make sure statp->f_bfree does not underflow */
818 	statp->f_bfree = max_t(int64_t, 0,
819 				fdblocks - xfs_fdblocks_unavailable(mp));
820 	statp->f_bavail = statp->f_bfree;
821 
822 	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
823 	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
824 	if (M_IGEO(mp)->maxicount)
825 		statp->f_files = min_t(typeof(statp->f_files),
826 					statp->f_files,
827 					M_IGEO(mp)->maxicount);
828 
829 	/* If sb_icount overshot maxicount, report actual allocation */
830 	statp->f_files = max_t(typeof(statp->f_files),
831 					statp->f_files,
832 					sbp->sb_icount);
833 
834 	/* make sure statp->f_ffree does not underflow */
835 	ffree = statp->f_files - (icount - ifree);
836 	statp->f_ffree = max_t(int64_t, ffree, 0);
837 
838 
839 	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
840 	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
841 			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
842 		xfs_qm_statvfs(ip, statp);
843 
844 	if (XFS_IS_REALTIME_MOUNT(mp) &&
845 	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
846 		statp->f_blocks = sbp->sb_rblocks;
847 		statp->f_bavail = statp->f_bfree =
848 			sbp->sb_frextents * sbp->sb_rextsize;
849 	}
850 
851 	return 0;
852 }
853 
854 STATIC void
855 xfs_save_resvblks(struct xfs_mount *mp)
856 {
857 	uint64_t resblks = 0;
858 
859 	mp->m_resblks_save = mp->m_resblks;
860 	xfs_reserve_blocks(mp, &resblks, NULL);
861 }
862 
863 STATIC void
864 xfs_restore_resvblks(struct xfs_mount *mp)
865 {
866 	uint64_t resblks;
867 
868 	if (mp->m_resblks_save) {
869 		resblks = mp->m_resblks_save;
870 		mp->m_resblks_save = 0;
871 	} else
872 		resblks = xfs_default_resblks(mp);
873 
874 	xfs_reserve_blocks(mp, &resblks, NULL);
875 }
876 
877 /*
878  * Second stage of a freeze. The data is already frozen, so we only
879  * need to take care of the metadata. Once that's done, sync the superblock
880  * to the log to dirty it in case of a crash while frozen. This ensures that we
881  * will recover the unlinked inode lists on the next mount.
882  */
883 STATIC int
884 xfs_fs_freeze(
885 	struct super_block	*sb)
886 {
887 	struct xfs_mount	*mp = XFS_M(sb);
888 	unsigned int		flags;
889 	int			ret;
890 
891 	/*
892 	 * The filesystem is now frozen far enough that memory reclaim
893 	 * cannot safely operate on the filesystem. Hence we need to
894 	 * set a GFP_NOFS context here to avoid recursion deadlocks.
895 	 */
896 	flags = memalloc_nofs_save();
897 	xfs_save_resvblks(mp);
898 	ret = xfs_log_quiesce(mp);
899 	memalloc_nofs_restore(flags);
900 
901 	/*
902 	 * For read-write filesystems, we need to restart the inodegc on error
903 	 * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not
904 	 * going to be run to restart it now.  We are at SB_FREEZE_FS level
905 	 * here, so we can restart safely without racing with a stop in
906 	 * xfs_fs_sync_fs().
907 	 */
908 	if (ret && !xfs_is_readonly(mp)) {
909 		xfs_blockgc_start(mp);
910 		xfs_inodegc_start(mp);
911 	}
912 
913 	return ret;
914 }
915 
916 STATIC int
917 xfs_fs_unfreeze(
918 	struct super_block	*sb)
919 {
920 	struct xfs_mount	*mp = XFS_M(sb);
921 
922 	xfs_restore_resvblks(mp);
923 	xfs_log_work_queue(mp);
924 
925 	/*
926 	 * Don't reactivate the inodegc worker on a readonly filesystem because
927 	 * inodes are sent directly to reclaim.  Don't reactivate the blockgc
928 	 * worker because there are no speculative preallocations on a readonly
929 	 * filesystem.
930 	 */
931 	if (!xfs_is_readonly(mp)) {
932 		xfs_blockgc_start(mp);
933 		xfs_inodegc_start(mp);
934 	}
935 
936 	return 0;
937 }
938 
939 /*
940  * This function fills in xfs_mount_t fields based on mount args.
941  * Note: the superblock _has_ now been read in.
942  */
943 STATIC int
944 xfs_finish_flags(
945 	struct xfs_mount	*mp)
946 {
947 	/* Fail a mount where the logbuf is smaller than the log stripe */
948 	if (xfs_has_logv2(mp)) {
949 		if (mp->m_logbsize <= 0 &&
950 		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
951 			mp->m_logbsize = mp->m_sb.sb_logsunit;
952 		} else if (mp->m_logbsize > 0 &&
953 			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
954 			xfs_warn(mp,
955 		"logbuf size must be greater than or equal to log stripe size");
956 			return -EINVAL;
957 		}
958 	} else {
959 		/* Fail a mount if the logbuf is larger than 32K */
960 		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
961 			xfs_warn(mp,
962 		"logbuf size for version 1 logs must be 16K or 32K");
963 			return -EINVAL;
964 		}
965 	}
966 
967 	/*
968 	 * V5 filesystems always use attr2 format for attributes.
969 	 */
970 	if (xfs_has_crc(mp) && xfs_has_noattr2(mp)) {
971 		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
972 			     "attr2 is always enabled for V5 filesystems.");
973 		return -EINVAL;
974 	}
975 
976 	/*
977 	 * prohibit r/w mounts of read-only filesystems
978 	 */
979 	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !xfs_is_readonly(mp)) {
980 		xfs_warn(mp,
981 			"cannot mount a read-only filesystem as read-write");
982 		return -EROFS;
983 	}
984 
985 	if ((mp->m_qflags & XFS_GQUOTA_ACCT) &&
986 	    (mp->m_qflags & XFS_PQUOTA_ACCT) &&
987 	    !xfs_has_pquotino(mp)) {
988 		xfs_warn(mp,
989 		  "Super block does not support project and group quota together");
990 		return -EINVAL;
991 	}
992 
993 	return 0;
994 }
995 
996 static int
997 xfs_init_percpu_counters(
998 	struct xfs_mount	*mp)
999 {
1000 	int		error;
1001 
1002 	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
1003 	if (error)
1004 		return -ENOMEM;
1005 
1006 	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
1007 	if (error)
1008 		goto free_icount;
1009 
1010 	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
1011 	if (error)
1012 		goto free_ifree;
1013 
1014 	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
1015 	if (error)
1016 		goto free_fdblocks;
1017 
1018 	return 0;
1019 
1020 free_fdblocks:
1021 	percpu_counter_destroy(&mp->m_fdblocks);
1022 free_ifree:
1023 	percpu_counter_destroy(&mp->m_ifree);
1024 free_icount:
1025 	percpu_counter_destroy(&mp->m_icount);
1026 	return -ENOMEM;
1027 }
1028 
1029 void
1030 xfs_reinit_percpu_counters(
1031 	struct xfs_mount	*mp)
1032 {
1033 	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
1034 	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
1035 	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
1036 }
1037 
1038 static void
1039 xfs_destroy_percpu_counters(
1040 	struct xfs_mount	*mp)
1041 {
1042 	percpu_counter_destroy(&mp->m_icount);
1043 	percpu_counter_destroy(&mp->m_ifree);
1044 	percpu_counter_destroy(&mp->m_fdblocks);
1045 	ASSERT(xfs_is_shutdown(mp) ||
1046 	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
1047 	percpu_counter_destroy(&mp->m_delalloc_blks);
1048 }
1049 
1050 static int
1051 xfs_inodegc_init_percpu(
1052 	struct xfs_mount	*mp)
1053 {
1054 	struct xfs_inodegc	*gc;
1055 	int			cpu;
1056 
1057 	mp->m_inodegc = alloc_percpu(struct xfs_inodegc);
1058 	if (!mp->m_inodegc)
1059 		return -ENOMEM;
1060 
1061 	for_each_possible_cpu(cpu) {
1062 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
1063 		init_llist_head(&gc->list);
1064 		gc->items = 0;
1065 		INIT_WORK(&gc->work, xfs_inodegc_worker);
1066 	}
1067 	return 0;
1068 }
1069 
1070 static void
1071 xfs_inodegc_free_percpu(
1072 	struct xfs_mount	*mp)
1073 {
1074 	if (!mp->m_inodegc)
1075 		return;
1076 	free_percpu(mp->m_inodegc);
1077 }
1078 
1079 static void
1080 xfs_fs_put_super(
1081 	struct super_block	*sb)
1082 {
1083 	struct xfs_mount	*mp = XFS_M(sb);
1084 
1085 	/* if ->fill_super failed, we have no mount to tear down */
1086 	if (!sb->s_fs_info)
1087 		return;
1088 
1089 	xfs_notice(mp, "Unmounting Filesystem");
1090 	xfs_filestream_unmount(mp);
1091 	xfs_unmountfs(mp);
1092 
1093 	xfs_freesb(mp);
1094 	free_percpu(mp->m_stats.xs_stats);
1095 	xfs_mount_list_del(mp);
1096 	xfs_inodegc_free_percpu(mp);
1097 	xfs_destroy_percpu_counters(mp);
1098 	xfs_destroy_mount_workqueues(mp);
1099 	xfs_close_devices(mp);
1100 
1101 	sb->s_fs_info = NULL;
1102 	xfs_mount_free(mp);
1103 }
1104 
1105 static long
1106 xfs_fs_nr_cached_objects(
1107 	struct super_block	*sb,
1108 	struct shrink_control	*sc)
1109 {
1110 	/* Paranoia: catch incorrect calls during mount setup or teardown */
1111 	if (WARN_ON_ONCE(!sb->s_fs_info))
1112 		return 0;
1113 	return xfs_reclaim_inodes_count(XFS_M(sb));
1114 }
1115 
1116 static long
1117 xfs_fs_free_cached_objects(
1118 	struct super_block	*sb,
1119 	struct shrink_control	*sc)
1120 {
1121 	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
1122 }
1123 
1124 static const struct super_operations xfs_super_operations = {
1125 	.alloc_inode		= xfs_fs_alloc_inode,
1126 	.destroy_inode		= xfs_fs_destroy_inode,
1127 	.dirty_inode		= xfs_fs_dirty_inode,
1128 	.drop_inode		= xfs_fs_drop_inode,
1129 	.put_super		= xfs_fs_put_super,
1130 	.sync_fs		= xfs_fs_sync_fs,
1131 	.freeze_fs		= xfs_fs_freeze,
1132 	.unfreeze_fs		= xfs_fs_unfreeze,
1133 	.statfs			= xfs_fs_statfs,
1134 	.show_options		= xfs_fs_show_options,
1135 	.nr_cached_objects	= xfs_fs_nr_cached_objects,
1136 	.free_cached_objects	= xfs_fs_free_cached_objects,
1137 };
1138 
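/*
 * Parse a numeric option value with an optional k/m/g binary suffix,
 * e.g. "64k" -> 65536.  Used for the logbsize and allocsize options.
 */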
1139 static int
1140 suffix_kstrtoint(
1141 	const char	*s,
1142 	unsigned int	base,
1143 	int		*res)
1144 {
1145 	int		last, shift_left_factor = 0, _res;
1146 	char		*value;
1147 	int		ret = 0;
1148 
1149 	value = kstrdup(s, GFP_KERNEL);
1150 	if (!value)
1151 		return -ENOMEM;
1152 
1153 	last = strlen(value) - 1;
1154 	if (value[last] == 'K' || value[last] == 'k') {
1155 		shift_left_factor = 10;
1156 		value[last] = '\0';
1157 	}
1158 	if (value[last] == 'M' || value[last] == 'm') {
1159 		shift_left_factor = 20;
1160 		value[last] = '\0';
1161 	}
1162 	if (value[last] == 'G' || value[last] == 'g') {
1163 		shift_left_factor = 30;
1164 		value[last] = '\0';
1165 	}
1166 
1167 	if (kstrtoint(value, base, &_res))
1168 		ret = -EINVAL;
1169 	kfree(value);
1170 	*res = _res << shift_left_factor;
1171 	return ret;
1172 }
1173 
1174 static inline void
1175 xfs_fs_warn_deprecated(
1176 	struct fs_context	*fc,
1177 	struct fs_parameter	*param,
1178 	uint64_t		flag,
1179 	bool			value)
1180 {
1181 	/* Don't print the warning if reconfiguring and the current mount point
1182 	 * already had the flag set.
1183 	 */
1184 	if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
1185             !!(XFS_M(fc->root->d_sb)->m_features & flag) == value)
1186 		return;
1187 	xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
1188 }
1189 
1190 /*
1191  * Set mount state from a mount option.
1192  *
1193  * NOTE: mp->m_super is NULL here!
1194  */
1195 static int
1196 xfs_fs_parse_param(
1197 	struct fs_context	*fc,
1198 	struct fs_parameter	*param)
1199 {
1200 	struct xfs_mount	*parsing_mp = fc->s_fs_info;
1201 	struct fs_parse_result	result;
1202 	int			size = 0;
1203 	int			opt;
1204 
1205 	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
1206 	if (opt < 0)
1207 		return opt;
1208 
1209 	switch (opt) {
1210 	case Opt_logbufs:
1211 		parsing_mp->m_logbufs = result.uint_32;
1212 		return 0;
1213 	case Opt_logbsize:
1214 		if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
1215 			return -EINVAL;
1216 		return 0;
1217 	case Opt_logdev:
1218 		kfree(parsing_mp->m_logname);
1219 		parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
1220 		if (!parsing_mp->m_logname)
1221 			return -ENOMEM;
1222 		return 0;
1223 	case Opt_rtdev:
1224 		kfree(parsing_mp->m_rtname);
1225 		parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
1226 		if (!parsing_mp->m_rtname)
1227 			return -ENOMEM;
1228 		return 0;
1229 	case Opt_allocsize:
1230 		if (suffix_kstrtoint(param->string, 10, &size))
1231 			return -EINVAL;
1232 		parsing_mp->m_allocsize_log = ffs(size) - 1;
1233 		parsing_mp->m_features |= XFS_FEAT_ALLOCSIZE;
1234 		return 0;
1235 	case Opt_grpid:
1236 	case Opt_bsdgroups:
1237 		parsing_mp->m_features |= XFS_FEAT_GRPID;
1238 		return 0;
1239 	case Opt_nogrpid:
1240 	case Opt_sysvgroups:
1241 		parsing_mp->m_features &= ~XFS_FEAT_GRPID;
1242 		return 0;
1243 	case Opt_wsync:
1244 		parsing_mp->m_features |= XFS_FEAT_WSYNC;
1245 		return 0;
1246 	case Opt_norecovery:
1247 		parsing_mp->m_features |= XFS_FEAT_NORECOVERY;
1248 		return 0;
1249 	case Opt_noalign:
1250 		parsing_mp->m_features |= XFS_FEAT_NOALIGN;
1251 		return 0;
1252 	case Opt_swalloc:
1253 		parsing_mp->m_features |= XFS_FEAT_SWALLOC;
1254 		return 0;
1255 	case Opt_sunit:
1256 		parsing_mp->m_dalign = result.uint_32;
1257 		return 0;
1258 	case Opt_swidth:
1259 		parsing_mp->m_swidth = result.uint_32;
1260 		return 0;
1261 	case Opt_inode32:
1262 		parsing_mp->m_features |= XFS_FEAT_SMALL_INUMS;
1263 		return 0;
1264 	case Opt_inode64:
1265 		parsing_mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
1266 		return 0;
1267 	case Opt_nouuid:
1268 		parsing_mp->m_features |= XFS_FEAT_NOUUID;
1269 		return 0;
1270 	case Opt_largeio:
1271 		parsing_mp->m_features |= XFS_FEAT_LARGE_IOSIZE;
1272 		return 0;
1273 	case Opt_nolargeio:
1274 		parsing_mp->m_features &= ~XFS_FEAT_LARGE_IOSIZE;
1275 		return 0;
1276 	case Opt_filestreams:
1277 		parsing_mp->m_features |= XFS_FEAT_FILESTREAMS;
1278 		return 0;
1279 	case Opt_noquota:
1280 		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
1281 		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
1282 		return 0;
1283 	case Opt_quota:
1284 	case Opt_uquota:
1285 	case Opt_usrquota:
1286 		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD);
1287 		return 0;
1288 	case Opt_qnoenforce:
1289 	case Opt_uqnoenforce:
1290 		parsing_mp->m_qflags |= XFS_UQUOTA_ACCT;
1291 		parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
1292 		return 0;
1293 	case Opt_pquota:
1294 	case Opt_prjquota:
1295 		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD);
1296 		return 0;
1297 	case Opt_pqnoenforce:
1298 		parsing_mp->m_qflags |= XFS_PQUOTA_ACCT;
1299 		parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
1300 		return 0;
1301 	case Opt_gquota:
1302 	case Opt_grpquota:
1303 		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
1304 		return 0;
1305 	case Opt_gqnoenforce:
1306 		parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
1307 		parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
1308 		return 0;
1309 	case Opt_discard:
1310 		parsing_mp->m_features |= XFS_FEAT_DISCARD;
1311 		return 0;
1312 	case Opt_nodiscard:
1313 		parsing_mp->m_features &= ~XFS_FEAT_DISCARD;
1314 		return 0;
1315 #ifdef CONFIG_FS_DAX
1316 	case Opt_dax:
1317 		xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
1318 		return 0;
1319 	case Opt_dax_enum:
1320 		xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
1321 		return 0;
1322 #endif
1323 	/* Following mount options will be removed in September 2025 */
1324 	case Opt_ikeep:
1325 		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true);
1326 		parsing_mp->m_features |= XFS_FEAT_IKEEP;
1327 		return 0;
1328 	case Opt_noikeep:
1329 		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false);
1330 		parsing_mp->m_features &= ~XFS_FEAT_IKEEP;
1331 		return 0;
1332 	case Opt_attr2:
1333 		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true);
1334 		parsing_mp->m_features |= XFS_FEAT_ATTR2;
1335 		return 0;
1336 	case Opt_noattr2:
1337 		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true);
1338 		parsing_mp->m_features |= XFS_FEAT_NOATTR2;
1339 		return 0;
1340 	default:
1341 		xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
1342 		return -EINVAL;
1343 	}
1344 
1345 	return 0;
1346 }
1347 
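/*
 * Sanity check the combination of mount options once parsing is complete.
 * The superblock has not been read yet, so only option-derived state can
 * be validated here.
 */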
1348 static int
1349 xfs_fs_validate_params(
1350 	struct xfs_mount	*mp)
1351 {
1352 	/* No recovery flag requires a read-only mount */
1353 	if (xfs_has_norecovery(mp) && !xfs_is_readonly(mp)) {
1354 		xfs_warn(mp, "no-recovery mounts must be read-only.");
1355 		return -EINVAL;
1356 	}
1357 
1358 	/*
1359 	 * We have not read the superblock at this point, so only the attr2
1360 	 * mount option can set the attr2 feature by this stage.
1361 	 */
1362 	if (xfs_has_attr2(mp) && xfs_has_noattr2(mp)) {
1363 		xfs_warn(mp, "attr2 and noattr2 cannot both be specified.");
1364 		return -EINVAL;
1365 	}
1366 
1367 
1368 	if (xfs_has_noalign(mp) && (mp->m_dalign || mp->m_swidth)) {
1369 		xfs_warn(mp,
1370 	"sunit and swidth options incompatible with the noalign option");
1371 		return -EINVAL;
1372 	}
1373 
1374 	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
1375 		xfs_warn(mp, "quota support not available in this kernel.");
1376 		return -EINVAL;
1377 	}
1378 
1379 	if ((mp->m_dalign && !mp->m_swidth) ||
1380 	    (!mp->m_dalign && mp->m_swidth)) {
1381 		xfs_warn(mp, "sunit and swidth must be specified together");
1382 		return -EINVAL;
1383 	}
1384 
1385 	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
1386 		xfs_warn(mp,
1387 	"stripe width (%d) must be a multiple of the stripe unit (%d)",
1388 			mp->m_swidth, mp->m_dalign);
1389 		return -EINVAL;
1390 	}
1391 
1392 	if (mp->m_logbufs != -1 &&
1393 	    mp->m_logbufs != 0 &&
1394 	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
1395 	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
1396 		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
1397 			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
1398 		return -EINVAL;
1399 	}
1400 
1401 	if (mp->m_logbsize != -1 &&
1402 	    mp->m_logbsize !=  0 &&
1403 	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
1404 	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
1405 	     !is_power_of_2(mp->m_logbsize))) {
1406 		xfs_warn(mp,
1407 			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
1408 			mp->m_logbsize);
1409 		return -EINVAL;
1410 	}
1411 
1412 	if (xfs_has_allocsize(mp) &&
1413 	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
1414 	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
1415 		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
1416 			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
1417 		return -EINVAL;
1418 	}
1419 
1420 	return 0;
1421 }
1422 
1423 static int
1424 xfs_fs_fill_super(
1425 	struct super_block	*sb,
1426 	struct fs_context	*fc)
1427 {
1428 	struct xfs_mount	*mp = sb->s_fs_info;
1429 	struct inode		*root;
1430 	int			flags = 0, error;
1431 
1432 	mp->m_super = sb;
1433 
1434 	error = xfs_fs_validate_params(mp);
1435 	if (error)
1436 		goto out_free_names;
1437 
1438 	sb_min_blocksize(sb, BBSIZE);
1439 	sb->s_xattr = xfs_xattr_handlers;
1440 	sb->s_export_op = &xfs_export_operations;
1441 #ifdef CONFIG_XFS_QUOTA
1442 	sb->s_qcop = &xfs_quotactl_operations;
1443 	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
1444 #endif
1445 	sb->s_op = &xfs_super_operations;
1446 
1447 	/*
1448 	 * Delay mount work if the debug hook is set. This is debug
1449 	 * instrumentation to coordinate simulation of xfs mount failures with
1450 	 * VFS superblock operations.
1451 	 */
1452 	if (xfs_globals.mount_delay) {
1453 		xfs_notice(mp, "Delaying mount for %d seconds.",
1454 			xfs_globals.mount_delay);
1455 		msleep(xfs_globals.mount_delay * 1000);
1456 	}
1457 
1458 	if (fc->sb_flags & SB_SILENT)
1459 		flags |= XFS_MFSI_QUIET;
1460 
1461 	error = xfs_open_devices(mp);
1462 	if (error)
1463 		goto out_free_names;
1464 
1465 	error = xfs_init_mount_workqueues(mp);
1466 	if (error)
1467 		goto out_close_devices;
1468 
1469 	error = xfs_init_percpu_counters(mp);
1470 	if (error)
1471 		goto out_destroy_workqueues;
1472 
1473 	error = xfs_inodegc_init_percpu(mp);
1474 	if (error)
1475 		goto out_destroy_counters;
1476 
1477 	/*
1478 	 * All percpu data structures requiring cleanup when a cpu goes offline
1479 	 * must be allocated before adding this @mp to the cpu-dead handler's
1480 	 * mount list.
1481 	 */
1482 	xfs_mount_list_add(mp);
1483 
1484 	/* Allocate stats memory before we do operations that might use it */
1485 	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
1486 	if (!mp->m_stats.xs_stats) {
1487 		error = -ENOMEM;
1488 		goto out_destroy_inodegc;
1489 	}
1490 
1491 	error = xfs_readsb(mp, flags);
1492 	if (error)
1493 		goto out_free_stats;
1494 
1495 	error = xfs_finish_flags(mp);
1496 	if (error)
1497 		goto out_free_sb;
1498 
1499 	error = xfs_setup_devices(mp);
1500 	if (error)
1501 		goto out_free_sb;
1502 
1503 	/* V4 support is undergoing deprecation. */
1504 	if (!xfs_has_crc(mp)) {
1505 #ifdef CONFIG_XFS_SUPPORT_V4
1506 		xfs_warn_once(mp,
1507 	"Deprecated V4 format (crc=0) will not be supported after September 2030.");
1508 #else
1509 		xfs_warn(mp,
1510 	"Deprecated V4 format (crc=0) not supported by kernel.");
1511 		error = -EINVAL;
1512 		goto out_free_sb;
1513 #endif
1514 	}
1515 
1516 	/* Filesystem claims it needs repair, so refuse the mount. */
1517 	if (xfs_has_needsrepair(mp)) {
1518 		xfs_warn(mp, "Filesystem needs repair.  Please run xfs_repair.");
1519 		error = -EFSCORRUPTED;
1520 		goto out_free_sb;
1521 	}
1522 
1523 	/*
1524 	 * Don't touch the filesystem if a user tool thinks it owns the primary
1525 	 * superblock.  mkfs doesn't clear the flag from secondary supers, so
1526 	 * we don't check them at all.
1527 	 */
1528 	if (mp->m_sb.sb_inprogress) {
1529 		xfs_warn(mp, "Offline file system operation in progress!");
1530 		error = -EFSCORRUPTED;
1531 		goto out_free_sb;
1532 	}
1533 
1534 	/*
1535 	 * Until this is fixed only page-sized or smaller data blocks work.
1536 	 */
1537 	if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
1538 		xfs_warn(mp,
1539 		"File system with blocksize %d bytes. "
1540 		"Only pagesize (%ld) or less will currently work.",
1541 				mp->m_sb.sb_blocksize, PAGE_SIZE);
1542 		error = -ENOSYS;
1543 		goto out_free_sb;
1544 	}
1545 
1546 	/* Ensure this filesystem fits in the page cache limits */
1547 	if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
1548 	    xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
1549 		xfs_warn(mp,
1550 		"file system too large to be mounted on this system.");
1551 		error = -EFBIG;
1552 		goto out_free_sb;
1553 	}
1554 
1555 	/*
1556 	 * XFS block mappings use 54 bits to store the logical block offset.
1557 	 * This should suffice to handle the maximum file size that the VFS
1558 	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
1559 	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
1560 	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
1561 	 * to check this assertion.
1562 	 *
1563 	 * Avoid integer overflow by comparing the maximum bmbt offset to the
1564 	 * maximum pagecache offset in units of fs blocks.
1565 	 */
1566 	if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
1567 		xfs_warn(mp,
1568 "MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
1569 			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
1570 			 XFS_MAX_FILEOFF);
1571 		error = -EINVAL;
1572 		goto out_free_sb;
1573 	}
1574 
1575 	error = xfs_filestream_mount(mp);
1576 	if (error)
1577 		goto out_free_sb;
1578 
1579 	/*
1580 	 * we must configure the block size in the superblock before we run the
1581 	 * full mount process as the mount process can look up and cache inodes.
1582 	 */
1583 	sb->s_magic = XFS_SUPER_MAGIC;
1584 	sb->s_blocksize = mp->m_sb.sb_blocksize;
1585 	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1586 	sb->s_maxbytes = MAX_LFS_FILESIZE;
1587 	sb->s_max_links = XFS_MAXLINK;
1588 	sb->s_time_gran = 1;
1589 	if (xfs_has_bigtime(mp)) {
1590 		sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
1591 		sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
1592 	} else {
1593 		sb->s_time_min = XFS_LEGACY_TIME_MIN;
1594 		sb->s_time_max = XFS_LEGACY_TIME_MAX;
1595 	}
1596 	trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
1597 	sb->s_iflags |= SB_I_CGROUPWB;
1598 
1599 	set_posix_acl_flag(sb);
1600 
1601 	/* version 5 superblocks support inode version counters. */
1602 	if (xfs_has_crc(mp))
1603 		sb->s_flags |= SB_I_VERSION;
1604 
1605 	if (xfs_has_dax_always(mp)) {
1606 		error = xfs_setup_dax_always(mp);
1607 		if (error)
1608 			goto out_filestream_unmount;
1609 	}
1610 
1611 	if (xfs_has_discard(mp)) {
1612 		struct request_queue *q = bdev_get_queue(sb->s_bdev);
1613 
1614 		if (!blk_queue_discard(q)) {
1615 			xfs_warn(mp, "mounting with \"discard\" option, but "
1616 					"the device does not support discard");
1617 			mp->m_features &= ~XFS_FEAT_DISCARD;
1618 		}
1619 	}
1620 
1621 	if (xfs_has_reflink(mp)) {
1622 		if (mp->m_sb.sb_rblocks) {
1623 			xfs_alert(mp,
1624 	"reflink not compatible with realtime device!");
1625 			error = -EINVAL;
1626 			goto out_filestream_unmount;
1627 		}
1628 
1629 		if (xfs_globals.always_cow) {
1630 			xfs_info(mp, "using DEBUG-only always_cow mode.");
1631 			mp->m_always_cow = true;
1632 		}
1633 	}
1634 
1635 	if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) {
1636 		xfs_alert(mp,
1637 	"reverse mapping btree not compatible with realtime device!");
1638 		error = -EINVAL;
1639 		goto out_filestream_unmount;
1640 	}
1641 
1642 	error = xfs_mountfs(mp);
1643 	if (error)
1644 		goto out_filestream_unmount;
1645 
1646 	root = igrab(VFS_I(mp->m_rootip));
1647 	if (!root) {
1648 		error = -ENOENT;
1649 		goto out_unmount;
1650 	}
1651 	sb->s_root = d_make_root(root);
1652 	if (!sb->s_root) {
1653 		error = -ENOMEM;
1654 		goto out_unmount;
1655 	}
1656 
1657 	return 0;
1658 
1659  out_filestream_unmount:
1660 	xfs_filestream_unmount(mp);
1661  out_free_sb:
1662 	xfs_freesb(mp);
1663  out_free_stats:
1664 	free_percpu(mp->m_stats.xs_stats);
1665  out_destroy_inodegc:
1666 	xfs_mount_list_del(mp);
1667 	xfs_inodegc_free_percpu(mp);
1668  out_destroy_counters:
1669 	xfs_destroy_percpu_counters(mp);
1670  out_destroy_workqueues:
1671 	xfs_destroy_mount_workqueues(mp);
1672  out_close_devices:
1673 	xfs_close_devices(mp);
1674  out_free_names:
1675 	sb->s_fs_info = NULL;
1676 	xfs_mount_free(mp);
1677 	return error;
1678 
1679  out_unmount:
1680 	xfs_filestream_unmount(mp);
1681 	xfs_unmountfs(mp);
1682 	goto out_free_sb;
1683 }
1684 
1685 static int
1686 xfs_fs_get_tree(
1687 	struct fs_context	*fc)
1688 {
1689 	return get_tree_bdev(fc, xfs_fs_fill_super);
1690 }
1691 
1692 static int
1693 xfs_remount_rw(
1694 	struct xfs_mount	*mp)
1695 {
1696 	struct xfs_sb		*sbp = &mp->m_sb;
1697 	int error;
1698 
1699 	if (xfs_has_norecovery(mp)) {
1700 		xfs_warn(mp,
1701 			"ro->rw transition prohibited on norecovery mount");
1702 		return -EINVAL;
1703 	}
1704 
1705 	if (xfs_sb_is_v5(sbp) &&
1706 	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
1707 		xfs_warn(mp,
1708 	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
1709 			(sbp->sb_features_ro_compat &
1710 				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
1711 		return -EINVAL;
1712 	}
1713 
1714 	clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
1715 
1716 	/*
1717 	 * If this is the first remount to writeable state we might have some
1718 	 * superblock changes to update.
1719 	 */
1720 	if (mp->m_update_sb) {
1721 		error = xfs_sync_sb(mp, false);
1722 		if (error) {
1723 			xfs_warn(mp, "failed to write sb changes");
1724 			return error;
1725 		}
1726 		mp->m_update_sb = false;
1727 	}
1728 
1729 	/*
1730 	 * Fill out the reserve pool if it is empty. Use the stashed value if
1731 	 * it is non-zero, otherwise go with the default.
1732 	 */
1733 	xfs_restore_resvblks(mp);
1734 	xfs_log_work_queue(mp);
1735 	xfs_blockgc_start(mp);
1736 
1737 	/* Create the per-AG metadata reservation pool. */
1738 	error = xfs_fs_reserve_ag_blocks(mp);
1739 	if (error && error != -ENOSPC)
1740 		return error;
1741 
1742 	/* Re-enable the background inode inactivation worker. */
1743 	xfs_inodegc_start(mp);
1744 
1745 	return 0;
1746 }
1747 
1748 static int
1749 xfs_remount_ro(
1750 	struct xfs_mount	*mp)
1751 {
1752 	struct xfs_icwalk	icw = {
1753 		.icw_flags	= XFS_ICWALK_FLAG_SYNC,
1754 	};
1755 	int			error;
1756 
1757 	/* Flush all the dirty data to disk. */
1758 	error = sync_filesystem(mp->m_super);
1759 	if (error)
1760 		return error;
1761 
1762 	/*
1763 	 * Cancel background eofb scanning so it cannot race with the final
1764 	 * log force+buftarg wait and deadlock the remount.
1765 	 */
1766 	xfs_blockgc_stop(mp);
1767 
1768 	/*
1769 	 * Clear out all remaining COW staging extents and speculative post-EOF
1770 	 * preallocations so that we don't leave inodes requiring inactivation
1771 	 * cleanups during reclaim on a read-only mount.  We must process every
1772 	 * cached inode, so this requires a synchronous cache scan.
1773 	 */
1774 	error = xfs_blockgc_free_space(mp, &icw);
1775 	if (error) {
1776 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1777 		return error;
1778 	}
1779 
1780 	/*
1781 	 * Stop the inodegc background worker.  xfs_fs_reconfigure already
1782 	 * flushed all pending inodegc work when it sync'd the filesystem.
1783 	 * The VFS holds s_umount, so we know that inodes cannot enter
1784 	 * xfs_fs_destroy_inode during a remount operation.  In readonly mode
1785 	 * we send inodes straight to reclaim, so no inodes will be queued.
1786 	 */
1787 	xfs_inodegc_stop(mp);
1788 
1789 	/* Free the per-AG metadata reservation pool. */
1790 	error = xfs_fs_unreserve_ag_blocks(mp);
1791 	if (error) {
1792 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1793 		return error;
1794 	}
1795 
1796 	/*
1797 	 * Before we sync the metadata, we need to free up the reserve block
1798 	 * pool so that the used block count in the superblock on disk is
1799 	 * correct at the end of the remount. Stash the current reserve pool
1800 	 * size so that if we get remounted rw, we can return it to the same
1801 	 * size.
1802 	 */
1803 	xfs_save_resvblks(mp);
1804 
1805 	xfs_log_clean(mp);
1806 	set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
1807 
1808 	return 0;
1809 }
1810 
1811 /*
1812  * Logically we would return an error here to prevent users from believing
1813  * they might have changed mount options using remount which can't be changed.
1814  *
1815  * But unfortunately mount(8) adds all options from mtab and fstab to the mount
1816  * arguments in some cases so we can't blindly reject options, but have to
1817  * check for each specified option if it actually differs from the currently
1818  * set option and only reject it if that's the case.
1819  *
1820  * Until that is implemented we return success for every remount request, and
1821  * silently ignore all options that we can't actually change.
1822  */
1823 static int
1824 xfs_fs_reconfigure(
1825 	struct fs_context *fc)
1826 {
1827 	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
1828 	struct xfs_mount        *new_mp = fc->s_fs_info;
1829 	int			flags = fc->sb_flags;
1830 	int			error;
1831 
1832 	/* version 5 superblocks always support version counters. */
1833 	if (xfs_has_crc(mp))
1834 		fc->sb_flags |= SB_I_VERSION;
1835 
1836 	error = xfs_fs_validate_params(new_mp);
1837 	if (error)
1838 		return error;
1839 
1840 	/* inode32 -> inode64 */
1841 	if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) {
1842 		mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
1843 		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
1844 	}
1845 
1846 	/* inode64 -> inode32 */
1847 	if (!xfs_has_small_inums(mp) && xfs_has_small_inums(new_mp)) {
1848 		mp->m_features |= XFS_FEAT_SMALL_INUMS;
1849 		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
1850 	}
1851 
1852 	/* ro -> rw */
1853 	if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) {
1854 		error = xfs_remount_rw(mp);
1855 		if (error)
1856 			return error;
1857 	}
1858 
1859 	/* rw -> ro */
1860 	if (!xfs_is_readonly(mp) && (flags & SB_RDONLY)) {
1861 		error = xfs_remount_ro(mp);
1862 		if (error)
1863 			return error;
1864 	}
1865 
1866 	return 0;
1867 }
1868 
1869 static void xfs_fs_free(
1870 	struct fs_context	*fc)
1871 {
1872 	struct xfs_mount	*mp = fc->s_fs_info;
1873 
1874 	/*
1875 	 * mp is stored in the fs_context when it is initialized.
1876 	 * mp is transferred to the superblock on a successful mount,
1877 	 * but if an error occurs before the transfer we have to free
1878 	 * it here.
1879 	 */
1880 	if (mp)
1881 		xfs_mount_free(mp);
1882 }
1883 
1884 static const struct fs_context_operations xfs_context_ops = {
1885 	.parse_param = xfs_fs_parse_param,
1886 	.get_tree    = xfs_fs_get_tree,
1887 	.reconfigure = xfs_fs_reconfigure,
1888 	.free        = xfs_fs_free,
1889 };
1890 
1891 static int xfs_init_fs_context(
1892 	struct fs_context	*fc)
1893 {
1894 	struct xfs_mount	*mp;
1895 
1896 	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
1897 	if (!mp)
1898 		return -ENOMEM;
1899 
1900 	spin_lock_init(&mp->m_sb_lock);
1901 	spin_lock_init(&mp->m_agirotor_lock);
1902 	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
1903 	spin_lock_init(&mp->m_perag_lock);
1904 	mutex_init(&mp->m_growlock);
1905 	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
1906 	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
1907 	mp->m_kobj.kobject.kset = xfs_kset;
1908 	/*
1909 	 * We don't create the finobt per-ag space reservation until after log
1910 	 * recovery, so we must set this to true so that an ifree transaction
1911 	 * started during log recovery will not depend on space reservations
1912 	 * for finobt expansion.
1913 	 */
1914 	mp->m_finobt_nores = true;
1915 
1916 	/*
1917 	 * These can be overridden by the mount option parsing.
1918 	 */
1919 	mp->m_logbufs = -1;
1920 	mp->m_logbsize = -1;
1921 	mp->m_allocsize_log = 16; /* 64k */
1922 
1923 	/*
1924 	 * Copy binary VFS mount flags we are interested in.
1925 	 */
1926 	if (fc->sb_flags & SB_RDONLY)
1927 		set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
1928 	if (fc->sb_flags & SB_DIRSYNC)
1929 		mp->m_features |= XFS_FEAT_DIRSYNC;
1930 	if (fc->sb_flags & SB_SYNCHRONOUS)
1931 		mp->m_features |= XFS_FEAT_WSYNC;
1932 
1933 	fc->s_fs_info = mp;
1934 	fc->ops = &xfs_context_ops;
1935 
1936 	return 0;
1937 }
1938 
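/*
 * The xfs filesystem type.  New mounts enter through xfs_init_fs_context()
 * and are then driven by the fs_context operations above.
 */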
1939 static struct file_system_type xfs_fs_type = {
1940 	.owner			= THIS_MODULE,
1941 	.name			= "xfs",
1942 	.init_fs_context	= xfs_init_fs_context,
1943 	.parameters		= xfs_fs_parameters,
1944 	.kill_sb		= kill_block_super,
1945 	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
1946 };
1947 MODULE_ALIAS_FS("xfs");
1948 
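/*
 * Set up the global slab caches used throughout XFS.  If any cache cannot
 * be created, destroy the ones created so far and return -ENOMEM.
 */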
1949 STATIC int __init
1950 xfs_init_caches(void)
1951 {
1952 	int		error;
1953 
1954 	xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket",
1955 						sizeof(struct xlog_ticket),
1956 						0, 0, NULL);
1957 	if (!xfs_log_ticket_cache)
1958 		goto out;
1959 
1960 	error = xfs_btree_init_cur_caches();
1961 	if (error)
1962 		goto out_destroy_log_ticket_cache;
1963 
1964 	error = xfs_defer_init_item_caches();
1965 	if (error)
1966 		goto out_destroy_btree_cur_cache;
1967 
1968 	xfs_da_state_cache = kmem_cache_create("xfs_da_state",
1969 					      sizeof(struct xfs_da_state),
1970 					      0, 0, NULL);
1971 	if (!xfs_da_state_cache)
1972 		goto out_destroy_defer_item_cache;
1973 
1974 	xfs_ifork_cache = kmem_cache_create("xfs_ifork",
1975 					   sizeof(struct xfs_ifork),
1976 					   0, 0, NULL);
1977 	if (!xfs_ifork_cache)
1978 		goto out_destroy_da_state_cache;
1979 
1980 	xfs_trans_cache = kmem_cache_create("xfs_trans",
1981 					   sizeof(struct xfs_trans),
1982 					   0, 0, NULL);
1983 	if (!xfs_trans_cache)
1984 		goto out_destroy_ifork_cache;
1985 
1986 
1987 	/*
1988 	 * The size of the cache-allocated buf log item is the maximum
1989 	 * size possible under XFS.  This wastes a little bit of memory,
1990 	 * but it is much faster.
1991 	 */
1992 	xfs_buf_item_cache = kmem_cache_create("xfs_buf_item",
1993 					      sizeof(struct xfs_buf_log_item),
1994 					      0, 0, NULL);
1995 	if (!xfs_buf_item_cache)
1996 		goto out_destroy_trans_cache;
1997 
1998 	xfs_efd_cache = kmem_cache_create("xfs_efd_item",
1999 					(sizeof(struct xfs_efd_log_item) +
2000 					(XFS_EFD_MAX_FAST_EXTENTS - 1) *
2001 					sizeof(struct xfs_extent)),
2002 					0, 0, NULL);
2003 	if (!xfs_efd_cache)
2004 		goto out_destroy_buf_item_cache;
2005 
2006 	xfs_efi_cache = kmem_cache_create("xfs_efi_item",
2007 					 (sizeof(struct xfs_efi_log_item) +
2008 					 (XFS_EFI_MAX_FAST_EXTENTS - 1) *
2009 					 sizeof(struct xfs_extent)),
2010 					 0, 0, NULL);
2011 	if (!xfs_efi_cache)
2012 		goto out_destroy_efd_cache;
2013 
2014 	xfs_inode_cache = kmem_cache_create("xfs_inode",
2015 					   sizeof(struct xfs_inode), 0,
2016 					   (SLAB_HWCACHE_ALIGN |
2017 					    SLAB_RECLAIM_ACCOUNT |
2018 					    SLAB_MEM_SPREAD | SLAB_ACCOUNT),
2019 					   xfs_fs_inode_init_once);
2020 	if (!xfs_inode_cache)
2021 		goto out_destroy_efi_cache;
2022 
2023 	xfs_ili_cache = kmem_cache_create("xfs_ili",
2024 					 sizeof(struct xfs_inode_log_item), 0,
2025 					 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
2026 					 NULL);
2027 	if (!xfs_ili_cache)
2028 		goto out_destroy_inode_cache;
2029 
2030 	xfs_icreate_cache = kmem_cache_create("xfs_icr",
2031 					     sizeof(struct xfs_icreate_item),
2032 					     0, 0, NULL);
2033 	if (!xfs_icreate_cache)
2034 		goto out_destroy_ili_cache;
2035 
2036 	xfs_rud_cache = kmem_cache_create("xfs_rud_item",
2037 					 sizeof(struct xfs_rud_log_item),
2038 					 0, 0, NULL);
2039 	if (!xfs_rud_cache)
2040 		goto out_destroy_icreate_cache;
2041 
2042 	xfs_rui_cache = kmem_cache_create("xfs_rui_item",
2043 			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
2044 			0, 0, NULL);
2045 	if (!xfs_rui_cache)
2046 		goto out_destroy_rud_cache;
2047 
2048 	xfs_cud_cache = kmem_cache_create("xfs_cud_item",
2049 					 sizeof(struct xfs_cud_log_item),
2050 					 0, 0, NULL);
2051 	if (!xfs_cud_cache)
2052 		goto out_destroy_rui_cache;
2053 
2054 	xfs_cui_cache = kmem_cache_create("xfs_cui_item",
2055 			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
2056 			0, 0, NULL);
2057 	if (!xfs_cui_cache)
2058 		goto out_destroy_cud_cache;
2059 
2060 	xfs_bud_cache = kmem_cache_create("xfs_bud_item",
2061 					 sizeof(struct xfs_bud_log_item),
2062 					 0, 0, NULL);
2063 	if (!xfs_bud_cache)
2064 		goto out_destroy_cui_cache;
2065 
2066 	xfs_bui_cache = kmem_cache_create("xfs_bui_item",
2067 			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
2068 			0, 0, NULL);
2069 	if (!xfs_bui_cache)
2070 		goto out_destroy_bud_cache;
2071 
2072 	return 0;
2073 
2074  out_destroy_bud_cache:
2075 	kmem_cache_destroy(xfs_bud_cache);
2076  out_destroy_cui_cache:
2077 	kmem_cache_destroy(xfs_cui_cache);
2078  out_destroy_cud_cache:
2079 	kmem_cache_destroy(xfs_cud_cache);
2080  out_destroy_rui_cache:
2081 	kmem_cache_destroy(xfs_rui_cache);
2082  out_destroy_rud_cache:
2083 	kmem_cache_destroy(xfs_rud_cache);
2084  out_destroy_icreate_cache:
2085 	kmem_cache_destroy(xfs_icreate_cache);
2086  out_destroy_ili_cache:
2087 	kmem_cache_destroy(xfs_ili_cache);
2088  out_destroy_inode_cache:
2089 	kmem_cache_destroy(xfs_inode_cache);
2090  out_destroy_efi_cache:
2091 	kmem_cache_destroy(xfs_efi_cache);
2092  out_destroy_efd_cache:
2093 	kmem_cache_destroy(xfs_efd_cache);
2094  out_destroy_buf_item_cache:
2095 	kmem_cache_destroy(xfs_buf_item_cache);
2096  out_destroy_trans_cache:
2097 	kmem_cache_destroy(xfs_trans_cache);
2098  out_destroy_ifork_cache:
2099 	kmem_cache_destroy(xfs_ifork_cache);
2100  out_destroy_da_state_cache:
2101 	kmem_cache_destroy(xfs_da_state_cache);
2102  out_destroy_defer_item_cache:
2103 	xfs_defer_destroy_item_caches();
2104  out_destroy_btree_cur_cache:
2105 	xfs_btree_destroy_cur_caches();
2106  out_destroy_log_ticket_cache:
2107 	kmem_cache_destroy(xfs_log_ticket_cache);
2108  out:
2109 	return -ENOMEM;
2110 }
2111 
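/*
 * Destroy the global slab caches in the reverse order of their creation.
 */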
2112 STATIC void
2113 xfs_destroy_caches(void)
2114 {
2115 	/*
2116 	 * Make sure all delayed RCU free callbacks have completed before we
2117 	 * destroy the caches.
2118 	 */
2119 	rcu_barrier();
2120 	kmem_cache_destroy(xfs_bui_cache);
2121 	kmem_cache_destroy(xfs_bud_cache);
2122 	kmem_cache_destroy(xfs_cui_cache);
2123 	kmem_cache_destroy(xfs_cud_cache);
2124 	kmem_cache_destroy(xfs_rui_cache);
2125 	kmem_cache_destroy(xfs_rud_cache);
2126 	kmem_cache_destroy(xfs_icreate_cache);
2127 	kmem_cache_destroy(xfs_ili_cache);
2128 	kmem_cache_destroy(xfs_inode_cache);
2129 	kmem_cache_destroy(xfs_efi_cache);
2130 	kmem_cache_destroy(xfs_efd_cache);
2131 	kmem_cache_destroy(xfs_buf_item_cache);
2132 	kmem_cache_destroy(xfs_trans_cache);
2133 	kmem_cache_destroy(xfs_ifork_cache);
2134 	kmem_cache_destroy(xfs_da_state_cache);
2135 	xfs_defer_destroy_item_caches();
2136 	xfs_btree_destroy_cur_caches();
2137 	kmem_cache_destroy(xfs_log_ticket_cache);
2138 }
2139 
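/*
 * Create the global workqueues that are shared by all XFS mounts.
 */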
2140 STATIC int __init
2141 xfs_init_workqueues(void)
2142 {
2143 	/*
2144 	 * The allocation workqueue can be used in memory reclaim situations
2145 	 * (writepage path), and parallelism is only limited by the number of
2146 	 * AGs in all the filesystems mounted. Hence use the default large
2147 	 * max_active value for this workqueue.
2148 	 */
2149 	xfs_alloc_wq = alloc_workqueue("xfsalloc",
2150 			XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
2151 	if (!xfs_alloc_wq)
2152 		return -ENOMEM;
2153 
2154 	xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
2155 			0);
2156 	if (!xfs_discard_wq)
2157 		goto out_free_alloc_wq;
2158 
2159 	return 0;
2160 out_free_alloc_wq:
2161 	destroy_workqueue(xfs_alloc_wq);
2162 	return -ENOMEM;
2163 }
2164 
2165 STATIC void
2166 xfs_destroy_workqueues(void)
2167 {
2168 	destroy_workqueue(xfs_discard_wq);
2169 	destroy_workqueue(xfs_alloc_wq);
2170 }
2171 
2172 #ifdef CONFIG_HOTPLUG_CPU
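/*
 * CPU hotplug "dead" callback: walk every mounted XFS filesystem and let
 * it deal with inode gc work that was queued on the CPU that has just gone
 * offline.  The mount list lock is dropped around each per-mount call.
 */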
2173 static int
2174 xfs_cpu_dead(
2175 	unsigned int		cpu)
2176 {
2177 	struct xfs_mount	*mp, *n;
2178 
2179 	spin_lock(&xfs_mount_list_lock);
2180 	list_for_each_entry_safe(mp, n, &xfs_mount_list, m_mount_list) {
2181 		spin_unlock(&xfs_mount_list_lock);
2182 		xfs_inodegc_cpu_dead(mp, cpu);
2183 		spin_lock(&xfs_mount_list_lock);
2184 	}
2185 	spin_unlock(&xfs_mount_list_lock);
2186 	return 0;
2187 }
2188 
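/*
 * Register the CPU hotplug "dead" state so that per-cpu inode gc state can
 * be cleaned up whenever a CPU goes offline.
 */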
2189 static int __init
2190 xfs_cpu_hotplug_init(void)
2191 {
2192 	int	error;
2193 
2194 	error = cpuhp_setup_state_nocalls(CPUHP_XFS_DEAD, "xfs:dead", NULL,
2195 			xfs_cpu_dead);
2196 	if (error < 0)
2197 		xfs_alert(NULL,
2198 "Failed to initialise CPU hotplug, error %d. XFS is non-functional.",
2199 			error);
2200 	return error;
2201 }
2202 
2203 static void
2204 xfs_cpu_hotplug_destroy(void)
2205 {
2206 	cpuhp_remove_state_nocalls(CPUHP_XFS_DEAD);
2207 }
2208 
2209 #else /* !CONFIG_HOTPLUG_CPU */
2210 static inline int xfs_cpu_hotplug_init(void) { return 0; }
2211 static inline void xfs_cpu_hotplug_destroy(void) {}
2212 #endif
2213 
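/*
 * Module initialisation: set up the global XFS state (CPU hotplug
 * notifier, slab caches, workqueues, MRU cache, buffer cache, procfs,
 * sysctl, sysfs and quota support) and finally register the filesystem.
 * Any failure unwinds whatever has already been set up.
 */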
2214 STATIC int __init
2215 init_xfs_fs(void)
2216 {
2217 	int			error;
2218 
2219 	xfs_check_ondisk_structs();
2220 
2221 	printk(KERN_INFO XFS_VERSION_STRING " with "
2222 			 XFS_BUILD_OPTIONS " enabled\n");
2223 
2224 	xfs_dir_startup();
2225 
2226 	error = xfs_cpu_hotplug_init();
2227 	if (error)
2228 		goto out;
2229 
2230 	error = xfs_init_caches();
2231 	if (error)
2232 		goto out_destroy_hp;
2233 
2234 	error = xfs_init_workqueues();
2235 	if (error)
2236 		goto out_destroy_caches;
2237 
2238 	error = xfs_mru_cache_init();
2239 	if (error)
2240 		goto out_destroy_wq;
2241 
2242 	error = xfs_buf_init();
2243 	if (error)
2244 		goto out_mru_cache_uninit;
2245 
2246 	error = xfs_init_procfs();
2247 	if (error)
2248 		goto out_buf_terminate;
2249 
2250 	error = xfs_sysctl_register();
2251 	if (error)
2252 		goto out_cleanup_procfs;
2253 
2254 	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
2255 	if (!xfs_kset) {
2256 		error = -ENOMEM;
2257 		goto out_sysctl_unregister;
2258 	}
2259 
2260 	xfsstats.xs_kobj.kobject.kset = xfs_kset;
2261 
2262 	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
2263 	if (!xfsstats.xs_stats) {
2264 		error = -ENOMEM;
2265 		goto out_kset_unregister;
2266 	}
2267 
2268 	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
2269 			       "stats");
2270 	if (error)
2271 		goto out_free_stats;
2272 
2273 #ifdef DEBUG
2274 	xfs_dbg_kobj.kobject.kset = xfs_kset;
2275 	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
2276 	if (error)
2277 		goto out_remove_stats_kobj;
2278 #endif
2279 
2280 	error = xfs_qm_init();
2281 	if (error)
2282 		goto out_remove_dbg_kobj;
2283 
2284 	error = register_filesystem(&xfs_fs_type);
2285 	if (error)
2286 		goto out_qm_exit;
2287 	return 0;
2288 
2289  out_qm_exit:
2290 	xfs_qm_exit();
2291  out_remove_dbg_kobj:
2292 #ifdef DEBUG
2293 	xfs_sysfs_del(&xfs_dbg_kobj);
2294  out_remove_stats_kobj:
2295 #endif
2296 	xfs_sysfs_del(&xfsstats.xs_kobj);
2297  out_free_stats:
2298 	free_percpu(xfsstats.xs_stats);
2299  out_kset_unregister:
2300 	kset_unregister(xfs_kset);
2301  out_sysctl_unregister:
2302 	xfs_sysctl_unregister();
2303  out_cleanup_procfs:
2304 	xfs_cleanup_procfs();
2305  out_buf_terminate:
2306 	xfs_buf_terminate();
2307  out_mru_cache_uninit:
2308 	xfs_mru_cache_uninit();
2309  out_destroy_wq:
2310 	xfs_destroy_workqueues();
2311  out_destroy_caches:
2312 	xfs_destroy_caches();
2313  out_destroy_hp:
2314 	xfs_cpu_hotplug_destroy();
2315  out:
2316 	return error;
2317 }
2318 
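/*
 * Module exit: unregister the filesystem and tear down the global state
 * set up by init_xfs_fs().
 */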
2319 STATIC void __exit
2320 exit_xfs_fs(void)
2321 {
2322 	xfs_qm_exit();
2323 	unregister_filesystem(&xfs_fs_type);
2324 #ifdef DEBUG
2325 	xfs_sysfs_del(&xfs_dbg_kobj);
2326 #endif
2327 	xfs_sysfs_del(&xfsstats.xs_kobj);
2328 	free_percpu(xfsstats.xs_stats);
2329 	kset_unregister(xfs_kset);
2330 	xfs_sysctl_unregister();
2331 	xfs_cleanup_procfs();
2332 	xfs_buf_terminate();
2333 	xfs_mru_cache_uninit();
2334 	xfs_destroy_workqueues();
2335 	xfs_destroy_caches();
2336 	xfs_uuid_table_free();
2337 	xfs_cpu_hotplug_destroy();
2338 }
2339 
2340 module_init(init_xfs_fs);
2341 module_exit(exit_xfs_fs);
2342 
2343 MODULE_AUTHOR("Silicon Graphics, Inc.");
2344 MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
2345 MODULE_LICENSE("GPL");
2346