xref: /linux/fs/xfs/xfs_super.c (revision 547c5775a742d9c83891b629b75d1d4c8e88d8c0)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 
7 #include "xfs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_sb.h"
13 #include "xfs_mount.h"
14 #include "xfs_inode.h"
15 #include "xfs_btree.h"
16 #include "xfs_bmap.h"
17 #include "xfs_alloc.h"
18 #include "xfs_fsops.h"
19 #include "xfs_trans.h"
20 #include "xfs_buf_item.h"
21 #include "xfs_log.h"
22 #include "xfs_log_priv.h"
23 #include "xfs_dir2.h"
24 #include "xfs_extfree_item.h"
25 #include "xfs_mru_cache.h"
26 #include "xfs_inode_item.h"
27 #include "xfs_icache.h"
28 #include "xfs_trace.h"
29 #include "xfs_icreate_item.h"
30 #include "xfs_filestream.h"
31 #include "xfs_quota.h"
32 #include "xfs_sysfs.h"
33 #include "xfs_ondisk.h"
34 #include "xfs_rmap_item.h"
35 #include "xfs_refcount_item.h"
36 #include "xfs_bmap_item.h"
37 #include "xfs_reflink.h"
38 #include "xfs_pwork.h"
39 #include "xfs_ag.h"
40 #include "xfs_defer.h"
41 #include "xfs_attr_item.h"
42 #include "xfs_xattr.h"
43 #include "xfs_iunlink_item.h"
44 #include "xfs_dahash_test.h"
45 #include "xfs_rtbitmap.h"
46 #include "xfs_exchmaps_item.h"
47 #include "xfs_parent.h"
48 #include "xfs_rtalloc.h"
49 #include "xfs_zone_alloc.h"
50 #include "scrub/stats.h"
51 #include "scrub/rcbag_btree.h"
52 
53 #include <linux/magic.h>
54 #include <linux/fs_context.h>
55 #include <linux/fs_parser.h>
56 
57 static const struct super_operations xfs_super_operations;
58 
59 static struct dentry *xfs_debugfs;	/* top-level xfs debugfs dir */
60 static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
61 #ifdef DEBUG
62 static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
63 #endif
64 
65 enum xfs_dax_mode {
66 	XFS_DAX_INODE = 0,
67 	XFS_DAX_ALWAYS = 1,
68 	XFS_DAX_NEVER = 2,
69 };
70 
71 /* Were quota mount options provided?  Must use the upper 16 bits of qflags. */
72 #define XFS_QFLAGS_MNTOPTS	(1U << 31)
73 
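/*
 * Apply a DAX mode to the mount feature flags: "inode" clears both
 * override bits so the per-inode flag decides, while "always" and
 * "never" force DAX on or off for the whole filesystem.
 */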
74 static void
75 xfs_mount_set_dax_mode(
76 	struct xfs_mount	*mp,
77 	enum xfs_dax_mode	mode)
78 {
79 	switch (mode) {
80 	case XFS_DAX_INODE:
81 		mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER);
82 		break;
83 	case XFS_DAX_ALWAYS:
84 		mp->m_features |= XFS_FEAT_DAX_ALWAYS;
85 		mp->m_features &= ~XFS_FEAT_DAX_NEVER;
86 		break;
87 	case XFS_DAX_NEVER:
88 		mp->m_features |= XFS_FEAT_DAX_NEVER;
89 		mp->m_features &= ~XFS_FEAT_DAX_ALWAYS;
90 		break;
91 	}
92 }
93 
94 static const struct constant_table dax_param_enums[] = {
95 	{"inode",	XFS_DAX_INODE },
96 	{"always",	XFS_DAX_ALWAYS },
97 	{"never",	XFS_DAX_NEVER },
98 	{}
99 };
100 
101 /*
102  * Table driven mount option parser.
103  */
104 enum {
105 	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
106 	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
107 	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
108 	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
109 	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
110 	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
111 	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
112 	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
113 	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum, Opt_max_open_zones,
114 	Opt_lifetime, Opt_nolifetime, Opt_max_atomic_write,
115 };
116 
117 static const struct fs_parameter_spec xfs_fs_parameters[] = {
118 	fsparam_u32("logbufs",		Opt_logbufs),
119 	fsparam_string("logbsize",	Opt_logbsize),
120 	fsparam_string("logdev",	Opt_logdev),
121 	fsparam_string("rtdev",		Opt_rtdev),
122 	fsparam_flag("wsync",		Opt_wsync),
123 	fsparam_flag("noalign",		Opt_noalign),
124 	fsparam_flag("swalloc",		Opt_swalloc),
125 	fsparam_u32("sunit",		Opt_sunit),
126 	fsparam_u32("swidth",		Opt_swidth),
127 	fsparam_flag("nouuid",		Opt_nouuid),
128 	fsparam_flag("grpid",		Opt_grpid),
129 	fsparam_flag("nogrpid",		Opt_nogrpid),
130 	fsparam_flag("bsdgroups",	Opt_bsdgroups),
131 	fsparam_flag("sysvgroups",	Opt_sysvgroups),
132 	fsparam_string("allocsize",	Opt_allocsize),
133 	fsparam_flag("norecovery",	Opt_norecovery),
134 	fsparam_flag("inode64",		Opt_inode64),
135 	fsparam_flag("inode32",		Opt_inode32),
136 	fsparam_flag("ikeep",		Opt_ikeep),
137 	fsparam_flag("noikeep",		Opt_noikeep),
138 	fsparam_flag("largeio",		Opt_largeio),
139 	fsparam_flag("nolargeio",	Opt_nolargeio),
140 	fsparam_flag("attr2",		Opt_attr2),
141 	fsparam_flag("noattr2",		Opt_noattr2),
142 	fsparam_flag("filestreams",	Opt_filestreams),
143 	fsparam_flag("quota",		Opt_quota),
144 	fsparam_flag("noquota",		Opt_noquota),
145 	fsparam_flag("usrquota",	Opt_usrquota),
146 	fsparam_flag("grpquota",	Opt_grpquota),
147 	fsparam_flag("prjquota",	Opt_prjquota),
148 	fsparam_flag("uquota",		Opt_uquota),
149 	fsparam_flag("gquota",		Opt_gquota),
150 	fsparam_flag("pquota",		Opt_pquota),
151 	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
152 	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
153 	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
154 	fsparam_flag("qnoenforce",	Opt_qnoenforce),
155 	fsparam_flag("discard",		Opt_discard),
156 	fsparam_flag("nodiscard",	Opt_nodiscard),
157 	fsparam_flag("dax",		Opt_dax),
158 	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
159 	fsparam_u32("max_open_zones",	Opt_max_open_zones),
160 	fsparam_flag("lifetime",	Opt_lifetime),
161 	fsparam_flag("nolifetime",	Opt_nolifetime),
162 	fsparam_string("max_atomic_write",	Opt_max_atomic_write),
163 	{}
164 };
165 
166 struct proc_xfs_info {
167 	uint64_t	flag;
168 	char		*str;
169 };
170 
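/* Report the active mount options, e.g. for /proc/mounts. */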
171 static int
172 xfs_fs_show_options(
173 	struct seq_file		*m,
174 	struct dentry		*root)
175 {
176 	static struct proc_xfs_info xfs_info_set[] = {
177 		/* the few simple ones we can get from the mount struct */
178 		{ XFS_FEAT_IKEEP,		",ikeep" },
179 		{ XFS_FEAT_WSYNC,		",wsync" },
180 		{ XFS_FEAT_NOALIGN,		",noalign" },
181 		{ XFS_FEAT_SWALLOC,		",swalloc" },
182 		{ XFS_FEAT_NOUUID,		",nouuid" },
183 		{ XFS_FEAT_NORECOVERY,		",norecovery" },
184 		{ XFS_FEAT_ATTR2,		",attr2" },
185 		{ XFS_FEAT_FILESTREAMS,		",filestreams" },
186 		{ XFS_FEAT_GRPID,		",grpid" },
187 		{ XFS_FEAT_DISCARD,		",discard" },
188 		{ XFS_FEAT_LARGE_IOSIZE,	",largeio" },
189 		{ XFS_FEAT_DAX_ALWAYS,		",dax=always" },
190 		{ XFS_FEAT_DAX_NEVER,		",dax=never" },
191 		{ XFS_FEAT_NOLIFETIME,		",nolifetime" },
192 		{ 0, NULL }
193 	};
194 	struct xfs_mount	*mp = XFS_M(root->d_sb);
195 	struct proc_xfs_info	*xfs_infop;
196 
197 	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
198 		if (mp->m_features & xfs_infop->flag)
199 			seq_puts(m, xfs_infop->str);
200 	}
201 
202 	seq_printf(m, ",inode%d", xfs_has_small_inums(mp) ? 32 : 64);
203 
204 	if (xfs_has_allocsize(mp))
205 		seq_printf(m, ",allocsize=%dk",
206 			   (1 << mp->m_allocsize_log) >> 10);
207 
208 	if (mp->m_logbufs > 0)
209 		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
210 	if (mp->m_logbsize > 0)
211 		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);
212 
213 	if (mp->m_logname)
214 		seq_show_option(m, "logdev", mp->m_logname);
215 	if (mp->m_rtname)
216 		seq_show_option(m, "rtdev", mp->m_rtname);
217 
218 	if (mp->m_dalign > 0)
219 		seq_printf(m, ",sunit=%d",
220 				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
221 	if (mp->m_swidth > 0)
222 		seq_printf(m, ",swidth=%d",
223 				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));
224 
225 	if (mp->m_qflags & XFS_UQUOTA_ENFD)
226 		seq_puts(m, ",usrquota");
227 	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
228 		seq_puts(m, ",uqnoenforce");
229 
230 	if (mp->m_qflags & XFS_PQUOTA_ENFD)
231 		seq_puts(m, ",prjquota");
232 	else if (mp->m_qflags & XFS_PQUOTA_ACCT)
233 		seq_puts(m, ",pqnoenforce");
234 
235 	if (mp->m_qflags & XFS_GQUOTA_ENFD)
236 		seq_puts(m, ",grpquota");
237 	else if (mp->m_qflags & XFS_GQUOTA_ACCT)
238 		seq_puts(m, ",gqnoenforce");
239 
240 	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
241 		seq_puts(m, ",noquota");
242 
243 	if (mp->m_max_open_zones)
244 		seq_printf(m, ",max_open_zones=%u", mp->m_max_open_zones);
245 	if (mp->m_awu_max_bytes)
246 		seq_printf(m, ",max_atomic_write=%lluk",
247 				mp->m_awu_max_bytes >> 10);
248 
249 	return 0;
250 }
251 
252 static bool
253 xfs_set_inode_alloc_perag(
254 	struct xfs_perag	*pag,
255 	xfs_ino_t		ino,
256 	xfs_agnumber_t		max_metadata)
257 {
258 	if (!xfs_is_inode32(pag_mount(pag))) {
259 		set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
260 		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
261 		return false;
262 	}
263 
264 	if (ino > XFS_MAXINUMBER_32) {
265 		clear_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
266 		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
267 		return false;
268 	}
269 
270 	set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
271 	if (pag_agno(pag) < max_metadata)
272 		set_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
273 	else
274 		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
275 	return true;
276 }
277 
278 /*
279  * Set parameters for inode allocation heuristics, taking into account
280  * filesystem size and inode32/inode64 mount options; i.e. specifically
281  * whether or not XFS_FEAT_SMALL_INUMS is set.
282  *
283  * Inode allocation patterns are altered only if inode32 is requested
284  * (XFS_FEAT_SMALL_INUMS), and the filesystem is sufficiently large.
285  * If altered, XFS_OPSTATE_INODE32 is set as well.
286  *
287  * An agcount independent of that in the mount structure is provided
288  * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
289  * to the potentially higher ag count.
290  *
291  * Returns the maximum AG index which may contain inodes.
292  */
293 xfs_agnumber_t
294 xfs_set_inode_alloc(
295 	struct xfs_mount *mp,
296 	xfs_agnumber_t	agcount)
297 {
298 	xfs_agnumber_t	index;
299 	xfs_agnumber_t	maxagi = 0;
300 	xfs_sb_t	*sbp = &mp->m_sb;
301 	xfs_agnumber_t	max_metadata;
302 	xfs_agino_t	agino;
303 	xfs_ino_t	ino;
304 
305 	/*
306 	 * Calculate how much should be reserved for inodes to meet
307 	 * the max inode percentage.  Used only for inode32.
308 	 */
309 	if (M_IGEO(mp)->maxicount) {
310 		uint64_t	icount;
311 
312 		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
313 		do_div(icount, 100);
314 		icount += sbp->sb_agblocks - 1;
315 		do_div(icount, sbp->sb_agblocks);
316 		max_metadata = icount;
317 	} else {
318 		max_metadata = agcount;
319 	}
320 
321 	/* Get the last possible inode in the filesystem */
322 	agino =	XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
323 	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
324 
325 	/*
326 	 * If user asked for no more than 32-bit inodes, and the fs is
327 	 * sufficiently large, set XFS_OPSTATE_INODE32 if we must alter
328 	 * the allocator to accommodate the request.
329 	 */
330 	if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32)
331 		xfs_set_inode32(mp);
332 	else
333 		xfs_clear_inode32(mp);
334 
335 	for (index = 0; index < agcount; index++) {
336 		struct xfs_perag	*pag;
337 
338 		ino = XFS_AGINO_TO_INO(mp, index, agino);
339 
340 		pag = xfs_perag_get(mp, index);
341 		if (xfs_set_inode_alloc_perag(pag, ino, max_metadata))
342 			maxagi++;
343 		xfs_perag_put(pag);
344 	}
345 
346 	return xfs_is_inode32(mp) ? maxagi : agcount;
347 }
348 
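/*
 * Check whether "dax=always" can really be used: fall back to dax=never
 * if no DAX-capable device is present or the block size does not match
 * the page size, and reject DAX plus reflink on partitioned devices.
 */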
349 static int
350 xfs_setup_dax_always(
351 	struct xfs_mount	*mp)
352 {
353 	if (!mp->m_ddev_targp->bt_daxdev &&
354 	    (!mp->m_rtdev_targp || !mp->m_rtdev_targp->bt_daxdev)) {
355 		xfs_alert(mp,
356 			"DAX unsupported by block device. Turning off DAX.");
357 		goto disable_dax;
358 	}
359 
360 	if (mp->m_super->s_blocksize != PAGE_SIZE) {
361 		xfs_alert(mp,
362 			"DAX not supported for blocksize. Turning off DAX.");
363 		goto disable_dax;
364 	}
365 
366 	if (xfs_has_reflink(mp) &&
367 	    bdev_is_partition(mp->m_ddev_targp->bt_bdev)) {
368 		xfs_alert(mp,
369 			"DAX and reflink cannot work with multi-partitions!");
370 		return -EINVAL;
371 	}
372 
373 	return 0;
374 
375 disable_dax:
376 	xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
377 	return 0;
378 }
379 
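/* Open an external (log or realtime) block device by path. */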
380 STATIC int
381 xfs_blkdev_get(
382 	xfs_mount_t		*mp,
383 	const char		*name,
384 	struct file		**bdev_filep)
385 {
386 	int			error = 0;
387 	blk_mode_t		mode;
388 
389 	mode = sb_open_mode(mp->m_super->s_flags);
390 	*bdev_filep = bdev_file_open_by_path(name, mode,
391 			mp->m_super, &fs_holder_ops);
392 	if (IS_ERR(*bdev_filep)) {
393 		error = PTR_ERR(*bdev_filep);
394 		*bdev_filep = NULL;
395 		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
396 	}
397 
398 	return error;
399 }
400 
401 STATIC void
402 xfs_shutdown_devices(
403 	struct xfs_mount	*mp)
404 {
405 	/*
406 	 * Udev is triggered whenever anyone closes a block device or unmounts
407  * a file system on a block device.
408 	 * The default udev rules invoke blkid to read the fs super and create
409 	 * symlinks to the bdev under /dev/disk.  For this, it uses buffered
410 	 * reads through the page cache.
411 	 *
412 	 * xfs_db also uses buffered reads to examine metadata.  There is no
413 	 * coordination between xfs_db and udev, which means that they can run
414 	 * concurrently.  Note there is no coordination between the kernel and
415 	 * blkid either.
416 	 *
417 	 * On a system with 64k pages, the page cache can cache the superblock
418 	 * and the root inode (and hence the root directory) with the same 64k
419 	 * page.  If udev spawns blkid after the mkfs and the system is busy
420 	 * enough that it is still running when xfs_db starts up, they'll both
421 	 * read from the same page in the pagecache.
422 	 *
423 	 * The unmount writes updated inode metadata to disk directly.  The XFS
424 	 * buffer cache does not use the bdev pagecache, so it needs to
425 	 * invalidate that pagecache on unmount.  If the above scenario occurs,
426 	 * the pagecache no longer reflects what's on disk, xfs_db reads the
427 	 * stale metadata, and fails to find /a.  Most of the time this succeeds
428 	 * because closing a bdev invalidates the page cache, but when processes
429 	 * race, everyone loses.
430 	 */
431 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
432 		blkdev_issue_flush(mp->m_logdev_targp->bt_bdev);
433 		invalidate_bdev(mp->m_logdev_targp->bt_bdev);
434 	}
435 	if (mp->m_rtdev_targp) {
436 		blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
437 		invalidate_bdev(mp->m_rtdev_targp->bt_bdev);
438 	}
439 	blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
440 	invalidate_bdev(mp->m_ddev_targp->bt_bdev);
441 }
442 
443 /*
444  * The file system configurations are:
445  *	(1) device (partition) with data and internal log
446  *	(2) logical volume with data and log subvolumes.
447  *	(3) logical volume with data, log, and realtime subvolumes.
448  *
449  * We only have to handle opening the log and realtime volumes here if
450  * they are present.  The data subvolume has already been opened by
451  * get_sb_bdev() and is stored in sb->s_bdev.
452  */
453 STATIC int
454 xfs_open_devices(
455 	struct xfs_mount	*mp)
456 {
457 	struct super_block	*sb = mp->m_super;
458 	struct block_device	*ddev = sb->s_bdev;
459 	struct file		*logdev_file = NULL, *rtdev_file = NULL;
460 	int			error;
461 
462 	/*
463 	 * Open real time and log devices - order is important.
464 	 */
465 	if (mp->m_logname) {
466 		error = xfs_blkdev_get(mp, mp->m_logname, &logdev_file);
467 		if (error)
468 			return error;
469 	}
470 
471 	if (mp->m_rtname) {
472 		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev_file);
473 		if (error)
474 			goto out_close_logdev;
475 
476 		if (file_bdev(rtdev_file) == ddev ||
477 		    (logdev_file &&
478 		     file_bdev(rtdev_file) == file_bdev(logdev_file))) {
479 			xfs_warn(mp,
480 	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
481 			error = -EINVAL;
482 			goto out_close_rtdev;
483 		}
484 	}
485 
486 	/*
487 	 * Setup xfs_mount buffer target pointers
488 	 */
489 	mp->m_ddev_targp = xfs_alloc_buftarg(mp, sb->s_bdev_file);
490 	if (IS_ERR(mp->m_ddev_targp)) {
491 		error = PTR_ERR(mp->m_ddev_targp);
492 		mp->m_ddev_targp = NULL;
493 		goto out_close_rtdev;
494 	}
495 
496 	if (rtdev_file) {
497 		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev_file);
498 		if (IS_ERR(mp->m_rtdev_targp)) {
499 			error = PTR_ERR(mp->m_rtdev_targp);
500 			mp->m_rtdev_targp = NULL;
501 			goto out_free_ddev_targ;
502 		}
503 	}
504 
505 	if (logdev_file && file_bdev(logdev_file) != ddev) {
506 		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev_file);
507 		if (IS_ERR(mp->m_logdev_targp)) {
508 			error = PTR_ERR(mp->m_logdev_targp);
509 			mp->m_logdev_targp = NULL;
510 			goto out_free_rtdev_targ;
511 		}
512 	} else {
513 		mp->m_logdev_targp = mp->m_ddev_targp;
514 		/* Handle won't be used, drop it */
515 		if (logdev_file)
516 			bdev_fput(logdev_file);
517 	}
518 
519 	return 0;
520 
521  out_free_rtdev_targ:
522 	if (mp->m_rtdev_targp)
523 		xfs_free_buftarg(mp->m_rtdev_targp);
524  out_free_ddev_targ:
525 	xfs_free_buftarg(mp->m_ddev_targp);
526  out_close_rtdev:
527 	if (rtdev_file)
528 		bdev_fput(rtdev_file);
529  out_close_logdev:
530 	if (logdev_file)
531 		bdev_fput(logdev_file);
532 	return error;
533 }
534 
535 /*
536  * Setup xfs_mount buffer target pointers based on superblock
537  */
538 STATIC int
539 xfs_setup_devices(
540 	struct xfs_mount	*mp)
541 {
542 	int			error;
543 
544 	error = xfs_configure_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
545 	if (error)
546 		return error;
547 
548 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
549 		unsigned int	log_sector_size = BBSIZE;
550 
551 		if (xfs_has_sector(mp))
552 			log_sector_size = mp->m_sb.sb_logsectsize;
553 		error = xfs_configure_buftarg(mp->m_logdev_targp,
554 					    log_sector_size);
555 		if (error)
556 			return error;
557 	}
558 
559 	if (mp->m_sb.sb_rtstart) {
560 		if (mp->m_rtdev_targp) {
561 			xfs_warn(mp,
562 		"can't use internal and external rtdev at the same time");
563 			return -EINVAL;
564 		}
565 		mp->m_rtdev_targp = mp->m_ddev_targp;
566 	} else if (mp->m_rtname) {
567 		error = xfs_configure_buftarg(mp->m_rtdev_targp,
568 					    mp->m_sb.sb_sectsize);
569 		if (error)
570 			return error;
571 	}
572 
573 	return 0;
574 }
575 
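/* Allocate the per-mount workqueues; on failure unwind all prior ones. */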
576 STATIC int
577 xfs_init_mount_workqueues(
578 	struct xfs_mount	*mp)
579 {
580 	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
581 			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
582 			1, mp->m_super->s_id);
583 	if (!mp->m_buf_workqueue)
584 		goto out;
585 
586 	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
587 			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
588 			0, mp->m_super->s_id);
589 	if (!mp->m_unwritten_workqueue)
590 		goto out_destroy_buf;
591 
592 	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
593 			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
594 			0, mp->m_super->s_id);
595 	if (!mp->m_reclaim_workqueue)
596 		goto out_destroy_unwritten;
597 
598 	mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
599 			XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
600 			0, mp->m_super->s_id);
601 	if (!mp->m_blockgc_wq)
602 		goto out_destroy_reclaim;
603 
604 	mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s",
605 			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
606 			1, mp->m_super->s_id);
607 	if (!mp->m_inodegc_wq)
608 		goto out_destroy_blockgc;
609 
610 	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
611 			XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
612 	if (!mp->m_sync_workqueue)
613 		goto out_destroy_inodegc;
614 
615 	return 0;
616 
617 out_destroy_inodegc:
618 	destroy_workqueue(mp->m_inodegc_wq);
619 out_destroy_blockgc:
620 	destroy_workqueue(mp->m_blockgc_wq);
621 out_destroy_reclaim:
622 	destroy_workqueue(mp->m_reclaim_workqueue);
623 out_destroy_unwritten:
624 	destroy_workqueue(mp->m_unwritten_workqueue);
625 out_destroy_buf:
626 	destroy_workqueue(mp->m_buf_workqueue);
627 out:
628 	return -ENOMEM;
629 }
630 
631 STATIC void
632 xfs_destroy_mount_workqueues(
633 	struct xfs_mount	*mp)
634 {
635 	destroy_workqueue(mp->m_sync_workqueue);
636 	destroy_workqueue(mp->m_blockgc_wq);
637 	destroy_workqueue(mp->m_inodegc_wq);
638 	destroy_workqueue(mp->m_reclaim_workqueue);
639 	destroy_workqueue(mp->m_unwritten_workqueue);
640 	destroy_workqueue(mp->m_buf_workqueue);
641 }
642 
643 static void
644 xfs_flush_inodes_worker(
645 	struct work_struct	*work)
646 {
647 	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
648 						   m_flush_inodes_work);
649 	struct super_block	*sb = mp->m_super;
650 
651 	if (down_read_trylock(&sb->s_umount)) {
652 		sync_inodes_sb(sb);
653 		up_read(&sb->s_umount);
654 	}
655 }
656 
657 /*
658  * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
659  * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
660  * for IO to complete so that we effectively throttle multiple callers to the
661  * rate at which IO is completing.
662  */
663 void
664 xfs_flush_inodes(
665 	struct xfs_mount	*mp)
666 {
667 	/*
668 	 * If flush_work() returns true then that means we waited for a flush
669 	 * which was already in progress.  Don't bother running another scan.
670 	 */
671 	if (flush_work(&mp->m_flush_inodes_work))
672 		return;
673 
674 	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
675 	flush_work(&mp->m_flush_inodes_work);
676 }
677 
678 /* Catch misguided souls that try to use this interface on XFS */
679 STATIC struct inode *
680 xfs_fs_alloc_inode(
681 	struct super_block	*sb)
682 {
683 	BUG();
684 	return NULL;
685 }
686 
687 /*
688  * Now that the generic code is guaranteed not to be accessing
689  * the linux inode, we can inactivate and reclaim the inode.
690  */
691 STATIC void
692 xfs_fs_destroy_inode(
693 	struct inode		*inode)
694 {
695 	struct xfs_inode	*ip = XFS_I(inode);
696 
697 	trace_xfs_destroy_inode(ip);
698 
699 	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
700 	XFS_STATS_INC(ip->i_mount, vn_rele);
701 	XFS_STATS_INC(ip->i_mount, vn_remove);
702 	xfs_inode_mark_reclaimable(ip);
703 }
704 
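/*
 * For lazytime mounts, commit a transaction logging the inode timestamps
 * so that a timestamp-only dirtying reaches the on-disk inode.
 */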
705 static void
706 xfs_fs_dirty_inode(
707 	struct inode			*inode,
708 	int				flags)
709 {
710 	struct xfs_inode		*ip = XFS_I(inode);
711 	struct xfs_mount		*mp = ip->i_mount;
712 	struct xfs_trans		*tp;
713 
714 	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
715 		return;
716 
717 	/*
718 	 * Only do the timestamp update if the inode is dirty (I_DIRTY_SYNC)
719 	 * and has dirty timestamp (I_DIRTY_TIME). I_DIRTY_TIME can be passed
720 	 * in flags possibly together with I_DIRTY_SYNC.
721 	 */
722 	if ((flags & ~I_DIRTY_TIME) != I_DIRTY_SYNC || !(flags & I_DIRTY_TIME))
723 		return;
724 
725 	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
726 		return;
727 	xfs_ilock(ip, XFS_ILOCK_EXCL);
728 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
729 	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
730 	xfs_trans_commit(tp);
731 }
732 
733 /*
734  * Slab object creation initialisation for the XFS inode.
735  * This covers only the idempotent fields in the XFS inode;
736  * all other fields need to be initialised on allocation
737  * from the slab. This avoids the need to repeatedly initialise
738  * fields in the xfs inode that are left in the initialised state
739  * when freeing the inode.
740  */
741 STATIC void
742 xfs_fs_inode_init_once(
743 	void			*inode)
744 {
745 	struct xfs_inode	*ip = inode;
746 
747 	memset(ip, 0, sizeof(struct xfs_inode));
748 
749 	/* vfs inode */
750 	inode_init_once(VFS_I(ip));
751 
752 	/* xfs inode */
753 	atomic_set(&ip->i_pincount, 0);
754 	spin_lock_init(&ip->i_flags_lock);
755 	init_rwsem(&ip->i_lock);
756 }
757 
758 /*
759  * We do an unlocked check for XFS_IDONTCACHE here because we are already
760  * serialised against cache hits here via the inode->i_lock and igrab() in
761  * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
762  * racing with us, and it avoids needing to grab a spinlock here for every inode
763  * we drop the final reference on.
764  */
765 STATIC int
766 xfs_fs_drop_inode(
767 	struct inode		*inode)
768 {
769 	struct xfs_inode	*ip = XFS_I(inode);
770 
771 	/*
772 	 * If this unlinked inode is in the middle of recovery, don't
773 	 * drop the inode just yet; log recovery will take care of
774 	 * that.  See the comment for this inode flag.
775 	 */
776 	if (ip->i_flags & XFS_IRECOVERY) {
777 		ASSERT(xlog_recovery_needed(ip->i_mount->m_log));
778 		return 0;
779 	}
780 
781 	return generic_drop_inode(inode);
782 }
783 
784 STATIC void
785 xfs_fs_evict_inode(
786 	struct inode		*inode)
787 {
788 	if (IS_DAX(inode))
789 		dax_break_layout_final(inode);
790 
791 	truncate_inode_pages_final(&inode->i_data);
792 	clear_inode(inode);
793 }
794 
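/* Free the buffer targets, debugfs entries, device names and the mount. */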
795 static void
796 xfs_mount_free(
797 	struct xfs_mount	*mp)
798 {
799 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
800 		xfs_free_buftarg(mp->m_logdev_targp);
801 	if (mp->m_rtdev_targp && mp->m_rtdev_targp != mp->m_ddev_targp)
802 		xfs_free_buftarg(mp->m_rtdev_targp);
803 	if (mp->m_ddev_targp)
804 		xfs_free_buftarg(mp->m_ddev_targp);
805 
806 	debugfs_remove(mp->m_debugfs);
807 	kfree(mp->m_rtname);
808 	kfree(mp->m_logname);
809 	kfree(mp);
810 }
811 
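/*
 * Sync the filesystem for syncfs()/sync(); the blocking pass forces the
 * log and, during a freeze, shuts down the background gc workers.
 */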
812 STATIC int
813 xfs_fs_sync_fs(
814 	struct super_block	*sb,
815 	int			wait)
816 {
817 	struct xfs_mount	*mp = XFS_M(sb);
818 	int			error;
819 
820 	trace_xfs_fs_sync_fs(mp, __return_address);
821 
822 	/*
823 	 * Doing anything during the async pass would be counterproductive.
824 	 */
825 	if (!wait)
826 		return 0;
827 
828 	error = xfs_log_force(mp, XFS_LOG_SYNC);
829 	if (error)
830 		return error;
831 
832 	if (laptop_mode) {
833 		/*
834 		 * The disk must be active because we're syncing.
835 		 * We schedule log work now (now that the disk is
836 		 * active) instead of later (when it might not be).
837 		 */
838 		flush_delayed_work(&mp->m_log->l_work);
839 	}
840 
841 	/*
842 	 * If we are called with page faults frozen out, it means we are about
843 	 * to freeze the transaction subsystem. Take the opportunity to shut
844 	 * down inodegc because once SB_FREEZE_FS is set it's too late to
845 	 * prevent inactivation races with freeze. The fs doesn't get called
846 	 * again by the freezing process until after SB_FREEZE_FS has been set,
847 	 * so it's now or never.  Same logic applies to speculative allocation
848 	 * garbage collection.
849 	 *
850 	 * We don't care if this is a normal syncfs call that does this or
851 	 * freeze that does this - we can run this multiple times without issue
852 	 * and we won't race with a restart because a restart can only occur
853 	 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
854 	 */
855 	if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
856 		xfs_inodegc_stop(mp);
857 		xfs_blockgc_stop(mp);
858 		xfs_zone_gc_stop(mp);
859 	}
860 
861 	return 0;
862 }
863 
864 static xfs_extlen_t
865 xfs_internal_log_size(
866 	struct xfs_mount	*mp)
867 {
868 	if (!mp->m_sb.sb_logstart)
869 		return 0;
870 	return mp->m_sb.sb_logblocks;
871 }
872 
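/* Fill out the block count and free space statistics for the data device. */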
873 static void
874 xfs_statfs_data(
875 	struct xfs_mount	*mp,
876 	struct kstatfs		*st)
877 {
878 	int64_t			fdblocks =
879 		xfs_sum_freecounter(mp, XC_FREE_BLOCKS);
880 
881 	/* make sure st->f_bfree does not underflow */
882 	st->f_bfree = max(0LL,
883 		fdblocks - xfs_freecounter_unavailable(mp, XC_FREE_BLOCKS));
884 
885 	/*
886 	 * sb_dblocks can change during growfs, but nothing cares whether we
887 	 * report the old or the new value while it is in progress.
888 	 */
889 	st->f_blocks = mp->m_sb.sb_dblocks - xfs_internal_log_size(mp);
890 }
891 
892 /*
893  * When stat(v)fs is called on a file with the realtime bit set or a directory
894  * with the rtinherit bit, report freespace information for the RT device
895  * instead of the main data device.
896  */
897 static void
898 xfs_statfs_rt(
899 	struct xfs_mount	*mp,
900 	struct kstatfs		*st)
901 {
902 	st->f_bfree = xfs_rtbxlen_to_blen(mp,
903 			xfs_sum_freecounter(mp, XC_FREE_RTEXTENTS));
904 	st->f_blocks = mp->m_sb.sb_rblocks - xfs_rtbxlen_to_blen(mp,
905 			mp->m_free[XC_FREE_RTEXTENTS].res_total);
906 }
907 
908 static void
909 xfs_statfs_inodes(
910 	struct xfs_mount	*mp,
911 	struct kstatfs		*st)
912 {
913 	uint64_t		icount = percpu_counter_sum(&mp->m_icount);
914 	uint64_t		ifree = percpu_counter_sum(&mp->m_ifree);
915 	uint64_t		fakeinos = XFS_FSB_TO_INO(mp, st->f_bfree);
916 
917 	st->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
918 	if (M_IGEO(mp)->maxicount)
919 		st->f_files = min_t(typeof(st->f_files), st->f_files,
920 					M_IGEO(mp)->maxicount);
921 
922 	/* If sb_icount overshot maxicount, report actual allocation */
923 	st->f_files = max_t(typeof(st->f_files), st->f_files,
924 			mp->m_sb.sb_icount);
925 
926 	/* Make sure st->f_ffree does not underflow */
927 	st->f_ffree = max_t(int64_t, 0, st->f_files - (icount - ifree));
928 }
929 
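/* Return filesystem statistics for statfs(2) and friends. */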
930 STATIC int
931 xfs_fs_statfs(
932 	struct dentry		*dentry,
933 	struct kstatfs		*st)
934 {
935 	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
936 	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
937 
938 	/*
939 	 * Expedite background inodegc but don't wait. We do not want to block
940 	 * here waiting hours for a billion extent file to be truncated.
941 	 */
942 	xfs_inodegc_push(mp);
943 
944 	st->f_type = XFS_SUPER_MAGIC;
945 	st->f_namelen = MAXNAMELEN - 1;
946 	st->f_bsize = mp->m_sb.sb_blocksize;
947 	st->f_fsid = u64_to_fsid(huge_encode_dev(mp->m_ddev_targp->bt_dev));
948 
949 	xfs_statfs_data(mp, st);
950 	xfs_statfs_inodes(mp, st);
951 
952 	if (XFS_IS_REALTIME_MOUNT(mp) &&
953 	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME)))
954 		xfs_statfs_rt(mp, st);
955 
956 	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
957 	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
958 			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
959 		xfs_qm_statvfs(ip, st);
960 
961 	/*
962 	 * XFS does not distinguish between blocks available to privileged and
963 	 * unprivileged users.
964 	 */
965 	st->f_bavail = st->f_bfree;
966 	return 0;
967 }
968 
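/* Remember the reserve pool sizes and release the reservations. */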
969 STATIC void
970 xfs_save_resvblks(
971 	struct xfs_mount	*mp)
972 {
973 	enum xfs_free_counter	i;
974 
975 	for (i = 0; i < XC_FREE_NR; i++) {
976 		mp->m_free[i].res_saved = mp->m_free[i].res_total;
977 		xfs_reserve_blocks(mp, i, 0);
978 	}
979 }
980 
981 STATIC void
982 xfs_restore_resvblks(
983 	struct xfs_mount	*mp)
984 {
985 	uint64_t		resblks;
986 	enum xfs_free_counter	i;
987 
988 	for (i = 0; i < XC_FREE_NR; i++) {
989 		if (mp->m_free[i].res_saved) {
990 			resblks = mp->m_free[i].res_saved;
991 			mp->m_free[i].res_saved = 0;
992 		} else
993 			resblks = xfs_default_resblks(mp, i);
994 		xfs_reserve_blocks(mp, i, resblks);
995 	}
996 }
997 
998 /*
999  * Second stage of a freeze. The data is already frozen so we only
1000  * need to take care of the metadata. Once that's done sync the superblock
1001  * to the log to dirty it in case of a crash while frozen. This ensures that we
1002  * will recover the unlinked inode lists on the next mount.
1003  */
1004 STATIC int
1005 xfs_fs_freeze(
1006 	struct super_block	*sb)
1007 {
1008 	struct xfs_mount	*mp = XFS_M(sb);
1009 	unsigned int		flags;
1010 	int			ret;
1011 
1012 	/*
1013 	 * The filesystem is now frozen far enough that memory reclaim
1014 	 * cannot safely operate on the filesystem. Hence we need to
1015 	 * set a GFP_NOFS context here to avoid recursion deadlocks.
1016 	 */
1017 	flags = memalloc_nofs_save();
1018 	xfs_save_resvblks(mp);
1019 	ret = xfs_log_quiesce(mp);
1020 	memalloc_nofs_restore(flags);
1021 
1022 	/*
1023 	 * For read-write filesystems, we need to restart the inodegc on error
1024 	 * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not
1025 	 * going to be run to restart it now.  We are at SB_FREEZE_FS level
1026 	 * here, so we can restart safely without racing with a stop in
1027 	 * xfs_fs_sync_fs().
1028 	 */
1029 	if (ret && !xfs_is_readonly(mp)) {
1030 		xfs_blockgc_start(mp);
1031 		xfs_inodegc_start(mp);
1032 		xfs_zone_gc_start(mp);
1033 	}
1034 
1035 	return ret;
1036 }
1037 
1038 STATIC int
1039 xfs_fs_unfreeze(
1040 	struct super_block	*sb)
1041 {
1042 	struct xfs_mount	*mp = XFS_M(sb);
1043 
1044 	xfs_restore_resvblks(mp);
1045 	xfs_log_work_queue(mp);
1046 
1047 	/*
1048 	 * Don't reactivate the inodegc worker on a readonly filesystem because
1049 	 * inodes are sent directly to reclaim.  Don't reactivate the blockgc
1050 	 * worker because there are no speculative preallocations on a readonly
1051 	 * filesystem.
1052 	 */
1053 	if (!xfs_is_readonly(mp)) {
1054 		xfs_zone_gc_start(mp);
1055 		xfs_blockgc_start(mp);
1056 		xfs_inodegc_start(mp);
1057 	}
1058 
1059 	return 0;
1060 }
1061 
1062 /*
1063  * This function fills in xfs_mount_t fields based on mount args.
1064  * Note: the superblock _has_ now been read in.
1065  */
1066 STATIC int
1067 xfs_finish_flags(
1068 	struct xfs_mount	*mp)
1069 {
1070 	/* Fail a mount where the logbuf is smaller than the log stripe */
1071 	if (xfs_has_logv2(mp)) {
1072 		if (mp->m_logbsize <= 0 &&
1073 		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
1074 			mp->m_logbsize = mp->m_sb.sb_logsunit;
1075 		} else if (mp->m_logbsize > 0 &&
1076 			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
1077 			xfs_warn(mp,
1078 		"logbuf size must be greater than or equal to log stripe size");
1079 			return -EINVAL;
1080 		}
1081 	} else {
1082 		/* Fail a mount if the logbuf is larger than 32K */
1083 		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
1084 			xfs_warn(mp,
1085 		"logbuf size for version 1 logs must be 16K or 32K");
1086 			return -EINVAL;
1087 		}
1088 	}
1089 
1090 	/*
1091 	 * V5 filesystems always use attr2 format for attributes.
1092 	 */
1093 	if (xfs_has_crc(mp) && xfs_has_noattr2(mp)) {
1094 		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
1095 			     "attr2 is always enabled for V5 filesystems.");
1096 		return -EINVAL;
1097 	}
1098 
1099 	/*
1100 	 * prohibit r/w mounts of read-only filesystems
1101 	 */
1102 	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !xfs_is_readonly(mp)) {
1103 		xfs_warn(mp,
1104 			"cannot mount a read-only filesystem as read-write");
1105 		return -EROFS;
1106 	}
1107 
1108 	if ((mp->m_qflags & XFS_GQUOTA_ACCT) &&
1109 	    (mp->m_qflags & XFS_PQUOTA_ACCT) &&
1110 	    !xfs_has_pquotino(mp)) {
1111 		xfs_warn(mp,
1112 		  "Super block does not support project and group quota together");
1113 		return -EINVAL;
1114 	}
1115 
1116 	if (!xfs_has_zoned(mp)) {
1117 		if (mp->m_max_open_zones) {
1118 			xfs_warn(mp,
1119 "max_open_zones mount option only supported on zoned file systems.");
1120 			return -EINVAL;
1121 		}
1122 		if (mp->m_features & XFS_FEAT_NOLIFETIME) {
1123 			xfs_warn(mp,
1124 "nolifetime mount option only supported on zoned file systems.");
1125 			return -EINVAL;
1126 		}
1127 	}
1128 
1129 	return 0;
1130 }
1131 
1132 static int
1133 xfs_init_percpu_counters(
1134 	struct xfs_mount	*mp)
1135 {
1136 	int			error;
1137 	int			i;
1138 
1139 	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
1140 	if (error)
1141 		return -ENOMEM;
1142 
1143 	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
1144 	if (error)
1145 		goto free_icount;
1146 
1147 	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
1148 	if (error)
1149 		goto free_ifree;
1150 
1151 	error = percpu_counter_init(&mp->m_delalloc_rtextents, 0, GFP_KERNEL);
1152 	if (error)
1153 		goto free_delalloc;
1154 
1155 	for (i = 0; i < XC_FREE_NR; i++) {
1156 		error = percpu_counter_init(&mp->m_free[i].count, 0,
1157 				GFP_KERNEL);
1158 		if (error)
1159 			goto free_freecounters;
1160 	}
1161 
1162 	return 0;
1163 
1164 free_freecounters:
1165 	while (--i >= 0)
1166 		percpu_counter_destroy(&mp->m_free[i].count);
1167 	percpu_counter_destroy(&mp->m_delalloc_rtextents);
1168 free_delalloc:
1169 	percpu_counter_destroy(&mp->m_delalloc_blks);
1170 free_ifree:
1171 	percpu_counter_destroy(&mp->m_ifree);
1172 free_icount:
1173 	percpu_counter_destroy(&mp->m_icount);
1174 	return -ENOMEM;
1175 }
1176 
1177 void
1178 xfs_reinit_percpu_counters(
1179 	struct xfs_mount	*mp)
1180 {
1181 	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
1182 	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
1183 	xfs_set_freecounter(mp, XC_FREE_BLOCKS, mp->m_sb.sb_fdblocks);
1184 	if (!xfs_has_zoned(mp))
1185 		xfs_set_freecounter(mp, XC_FREE_RTEXTENTS,
1186 				mp->m_sb.sb_frextents);
1187 }
1188 
1189 static void
1190 xfs_destroy_percpu_counters(
1191 	struct xfs_mount	*mp)
1192 {
1193 	enum xfs_free_counter	i;
1194 
1195 	for (i = 0; i < XC_FREE_NR; i++)
1196 		percpu_counter_destroy(&mp->m_free[i].count);
1197 	percpu_counter_destroy(&mp->m_icount);
1198 	percpu_counter_destroy(&mp->m_ifree);
1199 	ASSERT(xfs_is_shutdown(mp) ||
1200 	       percpu_counter_sum(&mp->m_delalloc_rtextents) == 0);
1201 	percpu_counter_destroy(&mp->m_delalloc_rtextents);
1202 	ASSERT(xfs_is_shutdown(mp) ||
1203 	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
1204 	percpu_counter_destroy(&mp->m_delalloc_blks);
1205 }
1206 
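/* Set up the per-CPU deferred inode inactivation state. */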
1207 static int
1208 xfs_inodegc_init_percpu(
1209 	struct xfs_mount	*mp)
1210 {
1211 	struct xfs_inodegc	*gc;
1212 	int			cpu;
1213 
1214 	mp->m_inodegc = alloc_percpu(struct xfs_inodegc);
1215 	if (!mp->m_inodegc)
1216 		return -ENOMEM;
1217 
1218 	for_each_possible_cpu(cpu) {
1219 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
1220 		gc->cpu = cpu;
1221 		gc->mp = mp;
1222 		init_llist_head(&gc->list);
1223 		gc->items = 0;
1224 		gc->error = 0;
1225 		INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
1226 	}
1227 	return 0;
1228 }
1229 
1230 static void
1231 xfs_inodegc_free_percpu(
1232 	struct xfs_mount	*mp)
1233 {
1234 	if (!mp->m_inodegc)
1235 		return;
1236 	free_percpu(mp->m_inodegc);
1237 }
1238 
1239 static void
1240 xfs_fs_put_super(
1241 	struct super_block	*sb)
1242 {
1243 	struct xfs_mount	*mp = XFS_M(sb);
1244 
1245 	xfs_notice(mp, "Unmounting Filesystem %pU", &mp->m_sb.sb_uuid);
1246 	xfs_filestream_unmount(mp);
1247 	xfs_unmountfs(mp);
1248 
1249 	xfs_rtmount_freesb(mp);
1250 	xfs_freesb(mp);
1251 	xchk_mount_stats_free(mp);
1252 	free_percpu(mp->m_stats.xs_stats);
1253 	xfs_inodegc_free_percpu(mp);
1254 	xfs_destroy_percpu_counters(mp);
1255 	xfs_destroy_mount_workqueues(mp);
1256 	xfs_shutdown_devices(mp);
1257 }
1258 
1259 static long
1260 xfs_fs_nr_cached_objects(
1261 	struct super_block	*sb,
1262 	struct shrink_control	*sc)
1263 {
1264 	/* Paranoia: catch incorrect calls during mount setup or teardown */
1265 	if (WARN_ON_ONCE(!sb->s_fs_info))
1266 		return 0;
1267 	return xfs_reclaim_inodes_count(XFS_M(sb));
1268 }
1269 
1270 static long
1271 xfs_fs_free_cached_objects(
1272 	struct super_block	*sb,
1273 	struct shrink_control	*sc)
1274 {
1275 	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
1276 }
1277 
1278 static void
1279 xfs_fs_shutdown(
1280 	struct super_block	*sb)
1281 {
1282 	xfs_force_shutdown(XFS_M(sb), SHUTDOWN_DEVICE_REMOVED);
1283 }
1284 
1285 static int
1286 xfs_fs_show_stats(
1287 	struct seq_file		*m,
1288 	struct dentry		*root)
1289 {
1290 	struct xfs_mount	*mp = XFS_M(root->d_sb);
1291 
1292 	if (xfs_has_zoned(mp) && IS_ENABLED(CONFIG_XFS_RT))
1293 		xfs_zoned_show_stats(m, mp);
1294 	return 0;
1295 }
1296 
1297 static const struct super_operations xfs_super_operations = {
1298 	.alloc_inode		= xfs_fs_alloc_inode,
1299 	.destroy_inode		= xfs_fs_destroy_inode,
1300 	.dirty_inode		= xfs_fs_dirty_inode,
1301 	.drop_inode		= xfs_fs_drop_inode,
1302 	.evict_inode		= xfs_fs_evict_inode,
1303 	.put_super		= xfs_fs_put_super,
1304 	.sync_fs		= xfs_fs_sync_fs,
1305 	.freeze_fs		= xfs_fs_freeze,
1306 	.unfreeze_fs		= xfs_fs_unfreeze,
1307 	.statfs			= xfs_fs_statfs,
1308 	.show_options		= xfs_fs_show_options,
1309 	.nr_cached_objects	= xfs_fs_nr_cached_objects,
1310 	.free_cached_objects	= xfs_fs_free_cached_objects,
1311 	.shutdown		= xfs_fs_shutdown,
1312 	.show_stats		= xfs_fs_show_stats,
1313 };
1314 
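/*
 * Parse an integer option that may carry a K/M/G unit suffix,
 * e.g. "32k" -> 32768.
 */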
1315 static int
1316 suffix_kstrtoint(
1317 	const char	*s,
1318 	unsigned int	base,
1319 	int		*res)
1320 {
1321 	int		last, shift_left_factor = 0, _res;
1322 	char		*value;
1323 	int		ret = 0;
1324 
1325 	value = kstrdup(s, GFP_KERNEL);
1326 	if (!value)
1327 		return -ENOMEM;
1328 
1329 	last = strlen(value) - 1;
1330 	if (value[last] == 'K' || value[last] == 'k') {
1331 		shift_left_factor = 10;
1332 		value[last] = '\0';
1333 	}
1334 	if (value[last] == 'M' || value[last] == 'm') {
1335 		shift_left_factor = 20;
1336 		value[last] = '\0';
1337 	}
1338 	if (value[last] == 'G' || value[last] == 'g') {
1339 		shift_left_factor = 30;
1340 		value[last] = '\0';
1341 	}
1342 
1343 	if (kstrtoint(value, base, &_res))
1344 		ret = -EINVAL;
1345 	kfree(value);
1346 	*res = _res << shift_left_factor;
1347 	return ret;
1348 }
1349 
1350 static int
1351 suffix_kstrtoull(
1352 	const char		*s,
1353 	unsigned int		base,
1354 	unsigned long long	*res)
1355 {
1356 	int			last, shift_left_factor = 0;
1357 	unsigned long long	_res;
1358 	char			*value;
1359 	int			ret = 0;
1360 
1361 	value = kstrdup(s, GFP_KERNEL);
1362 	if (!value)
1363 		return -ENOMEM;
1364 
1365 	last = strlen(value) - 1;
1366 	if (value[last] == 'K' || value[last] == 'k') {
1367 		shift_left_factor = 10;
1368 		value[last] = '\0';
1369 	}
1370 	if (value[last] == 'M' || value[last] == 'm') {
1371 		shift_left_factor = 20;
1372 		value[last] = '\0';
1373 	}
1374 	if (value[last] == 'G' || value[last] == 'g') {
1375 		shift_left_factor = 30;
1376 		value[last] = '\0';
1377 	}
1378 
1379 	if (kstrtoull(value, base, &_res))
1380 		ret = -EINVAL;
1381 	kfree(value);
1382 	*res = _res << shift_left_factor;
1383 	return ret;
1384 }
1385 
1386 static inline void
1387 xfs_fs_warn_deprecated(
1388 	struct fs_context	*fc,
1389 	struct fs_parameter	*param,
1390 	uint64_t		flag,
1391 	bool			value)
1392 {
1393 	/* Don't print the warning if reconfiguring and the current mount
1394 	 * point already had the flag set.
1395 	 */
1396 	if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
1397 	    !!(XFS_M(fc->root->d_sb)->m_features & flag) == value)
1398 		return;
1399 	xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
1400 }
1401 
1402 /*
1403  * Set mount state from a mount option.
1404  *
1405  * NOTE: mp->m_super is NULL here!
1406  */
1407 static int
1408 xfs_fs_parse_param(
1409 	struct fs_context	*fc,
1410 	struct fs_parameter	*param)
1411 {
1412 	struct xfs_mount	*parsing_mp = fc->s_fs_info;
1413 	struct fs_parse_result	result;
1414 	int			size = 0;
1415 	int			opt;
1416 
1417 	BUILD_BUG_ON(XFS_QFLAGS_MNTOPTS & XFS_MOUNT_QUOTA_ALL);
1418 
1419 	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
1420 	if (opt < 0)
1421 		return opt;
1422 
1423 	switch (opt) {
1424 	case Opt_logbufs:
1425 		parsing_mp->m_logbufs = result.uint_32;
1426 		return 0;
1427 	case Opt_logbsize:
1428 		if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
1429 			return -EINVAL;
1430 		return 0;
1431 	case Opt_logdev:
1432 		kfree(parsing_mp->m_logname);
1433 		parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
1434 		if (!parsing_mp->m_logname)
1435 			return -ENOMEM;
1436 		return 0;
1437 	case Opt_rtdev:
1438 		kfree(parsing_mp->m_rtname);
1439 		parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
1440 		if (!parsing_mp->m_rtname)
1441 			return -ENOMEM;
1442 		return 0;
1443 	case Opt_allocsize:
1444 		if (suffix_kstrtoint(param->string, 10, &size))
1445 			return -EINVAL;
1446 		parsing_mp->m_allocsize_log = ffs(size) - 1;
1447 		parsing_mp->m_features |= XFS_FEAT_ALLOCSIZE;
1448 		return 0;
1449 	case Opt_grpid:
1450 	case Opt_bsdgroups:
1451 		parsing_mp->m_features |= XFS_FEAT_GRPID;
1452 		return 0;
1453 	case Opt_nogrpid:
1454 	case Opt_sysvgroups:
1455 		parsing_mp->m_features &= ~XFS_FEAT_GRPID;
1456 		return 0;
1457 	case Opt_wsync:
1458 		parsing_mp->m_features |= XFS_FEAT_WSYNC;
1459 		return 0;
1460 	case Opt_norecovery:
1461 		parsing_mp->m_features |= XFS_FEAT_NORECOVERY;
1462 		return 0;
1463 	case Opt_noalign:
1464 		parsing_mp->m_features |= XFS_FEAT_NOALIGN;
1465 		return 0;
1466 	case Opt_swalloc:
1467 		parsing_mp->m_features |= XFS_FEAT_SWALLOC;
1468 		return 0;
1469 	case Opt_sunit:
1470 		parsing_mp->m_dalign = result.uint_32;
1471 		return 0;
1472 	case Opt_swidth:
1473 		parsing_mp->m_swidth = result.uint_32;
1474 		return 0;
1475 	case Opt_inode32:
1476 		parsing_mp->m_features |= XFS_FEAT_SMALL_INUMS;
1477 		return 0;
1478 	case Opt_inode64:
1479 		parsing_mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
1480 		return 0;
1481 	case Opt_nouuid:
1482 		parsing_mp->m_features |= XFS_FEAT_NOUUID;
1483 		return 0;
1484 	case Opt_largeio:
1485 		parsing_mp->m_features |= XFS_FEAT_LARGE_IOSIZE;
1486 		return 0;
1487 	case Opt_nolargeio:
1488 		parsing_mp->m_features &= ~XFS_FEAT_LARGE_IOSIZE;
1489 		return 0;
1490 	case Opt_filestreams:
1491 		parsing_mp->m_features |= XFS_FEAT_FILESTREAMS;
1492 		return 0;
1493 	case Opt_noquota:
1494 		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
1495 		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
1496 		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
1497 		return 0;
1498 	case Opt_quota:
1499 	case Opt_uquota:
1500 	case Opt_usrquota:
1501 		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD);
1502 		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
1503 		return 0;
1504 	case Opt_qnoenforce:
1505 	case Opt_uqnoenforce:
1506 		parsing_mp->m_qflags |= XFS_UQUOTA_ACCT;
1507 		parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
1508 		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
1509 		return 0;
1510 	case Opt_pquota:
1511 	case Opt_prjquota:
1512 		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD);
1513 		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
1514 		return 0;
1515 	case Opt_pqnoenforce:
1516 		parsing_mp->m_qflags |= XFS_PQUOTA_ACCT;
1517 		parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
1518 		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
1519 		return 0;
1520 	case Opt_gquota:
1521 	case Opt_grpquota:
1522 		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
1523 		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
1524 		return 0;
1525 	case Opt_gqnoenforce:
1526 		parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
1527 		parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
1528 		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
1529 		return 0;
1530 	case Opt_discard:
1531 		parsing_mp->m_features |= XFS_FEAT_DISCARD;
1532 		return 0;
1533 	case Opt_nodiscard:
1534 		parsing_mp->m_features &= ~XFS_FEAT_DISCARD;
1535 		return 0;
1536 #ifdef CONFIG_FS_DAX
1537 	case Opt_dax:
1538 		xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
1539 		return 0;
1540 	case Opt_dax_enum:
1541 		xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
1542 		return 0;
1543 #endif
1544 	/* Following mount options will be removed in September 2025 */
1545 	case Opt_ikeep:
1546 		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true);
1547 		parsing_mp->m_features |= XFS_FEAT_IKEEP;
1548 		return 0;
1549 	case Opt_noikeep:
1550 		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false);
1551 		parsing_mp->m_features &= ~XFS_FEAT_IKEEP;
1552 		return 0;
1553 	case Opt_attr2:
1554 		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true);
1555 		parsing_mp->m_features |= XFS_FEAT_ATTR2;
1556 		return 0;
1557 	case Opt_noattr2:
1558 		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true);
1559 		parsing_mp->m_features |= XFS_FEAT_NOATTR2;
1560 		return 0;
1561 	case Opt_max_open_zones:
1562 		parsing_mp->m_max_open_zones = result.uint_32;
1563 		return 0;
1564 	case Opt_lifetime:
1565 		parsing_mp->m_features &= ~XFS_FEAT_NOLIFETIME;
1566 		return 0;
1567 	case Opt_nolifetime:
1568 		parsing_mp->m_features |= XFS_FEAT_NOLIFETIME;
1569 		return 0;
1570 	case Opt_max_atomic_write:
1571 		if (suffix_kstrtoull(param->string, 10,
1572 				     &parsing_mp->m_awu_max_bytes)) {
1573 			xfs_warn(parsing_mp,
1574  "max atomic write size must be positive integer");
1575 			return -EINVAL;
1576 		}
1577 		return 0;
1578 	default:
1579 		xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
1580 		return -EINVAL;
1581 	}
1582 
1583 	return 0;
1584 }
1585 
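/* Sanity-check the mount options before the superblock has been read. */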
1586 static int
1587 xfs_fs_validate_params(
1588 	struct xfs_mount	*mp)
1589 {
1590 	/* No recovery flag requires a read-only mount */
1591 	if (xfs_has_norecovery(mp) && !xfs_is_readonly(mp)) {
1592 		xfs_warn(mp, "no-recovery mounts must be read-only.");
1593 		return -EINVAL;
1594 	}
1595 
1596 	/*
1597 	 * We have not read the superblock at this point, so only the attr2
1598 	 * mount option can set the attr2 feature by this stage.
1599 	 */
1600 	if (xfs_has_attr2(mp) && xfs_has_noattr2(mp)) {
1601 		xfs_warn(mp, "attr2 and noattr2 cannot both be specified.");
1602 		return -EINVAL;
1603 	}
1604 
1605 
1606 	if (xfs_has_noalign(mp) && (mp->m_dalign || mp->m_swidth)) {
1607 		xfs_warn(mp,
1608 	"sunit and swidth options incompatible with the noalign option");
1609 		return -EINVAL;
1610 	}
1611 
1612 	if (!IS_ENABLED(CONFIG_XFS_QUOTA) &&
1613 	    (mp->m_qflags & ~XFS_QFLAGS_MNTOPTS)) {
1614 		xfs_warn(mp, "quota support not available in this kernel.");
1615 		return -EINVAL;
1616 	}
1617 
1618 	if ((mp->m_dalign && !mp->m_swidth) ||
1619 	    (!mp->m_dalign && mp->m_swidth)) {
1620 		xfs_warn(mp, "sunit and swidth must be specified together");
1621 		return -EINVAL;
1622 	}
1623 
1624 	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
1625 		xfs_warn(mp,
1626 	"stripe width (%d) must be a multiple of the stripe unit (%d)",
1627 			mp->m_swidth, mp->m_dalign);
1628 		return -EINVAL;
1629 	}
1630 
1631 	if (mp->m_logbufs != -1 &&
1632 	    mp->m_logbufs != 0 &&
1633 	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
1634 	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
1635 		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
1636 			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
1637 		return -EINVAL;
1638 	}
1639 
1640 	if (mp->m_logbsize != -1 &&
1641 	    mp->m_logbsize !=  0 &&
1642 	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
1643 	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
1644 	     !is_power_of_2(mp->m_logbsize))) {
1645 		xfs_warn(mp,
1646 			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
1647 			mp->m_logbsize);
1648 		return -EINVAL;
1649 	}
1650 
1651 	if (xfs_has_allocsize(mp) &&
1652 	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
1653 	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
1654 		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
1655 			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
1656 		return -EINVAL;
1657 	}
1658 
1659 	return 0;
1660 }
1661 
1662 struct dentry *
1663 xfs_debugfs_mkdir(
1664 	const char	*name,
1665 	struct dentry	*parent)
1666 {
1667 	struct dentry	*child;
1668 
1669 	/* Apparently we're expected to ignore error returns?? */
1670 	child = debugfs_create_dir(name, parent);
1671 	if (IS_ERR(child))
1672 		return NULL;
1673 
1674 	return child;
1675 }
1676 
1677 static int
1678 xfs_fs_fill_super(
1679 	struct super_block	*sb,
1680 	struct fs_context	*fc)
1681 {
1682 	struct xfs_mount	*mp = sb->s_fs_info;
1683 	struct inode		*root;
1684 	int			flags = 0, error;
1685 
1686 	mp->m_super = sb;
1687 
1688 	/*
1689 	 * Copy VFS mount flags from the context now that all parameter parsing
1690 	 * is guaranteed to have been completed by either the old mount API or
1691 	 * the newer fsopen/fsconfig API.
1692 	 */
1693 	if (fc->sb_flags & SB_RDONLY)
1694 		xfs_set_readonly(mp);
1695 	if (fc->sb_flags & SB_DIRSYNC)
1696 		mp->m_features |= XFS_FEAT_DIRSYNC;
1697 	if (fc->sb_flags & SB_SYNCHRONOUS)
1698 		mp->m_features |= XFS_FEAT_WSYNC;
1699 
1700 	error = xfs_fs_validate_params(mp);
1701 	if (error)
1702 		return error;
1703 
1704 	sb_min_blocksize(sb, BBSIZE);
1705 	sb->s_xattr = xfs_xattr_handlers;
1706 	sb->s_export_op = &xfs_export_operations;
1707 #ifdef CONFIG_XFS_QUOTA
1708 	sb->s_qcop = &xfs_quotactl_operations;
1709 	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
1710 #endif
1711 	sb->s_op = &xfs_super_operations;
1712 
1713 	/*
1714 	 * Delay mount work if the debug hook is set. This is debug
1715 	 * instrumentation to coordinate simulation of xfs mount failures with
1716 	 * VFS superblock operations.
1717 	 */
1718 	if (xfs_globals.mount_delay) {
1719 		xfs_notice(mp, "Delaying mount for %d seconds.",
1720 			xfs_globals.mount_delay);
1721 		msleep(xfs_globals.mount_delay * 1000);
1722 	}
1723 
1724 	if (fc->sb_flags & SB_SILENT)
1725 		flags |= XFS_MFSI_QUIET;
1726 
1727 	error = xfs_open_devices(mp);
1728 	if (error)
1729 		return error;
1730 
1731 	if (xfs_debugfs) {
1732 		mp->m_debugfs = xfs_debugfs_mkdir(mp->m_super->s_id,
1733 						  xfs_debugfs);
1734 	} else {
1735 		mp->m_debugfs = NULL;
1736 	}
1737 
1738 	error = xfs_init_mount_workqueues(mp);
1739 	if (error)
1740 		goto out_shutdown_devices;
1741 
1742 	error = xfs_init_percpu_counters(mp);
1743 	if (error)
1744 		goto out_destroy_workqueues;
1745 
1746 	error = xfs_inodegc_init_percpu(mp);
1747 	if (error)
1748 		goto out_destroy_counters;
1749 
1750 	/* Allocate stats memory before we do operations that might use it */
1751 	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
1752 	if (!mp->m_stats.xs_stats) {
1753 		error = -ENOMEM;
1754 		goto out_destroy_inodegc;
1755 	}
1756 
1757 	error = xchk_mount_stats_alloc(mp);
1758 	if (error)
1759 		goto out_free_stats;
1760 
1761 	error = xfs_readsb(mp, flags);
1762 	if (error)
1763 		goto out_free_scrub_stats;
1764 
1765 	error = xfs_finish_flags(mp);
1766 	if (error)
1767 		goto out_free_sb;
1768 
1769 	error = xfs_setup_devices(mp);
1770 	if (error)
1771 		goto out_free_sb;
1772 
1773 	/*
1774 	 * V4 support is undergoing deprecation.
1775 	 *
1776 	 * Note: this has to use an open coded m_features check as xfs_has_crc
1777 	 * always returns false for !CONFIG_XFS_SUPPORT_V4.
1778 	 */
1779 	if (!(mp->m_features & XFS_FEAT_CRC)) {
1780 		if (!IS_ENABLED(CONFIG_XFS_SUPPORT_V4)) {
1781 			xfs_warn(mp,
1782 	"Deprecated V4 format (crc=0) not supported by kernel.");
1783 			error = -EINVAL;
1784 			goto out_free_sb;
1785 		}
1786 		xfs_warn_once(mp,
1787 	"Deprecated V4 format (crc=0) will not be supported after September 2030.");
1788 	}
1789 
1790 	/* ASCII case insensitivity is undergoing deprecation. */
1791 	if (xfs_has_asciici(mp)) {
1792 #ifdef CONFIG_XFS_SUPPORT_ASCII_CI
1793 		xfs_warn_once(mp,
1794 	"Deprecated ASCII case-insensitivity feature (ascii-ci=1) will not be supported after September 2030.");
1795 #else
1796 		xfs_warn(mp,
1797 	"Deprecated ASCII case-insensitivity feature (ascii-ci=1) not supported by kernel.");
1798 		error = -EINVAL;
1799 		goto out_free_sb;
1800 #endif
1801 	}
1802 
1803 	/*
1804 	 * Filesystem claims it needs repair, so refuse the mount unless
1805 	 * norecovery is also specified, in which case the filesystem can
1806 	 * be mounted with no risk of further damage.
1807 	 */
1808 	if (xfs_has_needsrepair(mp) && !xfs_has_norecovery(mp)) {
1809 		xfs_warn(mp, "Filesystem needs repair.  Please run xfs_repair.");
1810 		error = -EFSCORRUPTED;
1811 		goto out_free_sb;
1812 	}
1813 
1814 	/*
1815 	 * Don't touch the filesystem if a user tool thinks it owns the primary
1816 	 * superblock.  mkfs doesn't clear the flag from secondary supers, so
1817 	 * we don't check them at all.
1818 	 */
1819 	if (mp->m_sb.sb_inprogress) {
1820 		xfs_warn(mp, "Offline file system operation in progress!");
1821 		error = -EFSCORRUPTED;
1822 		goto out_free_sb;
1823 	}
1824 
1825 	if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
1826 		size_t max_folio_size = mapping_max_folio_size_supported();
1827 
1828 		if (!xfs_has_crc(mp)) {
1829 			xfs_warn(mp,
1830 "V4 Filesystem with blocksize %d bytes. Only pagesize (%ld) or less is supported.",
1831 				mp->m_sb.sb_blocksize, PAGE_SIZE);
1832 			error = -ENOSYS;
1833 			goto out_free_sb;
1834 		}
1835 
1836 		if (mp->m_sb.sb_blocksize > max_folio_size) {
1837 			xfs_warn(mp,
1838 "block size (%u bytes) not supported; only block size (%zu) or less is supported",
1839 				mp->m_sb.sb_blocksize, max_folio_size);
1840 			error = -ENOSYS;
1841 			goto out_free_sb;
1842 		}
1843 
1844 		xfs_warn_experimental(mp, XFS_EXPERIMENTAL_LBS);
1845 	}
1846 
1847 	/* Ensure this filesystem fits in the page cache limits */
1848 	if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
1849 	    xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
1850 		xfs_warn(mp,
1851 		"file system too large to be mounted on this system.");
1852 		error = -EFBIG;
1853 		goto out_free_sb;
1854 	}
1855 
1856 	/*
1857 	 * XFS block mappings use 54 bits to store the logical block offset.
1858 	 * This should suffice to handle the maximum file size that the VFS
1859 	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
1860 	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
1861 	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
1862 	 * to check this assertion.
1863 	 *
1864 	 * Avoid integer overflow by comparing the maximum bmbt offset to the
1865 	 * maximum pagecache offset in units of fs blocks.
1866 	 */
1867 	if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
1868 		xfs_warn(mp,
1869 "MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
1870 			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
1871 			 XFS_MAX_FILEOFF);
1872 		error = -EINVAL;
1873 		goto out_free_sb;
1874 	}
1875 
1876 	error = xfs_rtmount_readsb(mp);
1877 	if (error)
1878 		goto out_free_sb;
1879 
1880 	error = xfs_filestream_mount(mp);
1881 	if (error)
1882 		goto out_free_rtsb;
1883 
1884 	/*
1885 	 * We must configure the block size in the superblock before we run the
1886 	 * full mount process, as the mount process can look up and cache inodes.
1887 	 */
1888 	sb->s_magic = XFS_SUPER_MAGIC;
1889 	sb->s_blocksize = mp->m_sb.sb_blocksize;
1890 	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1891 	sb->s_maxbytes = MAX_LFS_FILESIZE;
1892 	sb->s_max_links = XFS_MAXLINK;
1893 	sb->s_time_gran = 1;
1894 	if (xfs_has_bigtime(mp)) {
1895 		sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
1896 		sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
1897 	} else {
1898 		sb->s_time_min = XFS_LEGACY_TIME_MIN;
1899 		sb->s_time_max = XFS_LEGACY_TIME_MAX;
1900 	}
1901 	trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
1902 	sb->s_iflags |= SB_I_CGROUPWB | SB_I_ALLOW_HSM;
1903 
1904 	set_posix_acl_flag(sb);
1905 
1906 	/* version 5 superblocks support inode version counters. */
1907 	if (xfs_has_crc(mp))
1908 		sb->s_flags |= SB_I_VERSION;
1909 
1910 	if (xfs_has_dax_always(mp)) {
1911 		error = xfs_setup_dax_always(mp);
1912 		if (error)
1913 			goto out_filestream_unmount;
1914 	}
1915 
1916 	if (xfs_has_discard(mp) && !bdev_max_discard_sectors(sb->s_bdev)) {
1917 		xfs_warn(mp,
1918 	"mounting with \"discard\" option, but the device does not support discard");
1919 		mp->m_features &= ~XFS_FEAT_DISCARD;
1920 	}
1921 
1922 	if (xfs_has_zoned(mp)) {
1923 		if (!xfs_has_metadir(mp)) {
1924 			xfs_alert(mp,
1925 		"metadir feature required for zoned realtime devices.");
1926 			error = -EINVAL;
1927 			goto out_filestream_unmount;
1928 		}
1929 		xfs_warn_experimental(mp, XFS_EXPERIMENTAL_ZONED);
1930 	} else if (xfs_has_metadir(mp)) {
1931 		xfs_warn_experimental(mp, XFS_EXPERIMENTAL_METADIR);
1932 	}
1933 
1934 	if (xfs_has_reflink(mp)) {
1935 		if (xfs_has_realtime(mp) &&
1936 		    !xfs_reflink_supports_rextsize(mp, mp->m_sb.sb_rextsize)) {
1937 			xfs_alert(mp,
1938 	"reflink not compatible with realtime extent size %u!",
1939 					mp->m_sb.sb_rextsize);
1940 			error = -EINVAL;
1941 			goto out_filestream_unmount;
1942 		}
1943 
1944 		if (xfs_has_zoned(mp)) {
1945 			xfs_alert(mp,
1946 	"reflink not compatible with zoned RT device!");
1947 			error = -EINVAL;
1948 			goto out_filestream_unmount;
1949 		}
1950 
1951 		if (xfs_globals.always_cow) {
1952 			xfs_info(mp, "using DEBUG-only always_cow mode.");
1953 			mp->m_always_cow = true;
1954 		}
1955 	}
1956 
1957 	/*
1958 	 * If no quota mount options were provided, maybe we'll try to pick
1959 	 * up the quota accounting and enforcement flags from the ondisk sb.
1960 	 */
1961 	if (!(mp->m_qflags & XFS_QFLAGS_MNTOPTS))
1962 		xfs_set_resuming_quotaon(mp);
1963 	mp->m_qflags &= ~XFS_QFLAGS_MNTOPTS;
1964 
1965 	error = xfs_mountfs(mp);
1966 	if (error)
1967 		goto out_filestream_unmount;
1968 
1969 	root = igrab(VFS_I(mp->m_rootip));
1970 	if (!root) {
1971 		error = -ENOENT;
1972 		goto out_unmount;
1973 	}
1974 	sb->s_root = d_make_root(root);
1975 	if (!sb->s_root) {
1976 		error = -ENOMEM;
1977 		goto out_unmount;
1978 	}
1979 
1980 	return 0;
1981 
1982  out_filestream_unmount:
1983 	xfs_filestream_unmount(mp);
1984  out_free_rtsb:
1985 	xfs_rtmount_freesb(mp);
1986  out_free_sb:
1987 	xfs_freesb(mp);
1988  out_free_scrub_stats:
1989 	xchk_mount_stats_free(mp);
1990  out_free_stats:
1991 	free_percpu(mp->m_stats.xs_stats);
1992  out_destroy_inodegc:
1993 	xfs_inodegc_free_percpu(mp);
1994  out_destroy_counters:
1995 	xfs_destroy_percpu_counters(mp);
1996  out_destroy_workqueues:
1997 	xfs_destroy_mount_workqueues(mp);
1998  out_shutdown_devices:
1999 	xfs_shutdown_devices(mp);
2000 	return error;
2001 
2002  out_unmount:
2003 	xfs_filestream_unmount(mp);
2004 	xfs_unmountfs(mp);
2005 	goto out_free_rtsb;
2006 }
2007 
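/* fs_context entry point: build the superblock on top of a block device. */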
2008 static int
2009 xfs_fs_get_tree(
2010 	struct fs_context	*fc)
2011 {
2012 	return get_tree_bdev(fc, xfs_fs_fill_super);
2013 }
2014 
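/*
 * Remount from read-only to read-write.  The transition is refused if the
 * external log or realtime device is read-only, if this is a norecovery
 * mount, or if the superblock carries unknown ro-compat feature bits.
 */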
2015 static int
2016 xfs_remount_rw(
2017 	struct xfs_mount	*mp)
2018 {
2019 	struct xfs_sb		*sbp = &mp->m_sb;
2020 	int			error;
2021 
2022 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp &&
2023 	    bdev_read_only(mp->m_logdev_targp->bt_bdev)) {
2024 		xfs_warn(mp,
2025 			"ro->rw transition prohibited by read-only logdev");
2026 		return -EACCES;
2027 	}
2028 
2029 	if (mp->m_rtdev_targp &&
2030 	    bdev_read_only(mp->m_rtdev_targp->bt_bdev)) {
2031 		xfs_warn(mp,
2032 			"ro->rw transition prohibited by read-only rtdev");
2033 		return -EACCES;
2034 	}
2035 
2036 	if (xfs_has_norecovery(mp)) {
2037 		xfs_warn(mp,
2038 			"ro->rw transition prohibited on norecovery mount");
2039 		return -EINVAL;
2040 	}
2041 
2042 	if (xfs_sb_is_v5(sbp) &&
2043 	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
2044 		xfs_warn(mp,
2045 	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
2046 			(sbp->sb_features_ro_compat &
2047 				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
2048 		return -EINVAL;
2049 	}
2050 
2051 	xfs_clear_readonly(mp);
2052 
2053 	/*
2054 	 * If this is the first remount to writeable state we might have some
2055 	 * superblock changes to update.
2056 	 */
2057 	if (mp->m_update_sb) {
2058 		error = xfs_sync_sb(mp, false);
2059 		if (error) {
2060 			xfs_warn(mp, "failed to write sb changes");
2061 			return error;
2062 		}
2063 		mp->m_update_sb = false;
2064 	}
2065 
2066 	/*
2067 	 * Fill out the reserve pool if it is empty. Use the stashed value if
2068 	 * it is non-zero, otherwise go with the default.
2069 	 */
2070 	xfs_restore_resvblks(mp);
2071 	xfs_log_work_queue(mp);
2072 	xfs_blockgc_start(mp);
2073 
2074 	/* Create the per-AG metadata reservation pool. */
2075 	error = xfs_fs_reserve_ag_blocks(mp);
2076 	if (error && error != -ENOSPC)
2077 		return error;
2078 
2079 	/* Re-enable the background inode inactivation worker. */
2080 	xfs_inodegc_start(mp);
2081 
2082 	/* Restart zone reclaim */
2083 	xfs_zone_gc_start(mp);
2084 
2085 	return 0;
2086 }
2087 
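/*
 * Remount from read-write to read-only.  Flush dirty data, stop background
 * block and inode garbage collection, release the per-AG metadata
 * reservations and the reserve block pool, and clean the log before marking
 * the mount read-only.
 */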
2088 static int
2089 xfs_remount_ro(
2090 	struct xfs_mount	*mp)
2091 {
2092 	struct xfs_icwalk	icw = {
2093 		.icw_flags	= XFS_ICWALK_FLAG_SYNC,
2094 	};
2095 	int			error;
2096 
2097 	/* Flush all the dirty data to disk. */
2098 	error = sync_filesystem(mp->m_super);
2099 	if (error)
2100 		return error;
2101 
2102 	/*
2103 	 * Cancel background eofb scanning so it cannot race with the final
2104 	 * log force+buftarg wait and deadlock the remount.
2105 	 */
2106 	xfs_blockgc_stop(mp);
2107 
2108 	/*
2109 	 * Clear out all remaining COW staging extents and speculative post-EOF
2110 	 * preallocations so that we don't leave inodes requiring inactivation
2111 	 * cleanups during reclaim on a read-only mount.  We must process every
2112 	 * cached inode, so this requires a synchronous cache scan.
2113 	 */
2114 	error = xfs_blockgc_free_space(mp, &icw);
2115 	if (error) {
2116 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
2117 		return error;
2118 	}
2119 
2120 	/*
2121 	 * Stop the inodegc background worker.  xfs_fs_reconfigure already
2122 	 * flushed all pending inodegc work when it sync'd the filesystem.
2123 	 * The VFS holds s_umount, so we know that inodes cannot enter
2124 	 * xfs_fs_destroy_inode during a remount operation.  In readonly mode
2125 	 * we send inodes straight to reclaim, so no inodes will be queued.
2126 	 */
2127 	xfs_inodegc_stop(mp);
2128 
2129 	/* Stop zone reclaim */
2130 	xfs_zone_gc_stop(mp);
2131 
2132 	/* Free the per-AG metadata reservation pool. */
2133 	xfs_fs_unreserve_ag_blocks(mp);
2134 
2135 	/*
2136 	 * Before we sync the metadata, we need to free up the reserve block
2137 	 * pool so that the used block count in the superblock on disk is
2138 	 * correct at the end of the remount. Stash the current reserve pool
2139 	 * size so that if we get remounted rw, we can return it to the same
2140 	 * size.
2141 	 */
2142 	xfs_save_resvblks(mp);
2143 
2144 	xfs_log_clean(mp);
2145 	xfs_set_readonly(mp);
2146 
2147 	return 0;
2148 }
2149 
2150 /*
2151  * Logically we would return an error here to prevent users from believing
2152  * they have changed mount options via remount that cannot actually be changed.
2153  *
2154  * But unfortunately mount(8) adds all options from mtab and fstab to the mount
2155  * arguments in some cases so we can't blindly reject options, but have to
2156  * check for each specified option if it actually differs from the currently
2157  * set option and only reject it if that's the case.
2158  *
2159  * Until that is implemented we return success for every remount request, and
2160  * silently ignore all options that we can't actually change.
2161  */
2162 static int
2163 xfs_fs_reconfigure(
2164 	struct fs_context *fc)
2165 {
2166 	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
2167 	struct xfs_mount	*new_mp = fc->s_fs_info;
2168 	int			flags = fc->sb_flags;
2169 	int			error;
2170 
2171 	new_mp->m_qflags &= ~XFS_QFLAGS_MNTOPTS;
2172 
2173 	/* version 5 superblocks always support version counters. */
2174 	if (xfs_has_crc(mp))
2175 		fc->sb_flags |= SB_I_VERSION;
2176 
2177 	error = xfs_fs_validate_params(new_mp);
2178 	if (error)
2179 		return error;
2180 
2181 	/* attr2 -> noattr2 */
2182 	if (xfs_has_noattr2(new_mp)) {
2183 		if (xfs_has_crc(mp)) {
2184 			xfs_warn(mp,
2185 			"attr2 is always enabled for a V5 filesystem - can't be changed.");
2186 			return -EINVAL;
2187 		}
2188 		mp->m_features &= ~XFS_FEAT_ATTR2;
2189 		mp->m_features |= XFS_FEAT_NOATTR2;
2190 	} else if (xfs_has_attr2(new_mp)) {
2191 		/* noattr2 -> attr2 */
2192 		mp->m_features &= ~XFS_FEAT_NOATTR2;
2193 		mp->m_features |= XFS_FEAT_ATTR2;
2194 	}
2195 
2196 	/* Validate new max_atomic_write option before making other changes */
2197 	if (mp->m_awu_max_bytes != new_mp->m_awu_max_bytes) {
2198 		error = xfs_set_max_atomic_write_opt(mp,
2199 				new_mp->m_awu_max_bytes);
2200 		if (error)
2201 			return error;
2202 	}
2203 
2204 	/* inode32 -> inode64 */
2205 	if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) {
2206 		mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
2207 		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
2208 	}
2209 
2210 	/* inode64 -> inode32 */
2211 	if (!xfs_has_small_inums(mp) && xfs_has_small_inums(new_mp)) {
2212 		mp->m_features |= XFS_FEAT_SMALL_INUMS;
2213 		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
2214 	}
2215 
2216 	/*
2217 	 * Now that mp has been modified according to the remount options, we
2218 	 * do a final option validation with xfs_finish_flags(), just like it
2219 	 * is done during mount. We cannot use xfs_finish_flags() on new_mp
2220 	 * as it contains only the user-given options, not the full set of
2221 	 * options currently in effect.
2222 	 */
2223 	error = xfs_finish_flags(mp);
2224 	if (error)
2225 		return error;
2226 
2227 	/* ro -> rw */
2228 	if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) {
2229 		error = xfs_remount_rw(mp);
2230 		if (error)
2231 			return error;
2232 	}
2233 
2234 	/* rw -> ro */
2235 	if (!xfs_is_readonly(mp) && (flags & SB_RDONLY)) {
2236 		error = xfs_remount_ro(mp);
2237 		if (error)
2238 			return error;
2239 	}
2240 
2241 	return 0;
2242 }
2243 
2244 static void
2245 xfs_fs_free(
2246 	struct fs_context	*fc)
2247 {
2248 	struct xfs_mount	*mp = fc->s_fs_info;
2249 
2250 	/*
2251 	 * mp is stored in the fs_context when it is initialized.
2252 	 * mp is transferred to the superblock on a successful mount,
2253 	 * but if an error occurs before the transfer we have to free
2254 	 * it here.
2255 	 */
2256 	if (mp)
2257 		xfs_mount_free(mp);
2258 }
2259 
2260 static const struct fs_context_operations xfs_context_ops = {
2261 	.parse_param = xfs_fs_parse_param,
2262 	.get_tree    = xfs_fs_get_tree,
2263 	.reconfigure = xfs_fs_reconfigure,
2264 	.free        = xfs_fs_free,
2265 };
2266 
2267 /*
2268  * WARNING: do not initialise any parameters in this function that depend on
2269  * mount option parsing having already been performed as this can be called from
2270  * fsopen() before any parameters have been set.
2271  */
2272 static int
2273 xfs_init_fs_context(
2274 	struct fs_context	*fc)
2275 {
2276 	struct xfs_mount	*mp;
2277 	int			i;
2278 
2279 	mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL | __GFP_NOFAIL);
2280 	if (!mp)
2281 		return -ENOMEM;
2282 
2283 	spin_lock_init(&mp->m_sb_lock);
2284 	for (i = 0; i < XG_TYPE_MAX; i++)
2285 		xa_init(&mp->m_groups[i].xa);
2286 	mutex_init(&mp->m_growlock);
2287 	mutex_init(&mp->m_metafile_resv_lock);
2288 	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
2289 	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
2290 	mp->m_kobj.kobject.kset = xfs_kset;
2291 	/*
2292 	 * We don't create the finobt per-ag space reservation until after log
2293 	 * recovery, so we must set this to true so that an ifree transaction
2294 	 * started during log recovery will not depend on space reservations
2295 	 * for finobt expansion.
2296 	 */
2297 	mp->m_finobt_nores = true;
2298 
2299 	/*
2300 	 * These can be overridden by the mount option parsing.
2301 	 */
2302 	mp->m_logbufs = -1;
2303 	mp->m_logbsize = -1;
2304 	mp->m_allocsize_log = 16; /* 64k */
2305 
2306 	xfs_hooks_init(&mp->m_dir_update_hooks);
2307 
2308 	fc->s_fs_info = mp;
2309 	fc->ops = &xfs_context_ops;
2310 
2311 	return 0;
2312 }
2313 
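/* Tear down the superblock and free the xfs_mount that backed it. */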
2314 static void
2315 xfs_kill_sb(
2316 	struct super_block		*sb)
2317 {
2318 	kill_block_super(sb);
2319 	xfs_mount_free(XFS_M(sb));
2320 }
2321 
2322 static struct file_system_type xfs_fs_type = {
2323 	.owner			= THIS_MODULE,
2324 	.name			= "xfs",
2325 	.init_fs_context	= xfs_init_fs_context,
2326 	.parameters		= xfs_fs_parameters,
2327 	.kill_sb		= xfs_kill_sb,
2328 	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP | FS_MGTIME |
2329 				  FS_LBS,
2330 };
2331 MODULE_ALIAS_FS("xfs");
2332 
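/*
 * Create the slab caches used by XFS for buffers, transactions, inodes and
 * the various log intent/done items.  On failure, destroy whatever was
 * created so far and return -ENOMEM.
 */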
2333 STATIC int __init
2334 xfs_init_caches(void)
2335 {
2336 	int		error;
2337 
2338 	xfs_buf_cache = kmem_cache_create("xfs_buf", sizeof(struct xfs_buf), 0,
2339 					 SLAB_HWCACHE_ALIGN |
2340 					 SLAB_RECLAIM_ACCOUNT,
2341 					 NULL);
2342 	if (!xfs_buf_cache)
2343 		goto out;
2344 
2345 	xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket",
2346 						sizeof(struct xlog_ticket),
2347 						0, 0, NULL);
2348 	if (!xfs_log_ticket_cache)
2349 		goto out_destroy_buf_cache;
2350 
2351 	error = xfs_btree_init_cur_caches();
2352 	if (error)
2353 		goto out_destroy_log_ticket_cache;
2354 
2355 	error = rcbagbt_init_cur_cache();
2356 	if (error)
2357 		goto out_destroy_btree_cur_cache;
2358 
2359 	error = xfs_defer_init_item_caches();
2360 	if (error)
2361 		goto out_destroy_rcbagbt_cur_cache;
2362 
2363 	xfs_da_state_cache = kmem_cache_create("xfs_da_state",
2364 					      sizeof(struct xfs_da_state),
2365 					      0, 0, NULL);
2366 	if (!xfs_da_state_cache)
2367 		goto out_destroy_defer_item_cache;
2368 
2369 	xfs_ifork_cache = kmem_cache_create("xfs_ifork",
2370 					   sizeof(struct xfs_ifork),
2371 					   0, 0, NULL);
2372 	if (!xfs_ifork_cache)
2373 		goto out_destroy_da_state_cache;
2374 
2375 	xfs_trans_cache = kmem_cache_create("xfs_trans",
2376 					   sizeof(struct xfs_trans),
2377 					   0, 0, NULL);
2378 	if (!xfs_trans_cache)
2379 		goto out_destroy_ifork_cache;
2380 
2381 
2382 	/*
2383 	 * The size of the cache-allocated buf log item is the maximum
2384 	 * size possible under XFS.  This wastes a little bit of memory,
2385 	 * but it is much faster.
2386 	 */
2387 	xfs_buf_item_cache = kmem_cache_create("xfs_buf_item",
2388 					      sizeof(struct xfs_buf_log_item),
2389 					      0, 0, NULL);
2390 	if (!xfs_buf_item_cache)
2391 		goto out_destroy_trans_cache;
2392 
2393 	xfs_efd_cache = kmem_cache_create("xfs_efd_item",
2394 			xfs_efd_log_item_sizeof(XFS_EFD_MAX_FAST_EXTENTS),
2395 			0, 0, NULL);
2396 	if (!xfs_efd_cache)
2397 		goto out_destroy_buf_item_cache;
2398 
2399 	xfs_efi_cache = kmem_cache_create("xfs_efi_item",
2400 			xfs_efi_log_item_sizeof(XFS_EFI_MAX_FAST_EXTENTS),
2401 			0, 0, NULL);
2402 	if (!xfs_efi_cache)
2403 		goto out_destroy_efd_cache;
2404 
2405 	xfs_inode_cache = kmem_cache_create("xfs_inode",
2406 					   sizeof(struct xfs_inode), 0,
2407 					   (SLAB_HWCACHE_ALIGN |
2408 					    SLAB_RECLAIM_ACCOUNT |
2409 					    SLAB_ACCOUNT),
2410 					   xfs_fs_inode_init_once);
2411 	if (!xfs_inode_cache)
2412 		goto out_destroy_efi_cache;
2413 
2414 	xfs_ili_cache = kmem_cache_create("xfs_ili",
2415 					 sizeof(struct xfs_inode_log_item), 0,
2416 					 SLAB_RECLAIM_ACCOUNT,
2417 					 NULL);
2418 	if (!xfs_ili_cache)
2419 		goto out_destroy_inode_cache;
2420 
2421 	xfs_icreate_cache = kmem_cache_create("xfs_icr",
2422 					     sizeof(struct xfs_icreate_item),
2423 					     0, 0, NULL);
2424 	if (!xfs_icreate_cache)
2425 		goto out_destroy_ili_cache;
2426 
2427 	xfs_rud_cache = kmem_cache_create("xfs_rud_item",
2428 					 sizeof(struct xfs_rud_log_item),
2429 					 0, 0, NULL);
2430 	if (!xfs_rud_cache)
2431 		goto out_destroy_icreate_cache;
2432 
2433 	xfs_rui_cache = kmem_cache_create("xfs_rui_item",
2434 			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
2435 			0, 0, NULL);
2436 	if (!xfs_rui_cache)
2437 		goto out_destroy_rud_cache;
2438 
2439 	xfs_cud_cache = kmem_cache_create("xfs_cud_item",
2440 					 sizeof(struct xfs_cud_log_item),
2441 					 0, 0, NULL);
2442 	if (!xfs_cud_cache)
2443 		goto out_destroy_rui_cache;
2444 
2445 	xfs_cui_cache = kmem_cache_create("xfs_cui_item",
2446 			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
2447 			0, 0, NULL);
2448 	if (!xfs_cui_cache)
2449 		goto out_destroy_cud_cache;
2450 
2451 	xfs_bud_cache = kmem_cache_create("xfs_bud_item",
2452 					 sizeof(struct xfs_bud_log_item),
2453 					 0, 0, NULL);
2454 	if (!xfs_bud_cache)
2455 		goto out_destroy_cui_cache;
2456 
2457 	xfs_bui_cache = kmem_cache_create("xfs_bui_item",
2458 			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
2459 			0, 0, NULL);
2460 	if (!xfs_bui_cache)
2461 		goto out_destroy_bud_cache;
2462 
2463 	xfs_attrd_cache = kmem_cache_create("xfs_attrd_item",
2464 					    sizeof(struct xfs_attrd_log_item),
2465 					    0, 0, NULL);
2466 	if (!xfs_attrd_cache)
2467 		goto out_destroy_bui_cache;
2468 
2469 	xfs_attri_cache = kmem_cache_create("xfs_attri_item",
2470 					    sizeof(struct xfs_attri_log_item),
2471 					    0, 0, NULL);
2472 	if (!xfs_attri_cache)
2473 		goto out_destroy_attrd_cache;
2474 
2475 	xfs_iunlink_cache = kmem_cache_create("xfs_iul_item",
2476 					     sizeof(struct xfs_iunlink_item),
2477 					     0, 0, NULL);
2478 	if (!xfs_iunlink_cache)
2479 		goto out_destroy_attri_cache;
2480 
2481 	xfs_xmd_cache = kmem_cache_create("xfs_xmd_item",
2482 					 sizeof(struct xfs_xmd_log_item),
2483 					 0, 0, NULL);
2484 	if (!xfs_xmd_cache)
2485 		goto out_destroy_iul_cache;
2486 
2487 	xfs_xmi_cache = kmem_cache_create("xfs_xmi_item",
2488 					 sizeof(struct xfs_xmi_log_item),
2489 					 0, 0, NULL);
2490 	if (!xfs_xmi_cache)
2491 		goto out_destroy_xmd_cache;
2492 
2493 	xfs_parent_args_cache = kmem_cache_create("xfs_parent_args",
2494 					     sizeof(struct xfs_parent_args),
2495 					     0, 0, NULL);
2496 	if (!xfs_parent_args_cache)
2497 		goto out_destroy_xmi_cache;
2498 
2499 	return 0;
2500 
2501  out_destroy_xmi_cache:
2502 	kmem_cache_destroy(xfs_xmi_cache);
2503  out_destroy_xmd_cache:
2504 	kmem_cache_destroy(xfs_xmd_cache);
2505  out_destroy_iul_cache:
2506 	kmem_cache_destroy(xfs_iunlink_cache);
2507  out_destroy_attri_cache:
2508 	kmem_cache_destroy(xfs_attri_cache);
2509  out_destroy_attrd_cache:
2510 	kmem_cache_destroy(xfs_attrd_cache);
2511  out_destroy_bui_cache:
2512 	kmem_cache_destroy(xfs_bui_cache);
2513  out_destroy_bud_cache:
2514 	kmem_cache_destroy(xfs_bud_cache);
2515  out_destroy_cui_cache:
2516 	kmem_cache_destroy(xfs_cui_cache);
2517  out_destroy_cud_cache:
2518 	kmem_cache_destroy(xfs_cud_cache);
2519  out_destroy_rui_cache:
2520 	kmem_cache_destroy(xfs_rui_cache);
2521  out_destroy_rud_cache:
2522 	kmem_cache_destroy(xfs_rud_cache);
2523  out_destroy_icreate_cache:
2524 	kmem_cache_destroy(xfs_icreate_cache);
2525  out_destroy_ili_cache:
2526 	kmem_cache_destroy(xfs_ili_cache);
2527  out_destroy_inode_cache:
2528 	kmem_cache_destroy(xfs_inode_cache);
2529  out_destroy_efi_cache:
2530 	kmem_cache_destroy(xfs_efi_cache);
2531  out_destroy_efd_cache:
2532 	kmem_cache_destroy(xfs_efd_cache);
2533  out_destroy_buf_item_cache:
2534 	kmem_cache_destroy(xfs_buf_item_cache);
2535  out_destroy_trans_cache:
2536 	kmem_cache_destroy(xfs_trans_cache);
2537  out_destroy_ifork_cache:
2538 	kmem_cache_destroy(xfs_ifork_cache);
2539  out_destroy_da_state_cache:
2540 	kmem_cache_destroy(xfs_da_state_cache);
2541  out_destroy_defer_item_cache:
2542 	xfs_defer_destroy_item_caches();
2543  out_destroy_rcbagbt_cur_cache:
2544 	rcbagbt_destroy_cur_cache();
2545  out_destroy_btree_cur_cache:
2546 	xfs_btree_destroy_cur_caches();
2547  out_destroy_log_ticket_cache:
2548 	kmem_cache_destroy(xfs_log_ticket_cache);
2549  out_destroy_buf_cache:
2550 	kmem_cache_destroy(xfs_buf_cache);
2551  out:
2552 	return -ENOMEM;
2553 }
2554 
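/* Destroy every cache created by xfs_init_caches(). */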
2555 STATIC void
2556 xfs_destroy_caches(void)
2557 {
2558 	/*
2559 	 * Make sure all delayed RCU frees are flushed before we
2560 	 * destroy caches.
2561 	 */
2562 	rcu_barrier();
2563 	kmem_cache_destroy(xfs_parent_args_cache);
2564 	kmem_cache_destroy(xfs_xmd_cache);
2565 	kmem_cache_destroy(xfs_xmi_cache);
2566 	kmem_cache_destroy(xfs_iunlink_cache);
2567 	kmem_cache_destroy(xfs_attri_cache);
2568 	kmem_cache_destroy(xfs_attrd_cache);
2569 	kmem_cache_destroy(xfs_bui_cache);
2570 	kmem_cache_destroy(xfs_bud_cache);
2571 	kmem_cache_destroy(xfs_cui_cache);
2572 	kmem_cache_destroy(xfs_cud_cache);
2573 	kmem_cache_destroy(xfs_rui_cache);
2574 	kmem_cache_destroy(xfs_rud_cache);
2575 	kmem_cache_destroy(xfs_icreate_cache);
2576 	kmem_cache_destroy(xfs_ili_cache);
2577 	kmem_cache_destroy(xfs_inode_cache);
2578 	kmem_cache_destroy(xfs_efi_cache);
2579 	kmem_cache_destroy(xfs_efd_cache);
2580 	kmem_cache_destroy(xfs_buf_item_cache);
2581 	kmem_cache_destroy(xfs_trans_cache);
2582 	kmem_cache_destroy(xfs_ifork_cache);
2583 	kmem_cache_destroy(xfs_da_state_cache);
2584 	xfs_defer_destroy_item_caches();
2585 	rcbagbt_destroy_cur_cache();
2586 	xfs_btree_destroy_cur_caches();
2587 	kmem_cache_destroy(xfs_log_ticket_cache);
2588 	kmem_cache_destroy(xfs_buf_cache);
2589 }
2590 
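/* Create the global workqueues shared by all XFS mounts. */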
2591 STATIC int __init
2592 xfs_init_workqueues(void)
2593 {
2594 	/*
2595 	 * The allocation workqueue can be used in memory reclaim situations
2596 	 * (writepage path), and parallelism is only limited by the number of
2597 	 * AGs in all the filesystems mounted. Hence use the default large
2598 	 * max_active value for this workqueue.
2599 	 */
2600 	xfs_alloc_wq = alloc_workqueue("xfsalloc",
2601 			XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
2602 	if (!xfs_alloc_wq)
2603 		return -ENOMEM;
2604 
2605 	xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
2606 			0);
2607 	if (!xfs_discard_wq)
2608 		goto out_free_alloc_wq;
2609 
2610 	return 0;
2611 out_free_alloc_wq:
2612 	destroy_workqueue(xfs_alloc_wq);
2613 	return -ENOMEM;
2614 }
2615 
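/* Tear down the global workqueues. */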
2616 STATIC void
2617 xfs_destroy_workqueues(void)
2618 {
2619 	destroy_workqueue(xfs_discard_wq);
2620 	destroy_workqueue(xfs_alloc_wq);
2621 }
2622 
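/*
 * Module init: check the on-disk structures, then set up caches, workqueues,
 * the MRU cache, procfs/sysctl, debugfs, sysfs and quota support before
 * registering the filesystem type.  Errors unwind in reverse order.
 */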
2623 STATIC int __init
2624 init_xfs_fs(void)
2625 {
2626 	int			error;
2627 
2628 	xfs_check_ondisk_structs();
2629 
2630 	error = xfs_dahash_test();
2631 	if (error)
2632 		return error;
2633 
2634 	printk(KERN_INFO XFS_VERSION_STRING " with "
2635 			 XFS_BUILD_OPTIONS " enabled\n");
2636 
2637 	xfs_dir_startup();
2638 
2639 	error = xfs_init_caches();
2640 	if (error)
2641 		goto out;
2642 
2643 	error = xfs_init_workqueues();
2644 	if (error)
2645 		goto out_destroy_caches;
2646 
2647 	error = xfs_mru_cache_init();
2648 	if (error)
2649 		goto out_destroy_wq;
2650 
2651 	error = xfs_init_procfs();
2652 	if (error)
2653 		goto out_mru_cache_uninit;
2654 
2655 	error = xfs_sysctl_register();
2656 	if (error)
2657 		goto out_cleanup_procfs;
2658 
2659 	xfs_debugfs = xfs_debugfs_mkdir("xfs", NULL);
2660 
2661 	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
2662 	if (!xfs_kset) {
2663 		error = -ENOMEM;
2664 		goto out_debugfs_unregister;
2665 	}
2666 
2667 	xfsstats.xs_kobj.kobject.kset = xfs_kset;
2668 
2669 	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
2670 	if (!xfsstats.xs_stats) {
2671 		error = -ENOMEM;
2672 		goto out_kset_unregister;
2673 	}
2674 
2675 	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
2676 			       "stats");
2677 	if (error)
2678 		goto out_free_stats;
2679 
2680 	error = xchk_global_stats_setup(xfs_debugfs);
2681 	if (error)
2682 		goto out_remove_stats_kobj;
2683 
2684 #ifdef DEBUG
2685 	xfs_dbg_kobj.kobject.kset = xfs_kset;
2686 	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
2687 	if (error)
2688 		goto out_remove_scrub_stats;
2689 #endif
2690 
2691 	error = xfs_qm_init();
2692 	if (error)
2693 		goto out_remove_dbg_kobj;
2694 
2695 	error = register_filesystem(&xfs_fs_type);
2696 	if (error)
2697 		goto out_qm_exit;
2698 	return 0;
2699 
2700  out_qm_exit:
2701 	xfs_qm_exit();
2702  out_remove_dbg_kobj:
2703 #ifdef DEBUG
2704 	xfs_sysfs_del(&xfs_dbg_kobj);
2705  out_remove_scrub_stats:
2706 #endif
2707 	xchk_global_stats_teardown();
2708  out_remove_stats_kobj:
2709 	xfs_sysfs_del(&xfsstats.xs_kobj);
2710  out_free_stats:
2711 	free_percpu(xfsstats.xs_stats);
2712  out_kset_unregister:
2713 	kset_unregister(xfs_kset);
2714  out_debugfs_unregister:
2715 	debugfs_remove(xfs_debugfs);
2716 	xfs_sysctl_unregister();
2717  out_cleanup_procfs:
2718 	xfs_cleanup_procfs();
2719  out_mru_cache_uninit:
2720 	xfs_mru_cache_uninit();
2721  out_destroy_wq:
2722 	xfs_destroy_workqueues();
2723  out_destroy_caches:
2724 	xfs_destroy_caches();
2725  out:
2726 	return error;
2727 }
2728 
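/* Module exit: undo init_xfs_fs() setup and unregister the filesystem. */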
2729 STATIC void __exit
2730 exit_xfs_fs(void)
2731 {
2732 	xfs_qm_exit();
2733 	unregister_filesystem(&xfs_fs_type);
2734 #ifdef DEBUG
2735 	xfs_sysfs_del(&xfs_dbg_kobj);
2736 #endif
2737 	xchk_global_stats_teardown();
2738 	xfs_sysfs_del(&xfsstats.xs_kobj);
2739 	free_percpu(xfsstats.xs_stats);
2740 	kset_unregister(xfs_kset);
2741 	debugfs_remove(xfs_debugfs);
2742 	xfs_sysctl_unregister();
2743 	xfs_cleanup_procfs();
2744 	xfs_mru_cache_uninit();
2745 	xfs_destroy_workqueues();
2746 	xfs_destroy_caches();
2747 	xfs_uuid_table_free();
2748 }
2749 
2750 module_init(init_xfs_fs);
2751 module_exit(exit_xfs_fs);
2752 
2753 MODULE_AUTHOR("Silicon Graphics, Inc.");
2754 MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
2755 MODULE_LICENSE("GPL");
2756