xref: /linux/fs/xfs/xfs_super.c (revision 27c0b5c4f67aeb73edd515200bd1e0c82a3ee892)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 
7 #include "xfs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_sb.h"
13 #include "xfs_mount.h"
14 #include "xfs_inode.h"
15 #include "xfs_btree.h"
16 #include "xfs_bmap.h"
17 #include "xfs_alloc.h"
18 #include "xfs_fsops.h"
19 #include "xfs_trans.h"
20 #include "xfs_buf_item.h"
21 #include "xfs_log.h"
22 #include "xfs_log_priv.h"
23 #include "xfs_dir2.h"
24 #include "xfs_extfree_item.h"
25 #include "xfs_mru_cache.h"
26 #include "xfs_inode_item.h"
27 #include "xfs_icache.h"
28 #include "xfs_trace.h"
29 #include "xfs_icreate_item.h"
30 #include "xfs_filestream.h"
31 #include "xfs_quota.h"
32 #include "xfs_sysfs.h"
33 #include "xfs_ondisk.h"
34 #include "xfs_rmap_item.h"
35 #include "xfs_refcount_item.h"
36 #include "xfs_bmap_item.h"
37 #include "xfs_reflink.h"
38 #include "xfs_pwork.h"
39 #include "xfs_ag.h"
40 #include "xfs_defer.h"
41 #include "xfs_attr_item.h"
42 #include "xfs_xattr.h"
43 #include "xfs_iunlink_item.h"
44 #include "xfs_dahash_test.h"
45 #include "xfs_rtbitmap.h"
46 #include "xfs_exchmaps_item.h"
47 #include "xfs_parent.h"
48 #include "xfs_rtalloc.h"
49 #include "xfs_zone_alloc.h"
50 #include "scrub/stats.h"
51 #include "scrub/rcbag_btree.h"
52 
53 #include <linux/magic.h>
54 #include <linux/fs_context.h>
55 #include <linux/fs_parser.h>
56 
57 static const struct super_operations xfs_super_operations;
58 
59 static struct dentry *xfs_debugfs;	/* top-level xfs debugfs dir */
60 static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
61 #ifdef DEBUG
62 static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
63 #endif
64 
65 enum xfs_dax_mode {
66 	XFS_DAX_INODE = 0,
67 	XFS_DAX_ALWAYS = 1,
68 	XFS_DAX_NEVER = 2,
69 };
70 
71 /* Were quota mount options provided?  Must use the upper 16 bits of qflags. */
72 #define XFS_QFLAGS_MNTOPTS	(1U << 31)
73 
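/*
 * Illustrative note: bit 31 can never collide with the on-disk quota flags,
 * which fit in the low 16 bits of m_qflags.  A compile-time check equivalent
 * to the one made in xfs_fs_parse_param() below would be:
 *
 *	BUILD_BUG_ON(XFS_QFLAGS_MNTOPTS & XFS_MOUNT_QUOTA_ALL);
 */
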
74 static void
75 xfs_mount_set_dax_mode(
76 	struct xfs_mount	*mp,
77 	enum xfs_dax_mode	mode)
78 {
79 	switch (mode) {
80 	case XFS_DAX_INODE:
81 		mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER);
82 		break;
83 	case XFS_DAX_ALWAYS:
84 		mp->m_features |= XFS_FEAT_DAX_ALWAYS;
85 		mp->m_features &= ~XFS_FEAT_DAX_NEVER;
86 		break;
87 	case XFS_DAX_NEVER:
88 		mp->m_features |= XFS_FEAT_DAX_NEVER;
89 		mp->m_features &= ~XFS_FEAT_DAX_ALWAYS;
90 		break;
91 	}
92 }
93 
94 static const struct constant_table dax_param_enums[] = {
95 	{"inode",	XFS_DAX_INODE },
96 	{"always",	XFS_DAX_ALWAYS },
97 	{"never",	XFS_DAX_NEVER },
98 	{}
99 };
100 
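/*
 * With the fsparam_enum() entry in xfs_fs_parameters below, fs_parse() maps
 * the option string onto one of these values, e.g. (illustrative):
 *
 *	"dax=never"  -> result.uint_32 == XFS_DAX_NEVER
 *	"dax=always" -> result.uint_32 == XFS_DAX_ALWAYS
 *
 * which xfs_fs_parse_param() passes straight to xfs_mount_set_dax_mode().
 */
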
101 /*
102  * Table driven mount option parser.
103  */
104 enum {
105 	Op_deprecated, Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
106 	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
107 	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
108 	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32,
109 	Opt_largeio, Opt_nolargeio,
110 	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
111 	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
112 	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
113 	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum, Opt_max_open_zones,
114 	Opt_lifetime, Opt_nolifetime, Opt_max_atomic_write,
115 };
116 
117 #define fsparam_dead(NAME) \
118 	__fsparam(NULL, (NAME), Op_deprecated, fs_param_deprecated, NULL)
119 
120 static const struct fs_parameter_spec xfs_fs_parameters[] = {
121 	/*
122 	 * These mount options were supposed to be deprecated in September 2025
123 	 * but the deprecation warning was buggy, so not all users were
124 	 * notified.  The deprecation is now obnoxiously loud and postponed to
125 	 * September 2030.
126 	 */
127 	fsparam_dead("attr2"),
128 	fsparam_dead("noattr2"),
129 	fsparam_dead("ikeep"),
130 	fsparam_dead("noikeep"),
131 
132 	fsparam_u32("logbufs",		Opt_logbufs),
133 	fsparam_string("logbsize",	Opt_logbsize),
134 	fsparam_string("logdev",	Opt_logdev),
135 	fsparam_string("rtdev",		Opt_rtdev),
136 	fsparam_flag("wsync",		Opt_wsync),
137 	fsparam_flag("noalign",		Opt_noalign),
138 	fsparam_flag("swalloc",		Opt_swalloc),
139 	fsparam_u32("sunit",		Opt_sunit),
140 	fsparam_u32("swidth",		Opt_swidth),
141 	fsparam_flag("nouuid",		Opt_nouuid),
142 	fsparam_flag("grpid",		Opt_grpid),
143 	fsparam_flag("nogrpid",		Opt_nogrpid),
144 	fsparam_flag("bsdgroups",	Opt_bsdgroups),
145 	fsparam_flag("sysvgroups",	Opt_sysvgroups),
146 	fsparam_string("allocsize",	Opt_allocsize),
147 	fsparam_flag("norecovery",	Opt_norecovery),
148 	fsparam_flag("inode64",		Opt_inode64),
149 	fsparam_flag("inode32",		Opt_inode32),
150 	fsparam_flag("largeio",		Opt_largeio),
151 	fsparam_flag("nolargeio",	Opt_nolargeio),
152 	fsparam_flag("filestreams",	Opt_filestreams),
153 	fsparam_flag("quota",		Opt_quota),
154 	fsparam_flag("noquota",		Opt_noquota),
155 	fsparam_flag("usrquota",	Opt_usrquota),
156 	fsparam_flag("grpquota",	Opt_grpquota),
157 	fsparam_flag("prjquota",	Opt_prjquota),
158 	fsparam_flag("uquota",		Opt_uquota),
159 	fsparam_flag("gquota",		Opt_gquota),
160 	fsparam_flag("pquota",		Opt_pquota),
161 	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
162 	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
163 	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
164 	fsparam_flag("qnoenforce",	Opt_qnoenforce),
165 	fsparam_flag("discard",		Opt_discard),
166 	fsparam_flag("nodiscard",	Opt_nodiscard),
167 	fsparam_flag("dax",		Opt_dax),
168 	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
169 	fsparam_u32("max_open_zones",	Opt_max_open_zones),
170 	fsparam_flag("lifetime",	Opt_lifetime),
171 	fsparam_flag("nolifetime",	Opt_nolifetime),
172 	fsparam_string("max_atomic_write",	Opt_max_atomic_write),
173 	{}
174 };
175 
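/*
 * Illustrative example of a mount invocation exercising several of the
 * options in the table above:
 *
 *	mount -t xfs -o logbsize=64k,sunit=128,swidth=512,prjquota /dev/sdb /mnt
 *
 * The VFS splits the comma-separated option string and calls
 * xfs_fs_parse_param() once per option; fs_parse() matches each key against
 * this table and returns the corresponding Opt_* value.
 */
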
176 struct proc_xfs_info {
177 	uint64_t	flag;
178 	char		*str;
179 };
180 
181 static int
182 xfs_fs_show_options(
183 	struct seq_file		*m,
184 	struct dentry		*root)
185 {
186 	static struct proc_xfs_info xfs_info_set[] = {
187 		/* the few simple ones we can get from the mount struct */
188 		{ XFS_FEAT_WSYNC,		",wsync" },
189 		{ XFS_FEAT_NOALIGN,		",noalign" },
190 		{ XFS_FEAT_SWALLOC,		",swalloc" },
191 		{ XFS_FEAT_NOUUID,		",nouuid" },
192 		{ XFS_FEAT_NORECOVERY,		",norecovery" },
193 		{ XFS_FEAT_FILESTREAMS,		",filestreams" },
194 		{ XFS_FEAT_GRPID,		",grpid" },
195 		{ XFS_FEAT_DISCARD,		",discard" },
196 		{ XFS_FEAT_LARGE_IOSIZE,	",largeio" },
197 		{ XFS_FEAT_DAX_ALWAYS,		",dax=always" },
198 		{ XFS_FEAT_DAX_NEVER,		",dax=never" },
199 		{ XFS_FEAT_NOLIFETIME,		",nolifetime" },
200 		{ 0, NULL }
201 	};
202 	struct xfs_mount	*mp = XFS_M(root->d_sb);
203 	struct proc_xfs_info	*xfs_infop;
204 
205 	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
206 		if (mp->m_features & xfs_infop->flag)
207 			seq_puts(m, xfs_infop->str);
208 	}
209 
210 	seq_printf(m, ",inode%d", xfs_has_small_inums(mp) ? 32 : 64);
211 
212 	if (xfs_has_allocsize(mp))
213 		seq_printf(m, ",allocsize=%dk",
214 			   (1 << mp->m_allocsize_log) >> 10);
215 
216 	if (mp->m_logbufs > 0)
217 		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
218 	if (mp->m_logbsize > 0)
219 		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);
220 
221 	if (mp->m_logname)
222 		seq_show_option(m, "logdev", mp->m_logname);
223 	if (mp->m_rtname)
224 		seq_show_option(m, "rtdev", mp->m_rtname);
225 
226 	if (mp->m_dalign > 0)
227 		seq_printf(m, ",sunit=%d",
228 				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
229 	if (mp->m_swidth > 0)
230 		seq_printf(m, ",swidth=%d",
231 				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));
232 
233 	if (mp->m_qflags & XFS_UQUOTA_ENFD)
234 		seq_puts(m, ",usrquota");
235 	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
236 		seq_puts(m, ",uqnoenforce");
237 
238 	if (mp->m_qflags & XFS_PQUOTA_ENFD)
239 		seq_puts(m, ",prjquota");
240 	else if (mp->m_qflags & XFS_PQUOTA_ACCT)
241 		seq_puts(m, ",pqnoenforce");
242 
243 	if (mp->m_qflags & XFS_GQUOTA_ENFD)
244 		seq_puts(m, ",grpquota");
245 	else if (mp->m_qflags & XFS_GQUOTA_ACCT)
246 		seq_puts(m, ",gqnoenforce");
247 
248 	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
249 		seq_puts(m, ",noquota");
250 
251 	if (mp->m_max_open_zones)
252 		seq_printf(m, ",max_open_zones=%u", mp->m_max_open_zones);
253 	if (mp->m_awu_max_bytes)
254 		seq_printf(m, ",max_atomic_write=%lluk",
255 				mp->m_awu_max_bytes >> 10);
256 
257 	return 0;
258 }
259 
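/*
 * Sketch (abridged, hypothetical mount) of the resulting /proc/mounts entry;
 * the device, mount point and rw/ro flag come from the VFS, the rest from the
 * code above:
 *
 *	/dev/sdb /mnt xfs rw,wsync,inode64,logbufs=8,logbsize=32k,prjquota 0 0
 */
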
260 static bool
261 xfs_set_inode_alloc_perag(
262 	struct xfs_perag	*pag,
263 	xfs_ino_t		ino,
264 	xfs_agnumber_t		max_metadata)
265 {
266 	if (!xfs_is_inode32(pag_mount(pag))) {
267 		set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
268 		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
269 		return false;
270 	}
271 
272 	if (ino > XFS_MAXINUMBER_32) {
273 		clear_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
274 		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
275 		return false;
276 	}
277 
278 	set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
279 	if (pag_agno(pag) < max_metadata)
280 		set_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
281 	else
282 		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
283 	return true;
284 }
285 
286 /*
287  * Set parameters for inode allocation heuristics, taking into account
288  * filesystem size and inode32/inode64 mount options; i.e. specifically
289  * whether or not XFS_FEAT_SMALL_INUMS is set.
290  *
291  * Inode allocation patterns are altered only if inode32 is requested
292  * (XFS_FEAT_SMALL_INUMS), and the filesystem is sufficiently large.
293  * If altered, XFS_OPSTATE_INODE32 is set as well.
294  *
295  * An agcount independent of that in the mount structure is provided
296  * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
297  * to the potentially higher ag count.
298  *
299  * Returns the maximum AG index which may contain inodes.
300  */
301 xfs_agnumber_t
302 xfs_set_inode_alloc(
303 	struct xfs_mount *mp,
304 	xfs_agnumber_t	agcount)
305 {
306 	xfs_agnumber_t	index;
307 	xfs_agnumber_t	maxagi = 0;
308 	xfs_sb_t	*sbp = &mp->m_sb;
309 	xfs_agnumber_t	max_metadata;
310 	xfs_agino_t	agino;
311 	xfs_ino_t	ino;
312 
313 	/*
314 	 * Calculate how much should be reserved for inodes to meet
315 	 * the max inode percentage.  Used only for inode32.
316 	 */
317 	if (M_IGEO(mp)->maxicount) {
318 		uint64_t	icount;
319 
320 		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
321 		do_div(icount, 100);
322 		icount += sbp->sb_agblocks - 1;
323 		do_div(icount, sbp->sb_agblocks);
324 		max_metadata = icount;
325 	} else {
326 		max_metadata = agcount;
327 	}
328 
329 	/* Get the last possible inode in the filesystem */
330 	agino =	XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
331 	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
332 
333 	/*
334 	 * If user asked for no more than 32-bit inodes, and the fs is
335 	 * sufficiently large, set XFS_OPSTATE_INODE32 if we must alter
336 	 * the allocator to accommodate the request.
337 	 */
338 	if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32)
339 		xfs_set_inode32(mp);
340 	else
341 		xfs_clear_inode32(mp);
342 
343 	for (index = 0; index < agcount; index++) {
344 		struct xfs_perag	*pag;
345 
346 		ino = XFS_AGINO_TO_INO(mp, index, agino);
347 
348 		pag = xfs_perag_get(mp, index);
349 		if (xfs_set_inode_alloc_perag(pag, ino, max_metadata))
350 			maxagi++;
351 		xfs_perag_put(pag);
352 	}
353 
354 	return xfs_is_inode32(mp) ? maxagi : agcount;
355 }
356 
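/*
 * Worked example of the max_metadata calculation above, with hypothetical
 * geometry: sb_dblocks = 268435456 (1TiB of 4k blocks), sb_imax_pct = 5,
 * sb_agblocks = 16777216:
 *
 *	icount       = 268435456 * 5 / 100	= 13421772 blocks
 *	max_metadata = (13421772 + 16777215)
 *			/ 16777216		= 1 AG (rounded up)
 *
 * so only AG 0 would be marked as preferring metadata by
 * xfs_set_inode_alloc_perag().
 */
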
357 static int
358 xfs_setup_dax_always(
359 	struct xfs_mount	*mp)
360 {
361 	if (!mp->m_ddev_targp->bt_daxdev &&
362 	    (!mp->m_rtdev_targp || !mp->m_rtdev_targp->bt_daxdev)) {
363 		xfs_alert(mp,
364 			"DAX unsupported by block device. Turning off DAX.");
365 		goto disable_dax;
366 	}
367 
368 	if (mp->m_super->s_blocksize != PAGE_SIZE) {
369 		xfs_alert(mp,
370 			"DAX not supported for blocksize. Turning off DAX.");
371 		goto disable_dax;
372 	}
373 
374 	if (xfs_has_reflink(mp) &&
375 	    bdev_is_partition(mp->m_ddev_targp->bt_bdev)) {
376 		xfs_alert(mp,
377 			"DAX and reflink cannot work with multi-partitions!");
378 		return -EINVAL;
379 	}
380 
381 	return 0;
382 
383 disable_dax:
384 	xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
385 	return 0;
386 }
387 
388 STATIC int
389 xfs_blkdev_get(
390 	xfs_mount_t		*mp,
391 	const char		*name,
392 	struct file		**bdev_filep)
393 {
394 	int			error = 0;
395 	blk_mode_t		mode;
396 
397 	mode = sb_open_mode(mp->m_super->s_flags);
398 	*bdev_filep = bdev_file_open_by_path(name, mode,
399 			mp->m_super, &fs_holder_ops);
400 	if (IS_ERR(*bdev_filep)) {
401 		error = PTR_ERR(*bdev_filep);
402 		*bdev_filep = NULL;
403 		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
404 	}
405 
406 	return error;
407 }
408 
409 STATIC void
410 xfs_shutdown_devices(
411 	struct xfs_mount	*mp)
412 {
413 	/*
414 	 * Udev is triggered whenever anyone closes a block device or unmounts
415 	 * a file system on a block device.
416 	 * The default udev rules invoke blkid to read the fs super and create
417 	 * symlinks to the bdev under /dev/disk.  For this, it uses buffered
418 	 * reads through the page cache.
419 	 *
420 	 * xfs_db also uses buffered reads to examine metadata.  There is no
421 	 * coordination between xfs_db and udev, which means that they can run
422 	 * concurrently.  Note there is no coordination between the kernel and
423 	 * blkid either.
424 	 *
425 	 * On a system with 64k pages, the page cache can cache the superblock
426 	 * and the root inode (and hence the root directory) with the same 64k
427 	 * page.  If udev spawns blkid after the mkfs and the system is busy
428 	 * enough that it is still running when xfs_db starts up, they'll both
429 	 * read from the same page in the pagecache.
430 	 *
431 	 * The unmount writes updated inode metadata to disk directly.  The XFS
432 	 * buffer cache does not use the bdev pagecache, so it needs to
433 	 * invalidate that pagecache on unmount.  If the above scenario occurs,
434 	 * the pagecache no longer reflects what's on disk, xfs_db reads the
435 	 * stale metadata, and fails to find /a.  Most of the time the lookup
436 	 * succeeds because closing a bdev invalidates the page cache, but when
437 	 * processes race, everyone loses.
438 	 */
439 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
440 		blkdev_issue_flush(mp->m_logdev_targp->bt_bdev);
441 		invalidate_bdev(mp->m_logdev_targp->bt_bdev);
442 	}
443 	if (mp->m_rtdev_targp) {
444 		blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
445 		invalidate_bdev(mp->m_rtdev_targp->bt_bdev);
446 	}
447 	blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
448 	invalidate_bdev(mp->m_ddev_targp->bt_bdev);
449 }
450 
451 /*
452  * The file system configurations are:
453  *	(1) device (partition) with data and internal log
454  *	(2) logical volume with data and log subvolumes.
455  *	(3) logical volume with data, log, and realtime subvolumes.
456  *
457  * We only have to handle opening the log and realtime volumes here if
458  * they are present.  The data subvolume has already been opened by
459  * get_sb_bdev() and is stored in sb->s_bdev.
460  */
461 STATIC int
462 xfs_open_devices(
463 	struct xfs_mount	*mp)
464 {
465 	struct super_block	*sb = mp->m_super;
466 	struct block_device	*ddev = sb->s_bdev;
467 	struct file		*logdev_file = NULL, *rtdev_file = NULL;
468 	int			error;
469 
470 	/*
471 	 * Open real time and log devices - order is important.
472 	 */
473 	if (mp->m_logname) {
474 		error = xfs_blkdev_get(mp, mp->m_logname, &logdev_file);
475 		if (error)
476 			return error;
477 	}
478 
479 	if (mp->m_rtname) {
480 		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev_file);
481 		if (error)
482 			goto out_close_logdev;
483 
484 		if (file_bdev(rtdev_file) == ddev ||
485 		    (logdev_file &&
486 		     file_bdev(rtdev_file) == file_bdev(logdev_file))) {
487 			xfs_warn(mp,
488 	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
489 			error = -EINVAL;
490 			goto out_close_rtdev;
491 		}
492 	}
493 
494 	/*
495 	 * Setup xfs_mount buffer target pointers
496 	 */
497 	mp->m_ddev_targp = xfs_alloc_buftarg(mp, sb->s_bdev_file);
498 	if (IS_ERR(mp->m_ddev_targp)) {
499 		error = PTR_ERR(mp->m_ddev_targp);
500 		mp->m_ddev_targp = NULL;
501 		goto out_close_rtdev;
502 	}
503 
504 	if (rtdev_file) {
505 		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev_file);
506 		if (IS_ERR(mp->m_rtdev_targp)) {
507 			error = PTR_ERR(mp->m_rtdev_targp);
508 			mp->m_rtdev_targp = NULL;
509 			goto out_free_ddev_targ;
510 		}
511 	}
512 
513 	if (logdev_file && file_bdev(logdev_file) != ddev) {
514 		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev_file);
515 		if (IS_ERR(mp->m_logdev_targp)) {
516 			error = PTR_ERR(mp->m_logdev_targp);
517 			mp->m_logdev_targp = NULL;
518 			goto out_free_rtdev_targ;
519 		}
520 	} else {
521 		mp->m_logdev_targp = mp->m_ddev_targp;
522 		/* Handle won't be used, drop it */
523 		if (logdev_file)
524 			bdev_fput(logdev_file);
525 	}
526 
527 	return 0;
528 
529  out_free_rtdev_targ:
530 	if (mp->m_rtdev_targp)
531 		xfs_free_buftarg(mp->m_rtdev_targp);
532  out_free_ddev_targ:
533 	xfs_free_buftarg(mp->m_ddev_targp);
534  out_close_rtdev:
535 	if (rtdev_file)
536 		bdev_fput(rtdev_file);
537  out_close_logdev:
538 	if (logdev_file)
539 		bdev_fput(logdev_file);
540 	return error;
541 }
542 
543 /*
544  * Setup xfs_mount buffer target pointers based on superblock
545  */
546 STATIC int
547 xfs_setup_devices(
548 	struct xfs_mount	*mp)
549 {
550 	int			error;
551 
552 	error = xfs_configure_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize,
553 			mp->m_sb.sb_dblocks);
554 	if (error)
555 		return error;
556 
557 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
558 		unsigned int	log_sector_size = BBSIZE;
559 
560 		if (xfs_has_sector(mp))
561 			log_sector_size = mp->m_sb.sb_logsectsize;
562 		error = xfs_configure_buftarg(mp->m_logdev_targp,
563 				log_sector_size, mp->m_sb.sb_logblocks);
564 		if (error)
565 			return error;
566 	}
567 
568 	if (mp->m_sb.sb_rtstart) {
569 		if (mp->m_rtdev_targp) {
570 			xfs_warn(mp,
571 		"can't use internal and external rtdev at the same time");
572 			return -EINVAL;
573 		}
574 		mp->m_rtdev_targp = mp->m_ddev_targp;
575 	} else if (mp->m_rtname) {
576 		error = xfs_configure_buftarg(mp->m_rtdev_targp,
577 				mp->m_sb.sb_sectsize, mp->m_sb.sb_rblocks);
578 		if (error)
579 			return error;
580 	}
581 
582 	return 0;
583 }
584 
585 STATIC int
586 xfs_init_mount_workqueues(
587 	struct xfs_mount	*mp)
588 {
589 	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
590 			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU),
591 			1, mp->m_super->s_id);
592 	if (!mp->m_buf_workqueue)
593 		goto out;
594 
595 	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
596 			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU),
597 			0, mp->m_super->s_id);
598 	if (!mp->m_unwritten_workqueue)
599 		goto out_destroy_buf;
600 
601 	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
602 			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU),
603 			0, mp->m_super->s_id);
604 	if (!mp->m_reclaim_workqueue)
605 		goto out_destroy_unwritten;
606 
607 	mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
608 			XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
609 			0, mp->m_super->s_id);
610 	if (!mp->m_blockgc_wq)
611 		goto out_destroy_reclaim;
612 
613 	mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s",
614 			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU),
615 			1, mp->m_super->s_id);
616 	if (!mp->m_inodegc_wq)
617 		goto out_destroy_blockgc;
618 
619 	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
620 			XFS_WQFLAGS(WQ_FREEZABLE | WQ_PERCPU), 0,
621 			mp->m_super->s_id);
622 	if (!mp->m_sync_workqueue)
623 		goto out_destroy_inodegc;
624 
625 	return 0;
626 
627 out_destroy_inodegc:
628 	destroy_workqueue(mp->m_inodegc_wq);
629 out_destroy_blockgc:
630 	destroy_workqueue(mp->m_blockgc_wq);
631 out_destroy_reclaim:
632 	destroy_workqueue(mp->m_reclaim_workqueue);
633 out_destroy_unwritten:
634 	destroy_workqueue(mp->m_unwritten_workqueue);
635 out_destroy_buf:
636 	destroy_workqueue(mp->m_buf_workqueue);
637 out:
638 	return -ENOMEM;
639 }
640 
641 STATIC void
642 xfs_destroy_mount_workqueues(
643 	struct xfs_mount	*mp)
644 {
645 	destroy_workqueue(mp->m_sync_workqueue);
646 	destroy_workqueue(mp->m_blockgc_wq);
647 	destroy_workqueue(mp->m_inodegc_wq);
648 	destroy_workqueue(mp->m_reclaim_workqueue);
649 	destroy_workqueue(mp->m_unwritten_workqueue);
650 	destroy_workqueue(mp->m_buf_workqueue);
651 }
652 
653 static void
654 xfs_flush_inodes_worker(
655 	struct work_struct	*work)
656 {
657 	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
658 						   m_flush_inodes_work);
659 	struct super_block	*sb = mp->m_super;
660 
661 	if (down_read_trylock(&sb->s_umount)) {
662 		sync_inodes_sb(sb);
663 		up_read(&sb->s_umount);
664 	}
665 }
666 
667 /*
668  * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
669  * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
670  * for IO to complete so that we effectively throttle multiple callers to the
671  * rate at which IO is completing.
672  */
673 void
674 xfs_flush_inodes(
675 	struct xfs_mount	*mp)
676 {
677 	/*
678 	 * If flush_work() returns true then that means we waited for a flush
679 	 * which was already in progress.  Don't bother running another scan.
680 	 */
681 	if (flush_work(&mp->m_flush_inodes_work))
682 		return;
683 
684 	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
685 	flush_work(&mp->m_flush_inodes_work);
686 }
687 
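/*
 * The above is a general "single flusher" throttle (sketch): concurrent
 * callers all wait on one background pass instead of each starting a scan of
 * their own:
 *
 *	if (flush_work(&work))		// a pass was already in flight,
 *		return;			// so piggyback on its result
 *	queue_work(wq, &work);		// otherwise start a fresh pass
 *	flush_work(&work);		// and wait for it to complete
 */
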
688 /* Catch misguided souls that try to use this interface on XFS */
689 STATIC struct inode *
690 xfs_fs_alloc_inode(
691 	struct super_block	*sb)
692 {
693 	BUG();
694 	return NULL;
695 }
696 
697 /*
698  * Now that the generic code is guaranteed not to be accessing
699  * the linux inode, we can inactivate and reclaim the inode.
700  */
701 STATIC void
702 xfs_fs_destroy_inode(
703 	struct inode		*inode)
704 {
705 	struct xfs_inode	*ip = XFS_I(inode);
706 
707 	trace_xfs_destroy_inode(ip);
708 
709 	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
710 	XFS_STATS_INC(ip->i_mount, vn_rele);
711 	XFS_STATS_INC(ip->i_mount, vn_remove);
712 	xfs_inode_mark_reclaimable(ip);
713 }
714 
715 static void
716 xfs_fs_dirty_inode(
717 	struct inode			*inode,
718 	int				flags)
719 {
720 	struct xfs_inode		*ip = XFS_I(inode);
721 	struct xfs_mount		*mp = ip->i_mount;
722 	struct xfs_trans		*tp;
723 
724 	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
725 		return;
726 
727 	/*
728 	 * Only do the timestamp update if the inode is dirty (I_DIRTY_SYNC)
729 	 * and has dirty timestamp (I_DIRTY_TIME). I_DIRTY_TIME can be passed
730 	 * in flags possibly together with I_DIRTY_SYNC.
731 	 */
732 	if ((flags & ~I_DIRTY_TIME) != I_DIRTY_SYNC || !(flags & I_DIRTY_TIME))
733 		return;
734 
735 	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
736 		return;
737 	xfs_ilock(ip, XFS_ILOCK_EXCL);
738 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
739 	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
740 	xfs_trans_commit(tp);
741 }
742 
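/*
 * For reference, how the flag check in xfs_fs_dirty_inode() resolves; only
 * the last combination reaches the transaction:
 *
 *	I_DIRTY_SYNC			-> return (no dirty timestamp)
 *	I_DIRTY_TIME			-> return (not a sync-style update)
 *	I_DIRTY_SYNC | I_DIRTY_TIME	-> log the timestamp update
 */
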
743 /*
744  * Slab object creation initialisation for the XFS inode.
745  * This covers only the idempotent fields in the XFS inode;
746  * all other fields need to be initialised on allocation
747  * from the slab. This avoids the need to repeatedly initialise
748  * fields in the xfs inode that are left in the initialised state
749  * when freeing the inode.
750  */
751 STATIC void
752 xfs_fs_inode_init_once(
753 	void			*inode)
754 {
755 	struct xfs_inode	*ip = inode;
756 
757 	memset(ip, 0, sizeof(struct xfs_inode));
758 
759 	/* vfs inode */
760 	inode_init_once(VFS_I(ip));
761 
762 	/* xfs inode */
763 	atomic_set(&ip->i_pincount, 0);
764 	spin_lock_init(&ip->i_flags_lock);
765 	init_rwsem(&ip->i_lock);
766 }
767 
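/*
 * Sketch of how a constructor like this is wired up (the inode cache is
 * created elsewhere in the XFS init path; the flags shown are illustrative):
 *
 *	cache = kmem_cache_create("xfs_inode", sizeof(struct xfs_inode),
 *			0, SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT,
 *			xfs_fs_inode_init_once);
 *
 * The constructor runs when a slab page is populated, not on every
 * allocation, which is why only idempotent fields may be set up here.
 */
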
768 /*
769  * We do an unlocked check for XFS_IDONTCACHE here because we are already
770  * serialised against cache hits here via the inode->i_lock and igrab() in
771  * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
772  * racing with us, and it avoids needing to grab a spinlock here for every inode
773  * we drop the final reference on.
774  */
775 STATIC int
776 xfs_fs_drop_inode(
777 	struct inode		*inode)
778 {
779 	struct xfs_inode	*ip = XFS_I(inode);
780 
781 	/*
782 	 * If this unlinked inode is in the middle of recovery, don't
783 	 * drop the inode just yet; log recovery will take care of
784 	 * that.  See the comment for this inode flag.
785 	 */
786 	if (ip->i_flags & XFS_IRECOVERY) {
787 		ASSERT(xlog_recovery_needed(ip->i_mount->m_log));
788 		return 0;
789 	}
790 
791 	return inode_generic_drop(inode);
792 }
793 
794 STATIC void
795 xfs_fs_evict_inode(
796 	struct inode		*inode)
797 {
798 	if (IS_DAX(inode))
799 		dax_break_layout_final(inode);
800 
801 	truncate_inode_pages_final(&inode->i_data);
802 	clear_inode(inode);
803 
804 	if (IS_ENABLED(CONFIG_XFS_RT) &&
805 	    S_ISREG(inode->i_mode) && inode->i_private) {
806 		xfs_open_zone_put(inode->i_private);
807 		inode->i_private = NULL;
808 	}
809 }
810 
811 static void
812 xfs_mount_free(
813 	struct xfs_mount	*mp)
814 {
815 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
816 		xfs_free_buftarg(mp->m_logdev_targp);
817 	if (mp->m_rtdev_targp && mp->m_rtdev_targp != mp->m_ddev_targp)
818 		xfs_free_buftarg(mp->m_rtdev_targp);
819 	if (mp->m_ddev_targp)
820 		xfs_free_buftarg(mp->m_ddev_targp);
821 
822 	debugfs_remove(mp->m_debugfs);
823 	kfree(mp->m_rtname);
824 	kfree(mp->m_logname);
825 	kfree(mp);
826 }
827 
828 STATIC int
829 xfs_fs_sync_fs(
830 	struct super_block	*sb,
831 	int			wait)
832 {
833 	struct xfs_mount	*mp = XFS_M(sb);
834 	int			error;
835 
836 	trace_xfs_fs_sync_fs(mp, __return_address);
837 
838 	/*
839 	 * Doing anything during the async pass would be counterproductive.
840 	 */
841 	if (!wait)
842 		return 0;
843 
844 	error = xfs_log_force(mp, XFS_LOG_SYNC);
845 	if (error)
846 		return error;
847 
848 	if (laptop_mode) {
849 		/*
850 		 * The disk must be active because we're syncing.
851 		 * We schedule log work now (now that the disk is
852 		 * active) instead of later (when it might not be).
853 		 */
854 		flush_delayed_work(&mp->m_log->l_work);
855 	}
856 
857 	/*
858 	 * If we are called with page faults frozen out, it means we are about
859 	 * to freeze the transaction subsystem. Take the opportunity to shut
860 	 * down inodegc because once SB_FREEZE_FS is set it's too late to
861 	 * prevent inactivation races with freeze. The fs doesn't get called
862 	 * again by the freezing process until after SB_FREEZE_FS has been set,
863 	 * so it's now or never.  Same logic applies to speculative allocation
864 	 * garbage collection.
865 	 *
866 	 * We don't care if this is a normal syncfs call that does this or
867 	 * freeze that does this - we can run this multiple times without issue
868 	 * and we won't race with a restart because a restart can only occur
869 	 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
870 	 */
871 	if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
872 		xfs_inodegc_stop(mp);
873 		xfs_blockgc_stop(mp);
874 		xfs_zone_gc_stop(mp);
875 	}
876 
877 	return 0;
878 }
879 
880 static xfs_extlen_t
881 xfs_internal_log_size(
882 	struct xfs_mount	*mp)
883 {
884 	if (!mp->m_sb.sb_logstart)
885 		return 0;
886 	return mp->m_sb.sb_logblocks;
887 }
888 
889 static void
890 xfs_statfs_data(
891 	struct xfs_mount	*mp,
892 	struct kstatfs		*st)
893 {
894 	int64_t			fdblocks =
895 		xfs_sum_freecounter(mp, XC_FREE_BLOCKS);
896 
897 	/* make sure st->f_bfree does not underflow */
898 	st->f_bfree = max(0LL,
899 		fdblocks - xfs_freecounter_unavailable(mp, XC_FREE_BLOCKS));
900 
901 	/*
902 	 * sb_dblocks can change while growfs is in progress, but nothing
903 	 * cares whether it sees the old or the new value here.
904 	 */
905 	st->f_blocks = mp->m_sb.sb_dblocks - xfs_internal_log_size(mp);
906 }
907 
908 /*
909  * When stat(v)fs is called on a file with the realtime bit set or a directory
910  * with the rtinherit bit, report freespace information for the RT device
911  * instead of the main data device.
912  */
913 static void
914 xfs_statfs_rt(
915 	struct xfs_mount	*mp,
916 	struct kstatfs		*st)
917 {
918 	st->f_bfree = xfs_rtbxlen_to_blen(mp,
919 			xfs_sum_freecounter(mp, XC_FREE_RTEXTENTS));
920 	st->f_blocks = mp->m_sb.sb_rblocks - xfs_rtbxlen_to_blen(mp,
921 			mp->m_free[XC_FREE_RTEXTENTS].res_total);
922 }
923 
924 static void
925 xfs_statfs_inodes(
926 	struct xfs_mount	*mp,
927 	struct kstatfs		*st)
928 {
929 	uint64_t		icount = percpu_counter_sum(&mp->m_icount);
930 	uint64_t		ifree = percpu_counter_sum(&mp->m_ifree);
931 	uint64_t		fakeinos = XFS_FSB_TO_INO(mp, st->f_bfree);
932 
933 	st->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
934 	if (M_IGEO(mp)->maxicount)
935 		st->f_files = min_t(typeof(st->f_files), st->f_files,
936 					M_IGEO(mp)->maxicount);
937 
938 	/* If sb_icount overshot maxicount, report actual allocation */
939 	st->f_files = max_t(typeof(st->f_files), st->f_files,
940 			mp->m_sb.sb_icount);
941 
942 	/* Make sure st->f_ffree does not underflow */
943 	st->f_ffree = max_t(int64_t, 0, st->f_files - (icount - ifree));
944 }
945 
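/*
 * Worked example with hypothetical numbers: 4k blocks and 512-byte inodes
 * give 8 inodes per block via XFS_FSB_TO_INO().  With icount = 1000000,
 * ifree = 50000, st->f_bfree = 1000, and no maxicount/sb_icount clamping:
 *
 *	fakeinos    = 1000 * 8				   = 8000
 *	st->f_files = min(1000000 + 8000, XFS_MAXINUMBER)  = 1008000
 *	st->f_ffree = 1008000 - (1000000 - 50000)	   = 58000
 */
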
946 STATIC int
947 xfs_fs_statfs(
948 	struct dentry		*dentry,
949 	struct kstatfs		*st)
950 {
951 	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
952 	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
953 
954 	/*
955 	 * Expedite background inodegc but don't wait. We do not want to block
956 	 * here waiting hours for a billion extent file to be truncated.
957 	 */
958 	xfs_inodegc_push(mp);
959 
960 	st->f_type = XFS_SUPER_MAGIC;
961 	st->f_namelen = MAXNAMELEN - 1;
962 	st->f_bsize = mp->m_sb.sb_blocksize;
963 	st->f_fsid = u64_to_fsid(huge_encode_dev(mp->m_ddev_targp->bt_dev));
964 
965 	xfs_statfs_data(mp, st);
966 	xfs_statfs_inodes(mp, st);
967 
968 	if (XFS_IS_REALTIME_MOUNT(mp) &&
969 	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME)))
970 		xfs_statfs_rt(mp, st);
971 
972 	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
973 	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
974 			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
975 		xfs_qm_statvfs(ip, st);
976 
977 	/*
978 	 * XFS does not distinguish between blocks available to privileged and
979 	 * unprivileged users.
980 	 */
981 	st->f_bavail = st->f_bfree;
982 	return 0;
983 }
984 
985 STATIC void
986 xfs_save_resvblks(
987 	struct xfs_mount	*mp)
988 {
989 	enum xfs_free_counter	i;
990 
991 	for (i = 0; i < XC_FREE_NR; i++) {
992 		mp->m_free[i].res_saved = mp->m_free[i].res_total;
993 		xfs_reserve_blocks(mp, i, 0);
994 	}
995 }
996 
997 STATIC void
998 xfs_restore_resvblks(
999 	struct xfs_mount	*mp)
1000 {
1001 	uint64_t		resblks;
1002 	enum xfs_free_counter	i;
1003 
1004 	for (i = 0; i < XC_FREE_NR; i++) {
1005 		if (mp->m_free[i].res_saved) {
1006 			resblks = mp->m_free[i].res_saved;
1007 			mp->m_free[i].res_saved = 0;
1008 		} else
1009 			resblks = xfs_default_resblks(mp, i);
1010 		xfs_reserve_blocks(mp, i, resblks);
1011 	}
1012 }
1013 
1014 /*
1015  * Second stage of a freeze. The data is already frozen so we only
1016  * need to take care of the metadata. Once that's done sync the superblock
1017  * to the log to dirty it in case of a crash while frozen. This ensures that we
1018  * will recover the unlinked inode lists on the next mount.
1019  */
1020 STATIC int
1021 xfs_fs_freeze(
1022 	struct super_block	*sb)
1023 {
1024 	struct xfs_mount	*mp = XFS_M(sb);
1025 	unsigned int		flags;
1026 	int			ret;
1027 
1028 	/*
1029 	 * The filesystem is now frozen far enough that memory reclaim
1030 	 * cannot safely operate on the filesystem. Hence we need to
1031 	 * set a GFP_NOFS context here to avoid recursion deadlocks.
1032 	 */
1033 	flags = memalloc_nofs_save();
1034 	xfs_save_resvblks(mp);
1035 	ret = xfs_log_quiesce(mp);
1036 	memalloc_nofs_restore(flags);
1037 
1038 	/*
1039 	 * For read-write filesystems, we need to restart the inodegc on error
1040 	 * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not
1041 	 * going to be run to restart it now.  We are at SB_FREEZE_FS level
1042 	 * here, so we can restart safely without racing with a stop in
1043 	 * xfs_fs_sync_fs().
1044 	 */
1045 	if (ret && !xfs_is_readonly(mp)) {
1046 		xfs_blockgc_start(mp);
1047 		xfs_inodegc_start(mp);
1048 		xfs_zone_gc_start(mp);
1049 	}
1050 
1051 	return ret;
1052 }
1053 
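/*
 * memalloc_nofs_save()/restore() is the standard scoped allocation-context
 * pattern (sketch); every allocation between the two calls implicitly
 * behaves as if GFP_NOFS had been passed:
 *
 *	unsigned int flags = memalloc_nofs_save();
 *	...allocations here cannot recurse into filesystem reclaim...
 *	memalloc_nofs_restore(flags);
 */
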
1054 STATIC int
1055 xfs_fs_unfreeze(
1056 	struct super_block	*sb)
1057 {
1058 	struct xfs_mount	*mp = XFS_M(sb);
1059 
1060 	xfs_restore_resvblks(mp);
1061 	xfs_log_work_queue(mp);
1062 
1063 	/*
1064 	 * Don't reactivate the inodegc worker on a readonly filesystem because
1065 	 * inodes are sent directly to reclaim.  Don't reactivate the blockgc
1066 	 * worker because there are no speculative preallocations on a readonly
1067 	 * filesystem.
1068 	 */
1069 	if (!xfs_is_readonly(mp)) {
1070 		xfs_zone_gc_start(mp);
1071 		xfs_blockgc_start(mp);
1072 		xfs_inodegc_start(mp);
1073 	}
1074 
1075 	return 0;
1076 }
1077 
1078 /*
1079  * This function fills in xfs_mount_t fields based on mount args.
1080  * Note: the superblock _has_ now been read in.
1081  */
1082 STATIC int
1083 xfs_finish_flags(
1084 	struct xfs_mount	*mp)
1085 {
1086 	/* Fail a mount where the logbuf is smaller than the log stripe */
1087 	if (xfs_has_logv2(mp)) {
1088 		if (mp->m_logbsize <= 0 &&
1089 		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
1090 			mp->m_logbsize = mp->m_sb.sb_logsunit;
1091 		} else if (mp->m_logbsize > 0 &&
1092 			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
1093 			xfs_warn(mp,
1094 		"logbuf size must be greater than or equal to log stripe size");
1095 			return -EINVAL;
1096 		}
1097 	} else {
1098 		/* Fail a mount if the logbuf is larger than 32K */
1099 		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
1100 			xfs_warn(mp,
1101 		"logbuf size for version 1 logs must be 16K or 32K");
1102 			return -EINVAL;
1103 		}
1104 	}
1105 
1106 	/*
1107 	 * prohibit r/w mounts of read-only filesystems
1108 	 */
1109 	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !xfs_is_readonly(mp)) {
1110 		xfs_warn(mp,
1111 			"cannot mount a read-only filesystem as read-write");
1112 		return -EROFS;
1113 	}
1114 
1115 	if ((mp->m_qflags & XFS_GQUOTA_ACCT) &&
1116 	    (mp->m_qflags & XFS_PQUOTA_ACCT) &&
1117 	    !xfs_has_pquotino(mp)) {
1118 		xfs_warn(mp,
1119 		  "Super block does not support project and group quota together");
1120 		return -EINVAL;
1121 	}
1122 
1123 	if (!xfs_has_zoned(mp)) {
1124 		if (mp->m_max_open_zones) {
1125 			xfs_warn(mp,
1126 "max_open_zones mount option only supported on zoned file systems.");
1127 			return -EINVAL;
1128 		}
1129 		if (mp->m_features & XFS_FEAT_NOLIFETIME) {
1130 			xfs_warn(mp,
1131 "nolifetime mount option only supported on zoned file systems.");
1132 			return -EINVAL;
1133 		}
1134 	}
1135 
1136 	return 0;
1137 }
1138 
1139 static int
1140 xfs_init_percpu_counters(
1141 	struct xfs_mount	*mp)
1142 {
1143 	int			error;
1144 	int			i;
1145 
1146 	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
1147 	if (error)
1148 		return -ENOMEM;
1149 
1150 	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
1151 	if (error)
1152 		goto free_icount;
1153 
1154 	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
1155 	if (error)
1156 		goto free_ifree;
1157 
1158 	error = percpu_counter_init(&mp->m_delalloc_rtextents, 0, GFP_KERNEL);
1159 	if (error)
1160 		goto free_delalloc;
1161 
1162 	for (i = 0; i < XC_FREE_NR; i++) {
1163 		error = percpu_counter_init(&mp->m_free[i].count, 0,
1164 				GFP_KERNEL);
1165 		if (error)
1166 			goto free_freecounters;
1167 	}
1168 
1169 	return 0;
1170 
1171 free_freecounters:
1172 	while (--i >= 0)
1173 		percpu_counter_destroy(&mp->m_free[i].count);
1174 	percpu_counter_destroy(&mp->m_delalloc_rtextents);
1175 free_delalloc:
1176 	percpu_counter_destroy(&mp->m_delalloc_blks);
1177 free_ifree:
1178 	percpu_counter_destroy(&mp->m_ifree);
1179 free_icount:
1180 	percpu_counter_destroy(&mp->m_icount);
1181 	return -ENOMEM;
1182 }
1183 
1184 void
1185 xfs_reinit_percpu_counters(
1186 	struct xfs_mount	*mp)
1187 {
1188 	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
1189 	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
1190 	xfs_set_freecounter(mp, XC_FREE_BLOCKS, mp->m_sb.sb_fdblocks);
1191 	if (!xfs_has_zoned(mp))
1192 		xfs_set_freecounter(mp, XC_FREE_RTEXTENTS,
1193 				mp->m_sb.sb_frextents);
1194 }
1195 
1196 static void
1197 xfs_destroy_percpu_counters(
1198 	struct xfs_mount	*mp)
1199 {
1200 	enum xfs_free_counter	i;
1201 
1202 	for (i = 0; i < XC_FREE_NR; i++)
1203 		percpu_counter_destroy(&mp->m_free[i].count);
1204 	percpu_counter_destroy(&mp->m_icount);
1205 	percpu_counter_destroy(&mp->m_ifree);
1206 	ASSERT(xfs_is_shutdown(mp) ||
1207 	       percpu_counter_sum(&mp->m_delalloc_rtextents) == 0);
1208 	percpu_counter_destroy(&mp->m_delalloc_rtextents);
1209 	ASSERT(xfs_is_shutdown(mp) ||
1210 	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
1211 	percpu_counter_destroy(&mp->m_delalloc_blks);
1212 }
1213 
1214 static int
1215 xfs_inodegc_init_percpu(
1216 	struct xfs_mount	*mp)
1217 {
1218 	struct xfs_inodegc	*gc;
1219 	int			cpu;
1220 
1221 	mp->m_inodegc = alloc_percpu(struct xfs_inodegc);
1222 	if (!mp->m_inodegc)
1223 		return -ENOMEM;
1224 
1225 	for_each_possible_cpu(cpu) {
1226 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
1227 		gc->cpu = cpu;
1228 		gc->mp = mp;
1229 		init_llist_head(&gc->list);
1230 		gc->items = 0;
1231 		gc->error = 0;
1232 		INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
1233 	}
1234 	return 0;
1235 }
1236 
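/*
 * This follows the common per-cpu object setup pattern (sketch):
 *
 *	p = alloc_percpu(struct foo);	// one instance per possible CPU
 *	for_each_possible_cpu(cpu)
 *		setup(per_cpu_ptr(p, cpu));
 *
 * Iterating over all *possible* CPUs rather than just the online ones means
 * a CPU coming online later needs no extra allocation.
 */
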
1237 static void
1238 xfs_inodegc_free_percpu(
1239 	struct xfs_mount	*mp)
1240 {
1241 	if (!mp->m_inodegc)
1242 		return;
1243 	free_percpu(mp->m_inodegc);
1244 }
1245 
1246 static void
1247 xfs_fs_put_super(
1248 	struct super_block	*sb)
1249 {
1250 	struct xfs_mount	*mp = XFS_M(sb);
1251 
1252 	xfs_notice(mp, "Unmounting Filesystem %pU", &mp->m_sb.sb_uuid);
1253 	xfs_filestream_unmount(mp);
1254 	xfs_unmountfs(mp);
1255 
1256 	xfs_rtmount_freesb(mp);
1257 	xfs_freesb(mp);
1258 	xchk_mount_stats_free(mp);
1259 	free_percpu(mp->m_stats.xs_stats);
1260 	xfs_inodegc_free_percpu(mp);
1261 	xfs_destroy_percpu_counters(mp);
1262 	xfs_destroy_mount_workqueues(mp);
1263 	xfs_shutdown_devices(mp);
1264 }
1265 
1266 static long
1267 xfs_fs_nr_cached_objects(
1268 	struct super_block	*sb,
1269 	struct shrink_control	*sc)
1270 {
1271 	/* Paranoia: catch incorrect calls during mount setup or teardown */
1272 	if (WARN_ON_ONCE(!sb->s_fs_info))
1273 		return 0;
1274 	return xfs_reclaim_inodes_count(XFS_M(sb));
1275 }
1276 
1277 static long
1278 xfs_fs_free_cached_objects(
1279 	struct super_block	*sb,
1280 	struct shrink_control	*sc)
1281 {
1282 	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
1283 }
1284 
1285 static void
1286 xfs_fs_shutdown(
1287 	struct super_block	*sb)
1288 {
1289 	xfs_force_shutdown(XFS_M(sb), SHUTDOWN_DEVICE_REMOVED);
1290 }
1291 
1292 static int
1293 xfs_fs_show_stats(
1294 	struct seq_file		*m,
1295 	struct dentry		*root)
1296 {
1297 	struct xfs_mount	*mp = XFS_M(root->d_sb);
1298 
1299 	if (xfs_has_zoned(mp) && IS_ENABLED(CONFIG_XFS_RT))
1300 		xfs_zoned_show_stats(m, mp);
1301 	return 0;
1302 }
1303 
1304 static const struct super_operations xfs_super_operations = {
1305 	.alloc_inode		= xfs_fs_alloc_inode,
1306 	.destroy_inode		= xfs_fs_destroy_inode,
1307 	.dirty_inode		= xfs_fs_dirty_inode,
1308 	.drop_inode		= xfs_fs_drop_inode,
1309 	.evict_inode		= xfs_fs_evict_inode,
1310 	.put_super		= xfs_fs_put_super,
1311 	.sync_fs		= xfs_fs_sync_fs,
1312 	.freeze_fs		= xfs_fs_freeze,
1313 	.unfreeze_fs		= xfs_fs_unfreeze,
1314 	.statfs			= xfs_fs_statfs,
1315 	.show_options		= xfs_fs_show_options,
1316 	.nr_cached_objects	= xfs_fs_nr_cached_objects,
1317 	.free_cached_objects	= xfs_fs_free_cached_objects,
1318 	.shutdown		= xfs_fs_shutdown,
1319 	.show_stats		= xfs_fs_show_stats,
1320 };
1321 
1322 static int
1323 suffix_kstrtoint(
1324 	const char	*s,
1325 	unsigned int	base,
1326 	int		*res)
1327 {
1328 	int		last, shift_left_factor = 0, _res = 0;
1329 	char		*value;
1330 	int		ret = 0;
1331 
1332 	value = kstrdup(s, GFP_KERNEL);
1333 	if (!value)
1334 		return -ENOMEM;
1335 
1336 	last = strlen(value) - 1;
1337 	if (value[last] == 'K' || value[last] == 'k') {
1338 		shift_left_factor = 10;
1339 		value[last] = '\0';
1340 	}
1341 	if (value[last] == 'M' || value[last] == 'm') {
1342 		shift_left_factor = 20;
1343 		value[last] = '\0';
1344 	}
1345 	if (value[last] == 'G' || value[last] == 'g') {
1346 		shift_left_factor = 30;
1347 		value[last] = '\0';
1348 	}
1349 
1350 	if (kstrtoint(value, base, &_res))
1351 		ret = -EINVAL;
1352 	kfree(value);
1353 	*res = _res << shift_left_factor;
1354 	return ret;
1355 }
1356 
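/*
 * Usage example (illustrative): suffix_kstrtoint("64k", 10, &val) strips the
 * 'k', parses 64, then shifts left by 10 and stores val = 65536.  The result
 * is not range-checked here; callers such as the Opt_logbsize handler rely on
 * xfs_fs_validate_params() for that.
 */
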
1357 static int
1358 suffix_kstrtoull(
1359 	const char		*s,
1360 	unsigned int		base,
1361 	unsigned long long	*res)
1362 {
1363 	int			last, shift_left_factor = 0;
1364 	unsigned long long	_res = 0;
1365 	char			*value;
1366 	int			ret = 0;
1367 
1368 	value = kstrdup(s, GFP_KERNEL);
1369 	if (!value)
1370 		return -ENOMEM;
1371 
1372 	last = strlen(value) - 1;
1373 	if (value[last] == 'K' || value[last] == 'k') {
1374 		shift_left_factor = 10;
1375 		value[last] = '\0';
1376 	}
1377 	if (value[last] == 'M' || value[last] == 'm') {
1378 		shift_left_factor = 20;
1379 		value[last] = '\0';
1380 	}
1381 	if (value[last] == 'G' || value[last] == 'g') {
1382 		shift_left_factor = 30;
1383 		value[last] = '\0';
1384 	}
1385 
1386 	if (kstrtoull(value, base, &_res))
1387 		ret = -EINVAL;
1388 	kfree(value);
1389 	*res = _res << shift_left_factor;
1390 	return ret;
1391 }
1392 
1393 static inline void
1394 xfs_fs_warn_deprecated(
1395 	struct fs_context	*fc,
1396 	struct fs_parameter	*param)
1397 {
1398 	/*
1399 	 * Always warn about someone passing in a deprecated mount option.
1400 	 * Previously we wouldn't print the warning if we were reconfiguring
1401 	 * and current mount point already had the flag set, but that was not
1402 	 * the right thing to do.
1403 	 *
1404 	 * Many distributions mount the root filesystem with no options in the
1405 	 * initramfs and rely on mount -a to remount the root fs with the
1406 	 * options in fstab.  However, the old behavior meant that there would
1407 	 * never be a warning about deprecated mount options for the root fs in
1408 	 * /etc/fstab.  On a single-fs system, that means no warning at all.
1409 	 *
1410 	 * Compounding this problem are distribution scripts that copy
1411 	 * /proc/mounts to fstab, which means that we can't remove mount
1412 	 * options unless we're 100% sure they have only ever been advertised
1413 	 * in /proc/mounts in response to explicitly provided mount options.
1414 	 */
1415 	xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
1416 }
1417 
1418 /*
1419  * Set mount state from a mount option.
1420  *
1421  * NOTE: mp->m_super is NULL here!
1422  */
1423 static int
1424 xfs_fs_parse_param(
1425 	struct fs_context	*fc,
1426 	struct fs_parameter	*param)
1427 {
1428 	struct xfs_mount	*parsing_mp = fc->s_fs_info;
1429 	struct fs_parse_result	result;
1430 	int			size = 0;
1431 	int			opt;
1432 
1433 	BUILD_BUG_ON(XFS_QFLAGS_MNTOPTS & XFS_MOUNT_QUOTA_ALL);
1434 
1435 	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
1436 	if (opt < 0)
1437 		return opt;
1438 
1439 	switch (opt) {
1440 	case Op_deprecated:
1441 		xfs_fs_warn_deprecated(fc, param);
1442 		return 0;
1443 	case Opt_logbufs:
1444 		parsing_mp->m_logbufs = result.uint_32;
1445 		return 0;
1446 	case Opt_logbsize:
1447 		if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
1448 			return -EINVAL;
1449 		return 0;
1450 	case Opt_logdev:
1451 		kfree(parsing_mp->m_logname);
1452 		parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
1453 		if (!parsing_mp->m_logname)
1454 			return -ENOMEM;
1455 		return 0;
1456 	case Opt_rtdev:
1457 		kfree(parsing_mp->m_rtname);
1458 		parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
1459 		if (!parsing_mp->m_rtname)
1460 			return -ENOMEM;
1461 		return 0;
1462 	case Opt_allocsize:
1463 		if (suffix_kstrtoint(param->string, 10, &size))
1464 			return -EINVAL;
1465 		parsing_mp->m_allocsize_log = ffs(size) - 1;
1466 		parsing_mp->m_features |= XFS_FEAT_ALLOCSIZE;
1467 		return 0;
1468 	case Opt_grpid:
1469 	case Opt_bsdgroups:
1470 		parsing_mp->m_features |= XFS_FEAT_GRPID;
1471 		return 0;
1472 	case Opt_nogrpid:
1473 	case Opt_sysvgroups:
1474 		parsing_mp->m_features &= ~XFS_FEAT_GRPID;
1475 		return 0;
1476 	case Opt_wsync:
1477 		parsing_mp->m_features |= XFS_FEAT_WSYNC;
1478 		return 0;
1479 	case Opt_norecovery:
1480 		parsing_mp->m_features |= XFS_FEAT_NORECOVERY;
1481 		return 0;
1482 	case Opt_noalign:
1483 		parsing_mp->m_features |= XFS_FEAT_NOALIGN;
1484 		return 0;
1485 	case Opt_swalloc:
1486 		parsing_mp->m_features |= XFS_FEAT_SWALLOC;
1487 		return 0;
1488 	case Opt_sunit:
1489 		parsing_mp->m_dalign = result.uint_32;
1490 		return 0;
1491 	case Opt_swidth:
1492 		parsing_mp->m_swidth = result.uint_32;
1493 		return 0;
1494 	case Opt_inode32:
1495 		parsing_mp->m_features |= XFS_FEAT_SMALL_INUMS;
1496 		return 0;
1497 	case Opt_inode64:
1498 		parsing_mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
1499 		return 0;
1500 	case Opt_nouuid:
1501 		parsing_mp->m_features |= XFS_FEAT_NOUUID;
1502 		return 0;
1503 	case Opt_largeio:
1504 		parsing_mp->m_features |= XFS_FEAT_LARGE_IOSIZE;
1505 		return 0;
1506 	case Opt_nolargeio:
1507 		parsing_mp->m_features &= ~XFS_FEAT_LARGE_IOSIZE;
1508 		return 0;
1509 	case Opt_filestreams:
1510 		parsing_mp->m_features |= XFS_FEAT_FILESTREAMS;
1511 		return 0;
1512 	case Opt_noquota:
1513 		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
1514 		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
1515 		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
1516 		return 0;
1517 	case Opt_quota:
1518 	case Opt_uquota:
1519 	case Opt_usrquota:
1520 		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD);
1521 		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
1522 		return 0;
1523 	case Opt_qnoenforce:
1524 	case Opt_uqnoenforce:
1525 		parsing_mp->m_qflags |= XFS_UQUOTA_ACCT;
1526 		parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
1527 		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
1528 		return 0;
1529 	case Opt_pquota:
1530 	case Opt_prjquota:
1531 		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD);
1532 		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
1533 		return 0;
1534 	case Opt_pqnoenforce:
1535 		parsing_mp->m_qflags |= XFS_PQUOTA_ACCT;
1536 		parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
1537 		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
1538 		return 0;
1539 	case Opt_gquota:
1540 	case Opt_grpquota:
1541 		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
1542 		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
1543 		return 0;
1544 	case Opt_gqnoenforce:
1545 		parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
1546 		parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
1547 		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
1548 		return 0;
1549 	case Opt_discard:
1550 		parsing_mp->m_features |= XFS_FEAT_DISCARD;
1551 		return 0;
1552 	case Opt_nodiscard:
1553 		parsing_mp->m_features &= ~XFS_FEAT_DISCARD;
1554 		return 0;
1555 #ifdef CONFIG_FS_DAX
1556 	case Opt_dax:
1557 		xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
1558 		return 0;
1559 	case Opt_dax_enum:
1560 		xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
1561 		return 0;
1562 #endif
1563 	case Opt_max_open_zones:
1564 		parsing_mp->m_max_open_zones = result.uint_32;
1565 		return 0;
1566 	case Opt_lifetime:
1567 		parsing_mp->m_features &= ~XFS_FEAT_NOLIFETIME;
1568 		return 0;
1569 	case Opt_nolifetime:
1570 		parsing_mp->m_features |= XFS_FEAT_NOLIFETIME;
1571 		return 0;
1572 	case Opt_max_atomic_write:
1573 		if (suffix_kstrtoull(param->string, 10,
1574 				     &parsing_mp->m_awu_max_bytes)) {
1575 			xfs_warn(parsing_mp,
1576  "max atomic write size must be a positive integer");
1577 			return -EINVAL;
1578 		}
1579 		return 0;
1580 	default:
1581 		xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
1582 		return -EINVAL;
1583 	}
1584 
1585 	return 0;
1586 }
1587 
1588 static int
1589 xfs_fs_validate_params(
1590 	struct xfs_mount	*mp)
1591 {
1592 	/* No recovery flag requires a read-only mount */
1593 	if (xfs_has_norecovery(mp) && !xfs_is_readonly(mp)) {
1594 		xfs_warn(mp, "no-recovery mounts must be read-only.");
1595 		return -EINVAL;
1596 	}
1597 
1598 	if (xfs_has_noalign(mp) && (mp->m_dalign || mp->m_swidth)) {
1599 		xfs_warn(mp,
1600 	"sunit and swidth options incompatible with the noalign option");
1601 		return -EINVAL;
1602 	}
1603 
1604 	if (!IS_ENABLED(CONFIG_XFS_QUOTA) &&
1605 	    (mp->m_qflags & ~XFS_QFLAGS_MNTOPTS)) {
1606 		xfs_warn(mp, "quota support not available in this kernel.");
1607 		return -EINVAL;
1608 	}
1609 
1610 	if ((mp->m_dalign && !mp->m_swidth) ||
1611 	    (!mp->m_dalign && mp->m_swidth)) {
1612 		xfs_warn(mp, "sunit and swidth must be specified together");
1613 		return -EINVAL;
1614 	}
1615 
1616 	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
1617 		xfs_warn(mp,
1618 	"stripe width (%d) must be a multiple of the stripe unit (%d)",
1619 			mp->m_swidth, mp->m_dalign);
1620 		return -EINVAL;
1621 	}
1622 
1623 	if (mp->m_logbufs != -1 &&
1624 	    mp->m_logbufs != 0 &&
1625 	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
1626 	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
1627 		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
1628 			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
1629 		return -EINVAL;
1630 	}
1631 
1632 	if (mp->m_logbsize != -1 &&
1633 	    mp->m_logbsize !=  0 &&
1634 	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
1635 	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
1636 	     !is_power_of_2(mp->m_logbsize))) {
1637 		xfs_warn(mp,
1638 			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
1639 			mp->m_logbsize);
1640 		return -EINVAL;
1641 	}
1642 
1643 	if (xfs_has_allocsize(mp) &&
1644 	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
1645 	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
1646 		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
1647 			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
1648 		return -EINVAL;
1649 	}
1650 
1651 	return 0;
1652 }
1653 
1654 struct dentry *
1655 xfs_debugfs_mkdir(
1656 	const char	*name,
1657 	struct dentry	*parent)
1658 {
1659 	struct dentry	*child;
1660 
1661 	/* Apparently we're expected to ignore error returns?? */
1662 	child = debugfs_create_dir(name, parent);
1663 	if (IS_ERR(child))
1664 		return NULL;
1665 
1666 	return child;
1667 }
1668 
1669 static int
1670 xfs_fs_fill_super(
1671 	struct super_block	*sb,
1672 	struct fs_context	*fc)
1673 {
1674 	struct xfs_mount	*mp = sb->s_fs_info;
1675 	struct inode		*root;
1676 	int			flags = 0, error;
1677 
1678 	mp->m_super = sb;
1679 
1680 	/*
1681 	 * Copy VFS mount flags from the context now that all parameter parsing
1682 	 * is guaranteed to have been completed by either the old mount API or
1683 	 * the newer fsopen/fsconfig API.
1684 	 */
1685 	if (fc->sb_flags & SB_RDONLY)
1686 		xfs_set_readonly(mp);
1687 	if (fc->sb_flags & SB_DIRSYNC)
1688 		mp->m_features |= XFS_FEAT_DIRSYNC;
1689 	if (fc->sb_flags & SB_SYNCHRONOUS)
1690 		mp->m_features |= XFS_FEAT_WSYNC;
1691 
1692 	error = xfs_fs_validate_params(mp);
1693 	if (error)
1694 		return error;
1695 
1696 	sb_min_blocksize(sb, BBSIZE);
1697 	sb->s_xattr = xfs_xattr_handlers;
1698 	sb->s_export_op = &xfs_export_operations;
1699 #ifdef CONFIG_XFS_QUOTA
1700 	sb->s_qcop = &xfs_quotactl_operations;
1701 	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
1702 #endif
1703 	sb->s_op = &xfs_super_operations;
1704 
1705 	/*
1706 	 * Delay mount work if the debug hook is set. This is debug
1707 	 * instrumentation to coordinate simulation of xfs mount failures with
1708 	 * VFS superblock operations.
1709 	 */
1710 	if (xfs_globals.mount_delay) {
1711 		xfs_notice(mp, "Delaying mount for %d seconds.",
1712 			xfs_globals.mount_delay);
1713 		msleep(xfs_globals.mount_delay * 1000);
1714 	}
1715 
1716 	if (fc->sb_flags & SB_SILENT)
1717 		flags |= XFS_MFSI_QUIET;
1718 
1719 	error = xfs_open_devices(mp);
1720 	if (error)
1721 		return error;
1722 
1723 	if (xfs_debugfs) {
1724 		mp->m_debugfs = xfs_debugfs_mkdir(mp->m_super->s_id,
1725 						  xfs_debugfs);
1726 	} else {
1727 		mp->m_debugfs = NULL;
1728 	}
1729 
1730 	error = xfs_init_mount_workqueues(mp);
1731 	if (error)
1732 		goto out_shutdown_devices;
1733 
1734 	error = xfs_init_percpu_counters(mp);
1735 	if (error)
1736 		goto out_destroy_workqueues;
1737 
1738 	error = xfs_inodegc_init_percpu(mp);
1739 	if (error)
1740 		goto out_destroy_counters;
1741 
1742 	/* Allocate stats memory before we do operations that might use it */
1743 	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
1744 	if (!mp->m_stats.xs_stats) {
1745 		error = -ENOMEM;
1746 		goto out_destroy_inodegc;
1747 	}
1748 
1749 	error = xchk_mount_stats_alloc(mp);
1750 	if (error)
1751 		goto out_free_stats;
1752 
1753 	error = xfs_readsb(mp, flags);
1754 	if (error)
1755 		goto out_free_scrub_stats;
1756 
1757 	error = xfs_finish_flags(mp);
1758 	if (error)
1759 		goto out_free_sb;
1760 
1761 	error = xfs_setup_devices(mp);
1762 	if (error)
1763 		goto out_free_sb;
1764 
1765 	/*
1766 	 * V4 support is undergoing deprecation.
1767 	 *
1768 	 * Note: this has to use an open coded m_features check as xfs_has_crc
1769 	 * always returns false for !CONFIG_XFS_SUPPORT_V4.
1770 	 */
1771 	if (!(mp->m_features & XFS_FEAT_CRC)) {
1772 		if (!IS_ENABLED(CONFIG_XFS_SUPPORT_V4)) {
1773 			xfs_warn(mp,
1774 	"Deprecated V4 format (crc=0) not supported by kernel.");
1775 			error = -EINVAL;
1776 			goto out_free_sb;
1777 		}
1778 		xfs_warn_once(mp,
1779 	"Deprecated V4 format (crc=0) will not be supported after September 2030.");
1780 	}
1781 
1782 	/* ASCII case insensitivity is undergoing deprecation. */
1783 	if (xfs_has_asciici(mp)) {
1784 #ifdef CONFIG_XFS_SUPPORT_ASCII_CI
1785 		xfs_warn_once(mp,
1786 	"Deprecated ASCII case-insensitivity feature (ascii-ci=1) will not be supported after September 2030.");
1787 #else
1788 		xfs_warn(mp,
1789 	"Deprecated ASCII case-insensitivity feature (ascii-ci=1) not supported by kernel.");
1790 		error = -EINVAL;
1791 		goto out_free_sb;
1792 #endif
1793 	}
1794 
1795 	/*
1796 	 * Filesystem claims it needs repair, so refuse the mount unless
1797 	 * norecovery is also specified, in which case the filesystem can
1798 	 * be mounted with no risk of further damage.
1799 	 */
1800 	if (xfs_has_needsrepair(mp) && !xfs_has_norecovery(mp)) {
1801 		xfs_warn(mp, "Filesystem needs repair.  Please run xfs_repair.");
1802 		error = -EFSCORRUPTED;
1803 		goto out_free_sb;
1804 	}
1805 
1806 	/*
1807 	 * Don't touch the filesystem if a user tool thinks it owns the primary
1808 	 * superblock.  mkfs doesn't clear the flag from secondary supers, so
1809 	 * we don't check them at all.
1810 	 */
1811 	if (mp->m_sb.sb_inprogress) {
1812 		xfs_warn(mp, "Offline file system operation in progress!");
1813 		error = -EFSCORRUPTED;
1814 		goto out_free_sb;
1815 	}
1816 
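	/*
	 * Editorial note: a block size larger than PAGE_SIZE (e.g. 16k blocks
	 * on 4k pages) relies on large folios in the page cache, hence the
	 * mapping_max_folio_size_supported() check below.
	 */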
1817 	if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
1818 		size_t max_folio_size = mapping_max_folio_size_supported();
1819 
1820 		if (!xfs_has_crc(mp)) {
1821 			xfs_warn(mp,
1822 "V4 Filesystem with blocksize %d bytes. Only pagesize (%ld) or less is supported.",
1823 				mp->m_sb.sb_blocksize, PAGE_SIZE);
1824 			error = -ENOSYS;
1825 			goto out_free_sb;
1826 		}
1827 
1828 		if (mp->m_sb.sb_blocksize > max_folio_size) {
1829 			xfs_warn(mp,
1830 "block size (%u bytes) not supported; only block size (%zu) or less is supported",
1831 				mp->m_sb.sb_blocksize, max_folio_size);
1832 			error = -ENOSYS;
1833 			goto out_free_sb;
1834 		}
1835 
1836 		xfs_warn_experimental(mp, XFS_EXPERIMENTAL_LBS);
1837 	}
1838 
1839 	/* Ensure this filesystem fits in the page cache limits */
1840 	if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
1841 	    xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
1842 		xfs_warn(mp,
1843 		"file system too large to be mounted on this system.");
1844 		error = -EFBIG;
1845 		goto out_free_sb;
1846 	}
1847 
1848 	/*
1849 	 * XFS block mappings use 54 bits to store the logical block offset.
1850 	 * This should suffice to handle the maximum file size that the VFS
1851 	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
1852 	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
1853 	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
1854 	 * to check this assertion.
1855 	 *
1856 	 * Avoid integer overflow by comparing the maximum bmbt offset to the
1857 	 * maximum pagecache offset in units of fs blocks.
1858 	 */
1859 	if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
1860 		xfs_warn(mp,
1861 "MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
1862 			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
1863 			 XFS_MAX_FILEOFF);
1864 		error = -EINVAL;
1865 		goto out_free_sb;
1866 	}
1867 
1868 	error = xfs_rtmount_readsb(mp);
1869 	if (error)
1870 		goto out_free_sb;
1871 
1872 	error = xfs_filestream_mount(mp);
1873 	if (error)
1874 		goto out_free_rtsb;
1875 
1876 	/*
1877 	 * We must configure the block size in the superblock before we run the
1878 	 * full mount process, as the mount process can look up and cache inodes.
1879 	 */
1880 	sb->s_magic = XFS_SUPER_MAGIC;
1881 	sb->s_blocksize = mp->m_sb.sb_blocksize;
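	/* For power-of-two sizes, ffs(x) - 1 == log2(x), e.g. ffs(4096) - 1 == 12 */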
1882 	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1883 	sb->s_maxbytes = MAX_LFS_FILESIZE;
1884 	sb->s_max_links = XFS_MAXLINK;
1885 	sb->s_time_gran = 1;
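	/*
	 * Editorial note: bigtime widens the ondisk timestamp range to roughly
	 * the year 2486; legacy timestamps are confined to the classic signed
	 * 32-bit epoch range ending in January 2038.
	 */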
1886 	if (xfs_has_bigtime(mp)) {
1887 		sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
1888 		sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
1889 	} else {
1890 		sb->s_time_min = XFS_LEGACY_TIME_MIN;
1891 		sb->s_time_max = XFS_LEGACY_TIME_MAX;
1892 	}
1893 	trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
1894 	sb->s_iflags |= SB_I_CGROUPWB | SB_I_ALLOW_HSM;
1895 
1896 	set_posix_acl_flag(sb);
1897 
1898 	/* version 5 superblocks support inode version counters. */
1899 	if (xfs_has_crc(mp))
1900 		sb->s_flags |= SB_I_VERSION;
1901 
1902 	if (xfs_has_dax_always(mp)) {
1903 		error = xfs_setup_dax_always(mp);
1904 		if (error)
1905 			goto out_filestream_unmount;
1906 	}
1907 
1908 	if (xfs_has_discard(mp) && !bdev_max_discard_sectors(sb->s_bdev)) {
1909 		xfs_warn(mp,
1910 	"mounting with \"discard\" option, but the device does not support discard");
1911 		mp->m_features &= ~XFS_FEAT_DISCARD;
1912 	}
1913 
1914 	if (xfs_has_zoned(mp)) {
1915 		if (!xfs_has_metadir(mp)) {
1916 			xfs_alert(mp,
1917 		"metadir feature required for zoned realtime devices.");
1918 			error = -EINVAL;
1919 			goto out_filestream_unmount;
1920 		}
1921 		xfs_warn_experimental(mp, XFS_EXPERIMENTAL_ZONED);
1922 	} else if (xfs_has_metadir(mp)) {
1923 		xfs_warn_experimental(mp, XFS_EXPERIMENTAL_METADIR);
1924 	}
1925 
1926 	if (xfs_has_reflink(mp)) {
1927 		if (xfs_has_realtime(mp) &&
1928 		    !xfs_reflink_supports_rextsize(mp, mp->m_sb.sb_rextsize)) {
1929 			xfs_alert(mp,
1930 	"reflink not compatible with realtime extent size %u!",
1931 					mp->m_sb.sb_rextsize);
1932 			error = -EINVAL;
1933 			goto out_filestream_unmount;
1934 		}
1935 
1936 		if (xfs_has_zoned(mp)) {
1937 			xfs_alert(mp,
1938 	"reflink not compatible with zoned RT device!");
1939 			error = -EINVAL;
1940 			goto out_filestream_unmount;
1941 		}
1942 
1943 		if (xfs_globals.always_cow) {
1944 			xfs_info(mp, "using DEBUG-only always_cow mode.");
1945 			mp->m_always_cow = true;
1946 		}
1947 	}
1948 
1949 	/*
1950 	 * If no quota mount options were provided, maybe we'll try to pick
1951 	 * up the quota accounting and enforcement flags from the ondisk sb.
1952 	 */
1953 	if (!(mp->m_qflags & XFS_QFLAGS_MNTOPTS))
1954 		xfs_set_resuming_quotaon(mp);
1955 	mp->m_qflags &= ~XFS_QFLAGS_MNTOPTS;
1956 
1957 	error = xfs_mountfs(mp);
1958 	if (error)
1959 		goto out_filestream_unmount;
1960 
1961 	root = igrab(VFS_I(mp->m_rootip));
1962 	if (!root) {
1963 		error = -ENOENT;
1964 		goto out_unmount;
1965 	}
1966 	sb->s_root = d_make_root(root);
1967 	if (!sb->s_root) {
1968 		error = -ENOMEM;
1969 		goto out_unmount;
1970 	}
1971 
1972 	return 0;
1973 
1974  out_filestream_unmount:
1975 	xfs_filestream_unmount(mp);
1976  out_free_rtsb:
1977 	xfs_rtmount_freesb(mp);
1978  out_free_sb:
1979 	xfs_freesb(mp);
1980  out_free_scrub_stats:
1981 	xchk_mount_stats_free(mp);
1982  out_free_stats:
1983 	free_percpu(mp->m_stats.xs_stats);
1984  out_destroy_inodegc:
1985 	xfs_inodegc_free_percpu(mp);
1986  out_destroy_counters:
1987 	xfs_destroy_percpu_counters(mp);
1988  out_destroy_workqueues:
1989 	xfs_destroy_mount_workqueues(mp);
1990  out_shutdown_devices:
1991 	xfs_shutdown_devices(mp);
1992 	return error;
1993 
1994  out_unmount:
1995 	xfs_filestream_unmount(mp);
1996 	xfs_unmountfs(mp);
1997 	goto out_free_rtsb;
1998 }
1999 
2000 static int
2001 xfs_fs_get_tree(
2002 	struct fs_context	*fc)
2003 {
2004 	return get_tree_bdev(fc, xfs_fs_fill_super);
2005 }
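
/*
 * Illustrative sketch (not part of this file): userspace reaches
 * xfs_fs_get_tree() through the new mount API roughly as follows; the
 * device path and mountpoint are hypothetical.
 *
 *	int fsfd = fsopen("xfs", FSOPEN_CLOEXEC);
 *	fsconfig(fsfd, FSCONFIG_SET_STRING, "source", "/dev/sda1", 0);
 *	fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0); // -> ->get_tree()
 *	int mfd = fsmount(fsfd, FSMOUNT_CLOEXEC, 0);
 *	move_mount(mfd, "", AT_FDCWD, "/mnt", MOVE_MOUNT_F_EMPTY_PATH);
 *
 * FSCONFIG_CMD_CREATE is the step that ends up calling get_tree_bdev()
 * above.
 */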
2006 
2007 static int
2008 xfs_remount_rw(
2009 	struct xfs_mount	*mp)
2010 {
2011 	struct xfs_sb		*sbp = &mp->m_sb;
2012 	int error;
2013 
2014 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp &&
2015 	    xfs_readonly_buftarg(mp->m_logdev_targp)) {
2016 		xfs_warn(mp,
2017 			"ro->rw transition prohibited by read-only logdev");
2018 		return -EACCES;
2019 	}
2020 
2021 	if (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp)) {
2022 		xfs_warn(mp,
2023 			"ro->rw transition prohibited by read-only rtdev");
2024 		return -EACCES;
2025 	}
2026 
2027 	if (xfs_has_norecovery(mp)) {
2028 		xfs_warn(mp,
2029 			"ro->rw transition prohibited on norecovery mount");
2030 		return -EINVAL;
2031 	}
2032 
2033 	if (xfs_sb_is_v5(sbp) &&
2034 	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
2035 		xfs_warn(mp,
2036 	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
2037 			(sbp->sb_features_ro_compat &
2038 				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
2039 		return -EINVAL;
2040 	}
2041 
2042 	xfs_clear_readonly(mp);
2043 
2044 	/*
2045 	 * If this is the first remount to writeable state we might have some
2046 	 * superblock changes to update.
2047 	 */
2048 	if (mp->m_update_sb) {
2049 		error = xfs_sync_sb(mp, false);
2050 		if (error) {
2051 			xfs_warn(mp, "failed to write sb changes");
2052 			return error;
2053 		}
2054 		mp->m_update_sb = false;
2055 	}
2056 
2057 	/*
2058 	 * Fill out the reserve pool if it is empty. Use the stashed value if
2059 	 * it is non-zero, otherwise go with the default.
2060 	 */
2061 	xfs_restore_resvblks(mp);
2062 	xfs_log_work_queue(mp);
2063 	xfs_blockgc_start(mp);
2064 
2065 	/* Create the per-AG metadata reservation pool. */
2066 	error = xfs_fs_reserve_ag_blocks(mp);
2067 	if (error && error != -ENOSPC)
2068 		return error;
2069 
2070 	/* Re-enable the background inode inactivation worker. */
2071 	xfs_inodegc_start(mp);
2072 
2073 	/* Restart zone reclaim */
2074 	xfs_zone_gc_start(mp);
2075 
2076 	return 0;
2077 }
2078 
2079 static int
2080 xfs_remount_ro(
2081 	struct xfs_mount	*mp)
2082 {
2083 	struct xfs_icwalk	icw = {
2084 		.icw_flags	= XFS_ICWALK_FLAG_SYNC,
2085 	};
2086 	int			error;
2087 
2088 	/* Flush all the dirty data to disk. */
2089 	error = sync_filesystem(mp->m_super);
2090 	if (error)
2091 		return error;
2092 
2093 	/*
2094 	 * Cancel background eofb scanning so it cannot race with the final
2095 	 * log force+buftarg wait and deadlock the remount.
2096 	 */
2097 	xfs_blockgc_stop(mp);
2098 
2099 	/*
2100 	 * Clear out all remaining COW staging extents and speculative post-EOF
2101 	 * preallocations so that we don't leave inodes requiring inactivation
2102 	 * cleanups during reclaim on a read-only mount.  We must process every
2103 	 * cached inode, so this requires a synchronous cache scan.
2104 	 */
2105 	error = xfs_blockgc_free_space(mp, &icw);
2106 	if (error) {
2107 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
2108 		return error;
2109 	}
2110 
2111 	/*
2112 	 * Stop the inodegc background worker.  xfs_fs_reconfigure already
2113 	 * flushed all pending inodegc work when it sync'd the filesystem.
2114 	 * The VFS holds s_umount, so we know that inodes cannot enter
2115 	 * xfs_fs_destroy_inode during a remount operation.  In readonly mode
2116 	 * we send inodes straight to reclaim, so no inodes will be queued.
2117 	 */
2118 	xfs_inodegc_stop(mp);
2119 
2120 	/* Stop zone reclaim */
2121 	xfs_zone_gc_stop(mp);
2122 
2123 	/* Free the per-AG metadata reservation pool. */
2124 	xfs_fs_unreserve_ag_blocks(mp);
2125 
2126 	/*
2127 	 * Before we sync the metadata, we need to free up the reserve block
2128 	 * pool so that the used block count in the superblock on disk is
2129 	 * correct at the end of the remount. Stash the current reserve pool
2130 	 * size so that if we get remounted rw, we can return it to the same
2131 	 * size.
2132 	 */
2133 	xfs_save_resvblks(mp);
2134 
2135 	xfs_log_clean(mp);
2136 	xfs_set_readonly(mp);
2137 
2138 	return 0;
2139 }
2140 
2141 /*
2142  * Logically we would return an error here to prevent users from believing
2143  * that remount changed mount options which in fact cannot be changed.
2144  *
2145  * But unfortunately mount(8) adds all options from mtab and fstab to the mount
2146  * arguments in some cases so we can't blindly reject options, but have to
2147  * check for each specified option if it actually differs from the currently
2148  * set option and only reject it if that's the case.
2149  *
2150  * Until that is implemented we return success for every remount request, and
2151  * silently ignore all options that we can't actually change.
2152  */
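/*
 * For example (hypothetical invocation): "mount -o remount,ro /mnt" may
 * arrive here carrying the full option string from fstab, such as
 * "ro,logbufs=8,noquota"; an unchanged "logbufs=8" therefore has to be
 * accepted silently rather than rejected as an attempted change.
 */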
2153 static int
2154 xfs_fs_reconfigure(
2155 	struct fs_context *fc)
2156 {
2157 	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
2158 	struct xfs_mount	*new_mp = fc->s_fs_info;
2159 	int			flags = fc->sb_flags;
2160 	int			error;
2161 
2162 	new_mp->m_qflags &= ~XFS_QFLAGS_MNTOPTS;
2163 
2164 	/* version 5 superblocks always support version counters. */
2165 	if (xfs_has_crc(mp))
2166 		fc->sb_flags |= SB_I_VERSION;
2167 
2168 	error = xfs_fs_validate_params(new_mp);
2169 	if (error)
2170 		return error;
2171 
2172 	/* Validate new max_atomic_write option before making other changes */
2173 	if (mp->m_awu_max_bytes != new_mp->m_awu_max_bytes) {
2174 		error = xfs_set_max_atomic_write_opt(mp,
2175 				new_mp->m_awu_max_bytes);
2176 		if (error)
2177 			return error;
2178 	}
2179 
2180 	/* inode32 -> inode64 */
2181 	if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) {
2182 		mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
2183 		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
2184 	}
2185 
2186 	/* inode64 -> inode32 */
2187 	if (!xfs_has_small_inums(mp) && xfs_has_small_inums(new_mp)) {
2188 		mp->m_features |= XFS_FEAT_SMALL_INUMS;
2189 		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
2190 	}
2191 
2192 	/*
2193 	 * Now that mp has been modified according to the remount options, we
2194 	 * do a final option validation with xfs_finish_flags() just like it is
2195 	 * done during mount. We cannot use xfs_finish_flags() on new_mp as it
2197 	 * contains only the user given options.
2198 	 */
2199 	error = xfs_finish_flags(mp);
2200 	if (error)
2201 		return error;
2202 
2203 	/* ro -> rw */
2204 	if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) {
2205 		error = xfs_remount_rw(mp);
2206 		if (error)
2207 			return error;
2208 	}
2209 
2210 	/* rw -> ro */
2211 	if (!xfs_is_readonly(mp) && (flags & SB_RDONLY)) {
2212 		error = xfs_remount_ro(mp);
2213 		if (error)
2214 			return error;
2215 	}
2216 
2217 	return 0;
2218 }
2219 
2220 static void
2221 xfs_fs_free(
2222 	struct fs_context	*fc)
2223 {
2224 	struct xfs_mount	*mp = fc->s_fs_info;
2225 
2226 	/*
2227 	 * mp is stored in the fs_context when it is initialized.
2228 	 * mp is transferred to the superblock on a successful mount,
2229 	 * but if an error occurs before the transfer we have to free
2230 	 * it here.
2231 	 */
2232 	if (mp)
2233 		xfs_mount_free(mp);
2234 }
2235 
2236 static const struct fs_context_operations xfs_context_ops = {
2237 	.parse_param = xfs_fs_parse_param,
2238 	.get_tree    = xfs_fs_get_tree,
2239 	.reconfigure = xfs_fs_reconfigure,
2240 	.free        = xfs_fs_free,
2241 };
2242 
2243 /*
2244  * WARNING: do not initialise any parameters in this function that depend on
2245  * mount option parsing having already been performed, as this can be called from
2246  * fsopen() before any parameters have been set.
2247  */
2248 static int
2249 xfs_init_fs_context(
2250 	struct fs_context	*fc)
2251 {
2252 	struct xfs_mount	*mp;
2253 	int			i;
2254 
2255 	mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
2256 	if (!mp)
2257 		return -ENOMEM;
2258 
2259 	spin_lock_init(&mp->m_sb_lock);
2260 	for (i = 0; i < XG_TYPE_MAX; i++)
2261 		xa_init(&mp->m_groups[i].xa);
2262 	mutex_init(&mp->m_growlock);
2263 	mutex_init(&mp->m_metafile_resv_lock);
2264 	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
2265 	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
2266 	mp->m_kobj.kobject.kset = xfs_kset;
2267 	/*
2268 	 * We don't create the finobt per-ag space reservation until after log
2269 	 * recovery, so we must set this to true so that an ifree transaction
2270 	 * started during log recovery will not depend on space reservations
2271 	 * for finobt expansion.
2272 	 */
2273 	mp->m_finobt_nores = true;
2274 
2275 	/*
2276 	 * These can be overridden by the mount option parsing.
2277 	 */
2278 	mp->m_logbufs = -1;
2279 	mp->m_logbsize = -1;
2280 	mp->m_allocsize_log = 16; /* 64k */
2281 
2282 	xfs_hooks_init(&mp->m_dir_update_hooks);
2283 
2284 	fc->s_fs_info = mp;
2285 	fc->ops = &xfs_context_ops;
2286 
2287 	return 0;
2288 }
2289 
2290 static void
2291 xfs_kill_sb(
2292 	struct super_block		*sb)
2293 {
2294 	kill_block_super(sb);
2295 	xfs_mount_free(XFS_M(sb));
2296 }
2297 
2298 static struct file_system_type xfs_fs_type = {
2299 	.owner			= THIS_MODULE,
2300 	.name			= "xfs",
2301 	.init_fs_context	= xfs_init_fs_context,
2302 	.parameters		= xfs_fs_parameters,
2303 	.kill_sb		= xfs_kill_sb,
2304 	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP | FS_MGTIME |
2305 				  FS_LBS,
2306 };
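/*
 * The MODULE_ALIAS_FS() below registers a "fs-xfs" alias so the module can
 * be loaded automatically when userspace asks to mount an xfs filesystem.
 */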
2307 MODULE_ALIAS_FS("xfs");
2308 
2309 STATIC int __init
2310 xfs_init_caches(void)
2311 {
2312 	int		error;
2313 
2314 	xfs_buf_cache = kmem_cache_create("xfs_buf", sizeof(struct xfs_buf), 0,
2315 					 SLAB_HWCACHE_ALIGN |
2316 					 SLAB_RECLAIM_ACCOUNT,
2317 					 NULL);
2318 	if (!xfs_buf_cache)
2319 		goto out;
2320 
2321 	xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket",
2322 						sizeof(struct xlog_ticket),
2323 						0, 0, NULL);
2324 	if (!xfs_log_ticket_cache)
2325 		goto out_destroy_buf_cache;
2326 
2327 	error = xfs_btree_init_cur_caches();
2328 	if (error)
2329 		goto out_destroy_log_ticket_cache;
2330 
2331 	error = rcbagbt_init_cur_cache();
2332 	if (error)
2333 		goto out_destroy_btree_cur_cache;
2334 
2335 	error = xfs_defer_init_item_caches();
2336 	if (error)
2337 		goto out_destroy_rcbagbt_cur_cache;
2338 
2339 	xfs_da_state_cache = kmem_cache_create("xfs_da_state",
2340 					      sizeof(struct xfs_da_state),
2341 					      0, 0, NULL);
2342 	if (!xfs_da_state_cache)
2343 		goto out_destroy_defer_item_cache;
2344 
2345 	xfs_ifork_cache = kmem_cache_create("xfs_ifork",
2346 					   sizeof(struct xfs_ifork),
2347 					   0, 0, NULL);
2348 	if (!xfs_ifork_cache)
2349 		goto out_destroy_da_state_cache;
2350 
2351 	xfs_trans_cache = kmem_cache_create("xfs_trans",
2352 					   sizeof(struct xfs_trans),
2353 					   0, 0, NULL);
2354 	if (!xfs_trans_cache)
2355 		goto out_destroy_ifork_cache;
2356 
2357 
2358 	/*
2359 	 * The size of the cache-allocated buf log item is the maximum
2360 	 * size possible under XFS.  This wastes a little bit of memory,
2361 	 * but it is much faster.
2362 	 */
2363 	xfs_buf_item_cache = kmem_cache_create("xfs_buf_item",
2364 					      sizeof(struct xfs_buf_log_item),
2365 					      0, 0, NULL);
2366 	if (!xfs_buf_item_cache)
2367 		goto out_destroy_trans_cache;
2368 
2369 	xfs_efd_cache = kmem_cache_create("xfs_efd_item",
2370 			xfs_efd_log_item_sizeof(XFS_EFD_MAX_FAST_EXTENTS),
2371 			0, 0, NULL);
2372 	if (!xfs_efd_cache)
2373 		goto out_destroy_buf_item_cache;
2374 
2375 	xfs_efi_cache = kmem_cache_create("xfs_efi_item",
2376 			xfs_efi_log_item_sizeof(XFS_EFI_MAX_FAST_EXTENTS),
2377 			0, 0, NULL);
2378 	if (!xfs_efi_cache)
2379 		goto out_destroy_efd_cache;
2380 
2381 	xfs_inode_cache = kmem_cache_create("xfs_inode",
2382 					   sizeof(struct xfs_inode), 0,
2383 					   (SLAB_HWCACHE_ALIGN |
2384 					    SLAB_RECLAIM_ACCOUNT |
2385 					    SLAB_ACCOUNT),
2386 					   xfs_fs_inode_init_once);
2387 	if (!xfs_inode_cache)
2388 		goto out_destroy_efi_cache;
2389 
2390 	xfs_ili_cache = kmem_cache_create("xfs_ili",
2391 					 sizeof(struct xfs_inode_log_item), 0,
2392 					 SLAB_RECLAIM_ACCOUNT,
2393 					 NULL);
2394 	if (!xfs_ili_cache)
2395 		goto out_destroy_inode_cache;
2396 
2397 	xfs_icreate_cache = kmem_cache_create("xfs_icr",
2398 					     sizeof(struct xfs_icreate_item),
2399 					     0, 0, NULL);
2400 	if (!xfs_icreate_cache)
2401 		goto out_destroy_ili_cache;
2402 
2403 	xfs_rud_cache = kmem_cache_create("xfs_rud_item",
2404 					 sizeof(struct xfs_rud_log_item),
2405 					 0, 0, NULL);
2406 	if (!xfs_rud_cache)
2407 		goto out_destroy_icreate_cache;
2408 
2409 	xfs_rui_cache = kmem_cache_create("xfs_rui_item",
2410 			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
2411 			0, 0, NULL);
2412 	if (!xfs_rui_cache)
2413 		goto out_destroy_rud_cache;
2414 
2415 	xfs_cud_cache = kmem_cache_create("xfs_cud_item",
2416 					 sizeof(struct xfs_cud_log_item),
2417 					 0, 0, NULL);
2418 	if (!xfs_cud_cache)
2419 		goto out_destroy_rui_cache;
2420 
2421 	xfs_cui_cache = kmem_cache_create("xfs_cui_item",
2422 			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
2423 			0, 0, NULL);
2424 	if (!xfs_cui_cache)
2425 		goto out_destroy_cud_cache;
2426 
2427 	xfs_bud_cache = kmem_cache_create("xfs_bud_item",
2428 					 sizeof(struct xfs_bud_log_item),
2429 					 0, 0, NULL);
2430 	if (!xfs_bud_cache)
2431 		goto out_destroy_cui_cache;
2432 
2433 	xfs_bui_cache = kmem_cache_create("xfs_bui_item",
2434 			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
2435 			0, 0, NULL);
2436 	if (!xfs_bui_cache)
2437 		goto out_destroy_bud_cache;
2438 
2439 	xfs_attrd_cache = kmem_cache_create("xfs_attrd_item",
2440 					    sizeof(struct xfs_attrd_log_item),
2441 					    0, 0, NULL);
2442 	if (!xfs_attrd_cache)
2443 		goto out_destroy_bui_cache;
2444 
2445 	xfs_attri_cache = kmem_cache_create("xfs_attri_item",
2446 					    sizeof(struct xfs_attri_log_item),
2447 					    0, 0, NULL);
2448 	if (!xfs_attri_cache)
2449 		goto out_destroy_attrd_cache;
2450 
2451 	xfs_iunlink_cache = kmem_cache_create("xfs_iul_item",
2452 					     sizeof(struct xfs_iunlink_item),
2453 					     0, 0, NULL);
2454 	if (!xfs_iunlink_cache)
2455 		goto out_destroy_attri_cache;
2456 
2457 	xfs_xmd_cache = kmem_cache_create("xfs_xmd_item",
2458 					 sizeof(struct xfs_xmd_log_item),
2459 					 0, 0, NULL);
2460 	if (!xfs_xmd_cache)
2461 		goto out_destroy_iul_cache;
2462 
2463 	xfs_xmi_cache = kmem_cache_create("xfs_xmi_item",
2464 					 sizeof(struct xfs_xmi_log_item),
2465 					 0, 0, NULL);
2466 	if (!xfs_xmi_cache)
2467 		goto out_destroy_xmd_cache;
2468 
2469 	xfs_parent_args_cache = kmem_cache_create("xfs_parent_args",
2470 					     sizeof(struct xfs_parent_args),
2471 					     0, 0, NULL);
2472 	if (!xfs_parent_args_cache)
2473 		goto out_destroy_xmi_cache;
2474 
2475 	return 0;
2476 
2477  out_destroy_xmi_cache:
2478 	kmem_cache_destroy(xfs_xmi_cache);
2479  out_destroy_xmd_cache:
2480 	kmem_cache_destroy(xfs_xmd_cache);
2481  out_destroy_iul_cache:
2482 	kmem_cache_destroy(xfs_iunlink_cache);
2483  out_destroy_attri_cache:
2484 	kmem_cache_destroy(xfs_attri_cache);
2485  out_destroy_attrd_cache:
2486 	kmem_cache_destroy(xfs_attrd_cache);
2487  out_destroy_bui_cache:
2488 	kmem_cache_destroy(xfs_bui_cache);
2489  out_destroy_bud_cache:
2490 	kmem_cache_destroy(xfs_bud_cache);
2491  out_destroy_cui_cache:
2492 	kmem_cache_destroy(xfs_cui_cache);
2493  out_destroy_cud_cache:
2494 	kmem_cache_destroy(xfs_cud_cache);
2495  out_destroy_rui_cache:
2496 	kmem_cache_destroy(xfs_rui_cache);
2497  out_destroy_rud_cache:
2498 	kmem_cache_destroy(xfs_rud_cache);
2499  out_destroy_icreate_cache:
2500 	kmem_cache_destroy(xfs_icreate_cache);
2501  out_destroy_ili_cache:
2502 	kmem_cache_destroy(xfs_ili_cache);
2503  out_destroy_inode_cache:
2504 	kmem_cache_destroy(xfs_inode_cache);
2505  out_destroy_efi_cache:
2506 	kmem_cache_destroy(xfs_efi_cache);
2507  out_destroy_efd_cache:
2508 	kmem_cache_destroy(xfs_efd_cache);
2509  out_destroy_buf_item_cache:
2510 	kmem_cache_destroy(xfs_buf_item_cache);
2511  out_destroy_trans_cache:
2512 	kmem_cache_destroy(xfs_trans_cache);
2513  out_destroy_ifork_cache:
2514 	kmem_cache_destroy(xfs_ifork_cache);
2515  out_destroy_da_state_cache:
2516 	kmem_cache_destroy(xfs_da_state_cache);
2517  out_destroy_defer_item_cache:
2518 	xfs_defer_destroy_item_caches();
2519  out_destroy_rcbagbt_cur_cache:
2520 	rcbagbt_destroy_cur_cache();
2521  out_destroy_btree_cur_cache:
2522 	xfs_btree_destroy_cur_caches();
2523  out_destroy_log_ticket_cache:
2524 	kmem_cache_destroy(xfs_log_ticket_cache);
2525  out_destroy_buf_cache:
2526 	kmem_cache_destroy(xfs_buf_cache);
2527  out:
2528 	return -ENOMEM;
2529 }
2530 
2531 STATIC void
2532 xfs_destroy_caches(void)
2533 {
2534 	/*
2535 	 * Make sure all delayed rcu frees are flushed before we
2536 	 * destroy caches.
2537 	 */
2538 	rcu_barrier();
2539 	kmem_cache_destroy(xfs_parent_args_cache);
2540 	kmem_cache_destroy(xfs_xmd_cache);
2541 	kmem_cache_destroy(xfs_xmi_cache);
2542 	kmem_cache_destroy(xfs_iunlink_cache);
2543 	kmem_cache_destroy(xfs_attri_cache);
2544 	kmem_cache_destroy(xfs_attrd_cache);
2545 	kmem_cache_destroy(xfs_bui_cache);
2546 	kmem_cache_destroy(xfs_bud_cache);
2547 	kmem_cache_destroy(xfs_cui_cache);
2548 	kmem_cache_destroy(xfs_cud_cache);
2549 	kmem_cache_destroy(xfs_rui_cache);
2550 	kmem_cache_destroy(xfs_rud_cache);
2551 	kmem_cache_destroy(xfs_icreate_cache);
2552 	kmem_cache_destroy(xfs_ili_cache);
2553 	kmem_cache_destroy(xfs_inode_cache);
2554 	kmem_cache_destroy(xfs_efi_cache);
2555 	kmem_cache_destroy(xfs_efd_cache);
2556 	kmem_cache_destroy(xfs_buf_item_cache);
2557 	kmem_cache_destroy(xfs_trans_cache);
2558 	kmem_cache_destroy(xfs_ifork_cache);
2559 	kmem_cache_destroy(xfs_da_state_cache);
2560 	xfs_defer_destroy_item_caches();
2561 	rcbagbt_destroy_cur_cache();
2562 	xfs_btree_destroy_cur_caches();
2563 	kmem_cache_destroy(xfs_log_ticket_cache);
2564 	kmem_cache_destroy(xfs_buf_cache);
2565 }
2566 
2567 STATIC int __init
2568 xfs_init_workqueues(void)
2569 {
2570 	/*
2571 	 * The allocation workqueue can be used in memory reclaim situations
2572 	 * (writepage path), and parallelism is only limited by the number of
2573 	 * AGs in all the filesystems mounted. Hence use the default large
2574 	 * max_active value for this workqueue.
2575 	 */
2576 	xfs_alloc_wq = alloc_workqueue("xfsalloc", XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_PERCPU),
2577 			0);
2578 	if (!xfs_alloc_wq)
2579 		return -ENOMEM;
2580 
2581 	xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
2582 			0);
2583 	if (!xfs_discard_wq)
2584 		goto out_free_alloc_wq;
2585 
2586 	return 0;
2587 out_free_alloc_wq:
2588 	destroy_workqueue(xfs_alloc_wq);
2589 	return -ENOMEM;
2590 }
2591 
2592 STATIC void
2593 xfs_destroy_workqueues(void)
2594 {
2595 	destroy_workqueue(xfs_discard_wq);
2596 	destroy_workqueue(xfs_alloc_wq);
2597 }
2598 
2599 STATIC int __init
2600 init_xfs_fs(void)
2601 {
2602 	int			error;
2603 
2604 	xfs_check_ondisk_structs();
2605 
2606 	error = xfs_dahash_test();
2607 	if (error)
2608 		return error;
2609 
2610 	printk(KERN_INFO XFS_VERSION_STRING " with "
2611 			 XFS_BUILD_OPTIONS " enabled\n");
2612 
2613 	xfs_dir_startup();
2614 
2615 	error = xfs_init_caches();
2616 	if (error)
2617 		goto out;
2618 
2619 	error = xfs_init_workqueues();
2620 	if (error)
2621 		goto out_destroy_caches;
2622 
2623 	error = xfs_mru_cache_init();
2624 	if (error)
2625 		goto out_destroy_wq;
2626 
2627 	error = xfs_init_procfs();
2628 	if (error)
2629 		goto out_mru_cache_uninit;
2630 
2631 	error = xfs_sysctl_register();
2632 	if (error)
2633 		goto out_cleanup_procfs;
2634 
2635 	xfs_debugfs = xfs_debugfs_mkdir("xfs", NULL);
2636 
2637 	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
2638 	if (!xfs_kset) {
2639 		error = -ENOMEM;
2640 		goto out_debugfs_unregister;
2641 	}
2642 
2643 	xfsstats.xs_kobj.kobject.kset = xfs_kset;
2644 
2645 	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
2646 	if (!xfsstats.xs_stats) {
2647 		error = -ENOMEM;
2648 		goto out_kset_unregister;
2649 	}
2650 
2651 	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
2652 			       "stats");
2653 	if (error)
2654 		goto out_free_stats;
2655 
2656 	error = xchk_global_stats_setup(xfs_debugfs);
2657 	if (error)
2658 		goto out_remove_stats_kobj;
2659 
2660 #ifdef DEBUG
2661 	xfs_dbg_kobj.kobject.kset = xfs_kset;
2662 	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
2663 	if (error)
2664 		goto out_remove_scrub_stats;
2665 #endif
2666 
2667 	error = xfs_qm_init();
2668 	if (error)
2669 		goto out_remove_dbg_kobj;
2670 
2671 	error = register_filesystem(&xfs_fs_type);
2672 	if (error)
2673 		goto out_qm_exit;
2674 	return 0;
2675 
2676  out_qm_exit:
2677 	xfs_qm_exit();
2678  out_remove_dbg_kobj:
2679 #ifdef DEBUG
2680 	xfs_sysfs_del(&xfs_dbg_kobj);
2681  out_remove_scrub_stats:
2682 #endif
2683 	xchk_global_stats_teardown();
2684  out_remove_stats_kobj:
2685 	xfs_sysfs_del(&xfsstats.xs_kobj);
2686  out_free_stats:
2687 	free_percpu(xfsstats.xs_stats);
2688  out_kset_unregister:
2689 	kset_unregister(xfs_kset);
2690  out_debugfs_unregister:
2691 	debugfs_remove(xfs_debugfs);
2692 	xfs_sysctl_unregister();
2693  out_cleanup_procfs:
2694 	xfs_cleanup_procfs();
2695  out_mru_cache_uninit:
2696 	xfs_mru_cache_uninit();
2697  out_destroy_wq:
2698 	xfs_destroy_workqueues();
2699  out_destroy_caches:
2700 	xfs_destroy_caches();
2701  out:
2702 	return error;
2703 }
2704 
2705 STATIC void __exit
2706 exit_xfs_fs(void)
2707 {
2708 	xfs_qm_exit();
2709 	unregister_filesystem(&xfs_fs_type);
2710 #ifdef DEBUG
2711 	xfs_sysfs_del(&xfs_dbg_kobj);
2712 #endif
2713 	xchk_global_stats_teardown();
2714 	xfs_sysfs_del(&xfsstats.xs_kobj);
2715 	free_percpu(xfsstats.xs_stats);
2716 	kset_unregister(xfs_kset);
2717 	debugfs_remove(xfs_debugfs);
2718 	xfs_sysctl_unregister();
2719 	xfs_cleanup_procfs();
2720 	xfs_mru_cache_uninit();
2721 	xfs_destroy_workqueues();
2722 	xfs_destroy_caches();
2723 	xfs_uuid_table_free();
2724 }
2725 
2726 module_init(init_xfs_fs);
2727 module_exit(exit_xfs_fs);
2728 
2729 MODULE_AUTHOR("Silicon Graphics, Inc.");
2730 MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
2731 MODULE_LICENSE("GPL");
2732