// SPDX-License-Identifier: GPL-2.0
/*
 * fs/xfs/xfs_super.c (revision 22d55f02b8922a097cd4be1e2f131dfa7ef65901)
 *
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_fsops.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_icreate_item.h"
#include "xfs_filestream.h"
#include "xfs_quota.h"
#include "xfs_sysfs.h"
#include "xfs_ondisk.h"
#include "xfs_rmap_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"
#include "xfs_defer.h"

#include <linux/namei.h>
#include <linux/dax.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/magic.h>
#include <linux/mount.h>
#include <linux/mempool.h>
#include <linux/writeback.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/parser.h>

static const struct super_operations xfs_super_operations;
struct bio_set xfs_ioend_bioset;

static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
#ifdef DEBUG
static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
#endif

/*
 * Table driven mount option parser.
 */
enum {
	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev, Opt_biosize,
	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
	Opt_discard, Opt_nodiscard, Opt_dax, Opt_err,
};

static const match_table_t tokens = {
	{Opt_logbufs,	"logbufs=%u"},	/* number of XFS log buffers */
	{Opt_logbsize,	"logbsize=%s"},	/* size of XFS log buffers */
	{Opt_logdev,	"logdev=%s"},	/* log device */
	{Opt_rtdev,	"rtdev=%s"},	/* realtime I/O device */
	{Opt_biosize,	"biosize=%u"},	/* log2 of preferred buffered io size */
	{Opt_wsync,	"wsync"},	/* safe-mode nfs compatible mount */
	{Opt_noalign,	"noalign"},	/* turn off stripe alignment */
	{Opt_swalloc,	"swalloc"},	/* turn on stripe width allocation */
	{Opt_sunit,	"sunit=%u"},	/* data volume stripe unit */
	{Opt_swidth,	"swidth=%u"},	/* data volume stripe width */
	{Opt_nouuid,	"nouuid"},	/* ignore filesystem UUID */
	{Opt_grpid,	"grpid"},	/* group-ID from parent directory */
	{Opt_nogrpid,	"nogrpid"},	/* group-ID from current process */
	{Opt_bsdgroups,	"bsdgroups"},	/* group-ID from parent directory */
	{Opt_sysvgroups,"sysvgroups"},	/* group-ID from current process */
	{Opt_allocsize,	"allocsize=%s"},/* preferred allocation size */
	{Opt_norecovery,"norecovery"},	/* don't run XFS recovery */
	{Opt_inode64,	"inode64"},	/* inodes can be allocated anywhere */
	{Opt_inode32,   "inode32"},	/* inode allocation limited to
					 * XFS_MAXINUMBER_32 */
	{Opt_ikeep,	"ikeep"},	/* do not free empty inode clusters */
	{Opt_noikeep,	"noikeep"},	/* free empty inode clusters */
	{Opt_largeio,	"largeio"},	/* report large I/O sizes in stat() */
	{Opt_nolargeio,	"nolargeio"},	/* do not report large I/O sizes
					 * in stat(). */
	{Opt_attr2,	"attr2"},	/* do use attr2 attribute format */
	{Opt_noattr2,	"noattr2"},	/* do not use attr2 attribute format */
	{Opt_filestreams,"filestreams"},/* use filestreams allocator */
	{Opt_quota,	"quota"},	/* disk quotas (user) */
	{Opt_noquota,	"noquota"},	/* no quotas */
	{Opt_usrquota,	"usrquota"},	/* user quota enabled */
	{Opt_grpquota,	"grpquota"},	/* group quota enabled */
	{Opt_prjquota,	"prjquota"},	/* project quota enabled */
	{Opt_uquota,	"uquota"},	/* user quota (IRIX variant) */
	{Opt_gquota,	"gquota"},	/* group quota (IRIX variant) */
	{Opt_pquota,	"pquota"},	/* project quota (IRIX variant) */
	{Opt_uqnoenforce,"uqnoenforce"},/* user quota accounting, no enforcement */
	{Opt_gqnoenforce,"gqnoenforce"},/* group quota accounting, no enforcement */
	{Opt_pqnoenforce,"pqnoenforce"},/* project quota accounting, no enforcement */
	{Opt_qnoenforce, "qnoenforce"},	/* same as uqnoenforce */
	{Opt_discard,	"discard"},	/* Discard unused blocks */
	{Opt_nodiscard,	"nodiscard"},	/* Do not discard unused blocks */
	{Opt_dax,	"dax"},		/* Enable direct access to bdev pages */
	{Opt_err,	NULL},
};
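
/*
 * Illustrative example (not part of the original source): a mount option
 * string such as the hypothetical
 *
 *	mount -t xfs -o logbufs=8,logbsize=256k,sunit=64,swidth=256 /dev/sdb1 /mnt
 *
 * arrives here as "logbufs=8,logbsize=256k,sunit=64,swidth=256" and is split
 * on commas by xfs_parseargs() below, with each piece matched against this
 * table.
 */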


STATIC int
suffix_kstrtoint(const substring_t *s, unsigned int base, int *res)
{
	int	last, shift_left_factor = 0, _res;
	char	*value;
	int	ret = 0;

	value = match_strdup(s);
	if (!value)
		return -ENOMEM;

	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}

	if (kstrtoint(value, base, &_res))
		ret = -EINVAL;
	else
		*res = _res << shift_left_factor;	/* _res is uninitialised on failure */
	kfree(value);
	return ret;
}
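
/*
 * Worked example (illustrative only): for the option "logbsize=64k",
 * match_strdup() yields "64k", the trailing 'k' selects
 * shift_left_factor = 10, and kstrtoint() parses 64, so *res becomes
 * 64 << 10 = 65536 bytes.
 */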

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock has _not_ yet been read in.
 *
 * Note that this function leaks the various device name allocations on
 * failure.  The caller takes care of them.
 *
 * *sb is const because this is also used to test options on the remount
 * path, and we don't want this to have any side effects at remount time.
 * Today this function does not change *sb, but just to future-proof...
 */
STATIC int
xfs_parseargs(
	struct xfs_mount	*mp,
	char			*options)
{
	const struct super_block *sb = mp->m_super;
	char			*p;
	substring_t		args[MAX_OPT_ARGS];
	int			dsunit = 0;
	int			dswidth = 0;
	int			iosize = 0;
	uint8_t			iosizelog = 0;

	/*
	 * Set up the mount name first so that all errors will refer to the
	 * correct device.
	 */
	mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL);
	if (!mp->m_fsname)
		return -ENOMEM;
	mp->m_fsname_len = strlen(mp->m_fsname) + 1;

	/*
	 * Copy binary VFS mount flags we are interested in.
	 */
	if (sb_rdonly(sb))
		mp->m_flags |= XFS_MOUNT_RDONLY;
	if (sb->s_flags & SB_DIRSYNC)
		mp->m_flags |= XFS_MOUNT_DIRSYNC;
	if (sb->s_flags & SB_SYNCHRONOUS)
		mp->m_flags |= XFS_MOUNT_WSYNC;

	/*
	 * Set some default flags that could be cleared by the mount option
	 * parsing.
	 */
	mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;

	/*
	 * These can be overridden by the mount option parsing.
	 */
	mp->m_logbufs = -1;
	mp->m_logbsize = -1;

	if (!options)
		goto done;

	while ((p = strsep(&options, ",")) != NULL) {
		int		token;

		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_logbufs:
			if (match_int(args, &mp->m_logbufs))
				return -EINVAL;
			break;
		case Opt_logbsize:
			if (suffix_kstrtoint(args, 10, &mp->m_logbsize))
				return -EINVAL;
			break;
		case Opt_logdev:
			kfree(mp->m_logname);
			mp->m_logname = match_strdup(args);
			if (!mp->m_logname)
				return -ENOMEM;
			break;
		case Opt_rtdev:
			kfree(mp->m_rtname);
			mp->m_rtname = match_strdup(args);
			if (!mp->m_rtname)
				return -ENOMEM;
			break;
		case Opt_allocsize:
		case Opt_biosize:
			if (suffix_kstrtoint(args, 10, &iosize))
				return -EINVAL;
			iosizelog = ffs(iosize) - 1;
			break;
		case Opt_grpid:
		case Opt_bsdgroups:
			mp->m_flags |= XFS_MOUNT_GRPID;
			break;
		case Opt_nogrpid:
		case Opt_sysvgroups:
			mp->m_flags &= ~XFS_MOUNT_GRPID;
			break;
		case Opt_wsync:
			mp->m_flags |= XFS_MOUNT_WSYNC;
			break;
		case Opt_norecovery:
			mp->m_flags |= XFS_MOUNT_NORECOVERY;
			break;
		case Opt_noalign:
			mp->m_flags |= XFS_MOUNT_NOALIGN;
			break;
		case Opt_swalloc:
			mp->m_flags |= XFS_MOUNT_SWALLOC;
			break;
		case Opt_sunit:
			if (match_int(args, &dsunit))
				return -EINVAL;
			break;
		case Opt_swidth:
			if (match_int(args, &dswidth))
				return -EINVAL;
			break;
		case Opt_inode32:
			mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
			break;
		case Opt_inode64:
			mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
			break;
		case Opt_nouuid:
			mp->m_flags |= XFS_MOUNT_NOUUID;
			break;
		case Opt_ikeep:
			mp->m_flags |= XFS_MOUNT_IKEEP;
			break;
		case Opt_noikeep:
			mp->m_flags &= ~XFS_MOUNT_IKEEP;
			break;
		case Opt_largeio:
			mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
			break;
		case Opt_nolargeio:
			mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
			break;
		case Opt_attr2:
			mp->m_flags |= XFS_MOUNT_ATTR2;
			break;
		case Opt_noattr2:
			mp->m_flags &= ~XFS_MOUNT_ATTR2;
			mp->m_flags |= XFS_MOUNT_NOATTR2;
			break;
		case Opt_filestreams:
			mp->m_flags |= XFS_MOUNT_FILESTREAMS;
			break;
		case Opt_noquota:
			mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
			mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
			mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
			break;
		case Opt_quota:
		case Opt_uquota:
		case Opt_usrquota:
			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
					 XFS_UQUOTA_ENFD);
			break;
		case Opt_qnoenforce:
		case Opt_uqnoenforce:
			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_UQUOTA_ENFD;
			break;
		case Opt_pquota:
		case Opt_prjquota:
			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
					 XFS_PQUOTA_ENFD);
			break;
		case Opt_pqnoenforce:
			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_PQUOTA_ENFD;
			break;
		case Opt_gquota:
		case Opt_grpquota:
			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
					 XFS_GQUOTA_ENFD);
			break;
		case Opt_gqnoenforce:
			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
			mp->m_qflags &= ~XFS_GQUOTA_ENFD;
			break;
		case Opt_discard:
			mp->m_flags |= XFS_MOUNT_DISCARD;
			break;
		case Opt_nodiscard:
			mp->m_flags &= ~XFS_MOUNT_DISCARD;
			break;
#ifdef CONFIG_FS_DAX
		case Opt_dax:
			mp->m_flags |= XFS_MOUNT_DAX;
			break;
#endif
		default:
			xfs_warn(mp, "unknown mount option [%s].", p);
			return -EINVAL;
		}
	}

	/*
	 * The norecovery flag requires a read-only mount.
	 */
	if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_warn(mp, "no-recovery mounts must be read-only.");
		return -EINVAL;
	}

	if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) {
		xfs_warn(mp,
	"sunit and swidth options incompatible with the noalign option");
		return -EINVAL;
	}

#ifndef CONFIG_XFS_QUOTA
	if (XFS_IS_QUOTA_RUNNING(mp)) {
		xfs_warn(mp, "quota support not available in this kernel.");
		return -EINVAL;
	}
#endif

	if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
		xfs_warn(mp, "sunit and swidth must be specified together");
		return -EINVAL;
	}

	if (dsunit && (dswidth % dsunit != 0)) {
		xfs_warn(mp,
	"stripe width (%d) must be a multiple of the stripe unit (%d)",
			dswidth, dsunit);
		return -EINVAL;
	}

done:
	if (dsunit && !(mp->m_flags & XFS_MOUNT_NOALIGN)) {
		/*
		 * At this point the superblock has not been read
		 * in, therefore we do not know the block size.
		 * Before the mount call ends we will convert
		 * these to FSBs.
		 */
		mp->m_dalign = dsunit;
		mp->m_swidth = dswidth;
	}

	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return -EINVAL;
	}
	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize !=  0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		xfs_warn(mp,
			"invalid logbsize: %d [not 16k, 32k, 64k, 128k or 256k]",
			mp->m_logbsize);
		return -EINVAL;
	}

	if (iosizelog) {
		if (iosizelog > XFS_MAX_IO_LOG ||
		    iosizelog < XFS_MIN_IO_LOG) {
			xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
				iosizelog, XFS_MIN_IO_LOG,
				XFS_MAX_IO_LOG);
			return -EINVAL;
		}

		mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE;
		mp->m_readio_log = iosizelog;
		mp->m_writeio_log = iosizelog;
	}

	return 0;
}
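
/*
 * Illustrative geometry check (hypothetical numbers, not from the original
 * source): sunit=64,swidth=256 passes the validation above because both are
 * set and 256 % 64 == 0; sunit=64,swidth=100 fails the multiple-of check,
 * and sunit=64 alone fails the specified-together check.
 */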

struct proc_xfs_info {
	uint64_t	flag;
	char		*str;
};

STATIC void
xfs_showargs(
	struct xfs_mount	*mp,
	struct seq_file		*m)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_IKEEP,		",ikeep" },
		{ XFS_MOUNT_WSYNC,		",wsync" },
		{ XFS_MOUNT_NOALIGN,		",noalign" },
		{ XFS_MOUNT_SWALLOC,		",swalloc" },
		{ XFS_MOUNT_NOUUID,		",nouuid" },
		{ XFS_MOUNT_NORECOVERY,		",norecovery" },
		{ XFS_MOUNT_ATTR2,		",attr2" },
		{ XFS_MOUNT_FILESTREAMS,	",filestreams" },
		{ XFS_MOUNT_GRPID,		",grpid" },
		{ XFS_MOUNT_DISCARD,		",discard" },
		{ XFS_MOUNT_SMALL_INUMS,	",inode32" },
		{ XFS_MOUNT_DAX,		",dax" },
		{ 0, NULL }
	};
	static struct proc_xfs_info xfs_info_unset[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_COMPAT_IOSIZE,	",largeio" },
		{ XFS_MOUNT_SMALL_INUMS,	",inode64" },
		{ 0, NULL }
	};
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_flags & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}
	for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) {
		if (!(mp->m_flags & xfs_infop->flag))
			seq_puts(m, xfs_infop->str);
	}

	if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
		seq_printf(m, ",allocsize=%dk",
				(int)(1 << mp->m_writeio_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);

	if (mp->m_logname)
		seq_show_option(m, "logdev", mp->m_logname);
	if (mp->m_rtname)
		seq_show_option(m, "rtdev", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, ",sunit=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, ",swidth=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
		seq_puts(m, ",usrquota");
	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
		seq_puts(m, ",uqnoenforce");

	if (mp->m_qflags & XFS_PQUOTA_ACCT) {
		if (mp->m_qflags & XFS_PQUOTA_ENFD)
			seq_puts(m, ",prjquota");
		else
			seq_puts(m, ",pqnoenforce");
	}
	if (mp->m_qflags & XFS_GQUOTA_ACCT) {
		if (mp->m_qflags & XFS_GQUOTA_ENFD)
			seq_puts(m, ",grpquota");
		else
			seq_puts(m, ",gqnoenforce");
	}

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, ",noquota");
}
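
/*
 * Illustrative output (hypothetical mount, not from the original source):
 * for a filesystem mounted with usrquota and a 256k log buffer size, the
 * options column in /proc/mounts would gain ",logbsize=256k,usrquota"
 * from the seq_printf()/seq_puts() calls above.
 */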

static uint64_t
xfs_max_file_offset(
	unsigned int		blockshift)
{
	unsigned int		pagefactor = 1;
	unsigned int		bitshift = BITS_PER_LONG - 1;

	/* Figure out the maximum file size; on Linux this can depend on
	 * the filesystem block size (on 32-bit platforms).
	 * __block_write_begin does this in an [unsigned] long long...
	 *      page->index << (PAGE_SHIFT - bbits)
	 * So, for page sized blocks (4K on 32-bit platforms),
	 * this wraps at around 8TB (hence MAX_LFS_FILESIZE, which is
	 *      (((u64)PAGE_SIZE << (BITS_PER_LONG-1))-1)
	 * but for smaller block sizes it is less (bbits = log2 bsize).
	 */

#if BITS_PER_LONG == 32
	ASSERT(sizeof(sector_t) == 8);
	pagefactor = PAGE_SIZE;
	bitshift = BITS_PER_LONG;
#endif

	return (((uint64_t)pagefactor) << bitshift) - 1;
}
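
/*
 * Worked example (illustrative only): on a 64-bit kernel, pagefactor = 1
 * and bitshift = 63, so the limit is ((uint64_t)1 << 63) - 1.  On a
 * 32-bit kernel with 4K pages, pagefactor = 4096 and bitshift = 32,
 * giving ((uint64_t)4096 << 32) - 1, i.e. just under 16TB.
 */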

/*
 * Set parameters for inode allocation heuristics, taking into account
 * filesystem size and inode32/inode64 mount options; i.e. specifically
 * whether or not XFS_MOUNT_SMALL_INUMS is set.
 *
 * Inode allocation patterns are altered only if inode32 is requested
 * (XFS_MOUNT_SMALL_INUMS), and the filesystem is sufficiently large.
 * If altered, XFS_MOUNT_32BITINODES is set as well.
 *
 * An agcount independent of that in the mount structure is provided
 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 * to the potentially higher ag count.
 *
 * Returns the maximum AG index which may contain inodes.
 */
xfs_agnumber_t
xfs_set_inode_alloc(
	struct xfs_mount *mp,
	xfs_agnumber_t	agcount)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	maxagi = 0;
	xfs_sb_t	*sbp = &mp->m_sb;
	xfs_agnumber_t	max_metadata;
	xfs_agino_t	agino;
	xfs_ino_t	ino;

	/*
	 * Calculate how much should be reserved for inodes to meet
	 * the max inode percentage.  Used only for inode32.
	 */
	if (mp->m_maxicount) {
		uint64_t	icount;

		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		icount += sbp->sb_agblocks - 1;
		do_div(icount, sbp->sb_agblocks);
		max_metadata = icount;
	} else {
		max_metadata = agcount;
	}

	/* Get the last possible inode in the filesystem */
	agino =	XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/*
	 * If user asked for no more than 32-bit inodes, and the fs is
	 * sufficiently large, set XFS_MOUNT_32BITINODES if we must alter
	 * the allocator to accommodate the request.
	 */
	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
		mp->m_flags |= XFS_MOUNT_32BITINODES;
	else
		mp->m_flags &= ~XFS_MOUNT_32BITINODES;

	for (index = 0; index < agcount; index++) {
		struct xfs_perag	*pag;

		ino = XFS_AGINO_TO_INO(mp, index, agino);

		pag = xfs_perag_get(mp, index);

		if (mp->m_flags & XFS_MOUNT_32BITINODES) {
			if (ino > XFS_MAXINUMBER_32) {
				pag->pagi_inodeok = 0;
				pag->pagf_metadata = 0;
			} else {
				pag->pagi_inodeok = 1;
				maxagi++;
				if (index < max_metadata)
					pag->pagf_metadata = 1;
				else
					pag->pagf_metadata = 0;
			}
		} else {
			pag->pagi_inodeok = 1;
			pag->pagf_metadata = 0;
		}

		xfs_perag_put(pag);
	}

	return (mp->m_flags & XFS_MOUNT_32BITINODES) ? maxagi : agcount;
}
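
/*
 * Illustrative scenario (hypothetical geometry, not from the original
 * source): on a filesystem large enough that the last possible inode
 * number exceeds XFS_MAXINUMBER_32, an inode32 mount marks only the
 * low-numbered AGs with pagi_inodeok = 1, so new inodes stay below 2^32
 * while data can still be allocated in every AG.
 */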

STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				    mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
	}

	return error;
}

STATIC void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

void
xfs_blkdev_issue_flush(
	xfs_buftarg_t		*buftarg)
{
	blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS, NULL);
}

STATIC void
xfs_close_devices(
	struct xfs_mount	*mp)
{
	struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
		struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_logdev_targp);
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
	if (mp->m_rtdev_targp) {
		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
		struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_rtdev_targp);
		xfs_blkdev_put(rtdev);
		fs_put_dax(dax_rtdev);
	}
	xfs_free_buftarg(mp->m_ddev_targp);
	fs_put_dax(dax_ddev);
}

/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
	struct xfs_mount	*mp)
{
	struct block_device	*ddev = mp->m_super->s_bdev;
	struct dax_device	*dax_ddev = fs_dax_get_by_bdev(ddev);
	struct dax_device	*dax_logdev = NULL, *dax_rtdev = NULL;
	struct block_device	*logdev = NULL, *rtdev = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
		if (error)
			goto out;
		dax_logdev = fs_dax_get_by_bdev(logdev);
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
		if (error)
			goto out_close_logdev;

		if (rtdev == ddev || rtdev == logdev) {
			xfs_warn(mp,
	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = -EINVAL;
			goto out_close_rtdev;
		}
		dax_rtdev = fs_dax_get_by_bdev(rtdev);
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = -ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev && logdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		mp->m_logdev_targp = mp->m_ddev_targp;
	}

	return 0;

 out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp);
 out_free_ddev_targ:
	xfs_free_buftarg(mp->m_ddev_targp);
 out_close_rtdev:
	xfs_blkdev_put(rtdev);
	fs_put_dax(dax_rtdev);
 out_close_logdev:
	if (logdev && logdev != ddev) {
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
 out:
	fs_put_dax(dax_ddev);
	return error;
}

/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		if (xfs_sb_version_hassector(&mp->m_sb))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}

STATIC int
xfs_init_mount_workqueues(
	struct xfs_mount	*mp)
{
	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 1, mp->m_fsname);
	if (!mp->m_buf_workqueue)
		goto out;

	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
	if (!mp->m_unwritten_workqueue)
		goto out_destroy_buf;

	mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
	if (!mp->m_cil_workqueue)
		goto out_destroy_unwritten;

	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
	if (!mp->m_reclaim_workqueue)
		goto out_destroy_cil;

	mp->m_log_workqueue = alloc_workqueue("xfs-log/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE|WQ_HIGHPRI, 0,
			mp->m_fsname);
	if (!mp->m_log_workqueue)
		goto out_destroy_reclaim;

	mp->m_eofblocks_workqueue = alloc_workqueue("xfs-eofblocks/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
	if (!mp->m_eofblocks_workqueue)
		goto out_destroy_log;

	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", WQ_FREEZABLE, 0,
					       mp->m_fsname);
	if (!mp->m_sync_workqueue)
		goto out_destroy_eofb;

	return 0;

out_destroy_eofb:
	destroy_workqueue(mp->m_eofblocks_workqueue);
out_destroy_log:
	destroy_workqueue(mp->m_log_workqueue);
out_destroy_reclaim:
	destroy_workqueue(mp->m_reclaim_workqueue);
out_destroy_cil:
	destroy_workqueue(mp->m_cil_workqueue);
out_destroy_unwritten:
	destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_buf:
	destroy_workqueue(mp->m_buf_workqueue);
out:
	return -ENOMEM;
}
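
/*
 * Naming note (illustrative, not from the original source): the "%s" in
 * each workqueue name is mp->m_fsname, so for a filesystem on the
 * hypothetical device sdb1 these appear to the rest of the kernel as
 * "xfs-buf/sdb1", "xfs-cil/sdb1", and so on.
 */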

STATIC void
xfs_destroy_mount_workqueues(
	struct xfs_mount	*mp)
{
	destroy_workqueue(mp->m_sync_workqueue);
	destroy_workqueue(mp->m_eofblocks_workqueue);
	destroy_workqueue(mp->m_log_workqueue);
	destroy_workqueue(mp->m_reclaim_workqueue);
	destroy_workqueue(mp->m_cil_workqueue);
	destroy_workqueue(mp->m_unwritten_workqueue);
	destroy_workqueue(mp->m_buf_workqueue);
}

/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 * for IO to complete so that we effectively throttle multiple callers to the
 * rate at which IO is completing.
 */
void
xfs_flush_inodes(
	struct xfs_mount	*mp)
{
	struct super_block	*sb = mp->m_super;

	if (down_read_trylock(&sb->s_umount)) {
		sync_inodes_sb(sb);
		up_read(&sb->s_umount);
	}
}

/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}

#ifdef DEBUG
static void
xfs_check_delalloc(
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;

	if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
		return;
	do {
		if (isnullstartblock(got.br_startblock)) {
			xfs_warn(ip->i_mount,
	"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
				ip->i_ino,
				whichfork == XFS_DATA_FORK ? "data" : "cow",
				got.br_startoff, got.br_blockcount);
		}
	} while (xfs_iext_next_extent(ifp, &icur, &got));
}
#else
#define xfs_check_delalloc(ip, whichfork)	do { } while (0)
#endif

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	XFS_STATS_INC(ip->i_mount, vn_rele);
	XFS_STATS_INC(ip->i_mount, vn_remove);

	xfs_inactive(ip);

	if (!XFS_FORCED_SHUTDOWN(ip->i_mount) && ip->i_delayed_blks) {
		xfs_check_delalloc(ip, XFS_DATA_FORK);
		xfs_check_delalloc(ip, XFS_COW_FORK);
		ASSERT(0);
	}

	XFS_STATS_INC(ip->i_mount, vn_reclaim);

	/*
	 * We should never get here with one of the reclaim flags already set.
	 */
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));

	/*
	 * We always use background reclaim here because even if the
	 * inode is clean, it still may be under IO and hence we have
	 * to take the flush lock. The background reclaim path handles
	 * this more efficiently than we can here, so simply let background
	 * reclaim tear down all inodes.
	 */
	xfs_inode_set_reclaim_tag(ip);
}

static void
xfs_fs_dirty_inode(
	struct inode			*inode,
	int				flag)
{
	struct xfs_inode		*ip = XFS_I(inode);
	struct xfs_mount		*mp = ip->i_mount;
	struct xfs_trans		*tp;

	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
		return;
	if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
		return;

	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
		return;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
	xfs_trans_commit(tp);
}

/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when the inode is freed.
 */
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);

	mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
}

/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every inode
 * we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	/*
	 * If this unlinked inode is in the middle of recovery, don't
	 * drop the inode just yet; log recovery will take care of
	 * that.  See the comment for this inode flag.
	 */
	if (ip->i_flags & XFS_IRECOVERY) {
		ASSERT(ip->i_mount->m_log->l_flags & XLOG_RECOVERY_NEEDED);
		return 0;
	}

	return generic_drop_inode(inode) || (ip->i_flags & XFS_IDONTCACHE);
}

STATIC void
xfs_free_fsname(
	struct xfs_mount	*mp)
{
	kfree(mp->m_fsname);
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
}

STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/*
	 * Doing anything during the async pass would be counterproductive.
	 */
	if (!wait)
		return 0;

	xfs_log_force(mp, XFS_LOG_SYNC);
	if (laptop_mode) {
		/*
		 * The disk must be active because we're syncing.
		 * We schedule log work now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		flush_delayed_work(&mp->m_log->l_work);
	}

	return 0;
}

STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	uint64_t		fakeinos, id;
	uint64_t		icount;
	uint64_t		ifree;
	uint64_t		fdblocks;
	xfs_extlen_t		lsize;
	int64_t			ffree;

	statp->f_type = XFS_SUPER_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid.val[0] = (u32)id;
	statp->f_fsid.val[1] = (u32)(id >> 32);

	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	spin_unlock(&mp->m_sb_lock);

	statp->f_bfree = fdblocks - mp->m_alloc_set_aside;
	statp->f_bavail = statp->f_bfree;

	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
	if (mp->m_maxicount)
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
					mp->m_maxicount);

	/* If sb_icount overshot maxicount, report actual allocation */
	statp->f_files = max_t(typeof(statp->f_files),
					statp->f_files,
					sbp->sb_icount);

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (icount - ifree);
	statp->f_ffree = max_t(int64_t, ffree, 0);


	if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);

	if (XFS_IS_REALTIME_MOUNT(mp) &&
	    (ip->i_d.di_flags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
		statp->f_blocks = sbp->sb_rblocks;
		statp->f_bavail = statp->f_bfree =
			sbp->sb_frextents * sbp->sb_rextsize;
	}

	return 0;
}
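
/*
 * Worked example (hypothetical numbers, illustrative only): with a
 * 4096-byte block size, sb_dblocks = 26214400 and sb_logblocks = 32768 on
 * an internal log, f_blocks reports 26214400 - 32768 = 26181632 blocks,
 * which is what statfs(2) callers such as df multiply by f_bsize.
 */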

STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks = 0;

	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks;

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, &resblks, NULL);
}
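
/*
 * Pairing note (illustrative): xfs_save_resvblks() stashes the reserve pool
 * and shrinks it to zero before a freeze or rw->ro remount, and
 * xfs_restore_resvblks() later rebuilds it from the stashed value (or the
 * default if nothing was stashed) on thaw or ro->rw remount.
 */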

/*
 * Trigger writeback of all the dirty metadata in the file system.
 *
 * This ensures that the metadata is written to its location on disk rather
 * than just existing in transactions in the log. This means after a quiesce
 * there is no log replay required to write the inodes to disk - this is the
 * primary difference between a sync and a quiesce.
 *
 * Note: xfs_log_quiesce() stops background log work - the callers must ensure
 * it is started again when appropriate.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* force the log to unpin objects from the now complete transactions */
	xfs_log_force(mp, XFS_LOG_SYNC);

	/* reclaim inodes to do any IO before the freeze completes */
	xfs_reclaim_inodes(mp, 0);
	xfs_reclaim_inodes(mp, SYNC_WAIT);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp);
	if (error)
		xfs_warn(mp, "xfs_quiesce_attr: failed to log sb changes. "
				"Frozen image may not be consistent.");
	/*
	 * Just warn here till VFS can correctly support
	 * read-only remount without racing.
	 */
	WARN_ON(atomic_read(&mp->m_active_trans) != 0);

	xfs_log_quiesce(mp);
}

STATIC int
xfs_test_remount_options(
	struct super_block	*sb,
	char			*options)
{
	int			error = 0;
	struct xfs_mount	*tmp_mp;

	tmp_mp = kmem_zalloc(sizeof(*tmp_mp), KM_MAYFAIL);
	if (!tmp_mp)
		return -ENOMEM;

	tmp_mp->m_super = sb;
	error = xfs_parseargs(tmp_mp, options);
	xfs_free_fsname(tmp_mp);
	kmem_free(tmp_mp);

	return error;
}

STATIC int
xfs_fs_remount(
	struct super_block	*sb,
	int			*flags,
	char			*options)
{
	struct xfs_mount	*mp = XFS_M(sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	substring_t		args[MAX_OPT_ARGS];
	char			*p;
	int			error;

	/* First, check for complete junk; i.e. invalid options */
	error = xfs_test_remount_options(sb, options);
	if (error)
		return error;

	sync_filesystem(sb);
	while ((p = strsep(&options, ",")) != NULL) {
		int token;

		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_inode64:
			mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
			mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
			break;
		case Opt_inode32:
			mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
			mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
			break;
		default:
			/*
			 * Logically we would return an error here to prevent
			 * users from believing they might have changed
			 * mount options using remount which can't be changed.
			 *
			 * But unfortunately mount(8) adds all options from
			 * mtab and fstab to the mount arguments in some cases
			 * so we can't blindly reject options, but have to
			 * check for each specified option if it actually
			 * differs from the currently set option and only
			 * reject it if that's the case.
			 *
			 * Until that is implemented we return success for
			 * every remount request, and silently ignore all
			 * options that we can't actually change.
			 */
#if 0
			xfs_info(mp,
		"mount option \"%s\" not supported for remount", p);
			return -EINVAL;
#else
			break;
#endif
		}
	}

	/* ro -> rw */
	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & SB_RDONLY)) {
		if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
			xfs_warn(mp,
		"ro->rw transition prohibited on norecovery mount");
			return -EINVAL;
		}

		if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
		    xfs_sb_has_ro_compat_feature(sbp,
					XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
			xfs_warn(mp,
"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
				(sbp->sb_features_ro_compat &
					XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
			return -EINVAL;
		}

		mp->m_flags &= ~XFS_MOUNT_RDONLY;

		/*
		 * If this is the first remount to writeable state we
		 * might have some superblock changes to update.
		 */
		if (mp->m_update_sb) {
			error = xfs_sync_sb(mp, false);
			if (error) {
				xfs_warn(mp, "failed to write sb changes");
				return error;
			}
			mp->m_update_sb = false;
		}

		/*
		 * Fill out the reserve pool if it is empty. Use the stashed
		 * value if it is non-zero, otherwise go with the default.
		 */
		xfs_restore_resvblks(mp);
		xfs_log_work_queue(mp);

		/* Recover any CoW blocks that never got remapped. */
		error = xfs_reflink_recover_cow(mp);
		if (error) {
			xfs_err(mp,
	"Error %d recovering leftover CoW allocations.", error);
			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			return error;
		}
		xfs_start_block_reaping(mp);

		/* Create the per-AG metadata reservation pool. */
		error = xfs_fs_reserve_ag_blocks(mp);
		if (error && error != -ENOSPC)
			return error;
	}

	/* rw -> ro */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & SB_RDONLY)) {
		/*
		 * Cancel background eofb scanning so it cannot race with the
		 * final log force+buftarg wait and deadlock the remount.
		 */
		xfs_stop_block_reaping(mp);

		/* Get rid of any leftover CoW reservations... */
		error = xfs_icache_free_cowblocks(mp, NULL);
		if (error) {
			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			return error;
		}

		/* Free the per-AG metadata reservation pool. */
		error = xfs_fs_unreserve_ag_blocks(mp);
		if (error) {
			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			return error;
		}

		/*
		 * Before we sync the metadata, we need to free up the reserve
		 * block pool so that the used block count in the superblock on
		 * disk is correct at the end of the remount. Stash the current
		 * reserve pool size so that if we get remounted rw, we can
		 * return it to the same size.
		 */
		xfs_save_resvblks(mp);

		xfs_quiesce_attr(mp);
		mp->m_flags |= XFS_MOUNT_RDONLY;
	}

	return 0;
}

/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done sync the superblock
 * to the log to dirty it in case of a crash while frozen. This ensures that we
 * will recover the unlinked inode lists on the next mount.
 */
STATIC int
xfs_fs_freeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_stop_block_reaping(mp);
	xfs_save_resvblks(mp);
	xfs_quiesce_attr(mp);
	return xfs_sync_sb(mp, true);
}

STATIC int
xfs_fs_unfreeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);
	xfs_start_block_reaping(mp);
	return 0;
}

STATIC int
xfs_fs_show_options(
	struct seq_file		*m,
	struct dentry		*root)
{
	xfs_showargs(XFS_M(root->d_sb), m);
	return 0;
}

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount	*mp)
{
	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);

	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_sb_version_haslogv2(&mp->m_sb)) {
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			xfs_warn(mp,
		"logbuf size must be greater than or equal to log stripe size");
			return -EINVAL;
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			xfs_warn(mp,
		"logbuf size for version 1 logs must be 16K or 32K");
			return -EINVAL;
		}
	}

	/*
	 * V5 filesystems always use attr2 format for attributes.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb) &&
	    (mp->m_flags & XFS_MOUNT_NOATTR2)) {
		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
			     "attr2 is always enabled for V5 filesystems.");
		return -EINVAL;
	}

	/*
	 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
	 * told by noattr2 to turn it off
	 */
	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_NOATTR2))
		mp->m_flags |= XFS_MOUNT_ATTR2;

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
		xfs_warn(mp,
			"cannot mount a read-only filesystem as read-write");
		return -EROFS;
	}

	if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
	    (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE)) &&
	    !xfs_sb_version_has_pquotino(&mp->m_sb)) {
		xfs_warn(mp,
		  "Super block does not support project and group quota together");
		return -EINVAL;
	}

	return 0;
}
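
/*
 * Worked example for the v2 log check above (hypothetical numbers): with a
 * log stripe unit of 64k and no logbsize= option, m_logbsize is bumped to
 * 64k automatically; an explicit logbsize=32k on the same filesystem would
 * fail the mount with -EINVAL.
 */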

static int
xfs_init_percpu_counters(
	struct xfs_mount	*mp)
{
	int		error;

	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
	if (error)
		return -ENOMEM;

	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
	if (error)
		goto free_icount;

	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
	if (error)
		goto free_ifree;

	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
	if (error)
		goto free_fdblocks;

	return 0;

free_fdblocks:
	percpu_counter_destroy(&mp->m_fdblocks);
free_ifree:
	percpu_counter_destroy(&mp->m_ifree);
free_icount:
	percpu_counter_destroy(&mp->m_icount);
	return -ENOMEM;
}

void
xfs_reinit_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
}

static void
xfs_destroy_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_destroy(&mp->m_icount);
	percpu_counter_destroy(&mp->m_ifree);
	percpu_counter_destroy(&mp->m_fdblocks);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
	percpu_counter_destroy(&mp->m_delalloc_blks);
}

static struct xfs_mount *
xfs_mount_alloc(
	struct super_block	*sb)
{
	struct xfs_mount	*mp;

	mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
	if (!mp)
		return NULL;

	mp->m_super = sb;
	spin_lock_init(&mp->m_sb_lock);
	spin_lock_init(&mp->m_agirotor_lock);
	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
	spin_lock_init(&mp->m_perag_lock);
	mutex_init(&mp->m_growlock);
	atomic_set(&mp->m_active_trans, 0);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
	INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
	INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
	mp->m_kobj.kobject.kset = xfs_kset;
	/*
	 * We don't create the finobt per-ag space reservation until after log
	 * recovery, so we must set this to true so that an ifree transaction
	 * started during log recovery will not depend on space reservations
	 * for finobt expansion.
	 */
	mp->m_finobt_nores = true;
	return mp;
}


STATIC int
xfs_fs_fill_super(
	struct super_block	*sb,
	void			*data,
	int			silent)
{
	struct inode		*root;
	struct xfs_mount	*mp = NULL;
	int			flags = 0, error = -ENOMEM;

	/*
	 * allocate mp and do all low-level struct initializations before we
	 * attach it to the super
	 */
	mp = xfs_mount_alloc(sb);
	if (!mp)
		goto out;
	sb->s_fs_info = mp;

	error = xfs_parseargs(mp, (char *)data);
	if (error)
		goto out_free_fsname;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	sb->s_op = &xfs_super_operations;

	/*
	 * Delay mount work if the debug hook is set. This is debug
	 * instrumentation to coordinate simulation of xfs mount failures with
	 * VFS superblock operations.
	 */
1634 	if (xfs_globals.mount_delay) {
1635 		xfs_notice(mp, "Delaying mount for %d seconds.",
1636 			xfs_globals.mount_delay);
1637 		msleep(xfs_globals.mount_delay * 1000);
1638 	}
1639 
1640 	if (silent)
1641 		flags |= XFS_MFSI_QUIET;
1642 
1643 	error = xfs_open_devices(mp);
1644 	if (error)
1645 		goto out_free_fsname;
1646 
1647 	error = xfs_init_mount_workqueues(mp);
1648 	if (error)
1649 		goto out_close_devices;
1650 
1651 	error = xfs_init_percpu_counters(mp);
1652 	if (error)
1653 		goto out_destroy_workqueues;
1654 
1655 	/* Allocate stats memory before we do operations that might use it */
1656 	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
1657 	if (!mp->m_stats.xs_stats) {
1658 		error = -ENOMEM;
1659 		goto out_destroy_counters;
1660 	}
1661 
1662 	error = xfs_readsb(mp, flags);
1663 	if (error)
1664 		goto out_free_stats;
1665 
1666 	error = xfs_finish_flags(mp);
1667 	if (error)
1668 		goto out_free_sb;
1669 
1670 	error = xfs_setup_devices(mp);
1671 	if (error)
1672 		goto out_free_sb;
1673 
1674 	error = xfs_filestream_mount(mp);
1675 	if (error)
1676 		goto out_free_sb;
1677 
1678 	/*
1679 	 * we must configure the block size in the superblock before we run the
1680 	 * full mount process as the mount process can lookup and cache inodes.
1681 	 */
1682 	sb->s_magic = XFS_SUPER_MAGIC;
1683 	sb->s_blocksize = mp->m_sb.sb_blocksize;
1684 	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1685 	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
1686 	sb->s_max_links = XFS_MAXLINK;
1687 	sb->s_time_gran = 1;
1688 	set_posix_acl_flag(sb);
1689 
1690 	/* version 5 superblocks support inode version counters. */
1691 	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
1692 		sb->s_flags |= SB_I_VERSION;
1693 
1694 	if (mp->m_flags & XFS_MOUNT_DAX) {
1695 		bool rtdev_is_dax = false, datadev_is_dax;
1696 
1697 		xfs_warn(mp,
1698 		"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
1699 
1700 		datadev_is_dax = bdev_dax_supported(mp->m_ddev_targp->bt_bdev,
1701 			sb->s_blocksize);
1702 		if (mp->m_rtdev_targp)
1703 			rtdev_is_dax = bdev_dax_supported(
1704 				mp->m_rtdev_targp->bt_bdev, sb->s_blocksize);
1705 		if (!rtdev_is_dax && !datadev_is_dax) {
1706 			xfs_alert(mp,
1707 			"DAX unsupported by block device. Turning off DAX.");
1708 			mp->m_flags &= ~XFS_MOUNT_DAX;
1709 		}
1710 		if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1711 			xfs_alert(mp,
1712 		"DAX and reflink cannot be used together!");
1713 			error = -EINVAL;
1714 			goto out_filestream_unmount;
1715 		}
1716 	}
1717 
1718 	if (mp->m_flags & XFS_MOUNT_DISCARD) {
1719 		struct request_queue *q = bdev_get_queue(sb->s_bdev);
1720 
1721 		if (!blk_queue_discard(q)) {
1722 			xfs_warn(mp, "mounting with \"discard\" option, but "
1723 					"the device does not support discard");
1724 			mp->m_flags &= ~XFS_MOUNT_DISCARD;
1725 		}
1726 	}
1727 
1728 	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1729 		if (mp->m_sb.sb_rblocks) {
1730 			xfs_alert(mp,
1731 	"reflink not compatible with realtime device!");
1732 			error = -EINVAL;
1733 			goto out_filestream_unmount;
1734 		}
1735 
1736 		if (xfs_globals.always_cow) {
1737 			xfs_info(mp, "using DEBUG-only always_cow mode.");
1738 			mp->m_always_cow = true;
1739 		}
1740 	}
1741 
1742 	if (xfs_sb_version_hasrmapbt(&mp->m_sb) && mp->m_sb.sb_rblocks) {
1743 		xfs_alert(mp,
1744 	"reverse mapping btree not compatible with realtime device!");
1745 		error = -EINVAL;
1746 		goto out_filestream_unmount;
1747 	}
1748 
1749 	error = xfs_mountfs(mp);
1750 	if (error)
1751 		goto out_filestream_unmount;
1752 
1753 	root = igrab(VFS_I(mp->m_rootip));
1754 	if (!root) {
1755 		error = -ENOENT;
1756 		goto out_unmount;
1757 	}
1758 	sb->s_root = d_make_root(root);
1759 	if (!sb->s_root) {
1760 		error = -ENOMEM;
1761 		goto out_unmount;
1762 	}
1763 
1764 	return 0;
1765 
1766  out_filestream_unmount:
1767 	xfs_filestream_unmount(mp);
1768  out_free_sb:
1769 	xfs_freesb(mp);
1770  out_free_stats:
1771 	free_percpu(mp->m_stats.xs_stats);
1772  out_destroy_counters:
1773 	xfs_destroy_percpu_counters(mp);
1774  out_destroy_workqueues:
1775 	xfs_destroy_mount_workqueues(mp);
1776  out_close_devices:
1777 	xfs_close_devices(mp);
1778  out_free_fsname:
1779 	sb->s_fs_info = NULL;
1780 	xfs_free_fsname(mp);
1781 	kfree(mp);
1782  out:
1783 	return error;
1784 
1785  out_unmount:
1786 	xfs_filestream_unmount(mp);
1787 	xfs_unmountfs(mp);
1788 	goto out_free_sb;
1789 }
1790 
1791 STATIC void
1792 xfs_fs_put_super(
1793 	struct super_block	*sb)
1794 {
1795 	struct xfs_mount	*mp = XFS_M(sb);
1796 
1797 	/* if ->fill_super failed, we have no mount to tear down */
1798 	if (!sb->s_fs_info)
1799 		return;
1800 
1801 	xfs_notice(mp, "Unmounting Filesystem");
1802 	xfs_filestream_unmount(mp);
1803 	xfs_unmountfs(mp);
1804 
1805 	xfs_freesb(mp);
1806 	free_percpu(mp->m_stats.xs_stats);
1807 	xfs_destroy_percpu_counters(mp);
1808 	xfs_destroy_mount_workqueues(mp);
1809 	xfs_close_devices(mp);
1810 
1811 	sb->s_fs_info = NULL;
1812 	xfs_free_fsname(mp);
1813 	kfree(mp);
1814 }
1815 
1816 STATIC struct dentry *
1817 xfs_fs_mount(
1818 	struct file_system_type	*fs_type,
1819 	int			flags,
1820 	const char		*dev_name,
1821 	void			*data)
1822 {
1823 	return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);
1824 }
1825 
1826 static long
1827 xfs_fs_nr_cached_objects(
1828 	struct super_block	*sb,
1829 	struct shrink_control	*sc)
1830 {
1831 	/* Paranoia: catch incorrect calls during mount setup or teardown */
1832 	if (WARN_ON_ONCE(!sb->s_fs_info))
1833 		return 0;
1834 	return xfs_reclaim_inodes_count(XFS_M(sb));
1835 }
1836 
1837 static long
1838 xfs_fs_free_cached_objects(
1839 	struct super_block	*sb,
1840 	struct shrink_control	*sc)
1841 {
1842 	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
1843 }
1844 
1845 static const struct super_operations xfs_super_operations = {
1846 	.alloc_inode		= xfs_fs_alloc_inode,
1847 	.destroy_inode		= xfs_fs_destroy_inode,
1848 	.dirty_inode		= xfs_fs_dirty_inode,
1849 	.drop_inode		= xfs_fs_drop_inode,
1850 	.put_super		= xfs_fs_put_super,
1851 	.sync_fs		= xfs_fs_sync_fs,
1852 	.freeze_fs		= xfs_fs_freeze,
1853 	.unfreeze_fs		= xfs_fs_unfreeze,
1854 	.statfs			= xfs_fs_statfs,
1855 	.remount_fs		= xfs_fs_remount,
1856 	.show_options		= xfs_fs_show_options,
1857 	.nr_cached_objects	= xfs_fs_nr_cached_objects,
1858 	.free_cached_objects	= xfs_fs_free_cached_objects,
1859 };
1860 
1861 static struct file_system_type xfs_fs_type = {
1862 	.owner			= THIS_MODULE,
1863 	.name			= "xfs",
1864 	.mount			= xfs_fs_mount,
1865 	.kill_sb		= kill_block_super,
1866 	.fs_flags		= FS_REQUIRES_DEV,
1867 };
1868 MODULE_ALIAS_FS("xfs");
1869 
1870 STATIC int __init
1871 xfs_init_zones(void)
1872 {
	if (bioset_init(&xfs_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
			offsetof(struct xfs_ioend, io_inline_bio),
			BIOSET_NEED_BVECS))
		goto out;

	xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
						"xfs_log_ticket");
	if (!xfs_log_ticket_zone)
		goto out_free_ioend_bioset;

	xfs_bmap_free_item_zone = kmem_zone_init(
			sizeof(struct xfs_extent_free_item),
			"xfs_bmap_free_item");
	if (!xfs_bmap_free_item_zone)
		goto out_destroy_log_ticket_zone;

	xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
						"xfs_btree_cur");
	if (!xfs_btree_cur_zone)
		goto out_destroy_bmap_free_item_zone;

	xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t),
						"xfs_da_state");
	if (!xfs_da_state_zone)
		goto out_destroy_btree_cur_zone;

	xfs_ifork_zone = kmem_zone_init(sizeof(struct xfs_ifork), "xfs_ifork");
	if (!xfs_ifork_zone)
		goto out_destroy_da_state_zone;

	xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
	if (!xfs_trans_zone)
		goto out_destroy_ifork_zone;

	/*
	 * Buf log items are allocated at the maximum size possible under
	 * XFS.  This wastes a little bit of memory, but it is much
	 * faster.
	 */
	xfs_buf_item_zone = kmem_zone_init(sizeof(struct xfs_buf_log_item),
					   "xfs_buf_item");
	if (!xfs_buf_item_zone)
		goto out_destroy_trans_zone;

	xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
			((XFS_EFD_MAX_FAST_EXTENTS - 1) *
				 sizeof(xfs_extent_t))), "xfs_efd_item");
	if (!xfs_efd_zone)
		goto out_destroy_buf_item_zone;

	xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
			((XFS_EFI_MAX_FAST_EXTENTS - 1) *
				sizeof(xfs_extent_t))), "xfs_efi_item");
	if (!xfs_efi_zone)
		goto out_destroy_efd_zone;

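	/*
	 * The KM_ZONE_* flags translate to slab cache flags:
	 * KM_ZONE_HWALIGN -> SLAB_HWCACHE_ALIGN,
	 * KM_ZONE_RECLAIM -> SLAB_RECLAIM_ACCOUNT,
	 * KM_ZONE_SPREAD -> SLAB_MEM_SPREAD and
	 * KM_ZONE_ACCOUNT -> SLAB_ACCOUNT.
	 */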
	xfs_inode_zone =
		kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
			KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD |
			KM_ZONE_ACCOUNT, xfs_fs_inode_init_once);
	if (!xfs_inode_zone)
		goto out_destroy_efi_zone;

	xfs_ili_zone =
		kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
					KM_ZONE_SPREAD, NULL);
	if (!xfs_ili_zone)
		goto out_destroy_inode_zone;

	xfs_icreate_zone = kmem_zone_init(sizeof(struct xfs_icreate_item),
					"xfs_icr");
	if (!xfs_icreate_zone)
		goto out_destroy_ili_zone;

	xfs_rud_zone = kmem_zone_init(sizeof(struct xfs_rud_log_item),
			"xfs_rud_item");
	if (!xfs_rud_zone)
		goto out_destroy_icreate_zone;

	xfs_rui_zone = kmem_zone_init(
			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
			"xfs_rui_item");
	if (!xfs_rui_zone)
		goto out_destroy_rud_zone;

	xfs_cud_zone = kmem_zone_init(sizeof(struct xfs_cud_log_item),
			"xfs_cud_item");
	if (!xfs_cud_zone)
		goto out_destroy_rui_zone;

	xfs_cui_zone = kmem_zone_init(
			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
			"xfs_cui_item");
	if (!xfs_cui_zone)
		goto out_destroy_cud_zone;

	xfs_bud_zone = kmem_zone_init(sizeof(struct xfs_bud_log_item),
			"xfs_bud_item");
	if (!xfs_bud_zone)
		goto out_destroy_cui_zone;

	xfs_bui_zone = kmem_zone_init(
			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
			"xfs_bui_item");
	if (!xfs_bui_zone)
		goto out_destroy_bud_zone;

	return 0;

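	/*
	 * Unwind labels: each one destroys exactly the zones created
	 * before the goto that targets it, in reverse order of
	 * construction.
	 */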
 out_destroy_bud_zone:
	kmem_zone_destroy(xfs_bud_zone);
 out_destroy_cui_zone:
	kmem_zone_destroy(xfs_cui_zone);
 out_destroy_cud_zone:
	kmem_zone_destroy(xfs_cud_zone);
 out_destroy_rui_zone:
	kmem_zone_destroy(xfs_rui_zone);
 out_destroy_rud_zone:
	kmem_zone_destroy(xfs_rud_zone);
 out_destroy_icreate_zone:
	kmem_zone_destroy(xfs_icreate_zone);
 out_destroy_ili_zone:
	kmem_zone_destroy(xfs_ili_zone);
 out_destroy_inode_zone:
	kmem_zone_destroy(xfs_inode_zone);
 out_destroy_efi_zone:
	kmem_zone_destroy(xfs_efi_zone);
 out_destroy_efd_zone:
	kmem_zone_destroy(xfs_efd_zone);
 out_destroy_buf_item_zone:
	kmem_zone_destroy(xfs_buf_item_zone);
 out_destroy_trans_zone:
	kmem_zone_destroy(xfs_trans_zone);
 out_destroy_ifork_zone:
	kmem_zone_destroy(xfs_ifork_zone);
 out_destroy_da_state_zone:
	kmem_zone_destroy(xfs_da_state_zone);
 out_destroy_btree_cur_zone:
	kmem_zone_destroy(xfs_btree_cur_zone);
 out_destroy_bmap_free_item_zone:
	kmem_zone_destroy(xfs_bmap_free_item_zone);
 out_destroy_log_ticket_zone:
	kmem_zone_destroy(xfs_log_ticket_zone);
 out_free_ioend_bioset:
	bioset_exit(&xfs_ioend_bioset);
 out:
	return -ENOMEM;
}

STATIC void
xfs_destroy_zones(void)
{
	/*
	 * Make sure all delayed RCU frees have completed before we
	 * destroy the caches; inodes are freed via call_rcu().
	 */
	rcu_barrier();
	kmem_zone_destroy(xfs_bui_zone);
	kmem_zone_destroy(xfs_bud_zone);
	kmem_zone_destroy(xfs_cui_zone);
	kmem_zone_destroy(xfs_cud_zone);
	kmem_zone_destroy(xfs_rui_zone);
	kmem_zone_destroy(xfs_rud_zone);
	kmem_zone_destroy(xfs_icreate_zone);
	kmem_zone_destroy(xfs_ili_zone);
	kmem_zone_destroy(xfs_inode_zone);
	kmem_zone_destroy(xfs_efi_zone);
	kmem_zone_destroy(xfs_efd_zone);
	kmem_zone_destroy(xfs_buf_item_zone);
	kmem_zone_destroy(xfs_trans_zone);
	kmem_zone_destroy(xfs_ifork_zone);
	kmem_zone_destroy(xfs_da_state_zone);
	kmem_zone_destroy(xfs_btree_cur_zone);
	kmem_zone_destroy(xfs_bmap_free_item_zone);
	kmem_zone_destroy(xfs_log_ticket_zone);
	bioset_exit(&xfs_ioend_bioset);
}

STATIC int __init
xfs_init_workqueues(void)
{
	/*
	 * The allocation workqueue can be used in memory reclaim situations
	 * (writepage path), and parallelism is only limited by the number of
	 * AGs in all the filesystems mounted. Hence use the default large
	 * max_active value for this workqueue.
	 */
	xfs_alloc_wq = alloc_workqueue("xfsalloc",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0);
	if (!xfs_alloc_wq)
		return -ENOMEM;

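	/*
	 * WQ_MEM_RECLAIM above guarantees a rescuer thread, so the
	 * allocation workqueue can always make forward progress under
	 * memory pressure; a max_active of 0 selects the default limit.
	 * The discard workqueue below is WQ_UNBOUND: its work items may
	 * block for long periods in the block layer and gain nothing
	 * from per-cpu locality.
	 */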
	xfs_discard_wq = alloc_workqueue("xfsdiscard", WQ_UNBOUND, 0);
	if (!xfs_discard_wq)
		goto out_free_alloc_wq;

	return 0;
out_free_alloc_wq:
	destroy_workqueue(xfs_alloc_wq);
	return -ENOMEM;
}

STATIC void
xfs_destroy_workqueues(void)
{
	destroy_workqueue(xfs_discard_wq);
	destroy_workqueue(xfs_alloc_wq);
}

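/*
 * Module entry point.  Every step that can fail is unwound through the
 * labels at the bottom in reverse order, so a partially completed
 * initialisation never leaks resources.
 */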
STATIC int __init
init_xfs_fs(void)
{
	int			error;

	xfs_check_ondisk_structs();

	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");

	xfs_dir_startup();

	error = xfs_init_zones();
	if (error)
		goto out;

	error = xfs_init_workqueues();
	if (error)
		goto out_destroy_zones;

	error = xfs_mru_cache_init();
	if (error)
		goto out_destroy_wq;

	error = xfs_buf_init();
	if (error)
		goto out_mru_cache_uninit;

	error = xfs_init_procfs();
	if (error)
		goto out_buf_terminate;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;

	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
	if (!xfs_kset) {
		error = -ENOMEM;
		goto out_sysctl_unregister;
	}

	xfsstats.xs_kobj.kobject.kset = xfs_kset;

	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
	if (!xfsstats.xs_stats) {
		error = -ENOMEM;
		goto out_kset_unregister;
	}

	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
			       "stats");
	if (error)
		goto out_free_stats;

#ifdef DEBUG
	xfs_dbg_kobj.kobject.kset = xfs_kset;
	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
	if (error)
		goto out_remove_stats_kobj;
#endif

	error = xfs_qm_init();
	if (error)
		goto out_remove_dbg_kobj;

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto out_qm_exit;
	return 0;

 out_qm_exit:
	xfs_qm_exit();
 out_remove_dbg_kobj:
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
 out_remove_stats_kobj:
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
 out_free_stats:
	free_percpu(xfsstats.xs_stats);
 out_kset_unregister:
	kset_unregister(xfs_kset);
 out_sysctl_unregister:
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_buf_terminate:
	xfs_buf_terminate();
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
 out_destroy_wq:
	xfs_destroy_workqueues();
 out_destroy_zones:
	xfs_destroy_zones();
 out:
	return error;
}

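/*
 * The exit path runs only after the last XFS filesystem has been
 * unmounted (each active superblock pins THIS_MODULE via
 * xfs_fs_type.owner), so teardown is simply init_xfs_fs() in reverse,
 * plus xfs_uuid_table_free() for the table populated at mount time.
 */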
STATIC void __exit
exit_xfs_fs(void)
{
	xfs_qm_exit();
	unregister_filesystem(&xfs_fs_type);
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
	free_percpu(xfsstats.xs_stats);
	kset_unregister(xfs_kset);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_buf_terminate();
	xfs_mru_cache_uninit();
	xfs_destroy_workqueues();
	xfs_destroy_zones();
	xfs_uuid_table_free();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");