1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6
7 #include "xfs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_sb.h"
13 #include "xfs_mount.h"
14 #include "xfs_inode.h"
15 #include "xfs_btree.h"
16 #include "xfs_bmap.h"
17 #include "xfs_alloc.h"
18 #include "xfs_fsops.h"
19 #include "xfs_trans.h"
20 #include "xfs_buf_item.h"
21 #include "xfs_log.h"
22 #include "xfs_log_priv.h"
23 #include "xfs_dir2.h"
24 #include "xfs_extfree_item.h"
25 #include "xfs_mru_cache.h"
26 #include "xfs_inode_item.h"
27 #include "xfs_icache.h"
28 #include "xfs_trace.h"
29 #include "xfs_icreate_item.h"
30 #include "xfs_filestream.h"
31 #include "xfs_quota.h"
32 #include "xfs_sysfs.h"
33 #include "xfs_ondisk.h"
34 #include "xfs_rmap_item.h"
35 #include "xfs_refcount_item.h"
36 #include "xfs_bmap_item.h"
37 #include "xfs_reflink.h"
38 #include "xfs_pwork.h"
39 #include "xfs_ag.h"
40 #include "xfs_defer.h"
41 #include "xfs_attr_item.h"
42 #include "xfs_xattr.h"
43 #include "xfs_iunlink_item.h"
44 #include "xfs_dahash_test.h"
45 #include "xfs_rtbitmap.h"
46 #include "xfs_exchmaps_item.h"
47 #include "xfs_parent.h"
48 #include "xfs_rtalloc.h"
49 #include "scrub/stats.h"
50 #include "scrub/rcbag_btree.h"
51
52 #include <linux/magic.h>
53 #include <linux/fs_context.h>
54 #include <linux/fs_parser.h>
55
56 static const struct super_operations xfs_super_operations;
57
58 static struct dentry *xfs_debugfs; /* top-level xfs debugfs dir */
59 static struct kset *xfs_kset; /* top-level xfs sysfs dir */
60 #ifdef DEBUG
61 static struct xfs_kobj xfs_dbg_kobj; /* global debug sysfs attrs */
62 #endif
63
64 enum xfs_dax_mode {
65 XFS_DAX_INODE = 0,
66 XFS_DAX_ALWAYS = 1,
67 XFS_DAX_NEVER = 2,
68 };
69
70 /* Were quota mount options provided? Must use the upper 16 bits of qflags. */
71 #define XFS_QFLAGS_MNTOPTS (1U << 31)
72
73 static void
74 xfs_mount_set_dax_mode(
75 struct xfs_mount *mp,
76 enum xfs_dax_mode mode)
77 {
78 switch (mode) {
79 case XFS_DAX_INODE:
80 mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER);
81 break;
82 case XFS_DAX_ALWAYS:
83 mp->m_features |= XFS_FEAT_DAX_ALWAYS;
84 mp->m_features &= ~XFS_FEAT_DAX_NEVER;
85 break;
86 case XFS_DAX_NEVER:
87 mp->m_features |= XFS_FEAT_DAX_NEVER;
88 mp->m_features &= ~XFS_FEAT_DAX_ALWAYS;
89 break;
90 }
91 }
92
93 static const struct constant_table dax_param_enums[] = {
94 {"inode", XFS_DAX_INODE },
95 {"always", XFS_DAX_ALWAYS },
96 {"never", XFS_DAX_NEVER },
97 {}
98 };
99
100 /*
101 * Table driven mount option parser.
102 */
103 enum {
104 Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
105 Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
106 Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
107 Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
108 Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
109 Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
110 Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
111 Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
112 Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
113 };
114
115 static const struct fs_parameter_spec xfs_fs_parameters[] = {
116 fsparam_u32("logbufs", Opt_logbufs),
117 fsparam_string("logbsize", Opt_logbsize),
118 fsparam_string("logdev", Opt_logdev),
119 fsparam_string("rtdev", Opt_rtdev),
120 fsparam_flag("wsync", Opt_wsync),
121 fsparam_flag("noalign", Opt_noalign),
122 fsparam_flag("swalloc", Opt_swalloc),
123 fsparam_u32("sunit", Opt_sunit),
124 fsparam_u32("swidth", Opt_swidth),
125 fsparam_flag("nouuid", Opt_nouuid),
126 fsparam_flag("grpid", Opt_grpid),
127 fsparam_flag("nogrpid", Opt_nogrpid),
128 fsparam_flag("bsdgroups", Opt_bsdgroups),
129 fsparam_flag("sysvgroups", Opt_sysvgroups),
130 fsparam_string("allocsize", Opt_allocsize),
131 fsparam_flag("norecovery", Opt_norecovery),
132 fsparam_flag("inode64", Opt_inode64),
133 fsparam_flag("inode32", Opt_inode32),
134 fsparam_flag("ikeep", Opt_ikeep),
135 fsparam_flag("noikeep", Opt_noikeep),
136 fsparam_flag("largeio", Opt_largeio),
137 fsparam_flag("nolargeio", Opt_nolargeio),
138 fsparam_flag("attr2", Opt_attr2),
139 fsparam_flag("noattr2", Opt_noattr2),
140 fsparam_flag("filestreams", Opt_filestreams),
141 fsparam_flag("quota", Opt_quota),
142 fsparam_flag("noquota", Opt_noquota),
143 fsparam_flag("usrquota", Opt_usrquota),
144 fsparam_flag("grpquota", Opt_grpquota),
145 fsparam_flag("prjquota", Opt_prjquota),
146 fsparam_flag("uquota", Opt_uquota),
147 fsparam_flag("gquota", Opt_gquota),
148 fsparam_flag("pquota", Opt_pquota),
149 fsparam_flag("uqnoenforce", Opt_uqnoenforce),
150 fsparam_flag("gqnoenforce", Opt_gqnoenforce),
151 fsparam_flag("pqnoenforce", Opt_pqnoenforce),
152 fsparam_flag("qnoenforce", Opt_qnoenforce),
153 fsparam_flag("discard", Opt_discard),
154 fsparam_flag("nodiscard", Opt_nodiscard),
155 fsparam_flag("dax", Opt_dax),
156 fsparam_enum("dax", Opt_dax_enum, dax_param_enums),
157 {}
158 };
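/*
 * For illustration only (not an exhaustive list): the table above lets the
 * new mount API parse option strings such as
 * "logbufs=8,logbsize=256k,dax=never,usrquota", with fsparam_enum() routing
 * the "dax=<mode>" values through dax_param_enums.
 */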
159
160 struct proc_xfs_info {
161 uint64_t flag;
162 char *str;
163 };
164
165 static int
166 xfs_fs_show_options(
167 struct seq_file *m,
168 struct dentry *root)
169 {
170 static struct proc_xfs_info xfs_info_set[] = {
171 /* the few simple ones we can get from the mount struct */
172 { XFS_FEAT_IKEEP, ",ikeep" },
173 { XFS_FEAT_WSYNC, ",wsync" },
174 { XFS_FEAT_NOALIGN, ",noalign" },
175 { XFS_FEAT_SWALLOC, ",swalloc" },
176 { XFS_FEAT_NOUUID, ",nouuid" },
177 { XFS_FEAT_NORECOVERY, ",norecovery" },
178 { XFS_FEAT_ATTR2, ",attr2" },
179 { XFS_FEAT_FILESTREAMS, ",filestreams" },
180 { XFS_FEAT_GRPID, ",grpid" },
181 { XFS_FEAT_DISCARD, ",discard" },
182 { XFS_FEAT_LARGE_IOSIZE, ",largeio" },
183 { XFS_FEAT_DAX_ALWAYS, ",dax=always" },
184 { XFS_FEAT_DAX_NEVER, ",dax=never" },
185 { 0, NULL }
186 };
187 struct xfs_mount *mp = XFS_M(root->d_sb);
188 struct proc_xfs_info *xfs_infop;
189
190 for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
191 if (mp->m_features & xfs_infop->flag)
192 seq_puts(m, xfs_infop->str);
193 }
194
195 seq_printf(m, ",inode%d", xfs_has_small_inums(mp) ? 32 : 64);
196
197 if (xfs_has_allocsize(mp))
198 seq_printf(m, ",allocsize=%dk",
199 (1 << mp->m_allocsize_log) >> 10);
200
201 if (mp->m_logbufs > 0)
202 seq_printf(m, ",logbufs=%d", mp->m_logbufs);
203 if (mp->m_logbsize > 0)
204 seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);
205
206 if (mp->m_logname)
207 seq_show_option(m, "logdev", mp->m_logname);
208 if (mp->m_rtname)
209 seq_show_option(m, "rtdev", mp->m_rtname);
210
211 if (mp->m_dalign > 0)
212 seq_printf(m, ",sunit=%d",
213 (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
214 if (mp->m_swidth > 0)
215 seq_printf(m, ",swidth=%d",
216 (int)XFS_FSB_TO_BB(mp, mp->m_swidth));
217
218 if (mp->m_qflags & XFS_UQUOTA_ENFD)
219 seq_puts(m, ",usrquota");
220 else if (mp->m_qflags & XFS_UQUOTA_ACCT)
221 seq_puts(m, ",uqnoenforce");
222
223 if (mp->m_qflags & XFS_PQUOTA_ENFD)
224 seq_puts(m, ",prjquota");
225 else if (mp->m_qflags & XFS_PQUOTA_ACCT)
226 seq_puts(m, ",pqnoenforce");
227
228 if (mp->m_qflags & XFS_GQUOTA_ENFD)
229 seq_puts(m, ",grpquota");
230 else if (mp->m_qflags & XFS_GQUOTA_ACCT)
231 seq_puts(m, ",gqnoenforce");
232
233 if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
234 seq_puts(m, ",noquota");
235
236 return 0;
237 }
238
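/*
 * Set the per-AG inode allocation policy bits. On inode64 mounts every AG
 * may hold inodes; on inode32 mounts an AG may hold inodes only if its
 * highest possible inode number still fits in 32 bits, and low-numbered
 * AGs are marked as preferring metadata. Returns true if the AG counts
 * towards the inode32 maxagi limit.
 */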
239 static bool
240 xfs_set_inode_alloc_perag(
241 struct xfs_perag *pag,
242 xfs_ino_t ino,
243 xfs_agnumber_t max_metadata)
244 {
245 if (!xfs_is_inode32(pag_mount(pag))) {
246 set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
247 clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
248 return false;
249 }
250
251 if (ino > XFS_MAXINUMBER_32) {
252 clear_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
253 clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
254 return false;
255 }
256
257 set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
258 if (pag_agno(pag) < max_metadata)
259 set_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
260 else
261 clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
262 return true;
263 }
264
265 /*
266 * Set parameters for inode allocation heuristics, taking into account
267 * filesystem size and inode32/inode64 mount options; i.e. specifically
268 * whether or not XFS_FEAT_SMALL_INUMS is set.
269 *
270 * Inode allocation patterns are altered only if inode32 is requested
271 * (XFS_FEAT_SMALL_INUMS), and the filesystem is sufficiently large.
272 * If altered, XFS_OPSTATE_INODE32 is set as well.
273 *
274 * An agcount independent of that in the mount structure is provided
275 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
276 * to the potentially higher ag count.
277 *
278 * Returns the maximum AG index which may contain inodes.
279 */
280 xfs_agnumber_t
281 xfs_set_inode_alloc(
282 struct xfs_mount *mp,
283 xfs_agnumber_t agcount)
284 {
285 xfs_agnumber_t index;
286 xfs_agnumber_t maxagi = 0;
287 xfs_sb_t *sbp = &mp->m_sb;
288 xfs_agnumber_t max_metadata;
289 xfs_agino_t agino;
290 xfs_ino_t ino;
291
292 /*
293 * Calculate how much should be reserved for inodes to meet
294 * the max inode percentage. Used only for inode32.
295 */
296 if (M_IGEO(mp)->maxicount) {
297 uint64_t icount;
298
299 icount = sbp->sb_dblocks * sbp->sb_imax_pct;
300 do_div(icount, 100);
301 icount += sbp->sb_agblocks - 1;
302 do_div(icount, sbp->sb_agblocks);
303 max_metadata = icount;
304 } else {
305 max_metadata = agcount;
306 }
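/*
 * Worked example with hypothetical geometry: sb_dblocks = 4000000,
 * sb_imax_pct = 25 and sb_agblocks = 250000 give icount = 1000000 blocks,
 * so max_metadata = roundup(1000000 / 250000) = 4, i.e. the first four AGs
 * are preferred for metadata/inodes under inode32.
 */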
307
308 /* Get the last possible inode in the filesystem */
309 agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
310 ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
311
312 /*
313 * If user asked for no more than 32-bit inodes, and the fs is
314 * sufficiently large, set XFS_OPSTATE_INODE32 if we must alter
315 * the allocator to accommodate the request.
316 */
317 if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32)
318 xfs_set_inode32(mp);
319 else
320 xfs_clear_inode32(mp);
321
322 for (index = 0; index < agcount; index++) {
323 struct xfs_perag *pag;
324
325 ino = XFS_AGINO_TO_INO(mp, index, agino);
326
327 pag = xfs_perag_get(mp, index);
328 if (xfs_set_inode_alloc_perag(pag, ino, max_metadata))
329 maxagi++;
330 xfs_perag_put(pag);
331 }
332
333 return xfs_is_inode32(mp) ? maxagi : agcount;
334 }
335
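/*
 * Sanity check a "dax=always" request once the devices are known: fall back
 * to dax=never if no DAX capable device is present or the block size does
 * not match the page size, and reject the mount outright if reflink is
 * enabled on a partitioned DAX device.
 */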
336 static int
337 xfs_setup_dax_always(
338 struct xfs_mount *mp)
339 {
340 if (!mp->m_ddev_targp->bt_daxdev &&
341 (!mp->m_rtdev_targp || !mp->m_rtdev_targp->bt_daxdev)) {
342 xfs_alert(mp,
343 "DAX unsupported by block device. Turning off DAX.");
344 goto disable_dax;
345 }
346
347 if (mp->m_super->s_blocksize != PAGE_SIZE) {
348 xfs_alert(mp,
349 "DAX not supported for blocksize. Turning off DAX.");
350 goto disable_dax;
351 }
352
353 if (xfs_has_reflink(mp) &&
354 bdev_is_partition(mp->m_ddev_targp->bt_bdev)) {
355 xfs_alert(mp,
356 "DAX and reflink cannot work with multi-partitions!");
357 return -EINVAL;
358 }
359
360 return 0;
361
362 disable_dax:
363 xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
364 return 0;
365 }
366
367 STATIC int
368 xfs_blkdev_get(
369 xfs_mount_t *mp,
370 const char *name,
371 struct file **bdev_filep)
372 {
373 int error = 0;
374
375 *bdev_filep = bdev_file_open_by_path(name,
376 BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_RESTRICT_WRITES,
377 mp->m_super, &fs_holder_ops);
378 if (IS_ERR(*bdev_filep)) {
379 error = PTR_ERR(*bdev_filep);
380 *bdev_filep = NULL;
381 xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
382 }
383
384 return error;
385 }
386
387 STATIC void
388 xfs_shutdown_devices(
389 struct xfs_mount *mp)
390 {
391 /*
392 * Udev is triggered whenever anyone closes a block device or unmounts
393 * a file system on a block device.
394 * The default udev rules invoke blkid to read the fs super and create
395 * symlinks to the bdev under /dev/disk. For this, it uses buffered
396 * reads through the page cache.
397 *
398 * xfs_db also uses buffered reads to examine metadata. There is no
399 * coordination between xfs_db and udev, which means that they can run
400 * concurrently. Note there is no coordination between the kernel and
401 * blkid either.
402 *
403 * On a system with 64k pages, the page cache can cache the superblock
404 * and the root inode (and hence the root directory) with the same 64k
405 * page. If udev spawns blkid after the mkfs and the system is busy
406 * enough that it is still running when xfs_db starts up, they'll both
407 * read from the same page in the pagecache.
408 *
409 * The unmount writes updated inode metadata to disk directly. The XFS
410 * buffer cache does not use the bdev pagecache, so it needs to
411 * invalidate that pagecache on unmount. If the above scenario occurs,
412 * the pagecache no longer reflects what's on disk, xfs_db reads the
413 * stale metadata, and fails to find the just-created file /a. Most of the time this succeeds
414 * because closing a bdev invalidates the page cache, but when processes
415 * race, everyone loses.
416 */
417 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
418 blkdev_issue_flush(mp->m_logdev_targp->bt_bdev);
419 invalidate_bdev(mp->m_logdev_targp->bt_bdev);
420 }
421 if (mp->m_rtdev_targp) {
422 blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
423 invalidate_bdev(mp->m_rtdev_targp->bt_bdev);
424 }
425 blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
426 invalidate_bdev(mp->m_ddev_targp->bt_bdev);
427 }
428
429 /*
430 * The file system configurations are:
431 * (1) device (partition) with data and internal log
432 * (2) logical volume with data and log subvolumes.
433 * (3) logical volume with data, log, and realtime subvolumes.
434 *
435 * We only have to handle opening the log and realtime volumes here if
436 * they are present. The data subvolume has already been opened by
437 * get_sb_bdev() and is stored in sb->s_bdev.
438 */
439 STATIC int
440 xfs_open_devices(
441 struct xfs_mount *mp)
442 {
443 struct super_block *sb = mp->m_super;
444 struct block_device *ddev = sb->s_bdev;
445 struct file *logdev_file = NULL, *rtdev_file = NULL;
446 int error;
447
448 /*
449 * Open real time and log devices - order is important.
450 */
451 if (mp->m_logname) {
452 error = xfs_blkdev_get(mp, mp->m_logname, &logdev_file);
453 if (error)
454 return error;
455 }
456
457 if (mp->m_rtname) {
458 error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev_file);
459 if (error)
460 goto out_close_logdev;
461
462 if (file_bdev(rtdev_file) == ddev ||
463 (logdev_file &&
464 file_bdev(rtdev_file) == file_bdev(logdev_file))) {
465 xfs_warn(mp,
466 "Cannot mount filesystem with identical rtdev and ddev/logdev.");
467 error = -EINVAL;
468 goto out_close_rtdev;
469 }
470 }
471
472 /*
473 * Setup xfs_mount buffer target pointers
474 */
475 error = -ENOMEM;
476 mp->m_ddev_targp = xfs_alloc_buftarg(mp, sb->s_bdev_file);
477 if (!mp->m_ddev_targp)
478 goto out_close_rtdev;
479
480 if (rtdev_file) {
481 mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev_file);
482 if (!mp->m_rtdev_targp)
483 goto out_free_ddev_targ;
484 }
485
486 if (logdev_file && file_bdev(logdev_file) != ddev) {
487 mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev_file);
488 if (!mp->m_logdev_targp)
489 goto out_free_rtdev_targ;
490 } else {
491 mp->m_logdev_targp = mp->m_ddev_targp;
492 /* Handle won't be used, drop it */
493 if (logdev_file)
494 bdev_fput(logdev_file);
495 }
496
497 return 0;
498
499 out_free_rtdev_targ:
500 if (mp->m_rtdev_targp)
501 xfs_free_buftarg(mp->m_rtdev_targp);
502 out_free_ddev_targ:
503 xfs_free_buftarg(mp->m_ddev_targp);
504 out_close_rtdev:
505 if (rtdev_file)
506 bdev_fput(rtdev_file);
507 out_close_logdev:
508 if (logdev_file)
509 bdev_fput(logdev_file);
510 return error;
511 }
512
513 /*
514 * Setup xfs_mount buffer target pointers based on superblock
515 */
516 STATIC int
517 xfs_setup_devices(
518 struct xfs_mount *mp)
519 {
520 int error;
521
522 error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
523 if (error)
524 return error;
525
526 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
527 unsigned int log_sector_size = BBSIZE;
528
529 if (xfs_has_sector(mp))
530 log_sector_size = mp->m_sb.sb_logsectsize;
531 error = xfs_setsize_buftarg(mp->m_logdev_targp,
532 log_sector_size);
533 if (error)
534 return error;
535 }
536 if (mp->m_rtdev_targp) {
537 error = xfs_setsize_buftarg(mp->m_rtdev_targp,
538 mp->m_sb.sb_sectsize);
539 if (error)
540 return error;
541 }
542
543 return 0;
544 }
545
546 STATIC int
547 xfs_init_mount_workqueues(
548 struct xfs_mount *mp)
549 {
550 mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
551 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
552 1, mp->m_super->s_id);
553 if (!mp->m_buf_workqueue)
554 goto out;
555
556 mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
557 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
558 0, mp->m_super->s_id);
559 if (!mp->m_unwritten_workqueue)
560 goto out_destroy_buf;
561
562 mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
563 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
564 0, mp->m_super->s_id);
565 if (!mp->m_reclaim_workqueue)
566 goto out_destroy_unwritten;
567
568 mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
569 XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
570 0, mp->m_super->s_id);
571 if (!mp->m_blockgc_wq)
572 goto out_destroy_reclaim;
573
574 mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s",
575 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
576 1, mp->m_super->s_id);
577 if (!mp->m_inodegc_wq)
578 goto out_destroy_blockgc;
579
580 mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
581 XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
582 if (!mp->m_sync_workqueue)
583 goto out_destroy_inodegc;
584
585 return 0;
586
587 out_destroy_inodegc:
588 destroy_workqueue(mp->m_inodegc_wq);
589 out_destroy_blockgc:
590 destroy_workqueue(mp->m_blockgc_wq);
591 out_destroy_reclaim:
592 destroy_workqueue(mp->m_reclaim_workqueue);
593 out_destroy_unwritten:
594 destroy_workqueue(mp->m_unwritten_workqueue);
595 out_destroy_buf:
596 destroy_workqueue(mp->m_buf_workqueue);
597 out:
598 return -ENOMEM;
599 }
600
601 STATIC void
602 xfs_destroy_mount_workqueues(
603 struct xfs_mount *mp)
604 {
605 destroy_workqueue(mp->m_sync_workqueue);
606 destroy_workqueue(mp->m_blockgc_wq);
607 destroy_workqueue(mp->m_inodegc_wq);
608 destroy_workqueue(mp->m_reclaim_workqueue);
609 destroy_workqueue(mp->m_unwritten_workqueue);
610 destroy_workqueue(mp->m_buf_workqueue);
611 }
612
613 static void
614 xfs_flush_inodes_worker(
615 struct work_struct *work)
616 {
617 struct xfs_mount *mp = container_of(work, struct xfs_mount,
618 m_flush_inodes_work);
619 struct super_block *sb = mp->m_super;
620
621 if (down_read_trylock(&sb->s_umount)) {
622 sync_inodes_sb(sb);
623 up_read(&sb->s_umount);
624 }
625 }
626
627 /*
628 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
629 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
630 * for IO to complete so that we effectively throttle multiple callers to the
631 * rate at which IO is completing.
632 */
633 void
634 xfs_flush_inodes(
635 struct xfs_mount *mp)
636 {
637 /*
638 * If flush_work() returns true then that means we waited for a flush
639 * which was already in progress. Don't bother running another scan.
640 */
641 if (flush_work(&mp->m_flush_inodes_work))
642 return;
643
644 queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
645 flush_work(&mp->m_flush_inodes_work);
646 }
647
648 /* Catch misguided souls that try to use this interface on XFS */
649 STATIC struct inode *
650 xfs_fs_alloc_inode(
651 struct super_block *sb)
652 {
653 BUG();
654 return NULL;
655 }
656
657 /*
658 * Now that the generic code is guaranteed not to be accessing
659 * the linux inode, we can inactivate and reclaim the inode.
660 */
661 STATIC void
662 xfs_fs_destroy_inode(
663 struct inode *inode)
664 {
665 struct xfs_inode *ip = XFS_I(inode);
666
667 trace_xfs_destroy_inode(ip);
668
669 ASSERT(!rwsem_is_locked(&inode->i_rwsem));
670 XFS_STATS_INC(ip->i_mount, vn_rele);
671 XFS_STATS_INC(ip->i_mount, vn_remove);
672 xfs_inode_mark_reclaimable(ip);
673 }
674
675 static void
676 xfs_fs_dirty_inode(
677 struct inode *inode,
678 int flags)
679 {
680 struct xfs_inode *ip = XFS_I(inode);
681 struct xfs_mount *mp = ip->i_mount;
682 struct xfs_trans *tp;
683
684 if (!(inode->i_sb->s_flags & SB_LAZYTIME))
685 return;
686
687 /*
688 * Only do the timestamp update if the inode is dirty (I_DIRTY_SYNC)
689 * and has dirty timestamp (I_DIRTY_TIME). I_DIRTY_TIME can be passed
690 * in flags possibly together with I_DIRTY_SYNC.
691 */
692 if ((flags & ~I_DIRTY_TIME) != I_DIRTY_SYNC || !(flags & I_DIRTY_TIME))
693 return;
694
695 if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
696 return;
697 xfs_ilock(ip, XFS_ILOCK_EXCL);
698 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
699 xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
700 xfs_trans_commit(tp);
701 }
702
703 /*
704 * Slab object creation initialisation for the XFS inode.
705 * This covers only the idempotent fields in the XFS inode;
706 * all other fields need to be initialised on allocation
707 * from the slab. This avoids the need to repeatedly initialise
708 * fields in the xfs inode that are left in the initialised state
709 * when freeing the inode.
710 */
711 STATIC void
712 xfs_fs_inode_init_once(
713 void *inode)
714 {
715 struct xfs_inode *ip = inode;
716
717 memset(ip, 0, sizeof(struct xfs_inode));
718
719 /* vfs inode */
720 inode_init_once(VFS_I(ip));
721
722 /* xfs inode */
723 atomic_set(&ip->i_pincount, 0);
724 spin_lock_init(&ip->i_flags_lock);
725 init_rwsem(&ip->i_lock);
726 }
727
728 /*
729 * We do an unlocked check for XFS_IDONTCACHE here because we are already
730 * serialised against cache hits here via the inode->i_lock and igrab() in
731 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
732 * racing with us, and it avoids needing to grab a spinlock here for every inode
733 * we drop the final reference on.
734 */
735 STATIC int
736 xfs_fs_drop_inode(
737 struct inode *inode)
738 {
739 struct xfs_inode *ip = XFS_I(inode);
740
741 /*
742 * If this unlinked inode is in the middle of recovery, don't
743 * drop the inode just yet; log recovery will take care of
744 * that. See the comment for this inode flag.
745 */
746 if (ip->i_flags & XFS_IRECOVERY) {
747 ASSERT(xlog_recovery_needed(ip->i_mount->m_log));
748 return 0;
749 }
750
751 return generic_drop_inode(inode);
752 }
753
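/*
 * Final teardown of the mount structure: free whichever buffer targets are
 * still set up, remove the per-mount debugfs directory, and release the
 * log/rt device names along with the xfs_mount itself.
 */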
754 static void
755 xfs_mount_free(
756 struct xfs_mount *mp)
757 {
758 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
759 xfs_free_buftarg(mp->m_logdev_targp);
760 if (mp->m_rtdev_targp)
761 xfs_free_buftarg(mp->m_rtdev_targp);
762 if (mp->m_ddev_targp)
763 xfs_free_buftarg(mp->m_ddev_targp);
764
765 debugfs_remove(mp->m_debugfs);
766 kfree(mp->m_rtname);
767 kfree(mp->m_logname);
768 kfree(mp);
769 }
770
771 STATIC int
772 xfs_fs_sync_fs(
773 struct super_block *sb,
774 int wait)
775 {
776 struct xfs_mount *mp = XFS_M(sb);
777 int error;
778
779 trace_xfs_fs_sync_fs(mp, __return_address);
780
781 /*
782 * Doing anything during the async pass would be counterproductive.
783 */
784 if (!wait)
785 return 0;
786
787 error = xfs_log_force(mp, XFS_LOG_SYNC);
788 if (error)
789 return error;
790
791 if (laptop_mode) {
792 /*
793 * The disk must be active because we're syncing.
794 * We schedule log work now (now that the disk is
795 * active) instead of later (when it might not be).
796 */
797 flush_delayed_work(&mp->m_log->l_work);
798 }
799
800 /*
801 * If we are called with page faults frozen out, it means we are about
802 * to freeze the transaction subsystem. Take the opportunity to shut
803 * down inodegc because once SB_FREEZE_FS is set it's too late to
804 * prevent inactivation races with freeze. The fs doesn't get called
805 * again by the freezing process until after SB_FREEZE_FS has been set,
806 * so it's now or never. Same logic applies to speculative allocation
807 * garbage collection.
808 *
809 * We don't care if this is a normal syncfs call that does this or
810 * freeze that does this - we can run this multiple times without issue
811 * and we won't race with a restart because a restart can only occur
812 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
813 */
814 if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
815 xfs_inodegc_stop(mp);
816 xfs_blockgc_stop(mp);
817 }
818
819 return 0;
820 }
821
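/*
 * Blocks occupied by the internal log, or zero when an external log device
 * is in use (sb_logstart == 0), so statfs does not subtract anything from
 * the data device size in that case.
 */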
822 static xfs_extlen_t
823 xfs_internal_log_size(
824 struct xfs_mount *mp)
825 {
826 if (!mp->m_sb.sb_logstart)
827 return 0;
828 return mp->m_sb.sb_logblocks;
829 }
830
831 static void
832 xfs_statfs_data(
833 struct xfs_mount *mp,
834 struct kstatfs *st)
835 {
836 int64_t fdblocks =
837 percpu_counter_sum(&mp->m_fdblocks);
838
839 /* make sure st->f_bfree does not underflow */
840 st->f_bfree = max(0LL, fdblocks - xfs_fdblocks_unavailable(mp));
841 /*
842 * sb_dblocks can change while growfs is in progress, but nothing cares
843 * whether the old or the new value is reported here.
844 */
845 st->f_blocks = mp->m_sb.sb_dblocks - xfs_internal_log_size(mp);
846 }
847
848 /*
849 * When stat(v)fs is called on a file with the realtime bit set or a directory
850 * with the rtinherit bit, report freespace information for the RT device
851 * instead of the main data device.
852 */
853 static void
854 xfs_statfs_rt(
855 struct xfs_mount *mp,
856 struct kstatfs *st)
857 {
858 st->f_bfree = xfs_rtbxlen_to_blen(mp,
859 percpu_counter_sum_positive(&mp->m_frextents));
860 st->f_blocks = mp->m_sb.sb_rblocks;
861 }
862
863 static void
864 xfs_statfs_inodes(
865 struct xfs_mount *mp,
866 struct kstatfs *st)
867 {
868 uint64_t icount = percpu_counter_sum(&mp->m_icount);
869 uint64_t ifree = percpu_counter_sum(&mp->m_ifree);
870 uint64_t fakeinos = XFS_FSB_TO_INO(mp, st->f_bfree);
871
872 st->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
873 if (M_IGEO(mp)->maxicount)
874 st->f_files = min_t(typeof(st->f_files), st->f_files,
875 M_IGEO(mp)->maxicount);
876
877 /* If sb_icount overshot maxicount, report actual allocation */
878 st->f_files = max_t(typeof(st->f_files), st->f_files,
879 mp->m_sb.sb_icount);
880
881 /* Make sure st->f_ffree does not underflow */
882 st->f_ffree = max_t(int64_t, 0, st->f_files - (icount - ifree));
883 }
884
885 STATIC int
886 xfs_fs_statfs(
887 struct dentry *dentry,
888 struct kstatfs *st)
889 {
890 struct xfs_mount *mp = XFS_M(dentry->d_sb);
891 struct xfs_inode *ip = XFS_I(d_inode(dentry));
892
893 /*
894 * Expedite background inodegc but don't wait. We do not want to block
895 * here waiting hours for a billion extent file to be truncated.
896 */
897 xfs_inodegc_push(mp);
898
899 st->f_type = XFS_SUPER_MAGIC;
900 st->f_namelen = MAXNAMELEN - 1;
901 st->f_bsize = mp->m_sb.sb_blocksize;
902 st->f_fsid = u64_to_fsid(huge_encode_dev(mp->m_ddev_targp->bt_dev));
903
904 xfs_statfs_data(mp, st);
905 xfs_statfs_inodes(mp, st);
906
907 if (XFS_IS_REALTIME_MOUNT(mp) &&
908 (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME)))
909 xfs_statfs_rt(mp, st);
910
911 if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
912 ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
913 (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
914 xfs_qm_statvfs(ip, st);
915
916 /*
917 * XFS does not distinguish between blocks available to privileged and
918 * unprivileged users.
919 */
920 st->f_bavail = st->f_bfree;
921 return 0;
922 }
923
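/*
 * Park the reserve block pool across freeze and ro transitions: remember
 * the current reservation and shrink it to zero so the on-disk free space
 * counters are correct while the filesystem is quiesced, then put it back
 * on thaw/rw via xfs_restore_resvblks().
 */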
924 STATIC void
925 xfs_save_resvblks(struct xfs_mount *mp)
926 {
927 mp->m_resblks_save = mp->m_resblks;
928 xfs_reserve_blocks(mp, 0);
929 }
930
931 STATIC void
932 xfs_restore_resvblks(struct xfs_mount *mp)
933 {
934 uint64_t resblks;
935
936 if (mp->m_resblks_save) {
937 resblks = mp->m_resblks_save;
938 mp->m_resblks_save = 0;
939 } else
940 resblks = xfs_default_resblks(mp);
941
942 xfs_reserve_blocks(mp, resblks);
943 }
944
945 /*
946 * Second stage of a freeze. The data is already frozen so we only
947 * need to take care of the metadata. Once that's done sync the superblock
948 * to the log to dirty it in case of a crash while frozen. This ensures that we
949 * will recover the unlinked inode lists on the next mount.
950 */
951 STATIC int
952 xfs_fs_freeze(
953 struct super_block *sb)
954 {
955 struct xfs_mount *mp = XFS_M(sb);
956 unsigned int flags;
957 int ret;
958
959 /*
960 * The filesystem is now frozen far enough that memory reclaim
961 * cannot safely operate on the filesystem. Hence we need to
962 * set a GFP_NOFS context here to avoid recursion deadlocks.
963 */
964 flags = memalloc_nofs_save();
965 xfs_save_resvblks(mp);
966 ret = xfs_log_quiesce(mp);
967 memalloc_nofs_restore(flags);
968
969 /*
970 * For read-write filesystems, we need to restart the inodegc on error
971 * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not
972 * going to be run to restart it now. We are at SB_FREEZE_FS level
973 * here, so we can restart safely without racing with a stop in
974 * xfs_fs_sync_fs().
975 */
976 if (ret && !xfs_is_readonly(mp)) {
977 xfs_blockgc_start(mp);
978 xfs_inodegc_start(mp);
979 }
980
981 return ret;
982 }
983
984 STATIC int
985 xfs_fs_unfreeze(
986 struct super_block *sb)
987 {
988 struct xfs_mount *mp = XFS_M(sb);
989
990 xfs_restore_resvblks(mp);
991 xfs_log_work_queue(mp);
992
993 /*
994 * Don't reactivate the inodegc worker on a readonly filesystem because
995 * inodes are sent directly to reclaim. Don't reactivate the blockgc
996 * worker because there are no speculative preallocations on a readonly
997 * filesystem.
998 */
999 if (!xfs_is_readonly(mp)) {
1000 xfs_blockgc_start(mp);
1001 xfs_inodegc_start(mp);
1002 }
1003
1004 return 0;
1005 }
1006
1007 /*
1008 * This function fills in xfs_mount_t fields based on mount args.
1009 * Note: the superblock _has_ now been read in.
1010 */
1011 STATIC int
1012 xfs_finish_flags(
1013 struct xfs_mount *mp)
1014 {
1015 /* Fail a mount where the logbuf is smaller than the log stripe */
1016 if (xfs_has_logv2(mp)) {
1017 if (mp->m_logbsize <= 0 &&
1018 mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
1019 mp->m_logbsize = mp->m_sb.sb_logsunit;
1020 } else if (mp->m_logbsize > 0 &&
1021 mp->m_logbsize < mp->m_sb.sb_logsunit) {
1022 xfs_warn(mp,
1023 "logbuf size must be greater than or equal to log stripe size");
1024 return -EINVAL;
1025 }
1026 } else {
1027 /* Fail a mount if the logbuf is larger than 32K */
1028 if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
1029 xfs_warn(mp,
1030 "logbuf size for version 1 logs must be 16K or 32K");
1031 return -EINVAL;
1032 }
1033 }
1034
1035 /*
1036 * V5 filesystems always use attr2 format for attributes.
1037 */
1038 if (xfs_has_crc(mp) && xfs_has_noattr2(mp)) {
1039 xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
1040 "attr2 is always enabled for V5 filesystems.");
1041 return -EINVAL;
1042 }
1043
1044 /*
1045 * prohibit r/w mounts of read-only filesystems
1046 */
1047 if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !xfs_is_readonly(mp)) {
1048 xfs_warn(mp,
1049 "cannot mount a read-only filesystem as read-write");
1050 return -EROFS;
1051 }
1052
1053 if ((mp->m_qflags & XFS_GQUOTA_ACCT) &&
1054 (mp->m_qflags & XFS_PQUOTA_ACCT) &&
1055 !xfs_has_pquotino(mp)) {
1056 xfs_warn(mp,
1057 "Super block does not support project and group quota together");
1058 return -EINVAL;
1059 }
1060
1061 return 0;
1062 }
1063
1064 static int
1065 xfs_init_percpu_counters(
1066 struct xfs_mount *mp)
1067 {
1068 int error;
1069
1070 error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
1071 if (error)
1072 return -ENOMEM;
1073
1074 error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
1075 if (error)
1076 goto free_icount;
1077
1078 error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
1079 if (error)
1080 goto free_ifree;
1081
1082 error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
1083 if (error)
1084 goto free_fdblocks;
1085
1086 error = percpu_counter_init(&mp->m_delalloc_rtextents, 0, GFP_KERNEL);
1087 if (error)
1088 goto free_delalloc;
1089
1090 error = percpu_counter_init(&mp->m_frextents, 0, GFP_KERNEL);
1091 if (error)
1092 goto free_delalloc_rt;
1093
1094 return 0;
1095
1096 free_delalloc_rt:
1097 percpu_counter_destroy(&mp->m_delalloc_rtextents);
1098 free_delalloc:
1099 percpu_counter_destroy(&mp->m_delalloc_blks);
1100 free_fdblocks:
1101 percpu_counter_destroy(&mp->m_fdblocks);
1102 free_ifree:
1103 percpu_counter_destroy(&mp->m_ifree);
1104 free_icount:
1105 percpu_counter_destroy(&mp->m_icount);
1106 return -ENOMEM;
1107 }
1108
1109 void
1110 xfs_reinit_percpu_counters(
1111 struct xfs_mount *mp)
1112 {
1113 percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
1114 percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
1115 percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
1116 percpu_counter_set(&mp->m_frextents, mp->m_sb.sb_frextents);
1117 }
1118
1119 static void
1120 xfs_destroy_percpu_counters(
1121 struct xfs_mount *mp)
1122 {
1123 percpu_counter_destroy(&mp->m_icount);
1124 percpu_counter_destroy(&mp->m_ifree);
1125 percpu_counter_destroy(&mp->m_fdblocks);
1126 ASSERT(xfs_is_shutdown(mp) ||
1127 percpu_counter_sum(&mp->m_delalloc_rtextents) == 0);
1128 percpu_counter_destroy(&mp->m_delalloc_rtextents);
1129 ASSERT(xfs_is_shutdown(mp) ||
1130 percpu_counter_sum(&mp->m_delalloc_blks) == 0);
1131 percpu_counter_destroy(&mp->m_delalloc_blks);
1132 percpu_counter_destroy(&mp->m_frextents);
1133 }
1134
1135 static int
1136 xfs_inodegc_init_percpu(
1137 struct xfs_mount *mp)
1138 {
1139 struct xfs_inodegc *gc;
1140 int cpu;
1141
1142 mp->m_inodegc = alloc_percpu(struct xfs_inodegc);
1143 if (!mp->m_inodegc)
1144 return -ENOMEM;
1145
1146 for_each_possible_cpu(cpu) {
1147 gc = per_cpu_ptr(mp->m_inodegc, cpu);
1148 gc->cpu = cpu;
1149 gc->mp = mp;
1150 init_llist_head(&gc->list);
1151 gc->items = 0;
1152 gc->error = 0;
1153 INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
1154 }
1155 return 0;
1156 }
1157
1158 static void
1159 xfs_inodegc_free_percpu(
1160 struct xfs_mount *mp)
1161 {
1162 if (!mp->m_inodegc)
1163 return;
1164 free_percpu(mp->m_inodegc);
1165 }
1166
1167 static void
1168 xfs_fs_put_super(
1169 struct super_block *sb)
1170 {
1171 struct xfs_mount *mp = XFS_M(sb);
1172
1173 xfs_notice(mp, "Unmounting Filesystem %pU", &mp->m_sb.sb_uuid);
1174 xfs_filestream_unmount(mp);
1175 xfs_unmountfs(mp);
1176
1177 xfs_rtmount_freesb(mp);
1178 xfs_freesb(mp);
1179 xchk_mount_stats_free(mp);
1180 free_percpu(mp->m_stats.xs_stats);
1181 xfs_inodegc_free_percpu(mp);
1182 xfs_destroy_percpu_counters(mp);
1183 xfs_destroy_mount_workqueues(mp);
1184 xfs_shutdown_devices(mp);
1185 }
1186
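/*
 * Superblock shrinker hooks: report the number of reclaimable XFS inodes
 * and reclaim up to sc->nr_to_scan of them on behalf of the VFS.
 */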
1187 static long
1188 xfs_fs_nr_cached_objects(
1189 struct super_block *sb,
1190 struct shrink_control *sc)
1191 {
1192 /* Paranoia: catch incorrect calls during mount setup or teardown */
1193 if (WARN_ON_ONCE(!sb->s_fs_info))
1194 return 0;
1195 return xfs_reclaim_inodes_count(XFS_M(sb));
1196 }
1197
1198 static long
1199 xfs_fs_free_cached_objects(
1200 struct super_block *sb,
1201 struct shrink_control *sc)
1202 {
1203 return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
1204 }
1205
1206 static void
1207 xfs_fs_shutdown(
1208 struct super_block *sb)
1209 {
1210 xfs_force_shutdown(XFS_M(sb), SHUTDOWN_DEVICE_REMOVED);
1211 }
1212
1213 static const struct super_operations xfs_super_operations = {
1214 .alloc_inode = xfs_fs_alloc_inode,
1215 .destroy_inode = xfs_fs_destroy_inode,
1216 .dirty_inode = xfs_fs_dirty_inode,
1217 .drop_inode = xfs_fs_drop_inode,
1218 .put_super = xfs_fs_put_super,
1219 .sync_fs = xfs_fs_sync_fs,
1220 .freeze_fs = xfs_fs_freeze,
1221 .unfreeze_fs = xfs_fs_unfreeze,
1222 .statfs = xfs_fs_statfs,
1223 .show_options = xfs_fs_show_options,
1224 .nr_cached_objects = xfs_fs_nr_cached_objects,
1225 .free_cached_objects = xfs_fs_free_cached_objects,
1226 .shutdown = xfs_fs_shutdown,
1227 };
1228
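/*
 * Parse an integer that may carry a binary K/M/G suffix, as used by the
 * logbsize and allocsize mount options; e.g. "32k" becomes 32768 and "1g"
 * becomes 1073741824.
 */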
1229 static int
1230 suffix_kstrtoint(
1231 const char *s,
1232 unsigned int base,
1233 int *res)
1234 {
1235 int last, shift_left_factor = 0, _res;
1236 char *value;
1237 int ret = 0;
1238
1239 value = kstrdup(s, GFP_KERNEL);
1240 if (!value)
1241 return -ENOMEM;
1242
1243 last = strlen(value) - 1;
1244 if (value[last] == 'K' || value[last] == 'k') {
1245 shift_left_factor = 10;
1246 value[last] = '\0';
1247 }
1248 if (value[last] == 'M' || value[last] == 'm') {
1249 shift_left_factor = 20;
1250 value[last] = '\0';
1251 }
1252 if (value[last] == 'G' || value[last] == 'g') {
1253 shift_left_factor = 30;
1254 value[last] = '\0';
1255 }
1256
1257 if (kstrtoint(value, base, &_res))
1258 ret = -EINVAL;
1259 kfree(value);
1260 *res = _res << shift_left_factor;
1261 return ret;
1262 }
1263
1264 static inline void
1265 xfs_fs_warn_deprecated(
1266 struct fs_context *fc,
1267 struct fs_parameter *param,
1268 uint64_t flag,
1269 bool value)
1270 {
1271 /* Don't print the warning if reconfiguring and current mount point
1272 * already had the flag set
1273 */
1274 if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
1275 !!(XFS_M(fc->root->d_sb)->m_features & flag) == value)
1276 return;
1277 xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
1278 }
1279
1280 /*
1281 * Set mount state from a mount option.
1282 *
1283 * NOTE: mp->m_super is NULL here!
1284 */
1285 static int
1286 xfs_fs_parse_param(
1287 struct fs_context *fc,
1288 struct fs_parameter *param)
1289 {
1290 struct xfs_mount *parsing_mp = fc->s_fs_info;
1291 struct fs_parse_result result;
1292 int size = 0;
1293 int opt;
1294
1295 BUILD_BUG_ON(XFS_QFLAGS_MNTOPTS & XFS_MOUNT_QUOTA_ALL);
1296
1297 opt = fs_parse(fc, xfs_fs_parameters, param, &result);
1298 if (opt < 0)
1299 return opt;
1300
1301 switch (opt) {
1302 case Opt_logbufs:
1303 parsing_mp->m_logbufs = result.uint_32;
1304 return 0;
1305 case Opt_logbsize:
1306 if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
1307 return -EINVAL;
1308 return 0;
1309 case Opt_logdev:
1310 kfree(parsing_mp->m_logname);
1311 parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
1312 if (!parsing_mp->m_logname)
1313 return -ENOMEM;
1314 return 0;
1315 case Opt_rtdev:
1316 kfree(parsing_mp->m_rtname);
1317 parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
1318 if (!parsing_mp->m_rtname)
1319 return -ENOMEM;
1320 return 0;
1321 case Opt_allocsize:
1322 if (suffix_kstrtoint(param->string, 10, &size))
1323 return -EINVAL;
1324 parsing_mp->m_allocsize_log = ffs(size) - 1;
1325 parsing_mp->m_features |= XFS_FEAT_ALLOCSIZE;
1326 return 0;
1327 case Opt_grpid:
1328 case Opt_bsdgroups:
1329 parsing_mp->m_features |= XFS_FEAT_GRPID;
1330 return 0;
1331 case Opt_nogrpid:
1332 case Opt_sysvgroups:
1333 parsing_mp->m_features &= ~XFS_FEAT_GRPID;
1334 return 0;
1335 case Opt_wsync:
1336 parsing_mp->m_features |= XFS_FEAT_WSYNC;
1337 return 0;
1338 case Opt_norecovery:
1339 parsing_mp->m_features |= XFS_FEAT_NORECOVERY;
1340 return 0;
1341 case Opt_noalign:
1342 parsing_mp->m_features |= XFS_FEAT_NOALIGN;
1343 return 0;
1344 case Opt_swalloc:
1345 parsing_mp->m_features |= XFS_FEAT_SWALLOC;
1346 return 0;
1347 case Opt_sunit:
1348 parsing_mp->m_dalign = result.uint_32;
1349 return 0;
1350 case Opt_swidth:
1351 parsing_mp->m_swidth = result.uint_32;
1352 return 0;
1353 case Opt_inode32:
1354 parsing_mp->m_features |= XFS_FEAT_SMALL_INUMS;
1355 return 0;
1356 case Opt_inode64:
1357 parsing_mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
1358 return 0;
1359 case Opt_nouuid:
1360 parsing_mp->m_features |= XFS_FEAT_NOUUID;
1361 return 0;
1362 case Opt_largeio:
1363 parsing_mp->m_features |= XFS_FEAT_LARGE_IOSIZE;
1364 return 0;
1365 case Opt_nolargeio:
1366 parsing_mp->m_features &= ~XFS_FEAT_LARGE_IOSIZE;
1367 return 0;
1368 case Opt_filestreams:
1369 parsing_mp->m_features |= XFS_FEAT_FILESTREAMS;
1370 return 0;
1371 case Opt_noquota:
1372 parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
1373 parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
1374 parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
1375 return 0;
1376 case Opt_quota:
1377 case Opt_uquota:
1378 case Opt_usrquota:
1379 parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD);
1380 parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
1381 return 0;
1382 case Opt_qnoenforce:
1383 case Opt_uqnoenforce:
1384 parsing_mp->m_qflags |= XFS_UQUOTA_ACCT;
1385 parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
1386 parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
1387 return 0;
1388 case Opt_pquota:
1389 case Opt_prjquota:
1390 parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD);
1391 parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
1392 return 0;
1393 case Opt_pqnoenforce:
1394 parsing_mp->m_qflags |= XFS_PQUOTA_ACCT;
1395 parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
1396 parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
1397 return 0;
1398 case Opt_gquota:
1399 case Opt_grpquota:
1400 parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
1401 parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
1402 return 0;
1403 case Opt_gqnoenforce:
1404 parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
1405 parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
1406 parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
1407 return 0;
1408 case Opt_discard:
1409 parsing_mp->m_features |= XFS_FEAT_DISCARD;
1410 return 0;
1411 case Opt_nodiscard:
1412 parsing_mp->m_features &= ~XFS_FEAT_DISCARD;
1413 return 0;
1414 #ifdef CONFIG_FS_DAX
1415 case Opt_dax:
1416 xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
1417 return 0;
1418 case Opt_dax_enum:
1419 xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
1420 return 0;
1421 #endif
1422 /* Following mount options will be removed in September 2025 */
1423 case Opt_ikeep:
1424 xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true);
1425 parsing_mp->m_features |= XFS_FEAT_IKEEP;
1426 return 0;
1427 case Opt_noikeep:
1428 xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false);
1429 parsing_mp->m_features &= ~XFS_FEAT_IKEEP;
1430 return 0;
1431 case Opt_attr2:
1432 xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true);
1433 parsing_mp->m_features |= XFS_FEAT_ATTR2;
1434 return 0;
1435 case Opt_noattr2:
1436 xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true);
1437 parsing_mp->m_features |= XFS_FEAT_NOATTR2;
1438 return 0;
1439 default:
1440 xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
1441 return -EINVAL;
1442 }
1443
1444 return 0;
1445 }
1446
1447 static int
1448 xfs_fs_validate_params(
1449 struct xfs_mount *mp)
1450 {
1451 /* No recovery flag requires a read-only mount */
1452 if (xfs_has_norecovery(mp) && !xfs_is_readonly(mp)) {
1453 xfs_warn(mp, "no-recovery mounts must be read-only.");
1454 return -EINVAL;
1455 }
1456
1457 /*
1458 * We have not read the superblock at this point, so only the attr2
1459 * mount option can set the attr2 feature by this stage.
1460 */
1461 if (xfs_has_attr2(mp) && xfs_has_noattr2(mp)) {
1462 xfs_warn(mp, "attr2 and noattr2 cannot both be specified.");
1463 return -EINVAL;
1464 }
1465
1466
1467 if (xfs_has_noalign(mp) && (mp->m_dalign || mp->m_swidth)) {
1468 xfs_warn(mp,
1469 "sunit and swidth options incompatible with the noalign option");
1470 return -EINVAL;
1471 }
1472
1473 if (!IS_ENABLED(CONFIG_XFS_QUOTA) &&
1474 (mp->m_qflags & ~XFS_QFLAGS_MNTOPTS)) {
1475 xfs_warn(mp, "quota support not available in this kernel.");
1476 return -EINVAL;
1477 }
1478
1479 if ((mp->m_dalign && !mp->m_swidth) ||
1480 (!mp->m_dalign && mp->m_swidth)) {
1481 xfs_warn(mp, "sunit and swidth must be specified together");
1482 return -EINVAL;
1483 }
1484
1485 if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
1486 xfs_warn(mp,
1487 "stripe width (%d) must be a multiple of the stripe unit (%d)",
1488 mp->m_swidth, mp->m_dalign);
1489 return -EINVAL;
1490 }
1491
1492 if (mp->m_logbufs != -1 &&
1493 mp->m_logbufs != 0 &&
1494 (mp->m_logbufs < XLOG_MIN_ICLOGS ||
1495 mp->m_logbufs > XLOG_MAX_ICLOGS)) {
1496 xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
1497 mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
1498 return -EINVAL;
1499 }
1500
1501 if (mp->m_logbsize != -1 &&
1502 mp->m_logbsize != 0 &&
1503 (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
1504 mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
1505 !is_power_of_2(mp->m_logbsize))) {
1506 xfs_warn(mp,
1507 "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
1508 mp->m_logbsize);
1509 return -EINVAL;
1510 }
1511
1512 if (xfs_has_allocsize(mp) &&
1513 (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
1514 mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
1515 xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
1516 mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
1517 return -EINVAL;
1518 }
1519
1520 return 0;
1521 }
1522
1523 struct dentry *
1524 xfs_debugfs_mkdir(
1525 const char *name,
1526 struct dentry *parent)
1527 {
1528 struct dentry *child;
1529
1530 /* Apparently we're expected to ignore error returns?? */
1531 child = debugfs_create_dir(name, parent);
1532 if (IS_ERR(child))
1533 return NULL;
1534
1535 return child;
1536 }
1537
1538 static int
1539 xfs_fs_fill_super(
1540 struct super_block *sb,
1541 struct fs_context *fc)
1542 {
1543 struct xfs_mount *mp = sb->s_fs_info;
1544 struct inode *root;
1545 int flags = 0, error;
1546
1547 mp->m_super = sb;
1548
1549 /*
1550 * Copy VFS mount flags from the context now that all parameter parsing
1551 * is guaranteed to have been completed by either the old mount API or
1552 * the newer fsopen/fsconfig API.
1553 */
1554 if (fc->sb_flags & SB_RDONLY)
1555 xfs_set_readonly(mp);
1556 if (fc->sb_flags & SB_DIRSYNC)
1557 mp->m_features |= XFS_FEAT_DIRSYNC;
1558 if (fc->sb_flags & SB_SYNCHRONOUS)
1559 mp->m_features |= XFS_FEAT_WSYNC;
1560
1561 error = xfs_fs_validate_params(mp);
1562 if (error)
1563 return error;
1564
1565 sb_min_blocksize(sb, BBSIZE);
1566 sb->s_xattr = xfs_xattr_handlers;
1567 sb->s_export_op = &xfs_export_operations;
1568 #ifdef CONFIG_XFS_QUOTA
1569 sb->s_qcop = &xfs_quotactl_operations;
1570 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
1571 #endif
1572 sb->s_op = &xfs_super_operations;
1573
1574 /*
1575 * Delay mount work if the debug hook is set. This is debug
1576 * instrumentation to coordinate simulation of xfs mount failures with
1577 * VFS superblock operations.
1578 */
1579 if (xfs_globals.mount_delay) {
1580 xfs_notice(mp, "Delaying mount for %d seconds.",
1581 xfs_globals.mount_delay);
1582 msleep(xfs_globals.mount_delay * 1000);
1583 }
1584
1585 if (fc->sb_flags & SB_SILENT)
1586 flags |= XFS_MFSI_QUIET;
1587
1588 error = xfs_open_devices(mp);
1589 if (error)
1590 return error;
1591
1592 if (xfs_debugfs) {
1593 mp->m_debugfs = xfs_debugfs_mkdir(mp->m_super->s_id,
1594 xfs_debugfs);
1595 } else {
1596 mp->m_debugfs = NULL;
1597 }
1598
1599 error = xfs_init_mount_workqueues(mp);
1600 if (error)
1601 goto out_shutdown_devices;
1602
1603 error = xfs_init_percpu_counters(mp);
1604 if (error)
1605 goto out_destroy_workqueues;
1606
1607 error = xfs_inodegc_init_percpu(mp);
1608 if (error)
1609 goto out_destroy_counters;
1610
1611 /* Allocate stats memory before we do operations that might use it */
1612 mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
1613 if (!mp->m_stats.xs_stats) {
1614 error = -ENOMEM;
1615 goto out_destroy_inodegc;
1616 }
1617
1618 error = xchk_mount_stats_alloc(mp);
1619 if (error)
1620 goto out_free_stats;
1621
1622 error = xfs_readsb(mp, flags);
1623 if (error)
1624 goto out_free_scrub_stats;
1625
1626 error = xfs_finish_flags(mp);
1627 if (error)
1628 goto out_free_sb;
1629
1630 error = xfs_setup_devices(mp);
1631 if (error)
1632 goto out_free_sb;
1633
1634 /*
1635 * V4 support is undergoing deprecation.
1636 *
1637 * Note: this has to use an open coded m_features check as xfs_has_crc
1638 * always returns false for !CONFIG_XFS_SUPPORT_V4.
1639 */
1640 if (!(mp->m_features & XFS_FEAT_CRC)) {
1641 if (!IS_ENABLED(CONFIG_XFS_SUPPORT_V4)) {
1642 xfs_warn(mp,
1643 "Deprecated V4 format (crc=0) not supported by kernel.");
1644 error = -EINVAL;
1645 goto out_free_sb;
1646 }
1647 xfs_warn_once(mp,
1648 "Deprecated V4 format (crc=0) will not be supported after September 2030.");
1649 }
1650
1651 /* ASCII case insensitivity is undergoing deprecation. */
1652 if (xfs_has_asciici(mp)) {
1653 #ifdef CONFIG_XFS_SUPPORT_ASCII_CI
1654 xfs_warn_once(mp,
1655 "Deprecated ASCII case-insensitivity feature (ascii-ci=1) will not be supported after September 2030.");
1656 #else
1657 xfs_warn(mp,
1658 "Deprecated ASCII case-insensitivity feature (ascii-ci=1) not supported by kernel.");
1659 error = -EINVAL;
1660 goto out_free_sb;
1661 #endif
1662 }
1663
1664 /*
1665 * Filesystem claims it needs repair, so refuse the mount unless
1666 * norecovery is also specified, in which case the filesystem can
1667 * be mounted with no risk of further damage.
1668 */
1669 if (xfs_has_needsrepair(mp) && !xfs_has_norecovery(mp)) {
1670 xfs_warn(mp, "Filesystem needs repair. Please run xfs_repair.");
1671 error = -EFSCORRUPTED;
1672 goto out_free_sb;
1673 }
1674
1675 /*
1676 * Don't touch the filesystem if a user tool thinks it owns the primary
1677 * superblock. mkfs doesn't clear the flag from secondary supers, so
1678 * we don't check them at all.
1679 */
1680 if (mp->m_sb.sb_inprogress) {
1681 xfs_warn(mp, "Offline file system operation in progress!");
1682 error = -EFSCORRUPTED;
1683 goto out_free_sb;
1684 }
1685
1686 if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
1687 size_t max_folio_size = mapping_max_folio_size_supported();
1688
1689 if (!xfs_has_crc(mp)) {
1690 xfs_warn(mp,
1691 "V4 Filesystem with blocksize %d bytes. Only pagesize (%ld) or less is supported.",
1692 mp->m_sb.sb_blocksize, PAGE_SIZE);
1693 error = -ENOSYS;
1694 goto out_free_sb;
1695 }
1696
1697 if (mp->m_sb.sb_blocksize > max_folio_size) {
1698 xfs_warn(mp,
1699 "block size (%u bytes) not supported; Only block size (%zu) or less is supported",
1700 mp->m_sb.sb_blocksize, max_folio_size);
1701 error = -ENOSYS;
1702 goto out_free_sb;
1703 }
1704
1705 xfs_warn_experimental(mp, XFS_EXPERIMENTAL_LBS);
1706 }
1707
1708 /* Ensure this filesystem fits in the page cache limits */
1709 if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
1710 xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
1711 xfs_warn(mp,
1712 "file system too large to be mounted on this system.");
1713 error = -EFBIG;
1714 goto out_free_sb;
1715 }
1716
1717 /*
1718 * XFS block mappings use 54 bits to store the logical block offset.
1719 * This should suffice to handle the maximum file size that the VFS
1720 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
1721 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
1722 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
1723 * to check this assertion.
1724 *
1725 * Avoid integer overflow by comparing the maximum bmbt offset to the
1726 * maximum pagecache offset in units of fs blocks.
1727 */
1728 if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
1729 xfs_warn(mp,
1730 "MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
1731 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
1732 XFS_MAX_FILEOFF);
1733 error = -EINVAL;
1734 goto out_free_sb;
1735 }
1736
1737 error = xfs_rtmount_readsb(mp);
1738 if (error)
1739 goto out_free_sb;
1740
1741 error = xfs_filestream_mount(mp);
1742 if (error)
1743 goto out_free_rtsb;
1744
1745 /*
1746 * we must configure the block size in the superblock before we run the
1747 * full mount process as the mount process can look up and cache inodes.
1748 */
1749 sb->s_magic = XFS_SUPER_MAGIC;
1750 sb->s_blocksize = mp->m_sb.sb_blocksize;
1751 sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1752 sb->s_maxbytes = MAX_LFS_FILESIZE;
1753 sb->s_max_links = XFS_MAXLINK;
1754 sb->s_time_gran = 1;
1755 if (xfs_has_bigtime(mp)) {
1756 sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
1757 sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
1758 } else {
1759 sb->s_time_min = XFS_LEGACY_TIME_MIN;
1760 sb->s_time_max = XFS_LEGACY_TIME_MAX;
1761 }
1762 trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
1763 sb->s_iflags |= SB_I_CGROUPWB | SB_I_ALLOW_HSM;
1764
1765 set_posix_acl_flag(sb);
1766
1767 /* version 5 superblocks support inode version counters. */
1768 if (xfs_has_crc(mp))
1769 sb->s_flags |= SB_I_VERSION;
1770
1771 if (xfs_has_dax_always(mp)) {
1772 error = xfs_setup_dax_always(mp);
1773 if (error)
1774 goto out_filestream_unmount;
1775 }
1776
1777 if (xfs_has_discard(mp) && !bdev_max_discard_sectors(sb->s_bdev)) {
1778 xfs_warn(mp,
1779 "mounting with \"discard\" option, but the device does not support discard");
1780 mp->m_features &= ~XFS_FEAT_DISCARD;
1781 }
1782
1783 if (xfs_has_metadir(mp))
1784 xfs_warn_experimental(mp, XFS_EXPERIMENTAL_METADIR);
1785
1786 if (xfs_has_reflink(mp)) {
1787 if (xfs_has_realtime(mp) &&
1788 !xfs_reflink_supports_rextsize(mp, mp->m_sb.sb_rextsize)) {
1789 xfs_alert(mp,
1790 "reflink not compatible with realtime extent size %u!",
1791 mp->m_sb.sb_rextsize);
1792 error = -EINVAL;
1793 goto out_filestream_unmount;
1794 }
1795
1796 if (xfs_globals.always_cow) {
1797 xfs_info(mp, "using DEBUG-only always_cow mode.");
1798 mp->m_always_cow = true;
1799 }
1800 }
1801
1802
1803 if (xfs_has_exchange_range(mp))
1804 xfs_warn_experimental(mp, XFS_EXPERIMENTAL_EXCHRANGE);
1805
1806 if (xfs_has_parent(mp))
1807 xfs_warn_experimental(mp, XFS_EXPERIMENTAL_PPTR);
1808
1809 /*
1810 * If no quota mount options were provided, maybe we'll try to pick
1811 * up the quota accounting and enforcement flags from the ondisk sb.
1812 */
1813 if (!(mp->m_qflags & XFS_QFLAGS_MNTOPTS))
1814 xfs_set_resuming_quotaon(mp);
1815 mp->m_qflags &= ~XFS_QFLAGS_MNTOPTS;
1816
1817 error = xfs_mountfs(mp);
1818 if (error)
1819 goto out_filestream_unmount;
1820
1821 root = igrab(VFS_I(mp->m_rootip));
1822 if (!root) {
1823 error = -ENOENT;
1824 goto out_unmount;
1825 }
1826 sb->s_root = d_make_root(root);
1827 if (!sb->s_root) {
1828 error = -ENOMEM;
1829 goto out_unmount;
1830 }
1831
1832 return 0;
1833
1834 out_filestream_unmount:
1835 xfs_filestream_unmount(mp);
1836 out_free_rtsb:
1837 xfs_rtmount_freesb(mp);
1838 out_free_sb:
1839 xfs_freesb(mp);
1840 out_free_scrub_stats:
1841 xchk_mount_stats_free(mp);
1842 out_free_stats:
1843 free_percpu(mp->m_stats.xs_stats);
1844 out_destroy_inodegc:
1845 xfs_inodegc_free_percpu(mp);
1846 out_destroy_counters:
1847 xfs_destroy_percpu_counters(mp);
1848 out_destroy_workqueues:
1849 xfs_destroy_mount_workqueues(mp);
1850 out_shutdown_devices:
1851 xfs_shutdown_devices(mp);
1852 return error;
1853
1854 out_unmount:
1855 xfs_filestream_unmount(mp);
1856 xfs_unmountfs(mp);
1857 goto out_free_rtsb;
1858 }
1859
1860 static int
1861 xfs_fs_get_tree(
1862 struct fs_context *fc)
1863 {
1864 return get_tree_bdev(fc, xfs_fs_fill_super);
1865 }
1866
1867 static int
1868 xfs_remount_rw(
1869 struct xfs_mount *mp)
1870 {
1871 struct xfs_sb *sbp = &mp->m_sb;
1872 int error;
1873
1874 if (xfs_has_norecovery(mp)) {
1875 xfs_warn(mp,
1876 "ro->rw transition prohibited on norecovery mount");
1877 return -EINVAL;
1878 }
1879
1880 if (xfs_sb_is_v5(sbp) &&
1881 xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
1882 xfs_warn(mp,
1883 "ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
1884 (sbp->sb_features_ro_compat &
1885 XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
1886 return -EINVAL;
1887 }
1888
1889 xfs_clear_readonly(mp);
1890
1891 /*
1892 * If this is the first remount to writeable state we might have some
1893 * superblock changes to update.
1894 */
1895 if (mp->m_update_sb) {
1896 error = xfs_sync_sb(mp, false);
1897 if (error) {
1898 xfs_warn(mp, "failed to write sb changes");
1899 return error;
1900 }
1901 mp->m_update_sb = false;
1902 }
1903
1904 /*
1905 * Fill out the reserve pool if it is empty. Use the stashed value if
1906 * it is non-zero, otherwise go with the default.
1907 */
1908 xfs_restore_resvblks(mp);
1909 xfs_log_work_queue(mp);
1910 xfs_blockgc_start(mp);
1911
1912 /* Create the per-AG metadata reservation pool. */
1913 error = xfs_fs_reserve_ag_blocks(mp);
1914 if (error && error != -ENOSPC)
1915 return error;
1916
1917 /* Re-enable the background inode inactivation worker. */
1918 xfs_inodegc_start(mp);
1919
1920 return 0;
1921 }
1922
1923 static int
1924 xfs_remount_ro(
1925 struct xfs_mount *mp)
1926 {
1927 struct xfs_icwalk icw = {
1928 .icw_flags = XFS_ICWALK_FLAG_SYNC,
1929 };
1930 int error;
1931
1932 /* Flush all the dirty data to disk. */
1933 error = sync_filesystem(mp->m_super);
1934 if (error)
1935 return error;
1936
1937 /*
1938 * Cancel background eofb scanning so it cannot race with the final
1939 * log force+buftarg wait and deadlock the remount.
1940 */
1941 xfs_blockgc_stop(mp);
1942
1943 /*
1944 * Clear out all remaining COW staging extents and speculative post-EOF
1945 * preallocations so that we don't leave inodes requiring inactivation
1946 * cleanups during reclaim on a read-only mount. We must process every
1947 * cached inode, so this requires a synchronous cache scan.
1948 */
1949 error = xfs_blockgc_free_space(mp, &icw);
1950 if (error) {
1951 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1952 return error;
1953 }
1954
1955 /*
1956 * Stop the inodegc background worker. xfs_fs_reconfigure already
1957 * flushed all pending inodegc work when it sync'd the filesystem.
1958 * The VFS holds s_umount, so we know that inodes cannot enter
1959 * xfs_fs_destroy_inode during a remount operation. In readonly mode
1960 * we send inodes straight to reclaim, so no inodes will be queued.
1961 */
1962 xfs_inodegc_stop(mp);
1963
1964 /* Free the per-AG metadata reservation pool. */
1965 xfs_fs_unreserve_ag_blocks(mp);
1966
1967 /*
1968 * Before we sync the metadata, we need to free up the reserve block
1969 * pool so that the used block count in the superblock on disk is
1970 * correct at the end of the remount. Stash the current reserve pool
1971 * size so that if we get remounted rw, we can return it to the same
1972 * size.
1973 */
1974 xfs_save_resvblks(mp);
1975
1976 xfs_log_clean(mp);
1977 xfs_set_readonly(mp);
1978
1979 return 0;
1980 }
1981
1982 /*
1983 * Logically we would return an error here to prevent users from believing
1984 * they have changed mount options via remount that in fact cannot be changed.
1985 *
1986 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
1987 * arguments in some cases, so we can't blindly reject options; we have to
1988 * check whether each specified option actually differs from the currently
1989 * set option and only reject it if that's the case.
1990 *
1991 * Until that is implemented we return success for every remount request, and
1992 * silently ignore all options that we can't actually change.
1993 */
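/*
 * For example (hypothetical device, paths and options), a plain
 *
 *	# mount -o remount,ro /mnt
 *
 * may reach us as "remount,ro,logbufs=8,noquota" with every option from the
 * original fstab/mtab entry re-sent even though none of them actually changed.
 */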
1994 static int
1995 xfs_fs_reconfigure(
1996 struct fs_context *fc)
1997 {
1998 struct xfs_mount *mp = XFS_M(fc->root->d_sb);
1999 struct xfs_mount *new_mp = fc->s_fs_info;
2000 int flags = fc->sb_flags;
2001 int error;
2002
2003 new_mp->m_qflags &= ~XFS_QFLAGS_MNTOPTS;
2004
2005 /* version 5 superblocks always support version counters. */
2006 if (xfs_has_crc(mp))
2007 fc->sb_flags |= SB_I_VERSION;
2008
2009 error = xfs_fs_validate_params(new_mp);
2010 if (error)
2011 return error;
2012
2013 /* inode32 -> inode64 */
2014 if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) {
2015 mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
2016 mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
2017 }
2018
2019 /* inode64 -> inode32 */
2020 if (!xfs_has_small_inums(mp) && xfs_has_small_inums(new_mp)) {
2021 mp->m_features |= XFS_FEAT_SMALL_INUMS;
2022 mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
2023 }
2024
2025 /* ro -> rw */
2026 if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) {
2027 error = xfs_remount_rw(mp);
2028 if (error)
2029 return error;
2030 }
2031
2032 /* rw -> ro */
2033 if (!xfs_is_readonly(mp) && (flags & SB_RDONLY)) {
2034 error = xfs_remount_ro(mp);
2035 if (error)
2036 return error;
2037 }
2038
2039 return 0;
2040 }
2041
2042 static void
2043 xfs_fs_free(
2044 struct fs_context *fc)
2045 {
2046 struct xfs_mount *mp = fc->s_fs_info;
2047
2048 /*
2049 * mp is stored in the fs_context when it is initialized.
2050 * mp is transferred to the superblock on a successful mount,
2051 * but if an error occurs before the transfer we have to free
2052 * it here.
2053 */
2054 if (mp)
2055 xfs_mount_free(mp);
2056 }
2057
2058 static const struct fs_context_operations xfs_context_ops = {
2059 .parse_param = xfs_fs_parse_param,
2060 .get_tree = xfs_fs_get_tree,
2061 .reconfigure = xfs_fs_reconfigure,
2062 .free = xfs_fs_free,
2063 };
2064
2065 /*
2066 * WARNING: do not initialise any parameters in this function that depend on
2067 * mount option parsing having already been performed as this can be called from
2068 * fsopen() before any parameters have been set.
2069 */
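/*
 * Illustrative sketch (not part of the mount path itself): with the new mount
 * API the context set up below is created by fsopen() long before any
 * parameters arrive via fsconfig(), so only option-independent defaults may
 * be initialised here.  A userspace sequence, with syscall wrappers assumed
 * and error handling omitted, might look roughly like:
 *
 *	int fsfd = fsopen("xfs", FSOPEN_CLOEXEC);	// xfs_init_fs_context()
 *	fsconfig(fsfd, FSCONFIG_SET_STRING, "source", "/dev/sdb1", 0);
 *	fsconfig(fsfd, FSCONFIG_SET_STRING, "logbufs", "8", 0);	// xfs_fs_parse_param()
 *	fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);	// xfs_fs_get_tree()
 *	int mfd = fsmount(fsfd, FSMOUNT_CLOEXEC, 0);
 *	move_mount(mfd, "", AT_FDCWD, "/mnt", MOVE_MOUNT_F_EMPTY_PATH);
 */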
2070 static int
2071 xfs_init_fs_context(
2072 struct fs_context *fc)
2073 {
2074 struct xfs_mount *mp;
2075 int i;
2076
2077 mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL | __GFP_NOFAIL);
2078 if (!mp)
2079 return -ENOMEM;
2080
2081 spin_lock_init(&mp->m_sb_lock);
2082 for (i = 0; i < XG_TYPE_MAX; i++)
2083 xa_init(&mp->m_groups[i].xa);
2084 mutex_init(&mp->m_growlock);
2085 INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
2086 INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
2087 mp->m_kobj.kobject.kset = xfs_kset;
2088 /*
2089 * We don't create the finobt per-ag space reservation until after log
2090 * recovery, so we must set this to true so that an ifree transaction
2091 * started during log recovery will not depend on space reservations
2092 * for finobt expansion.
2093 */
2094 mp->m_finobt_nores = true;
2095
2096 /*
2097 * These can be overridden by the mount option parsing.
2098 */
2099 mp->m_logbufs = -1;
2100 mp->m_logbsize = -1;
2101 mp->m_allocsize_log = 16; /* 64k */
2102
2103 xfs_hooks_init(&mp->m_dir_update_hooks);
2104
2105 fc->s_fs_info = mp;
2106 fc->ops = &xfs_context_ops;
2107
2108 return 0;
2109 }
2110
2111 static void
2112 xfs_kill_sb(
2113 struct super_block *sb)
2114 {
2115 kill_block_super(sb);
2116 xfs_mount_free(XFS_M(sb));
2117 }
2118
2119 static struct file_system_type xfs_fs_type = {
2120 .owner = THIS_MODULE,
2121 .name = "xfs",
2122 .init_fs_context = xfs_init_fs_context,
2123 .parameters = xfs_fs_parameters,
2124 .kill_sb = xfs_kill_sb,
2125 .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP | FS_MGTIME,
2126 };
2127 MODULE_ALIAS_FS("xfs");
2128
2129 STATIC int __init
2130 xfs_init_caches(void)
2131 {
2132 int error;
2133
2134 xfs_buf_cache = kmem_cache_create("xfs_buf", sizeof(struct xfs_buf), 0,
2135 SLAB_HWCACHE_ALIGN |
2136 SLAB_RECLAIM_ACCOUNT,
2137 NULL);
2138 if (!xfs_buf_cache)
2139 goto out;
2140
2141 xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket",
2142 sizeof(struct xlog_ticket),
2143 0, 0, NULL);
2144 if (!xfs_log_ticket_cache)
2145 goto out_destroy_buf_cache;
2146
2147 error = xfs_btree_init_cur_caches();
2148 if (error)
2149 goto out_destroy_log_ticket_cache;
2150
2151 error = rcbagbt_init_cur_cache();
2152 if (error)
2153 goto out_destroy_btree_cur_cache;
2154
2155 error = xfs_defer_init_item_caches();
2156 if (error)
2157 goto out_destroy_rcbagbt_cur_cache;
2158
2159 xfs_da_state_cache = kmem_cache_create("xfs_da_state",
2160 sizeof(struct xfs_da_state),
2161 0, 0, NULL);
2162 if (!xfs_da_state_cache)
2163 goto out_destroy_defer_item_cache;
2164
2165 xfs_ifork_cache = kmem_cache_create("xfs_ifork",
2166 sizeof(struct xfs_ifork),
2167 0, 0, NULL);
2168 if (!xfs_ifork_cache)
2169 goto out_destroy_da_state_cache;
2170
2171 xfs_trans_cache = kmem_cache_create("xfs_trans",
2172 sizeof(struct xfs_trans),
2173 0, 0, NULL);
2174 if (!xfs_trans_cache)
2175 goto out_destroy_ifork_cache;
2176
2177
2178 /*
2179 * The size of the cache-allocated buf log item is the maximum
2180 * size possible under XFS. This wastes a little bit of memory,
2181 * but it is much faster.
2182 */
2183 xfs_buf_item_cache = kmem_cache_create("xfs_buf_item",
2184 sizeof(struct xfs_buf_log_item),
2185 0, 0, NULL);
2186 if (!xfs_buf_item_cache)
2187 goto out_destroy_trans_cache;
2188
2189 xfs_efd_cache = kmem_cache_create("xfs_efd_item",
2190 xfs_efd_log_item_sizeof(XFS_EFD_MAX_FAST_EXTENTS),
2191 0, 0, NULL);
2192 if (!xfs_efd_cache)
2193 goto out_destroy_buf_item_cache;
2194
2195 xfs_efi_cache = kmem_cache_create("xfs_efi_item",
2196 xfs_efi_log_item_sizeof(XFS_EFI_MAX_FAST_EXTENTS),
2197 0, 0, NULL);
2198 if (!xfs_efi_cache)
2199 goto out_destroy_efd_cache;
2200
2201 xfs_inode_cache = kmem_cache_create("xfs_inode",
2202 sizeof(struct xfs_inode), 0,
2203 (SLAB_HWCACHE_ALIGN |
2204 SLAB_RECLAIM_ACCOUNT |
2205 SLAB_ACCOUNT),
2206 xfs_fs_inode_init_once);
2207 if (!xfs_inode_cache)
2208 goto out_destroy_efi_cache;
2209
2210 xfs_ili_cache = kmem_cache_create("xfs_ili",
2211 sizeof(struct xfs_inode_log_item), 0,
2212 SLAB_RECLAIM_ACCOUNT,
2213 NULL);
2214 if (!xfs_ili_cache)
2215 goto out_destroy_inode_cache;
2216
2217 xfs_icreate_cache = kmem_cache_create("xfs_icr",
2218 sizeof(struct xfs_icreate_item),
2219 0, 0, NULL);
2220 if (!xfs_icreate_cache)
2221 goto out_destroy_ili_cache;
2222
2223 xfs_rud_cache = kmem_cache_create("xfs_rud_item",
2224 sizeof(struct xfs_rud_log_item),
2225 0, 0, NULL);
2226 if (!xfs_rud_cache)
2227 goto out_destroy_icreate_cache;
2228
2229 xfs_rui_cache = kmem_cache_create("xfs_rui_item",
2230 xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
2231 0, 0, NULL);
2232 if (!xfs_rui_cache)
2233 goto out_destroy_rud_cache;
2234
2235 xfs_cud_cache = kmem_cache_create("xfs_cud_item",
2236 sizeof(struct xfs_cud_log_item),
2237 0, 0, NULL);
2238 if (!xfs_cud_cache)
2239 goto out_destroy_rui_cache;
2240
2241 xfs_cui_cache = kmem_cache_create("xfs_cui_item",
2242 xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
2243 0, 0, NULL);
2244 if (!xfs_cui_cache)
2245 goto out_destroy_cud_cache;
2246
2247 xfs_bud_cache = kmem_cache_create("xfs_bud_item",
2248 sizeof(struct xfs_bud_log_item),
2249 0, 0, NULL);
2250 if (!xfs_bud_cache)
2251 goto out_destroy_cui_cache;
2252
2253 xfs_bui_cache = kmem_cache_create("xfs_bui_item",
2254 xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
2255 0, 0, NULL);
2256 if (!xfs_bui_cache)
2257 goto out_destroy_bud_cache;
2258
2259 xfs_attrd_cache = kmem_cache_create("xfs_attrd_item",
2260 sizeof(struct xfs_attrd_log_item),
2261 0, 0, NULL);
2262 if (!xfs_attrd_cache)
2263 goto out_destroy_bui_cache;
2264
2265 xfs_attri_cache = kmem_cache_create("xfs_attri_item",
2266 sizeof(struct xfs_attri_log_item),
2267 0, 0, NULL);
2268 if (!xfs_attri_cache)
2269 goto out_destroy_attrd_cache;
2270
2271 xfs_iunlink_cache = kmem_cache_create("xfs_iul_item",
2272 sizeof(struct xfs_iunlink_item),
2273 0, 0, NULL);
2274 if (!xfs_iunlink_cache)
2275 goto out_destroy_attri_cache;
2276
2277 xfs_xmd_cache = kmem_cache_create("xfs_xmd_item",
2278 sizeof(struct xfs_xmd_log_item),
2279 0, 0, NULL);
2280 if (!xfs_xmd_cache)
2281 goto out_destroy_iul_cache;
2282
2283 xfs_xmi_cache = kmem_cache_create("xfs_xmi_item",
2284 sizeof(struct xfs_xmi_log_item),
2285 0, 0, NULL);
2286 if (!xfs_xmi_cache)
2287 goto out_destroy_xmd_cache;
2288
2289 xfs_parent_args_cache = kmem_cache_create("xfs_parent_args",
2290 sizeof(struct xfs_parent_args),
2291 0, 0, NULL);
2292 if (!xfs_parent_args_cache)
2293 goto out_destroy_xmi_cache;
2294
2295 return 0;
2296
2297 out_destroy_xmi_cache:
2298 kmem_cache_destroy(xfs_xmi_cache);
2299 out_destroy_xmd_cache:
2300 kmem_cache_destroy(xfs_xmd_cache);
2301 out_destroy_iul_cache:
2302 kmem_cache_destroy(xfs_iunlink_cache);
2303 out_destroy_attri_cache:
2304 kmem_cache_destroy(xfs_attri_cache);
2305 out_destroy_attrd_cache:
2306 kmem_cache_destroy(xfs_attrd_cache);
2307 out_destroy_bui_cache:
2308 kmem_cache_destroy(xfs_bui_cache);
2309 out_destroy_bud_cache:
2310 kmem_cache_destroy(xfs_bud_cache);
2311 out_destroy_cui_cache:
2312 kmem_cache_destroy(xfs_cui_cache);
2313 out_destroy_cud_cache:
2314 kmem_cache_destroy(xfs_cud_cache);
2315 out_destroy_rui_cache:
2316 kmem_cache_destroy(xfs_rui_cache);
2317 out_destroy_rud_cache:
2318 kmem_cache_destroy(xfs_rud_cache);
2319 out_destroy_icreate_cache:
2320 kmem_cache_destroy(xfs_icreate_cache);
2321 out_destroy_ili_cache:
2322 kmem_cache_destroy(xfs_ili_cache);
2323 out_destroy_inode_cache:
2324 kmem_cache_destroy(xfs_inode_cache);
2325 out_destroy_efi_cache:
2326 kmem_cache_destroy(xfs_efi_cache);
2327 out_destroy_efd_cache:
2328 kmem_cache_destroy(xfs_efd_cache);
2329 out_destroy_buf_item_cache:
2330 kmem_cache_destroy(xfs_buf_item_cache);
2331 out_destroy_trans_cache:
2332 kmem_cache_destroy(xfs_trans_cache);
2333 out_destroy_ifork_cache:
2334 kmem_cache_destroy(xfs_ifork_cache);
2335 out_destroy_da_state_cache:
2336 kmem_cache_destroy(xfs_da_state_cache);
2337 out_destroy_defer_item_cache:
2338 xfs_defer_destroy_item_caches();
2339 out_destroy_rcbagbt_cur_cache:
2340 rcbagbt_destroy_cur_cache();
2341 out_destroy_btree_cur_cache:
2342 xfs_btree_destroy_cur_caches();
2343 out_destroy_log_ticket_cache:
2344 kmem_cache_destroy(xfs_log_ticket_cache);
2345 out_destroy_buf_cache:
2346 kmem_cache_destroy(xfs_buf_cache);
2347 out:
2348 return -ENOMEM;
2349 }
2350
2351 STATIC void
2352 xfs_destroy_caches(void)
2353 {
2354 /*
2355 * Make sure all delayed RCU frees are flushed before we
2356 * destroy the caches.
2357 */
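/*
 * Illustrative sketch (names approximate): inodes, for example, are not
 * freed directly but via an RCU callback, roughly:
 *
 *	call_rcu(&VFS_I(ip)->i_rcu, free_callback);
 *	// ...and later, inside the callback:
 *	kmem_cache_free(xfs_inode_cache, ip);
 *
 * rcu_barrier() drains all such pending callbacks so that nothing is
 * returned to a cache after that cache has been destroyed.
 */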
2358 rcu_barrier();
2359 kmem_cache_destroy(xfs_parent_args_cache);
2360 kmem_cache_destroy(xfs_xmd_cache);
2361 kmem_cache_destroy(xfs_xmi_cache);
2362 kmem_cache_destroy(xfs_iunlink_cache);
2363 kmem_cache_destroy(xfs_attri_cache);
2364 kmem_cache_destroy(xfs_attrd_cache);
2365 kmem_cache_destroy(xfs_bui_cache);
2366 kmem_cache_destroy(xfs_bud_cache);
2367 kmem_cache_destroy(xfs_cui_cache);
2368 kmem_cache_destroy(xfs_cud_cache);
2369 kmem_cache_destroy(xfs_rui_cache);
2370 kmem_cache_destroy(xfs_rud_cache);
2371 kmem_cache_destroy(xfs_icreate_cache);
2372 kmem_cache_destroy(xfs_ili_cache);
2373 kmem_cache_destroy(xfs_inode_cache);
2374 kmem_cache_destroy(xfs_efi_cache);
2375 kmem_cache_destroy(xfs_efd_cache);
2376 kmem_cache_destroy(xfs_buf_item_cache);
2377 kmem_cache_destroy(xfs_trans_cache);
2378 kmem_cache_destroy(xfs_ifork_cache);
2379 kmem_cache_destroy(xfs_da_state_cache);
2380 xfs_defer_destroy_item_caches();
2381 rcbagbt_destroy_cur_cache();
2382 xfs_btree_destroy_cur_caches();
2383 kmem_cache_destroy(xfs_log_ticket_cache);
2384 kmem_cache_destroy(xfs_buf_cache);
2385 }
2386
2387 STATIC int __init
2388 xfs_init_workqueues(void)
2389 {
2390 /*
2391 * The allocation workqueue can be used in memory reclaim situations
2392 * (writepage path), and parallelism is only limited by the number of
2393 * AGs in all the filesystems mounted. Hence use the default large
2394 * max_active value for this workqueue.
2395 */
2396 xfs_alloc_wq = alloc_workqueue("xfsalloc",
2397 XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
2398 if (!xfs_alloc_wq)
2399 return -ENOMEM;
2400
2401 xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
2402 0);
2403 if (!xfs_discard_wq)
2404 goto out_free_alloc_wq;
2405
2406 return 0;
2407 out_free_alloc_wq:
2408 destroy_workqueue(xfs_alloc_wq);
2409 return -ENOMEM;
2410 }
2411
2412 STATIC void
2413 xfs_destroy_workqueues(void)
2414 {
2415 destroy_workqueue(xfs_discard_wq);
2416 destroy_workqueue(xfs_alloc_wq);
2417 }
2418
2419 STATIC int __init
2420 init_xfs_fs(void)
2421 {
2422 int error;
2423
2424 xfs_check_ondisk_structs();
2425
2426 error = xfs_dahash_test();
2427 if (error)
2428 return error;
2429
2430 printk(KERN_INFO XFS_VERSION_STRING " with "
2431 XFS_BUILD_OPTIONS " enabled\n");
2432
2433 xfs_dir_startup();
2434
2435 error = xfs_init_caches();
2436 if (error)
2437 goto out;
2438
2439 error = xfs_init_workqueues();
2440 if (error)
2441 goto out_destroy_caches;
2442
2443 error = xfs_mru_cache_init();
2444 if (error)
2445 goto out_destroy_wq;
2446
2447 error = xfs_init_procfs();
2448 if (error)
2449 goto out_mru_cache_uninit;
2450
2451 error = xfs_sysctl_register();
2452 if (error)
2453 goto out_cleanup_procfs;
2454
2455 xfs_debugfs = xfs_debugfs_mkdir("xfs", NULL);
2456
2457 xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
2458 if (!xfs_kset) {
2459 error = -ENOMEM;
2460 goto out_debugfs_unregister;
2461 }
2462
2463 xfsstats.xs_kobj.kobject.kset = xfs_kset;
2464
2465 xfsstats.xs_stats = alloc_percpu(struct xfsstats);
2466 if (!xfsstats.xs_stats) {
2467 error = -ENOMEM;
2468 goto out_kset_unregister;
2469 }
2470
2471 error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
2472 "stats");
2473 if (error)
2474 goto out_free_stats;
2475
2476 error = xchk_global_stats_setup(xfs_debugfs);
2477 if (error)
2478 goto out_remove_stats_kobj;
2479
2480 #ifdef DEBUG
2481 xfs_dbg_kobj.kobject.kset = xfs_kset;
2482 error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
2483 if (error)
2484 goto out_remove_scrub_stats;
2485 #endif
2486
2487 error = xfs_qm_init();
2488 if (error)
2489 goto out_remove_dbg_kobj;
2490
2491 error = register_filesystem(&xfs_fs_type);
2492 if (error)
2493 goto out_qm_exit;
2494 return 0;
2495
2496 out_qm_exit:
2497 xfs_qm_exit();
2498 out_remove_dbg_kobj:
2499 #ifdef DEBUG
2500 xfs_sysfs_del(&xfs_dbg_kobj);
2501 out_remove_scrub_stats:
2502 #endif
2503 xchk_global_stats_teardown();
2504 out_remove_stats_kobj:
2505 xfs_sysfs_del(&xfsstats.xs_kobj);
2506 out_free_stats:
2507 free_percpu(xfsstats.xs_stats);
2508 out_kset_unregister:
2509 kset_unregister(xfs_kset);
2510 out_debugfs_unregister:
2511 debugfs_remove(xfs_debugfs);
2512 xfs_sysctl_unregister();
2513 out_cleanup_procfs:
2514 xfs_cleanup_procfs();
2515 out_mru_cache_uninit:
2516 xfs_mru_cache_uninit();
2517 out_destroy_wq:
2518 xfs_destroy_workqueues();
2519 out_destroy_caches:
2520 xfs_destroy_caches();
2521 out:
2522 return error;
2523 }
2524
2525 STATIC void __exit
2526 exit_xfs_fs(void)
2527 {
2528 xfs_qm_exit();
2529 unregister_filesystem(&xfs_fs_type);
2530 #ifdef DEBUG
2531 xfs_sysfs_del(&xfs_dbg_kobj);
2532 #endif
2533 xchk_global_stats_teardown();
2534 xfs_sysfs_del(&xfsstats.xs_kobj);
2535 free_percpu(xfsstats.xs_stats);
2536 kset_unregister(xfs_kset);
2537 debugfs_remove(xfs_debugfs);
2538 xfs_sysctl_unregister();
2539 xfs_cleanup_procfs();
2540 xfs_mru_cache_uninit();
2541 xfs_destroy_workqueues();
2542 xfs_destroy_caches();
2543 xfs_uuid_table_free();
2544 }
2545
2546 module_init(init_xfs_fs);
2547 module_exit(exit_xfs_fs);
2548
2549 MODULE_AUTHOR("Silicon Graphics, Inc.");
2550 MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
2551 MODULE_LICENSE("GPL");
2552