1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2000-2006 Silicon Graphics, Inc. 4 * All Rights Reserved. 5 */ 6 7 #include "xfs.h" 8 #include "xfs_shared.h" 9 #include "xfs_format.h" 10 #include "xfs_log_format.h" 11 #include "xfs_trans_resv.h" 12 #include "xfs_sb.h" 13 #include "xfs_mount.h" 14 #include "xfs_inode.h" 15 #include "xfs_btree.h" 16 #include "xfs_bmap.h" 17 #include "xfs_alloc.h" 18 #include "xfs_fsops.h" 19 #include "xfs_trans.h" 20 #include "xfs_buf_item.h" 21 #include "xfs_log.h" 22 #include "xfs_log_priv.h" 23 #include "xfs_dir2.h" 24 #include "xfs_extfree_item.h" 25 #include "xfs_mru_cache.h" 26 #include "xfs_inode_item.h" 27 #include "xfs_icache.h" 28 #include "xfs_trace.h" 29 #include "xfs_icreate_item.h" 30 #include "xfs_filestream.h" 31 #include "xfs_quota.h" 32 #include "xfs_sysfs.h" 33 #include "xfs_ondisk.h" 34 #include "xfs_rmap_item.h" 35 #include "xfs_refcount_item.h" 36 #include "xfs_bmap_item.h" 37 #include "xfs_reflink.h" 38 #include "xfs_pwork.h" 39 #include "xfs_ag.h" 40 #include "xfs_defer.h" 41 #include "xfs_attr_item.h" 42 #include "xfs_xattr.h" 43 #include "xfs_iunlink_item.h" 44 #include "xfs_dahash_test.h" 45 #include "xfs_rtbitmap.h" 46 #include "xfs_exchmaps_item.h" 47 #include "xfs_parent.h" 48 #include "xfs_rtalloc.h" 49 #include "xfs_zone_alloc.h" 50 #include "scrub/stats.h" 51 #include "scrub/rcbag_btree.h" 52 53 #include <linux/magic.h> 54 #include <linux/fs_context.h> 55 #include <linux/fs_parser.h> 56 57 static const struct super_operations xfs_super_operations; 58 59 static struct dentry *xfs_debugfs; /* top-level xfs debugfs dir */ 60 static struct kset *xfs_kset; /* top-level xfs sysfs dir */ 61 #ifdef DEBUG 62 static struct xfs_kobj xfs_dbg_kobj; /* global debug sysfs attrs */ 63 #endif 64 65 enum xfs_dax_mode { 66 XFS_DAX_INODE = 0, 67 XFS_DAX_ALWAYS = 1, 68 XFS_DAX_NEVER = 2, 69 }; 70 71 /* Were quota mount options provided? Must use the upper 16 bits of qflags. */ 72 #define XFS_QFLAGS_MNTOPTS (1U << 31) 73 74 static void 75 xfs_mount_set_dax_mode( 76 struct xfs_mount *mp, 77 enum xfs_dax_mode mode) 78 { 79 switch (mode) { 80 case XFS_DAX_INODE: 81 mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER); 82 break; 83 case XFS_DAX_ALWAYS: 84 mp->m_features |= XFS_FEAT_DAX_ALWAYS; 85 mp->m_features &= ~XFS_FEAT_DAX_NEVER; 86 break; 87 case XFS_DAX_NEVER: 88 mp->m_features |= XFS_FEAT_DAX_NEVER; 89 mp->m_features &= ~XFS_FEAT_DAX_ALWAYS; 90 break; 91 } 92 } 93 94 static const struct constant_table dax_param_enums[] = { 95 {"inode", XFS_DAX_INODE }, 96 {"always", XFS_DAX_ALWAYS }, 97 {"never", XFS_DAX_NEVER }, 98 {} 99 }; 100 101 /* 102 * Table driven mount option parser. 
103 */ 104 enum { 105 Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev, 106 Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid, 107 Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups, 108 Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, 109 Opt_largeio, Opt_nolargeio, 110 Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota, 111 Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota, 112 Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce, 113 Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum, Opt_max_open_zones, 114 Opt_lifetime, Opt_nolifetime, Opt_max_atomic_write, 115 }; 116 117 static const struct fs_parameter_spec xfs_fs_parameters[] = { 118 fsparam_u32("logbufs", Opt_logbufs), 119 fsparam_string("logbsize", Opt_logbsize), 120 fsparam_string("logdev", Opt_logdev), 121 fsparam_string("rtdev", Opt_rtdev), 122 fsparam_flag("wsync", Opt_wsync), 123 fsparam_flag("noalign", Opt_noalign), 124 fsparam_flag("swalloc", Opt_swalloc), 125 fsparam_u32("sunit", Opt_sunit), 126 fsparam_u32("swidth", Opt_swidth), 127 fsparam_flag("nouuid", Opt_nouuid), 128 fsparam_flag("grpid", Opt_grpid), 129 fsparam_flag("nogrpid", Opt_nogrpid), 130 fsparam_flag("bsdgroups", Opt_bsdgroups), 131 fsparam_flag("sysvgroups", Opt_sysvgroups), 132 fsparam_string("allocsize", Opt_allocsize), 133 fsparam_flag("norecovery", Opt_norecovery), 134 fsparam_flag("inode64", Opt_inode64), 135 fsparam_flag("inode32", Opt_inode32), 136 fsparam_flag("largeio", Opt_largeio), 137 fsparam_flag("nolargeio", Opt_nolargeio), 138 fsparam_flag("filestreams", Opt_filestreams), 139 fsparam_flag("quota", Opt_quota), 140 fsparam_flag("noquota", Opt_noquota), 141 fsparam_flag("usrquota", Opt_usrquota), 142 fsparam_flag("grpquota", Opt_grpquota), 143 fsparam_flag("prjquota", Opt_prjquota), 144 fsparam_flag("uquota", Opt_uquota), 145 fsparam_flag("gquota", Opt_gquota), 146 fsparam_flag("pquota", Opt_pquota), 147 fsparam_flag("uqnoenforce", Opt_uqnoenforce), 148 fsparam_flag("gqnoenforce", Opt_gqnoenforce), 149 fsparam_flag("pqnoenforce", Opt_pqnoenforce), 150 fsparam_flag("qnoenforce", Opt_qnoenforce), 151 fsparam_flag("discard", Opt_discard), 152 fsparam_flag("nodiscard", Opt_nodiscard), 153 fsparam_flag("dax", Opt_dax), 154 fsparam_enum("dax", Opt_dax_enum, dax_param_enums), 155 fsparam_u32("max_open_zones", Opt_max_open_zones), 156 fsparam_flag("lifetime", Opt_lifetime), 157 fsparam_flag("nolifetime", Opt_nolifetime), 158 fsparam_string("max_atomic_write", Opt_max_atomic_write), 159 {} 160 }; 161 162 struct proc_xfs_info { 163 uint64_t flag; 164 char *str; 165 }; 166 167 static int 168 xfs_fs_show_options( 169 struct seq_file *m, 170 struct dentry *root) 171 { 172 static struct proc_xfs_info xfs_info_set[] = { 173 /* the few simple ones we can get from the mount struct */ 174 { XFS_FEAT_WSYNC, ",wsync" }, 175 { XFS_FEAT_NOALIGN, ",noalign" }, 176 { XFS_FEAT_SWALLOC, ",swalloc" }, 177 { XFS_FEAT_NOUUID, ",nouuid" }, 178 { XFS_FEAT_NORECOVERY, ",norecovery" }, 179 { XFS_FEAT_FILESTREAMS, ",filestreams" }, 180 { XFS_FEAT_GRPID, ",grpid" }, 181 { XFS_FEAT_DISCARD, ",discard" }, 182 { XFS_FEAT_LARGE_IOSIZE, ",largeio" }, 183 { XFS_FEAT_DAX_ALWAYS, ",dax=always" }, 184 { XFS_FEAT_DAX_NEVER, ",dax=never" }, 185 { XFS_FEAT_NOLIFETIME, ",nolifetime" }, 186 { 0, NULL } 187 }; 188 struct xfs_mount *mp = XFS_M(root->d_sb); 189 struct proc_xfs_info *xfs_infop; 190 191 for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) { 192 if (mp->m_features & xfs_infop->flag) 193 seq_puts(m, xfs_infop->str); 
194 } 195 196 seq_printf(m, ",inode%d", xfs_has_small_inums(mp) ? 32 : 64); 197 198 if (xfs_has_allocsize(mp)) 199 seq_printf(m, ",allocsize=%dk", 200 (1 << mp->m_allocsize_log) >> 10); 201 202 if (mp->m_logbufs > 0) 203 seq_printf(m, ",logbufs=%d", mp->m_logbufs); 204 if (mp->m_logbsize > 0) 205 seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10); 206 207 if (mp->m_logname) 208 seq_show_option(m, "logdev", mp->m_logname); 209 if (mp->m_rtname) 210 seq_show_option(m, "rtdev", mp->m_rtname); 211 212 if (mp->m_dalign > 0) 213 seq_printf(m, ",sunit=%d", 214 (int)XFS_FSB_TO_BB(mp, mp->m_dalign)); 215 if (mp->m_swidth > 0) 216 seq_printf(m, ",swidth=%d", 217 (int)XFS_FSB_TO_BB(mp, mp->m_swidth)); 218 219 if (mp->m_qflags & XFS_UQUOTA_ENFD) 220 seq_puts(m, ",usrquota"); 221 else if (mp->m_qflags & XFS_UQUOTA_ACCT) 222 seq_puts(m, ",uqnoenforce"); 223 224 if (mp->m_qflags & XFS_PQUOTA_ENFD) 225 seq_puts(m, ",prjquota"); 226 else if (mp->m_qflags & XFS_PQUOTA_ACCT) 227 seq_puts(m, ",pqnoenforce"); 228 229 if (mp->m_qflags & XFS_GQUOTA_ENFD) 230 seq_puts(m, ",grpquota"); 231 else if (mp->m_qflags & XFS_GQUOTA_ACCT) 232 seq_puts(m, ",gqnoenforce"); 233 234 if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT)) 235 seq_puts(m, ",noquota"); 236 237 if (mp->m_max_open_zones) 238 seq_printf(m, ",max_open_zones=%u", mp->m_max_open_zones); 239 if (mp->m_awu_max_bytes) 240 seq_printf(m, ",max_atomic_write=%lluk", 241 mp->m_awu_max_bytes >> 10); 242 243 return 0; 244 } 245 246 static bool 247 xfs_set_inode_alloc_perag( 248 struct xfs_perag *pag, 249 xfs_ino_t ino, 250 xfs_agnumber_t max_metadata) 251 { 252 if (!xfs_is_inode32(pag_mount(pag))) { 253 set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate); 254 clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate); 255 return false; 256 } 257 258 if (ino > XFS_MAXINUMBER_32) { 259 clear_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate); 260 clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate); 261 return false; 262 } 263 264 set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate); 265 if (pag_agno(pag) < max_metadata) 266 set_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate); 267 else 268 clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate); 269 return true; 270 } 271 272 /* 273 * Set parameters for inode allocation heuristics, taking into account 274 * filesystem size and inode32/inode64 mount options; i.e. specifically 275 * whether or not XFS_FEAT_SMALL_INUMS is set. 276 * 277 * Inode allocation patterns are altered only if inode32 is requested 278 * (XFS_FEAT_SMALL_INUMS), and the filesystem is sufficiently large. 279 * If altered, XFS_OPSTATE_INODE32 is set as well. 280 * 281 * An agcount independent of that in the mount structure is provided 282 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated 283 * to the potentially higher ag count. 284 * 285 * Returns the maximum AG index which may contain inodes. 286 */ 287 xfs_agnumber_t 288 xfs_set_inode_alloc( 289 struct xfs_mount *mp, 290 xfs_agnumber_t agcount) 291 { 292 xfs_agnumber_t index; 293 xfs_agnumber_t maxagi = 0; 294 xfs_sb_t *sbp = &mp->m_sb; 295 xfs_agnumber_t max_metadata; 296 xfs_agino_t agino; 297 xfs_ino_t ino; 298 299 /* 300 * Calculate how much should be reserved for inodes to meet 301 * the max inode percentage. Used only for inode32. 
	 */
	if (M_IGEO(mp)->maxicount) {
		uint64_t	icount;

		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		icount += sbp->sb_agblocks - 1;
		do_div(icount, sbp->sb_agblocks);
		max_metadata = icount;
	} else {
		max_metadata = agcount;
	}

	/* Get the last possible inode in the filesystem */
	agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/*
	 * If user asked for no more than 32-bit inodes, and the fs is
	 * sufficiently large, set XFS_OPSTATE_INODE32 if we must alter
	 * the allocator to accommodate the request.
	 */
	if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32)
		xfs_set_inode32(mp);
	else
		xfs_clear_inode32(mp);

	for (index = 0; index < agcount; index++) {
		struct xfs_perag	*pag;

		ino = XFS_AGINO_TO_INO(mp, index, agino);

		pag = xfs_perag_get(mp, index);
		if (xfs_set_inode_alloc_perag(pag, ino, max_metadata))
			maxagi++;
		xfs_perag_put(pag);
	}

	return xfs_is_inode32(mp) ? maxagi : agcount;
}

static int
xfs_setup_dax_always(
	struct xfs_mount	*mp)
{
	if (!mp->m_ddev_targp->bt_daxdev &&
	    (!mp->m_rtdev_targp || !mp->m_rtdev_targp->bt_daxdev)) {
		xfs_alert(mp,
			"DAX unsupported by block device. Turning off DAX.");
		goto disable_dax;
	}

	if (mp->m_super->s_blocksize != PAGE_SIZE) {
		xfs_alert(mp,
			"DAX not supported for blocksize. Turning off DAX.");
		goto disable_dax;
	}

	if (xfs_has_reflink(mp) &&
	    bdev_is_partition(mp->m_ddev_targp->bt_bdev)) {
		xfs_alert(mp,
			"DAX and reflink cannot work with multi-partitions!");
		return -EINVAL;
	}

	return 0;

disable_dax:
	xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
	return 0;
}

STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct file		**bdev_filep)
{
	int			error = 0;
	blk_mode_t		mode;

	mode = sb_open_mode(mp->m_super->s_flags);
	*bdev_filep = bdev_file_open_by_path(name, mode,
		mp->m_super, &fs_holder_ops);
	if (IS_ERR(*bdev_filep)) {
		error = PTR_ERR(*bdev_filep);
		*bdev_filep = NULL;
		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
	}

	return error;
}

STATIC void
xfs_shutdown_devices(
	struct xfs_mount	*mp)
{
	/*
	 * Udev is triggered whenever anyone closes a block device or unmounts
	 * a file system on a block device.
	 * The default udev rules invoke blkid to read the fs super and create
	 * symlinks to the bdev under /dev/disk. For this, it uses buffered
	 * reads through the page cache.
	 *
	 * xfs_db also uses buffered reads to examine metadata. There is no
	 * coordination between xfs_db and udev, which means that they can run
	 * concurrently. Note there is no coordination between the kernel and
	 * blkid either.
	 *
	 * On a system with 64k pages, the page cache can cache the superblock
	 * and the root inode (and hence the root directory) with the same 64k
	 * page. If udev spawns blkid after the mkfs and the system is busy
	 * enough that it is still running when xfs_db starts up, they'll both
	 * read from the same page in the pagecache.
	 *
	 * The unmount writes updated inode metadata to disk directly. The XFS
	 * buffer cache does not use the bdev pagecache, so it needs to
	 * invalidate that pagecache on unmount.
If the above scenario occurs, 420 * the pagecache no longer reflects what's on disk, xfs_db reads the 421 * stale metadata, and fails to find /a. Most of the time this succeeds 422 * because closing a bdev invalidates the page cache, but when processes 423 * race, everyone loses. 424 */ 425 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) { 426 blkdev_issue_flush(mp->m_logdev_targp->bt_bdev); 427 invalidate_bdev(mp->m_logdev_targp->bt_bdev); 428 } 429 if (mp->m_rtdev_targp) { 430 blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev); 431 invalidate_bdev(mp->m_rtdev_targp->bt_bdev); 432 } 433 blkdev_issue_flush(mp->m_ddev_targp->bt_bdev); 434 invalidate_bdev(mp->m_ddev_targp->bt_bdev); 435 } 436 437 /* 438 * The file system configurations are: 439 * (1) device (partition) with data and internal log 440 * (2) logical volume with data and log subvolumes. 441 * (3) logical volume with data, log, and realtime subvolumes. 442 * 443 * We only have to handle opening the log and realtime volumes here if 444 * they are present. The data subvolume has already been opened by 445 * get_sb_bdev() and is stored in sb->s_bdev. 446 */ 447 STATIC int 448 xfs_open_devices( 449 struct xfs_mount *mp) 450 { 451 struct super_block *sb = mp->m_super; 452 struct block_device *ddev = sb->s_bdev; 453 struct file *logdev_file = NULL, *rtdev_file = NULL; 454 int error; 455 456 /* 457 * Open real time and log devices - order is important. 458 */ 459 if (mp->m_logname) { 460 error = xfs_blkdev_get(mp, mp->m_logname, &logdev_file); 461 if (error) 462 return error; 463 } 464 465 if (mp->m_rtname) { 466 error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev_file); 467 if (error) 468 goto out_close_logdev; 469 470 if (file_bdev(rtdev_file) == ddev || 471 (logdev_file && 472 file_bdev(rtdev_file) == file_bdev(logdev_file))) { 473 xfs_warn(mp, 474 "Cannot mount filesystem with identical rtdev and ddev/logdev."); 475 error = -EINVAL; 476 goto out_close_rtdev; 477 } 478 } 479 480 /* 481 * Setup xfs_mount buffer target pointers 482 */ 483 mp->m_ddev_targp = xfs_alloc_buftarg(mp, sb->s_bdev_file); 484 if (IS_ERR(mp->m_ddev_targp)) { 485 error = PTR_ERR(mp->m_ddev_targp); 486 mp->m_ddev_targp = NULL; 487 goto out_close_rtdev; 488 } 489 490 if (rtdev_file) { 491 mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev_file); 492 if (IS_ERR(mp->m_rtdev_targp)) { 493 error = PTR_ERR(mp->m_rtdev_targp); 494 mp->m_rtdev_targp = NULL; 495 goto out_free_ddev_targ; 496 } 497 } 498 499 if (logdev_file && file_bdev(logdev_file) != ddev) { 500 mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev_file); 501 if (IS_ERR(mp->m_logdev_targp)) { 502 error = PTR_ERR(mp->m_logdev_targp); 503 mp->m_logdev_targp = NULL; 504 goto out_free_rtdev_targ; 505 } 506 } else { 507 mp->m_logdev_targp = mp->m_ddev_targp; 508 /* Handle won't be used, drop it */ 509 if (logdev_file) 510 bdev_fput(logdev_file); 511 } 512 513 return 0; 514 515 out_free_rtdev_targ: 516 if (mp->m_rtdev_targp) 517 xfs_free_buftarg(mp->m_rtdev_targp); 518 out_free_ddev_targ: 519 xfs_free_buftarg(mp->m_ddev_targp); 520 out_close_rtdev: 521 if (rtdev_file) 522 bdev_fput(rtdev_file); 523 out_close_logdev: 524 if (logdev_file) 525 bdev_fput(logdev_file); 526 return error; 527 } 528 529 /* 530 * Setup xfs_mount buffer target pointers based on superblock 531 */ 532 STATIC int 533 xfs_setup_devices( 534 struct xfs_mount *mp) 535 { 536 int error; 537 538 error = xfs_configure_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize, 539 mp->m_sb.sb_dblocks); 540 if (error) 541 return error; 542 543 if 
(mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) { 544 unsigned int log_sector_size = BBSIZE; 545 546 if (xfs_has_sector(mp)) 547 log_sector_size = mp->m_sb.sb_logsectsize; 548 error = xfs_configure_buftarg(mp->m_logdev_targp, 549 log_sector_size, mp->m_sb.sb_logblocks); 550 if (error) 551 return error; 552 } 553 554 if (mp->m_sb.sb_rtstart) { 555 if (mp->m_rtdev_targp) { 556 xfs_warn(mp, 557 "can't use internal and external rtdev at the same time"); 558 return -EINVAL; 559 } 560 mp->m_rtdev_targp = mp->m_ddev_targp; 561 } else if (mp->m_rtname) { 562 error = xfs_configure_buftarg(mp->m_rtdev_targp, 563 mp->m_sb.sb_sectsize, mp->m_sb.sb_rblocks); 564 if (error) 565 return error; 566 } 567 568 return 0; 569 } 570 571 STATIC int 572 xfs_init_mount_workqueues( 573 struct xfs_mount *mp) 574 { 575 mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s", 576 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU), 577 1, mp->m_super->s_id); 578 if (!mp->m_buf_workqueue) 579 goto out; 580 581 mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s", 582 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU), 583 0, mp->m_super->s_id); 584 if (!mp->m_unwritten_workqueue) 585 goto out_destroy_buf; 586 587 mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s", 588 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU), 589 0, mp->m_super->s_id); 590 if (!mp->m_reclaim_workqueue) 591 goto out_destroy_unwritten; 592 593 mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s", 594 XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM), 595 0, mp->m_super->s_id); 596 if (!mp->m_blockgc_wq) 597 goto out_destroy_reclaim; 598 599 mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s", 600 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU), 601 1, mp->m_super->s_id); 602 if (!mp->m_inodegc_wq) 603 goto out_destroy_blockgc; 604 605 mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", 606 XFS_WQFLAGS(WQ_FREEZABLE | WQ_PERCPU), 0, 607 mp->m_super->s_id); 608 if (!mp->m_sync_workqueue) 609 goto out_destroy_inodegc; 610 611 return 0; 612 613 out_destroy_inodegc: 614 destroy_workqueue(mp->m_inodegc_wq); 615 out_destroy_blockgc: 616 destroy_workqueue(mp->m_blockgc_wq); 617 out_destroy_reclaim: 618 destroy_workqueue(mp->m_reclaim_workqueue); 619 out_destroy_unwritten: 620 destroy_workqueue(mp->m_unwritten_workqueue); 621 out_destroy_buf: 622 destroy_workqueue(mp->m_buf_workqueue); 623 out: 624 return -ENOMEM; 625 } 626 627 STATIC void 628 xfs_destroy_mount_workqueues( 629 struct xfs_mount *mp) 630 { 631 destroy_workqueue(mp->m_sync_workqueue); 632 destroy_workqueue(mp->m_blockgc_wq); 633 destroy_workqueue(mp->m_inodegc_wq); 634 destroy_workqueue(mp->m_reclaim_workqueue); 635 destroy_workqueue(mp->m_unwritten_workqueue); 636 destroy_workqueue(mp->m_buf_workqueue); 637 } 638 639 static void 640 xfs_flush_inodes_worker( 641 struct work_struct *work) 642 { 643 struct xfs_mount *mp = container_of(work, struct xfs_mount, 644 m_flush_inodes_work); 645 struct super_block *sb = mp->m_super; 646 647 if (down_read_trylock(&sb->s_umount)) { 648 sync_inodes_sb(sb); 649 up_read(&sb->s_umount); 650 } 651 } 652 653 /* 654 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK 655 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting 656 * for IO to complete so that we effectively throttle multiple callers to the 657 * rate at which IO is completing. 
 */
void
xfs_flush_inodes(
	struct xfs_mount	*mp)
{
	/*
	 * If flush_work() returns true then that means we waited for a flush
	 * which was already in progress. Don't bother running another scan.
	 */
	if (flush_work(&mp->m_flush_inodes_work))
		return;

	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
	flush_work(&mp->m_flush_inodes_work);
}

/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	XFS_STATS_INC(ip->i_mount, vn_rele);
	XFS_STATS_INC(ip->i_mount, vn_remove);
	xfs_inode_mark_reclaimable(ip);
}

static void
xfs_fs_dirty_inode(
	struct inode		*inode,
	int			flags)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;

	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
		return;

	/*
	 * Only do the timestamp update if the inode is dirty (I_DIRTY_SYNC)
	 * and has dirty timestamp (I_DIRTY_TIME). I_DIRTY_TIME can be passed
	 * in flags possibly together with I_DIRTY_SYNC.
	 */
	if ((flags & ~I_DIRTY_TIME) != I_DIRTY_SYNC || !(flags & I_DIRTY_TIME))
		return;

	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
		return;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
	xfs_trans_commit(tp);
}

/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);
	init_rwsem(&ip->i_lock);
}

/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every inode
 * we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	/*
	 * If this unlinked inode is in the middle of recovery, don't
	 * drop the inode just yet; log recovery will take care of
	 * that. See the comment for this inode flag.
771 */ 772 if (ip->i_flags & XFS_IRECOVERY) { 773 ASSERT(xlog_recovery_needed(ip->i_mount->m_log)); 774 return 0; 775 } 776 777 return inode_generic_drop(inode); 778 } 779 780 STATIC void 781 xfs_fs_evict_inode( 782 struct inode *inode) 783 { 784 if (IS_DAX(inode)) 785 dax_break_layout_final(inode); 786 787 truncate_inode_pages_final(&inode->i_data); 788 clear_inode(inode); 789 } 790 791 static void 792 xfs_mount_free( 793 struct xfs_mount *mp) 794 { 795 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) 796 xfs_free_buftarg(mp->m_logdev_targp); 797 if (mp->m_rtdev_targp && mp->m_rtdev_targp != mp->m_ddev_targp) 798 xfs_free_buftarg(mp->m_rtdev_targp); 799 if (mp->m_ddev_targp) 800 xfs_free_buftarg(mp->m_ddev_targp); 801 802 debugfs_remove(mp->m_debugfs); 803 kfree(mp->m_rtname); 804 kfree(mp->m_logname); 805 kfree(mp); 806 } 807 808 STATIC int 809 xfs_fs_sync_fs( 810 struct super_block *sb, 811 int wait) 812 { 813 struct xfs_mount *mp = XFS_M(sb); 814 int error; 815 816 trace_xfs_fs_sync_fs(mp, __return_address); 817 818 /* 819 * Doing anything during the async pass would be counterproductive. 820 */ 821 if (!wait) 822 return 0; 823 824 error = xfs_log_force(mp, XFS_LOG_SYNC); 825 if (error) 826 return error; 827 828 if (laptop_mode) { 829 /* 830 * The disk must be active because we're syncing. 831 * We schedule log work now (now that the disk is 832 * active) instead of later (when it might not be). 833 */ 834 flush_delayed_work(&mp->m_log->l_work); 835 } 836 837 /* 838 * If we are called with page faults frozen out, it means we are about 839 * to freeze the transaction subsystem. Take the opportunity to shut 840 * down inodegc because once SB_FREEZE_FS is set it's too late to 841 * prevent inactivation races with freeze. The fs doesn't get called 842 * again by the freezing process until after SB_FREEZE_FS has been set, 843 * so it's now or never. Same logic applies to speculative allocation 844 * garbage collection. 845 * 846 * We don't care if this is a normal syncfs call that does this or 847 * freeze that does this - we can run this multiple times without issue 848 * and we won't race with a restart because a restart can only occur 849 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE. 850 */ 851 if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) { 852 xfs_inodegc_stop(mp); 853 xfs_blockgc_stop(mp); 854 xfs_zone_gc_stop(mp); 855 } 856 857 return 0; 858 } 859 860 static xfs_extlen_t 861 xfs_internal_log_size( 862 struct xfs_mount *mp) 863 { 864 if (!mp->m_sb.sb_logstart) 865 return 0; 866 return mp->m_sb.sb_logblocks; 867 } 868 869 static void 870 xfs_statfs_data( 871 struct xfs_mount *mp, 872 struct kstatfs *st) 873 { 874 int64_t fdblocks = 875 xfs_sum_freecounter(mp, XC_FREE_BLOCKS); 876 877 /* make sure st->f_bfree does not underflow */ 878 st->f_bfree = max(0LL, 879 fdblocks - xfs_freecounter_unavailable(mp, XC_FREE_BLOCKS)); 880 881 /* 882 * sb_dblocks can change during growfs, but nothing cares about reporting 883 * the old or new value during growfs. 884 */ 885 st->f_blocks = mp->m_sb.sb_dblocks - xfs_internal_log_size(mp); 886 } 887 888 /* 889 * When stat(v)fs is called on a file with the realtime bit set or a directory 890 * with the rtinherit bit, report freespace information for the RT device 891 * instead of the main data device. 
892 */ 893 static void 894 xfs_statfs_rt( 895 struct xfs_mount *mp, 896 struct kstatfs *st) 897 { 898 st->f_bfree = xfs_rtbxlen_to_blen(mp, 899 xfs_sum_freecounter(mp, XC_FREE_RTEXTENTS)); 900 st->f_blocks = mp->m_sb.sb_rblocks - xfs_rtbxlen_to_blen(mp, 901 mp->m_free[XC_FREE_RTEXTENTS].res_total); 902 } 903 904 static void 905 xfs_statfs_inodes( 906 struct xfs_mount *mp, 907 struct kstatfs *st) 908 { 909 uint64_t icount = percpu_counter_sum(&mp->m_icount); 910 uint64_t ifree = percpu_counter_sum(&mp->m_ifree); 911 uint64_t fakeinos = XFS_FSB_TO_INO(mp, st->f_bfree); 912 913 st->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER); 914 if (M_IGEO(mp)->maxicount) 915 st->f_files = min_t(typeof(st->f_files), st->f_files, 916 M_IGEO(mp)->maxicount); 917 918 /* If sb_icount overshot maxicount, report actual allocation */ 919 st->f_files = max_t(typeof(st->f_files), st->f_files, 920 mp->m_sb.sb_icount); 921 922 /* Make sure st->f_ffree does not underflow */ 923 st->f_ffree = max_t(int64_t, 0, st->f_files - (icount - ifree)); 924 } 925 926 STATIC int 927 xfs_fs_statfs( 928 struct dentry *dentry, 929 struct kstatfs *st) 930 { 931 struct xfs_mount *mp = XFS_M(dentry->d_sb); 932 struct xfs_inode *ip = XFS_I(d_inode(dentry)); 933 934 /* 935 * Expedite background inodegc but don't wait. We do not want to block 936 * here waiting hours for a billion extent file to be truncated. 937 */ 938 xfs_inodegc_push(mp); 939 940 st->f_type = XFS_SUPER_MAGIC; 941 st->f_namelen = MAXNAMELEN - 1; 942 st->f_bsize = mp->m_sb.sb_blocksize; 943 st->f_fsid = u64_to_fsid(huge_encode_dev(mp->m_ddev_targp->bt_dev)); 944 945 xfs_statfs_data(mp, st); 946 xfs_statfs_inodes(mp, st); 947 948 if (XFS_IS_REALTIME_MOUNT(mp) && 949 (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) 950 xfs_statfs_rt(mp, st); 951 952 if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) && 953 ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) == 954 (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD)) 955 xfs_qm_statvfs(ip, st); 956 957 /* 958 * XFS does not distinguish between blocks available to privileged and 959 * unprivileged users. 960 */ 961 st->f_bavail = st->f_bfree; 962 return 0; 963 } 964 965 STATIC void 966 xfs_save_resvblks( 967 struct xfs_mount *mp) 968 { 969 enum xfs_free_counter i; 970 971 for (i = 0; i < XC_FREE_NR; i++) { 972 mp->m_free[i].res_saved = mp->m_free[i].res_total; 973 xfs_reserve_blocks(mp, i, 0); 974 } 975 } 976 977 STATIC void 978 xfs_restore_resvblks( 979 struct xfs_mount *mp) 980 { 981 uint64_t resblks; 982 enum xfs_free_counter i; 983 984 for (i = 0; i < XC_FREE_NR; i++) { 985 if (mp->m_free[i].res_saved) { 986 resblks = mp->m_free[i].res_saved; 987 mp->m_free[i].res_saved = 0; 988 } else 989 resblks = xfs_default_resblks(mp, i); 990 xfs_reserve_blocks(mp, i, resblks); 991 } 992 } 993 994 /* 995 * Second stage of a freeze. The data is already frozen so we only 996 * need to take care of the metadata. Once that's done sync the superblock 997 * to the log to dirty it in case of a crash while frozen. This ensures that we 998 * will recover the unlinked inode lists on the next mount. 999 */ 1000 STATIC int 1001 xfs_fs_freeze( 1002 struct super_block *sb) 1003 { 1004 struct xfs_mount *mp = XFS_M(sb); 1005 unsigned int flags; 1006 int ret; 1007 1008 /* 1009 * The filesystem is now frozen far enough that memory reclaim 1010 * cannot safely operate on the filesystem. Hence we need to 1011 * set a GFP_NOFS context here to avoid recursion deadlocks. 
1012 */ 1013 flags = memalloc_nofs_save(); 1014 xfs_save_resvblks(mp); 1015 ret = xfs_log_quiesce(mp); 1016 memalloc_nofs_restore(flags); 1017 1018 /* 1019 * For read-write filesystems, we need to restart the inodegc on error 1020 * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not 1021 * going to be run to restart it now. We are at SB_FREEZE_FS level 1022 * here, so we can restart safely without racing with a stop in 1023 * xfs_fs_sync_fs(). 1024 */ 1025 if (ret && !xfs_is_readonly(mp)) { 1026 xfs_blockgc_start(mp); 1027 xfs_inodegc_start(mp); 1028 xfs_zone_gc_start(mp); 1029 } 1030 1031 return ret; 1032 } 1033 1034 STATIC int 1035 xfs_fs_unfreeze( 1036 struct super_block *sb) 1037 { 1038 struct xfs_mount *mp = XFS_M(sb); 1039 1040 xfs_restore_resvblks(mp); 1041 xfs_log_work_queue(mp); 1042 1043 /* 1044 * Don't reactivate the inodegc worker on a readonly filesystem because 1045 * inodes are sent directly to reclaim. Don't reactivate the blockgc 1046 * worker because there are no speculative preallocations on a readonly 1047 * filesystem. 1048 */ 1049 if (!xfs_is_readonly(mp)) { 1050 xfs_zone_gc_start(mp); 1051 xfs_blockgc_start(mp); 1052 xfs_inodegc_start(mp); 1053 } 1054 1055 return 0; 1056 } 1057 1058 /* 1059 * This function fills in xfs_mount_t fields based on mount args. 1060 * Note: the superblock _has_ now been read in. 1061 */ 1062 STATIC int 1063 xfs_finish_flags( 1064 struct xfs_mount *mp) 1065 { 1066 /* Fail a mount where the logbuf is smaller than the log stripe */ 1067 if (xfs_has_logv2(mp)) { 1068 if (mp->m_logbsize <= 0 && 1069 mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) { 1070 mp->m_logbsize = mp->m_sb.sb_logsunit; 1071 } else if (mp->m_logbsize > 0 && 1072 mp->m_logbsize < mp->m_sb.sb_logsunit) { 1073 xfs_warn(mp, 1074 "logbuf size must be greater than or equal to log stripe size"); 1075 return -EINVAL; 1076 } 1077 } else { 1078 /* Fail a mount if the logbuf is larger than 32K */ 1079 if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) { 1080 xfs_warn(mp, 1081 "logbuf size for version 1 logs must be 16K or 32K"); 1082 return -EINVAL; 1083 } 1084 } 1085 1086 /* 1087 * prohibit r/w mounts of read-only filesystems 1088 */ 1089 if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !xfs_is_readonly(mp)) { 1090 xfs_warn(mp, 1091 "cannot mount a read-only filesystem as read-write"); 1092 return -EROFS; 1093 } 1094 1095 if ((mp->m_qflags & XFS_GQUOTA_ACCT) && 1096 (mp->m_qflags & XFS_PQUOTA_ACCT) && 1097 !xfs_has_pquotino(mp)) { 1098 xfs_warn(mp, 1099 "Super block does not support project and group quota together"); 1100 return -EINVAL; 1101 } 1102 1103 if (!xfs_has_zoned(mp)) { 1104 if (mp->m_max_open_zones) { 1105 xfs_warn(mp, 1106 "max_open_zones mount option only supported on zoned file systems."); 1107 return -EINVAL; 1108 } 1109 if (mp->m_features & XFS_FEAT_NOLIFETIME) { 1110 xfs_warn(mp, 1111 "nolifetime mount option only supported on zoned file systems."); 1112 return -EINVAL; 1113 } 1114 } 1115 1116 return 0; 1117 } 1118 1119 static int 1120 xfs_init_percpu_counters( 1121 struct xfs_mount *mp) 1122 { 1123 int error; 1124 int i; 1125 1126 error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL); 1127 if (error) 1128 return -ENOMEM; 1129 1130 error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL); 1131 if (error) 1132 goto free_icount; 1133 1134 error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL); 1135 if (error) 1136 goto free_ifree; 1137 1138 error = percpu_counter_init(&mp->m_delalloc_rtextents, 0, GFP_KERNEL); 1139 if (error) 1140 goto 
free_delalloc; 1141 1142 for (i = 0; i < XC_FREE_NR; i++) { 1143 error = percpu_counter_init(&mp->m_free[i].count, 0, 1144 GFP_KERNEL); 1145 if (error) 1146 goto free_freecounters; 1147 } 1148 1149 return 0; 1150 1151 free_freecounters: 1152 while (--i >= 0) 1153 percpu_counter_destroy(&mp->m_free[i].count); 1154 percpu_counter_destroy(&mp->m_delalloc_rtextents); 1155 free_delalloc: 1156 percpu_counter_destroy(&mp->m_delalloc_blks); 1157 free_ifree: 1158 percpu_counter_destroy(&mp->m_ifree); 1159 free_icount: 1160 percpu_counter_destroy(&mp->m_icount); 1161 return -ENOMEM; 1162 } 1163 1164 void 1165 xfs_reinit_percpu_counters( 1166 struct xfs_mount *mp) 1167 { 1168 percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount); 1169 percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree); 1170 xfs_set_freecounter(mp, XC_FREE_BLOCKS, mp->m_sb.sb_fdblocks); 1171 if (!xfs_has_zoned(mp)) 1172 xfs_set_freecounter(mp, XC_FREE_RTEXTENTS, 1173 mp->m_sb.sb_frextents); 1174 } 1175 1176 static void 1177 xfs_destroy_percpu_counters( 1178 struct xfs_mount *mp) 1179 { 1180 enum xfs_free_counter i; 1181 1182 for (i = 0; i < XC_FREE_NR; i++) 1183 percpu_counter_destroy(&mp->m_free[i].count); 1184 percpu_counter_destroy(&mp->m_icount); 1185 percpu_counter_destroy(&mp->m_ifree); 1186 ASSERT(xfs_is_shutdown(mp) || 1187 percpu_counter_sum(&mp->m_delalloc_rtextents) == 0); 1188 percpu_counter_destroy(&mp->m_delalloc_rtextents); 1189 ASSERT(xfs_is_shutdown(mp) || 1190 percpu_counter_sum(&mp->m_delalloc_blks) == 0); 1191 percpu_counter_destroy(&mp->m_delalloc_blks); 1192 } 1193 1194 static int 1195 xfs_inodegc_init_percpu( 1196 struct xfs_mount *mp) 1197 { 1198 struct xfs_inodegc *gc; 1199 int cpu; 1200 1201 mp->m_inodegc = alloc_percpu(struct xfs_inodegc); 1202 if (!mp->m_inodegc) 1203 return -ENOMEM; 1204 1205 for_each_possible_cpu(cpu) { 1206 gc = per_cpu_ptr(mp->m_inodegc, cpu); 1207 gc->cpu = cpu; 1208 gc->mp = mp; 1209 init_llist_head(&gc->list); 1210 gc->items = 0; 1211 gc->error = 0; 1212 INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker); 1213 } 1214 return 0; 1215 } 1216 1217 static void 1218 xfs_inodegc_free_percpu( 1219 struct xfs_mount *mp) 1220 { 1221 if (!mp->m_inodegc) 1222 return; 1223 free_percpu(mp->m_inodegc); 1224 } 1225 1226 static void 1227 xfs_fs_put_super( 1228 struct super_block *sb) 1229 { 1230 struct xfs_mount *mp = XFS_M(sb); 1231 1232 xfs_notice(mp, "Unmounting Filesystem %pU", &mp->m_sb.sb_uuid); 1233 xfs_filestream_unmount(mp); 1234 xfs_unmountfs(mp); 1235 1236 xfs_rtmount_freesb(mp); 1237 xfs_freesb(mp); 1238 xchk_mount_stats_free(mp); 1239 free_percpu(mp->m_stats.xs_stats); 1240 xfs_inodegc_free_percpu(mp); 1241 xfs_destroy_percpu_counters(mp); 1242 xfs_destroy_mount_workqueues(mp); 1243 xfs_shutdown_devices(mp); 1244 } 1245 1246 static long 1247 xfs_fs_nr_cached_objects( 1248 struct super_block *sb, 1249 struct shrink_control *sc) 1250 { 1251 /* Paranoia: catch incorrect calls during mount setup or teardown */ 1252 if (WARN_ON_ONCE(!sb->s_fs_info)) 1253 return 0; 1254 return xfs_reclaim_inodes_count(XFS_M(sb)); 1255 } 1256 1257 static long 1258 xfs_fs_free_cached_objects( 1259 struct super_block *sb, 1260 struct shrink_control *sc) 1261 { 1262 return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan); 1263 } 1264 1265 static void 1266 xfs_fs_shutdown( 1267 struct super_block *sb) 1268 { 1269 xfs_force_shutdown(XFS_M(sb), SHUTDOWN_DEVICE_REMOVED); 1270 } 1271 1272 static int 1273 xfs_fs_show_stats( 1274 struct seq_file *m, 1275 struct dentry *root) 1276 { 1277 struct xfs_mount *mp = 
XFS_M(root->d_sb); 1278 1279 if (xfs_has_zoned(mp) && IS_ENABLED(CONFIG_XFS_RT)) 1280 xfs_zoned_show_stats(m, mp); 1281 return 0; 1282 } 1283 1284 static const struct super_operations xfs_super_operations = { 1285 .alloc_inode = xfs_fs_alloc_inode, 1286 .destroy_inode = xfs_fs_destroy_inode, 1287 .dirty_inode = xfs_fs_dirty_inode, 1288 .drop_inode = xfs_fs_drop_inode, 1289 .evict_inode = xfs_fs_evict_inode, 1290 .put_super = xfs_fs_put_super, 1291 .sync_fs = xfs_fs_sync_fs, 1292 .freeze_fs = xfs_fs_freeze, 1293 .unfreeze_fs = xfs_fs_unfreeze, 1294 .statfs = xfs_fs_statfs, 1295 .show_options = xfs_fs_show_options, 1296 .nr_cached_objects = xfs_fs_nr_cached_objects, 1297 .free_cached_objects = xfs_fs_free_cached_objects, 1298 .shutdown = xfs_fs_shutdown, 1299 .show_stats = xfs_fs_show_stats, 1300 }; 1301 1302 static int 1303 suffix_kstrtoint( 1304 const char *s, 1305 unsigned int base, 1306 int *res) 1307 { 1308 int last, shift_left_factor = 0, _res; 1309 char *value; 1310 int ret = 0; 1311 1312 value = kstrdup(s, GFP_KERNEL); 1313 if (!value) 1314 return -ENOMEM; 1315 1316 last = strlen(value) - 1; 1317 if (value[last] == 'K' || value[last] == 'k') { 1318 shift_left_factor = 10; 1319 value[last] = '\0'; 1320 } 1321 if (value[last] == 'M' || value[last] == 'm') { 1322 shift_left_factor = 20; 1323 value[last] = '\0'; 1324 } 1325 if (value[last] == 'G' || value[last] == 'g') { 1326 shift_left_factor = 30; 1327 value[last] = '\0'; 1328 } 1329 1330 if (kstrtoint(value, base, &_res)) 1331 ret = -EINVAL; 1332 kfree(value); 1333 *res = _res << shift_left_factor; 1334 return ret; 1335 } 1336 1337 static int 1338 suffix_kstrtoull( 1339 const char *s, 1340 unsigned int base, 1341 unsigned long long *res) 1342 { 1343 int last, shift_left_factor = 0; 1344 unsigned long long _res; 1345 char *value; 1346 int ret = 0; 1347 1348 value = kstrdup(s, GFP_KERNEL); 1349 if (!value) 1350 return -ENOMEM; 1351 1352 last = strlen(value) - 1; 1353 if (value[last] == 'K' || value[last] == 'k') { 1354 shift_left_factor = 10; 1355 value[last] = '\0'; 1356 } 1357 if (value[last] == 'M' || value[last] == 'm') { 1358 shift_left_factor = 20; 1359 value[last] = '\0'; 1360 } 1361 if (value[last] == 'G' || value[last] == 'g') { 1362 shift_left_factor = 30; 1363 value[last] = '\0'; 1364 } 1365 1366 if (kstrtoull(value, base, &_res)) 1367 ret = -EINVAL; 1368 kfree(value); 1369 *res = _res << shift_left_factor; 1370 return ret; 1371 } 1372 1373 static inline void 1374 xfs_fs_warn_deprecated( 1375 struct fs_context *fc, 1376 struct fs_parameter *param, 1377 uint64_t flag, 1378 bool value) 1379 { 1380 /* Don't print the warning if reconfiguring and current mount point 1381 * already had the flag set 1382 */ 1383 if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) && 1384 !!(XFS_M(fc->root->d_sb)->m_features & flag) == value) 1385 return; 1386 xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key); 1387 } 1388 1389 /* 1390 * Set mount state from a mount option. 1391 * 1392 * NOTE: mp->m_super is NULL here! 
1393 */ 1394 static int 1395 xfs_fs_parse_param( 1396 struct fs_context *fc, 1397 struct fs_parameter *param) 1398 { 1399 struct xfs_mount *parsing_mp = fc->s_fs_info; 1400 struct fs_parse_result result; 1401 int size = 0; 1402 int opt; 1403 1404 BUILD_BUG_ON(XFS_QFLAGS_MNTOPTS & XFS_MOUNT_QUOTA_ALL); 1405 1406 opt = fs_parse(fc, xfs_fs_parameters, param, &result); 1407 if (opt < 0) 1408 return opt; 1409 1410 switch (opt) { 1411 case Opt_logbufs: 1412 parsing_mp->m_logbufs = result.uint_32; 1413 return 0; 1414 case Opt_logbsize: 1415 if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize)) 1416 return -EINVAL; 1417 return 0; 1418 case Opt_logdev: 1419 kfree(parsing_mp->m_logname); 1420 parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL); 1421 if (!parsing_mp->m_logname) 1422 return -ENOMEM; 1423 return 0; 1424 case Opt_rtdev: 1425 kfree(parsing_mp->m_rtname); 1426 parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL); 1427 if (!parsing_mp->m_rtname) 1428 return -ENOMEM; 1429 return 0; 1430 case Opt_allocsize: 1431 if (suffix_kstrtoint(param->string, 10, &size)) 1432 return -EINVAL; 1433 parsing_mp->m_allocsize_log = ffs(size) - 1; 1434 parsing_mp->m_features |= XFS_FEAT_ALLOCSIZE; 1435 return 0; 1436 case Opt_grpid: 1437 case Opt_bsdgroups: 1438 parsing_mp->m_features |= XFS_FEAT_GRPID; 1439 return 0; 1440 case Opt_nogrpid: 1441 case Opt_sysvgroups: 1442 parsing_mp->m_features &= ~XFS_FEAT_GRPID; 1443 return 0; 1444 case Opt_wsync: 1445 parsing_mp->m_features |= XFS_FEAT_WSYNC; 1446 return 0; 1447 case Opt_norecovery: 1448 parsing_mp->m_features |= XFS_FEAT_NORECOVERY; 1449 return 0; 1450 case Opt_noalign: 1451 parsing_mp->m_features |= XFS_FEAT_NOALIGN; 1452 return 0; 1453 case Opt_swalloc: 1454 parsing_mp->m_features |= XFS_FEAT_SWALLOC; 1455 return 0; 1456 case Opt_sunit: 1457 parsing_mp->m_dalign = result.uint_32; 1458 return 0; 1459 case Opt_swidth: 1460 parsing_mp->m_swidth = result.uint_32; 1461 return 0; 1462 case Opt_inode32: 1463 parsing_mp->m_features |= XFS_FEAT_SMALL_INUMS; 1464 return 0; 1465 case Opt_inode64: 1466 parsing_mp->m_features &= ~XFS_FEAT_SMALL_INUMS; 1467 return 0; 1468 case Opt_nouuid: 1469 parsing_mp->m_features |= XFS_FEAT_NOUUID; 1470 return 0; 1471 case Opt_largeio: 1472 parsing_mp->m_features |= XFS_FEAT_LARGE_IOSIZE; 1473 return 0; 1474 case Opt_nolargeio: 1475 parsing_mp->m_features &= ~XFS_FEAT_LARGE_IOSIZE; 1476 return 0; 1477 case Opt_filestreams: 1478 parsing_mp->m_features |= XFS_FEAT_FILESTREAMS; 1479 return 0; 1480 case Opt_noquota: 1481 parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT; 1482 parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD; 1483 parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS; 1484 return 0; 1485 case Opt_quota: 1486 case Opt_uquota: 1487 case Opt_usrquota: 1488 parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD); 1489 parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS; 1490 return 0; 1491 case Opt_qnoenforce: 1492 case Opt_uqnoenforce: 1493 parsing_mp->m_qflags |= XFS_UQUOTA_ACCT; 1494 parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD; 1495 parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS; 1496 return 0; 1497 case Opt_pquota: 1498 case Opt_prjquota: 1499 parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD); 1500 parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS; 1501 return 0; 1502 case Opt_pqnoenforce: 1503 parsing_mp->m_qflags |= XFS_PQUOTA_ACCT; 1504 parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD; 1505 parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS; 1506 return 0; 1507 case Opt_gquota: 1508 case Opt_grpquota: 1509 parsing_mp->m_qflags |= 
			(XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
		return 0;
	case Opt_gqnoenforce:
		parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
		return 0;
	case Opt_discard:
		parsing_mp->m_features |= XFS_FEAT_DISCARD;
		return 0;
	case Opt_nodiscard:
		parsing_mp->m_features &= ~XFS_FEAT_DISCARD;
		return 0;
#ifdef CONFIG_FS_DAX
	case Opt_dax:
		xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
		return 0;
	case Opt_dax_enum:
		xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
		return 0;
#endif
	case Opt_max_open_zones:
		parsing_mp->m_max_open_zones = result.uint_32;
		return 0;
	case Opt_lifetime:
		parsing_mp->m_features &= ~XFS_FEAT_NOLIFETIME;
		return 0;
	case Opt_nolifetime:
		parsing_mp->m_features |= XFS_FEAT_NOLIFETIME;
		return 0;
	case Opt_max_atomic_write:
		if (suffix_kstrtoull(param->string, 10,
				&parsing_mp->m_awu_max_bytes)) {
			xfs_warn(parsing_mp,
				"max atomic write size must be positive integer");
			return -EINVAL;
		}
		return 0;
	default:
		xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
		return -EINVAL;
	}

	return 0;
}

static int
xfs_fs_validate_params(
	struct xfs_mount	*mp)
{
	/* No recovery flag requires a read-only mount */
	if (xfs_has_norecovery(mp) && !xfs_is_readonly(mp)) {
		xfs_warn(mp, "no-recovery mounts must be read-only.");
		return -EINVAL;
	}

	if (xfs_has_noalign(mp) && (mp->m_dalign || mp->m_swidth)) {
		xfs_warn(mp,
	"sunit and swidth options incompatible with the noalign option");
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_XFS_QUOTA) &&
	    (mp->m_qflags & ~XFS_QFLAGS_MNTOPTS)) {
		xfs_warn(mp, "quota support not available in this kernel.");
		return -EINVAL;
	}

	if ((mp->m_dalign && !mp->m_swidth) ||
	    (!mp->m_dalign && mp->m_swidth)) {
		xfs_warn(mp, "sunit and swidth must be specified together");
		return -EINVAL;
	}

	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
		xfs_warn(mp,
	"stripe width (%d) must be a multiple of the stripe unit (%d)",
			mp->m_swidth, mp->m_dalign);
		return -EINVAL;
	}

	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return -EINVAL;
	}

	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize != 0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		xfs_warn(mp,
			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
			mp->m_logbsize);
		return -EINVAL;
	}

	if (xfs_has_allocsize(mp) &&
	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
		return -EINVAL;
	}

	return 0;
}

struct dentry *
xfs_debugfs_mkdir(
	const char	*name,
	struct dentry	*parent)
{
	struct dentry	*child;

	/* Apparently we're expected to ignore error returns?? */
	child = debugfs_create_dir(name, parent);
	if (IS_ERR(child))
		return NULL;

	return child;
}

static int
xfs_fs_fill_super(
	struct super_block	*sb,
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = sb->s_fs_info;
	struct inode		*root;
	int			flags = 0, error;

	mp->m_super = sb;

	/*
	 * Copy VFS mount flags from the context now that all parameter parsing
	 * is guaranteed to have been completed by either the old mount API or
	 * the newer fsopen/fsconfig API.
	 */
	if (fc->sb_flags & SB_RDONLY)
		xfs_set_readonly(mp);
	if (fc->sb_flags & SB_DIRSYNC)
		mp->m_features |= XFS_FEAT_DIRSYNC;
	if (fc->sb_flags & SB_SYNCHRONOUS)
		mp->m_features |= XFS_FEAT_WSYNC;

	error = xfs_fs_validate_params(mp);
	if (error)
		return error;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	sb->s_op = &xfs_super_operations;

	/*
	 * Delay mount work if the debug hook is set. This is debug
	 * instrumentation to coordinate simulation of xfs mount failures with
	 * VFS superblock operations.
	 */
	if (xfs_globals.mount_delay) {
		xfs_notice(mp, "Delaying mount for %d seconds.",
			xfs_globals.mount_delay);
		msleep(xfs_globals.mount_delay * 1000);
	}

	if (fc->sb_flags & SB_SILENT)
		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
	if (error)
		return error;

	if (xfs_debugfs) {
		mp->m_debugfs = xfs_debugfs_mkdir(mp->m_super->s_id,
						  xfs_debugfs);
	} else {
		mp->m_debugfs = NULL;
	}

	error = xfs_init_mount_workqueues(mp);
	if (error)
		goto out_shutdown_devices;

	error = xfs_init_percpu_counters(mp);
	if (error)
		goto out_destroy_workqueues;

	error = xfs_inodegc_init_percpu(mp);
	if (error)
		goto out_destroy_counters;

	/* Allocate stats memory before we do operations that might use it */
	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
	if (!mp->m_stats.xs_stats) {
		error = -ENOMEM;
		goto out_destroy_inodegc;
	}

	error = xchk_mount_stats_alloc(mp);
	if (error)
		goto out_free_stats;

	error = xfs_readsb(mp, flags);
	if (error)
		goto out_free_scrub_stats;

	error = xfs_finish_flags(mp);
	if (error)
		goto out_free_sb;

	error = xfs_setup_devices(mp);
	if (error)
		goto out_free_sb;

	/*
	 * V4 support is undergoing deprecation.
	 *
	 * Note: this has to use an open coded m_features check as xfs_has_crc
	 * always returns false for !CONFIG_XFS_SUPPORT_V4.
	 */
	if (!(mp->m_features & XFS_FEAT_CRC)) {
		if (!IS_ENABLED(CONFIG_XFS_SUPPORT_V4)) {
			xfs_warn(mp,
	"Deprecated V4 format (crc=0) not supported by kernel.");
			error = -EINVAL;
			goto out_free_sb;
		}
		xfs_warn_once(mp,
	"Deprecated V4 format (crc=0) will not be supported after September 2030.");
	}

	/* ASCII case insensitivity is undergoing deprecation.
*/ 1752 if (xfs_has_asciici(mp)) { 1753 #ifdef CONFIG_XFS_SUPPORT_ASCII_CI 1754 xfs_warn_once(mp, 1755 "Deprecated ASCII case-insensitivity feature (ascii-ci=1) will not be supported after September 2030."); 1756 #else 1757 xfs_warn(mp, 1758 "Deprecated ASCII case-insensitivity feature (ascii-ci=1) not supported by kernel."); 1759 error = -EINVAL; 1760 goto out_free_sb; 1761 #endif 1762 } 1763 1764 /* 1765 * Filesystem claims it needs repair, so refuse the mount unless 1766 * norecovery is also specified, in which case the filesystem can 1767 * be mounted with no risk of further damage. 1768 */ 1769 if (xfs_has_needsrepair(mp) && !xfs_has_norecovery(mp)) { 1770 xfs_warn(mp, "Filesystem needs repair. Please run xfs_repair."); 1771 error = -EFSCORRUPTED; 1772 goto out_free_sb; 1773 } 1774 1775 /* 1776 * Don't touch the filesystem if a user tool thinks it owns the primary 1777 * superblock. mkfs doesn't clear the flag from secondary supers, so 1778 * we don't check them at all. 1779 */ 1780 if (mp->m_sb.sb_inprogress) { 1781 xfs_warn(mp, "Offline file system operation in progress!"); 1782 error = -EFSCORRUPTED; 1783 goto out_free_sb; 1784 } 1785 1786 if (mp->m_sb.sb_blocksize > PAGE_SIZE) { 1787 size_t max_folio_size = mapping_max_folio_size_supported(); 1788 1789 if (!xfs_has_crc(mp)) { 1790 xfs_warn(mp, 1791 "V4 Filesystem with blocksize %d bytes. Only pagesize (%ld) or less is supported.", 1792 mp->m_sb.sb_blocksize, PAGE_SIZE); 1793 error = -ENOSYS; 1794 goto out_free_sb; 1795 } 1796 1797 if (mp->m_sb.sb_blocksize > max_folio_size) { 1798 xfs_warn(mp, 1799 "block size (%u bytes) not supported; Only block size (%zu) or less is supported", 1800 mp->m_sb.sb_blocksize, max_folio_size); 1801 error = -ENOSYS; 1802 goto out_free_sb; 1803 } 1804 1805 xfs_warn_experimental(mp, XFS_EXPERIMENTAL_LBS); 1806 } 1807 1808 /* Ensure this filesystem fits in the page cache limits */ 1809 if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) || 1810 xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) { 1811 xfs_warn(mp, 1812 "file system too large to be mounted on this system."); 1813 error = -EFBIG; 1814 goto out_free_sb; 1815 } 1816 1817 /* 1818 * XFS block mappings use 54 bits to store the logical block offset. 1819 * This should suffice to handle the maximum file size that the VFS 1820 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT 1821 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes 1822 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON 1823 * to check this assertion. 1824 * 1825 * Avoid integer overflow by comparing the maximum bmbt offset to the 1826 * maximum pagecache offset in units of fs blocks. 1827 */ 1828 if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) { 1829 xfs_warn(mp, 1830 "MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!", 1831 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE), 1832 XFS_MAX_FILEOFF); 1833 error = -EINVAL; 1834 goto out_free_sb; 1835 } 1836 1837 error = xfs_rtmount_readsb(mp); 1838 if (error) 1839 goto out_free_sb; 1840 1841 error = xfs_filestream_mount(mp); 1842 if (error) 1843 goto out_free_rtsb; 1844 1845 /* 1846 * we must configure the block size in the superblock before we run the 1847 * full mount process as the mount process can lookup and cache inodes. 
1848 */ 1849 sb->s_magic = XFS_SUPER_MAGIC; 1850 sb->s_blocksize = mp->m_sb.sb_blocksize; 1851 sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1; 1852 sb->s_maxbytes = MAX_LFS_FILESIZE; 1853 sb->s_max_links = XFS_MAXLINK; 1854 sb->s_time_gran = 1; 1855 if (xfs_has_bigtime(mp)) { 1856 sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN); 1857 sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX); 1858 } else { 1859 sb->s_time_min = XFS_LEGACY_TIME_MIN; 1860 sb->s_time_max = XFS_LEGACY_TIME_MAX; 1861 } 1862 trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max); 1863 sb->s_iflags |= SB_I_CGROUPWB | SB_I_ALLOW_HSM; 1864 1865 set_posix_acl_flag(sb); 1866 1867 /* version 5 superblocks support inode version counters. */ 1868 if (xfs_has_crc(mp)) 1869 sb->s_flags |= SB_I_VERSION; 1870 1871 if (xfs_has_dax_always(mp)) { 1872 error = xfs_setup_dax_always(mp); 1873 if (error) 1874 goto out_filestream_unmount; 1875 } 1876 1877 if (xfs_has_discard(mp) && !bdev_max_discard_sectors(sb->s_bdev)) { 1878 xfs_warn(mp, 1879 "mounting with \"discard\" option, but the device does not support discard"); 1880 mp->m_features &= ~XFS_FEAT_DISCARD; 1881 } 1882 1883 if (xfs_has_zoned(mp)) { 1884 if (!xfs_has_metadir(mp)) { 1885 xfs_alert(mp, 1886 "metadir feature required for zoned realtime devices."); 1887 error = -EINVAL; 1888 goto out_filestream_unmount; 1889 } 1890 xfs_warn_experimental(mp, XFS_EXPERIMENTAL_ZONED); 1891 } else if (xfs_has_metadir(mp)) { 1892 xfs_warn_experimental(mp, XFS_EXPERIMENTAL_METADIR); 1893 } 1894 1895 if (xfs_has_reflink(mp)) { 1896 if (xfs_has_realtime(mp) && 1897 !xfs_reflink_supports_rextsize(mp, mp->m_sb.sb_rextsize)) { 1898 xfs_alert(mp, 1899 "reflink not compatible with realtime extent size %u!", 1900 mp->m_sb.sb_rextsize); 1901 error = -EINVAL; 1902 goto out_filestream_unmount; 1903 } 1904 1905 if (xfs_has_zoned(mp)) { 1906 xfs_alert(mp, 1907 "reflink not compatible with zoned RT device!"); 1908 error = -EINVAL; 1909 goto out_filestream_unmount; 1910 } 1911 1912 if (xfs_globals.always_cow) { 1913 xfs_info(mp, "using DEBUG-only always_cow mode."); 1914 mp->m_always_cow = true; 1915 } 1916 } 1917 1918 /* 1919 * If no quota mount options were provided, maybe we'll try to pick 1920 * up the quota accounting and enforcement flags from the ondisk sb. 
1921 */ 1922 if (!(mp->m_qflags & XFS_QFLAGS_MNTOPTS)) 1923 xfs_set_resuming_quotaon(mp); 1924 mp->m_qflags &= ~XFS_QFLAGS_MNTOPTS; 1925 1926 error = xfs_mountfs(mp); 1927 if (error) 1928 goto out_filestream_unmount; 1929 1930 root = igrab(VFS_I(mp->m_rootip)); 1931 if (!root) { 1932 error = -ENOENT; 1933 goto out_unmount; 1934 } 1935 sb->s_root = d_make_root(root); 1936 if (!sb->s_root) { 1937 error = -ENOMEM; 1938 goto out_unmount; 1939 } 1940 1941 return 0; 1942 1943 out_filestream_unmount: 1944 xfs_filestream_unmount(mp); 1945 out_free_rtsb: 1946 xfs_rtmount_freesb(mp); 1947 out_free_sb: 1948 xfs_freesb(mp); 1949 out_free_scrub_stats: 1950 xchk_mount_stats_free(mp); 1951 out_free_stats: 1952 free_percpu(mp->m_stats.xs_stats); 1953 out_destroy_inodegc: 1954 xfs_inodegc_free_percpu(mp); 1955 out_destroy_counters: 1956 xfs_destroy_percpu_counters(mp); 1957 out_destroy_workqueues: 1958 xfs_destroy_mount_workqueues(mp); 1959 out_shutdown_devices: 1960 xfs_shutdown_devices(mp); 1961 return error; 1962 1963 out_unmount: 1964 xfs_filestream_unmount(mp); 1965 xfs_unmountfs(mp); 1966 goto out_free_rtsb; 1967 } 1968 1969 static int 1970 xfs_fs_get_tree( 1971 struct fs_context *fc) 1972 { 1973 return get_tree_bdev(fc, xfs_fs_fill_super); 1974 } 1975 1976 static int 1977 xfs_remount_rw( 1978 struct xfs_mount *mp) 1979 { 1980 struct xfs_sb *sbp = &mp->m_sb; 1981 int error; 1982 1983 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp && 1984 xfs_readonly_buftarg(mp->m_logdev_targp)) { 1985 xfs_warn(mp, 1986 "ro->rw transition prohibited by read-only logdev"); 1987 return -EACCES; 1988 } 1989 1990 if (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp)) { 1991 xfs_warn(mp, 1992 "ro->rw transition prohibited by read-only rtdev"); 1993 return -EACCES; 1994 } 1995 1996 if (xfs_has_norecovery(mp)) { 1997 xfs_warn(mp, 1998 "ro->rw transition prohibited on norecovery mount"); 1999 return -EINVAL; 2000 } 2001 2002 if (xfs_sb_is_v5(sbp) && 2003 xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) { 2004 xfs_warn(mp, 2005 "ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem", 2006 (sbp->sb_features_ro_compat & 2007 XFS_SB_FEAT_RO_COMPAT_UNKNOWN)); 2008 return -EINVAL; 2009 } 2010 2011 xfs_clear_readonly(mp); 2012 2013 /* 2014 * If this is the first remount to writeable state we might have some 2015 * superblock changes to update. 2016 */ 2017 if (mp->m_update_sb) { 2018 error = xfs_sync_sb(mp, false); 2019 if (error) { 2020 xfs_warn(mp, "failed to write sb changes"); 2021 return error; 2022 } 2023 mp->m_update_sb = false; 2024 } 2025 2026 /* 2027 * Fill out the reserve pool if it is empty. Use the stashed value if 2028 * it is non-zero, otherwise go with the default. 2029 */ 2030 xfs_restore_resvblks(mp); 2031 xfs_log_work_queue(mp); 2032 xfs_blockgc_start(mp); 2033 2034 /* Create the per-AG metadata reservation pool .*/ 2035 error = xfs_fs_reserve_ag_blocks(mp); 2036 if (error && error != -ENOSPC) 2037 return error; 2038 2039 /* Re-enable the background inode inactivation worker. */ 2040 xfs_inodegc_start(mp); 2041 2042 /* Restart zone reclaim */ 2043 xfs_zone_gc_start(mp); 2044 2045 return 0; 2046 } 2047 2048 static int 2049 xfs_remount_ro( 2050 struct xfs_mount *mp) 2051 { 2052 struct xfs_icwalk icw = { 2053 .icw_flags = XFS_ICWALK_FLAG_SYNC, 2054 }; 2055 int error; 2056 2057 /* Flush all the dirty data to disk. 
	error = sync_filesystem(mp->m_super);
	if (error)
		return error;

	/*
	 * Cancel background eofb scanning so it cannot race with the final
	 * log force+buftarg wait and deadlock the remount.
	 */
	xfs_blockgc_stop(mp);

	/*
	 * Clear out all remaining COW staging extents and speculative post-EOF
	 * preallocations so that we don't leave inodes requiring inactivation
	 * cleanups during reclaim on a read-only mount. We must process every
	 * cached inode, so this requires a synchronous cache scan.
	 */
	error = xfs_blockgc_free_space(mp, &icw);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/*
	 * Stop the inodegc background worker. xfs_fs_reconfigure already
	 * flushed all pending inodegc work when it sync'd the filesystem.
	 * The VFS holds s_umount, so we know that inodes cannot enter
	 * xfs_fs_destroy_inode during a remount operation. In readonly mode
	 * we send inodes straight to reclaim, so no inodes will be queued.
	 */
	xfs_inodegc_stop(mp);

	/* Stop zone reclaim */
	xfs_zone_gc_stop(mp);

	/* Free the per-AG metadata reservation pool. */
	xfs_fs_unreserve_ag_blocks(mp);

	/*
	 * Before we sync the metadata, we need to free up the reserve block
	 * pool so that the used block count in the superblock on disk is
	 * correct at the end of the remount. Stash the current reserve pool
	 * size so that if we get remounted rw, we can return it to the same
	 * size.
	 */
	xfs_save_resvblks(mp);

	xfs_log_clean(mp);
	xfs_set_readonly(mp);

	return 0;
}

/*
 * Logically we would return an error here to prevent users from believing
 * they might have changed mount options using remount which can't be changed.
 *
 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
 * arguments in some cases so we can't blindly reject options, but have to
 * check for each specified option if it actually differs from the currently
 * set option and only reject it if that's the case.
 *
 * Until that is implemented we return success for every remount request, and
 * silently ignore all options that we can't actually change.
 */
static int
xfs_fs_reconfigure(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
	struct xfs_mount	*new_mp = fc->s_fs_info;
	int			flags = fc->sb_flags;
	int			error;

	new_mp->m_qflags &= ~XFS_QFLAGS_MNTOPTS;

	/* version 5 superblocks always support version counters. */
	if (xfs_has_crc(mp))
		fc->sb_flags |= SB_I_VERSION;

	error = xfs_fs_validate_params(new_mp);
	if (error)
		return error;

	/* Validate new max_atomic_write option before making other changes */
	if (mp->m_awu_max_bytes != new_mp->m_awu_max_bytes) {
		error = xfs_set_max_atomic_write_opt(mp,
				new_mp->m_awu_max_bytes);
		if (error)
			return error;
	}

	/* inode32 -> inode64 */
	if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) {
		mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
	}

	/* inode64 -> inode32 */
	if (!xfs_has_small_inums(mp) && xfs_has_small_inums(new_mp)) {
		mp->m_features |= XFS_FEAT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
	}

	/*
	 * Now that mp has been modified according to the remount options, we
	 * do a final option validation with xfs_finish_flags() just like it is
	 * done during mount. We cannot use xfs_finish_flags() on new_mp as it
	 * contains only the user given options.
	 */
	error = xfs_finish_flags(mp);
	if (error)
		return error;

	/* ro -> rw */
	if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) {
		error = xfs_remount_rw(mp);
		if (error)
			return error;
	}

	/* rw -> ro */
	if (!xfs_is_readonly(mp) && (flags & SB_RDONLY)) {
		error = xfs_remount_ro(mp);
		if (error)
			return error;
	}

	return 0;
}

static void
xfs_fs_free(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = fc->s_fs_info;

	/*
	 * mp is stored in the fs_context when it is initialized.
	 * mp is transferred to the superblock on a successful mount,
	 * but if an error occurs before the transfer we have to free
	 * it here.
	 */
	if (mp)
		xfs_mount_free(mp);
}

static const struct fs_context_operations xfs_context_ops = {
	.parse_param	= xfs_fs_parse_param,
	.get_tree	= xfs_fs_get_tree,
	.reconfigure	= xfs_fs_reconfigure,
	.free		= xfs_fs_free,
};

/*
 * WARNING: do not initialise any parameters in this function that depend on
 * mount option parsing having already been performed as this can be called
 * from fsopen() before any parameters have been set.
 */
static int
xfs_init_fs_context(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp;
	int			i;

	mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL | __GFP_NOFAIL);
	if (!mp)
		return -ENOMEM;

	spin_lock_init(&mp->m_sb_lock);
	for (i = 0; i < XG_TYPE_MAX; i++)
		xa_init(&mp->m_groups[i].xa);
	mutex_init(&mp->m_growlock);
	mutex_init(&mp->m_metafile_resv_lock);
	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
	mp->m_kobj.kobject.kset = xfs_kset;
	/*
	 * We don't create the finobt per-ag space reservation until after log
	 * recovery, so we must set this to true so that an ifree transaction
	 * started during log recovery will not depend on space reservations
	 * for finobt expansion.
	 */
	mp->m_finobt_nores = true;

	/*
	 * These can be overridden by the mount option parsing.
	 */
	mp->m_logbufs = -1;
	mp->m_logbsize = -1;
	mp->m_allocsize_log = 16; /* 64k */

	xfs_hooks_init(&mp->m_dir_update_hooks);

	fc->s_fs_info = mp;
	fc->ops = &xfs_context_ops;

	return 0;
}

static void
xfs_kill_sb(
	struct super_block	*sb)
{
	kill_block_super(sb);
	xfs_mount_free(XFS_M(sb));
}

static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.init_fs_context	= xfs_init_fs_context,
	.parameters		= xfs_fs_parameters,
	.kill_sb		= xfs_kill_sb,
	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP | FS_MGTIME |
				  FS_LBS,
};
MODULE_ALIAS_FS("xfs");

STATIC int __init
xfs_init_caches(void)
{
	int		error;

	xfs_buf_cache = kmem_cache_create("xfs_buf", sizeof(struct xfs_buf), 0,
					  SLAB_HWCACHE_ALIGN |
					  SLAB_RECLAIM_ACCOUNT,
					  NULL);
	if (!xfs_buf_cache)
		goto out;

	xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket",
						sizeof(struct xlog_ticket),
						0, 0, NULL);
	if (!xfs_log_ticket_cache)
		goto out_destroy_buf_cache;

	error = xfs_btree_init_cur_caches();
	if (error)
		goto out_destroy_log_ticket_cache;

	error = rcbagbt_init_cur_cache();
	if (error)
		goto out_destroy_btree_cur_cache;

	error = xfs_defer_init_item_caches();
	if (error)
		goto out_destroy_rcbagbt_cur_cache;

	xfs_da_state_cache = kmem_cache_create("xfs_da_state",
					       sizeof(struct xfs_da_state),
					       0, 0, NULL);
	if (!xfs_da_state_cache)
		goto out_destroy_defer_item_cache;

	xfs_ifork_cache = kmem_cache_create("xfs_ifork",
					   sizeof(struct xfs_ifork),
					   0, 0, NULL);
	if (!xfs_ifork_cache)
		goto out_destroy_da_state_cache;

	xfs_trans_cache = kmem_cache_create("xfs_trans",
					   sizeof(struct xfs_trans),
					   0, 0, NULL);
	if (!xfs_trans_cache)
		goto out_destroy_ifork_cache;

	/*
	 * The size of the cache-allocated buf log item is the maximum
	 * size possible under XFS. This wastes a little bit of memory,
	 * but it is much faster.
	 */
2331 */ 2332 xfs_buf_item_cache = kmem_cache_create("xfs_buf_item", 2333 sizeof(struct xfs_buf_log_item), 2334 0, 0, NULL); 2335 if (!xfs_buf_item_cache) 2336 goto out_destroy_trans_cache; 2337 2338 xfs_efd_cache = kmem_cache_create("xfs_efd_item", 2339 xfs_efd_log_item_sizeof(XFS_EFD_MAX_FAST_EXTENTS), 2340 0, 0, NULL); 2341 if (!xfs_efd_cache) 2342 goto out_destroy_buf_item_cache; 2343 2344 xfs_efi_cache = kmem_cache_create("xfs_efi_item", 2345 xfs_efi_log_item_sizeof(XFS_EFI_MAX_FAST_EXTENTS), 2346 0, 0, NULL); 2347 if (!xfs_efi_cache) 2348 goto out_destroy_efd_cache; 2349 2350 xfs_inode_cache = kmem_cache_create("xfs_inode", 2351 sizeof(struct xfs_inode), 0, 2352 (SLAB_HWCACHE_ALIGN | 2353 SLAB_RECLAIM_ACCOUNT | 2354 SLAB_ACCOUNT), 2355 xfs_fs_inode_init_once); 2356 if (!xfs_inode_cache) 2357 goto out_destroy_efi_cache; 2358 2359 xfs_ili_cache = kmem_cache_create("xfs_ili", 2360 sizeof(struct xfs_inode_log_item), 0, 2361 SLAB_RECLAIM_ACCOUNT, 2362 NULL); 2363 if (!xfs_ili_cache) 2364 goto out_destroy_inode_cache; 2365 2366 xfs_icreate_cache = kmem_cache_create("xfs_icr", 2367 sizeof(struct xfs_icreate_item), 2368 0, 0, NULL); 2369 if (!xfs_icreate_cache) 2370 goto out_destroy_ili_cache; 2371 2372 xfs_rud_cache = kmem_cache_create("xfs_rud_item", 2373 sizeof(struct xfs_rud_log_item), 2374 0, 0, NULL); 2375 if (!xfs_rud_cache) 2376 goto out_destroy_icreate_cache; 2377 2378 xfs_rui_cache = kmem_cache_create("xfs_rui_item", 2379 xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS), 2380 0, 0, NULL); 2381 if (!xfs_rui_cache) 2382 goto out_destroy_rud_cache; 2383 2384 xfs_cud_cache = kmem_cache_create("xfs_cud_item", 2385 sizeof(struct xfs_cud_log_item), 2386 0, 0, NULL); 2387 if (!xfs_cud_cache) 2388 goto out_destroy_rui_cache; 2389 2390 xfs_cui_cache = kmem_cache_create("xfs_cui_item", 2391 xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS), 2392 0, 0, NULL); 2393 if (!xfs_cui_cache) 2394 goto out_destroy_cud_cache; 2395 2396 xfs_bud_cache = kmem_cache_create("xfs_bud_item", 2397 sizeof(struct xfs_bud_log_item), 2398 0, 0, NULL); 2399 if (!xfs_bud_cache) 2400 goto out_destroy_cui_cache; 2401 2402 xfs_bui_cache = kmem_cache_create("xfs_bui_item", 2403 xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS), 2404 0, 0, NULL); 2405 if (!xfs_bui_cache) 2406 goto out_destroy_bud_cache; 2407 2408 xfs_attrd_cache = kmem_cache_create("xfs_attrd_item", 2409 sizeof(struct xfs_attrd_log_item), 2410 0, 0, NULL); 2411 if (!xfs_attrd_cache) 2412 goto out_destroy_bui_cache; 2413 2414 xfs_attri_cache = kmem_cache_create("xfs_attri_item", 2415 sizeof(struct xfs_attri_log_item), 2416 0, 0, NULL); 2417 if (!xfs_attri_cache) 2418 goto out_destroy_attrd_cache; 2419 2420 xfs_iunlink_cache = kmem_cache_create("xfs_iul_item", 2421 sizeof(struct xfs_iunlink_item), 2422 0, 0, NULL); 2423 if (!xfs_iunlink_cache) 2424 goto out_destroy_attri_cache; 2425 2426 xfs_xmd_cache = kmem_cache_create("xfs_xmd_item", 2427 sizeof(struct xfs_xmd_log_item), 2428 0, 0, NULL); 2429 if (!xfs_xmd_cache) 2430 goto out_destroy_iul_cache; 2431 2432 xfs_xmi_cache = kmem_cache_create("xfs_xmi_item", 2433 sizeof(struct xfs_xmi_log_item), 2434 0, 0, NULL); 2435 if (!xfs_xmi_cache) 2436 goto out_destroy_xmd_cache; 2437 2438 xfs_parent_args_cache = kmem_cache_create("xfs_parent_args", 2439 sizeof(struct xfs_parent_args), 2440 0, 0, NULL); 2441 if (!xfs_parent_args_cache) 2442 goto out_destroy_xmi_cache; 2443 2444 return 0; 2445 2446 out_destroy_xmi_cache: 2447 kmem_cache_destroy(xfs_xmi_cache); 2448 out_destroy_xmd_cache: 2449 
	kmem_cache_destroy(xfs_xmd_cache);
 out_destroy_iul_cache:
	kmem_cache_destroy(xfs_iunlink_cache);
 out_destroy_attri_cache:
	kmem_cache_destroy(xfs_attri_cache);
 out_destroy_attrd_cache:
	kmem_cache_destroy(xfs_attrd_cache);
 out_destroy_bui_cache:
	kmem_cache_destroy(xfs_bui_cache);
 out_destroy_bud_cache:
	kmem_cache_destroy(xfs_bud_cache);
 out_destroy_cui_cache:
	kmem_cache_destroy(xfs_cui_cache);
 out_destroy_cud_cache:
	kmem_cache_destroy(xfs_cud_cache);
 out_destroy_rui_cache:
	kmem_cache_destroy(xfs_rui_cache);
 out_destroy_rud_cache:
	kmem_cache_destroy(xfs_rud_cache);
 out_destroy_icreate_cache:
	kmem_cache_destroy(xfs_icreate_cache);
 out_destroy_ili_cache:
	kmem_cache_destroy(xfs_ili_cache);
 out_destroy_inode_cache:
	kmem_cache_destroy(xfs_inode_cache);
 out_destroy_efi_cache:
	kmem_cache_destroy(xfs_efi_cache);
 out_destroy_efd_cache:
	kmem_cache_destroy(xfs_efd_cache);
 out_destroy_buf_item_cache:
	kmem_cache_destroy(xfs_buf_item_cache);
 out_destroy_trans_cache:
	kmem_cache_destroy(xfs_trans_cache);
 out_destroy_ifork_cache:
	kmem_cache_destroy(xfs_ifork_cache);
 out_destroy_da_state_cache:
	kmem_cache_destroy(xfs_da_state_cache);
 out_destroy_defer_item_cache:
	xfs_defer_destroy_item_caches();
 out_destroy_rcbagbt_cur_cache:
	rcbagbt_destroy_cur_cache();
 out_destroy_btree_cur_cache:
	xfs_btree_destroy_cur_caches();
 out_destroy_log_ticket_cache:
	kmem_cache_destroy(xfs_log_ticket_cache);
 out_destroy_buf_cache:
	kmem_cache_destroy(xfs_buf_cache);
 out:
	return -ENOMEM;
}

STATIC void
xfs_destroy_caches(void)
{
	/*
	 * Make sure all delayed RCU frees are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(xfs_parent_args_cache);
	kmem_cache_destroy(xfs_xmd_cache);
	kmem_cache_destroy(xfs_xmi_cache);
	kmem_cache_destroy(xfs_iunlink_cache);
	kmem_cache_destroy(xfs_attri_cache);
	kmem_cache_destroy(xfs_attrd_cache);
	kmem_cache_destroy(xfs_bui_cache);
	kmem_cache_destroy(xfs_bud_cache);
	kmem_cache_destroy(xfs_cui_cache);
	kmem_cache_destroy(xfs_cud_cache);
	kmem_cache_destroy(xfs_rui_cache);
	kmem_cache_destroy(xfs_rud_cache);
	kmem_cache_destroy(xfs_icreate_cache);
	kmem_cache_destroy(xfs_ili_cache);
	kmem_cache_destroy(xfs_inode_cache);
	kmem_cache_destroy(xfs_efi_cache);
	kmem_cache_destroy(xfs_efd_cache);
	kmem_cache_destroy(xfs_buf_item_cache);
	kmem_cache_destroy(xfs_trans_cache);
	kmem_cache_destroy(xfs_ifork_cache);
	kmem_cache_destroy(xfs_da_state_cache);
	xfs_defer_destroy_item_caches();
	rcbagbt_destroy_cur_cache();
	xfs_btree_destroy_cur_caches();
	kmem_cache_destroy(xfs_log_ticket_cache);
	kmem_cache_destroy(xfs_buf_cache);
}

STATIC int __init
xfs_init_workqueues(void)
{
	/*
	 * The allocation workqueue can be used in memory reclaim situations
	 * (writepage path), and parallelism is only limited by the number of
	 * AGs in all the filesystems mounted. Hence use the default large
	 * max_active value for this workqueue.
	 */
2544 */ 2545 xfs_alloc_wq = alloc_workqueue("xfsalloc", XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_PERCPU), 2546 0); 2547 if (!xfs_alloc_wq) 2548 return -ENOMEM; 2549 2550 xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND), 2551 0); 2552 if (!xfs_discard_wq) 2553 goto out_free_alloc_wq; 2554 2555 return 0; 2556 out_free_alloc_wq: 2557 destroy_workqueue(xfs_alloc_wq); 2558 return -ENOMEM; 2559 } 2560 2561 STATIC void 2562 xfs_destroy_workqueues(void) 2563 { 2564 destroy_workqueue(xfs_discard_wq); 2565 destroy_workqueue(xfs_alloc_wq); 2566 } 2567 2568 STATIC int __init 2569 init_xfs_fs(void) 2570 { 2571 int error; 2572 2573 xfs_check_ondisk_structs(); 2574 2575 error = xfs_dahash_test(); 2576 if (error) 2577 return error; 2578 2579 printk(KERN_INFO XFS_VERSION_STRING " with " 2580 XFS_BUILD_OPTIONS " enabled\n"); 2581 2582 xfs_dir_startup(); 2583 2584 error = xfs_init_caches(); 2585 if (error) 2586 goto out; 2587 2588 error = xfs_init_workqueues(); 2589 if (error) 2590 goto out_destroy_caches; 2591 2592 error = xfs_mru_cache_init(); 2593 if (error) 2594 goto out_destroy_wq; 2595 2596 error = xfs_init_procfs(); 2597 if (error) 2598 goto out_mru_cache_uninit; 2599 2600 error = xfs_sysctl_register(); 2601 if (error) 2602 goto out_cleanup_procfs; 2603 2604 xfs_debugfs = xfs_debugfs_mkdir("xfs", NULL); 2605 2606 xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj); 2607 if (!xfs_kset) { 2608 error = -ENOMEM; 2609 goto out_debugfs_unregister; 2610 } 2611 2612 xfsstats.xs_kobj.kobject.kset = xfs_kset; 2613 2614 xfsstats.xs_stats = alloc_percpu(struct xfsstats); 2615 if (!xfsstats.xs_stats) { 2616 error = -ENOMEM; 2617 goto out_kset_unregister; 2618 } 2619 2620 error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL, 2621 "stats"); 2622 if (error) 2623 goto out_free_stats; 2624 2625 error = xchk_global_stats_setup(xfs_debugfs); 2626 if (error) 2627 goto out_remove_stats_kobj; 2628 2629 #ifdef DEBUG 2630 xfs_dbg_kobj.kobject.kset = xfs_kset; 2631 error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug"); 2632 if (error) 2633 goto out_remove_scrub_stats; 2634 #endif 2635 2636 error = xfs_qm_init(); 2637 if (error) 2638 goto out_remove_dbg_kobj; 2639 2640 error = register_filesystem(&xfs_fs_type); 2641 if (error) 2642 goto out_qm_exit; 2643 return 0; 2644 2645 out_qm_exit: 2646 xfs_qm_exit(); 2647 out_remove_dbg_kobj: 2648 #ifdef DEBUG 2649 xfs_sysfs_del(&xfs_dbg_kobj); 2650 out_remove_scrub_stats: 2651 #endif 2652 xchk_global_stats_teardown(); 2653 out_remove_stats_kobj: 2654 xfs_sysfs_del(&xfsstats.xs_kobj); 2655 out_free_stats: 2656 free_percpu(xfsstats.xs_stats); 2657 out_kset_unregister: 2658 kset_unregister(xfs_kset); 2659 out_debugfs_unregister: 2660 debugfs_remove(xfs_debugfs); 2661 xfs_sysctl_unregister(); 2662 out_cleanup_procfs: 2663 xfs_cleanup_procfs(); 2664 out_mru_cache_uninit: 2665 xfs_mru_cache_uninit(); 2666 out_destroy_wq: 2667 xfs_destroy_workqueues(); 2668 out_destroy_caches: 2669 xfs_destroy_caches(); 2670 out: 2671 return error; 2672 } 2673 2674 STATIC void __exit 2675 exit_xfs_fs(void) 2676 { 2677 xfs_qm_exit(); 2678 unregister_filesystem(&xfs_fs_type); 2679 #ifdef DEBUG 2680 xfs_sysfs_del(&xfs_dbg_kobj); 2681 #endif 2682 xchk_global_stats_teardown(); 2683 xfs_sysfs_del(&xfsstats.xs_kobj); 2684 free_percpu(xfsstats.xs_stats); 2685 kset_unregister(xfs_kset); 2686 debugfs_remove(xfs_debugfs); 2687 xfs_sysctl_unregister(); 2688 xfs_cleanup_procfs(); 2689 xfs_mru_cache_uninit(); 2690 xfs_destroy_workqueues(); 2691 xfs_destroy_caches(); 
	xfs_uuid_table_free();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");
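
/*
 * Illustrative userspace sketch, not part of this module: how the new mount
 * API drives the fs_context operations wired up above. fsopen("xfs") lands
 * in xfs_init_fs_context(); each fsconfig() key is offered to
 * xfs_fs_parse_param() (generic keys such as "source" are consumed by the
 * VFS); FSCONFIG_CMD_CREATE ends up in xfs_fs_get_tree(); the resulting
 * superblock is attached with fsmount() and move_mount(). The device path
 * and mount point are placeholders, and raw syscall(2) numbers are used in
 * case the libc lacks wrappers. Kept inside this comment so it never becomes
 * part of the kernel build.
 *
 *	#include <fcntl.h>
 *	#include <linux/mount.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fsfd, mntfd;
 *
 *		fsfd = syscall(SYS_fsopen, "xfs", 0);
 *		if (fsfd < 0)
 *			return 1;
 *
 *		syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "source",
 *				"/dev/sdb1", 0);
 *		syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_FLAG, "nouuid",
 *				NULL, 0);
 *		syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *
 *		mntfd = syscall(SYS_fsmount, fsfd, 0, 0);
 *		if (mntfd < 0)
 *			return 1;
 *		syscall(SYS_move_mount, mntfd, "", AT_FDCWD, "/mnt",
 *				MOVE_MOUNT_F_EMPTY_PATH);
 *
 *		close(mntfd);
 *		close(fsfd);
 *		return 0;
 *	}
 */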
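
/*
 * Illustrative userspace sketch, not part of this module: the legacy
 * mount(2) remount path that reaches xfs_fs_reconfigure() above. Adding
 * SB_RDONLY runs xfs_remount_ro(); clearing it runs xfs_remount_rw().
 * "/mnt" is a placeholder for an existing XFS mount point; again kept in a
 * comment so it stays out of the kernel build.
 *
 *	#include <stdio.h>
 *	#include <sys/mount.h>
 *
 *	int main(void)
 *	{
 *		if (mount(NULL, "/mnt", NULL, MS_REMOUNT | MS_RDONLY, NULL)) {
 *			perror("remount ro");
 *			return 1;
 *		}
 *		if (mount(NULL, "/mnt", NULL, MS_REMOUNT, NULL)) {
 *			perror("remount rw");
 *			return 1;
 *		}
 *		return 0;
 *	}
 */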