1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2000-2006 Silicon Graphics, Inc. 4 * All Rights Reserved. 5 */ 6 7 #include "xfs.h" 8 #include "xfs_shared.h" 9 #include "xfs_format.h" 10 #include "xfs_log_format.h" 11 #include "xfs_trans_resv.h" 12 #include "xfs_sb.h" 13 #include "xfs_mount.h" 14 #include "xfs_inode.h" 15 #include "xfs_btree.h" 16 #include "xfs_bmap.h" 17 #include "xfs_alloc.h" 18 #include "xfs_fsops.h" 19 #include "xfs_trans.h" 20 #include "xfs_buf_item.h" 21 #include "xfs_log.h" 22 #include "xfs_log_priv.h" 23 #include "xfs_dir2.h" 24 #include "xfs_extfree_item.h" 25 #include "xfs_mru_cache.h" 26 #include "xfs_inode_item.h" 27 #include "xfs_icache.h" 28 #include "xfs_trace.h" 29 #include "xfs_icreate_item.h" 30 #include "xfs_filestream.h" 31 #include "xfs_quota.h" 32 #include "xfs_sysfs.h" 33 #include "xfs_ondisk.h" 34 #include "xfs_rmap_item.h" 35 #include "xfs_refcount_item.h" 36 #include "xfs_bmap_item.h" 37 #include "xfs_reflink.h" 38 #include "xfs_pwork.h" 39 #include "xfs_ag.h" 40 #include "xfs_defer.h" 41 #include "xfs_attr_item.h" 42 #include "xfs_xattr.h" 43 #include "xfs_iunlink_item.h" 44 45 #include <linux/magic.h> 46 #include <linux/fs_context.h> 47 #include <linux/fs_parser.h> 48 49 static const struct super_operations xfs_super_operations; 50 51 static struct kset *xfs_kset; /* top-level xfs sysfs dir */ 52 #ifdef DEBUG 53 static struct xfs_kobj xfs_dbg_kobj; /* global debug sysfs attrs */ 54 #endif 55 56 #ifdef CONFIG_HOTPLUG_CPU 57 static LIST_HEAD(xfs_mount_list); 58 static DEFINE_SPINLOCK(xfs_mount_list_lock); 59 60 static inline void xfs_mount_list_add(struct xfs_mount *mp) 61 { 62 spin_lock(&xfs_mount_list_lock); 63 list_add(&mp->m_mount_list, &xfs_mount_list); 64 spin_unlock(&xfs_mount_list_lock); 65 } 66 67 static inline void xfs_mount_list_del(struct xfs_mount *mp) 68 { 69 spin_lock(&xfs_mount_list_lock); 70 list_del(&mp->m_mount_list); 71 spin_unlock(&xfs_mount_list_lock); 72 } 73 #else /* !CONFIG_HOTPLUG_CPU */ 74 static inline void xfs_mount_list_add(struct xfs_mount *mp) {} 75 static inline void xfs_mount_list_del(struct xfs_mount *mp) {} 76 #endif 77 78 enum xfs_dax_mode { 79 XFS_DAX_INODE = 0, 80 XFS_DAX_ALWAYS = 1, 81 XFS_DAX_NEVER = 2, 82 }; 83 84 static void 85 xfs_mount_set_dax_mode( 86 struct xfs_mount *mp, 87 enum xfs_dax_mode mode) 88 { 89 switch (mode) { 90 case XFS_DAX_INODE: 91 mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER); 92 break; 93 case XFS_DAX_ALWAYS: 94 mp->m_features |= XFS_FEAT_DAX_ALWAYS; 95 mp->m_features &= ~XFS_FEAT_DAX_NEVER; 96 break; 97 case XFS_DAX_NEVER: 98 mp->m_features |= XFS_FEAT_DAX_NEVER; 99 mp->m_features &= ~XFS_FEAT_DAX_ALWAYS; 100 break; 101 } 102 } 103 104 static const struct constant_table dax_param_enums[] = { 105 {"inode", XFS_DAX_INODE }, 106 {"always", XFS_DAX_ALWAYS }, 107 {"never", XFS_DAX_NEVER }, 108 {} 109 }; 110 111 /* 112 * Table driven mount option parser. 
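 *
 * For example (illustrative values only), a mount such as
 *	mount -t xfs -o logbufs=8,logbsize=256k,sunit=512,swidth=4096,usrquota /dev/sdb1 /mnt
 * is handled one parameter at a time through the xfs_fs_parameters table
 * below, with each key dispatched in xfs_fs_parse_param().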
113 */ 114 enum { 115 Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev, 116 Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid, 117 Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups, 118 Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep, 119 Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2, 120 Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota, 121 Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota, 122 Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce, 123 Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum, 124 }; 125 126 static const struct fs_parameter_spec xfs_fs_parameters[] = { 127 fsparam_u32("logbufs", Opt_logbufs), 128 fsparam_string("logbsize", Opt_logbsize), 129 fsparam_string("logdev", Opt_logdev), 130 fsparam_string("rtdev", Opt_rtdev), 131 fsparam_flag("wsync", Opt_wsync), 132 fsparam_flag("noalign", Opt_noalign), 133 fsparam_flag("swalloc", Opt_swalloc), 134 fsparam_u32("sunit", Opt_sunit), 135 fsparam_u32("swidth", Opt_swidth), 136 fsparam_flag("nouuid", Opt_nouuid), 137 fsparam_flag("grpid", Opt_grpid), 138 fsparam_flag("nogrpid", Opt_nogrpid), 139 fsparam_flag("bsdgroups", Opt_bsdgroups), 140 fsparam_flag("sysvgroups", Opt_sysvgroups), 141 fsparam_string("allocsize", Opt_allocsize), 142 fsparam_flag("norecovery", Opt_norecovery), 143 fsparam_flag("inode64", Opt_inode64), 144 fsparam_flag("inode32", Opt_inode32), 145 fsparam_flag("ikeep", Opt_ikeep), 146 fsparam_flag("noikeep", Opt_noikeep), 147 fsparam_flag("largeio", Opt_largeio), 148 fsparam_flag("nolargeio", Opt_nolargeio), 149 fsparam_flag("attr2", Opt_attr2), 150 fsparam_flag("noattr2", Opt_noattr2), 151 fsparam_flag("filestreams", Opt_filestreams), 152 fsparam_flag("quota", Opt_quota), 153 fsparam_flag("noquota", Opt_noquota), 154 fsparam_flag("usrquota", Opt_usrquota), 155 fsparam_flag("grpquota", Opt_grpquota), 156 fsparam_flag("prjquota", Opt_prjquota), 157 fsparam_flag("uquota", Opt_uquota), 158 fsparam_flag("gquota", Opt_gquota), 159 fsparam_flag("pquota", Opt_pquota), 160 fsparam_flag("uqnoenforce", Opt_uqnoenforce), 161 fsparam_flag("gqnoenforce", Opt_gqnoenforce), 162 fsparam_flag("pqnoenforce", Opt_pqnoenforce), 163 fsparam_flag("qnoenforce", Opt_qnoenforce), 164 fsparam_flag("discard", Opt_discard), 165 fsparam_flag("nodiscard", Opt_nodiscard), 166 fsparam_flag("dax", Opt_dax), 167 fsparam_enum("dax", Opt_dax_enum, dax_param_enums), 168 {} 169 }; 170 171 struct proc_xfs_info { 172 uint64_t flag; 173 char *str; 174 }; 175 176 static int 177 xfs_fs_show_options( 178 struct seq_file *m, 179 struct dentry *root) 180 { 181 static struct proc_xfs_info xfs_info_set[] = { 182 /* the few simple ones we can get from the mount struct */ 183 { XFS_FEAT_IKEEP, ",ikeep" }, 184 { XFS_FEAT_WSYNC, ",wsync" }, 185 { XFS_FEAT_NOALIGN, ",noalign" }, 186 { XFS_FEAT_SWALLOC, ",swalloc" }, 187 { XFS_FEAT_NOUUID, ",nouuid" }, 188 { XFS_FEAT_NORECOVERY, ",norecovery" }, 189 { XFS_FEAT_ATTR2, ",attr2" }, 190 { XFS_FEAT_FILESTREAMS, ",filestreams" }, 191 { XFS_FEAT_GRPID, ",grpid" }, 192 { XFS_FEAT_DISCARD, ",discard" }, 193 { XFS_FEAT_LARGE_IOSIZE, ",largeio" }, 194 { XFS_FEAT_DAX_ALWAYS, ",dax=always" }, 195 { XFS_FEAT_DAX_NEVER, ",dax=never" }, 196 { 0, NULL } 197 }; 198 struct xfs_mount *mp = XFS_M(root->d_sb); 199 struct proc_xfs_info *xfs_infop; 200 201 for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) { 202 if (mp->m_features & xfs_infop->flag) 203 seq_puts(m, xfs_infop->str); 204 } 205 206 seq_printf(m, ",inode%d", 
xfs_has_small_inums(mp) ? 32 : 64); 207 208 if (xfs_has_allocsize(mp)) 209 seq_printf(m, ",allocsize=%dk", 210 (1 << mp->m_allocsize_log) >> 10); 211 212 if (mp->m_logbufs > 0) 213 seq_printf(m, ",logbufs=%d", mp->m_logbufs); 214 if (mp->m_logbsize > 0) 215 seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10); 216 217 if (mp->m_logname) 218 seq_show_option(m, "logdev", mp->m_logname); 219 if (mp->m_rtname) 220 seq_show_option(m, "rtdev", mp->m_rtname); 221 222 if (mp->m_dalign > 0) 223 seq_printf(m, ",sunit=%d", 224 (int)XFS_FSB_TO_BB(mp, mp->m_dalign)); 225 if (mp->m_swidth > 0) 226 seq_printf(m, ",swidth=%d", 227 (int)XFS_FSB_TO_BB(mp, mp->m_swidth)); 228 229 if (mp->m_qflags & XFS_UQUOTA_ENFD) 230 seq_puts(m, ",usrquota"); 231 else if (mp->m_qflags & XFS_UQUOTA_ACCT) 232 seq_puts(m, ",uqnoenforce"); 233 234 if (mp->m_qflags & XFS_PQUOTA_ENFD) 235 seq_puts(m, ",prjquota"); 236 else if (mp->m_qflags & XFS_PQUOTA_ACCT) 237 seq_puts(m, ",pqnoenforce"); 238 239 if (mp->m_qflags & XFS_GQUOTA_ENFD) 240 seq_puts(m, ",grpquota"); 241 else if (mp->m_qflags & XFS_GQUOTA_ACCT) 242 seq_puts(m, ",gqnoenforce"); 243 244 if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT)) 245 seq_puts(m, ",noquota"); 246 247 return 0; 248 } 249 250 /* 251 * Set parameters for inode allocation heuristics, taking into account 252 * filesystem size and inode32/inode64 mount options; i.e. specifically 253 * whether or not XFS_FEAT_SMALL_INUMS is set. 254 * 255 * Inode allocation patterns are altered only if inode32 is requested 256 * (XFS_FEAT_SMALL_INUMS), and the filesystem is sufficiently large. 257 * If altered, XFS_OPSTATE_INODE32 is set as well. 258 * 259 * An agcount independent of that in the mount structure is provided 260 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated 261 * to the potentially higher ag count. 262 * 263 * Returns the maximum AG index which may contain inodes. 264 */ 265 xfs_agnumber_t 266 xfs_set_inode_alloc( 267 struct xfs_mount *mp, 268 xfs_agnumber_t agcount) 269 { 270 xfs_agnumber_t index; 271 xfs_agnumber_t maxagi = 0; 272 xfs_sb_t *sbp = &mp->m_sb; 273 xfs_agnumber_t max_metadata; 274 xfs_agino_t agino; 275 xfs_ino_t ino; 276 277 /* 278 * Calculate how much should be reserved for inodes to meet 279 * the max inode percentage. Used only for inode32. 280 */ 281 if (M_IGEO(mp)->maxicount) { 282 uint64_t icount; 283 284 icount = sbp->sb_dblocks * sbp->sb_imax_pct; 285 do_div(icount, 100); 286 icount += sbp->sb_agblocks - 1; 287 do_div(icount, sbp->sb_agblocks); 288 max_metadata = icount; 289 } else { 290 max_metadata = agcount; 291 } 292 293 /* Get the last possible inode in the filesystem */ 294 agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1); 295 ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino); 296 297 /* 298 * If user asked for no more than 32-bit inodes, and the fs is 299 * sufficiently large, set XFS_OPSTATE_INODE32 if we must alter 300 * the allocator to accommodate the request. 
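 *
 * For example (illustrative numbers), with inode32 requested on a filesystem
 * whose highest possible inode number exceeds XFS_MAXINUMBER_32, only the
 * lower AGs are marked pagi_inodeok below so that every newly allocated
 * inode number still fits in 32 bits.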
301 */ 302 if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32) 303 set_bit(XFS_OPSTATE_INODE32, &mp->m_opstate); 304 else 305 clear_bit(XFS_OPSTATE_INODE32, &mp->m_opstate); 306 307 for (index = 0; index < agcount; index++) { 308 struct xfs_perag *pag; 309 310 ino = XFS_AGINO_TO_INO(mp, index, agino); 311 312 pag = xfs_perag_get(mp, index); 313 314 if (xfs_is_inode32(mp)) { 315 if (ino > XFS_MAXINUMBER_32) { 316 pag->pagi_inodeok = 0; 317 pag->pagf_metadata = 0; 318 } else { 319 pag->pagi_inodeok = 1; 320 maxagi++; 321 if (index < max_metadata) 322 pag->pagf_metadata = 1; 323 else 324 pag->pagf_metadata = 0; 325 } 326 } else { 327 pag->pagi_inodeok = 1; 328 pag->pagf_metadata = 0; 329 } 330 331 xfs_perag_put(pag); 332 } 333 334 return xfs_is_inode32(mp) ? maxagi : agcount; 335 } 336 337 static int 338 xfs_setup_dax_always( 339 struct xfs_mount *mp) 340 { 341 if (!mp->m_ddev_targp->bt_daxdev && 342 (!mp->m_rtdev_targp || !mp->m_rtdev_targp->bt_daxdev)) { 343 xfs_alert(mp, 344 "DAX unsupported by block device. Turning off DAX."); 345 goto disable_dax; 346 } 347 348 if (mp->m_super->s_blocksize != PAGE_SIZE) { 349 xfs_alert(mp, 350 "DAX not supported for blocksize. Turning off DAX."); 351 goto disable_dax; 352 } 353 354 if (xfs_has_reflink(mp) && 355 bdev_is_partition(mp->m_ddev_targp->bt_bdev)) { 356 xfs_alert(mp, 357 "DAX and reflink cannot work with multi-partitions!"); 358 return -EINVAL; 359 } 360 361 xfs_warn(mp, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk"); 362 return 0; 363 364 disable_dax: 365 xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER); 366 return 0; 367 } 368 369 STATIC int 370 xfs_blkdev_get( 371 xfs_mount_t *mp, 372 const char *name, 373 struct block_device **bdevp) 374 { 375 int error = 0; 376 377 *bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL, 378 mp); 379 if (IS_ERR(*bdevp)) { 380 error = PTR_ERR(*bdevp); 381 xfs_warn(mp, "Invalid device [%s], error=%d", name, error); 382 } 383 384 return error; 385 } 386 387 STATIC void 388 xfs_blkdev_put( 389 struct block_device *bdev) 390 { 391 if (bdev) 392 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 393 } 394 395 STATIC void 396 xfs_close_devices( 397 struct xfs_mount *mp) 398 { 399 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) { 400 struct block_device *logdev = mp->m_logdev_targp->bt_bdev; 401 402 xfs_free_buftarg(mp->m_logdev_targp); 403 xfs_blkdev_put(logdev); 404 } 405 if (mp->m_rtdev_targp) { 406 struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev; 407 408 xfs_free_buftarg(mp->m_rtdev_targp); 409 xfs_blkdev_put(rtdev); 410 } 411 xfs_free_buftarg(mp->m_ddev_targp); 412 } 413 414 /* 415 * The file system configurations are: 416 * (1) device (partition) with data and internal log 417 * (2) logical volume with data and log subvolumes. 418 * (3) logical volume with data, log, and realtime subvolumes. 419 * 420 * We only have to handle opening the log and realtime volumes here if 421 * they are present. The data subvolume has already been opened by 422 * get_sb_bdev() and is stored in sb->s_bdev. 423 */ 424 STATIC int 425 xfs_open_devices( 426 struct xfs_mount *mp) 427 { 428 struct block_device *ddev = mp->m_super->s_bdev; 429 struct block_device *logdev = NULL, *rtdev = NULL; 430 int error; 431 432 /* 433 * Open real time and log devices - order is important. 
434 */ 435 if (mp->m_logname) { 436 error = xfs_blkdev_get(mp, mp->m_logname, &logdev); 437 if (error) 438 return error; 439 } 440 441 if (mp->m_rtname) { 442 error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev); 443 if (error) 444 goto out_close_logdev; 445 446 if (rtdev == ddev || rtdev == logdev) { 447 xfs_warn(mp, 448 "Cannot mount filesystem with identical rtdev and ddev/logdev."); 449 error = -EINVAL; 450 goto out_close_rtdev; 451 } 452 } 453 454 /* 455 * Setup xfs_mount buffer target pointers 456 */ 457 error = -ENOMEM; 458 mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev); 459 if (!mp->m_ddev_targp) 460 goto out_close_rtdev; 461 462 if (rtdev) { 463 mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev); 464 if (!mp->m_rtdev_targp) 465 goto out_free_ddev_targ; 466 } 467 468 if (logdev && logdev != ddev) { 469 mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev); 470 if (!mp->m_logdev_targp) 471 goto out_free_rtdev_targ; 472 } else { 473 mp->m_logdev_targp = mp->m_ddev_targp; 474 } 475 476 return 0; 477 478 out_free_rtdev_targ: 479 if (mp->m_rtdev_targp) 480 xfs_free_buftarg(mp->m_rtdev_targp); 481 out_free_ddev_targ: 482 xfs_free_buftarg(mp->m_ddev_targp); 483 out_close_rtdev: 484 xfs_blkdev_put(rtdev); 485 out_close_logdev: 486 if (logdev && logdev != ddev) 487 xfs_blkdev_put(logdev); 488 return error; 489 } 490 491 /* 492 * Setup xfs_mount buffer target pointers based on superblock 493 */ 494 STATIC int 495 xfs_setup_devices( 496 struct xfs_mount *mp) 497 { 498 int error; 499 500 error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize); 501 if (error) 502 return error; 503 504 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) { 505 unsigned int log_sector_size = BBSIZE; 506 507 if (xfs_has_sector(mp)) 508 log_sector_size = mp->m_sb.sb_logsectsize; 509 error = xfs_setsize_buftarg(mp->m_logdev_targp, 510 log_sector_size); 511 if (error) 512 return error; 513 } 514 if (mp->m_rtdev_targp) { 515 error = xfs_setsize_buftarg(mp->m_rtdev_targp, 516 mp->m_sb.sb_sectsize); 517 if (error) 518 return error; 519 } 520 521 return 0; 522 } 523 524 STATIC int 525 xfs_init_mount_workqueues( 526 struct xfs_mount *mp) 527 { 528 mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s", 529 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM), 530 1, mp->m_super->s_id); 531 if (!mp->m_buf_workqueue) 532 goto out; 533 534 mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s", 535 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM), 536 0, mp->m_super->s_id); 537 if (!mp->m_unwritten_workqueue) 538 goto out_destroy_buf; 539 540 mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s", 541 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM), 542 0, mp->m_super->s_id); 543 if (!mp->m_reclaim_workqueue) 544 goto out_destroy_unwritten; 545 546 mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s", 547 XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM), 548 0, mp->m_super->s_id); 549 if (!mp->m_blockgc_wq) 550 goto out_destroy_reclaim; 551 552 mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s", 553 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM), 554 1, mp->m_super->s_id); 555 if (!mp->m_inodegc_wq) 556 goto out_destroy_blockgc; 557 558 mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", 559 XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id); 560 if (!mp->m_sync_workqueue) 561 goto out_destroy_inodegc; 562 563 return 0; 564 565 out_destroy_inodegc: 566 destroy_workqueue(mp->m_inodegc_wq); 567 out_destroy_blockgc: 568 destroy_workqueue(mp->m_blockgc_wq); 569 out_destroy_reclaim: 570 
	destroy_workqueue(mp->m_reclaim_workqueue);
out_destroy_unwritten:
	destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_buf:
	destroy_workqueue(mp->m_buf_workqueue);
out:
	return -ENOMEM;
}

STATIC void
xfs_destroy_mount_workqueues(
	struct xfs_mount	*mp)
{
	destroy_workqueue(mp->m_sync_workqueue);
	destroy_workqueue(mp->m_blockgc_wq);
	destroy_workqueue(mp->m_inodegc_wq);
	destroy_workqueue(mp->m_reclaim_workqueue);
	destroy_workqueue(mp->m_unwritten_workqueue);
	destroy_workqueue(mp->m_buf_workqueue);
}

static void
xfs_flush_inodes_worker(
	struct work_struct	*work)
{
	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
						   m_flush_inodes_work);
	struct super_block	*sb = mp->m_super;

	if (down_read_trylock(&sb->s_umount)) {
		sync_inodes_sb(sb);
		up_read(&sb->s_umount);
	}
}

/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 * for IO to complete so that we effectively throttle multiple callers to the
 * rate at which IO is completing.
 */
void
xfs_flush_inodes(
	struct xfs_mount	*mp)
{
	/*
	 * If flush_work() returns true then that means we waited for a flush
	 * which was already in progress. Don't bother running another scan.
	 */
	if (flush_work(&mp->m_flush_inodes_work))
		return;

	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
	flush_work(&mp->m_flush_inodes_work);
}

/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	XFS_STATS_INC(ip->i_mount, vn_rele);
	XFS_STATS_INC(ip->i_mount, vn_remove);
	xfs_inode_mark_reclaimable(ip);
}

static void
xfs_fs_dirty_inode(
	struct inode		*inode,
	int			flags)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;

	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
		return;

	/*
	 * Only do the timestamp update if the inode is dirty (I_DIRTY_SYNC)
	 * and has dirty timestamp (I_DIRTY_TIME). I_DIRTY_TIME can be passed
	 * in flags possibly together with I_DIRTY_SYNC.
	 */
	if ((flags & ~I_DIRTY_TIME) != I_DIRTY_SYNC || !(flags & I_DIRTY_TIME))
		return;

	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
		return;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
	xfs_trans_commit(tp);
}

/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when freeing the inode.
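 *
 * (In this function the idempotent state amounts to the embedded VFS inode,
 * the pin count, the flags spinlock and the i_lock mrlock initialised below;
 * everything else is (re)initialised when the inode is allocated from the
 * slab.)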
 */
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);

	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
}

/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every inode
 * we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	/*
	 * If this unlinked inode is in the middle of recovery, don't
	 * drop the inode just yet; log recovery will take care of
	 * that. See the comment for this inode flag.
	 */
	if (ip->i_flags & XFS_IRECOVERY) {
		ASSERT(xlog_recovery_needed(ip->i_mount->m_log));
		return 0;
	}

	return generic_drop_inode(inode);
}

static void
xfs_mount_free(
	struct xfs_mount	*mp)
{
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
	kmem_free(mp);
}

STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);
	int			error;

	trace_xfs_fs_sync_fs(mp, __return_address);

	/*
	 * Doing anything during the async pass would be counterproductive.
	 */
	if (!wait)
		return 0;

	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return error;

	if (laptop_mode) {
		/*
		 * The disk must be active because we're syncing.
		 * We schedule log work now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		flush_delayed_work(&mp->m_log->l_work);
	}

	/*
	 * If we are called with page faults frozen out, it means we are about
	 * to freeze the transaction subsystem. Take the opportunity to shut
	 * down inodegc because once SB_FREEZE_FS is set it's too late to
	 * prevent inactivation races with freeze. The fs doesn't get called
	 * again by the freezing process until after SB_FREEZE_FS has been set,
	 * so it's now or never. Same logic applies to speculative allocation
	 * garbage collection.
	 *
	 * We don't care if this is a normal syncfs call that does this or
	 * freeze that does this - we can run this multiple times without issue
	 * and we won't race with a restart because a restart can only occur
	 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
	 */
	if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
		xfs_inodegc_stop(mp);
		xfs_blockgc_stop(mp);
	}

	return 0;
}

STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	uint64_t		fakeinos, id;
	uint64_t		icount;
	uint64_t		ifree;
	uint64_t		fdblocks;
	xfs_extlen_t		lsize;
	int64_t			ffree;

	/*
	 * Expedite background inodegc but don't wait. We do not want to block
	 * here waiting hours for a billion extent file to be truncated.
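 *
 * (xfs_inodegc_push() only kicks the per-cpu inodegc workers; unlike
 * xfs_inodegc_flush() it does not wait for them to finish, so statfs sees
 * reasonably fresh counters without blocking.)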
 */
	xfs_inodegc_push(mp);

	statp->f_type = XFS_SUPER_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid = u64_to_fsid(id);

	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	spin_unlock(&mp->m_sb_lock);

	/* make sure statp->f_bfree does not underflow */
	statp->f_bfree = max_t(int64_t, 0,
				fdblocks - xfs_fdblocks_unavailable(mp));
	statp->f_bavail = statp->f_bfree;

	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
	if (M_IGEO(mp)->maxicount)
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
					M_IGEO(mp)->maxicount);

	/* If sb_icount overshot maxicount, report actual allocation */
	statp->f_files = max_t(typeof(statp->f_files),
					statp->f_files,
					sbp->sb_icount);

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (icount - ifree);
	statp->f_ffree = max_t(int64_t, ffree, 0);

	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);

	if (XFS_IS_REALTIME_MOUNT(mp) &&
	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
		s64	freertx;

		statp->f_blocks = sbp->sb_rblocks;
		freertx = percpu_counter_sum_positive(&mp->m_frextents);
		statp->f_bavail = statp->f_bfree = freertx * sbp->sb_rextsize;
	}

	return 0;
}

STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks = 0;

	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks;

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, &resblks, NULL);
}

/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done sync the superblock
 * to the log to dirty it in case of a crash while frozen. This ensures that we
 * will recover the unlinked inode lists on the next mount.
 */
STATIC int
xfs_fs_freeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);
	unsigned int		flags;
	int			ret;

	/*
	 * The filesystem is now frozen far enough that memory reclaim
	 * cannot safely operate on the filesystem. Hence we need to
	 * set a GFP_NOFS context here to avoid recursion deadlocks.
	 */
	flags = memalloc_nofs_save();
	xfs_save_resvblks(mp);
	ret = xfs_log_quiesce(mp);
	memalloc_nofs_restore(flags);

	/*
	 * For read-write filesystems, we need to restart the inodegc on error
	 * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not
	 * going to be run to restart it now. We are at SB_FREEZE_FS level
	 * here, so we can restart safely without racing with a stop in
	 * xfs_fs_sync_fs().
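 *
 * (This mirrors the restart that xfs_fs_unfreeze() performs on a normal
 * thaw; the VFS will not call ->unfreeze_fs for a freeze attempt that
 * failed, so the error path has to bring the workers back itself.)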
923 */ 924 if (ret && !xfs_is_readonly(mp)) { 925 xfs_blockgc_start(mp); 926 xfs_inodegc_start(mp); 927 } 928 929 return ret; 930 } 931 932 STATIC int 933 xfs_fs_unfreeze( 934 struct super_block *sb) 935 { 936 struct xfs_mount *mp = XFS_M(sb); 937 938 xfs_restore_resvblks(mp); 939 xfs_log_work_queue(mp); 940 941 /* 942 * Don't reactivate the inodegc worker on a readonly filesystem because 943 * inodes are sent directly to reclaim. Don't reactivate the blockgc 944 * worker because there are no speculative preallocations on a readonly 945 * filesystem. 946 */ 947 if (!xfs_is_readonly(mp)) { 948 xfs_blockgc_start(mp); 949 xfs_inodegc_start(mp); 950 } 951 952 return 0; 953 } 954 955 /* 956 * This function fills in xfs_mount_t fields based on mount args. 957 * Note: the superblock _has_ now been read in. 958 */ 959 STATIC int 960 xfs_finish_flags( 961 struct xfs_mount *mp) 962 { 963 /* Fail a mount where the logbuf is smaller than the log stripe */ 964 if (xfs_has_logv2(mp)) { 965 if (mp->m_logbsize <= 0 && 966 mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) { 967 mp->m_logbsize = mp->m_sb.sb_logsunit; 968 } else if (mp->m_logbsize > 0 && 969 mp->m_logbsize < mp->m_sb.sb_logsunit) { 970 xfs_warn(mp, 971 "logbuf size must be greater than or equal to log stripe size"); 972 return -EINVAL; 973 } 974 } else { 975 /* Fail a mount if the logbuf is larger than 32K */ 976 if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) { 977 xfs_warn(mp, 978 "logbuf size for version 1 logs must be 16K or 32K"); 979 return -EINVAL; 980 } 981 } 982 983 /* 984 * V5 filesystems always use attr2 format for attributes. 985 */ 986 if (xfs_has_crc(mp) && xfs_has_noattr2(mp)) { 987 xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. " 988 "attr2 is always enabled for V5 filesystems."); 989 return -EINVAL; 990 } 991 992 /* 993 * prohibit r/w mounts of read-only filesystems 994 */ 995 if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !xfs_is_readonly(mp)) { 996 xfs_warn(mp, 997 "cannot mount a read-only filesystem as read-write"); 998 return -EROFS; 999 } 1000 1001 if ((mp->m_qflags & XFS_GQUOTA_ACCT) && 1002 (mp->m_qflags & XFS_PQUOTA_ACCT) && 1003 !xfs_has_pquotino(mp)) { 1004 xfs_warn(mp, 1005 "Super block does not support project and group quota together"); 1006 return -EINVAL; 1007 } 1008 1009 return 0; 1010 } 1011 1012 static int 1013 xfs_init_percpu_counters( 1014 struct xfs_mount *mp) 1015 { 1016 int error; 1017 1018 error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL); 1019 if (error) 1020 return -ENOMEM; 1021 1022 error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL); 1023 if (error) 1024 goto free_icount; 1025 1026 error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL); 1027 if (error) 1028 goto free_ifree; 1029 1030 error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL); 1031 if (error) 1032 goto free_fdblocks; 1033 1034 error = percpu_counter_init(&mp->m_frextents, 0, GFP_KERNEL); 1035 if (error) 1036 goto free_delalloc; 1037 1038 return 0; 1039 1040 free_delalloc: 1041 percpu_counter_destroy(&mp->m_delalloc_blks); 1042 free_fdblocks: 1043 percpu_counter_destroy(&mp->m_fdblocks); 1044 free_ifree: 1045 percpu_counter_destroy(&mp->m_ifree); 1046 free_icount: 1047 percpu_counter_destroy(&mp->m_icount); 1048 return -ENOMEM; 1049 } 1050 1051 void 1052 xfs_reinit_percpu_counters( 1053 struct xfs_mount *mp) 1054 { 1055 percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount); 1056 percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree); 1057 percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks); 1058 
percpu_counter_set(&mp->m_frextents, mp->m_sb.sb_frextents); 1059 } 1060 1061 static void 1062 xfs_destroy_percpu_counters( 1063 struct xfs_mount *mp) 1064 { 1065 percpu_counter_destroy(&mp->m_icount); 1066 percpu_counter_destroy(&mp->m_ifree); 1067 percpu_counter_destroy(&mp->m_fdblocks); 1068 ASSERT(xfs_is_shutdown(mp) || 1069 percpu_counter_sum(&mp->m_delalloc_blks) == 0); 1070 percpu_counter_destroy(&mp->m_delalloc_blks); 1071 percpu_counter_destroy(&mp->m_frextents); 1072 } 1073 1074 static int 1075 xfs_inodegc_init_percpu( 1076 struct xfs_mount *mp) 1077 { 1078 struct xfs_inodegc *gc; 1079 int cpu; 1080 1081 mp->m_inodegc = alloc_percpu(struct xfs_inodegc); 1082 if (!mp->m_inodegc) 1083 return -ENOMEM; 1084 1085 for_each_possible_cpu(cpu) { 1086 gc = per_cpu_ptr(mp->m_inodegc, cpu); 1087 init_llist_head(&gc->list); 1088 gc->items = 0; 1089 INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker); 1090 } 1091 return 0; 1092 } 1093 1094 static void 1095 xfs_inodegc_free_percpu( 1096 struct xfs_mount *mp) 1097 { 1098 if (!mp->m_inodegc) 1099 return; 1100 free_percpu(mp->m_inodegc); 1101 } 1102 1103 static void 1104 xfs_fs_put_super( 1105 struct super_block *sb) 1106 { 1107 struct xfs_mount *mp = XFS_M(sb); 1108 1109 /* if ->fill_super failed, we have no mount to tear down */ 1110 if (!sb->s_fs_info) 1111 return; 1112 1113 xfs_notice(mp, "Unmounting Filesystem"); 1114 xfs_filestream_unmount(mp); 1115 xfs_unmountfs(mp); 1116 1117 xfs_freesb(mp); 1118 free_percpu(mp->m_stats.xs_stats); 1119 xfs_mount_list_del(mp); 1120 xfs_inodegc_free_percpu(mp); 1121 xfs_destroy_percpu_counters(mp); 1122 xfs_destroy_mount_workqueues(mp); 1123 xfs_close_devices(mp); 1124 1125 sb->s_fs_info = NULL; 1126 xfs_mount_free(mp); 1127 } 1128 1129 static long 1130 xfs_fs_nr_cached_objects( 1131 struct super_block *sb, 1132 struct shrink_control *sc) 1133 { 1134 /* Paranoia: catch incorrect calls during mount setup or teardown */ 1135 if (WARN_ON_ONCE(!sb->s_fs_info)) 1136 return 0; 1137 return xfs_reclaim_inodes_count(XFS_M(sb)); 1138 } 1139 1140 static long 1141 xfs_fs_free_cached_objects( 1142 struct super_block *sb, 1143 struct shrink_control *sc) 1144 { 1145 return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan); 1146 } 1147 1148 static const struct super_operations xfs_super_operations = { 1149 .alloc_inode = xfs_fs_alloc_inode, 1150 .destroy_inode = xfs_fs_destroy_inode, 1151 .dirty_inode = xfs_fs_dirty_inode, 1152 .drop_inode = xfs_fs_drop_inode, 1153 .put_super = xfs_fs_put_super, 1154 .sync_fs = xfs_fs_sync_fs, 1155 .freeze_fs = xfs_fs_freeze, 1156 .unfreeze_fs = xfs_fs_unfreeze, 1157 .statfs = xfs_fs_statfs, 1158 .show_options = xfs_fs_show_options, 1159 .nr_cached_objects = xfs_fs_nr_cached_objects, 1160 .free_cached_objects = xfs_fs_free_cached_objects, 1161 }; 1162 1163 static int 1164 suffix_kstrtoint( 1165 const char *s, 1166 unsigned int base, 1167 int *res) 1168 { 1169 int last, shift_left_factor = 0, _res; 1170 char *value; 1171 int ret = 0; 1172 1173 value = kstrdup(s, GFP_KERNEL); 1174 if (!value) 1175 return -ENOMEM; 1176 1177 last = strlen(value) - 1; 1178 if (value[last] == 'K' || value[last] == 'k') { 1179 shift_left_factor = 10; 1180 value[last] = '\0'; 1181 } 1182 if (value[last] == 'M' || value[last] == 'm') { 1183 shift_left_factor = 20; 1184 value[last] = '\0'; 1185 } 1186 if (value[last] == 'G' || value[last] == 'g') { 1187 shift_left_factor = 30; 1188 value[last] = '\0'; 1189 } 1190 1191 if (kstrtoint(value, base, &_res)) 1192 ret = -EINVAL; 1193 kfree(value); 1194 *res = _res << 
shift_left_factor; 1195 return ret; 1196 } 1197 1198 static inline void 1199 xfs_fs_warn_deprecated( 1200 struct fs_context *fc, 1201 struct fs_parameter *param, 1202 uint64_t flag, 1203 bool value) 1204 { 1205 /* Don't print the warning if reconfiguring and current mount point 1206 * already had the flag set 1207 */ 1208 if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) && 1209 !!(XFS_M(fc->root->d_sb)->m_features & flag) == value) 1210 return; 1211 xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key); 1212 } 1213 1214 /* 1215 * Set mount state from a mount option. 1216 * 1217 * NOTE: mp->m_super is NULL here! 1218 */ 1219 static int 1220 xfs_fs_parse_param( 1221 struct fs_context *fc, 1222 struct fs_parameter *param) 1223 { 1224 struct xfs_mount *parsing_mp = fc->s_fs_info; 1225 struct fs_parse_result result; 1226 int size = 0; 1227 int opt; 1228 1229 opt = fs_parse(fc, xfs_fs_parameters, param, &result); 1230 if (opt < 0) 1231 return opt; 1232 1233 switch (opt) { 1234 case Opt_logbufs: 1235 parsing_mp->m_logbufs = result.uint_32; 1236 return 0; 1237 case Opt_logbsize: 1238 if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize)) 1239 return -EINVAL; 1240 return 0; 1241 case Opt_logdev: 1242 kfree(parsing_mp->m_logname); 1243 parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL); 1244 if (!parsing_mp->m_logname) 1245 return -ENOMEM; 1246 return 0; 1247 case Opt_rtdev: 1248 kfree(parsing_mp->m_rtname); 1249 parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL); 1250 if (!parsing_mp->m_rtname) 1251 return -ENOMEM; 1252 return 0; 1253 case Opt_allocsize: 1254 if (suffix_kstrtoint(param->string, 10, &size)) 1255 return -EINVAL; 1256 parsing_mp->m_allocsize_log = ffs(size) - 1; 1257 parsing_mp->m_features |= XFS_FEAT_ALLOCSIZE; 1258 return 0; 1259 case Opt_grpid: 1260 case Opt_bsdgroups: 1261 parsing_mp->m_features |= XFS_FEAT_GRPID; 1262 return 0; 1263 case Opt_nogrpid: 1264 case Opt_sysvgroups: 1265 parsing_mp->m_features &= ~XFS_FEAT_GRPID; 1266 return 0; 1267 case Opt_wsync: 1268 parsing_mp->m_features |= XFS_FEAT_WSYNC; 1269 return 0; 1270 case Opt_norecovery: 1271 parsing_mp->m_features |= XFS_FEAT_NORECOVERY; 1272 return 0; 1273 case Opt_noalign: 1274 parsing_mp->m_features |= XFS_FEAT_NOALIGN; 1275 return 0; 1276 case Opt_swalloc: 1277 parsing_mp->m_features |= XFS_FEAT_SWALLOC; 1278 return 0; 1279 case Opt_sunit: 1280 parsing_mp->m_dalign = result.uint_32; 1281 return 0; 1282 case Opt_swidth: 1283 parsing_mp->m_swidth = result.uint_32; 1284 return 0; 1285 case Opt_inode32: 1286 parsing_mp->m_features |= XFS_FEAT_SMALL_INUMS; 1287 return 0; 1288 case Opt_inode64: 1289 parsing_mp->m_features &= ~XFS_FEAT_SMALL_INUMS; 1290 return 0; 1291 case Opt_nouuid: 1292 parsing_mp->m_features |= XFS_FEAT_NOUUID; 1293 return 0; 1294 case Opt_largeio: 1295 parsing_mp->m_features |= XFS_FEAT_LARGE_IOSIZE; 1296 return 0; 1297 case Opt_nolargeio: 1298 parsing_mp->m_features &= ~XFS_FEAT_LARGE_IOSIZE; 1299 return 0; 1300 case Opt_filestreams: 1301 parsing_mp->m_features |= XFS_FEAT_FILESTREAMS; 1302 return 0; 1303 case Opt_noquota: 1304 parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT; 1305 parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD; 1306 return 0; 1307 case Opt_quota: 1308 case Opt_uquota: 1309 case Opt_usrquota: 1310 parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD); 1311 return 0; 1312 case Opt_qnoenforce: 1313 case Opt_uqnoenforce: 1314 parsing_mp->m_qflags |= XFS_UQUOTA_ACCT; 1315 parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD; 1316 return 0; 1317 case Opt_pquota: 
	case Opt_prjquota:
		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD);
		return 0;
	case Opt_pqnoenforce:
		parsing_mp->m_qflags |= XFS_PQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
		return 0;
	case Opt_gquota:
	case Opt_grpquota:
		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
		return 0;
	case Opt_gqnoenforce:
		parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
		return 0;
	case Opt_discard:
		parsing_mp->m_features |= XFS_FEAT_DISCARD;
		return 0;
	case Opt_nodiscard:
		parsing_mp->m_features &= ~XFS_FEAT_DISCARD;
		return 0;
#ifdef CONFIG_FS_DAX
	case Opt_dax:
		xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
		return 0;
	case Opt_dax_enum:
		xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
		return 0;
#endif
	/* Following mount options will be removed in September 2025 */
	case Opt_ikeep:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true);
		parsing_mp->m_features |= XFS_FEAT_IKEEP;
		return 0;
	case Opt_noikeep:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false);
		parsing_mp->m_features &= ~XFS_FEAT_IKEEP;
		return 0;
	case Opt_attr2:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true);
		parsing_mp->m_features |= XFS_FEAT_ATTR2;
		return 0;
	case Opt_noattr2:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true);
		parsing_mp->m_features |= XFS_FEAT_NOATTR2;
		return 0;
	default:
		xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
		return -EINVAL;
	}

	return 0;
}

static int
xfs_fs_validate_params(
	struct xfs_mount	*mp)
{
	/* No recovery flag requires a read-only mount */
	if (xfs_has_norecovery(mp) && !xfs_is_readonly(mp)) {
		xfs_warn(mp, "no-recovery mounts must be read-only.");
		return -EINVAL;
	}

	/*
	 * We have not read the superblock at this point, so only the attr2
	 * mount option can set the attr2 feature by this stage.
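 *
 * (So if both the attr2 and noattr2 feature flags are set here, the user
 * must have passed the two contradictory options on the same command line.)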
 */
	if (xfs_has_attr2(mp) && xfs_has_noattr2(mp)) {
		xfs_warn(mp, "attr2 and noattr2 cannot both be specified.");
		return -EINVAL;
	}

	if (xfs_has_noalign(mp) && (mp->m_dalign || mp->m_swidth)) {
		xfs_warn(mp,
	"sunit and swidth options incompatible with the noalign option");
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
		xfs_warn(mp, "quota support not available in this kernel.");
		return -EINVAL;
	}

	if ((mp->m_dalign && !mp->m_swidth) ||
	    (!mp->m_dalign && mp->m_swidth)) {
		xfs_warn(mp, "sunit and swidth must be specified together");
		return -EINVAL;
	}

	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
		xfs_warn(mp,
	"stripe width (%d) must be a multiple of the stripe unit (%d)",
			mp->m_swidth, mp->m_dalign);
		return -EINVAL;
	}

	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return -EINVAL;
	}

	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize != 0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		xfs_warn(mp,
			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
			mp->m_logbsize);
		return -EINVAL;
	}

	if (xfs_has_allocsize(mp) &&
	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
		return -EINVAL;
	}

	return 0;
}

static int
xfs_fs_fill_super(
	struct super_block	*sb,
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = sb->s_fs_info;
	struct inode		*root;
	int			flags = 0, error;

	mp->m_super = sb;

	error = xfs_fs_validate_params(mp);
	if (error)
		goto out_free_names;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	sb->s_op = &xfs_super_operations;

	/*
	 * Delay mount work if the debug hook is set. This is debug
	 * instrumentation to coordinate simulation of xfs mount failures with
	 * VFS superblock operations.
	 */
	if (xfs_globals.mount_delay) {
		xfs_notice(mp, "Delaying mount for %d seconds.",
			xfs_globals.mount_delay);
		msleep(xfs_globals.mount_delay * 1000);
	}

	if (fc->sb_flags & SB_SILENT)
		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
	if (error)
		goto out_free_names;

	error = xfs_init_mount_workqueues(mp);
	if (error)
		goto out_close_devices;

	error = xfs_init_percpu_counters(mp);
	if (error)
		goto out_destroy_workqueues;

	error = xfs_inodegc_init_percpu(mp);
	if (error)
		goto out_destroy_counters;

	/*
	 * All percpu data structures requiring cleanup when a cpu goes offline
	 * must be allocated before adding this @mp to the cpu-dead handler's
	 * mount list.
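 *
 * (The cpu-dead callback walks xfs_mount_list and migrates each mount's
 * per-cpu inodegc work off the dead CPU, so it must never find a mount whose
 * m_inodegc has not been set up yet.)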
1505 */ 1506 xfs_mount_list_add(mp); 1507 1508 /* Allocate stats memory before we do operations that might use it */ 1509 mp->m_stats.xs_stats = alloc_percpu(struct xfsstats); 1510 if (!mp->m_stats.xs_stats) { 1511 error = -ENOMEM; 1512 goto out_destroy_inodegc; 1513 } 1514 1515 error = xfs_readsb(mp, flags); 1516 if (error) 1517 goto out_free_stats; 1518 1519 error = xfs_finish_flags(mp); 1520 if (error) 1521 goto out_free_sb; 1522 1523 error = xfs_setup_devices(mp); 1524 if (error) 1525 goto out_free_sb; 1526 1527 /* V4 support is undergoing deprecation. */ 1528 if (!xfs_has_crc(mp)) { 1529 #ifdef CONFIG_XFS_SUPPORT_V4 1530 xfs_warn_once(mp, 1531 "Deprecated V4 format (crc=0) will not be supported after September 2030."); 1532 #else 1533 xfs_warn(mp, 1534 "Deprecated V4 format (crc=0) not supported by kernel."); 1535 error = -EINVAL; 1536 goto out_free_sb; 1537 #endif 1538 } 1539 1540 /* Filesystem claims it needs repair, so refuse the mount. */ 1541 if (xfs_has_needsrepair(mp)) { 1542 xfs_warn(mp, "Filesystem needs repair. Please run xfs_repair."); 1543 error = -EFSCORRUPTED; 1544 goto out_free_sb; 1545 } 1546 1547 /* 1548 * Don't touch the filesystem if a user tool thinks it owns the primary 1549 * superblock. mkfs doesn't clear the flag from secondary supers, so 1550 * we don't check them at all. 1551 */ 1552 if (mp->m_sb.sb_inprogress) { 1553 xfs_warn(mp, "Offline file system operation in progress!"); 1554 error = -EFSCORRUPTED; 1555 goto out_free_sb; 1556 } 1557 1558 /* 1559 * Until this is fixed only page-sized or smaller data blocks work. 1560 */ 1561 if (mp->m_sb.sb_blocksize > PAGE_SIZE) { 1562 xfs_warn(mp, 1563 "File system with blocksize %d bytes. " 1564 "Only pagesize (%ld) or less will currently work.", 1565 mp->m_sb.sb_blocksize, PAGE_SIZE); 1566 error = -ENOSYS; 1567 goto out_free_sb; 1568 } 1569 1570 /* Ensure this filesystem fits in the page cache limits */ 1571 if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) || 1572 xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) { 1573 xfs_warn(mp, 1574 "file system too large to be mounted on this system."); 1575 error = -EFBIG; 1576 goto out_free_sb; 1577 } 1578 1579 /* 1580 * XFS block mappings use 54 bits to store the logical block offset. 1581 * This should suffice to handle the maximum file size that the VFS 1582 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT 1583 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes 1584 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON 1585 * to check this assertion. 1586 * 1587 * Avoid integer overflow by comparing the maximum bmbt offset to the 1588 * maximum pagecache offset in units of fs blocks. 1589 */ 1590 if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) { 1591 xfs_warn(mp, 1592 "MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!", 1593 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE), 1594 XFS_MAX_FILEOFF); 1595 error = -EINVAL; 1596 goto out_free_sb; 1597 } 1598 1599 error = xfs_filestream_mount(mp); 1600 if (error) 1601 goto out_free_sb; 1602 1603 /* 1604 * we must configure the block size in the superblock before we run the 1605 * full mount process as the mount process can lookup and cache inodes. 
1606 */ 1607 sb->s_magic = XFS_SUPER_MAGIC; 1608 sb->s_blocksize = mp->m_sb.sb_blocksize; 1609 sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1; 1610 sb->s_maxbytes = MAX_LFS_FILESIZE; 1611 sb->s_max_links = XFS_MAXLINK; 1612 sb->s_time_gran = 1; 1613 if (xfs_has_bigtime(mp)) { 1614 sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN); 1615 sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX); 1616 } else { 1617 sb->s_time_min = XFS_LEGACY_TIME_MIN; 1618 sb->s_time_max = XFS_LEGACY_TIME_MAX; 1619 } 1620 trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max); 1621 sb->s_iflags |= SB_I_CGROUPWB; 1622 1623 set_posix_acl_flag(sb); 1624 1625 /* version 5 superblocks support inode version counters. */ 1626 if (xfs_has_crc(mp)) 1627 sb->s_flags |= SB_I_VERSION; 1628 1629 if (xfs_has_dax_always(mp)) { 1630 error = xfs_setup_dax_always(mp); 1631 if (error) 1632 goto out_filestream_unmount; 1633 } 1634 1635 if (xfs_has_discard(mp) && !bdev_max_discard_sectors(sb->s_bdev)) { 1636 xfs_warn(mp, 1637 "mounting with \"discard\" option, but the device does not support discard"); 1638 mp->m_features &= ~XFS_FEAT_DISCARD; 1639 } 1640 1641 if (xfs_has_reflink(mp)) { 1642 if (mp->m_sb.sb_rblocks) { 1643 xfs_alert(mp, 1644 "reflink not compatible with realtime device!"); 1645 error = -EINVAL; 1646 goto out_filestream_unmount; 1647 } 1648 1649 if (xfs_globals.always_cow) { 1650 xfs_info(mp, "using DEBUG-only always_cow mode."); 1651 mp->m_always_cow = true; 1652 } 1653 } 1654 1655 if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) { 1656 xfs_alert(mp, 1657 "reverse mapping btree not compatible with realtime device!"); 1658 error = -EINVAL; 1659 goto out_filestream_unmount; 1660 } 1661 1662 if (xfs_has_large_extent_counts(mp)) 1663 xfs_warn(mp, 1664 "EXPERIMENTAL Large extent counts feature in use. 
Use at your own risk!");

	error = xfs_mountfs(mp);
	if (error)
		goto out_filestream_unmount;

	root = igrab(VFS_I(mp->m_rootip));
	if (!root) {
		error = -ENOENT;
		goto out_unmount;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		error = -ENOMEM;
		goto out_unmount;
	}

	return 0;

out_filestream_unmount:
	xfs_filestream_unmount(mp);
out_free_sb:
	xfs_freesb(mp);
out_free_stats:
	free_percpu(mp->m_stats.xs_stats);
out_destroy_inodegc:
	xfs_mount_list_del(mp);
	xfs_inodegc_free_percpu(mp);
out_destroy_counters:
	xfs_destroy_percpu_counters(mp);
out_destroy_workqueues:
	xfs_destroy_mount_workqueues(mp);
out_close_devices:
	xfs_close_devices(mp);
out_free_names:
	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
	return error;

out_unmount:
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);
	goto out_free_sb;
}

static int
xfs_fs_get_tree(
	struct fs_context	*fc)
{
	return get_tree_bdev(fc, xfs_fs_fill_super);
}

static int
xfs_remount_rw(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	int			error;

	if (xfs_has_norecovery(mp)) {
		xfs_warn(mp,
			"ro->rw transition prohibited on norecovery mount");
		return -EINVAL;
	}

	if (xfs_sb_is_v5(sbp) &&
	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
		xfs_warn(mp,
	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
			(sbp->sb_features_ro_compat &
				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
		return -EINVAL;
	}

	clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);

	/*
	 * If this is the first remount to writeable state we might have some
	 * superblock changes to update.
	 */
	if (mp->m_update_sb) {
		error = xfs_sync_sb(mp, false);
		if (error) {
			xfs_warn(mp, "failed to write sb changes");
			return error;
		}
		mp->m_update_sb = false;
	}

	/*
	 * Fill out the reserve pool if it is empty. Use the stashed value if
	 * it is non-zero, otherwise go with the default.
	 */
	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);
	xfs_blockgc_start(mp);

	/* Create the per-AG metadata reservation pool. */
	error = xfs_fs_reserve_ag_blocks(mp);
	if (error && error != -ENOSPC)
		return error;

	/* Re-enable the background inode inactivation worker. */
	xfs_inodegc_start(mp);

	return 0;
}

static int
xfs_remount_ro(
	struct xfs_mount	*mp)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= XFS_ICWALK_FLAG_SYNC,
	};
	int			error;

	/* Flush all the dirty data to disk. */
	error = sync_filesystem(mp->m_super);
	if (error)
		return error;

	/*
	 * Cancel background eofb scanning so it cannot race with the final
	 * log force+buftarg wait and deadlock the remount.
	 */
	xfs_blockgc_stop(mp);

	/*
	 * Clear out all remaining COW staging extents and speculative post-EOF
	 * preallocations so that we don't leave inodes requiring inactivation
	 * cleanups during reclaim on a read-only mount. We must process every
	 * cached inode, so this requires a synchronous cache scan.
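 *
 * (XFS_ICWALK_FLAG_SYNC in the icwalk control structure above is what turns
 * this into a blocking, every-inode walk.)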
 */
	error = xfs_blockgc_free_space(mp, &icw);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/*
	 * Stop the inodegc background worker. xfs_fs_reconfigure already
	 * flushed all pending inodegc work when it sync'd the filesystem.
	 * The VFS holds s_umount, so we know that inodes cannot enter
	 * xfs_fs_destroy_inode during a remount operation. In readonly mode
	 * we send inodes straight to reclaim, so no inodes will be queued.
	 */
	xfs_inodegc_stop(mp);

	/* Free the per-AG metadata reservation pool. */
	error = xfs_fs_unreserve_ag_blocks(mp);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/*
	 * Before we sync the metadata, we need to free up the reserve block
	 * pool so that the used block count in the superblock on disk is
	 * correct at the end of the remount. Stash the current reserve pool
	 * size so that if we get remounted rw, we can return it to the same
	 * size.
	 */
	xfs_save_resvblks(mp);

	xfs_log_clean(mp);
	set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);

	return 0;
}

/*
 * Logically we would return an error here to prevent users from believing
 * they might have changed mount options using remount which can't be changed.
 *
 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
 * arguments in some cases so we can't blindly reject options, but have to
 * check for each specified option if it actually differs from the currently
 * set option and only reject it if that's the case.
 *
 * Until that is implemented we return success for every remount request, and
 * silently ignore all options that we can't actually change.
 */
static int
xfs_fs_reconfigure(
	struct fs_context *fc)
{
	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
	struct xfs_mount	*new_mp = fc->s_fs_info;
	int			flags = fc->sb_flags;
	int			error;

	/* version 5 superblocks always support version counters. */
	if (xfs_has_crc(mp))
		fc->sb_flags |= SB_I_VERSION;

	error = xfs_fs_validate_params(new_mp);
	if (error)
		return error;

	/* inode32 -> inode64 */
	if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) {
		mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
	}

	/* inode64 -> inode32 */
	if (!xfs_has_small_inums(mp) && xfs_has_small_inums(new_mp)) {
		mp->m_features |= XFS_FEAT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
	}

	/* ro -> rw */
	if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) {
		error = xfs_remount_rw(mp);
		if (error)
			return error;
	}

	/* rw -> ro */
	if (!xfs_is_readonly(mp) && (flags & SB_RDONLY)) {
		error = xfs_remount_ro(mp);
		if (error)
			return error;
	}

	return 0;
}

static void xfs_fs_free(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = fc->s_fs_info;

	/*
	 * mp is stored in the fs_context when it is initialized.
	 * mp is transferred to the superblock on a successful mount,
	 * but if an error occurs before the transfer we have to free
	 * it here.
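 *
 * (After a successful mount the same memory is instead released by
 * xfs_mount_free() from xfs_fs_put_super() when the superblock is torn
 * down.)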
1903 */ 1904 if (mp) 1905 xfs_mount_free(mp); 1906 } 1907 1908 static const struct fs_context_operations xfs_context_ops = { 1909 .parse_param = xfs_fs_parse_param, 1910 .get_tree = xfs_fs_get_tree, 1911 .reconfigure = xfs_fs_reconfigure, 1912 .free = xfs_fs_free, 1913 }; 1914 1915 static int xfs_init_fs_context( 1916 struct fs_context *fc) 1917 { 1918 struct xfs_mount *mp; 1919 1920 mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO); 1921 if (!mp) 1922 return -ENOMEM; 1923 1924 spin_lock_init(&mp->m_sb_lock); 1925 spin_lock_init(&mp->m_agirotor_lock); 1926 INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC); 1927 spin_lock_init(&mp->m_perag_lock); 1928 mutex_init(&mp->m_growlock); 1929 INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker); 1930 INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker); 1931 mp->m_kobj.kobject.kset = xfs_kset; 1932 /* 1933 * We don't create the finobt per-ag space reservation until after log 1934 * recovery, so we must set this to true so that an ifree transaction 1935 * started during log recovery will not depend on space reservations 1936 * for finobt expansion. 1937 */ 1938 mp->m_finobt_nores = true; 1939 1940 /* 1941 * These can be overridden by the mount option parsing. 1942 */ 1943 mp->m_logbufs = -1; 1944 mp->m_logbsize = -1; 1945 mp->m_allocsize_log = 16; /* 64k */ 1946 1947 /* 1948 * Copy binary VFS mount flags we are interested in. 1949 */ 1950 if (fc->sb_flags & SB_RDONLY) 1951 set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate); 1952 if (fc->sb_flags & SB_DIRSYNC) 1953 mp->m_features |= XFS_FEAT_DIRSYNC; 1954 if (fc->sb_flags & SB_SYNCHRONOUS) 1955 mp->m_features |= XFS_FEAT_WSYNC; 1956 1957 fc->s_fs_info = mp; 1958 fc->ops = &xfs_context_ops; 1959 1960 return 0; 1961 } 1962 1963 static struct file_system_type xfs_fs_type = { 1964 .owner = THIS_MODULE, 1965 .name = "xfs", 1966 .init_fs_context = xfs_init_fs_context, 1967 .parameters = xfs_fs_parameters, 1968 .kill_sb = kill_block_super, 1969 .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP, 1970 }; 1971 MODULE_ALIAS_FS("xfs"); 1972 1973 STATIC int __init 1974 xfs_init_caches(void) 1975 { 1976 int error; 1977 1978 xfs_buf_cache = kmem_cache_create("xfs_buf", sizeof(struct xfs_buf), 0, 1979 SLAB_HWCACHE_ALIGN | 1980 SLAB_RECLAIM_ACCOUNT | 1981 SLAB_MEM_SPREAD, 1982 NULL); 1983 if (!xfs_buf_cache) 1984 goto out; 1985 1986 xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket", 1987 sizeof(struct xlog_ticket), 1988 0, 0, NULL); 1989 if (!xfs_log_ticket_cache) 1990 goto out_destroy_buf_cache; 1991 1992 error = xfs_btree_init_cur_caches(); 1993 if (error) 1994 goto out_destroy_log_ticket_cache; 1995 1996 error = xfs_defer_init_item_caches(); 1997 if (error) 1998 goto out_destroy_btree_cur_cache; 1999 2000 xfs_da_state_cache = kmem_cache_create("xfs_da_state", 2001 sizeof(struct xfs_da_state), 2002 0, 0, NULL); 2003 if (!xfs_da_state_cache) 2004 goto out_destroy_defer_item_cache; 2005 2006 xfs_ifork_cache = kmem_cache_create("xfs_ifork", 2007 sizeof(struct xfs_ifork), 2008 0, 0, NULL); 2009 if (!xfs_ifork_cache) 2010 goto out_destroy_da_state_cache; 2011 2012 xfs_trans_cache = kmem_cache_create("xfs_trans", 2013 sizeof(struct xfs_trans), 2014 0, 0, NULL); 2015 if (!xfs_trans_cache) 2016 goto out_destroy_ifork_cache; 2017 2018 2019 /* 2020 * The size of the cache-allocated buf log item is the maximum 2021 * size possible under XFS. This wastes a little bit of memory, 2022 * but it is much faster. 

STATIC int __init
xfs_init_caches(void)
{
	int		error;

	xfs_buf_cache = kmem_cache_create("xfs_buf", sizeof(struct xfs_buf), 0,
					  SLAB_HWCACHE_ALIGN |
					  SLAB_RECLAIM_ACCOUNT |
					  SLAB_MEM_SPREAD,
					  NULL);
	if (!xfs_buf_cache)
		goto out;

	xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket",
						 sizeof(struct xlog_ticket),
						 0, 0, NULL);
	if (!xfs_log_ticket_cache)
		goto out_destroy_buf_cache;

	error = xfs_btree_init_cur_caches();
	if (error)
		goto out_destroy_log_ticket_cache;

	error = xfs_defer_init_item_caches();
	if (error)
		goto out_destroy_btree_cur_cache;

	xfs_da_state_cache = kmem_cache_create("xfs_da_state",
					       sizeof(struct xfs_da_state),
					       0, 0, NULL);
	if (!xfs_da_state_cache)
		goto out_destroy_defer_item_cache;

	xfs_ifork_cache = kmem_cache_create("xfs_ifork",
					    sizeof(struct xfs_ifork),
					    0, 0, NULL);
	if (!xfs_ifork_cache)
		goto out_destroy_da_state_cache;

	xfs_trans_cache = kmem_cache_create("xfs_trans",
					    sizeof(struct xfs_trans),
					    0, 0, NULL);
	if (!xfs_trans_cache)
		goto out_destroy_ifork_cache;

	/*
	 * The size of the cache-allocated buf log item is the maximum
	 * size possible under XFS. This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_cache = kmem_cache_create("xfs_buf_item",
					       sizeof(struct xfs_buf_log_item),
					       0, 0, NULL);
	if (!xfs_buf_item_cache)
		goto out_destroy_trans_cache;

	xfs_efd_cache = kmem_cache_create("xfs_efd_item",
					  (sizeof(struct xfs_efd_log_item) +
					  (XFS_EFD_MAX_FAST_EXTENTS - 1) *
					  sizeof(struct xfs_extent)),
					  0, 0, NULL);
	if (!xfs_efd_cache)
		goto out_destroy_buf_item_cache;

	xfs_efi_cache = kmem_cache_create("xfs_efi_item",
					  (sizeof(struct xfs_efi_log_item) +
					  (XFS_EFI_MAX_FAST_EXTENTS - 1) *
					  sizeof(struct xfs_extent)),
					  0, 0, NULL);
	if (!xfs_efi_cache)
		goto out_destroy_efd_cache;

	xfs_inode_cache = kmem_cache_create("xfs_inode",
					    sizeof(struct xfs_inode), 0,
					    (SLAB_HWCACHE_ALIGN |
					     SLAB_RECLAIM_ACCOUNT |
					     SLAB_MEM_SPREAD | SLAB_ACCOUNT),
					    xfs_fs_inode_init_once);
	if (!xfs_inode_cache)
		goto out_destroy_efi_cache;

	xfs_ili_cache = kmem_cache_create("xfs_ili",
					  sizeof(struct xfs_inode_log_item), 0,
					  SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					  NULL);
	if (!xfs_ili_cache)
		goto out_destroy_inode_cache;

	xfs_icreate_cache = kmem_cache_create("xfs_icr",
					      sizeof(struct xfs_icreate_item),
					      0, 0, NULL);
	if (!xfs_icreate_cache)
		goto out_destroy_ili_cache;

	xfs_rud_cache = kmem_cache_create("xfs_rud_item",
					  sizeof(struct xfs_rud_log_item),
					  0, 0, NULL);
	if (!xfs_rud_cache)
		goto out_destroy_icreate_cache;

	xfs_rui_cache = kmem_cache_create("xfs_rui_item",
			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_rui_cache)
		goto out_destroy_rud_cache;

	xfs_cud_cache = kmem_cache_create("xfs_cud_item",
					  sizeof(struct xfs_cud_log_item),
					  0, 0, NULL);
	if (!xfs_cud_cache)
		goto out_destroy_rui_cache;

	xfs_cui_cache = kmem_cache_create("xfs_cui_item",
			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_cui_cache)
		goto out_destroy_cud_cache;

	xfs_bud_cache = kmem_cache_create("xfs_bud_item",
					  sizeof(struct xfs_bud_log_item),
					  0, 0, NULL);
	if (!xfs_bud_cache)
		goto out_destroy_cui_cache;

	xfs_bui_cache = kmem_cache_create("xfs_bui_item",
			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_bui_cache)
		goto out_destroy_bud_cache;

	xfs_attrd_cache = kmem_cache_create("xfs_attrd_item",
					    sizeof(struct xfs_attrd_log_item),
					    0, 0, NULL);
	if (!xfs_attrd_cache)
		goto out_destroy_bui_cache;

	xfs_attri_cache = kmem_cache_create("xfs_attri_item",
					    sizeof(struct xfs_attri_log_item),
					    0, 0, NULL);
	if (!xfs_attri_cache)
		goto out_destroy_attrd_cache;

	xfs_iunlink_cache = kmem_cache_create("xfs_iul_item",
					      sizeof(struct xfs_iunlink_item),
					      0, 0, NULL);
	if (!xfs_iunlink_cache)
		goto out_destroy_attri_cache;

	return 0;

 out_destroy_attri_cache:
	kmem_cache_destroy(xfs_attri_cache);
 out_destroy_attrd_cache:
	kmem_cache_destroy(xfs_attrd_cache);
 out_destroy_bui_cache:
	kmem_cache_destroy(xfs_bui_cache);
 out_destroy_bud_cache:
	kmem_cache_destroy(xfs_bud_cache);
 out_destroy_cui_cache:
	kmem_cache_destroy(xfs_cui_cache);
 out_destroy_cud_cache:
	kmem_cache_destroy(xfs_cud_cache);
 out_destroy_rui_cache:
	kmem_cache_destroy(xfs_rui_cache);
 out_destroy_rud_cache:
	kmem_cache_destroy(xfs_rud_cache);
 out_destroy_icreate_cache:
	kmem_cache_destroy(xfs_icreate_cache);
 out_destroy_ili_cache:
	kmem_cache_destroy(xfs_ili_cache);
 out_destroy_inode_cache:
	kmem_cache_destroy(xfs_inode_cache);
 out_destroy_efi_cache:
	kmem_cache_destroy(xfs_efi_cache);
 out_destroy_efd_cache:
	kmem_cache_destroy(xfs_efd_cache);
 out_destroy_buf_item_cache:
	kmem_cache_destroy(xfs_buf_item_cache);
 out_destroy_trans_cache:
	kmem_cache_destroy(xfs_trans_cache);
 out_destroy_ifork_cache:
	kmem_cache_destroy(xfs_ifork_cache);
 out_destroy_da_state_cache:
	kmem_cache_destroy(xfs_da_state_cache);
 out_destroy_defer_item_cache:
	xfs_defer_destroy_item_caches();
 out_destroy_btree_cur_cache:
	xfs_btree_destroy_cur_caches();
 out_destroy_log_ticket_cache:
	kmem_cache_destroy(xfs_log_ticket_cache);
 out_destroy_buf_cache:
	kmem_cache_destroy(xfs_buf_cache);
 out:
	return -ENOMEM;
}

STATIC void
xfs_destroy_caches(void)
{
	/*
	 * Make sure all delayed RCU frees are flushed before we
	 * destroy the caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(xfs_iunlink_cache);
	kmem_cache_destroy(xfs_attri_cache);
	kmem_cache_destroy(xfs_attrd_cache);
	kmem_cache_destroy(xfs_bui_cache);
	kmem_cache_destroy(xfs_bud_cache);
	kmem_cache_destroy(xfs_cui_cache);
	kmem_cache_destroy(xfs_cud_cache);
	kmem_cache_destroy(xfs_rui_cache);
	kmem_cache_destroy(xfs_rud_cache);
	kmem_cache_destroy(xfs_icreate_cache);
	kmem_cache_destroy(xfs_ili_cache);
	kmem_cache_destroy(xfs_inode_cache);
	kmem_cache_destroy(xfs_efi_cache);
	kmem_cache_destroy(xfs_efd_cache);
	kmem_cache_destroy(xfs_buf_item_cache);
	kmem_cache_destroy(xfs_trans_cache);
	kmem_cache_destroy(xfs_ifork_cache);
	kmem_cache_destroy(xfs_da_state_cache);
	xfs_defer_destroy_item_caches();
	xfs_btree_destroy_cur_caches();
	kmem_cache_destroy(xfs_log_ticket_cache);
	kmem_cache_destroy(xfs_buf_cache);
}
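
/*
 * Illustrative note (editor's addition): the rcu_barrier() in
 * xfs_destroy_caches() is needed because some objects, inodes in
 * particular, are returned to their caches from an RCU callback, roughly:
 *
 *	call_rcu(&inode->i_rcu, callback);	// callback() ends in kmem_cache_free()
 *
 * rcu_barrier() waits for all pending callbacks to finish, so none of them
 * can touch a cache that kmem_cache_destroy() has already torn down.
 */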

STATIC int __init
xfs_init_workqueues(void)
{
	/*
	 * The allocation workqueue can be used in memory reclaim situations
	 * (writepage path), and parallelism is only limited by the number of
	 * AGs in all the filesystems mounted. Hence use the default large
	 * max_active value for this workqueue.
	 */
	xfs_alloc_wq = alloc_workqueue("xfsalloc",
			XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
	if (!xfs_alloc_wq)
		return -ENOMEM;

	xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
			0);
	if (!xfs_discard_wq)
		goto out_free_alloc_wq;

	return 0;
out_free_alloc_wq:
	destroy_workqueue(xfs_alloc_wq);
	return -ENOMEM;
}

STATIC void
xfs_destroy_workqueues(void)
{
	destroy_workqueue(xfs_discard_wq);
	destroy_workqueue(xfs_alloc_wq);
}

#ifdef CONFIG_HOTPLUG_CPU
static int
xfs_cpu_dead(
	unsigned int		cpu)
{
	struct xfs_mount	*mp, *n;

	spin_lock(&xfs_mount_list_lock);
	list_for_each_entry_safe(mp, n, &xfs_mount_list, m_mount_list) {
		spin_unlock(&xfs_mount_list_lock);
		xfs_inodegc_cpu_dead(mp, cpu);
		xlog_cil_pcp_dead(mp->m_log, cpu);
		spin_lock(&xfs_mount_list_lock);
	}
	spin_unlock(&xfs_mount_list_lock);
	return 0;
}

static int __init
xfs_cpu_hotplug_init(void)
{
	int	error;

	error = cpuhp_setup_state_nocalls(CPUHP_XFS_DEAD, "xfs:dead", NULL,
			xfs_cpu_dead);
	if (error < 0)
		xfs_alert(NULL,
"Failed to initialise CPU hotplug, error %d. XFS is non-functional.",
			error);
	return error;
}

static void
xfs_cpu_hotplug_destroy(void)
{
	cpuhp_remove_state_nocalls(CPUHP_XFS_DEAD);
}

#else /* !CONFIG_HOTPLUG_CPU */
static inline int xfs_cpu_hotplug_init(void) { return 0; }
static inline void xfs_cpu_hotplug_destroy(void) {}
#endif

STATIC int __init
init_xfs_fs(void)
{
	int			error;

	xfs_check_ondisk_structs();

	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");

	xfs_dir_startup();

	error = xfs_cpu_hotplug_init();
	if (error)
		goto out;

	error = xfs_init_caches();
	if (error)
		goto out_destroy_hp;

	error = xfs_init_workqueues();
	if (error)
		goto out_destroy_caches;

	error = xfs_mru_cache_init();
	if (error)
		goto out_destroy_wq;

	error = xfs_init_procfs();
	if (error)
		goto out_mru_cache_uninit;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;

	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
	if (!xfs_kset) {
		error = -ENOMEM;
		goto out_sysctl_unregister;
	}

	xfsstats.xs_kobj.kobject.kset = xfs_kset;

	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
	if (!xfsstats.xs_stats) {
		error = -ENOMEM;
		goto out_kset_unregister;
	}

	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
			       "stats");
	if (error)
		goto out_free_stats;

#ifdef DEBUG
	xfs_dbg_kobj.kobject.kset = xfs_kset;
	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
	if (error)
		goto out_remove_stats_kobj;
#endif

	error = xfs_qm_init();
	if (error)
		goto out_remove_dbg_kobj;

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto out_qm_exit;
	return 0;

 out_qm_exit:
	xfs_qm_exit();
 out_remove_dbg_kobj:
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
 out_remove_stats_kobj:
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
 out_free_stats:
	free_percpu(xfsstats.xs_stats);
 out_kset_unregister:
	kset_unregister(xfs_kset);
 out_sysctl_unregister:
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
 out_destroy_wq:
	xfs_destroy_workqueues();
 out_destroy_caches:
	xfs_destroy_caches();
 out_destroy_hp:
	xfs_cpu_hotplug_destroy();
 out:
	return error;
}

STATIC void __exit
exit_xfs_fs(void)
{
	xfs_qm_exit();
	unregister_filesystem(&xfs_fs_type);
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
	free_percpu(xfsstats.xs_stats);
	kset_unregister(xfs_kset);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_mru_cache_uninit();
	xfs_destroy_workqueues();
	xfs_destroy_caches();
	xfs_uuid_table_free();
	xfs_cpu_hotplug_destroy();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");