// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_dir2.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_icreate_item.h"
#include "xfs_filestream.h"
#include "xfs_quota.h"
#include "xfs_sysfs.h"
#include "xfs_ondisk.h"
#include "xfs_rmap_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"
#include "xfs_pwork.h"
#include "xfs_ag.h"
#include "xfs_defer.h"
#include "xfs_attr_item.h"
#include "xfs_xattr.h"
#include "xfs_iunlink_item.h"

#include <linux/magic.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

static const struct super_operations xfs_super_operations;

static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
#ifdef DEBUG
static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
#endif

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(xfs_mount_list);
static DEFINE_SPINLOCK(xfs_mount_list_lock);

static inline void xfs_mount_list_add(struct xfs_mount *mp)
{
	spin_lock(&xfs_mount_list_lock);
	list_add(&mp->m_mount_list, &xfs_mount_list);
	spin_unlock(&xfs_mount_list_lock);
}

static inline void xfs_mount_list_del(struct xfs_mount *mp)
{
	spin_lock(&xfs_mount_list_lock);
	list_del(&mp->m_mount_list);
	spin_unlock(&xfs_mount_list_lock);
}
#else /* !CONFIG_HOTPLUG_CPU */
static inline void xfs_mount_list_add(struct xfs_mount *mp) {}
static inline void xfs_mount_list_del(struct xfs_mount *mp) {}
#endif

enum xfs_dax_mode {
	XFS_DAX_INODE = 0,
	XFS_DAX_ALWAYS = 1,
	XFS_DAX_NEVER = 2,
};

static void
xfs_mount_set_dax_mode(
	struct xfs_mount *mp,
	enum xfs_dax_mode mode)
{
	switch (mode) {
	case XFS_DAX_INODE:
		mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER);
		break;
	case XFS_DAX_ALWAYS:
		mp->m_features |= XFS_FEAT_DAX_ALWAYS;
		mp->m_features &= ~XFS_FEAT_DAX_NEVER;
		break;
	case XFS_DAX_NEVER:
		mp->m_features |= XFS_FEAT_DAX_NEVER;
		mp->m_features &= ~XFS_FEAT_DAX_ALWAYS;
		break;
	}
}

static const struct constant_table dax_param_enums[] = {
	{"inode",	XFS_DAX_INODE },
	{"always",	XFS_DAX_ALWAYS },
	{"never",	XFS_DAX_NEVER },
	{}
};

/*
 * Table driven mount option parser.
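 *
 * For example, an invocation such as
 *	mount -o logbufs=8,logbsize=64k,allocsize=1m <dev> <mnt>
 * (illustrative values only) is tokenised against the parameter table
 * below and handled case-by-case in xfs_fs_parse_param().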
 */
enum {
	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
};

static const struct fs_parameter_spec xfs_fs_parameters[] = {
	fsparam_u32("logbufs", Opt_logbufs),
	fsparam_string("logbsize", Opt_logbsize),
	fsparam_string("logdev", Opt_logdev),
	fsparam_string("rtdev", Opt_rtdev),
	fsparam_flag("wsync", Opt_wsync),
	fsparam_flag("noalign", Opt_noalign),
	fsparam_flag("swalloc", Opt_swalloc),
	fsparam_u32("sunit", Opt_sunit),
	fsparam_u32("swidth", Opt_swidth),
	fsparam_flag("nouuid", Opt_nouuid),
	fsparam_flag("grpid", Opt_grpid),
	fsparam_flag("nogrpid", Opt_nogrpid),
	fsparam_flag("bsdgroups", Opt_bsdgroups),
	fsparam_flag("sysvgroups", Opt_sysvgroups),
	fsparam_string("allocsize", Opt_allocsize),
	fsparam_flag("norecovery", Opt_norecovery),
	fsparam_flag("inode64", Opt_inode64),
	fsparam_flag("inode32", Opt_inode32),
	fsparam_flag("ikeep", Opt_ikeep),
	fsparam_flag("noikeep", Opt_noikeep),
	fsparam_flag("largeio", Opt_largeio),
	fsparam_flag("nolargeio", Opt_nolargeio),
	fsparam_flag("attr2", Opt_attr2),
	fsparam_flag("noattr2", Opt_noattr2),
	fsparam_flag("filestreams", Opt_filestreams),
	fsparam_flag("quota", Opt_quota),
	fsparam_flag("noquota", Opt_noquota),
	fsparam_flag("usrquota", Opt_usrquota),
	fsparam_flag("grpquota", Opt_grpquota),
	fsparam_flag("prjquota", Opt_prjquota),
	fsparam_flag("uquota", Opt_uquota),
	fsparam_flag("gquota", Opt_gquota),
	fsparam_flag("pquota", Opt_pquota),
	fsparam_flag("uqnoenforce", Opt_uqnoenforce),
	fsparam_flag("gqnoenforce", Opt_gqnoenforce),
	fsparam_flag("pqnoenforce", Opt_pqnoenforce),
	fsparam_flag("qnoenforce", Opt_qnoenforce),
	fsparam_flag("discard", Opt_discard),
	fsparam_flag("nodiscard", Opt_nodiscard),
	fsparam_flag("dax", Opt_dax),
	fsparam_enum("dax", Opt_dax_enum, dax_param_enums),
	{}
};

struct proc_xfs_info {
	uint64_t flag;
	char *str;
};

static int
xfs_fs_show_options(
	struct seq_file *m,
	struct dentry *root)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_FEAT_IKEEP,		",ikeep" },
		{ XFS_FEAT_WSYNC,		",wsync" },
		{ XFS_FEAT_NOALIGN,		",noalign" },
		{ XFS_FEAT_SWALLOC,		",swalloc" },
		{ XFS_FEAT_NOUUID,		",nouuid" },
		{ XFS_FEAT_NORECOVERY,		",norecovery" },
		{ XFS_FEAT_ATTR2,		",attr2" },
		{ XFS_FEAT_FILESTREAMS,		",filestreams" },
		{ XFS_FEAT_GRPID,		",grpid" },
		{ XFS_FEAT_DISCARD,		",discard" },
		{ XFS_FEAT_LARGE_IOSIZE,	",largeio" },
		{ XFS_FEAT_DAX_ALWAYS,		",dax=always" },
		{ XFS_FEAT_DAX_NEVER,		",dax=never" },
		{ 0, NULL }
	};
	struct xfs_mount *mp = XFS_M(root->d_sb);
	struct proc_xfs_info *xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_features & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}

	seq_printf(m, ",inode%d",
		xfs_has_small_inums(mp) ? 32 : 64);

	if (xfs_has_allocsize(mp))
		seq_printf(m, ",allocsize=%dk",
			(1 << mp->m_allocsize_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);

	if (mp->m_logname)
		seq_show_option(m, "logdev", mp->m_logname);
	if (mp->m_rtname)
		seq_show_option(m, "rtdev", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, ",sunit=%d",
			(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, ",swidth=%d",
			(int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	if (mp->m_qflags & XFS_UQUOTA_ENFD)
		seq_puts(m, ",usrquota");
	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
		seq_puts(m, ",uqnoenforce");

	if (mp->m_qflags & XFS_PQUOTA_ENFD)
		seq_puts(m, ",prjquota");
	else if (mp->m_qflags & XFS_PQUOTA_ACCT)
		seq_puts(m, ",pqnoenforce");

	if (mp->m_qflags & XFS_GQUOTA_ENFD)
		seq_puts(m, ",grpquota");
	else if (mp->m_qflags & XFS_GQUOTA_ACCT)
		seq_puts(m, ",gqnoenforce");

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, ",noquota");

	return 0;
}

static bool
xfs_set_inode_alloc_perag(
	struct xfs_perag *pag,
	xfs_ino_t ino,
	xfs_agnumber_t max_metadata)
{
	if (!xfs_is_inode32(pag->pag_mount)) {
		set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
		return false;
	}

	if (ino > XFS_MAXINUMBER_32) {
		clear_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
		return false;
	}

	set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
	if (pag->pag_agno < max_metadata)
		set_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
	else
		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
	return true;
}

/*
 * Set parameters for inode allocation heuristics, taking into account
 * filesystem size and inode32/inode64 mount options; i.e. specifically
 * whether or not XFS_FEAT_SMALL_INUMS is set.
 *
 * Inode allocation patterns are altered only if inode32 is requested
 * (XFS_FEAT_SMALL_INUMS), and the filesystem is sufficiently large.
 * If altered, XFS_OPSTATE_INODE32 is set as well.
 *
 * An agcount independent of that in the mount structure is provided
 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 * to the potentially higher ag count.
 *
 * Returns the maximum AG index which may contain inodes.
 */
xfs_agnumber_t
xfs_set_inode_alloc(
	struct xfs_mount *mp,
	xfs_agnumber_t agcount)
{
	xfs_agnumber_t index;
	xfs_agnumber_t maxagi = 0;
	xfs_sb_t *sbp = &mp->m_sb;
	xfs_agnumber_t max_metadata;
	xfs_agino_t agino;
	xfs_ino_t ino;

	/*
	 * Calculate how much should be reserved for inodes to meet
	 * the max inode percentage. Used only for inode32.
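	 *
	 * For example (illustrative numbers only): with sb_dblocks = 1000000,
	 * sb_imax_pct = 25 and sb_agblocks = 250000, icount works out to
	 * 250000 blocks, and max_metadata rounds up to 1 AG.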
	 */
	if (M_IGEO(mp)->maxicount) {
		uint64_t icount;

		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		icount += sbp->sb_agblocks - 1;
		do_div(icount, sbp->sb_agblocks);
		max_metadata = icount;
	} else {
		max_metadata = agcount;
	}

	/* Get the last possible inode in the filesystem */
	agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/*
	 * If user asked for no more than 32-bit inodes, and the fs is
	 * sufficiently large, set XFS_OPSTATE_INODE32 if we must alter
	 * the allocator to accommodate the request.
	 */
	if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32)
		set_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
	else
		clear_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);

	for (index = 0; index < agcount; index++) {
		struct xfs_perag *pag;

		ino = XFS_AGINO_TO_INO(mp, index, agino);

		pag = xfs_perag_get(mp, index);
		if (xfs_set_inode_alloc_perag(pag, ino, max_metadata))
			maxagi++;
		xfs_perag_put(pag);
	}

	return xfs_is_inode32(mp) ? maxagi : agcount;
}

static int
xfs_setup_dax_always(
	struct xfs_mount *mp)
{
	if (!mp->m_ddev_targp->bt_daxdev &&
	    (!mp->m_rtdev_targp || !mp->m_rtdev_targp->bt_daxdev)) {
		xfs_alert(mp,
			"DAX unsupported by block device. Turning off DAX.");
		goto disable_dax;
	}

	if (mp->m_super->s_blocksize != PAGE_SIZE) {
		xfs_alert(mp,
			"DAX not supported for blocksize. Turning off DAX.");
		goto disable_dax;
	}

	if (xfs_has_reflink(mp) &&
	    bdev_is_partition(mp->m_ddev_targp->bt_bdev)) {
		xfs_alert(mp,
			"DAX and reflink cannot work with multi-partitions!");
		return -EINVAL;
	}

	xfs_warn(mp, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
	return 0;

disable_dax:
	xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
	return 0;
}

STATIC int
xfs_blkdev_get(
	xfs_mount_t *mp,
	const char *name,
	struct block_device **bdevp)
{
	int error = 0;

	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				    mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
	}

	return error;
}

STATIC void
xfs_blkdev_put(
	struct block_device *bdev)
{
	if (bdev)
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

STATIC void
xfs_close_devices(
	struct xfs_mount *mp)
{
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;

		xfs_free_buftarg(mp->m_logdev_targp);
		xfs_blkdev_put(logdev);
	}
	if (mp->m_rtdev_targp) {
		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;

		xfs_free_buftarg(mp->m_rtdev_targp);
		xfs_blkdev_put(rtdev);
	}
	xfs_free_buftarg(mp->m_ddev_targp);
}

/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present. The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
	struct xfs_mount *mp)
{
	struct block_device *ddev = mp->m_super->s_bdev;
	struct block_device *logdev = NULL, *rtdev = NULL;
	int error;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
		if (error)
			return error;
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
		if (error)
			goto out_close_logdev;

		if (rtdev == ddev || rtdev == logdev) {
			xfs_warn(mp,
	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = -EINVAL;
			goto out_close_rtdev;
		}
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = -ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev && logdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		mp->m_logdev_targp = mp->m_ddev_targp;
	}

	return 0;

out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp);
out_free_ddev_targ:
	xfs_free_buftarg(mp->m_ddev_targp);
out_close_rtdev:
	xfs_blkdev_put(rtdev);
out_close_logdev:
	if (logdev && logdev != ddev)
		xfs_blkdev_put(logdev);
	return error;
}

/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount *mp)
{
	int error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int log_sector_size = BBSIZE;

		if (xfs_has_sector(mp))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}

STATIC int
xfs_init_mount_workqueues(
	struct xfs_mount *mp)
{
	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			1, mp->m_super->s_id);
	if (!mp->m_buf_workqueue)
		goto out;

	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_unwritten_workqueue)
		goto out_destroy_buf;

	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_reclaim_workqueue)
		goto out_destroy_unwritten;

	mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
			XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_blockgc_wq)
		goto out_destroy_reclaim;

	mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			1, mp->m_super->s_id);
	if (!mp->m_inodegc_wq)
		goto out_destroy_blockgc;

	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
			XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
	if (!mp->m_sync_workqueue)
		goto out_destroy_inodegc;

	return 0;

out_destroy_inodegc:
	destroy_workqueue(mp->m_inodegc_wq);
out_destroy_blockgc:
	destroy_workqueue(mp->m_blockgc_wq);
out_destroy_reclaim:
	destroy_workqueue(mp->m_reclaim_workqueue);
out_destroy_unwritten:
	destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_buf:
	destroy_workqueue(mp->m_buf_workqueue);
out:
	return -ENOMEM;
}

STATIC void
xfs_destroy_mount_workqueues(
	struct xfs_mount *mp)
{
	destroy_workqueue(mp->m_sync_workqueue);
	destroy_workqueue(mp->m_blockgc_wq);
	destroy_workqueue(mp->m_inodegc_wq);
	destroy_workqueue(mp->m_reclaim_workqueue);
	destroy_workqueue(mp->m_unwritten_workqueue);
	destroy_workqueue(mp->m_buf_workqueue);
}

static void
xfs_flush_inodes_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(work, struct xfs_mount,
					    m_flush_inodes_work);
	struct super_block *sb = mp->m_super;

	if (down_read_trylock(&sb->s_umount)) {
		sync_inodes_sb(sb);
		up_read(&sb->s_umount);
	}
}

/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 * for IO to complete so that we effectively throttle multiple callers to the
 * rate at which IO is completing.
 */
void
xfs_flush_inodes(
	struct xfs_mount *mp)
{
	/*
	 * If flush_work() returns true then that means we waited for a flush
	 * which was already in progress. Don't bother running another scan.
	 */
	if (flush_work(&mp->m_flush_inodes_work))
		return;

	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
	flush_work(&mp->m_flush_inodes_work);
}

/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block *sb)
{
	BUG();
	return NULL;
}

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode *inode)
{
	struct xfs_inode *ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	XFS_STATS_INC(ip->i_mount, vn_rele);
	XFS_STATS_INC(ip->i_mount, vn_remove);
	xfs_inode_mark_reclaimable(ip);
}

static void
xfs_fs_dirty_inode(
	struct inode *inode,
	int flags)
{
	struct xfs_inode *ip = XFS_I(inode);
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_trans *tp;

	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
		return;

	/*
	 * Only do the timestamp update if the inode is dirty (I_DIRTY_SYNC)
	 * and has dirty timestamp (I_DIRTY_TIME). I_DIRTY_TIME can be passed
	 * in flags possibly together with I_DIRTY_SYNC.
	 */
	if ((flags & ~I_DIRTY_TIME) != I_DIRTY_SYNC || !(flags & I_DIRTY_TIME))
		return;

	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
		return;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
	xfs_trans_commit(tp);
}

/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab.
 * This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
	void *inode)
{
	struct xfs_inode *ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);

	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		    "xfsino", ip->i_ino);
}

/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every inode
 * we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
	struct inode *inode)
{
	struct xfs_inode *ip = XFS_I(inode);

	/*
	 * If this unlinked inode is in the middle of recovery, don't
	 * drop the inode just yet; log recovery will take care of
	 * that. See the comment for this inode flag.
	 */
	if (ip->i_flags & XFS_IRECOVERY) {
		ASSERT(xlog_recovery_needed(ip->i_mount->m_log));
		return 0;
	}

	return generic_drop_inode(inode);
}

static void
xfs_mount_free(
	struct xfs_mount *mp)
{
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
	kmem_free(mp);
}

STATIC int
xfs_fs_sync_fs(
	struct super_block *sb,
	int wait)
{
	struct xfs_mount *mp = XFS_M(sb);
	int error;

	trace_xfs_fs_sync_fs(mp, __return_address);

	/*
	 * Doing anything during the async pass would be counterproductive.
	 */
	if (!wait)
		return 0;

	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return error;

	if (laptop_mode) {
		/*
		 * The disk must be active because we're syncing.
		 * We schedule log work now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		flush_delayed_work(&mp->m_log->l_work);
	}

	/*
	 * If we are called with page faults frozen out, it means we are about
	 * to freeze the transaction subsystem. Take the opportunity to shut
	 * down inodegc because once SB_FREEZE_FS is set it's too late to
	 * prevent inactivation races with freeze. The fs doesn't get called
	 * again by the freezing process until after SB_FREEZE_FS has been set,
	 * so it's now or never. Same logic applies to speculative allocation
	 * garbage collection.
	 *
	 * We don't care if this is a normal syncfs call that does this or
	 * freeze that does this - we can run this multiple times without issue
	 * and we won't race with a restart because a restart can only occur
	 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
	 */
	if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
		xfs_inodegc_stop(mp);
		xfs_blockgc_stop(mp);
	}

	return 0;
}

STATIC int
xfs_fs_statfs(
	struct dentry *dentry,
	struct kstatfs *statp)
{
	struct xfs_mount *mp = XFS_M(dentry->d_sb);
	xfs_sb_t *sbp = &mp->m_sb;
	struct xfs_inode *ip = XFS_I(d_inode(dentry));
	uint64_t fakeinos, id;
	uint64_t icount;
	uint64_t ifree;
	uint64_t fdblocks;
	xfs_extlen_t lsize;
	int64_t ffree;

	/*
	 * Expedite background inodegc but don't wait.
	 * We do not want to block here waiting hours for a billion extent
	 * file to be truncated.
	 */
	xfs_inodegc_push(mp);

	statp->f_type = XFS_SUPER_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid = u64_to_fsid(id);

	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	spin_unlock(&mp->m_sb_lock);

	/* make sure statp->f_bfree does not underflow */
	statp->f_bfree = max_t(int64_t, 0,
				fdblocks - xfs_fdblocks_unavailable(mp));
	statp->f_bavail = statp->f_bfree;

	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
	if (M_IGEO(mp)->maxicount)
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
					M_IGEO(mp)->maxicount);

	/* If sb_icount overshot maxicount, report actual allocation */
	statp->f_files = max_t(typeof(statp->f_files),
					statp->f_files,
					sbp->sb_icount);

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (icount - ifree);
	statp->f_ffree = max_t(int64_t, ffree, 0);

	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);

	if (XFS_IS_REALTIME_MOUNT(mp) &&
	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
		s64 freertx;

		statp->f_blocks = sbp->sb_rblocks;
		freertx = percpu_counter_sum_positive(&mp->m_frextents);
		statp->f_bavail = statp->f_bfree = freertx * sbp->sb_rextsize;
	}

	return 0;
}

STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks = 0;

	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks;

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, &resblks, NULL);
}

/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done sync the superblock
 * to the log to dirty it in case of a crash while frozen. This ensures that we
 * will recover the unlinked inode lists on the next mount.
 */
STATIC int
xfs_fs_freeze(
	struct super_block *sb)
{
	struct xfs_mount *mp = XFS_M(sb);
	unsigned int flags;
	int ret;

	/*
	 * The filesystem is now frozen far enough that memory reclaim
	 * cannot safely operate on the filesystem. Hence we need to
	 * set a GFP_NOFS context here to avoid recursion deadlocks.
	 */
	flags = memalloc_nofs_save();
	xfs_save_resvblks(mp);
	ret = xfs_log_quiesce(mp);
	memalloc_nofs_restore(flags);

	/*
	 * For read-write filesystems, we need to restart the inodegc on error
	 * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not
	 * going to be run to restart it now. We are at SB_FREEZE_FS level
	 * here, so we can restart safely without racing with a stop in
	 * xfs_fs_sync_fs().
	 */
	if (ret && !xfs_is_readonly(mp)) {
		xfs_blockgc_start(mp);
		xfs_inodegc_start(mp);
	}

	return ret;
}

STATIC int
xfs_fs_unfreeze(
	struct super_block *sb)
{
	struct xfs_mount *mp = XFS_M(sb);

	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);

	/*
	 * Don't reactivate the inodegc worker on a readonly filesystem because
	 * inodes are sent directly to reclaim. Don't reactivate the blockgc
	 * worker because there are no speculative preallocations on a readonly
	 * filesystem.
	 */
	if (!xfs_is_readonly(mp)) {
		xfs_blockgc_start(mp);
		xfs_inodegc_start(mp);
	}

	return 0;
}

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount *mp)
{
	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_has_logv2(mp)) {
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			xfs_warn(mp,
		"logbuf size must be greater than or equal to log stripe size");
			return -EINVAL;
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			xfs_warn(mp,
		"logbuf size for version 1 logs must be 16K or 32K");
			return -EINVAL;
		}
	}

	/*
	 * V5 filesystems always use attr2 format for attributes.
	 */
	if (xfs_has_crc(mp) && xfs_has_noattr2(mp)) {
		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
			     "attr2 is always enabled for V5 filesystems.");
		return -EINVAL;
	}

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !xfs_is_readonly(mp)) {
		xfs_warn(mp,
			"cannot mount a read-only filesystem as read-write");
		return -EROFS;
	}

	if ((mp->m_qflags & XFS_GQUOTA_ACCT) &&
	    (mp->m_qflags & XFS_PQUOTA_ACCT) &&
	    !xfs_has_pquotino(mp)) {
		xfs_warn(mp,
			"Super block does not support project and group quota together");
		return -EINVAL;
	}

	return 0;
}

static int
xfs_init_percpu_counters(
	struct xfs_mount *mp)
{
	int error;

	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
	if (error)
		return -ENOMEM;

	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
	if (error)
		goto free_icount;

	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
	if (error)
		goto free_ifree;

	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
	if (error)
		goto free_fdblocks;

	error = percpu_counter_init(&mp->m_frextents, 0, GFP_KERNEL);
	if (error)
		goto free_delalloc;

	return 0;

free_delalloc:
	percpu_counter_destroy(&mp->m_delalloc_blks);
free_fdblocks:
	percpu_counter_destroy(&mp->m_fdblocks);
free_ifree:
	percpu_counter_destroy(&mp->m_ifree);
free_icount:
	percpu_counter_destroy(&mp->m_icount);
	return -ENOMEM;
}

void
xfs_reinit_percpu_counters(
	struct xfs_mount *mp)
{
	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
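	/* Free realtime extent counts are refreshed from the on-disk superblock too. */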
	percpu_counter_set(&mp->m_frextents, mp->m_sb.sb_frextents);
}

static void
xfs_destroy_percpu_counters(
	struct xfs_mount *mp)
{
	percpu_counter_destroy(&mp->m_icount);
	percpu_counter_destroy(&mp->m_ifree);
	percpu_counter_destroy(&mp->m_fdblocks);
	ASSERT(xfs_is_shutdown(mp) ||
	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
	percpu_counter_destroy(&mp->m_delalloc_blks);
	percpu_counter_destroy(&mp->m_frextents);
}

static int
xfs_inodegc_init_percpu(
	struct xfs_mount *mp)
{
	struct xfs_inodegc *gc;
	int cpu;

	mp->m_inodegc = alloc_percpu(struct xfs_inodegc);
	if (!mp->m_inodegc)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		init_llist_head(&gc->list);
		gc->items = 0;
		INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
	}
	return 0;
}

static void
xfs_inodegc_free_percpu(
	struct xfs_mount *mp)
{
	if (!mp->m_inodegc)
		return;
	free_percpu(mp->m_inodegc);
}

static void
xfs_fs_put_super(
	struct super_block *sb)
{
	struct xfs_mount *mp = XFS_M(sb);

	/* if ->fill_super failed, we have no mount to tear down */
	if (!sb->s_fs_info)
		return;

	xfs_notice(mp, "Unmounting Filesystem %pU", &mp->m_sb.sb_uuid);
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);

	xfs_freesb(mp);
	free_percpu(mp->m_stats.xs_stats);
	xfs_mount_list_del(mp);
	xfs_inodegc_free_percpu(mp);
	xfs_destroy_percpu_counters(mp);
	xfs_destroy_mount_workqueues(mp);
	xfs_close_devices(mp);

	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
}

static long
xfs_fs_nr_cached_objects(
	struct super_block *sb,
	struct shrink_control *sc)
{
	/* Paranoia: catch incorrect calls during mount setup or teardown */
	if (WARN_ON_ONCE(!sb->s_fs_info))
		return 0;
	return xfs_reclaim_inodes_count(XFS_M(sb));
}

static long
xfs_fs_free_cached_objects(
	struct super_block *sb,
	struct shrink_control *sc)
{
	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}

static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.dirty_inode		= xfs_fs_dirty_inode,
	.drop_inode		= xfs_fs_drop_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.show_options		= xfs_fs_show_options,
	.nr_cached_objects	= xfs_fs_nr_cached_objects,
	.free_cached_objects	= xfs_fs_free_cached_objects,
};

static int
suffix_kstrtoint(
	const char *s,
	unsigned int base,
	int *res)
{
	int last, shift_left_factor = 0, _res;
	char *value;
	int ret = 0;

	value = kstrdup(s, GFP_KERNEL);
	if (!value)
		return -ENOMEM;

	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}

	if (kstrtoint(value, base, &_res))
		ret = -EINVAL;
	kfree(value);
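	/* e.g. "64k" parses as 64 and is shifted left by 10 to give 65536 */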
	*res = _res << shift_left_factor;
	return ret;
}

static inline void
xfs_fs_warn_deprecated(
	struct fs_context *fc,
	struct fs_parameter *param,
	uint64_t flag,
	bool value)
{
	/* Don't print the warning if reconfiguring and current mount point
	 * already had the flag set
	 */
	if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
	    !!(XFS_M(fc->root->d_sb)->m_features & flag) == value)
		return;
	xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
}

/*
 * Set mount state from a mount option.
 *
 * NOTE: mp->m_super is NULL here!
 */
static int
xfs_fs_parse_param(
	struct fs_context *fc,
	struct fs_parameter *param)
{
	struct xfs_mount *parsing_mp = fc->s_fs_info;
	struct fs_parse_result result;
	int size = 0;
	int opt;

	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_logbufs:
		parsing_mp->m_logbufs = result.uint_32;
		return 0;
	case Opt_logbsize:
		if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
			return -EINVAL;
		return 0;
	case Opt_logdev:
		kfree(parsing_mp->m_logname);
		parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_logname)
			return -ENOMEM;
		return 0;
	case Opt_rtdev:
		kfree(parsing_mp->m_rtname);
		parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_rtname)
			return -ENOMEM;
		return 0;
	case Opt_allocsize:
		if (suffix_kstrtoint(param->string, 10, &size))
			return -EINVAL;
		parsing_mp->m_allocsize_log = ffs(size) - 1;
		parsing_mp->m_features |= XFS_FEAT_ALLOCSIZE;
		return 0;
	case Opt_grpid:
	case Opt_bsdgroups:
		parsing_mp->m_features |= XFS_FEAT_GRPID;
		return 0;
	case Opt_nogrpid:
	case Opt_sysvgroups:
		parsing_mp->m_features &= ~XFS_FEAT_GRPID;
		return 0;
	case Opt_wsync:
		parsing_mp->m_features |= XFS_FEAT_WSYNC;
		return 0;
	case Opt_norecovery:
		parsing_mp->m_features |= XFS_FEAT_NORECOVERY;
		return 0;
	case Opt_noalign:
		parsing_mp->m_features |= XFS_FEAT_NOALIGN;
		return 0;
	case Opt_swalloc:
		parsing_mp->m_features |= XFS_FEAT_SWALLOC;
		return 0;
	case Opt_sunit:
		parsing_mp->m_dalign = result.uint_32;
		return 0;
	case Opt_swidth:
		parsing_mp->m_swidth = result.uint_32;
		return 0;
	case Opt_inode32:
		parsing_mp->m_features |= XFS_FEAT_SMALL_INUMS;
		return 0;
	case Opt_inode64:
		parsing_mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
		return 0;
	case Opt_nouuid:
		parsing_mp->m_features |= XFS_FEAT_NOUUID;
		return 0;
	case Opt_largeio:
		parsing_mp->m_features |= XFS_FEAT_LARGE_IOSIZE;
		return 0;
	case Opt_nolargeio:
		parsing_mp->m_features &= ~XFS_FEAT_LARGE_IOSIZE;
		return 0;
	case Opt_filestreams:
		parsing_mp->m_features |= XFS_FEAT_FILESTREAMS;
		return 0;
	case Opt_noquota:
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
		return 0;
	case Opt_quota:
	case Opt_uquota:
	case Opt_usrquota:
		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD);
		return 0;
	case Opt_qnoenforce:
	case Opt_uqnoenforce:
		parsing_mp->m_qflags |= XFS_UQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
		return 0;
	case Opt_pquota:
	case Opt_prjquota:
		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD);
		return 0;
	case Opt_pqnoenforce:
		parsing_mp->m_qflags |= XFS_PQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
		return 0;
	case Opt_gquota:
	case Opt_grpquota:
		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
		return 0;
	case Opt_gqnoenforce:
		parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
		return 0;
	case Opt_discard:
		parsing_mp->m_features |= XFS_FEAT_DISCARD;
		return 0;
	case Opt_nodiscard:
		parsing_mp->m_features &= ~XFS_FEAT_DISCARD;
		return 0;
#ifdef CONFIG_FS_DAX
	case Opt_dax:
		xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
		return 0;
	case Opt_dax_enum:
		xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
		return 0;
#endif
	/* Following mount options will be removed in September 2025 */
	case Opt_ikeep:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true);
		parsing_mp->m_features |= XFS_FEAT_IKEEP;
		return 0;
	case Opt_noikeep:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false);
		parsing_mp->m_features &= ~XFS_FEAT_IKEEP;
		return 0;
	case Opt_attr2:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true);
		parsing_mp->m_features |= XFS_FEAT_ATTR2;
		return 0;
	case Opt_noattr2:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true);
		parsing_mp->m_features |= XFS_FEAT_NOATTR2;
		return 0;
	default:
		xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
		return -EINVAL;
	}

	return 0;
}

static int
xfs_fs_validate_params(
	struct xfs_mount *mp)
{
	/* No recovery flag requires a read-only mount */
	if (xfs_has_norecovery(mp) && !xfs_is_readonly(mp)) {
		xfs_warn(mp, "no-recovery mounts must be read-only.");
		return -EINVAL;
	}

	/*
	 * We have not read the superblock at this point, so only the attr2
	 * mount option can set the attr2 feature by this stage.
	 */
	if (xfs_has_attr2(mp) && xfs_has_noattr2(mp)) {
		xfs_warn(mp, "attr2 and noattr2 cannot both be specified.");
		return -EINVAL;
	}

	if (xfs_has_noalign(mp) && (mp->m_dalign || mp->m_swidth)) {
		xfs_warn(mp,
	"sunit and swidth options incompatible with the noalign option");
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
		xfs_warn(mp, "quota support not available in this kernel.");
		return -EINVAL;
	}

	if ((mp->m_dalign && !mp->m_swidth) ||
	    (!mp->m_dalign && mp->m_swidth)) {
		xfs_warn(mp, "sunit and swidth must be specified together");
		return -EINVAL;
	}

	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
		xfs_warn(mp,
	"stripe width (%d) must be a multiple of the stripe unit (%d)",
			mp->m_swidth, mp->m_dalign);
		return -EINVAL;
	}

	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return -EINVAL;
	}

	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize != 0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		xfs_warn(mp,
			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
			mp->m_logbsize);
		return -EINVAL;
	}

	if (xfs_has_allocsize(mp) &&
	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
		return -EINVAL;
	}

	return 0;
}

static int
xfs_fs_fill_super(
	struct super_block *sb,
	struct fs_context *fc)
{
	struct xfs_mount *mp = sb->s_fs_info;
	struct inode *root;
	int flags = 0, error;

	mp->m_super = sb;

	error = xfs_fs_validate_params(mp);
	if (error)
		goto out_free_names;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	sb->s_op = &xfs_super_operations;

	/*
	 * Delay mount work if the debug hook is set. This is debug
	 * instrumentation to coordinate simulation of xfs mount failures with
	 * VFS superblock operations.
	 */
	if (xfs_globals.mount_delay) {
		xfs_notice(mp, "Delaying mount for %d seconds.",
			xfs_globals.mount_delay);
		msleep(xfs_globals.mount_delay * 1000);
	}

	if (fc->sb_flags & SB_SILENT)
		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
	if (error)
		goto out_free_names;

	error = xfs_init_mount_workqueues(mp);
	if (error)
		goto out_close_devices;

	error = xfs_init_percpu_counters(mp);
	if (error)
		goto out_destroy_workqueues;

	error = xfs_inodegc_init_percpu(mp);
	if (error)
		goto out_destroy_counters;

	/*
	 * All percpu data structures requiring cleanup when a cpu goes offline
	 * must be allocated before adding this @mp to the cpu-dead handler's
	 * mount list.
	 */
	xfs_mount_list_add(mp);

	/* Allocate stats memory before we do operations that might use it */
	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
	if (!mp->m_stats.xs_stats) {
		error = -ENOMEM;
		goto out_destroy_inodegc;
	}

	error = xfs_readsb(mp, flags);
	if (error)
		goto out_free_stats;

	error = xfs_finish_flags(mp);
	if (error)
		goto out_free_sb;

	error = xfs_setup_devices(mp);
	if (error)
		goto out_free_sb;

	/* V4 support is undergoing deprecation. */
	if (!xfs_has_crc(mp)) {
#ifdef CONFIG_XFS_SUPPORT_V4
		xfs_warn_once(mp,
	"Deprecated V4 format (crc=0) will not be supported after September 2030.");
#else
		xfs_warn(mp,
	"Deprecated V4 format (crc=0) not supported by kernel.");
		error = -EINVAL;
		goto out_free_sb;
#endif
	}

	/* Filesystem claims it needs repair, so refuse the mount. */
	if (xfs_has_needsrepair(mp)) {
		xfs_warn(mp, "Filesystem needs repair. Please run xfs_repair.");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

	/*
	 * Don't touch the filesystem if a user tool thinks it owns the primary
	 * superblock. mkfs doesn't clear the flag from secondary supers, so
	 * we don't check them at all.
	 */
	if (mp->m_sb.sb_inprogress) {
		xfs_warn(mp, "Offline file system operation in progress!");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

	/*
	 * Until this is fixed only page-sized or smaller data blocks work.
	 */
	if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
		xfs_warn(mp,
			"File system with blocksize %d bytes. "
			"Only pagesize (%ld) or less will currently work.",
			mp->m_sb.sb_blocksize, PAGE_SIZE);
		error = -ENOSYS;
		goto out_free_sb;
	}

	/* Ensure this filesystem fits in the page cache limits */
	if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
	    xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
		xfs_warn(mp,
			"file system too large to be mounted on this system.");
		error = -EFBIG;
		goto out_free_sb;
	}

	/*
	 * XFS block mappings use 54 bits to store the logical block offset.
	 * This should suffice to handle the maximum file size that the VFS
	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
	 * to check this assertion.
	 *
	 * Avoid integer overflow by comparing the maximum bmbt offset to the
	 * maximum pagecache offset in units of fs blocks.
	 */
	if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
		xfs_warn(mp,
	"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
			XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
			XFS_MAX_FILEOFF);
		error = -EINVAL;
		goto out_free_sb;
	}

	error = xfs_filestream_mount(mp);
	if (error)
		goto out_free_sb;

	/*
	 * we must configure the block size in the superblock before we run the
	 * full mount process as the mount process can lookup and cache inodes.
	 */
	sb->s_magic = XFS_SUPER_MAGIC;
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_max_links = XFS_MAXLINK;
	sb->s_time_gran = 1;
	if (xfs_has_bigtime(mp)) {
		sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
		sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
	} else {
		sb->s_time_min = XFS_LEGACY_TIME_MIN;
		sb->s_time_max = XFS_LEGACY_TIME_MAX;
	}
	trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
	sb->s_iflags |= SB_I_CGROUPWB;

	set_posix_acl_flag(sb);

	/* version 5 superblocks support inode version counters. */
	if (xfs_has_crc(mp))
		sb->s_flags |= SB_I_VERSION;

	if (xfs_has_dax_always(mp)) {
		error = xfs_setup_dax_always(mp);
		if (error)
			goto out_filestream_unmount;
	}

	if (xfs_has_discard(mp) && !bdev_max_discard_sectors(sb->s_bdev)) {
		xfs_warn(mp,
	"mounting with \"discard\" option, but the device does not support discard");
		mp->m_features &= ~XFS_FEAT_DISCARD;
	}

	if (xfs_has_reflink(mp)) {
		if (mp->m_sb.sb_rblocks) {
			xfs_alert(mp,
	"reflink not compatible with realtime device!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}

		if (xfs_globals.always_cow) {
			xfs_info(mp, "using DEBUG-only always_cow mode.");
			mp->m_always_cow = true;
		}
	}

	if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) {
		xfs_alert(mp,
	"reverse mapping btree not compatible with realtime device!");
		error = -EINVAL;
		goto out_filestream_unmount;
	}

	if (xfs_has_large_extent_counts(mp))
		xfs_warn(mp,
	"EXPERIMENTAL Large extent counts feature in use. Use at your own risk!");

	error = xfs_mountfs(mp);
	if (error)
		goto out_filestream_unmount;

	root = igrab(VFS_I(mp->m_rootip));
	if (!root) {
		error = -ENOENT;
		goto out_unmount;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		error = -ENOMEM;
		goto out_unmount;
	}

	return 0;

out_filestream_unmount:
	xfs_filestream_unmount(mp);
out_free_sb:
	xfs_freesb(mp);
out_free_stats:
	free_percpu(mp->m_stats.xs_stats);
out_destroy_inodegc:
	xfs_mount_list_del(mp);
	xfs_inodegc_free_percpu(mp);
out_destroy_counters:
	xfs_destroy_percpu_counters(mp);
out_destroy_workqueues:
	xfs_destroy_mount_workqueues(mp);
out_close_devices:
	xfs_close_devices(mp);
out_free_names:
	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
	return error;

out_unmount:
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);
	goto out_free_sb;
}

static int
xfs_fs_get_tree(
	struct fs_context *fc)
{
	return get_tree_bdev(fc, xfs_fs_fill_super);
}

static int
xfs_remount_rw(
	struct xfs_mount *mp)
{
	struct xfs_sb *sbp = &mp->m_sb;
	int error;

	if (xfs_has_norecovery(mp)) {
		xfs_warn(mp,
			"ro->rw transition prohibited on norecovery mount");
		return -EINVAL;
	}

	if (xfs_sb_is_v5(sbp) &&
	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
		xfs_warn(mp,
	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
			(sbp->sb_features_ro_compat &
				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
		return -EINVAL;
	}

	clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);

	/*
	 * If this is the first remount to writeable state we might have some
	 * superblock changes to update.
	 */
	if (mp->m_update_sb) {
		error = xfs_sync_sb(mp, false);
		if (error) {
			xfs_warn(mp, "failed to write sb changes");
			return error;
		}
		mp->m_update_sb = false;
	}

	/*
	 * Fill out the reserve pool if it is empty. Use the stashed value if
	 * it is non-zero, otherwise go with the default.
	 */
	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);
	xfs_blockgc_start(mp);

	/* Create the per-AG metadata reservation pool. */
	error = xfs_fs_reserve_ag_blocks(mp);
	if (error && error != -ENOSPC)
		return error;

	/* Re-enable the background inode inactivation worker. */
	xfs_inodegc_start(mp);

	return 0;
}

static int
xfs_remount_ro(
	struct xfs_mount *mp)
{
	struct xfs_icwalk icw = {
		.icw_flags = XFS_ICWALK_FLAG_SYNC,
	};
	int error;

	/* Flush all the dirty data to disk. */
	error = sync_filesystem(mp->m_super);
	if (error)
		return error;

	/*
	 * Cancel background eofb scanning so it cannot race with the final
	 * log force+buftarg wait and deadlock the remount.
	 */
	xfs_blockgc_stop(mp);

	/*
	 * Clear out all remaining COW staging extents and speculative post-EOF
	 * preallocations so that we don't leave inodes requiring inactivation
	 * cleanups during reclaim on a read-only mount. We must process every
	 * cached inode, so this requires a synchronous cache scan.
	 */
	error = xfs_blockgc_free_space(mp, &icw);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/*
	 * Stop the inodegc background worker. xfs_fs_reconfigure already
	 * flushed all pending inodegc work when it sync'd the filesystem.
	 * The VFS holds s_umount, so we know that inodes cannot enter
	 * xfs_fs_destroy_inode during a remount operation. In readonly mode
	 * we send inodes straight to reclaim, so no inodes will be queued.
	 */
	xfs_inodegc_stop(mp);

	/* Free the per-AG metadata reservation pool. */
	error = xfs_fs_unreserve_ag_blocks(mp);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/*
	 * Before we sync the metadata, we need to free up the reserve block
	 * pool so that the used block count in the superblock on disk is
	 * correct at the end of the remount. Stash the current reserve pool
	 * size so that if we get remounted rw, we can return it to the same
	 * size.
	 */
	xfs_save_resvblks(mp);

	xfs_log_clean(mp);
	set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);

	return 0;
}

/*
 * Logically we would return an error here to prevent users from believing
 * they might have changed mount options using remount which can't be changed.
 *
 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
 * arguments in some cases so we can't blindly reject options, but have to
 * check for each specified option if it actually differs from the currently
 * set option and only reject it if that's the case.
 *
 * Until that is implemented we return success for every remount request, and
 * silently ignore all options that we can't actually change.
 */
static int
xfs_fs_reconfigure(
	struct fs_context *fc)
{
	struct xfs_mount *mp = XFS_M(fc->root->d_sb);
	struct xfs_mount *new_mp = fc->s_fs_info;
	int flags = fc->sb_flags;
	int error;

	/* version 5 superblocks always support version counters. */
	if (xfs_has_crc(mp))
		fc->sb_flags |= SB_I_VERSION;

	error = xfs_fs_validate_params(new_mp);
	if (error)
		return error;

	/* inode32 -> inode64 */
	if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) {
		mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
	}

	/* inode64 -> inode32 */
	if (!xfs_has_small_inums(mp) && xfs_has_small_inums(new_mp)) {
		mp->m_features |= XFS_FEAT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
	}

	/* ro -> rw */
	if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) {
		error = xfs_remount_rw(mp);
		if (error)
			return error;
	}

	/* rw -> ro */
	if (!xfs_is_readonly(mp) && (flags & SB_RDONLY)) {
		error = xfs_remount_ro(mp);
		if (error)
			return error;
	}

	return 0;
}

static void xfs_fs_free(
	struct fs_context *fc)
{
	struct xfs_mount *mp = fc->s_fs_info;

	/*
	 * mp is stored in the fs_context when it is initialized.
	 * mp is transferred to the superblock on a successful mount,
	 * but if an error occurs before the transfer we have to free
	 * it here.
	 */
	if (mp)
		xfs_mount_free(mp);
}

static const struct fs_context_operations xfs_context_ops = {
	.parse_param = xfs_fs_parse_param,
	.get_tree    = xfs_fs_get_tree,
	.reconfigure = xfs_fs_reconfigure,
	.free        = xfs_fs_free,
};

static int xfs_init_fs_context(
	struct fs_context *fc)
{
	struct xfs_mount *mp;

	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
	if (!mp)
		return -ENOMEM;

	spin_lock_init(&mp->m_sb_lock);
	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
	spin_lock_init(&mp->m_perag_lock);
	mutex_init(&mp->m_growlock);
	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
	mp->m_kobj.kobject.kset = xfs_kset;
	/*
	 * We don't create the finobt per-ag space reservation until after log
	 * recovery, so we must set this to true so that an ifree transaction
	 * started during log recovery will not depend on space reservations
	 * for finobt expansion.
	 */
	mp->m_finobt_nores = true;

	/*
	 * These can be overridden by the mount option parsing.
	 */
	mp->m_logbufs = -1;
	mp->m_logbsize = -1;
	mp->m_allocsize_log = 16; /* 64k */

	/*
	 * Copy binary VFS mount flags we are interested in.
	 */
	if (fc->sb_flags & SB_RDONLY)
		set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
	if (fc->sb_flags & SB_DIRSYNC)
		mp->m_features |= XFS_FEAT_DIRSYNC;
	if (fc->sb_flags & SB_SYNCHRONOUS)
		mp->m_features |= XFS_FEAT_WSYNC;

	fc->s_fs_info = mp;
	fc->ops = &xfs_context_ops;

	return 0;
}

static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.init_fs_context	= xfs_init_fs_context,
	.parameters		= xfs_fs_parameters,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("xfs");

STATIC int __init
xfs_init_caches(void)
{
	int error;

	xfs_buf_cache = kmem_cache_create("xfs_buf", sizeof(struct xfs_buf), 0,
					  SLAB_HWCACHE_ALIGN |
					  SLAB_RECLAIM_ACCOUNT |
					  SLAB_MEM_SPREAD,
					  NULL);
	if (!xfs_buf_cache)
		goto out;

	xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket",
						 sizeof(struct xlog_ticket),
						 0, 0, NULL);
	if (!xfs_log_ticket_cache)
		goto out_destroy_buf_cache;

	error = xfs_btree_init_cur_caches();
	if (error)
		goto out_destroy_log_ticket_cache;

	error = xfs_defer_init_item_caches();
	if (error)
		goto out_destroy_btree_cur_cache;

	xfs_da_state_cache = kmem_cache_create("xfs_da_state",
					       sizeof(struct xfs_da_state),
					       0, 0, NULL);
	if (!xfs_da_state_cache)
		goto out_destroy_defer_item_cache;

	xfs_ifork_cache = kmem_cache_create("xfs_ifork",
					    sizeof(struct xfs_ifork),
					    0, 0, NULL);
	if (!xfs_ifork_cache)
		goto out_destroy_da_state_cache;

	xfs_trans_cache = kmem_cache_create("xfs_trans",
					    sizeof(struct xfs_trans),
					    0, 0, NULL);
	if (!xfs_trans_cache)
		goto out_destroy_ifork_cache;

	/*
	 * The size of the cache-allocated buf log item is the maximum
	 * size possible under XFS. This wastes a little bit of memory,
	 * but it is much faster.
STATIC int __init
xfs_init_caches(void)
{
	int		error;

	xfs_buf_cache = kmem_cache_create("xfs_buf", sizeof(struct xfs_buf), 0,
					 SLAB_HWCACHE_ALIGN |
					 SLAB_RECLAIM_ACCOUNT |
					 SLAB_MEM_SPREAD,
					 NULL);
	if (!xfs_buf_cache)
		goto out;

	xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket",
						sizeof(struct xlog_ticket),
						0, 0, NULL);
	if (!xfs_log_ticket_cache)
		goto out_destroy_buf_cache;

	error = xfs_btree_init_cur_caches();
	if (error)
		goto out_destroy_log_ticket_cache;

	error = xfs_defer_init_item_caches();
	if (error)
		goto out_destroy_btree_cur_cache;

	xfs_da_state_cache = kmem_cache_create("xfs_da_state",
					      sizeof(struct xfs_da_state),
					      0, 0, NULL);
	if (!xfs_da_state_cache)
		goto out_destroy_defer_item_cache;

	xfs_ifork_cache = kmem_cache_create("xfs_ifork",
					   sizeof(struct xfs_ifork),
					   0, 0, NULL);
	if (!xfs_ifork_cache)
		goto out_destroy_da_state_cache;

	xfs_trans_cache = kmem_cache_create("xfs_trans",
					   sizeof(struct xfs_trans),
					   0, 0, NULL);
	if (!xfs_trans_cache)
		goto out_destroy_ifork_cache;


	/*
	 * The size of the cache-allocated buf log item is the maximum
	 * size possible under XFS. This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_cache = kmem_cache_create("xfs_buf_item",
					      sizeof(struct xfs_buf_log_item),
					      0, 0, NULL);
	if (!xfs_buf_item_cache)
		goto out_destroy_trans_cache;

	xfs_efd_cache = kmem_cache_create("xfs_efd_item",
			xfs_efd_log_item_sizeof(XFS_EFD_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_efd_cache)
		goto out_destroy_buf_item_cache;

	xfs_efi_cache = kmem_cache_create("xfs_efi_item",
			xfs_efi_log_item_sizeof(XFS_EFI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_efi_cache)
		goto out_destroy_efd_cache;

	xfs_inode_cache = kmem_cache_create("xfs_inode",
					   sizeof(struct xfs_inode), 0,
					   (SLAB_HWCACHE_ALIGN |
					    SLAB_RECLAIM_ACCOUNT |
					    SLAB_MEM_SPREAD | SLAB_ACCOUNT),
					   xfs_fs_inode_init_once);
	if (!xfs_inode_cache)
		goto out_destroy_efi_cache;

	xfs_ili_cache = kmem_cache_create("xfs_ili",
					 sizeof(struct xfs_inode_log_item), 0,
					 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					 NULL);
	if (!xfs_ili_cache)
		goto out_destroy_inode_cache;

	xfs_icreate_cache = kmem_cache_create("xfs_icr",
					     sizeof(struct xfs_icreate_item),
					     0, 0, NULL);
	if (!xfs_icreate_cache)
		goto out_destroy_ili_cache;

	xfs_rud_cache = kmem_cache_create("xfs_rud_item",
					 sizeof(struct xfs_rud_log_item),
					 0, 0, NULL);
	if (!xfs_rud_cache)
		goto out_destroy_icreate_cache;

	xfs_rui_cache = kmem_cache_create("xfs_rui_item",
			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_rui_cache)
		goto out_destroy_rud_cache;

	xfs_cud_cache = kmem_cache_create("xfs_cud_item",
					 sizeof(struct xfs_cud_log_item),
					 0, 0, NULL);
	if (!xfs_cud_cache)
		goto out_destroy_rui_cache;

	xfs_cui_cache = kmem_cache_create("xfs_cui_item",
			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_cui_cache)
		goto out_destroy_cud_cache;

	xfs_bud_cache = kmem_cache_create("xfs_bud_item",
					 sizeof(struct xfs_bud_log_item),
					 0, 0, NULL);
	if (!xfs_bud_cache)
		goto out_destroy_cui_cache;

	xfs_bui_cache = kmem_cache_create("xfs_bui_item",
			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_bui_cache)
		goto out_destroy_bud_cache;

	xfs_attrd_cache = kmem_cache_create("xfs_attrd_item",
					   sizeof(struct xfs_attrd_log_item),
					   0, 0, NULL);
	if (!xfs_attrd_cache)
		goto out_destroy_bui_cache;

	xfs_attri_cache = kmem_cache_create("xfs_attri_item",
					   sizeof(struct xfs_attri_log_item),
					   0, 0, NULL);
	if (!xfs_attri_cache)
		goto out_destroy_attrd_cache;

	xfs_iunlink_cache = kmem_cache_create("xfs_iul_item",
					     sizeof(struct xfs_iunlink_item),
					     0, 0, NULL);
	if (!xfs_iunlink_cache)
		goto out_destroy_attri_cache;

	return 0;

 out_destroy_attri_cache:
	kmem_cache_destroy(xfs_attri_cache);
 out_destroy_attrd_cache:
	kmem_cache_destroy(xfs_attrd_cache);
 out_destroy_bui_cache:
	kmem_cache_destroy(xfs_bui_cache);
 out_destroy_bud_cache:
	kmem_cache_destroy(xfs_bud_cache);
 out_destroy_cui_cache:
	kmem_cache_destroy(xfs_cui_cache);
 out_destroy_cud_cache:
	kmem_cache_destroy(xfs_cud_cache);
 out_destroy_rui_cache:
	kmem_cache_destroy(xfs_rui_cache);
 out_destroy_rud_cache:
	kmem_cache_destroy(xfs_rud_cache);
 out_destroy_icreate_cache:
	kmem_cache_destroy(xfs_icreate_cache);
 out_destroy_ili_cache:
	kmem_cache_destroy(xfs_ili_cache);
 out_destroy_inode_cache:
	kmem_cache_destroy(xfs_inode_cache);
 out_destroy_efi_cache:
	kmem_cache_destroy(xfs_efi_cache);
 out_destroy_efd_cache:
	kmem_cache_destroy(xfs_efd_cache);
 out_destroy_buf_item_cache:
	kmem_cache_destroy(xfs_buf_item_cache);
 out_destroy_trans_cache:
	kmem_cache_destroy(xfs_trans_cache);
 out_destroy_ifork_cache:
	kmem_cache_destroy(xfs_ifork_cache);
 out_destroy_da_state_cache:
	kmem_cache_destroy(xfs_da_state_cache);
 out_destroy_defer_item_cache:
	xfs_defer_destroy_item_caches();
 out_destroy_btree_cur_cache:
	xfs_btree_destroy_cur_caches();
 out_destroy_log_ticket_cache:
	kmem_cache_destroy(xfs_log_ticket_cache);
 out_destroy_buf_cache:
	kmem_cache_destroy(xfs_buf_cache);
 out:
	return -ENOMEM;
}

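/*
 * Tear down everything created by xfs_init_caches(), in the reverse order
 * of creation.  This only runs from module exit or the init error path,
 * i.e. with no XFS filesystems mounted, so every cache should be empty by
 * the time it is destroyed (kmem_cache_destroy() warns otherwise).
 */
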
STATIC void
xfs_destroy_caches(void)
{
	/*
	 * Make sure all delayed rcu free are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(xfs_iunlink_cache);
	kmem_cache_destroy(xfs_attri_cache);
	kmem_cache_destroy(xfs_attrd_cache);
	kmem_cache_destroy(xfs_bui_cache);
	kmem_cache_destroy(xfs_bud_cache);
	kmem_cache_destroy(xfs_cui_cache);
	kmem_cache_destroy(xfs_cud_cache);
	kmem_cache_destroy(xfs_rui_cache);
	kmem_cache_destroy(xfs_rud_cache);
	kmem_cache_destroy(xfs_icreate_cache);
	kmem_cache_destroy(xfs_ili_cache);
	kmem_cache_destroy(xfs_inode_cache);
	kmem_cache_destroy(xfs_efi_cache);
	kmem_cache_destroy(xfs_efd_cache);
	kmem_cache_destroy(xfs_buf_item_cache);
	kmem_cache_destroy(xfs_trans_cache);
	kmem_cache_destroy(xfs_ifork_cache);
	kmem_cache_destroy(xfs_da_state_cache);
	xfs_defer_destroy_item_caches();
	xfs_btree_destroy_cur_caches();
	kmem_cache_destroy(xfs_log_ticket_cache);
	kmem_cache_destroy(xfs_buf_cache);
}

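/*
 * Global workqueues.  WQ_MEM_RECLAIM guarantees a rescuer thread so the
 * queue can make forward progress while the system is reclaiming memory,
 * WQ_FREEZABLE holds off work execution across system suspend, and
 * WQ_UNBOUND lets work run on any CPU rather than the submitting one.
 * A max_active of 0 selects the workqueue core's default concurrency limit.
 */
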
STATIC int __init
xfs_init_workqueues(void)
{
	/*
	 * The allocation workqueue can be used in memory reclaim situations
	 * (writepage path), and parallelism is only limited by the number of
	 * AGs in all the filesystems mounted. Hence use the default large
	 * max_active value for this workqueue.
	 */
	xfs_alloc_wq = alloc_workqueue("xfsalloc",
			XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
	if (!xfs_alloc_wq)
		return -ENOMEM;

	xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
			0);
	if (!xfs_discard_wq)
		goto out_free_alloc_wq;

	return 0;
out_free_alloc_wq:
	destroy_workqueue(xfs_alloc_wq);
	return -ENOMEM;
}

STATIC void
xfs_destroy_workqueues(void)
{
	destroy_workqueue(xfs_discard_wq);
	destroy_workqueue(xfs_alloc_wq);
}

#ifdef CONFIG_HOTPLUG_CPU
static int
xfs_cpu_dead(
	unsigned int		cpu)
{
	struct xfs_mount	*mp, *n;

	spin_lock(&xfs_mount_list_lock);
	list_for_each_entry_safe(mp, n, &xfs_mount_list, m_mount_list) {
		spin_unlock(&xfs_mount_list_lock);
		xfs_inodegc_cpu_dead(mp, cpu);
		xlog_cil_pcp_dead(mp->m_log, cpu);
		spin_lock(&xfs_mount_list_lock);
	}
	spin_unlock(&xfs_mount_list_lock);
	return 0;
}

static int __init
xfs_cpu_hotplug_init(void)
{
	int	error;

	error = cpuhp_setup_state_nocalls(CPUHP_XFS_DEAD, "xfs:dead", NULL,
			xfs_cpu_dead);
	if (error < 0)
		xfs_alert(NULL,
"Failed to initialise CPU hotplug, error %d. XFS is non-functional.",
			error);
	return error;
}

static void
xfs_cpu_hotplug_destroy(void)
{
	cpuhp_remove_state_nocalls(CPUHP_XFS_DEAD);
}

#else /* !CONFIG_HOTPLUG_CPU */
static inline int xfs_cpu_hotplug_init(void) { return 0; }
static inline void xfs_cpu_hotplug_destroy(void) {}
#endif

STATIC int __init
init_xfs_fs(void)
{
	int			error;

	xfs_check_ondisk_structs();

	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");

	xfs_dir_startup();

	error = xfs_cpu_hotplug_init();
	if (error)
		goto out;

	error = xfs_init_caches();
	if (error)
		goto out_destroy_hp;

	error = xfs_init_workqueues();
	if (error)
		goto out_destroy_caches;

	error = xfs_mru_cache_init();
	if (error)
		goto out_destroy_wq;

	error = xfs_init_procfs();
	if (error)
		goto out_mru_cache_uninit;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;

	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
	if (!xfs_kset) {
		error = -ENOMEM;
		goto out_sysctl_unregister;
	}

	xfsstats.xs_kobj.kobject.kset = xfs_kset;

	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
	if (!xfsstats.xs_stats) {
		error = -ENOMEM;
		goto out_kset_unregister;
	}

	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
			       "stats");
	if (error)
		goto out_free_stats;

#ifdef DEBUG
	xfs_dbg_kobj.kobject.kset = xfs_kset;
	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
	if (error)
		goto out_remove_stats_kobj;
#endif

	error = xfs_qm_init();
	if (error)
		goto out_remove_dbg_kobj;

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto out_qm_exit;
	return 0;

 out_qm_exit:
	xfs_qm_exit();
 out_remove_dbg_kobj:
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
 out_remove_stats_kobj:
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
 out_free_stats:
	free_percpu(xfsstats.xs_stats);
 out_kset_unregister:
	kset_unregister(xfs_kset);
 out_sysctl_unregister:
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
 out_destroy_wq:
	xfs_destroy_workqueues();
 out_destroy_caches:
	xfs_destroy_caches();
 out_destroy_hp:
	xfs_cpu_hotplug_destroy();
 out:
	return error;
}

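/*
 * Module teardown.  Because xfs_fs_type.owner is THIS_MODULE, every mounted
 * XFS filesystem pins the module, so exit_xfs_fs() can only run once no XFS
 * filesystems remain mounted and can simply unwind the init_xfs_fs() setup.
 */
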
STATIC void __exit
exit_xfs_fs(void)
{
	xfs_qm_exit();
	unregister_filesystem(&xfs_fs_type);
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
	free_percpu(xfsstats.xs_stats);
	kset_unregister(xfs_kset);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_mru_cache_uninit();
	xfs_destroy_workqueues();
	xfs_destroy_caches();
	xfs_uuid_table_free();
	xfs_cpu_hotplug_destroy();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");