// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_dir2.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_icreate_item.h"
#include "xfs_filestream.h"
#include "xfs_quota.h"
#include "xfs_sysfs.h"
#include "xfs_ondisk.h"
#include "xfs_rmap_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"
#include "xfs_pwork.h"

#include <linux/magic.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

static const struct super_operations xfs_super_operations;

static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
#ifdef DEBUG
static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
#endif

enum xfs_dax_mode {
	XFS_DAX_INODE = 0,
	XFS_DAX_ALWAYS = 1,
	XFS_DAX_NEVER = 2,
};

static void
xfs_mount_set_dax_mode(
	struct xfs_mount	*mp,
	enum xfs_dax_mode	mode)
{
	switch (mode) {
	case XFS_DAX_INODE:
		mp->m_flags &= ~(XFS_MOUNT_DAX_ALWAYS | XFS_MOUNT_DAX_NEVER);
		break;
	case XFS_DAX_ALWAYS:
		mp->m_flags |= XFS_MOUNT_DAX_ALWAYS;
		mp->m_flags &= ~XFS_MOUNT_DAX_NEVER;
		break;
	case XFS_DAX_NEVER:
		mp->m_flags |= XFS_MOUNT_DAX_NEVER;
		mp->m_flags &= ~XFS_MOUNT_DAX_ALWAYS;
		break;
	}
}

static const struct constant_table dax_param_enums[] = {
	{"inode",	XFS_DAX_INODE },
	{"always",	XFS_DAX_ALWAYS },
	{"never",	XFS_DAX_NEVER },
	{}
};
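
/*
 * Note on the dual "dax" entries in the parameter table below (illustrative):
 * a bare "dax" matches the fsparam_flag() entry and is treated like
 * dax=always, while "dax=inode", "dax=always" and "dax=never" match the
 * fsparam_enum() entry and are looked up in dax_param_enums above.
 */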

/*
 * Table driven mount option parser.
 */
enum {
	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
};

static const struct fs_parameter_spec xfs_fs_parameters[] = {
	fsparam_u32("logbufs",		Opt_logbufs),
	fsparam_string("logbsize",	Opt_logbsize),
	fsparam_string("logdev",	Opt_logdev),
	fsparam_string("rtdev",		Opt_rtdev),
	fsparam_flag("wsync",		Opt_wsync),
	fsparam_flag("noalign",		Opt_noalign),
	fsparam_flag("swalloc",		Opt_swalloc),
	fsparam_u32("sunit",		Opt_sunit),
	fsparam_u32("swidth",		Opt_swidth),
	fsparam_flag("nouuid",		Opt_nouuid),
	fsparam_flag("grpid",		Opt_grpid),
	fsparam_flag("nogrpid",		Opt_nogrpid),
	fsparam_flag("bsdgroups",	Opt_bsdgroups),
	fsparam_flag("sysvgroups",	Opt_sysvgroups),
	fsparam_string("allocsize",	Opt_allocsize),
	fsparam_flag("norecovery",	Opt_norecovery),
	fsparam_flag("inode64",		Opt_inode64),
	fsparam_flag("inode32",		Opt_inode32),
	fsparam_flag("ikeep",		Opt_ikeep),
	fsparam_flag("noikeep",		Opt_noikeep),
	fsparam_flag("largeio",		Opt_largeio),
	fsparam_flag("nolargeio",	Opt_nolargeio),
	fsparam_flag("attr2",		Opt_attr2),
	fsparam_flag("noattr2",		Opt_noattr2),
	fsparam_flag("filestreams",	Opt_filestreams),
	fsparam_flag("quota",		Opt_quota),
	fsparam_flag("noquota",		Opt_noquota),
	fsparam_flag("usrquota",	Opt_usrquota),
	fsparam_flag("grpquota",	Opt_grpquota),
	fsparam_flag("prjquota",	Opt_prjquota),
	fsparam_flag("uquota",		Opt_uquota),
	fsparam_flag("gquota",		Opt_gquota),
	fsparam_flag("pquota",		Opt_pquota),
	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
	fsparam_flag("qnoenforce",	Opt_qnoenforce),
	fsparam_flag("discard",		Opt_discard),
	fsparam_flag("nodiscard",	Opt_nodiscard),
	fsparam_flag("dax",		Opt_dax),
	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
	{}
};
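
/*
 * Illustrative flow, assuming a classic mount(8) invocation:
 *
 *	mount -t xfs -o logbufs=8,noalign,dax=never /dev/sda1 /mnt
 *
 * The VFS splits the option string and feeds each "key[=value]" through
 * fs_parse() against the table above; the resulting opt token is then
 * handled in xfs_fs_parse_param() further down in this file.
 */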

struct proc_xfs_info {
	uint64_t	flag;
	char		*str;
};

static int
xfs_fs_show_options(
	struct seq_file		*m,
	struct dentry		*root)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_IKEEP,		",ikeep" },
		{ XFS_MOUNT_WSYNC,		",wsync" },
		{ XFS_MOUNT_NOALIGN,		",noalign" },
		{ XFS_MOUNT_SWALLOC,		",swalloc" },
		{ XFS_MOUNT_NOUUID,		",nouuid" },
		{ XFS_MOUNT_NORECOVERY,		",norecovery" },
		{ XFS_MOUNT_ATTR2,		",attr2" },
		{ XFS_MOUNT_FILESTREAMS,	",filestreams" },
		{ XFS_MOUNT_GRPID,		",grpid" },
		{ XFS_MOUNT_DISCARD,		",discard" },
		{ XFS_MOUNT_LARGEIO,		",largeio" },
		{ XFS_MOUNT_DAX_ALWAYS,		",dax=always" },
		{ XFS_MOUNT_DAX_NEVER,		",dax=never" },
		{ 0, NULL }
	};
	struct xfs_mount	*mp = XFS_M(root->d_sb);
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_flags & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}

	seq_printf(m, ",inode%d",
		(mp->m_flags & XFS_MOUNT_SMALL_INUMS) ? 32 : 64);

	if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
		seq_printf(m, ",allocsize=%dk",
			   (1 << mp->m_allocsize_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);

	if (mp->m_logname)
		seq_show_option(m, "logdev", mp->m_logname);
	if (mp->m_rtname)
		seq_show_option(m, "rtdev", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, ",sunit=%d",
			   (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, ",swidth=%d",
			   (int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	if (mp->m_qflags & XFS_UQUOTA_ACCT) {
		if (mp->m_qflags & XFS_UQUOTA_ENFD)
			seq_puts(m, ",usrquota");
		else
			seq_puts(m, ",uqnoenforce");
	}

	if (mp->m_qflags & XFS_PQUOTA_ACCT) {
		if (mp->m_qflags & XFS_PQUOTA_ENFD)
			seq_puts(m, ",prjquota");
		else
			seq_puts(m, ",pqnoenforce");
	}
	if (mp->m_qflags & XFS_GQUOTA_ACCT) {
		if (mp->m_qflags & XFS_GQUOTA_ENFD)
			seq_puts(m, ",grpquota");
		else
			seq_puts(m, ",gqnoenforce");
	}

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, ",noquota");

	return 0;
}
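
/*
 * Example output (illustrative) as it might appear in /proc/mounts for a
 * mount with wsync and enforced user quotas:
 *
 *	/dev/sda1 /mnt xfs rw,wsync,inode64,usrquota 0 0
 *
 * Flag options come first from xfs_info_set above, then the value options,
 * then the quota state.
 */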

/*
 * Set parameters for inode allocation heuristics, taking into account
 * filesystem size and inode32/inode64 mount options; i.e. specifically
 * whether or not XFS_MOUNT_SMALL_INUMS is set.
 *
 * Inode allocation patterns are altered only if inode32 is requested
 * (XFS_MOUNT_SMALL_INUMS), and the filesystem is sufficiently large.
 * If altered, XFS_MOUNT_32BITINODES is set as well.
 *
 * An agcount independent of that in the mount structure is provided
 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 * to the potentially higher ag count.
 *
 * Returns the maximum AG index which may contain inodes.
 */
xfs_agnumber_t
xfs_set_inode_alloc(
	struct xfs_mount *mp,
	xfs_agnumber_t	agcount)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	maxagi = 0;
	xfs_sb_t	*sbp = &mp->m_sb;
	xfs_agnumber_t	max_metadata;
	xfs_agino_t	agino;
	xfs_ino_t	ino;

	/*
	 * Calculate how much should be reserved for inodes to meet
	 * the max inode percentage.  Used only for inode32.
	 */
	if (M_IGEO(mp)->maxicount) {
		uint64_t	icount;

		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		icount += sbp->sb_agblocks - 1;
		do_div(icount, sbp->sb_agblocks);
		max_metadata = icount;
	} else {
		max_metadata = agcount;
	}

	/* Get the last possible inode in the filesystem */
	agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/*
	 * If user asked for no more than 32-bit inodes, and the fs is
	 * sufficiently large, set XFS_MOUNT_32BITINODES if we must alter
	 * the allocator to accommodate the request.
	 */
	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
		mp->m_flags |= XFS_MOUNT_32BITINODES;
	else
		mp->m_flags &= ~XFS_MOUNT_32BITINODES;

	for (index = 0; index < agcount; index++) {
		struct xfs_perag	*pag;

		ino = XFS_AGINO_TO_INO(mp, index, agino);

		pag = xfs_perag_get(mp, index);

		if (mp->m_flags & XFS_MOUNT_32BITINODES) {
			if (ino > XFS_MAXINUMBER_32) {
				pag->pagi_inodeok = 0;
				pag->pagf_metadata = 0;
			} else {
				pag->pagi_inodeok = 1;
				maxagi++;
				if (index < max_metadata)
					pag->pagf_metadata = 1;
				else
					pag->pagf_metadata = 0;
			}
		} else {
			pag->pagi_inodeok = 1;
			pag->pagf_metadata = 0;
		}

		xfs_perag_put(pag);
	}

	return (mp->m_flags & XFS_MOUNT_32BITINODES) ? maxagi : agcount;
}
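
/*
 * Worked example (illustrative numbers): with sb_dblocks = 1000000,
 * sb_imax_pct = 25 and sb_agblocks = 100000, the inode32 path above computes
 * icount = 250000 blocks reserved for inodes, which rounds up to
 * max_metadata = 3 AGs that stay preferred for inode and metadata allocation.
 */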

STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				    mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
	}

	return error;
}

STATIC void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

void
xfs_blkdev_issue_flush(
	xfs_buftarg_t		*buftarg)
{
	blkdev_issue_flush(buftarg->bt_bdev);
}

STATIC void
xfs_close_devices(
	struct xfs_mount	*mp)
{
	struct dax_device	*dax_ddev = mp->m_ddev_targp->bt_daxdev;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		struct block_device	*logdev = mp->m_logdev_targp->bt_bdev;
		struct dax_device	*dax_logdev = mp->m_logdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_logdev_targp);
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
	if (mp->m_rtdev_targp) {
		struct block_device	*rtdev = mp->m_rtdev_targp->bt_bdev;
		struct dax_device	*dax_rtdev = mp->m_rtdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_rtdev_targp);
		xfs_blkdev_put(rtdev);
		fs_put_dax(dax_rtdev);
	}
	xfs_free_buftarg(mp->m_ddev_targp);
	fs_put_dax(dax_ddev);
}

/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
	struct xfs_mount	*mp)
{
	struct block_device	*ddev = mp->m_super->s_bdev;
	struct dax_device	*dax_ddev = fs_dax_get_by_bdev(ddev);
	struct dax_device	*dax_logdev = NULL, *dax_rtdev = NULL;
	struct block_device	*logdev = NULL, *rtdev = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
		if (error)
			goto out;
		dax_logdev = fs_dax_get_by_bdev(logdev);
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
		if (error)
			goto out_close_logdev;

		if (rtdev == ddev || rtdev == logdev) {
			xfs_warn(mp,
	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = -EINVAL;
			goto out_close_rtdev;
		}
		dax_rtdev = fs_dax_get_by_bdev(rtdev);
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = -ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev && logdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		mp->m_logdev_targp = mp->m_ddev_targp;
	}

	return 0;

 out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp);
 out_free_ddev_targ:
	xfs_free_buftarg(mp->m_ddev_targp);
 out_close_rtdev:
	xfs_blkdev_put(rtdev);
	fs_put_dax(dax_rtdev);
 out_close_logdev:
	if (logdev && logdev != ddev) {
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
 out:
	fs_put_dax(dax_ddev);
	return error;
}
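
/*
 * The error unwind above runs in reverse order of acquisition: buffer targets
 * are torn down before the underlying block/dax device references are
 * dropped, mirroring the teardown order in xfs_close_devices().
 */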

/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		if (xfs_sb_version_hassector(&mp->m_sb))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}

STATIC int
xfs_init_mount_workqueues(
	struct xfs_mount	*mp)
{
	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			1, mp->m_super->s_id);
	if (!mp->m_buf_workqueue)
		goto out;

	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_unwritten_workqueue)
		goto out_destroy_buf;

	mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
			0, mp->m_super->s_id);
	if (!mp->m_cil_workqueue)
		goto out_destroy_unwritten;

	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_reclaim_workqueue)
		goto out_destroy_cil;

	mp->m_gc_workqueue = alloc_workqueue("xfs-gc/%s",
			WQ_SYSFS | WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM,
			0, mp->m_super->s_id);
	if (!mp->m_gc_workqueue)
		goto out_destroy_reclaim;

	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
			XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
	if (!mp->m_sync_workqueue)
		goto out_destroy_gc;

	return 0;

 out_destroy_gc:
	destroy_workqueue(mp->m_gc_workqueue);
 out_destroy_reclaim:
	destroy_workqueue(mp->m_reclaim_workqueue);
 out_destroy_cil:
	destroy_workqueue(mp->m_cil_workqueue);
 out_destroy_unwritten:
	destroy_workqueue(mp->m_unwritten_workqueue);
 out_destroy_buf:
	destroy_workqueue(mp->m_buf_workqueue);
 out:
	return -ENOMEM;
}
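
/*
 * Most of the workqueues above are created with WQ_MEM_RECLAIM, which
 * guarantees a rescuer thread so work items in the writeback and metadata
 * I/O completion paths can still make forward progress while the system is
 * reclaiming memory. The sync workqueue is the exception, presumably because
 * nothing in memory reclaim depends on it.
 */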

STATIC void
xfs_destroy_mount_workqueues(
	struct xfs_mount	*mp)
{
	destroy_workqueue(mp->m_sync_workqueue);
	destroy_workqueue(mp->m_gc_workqueue);
	destroy_workqueue(mp->m_reclaim_workqueue);
	destroy_workqueue(mp->m_cil_workqueue);
	destroy_workqueue(mp->m_unwritten_workqueue);
	destroy_workqueue(mp->m_buf_workqueue);
}

static void
xfs_flush_inodes_worker(
	struct work_struct	*work)
{
	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
						   m_flush_inodes_work);
	struct super_block	*sb = mp->m_super;

	if (down_read_trylock(&sb->s_umount)) {
		sync_inodes_sb(sb);
		up_read(&sb->s_umount);
	}
}

/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 * for IO to complete so that we effectively throttle multiple callers to the
 * rate at which IO is completing.
 */
void
xfs_flush_inodes(
	struct xfs_mount	*mp)
{
	/*
	 * If flush_work() returns true then that means we waited for a flush
	 * which was already in progress.  Don't bother running another scan.
	 */
	if (flush_work(&mp->m_flush_inodes_work))
		return;

	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
	flush_work(&mp->m_flush_inodes_work);
}

/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}

#ifdef DEBUG
static void
xfs_check_delalloc(
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;

	if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
		return;
	do {
		if (isnullstartblock(got.br_startblock)) {
			xfs_warn(ip->i_mount,
	"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
				ip->i_ino,
				whichfork == XFS_DATA_FORK ? "data" : "cow",
				got.br_startoff, got.br_blockcount);
		}
	} while (xfs_iext_next_extent(ifp, &icur, &got));
}
#else
#define xfs_check_delalloc(ip, whichfork)	do { } while (0)
#endif

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	XFS_STATS_INC(ip->i_mount, vn_rele);
	XFS_STATS_INC(ip->i_mount, vn_remove);

	xfs_inactive(ip);

	if (!XFS_FORCED_SHUTDOWN(ip->i_mount) && ip->i_delayed_blks) {
		xfs_check_delalloc(ip, XFS_DATA_FORK);
		xfs_check_delalloc(ip, XFS_COW_FORK);
		ASSERT(0);
	}

	XFS_STATS_INC(ip->i_mount, vn_reclaim);

	/*
	 * We should never get here with one of the reclaim flags already set.
	 */
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));

	/*
	 * We always use background reclaim here because even if the inode is
	 * clean, it still may be under IO and hence we have to wait for IO
	 * completion to occur before we can reclaim the inode. The background
	 * reclaim path handles this more efficiently than we can here, so
	 * simply let background reclaim tear down all inodes.
	 */
	xfs_inode_set_reclaim_tag(ip);
}

static void
xfs_fs_dirty_inode(
	struct inode		*inode,
	int			flag)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;

	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
		return;
	if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
		return;

	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
		return;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
	xfs_trans_commit(tp);
}
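
/*
 * Illustrative sequence for the lazytime case above: with "-o lazytime" the
 * VFS keeps timestamp updates in memory (I_DIRTY_TIME) and only calls
 * ->dirty_inode with I_DIRTY_SYNC when they must become persistent (e.g. on
 * fsync or the periodic writeback flush), at which point the transaction
 * above logs the timestamps.
 */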

/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialise state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);

	mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		    "xfsino", ip->i_ino);
	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		    "xfsino", ip->i_ino);
}

/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every inode
 * we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	/*
	 * If this unlinked inode is in the middle of recovery, don't
	 * drop the inode just yet; log recovery will take care of
	 * that.  See the comment for this inode flag.
	 */
	if (ip->i_flags & XFS_IRECOVERY) {
		ASSERT(ip->i_mount->m_log->l_flags & XLOG_RECOVERY_NEEDED);
		return 0;
	}

	return generic_drop_inode(inode);
}

static void
xfs_mount_free(
	struct xfs_mount	*mp)
{
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
	kmem_free(mp);
}

STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/*
	 * Doing anything during the async pass would be counterproductive.
	 */
	if (!wait)
		return 0;

	xfs_log_force(mp, XFS_LOG_SYNC);
	if (laptop_mode) {
		/*
		 * The disk must be active because we're syncing.
		 * We schedule log work now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		flush_delayed_work(&mp->m_log->l_work);
	}

	return 0;
}

STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	uint64_t		fakeinos, id;
	uint64_t		icount;
	uint64_t		ifree;
	uint64_t		fdblocks;
	xfs_extlen_t		lsize;
	int64_t			ffree;

	statp->f_type = XFS_SUPER_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid = u64_to_fsid(id);

	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	spin_unlock(&mp->m_sb_lock);

	/* make sure statp->f_bfree does not underflow */
	statp->f_bfree = max_t(int64_t, fdblocks - mp->m_alloc_set_aside, 0);
	statp->f_bavail = statp->f_bfree;

	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
	if (M_IGEO(mp)->maxicount)
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
					M_IGEO(mp)->maxicount);

	/* If sb_icount overshot maxicount, report actual allocation */
	statp->f_files = max_t(typeof(statp->f_files),
					statp->f_files,
					sbp->sb_icount);

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (icount - ifree);
	statp->f_ffree = max_t(int64_t, ffree, 0);

	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);

	if (XFS_IS_REALTIME_MOUNT(mp) &&
	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
		statp->f_blocks = sbp->sb_rblocks;
		statp->f_bavail = statp->f_bfree =
			sbp->sb_frextents * sbp->sb_rextsize;
	}

	return 0;
}
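
/*
 * Worked example (illustrative numbers) for the free space clamp above: with
 * fdblocks = 1000 and m_alloc_set_aside = 1024 the subtraction would go
 * negative, so f_bfree is reported as 0 rather than wrapping to a huge
 * unsigned value.
 */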

STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks = 0;

	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks;

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, &resblks, NULL);
}

/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done sync the superblock
 * to the log to dirty it in case of a crash while frozen. This ensures that we
 * will recover the unlinked inode lists on the next mount.
 */
STATIC int
xfs_fs_freeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);
	unsigned int		flags;
	int			ret;

	/*
	 * The filesystem is now frozen far enough that memory reclaim
	 * cannot safely operate on the filesystem. Hence we need to
	 * set a GFP_NOFS context here to avoid recursion deadlocks.
	 */
	flags = memalloc_nofs_save();
	xfs_blockgc_stop(mp);
	xfs_save_resvblks(mp);
	ret = xfs_log_quiesce(mp);
	memalloc_nofs_restore(flags);
	return ret;
}

STATIC int
xfs_fs_unfreeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);
	xfs_blockgc_start(mp);
	return 0;
}

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount	*mp)
{
	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);

	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_sb_version_haslogv2(&mp->m_sb)) {
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			xfs_warn(mp,
		"logbuf size must be greater than or equal to log stripe size");
			return -EINVAL;
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			xfs_warn(mp,
		"logbuf size for version 1 logs must be 16K or 32K");
			return -EINVAL;
		}
	}

	/*
	 * V5 filesystems always use attr2 format for attributes.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb) &&
	    (mp->m_flags & XFS_MOUNT_NOATTR2)) {
		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
			     "attr2 is always enabled for V5 filesystems.");
		return -EINVAL;
	}

	/*
	 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
	 * told by noattr2 to turn it off
	 */
	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_NOATTR2))
		mp->m_flags |= XFS_MOUNT_ATTR2;

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
		xfs_warn(mp,
			"cannot mount a read-only filesystem as read-write");
		return -EROFS;
	}

	if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
	    (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE)) &&
	    !xfs_sb_version_has_pquotino(&mp->m_sb)) {
		xfs_warn(mp,
			"Super block does not support project and group quota together");
		return -EINVAL;
	}

	return 0;
}
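
/*
 * Example (illustrative) for the log stripe check above: on a v2 log with
 * sb_logsunit = 64k, an unset logbsize is silently bumped to 64k, while an
 * explicit logbsize=32k fails the mount with -EINVAL.
 */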

static int
xfs_init_percpu_counters(
	struct xfs_mount	*mp)
{
	int			error;

	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
	if (error)
		return -ENOMEM;

	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
	if (error)
		goto free_icount;

	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
	if (error)
		goto free_ifree;

	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
	if (error)
		goto free_fdblocks;

	return 0;

free_fdblocks:
	percpu_counter_destroy(&mp->m_fdblocks);
free_ifree:
	percpu_counter_destroy(&mp->m_ifree);
free_icount:
	percpu_counter_destroy(&mp->m_icount);
	return -ENOMEM;
}

void
xfs_reinit_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
}

static void
xfs_destroy_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_destroy(&mp->m_icount);
	percpu_counter_destroy(&mp->m_ifree);
	percpu_counter_destroy(&mp->m_fdblocks);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
	percpu_counter_destroy(&mp->m_delalloc_blks);
}

static void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/* if ->fill_super failed, we have no mount to tear down */
	if (!sb->s_fs_info)
		return;

	xfs_notice(mp, "Unmounting Filesystem");
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);

	xfs_freesb(mp);
	free_percpu(mp->m_stats.xs_stats);
	xfs_destroy_percpu_counters(mp);
	xfs_destroy_mount_workqueues(mp);
	xfs_close_devices(mp);

	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
}

static long
xfs_fs_nr_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	/* Paranoia: catch incorrect calls during mount setup or teardown */
	if (WARN_ON_ONCE(!sb->s_fs_info))
		return 0;
	return xfs_reclaim_inodes_count(XFS_M(sb));
}

static long
xfs_fs_free_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}

static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.dirty_inode		= xfs_fs_dirty_inode,
	.drop_inode		= xfs_fs_drop_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.show_options		= xfs_fs_show_options,
	.nr_cached_objects	= xfs_fs_nr_cached_objects,
	.free_cached_objects	= xfs_fs_free_cached_objects,
};

static int
suffix_kstrtoint(
	const char	*s,
	unsigned int	base,
	int		*res)
{
	int		last, shift_left_factor = 0, _res;
	char		*value;
	int		ret = 0;

	value = kstrdup(s, GFP_KERNEL);
	if (!value)
		return -ENOMEM;

	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}

	if (kstrtoint(value, base, &_res))
		ret = -EINVAL;
	kfree(value);
	*res = _res << shift_left_factor;
	return ret;
}
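
/*
 * Worked examples (illustrative) for suffix_kstrtoint() above:
 *	"32k" -> 32 << 10 = 32768
 *	"1m"  -> 1  << 20 = 1048576
 *	"64"  -> 64 (no suffix, no shift)
 */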

static inline void
xfs_fs_warn_deprecated(
	struct fs_context	*fc,
	struct fs_parameter	*param,
	uint64_t		flag,
	bool			value)
{
	/* Don't print the warning if reconfiguring and current mount point
	 * already had the flag set
	 */
	if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
	    !!(XFS_M(fc->root->d_sb)->m_flags & flag) == value)
		return;
	xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
}

/*
 * Set mount state from a mount option.
 *
 * NOTE: mp->m_super is NULL here!
 */
static int
xfs_fs_parse_param(
	struct fs_context	*fc,
	struct fs_parameter	*param)
{
	struct xfs_mount	*parsing_mp = fc->s_fs_info;
	struct fs_parse_result	result;
	int			size = 0;
	int			opt;

	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_logbufs:
		parsing_mp->m_logbufs = result.uint_32;
		return 0;
	case Opt_logbsize:
		if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
			return -EINVAL;
		return 0;
	case Opt_logdev:
		kfree(parsing_mp->m_logname);
		parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_logname)
			return -ENOMEM;
		return 0;
	case Opt_rtdev:
		kfree(parsing_mp->m_rtname);
		parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_rtname)
			return -ENOMEM;
		return 0;
	case Opt_allocsize:
		if (suffix_kstrtoint(param->string, 10, &size))
			return -EINVAL;
		parsing_mp->m_allocsize_log = ffs(size) - 1;
		parsing_mp->m_flags |= XFS_MOUNT_ALLOCSIZE;
		return 0;
	case Opt_grpid:
	case Opt_bsdgroups:
		parsing_mp->m_flags |= XFS_MOUNT_GRPID;
		return 0;
	case Opt_nogrpid:
	case Opt_sysvgroups:
		parsing_mp->m_flags &= ~XFS_MOUNT_GRPID;
		return 0;
	case Opt_wsync:
		parsing_mp->m_flags |= XFS_MOUNT_WSYNC;
		return 0;
	case Opt_norecovery:
		parsing_mp->m_flags |= XFS_MOUNT_NORECOVERY;
		return 0;
	case Opt_noalign:
		parsing_mp->m_flags |= XFS_MOUNT_NOALIGN;
		return 0;
	case Opt_swalloc:
		parsing_mp->m_flags |= XFS_MOUNT_SWALLOC;
		return 0;
	case Opt_sunit:
		parsing_mp->m_dalign = result.uint_32;
		return 0;
	case Opt_swidth:
		parsing_mp->m_swidth = result.uint_32;
		return 0;
	case Opt_inode32:
		parsing_mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
		return 0;
	case Opt_inode64:
		parsing_mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
		return 0;
	case Opt_nouuid:
		parsing_mp->m_flags |= XFS_MOUNT_NOUUID;
		return 0;
	case Opt_largeio:
		parsing_mp->m_flags |= XFS_MOUNT_LARGEIO;
		return 0;
	case Opt_nolargeio:
		parsing_mp->m_flags &= ~XFS_MOUNT_LARGEIO;
		return 0;
	case Opt_filestreams:
		parsing_mp->m_flags |= XFS_MOUNT_FILESTREAMS;
		return 0;
	case Opt_noquota:
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
		return 0;
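
	/*
	 * For the quota cases below, the flags mean roughly: *_ACCT turns on
	 * accounting, *_ENFD additionally enforces limits, and *_ACTIVE marks
	 * the quota type as in use for this mount; the "noenforce" variants
	 * therefore set ACCT/ACTIVE but clear ENFD.
	 */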
	case Opt_quota:
	case Opt_uquota:
	case Opt_usrquota:
		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
					 XFS_UQUOTA_ENFD);
		return 0;
	case Opt_qnoenforce:
	case Opt_uqnoenforce:
		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
		parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
		return 0;
	case Opt_pquota:
	case Opt_prjquota:
		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
					 XFS_PQUOTA_ENFD);
		return 0;
	case Opt_pqnoenforce:
		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
		parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
		return 0;
	case Opt_gquota:
	case Opt_grpquota:
		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
					 XFS_GQUOTA_ENFD);
		return 0;
	case Opt_gqnoenforce:
		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
		parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
		return 0;
	case Opt_discard:
		parsing_mp->m_flags |= XFS_MOUNT_DISCARD;
		return 0;
	case Opt_nodiscard:
		parsing_mp->m_flags &= ~XFS_MOUNT_DISCARD;
		return 0;
#ifdef CONFIG_FS_DAX
	case Opt_dax:
		xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
		return 0;
	case Opt_dax_enum:
		xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
		return 0;
#endif
	/* Following mount options will be removed in September 2025 */
	case Opt_ikeep:
		xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_IKEEP, true);
		parsing_mp->m_flags |= XFS_MOUNT_IKEEP;
		return 0;
	case Opt_noikeep:
		xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_IKEEP, false);
		parsing_mp->m_flags &= ~XFS_MOUNT_IKEEP;
		return 0;
	case Opt_attr2:
		xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_ATTR2, true);
		parsing_mp->m_flags |= XFS_MOUNT_ATTR2;
		return 0;
	case Opt_noattr2:
		xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_NOATTR2, true);
		parsing_mp->m_flags &= ~XFS_MOUNT_ATTR2;
		parsing_mp->m_flags |= XFS_MOUNT_NOATTR2;
		return 0;
	default:
		xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
		return -EINVAL;
	}

	return 0;
}

static int
xfs_fs_validate_params(
	struct xfs_mount	*mp)
{
	/*
	 * no recovery flag requires a read-only mount
	 */
	if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_warn(mp, "no-recovery mounts must be read-only.");
		return -EINVAL;
	}

	if ((mp->m_flags & XFS_MOUNT_NOALIGN) &&
	    (mp->m_dalign || mp->m_swidth)) {
		xfs_warn(mp,
	"sunit and swidth options incompatible with the noalign option");
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
		xfs_warn(mp, "quota support not available in this kernel.");
		return -EINVAL;
	}

	if ((mp->m_dalign && !mp->m_swidth) ||
	    (!mp->m_dalign && mp->m_swidth)) {
		xfs_warn(mp, "sunit and swidth must be specified together");
		return -EINVAL;
	}

	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
		xfs_warn(mp,
	"stripe width (%d) must be a multiple of the stripe unit (%d)",
			mp->m_swidth, mp->m_dalign);
		return -EINVAL;
	}

	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return -EINVAL;
	}

	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize != 0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		xfs_warn(mp,
			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
			mp->m_logbsize);
		return -EINVAL;
	}

	if ((mp->m_flags & XFS_MOUNT_ALLOCSIZE) &&
	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
		return -EINVAL;
	}

	return 0;
}
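
/*
 * Examples (illustrative) of combinations rejected above: "-o sunit=128"
 * without swidth, "-o noalign,sunit=128,swidth=512", or a logbufs value
 * outside the [XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS] range.
 */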

static int
xfs_fs_fill_super(
	struct super_block	*sb,
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = sb->s_fs_info;
	struct inode		*root;
	int			flags = 0, error;

	mp->m_super = sb;

	error = xfs_fs_validate_params(mp);
	if (error)
		goto out_free_names;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	sb->s_op = &xfs_super_operations;

	/*
	 * Delay mount work if the debug hook is set. This is debug
	 * instrumentation to coordinate simulation of xfs mount failures with
	 * VFS superblock operations
	 */
	if (xfs_globals.mount_delay) {
		xfs_notice(mp, "Delaying mount for %d seconds.",
			xfs_globals.mount_delay);
		msleep(xfs_globals.mount_delay * 1000);
	}

	if (fc->sb_flags & SB_SILENT)
		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
	if (error)
		goto out_free_names;

	error = xfs_init_mount_workqueues(mp);
	if (error)
		goto out_close_devices;

	error = xfs_init_percpu_counters(mp);
	if (error)
		goto out_destroy_workqueues;

	/* Allocate stats memory before we do operations that might use it */
	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
	if (!mp->m_stats.xs_stats) {
		error = -ENOMEM;
		goto out_destroy_counters;
	}

	error = xfs_readsb(mp, flags);
	if (error)
		goto out_free_stats;

	error = xfs_finish_flags(mp);
	if (error)
		goto out_free_sb;

	error = xfs_setup_devices(mp);
	if (error)
		goto out_free_sb;

	/* V4 support is undergoing deprecation. */
	if (!xfs_sb_version_hascrc(&mp->m_sb)) {
#ifdef CONFIG_XFS_SUPPORT_V4
		xfs_warn_once(mp,
	"Deprecated V4 format (crc=0) will not be supported after September 2030.");
#else
		xfs_warn(mp,
	"Deprecated V4 format (crc=0) not supported by kernel.");
		error = -EINVAL;
		goto out_free_sb;
#endif
	}

	/* Filesystem claims it needs repair, so refuse the mount. */
	if (xfs_sb_version_needsrepair(&mp->m_sb)) {
		xfs_warn(mp, "Filesystem needs repair. Please run xfs_repair.");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

	/*
	 * Don't touch the filesystem if a user tool thinks it owns the primary
	 * superblock.  mkfs doesn't clear the flag from secondary supers, so
	 * we don't check them at all.
	 */
	if (mp->m_sb.sb_inprogress) {
		xfs_warn(mp, "Offline file system operation in progress!");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

	/*
	 * Until this is fixed only page-sized or smaller data blocks work.
	 */
	if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
		xfs_warn(mp,
		"File system with blocksize %d bytes. "
		"Only pagesize (%ld) or less will currently work.",
				mp->m_sb.sb_blocksize, PAGE_SIZE);
		error = -ENOSYS;
		goto out_free_sb;
	}

	/* Ensure this filesystem fits in the page cache limits */
	if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
	    xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
		xfs_warn(mp,
		"file system too large to be mounted on this system.");
		error = -EFBIG;
		goto out_free_sb;
	}

	/*
	 * XFS block mappings use 54 bits to store the logical block offset.
	 * This should suffice to handle the maximum file size that the VFS
	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
	 * to check this assertion.
	 *
	 * Avoid integer overflow by comparing the maximum bmbt offset to the
	 * maximum pagecache offset in units of fs blocks.
	 */
	if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
		xfs_warn(mp,
"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
			 XFS_MAX_FILEOFF);
		error = -EINVAL;
		goto out_free_sb;
	}
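
	/*
	 * Illustrative arithmetic for the check above: with 4k blocks a
	 * 54-bit block offset can address 2^54 * 2^12 = 2^66 bytes, so on
	 * 64-bit kernels the VFS limit of 2^63 bytes is the binding
	 * constraint and this warning should never fire.
	 */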

	error = xfs_filestream_mount(mp);
	if (error)
		goto out_free_sb;

	/*
	 * we must configure the block size in the superblock before we run the
	 * full mount process as the mount process can lookup and cache inodes.
	 */
	sb->s_magic = XFS_SUPER_MAGIC;
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_max_links = XFS_MAXLINK;
	sb->s_time_gran = 1;
	if (xfs_sb_version_hasbigtime(&mp->m_sb)) {
		sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
		sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
	} else {
		sb->s_time_min = XFS_LEGACY_TIME_MIN;
		sb->s_time_max = XFS_LEGACY_TIME_MAX;
	}
	trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
	sb->s_iflags |= SB_I_CGROUPWB;

	set_posix_acl_flag(sb);

	/* version 5 superblocks support inode version counters. */
	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
		sb->s_flags |= SB_I_VERSION;

	if (xfs_sb_version_hasbigtime(&mp->m_sb))
		xfs_warn(mp,
 "EXPERIMENTAL big timestamp feature in use. Use at your own risk!");

	if (mp->m_flags & XFS_MOUNT_DAX_ALWAYS) {
		bool rtdev_is_dax = false, datadev_is_dax;

		xfs_warn(mp,
		"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");

		datadev_is_dax = bdev_dax_supported(mp->m_ddev_targp->bt_bdev,
			sb->s_blocksize);
		if (mp->m_rtdev_targp)
			rtdev_is_dax = bdev_dax_supported(
				mp->m_rtdev_targp->bt_bdev, sb->s_blocksize);
		if (!rtdev_is_dax && !datadev_is_dax) {
			xfs_alert(mp,
			"DAX unsupported by block device. Turning off DAX.");
			xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
		}
		if (xfs_sb_version_hasreflink(&mp->m_sb)) {
			xfs_alert(mp,
		"DAX and reflink cannot be used together!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}
	}

	if (mp->m_flags & XFS_MOUNT_DISCARD) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);

		if (!blk_queue_discard(q)) {
			xfs_warn(mp, "mounting with \"discard\" option, but "
					"the device does not support discard");
			mp->m_flags &= ~XFS_MOUNT_DISCARD;
		}
	}
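
	/*
	 * Note that both requests above degrade gracefully: dax=always falls
	 * back to dax=never if no device supports DAX, and "discard" is
	 * silently dropped if the device cannot discard; only the DAX+reflink
	 * combination is a hard -EINVAL.
	 */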

	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		if (mp->m_sb.sb_rblocks) {
			xfs_alert(mp,
	"reflink not compatible with realtime device!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}

		if (xfs_globals.always_cow) {
			xfs_info(mp, "using DEBUG-only always_cow mode.");
			mp->m_always_cow = true;
		}
	}

	if (xfs_sb_version_hasrmapbt(&mp->m_sb) && mp->m_sb.sb_rblocks) {
		xfs_alert(mp,
	"reverse mapping btree not compatible with realtime device!");
		error = -EINVAL;
		goto out_filestream_unmount;
	}

	if (xfs_sb_version_hasinobtcounts(&mp->m_sb))
		xfs_warn(mp,
 "EXPERIMENTAL inode btree counters feature in use. Use at your own risk!");

	error = xfs_mountfs(mp);
	if (error)
		goto out_filestream_unmount;

	root = igrab(VFS_I(mp->m_rootip));
	if (!root) {
		error = -ENOENT;
		goto out_unmount;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		error = -ENOMEM;
		goto out_unmount;
	}

	return 0;

 out_filestream_unmount:
	xfs_filestream_unmount(mp);
 out_free_sb:
	xfs_freesb(mp);
 out_free_stats:
	free_percpu(mp->m_stats.xs_stats);
 out_destroy_counters:
	xfs_destroy_percpu_counters(mp);
 out_destroy_workqueues:
	xfs_destroy_mount_workqueues(mp);
 out_close_devices:
	xfs_close_devices(mp);
 out_free_names:
	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
	return error;

 out_unmount:
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);
	goto out_free_sb;
}

static int
xfs_fs_get_tree(
	struct fs_context	*fc)
{
	return get_tree_bdev(fc, xfs_fs_fill_super);
}
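
/*
 * Illustrative call chain, assuming the new mount API: fsopen("xfs") creates
 * the fs_context, each fsconfig() option lands in xfs_fs_parse_param(), the
 * FSCONFIG_CMD_CREATE step invokes xfs_fs_get_tree() above, where
 * get_tree_bdev() opens the data device and calls xfs_fs_fill_super(), and
 * fsmount() then attaches the result. Classic mount(2) is translated by the
 * VFS into the same sequence.
 */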

static int
xfs_remount_rw(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	int			error;

	if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
		xfs_warn(mp,
			"ro->rw transition prohibited on norecovery mount");
		return -EINVAL;
	}

	if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
		xfs_warn(mp,
	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
			(sbp->sb_features_ro_compat &
				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
		return -EINVAL;
	}

	mp->m_flags &= ~XFS_MOUNT_RDONLY;

	/*
	 * If this is the first remount to writeable state we might have some
	 * superblock changes to update.
	 */
	if (mp->m_update_sb) {
		error = xfs_sync_sb(mp, false);
		if (error) {
			xfs_warn(mp, "failed to write sb changes");
			return error;
		}
		mp->m_update_sb = false;
	}

	/*
	 * Fill out the reserve pool if it is empty. Use the stashed value if
	 * it is non-zero, otherwise go with the default.
	 */
	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);

	/* Recover any CoW blocks that never got remapped. */
	error = xfs_reflink_recover_cow(mp);
	if (error) {
		xfs_err(mp,
			"Error %d recovering leftover CoW allocations.", error);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}
	xfs_blockgc_start(mp);

	/* Create the per-AG metadata reservation pool. */
	error = xfs_fs_reserve_ag_blocks(mp);
	if (error && error != -ENOSPC)
		return error;

	return 0;
}

static int
xfs_remount_ro(
	struct xfs_mount	*mp)
{
	int			error;

	/*
	 * Cancel background eofb scanning so it cannot race with the final
	 * log force+buftarg wait and deadlock the remount.
	 */
	xfs_blockgc_stop(mp);

	/* Get rid of any leftover CoW reservations... */
	error = xfs_blockgc_free_space(mp, NULL);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/* Free the per-AG metadata reservation pool. */
	error = xfs_fs_unreserve_ag_blocks(mp);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/*
	 * Before we sync the metadata, we need to free up the reserve block
	 * pool so that the used block count in the superblock on disk is
	 * correct at the end of the remount. Stash the current reserve pool
	 * size so that if we get remounted rw, we can return it to the same
	 * size.
	 */
	xfs_save_resvblks(mp);

	xfs_log_clean(mp);
	mp->m_flags |= XFS_MOUNT_RDONLY;

	return 0;
}
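
/*
 * Usage example (illustrative): "mount -o remount,ro /mnt" reaches
 * xfs_remount_ro() above via xfs_fs_reconfigure() below, and a later
 * "mount -o remount,rw /mnt" restores the stashed reserve pool in
 * xfs_remount_rw().
 */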

/*
 * Logically we would return an error here to prevent users from believing
 * they might have changed mount options using remount which can't be changed.
 *
 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
 * arguments in some cases so we can't blindly reject options, but have to
 * check for each specified option if it actually differs from the currently
 * set option and only reject it if that's the case.
 *
 * Until that is implemented we return success for every remount request, and
 * silently ignore all options that we can't actually change.
 */
static int
xfs_fs_reconfigure(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
	struct xfs_mount	*new_mp = fc->s_fs_info;
	xfs_sb_t		*sbp = &mp->m_sb;
	int			flags = fc->sb_flags;
	int			error;

	/* version 5 superblocks always support version counters. */
	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
		fc->sb_flags |= SB_I_VERSION;

	error = xfs_fs_validate_params(new_mp);
	if (error)
		return error;

	sync_filesystem(mp->m_super);

	/* inode32 -> inode64 */
	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
	    !(new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
		mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
	}

	/* inode64 -> inode32 */
	if (!(mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
	    (new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
		mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
	}

	/* ro -> rw */
	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(flags & SB_RDONLY)) {
		error = xfs_remount_rw(mp);
		if (error)
			return error;
	}

	/* rw -> ro */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (flags & SB_RDONLY)) {
		error = xfs_remount_ro(mp);
		if (error)
			return error;
	}

	return 0;
}

static void xfs_fs_free(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = fc->s_fs_info;

	/*
	 * mp is stored in the fs_context when it is initialized.
	 * mp is transferred to the superblock on a successful mount,
	 * but if an error occurs before the transfer we have to free
	 * it here.
	 */
	if (mp)
		xfs_mount_free(mp);
}

static const struct fs_context_operations xfs_context_ops = {
	.parse_param = xfs_fs_parse_param,
	.get_tree    = xfs_fs_get_tree,
	.reconfigure = xfs_fs_reconfigure,
	.free        = xfs_fs_free,
};
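
/*
 * The ops above are wired to a fresh fs_context in xfs_init_fs_context()
 * below. Note that ->parse_param runs once per option before ->get_tree,
 * which is presumably why the parse-time defaults (logbufs = -1,
 * logbsize = -1, ...) are established in xfs_init_fs_context() rather than
 * in fill_super.
 */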

static int xfs_init_fs_context(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp;

	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
	if (!mp)
		return -ENOMEM;

	spin_lock_init(&mp->m_sb_lock);
	spin_lock_init(&mp->m_agirotor_lock);
	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
	spin_lock_init(&mp->m_perag_lock);
	mutex_init(&mp->m_growlock);
	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
	mp->m_kobj.kobject.kset = xfs_kset;
	/*
	 * We don't create the finobt per-ag space reservation until after log
	 * recovery, so we must set this to true so that an ifree transaction
	 * started during log recovery will not depend on space reservations
	 * for finobt expansion.
	 */
	mp->m_finobt_nores = true;

	/*
	 * These can be overridden by the mount option parsing.
	 */
	mp->m_logbufs = -1;
	mp->m_logbsize = -1;
	mp->m_allocsize_log = 16; /* 64k */

	/*
	 * Copy binary VFS mount flags we are interested in.
	 */
	if (fc->sb_flags & SB_RDONLY)
		mp->m_flags |= XFS_MOUNT_RDONLY;
	if (fc->sb_flags & SB_DIRSYNC)
		mp->m_flags |= XFS_MOUNT_DIRSYNC;
	if (fc->sb_flags & SB_SYNCHRONOUS)
		mp->m_flags |= XFS_MOUNT_WSYNC;

	fc->s_fs_info = mp;
	fc->ops = &xfs_context_ops;

	return 0;
}

static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.init_fs_context	= xfs_init_fs_context,
	.parameters		= xfs_fs_parameters,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("xfs");
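
/*
 * "Zone" below is XFS's historical name for a slab cache: each of these is a
 * plain kmem_cache, created once at module load and torn down again in
 * xfs_destroy_zones(), both on the init error path and at module exit.
 */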

STATIC int __init
xfs_init_zones(void)
{
	xfs_log_ticket_zone = kmem_cache_create("xfs_log_ticket",
						sizeof(struct xlog_ticket),
						0, 0, NULL);
	if (!xfs_log_ticket_zone)
		goto out;

	xfs_bmap_free_item_zone = kmem_cache_create("xfs_bmap_free_item",
					sizeof(struct xfs_extent_free_item),
					0, 0, NULL);
	if (!xfs_bmap_free_item_zone)
		goto out_destroy_log_ticket_zone;

	xfs_btree_cur_zone = kmem_cache_create("xfs_btree_cur",
					       sizeof(struct xfs_btree_cur),
					       0, 0, NULL);
	if (!xfs_btree_cur_zone)
		goto out_destroy_bmap_free_item_zone;

	xfs_da_state_zone = kmem_cache_create("xfs_da_state",
					      sizeof(struct xfs_da_state),
					      0, 0, NULL);
	if (!xfs_da_state_zone)
		goto out_destroy_btree_cur_zone;

	xfs_ifork_zone = kmem_cache_create("xfs_ifork",
					   sizeof(struct xfs_ifork),
					   0, 0, NULL);
	if (!xfs_ifork_zone)
		goto out_destroy_da_state_zone;

	xfs_trans_zone = kmem_cache_create("xfs_trans",
					   sizeof(struct xfs_trans),
					   0, 0, NULL);
	if (!xfs_trans_zone)
		goto out_destroy_ifork_zone;

	/*
	 * The size of the zone allocated buf log item is the maximum
	 * size possible under XFS.  This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_zone = kmem_cache_create("xfs_buf_item",
					      sizeof(struct xfs_buf_log_item),
					      0, 0, NULL);
	if (!xfs_buf_item_zone)
		goto out_destroy_trans_zone;

	xfs_efd_zone = kmem_cache_create("xfs_efd_item",
					(sizeof(struct xfs_efd_log_item) +
					(XFS_EFD_MAX_FAST_EXTENTS - 1) *
					sizeof(struct xfs_extent)),
					0, 0, NULL);
	if (!xfs_efd_zone)
		goto out_destroy_buf_item_zone;

	xfs_efi_zone = kmem_cache_create("xfs_efi_item",
					 (sizeof(struct xfs_efi_log_item) +
					 (XFS_EFI_MAX_FAST_EXTENTS - 1) *
					 sizeof(struct xfs_extent)),
					 0, 0, NULL);
	if (!xfs_efi_zone)
		goto out_destroy_efd_zone;
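
	/*
	 * Sizing note (illustrative): the EFI/EFD structures embed their
	 * first xfs_extent, so the caches above are sized as
	 * sizeof(item) + (MAX_FAST_EXTENTS - 1) * sizeof(struct xfs_extent)
	 * to hold the common small-extent-count case without a second
	 * allocation.
	 */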
 out_destroy_bud_zone:
	kmem_cache_destroy(xfs_bud_zone);
 out_destroy_cui_zone:
	kmem_cache_destroy(xfs_cui_zone);
 out_destroy_cud_zone:
	kmem_cache_destroy(xfs_cud_zone);
 out_destroy_rui_zone:
	kmem_cache_destroy(xfs_rui_zone);
 out_destroy_rud_zone:
	kmem_cache_destroy(xfs_rud_zone);
 out_destroy_icreate_zone:
	kmem_cache_destroy(xfs_icreate_zone);
 out_destroy_ili_zone:
	kmem_cache_destroy(xfs_ili_zone);
 out_destroy_inode_zone:
	kmem_cache_destroy(xfs_inode_zone);
 out_destroy_efi_zone:
	kmem_cache_destroy(xfs_efi_zone);
 out_destroy_efd_zone:
	kmem_cache_destroy(xfs_efd_zone);
 out_destroy_buf_item_zone:
	kmem_cache_destroy(xfs_buf_item_zone);
 out_destroy_trans_zone:
	kmem_cache_destroy(xfs_trans_zone);
 out_destroy_ifork_zone:
	kmem_cache_destroy(xfs_ifork_zone);
 out_destroy_da_state_zone:
	kmem_cache_destroy(xfs_da_state_zone);
 out_destroy_btree_cur_zone:
	kmem_cache_destroy(xfs_btree_cur_zone);
 out_destroy_bmap_free_item_zone:
	kmem_cache_destroy(xfs_bmap_free_item_zone);
 out_destroy_log_ticket_zone:
	kmem_cache_destroy(xfs_log_ticket_zone);
 out:
	return -ENOMEM;
}

STATIC void
xfs_destroy_zones(void)
{
	/*
	 * Make sure all delayed RCU frees are flushed before we
	 * destroy the caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(xfs_bui_zone);
	kmem_cache_destroy(xfs_bud_zone);
	kmem_cache_destroy(xfs_cui_zone);
	kmem_cache_destroy(xfs_cud_zone);
	kmem_cache_destroy(xfs_rui_zone);
	kmem_cache_destroy(xfs_rud_zone);
	kmem_cache_destroy(xfs_icreate_zone);
	kmem_cache_destroy(xfs_ili_zone);
	kmem_cache_destroy(xfs_inode_zone);
	kmem_cache_destroy(xfs_efi_zone);
	kmem_cache_destroy(xfs_efd_zone);
	kmem_cache_destroy(xfs_buf_item_zone);
	kmem_cache_destroy(xfs_trans_zone);
	kmem_cache_destroy(xfs_ifork_zone);
	kmem_cache_destroy(xfs_da_state_zone);
	kmem_cache_destroy(xfs_btree_cur_zone);
	kmem_cache_destroy(xfs_bmap_free_item_zone);
	kmem_cache_destroy(xfs_log_ticket_zone);
}

STATIC int __init
xfs_init_workqueues(void)
{
	/*
	 * The allocation workqueue can be used in memory reclaim situations
	 * (writepage path), and parallelism is only limited by the number of
	 * AGs in all the filesystems mounted. Hence use the default large
	 * max_active value for this workqueue.
	 */
	xfs_alloc_wq = alloc_workqueue("xfsalloc",
			XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
	if (!xfs_alloc_wq)
		return -ENOMEM;

	xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
			0);
	if (!xfs_discard_wq)
		goto out_free_alloc_wq;

	return 0;
out_free_alloc_wq:
	destroy_workqueue(xfs_alloc_wq);
	return -ENOMEM;
}

STATIC void
xfs_destroy_workqueues(void)
{
	destroy_workqueue(xfs_discard_wq);
	destroy_workqueue(xfs_alloc_wq);
}

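/*
 * Module init: bring up global state in dependency order (zones,
 * workqueues, MRU cache, buffer cache, procfs, sysctl, sysfs, quota)
 * and register the filesystem type last, so nothing can mount until
 * everything it needs exists. Failures unwind through the out_*
 * labels in reverse order.
 */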
STATIC int __init
init_xfs_fs(void)
{
	int			error;

	xfs_check_ondisk_structs();

	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");

	xfs_dir_startup();

	error = xfs_init_zones();
	if (error)
		goto out;

	error = xfs_init_workqueues();
	if (error)
		goto out_destroy_zones;

	error = xfs_mru_cache_init();
	if (error)
		goto out_destroy_wq;

	error = xfs_buf_init();
	if (error)
		goto out_mru_cache_uninit;

	error = xfs_init_procfs();
	if (error)
		goto out_buf_terminate;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;

	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
	if (!xfs_kset) {
		error = -ENOMEM;
		goto out_sysctl_unregister;
	}

	xfsstats.xs_kobj.kobject.kset = xfs_kset;

	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
	if (!xfsstats.xs_stats) {
		error = -ENOMEM;
		goto out_kset_unregister;
	}

	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
			       "stats");
	if (error)
		goto out_free_stats;

#ifdef DEBUG
	xfs_dbg_kobj.kobject.kset = xfs_kset;
	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
	if (error)
		goto out_remove_stats_kobj;
#endif

	error = xfs_qm_init();
	if (error)
		goto out_remove_dbg_kobj;

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto out_qm_exit;
	return 0;

 out_qm_exit:
	xfs_qm_exit();
 out_remove_dbg_kobj:
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
 out_remove_stats_kobj:
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
 out_free_stats:
	free_percpu(xfsstats.xs_stats);
 out_kset_unregister:
	kset_unregister(xfs_kset);
 out_sysctl_unregister:
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_buf_terminate:
	xfs_buf_terminate();
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
 out_destroy_wq:
	xfs_destroy_workqueues();
 out_destroy_zones:
	xfs_destroy_zones();
 out:
	return error;
}

STATIC void __exit
exit_xfs_fs(void)
{
	xfs_qm_exit();
	unregister_filesystem(&xfs_fs_type);
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
	free_percpu(xfsstats.xs_stats);
	kset_unregister(xfs_kset);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_buf_terminate();
	xfs_mru_cache_uninit();
	xfs_destroy_workqueues();
	xfs_destroy_zones();
	xfs_uuid_table_free();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");