// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_dir2.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_icreate_item.h"
#include "xfs_filestream.h"
#include "xfs_quota.h"
#include "xfs_sysfs.h"
#include "xfs_ondisk.h"
#include "xfs_rmap_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"
#include "xfs_pwork.h"
#include "xfs_ag.h"

#include <linux/magic.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

static const struct super_operations xfs_super_operations;

static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
#ifdef DEBUG
static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
#endif

enum xfs_dax_mode {
	XFS_DAX_INODE = 0,
	XFS_DAX_ALWAYS = 1,
	XFS_DAX_NEVER = 2,
};

static void
xfs_mount_set_dax_mode(
	struct xfs_mount	*mp,
	enum xfs_dax_mode	mode)
{
	switch (mode) {
	case XFS_DAX_INODE:
		mp->m_flags &= ~(XFS_MOUNT_DAX_ALWAYS | XFS_MOUNT_DAX_NEVER);
		break;
	case XFS_DAX_ALWAYS:
		mp->m_flags |= XFS_MOUNT_DAX_ALWAYS;
		mp->m_flags &= ~XFS_MOUNT_DAX_NEVER;
		break;
	case XFS_DAX_NEVER:
		mp->m_flags |= XFS_MOUNT_DAX_NEVER;
		mp->m_flags &= ~XFS_MOUNT_DAX_ALWAYS;
		break;
	}
}

static const struct constant_table dax_param_enums[] = {
	{"inode",	XFS_DAX_INODE },
	{"always",	XFS_DAX_ALWAYS },
	{"never",	XFS_DAX_NEVER },
	{}
};

/*
 * Table driven mount option parser.
 */
enum {
	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
};

static const struct fs_parameter_spec xfs_fs_parameters[] = {
	fsparam_u32("logbufs",		Opt_logbufs),
	fsparam_string("logbsize",	Opt_logbsize),
	fsparam_string("logdev",	Opt_logdev),
	fsparam_string("rtdev",		Opt_rtdev),
	fsparam_flag("wsync",		Opt_wsync),
	fsparam_flag("noalign",		Opt_noalign),
	fsparam_flag("swalloc",		Opt_swalloc),
	fsparam_u32("sunit",		Opt_sunit),
	fsparam_u32("swidth",		Opt_swidth),
	fsparam_flag("nouuid",		Opt_nouuid),
	fsparam_flag("grpid",		Opt_grpid),
	fsparam_flag("nogrpid",		Opt_nogrpid),
	fsparam_flag("bsdgroups",	Opt_bsdgroups),
	fsparam_flag("sysvgroups",	Opt_sysvgroups),
	fsparam_string("allocsize",	Opt_allocsize),
	fsparam_flag("norecovery",	Opt_norecovery),
	fsparam_flag("inode64",		Opt_inode64),
	fsparam_flag("inode32",		Opt_inode32),
	fsparam_flag("ikeep",		Opt_ikeep),
	fsparam_flag("noikeep",		Opt_noikeep),
	fsparam_flag("largeio",		Opt_largeio),
	fsparam_flag("nolargeio",	Opt_nolargeio),
	fsparam_flag("attr2",		Opt_attr2),
	fsparam_flag("noattr2",		Opt_noattr2),
	fsparam_flag("filestreams",	Opt_filestreams),
	fsparam_flag("quota",		Opt_quota),
	fsparam_flag("noquota",		Opt_noquota),
	fsparam_flag("usrquota",	Opt_usrquota),
	fsparam_flag("grpquota",	Opt_grpquota),
	fsparam_flag("prjquota",	Opt_prjquota),
	fsparam_flag("uquota",		Opt_uquota),
	fsparam_flag("gquota",		Opt_gquota),
	fsparam_flag("pquota",		Opt_pquota),
	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
	fsparam_flag("qnoenforce",	Opt_qnoenforce),
	fsparam_flag("discard",		Opt_discard),
	fsparam_flag("nodiscard",	Opt_nodiscard),
	fsparam_flag("dax",		Opt_dax),
	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
	{}
};
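
/*
 * Illustrative example (not part of the original source): a mount line that
 * exercises several of the options declared above might look like
 *
 *	mount -t xfs -o logbufs=8,logbsize=256k,usrquota,discard /dev/sdb1 /mnt
 *
 * Flag options simply set or clear bits on the xfs_mount, while values with
 * a size suffix (logbsize, allocsize) are parsed by suffix_kstrtoint()
 * further down in this file.
 */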

struct proc_xfs_info {
	uint64_t	flag;
	char		*str;
};

static int
xfs_fs_show_options(
	struct seq_file		*m,
	struct dentry		*root)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_IKEEP,		",ikeep" },
		{ XFS_MOUNT_WSYNC,		",wsync" },
		{ XFS_MOUNT_NOALIGN,		",noalign" },
		{ XFS_MOUNT_SWALLOC,		",swalloc" },
		{ XFS_MOUNT_NOUUID,		",nouuid" },
		{ XFS_MOUNT_NORECOVERY,		",norecovery" },
		{ XFS_MOUNT_ATTR2,		",attr2" },
		{ XFS_MOUNT_FILESTREAMS,	",filestreams" },
		{ XFS_MOUNT_GRPID,		",grpid" },
		{ XFS_MOUNT_DISCARD,		",discard" },
		{ XFS_MOUNT_LARGEIO,		",largeio" },
		{ XFS_MOUNT_DAX_ALWAYS,		",dax=always" },
		{ XFS_MOUNT_DAX_NEVER,		",dax=never" },
		{ 0, NULL }
	};
	struct xfs_mount	*mp = XFS_M(root->d_sb);
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_flags & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}

	seq_printf(m, ",inode%d",
		(mp->m_flags & XFS_MOUNT_SMALL_INUMS) ? 32 : 64);

	if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
		seq_printf(m, ",allocsize=%dk",
			   (1 << mp->m_allocsize_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);

	if (mp->m_logname)
		seq_show_option(m, "logdev", mp->m_logname);
	if (mp->m_rtname)
		seq_show_option(m, "rtdev", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, ",sunit=%d",
			(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, ",swidth=%d",
			(int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	if (mp->m_qflags & XFS_UQUOTA_ACCT) {
		if (mp->m_qflags & XFS_UQUOTA_ENFD)
			seq_puts(m, ",usrquota");
		else
			seq_puts(m, ",uqnoenforce");
	}

	if (mp->m_qflags & XFS_PQUOTA_ACCT) {
		if (mp->m_qflags & XFS_PQUOTA_ENFD)
			seq_puts(m, ",prjquota");
		else
			seq_puts(m, ",pqnoenforce");
	}
	if (mp->m_qflags & XFS_GQUOTA_ACCT) {
		if (mp->m_qflags & XFS_GQUOTA_ENFD)
			seq_puts(m, ",grpquota");
		else
			seq_puts(m, ",gqnoenforce");
	}

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, ",noquota");

	return 0;
}

/*
 * Set parameters for inode allocation heuristics, taking into account
 * filesystem size and inode32/inode64 mount options; i.e. specifically
 * whether or not XFS_MOUNT_SMALL_INUMS is set.
 *
 * Inode allocation patterns are altered only if inode32 is requested
 * (XFS_MOUNT_SMALL_INUMS), and the filesystem is sufficiently large.
 * If altered, XFS_MOUNT_32BITINODES is set as well.
 *
 * An agcount independent of that in the mount structure is provided
 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 * to the potentially higher ag count.
 *
 * Returns the maximum AG index which may contain inodes.
 */
xfs_agnumber_t
xfs_set_inode_alloc(
	struct xfs_mount *mp,
	xfs_agnumber_t	agcount)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	maxagi = 0;
	xfs_sb_t	*sbp = &mp->m_sb;
	xfs_agnumber_t	max_metadata;
	xfs_agino_t	agino;
	xfs_ino_t	ino;

	/*
	 * Calculate how much should be reserved for inodes to meet
	 * the max inode percentage. Used only for inode32.
	 */
	if (M_IGEO(mp)->maxicount) {
		uint64_t	icount;

		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		icount += sbp->sb_agblocks - 1;
		do_div(icount, sbp->sb_agblocks);
		max_metadata = icount;
	} else {
		max_metadata = agcount;
	}

	/* Get the last possible inode in the filesystem */
	agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/*
	 * If user asked for no more than 32-bit inodes, and the fs is
	 * sufficiently large, set XFS_MOUNT_32BITINODES if we must alter
	 * the allocator to accommodate the request.
	 */
	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
		mp->m_flags |= XFS_MOUNT_32BITINODES;
	else
		mp->m_flags &= ~XFS_MOUNT_32BITINODES;

	for (index = 0; index < agcount; index++) {
		struct xfs_perag	*pag;

		ino = XFS_AGINO_TO_INO(mp, index, agino);

		pag = xfs_perag_get(mp, index);

		if (mp->m_flags & XFS_MOUNT_32BITINODES) {
			if (ino > XFS_MAXINUMBER_32) {
				pag->pagi_inodeok = 0;
				pag->pagf_metadata = 0;
			} else {
				pag->pagi_inodeok = 1;
				maxagi++;
				if (index < max_metadata)
					pag->pagf_metadata = 1;
				else
					pag->pagf_metadata = 0;
			}
		} else {
			pag->pagi_inodeok = 1;
			pag->pagf_metadata = 0;
		}

		xfs_perag_put(pag);
	}

	return (mp->m_flags & XFS_MOUNT_32BITINODES) ? maxagi : agcount;
}

STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				    mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
	}

	return error;
}

STATIC void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

STATIC void
xfs_close_devices(
	struct xfs_mount	*mp)
{
	struct dax_device	*dax_ddev = mp->m_ddev_targp->bt_daxdev;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
		struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_logdev_targp);
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
	if (mp->m_rtdev_targp) {
		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
		struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_rtdev_targp);
		xfs_blkdev_put(rtdev);
		fs_put_dax(dax_rtdev);
	}
	xfs_free_buftarg(mp->m_ddev_targp);
	fs_put_dax(dax_ddev);
}

/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present. The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
	struct xfs_mount	*mp)
{
	struct block_device	*ddev = mp->m_super->s_bdev;
	struct dax_device	*dax_ddev = fs_dax_get_by_bdev(ddev);
	struct dax_device	*dax_logdev = NULL, *dax_rtdev = NULL;
	struct block_device	*logdev = NULL, *rtdev = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
		if (error)
			goto out;
		dax_logdev = fs_dax_get_by_bdev(logdev);
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
		if (error)
			goto out_close_logdev;

		if (rtdev == ddev || rtdev == logdev) {
			xfs_warn(mp,
	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = -EINVAL;
			goto out_close_rtdev;
		}
		dax_rtdev = fs_dax_get_by_bdev(rtdev);
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = -ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev && logdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		mp->m_logdev_targp = mp->m_ddev_targp;
	}

	return 0;

 out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp);
 out_free_ddev_targ:
	xfs_free_buftarg(mp->m_ddev_targp);
 out_close_rtdev:
	xfs_blkdev_put(rtdev);
	fs_put_dax(dax_rtdev);
 out_close_logdev:
	if (logdev && logdev != ddev) {
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
 out:
	fs_put_dax(dax_ddev);
	return error;
}

/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		if (xfs_sb_version_hassector(&mp->m_sb))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}
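
/*
 * Note (illustrative, not part of the original source): the "%s" in each
 * workqueue name below is expanded with mp->m_super->s_id, so a mount of
 * /dev/sda1 would typically end up with workqueues named xfs-buf/sda1,
 * xfs-conv/sda1, xfs-cil/sda1, and so on.
 */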

STATIC int
xfs_init_mount_workqueues(
	struct xfs_mount	*mp)
{
	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			1, mp->m_super->s_id);
	if (!mp->m_buf_workqueue)
		goto out;

	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_unwritten_workqueue)
		goto out_destroy_buf;

	mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
			0, mp->m_super->s_id);
	if (!mp->m_cil_workqueue)
		goto out_destroy_unwritten;

	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_reclaim_workqueue)
		goto out_destroy_cil;

	mp->m_gc_workqueue = alloc_workqueue("xfs-gc/%s",
			WQ_SYSFS | WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM,
			0, mp->m_super->s_id);
	if (!mp->m_gc_workqueue)
		goto out_destroy_reclaim;

	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
			XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
	if (!mp->m_sync_workqueue)
		goto out_destroy_eofb;

	return 0;

out_destroy_eofb:
	destroy_workqueue(mp->m_gc_workqueue);
out_destroy_reclaim:
	destroy_workqueue(mp->m_reclaim_workqueue);
out_destroy_cil:
	destroy_workqueue(mp->m_cil_workqueue);
out_destroy_unwritten:
	destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_buf:
	destroy_workqueue(mp->m_buf_workqueue);
out:
	return -ENOMEM;
}

STATIC void
xfs_destroy_mount_workqueues(
	struct xfs_mount	*mp)
{
	destroy_workqueue(mp->m_sync_workqueue);
	destroy_workqueue(mp->m_gc_workqueue);
	destroy_workqueue(mp->m_reclaim_workqueue);
	destroy_workqueue(mp->m_cil_workqueue);
	destroy_workqueue(mp->m_unwritten_workqueue);
	destroy_workqueue(mp->m_buf_workqueue);
}

static void
xfs_flush_inodes_worker(
	struct work_struct	*work)
{
	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
						   m_flush_inodes_work);
	struct super_block	*sb = mp->m_super;

	if (down_read_trylock(&sb->s_umount)) {
		sync_inodes_sb(sb);
		up_read(&sb->s_umount);
	}
}

/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 * for IO to complete so that we effectively throttle multiple callers to the
 * rate at which IO is completing.
 */
void
xfs_flush_inodes(
	struct xfs_mount	*mp)
{
	/*
	 * If flush_work() returns true then that means we waited for a flush
	 * which was already in progress. Don't bother running another scan.
	 */
	if (flush_work(&mp->m_flush_inodes_work))
		return;

	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
	flush_work(&mp->m_flush_inodes_work);
}

/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}

#ifdef DEBUG
static void
xfs_check_delalloc(
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;

	if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
		return;
	do {
		if (isnullstartblock(got.br_startblock)) {
			xfs_warn(ip->i_mount,
	"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
				ip->i_ino,
				whichfork == XFS_DATA_FORK ? "data" : "cow",
				got.br_startoff, got.br_blockcount);
		}
	} while (xfs_iext_next_extent(ifp, &icur, &got));
}
#else
#define xfs_check_delalloc(ip, whichfork)	do { } while (0)
#endif

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	XFS_STATS_INC(ip->i_mount, vn_rele);
	XFS_STATS_INC(ip->i_mount, vn_remove);

	xfs_inactive(ip);

	if (!XFS_FORCED_SHUTDOWN(ip->i_mount) && ip->i_delayed_blks) {
		xfs_check_delalloc(ip, XFS_DATA_FORK);
		xfs_check_delalloc(ip, XFS_COW_FORK);
		ASSERT(0);
	}

	XFS_STATS_INC(ip->i_mount, vn_reclaim);

	/*
	 * We should never get here with one of the reclaim flags already set.
	 */
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));

	/*
	 * We always use background reclaim here because even if the inode is
	 * clean, it still may be under IO and hence we have to wait for IO
	 * completion to occur before we can reclaim the inode. The background
	 * reclaim path handles this more efficiently than we can here, so
	 * simply let background reclaim tear down all inodes.
	 */
	xfs_inode_mark_reclaimable(ip);
}

static void
xfs_fs_dirty_inode(
	struct inode		*inode,
	int			flag)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;

	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
		return;
	if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
		return;

	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
		return;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
	xfs_trans_commit(tp);
}

/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);

	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
}

/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every inode
 * we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	/*
	 * If this unlinked inode is in the middle of recovery, don't
	 * drop the inode just yet; log recovery will take care of
	 * that. See the comment for this inode flag.
	 */
	if (ip->i_flags & XFS_IRECOVERY) {
		ASSERT(ip->i_mount->m_log->l_flags & XLOG_RECOVERY_NEEDED);
		return 0;
	}

	return generic_drop_inode(inode);
}

static void
xfs_mount_free(
	struct xfs_mount	*mp)
{
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
	kmem_free(mp);
}

STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/*
	 * Doing anything during the async pass would be counterproductive.
	 */
	if (!wait)
		return 0;

	xfs_log_force(mp, XFS_LOG_SYNC);
	if (laptop_mode) {
		/*
		 * The disk must be active because we're syncing.
		 * We schedule log work now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		flush_delayed_work(&mp->m_log->l_work);
	}

	return 0;
}

STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	uint64_t		fakeinos, id;
	uint64_t		icount;
	uint64_t		ifree;
	uint64_t		fdblocks;
	xfs_extlen_t		lsize;
	int64_t			ffree;

	statp->f_type = XFS_SUPER_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid = u64_to_fsid(id);

	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	spin_unlock(&mp->m_sb_lock);

	/* make sure statp->f_bfree does not underflow */
	statp->f_bfree = max_t(int64_t, fdblocks - mp->m_alloc_set_aside, 0);
	statp->f_bavail = statp->f_bfree;

	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
	if (M_IGEO(mp)->maxicount)
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
					M_IGEO(mp)->maxicount);

	/* If sb_icount overshot maxicount, report actual allocation */
	statp->f_files = max_t(typeof(statp->f_files),
					statp->f_files,
					sbp->sb_icount);

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (icount - ifree);
	statp->f_ffree = max_t(int64_t, ffree, 0);


	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);

	if (XFS_IS_REALTIME_MOUNT(mp) &&
	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
		statp->f_blocks = sbp->sb_rblocks;
		statp->f_bavail = statp->f_bfree =
			sbp->sb_frextents * sbp->sb_rextsize;
	}

	return 0;
}

STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks = 0;

	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks;

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, &resblks, NULL);
}

/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done sync the superblock
 * to the log to dirty it in case of a crash while frozen. This ensures that we
 * will recover the unlinked inode lists on the next mount.
 */
STATIC int
xfs_fs_freeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);
	unsigned int		flags;
	int			ret;

	/*
	 * The filesystem is now frozen far enough that memory reclaim
	 * cannot safely operate on the filesystem. Hence we need to
	 * set a GFP_NOFS context here to avoid recursion deadlocks.
	 */
	flags = memalloc_nofs_save();
	xfs_blockgc_stop(mp);
	xfs_save_resvblks(mp);
	ret = xfs_log_quiesce(mp);
	memalloc_nofs_restore(flags);
	return ret;
}

STATIC int
xfs_fs_unfreeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);
	xfs_blockgc_start(mp);
	return 0;
}

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount	*mp)
{
	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);

	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_sb_version_haslogv2(&mp->m_sb)) {
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			xfs_warn(mp,
		"logbuf size must be greater than or equal to log stripe size");
			return -EINVAL;
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			xfs_warn(mp,
		"logbuf size for version 1 logs must be 16K or 32K");
			return -EINVAL;
		}
	}

	/*
	 * V5 filesystems always use attr2 format for attributes.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb) &&
	    (mp->m_flags & XFS_MOUNT_NOATTR2)) {
		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
			     "attr2 is always enabled for V5 filesystems.");
		return -EINVAL;
	}

	/*
	 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
	 * told by noattr2 to turn it off
	 */
	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_NOATTR2))
		mp->m_flags |= XFS_MOUNT_ATTR2;

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
		xfs_warn(mp,
			"cannot mount a read-only filesystem as read-write");
		return -EROFS;
	}

	if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
	    (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE)) &&
	    !xfs_sb_version_has_pquotino(&mp->m_sb)) {
		xfs_warn(mp,
		  "Super block does not support project and group quota together");
		return -EINVAL;
	}

	return 0;
}

static int
xfs_init_percpu_counters(
	struct xfs_mount	*mp)
{
	int			error;

	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
	if (error)
		return -ENOMEM;

	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
	if (error)
		goto free_icount;

	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
	if (error)
		goto free_ifree;

	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
	if (error)
		goto free_fdblocks;

	return 0;

free_fdblocks:
	percpu_counter_destroy(&mp->m_fdblocks);
free_ifree:
	percpu_counter_destroy(&mp->m_ifree);
free_icount:
	percpu_counter_destroy(&mp->m_icount);
	return -ENOMEM;
}

void
xfs_reinit_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
}

static void
xfs_destroy_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_destroy(&mp->m_icount);
	percpu_counter_destroy(&mp->m_ifree);
	percpu_counter_destroy(&mp->m_fdblocks);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
	percpu_counter_destroy(&mp->m_delalloc_blks);
}

static void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/* if ->fill_super failed, we have no mount to tear down */
	if (!sb->s_fs_info)
		return;

	xfs_notice(mp, "Unmounting Filesystem");
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);

	xfs_freesb(mp);
	free_percpu(mp->m_stats.xs_stats);
	xfs_destroy_percpu_counters(mp);
	xfs_destroy_mount_workqueues(mp);
	xfs_close_devices(mp);

	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
}

static long
xfs_fs_nr_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	/* Paranoia: catch incorrect calls during mount setup or teardown */
	if (WARN_ON_ONCE(!sb->s_fs_info))
		return 0;
	return xfs_reclaim_inodes_count(XFS_M(sb));
}

static long
xfs_fs_free_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}

static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.dirty_inode		= xfs_fs_dirty_inode,
	.drop_inode		= xfs_fs_drop_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.show_options		= xfs_fs_show_options,
	.nr_cached_objects	= xfs_fs_nr_cached_objects,
	.free_cached_objects	= xfs_fs_free_cached_objects,
};

static int
suffix_kstrtoint(
	const char	*s,
	unsigned int	base,
	int		*res)
{
	int		last, shift_left_factor = 0, _res;
	char		*value;
	int		ret = 0;

	value = kstrdup(s, GFP_KERNEL);
	if (!value)
		return -ENOMEM;

	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}

	if (kstrtoint(value, base, &_res))
		ret = -EINVAL;
	kfree(value);
	*res = _res << shift_left_factor;
	return ret;
}
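
/*
 * Illustrative examples (not part of the original source): with base 10,
 * suffix_kstrtoint("8", 10, &res) leaves res == 8, while
 * suffix_kstrtoint("64k", 10, &res) leaves res == 64 << 10 == 65536.
 * A string with an unrecognised suffix fails the kstrtoint() call above
 * and the function returns -EINVAL.
 */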

static inline void
xfs_fs_warn_deprecated(
	struct fs_context	*fc,
	struct fs_parameter	*param,
	uint64_t		flag,
	bool			value)
{
	/* Don't print the warning if reconfiguring and current mount point
	 * already had the flag set
	 */
	if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
	    !!(XFS_M(fc->root->d_sb)->m_flags & flag) == value)
		return;
	xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
}

/*
 * Set mount state from a mount option.
 *
 * NOTE: mp->m_super is NULL here!
 */
static int
xfs_fs_parse_param(
	struct fs_context	*fc,
	struct fs_parameter	*param)
{
	struct xfs_mount	*parsing_mp = fc->s_fs_info;
	struct fs_parse_result	result;
	int			size = 0;
	int			opt;

	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_logbufs:
		parsing_mp->m_logbufs = result.uint_32;
		return 0;
	case Opt_logbsize:
		if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
			return -EINVAL;
		return 0;
	case Opt_logdev:
		kfree(parsing_mp->m_logname);
		parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_logname)
			return -ENOMEM;
		return 0;
	case Opt_rtdev:
		kfree(parsing_mp->m_rtname);
		parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_rtname)
			return -ENOMEM;
		return 0;
	case Opt_allocsize:
		if (suffix_kstrtoint(param->string, 10, &size))
			return -EINVAL;
		parsing_mp->m_allocsize_log = ffs(size) - 1;
		parsing_mp->m_flags |= XFS_MOUNT_ALLOCSIZE;
		return 0;
	case Opt_grpid:
	case Opt_bsdgroups:
		parsing_mp->m_flags |= XFS_MOUNT_GRPID;
		return 0;
	case Opt_nogrpid:
	case Opt_sysvgroups:
		parsing_mp->m_flags &= ~XFS_MOUNT_GRPID;
		return 0;
	case Opt_wsync:
		parsing_mp->m_flags |= XFS_MOUNT_WSYNC;
		return 0;
	case Opt_norecovery:
		parsing_mp->m_flags |= XFS_MOUNT_NORECOVERY;
		return 0;
	case Opt_noalign:
		parsing_mp->m_flags |= XFS_MOUNT_NOALIGN;
		return 0;
	case Opt_swalloc:
		parsing_mp->m_flags |= XFS_MOUNT_SWALLOC;
		return 0;
	case Opt_sunit:
		parsing_mp->m_dalign = result.uint_32;
		return 0;
	case Opt_swidth:
		parsing_mp->m_swidth = result.uint_32;
		return 0;
	case Opt_inode32:
		parsing_mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
		return 0;
	case Opt_inode64:
		parsing_mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
		return 0;
	case Opt_nouuid:
		parsing_mp->m_flags |= XFS_MOUNT_NOUUID;
		return 0;
	case Opt_largeio:
		parsing_mp->m_flags |= XFS_MOUNT_LARGEIO;
		return 0;
	case Opt_nolargeio:
		parsing_mp->m_flags &= ~XFS_MOUNT_LARGEIO;
		return 0;
	case Opt_filestreams:
		parsing_mp->m_flags |= XFS_MOUNT_FILESTREAMS;
		return 0;
	case Opt_noquota:
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
		return 0;
	case Opt_quota:
	case Opt_uquota:
	case Opt_usrquota:
		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
					 XFS_UQUOTA_ENFD);
		return 0;
	case Opt_qnoenforce:
	case Opt_uqnoenforce:
		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
		parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
		return 0;
	case Opt_pquota:
	case Opt_prjquota:
		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
					 XFS_PQUOTA_ENFD);
		return 0;
	case Opt_pqnoenforce:
		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
		parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
		return 0;
	case Opt_gquota:
	case Opt_grpquota:
		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
					 XFS_GQUOTA_ENFD);
		return 0;
	case Opt_gqnoenforce:
		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
		parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
		return 0;
	case Opt_discard:
		parsing_mp->m_flags |= XFS_MOUNT_DISCARD;
		return 0;
	case Opt_nodiscard:
		parsing_mp->m_flags &= ~XFS_MOUNT_DISCARD;
		return 0;
#ifdef CONFIG_FS_DAX
	case Opt_dax:
		xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
		return 0;
	case Opt_dax_enum:
		xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
		return 0;
#endif
	/* Following mount options will be removed in September 2025 */
	case Opt_ikeep:
		xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_IKEEP, true);
		parsing_mp->m_flags |= XFS_MOUNT_IKEEP;
		return 0;
	case Opt_noikeep:
		xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_IKEEP, false);
		parsing_mp->m_flags &= ~XFS_MOUNT_IKEEP;
		return 0;
	case Opt_attr2:
		xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_ATTR2, true);
		parsing_mp->m_flags |= XFS_MOUNT_ATTR2;
		return 0;
	case Opt_noattr2:
		xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_NOATTR2, true);
		parsing_mp->m_flags &= ~XFS_MOUNT_ATTR2;
		parsing_mp->m_flags |= XFS_MOUNT_NOATTR2;
		return 0;
	default:
		xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
		return -EINVAL;
	}

	return 0;
}

static int
xfs_fs_validate_params(
	struct xfs_mount	*mp)
{
	/*
	 * no recovery flag requires a read-only mount
	 */
	if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_warn(mp, "no-recovery mounts must be read-only.");
		return -EINVAL;
	}

	if ((mp->m_flags & XFS_MOUNT_NOALIGN) &&
	    (mp->m_dalign || mp->m_swidth)) {
		xfs_warn(mp,
	"sunit and swidth options incompatible with the noalign option");
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
		xfs_warn(mp, "quota support not available in this kernel.");
		return -EINVAL;
	}

	if ((mp->m_dalign && !mp->m_swidth) ||
	    (!mp->m_dalign && mp->m_swidth)) {
		xfs_warn(mp, "sunit and swidth must be specified together");
		return -EINVAL;
	}

	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
		xfs_warn(mp,
	"stripe width (%d) must be a multiple of the stripe unit (%d)",
			mp->m_swidth, mp->m_dalign);
		return -EINVAL;
	}

	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return -EINVAL;
	}

	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize != 0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		xfs_warn(mp,
			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
			mp->m_logbsize);
		return -EINVAL;
	}

	if ((mp->m_flags & XFS_MOUNT_ALLOCSIZE) &&
	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
		return -EINVAL;
	}

	return 0;
}
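
/*
 * Illustrative examples (not part of the original source) of option
 * combinations rejected by xfs_fs_validate_params() above: "-o sunit=128"
 * without a matching swidth, "-o noalign,sunit=128,swidth=256", or
 * "-o norecovery" on a read-write mount all fail with -EINVAL before the
 * superblock is ever read.
 */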

static int
xfs_fs_fill_super(
	struct super_block	*sb,
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = sb->s_fs_info;
	struct inode		*root;
	int			flags = 0, error;

	mp->m_super = sb;

	error = xfs_fs_validate_params(mp);
	if (error)
		goto out_free_names;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	sb->s_op = &xfs_super_operations;

	/*
	 * Delay mount work if the debug hook is set. This is debug
	 * instrumentation to coordinate simulation of xfs mount failures with
	 * VFS superblock operations
	 */
	if (xfs_globals.mount_delay) {
		xfs_notice(mp, "Delaying mount for %d seconds.",
			xfs_globals.mount_delay);
		msleep(xfs_globals.mount_delay * 1000);
	}

	if (fc->sb_flags & SB_SILENT)
		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
	if (error)
		goto out_free_names;

	error = xfs_init_mount_workqueues(mp);
	if (error)
		goto out_close_devices;

	error = xfs_init_percpu_counters(mp);
	if (error)
		goto out_destroy_workqueues;

	/* Allocate stats memory before we do operations that might use it */
	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
	if (!mp->m_stats.xs_stats) {
		error = -ENOMEM;
		goto out_destroy_counters;
	}

	error = xfs_readsb(mp, flags);
	if (error)
		goto out_free_stats;

	error = xfs_finish_flags(mp);
	if (error)
		goto out_free_sb;

	error = xfs_setup_devices(mp);
	if (error)
		goto out_free_sb;

	/* V4 support is undergoing deprecation. */
	if (!xfs_sb_version_hascrc(&mp->m_sb)) {
#ifdef CONFIG_XFS_SUPPORT_V4
		xfs_warn_once(mp,
	"Deprecated V4 format (crc=0) will not be supported after September 2030.");
#else
		xfs_warn(mp,
	"Deprecated V4 format (crc=0) not supported by kernel.");
		error = -EINVAL;
		goto out_free_sb;
#endif
	}

	/* Filesystem claims it needs repair, so refuse the mount. */
	if (xfs_sb_version_needsrepair(&mp->m_sb)) {
		xfs_warn(mp, "Filesystem needs repair. Please run xfs_repair.");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

	/*
	 * Don't touch the filesystem if a user tool thinks it owns the primary
	 * superblock. mkfs doesn't clear the flag from secondary supers, so
	 * we don't check them at all.
	 */
	if (mp->m_sb.sb_inprogress) {
		xfs_warn(mp, "Offline file system operation in progress!");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

	/*
	 * Until this is fixed only page-sized or smaller data blocks work.
	 */
	if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
		xfs_warn(mp,
		"File system with blocksize %d bytes. "
		"Only pagesize (%ld) or less will currently work.",
				mp->m_sb.sb_blocksize, PAGE_SIZE);
		error = -ENOSYS;
		goto out_free_sb;
	}

	/* Ensure this filesystem fits in the page cache limits */
	if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
	    xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
		xfs_warn(mp,
		"file system too large to be mounted on this system.");
		error = -EFBIG;
		goto out_free_sb;
	}

	/*
	 * XFS block mappings use 54 bits to store the logical block offset.
	 * This should suffice to handle the maximum file size that the VFS
	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
	 * to check this assertion.
	 *
	 * Avoid integer overflow by comparing the maximum bmbt offset to the
	 * maximum pagecache offset in units of fs blocks.
	 */
	if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
		xfs_warn(mp,
"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
			 XFS_MAX_FILEOFF);
		error = -EINVAL;
		goto out_free_sb;
	}

	error = xfs_filestream_mount(mp);
	if (error)
		goto out_free_sb;

	/*
	 * we must configure the block size in the superblock before we run the
	 * full mount process as the mount process can lookup and cache inodes.
	 */
	sb->s_magic = XFS_SUPER_MAGIC;
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_max_links = XFS_MAXLINK;
	sb->s_time_gran = 1;
	if (xfs_sb_version_hasbigtime(&mp->m_sb)) {
		sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
		sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
	} else {
		sb->s_time_min = XFS_LEGACY_TIME_MIN;
		sb->s_time_max = XFS_LEGACY_TIME_MAX;
	}
	trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
	sb->s_iflags |= SB_I_CGROUPWB;

	set_posix_acl_flag(sb);

	/* version 5 superblocks support inode version counters. */
	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
		sb->s_flags |= SB_I_VERSION;

	if (xfs_sb_version_hasbigtime(&mp->m_sb))
		xfs_warn(mp,
 "EXPERIMENTAL big timestamp feature in use. Use at your own risk!");

	if (mp->m_flags & XFS_MOUNT_DAX_ALWAYS) {
		bool rtdev_is_dax = false, datadev_is_dax;

		xfs_warn(mp,
		"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");

		datadev_is_dax = bdev_dax_supported(mp->m_ddev_targp->bt_bdev,
			sb->s_blocksize);
		if (mp->m_rtdev_targp)
			rtdev_is_dax = bdev_dax_supported(
				mp->m_rtdev_targp->bt_bdev, sb->s_blocksize);
		if (!rtdev_is_dax && !datadev_is_dax) {
			xfs_alert(mp,
			"DAX unsupported by block device. Turning off DAX.");
			xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
		}
		if (xfs_sb_version_hasreflink(&mp->m_sb)) {
			xfs_alert(mp,
		"DAX and reflink cannot be used together!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}
	}

	if (mp->m_flags & XFS_MOUNT_DISCARD) {
		struct request_queue *q = bdev_get_queue(sb->s_bdev);

		if (!blk_queue_discard(q)) {
			xfs_warn(mp, "mounting with \"discard\" option, but "
					"the device does not support discard");
			mp->m_flags &= ~XFS_MOUNT_DISCARD;
		}
	}

	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		if (mp->m_sb.sb_rblocks) {
			xfs_alert(mp,
	"reflink not compatible with realtime device!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}

		if (xfs_globals.always_cow) {
			xfs_info(mp, "using DEBUG-only always_cow mode.");
			mp->m_always_cow = true;
		}
	}

	if (xfs_sb_version_hasrmapbt(&mp->m_sb) && mp->m_sb.sb_rblocks) {
		xfs_alert(mp,
	"reverse mapping btree not compatible with realtime device!");
		error = -EINVAL;
		goto out_filestream_unmount;
	}

	if (xfs_sb_version_hasinobtcounts(&mp->m_sb))
		xfs_warn(mp,
 "EXPERIMENTAL inode btree counters feature in use. Use at your own risk!");

	error = xfs_mountfs(mp);
	if (error)
		goto out_filestream_unmount;

	root = igrab(VFS_I(mp->m_rootip));
	if (!root) {
		error = -ENOENT;
		goto out_unmount;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		error = -ENOMEM;
		goto out_unmount;
	}

	return 0;

 out_filestream_unmount:
	xfs_filestream_unmount(mp);
 out_free_sb:
	xfs_freesb(mp);
 out_free_stats:
	free_percpu(mp->m_stats.xs_stats);
 out_destroy_counters:
	xfs_destroy_percpu_counters(mp);
 out_destroy_workqueues:
	xfs_destroy_mount_workqueues(mp);
 out_close_devices:
	xfs_close_devices(mp);
 out_free_names:
	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
	return error;

 out_unmount:
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);
	goto out_free_sb;
}

static int
xfs_fs_get_tree(
	struct fs_context	*fc)
{
	return get_tree_bdev(fc, xfs_fs_fill_super);
}

static int
xfs_remount_rw(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	int			error;

	if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
		xfs_warn(mp,
			"ro->rw transition prohibited on norecovery mount");
		return -EINVAL;
	}

	if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
		xfs_warn(mp,
	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
			(sbp->sb_features_ro_compat &
				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
		return -EINVAL;
	}

	mp->m_flags &= ~XFS_MOUNT_RDONLY;

	/*
	 * If this is the first remount to writeable state we might have some
	 * superblock changes to update.
	 */
	if (mp->m_update_sb) {
		error = xfs_sync_sb(mp, false);
		if (error) {
			xfs_warn(mp, "failed to write sb changes");
			return error;
		}
		mp->m_update_sb = false;
	}

	/*
	 * Fill out the reserve pool if it is empty. Use the stashed value if
	 * it is non-zero, otherwise go with the default.
	 */
	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);

	/* Recover any CoW blocks that never got remapped. */
	error = xfs_reflink_recover_cow(mp);
	if (error) {
		xfs_err(mp,
			"Error %d recovering leftover CoW allocations.", error);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}
	xfs_blockgc_start(mp);

	/* Create the per-AG metadata reservation pool. */
	error = xfs_fs_reserve_ag_blocks(mp);
	if (error && error != -ENOSPC)
		return error;

	return 0;
}

static int
xfs_remount_ro(
	struct xfs_mount	*mp)
{
	int			error;

	/*
	 * Cancel background eofb scanning so it cannot race with the final
	 * log force+buftarg wait and deadlock the remount.
	 */
	xfs_blockgc_stop(mp);

	/* Get rid of any leftover CoW reservations... */
	error = xfs_blockgc_free_space(mp, NULL);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/* Free the per-AG metadata reservation pool. */
	error = xfs_fs_unreserve_ag_blocks(mp);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/*
	 * Before we sync the metadata, we need to free up the reserve block
	 * pool so that the used block count in the superblock on disk is
	 * correct at the end of the remount. Stash the current reserve pool
	 * size so that if we get remounted rw, we can return it to the same
	 * size.
	 */
	xfs_save_resvblks(mp);

	xfs_log_clean(mp);
	mp->m_flags |= XFS_MOUNT_RDONLY;

	return 0;
}
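
/*
 * Illustrative example (not part of the original source): "mount -o
 * remount,ro /mnt" ends up in xfs_remount_ro() above, while a subsequent
 * "mount -o remount,rw /mnt" goes through xfs_remount_rw(), which refills
 * the reserve block pool from the value stashed by xfs_save_resvblks().
 */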

/*
 * Logically we would return an error here to prevent users from believing
 * they might have changed mount options using remount which can't be changed.
 *
 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
 * arguments in some cases so we can't blindly reject options, but have to
 * check for each specified option if it actually differs from the currently
 * set option and only reject it if that's the case.
 *
 * Until that is implemented we return success for every remount request, and
 * silently ignore all options that we can't actually change.
 */
static int
xfs_fs_reconfigure(
	struct fs_context *fc)
{
	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
	struct xfs_mount	*new_mp = fc->s_fs_info;
	xfs_sb_t		*sbp = &mp->m_sb;
	int			flags = fc->sb_flags;
	int			error;

	/* version 5 superblocks always support version counters. */
	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
		fc->sb_flags |= SB_I_VERSION;

	error = xfs_fs_validate_params(new_mp);
	if (error)
		return error;

	sync_filesystem(mp->m_super);

	/* inode32 -> inode64 */
	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
	    !(new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
		mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
	}

	/* inode64 -> inode32 */
	if (!(mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
	    (new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
		mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
	}

	/* ro -> rw */
	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(flags & SB_RDONLY)) {
		error = xfs_remount_rw(mp);
		if (error)
			return error;
	}

	/* rw -> ro */
	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (flags & SB_RDONLY)) {
		error = xfs_remount_ro(mp);
		if (error)
			return error;
	}

	return 0;
}

static void xfs_fs_free(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = fc->s_fs_info;

	/*
	 * mp is stored in the fs_context when it is initialized.
	 * mp is transferred to the superblock on a successful mount,
	 * but if an error occurs before the transfer we have to free
	 * it here.
	 */
	if (mp)
		xfs_mount_free(mp);
}

static const struct fs_context_operations xfs_context_ops = {
	.parse_param = xfs_fs_parse_param,
	.get_tree    = xfs_fs_get_tree,
	.reconfigure = xfs_fs_reconfigure,
	.free        = xfs_fs_free,
};

static int xfs_init_fs_context(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp;

	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
	if (!mp)
		return -ENOMEM;

	spin_lock_init(&mp->m_sb_lock);
	spin_lock_init(&mp->m_agirotor_lock);
	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
	spin_lock_init(&mp->m_perag_lock);
	mutex_init(&mp->m_growlock);
	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
	mp->m_kobj.kobject.kset = xfs_kset;
	/*
	 * We don't create the finobt per-ag space reservation until after log
	 * recovery, so we must set this to true so that an ifree transaction
	 * started during log recovery will not depend on space reservations
	 * for finobt expansion.
	 */
	mp->m_finobt_nores = true;

	/*
	 * These can be overridden by the mount option parsing.
	 */
	mp->m_logbufs = -1;
	mp->m_logbsize = -1;
	mp->m_allocsize_log = 16; /* 64k */

	/*
	 * Copy binary VFS mount flags we are interested in.
	 */
	if (fc->sb_flags & SB_RDONLY)
		mp->m_flags |= XFS_MOUNT_RDONLY;
	if (fc->sb_flags & SB_DIRSYNC)
		mp->m_flags |= XFS_MOUNT_DIRSYNC;
	if (fc->sb_flags & SB_SYNCHRONOUS)
		mp->m_flags |= XFS_MOUNT_WSYNC;

	fc->s_fs_info = mp;
	fc->ops = &xfs_context_ops;

	return 0;
}

static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.init_fs_context	= xfs_init_fs_context,
	.parameters		= xfs_fs_parameters,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("xfs");

STATIC int __init
xfs_init_zones(void)
{
	xfs_log_ticket_zone = kmem_cache_create("xfs_log_ticket",
						sizeof(struct xlog_ticket),
						0, 0, NULL);
	if (!xfs_log_ticket_zone)
		goto out;

	xfs_bmap_free_item_zone = kmem_cache_create("xfs_bmap_free_item",
					sizeof(struct xfs_extent_free_item),
					0, 0, NULL);
	if (!xfs_bmap_free_item_zone)
		goto out_destroy_log_ticket_zone;

	xfs_btree_cur_zone = kmem_cache_create("xfs_btree_cur",
					       sizeof(struct xfs_btree_cur),
					       0, 0, NULL);
	if (!xfs_btree_cur_zone)
		goto out_destroy_bmap_free_item_zone;

	xfs_da_state_zone = kmem_cache_create("xfs_da_state",
					      sizeof(struct xfs_da_state),
					      0, 0, NULL);
	if (!xfs_da_state_zone)
		goto out_destroy_btree_cur_zone;

	xfs_ifork_zone = kmem_cache_create("xfs_ifork",
					   sizeof(struct xfs_ifork),
					   0, 0, NULL);
	if (!xfs_ifork_zone)
		goto out_destroy_da_state_zone;

	xfs_trans_zone = kmem_cache_create("xfs_trans",
					   sizeof(struct xfs_trans),
					   0, 0, NULL);
	if (!xfs_trans_zone)
		goto out_destroy_ifork_zone;


	/*
	 * The size of the zone allocated buf log item is the maximum
	 * size possible under XFS. This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_zone = kmem_cache_create("xfs_buf_item",
					      sizeof(struct xfs_buf_log_item),
					      0, 0, NULL);
	if (!xfs_buf_item_zone)
		goto out_destroy_trans_zone;

	xfs_efd_zone = kmem_cache_create("xfs_efd_item",
					(sizeof(struct xfs_efd_log_item) +
					(XFS_EFD_MAX_FAST_EXTENTS - 1) *
					sizeof(struct xfs_extent)),
					0, 0, NULL);
	if (!xfs_efd_zone)
		goto out_destroy_buf_item_zone;

	xfs_efi_zone = kmem_cache_create("xfs_efi_item",
					 (sizeof(struct xfs_efi_log_item) +
					 (XFS_EFI_MAX_FAST_EXTENTS - 1) *
					 sizeof(struct xfs_extent)),
					 0, 0, NULL);
	if (!xfs_efi_zone)
		goto out_destroy_efd_zone;

	xfs_inode_zone = kmem_cache_create("xfs_inode",
					   sizeof(struct xfs_inode), 0,
					   (SLAB_HWCACHE_ALIGN |
					    SLAB_RECLAIM_ACCOUNT |
					    SLAB_MEM_SPREAD | SLAB_ACCOUNT),
					   xfs_fs_inode_init_once);
	if (!xfs_inode_zone)
		goto out_destroy_efi_zone;

	xfs_ili_zone = kmem_cache_create("xfs_ili",
					 sizeof(struct xfs_inode_log_item), 0,
					 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					 NULL);
	if (!xfs_ili_zone)
		goto out_destroy_inode_zone;

	xfs_icreate_zone = kmem_cache_create("xfs_icr",
					     sizeof(struct xfs_icreate_item),
					     0, 0, NULL);
	if (!xfs_icreate_zone)
		goto out_destroy_ili_zone;

	xfs_rud_zone = kmem_cache_create("xfs_rud_item",
					 sizeof(struct xfs_rud_log_item),
					 0, 0, NULL);
	if (!xfs_rud_zone)
		goto out_destroy_icreate_zone;

	xfs_rui_zone = kmem_cache_create("xfs_rui_item",
			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_rui_zone)
		goto out_destroy_rud_zone;

	xfs_cud_zone = kmem_cache_create("xfs_cud_item",
					 sizeof(struct xfs_cud_log_item),
					 0, 0, NULL);
	if (!xfs_cud_zone)
		goto out_destroy_rui_zone;

	xfs_cui_zone = kmem_cache_create("xfs_cui_item",
			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_cui_zone)
		goto out_destroy_cud_zone;

	xfs_bud_zone = kmem_cache_create("xfs_bud_item",
					 sizeof(struct xfs_bud_log_item),
					 0, 0, NULL);
	if (!xfs_bud_zone)
		goto out_destroy_cui_zone;

	xfs_bui_zone = kmem_cache_create("xfs_bui_item",
			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_bui_zone)
		goto out_destroy_bud_zone;

	return 0;

 out_destroy_bud_zone:
	kmem_cache_destroy(xfs_bud_zone);
 out_destroy_cui_zone:
	kmem_cache_destroy(xfs_cui_zone);
 out_destroy_cud_zone:
	kmem_cache_destroy(xfs_cud_zone);
 out_destroy_rui_zone:
	kmem_cache_destroy(xfs_rui_zone);
 out_destroy_rud_zone:
	kmem_cache_destroy(xfs_rud_zone);
 out_destroy_icreate_zone:
	kmem_cache_destroy(xfs_icreate_zone);
 out_destroy_ili_zone:
	kmem_cache_destroy(xfs_ili_zone);
 out_destroy_inode_zone:
	kmem_cache_destroy(xfs_inode_zone);
 out_destroy_efi_zone:
	kmem_cache_destroy(xfs_efi_zone);
 out_destroy_efd_zone:
	kmem_cache_destroy(xfs_efd_zone);
 out_destroy_buf_item_zone:
	kmem_cache_destroy(xfs_buf_item_zone);
 out_destroy_trans_zone:
	kmem_cache_destroy(xfs_trans_zone);
 out_destroy_ifork_zone:
	kmem_cache_destroy(xfs_ifork_zone);
 out_destroy_da_state_zone:
	kmem_cache_destroy(xfs_da_state_zone);
 out_destroy_btree_cur_zone:
	kmem_cache_destroy(xfs_btree_cur_zone);
 out_destroy_bmap_free_item_zone:
STATIC void
xfs_destroy_zones(void)
{
	/*
	 * Make sure all delayed RCU frees are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(xfs_bui_zone);
	kmem_cache_destroy(xfs_bud_zone);
	kmem_cache_destroy(xfs_cui_zone);
	kmem_cache_destroy(xfs_cud_zone);
	kmem_cache_destroy(xfs_rui_zone);
	kmem_cache_destroy(xfs_rud_zone);
	kmem_cache_destroy(xfs_icreate_zone);
	kmem_cache_destroy(xfs_ili_zone);
	kmem_cache_destroy(xfs_inode_zone);
	kmem_cache_destroy(xfs_efi_zone);
	kmem_cache_destroy(xfs_efd_zone);
	kmem_cache_destroy(xfs_buf_item_zone);
	kmem_cache_destroy(xfs_trans_zone);
	kmem_cache_destroy(xfs_ifork_zone);
	kmem_cache_destroy(xfs_da_state_zone);
	kmem_cache_destroy(xfs_btree_cur_zone);
	kmem_cache_destroy(xfs_bmap_free_item_zone);
	kmem_cache_destroy(xfs_log_ticket_zone);
}

STATIC int __init
xfs_init_workqueues(void)
{
	/*
	 * The allocation workqueue can be used in memory reclaim situations
	 * (writepage path), and parallelism is only limited by the number of
	 * AGs in all the filesystems mounted. Hence use the default large
	 * max_active value for this workqueue.
	 */
	xfs_alloc_wq = alloc_workqueue("xfsalloc",
			XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
	if (!xfs_alloc_wq)
		return -ENOMEM;

	xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
			0);
	if (!xfs_discard_wq)
		goto out_free_alloc_wq;

	return 0;
out_free_alloc_wq:
	destroy_workqueue(xfs_alloc_wq);
	return -ENOMEM;
}

STATIC void
xfs_destroy_workqueues(void)
{
	destroy_workqueue(xfs_discard_wq);
	destroy_workqueue(xfs_alloc_wq);
}

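/*
 * Module init: bring up each global subsystem in dependency order (caches,
 * workqueues, MRU cache, buffer cache, procfs, sysctl, sysfs kobjects and
 * stats, quota), and only register the "xfs" filesystem type once all of
 * them are in place. On any failure, the goto labels below unwind the steps
 * already completed, in reverse order.
 */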
STATIC int __init
init_xfs_fs(void)
{
	int			error;

	xfs_check_ondisk_structs();

	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");

	xfs_dir_startup();

	error = xfs_init_zones();
	if (error)
		goto out;

	error = xfs_init_workqueues();
	if (error)
		goto out_destroy_zones;

	error = xfs_mru_cache_init();
	if (error)
		goto out_destroy_wq;

	error = xfs_buf_init();
	if (error)
		goto out_mru_cache_uninit;

	error = xfs_init_procfs();
	if (error)
		goto out_buf_terminate;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;

	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
	if (!xfs_kset) {
		error = -ENOMEM;
		goto out_sysctl_unregister;
	}

	xfsstats.xs_kobj.kobject.kset = xfs_kset;

	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
	if (!xfsstats.xs_stats) {
		error = -ENOMEM;
		goto out_kset_unregister;
	}

	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
			       "stats");
	if (error)
		goto out_free_stats;

#ifdef DEBUG
	xfs_dbg_kobj.kobject.kset = xfs_kset;
	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
	if (error)
		goto out_remove_stats_kobj;
#endif

	error = xfs_qm_init();
	if (error)
		goto out_remove_dbg_kobj;

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto out_qm_exit;
	return 0;

 out_qm_exit:
	xfs_qm_exit();
 out_remove_dbg_kobj:
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
 out_remove_stats_kobj:
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
 out_free_stats:
	free_percpu(xfsstats.xs_stats);
 out_kset_unregister:
	kset_unregister(xfs_kset);
 out_sysctl_unregister:
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_buf_terminate:
	xfs_buf_terminate();
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
 out_destroy_wq:
	xfs_destroy_workqueues();
 out_destroy_zones:
	xfs_destroy_zones();
 out:
	return error;
}

STATIC void __exit
exit_xfs_fs(void)
{
	xfs_qm_exit();
	unregister_filesystem(&xfs_fs_type);
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
	free_percpu(xfsstats.xs_stats);
	kset_unregister(xfs_kset);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_buf_terminate();
	xfs_mru_cache_uninit();
	xfs_destroy_workqueues();
	xfs_destroy_zones();
	xfs_uuid_table_free();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");
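
/*
 * A minimal usage sketch: once this module is loaded, "xfs" is a registered
 * filesystem type and the new mount API routes option strings through
 * xfs_fs_parameters, so a device holding an XFS filesystem can be mounted
 * with something like:
 *
 *	# mount -t xfs -o logbufs=8,dax=never /dev/sdb1 /mnt
 *
 * The device and mount point names above are hypothetical examples only.
 */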