/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_quota.h"
#include "xfs_fsops.h"

STATIC void	xfs_mount_log_sbunit(xfs_mount_t *, __int64_t);
STATIC int	xfs_uuid_mount(xfs_mount_t *);
STATIC void	xfs_uuid_unmount(xfs_mount_t *mp);
STATIC void	xfs_unmountfs_wait(xfs_mount_t *);


#ifdef HAVE_PERCPU_SB
STATIC void	xfs_icsb_destroy_counters(xfs_mount_t *);
STATIC void	xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t, int);
STATIC void	xfs_icsb_sync_counters(xfs_mount_t *);
STATIC int	xfs_icsb_modify_counters(xfs_mount_t *, xfs_sb_field_t,
						int, int);
STATIC int	xfs_icsb_modify_counters_locked(xfs_mount_t *, xfs_sb_field_t,
						int, int);
STATIC int	xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);

#else

#define xfs_icsb_destroy_counters(mp)			do { } while (0)
#define xfs_icsb_balance_counter(mp, a, b)		do { } while (0)
#define xfs_icsb_sync_counters(mp)			do { } while (0)
#define xfs_icsb_modify_counters(mp, a, b, c)		do { } while (0)
#define xfs_icsb_modify_counters_locked(mp, a, b, c)	do { } while (0)

#endif

static const struct {
	short offset;
	short type;	/* 0 = integer
			 * 1 = binary / string (no translation)
			 */
} xfs_sb_info[] = {
	{ offsetof(xfs_sb_t, sb_magicnum),	0 },
	{ offsetof(xfs_sb_t, sb_blocksize),	0 },
	{ offsetof(xfs_sb_t, sb_dblocks),	0 },
	{ offsetof(xfs_sb_t, sb_rblocks),	0 },
	{ offsetof(xfs_sb_t, sb_rextents),	0 },
	{ offsetof(xfs_sb_t, sb_uuid),		1 },
	{ offsetof(xfs_sb_t, sb_logstart),	0 },
	{ offsetof(xfs_sb_t, sb_rootino),	0 },
	{ offsetof(xfs_sb_t, sb_rbmino),	0 },
	{ offsetof(xfs_sb_t, sb_rsumino),	0 },
	{ offsetof(xfs_sb_t, sb_rextsize),	0 },
	{ offsetof(xfs_sb_t, sb_agblocks),	0 },
	{ offsetof(xfs_sb_t, sb_agcount),	0 },
	{ offsetof(xfs_sb_t, sb_rbmblocks),	0 },
	{ offsetof(xfs_sb_t, sb_logblocks),	0 },
	{ offsetof(xfs_sb_t, sb_versionnum),	0 },
	{ offsetof(xfs_sb_t, sb_sectsize),	0 },
	{ offsetof(xfs_sb_t, sb_inodesize),	0 },
	{ offsetof(xfs_sb_t, sb_inopblock),	0 },
	{ offsetof(xfs_sb_t, sb_fname[0]),	1 },
	{ offsetof(xfs_sb_t, sb_blocklog),	0 },
	{ offsetof(xfs_sb_t, sb_sectlog),	0 },
	{ offsetof(xfs_sb_t, sb_inodelog),	0 },
	{ offsetof(xfs_sb_t, sb_inopblog),	0 },
	{ offsetof(xfs_sb_t, sb_agblklog),	0 },
	{ offsetof(xfs_sb_t, sb_rextslog),	0 },
	{ offsetof(xfs_sb_t, sb_inprogress),	0 },
	{ offsetof(xfs_sb_t, sb_imax_pct),	0 },
	{ offsetof(xfs_sb_t, sb_icount),	0 },
	{ offsetof(xfs_sb_t, sb_ifree),		0 },
	{ offsetof(xfs_sb_t, sb_fdblocks),	0 },
	{ offsetof(xfs_sb_t, sb_frextents),	0 },
	{ offsetof(xfs_sb_t, sb_uquotino),	0 },
	{ offsetof(xfs_sb_t, sb_gquotino),	0 },
	{ offsetof(xfs_sb_t, sb_qflags),	0 },
	{ offsetof(xfs_sb_t, sb_flags),		0 },
	{ offsetof(xfs_sb_t, sb_shared_vn),	0 },
	{ offsetof(xfs_sb_t, sb_inoalignmt),	0 },
	{ offsetof(xfs_sb_t, sb_unit),		0 },
	{ offsetof(xfs_sb_t, sb_width),		0 },
	{ offsetof(xfs_sb_t, sb_dirblklog),	0 },
	{ offsetof(xfs_sb_t, sb_logsectlog),	0 },
	{ offsetof(xfs_sb_t, sb_logsectsize),	0 },
	{ offsetof(xfs_sb_t, sb_logsunit),	0 },
	{ offsetof(xfs_sb_t, sb_features2),	0 },
	{ sizeof(xfs_sb_t),			0 }
};
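/*
 * Note on xfs_sb_info[]: xfs_xlatesb() derives the on-disk size of each
 * superblock field from the difference between consecutive offsets in
 * this table, which is why the entries must stay in declaration order
 * and why the table ends with a { sizeof(xfs_sb_t), 0 } sentinel.  A
 * sketch of the idea (illustrative only):
 *
 *	f     = XFS_SBS_BLOCKSIZE;
 *	first = xfs_sb_info[f].offset;			offset of sb_blocksize
 *	size  = xfs_sb_info[f + 1].offset - first;	4 bytes (__uint32_t)
 */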
/*
 * Return a pointer to an initialized xfs_mount structure.
 */
xfs_mount_t *
xfs_mount_init(void)
{
	xfs_mount_t *mp;

	mp = kmem_zalloc(sizeof(xfs_mount_t), KM_SLEEP);

	if (xfs_icsb_init_counters(mp)) {
		mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB;
	}

	AIL_LOCKINIT(&mp->m_ail_lock, "xfs_ail");
	spinlock_init(&mp->m_sb_lock, "xfs_sb");
	mutex_init(&mp->m_ilock);
	initnsema(&mp->m_growlock, 1, "xfs_grow");
	/*
	 * Initialize the AIL.
	 */
	xfs_trans_ail_init(mp);

	atomic_set(&mp->m_active_trans, 0);

	return mp;
}

/*
 * Free up the resources associated with a mount structure.  Assume that
 * the structure was initially zeroed, so we can tell which fields got
 * initialized.
 */
void
xfs_mount_free(
	xfs_mount_t	*mp,
	int		remove_bhv)
{
	if (mp->m_ihash)
		xfs_ihash_free(mp);
	if (mp->m_chash)
		xfs_chash_free(mp);

	if (mp->m_perag) {
		int	agno;

		for (agno = 0; agno < mp->m_maxagi; agno++)
			if (mp->m_perag[agno].pagb_list)
				kmem_free(mp->m_perag[agno].pagb_list,
					  sizeof(xfs_perag_busy_t) *
						XFS_PAGB_NUM_SLOTS);
		kmem_free(mp->m_perag,
			  sizeof(xfs_perag_t) * mp->m_sb.sb_agcount);
	}

	AIL_LOCK_DESTROY(&mp->m_ail_lock);
	spinlock_destroy(&mp->m_sb_lock);
	mutex_destroy(&mp->m_ilock);
	freesema(&mp->m_growlock);
	if (mp->m_quotainfo)
		XFS_QM_DONE(mp);

	if (mp->m_fsname != NULL)
		kmem_free(mp->m_fsname, mp->m_fsname_len);
	if (mp->m_rtname != NULL)
		kmem_free(mp->m_rtname, strlen(mp->m_rtname) + 1);
	if (mp->m_logname != NULL)
		kmem_free(mp->m_logname, strlen(mp->m_logname) + 1);

	if (remove_bhv) {
		struct vfs	*vfsp = XFS_MTOVFS(mp);

		bhv_remove_all_vfsops(vfsp, 0);
		VFS_REMOVEBHV(vfsp, &mp->m_bhv);
	}

	xfs_icsb_destroy_counters(mp);
	kmem_free(mp, sizeof(xfs_mount_t));
}
/*
 * Check the validity of the SB found.
 */
STATIC int
xfs_mount_validate_sb(
	xfs_mount_t	*mp,
	xfs_sb_t	*sbp)
{
	/*
	 * If the log device and data device have the
	 * same device number, the log is internal.
	 * Consequently, the sb_logstart should be non-zero.  If
	 * we have a zero sb_logstart in this case, we may be trying to mount
	 * a volume filesystem in a non-volume manner.
	 */
	if (sbp->sb_magicnum != XFS_SB_MAGIC) {
		cmn_err(CE_WARN, "XFS: bad magic number");
		return XFS_ERROR(EWRONGFS);
	}

	if (!XFS_SB_GOOD_VERSION(sbp)) {
		cmn_err(CE_WARN, "XFS: bad version");
		return XFS_ERROR(EWRONGFS);
	}

	if (unlikely(
	    sbp->sb_logstart == 0 && mp->m_logdev_targp == mp->m_ddev_targp)) {
		cmn_err(CE_WARN,
			"XFS: filesystem is marked as having an external log; "
			"specify logdev on the\nmount command line.");
		XFS_CORRUPTION_ERROR("xfs_mount_validate_sb(1)",
				     XFS_ERRLEVEL_HIGH, mp, sbp);
		return XFS_ERROR(EFSCORRUPTED);
	}

	if (unlikely(
	    sbp->sb_logstart != 0 && mp->m_logdev_targp != mp->m_ddev_targp)) {
		cmn_err(CE_WARN,
			"XFS: filesystem is marked as having an internal log; "
			"don't specify logdev on\nthe mount command line.");
		XFS_CORRUPTION_ERROR("xfs_mount_validate_sb(2)",
				     XFS_ERRLEVEL_HIGH, mp, sbp);
		return XFS_ERROR(EFSCORRUPTED);
	}

	/*
	 * More sanity checking.  These were stolen directly from
	 * xfs_repair.
	 */
	if (unlikely(
	    sbp->sb_agcount <= 0					||
	    sbp->sb_sectsize < XFS_MIN_SECTORSIZE			||
	    sbp->sb_sectsize > XFS_MAX_SECTORSIZE			||
	    sbp->sb_sectlog < XFS_MIN_SECTORSIZE_LOG			||
	    sbp->sb_sectlog > XFS_MAX_SECTORSIZE_LOG			||
	    sbp->sb_blocksize < XFS_MIN_BLOCKSIZE			||
	    sbp->sb_blocksize > XFS_MAX_BLOCKSIZE			||
	    sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG			||
	    sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG			||
	    sbp->sb_inodesize < XFS_DINODE_MIN_SIZE			||
	    sbp->sb_inodesize > XFS_DINODE_MAX_SIZE			||
	    sbp->sb_inodelog < XFS_DINODE_MIN_LOG			||
	    sbp->sb_inodelog > XFS_DINODE_MAX_LOG			||
	    (sbp->sb_blocklog - sbp->sb_inodelog != sbp->sb_inopblog)	||
	    (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE)	||
	    (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE)	||
	    (sbp->sb_imax_pct > 100 || sbp->sb_imax_pct < 1))) {
		cmn_err(CE_WARN, "XFS: SB sanity check 1 failed");
		XFS_CORRUPTION_ERROR("xfs_mount_validate_sb(3)",
				     XFS_ERRLEVEL_LOW, mp, sbp);
		return XFS_ERROR(EFSCORRUPTED);
	}

	/*
	 * Sanity check AG count, size fields against data size field
	 */
	if (unlikely(
	    sbp->sb_dblocks == 0 ||
	    sbp->sb_dblocks >
	     (xfs_drfsbno_t)sbp->sb_agcount * sbp->sb_agblocks ||
	    sbp->sb_dblocks < (xfs_drfsbno_t)(sbp->sb_agcount - 1) *
			      sbp->sb_agblocks + XFS_MIN_AG_BLOCKS)) {
		cmn_err(CE_WARN, "XFS: SB sanity check 2 failed");
		XFS_ERROR_REPORT("xfs_mount_validate_sb(4)",
				 XFS_ERRLEVEL_LOW, mp);
		return XFS_ERROR(EFSCORRUPTED);
	}

	ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
	ASSERT(sbp->sb_blocklog >= BBSHIFT);

#if XFS_BIG_BLKNOS     /* Limited by ULONG_MAX of page cache index */
	if (unlikely(
	    (sbp->sb_dblocks >> (PAGE_SHIFT - sbp->sb_blocklog)) > ULONG_MAX ||
	    (sbp->sb_rblocks >> (PAGE_SHIFT - sbp->sb_blocklog)) > ULONG_MAX)) {
#else                  /* Limited by UINT_MAX of sectors */
	if (unlikely(
	    (sbp->sb_dblocks << (sbp->sb_blocklog - BBSHIFT)) > UINT_MAX ||
	    (sbp->sb_rblocks << (sbp->sb_blocklog - BBSHIFT)) > UINT_MAX)) {
#endif
		cmn_err(CE_WARN,
	"XFS: File system is too large to be mounted on this system.");
		return XFS_ERROR(E2BIG);
	}

	if (unlikely(sbp->sb_inprogress)) {
		cmn_err(CE_WARN, "XFS: file system busy");
		XFS_ERROR_REPORT("xfs_mount_validate_sb(5)",
				 XFS_ERRLEVEL_LOW, mp);
		return XFS_ERROR(EFSCORRUPTED);
	}

	/*
	 * Version 1 directory format has never worked on Linux.
	 */
	if (unlikely(!XFS_SB_VERSION_HASDIRV2(sbp))) {
		cmn_err(CE_WARN,
	"XFS: Attempted to mount file system using version 1 directory format");
		return XFS_ERROR(ENOSYS);
	}

	/*
	 * Until this is fixed only page-sized or smaller data blocks work.
	 */
	if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) {
		cmn_err(CE_WARN,
		"XFS: Attempted to mount file system with blocksize %d bytes",
			sbp->sb_blocksize);
		cmn_err(CE_WARN,
		"XFS: Only page-sized (%ld) or less blocksizes currently work.",
			PAGE_SIZE);
		return XFS_ERROR(ENOSYS);
	}

	return 0;
}
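/*
 * An XFS inode number encodes the AG number, the block within the AG,
 * and the inode's offset within that block.  xfs_initialize_perag()
 * below therefore computes the largest inode number the geometry can
 * produce (the last inode of the last block of the last AG) and
 * compares it against XFS_MAXINUMBER_32 to decide whether inode
 * numbers can exceed 32 bits.  A rough sketch of the idea:
 *
 *	agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
 *	ino   = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
 *	if (ino > XFS_MAXINUMBER_32)
 *		only the low-numbered AGs may hold inodes
 */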
xfs_agnumber_t
xfs_initialize_perag(
	struct vfs	*vfs,
	xfs_mount_t	*mp,
	xfs_agnumber_t	agcount)
{
	xfs_agnumber_t	index, max_metadata;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;
	xfs_ino_t	ino;
	xfs_sb_t	*sbp = &mp->m_sb;
	xfs_ino_t	max_inum = XFS_MAXINUMBER_32;

	/* Check to see if the filesystem can overflow 32 bit inodes */
	agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/* Clear the mount flag if no inode can overflow 32 bits
	 * on this filesystem, or if specifically requested..
	 */
	if ((vfs->vfs_flag & VFS_32BITINODES) && ino > max_inum) {
		mp->m_flags |= XFS_MOUNT_32BITINODES;
	} else {
		mp->m_flags &= ~XFS_MOUNT_32BITINODES;
	}

	/* If we can overflow then setup the ag headers accordingly */
	if (mp->m_flags & XFS_MOUNT_32BITINODES) {
		/* Calculate how much should be reserved for inodes to
		 * meet the max inode percentage.
		 */
		if (mp->m_maxicount) {
			__uint64_t	icount;

			icount = sbp->sb_dblocks * sbp->sb_imax_pct;
			do_div(icount, 100);
			icount += sbp->sb_agblocks - 1;
			do_div(icount, sbp->sb_agblocks);
			max_metadata = icount;
		} else {
			max_metadata = agcount;
		}
		for (index = 0; index < agcount; index++) {
			ino = XFS_AGINO_TO_INO(mp, index, agino);
			if (ino > max_inum) {
				index++;
				break;
			}

			/* This ag is preferred for inodes */
			pag = &mp->m_perag[index];
			pag->pagi_inodeok = 1;
			if (index < max_metadata)
				pag->pagf_metadata = 1;
		}
	} else {
		/* Setup default behavior for smaller filesystems */
		for (index = 0; index < agcount; index++) {
			pag = &mp->m_perag[index];
			pag->pagi_inodeok = 1;
		}
	}
	return index;
}
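/*
 * Usage sketch for xfs_xlatesb() below (both forms appear later in this
 * file): a full disk-to-core translation at mount time, and a sparse
 * core-to-disk translation when logging superblock changes, e.g.
 *
 *	xfs_xlatesb(XFS_BUF_PTR(bp), &mp->m_sb, 1, XFS_SB_ALL_BITS);
 *	xfs_xlatesb(XFS_BUF_PTR(bp), &mp->m_sb, -1, XFS_SB_UNIT|XFS_SB_WIDTH);
 *
 * Only the fields whose bits are set in the mask are copied/translated;
 * the loop peels off the lowest set bit each iteration via xfs_lowbit64().
 */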
/*
 * xfs_xlatesb
 *
 *     data       - on disk version of sb
 *     sb         - a superblock
 *     dir        - conversion direction: <0 - convert sb to buf
 *                                        >0 - convert buf to sb
 *     fields     - which fields to copy (bitmask)
 */
void
xfs_xlatesb(
	void		*data,
	xfs_sb_t	*sb,
	int		dir,
	__int64_t	fields)
{
	xfs_caddr_t	buf_ptr;
	xfs_caddr_t	mem_ptr;
	xfs_sb_field_t	f;
	int		first;
	int		size;

	ASSERT(dir);
	ASSERT(fields);

	if (!fields)
		return;

	buf_ptr = (xfs_caddr_t)data;
	mem_ptr = (xfs_caddr_t)sb;

	while (fields) {
		f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
		first = xfs_sb_info[f].offset;
		size = xfs_sb_info[f + 1].offset - first;

		ASSERT(xfs_sb_info[f].type == 0 || xfs_sb_info[f].type == 1);

		if (size == 1 || xfs_sb_info[f].type == 1) {
			if (dir > 0) {
				memcpy(mem_ptr + first, buf_ptr + first, size);
			} else {
				memcpy(buf_ptr + first, mem_ptr + first, size);
			}
		} else {
			switch (size) {
			case 2:
				INT_XLATE(*(__uint16_t*)(buf_ptr+first),
					  *(__uint16_t*)(mem_ptr+first),
					  dir, ARCH_CONVERT);
				break;
			case 4:
				INT_XLATE(*(__uint32_t*)(buf_ptr+first),
					  *(__uint32_t*)(mem_ptr+first),
					  dir, ARCH_CONVERT);
				break;
			case 8:
				INT_XLATE(*(__uint64_t*)(buf_ptr+first),
					  *(__uint64_t*)(mem_ptr+first),
					  dir, ARCH_CONVERT);
				break;
			default:
				ASSERT(0);
			}
		}

		fields &= ~(1LL << f);
	}
}

/*
 * xfs_readsb
 *
 * Does the initial read of the superblock.
 */
int
xfs_readsb(xfs_mount_t *mp)
{
	unsigned int	sector_size;
	unsigned int	extra_flags;
	xfs_buf_t	*bp;
	xfs_sb_t	*sbp;
	int		error;

	ASSERT(mp->m_sb_bp == NULL);
	ASSERT(mp->m_ddev_targp != NULL);

	/*
	 * Allocate a (locked) buffer to hold the superblock.
	 * This will be kept around at all times to optimize
	 * access to the superblock.
	 */
	sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
	extra_flags = XFS_BUF_LOCK | XFS_BUF_MANAGE | XFS_BUF_MAPPED;

	bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR,
				BTOBB(sector_size), extra_flags);
	if (!bp || XFS_BUF_ISERROR(bp)) {
		cmn_err(CE_WARN, "XFS: SB read failed");
		error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM;
		goto fail;
	}
	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);

	/*
	 * Initialize the mount structure from the superblock.
	 * But first do some basic consistency checking.
	 */
	sbp = XFS_BUF_TO_SBP(bp);
	xfs_xlatesb(XFS_BUF_PTR(bp), &(mp->m_sb), 1, XFS_SB_ALL_BITS);

	error = xfs_mount_validate_sb(mp, &(mp->m_sb));
	if (error) {
		cmn_err(CE_WARN, "XFS: SB validate failed");
		goto fail;
	}

	/*
	 * We must be able to do sector-sized and sector-aligned IO.
	 */
	if (sector_size > mp->m_sb.sb_sectsize) {
		cmn_err(CE_WARN,
			"XFS: device supports only %u byte sectors (not %u)",
			sector_size, mp->m_sb.sb_sectsize);
		error = ENOSYS;
		goto fail;
	}

	/*
	 * If device sector size is smaller than the superblock size,
	 * re-read the superblock so the buffer is correctly sized.
	 */
	if (sector_size < mp->m_sb.sb_sectsize) {
		XFS_BUF_UNMANAGE(bp);
		xfs_buf_relse(bp);
		sector_size = mp->m_sb.sb_sectsize;
		bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR,
					BTOBB(sector_size), extra_flags);
		if (!bp || XFS_BUF_ISERROR(bp)) {
			cmn_err(CE_WARN, "XFS: SB re-read failed");
			error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM;
			goto fail;
		}
		ASSERT(XFS_BUF_ISBUSY(bp));
		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
	}

	xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
	xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
	xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);

	mp->m_sb_bp = bp;
	xfs_buf_relse(bp);
	ASSERT(XFS_BUF_VALUSEMA(bp) > 0);
	return 0;

fail:
	if (bp) {
		XFS_BUF_UNMANAGE(bp);
		xfs_buf_relse(bp);
	}
	return error;
}
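/*
 * Worked example for the shift values computed in xfs_mount_common()
 * below (numbers are illustrative, not a requirement): with 4 KB
 * filesystem blocks, sb_blocklog = 12, so
 *
 *	m_blkbit_log = 12 + XFS_NBBYLOG = 15	(bits per block)
 *	m_blkbb_log  = 12 - BBSHIFT     = 3	(8 basic blocks per block)
 *
 * i.e. shifting a block count left by m_blkbb_log converts it to
 * 512-byte basic blocks.
 */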
/*
 * xfs_mount_common
 *
 * Mount initialization code establishing various mount
 * fields from the superblock associated with the given
 * mount structure
 */
STATIC void
xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
{
	int	i;

	mp->m_agfrotor = mp->m_agirotor = 0;
	spinlock_init(&mp->m_agirotor_lock, "m_agirotor_lock");
	mp->m_maxagi = mp->m_sb.sb_agcount;
	mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG;
	mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT;
	mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT;
	mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1;
	mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
	mp->m_litino = sbp->sb_inodesize -
		((uint)sizeof(xfs_dinode_core_t) + (uint)sizeof(xfs_agino_t));
	mp->m_blockmask = sbp->sb_blocksize - 1;
	mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG;
	mp->m_blockwmask = mp->m_blockwsize - 1;
	INIT_LIST_HEAD(&mp->m_del_inodes);

	/*
	 * Setup for attributes, in case they get created.
	 * This value is for inodes getting attributes for the first time,
	 * the per-inode value is for old attribute values.
	 */
	ASSERT(sbp->sb_inodesize >= 256 && sbp->sb_inodesize <= 2048);
	switch (sbp->sb_inodesize) {
	case 256:
		mp->m_attroffset = XFS_LITINO(mp) -
				   XFS_BMDR_SPACE_CALC(MINABTPTRS);
		break;
	case 512:
	case 1024:
	case 2048:
		mp->m_attroffset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
		break;
	default:
		ASSERT(0);
	}
	ASSERT(mp->m_attroffset < XFS_LITINO(mp));

	for (i = 0; i < 2; i++) {
		mp->m_alloc_mxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize,
			xfs_alloc, i == 0);
		mp->m_alloc_mnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize,
			xfs_alloc, i == 0);
	}
	for (i = 0; i < 2; i++) {
		mp->m_bmap_dmxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize,
			xfs_bmbt, i == 0);
		mp->m_bmap_dmnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize,
			xfs_bmbt, i == 0);
	}
	for (i = 0; i < 2; i++) {
		mp->m_inobt_mxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize,
			xfs_inobt, i == 0);
		mp->m_inobt_mnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize,
			xfs_inobt, i == 0);
	}

	mp->m_bsize = XFS_FSB_TO_BB(mp, 1);
	mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK,
					sbp->sb_inopblock);
	mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog;
}
/*
 * xfs_mountfs
 *
 * This function does the following on an initial mount of a file system:
 *	- reads the superblock from disk and init the mount struct
 *	- if we're a 32-bit kernel, do a size check on the superblock
 *	  so we don't mount terabyte filesystems
 *	- init mount struct realtime fields
 *	- allocate inode hash table for fs
 *	- init directory manager
 *	- perform recovery and init the log manager
 */
int
xfs_mountfs(
	vfs_t		*vfsp,
	xfs_mount_t	*mp,
	int		mfsi_flags)
{
	xfs_buf_t	*bp;
	xfs_sb_t	*sbp = &(mp->m_sb);
	xfs_inode_t	*rip;
	vnode_t		*rvp = NULL;
	int		readio_log, writeio_log;
	xfs_daddr_t	d;
	__uint64_t	ret64;
	__int64_t	update_flags;
	uint		quotamount, quotaflags;
	int		agno;
	int		uuid_mounted = 0;
	int		error = 0;

	if (mp->m_sb_bp == NULL) {
		if ((error = xfs_readsb(mp))) {
			return error;
		}
	}
	xfs_mount_common(mp, sbp);

	/*
	 * Check if sb_agblocks is aligned at stripe boundary
	 * If sb_agblocks is NOT aligned turn off m_dalign since
	 * allocator alignment is within an ag, therefore ag has
	 * to be aligned at stripe boundary.
	 */
	update_flags = 0LL;
	if (mp->m_dalign && !(mfsi_flags & XFS_MFSI_SECOND)) {
		/*
		 * If stripe unit and stripe width are not multiples
		 * of the fs blocksize turn off alignment.
		 */
		if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
		    (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
			if (mp->m_flags & XFS_MOUNT_RETERR) {
				cmn_err(CE_WARN,
					"XFS: alignment check 1 failed");
				error = XFS_ERROR(EINVAL);
				goto error1;
			}
			mp->m_dalign = mp->m_swidth = 0;
		} else {
			/*
			 * Convert the stripe unit and width to FSBs.
			 */
			mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
			if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) {
				if (mp->m_flags & XFS_MOUNT_RETERR) {
					error = XFS_ERROR(EINVAL);
					goto error1;
				}
				xfs_fs_cmn_err(CE_WARN, mp,
"stripe alignment turned off: sunit(%d)/swidth(%d) incompatible with agsize(%d)",
					mp->m_dalign, mp->m_swidth,
					sbp->sb_agblocks);

				mp->m_dalign = 0;
				mp->m_swidth = 0;
			} else if (mp->m_dalign) {
				mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
			} else {
				if (mp->m_flags & XFS_MOUNT_RETERR) {
					xfs_fs_cmn_err(CE_WARN, mp,
"stripe alignment turned off: sunit(%d) less than bsize(%d)",
						mp->m_dalign,
						mp->m_blockmask + 1);
					error = XFS_ERROR(EINVAL);
					goto error1;
				}
				mp->m_swidth = 0;
			}
		}

		/*
		 * Update superblock with new values
		 * and log changes
		 */
		if (XFS_SB_VERSION_HASDALIGN(sbp)) {
			if (sbp->sb_unit != mp->m_dalign) {
				sbp->sb_unit = mp->m_dalign;
				update_flags |= XFS_SB_UNIT;
			}
			if (sbp->sb_width != mp->m_swidth) {
				sbp->sb_width = mp->m_swidth;
				update_flags |= XFS_SB_WIDTH;
			}
		}
	} else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
		    XFS_SB_VERSION_HASDALIGN(&mp->m_sb)) {
		mp->m_dalign = sbp->sb_unit;
		mp->m_swidth = sbp->sb_width;
	}
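	/*
	 * Example of the conversion above (illustrative values): a mount
	 * option sunit of 64 512-byte sectors is BBTOB(64) = 32 KB; with
	 * 4 KB blocks that passes the blockmask test and becomes
	 * XFS_BB_TO_FSBT(mp, 64) = 8 FSBs, which must then divide
	 * sb_agblocks evenly for the alignment to be kept.
	 */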
	xfs_alloc_compute_maxlevels(mp);
	xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
	xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
	xfs_ialloc_compute_maxlevels(mp);

	if (sbp->sb_imax_pct) {
		__uint64_t	icount;

		/* Make sure the maximum inode count is a multiple of the
		 * units we allocate inodes in.
		 */

		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		do_div(icount, mp->m_ialloc_blks);
		mp->m_maxicount = (icount * mp->m_ialloc_blks) <<
				   sbp->sb_inopblog;
	} else
		mp->m_maxicount = 0;

	mp->m_maxioffset = xfs_max_file_offset(sbp->sb_blocklog);

	/*
	 * XFS uses the uuid from the superblock as the unique
	 * identifier for fsid.  We can not use the uuid from the volume
	 * since a single partition filesystem is identical to a single
	 * partition volume/filesystem.
	 */
	if ((mfsi_flags & XFS_MFSI_SECOND) == 0 &&
	    (mp->m_flags & XFS_MOUNT_NOUUID) == 0) {
		if (xfs_uuid_mount(mp)) {
			error = XFS_ERROR(EINVAL);
			goto error1;
		}
		uuid_mounted = 1;
		ret64 = uuid_hash64(&sbp->sb_uuid);
		memcpy(&vfsp->vfs_fsid, &ret64, sizeof(ret64));
	}

	/*
	 * Set the default minimum read and write sizes unless
	 * already specified in a mount option.
	 * We use smaller I/O sizes when the file system
	 * is being used for NFS service (wsync mount option).
	 */
	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
		if (mp->m_flags & XFS_MOUNT_WSYNC) {
			readio_log = XFS_WSYNC_READIO_LOG;
			writeio_log = XFS_WSYNC_WRITEIO_LOG;
		} else {
			readio_log = XFS_READIO_LOG_LARGE;
			writeio_log = XFS_WRITEIO_LOG_LARGE;
		}
	} else {
		readio_log = mp->m_readio_log;
		writeio_log = mp->m_writeio_log;
	}

	/*
	 * Set the number of readahead buffers to use based on
	 * physical memory size.
	 */
	if (xfs_physmem <= 4096)		/* <= 16MB */
		mp->m_nreadaheads = XFS_RW_NREADAHEAD_16MB;
	else if (xfs_physmem <= 8192)		/* <= 32MB */
		mp->m_nreadaheads = XFS_RW_NREADAHEAD_32MB;
	else
		mp->m_nreadaheads = XFS_RW_NREADAHEAD_K32;
	if (sbp->sb_blocklog > readio_log) {
		mp->m_readio_log = sbp->sb_blocklog;
	} else {
		mp->m_readio_log = readio_log;
	}
	mp->m_readio_blocks = 1 << (mp->m_readio_log - sbp->sb_blocklog);
	if (sbp->sb_blocklog > writeio_log) {
		mp->m_writeio_log = sbp->sb_blocklog;
	} else {
		mp->m_writeio_log = writeio_log;
	}
	mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog);

	/*
	 * Set the inode cluster size based on the physical memory
	 * size.  This may still be overridden by the file system
	 * block size if it is larger than the chosen cluster size.
	 */
	if (xfs_physmem <= btoc(32 * 1024 * 1024)) { /* <= 32 MB */
		mp->m_inode_cluster_size = XFS_INODE_SMALL_CLUSTER_SIZE;
	} else {
		mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;
	}
	/*
	 * Set whether we're using inode alignment.
	 */
	if (XFS_SB_VERSION_HASALIGN(&mp->m_sb) &&
	    mp->m_sb.sb_inoalignmt >=
	    XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
		mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
	else
		mp->m_inoalign_mask = 0;
	/*
	 * If we are using stripe alignment, check whether
	 * the stripe unit is a multiple of the inode alignment
	 */
	if (mp->m_dalign && mp->m_inoalign_mask &&
	    !(mp->m_dalign & mp->m_inoalign_mask))
		mp->m_sinoalign = mp->m_dalign;
	else
		mp->m_sinoalign = 0;
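	/*
	 * Example (illustrative): sb_inoalignmt = 4 FSBs gives
	 * m_inoalign_mask = 3; a stripe unit of 8 FSBs has (8 & 3) == 0,
	 * so m_sinoalign = 8 and inode chunks can be allocated
	 * stripe-unit aligned.  A 6-FSB stripe unit would fail the test
	 * and leave m_sinoalign = 0.
	 */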
	/*
	 * Check that the data (and log if separate) are an ok size.
	 */
	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
		cmn_err(CE_WARN, "XFS: size check 1 failed");
		error = XFS_ERROR(E2BIG);
		goto error1;
	}
	error = xfs_read_buf(mp, mp->m_ddev_targp,
			     d - XFS_FSS_TO_BB(mp, 1),
			     XFS_FSS_TO_BB(mp, 1), 0, &bp);
	if (!error) {
		xfs_buf_relse(bp);
	} else {
		cmn_err(CE_WARN, "XFS: size check 2 failed");
		if (error == ENOSPC) {
			error = XFS_ERROR(E2BIG);
		}
		goto error1;
	}

	if (((mfsi_flags & XFS_MFSI_CLIENT) == 0) &&
	    mp->m_logdev_targp != mp->m_ddev_targp) {
		d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
		if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
			cmn_err(CE_WARN, "XFS: size check 3 failed");
			error = XFS_ERROR(E2BIG);
			goto error1;
		}
		error = xfs_read_buf(mp, mp->m_logdev_targp,
				     d - XFS_FSB_TO_BB(mp, 1),
				     XFS_FSB_TO_BB(mp, 1), 0, &bp);
		if (!error) {
			xfs_buf_relse(bp);
		} else {
			cmn_err(CE_WARN, "XFS: size check 3 failed");
			if (error == ENOSPC) {
				error = XFS_ERROR(E2BIG);
			}
			goto error1;
		}
	}

	/*
	 * Initialize realtime fields in the mount structure
	 */
	if ((error = xfs_rtmount_init(mp))) {
		cmn_err(CE_WARN, "XFS: RT mount failed");
		goto error1;
	}

	/*
	 * For client case we are done now
	 */
	if (mfsi_flags & XFS_MFSI_CLIENT) {
		return 0;
	}

	/*
	 * Copies the low order bits of the timestamp and the randomly
	 * set "sequence" number out of a UUID.
	 */
	uuid_getnodeuniq(&sbp->sb_uuid, mp->m_fixedfsid);

	/*
	 * The vfs structure needs to have a file system independent
	 * way of checking for the invariant file system ID.  Since it
	 * can't look at mount structures it has a pointer to the data
	 * in the mount structure.
	 *
	 * File systems that don't support user level file handles (i.e.
	 * all of them except for XFS) will leave vfs_altfsid as NULL.
	 */
	vfsp->vfs_altfsid = (xfs_fsid_t *)mp->m_fixedfsid;
	mp->m_dmevmask = 0;	/* not persistent; set after each mount */

	/*
	 * Select the right directory manager.
	 */
	mp->m_dirops =
		XFS_SB_VERSION_HASDIRV2(&mp->m_sb) ?
			xfsv2_dirops :
			xfsv1_dirops;

	/*
	 * Initialize directory manager's entries.
	 */
	XFS_DIR_MOUNT(mp);

	/*
	 * Initialize the attribute manager's entries.
	 */
	mp->m_attr_magicpct = (mp->m_sb.sb_blocksize * 37) / 100;

	/*
	 * Initialize the precomputed transaction reservations values.
	 */
	xfs_trans_init(mp);

	/*
	 * Allocate and initialize the inode hash table for this
	 * file system.
	 */
	xfs_ihash_init(mp);
	xfs_chash_init(mp);

	/*
	 * Allocate and initialize the per-ag data.
	 */
	init_rwsem(&mp->m_peraglock);
	mp->m_perag =
		kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t), KM_SLEEP);

	mp->m_maxagi = xfs_initialize_perag(vfsp, mp, sbp->sb_agcount);
	/*
	 * log's mount-time initialization. Perform 1st part recovery if needed
	 */
	if (likely(sbp->sb_logblocks > 0)) {	/* check for volume case */
		error = xfs_log_mount(mp, mp->m_logdev_targp,
				      XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
				      XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
		if (error) {
			cmn_err(CE_WARN, "XFS: log mount failed");
			goto error2;
		}
	} else {	/* No log has been defined */
		cmn_err(CE_WARN, "XFS: no log defined");
		XFS_ERROR_REPORT("xfs_mountfs_int(1)", XFS_ERRLEVEL_LOW, mp);
		error = XFS_ERROR(EFSCORRUPTED);
		goto error2;
	}

	/*
	 * Get and sanity-check the root inode.
	 * Save the pointer to it in the mount structure.
	 */
	error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip, 0);
	if (error) {
		cmn_err(CE_WARN, "XFS: failed to read root inode");
		goto error3;
	}

	ASSERT(rip != NULL);
	rvp = XFS_ITOV(rip);

	if (unlikely((rip->i_d.di_mode & S_IFMT) != S_IFDIR)) {
		cmn_err(CE_WARN, "XFS: corrupted root inode");
		prdev("Root inode %llu is not a directory",
		      mp->m_ddev_targp, (unsigned long long)rip->i_ino);
		xfs_iunlock(rip, XFS_ILOCK_EXCL);
		XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW,
				 mp);
		error = XFS_ERROR(EFSCORRUPTED);
		goto error4;
	}
	mp->m_rootip = rip;	/* save it */

	xfs_iunlock(rip, XFS_ILOCK_EXCL);

	/*
	 * Initialize realtime inode pointers in the mount structure
	 */
	if ((error = xfs_rtmount_inodes(mp))) {
		/*
		 * Free up the root inode.
		 */
		cmn_err(CE_WARN, "XFS: failed to read RT inodes");
		goto error4;
	}

	/*
	 * If fs is not mounted readonly, then update the superblock
	 * unit and width changes.
	 */
	if (update_flags && !(vfsp->vfs_flag & VFS_RDONLY))
		xfs_mount_log_sbunit(mp, update_flags);

	/*
	 * Initialise the XFS quota management subsystem for this mount
	 */
	if ((error = XFS_QM_INIT(mp, &quotamount, &quotaflags)))
		goto error4;

	/*
	 * Finish recovering the file system.  This part needed to be
	 * delayed until after the root and real-time bitmap inodes
	 * were consistently read in.
	 */
	error = xfs_log_mount_finish(mp, mfsi_flags);
	if (error) {
		cmn_err(CE_WARN, "XFS: log mount finish failed");
		goto error4;
	}

	/*
	 * Complete the quota initialisation, post-log-replay component.
	 */
	if ((error = XFS_QM_MOUNT(mp, quotamount, quotaflags, mfsi_flags)))
		goto error4;

	return 0;

error4:
	/*
	 * Free up the root inode.
	 */
	VN_RELE(rvp);
error3:
	xfs_log_unmount_dealloc(mp);
error2:
	xfs_ihash_free(mp);
	xfs_chash_free(mp);
	for (agno = 0; agno < sbp->sb_agcount; agno++)
		if (mp->m_perag[agno].pagb_list)
			kmem_free(mp->m_perag[agno].pagb_list,
				  sizeof(xfs_perag_busy_t) * XFS_PAGB_NUM_SLOTS);
	kmem_free(mp->m_perag, sbp->sb_agcount * sizeof(xfs_perag_t));
	mp->m_perag = NULL;
	/* FALLTHROUGH */
error1:
	if (uuid_mounted)
		xfs_uuid_unmount(mp);
	xfs_freesb(mp);
	return error;
}
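/*
 * Summary of the error-label unwind order in xfs_mountfs() above, for
 * reference: error4 releases the root vnode, error3 tears down the log,
 * error2 frees the inode/cluster hashes and the per-ag array, and
 * error1 removes the UUID table entry and frees the superblock buffer.
 * Each label falls through to the cleanups below it.
 */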
/*
 * xfs_unmountfs
 *
 * This flushes out the inodes, dquots and the superblock, unmounts the
 * log and makes sure that incore structures are freed.
 */
int
xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
{
	struct vfs	*vfsp = XFS_MTOVFS(mp);
#if defined(DEBUG) || defined(INDUCE_IO_ERROR)
	int64_t		fsid;
#endif

	xfs_iflush_all(mp);

	XFS_QM_DQPURGEALL(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_UMOUNTING);

	/*
	 * Flush out the log synchronously so that we know for sure
	 * that nothing is pinned.  This is important because bflush()
	 * will skip pinned buffers.
	 */
	xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);

	xfs_binval(mp->m_ddev_targp);
	if (mp->m_rtdev_targp) {
		xfs_binval(mp->m_rtdev_targp);
	}

	xfs_unmountfs_writesb(mp);

	xfs_unmountfs_wait(mp);			/* wait for async bufs */

	xfs_log_unmount(mp);			/* Done! No more fs ops. */

	xfs_freesb(mp);

	/*
	 * All inodes from this mount point should be freed.
	 */
	ASSERT(mp->m_inodes == NULL);

	xfs_unmountfs_close(mp, cr);
	if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0)
		xfs_uuid_unmount(mp);

#if defined(DEBUG) || defined(INDUCE_IO_ERROR)
	/*
	 * clear all error tags on this filesystem
	 */
	memcpy(&fsid, &vfsp->vfs_fsid, sizeof(int64_t));
	xfs_errortag_clearall_umount(fsid, mp->m_fsname, 0);
#endif
	XFS_IODONE(vfsp);
	xfs_mount_free(mp, 1);
	return 0;
}

void
xfs_unmountfs_close(xfs_mount_t *mp, struct cred *cr)
{
	if (mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_free_buftarg(mp->m_logdev_targp, 1);
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp, 1);
	xfs_free_buftarg(mp->m_ddev_targp, 0);
}

STATIC void
xfs_unmountfs_wait(xfs_mount_t *mp)
{
	if (mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_wait_buftarg(mp->m_logdev_targp);
	if (mp->m_rtdev_targp)
		xfs_wait_buftarg(mp->m_rtdev_targp);
	xfs_wait_buftarg(mp->m_ddev_targp);
}

int
xfs_unmountfs_writesb(xfs_mount_t *mp)
{
	xfs_buf_t	*sbp;
	xfs_sb_t	*sb;
	int		error = 0;

	/*
	 * skip superblock write if fs is read-only, or
	 * if we are doing a forced umount.
	 */
	sbp = xfs_getsb(mp, 0);
	if (!(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY ||
		XFS_FORCED_SHUTDOWN(mp))) {

		xfs_icsb_sync_counters(mp);

		/*
		 * mark shared-readonly if desired
		 */
		sb = XFS_BUF_TO_SBP(sbp);
		if (mp->m_mk_sharedro) {
			if (!(sb->sb_flags & XFS_SBF_READONLY))
				sb->sb_flags |= XFS_SBF_READONLY;
			if (!XFS_SB_VERSION_HASSHARED(sb))
				XFS_SB_VERSION_ADDSHARED(sb);
			xfs_fs_cmn_err(CE_NOTE, mp,
				"Unmounting, marking shared read-only");
		}
		XFS_BUF_UNDONE(sbp);
		XFS_BUF_UNREAD(sbp);
		XFS_BUF_UNDELAYWRITE(sbp);
		XFS_BUF_WRITE(sbp);
		XFS_BUF_UNASYNC(sbp);
		ASSERT(XFS_BUF_TARGET(sbp) == mp->m_ddev_targp);
		xfsbdstrat(mp, sbp);
		/* Nevermind errors we might get here. */
		error = xfs_iowait(sbp);
		if (error)
			xfs_ioerror_alert("xfs_unmountfs_writesb",
					  mp, sbp, XFS_BUF_ADDR(sbp));
		if (error && mp->m_mk_sharedro)
			xfs_fs_cmn_err(CE_ALERT, mp, "Superblock write error detected while unmounting.  Filesystem may not be marked shared readonly");
	}
	xfs_buf_relse(sbp);
	return error;
}
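/*
 * Example of the modified-range computation in xfs_mod_sb() below
 * (illustrative): with fields = XFS_SB_UNIT | XFS_SB_WIDTH, the lowest
 * set bit selects sb_unit and the highest selects sb_width, so
 *
 *	first = offsetof(xfs_sb_t, sb_unit)
 *	last  = offsetof(xfs_sb_t, sb_dirblklog) - 1
 *
 * and the two adjacent fields are logged as one contiguous byte range.
 */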
/*
 * xfs_mod_sb() can be used to copy arbitrary changes to the
 * in-core superblock into the superblock buffer to be logged.
 * It does not provide the higher level of locking that is
 * needed to protect the in-core superblock from concurrent
 * access.
 */
void
xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
{
	xfs_buf_t	*bp;
	int		first;
	int		last;
	xfs_mount_t	*mp;
	xfs_sb_t	*sbp;
	xfs_sb_field_t	f;

	ASSERT(fields);
	if (!fields)
		return;
	mp = tp->t_mountp;
	bp = xfs_trans_getsb(tp, mp, 0);
	sbp = XFS_BUF_TO_SBP(bp);
	first = sizeof(xfs_sb_t);
	last = 0;

	/* translate/copy */

	xfs_xlatesb(XFS_BUF_PTR(bp), &(mp->m_sb), -1, fields);

	/* find modified range */

	f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
	ASSERT((1LL << f) & XFS_SB_MOD_BITS);
	first = xfs_sb_info[f].offset;

	f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields);
	ASSERT((1LL << f) & XFS_SB_MOD_BITS);
	last = xfs_sb_info[f + 1].offset - 1;

	xfs_trans_log_buf(tp, bp, first, last);
}

/*
 * xfs_mod_incore_sb_unlocked() is a utility routine commonly used to apply
 * a delta to a specified field in the in-core superblock.  Simply
 * switch on the field indicated and apply the delta to that field.
 * Fields are not allowed to dip below zero, so if the delta would
 * do this do not apply it and return EINVAL.
 *
 * The SB_LOCK must be held when this routine is called.
 */
int
xfs_mod_incore_sb_unlocked(xfs_mount_t *mp, xfs_sb_field_t field,
			int delta, int rsvd)
{
	int		scounter;	/* short counter for 32 bit fields */
	long long	lcounter;	/* long counter for 64 bit fields */
	long long	res_used, rem;

	/*
	 * With the in-core superblock spin lock held, switch
	 * on the indicated field.  Apply the delta to the
	 * proper field.  If the field's value would dip below
	 * 0, then do not apply the delta and return EINVAL.
	 */
	switch (field) {
	case XFS_SBS_ICOUNT:
		lcounter = (long long)mp->m_sb.sb_icount;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_icount = lcounter;
		return 0;
	case XFS_SBS_IFREE:
		lcounter = (long long)mp->m_sb.sb_ifree;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_ifree = lcounter;
		return 0;
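	/*
	 * Worked example of the reserved-pool handling below
	 * (illustrative numbers): with m_resblks = 1024 and
	 * m_resblks_avail = 1000, res_used is 24.  Freeing 100 blocks
	 * (delta = +100) first refills the reserve to 1024 and only the
	 * remaining 76 blocks are added to sb_fdblocks.
	 */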
	case XFS_SBS_FDBLOCKS:

		lcounter = (long long)mp->m_sb.sb_fdblocks;
		res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);

		if (delta > 0) {		/* Putting blocks back */
			if (res_used > delta) {
				mp->m_resblks_avail += delta;
			} else {
				rem = delta - res_used;
				mp->m_resblks_avail = mp->m_resblks;
				lcounter += rem;
			}
		} else {			/* Taking blocks away */

			lcounter += delta;

			/*
			 * If we're out of blocks, use any available reserved
			 * blocks if we're allowed to.
			 */

			if (lcounter < 0) {
				if (rsvd) {
					lcounter = (long long)mp->m_resblks_avail + delta;
					if (lcounter < 0) {
						return XFS_ERROR(ENOSPC);
					}
					mp->m_resblks_avail = lcounter;
					return 0;
				} else {	/* not reserved */
					return XFS_ERROR(ENOSPC);
				}
			}
		}

		mp->m_sb.sb_fdblocks = lcounter;
		return 0;
	case XFS_SBS_FREXTENTS:
		lcounter = (long long)mp->m_sb.sb_frextents;
		lcounter += delta;
		if (lcounter < 0) {
			return XFS_ERROR(ENOSPC);
		}
		mp->m_sb.sb_frextents = lcounter;
		return 0;
	case XFS_SBS_DBLOCKS:
		lcounter = (long long)mp->m_sb.sb_dblocks;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_dblocks = lcounter;
		return 0;
	case XFS_SBS_AGCOUNT:
		scounter = mp->m_sb.sb_agcount;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_agcount = scounter;
		return 0;
	case XFS_SBS_IMAX_PCT:
		scounter = mp->m_sb.sb_imax_pct;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_imax_pct = scounter;
		return 0;
	case XFS_SBS_REXTSIZE:
		scounter = mp->m_sb.sb_rextsize;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rextsize = scounter;
		return 0;
	case XFS_SBS_RBMBLOCKS:
		scounter = mp->m_sb.sb_rbmblocks;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rbmblocks = scounter;
		return 0;
	case XFS_SBS_RBLOCKS:
		lcounter = (long long)mp->m_sb.sb_rblocks;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rblocks = lcounter;
		return 0;
	case XFS_SBS_REXTENTS:
		lcounter = (long long)mp->m_sb.sb_rextents;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rextents = lcounter;
		return 0;
	case XFS_SBS_REXTSLOG:
		scounter = mp->m_sb.sb_rextslog;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return XFS_ERROR(EINVAL);
		}
		mp->m_sb.sb_rextslog = scounter;
		return 0;
	default:
		ASSERT(0);
		return XFS_ERROR(EINVAL);
	}
}
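/*
 * Typical use of xfs_mod_incore_sb() below (illustrative): an
 * allocation of N blocks passes a negative delta,
 *
 *	error = xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, -N, rsvd);
 *
 * and gets ENOSPC back if the free-block count (plus the reserve, when
 * rsvd is set) cannot cover it; freeing passes +N.
 */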
/*
 * xfs_mod_incore_sb() is used to change a field in the in-core
 * superblock structure by the specified delta.  This modification
 * is protected by the SB_LOCK.  Just use the xfs_mod_incore_sb_unlocked()
 * routine to do the work.
 */
int
xfs_mod_incore_sb(xfs_mount_t *mp, xfs_sb_field_t field, int delta, int rsvd)
{
	unsigned long	s;
	int		status;

	/* check for per-cpu counters */
	switch (field) {
#ifdef HAVE_PERCPU_SB
	case XFS_SBS_ICOUNT:
	case XFS_SBS_IFREE:
	case XFS_SBS_FDBLOCKS:
		if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
			status = xfs_icsb_modify_counters(mp, field,
							delta, rsvd);
			break;
		}
		/* FALLTHROUGH */
#endif
	default:
		s = XFS_SB_LOCK(mp);
		status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
		XFS_SB_UNLOCK(mp, s);
		break;
	}

	return status;
}

/*
 * xfs_mod_incore_sb_batch() is used to change more than one field
 * in the in-core superblock structure at a time.  This modification
 * is protected by a lock internal to this module.  The fields and
 * changes to those fields are specified in the array of xfs_mod_sb
 * structures passed in.
 *
 * Either all of the specified deltas will be applied or none of
 * them will.  If any modified field dips below 0, then all modifications
 * will be backed out and EINVAL will be returned.
 */
int
xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
{
	unsigned long	s;
	int		status = 0;
	xfs_mod_sb_t	*msbp;

	/*
	 * Loop through the array of mod structures and apply each
	 * individually.  If any fail, then back out all those
	 * which have already been applied.  Do all of this within
	 * the scope of the SB_LOCK so that all of the changes will
	 * be atomic.
	 */
	s = XFS_SB_LOCK(mp);
	msbp = &msb[0];
	for (msbp = &msbp[0]; msbp < (msb + nmsb); msbp++) {
		/*
		 * Apply the delta at index n.  If it fails, break
		 * from the loop so we'll fall into the undo loop
		 * below.
		 */
		switch (msbp->msb_field) {
#ifdef HAVE_PERCPU_SB
		case XFS_SBS_ICOUNT:
		case XFS_SBS_IFREE:
		case XFS_SBS_FDBLOCKS:
			if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
				status = xfs_icsb_modify_counters_locked(mp,
							msbp->msb_field,
							msbp->msb_delta, rsvd);
				break;
			}
			/* FALLTHROUGH */
#endif
		default:
			status = xfs_mod_incore_sb_unlocked(mp,
							msbp->msb_field,
							msbp->msb_delta, rsvd);
			break;
		}

		if (status != 0) {
			break;
		}
	}

	/*
	 * If we didn't complete the loop above, then back out
	 * any changes made to the superblock.  If you add code
	 * between the loop above and here, make sure that you
	 * preserve the value of status. Loop back until
	 * we step below the beginning of the array.  Make sure
	 * we don't touch anything back there.
	 */
	if (status != 0) {
		msbp--;
		while (msbp >= msb) {
			switch (msbp->msb_field) {
#ifdef HAVE_PERCPU_SB
			case XFS_SBS_ICOUNT:
			case XFS_SBS_IFREE:
			case XFS_SBS_FDBLOCKS:
				if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) {
					status =
					    xfs_icsb_modify_counters_locked(mp,
							msbp->msb_field,
							-(msbp->msb_delta),
							rsvd);
					break;
				}
				/* FALLTHROUGH */
#endif
			default:
				status = xfs_mod_incore_sb_unlocked(mp,
							msbp->msb_field,
							-(msbp->msb_delta),
							rsvd);
				break;
			}
			ASSERT(status == 0);
			msbp--;
		}
	}
	XFS_SB_UNLOCK(mp, s);
	return status;
}
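/*
 * Usage sketch for xfs_mod_incore_sb_batch() above (illustrative): to
 * apply two deltas atomically one might build
 *
 *	xfs_mod_sb_t msb[2] = {
 *		{ .msb_field = XFS_SBS_FDBLOCKS,  .msb_delta = -8 },
 *		{ .msb_field = XFS_SBS_FREXTENTS, .msb_delta =  1 },
 *	};
 *	error = xfs_mod_incore_sb_batch(mp, msb, 2, rsvd);
 *
 * If either delta would drive its field negative, both are backed out.
 */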
/*
 * xfs_getsb() is called to obtain the buffer for the superblock.
 * The buffer is returned locked and read in from disk.
 * The buffer should be released with a call to xfs_buf_relse().
 *
 * If the flags parameter is BUF_TRYLOCK, then we'll only return
 * the superblock buffer if it can be locked without sleeping.
 * If it can't then we'll return NULL.
 */
xfs_buf_t *
xfs_getsb(
	xfs_mount_t	*mp,
	int		flags)
{
	xfs_buf_t	*bp;

	ASSERT(mp->m_sb_bp != NULL);
	bp = mp->m_sb_bp;
	if (flags & XFS_BUF_TRYLOCK) {
		if (!XFS_BUF_CPSEMA(bp)) {
			return NULL;
		}
	} else {
		XFS_BUF_PSEMA(bp, PRIBIO);
	}
	XFS_BUF_HOLD(bp);
	ASSERT(XFS_BUF_ISDONE(bp));
	return bp;
}

/*
 * Used to free the superblock along various error paths.
 */
void
xfs_freesb(
	xfs_mount_t	*mp)
{
	xfs_buf_t	*bp;

	/*
	 * Use xfs_getsb() so that the buffer will be locked
	 * when we call xfs_buf_relse().
	 */
	bp = xfs_getsb(mp, 0);
	XFS_BUF_UNMANAGE(bp);
	xfs_buf_relse(bp);
	mp->m_sb_bp = NULL;
}

/*
 * See if the UUID is unique among mounted XFS filesystems.
 * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
 */
STATIC int
xfs_uuid_mount(
	xfs_mount_t	*mp)
{
	if (uuid_is_nil(&mp->m_sb.sb_uuid)) {
		cmn_err(CE_WARN,
			"XFS: Filesystem %s has nil UUID - can't mount",
			mp->m_fsname);
		return -1;
	}
	if (!uuid_table_insert(&mp->m_sb.sb_uuid)) {
		cmn_err(CE_WARN,
			"XFS: Filesystem %s has duplicate UUID - can't mount",
			mp->m_fsname);
		return -1;
	}
	return 0;
}

/*
 * Remove filesystem from the UUID table.
 */
STATIC void
xfs_uuid_unmount(
	xfs_mount_t	*mp)
{
	uuid_table_remove(&mp->m_sb.sb_uuid);
}

/*
 * Used to log changes to the superblock unit and width fields which could
 * be altered by the mount options. Only the first superblock is updated.
 */
STATIC void
xfs_mount_log_sbunit(
	xfs_mount_t	*mp,
	__int64_t	fields)
{
	xfs_trans_t	*tp;

	ASSERT(fields & (XFS_SB_UNIT|XFS_SB_WIDTH|XFS_SB_UUID));

	tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT);
	if (xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
				XFS_DEFAULT_LOG_COUNT)) {
		xfs_trans_cancel(tp, 0);
		return;
	}
	xfs_mod_sb(tp, fields);
	xfs_trans_commit(tp, 0, NULL);
}

#ifdef HAVE_PERCPU_SB
/*
 * Per-cpu incore superblock counters
 *
 * Simple concept, difficult implementation
 *
 * Basically, replace the incore superblock counters with a distributed per cpu
 * counter for contended fields (e.g. free block count).
 *
 * Difficulties arise in that the incore sb is used for ENOSPC checking, and
 * hence needs to be accurately read when we are running low on space.  Hence
 * there is a method to enable and disable the per-cpu counters based on how
 * much "stuff" is available in them.
 *
 * Basically, a counter is enabled if there is enough free resource to justify
 * running a per-cpu fast-path.  If the per-cpu counter runs out (i.e. a local
 * ENOSPC), then we disable the counters to synchronise all callers and
 * re-distribute the available resources.
 *
 * If, once we redistributed the available resources, we still get a failure,
 * we disable the per-cpu counter and go through the slow path.
 *
 * The slow path is the current xfs_mod_incore_sb() function.  This means that
 * when we disable a per-cpu counter, we need to drain its resources back to
 * the global superblock.  We do this after disabling the counter to prevent
 * more threads from queueing up on the counter.
 *
 * Essentially, this means that we still need a lock in the fast path to enable
 * synchronisation between the global counters and the per-cpu counters.  This
 * is not a problem because the lock will be local to a CPU almost all the time
 * and have little contention except when we get to ENOSPC conditions.
 *
 * Basically, this lock becomes a barrier that enables us to lock out the fast
 * path while we do things like enabling and disabling counters and
 * synchronising the counters.
 *
 * Locking rules:
 *
 *	1. XFS_SB_LOCK() before picking up per-cpu locks
 *	2. per-cpu locks always picked up via for_each_online_cpu() order
 *	3. accurate counter sync requires XFS_SB_LOCK + per cpu locks
 *	4. modifying per-cpu counters requires holding per-cpu lock
 *	5. modifying global counters requires holding XFS_SB_LOCK
 *	6. enabling or disabling a counter requires holding the XFS_SB_LOCK
 *	   and _none_ of the per-cpu locks.
 *
 * Disabled counters are only ever re-enabled by a balance operation
 * that results in more free resources per CPU than a given threshold.
 * To ensure counters don't remain disabled, they are rebalanced when
 * the global resource goes above a higher threshold (i.e. some hysteresis
 * is present to prevent thrashing).
 */

/*
 * hot-plug CPU notifier support.
 *
 * We cannot use the hotcpu_register() function because it does
 * not allow notifier instances.  We need a notifier per filesystem
 * as we need to be able to identify the filesystem to balance
 * the counters out.  This is achieved by having a notifier block
 * embedded in the xfs_mount_t and doing pointer magic to get the
 * mount pointer from the notifier block address.
 */
STATIC int
xfs_icsb_cpu_notify(
	struct notifier_block *nfb,
	unsigned long action,
	void *hcpu)
{
	xfs_icsb_cnts_t *cntp;
	xfs_mount_t	*mp;
	int		s;

	mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier);
	cntp = (xfs_icsb_cnts_t *)
			per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu);
	switch (action) {
	case CPU_UP_PREPARE:
		/* Easy Case - initialize the area and locks, and
		 * then rebalance when online does everything else for us. */
		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
		break;
	case CPU_ONLINE:
		xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
		xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
		xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
		break;
	case CPU_DEAD:
		/* Disable all the counters, then fold the dead cpu's
		 * count into the total on the global superblock and
		 * re-enable the counters. */
		s = XFS_SB_LOCK(mp);
		xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT);
		xfs_icsb_disable_counter(mp, XFS_SBS_IFREE);
		xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS);

		mp->m_sb.sb_icount += cntp->icsb_icount;
		mp->m_sb.sb_ifree += cntp->icsb_ifree;
		mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks;

		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));

		xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, XFS_ICSB_SB_LOCKED);
		xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, XFS_ICSB_SB_LOCKED);
		xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, XFS_ICSB_SB_LOCKED);
		XFS_SB_UNLOCK(mp, s);
		break;
	}

	return NOTIFY_OK;
}
int
xfs_icsb_init_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_cnts_t *cntp;
	int		i;

	mp->m_sb_cnts = alloc_percpu(xfs_icsb_cnts_t);
	if (mp->m_sb_cnts == NULL)
		return -ENOMEM;

	mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify;
	mp->m_icsb_notifier.priority = 0;
	register_cpu_notifier(&mp->m_icsb_notifier);

	for_each_online_cpu(i) {
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
	}
	/*
	 * start with all counters disabled so that the
	 * initial balance kicks us off correctly
	 */
	mp->m_icsb_counters = -1;
	return 0;
}

STATIC void
xfs_icsb_destroy_counters(
	xfs_mount_t	*mp)
{
	if (mp->m_sb_cnts) {
		unregister_cpu_notifier(&mp->m_icsb_notifier);
		free_percpu(mp->m_sb_cnts);
	}
}

STATIC inline void
xfs_icsb_lock_cntr(
	xfs_icsb_cnts_t	*icsbp)
{
	while (test_and_set_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags)) {
		ndelay(1000);
	}
}

STATIC inline void
xfs_icsb_unlock_cntr(
	xfs_icsb_cnts_t	*icsbp)
{
	clear_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags);
}


STATIC inline void
xfs_icsb_lock_all_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_cnts_t *cntp;
	int		i;

	for_each_online_cpu(i) {
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		xfs_icsb_lock_cntr(cntp);
	}
}

STATIC inline void
xfs_icsb_unlock_all_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_cnts_t *cntp;
	int		i;

	for_each_online_cpu(i) {
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		xfs_icsb_unlock_cntr(cntp);
	}
}

STATIC void
xfs_icsb_count(
	xfs_mount_t	*mp,
	xfs_icsb_cnts_t	*cnt,
	int		flags)
{
	xfs_icsb_cnts_t *cntp;
	int		i;

	memset(cnt, 0, sizeof(xfs_icsb_cnts_t));

	if (!(flags & XFS_ICSB_LAZY_COUNT))
		xfs_icsb_lock_all_counters(mp);

	for_each_online_cpu(i) {
		cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
		cnt->icsb_icount += cntp->icsb_icount;
		cnt->icsb_ifree += cntp->icsb_ifree;
		cnt->icsb_fdblocks += cntp->icsb_fdblocks;
	}

	if (!(flags & XFS_ICSB_LAZY_COUNT))
		xfs_icsb_unlock_all_counters(mp);
}

STATIC int
xfs_icsb_counter_disabled(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field)
{
	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
	return test_bit(field, &mp->m_icsb_counters);
}
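/*
 * A set bit in m_icsb_counters marks that counter as disabled: the
 * per-cpu values have been drained back into mp->m_sb and all
 * modifications take the locked slow path until a balance operation
 * re-enables the counter.  Disabling is done below under all of the
 * per-cpu locks so no fast-path update can race with the drain.
 */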
STATIC int
xfs_icsb_disable_counter(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field)
{
	xfs_icsb_cnts_t	cnt;

	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));

	xfs_icsb_lock_all_counters(mp);
	if (!test_and_set_bit(field, &mp->m_icsb_counters)) {
		/* drain back to superblock */

		xfs_icsb_count(mp, &cnt, XFS_ICSB_SB_LOCKED|XFS_ICSB_LAZY_COUNT);
		switch (field) {
		case XFS_SBS_ICOUNT:
			mp->m_sb.sb_icount = cnt.icsb_icount;
			break;
		case XFS_SBS_IFREE:
			mp->m_sb.sb_ifree = cnt.icsb_ifree;
			break;
		case XFS_SBS_FDBLOCKS:
			mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
			break;
		default:
			BUG();
		}
	}

	xfs_icsb_unlock_all_counters(mp);

	return 0;
}

STATIC void
xfs_icsb_enable_counter(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	uint64_t	count,
	uint64_t	resid)
{
	xfs_icsb_cnts_t	*cntp;
	int		i;

	ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));

	xfs_icsb_lock_all_counters(mp);
	for_each_online_cpu(i) {
		cntp = per_cpu_ptr(mp->m_sb_cnts, i);
		switch (field) {
		case XFS_SBS_ICOUNT:
			cntp->icsb_icount = count + resid;
			break;
		case XFS_SBS_IFREE:
			cntp->icsb_ifree = count + resid;
			break;
		case XFS_SBS_FDBLOCKS:
			cntp->icsb_fdblocks = count + resid;
			break;
		default:
			BUG();
			break;
		}
		resid = 0;
	}
	clear_bit(field, &mp->m_icsb_counters);
	xfs_icsb_unlock_all_counters(mp);
}

STATIC void
xfs_icsb_sync_counters_int(
	xfs_mount_t	*mp,
	int		flags)
{
	xfs_icsb_cnts_t	cnt;
	int		s;

	/* Pass 1: lock and sum all the per-cpu counters */
	if ((flags & XFS_ICSB_SB_LOCKED) == 0)
		s = XFS_SB_LOCK(mp);

	xfs_icsb_count(mp, &cnt, flags);

	/* Pass 2: update the mp->m_sb fields that are still enabled */
	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT))
		mp->m_sb.sb_icount = cnt.icsb_icount;
	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE))
		mp->m_sb.sb_ifree = cnt.icsb_ifree;
	if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS))
		mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;

	if ((flags & XFS_ICSB_SB_LOCKED) == 0)
		XFS_SB_UNLOCK(mp, s);
}

/*
 * Accurate update of per-cpu counters to incore superblock
 */
STATIC void
xfs_icsb_sync_counters(
	xfs_mount_t	*mp)
{
	xfs_icsb_sync_counters_int(mp, 0);
}

/*
 * lazy addition used for things like df, background sb syncs, etc
 */
void
xfs_icsb_sync_counters_lazy(
	xfs_mount_t	*mp)
{
	xfs_icsb_sync_counters_int(mp, XFS_ICSB_LAZY_COUNT);
}
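/*
 * Example of the re-enable thresholds below (illustrative): on a 4-CPU
 * machine with sb_fdblocks = 2048, the balance gives each CPU a
 * 512-block share, which meets XFS_ICSB_FDBLK_CNTR_REENABLE, so the
 * counter is re-enabled; with sb_fdblocks = 2047 the 511-block share
 * falls short and the counter stays disabled.
 */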
/*
 * Balance and enable/disable counters as necessary.
 *
 * Thresholds for re-enabling counters are somewhat magic.
 * inode counts are chosen to be the same number as a single
 * on-disk allocation chunk per CPU, and free blocks is
 * something far enough from zero that we aren't going to thrash
 * when we get near ENOSPC.
 */
#define XFS_ICSB_INO_CNTR_REENABLE	64
#define XFS_ICSB_FDBLK_CNTR_REENABLE	512
STATIC void
xfs_icsb_balance_counter(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int		flags)
{
	uint64_t	count, resid = 0;
	int		weight = num_online_cpus();
	int		s;

	if (!(flags & XFS_ICSB_SB_LOCKED))
		s = XFS_SB_LOCK(mp);

	/* disable counter and sync counter */
	xfs_icsb_disable_counter(mp, field);

	/* update counters - first CPU gets residual */
	switch (field) {
	case XFS_SBS_ICOUNT:
		count = mp->m_sb.sb_icount;
		resid = do_div(count, weight);
		if (count < XFS_ICSB_INO_CNTR_REENABLE)
			goto out;
		break;
	case XFS_SBS_IFREE:
		count = mp->m_sb.sb_ifree;
		resid = do_div(count, weight);
		if (count < XFS_ICSB_INO_CNTR_REENABLE)
			goto out;
		break;
	case XFS_SBS_FDBLOCKS:
		count = mp->m_sb.sb_fdblocks;
		resid = do_div(count, weight);
		if (count < XFS_ICSB_FDBLK_CNTR_REENABLE)
			goto out;
		break;
	default:
		BUG();
		break;
	}

	xfs_icsb_enable_counter(mp, field, count, resid);
out:
	if (!(flags & XFS_ICSB_SB_LOCKED))
		XFS_SB_UNLOCK(mp, s);
}
STATIC int
xfs_icsb_modify_counters_int(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int		delta,
	int		rsvd,
	int		flags)
{
	xfs_icsb_cnts_t	*icsbp;
	long long	lcounter;	/* long counter for 64 bit fields */
	int		cpu, s, locked = 0;
	int		ret = 0, balance_done = 0;

again:
	cpu = get_cpu();
	icsbp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, cpu);
	xfs_icsb_lock_cntr(icsbp);
	if (unlikely(xfs_icsb_counter_disabled(mp, field)))
		goto slow_path;

	switch (field) {
	case XFS_SBS_ICOUNT:
		lcounter = icsbp->icsb_icount;
		lcounter += delta;
		if (unlikely(lcounter < 0))
			goto slow_path;
		icsbp->icsb_icount = lcounter;
		break;

	case XFS_SBS_IFREE:
		lcounter = icsbp->icsb_ifree;
		lcounter += delta;
		if (unlikely(lcounter < 0))
			goto slow_path;
		icsbp->icsb_ifree = lcounter;
		break;

	case XFS_SBS_FDBLOCKS:
		BUG_ON((mp->m_resblks - mp->m_resblks_avail) != 0);

		lcounter = icsbp->icsb_fdblocks;
		lcounter += delta;
		if (unlikely(lcounter < 0))
			goto slow_path;
		icsbp->icsb_fdblocks = lcounter;
		break;
	default:
		BUG();
		break;
	}
	xfs_icsb_unlock_cntr(icsbp);
	put_cpu();
	if (locked)
		XFS_SB_UNLOCK(mp, s);
	return 0;

	/*
	 * The slow path needs to be run with the SB_LOCK
	 * held so that we prevent other threads from
	 * attempting to run this path at the same time.
	 * This provides exclusion for the balancing code,
	 * and exclusive fallback if the balance does not
	 * provide enough resources to continue in an unlocked
	 * manner.
	 */
slow_path:
	xfs_icsb_unlock_cntr(icsbp);
	put_cpu();

	/* need to hold the superblock lock in case we need
	 * to disable a counter */
	if (!(flags & XFS_ICSB_SB_LOCKED)) {
		s = XFS_SB_LOCK(mp);
		locked = 1;
		flags |= XFS_ICSB_SB_LOCKED;
	}
	if (!balance_done) {
		xfs_icsb_balance_counter(mp, field, flags);
		balance_done = 1;
		goto again;
	} else {
		/*
		 * we might not have enough on this local
		 * cpu to allocate for a bulk request.
		 * We need to drain this field from all CPUs
		 * and disable the counter fastpath
		 */
		xfs_icsb_disable_counter(mp, field);
	}

	ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);

	if (locked)
		XFS_SB_UNLOCK(mp, s);
	return ret;
}

STATIC int
xfs_icsb_modify_counters(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int		delta,
	int		rsvd)
{
	return xfs_icsb_modify_counters_int(mp, field, delta, rsvd, 0);
}

/*
 * Called when superblock is already locked
 */
STATIC int
xfs_icsb_modify_counters_locked(
	xfs_mount_t	*mp,
	xfs_sb_field_t	field,
	int		delta,
	int		rsvd)
{
	return xfs_icsb_modify_counters_int(mp, field, delta,
						rsvd, XFS_ICSB_SB_LOCKED);
}
#endif