/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_fsops.h"
#include "xfs_itable.h"
#include "xfs_trans_space.h"
#include "xfs_rtalloc.h"
#include "xfs_rw.h"
#include "xfs_filestream.h"
#include "xfs_trace.h"

/*
 * File system operations
 */

int
xfs_fs_geometry(
	xfs_mount_t		*mp,
	xfs_fsop_geom_t		*geo,
	int			new_version)
{
	geo->blocksize = mp->m_sb.sb_blocksize;
	geo->rtextsize = mp->m_sb.sb_rextsize;
	geo->agblocks = mp->m_sb.sb_agblocks;
	geo->agcount = mp->m_sb.sb_agcount;
	geo->logblocks = mp->m_sb.sb_logblocks;
	geo->sectsize = mp->m_sb.sb_sectsize;
	geo->inodesize = mp->m_sb.sb_inodesize;
	geo->imaxpct = mp->m_sb.sb_imax_pct;
	geo->datablocks = mp->m_sb.sb_dblocks;
	geo->rtblocks = mp->m_sb.sb_rblocks;
	geo->rtextents = mp->m_sb.sb_rextents;
	geo->logstart = mp->m_sb.sb_logstart;
	ASSERT(sizeof(geo->uuid) == sizeof(mp->m_sb.sb_uuid));
	memcpy(geo->uuid, &mp->m_sb.sb_uuid, sizeof(mp->m_sb.sb_uuid));
	if (new_version >= 2) {
		geo->sunit = mp->m_sb.sb_unit;
		geo->swidth = mp->m_sb.sb_width;
	}
	if (new_version >= 3) {
		geo->version = XFS_FSOP_GEOM_VERSION;
		geo->flags =
			(xfs_sb_version_hasattr(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_ATTR : 0) |
			(xfs_sb_version_hasnlink(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_NLINK : 0) |
			(xfs_sb_version_hasquota(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_QUOTA : 0) |
			(xfs_sb_version_hasalign(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_IALIGN : 0) |
			(xfs_sb_version_hasdalign(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_DALIGN : 0) |
			(xfs_sb_version_hasshared(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_SHARED : 0) |
			(xfs_sb_version_hasextflgbit(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_EXTFLG : 0) |
			(xfs_sb_version_hasdirv2(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_DIRV2 : 0) |
			(xfs_sb_version_hassector(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_SECTOR : 0) |
			(xfs_sb_version_hasasciici(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_DIRV2CI : 0) |
			(xfs_sb_version_haslazysbcount(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_LAZYSB : 0) |
			(xfs_sb_version_hasattr2(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_ATTR2 : 0);
		geo->logsectsize = xfs_sb_version_hassector(&mp->m_sb) ?
				mp->m_sb.sb_logsectsize : BBSIZE;
		geo->rtsectsize = mp->m_sb.sb_blocksize;
		geo->dirblocksize = mp->m_dirblksize;
	}
	if (new_version >= 4) {
		geo->flags |=
			(xfs_sb_version_haslogv2(&mp->m_sb) ?
				XFS_FSOP_GEOM_FLAGS_LOGV2 : 0);
		geo->logsunit = mp->m_sb.sb_logsunit;
	}
	return 0;
}
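/*
 * Illustrative userspace caller, not part of this file: the geometry
 * filled in above is normally fetched through the XFS_IOC_FSGEOMETRY
 * ioctl, roughly as in this sketch ("fd" is a hypothetical descriptor
 * for a file on an XFS mount; error handling omitted):
 *
 *	xfs_fsop_geom_t	geo;
 *
 *	if (ioctl(fd, XFS_IOC_FSGEOMETRY, &geo) == 0)
 *		printf("blocksize %u agcount %u agblocks %u\n",
 *			geo.blocksize, geo.agcount, geo.agblocks);
 */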
static int
xfs_growfs_data_private(
	xfs_mount_t		*mp,		/* mount point for filesystem */
	xfs_growfs_data_t	*in)		/* growfs data input struct */
{
	xfs_agf_t		*agf;
	xfs_agi_t		*agi;
	xfs_agnumber_t		agno;
	xfs_extlen_t		agsize;
	xfs_extlen_t		tmpsize;
	xfs_alloc_rec_t		*arec;
	struct xfs_btree_block	*block;
	xfs_buf_t		*bp;
	int			bucket;
	int			dpct;
	int			error;
	xfs_agnumber_t		nagcount;
	xfs_agnumber_t		nagimax = 0;
	xfs_rfsblock_t		nb, nb_mod;
	xfs_rfsblock_t		new;
	xfs_rfsblock_t		nfree;
	xfs_agnumber_t		oagcount;
	int			pct;
	xfs_trans_t		*tp;

	nb = in->newblocks;
	pct = in->imaxpct;
	if (nb < mp->m_sb.sb_dblocks || pct < 0 || pct > 100)
		return XFS_ERROR(EINVAL);
	if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb)))
		return error;
	dpct = pct - mp->m_sb.sb_imax_pct;
	bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp,
				XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
				BBTOB(XFS_FSS_TO_BB(mp, 1)), 0);
	if (!bp)
		return EIO;
	xfs_buf_relse(bp);

	new = nb;	/* use new as a temporary here */
	nb_mod = do_div(new, mp->m_sb.sb_agblocks);
	nagcount = new + (nb_mod != 0);
	if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) {
		nagcount--;
		nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks;
		if (nb < mp->m_sb.sb_dblocks)
			return XFS_ERROR(EINVAL);
	}
	new = nb - mp->m_sb.sb_dblocks;
	oagcount = mp->m_sb.sb_agcount;
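	/*
	 * Worked example with illustrative numbers: growing to
	 * nb = 49200 blocks with sb_agblocks = 16384 leaves new = 3 and
	 * nb_mod = 48 after do_div().  Assuming 48 is below
	 * XFS_MIN_AG_BLOCKS, the runt last AG is dropped, nagcount falls
	 * back to 3 and nb is trimmed to 3 * 16384 = 49152 blocks.
	 */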
	/* allocate the new per-ag structures */
	if (nagcount > oagcount) {
		error = xfs_initialize_perag(mp, nagcount, &nagimax);
		if (error)
			return error;
	}

	tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS);
	tp->t_flags |= XFS_TRANS_RESERVE;
	if ((error = xfs_trans_reserve(tp, XFS_GROWFS_SPACE_RES(mp),
			XFS_GROWDATA_LOG_RES(mp), 0, 0, 0))) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	/*
	 * Write new AG headers to disk. Non-transactional, but written
	 * synchronously so they are completed prior to the growfs transaction
	 * being logged.
	 */
	nfree = 0;
	for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) {
		/*
		 * AG freelist header block
		 */
		bp = xfs_buf_get(mp->m_ddev_targp,
				 XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
				 XFS_FSS_TO_BB(mp, 1), XBF_LOCK | XBF_MAPPED);
		agf = XFS_BUF_TO_AGF(bp);
		memset(agf, 0, mp->m_sb.sb_sectsize);
		agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
		agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
		agf->agf_seqno = cpu_to_be32(agno);
		if (agno == nagcount - 1)
			agsize =
				nb -
				(agno * (xfs_rfsblock_t)mp->m_sb.sb_agblocks);
		else
			agsize = mp->m_sb.sb_agblocks;
		agf->agf_length = cpu_to_be32(agsize);
		agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp));
		agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
		agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
		agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
		agf->agf_flfirst = 0;
		agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
		agf->agf_flcount = 0;
		tmpsize = agsize - XFS_PREALLOC_BLOCKS(mp);
		agf->agf_freeblks = cpu_to_be32(tmpsize);
		agf->agf_longest = cpu_to_be32(tmpsize);
		error = xfs_bwrite(mp, bp);
		if (error) {
			goto error0;
		}
		/*
		 * AG inode header block
		 */
		bp = xfs_buf_get(mp->m_ddev_targp,
				 XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
				 XFS_FSS_TO_BB(mp, 1), XBF_LOCK | XBF_MAPPED);
		agi = XFS_BUF_TO_AGI(bp);
		memset(agi, 0, mp->m_sb.sb_sectsize);
		agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
		agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
		agi->agi_seqno = cpu_to_be32(agno);
		agi->agi_length = cpu_to_be32(agsize);
		agi->agi_count = 0;
		agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
		agi->agi_level = cpu_to_be32(1);
		agi->agi_freecount = 0;
		agi->agi_newino = cpu_to_be32(NULLAGINO);
		agi->agi_dirino = cpu_to_be32(NULLAGINO);
		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
			agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
		error = xfs_bwrite(mp, bp);
		if (error) {
			goto error0;
		}
		/*
		 * BNO btree root block
		 */
		bp = xfs_buf_get(mp->m_ddev_targp,
			XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)),
			BTOBB(mp->m_sb.sb_blocksize),
			XBF_LOCK | XBF_MAPPED);
		block = XFS_BUF_TO_BLOCK(bp);
		memset(block, 0, mp->m_sb.sb_blocksize);
		block->bb_magic = cpu_to_be32(XFS_ABTB_MAGIC);
		block->bb_level = 0;
		block->bb_numrecs = cpu_to_be16(1);
		block->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
		block->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
		arec = XFS_ALLOC_REC_ADDR(mp, block, 1);
		arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
		arec->ar_blockcount = cpu_to_be32(
			agsize - be32_to_cpu(arec->ar_startblock));
		error = xfs_bwrite(mp, bp);
		if (error) {
			goto error0;
		}
		/*
		 * CNT btree root block
		 */
		bp = xfs_buf_get(mp->m_ddev_targp,
			XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)),
			BTOBB(mp->m_sb.sb_blocksize),
			XBF_LOCK | XBF_MAPPED);
		block = XFS_BUF_TO_BLOCK(bp);
		memset(block, 0, mp->m_sb.sb_blocksize);
		block->bb_magic = cpu_to_be32(XFS_ABTC_MAGIC);
		block->bb_level = 0;
		block->bb_numrecs = cpu_to_be16(1);
		block->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
		block->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
		arec = XFS_ALLOC_REC_ADDR(mp, block, 1);
		arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
		arec->ar_blockcount = cpu_to_be32(
			agsize - be32_to_cpu(arec->ar_startblock));
		nfree += be32_to_cpu(arec->ar_blockcount);
		error = xfs_bwrite(mp, bp);
		if (error) {
			goto error0;
		}
		/*
		 * INO btree root block
		 */
		bp = xfs_buf_get(mp->m_ddev_targp,
			XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)),
			BTOBB(mp->m_sb.sb_blocksize),
			XBF_LOCK | XBF_MAPPED);
		block = XFS_BUF_TO_BLOCK(bp);
		memset(block, 0, mp->m_sb.sb_blocksize);
		block->bb_magic = cpu_to_be32(XFS_IBT_MAGIC);
		block->bb_level = 0;
		block->bb_numrecs = 0;
		block->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
		block->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
		error = xfs_bwrite(mp, bp);
		if (error) {
			goto error0;
		}
	}
	xfs_trans_agblocks_delta(tp, nfree);
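	/*
	 * Descriptive note: the loop above counts down from the new last
	 * AG, so at this point agno has reached oagcount - 1, i.e. the
	 * old last AG.  The code below grows that AG in place when the
	 * new size does not land exactly on an AG boundary.
	 */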
	/*
	 * There are new blocks in the old last a.g.
	 */
	if (new) {
		/*
		 * Change the agi length.
		 */
		error = xfs_ialloc_read_agi(mp, tp, agno, &bp);
		if (error) {
			goto error0;
		}
		ASSERT(bp);
		agi = XFS_BUF_TO_AGI(bp);
		be32_add_cpu(&agi->agi_length, new);
		ASSERT(nagcount == oagcount ||
		       be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks);
		xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);
		/*
		 * Change agf length.
		 */
		error = xfs_alloc_read_agf(mp, tp, agno, 0, &bp);
		if (error) {
			goto error0;
		}
		ASSERT(bp);
		agf = XFS_BUF_TO_AGF(bp);
		be32_add_cpu(&agf->agf_length, new);
		ASSERT(be32_to_cpu(agf->agf_length) ==
		       be32_to_cpu(agi->agi_length));

		xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);
		/*
		 * Free the new space.
		 */
		error = xfs_free_extent(tp, XFS_AGB_TO_FSB(mp, agno,
			be32_to_cpu(agf->agf_length) - new), new);
		if (error) {
			goto error0;
		}
	}

	/*
	 * Update changed superblock fields transactionally. These are not
	 * seen by the rest of the world until the transaction commit applies
	 * them atomically to the superblock.
	 */
	if (nagcount > oagcount)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
	if (nb > mp->m_sb.sb_dblocks)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS,
				 nb - mp->m_sb.sb_dblocks);
	if (nfree)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, nfree);
	if (dpct)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
	error = xfs_trans_commit(tp, 0);
	if (error)
		return error;

	/* New allocation groups fully initialized, so update mount struct */
	if (nagimax)
		mp->m_maxagi = nagimax;
	if (mp->m_sb.sb_imax_pct) {
		__uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
		do_div(icount, 100);
		mp->m_maxicount = icount << mp->m_sb.sb_inopblog;
	} else
		mp->m_maxicount = 0;
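	/*
	 * Illustrative numbers for the m_maxicount computation above:
	 * with sb_dblocks = 1000000, sb_imax_pct = 25 and sb_inopblog = 5
	 * (32 inodes per block), icount becomes 250000 blocks and
	 * m_maxicount = 250000 << 5 = 8000000 inodes.
	 */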
	/* update secondary superblocks. */
	for (agno = 1; agno < nagcount; agno++) {
		error = xfs_read_buf(mp, mp->m_ddev_targp,
				XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
				XFS_FSS_TO_BB(mp, 1), 0, &bp);
		if (error) {
			xfs_fs_cmn_err(CE_WARN, mp,
			"error %d reading secondary superblock for ag %d",
				error, agno);
			break;
		}
		xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, XFS_SB_ALL_BITS);
		/*
		 * If we get an error writing out the alternate superblocks,
		 * just issue a warning and continue.  The real work is
		 * already done and committed.
		 */
		if (!(error = xfs_bwrite(mp, bp))) {
			continue;
		} else {
			xfs_fs_cmn_err(CE_WARN, mp,
		"write error %d updating secondary superblock for ag %d",
				error, agno);
			break; /* no point in continuing */
		}
	}
	return 0;

error0:
	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
	return error;
}

static int
xfs_growfs_log_private(
	xfs_mount_t		*mp,	/* mount point for filesystem */
	xfs_growfs_log_t	*in)	/* growfs log input struct */
{
	xfs_extlen_t		nb;

	nb = in->newblocks;
	if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES))
		return XFS_ERROR(EINVAL);
	if (nb == mp->m_sb.sb_logblocks &&
	    in->isint == (mp->m_sb.sb_logstart != 0))
		return XFS_ERROR(EINVAL);
	/*
	 * Moving the log is hard, need new interfaces to sync
	 * the log first, hold off all activity while moving it.
	 * Can have shorter or longer log in the same space,
	 * or transform internal to external log or vice versa.
	 */
	return XFS_ERROR(ENOSYS);
}

/*
 * Protected versions of the growfs functions; they acquire and release
 * locks on the mount point.  Exported through the ioctls
 * XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG and XFS_IOC_FSGROWFSRT.
 */

int
xfs_growfs_data(
	xfs_mount_t		*mp,
	xfs_growfs_data_t	*in)
{
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return XFS_ERROR(EPERM);
	if (!mutex_trylock(&mp->m_growlock))
		return XFS_ERROR(EWOULDBLOCK);
	error = xfs_growfs_data_private(mp, in);
	mutex_unlock(&mp->m_growlock);
	return error;
}

int
xfs_growfs_log(
	xfs_mount_t		*mp,
	xfs_growfs_log_t	*in)
{
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return XFS_ERROR(EPERM);
	if (!mutex_trylock(&mp->m_growlock))
		return XFS_ERROR(EWOULDBLOCK);
	error = xfs_growfs_log_private(mp, in);
	mutex_unlock(&mp->m_growlock);
	return error;
}

/*
 * exported through ioctl XFS_IOC_FSCOUNTS
 */

int
xfs_fs_counts(
	xfs_mount_t		*mp,
	xfs_fsop_counts_t	*cnt)
{
	xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
	spin_lock(&mp->m_sb_lock);
	cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
	cnt->freertx = mp->m_sb.sb_frextents;
	cnt->freeino = mp->m_sb.sb_ifree;
	cnt->allocino = mp->m_sb.sb_icount;
	spin_unlock(&mp->m_sb_lock);
	return 0;
}
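/*
 * Illustrative userspace caller (hypothetical, not part of this file;
 * error handling omitted):
 *
 *	xfs_fsop_counts_t cnt;
 *
 *	if (ioctl(fd, XFS_IOC_FSCOUNTS, &cnt) == 0)
 *		printf("free blocks %llu free inodes %llu\n",
 *			(unsigned long long)cnt.freedata,
 *			(unsigned long long)cnt.freeino);
 */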
/*
 * exported through ioctl XFS_IOC_SET_RESBLKS & XFS_IOC_GET_RESBLKS
 *
 * xfs_reserve_blocks is called to set m_resblks
 * in the in-core mount table. The number of unused reserved blocks
 * is kept in m_resblks_avail.
 *
 * Reserve the requested number of blocks if available. Otherwise return
 * as many as possible to satisfy the request. The actual number
 * reserved is returned in outval.
 *
 * A null inval pointer indicates that only the current reserved blocks
 * available should be returned; no settings are changed.
 */

int
xfs_reserve_blocks(
	xfs_mount_t		*mp,
	__uint64_t		*inval,
	xfs_fsop_resblks_t	*outval)
{
	__int64_t		lcounter, delta, fdblks_delta;
	__uint64_t		request;

	/* If inval is null, report current values and return */
	if (inval == (__uint64_t *)NULL) {
		if (!outval)
			return EINVAL;
		outval->resblks = mp->m_resblks;
		outval->resblks_avail = mp->m_resblks_avail;
		return 0;
	}

	request = *inval;

	/*
	 * With per-cpu counters, this becomes an interesting problem.
	 * We need to work out whether we are freeing or allocating blocks
	 * first, then we can do the modification as necessary.
	 *
	 * We do this under the m_sb_lock so that if we are near ENOSPC,
	 * we will hold out any changes while we work out what to do.
	 * This means that the amount of free space can change while we
	 * do this, so we need to retry if we end up trying to reserve
	 * more space than is available.
	 *
	 * We also use the xfs_mod_incore_sb() interface so that we don't
	 * have to care about whether per-cpu counters are enabled,
	 * disabled or even compiled in....
	 */
retry:
	spin_lock(&mp->m_sb_lock);
	xfs_icsb_sync_counters_locked(mp, 0);

	/*
	 * If our previous reservation was larger than the current value,
	 * then move any unused blocks back to the free pool.
	 */
	fdblks_delta = 0;
	if (mp->m_resblks > request) {
		lcounter = mp->m_resblks_avail - request;
		if (lcounter > 0) {		/* release unused blocks */
			fdblks_delta = lcounter;
			mp->m_resblks_avail -= lcounter;
		}
		mp->m_resblks = request;
	} else {
		__int64_t	free;

		free = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
		if (!free)
			goto out;	/* ENOSPC and fdblks_delta = 0 */

		delta = request - mp->m_resblks;
		lcounter = free - delta;
		if (lcounter < 0) {
			/* We can't satisfy the request, just get what we can */
			mp->m_resblks += free;
			mp->m_resblks_avail += free;
			fdblks_delta = -free;
		} else {
			fdblks_delta = -delta;
			mp->m_resblks = request;
			mp->m_resblks_avail += delta;
		}
	}
out:
	if (outval) {
		outval->resblks = mp->m_resblks;
		outval->resblks_avail = mp->m_resblks_avail;
	}
	spin_unlock(&mp->m_sb_lock);

	if (fdblks_delta) {
		/*
		 * If we are putting blocks back here, m_resblks_avail is
		 * already at its max so this will put it in the free pool.
		 *
		 * If we need space, we'll either succeed in getting it
		 * from the free block count or we'll get an ENOSPC. If
		 * we get an ENOSPC, it means things changed while we were
		 * calculating fdblks_delta and so we should try again to
		 * see if there is anything left to reserve.
		 *
		 * Don't set the reserved flag here - we don't want to reserve
		 * the extra reserve blocks from the reserve.....
		 */
		int error;
		error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
						 fdblks_delta, 0);
		if (error == ENOSPC)
			goto retry;
	}
	return 0;
}
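/*
 * Illustrative userspace usage of the ioctls named above (hypothetical
 * values, error handling omitted): the same struct is used to set a
 * reservation and to read it back:
 *
 *	xfs_fsop_resblks_t res = { .resblks = 8192 };
 *
 *	ioctl(fd, XFS_IOC_SET_RESBLKS, &res);
 *	ioctl(fd, XFS_IOC_GET_RESBLKS, &res);
 */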
/*
 * Dump a transaction into the log that contains no real change. This is needed
 * to be able to make the log dirty or stamp the current tail LSN into the log
 * during the covering operation.
 *
 * We cannot use an inode here for this - that will push dirty state back up
 * into the VFS and then periodic inode flushing will prevent log covering from
 * making progress. Hence we log a field in the superblock instead.
 */
int
xfs_fs_log_dummy(
	xfs_mount_t	*mp,
	int		flags)
{
	xfs_trans_t	*tp;
	int		error;

	tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP);
	error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
				  XFS_DEFAULT_LOG_COUNT);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	/* log the UUID because it is an unchanging field */
	xfs_mod_sb(tp, XFS_SB_UUID);
	if (flags & SYNC_WAIT)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp, 0);
}

int
xfs_fs_goingdown(
	xfs_mount_t	*mp,
	__uint32_t	inflags)
{
	switch (inflags) {
	case XFS_FSOP_GOING_FLAGS_DEFAULT: {
		struct super_block *sb = freeze_bdev(mp->m_super->s_bdev);

		if (sb && !IS_ERR(sb)) {
			xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
			thaw_bdev(sb->s_bdev, sb);
		}

		break;
	}
	case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
		xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
		break;
	case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH:
		xfs_force_shutdown(mp,
				SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR);
		break;
	default:
		return XFS_ERROR(EINVAL);
	}

	return 0;
}
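/*
 * Illustrative userspace trigger for the shutdown paths above
 * (hypothetical sketch, for test environments only - this forcibly
 * shuts down the filesystem):
 *
 *	__uint32_t flags = XFS_FSOP_GOING_FLAGS_LOGFLUSH;
 *
 *	ioctl(fd, XFS_IOC_GOINGDOWN, &flags);
 */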