// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_icache.h"
#include "xfs_rtbitmap.h"
#include "xfs_rtgroup.h"
#include "xfs_sb.h"

struct kmem_cache	*xfs_trans_cache;

#if defined(CONFIG_TRACEPOINTS)
static void
xfs_trans_trace_reservations(
	struct xfs_mount	*mp)
{
	struct xfs_trans_res	*res;
	struct xfs_trans_res	*end_res;
	int			i;

	res = (struct xfs_trans_res *)M_RES(mp);
	end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
	for (i = 0; res < end_res; i++, res++)
		trace_xfs_trans_resv_calc(mp, i, res);
}
#else
# define xfs_trans_trace_reservations(mp)
#endif

/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
	struct xfs_mount	*mp)
{
	xfs_trans_resv_calc(mp, M_RES(mp));
	xfs_trans_trace_reservations(mp);
}

/*
 * Free the transaction structure. If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
	struct xfs_trans	*tp)
{
	xfs_extent_busy_sort(&tp->t_busy);
	xfs_extent_busy_clear(&tp->t_busy, false);

	trace_xfs_trans_free(tp, _RET_IP_);
	xfs_trans_clear_context(tp);
	if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
		sb_end_intwrite(tp->t_mountp->m_super);
	xfs_trans_free_dqinfo(tp);
	kmem_cache_free(xfs_trans_cache, tp);
}

/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction. The remaining
 * unused block and rt extent reservations are also inherited. This
 * implies that the original transaction is no longer allowed to allocate
 * blocks. Locks and log items, however, are not inherited. They must
 * be added to the new transaction explicitly.
 */
STATIC struct xfs_trans *
xfs_trans_dup(
	struct xfs_trans	*tp)
{
	struct xfs_trans	*ntp;

	trace_xfs_trans_dup(tp, _RET_IP_);

	ntp = kmem_cache_zalloc(xfs_trans_cache, GFP_KERNEL | __GFP_NOFAIL);

	/*
	 * Initialize the new transaction structure.
	 */
	ntp->t_mountp = tp->t_mountp;
	INIT_LIST_HEAD(&ntp->t_items);
	INIT_LIST_HEAD(&ntp->t_busy);
	INIT_LIST_HEAD(&ntp->t_dfops);
	ntp->t_highest_agno = NULLAGNUMBER;

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(tp->t_ticket != NULL);

	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
		       (tp->t_flags & XFS_TRANS_RESERVE) |
		       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT) |
		       (tp->t_flags & XFS_TRANS_RES_FDBLKS);
	/* We gave our writer reference to the new transaction */
	tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);

	ASSERT(tp->t_blk_res >= tp->t_blk_res_used);
	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
	tp->t_blk_res = tp->t_blk_res_used;

	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
	tp->t_rtx_res = tp->t_rtx_res_used;

	xfs_trans_switch_context(tp, ntp);

	/* move deferred ops over to the new tp */
	xfs_defer_move(ntp, tp);

	xfs_trans_dup_dqinfo(tp, ntp);
	return ntp;
}

/*
 * This is called to reserve free disk blocks and log space for the given
 * transaction before allocating any resources within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 *
 * This does not do quota reservations. That typically is done by the caller
 * afterwards.
 */
static int
xfs_trans_reserve(
	struct xfs_trans	*tp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents)
{
	struct xfs_mount	*mp = tp->t_mountp;
	int			error = 0;
	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	ASSERT(resp->tr_logres > 0);

	/*
	 * Attempt to reserve the needed disk blocks by decrementing the number
	 * needed from the number available. This will fail if the count would
	 * go below zero.
	 */
	if (blocks > 0) {
		error = xfs_dec_fdblocks(mp, blocks, rsvd);
		if (error != 0)
			return -ENOSPC;
		tp->t_blk_res += blocks;
	}

	/*
	 * Reserve the log space needed for this transaction.
	 */
	if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES)
		tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
	error = xfs_log_reserve(mp, resp->tr_logres, resp->tr_logcount,
			&tp->t_ticket, (tp->t_flags & XFS_TRANS_PERM_LOG_RES));
	if (error)
		goto undo_blocks;

	tp->t_log_res = resp->tr_logres;
	tp->t_log_count = resp->tr_logcount;

	/*
	 * Attempt to reserve the needed realtime extents by decrementing the
	 * number needed from the number available. This will fail if the
	 * count would go below zero.
	 */
	if (rtextents > 0) {
		error = xfs_dec_frextents(mp, rtextents);
		if (error) {
			error = -ENOSPC;
			goto undo_log;
		}
		tp->t_rtx_res += rtextents;
	}

	return 0;

undo_log:
	xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
	tp->t_ticket = NULL;
	tp->t_log_res = 0;
	tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
undo_blocks:
	if (blocks > 0) {
		xfs_add_fdblocks(mp, blocks);
		tp->t_blk_res = 0;
	}
	return error;
}

static struct xfs_trans *
__xfs_trans_alloc(
	struct xfs_mount	*mp,
	uint			flags)
{
	struct xfs_trans	*tp;

	ASSERT(!(flags & XFS_TRANS_RES_FDBLKS) || xfs_has_lazysbcount(mp));

	tp = kmem_cache_zalloc(xfs_trans_cache, GFP_KERNEL | __GFP_NOFAIL);
	if (!(flags & XFS_TRANS_NO_WRITECOUNT))
		sb_start_intwrite(mp->m_super);
	xfs_trans_set_context(tp);
	tp->t_flags = flags;
	tp->t_mountp = mp;
	INIT_LIST_HEAD(&tp->t_items);
	INIT_LIST_HEAD(&tp->t_busy);
	INIT_LIST_HEAD(&tp->t_dfops);
	tp->t_highest_agno = NULLAGNUMBER;
	return tp;
}

int
xfs_trans_alloc(
	struct xfs_mount	*mp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents,
	uint			flags,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	bool			want_retry = true;
	int			error;

	ASSERT(resp->tr_logres > 0);

	/*
	 * Allocate the handle before we do our freeze accounting and set up
	 * the GFP_NOFS allocation context so that we avoid lockdep false
	 * positives by doing GFP_KERNEL allocations inside
	 * sb_start_intwrite().
	 */
retry:
	WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
	tp = __xfs_trans_alloc(mp, flags);
	error = xfs_trans_reserve(tp, resp, blocks, rtextents);
	if (error == -ENOSPC && want_retry) {
		xfs_trans_cancel(tp);

		/*
		 * We weren't able to reserve enough space for the transaction.
		 * Flush the other speculative space allocations to free space.
		 * Do not perform a synchronous scan because callers can hold
		 * other locks.
		 */
		error = xfs_blockgc_flush_all(mp);
		if (error)
			return error;
		want_retry = false;
		goto retry;
	}
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	trace_xfs_trans_alloc(tp, _RET_IP_);

	*tpp = tp;
	return 0;
}
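
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * typical life cycle of a transaction obtained from xfs_trans_alloc().
 * The tr_itruncate reservation and the modification step are placeholders
 * for whatever the caller actually needs:
 *
 *	struct xfs_trans	*tp;
 *	int			error;
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
 *	if (error)
 *		return error;
 *	...lock and join objects to tp, modify them, log the changes...
 *	return xfs_trans_commit(tp);	(or xfs_trans_cancel(tp) on error)
 */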

/*
 * Create an empty transaction with no reservation. This is a defensive
 * mechanism for routines that query metadata without actually modifying them --
 * if the metadata being queried is somehow cross-linked (think a btree block
 * pointer that points higher in the tree), we risk deadlock. However, blocks
 * grabbed as part of a transaction can be re-grabbed. The verifiers will
 * notice the corrupt block and the operation will fail back to userspace
 * without deadlocking.
 *
 * Note the zero-length reservation; this transaction MUST be cancelled without
 * any dirty data.
 *
 * Callers should obtain freeze protection to avoid a conflict with fs freezing
 * where we can be grabbing buffers at the same time that freeze is trying to
 * drain the buffer LRU list.
 */
struct xfs_trans *
xfs_trans_alloc_empty(
	struct xfs_mount	*mp)
{
	return __xfs_trans_alloc(mp, XFS_TRANS_NO_WRITECOUNT);
}
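
/*
 * Usage sketch (illustrative only): an empty transaction is used for
 * read-only metadata walks and, per the comment above, must always be
 * cancelled, never committed:
 *
 *	struct xfs_trans	*tp = xfs_trans_alloc_empty(mp);
 *
 *	...grab and inspect buffers or inodes through tp...
 *	xfs_trans_cancel(tp);	(the transaction must stay clean)
 */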

/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 *
 * Because we may not be keeping track of allocated/free inodes and
 * used filesystem blocks in the superblock, we do not mark the
 * superblock dirty in this transaction if we modify these fields.
 * We still need to update the transaction deltas so that they get
 * applied to the incore superblock, but we don't want them to
 * cause the superblock to get locked and logged if these are the
 * only fields in the superblock that the transaction modifies.
 */
void
xfs_trans_mod_sb(
	xfs_trans_t	*tp,
	uint		field,
	int64_t		delta)
{
	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
	xfs_mount_t	*mp = tp->t_mountp;

	switch (field) {
	case XFS_TRANS_SB_ICOUNT:
		tp->t_icount_delta += delta;
		if (xfs_has_lazysbcount(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_IFREE:
		tp->t_ifree_delta += delta;
		if (xfs_has_lazysbcount(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FDBLOCKS:
		/*
		 * Track the number of blocks allocated in the transaction.
		 * Make sure it does not exceed the number reserved. If so,
		 * shutdown as this can lead to accounting inconsistency.
		 */
		if (delta < 0) {
			tp->t_blk_res_used += (uint)-delta;
			if (tp->t_blk_res_used > tp->t_blk_res)
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		} else if (delta > 0 && (tp->t_flags & XFS_TRANS_RES_FDBLKS)) {
			int64_t	blkres_delta;

			/*
			 * Return freed blocks directly to the reservation
			 * instead of the global pool, being careful not to
			 * overflow the trans counter. This is used to preserve
			 * reservation across chains of transaction rolls that
			 * repeatedly free and allocate blocks.
			 */
			blkres_delta = min_t(int64_t, delta,
					     UINT_MAX - tp->t_blk_res);
			tp->t_blk_res += blkres_delta;
			delta -= blkres_delta;
		}
		tp->t_fdblocks_delta += delta;
		if (xfs_has_lazysbcount(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_RES_FDBLOCKS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter. This should only
		 * be applied to the on-disk superblock.
		 */
		tp->t_res_fdblocks_delta += delta;
		if (xfs_has_lazysbcount(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FREXTENTS:
		/*
		 * Track the number of blocks allocated in the
		 * transaction. Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_rtx_res_used += (uint)-delta;
			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
		}
		tp->t_frextents_delta += delta;
		if (xfs_has_rtgroups(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_RES_FREXTENTS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter. This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_frextents_delta += delta;
		if (xfs_has_rtgroups(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_DBLOCKS:
		tp->t_dblocks_delta += delta;
		break;
	case XFS_TRANS_SB_AGCOUNT:
		ASSERT(delta > 0);
		tp->t_agcount_delta += delta;
		break;
	case XFS_TRANS_SB_IMAXPCT:
		tp->t_imaxpct_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSIZE:
		tp->t_rextsize_delta += delta;
		break;
	case XFS_TRANS_SB_RBMBLOCKS:
		tp->t_rbmblocks_delta += delta;
		break;
	case XFS_TRANS_SB_RBLOCKS:
		tp->t_rblocks_delta += delta;
		break;
	case XFS_TRANS_SB_REXTENTS:
		tp->t_rextents_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSLOG:
		tp->t_rextslog_delta += delta;
		break;
	case XFS_TRANS_SB_RGCOUNT:
		ASSERT(delta > 0);
		tp->t_rgcount_delta += delta;
		break;
	default:
		ASSERT(0);
		return;
	}

	tp->t_flags |= flags;
}
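
/*
 * Usage sketch (illustrative only): a caller that allocates @len blocks
 * against its reservation records a negative free-space delta, which is
 * folded into the superblock counters at commit:
 *
 *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -(int64_t)len);
 */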

/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
	xfs_trans_t	*tp)
{
	struct xfs_dsb	*sbp;
	struct xfs_buf	*bp;
	int		whole = 0;

	bp = xfs_trans_getsb(tp);
	sbp = bp->b_addr;

	/*
	 * Only update the superblock counters if we are logging them.
	 */
	if (!xfs_has_lazysbcount((tp->t_mountp))) {
		if (tp->t_icount_delta)
			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
		if (tp->t_ifree_delta)
			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
		if (tp->t_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
		if (tp->t_res_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
	}

	/*
	 * sb_frextents was added to the lazy sb counters when the rt groups
	 * feature was introduced. This is possible because we know that all
	 * kernels supporting rtgroups will also recompute frextents from the
	 * realtime bitmap.
	 *
	 * For older file systems, updating frextents requires careful handling
	 * because we cannot rely on log recovery in older kernels to recompute
	 * the value from the rtbitmap. This means that the ondisk frextents
	 * must be consistent with the rtbitmap.
	 *
	 * Therefore, log the frextents change to the ondisk superblock and
	 * update the incore superblock so that future calls to xfs_log_sb
	 * write the correct value ondisk.
	 */
	if ((tp->t_frextents_delta || tp->t_res_frextents_delta) &&
	    !xfs_has_rtgroups(tp->t_mountp)) {
		struct xfs_mount	*mp = tp->t_mountp;
		int64_t			rtxdelta;

		rtxdelta = tp->t_frextents_delta + tp->t_res_frextents_delta;

		spin_lock(&mp->m_sb_lock);
		be64_add_cpu(&sbp->sb_frextents, rtxdelta);
		mp->m_sb.sb_frextents += rtxdelta;
		spin_unlock(&mp->m_sb_lock);
	}

	if (tp->t_dblocks_delta) {
		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
		whole = 1;
	}
	if (tp->t_agcount_delta) {
		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
		whole = 1;
	}
	if (tp->t_imaxpct_delta) {
		sbp->sb_imax_pct += tp->t_imaxpct_delta;
		whole = 1;
	}
	if (tp->t_rextsize_delta) {
		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);

		/*
		 * Because the ondisk sb records rtgroup size in units of rt
		 * extents, any time we update the rt extent size we have to
		 * recompute the ondisk rtgroup block log. The incore values
		 * will be recomputed in xfs_trans_unreserve_and_mod_sb.
		 */
		if (xfs_has_rtgroups(tp->t_mountp)) {
			sbp->sb_rgblklog = xfs_compute_rgblklog(
						be32_to_cpu(sbp->sb_rgextents),
						be32_to_cpu(sbp->sb_rextsize));
		}
		whole = 1;
	}
	if (tp->t_rbmblocks_delta) {
		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
		whole = 1;
	}
	if (tp->t_rblocks_delta) {
		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
		whole = 1;
	}
	if (tp->t_rextents_delta) {
		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
		whole = 1;
	}
	if (tp->t_rextslog_delta) {
		sbp->sb_rextslog += tp->t_rextslog_delta;
		whole = 1;
	}
	if (tp->t_rgcount_delta) {
		be32_add_cpu(&sbp->sb_rgcount, tp->t_rgcount_delta);
		whole = 1;
	}

	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
	if (whole)
		/*
		 * Log the whole thing, the fields are noncontiguous.
		 */
		xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb) - 1);
	else
		/*
		 * Since all the modifiable fields are contiguous, we
		 * can get away with this.
		 */
		xfs_trans_log_buf(tp, bp, offsetof(struct xfs_dsb, sb_icount),
				  offsetof(struct xfs_dsb, sb_frextents) +
				  sizeof(sbp->sb_frextents) - 1);
}

/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
 * and apply superblock counter changes to the in-core superblock. The
 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 * applied to the in-core superblock because that has already been done.
 *
 * If we are not logging superblock counters, then the inode allocated/free and
 * used block counts are not updated in the on disk superblock. In this case,
 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 * still need to update the incore superblock with the changes.
 *
 * Deltas for the inode count are +/-64, hence we use a large batch size of 128
 * so we don't need to take the counter lock on every update.
 */
#define XFS_ICOUNT_BATCH	128

void
xfs_trans_unreserve_and_mod_sb(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	int64_t			blkdelta = tp->t_blk_res;
	int64_t			rtxdelta = tp->t_rtx_res;
	int64_t			idelta = 0;
	int64_t			ifreedelta = 0;

	/*
	 * Calculate the deltas.
	 *
	 * t_fdblocks_delta and t_frextents_delta can be positive or negative:
	 *
	 *  - positive values indicate blocks freed in the transaction.
	 *  - negative values indicate blocks allocated in the transaction
	 *
	 * Negative values can only happen if the transaction has a block
	 * reservation that covers the allocated block. The end result is
	 * that the calculated delta values must always be positive and we
	 * can only put back previously allocated or reserved blocks here.
	 */
	ASSERT(tp->t_blk_res || tp->t_fdblocks_delta >= 0);
	if (xfs_has_lazysbcount(mp) || (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		blkdelta += tp->t_fdblocks_delta;
		ASSERT(blkdelta >= 0);
	}

	ASSERT(tp->t_rtx_res || tp->t_frextents_delta >= 0);
	if (xfs_has_rtgroups(mp) || (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		rtxdelta += tp->t_frextents_delta;
		ASSERT(rtxdelta >= 0);
	}

	if (xfs_has_lazysbcount(mp) || (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		idelta = tp->t_icount_delta;
		ifreedelta = tp->t_ifree_delta;
	}

	/* apply the per-cpu counters */
	if (blkdelta)
		xfs_add_fdblocks(mp, blkdelta);

	if (idelta)
		percpu_counter_add_batch(&mp->m_icount, idelta,
					 XFS_ICOUNT_BATCH);

	if (ifreedelta)
		percpu_counter_add(&mp->m_ifree, ifreedelta);

	if (rtxdelta)
		xfs_add_frextents(mp, rtxdelta);

	if (!(tp->t_flags & XFS_TRANS_SB_DIRTY))
		return;

	/* apply remaining deltas */
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_fdblocks += tp->t_fdblocks_delta + tp->t_res_fdblocks_delta;
	mp->m_sb.sb_icount += idelta;
	mp->m_sb.sb_ifree += ifreedelta;
	/*
	 * Do not touch sb_frextents here because it is handled in
	 * xfs_trans_apply_sb_deltas for file systems where it isn't a lazy
	 * counter anyway.
	 */
	mp->m_sb.sb_dblocks += tp->t_dblocks_delta;
	mp->m_sb.sb_agcount += tp->t_agcount_delta;
	mp->m_sb.sb_imax_pct += tp->t_imaxpct_delta;
	if (tp->t_rextsize_delta)
		xfs_mount_sb_set_rextsize(mp, &mp->m_sb,
				mp->m_sb.sb_rextsize + tp->t_rextsize_delta);
	mp->m_sb.sb_rbmblocks += tp->t_rbmblocks_delta;
	mp->m_sb.sb_rblocks += tp->t_rblocks_delta;
	mp->m_sb.sb_rextents += tp->t_rextents_delta;
	mp->m_sb.sb_rextslog += tp->t_rextslog_delta;
	mp->m_sb.sb_rgcount += tp->t_rgcount_delta;
	spin_unlock(&mp->m_sb_lock);

	/*
	 * Debug checks outside of the spinlock so they don't lock up the
	 * machine if they fail.
	 */
	ASSERT(mp->m_sb.sb_imax_pct >= 0);
	ASSERT(mp->m_sb.sb_rextslog >= 0);
}

/* Add the given log item to the transaction's list of log items. */
void
xfs_trans_add_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	ASSERT(lip->li_log == tp->t_mountp->m_log);
	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
	ASSERT(list_empty(&lip->li_trans));
	ASSERT(!test_bit(XFS_LI_DIRTY, &lip->li_flags));

	list_add_tail(&lip->li_trans, &tp->t_items);
	trace_xfs_trans_add_item(tp, _RET_IP_);
}

/*
 * Unlink the log item from the transaction. The log item is no longer
 * considered dirty in this transaction, as the linked transaction has
 * finished, either by abort or commit completion.
 */
void
xfs_trans_del_item(
	struct xfs_log_item	*lip)
{
	clear_bit(XFS_LI_DIRTY, &lip->li_flags);
	list_del_init(&lip->li_trans);
}

/* Detach and unlock all of the items in a transaction */
static void
xfs_trans_free_items(
	struct xfs_trans	*tp,
	bool			abort)
{
	struct xfs_log_item	*lip, *next;

	trace_xfs_trans_free_items(tp, _RET_IP_);

	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		xfs_trans_del_item(lip);
		if (abort) {
			trace_xfs_trans_free_abort(lip);
			set_bit(XFS_LI_ABORTED, &lip->li_flags);
		}
		if (lip->li_ops->iop_release)
			lip->li_ops->iop_release(lip);
	}
}

/*
 * Sort transaction items prior to running precommit operations. This will
 * attempt to order the items such that they will always be locked in the same
 * order. Items that have no sort function are moved to the end of the list
 * and so are locked last.
 *
 * This may need refinement as different types of objects add sort functions.
 *
 * Function is more complex than it needs to be because we are comparing 64 bit
 * values and the function only returns 32 bit values.
 */
static int
xfs_trans_precommit_sort(
	void			*unused_arg,
	const struct list_head	*a,
	const struct list_head	*b)
{
	struct xfs_log_item	*lia = container_of(a,
					struct xfs_log_item, li_trans);
	struct xfs_log_item	*lib = container_of(b,
					struct xfs_log_item, li_trans);
	int64_t			diff;

	/*
	 * If both items are non-sortable, leave them alone. If only one is
	 * sortable, move the non-sortable item towards the end of the list.
	 */
	if (!lia->li_ops->iop_sort && !lib->li_ops->iop_sort)
		return 0;
	if (!lia->li_ops->iop_sort)
		return 1;
	if (!lib->li_ops->iop_sort)
		return -1;

	diff = lia->li_ops->iop_sort(lia) - lib->li_ops->iop_sort(lib);
	if (diff < 0)
		return -1;
	if (diff > 0)
		return 1;
	return 0;
}

/*
 * Run transaction precommit functions.
 *
 * If there is an error in any of the callouts, then stop immediately and
 * trigger a shutdown to abort the transaction. There is no recovery possible
 * from errors at this point as the transaction is dirty....
 */
static int
xfs_trans_run_precommits(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_log_item	*lip, *n;
	int			error = 0;

	/*
	 * Sort the item list to avoid ABBA deadlocks with other transactions
	 * running precommit operations that lock multiple shared items such as
	 * inode cluster buffers.
	 */
	list_sort(NULL, &tp->t_items, xfs_trans_precommit_sort);

	/*
	 * Precommit operations can remove the log item from the transaction
	 * if the log item exists purely to delay modifications until they
	 * can be ordered against other operations. Hence we have to use
	 * list_for_each_entry_safe() here.
	 */
	list_for_each_entry_safe(lip, n, &tp->t_items, li_trans) {
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;
		if (lip->li_ops->iop_precommit) {
			error = lip->li_ops->iop_precommit(tp, lip);
			if (error)
				break;
		}
	}
	if (error)
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return error;
}

/*
 * Commit the given transaction to the log.
 *
 * The XFS disk error handling mechanism is not based on a typical
 * transaction abort mechanism. Logically, after the filesystem
 * gets marked 'SHUTDOWN', we can't let any new transactions
 * be durable - i.e. committed to disk - because some metadata might
 * be inconsistent. In such cases, this returns an error, and the
 * caller may assume that all locked objects joined to the transaction
 * have already been unlocked as if the commit had succeeded.
 * Do not reference the transaction structure after this call.
 */
static int
__xfs_trans_commit(
	struct xfs_trans	*tp,
	bool			regrant)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xlog		*log = mp->m_log;
	xfs_csn_t		commit_seq = 0;
	int			error = 0;
	int			sync = tp->t_flags & XFS_TRANS_SYNC;

	trace_xfs_trans_commit(tp, _RET_IP_);

	/*
	 * Commit per-transaction changes that are not already tracked through
	 * log items. This can add dirty log items to the transaction.
	 */
	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
		xfs_trans_apply_sb_deltas(tp);
	xfs_trans_apply_dquot_deltas(tp);

	error = xfs_trans_run_precommits(tp);
	if (error)
		goto out_unreserve;

	/*
	 * If there is nothing to be logged by the transaction,
	 * then unlock all of the items associated with the
	 * transaction and free the transaction structure.
	 * Also make sure to return any reserved blocks to
	 * the free pool.
	 */
	if (!(tp->t_flags & XFS_TRANS_DIRTY))
		goto out_unreserve;

	/*
	 * We must check against log shutdown here because we cannot abort log
	 * items and leave them dirty, inconsistent and unpinned in memory while
	 * the log is active. This leaves them open to being written back to
	 * disk, and that will lead to on-disk corruption.
	 */
	if (xlog_is_shutdown(log)) {
		error = -EIO;
		goto out_unreserve;
	}

	ASSERT(tp->t_ticket != NULL);

	xlog_cil_commit(log, tp, &commit_seq, regrant);

	xfs_trans_free(tp);

	/*
	 * If the transaction needs to be synchronous, then force the
	 * log out now and wait for it.
	 */
	if (sync) {
		error = xfs_log_force_seq(mp, commit_seq, XFS_LOG_SYNC, NULL);
		XFS_STATS_INC(mp, xs_trans_sync);
	} else {
		XFS_STATS_INC(mp, xs_trans_async);
	}

	return error;

out_unreserve:
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * It is indeed possible for the transaction to be not dirty but
	 * the dqinfo portion to be. All that means is that we have some
	 * (non-persistent) quota reservations that need to be unreserved.
	 */
	xfs_trans_unreserve_and_mod_dquots(tp, true);
	if (tp->t_ticket) {
		if (regrant && !xlog_is_shutdown(log))
			xfs_log_ticket_regrant(log, tp->t_ticket);
		else
			xfs_log_ticket_ungrant(log, tp->t_ticket);
		tp->t_ticket = NULL;
	}
	xfs_trans_free_items(tp, !!error);
	xfs_trans_free(tp);

	XFS_STATS_INC(mp, xs_trans_empty);
	return error;
}

int
xfs_trans_commit(
	struct xfs_trans	*tp)
{
	/*
	 * Finish deferred items on final commit. Only permanent transactions
	 * should ever have deferred ops.
	 */
	WARN_ON_ONCE(!list_empty(&tp->t_dfops) &&
		     !(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
	if (tp->t_flags & XFS_TRANS_PERM_LOG_RES) {
		int error = xfs_defer_finish_noroll(&tp);
		if (error) {
			xfs_trans_cancel(tp);
			return error;
		}
	}

	return __xfs_trans_commit(tp, false);
}

/*
 * Unlock all of the transaction's items and free the transaction. If the
 * transaction is dirty, we must shut down the filesystem because there is no
 * way to restore them to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release it as
 * well.
 *
 * This is a high level function (equivalent to xfs_trans_commit()) and so can
 * be called after the transaction has effectively been aborted due to the mount
 * being shut down. However, if the mount has not been shut down and the
 * transaction is dirty we will shut the mount down and, in doing so, that
 * guarantees that the log is shut down, too. Hence we don't need to be as
 * careful with shutdown state and dirty items here as we need to be in
 * xfs_trans_commit().
 */
void
xfs_trans_cancel(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xlog		*log = mp->m_log;
	bool			dirty = (tp->t_flags & XFS_TRANS_DIRTY);

	trace_xfs_trans_cancel(tp, _RET_IP_);

	/*
	 * It's never valid to cancel a transaction with deferred ops attached,
	 * because the transaction is effectively dirty. Complain about this
	 * loudly before freeing the in-memory defer items and shutting down the
	 * filesystem.
	 */
	if (!list_empty(&tp->t_dfops)) {
		ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
		dirty = true;
		xfs_defer_cancel(tp);
	}

	/*
	 * See if the caller is relying on us to shut down the filesystem. We
	 * only want an error report if there isn't already a shutdown in
	 * progress, so we only need to check against the mount shutdown state
	 * here.
	 */
	if (dirty && !xfs_is_shutdown(mp)) {
		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}
#ifdef DEBUG
	/* Log items need to be consistent until the log is shut down. */
	if (!dirty && !xlog_is_shutdown(log)) {
		struct xfs_log_item	*lip;

		list_for_each_entry(lip, &tp->t_items, li_trans)
			ASSERT(!xlog_item_is_intent_done(lip));
	}
#endif
	xfs_trans_unreserve_and_mod_sb(tp);
	xfs_trans_unreserve_and_mod_dquots(tp, false);

	if (tp->t_ticket) {
		xfs_log_ticket_ungrant(log, tp->t_ticket);
		tp->t_ticket = NULL;
	}

	xfs_trans_free_items(tp, dirty);
	xfs_trans_free(tp);
}

/*
 * Roll from one trans in the sequence of PERMANENT transactions to the next:
 * permanent transactions are only flushed out when committed with
 * xfs_trans_commit(), but we still want to let chunks of it go to the log as
 * soon as possible. So we commit the chunk we've been working on and get a
 * new transaction to continue.
 */
int
xfs_trans_roll(
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp = *tpp;
	unsigned int		log_res = tp->t_log_res;
	unsigned int		log_count = tp->t_log_count;
	int			error;

	trace_xfs_trans_roll(tp, _RET_IP_);

	ASSERT(log_res > 0);

	/*
	 * Copy the critical parameters from one trans to the next.
	 */
	*tpp = xfs_trans_dup(tp);

	/*
	 * Commit the current transaction.
	 *
	 * If this commit failed, then it'd just unlock those items that are not
	 * marked ihold. That also means that a filesystem shutdown is in
	 * progress. The caller takes the responsibility to cancel the
	 * duplicate transaction that gets returned.
	 */
	error = __xfs_trans_commit(tp, true);
	if (error)
		return error;

	/*
	 * Reserve space in the log for the next transaction.
	 *
	 * This also pushes items in the AIL out to disk if they are taking up
	 * space at the tail of the log that we want to use. This requires that
	 * either nothing be locked across this call, or that anything that is
	 * locked be logged in the prior and the next transactions.
	 */
	tp = *tpp;
	error = xfs_log_regrant(tp->t_mountp, tp->t_ticket);
	if (error)
		return error;
	tp->t_log_res = log_res;
	tp->t_log_count = log_count;
	return 0;
}
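
/*
 * Usage sketch (illustrative only): a long-running change rolls the
 * transaction between steps so each chunk commits to the log while the
 * modified objects stay locked. Items are not carried across the roll, so
 * the caller re-joins them to the new transaction:
 *
 *	error = xfs_trans_roll(&tp);
 *	if (error)
 *		goto out_cancel;
 *	xfs_trans_ijoin(tp, ip, 0);	(re-join items to the new tp)
 */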

/*
 * Allocate a transaction, lock and join the inode to it, and reserve quota.
 *
 * The caller must ensure that the on-disk dquots attached to this inode have
 * already been allocated and initialized. The caller is responsible for
 * releasing ILOCK_EXCL if a new transaction is returned.
 */
int
xfs_trans_alloc_inode(
	struct xfs_inode	*ip,
	struct xfs_trans_res	*resv,
	unsigned int		dblocks,
	unsigned int		rblocks,
	bool			force,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, resv, dblocks,
			xfs_extlen_to_rtxlen(mp, rblocks),
			force ? XFS_TRANS_RESERVE : 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_qm_dqattach_locked(ip, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	error = xfs_trans_reserve_quota_nblks(tp, ip, dblocks, rblocks, force);
	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
		xfs_trans_cancel(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_blockgc_free_quota(ip, 0);
		retried = true;
		goto retry;
	}
	if (error)
		goto out_cancel;

	*tpp = tp;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
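
/*
 * Usage sketch (illustrative only; the reservation and block counts are
 * placeholders): on success the inode is locked, joined, and quota-reserved,
 * and the caller owns both the transaction and ILOCK_EXCL:
 *
 *	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, dblocks, 0,
 *			false, &tp);
 *	if (error)
 *		return error;
 *	...modify the inode under tp...
 *	error = xfs_trans_commit(tp);
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */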

/*
 * Try to reserve more blocks for a transaction.
 *
 * This is for callers that need to attach resources to a transaction, scan
 * those resources to determine the space reservation requirements, and then
 * modify the attached resources. In other words, online repair. This can
 * fail due to ENOSPC, so the caller must be able to cancel the transaction
 * without shutting down the fs.
 */
int
xfs_trans_reserve_more(
	struct xfs_trans	*tp,
	unsigned int		blocks,
	unsigned int		rtextents)
{
	bool			rsvd = tp->t_flags & XFS_TRANS_RESERVE;

	if (blocks && xfs_dec_fdblocks(tp->t_mountp, blocks, rsvd))
		return -ENOSPC;
	if (rtextents && xfs_dec_frextents(tp->t_mountp, rtextents)) {
		if (blocks)
			xfs_add_fdblocks(tp->t_mountp, blocks);
		return -ENOSPC;
	}
	tp->t_blk_res += blocks;
	tp->t_rtx_res += rtextents;
	return 0;
}
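
/*
 * Usage sketch (illustrative only): bump an existing reservation once the
 * real space requirement is known, backing out gracefully on ENOSPC:
 *
 *	error = xfs_trans_reserve_more(tp, want_blocks, 0);
 *	if (error == -ENOSPC) {
 *		xfs_trans_cancel(tp);	(safe; the fs is not shut down)
 *		return error;
 *	}
 */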

/*
 * Try to reserve more blocks and file quota for a transaction. Same
 * conditions of usage as xfs_trans_reserve_more.
 */
int
xfs_trans_reserve_more_inode(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	unsigned int		dblocks,
	unsigned int		rblocks,
	bool			force_quota)
{
	struct xfs_mount	*mp = ip->i_mount;
	unsigned int		rtx = xfs_extlen_to_rtxlen(mp, rblocks);
	int			error;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);

	error = xfs_trans_reserve_more(tp, dblocks, rtx);
	if (error)
		return error;

	if (!XFS_IS_QUOTA_ON(mp) || xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return 0;

	if (tp->t_flags & XFS_TRANS_RESERVE)
		force_quota = true;

	error = xfs_trans_reserve_quota_nblks(tp, ip, dblocks, rblocks,
			force_quota);
	if (!error)
		return 0;

	/* Quota failed, give back the new reservation. */
	xfs_add_fdblocks(mp, dblocks);
	tp->t_blk_res -= dblocks;
	xfs_add_frextents(mp, rtx);
	tp->t_rtx_res -= rtx;
	return error;
}

/*
 * Allocate a transaction in preparation for inode creation by reserving quota
 * against the given dquots. Callers are not required to hold any inode locks.
 */
int
xfs_trans_alloc_icreate(
	struct xfs_mount	*mp,
	struct xfs_trans_res	*resv,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	unsigned int		dblocks,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, resv, dblocks, 0, 0, &tp);
	if (error)
		return error;

	error = xfs_trans_reserve_quota_icreate(tp, udqp, gdqp, pdqp, dblocks);
	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
		xfs_trans_cancel(tp);
		xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
		retried = true;
		goto retry;
	}
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	*tpp = tp;
	return 0;
}
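
/*
 * Usage sketch (illustrative only; tr_create and the dquot pointers stand in
 * for the caller's own state): quota for the new inode is reserved before
 * the inode itself is allocated:
 *
 *	error = xfs_trans_alloc_icreate(mp, &M_RES(mp)->tr_create, udqp,
 *			gdqp, pdqp, resblks, &tp);
 *	if (error)
 *		return error;
 *	...allocate and initialize the new inode under tp...
 */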

/*
 * Allocate a transaction, lock and join the inode to it, and reserve quota
 * in preparation for inode attribute changes that include uid, gid, or prid
 * changes.
 *
 * The caller must ensure that the on-disk dquots attached to this inode have
 * already been allocated and initialized. The ILOCK will be dropped when the
 * transaction is committed or cancelled.
 */
int
xfs_trans_alloc_ichange(
	struct xfs_inode	*ip,
	struct xfs_dquot	*new_udqp,
	struct xfs_dquot	*new_gdqp,
	struct xfs_dquot	*new_pdqp,
	bool			force,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*udqp;
	struct xfs_dquot	*gdqp;
	struct xfs_dquot	*pdqp;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (xfs_is_metadir_inode(ip))
		goto out;

	error = xfs_qm_dqattach_locked(ip, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	/*
	 * For each quota type, skip quota reservations if the inode's dquots
	 * now match the ones that came from the caller, or the caller didn't
	 * pass one in. The inode's dquots can change if we drop the ILOCK to
	 * perform a blockgc scan, so we must preserve the caller's arguments.
	 */
	udqp = (new_udqp != ip->i_udquot) ? new_udqp : NULL;
	gdqp = (new_gdqp != ip->i_gdquot) ? new_gdqp : NULL;
	pdqp = (new_pdqp != ip->i_pdquot) ? new_pdqp : NULL;
	if (udqp || gdqp || pdqp) {
		xfs_filblks_t	dblocks, rblocks;
		unsigned int	qflags = XFS_QMOPT_RES_REGBLKS;
		bool		isrt = XFS_IS_REALTIME_INODE(ip);

		if (force)
			qflags |= XFS_QMOPT_FORCE_RES;

		if (isrt) {
			error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
			if (error)
				goto out_cancel;
		}

		xfs_inode_count_blocks(tp, ip, &dblocks, &rblocks);

		if (isrt)
			rblocks += ip->i_delayed_blks;
		else
			dblocks += ip->i_delayed_blks;

		/*
		 * Reserve enough quota to handle blocks on disk and reserved
		 * for a delayed allocation. We'll actually transfer the
		 * delalloc reservation between dquots at chown time, even
		 * though that part is only semi-transactional.
		 */
		error = xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp,
				pdqp, dblocks, 1, qflags);
		if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
			xfs_trans_cancel(tp);
			xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
			retried = true;
			goto retry;
		}
		if (error)
			goto out_cancel;

		/* Do the same for realtime. */
		qflags = XFS_QMOPT_RES_RTBLKS | (qflags & XFS_QMOPT_FORCE_RES);
		error = xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp,
				pdqp, rblocks, 0, qflags);
		if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
			xfs_trans_cancel(tp);
			xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
			retried = true;
			goto retry;
		}
		if (error)
			goto out_cancel;
	}

out:
	*tpp = tp;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	return error;
}

/*
 * Allocate a transaction, lock and join the directory and child inodes to it,
 * and reserve quota for a directory update. If there isn't sufficient space,
 * @dblocks will be set to zero for a reservationless directory update and
 * @nospace_error will be set to a negative errno describing the space
 * constraint we hit.
 *
 * The caller must ensure that the on-disk dquots attached to this inode have
 * already been allocated and initialized. The ILOCKs will be dropped when the
 * transaction is committed or cancelled.
 *
 * The caller is responsible for unlocking the inodes manually upon return.
 */
int
xfs_trans_alloc_dir(
	struct xfs_inode	*dp,
	struct xfs_trans_res	*resv,
	struct xfs_inode	*ip,
	unsigned int		*dblocks,
	struct xfs_trans	**tpp,
	int			*nospace_error)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	unsigned int		resblks;
	bool			retried = false;
	int			error;

retry:
	*nospace_error = 0;
	resblks = *dblocks;
	error = xfs_trans_alloc(mp, resv, resblks, 0, 0, &tp);
	if (error == -ENOSPC) {
		*nospace_error = error;
		resblks = 0;
		error = xfs_trans_alloc(mp, resv, resblks, 0, 0, &tp);
	}
	if (error)
		return error;

	xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, dp, 0);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_qm_dqattach_locked(dp, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	error = xfs_qm_dqattach_locked(ip, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	if (resblks == 0)
		goto done;

	error = xfs_trans_reserve_quota_nblks(tp, dp, resblks, 0, false);
	if (error == -EDQUOT || error == -ENOSPC) {
		if (!retried) {
			xfs_trans_cancel(tp);
			xfs_iunlock(dp, XFS_ILOCK_EXCL);
			if (dp != ip)
				xfs_iunlock(ip, XFS_ILOCK_EXCL);
			xfs_blockgc_free_quota(dp, 0);
			retried = true;
			goto retry;
		}

		*nospace_error = error;
		resblks = 0;
		error = 0;
	}
	if (error)
		goto out_cancel;

done:
	*tpp = tp;
	*dblocks = resblks;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(dp, XFS_ILOCK_EXCL);
	if (dp != ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}