// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_icache.h"
#include "xfs_rtbitmap.h"
#include "xfs_rtgroup.h"
#include "xfs_sb.h"

struct kmem_cache	*xfs_trans_cache;

#if defined(CONFIG_TRACEPOINTS)
static void
xfs_trans_trace_reservations(
	struct xfs_mount	*mp)
{
	struct xfs_trans_res	*res;
	struct xfs_trans_res	*end_res;
	int			i;

	res = (struct xfs_trans_res *)M_RES(mp);
	end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
	for (i = 0; res < end_res; i++, res++)
		trace_xfs_trans_resv_calc(mp, i, res);
}
#else
# define xfs_trans_trace_reservations(mp)
#endif

/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
	struct xfs_mount	*mp)
{
	xfs_trans_resv_calc(mp, M_RES(mp));
	xfs_trans_trace_reservations(mp);
}
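/*
 * Usage sketch (illustrative only, not part of this file): once
 * xfs_trans_init() has run, callers pick one of the precomputed
 * reservations out of M_RES(mp) and hand it to xfs_trans_alloc().
 * The reservation chosen below is just an example:
 *
 *	struct xfs_trans	*tp;
 *	int			error;
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
 *	if (error)
 *		return error;
 */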
/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
	struct xfs_trans	*tp)
{
	xfs_extent_busy_sort(&tp->t_busy);
	xfs_extent_busy_clear(&tp->t_busy, false);

	trace_xfs_trans_free(tp, _RET_IP_);
	xfs_trans_clear_context(tp);
	if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
		sb_end_intwrite(tp->t_mountp->m_super);
	xfs_trans_free_dqinfo(tp);
	kmem_cache_free(xfs_trans_cache, tp);
}

/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction.  The remaining
 * unused block and rt extent reservations are also inherited.  This
 * implies that the original transaction is no longer allowed to allocate
 * blocks.  Locks and log items, however, are not inherited.  They must
 * be added to the new transaction explicitly.
 */
STATIC struct xfs_trans *
xfs_trans_dup(
	struct xfs_trans	*tp)
{
	struct xfs_trans	*ntp;

	trace_xfs_trans_dup(tp, _RET_IP_);

	ntp = kmem_cache_zalloc(xfs_trans_cache, GFP_KERNEL | __GFP_NOFAIL);

	/*
	 * Initialize the new transaction structure.
	 */
	ntp->t_mountp = tp->t_mountp;
	INIT_LIST_HEAD(&ntp->t_items);
	INIT_LIST_HEAD(&ntp->t_busy);
	INIT_LIST_HEAD(&ntp->t_dfops);
	ntp->t_highest_agno = NULLAGNUMBER;

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(tp->t_ticket != NULL);

	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
		       (tp->t_flags & XFS_TRANS_RESERVE) |
		       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT) |
		       (tp->t_flags & XFS_TRANS_RES_FDBLKS);
	/* We gave our writer reference to the new transaction */
	tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);

	ASSERT(tp->t_blk_res >= tp->t_blk_res_used);
	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
	tp->t_blk_res = tp->t_blk_res_used;

	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
	tp->t_rtx_res = tp->t_rtx_res_used;

	xfs_trans_switch_context(tp, ntp);

	/* move deferred ops over to the new tp */
	xfs_defer_move(ntp, tp);

	xfs_trans_dup_dqinfo(tp, ntp);
	return ntp;
}
/*
 * This is called to reserve free disk blocks and log space for the given
 * transaction before allocating any resources within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 *
 * This does not do quota reservations.  That typically is done by the caller
 * afterwards.
 */
static int
xfs_trans_reserve(
	struct xfs_trans	*tp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents)
{
	struct xfs_mount	*mp = tp->t_mountp;
	int			error = 0;
	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	ASSERT(resp->tr_logres > 0);

	/*
	 * Attempt to reserve the needed disk blocks by decrementing the number
	 * needed from the number available.  This will fail if the count would
	 * go below zero.
	 */
	if (blocks > 0) {
		error = xfs_dec_fdblocks(mp, blocks, rsvd);
		if (error != 0)
			return -ENOSPC;
		tp->t_blk_res += blocks;
	}

	/*
	 * Reserve the log space needed for this transaction.
	 */
	if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES)
		tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
	error = xfs_log_reserve(mp, resp->tr_logres, resp->tr_logcount,
			&tp->t_ticket, (tp->t_flags & XFS_TRANS_PERM_LOG_RES));
	if (error)
		goto undo_blocks;

	tp->t_log_res = resp->tr_logres;
	tp->t_log_count = resp->tr_logcount;

	/*
	 * Attempt to reserve the needed realtime extents by decrementing the
	 * number needed from the number available.  This will fail if the
	 * count would go below zero.
	 */
	if (rtextents > 0) {
		error = xfs_dec_frextents(mp, rtextents);
		if (error) {
			error = -ENOSPC;
			goto undo_log;
		}
		tp->t_rtx_res += rtextents;
	}

	return 0;

undo_log:
	xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
	tp->t_ticket = NULL;
	tp->t_log_res = 0;
	tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
undo_blocks:
	if (blocks > 0) {
		xfs_add_fdblocks(mp, blocks);
		tp->t_blk_res = 0;
	}
	return error;
}

static struct xfs_trans *
__xfs_trans_alloc(
	struct xfs_mount	*mp,
	uint			flags)
{
	struct xfs_trans	*tp;

	ASSERT(!(flags & XFS_TRANS_RES_FDBLKS) || xfs_has_lazysbcount(mp));

	tp = kmem_cache_zalloc(xfs_trans_cache, GFP_KERNEL | __GFP_NOFAIL);
	if (!(flags & XFS_TRANS_NO_WRITECOUNT))
		sb_start_intwrite(mp->m_super);
	xfs_trans_set_context(tp);
	tp->t_flags = flags;
	tp->t_mountp = mp;
	INIT_LIST_HEAD(&tp->t_items);
	INIT_LIST_HEAD(&tp->t_busy);
	INIT_LIST_HEAD(&tp->t_dfops);
	tp->t_highest_agno = NULLAGNUMBER;
	return tp;
}

int
xfs_trans_alloc(
	struct xfs_mount	*mp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents,
	uint			flags,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	bool			want_retry = true;
	int			error;

	ASSERT(resp->tr_logres > 0);

	/*
	 * Allocate the handle before we do our freeze accounting and setting up
	 * the GFP_NOFS allocation context so that we avoid lockdep false
	 * positives by doing GFP_KERNEL allocations inside sb_start_intwrite().
	 */
retry:
	tp = __xfs_trans_alloc(mp, flags);
	WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
	error = xfs_trans_reserve(tp, resp, blocks, rtextents);
	if (error == -ENOSPC && want_retry) {
		xfs_trans_cancel(tp);

		/*
		 * We weren't able to reserve enough space for the transaction.
		 * Flush the other speculative space allocations to free space.
		 * Do not perform a synchronous scan because callers can hold
		 * other locks.
		 */
		error = xfs_blockgc_flush_all(mp);
		if (error)
			return error;
		want_retry = false;
		goto retry;
	}
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	trace_xfs_trans_alloc(tp, _RET_IP_);

	*tpp = tp;
	return 0;
}
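/*
 * Usage sketch (illustrative only): allocate a transaction with a block
 * reservation, join an already-locked inode, dirty it, and commit.  The
 * reservation name and block count are examples, not a fixed recipe:
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, blocks, 0, 0, &tp);
 *	if (error)
 *		return error;
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 *	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 *	error = xfs_trans_commit(tp);
 */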
/*
 * Create an empty transaction with no reservation.  This is a defensive
 * mechanism for routines that query metadata without actually modifying them --
 * if the metadata being queried is somehow cross-linked (think a btree block
 * pointer that points higher in the tree), we risk deadlock.  However, blocks
 * grabbed as part of a transaction can be re-grabbed.  The verifiers will
 * notice the corrupt block and the operation will fail back to userspace
 * without deadlocking.
 *
 * Note the zero-length reservation; this transaction MUST be cancelled without
 * any dirty data.
 *
 * Callers should obtain freeze protection to avoid a conflict with fs freezing
 * where we can be grabbing buffers at the same time that freeze is trying to
 * drain the buffer LRU list.
 */
struct xfs_trans *
xfs_trans_alloc_empty(
	struct xfs_mount	*mp)
{
	return __xfs_trans_alloc(mp, XFS_TRANS_NO_WRITECOUNT);
}
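/*
 * Usage sketch (illustrative only): empty transactions back read-only
 * metadata walks and must always be cancelled, never committed:
 *
 *	struct xfs_trans	*tp = xfs_trans_alloc_empty(mp);
 *
 *	... walk metadata, joining buffers read-only ...
 *
 *	xfs_trans_cancel(tp);
 */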
/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 *
 * Because we may not be keeping track of allocated/free inodes and
 * used filesystem blocks in the superblock, we do not mark the
 * superblock dirty in this transaction if we modify these fields.
 * We still need to update the transaction deltas so that they get
 * applied to the incore superblock, but we don't want them to
 * cause the superblock to get locked and logged if these are the
 * only fields in the superblock that the transaction modifies.
 */
void
xfs_trans_mod_sb(
	xfs_trans_t	*tp,
	uint		field,
	int64_t		delta)
{
	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
	xfs_mount_t	*mp = tp->t_mountp;

	switch (field) {
	case XFS_TRANS_SB_ICOUNT:
		tp->t_icount_delta += delta;
		if (xfs_has_lazysbcount(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_IFREE:
		tp->t_ifree_delta += delta;
		if (xfs_has_lazysbcount(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FDBLOCKS:
		/*
		 * Track the number of blocks allocated in the transaction.
		 * Make sure it does not exceed the number reserved.  If so,
		 * shutdown as this can lead to accounting inconsistency.
		 */
		if (delta < 0) {
			tp->t_blk_res_used += (uint)-delta;
			if (tp->t_blk_res_used > tp->t_blk_res)
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		} else if (delta > 0 && (tp->t_flags & XFS_TRANS_RES_FDBLKS)) {
			int64_t	blkres_delta;

			/*
			 * Return freed blocks directly to the reservation
			 * instead of the global pool, being careful not to
			 * overflow the trans counter.  This is used to preserve
			 * reservation across chains of transaction rolls that
			 * repeatedly free and allocate blocks.
			 */
			blkres_delta = min_t(int64_t, delta,
					     UINT_MAX - tp->t_blk_res);
			tp->t_blk_res += blkres_delta;
			delta -= blkres_delta;
		}
		tp->t_fdblocks_delta += delta;
		if (xfs_has_lazysbcount(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_RES_FDBLOCKS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		tp->t_res_fdblocks_delta += delta;
		if (xfs_has_lazysbcount(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FREXTENTS:
		/*
		 * Track the number of blocks allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_rtx_res_used += (uint)-delta;
			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
		}
		tp->t_frextents_delta += delta;
		if (xfs_has_rtgroups(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_RES_FREXTENTS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_frextents_delta += delta;
		if (xfs_has_rtgroups(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_DBLOCKS:
		tp->t_dblocks_delta += delta;
		break;
	case XFS_TRANS_SB_AGCOUNT:
		ASSERT(delta > 0);
		tp->t_agcount_delta += delta;
		break;
	case XFS_TRANS_SB_IMAXPCT:
		tp->t_imaxpct_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSIZE:
		tp->t_rextsize_delta += delta;
		break;
	case XFS_TRANS_SB_RBMBLOCKS:
		tp->t_rbmblocks_delta += delta;
		break;
	case XFS_TRANS_SB_RBLOCKS:
		tp->t_rblocks_delta += delta;
		break;
	case XFS_TRANS_SB_REXTENTS:
		tp->t_rextents_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSLOG:
		tp->t_rextslog_delta += delta;
		break;
	case XFS_TRANS_SB_RGCOUNT:
		ASSERT(delta > 0);
		tp->t_rgcount_delta += delta;
		break;
	default:
		ASSERT(0);
		return;
	}

	tp->t_flags |= flags;
}
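/*
 * Usage sketch (illustrative only): an allocation path that consumed
 * @len blocks from the free pool accounts for them with a negative
 * delta, which is charged against the transaction's block reservation:
 *
 *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -(int64_t)len);
 */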
/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_buf		*bp = xfs_trans_getsb(tp);
	struct xfs_dsb		*sbp = bp->b_addr;
	int			whole = 0;

	/*
	 * Only update the superblock counters if we are logging them
	 */
	if (!xfs_has_lazysbcount(mp)) {
		if (tp->t_icount_delta)
			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
		if (tp->t_ifree_delta)
			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
		if (tp->t_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
		if (tp->t_res_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
	}

	/*
	 * sb_frextents was added to the lazy sb counters when the rt groups
	 * feature was introduced.  This is possible because we know that all
	 * kernels supporting rtgroups will also recompute frextents from the
	 * realtime bitmap.
	 *
	 * For older file systems, updating frextents requires careful handling
	 * because we cannot rely on log recovery in older kernels to recompute
	 * the value from the rtbitmap.  This means that the ondisk frextents
	 * must be consistent with the rtbitmap.
	 *
	 * Therefore, log the frextents change to the ondisk superblock and
	 * update the incore superblock so that future calls to xfs_log_sb
	 * write the correct value ondisk.
	 */
	if ((tp->t_frextents_delta || tp->t_res_frextents_delta) &&
	    !xfs_has_rtgroups(mp)) {
		int64_t	rtxdelta;

		rtxdelta = tp->t_frextents_delta + tp->t_res_frextents_delta;

		spin_lock(&mp->m_sb_lock);
		be64_add_cpu(&sbp->sb_frextents, rtxdelta);
		mp->m_sb.sb_frextents += rtxdelta;
		spin_unlock(&mp->m_sb_lock);
	}

	if (tp->t_dblocks_delta) {
		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
		mp->m_ddev_targp->bt_nr_sectors +=
			XFS_FSB_TO_BB(mp, tp->t_dblocks_delta);
		whole = 1;
	}
	if (tp->t_agcount_delta) {
		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
		whole = 1;
	}
	if (tp->t_imaxpct_delta) {
		sbp->sb_imax_pct += tp->t_imaxpct_delta;
		whole = 1;
	}
	if (tp->t_rextsize_delta) {
		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);

		/*
		 * Because the ondisk sb records rtgroup size in units of rt
		 * extents, any time we update the rt extent size we have to
		 * recompute the ondisk rtgroup block log.  The incore values
		 * will be recomputed in xfs_trans_unreserve_and_mod_sb.
		 */
		if (xfs_has_rtgroups(mp)) {
			sbp->sb_rgblklog = xfs_compute_rgblklog(
						be32_to_cpu(sbp->sb_rgextents),
						be32_to_cpu(sbp->sb_rextsize));
		}
		whole = 1;
	}
	if (tp->t_rbmblocks_delta) {
		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
		whole = 1;
	}
	if (tp->t_rblocks_delta) {
		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
		mp->m_rtdev_targp->bt_nr_sectors +=
			XFS_FSB_TO_BB(mp, tp->t_rblocks_delta);
		whole = 1;
	}
	if (tp->t_rextents_delta) {
		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
		whole = 1;
	}
	if (tp->t_rextslog_delta) {
		sbp->sb_rextslog += tp->t_rextslog_delta;
		whole = 1;
	}
	if (tp->t_rgcount_delta) {
		be32_add_cpu(&sbp->sb_rgcount, tp->t_rgcount_delta);
		whole = 1;
	}

	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
	if (whole)
		/*
		 * Log the whole thing, the fields are noncontiguous.
		 */
		xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb) - 1);
	else
		/*
		 * Since all the modifiable fields are contiguous, we
		 * can get away with this.
		 */
		xfs_trans_log_buf(tp, bp, offsetof(struct xfs_dsb, sb_icount),
				  offsetof(struct xfs_dsb, sb_frextents) +
				  sizeof(sbp->sb_frextents) - 1);
}
/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
 * and apply superblock counter changes to the in-core superblock.  The
 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 * applied to the in-core superblock because those updates have already been
 * done.
 *
 * If we are not logging superblock counters, then the inode allocated/free and
 * used block counts are not updated in the on disk superblock.  In this case,
 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 * still need to update the incore superblock with the changes.
 *
 * Deltas for the inode count are +/-64, hence we use a large batch size of 128
 * so we don't need to take the counter lock on every update.
 */
#define XFS_ICOUNT_BATCH	128

void
xfs_trans_unreserve_and_mod_sb(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	int64_t			blkdelta = tp->t_blk_res;
	int64_t			rtxdelta = tp->t_rtx_res;
	int64_t			idelta = 0;
	int64_t			ifreedelta = 0;

	/*
	 * Calculate the deltas.
	 *
	 * t_fdblocks_delta and t_frextents_delta can be positive or negative:
	 *
	 *  - positive values indicate blocks freed in the transaction.
	 *  - negative values indicate blocks allocated in the transaction.
	 *
	 * Negative values can only happen if the transaction has a block
	 * reservation that covers the allocated block.  The end result is
	 * that the calculated delta values must always be positive and we
	 * can only put back previously allocated or reserved blocks here.
	 */
	ASSERT(tp->t_blk_res || tp->t_fdblocks_delta >= 0);
	if (xfs_has_lazysbcount(mp) || (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		blkdelta += tp->t_fdblocks_delta;
		ASSERT(blkdelta >= 0);
	}

	ASSERT(tp->t_rtx_res || tp->t_frextents_delta >= 0);
	if (xfs_has_rtgroups(mp) || (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		rtxdelta += tp->t_frextents_delta;
		ASSERT(rtxdelta >= 0);
	}

	if (xfs_has_lazysbcount(mp) || (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		idelta = tp->t_icount_delta;
		ifreedelta = tp->t_ifree_delta;
	}

	/* apply the per-cpu counters */
	if (blkdelta)
		xfs_add_fdblocks(mp, blkdelta);

	if (idelta)
		percpu_counter_add_batch(&mp->m_icount, idelta,
					 XFS_ICOUNT_BATCH);

	if (ifreedelta)
		percpu_counter_add(&mp->m_ifree, ifreedelta);

	if (rtxdelta)
		xfs_add_frextents(mp, rtxdelta);

	if (!(tp->t_flags & XFS_TRANS_SB_DIRTY))
		return;

	/* apply remaining deltas */
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_fdblocks += tp->t_fdblocks_delta + tp->t_res_fdblocks_delta;
	mp->m_sb.sb_icount += idelta;
	mp->m_sb.sb_ifree += ifreedelta;
	/*
	 * Do not touch sb_frextents here because it is handled in
	 * xfs_trans_apply_sb_deltas for file systems where it isn't a lazy
	 * counter anyway.
	 */
	mp->m_sb.sb_dblocks += tp->t_dblocks_delta;
	mp->m_sb.sb_agcount += tp->t_agcount_delta;
	mp->m_sb.sb_imax_pct += tp->t_imaxpct_delta;
	if (tp->t_rextsize_delta)
		xfs_mount_sb_set_rextsize(mp, &mp->m_sb,
				mp->m_sb.sb_rextsize + tp->t_rextsize_delta);
	mp->m_sb.sb_rbmblocks += tp->t_rbmblocks_delta;
	mp->m_sb.sb_rblocks += tp->t_rblocks_delta;
	mp->m_sb.sb_rextents += tp->t_rextents_delta;
	mp->m_sb.sb_rextslog += tp->t_rextslog_delta;
	mp->m_sb.sb_rgcount += tp->t_rgcount_delta;
	spin_unlock(&mp->m_sb_lock);

	/*
	 * Debug checks outside of the spinlock so they don't lock up the
	 * machine if they fail.
	 */
	ASSERT(mp->m_sb.sb_imax_pct >= 0);
	ASSERT(mp->m_sb.sb_rextslog >= 0);
}
/* Add the given log item to the transaction's list of log items. */
void
xfs_trans_add_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	ASSERT(lip->li_log == tp->t_mountp->m_log);
	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
	ASSERT(list_empty(&lip->li_trans));
	ASSERT(!test_bit(XFS_LI_DIRTY, &lip->li_flags));

	list_add_tail(&lip->li_trans, &tp->t_items);
	trace_xfs_trans_add_item(tp, _RET_IP_);
}

/*
 * Unlink the log item from the transaction.  The log item is no longer
 * considered dirty in this transaction, as the linked transaction has
 * finished, either by abort or commit completion.
 */
void
xfs_trans_del_item(
	struct xfs_log_item	*lip)
{
	clear_bit(XFS_LI_DIRTY, &lip->li_flags);
	list_del_init(&lip->li_trans);
}

/* Detach and unlock all of the items in a transaction */
static void
xfs_trans_free_items(
	struct xfs_trans	*tp,
	bool			abort)
{
	struct xfs_log_item	*lip, *next;

	trace_xfs_trans_free_items(tp, _RET_IP_);

	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		xfs_trans_del_item(lip);
		if (abort) {
			trace_xfs_trans_free_abort(lip);
			set_bit(XFS_LI_ABORTED, &lip->li_flags);
		}
		if (lip->li_ops->iop_release)
			lip->li_ops->iop_release(lip);
	}
}
/*
 * Sort transaction items prior to running precommit operations.  This will
 * attempt to order the items such that they will always be locked in the same
 * order.  Items that have no sort function are moved to the end of the list
 * and so are locked last.
 *
 * This may need refinement as different types of objects add sort functions.
 *
 * Function is more complex than it needs to be because we are comparing 64 bit
 * values and the function only returns 32 bit values.
 */
static int
xfs_trans_precommit_sort(
	void			*unused_arg,
	const struct list_head	*a,
	const struct list_head	*b)
{
	struct xfs_log_item	*lia = container_of(a,
					struct xfs_log_item, li_trans);
	struct xfs_log_item	*lib = container_of(b,
					struct xfs_log_item, li_trans);
	int64_t			diff;

	/*
	 * If both items are non-sortable, leave them alone.  If only one is
	 * sortable, move the non-sortable item towards the end of the list.
	 */
	if (!lia->li_ops->iop_sort && !lib->li_ops->iop_sort)
		return 0;
	if (!lia->li_ops->iop_sort)
		return 1;
	if (!lib->li_ops->iop_sort)
		return -1;

	diff = lia->li_ops->iop_sort(lia) - lib->li_ops->iop_sort(lib);
	if (diff < 0)
		return -1;
	if (diff > 0)
		return 1;
	return 0;
}

/*
 * Run transaction precommit functions.
 *
 * If there is an error in any of the callouts, then stop immediately and
 * trigger a shutdown to abort the transaction.  There is no recovery possible
 * from errors at this point as the transaction is dirty....
 */
static int
xfs_trans_run_precommits(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_log_item	*lip, *n;
	int			error = 0;

	/*
	 * Sort the item list to avoid ABBA deadlocks with other transactions
	 * running precommit operations that lock multiple shared items such as
	 * inode cluster buffers.
	 */
	list_sort(NULL, &tp->t_items, xfs_trans_precommit_sort);

	/*
	 * Precommit operations can remove the log item from the transaction
	 * if the log item exists purely to delay modifications until they
	 * can be ordered against other operations.  Hence we have to use
	 * list_for_each_entry_safe() here.
	 */
	list_for_each_entry_safe(lip, n, &tp->t_items, li_trans) {
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;
		if (lip->li_ops->iop_precommit) {
			error = lip->li_ops->iop_precommit(tp, lip);
			if (error)
				break;
		}
	}
	if (error)
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return error;
}
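/*
 * Sketch of an ->iop_sort implementation (hypothetical, shown only to
 * illustrate the contract above): the callback returns a stable 64-bit
 * ordering key so that every transaction locks shared objects in the
 * same precommit order.  Sorting by something like an inode number or
 * disk address is one such key:
 *
 *	static uint64_t
 *	example_iop_sort(struct xfs_log_item *lip)
 *	{
 *		return example_stable_key_of(lip);
 *	}
 */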
/*
 * Commit the given transaction to the log.
 *
 * XFS disk error handling mechanism is not based on a typical
 * transaction abort mechanism.  Logically after the filesystem
 * gets marked 'SHUTDOWN', we can't let any new transactions
 * be durable - ie. committed to disk - because some metadata might
 * be inconsistent.  In such cases, this returns an error, and the
 * caller may assume that all locked objects joined to the transaction
 * have already been unlocked as if the commit had succeeded.
 * Do not reference the transaction structure after this call.
 */
static int
__xfs_trans_commit(
	struct xfs_trans	*tp,
	bool			regrant)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xlog		*log = mp->m_log;
	xfs_csn_t		commit_seq = 0;
	int			error = 0;
	int			sync = tp->t_flags & XFS_TRANS_SYNC;

	trace_xfs_trans_commit(tp, _RET_IP_);

	/*
	 * Commit per-transaction changes that are not already tracked through
	 * log items.  This can add dirty log items to the transaction.
	 */
	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
		xfs_trans_apply_sb_deltas(tp);
	xfs_trans_apply_dquot_deltas(tp);

	error = xfs_trans_run_precommits(tp);
	if (error)
		goto out_unreserve;

	/*
	 * If there is nothing to be logged by the transaction,
	 * then unlock all of the items associated with the
	 * transaction and free the transaction structure.
	 * Also make sure to return any reserved blocks to
	 * the free pool.
	 */
	if (!(tp->t_flags & XFS_TRANS_DIRTY))
		goto out_unreserve;

	/*
	 * We must check against log shutdown here because we cannot abort log
	 * items and leave them dirty, inconsistent and unpinned in memory while
	 * the log is active.  This leaves them open to being written back to
	 * disk, and that will lead to on-disk corruption.
	 */
	if (xlog_is_shutdown(log)) {
		error = -EIO;
		goto out_unreserve;
	}

	ASSERT(tp->t_ticket != NULL);

	xlog_cil_commit(log, tp, &commit_seq, regrant);

	xfs_trans_free(tp);

	/*
	 * If the transaction needs to be synchronous, then force the
	 * log out now and wait for it.
	 */
	if (sync) {
		error = xfs_log_force_seq(mp, commit_seq, XFS_LOG_SYNC, NULL);
		XFS_STATS_INC(mp, xs_trans_sync);
	} else {
		XFS_STATS_INC(mp, xs_trans_async);
	}

	return error;

out_unreserve:
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * It is indeed possible for the transaction to be not dirty but
	 * the dqinfo portion to be.  All that means is that we have some
	 * (non-persistent) quota reservations that need to be unreserved.
	 */
	xfs_trans_unreserve_and_mod_dquots(tp, true);
	if (tp->t_ticket) {
		if (regrant && !xlog_is_shutdown(log))
			xfs_log_ticket_regrant(log, tp->t_ticket);
		else
			xfs_log_ticket_ungrant(log, tp->t_ticket);
		tp->t_ticket = NULL;
	}
	xfs_trans_free_items(tp, !!error);
	xfs_trans_free(tp);

	XFS_STATS_INC(mp, xs_trans_empty);
	return error;
}

int
xfs_trans_commit(
	struct xfs_trans	*tp)
{
	/*
	 * Finish deferred items on final commit.  Only permanent transactions
	 * should ever have deferred ops.
	 */
	WARN_ON_ONCE(!list_empty(&tp->t_dfops) &&
		     !(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
	if (tp->t_flags & XFS_TRANS_PERM_LOG_RES) {
		int error = xfs_defer_finish_noroll(&tp);
		if (error) {
			xfs_trans_cancel(tp);
			return error;
		}
	}

	return __xfs_trans_commit(tp, false);
}
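/*
 * Usage sketch (illustrative only): a caller that needs the commit on
 * stable storage before returning marks the transaction synchronous
 * first, which makes the commit path force and wait on the log:
 *
 *	xfs_trans_set_sync(tp);
 *	error = xfs_trans_commit(tp);
 */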
/*
 * Unlock all of the transaction's items and free the transaction.  If the
 * transaction is dirty, we must shut down the filesystem because there is no
 * way to restore the items to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release it as
 * well.
 *
 * This is a high level function (equivalent to xfs_trans_commit()) and so can
 * be called after the transaction has effectively been aborted due to the mount
 * being shut down.  However, if the mount has not been shut down and the
 * transaction is dirty we will shut the mount down and, in doing so, that
 * guarantees that the log is shut down, too.  Hence we don't need to be as
 * careful with shutdown state and dirty items here as we need to be in
 * xfs_trans_commit().
 */
void
xfs_trans_cancel(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xlog		*log = mp->m_log;
	bool			dirty = (tp->t_flags & XFS_TRANS_DIRTY);

	trace_xfs_trans_cancel(tp, _RET_IP_);

	/*
	 * It's never valid to cancel a transaction with deferred ops attached,
	 * because the transaction is effectively dirty.  Complain about this
	 * loudly before freeing the in-memory defer items and shutting down the
	 * filesystem.
	 */
	if (!list_empty(&tp->t_dfops)) {
		ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
		dirty = true;
		xfs_defer_cancel(tp);
	}

	/*
	 * See if the caller is relying on us to shut down the filesystem.  We
	 * only want an error report if there isn't already a shutdown in
	 * progress, so we only need to check against the mount shutdown state
	 * here.
	 */
	if (dirty && !xfs_is_shutdown(mp)) {
		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}
#ifdef DEBUG
	/* Log items need to be consistent until the log is shut down. */
	if (!dirty && !xlog_is_shutdown(log)) {
		struct xfs_log_item	*lip;

		list_for_each_entry(lip, &tp->t_items, li_trans)
			ASSERT(!xlog_item_is_intent_done(lip));
	}
#endif
	xfs_trans_unreserve_and_mod_sb(tp);
	xfs_trans_unreserve_and_mod_dquots(tp, false);

	if (tp->t_ticket) {
		xfs_log_ticket_ungrant(log, tp->t_ticket);
		tp->t_ticket = NULL;
	}

	xfs_trans_free_items(tp, dirty);
	xfs_trans_free(tp);
}

/*
 * Roll from one trans in the sequence of PERMANENT transactions to the next:
 * permanent transactions are only flushed out when committed with
 * xfs_trans_commit(), but we still want to let chunks of the work reach the
 * log as soon as possible.  So we commit the chunk we've been working on and
 * get a new transaction to continue.
 */
int
xfs_trans_roll(
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp = *tpp;
	unsigned int		log_res = tp->t_log_res;
	unsigned int		log_count = tp->t_log_count;
	int			error;

	trace_xfs_trans_roll(tp, _RET_IP_);

	ASSERT(log_res > 0);

	/*
	 * Copy the critical parameters from one trans to the next.
	 */
	*tpp = xfs_trans_dup(tp);

	/*
	 * Commit the current transaction.
	 *
	 * If this commit failed, then it'd just unlock those items that are not
	 * marked ihold.  That also means that a filesystem shutdown is in
	 * progress.  The caller takes the responsibility to cancel the
	 * duplicate transaction that gets returned.
	 */
	error = __xfs_trans_commit(tp, true);
	if (error)
		return error;

	/*
	 * Reserve space in the log for the next transaction.
	 *
	 * This also pushes items in the AIL out to disk if they are taking up
	 * space at the tail of the log that we want to use.  This requires that
	 * either nothing be locked across this call, or that anything that is
	 * locked be logged in the prior and the next transactions.
	 */
	tp = *tpp;
	error = xfs_log_regrant(tp->t_mountp, tp->t_ticket);
	if (error)
		return error;
	tp->t_log_res = log_res;
	tp->t_log_count = log_count;
	return 0;
}
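/*
 * Usage sketch (illustrative only): a multi-step change logs what it has
 * done so far, rolls to a fresh transaction, and re-joins its locked
 * inode; this is the pattern that xfs_trans_roll_inode() wraps:
 *
 *	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 *	error = xfs_trans_roll(&tp);
 *	if (error)
 *		return error;
 *	xfs_trans_ijoin(tp, ip, 0);
 */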
/*
 * Allocate a transaction, lock and join the inode to it, and reserve quota.
 *
 * The caller must ensure that the on-disk dquots attached to this inode have
 * already been allocated and initialized.  The caller is responsible for
 * releasing ILOCK_EXCL if a new transaction is returned.
 */
int
xfs_trans_alloc_inode(
	struct xfs_inode	*ip,
	struct xfs_trans_res	*resv,
	unsigned int		dblocks,
	unsigned int		rblocks,
	bool			force,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, resv, dblocks,
			xfs_extlen_to_rtxlen(mp, rblocks),
			force ? XFS_TRANS_RESERVE : 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_qm_dqattach_locked(ip, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	error = xfs_trans_reserve_quota_nblks(tp, ip, dblocks, rblocks, force);
	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
		xfs_trans_cancel(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_blockgc_free_quota(ip, 0);
		retried = true;
		goto retry;
	}
	if (error)
		goto out_cancel;

	*tpp = tp;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
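/*
 * Usage sketch (illustrative only): on success the inode comes back
 * joined and ILOCKed, so the caller unlocks it after committing.  The
 * reservation name below is an example:
 *
 *	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, dblocks, 0,
 *			false, &tp);
 *	if (error)
 *		return error;
 *	... make changes, log the inode ...
 *	error = xfs_trans_commit(tp);
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */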
/*
 * Try to reserve more blocks for a transaction.
 *
 * This is for callers that need to attach resources to a transaction, scan
 * those resources to determine the space reservation requirements, and then
 * modify the attached resources.  In other words, online repair.  This can
 * fail due to ENOSPC, so the caller must be able to cancel the transaction
 * without shutting down the fs.
 */
int
xfs_trans_reserve_more(
	struct xfs_trans	*tp,
	unsigned int		blocks,
	unsigned int		rtextents)
{
	bool			rsvd = tp->t_flags & XFS_TRANS_RESERVE;

	if (blocks && xfs_dec_fdblocks(tp->t_mountp, blocks, rsvd))
		return -ENOSPC;
	if (rtextents && xfs_dec_frextents(tp->t_mountp, rtextents)) {
		if (blocks)
			xfs_add_fdblocks(tp->t_mountp, blocks);
		return -ENOSPC;
	}
	tp->t_blk_res += blocks;
	tp->t_rtx_res += rtextents;
	return 0;
}

/*
 * Try to reserve more blocks and file quota for a transaction.  Same
 * conditions of usage as xfs_trans_reserve_more.
 */
int
xfs_trans_reserve_more_inode(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	unsigned int		dblocks,
	unsigned int		rblocks,
	bool			force_quota)
{
	struct xfs_mount	*mp = ip->i_mount;
	unsigned int		rtx = xfs_extlen_to_rtxlen(mp, rblocks);
	int			error;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);

	error = xfs_trans_reserve_more(tp, dblocks, rtx);
	if (error)
		return error;

	if (!XFS_IS_QUOTA_ON(mp) || xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return 0;

	if (tp->t_flags & XFS_TRANS_RESERVE)
		force_quota = true;

	error = xfs_trans_reserve_quota_nblks(tp, ip, dblocks, rblocks,
			force_quota);
	if (!error)
		return 0;

	/* Quota failed, give back the new reservation. */
	xfs_add_fdblocks(mp, dblocks);
	tp->t_blk_res -= dblocks;
	xfs_add_frextents(mp, rtx);
	tp->t_rtx_res -= rtx;
	return error;
}

/*
 * Allocate a transaction in preparation for inode creation by reserving quota
 * against the given dquots.  Callers are not required to hold any inode locks.
 */
int
xfs_trans_alloc_icreate(
	struct xfs_mount	*mp,
	struct xfs_trans_res	*resv,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	unsigned int		dblocks,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, resv, dblocks, 0, 0, &tp);
	if (error)
		return error;

	error = xfs_trans_reserve_quota_icreate(tp, udqp, gdqp, pdqp, dblocks);
	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
		xfs_trans_cancel(tp);
		xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
		retried = true;
		goto retry;
	}
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	*tpp = tp;
	return 0;
}
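/*
 * Usage sketch (illustrative only): inode creation reserves quota against
 * the dquots that will own the new inode before any inode is allocated.
 * The reservation and dquot variables here are examples:
 *
 *	error = xfs_trans_alloc_icreate(mp, &M_RES(mp)->tr_create, udqp,
 *			gdqp, pdqp, resblks, &tp);
 *	if (error)
 *		return error;
 */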
/*
 * Allocate a transaction, lock and join the inode to it, and reserve quota
 * in preparation for inode attribute changes that include uid, gid, or prid
 * changes.
 *
 * The caller must ensure that the on-disk dquots attached to this inode have
 * already been allocated and initialized.  The ILOCK will be dropped when the
 * transaction is committed or cancelled.
 */
int
xfs_trans_alloc_ichange(
	struct xfs_inode	*ip,
	struct xfs_dquot	*new_udqp,
	struct xfs_dquot	*new_gdqp,
	struct xfs_dquot	*new_pdqp,
	bool			force,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*udqp;
	struct xfs_dquot	*gdqp;
	struct xfs_dquot	*pdqp;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (xfs_is_metadir_inode(ip))
		goto out;

	error = xfs_qm_dqattach_locked(ip, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	/*
	 * For each quota type, skip quota reservations if the inode's dquots
	 * now match the ones that came from the caller, or the caller didn't
	 * pass one in.  The inode's dquots can change if we drop the ILOCK to
	 * perform a blockgc scan, so we must preserve the caller's arguments.
	 */
	udqp = (new_udqp != ip->i_udquot) ? new_udqp : NULL;
	gdqp = (new_gdqp != ip->i_gdquot) ? new_gdqp : NULL;
	pdqp = (new_pdqp != ip->i_pdquot) ? new_pdqp : NULL;
	if (udqp || gdqp || pdqp) {
		xfs_filblks_t	dblocks, rblocks;
		unsigned int	qflags = XFS_QMOPT_RES_REGBLKS;
		bool		isrt = XFS_IS_REALTIME_INODE(ip);

		if (force)
			qflags |= XFS_QMOPT_FORCE_RES;

		if (isrt) {
			error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
			if (error)
				goto out_cancel;
		}

		xfs_inode_count_blocks(tp, ip, &dblocks, &rblocks);

		if (isrt)
			rblocks += ip->i_delayed_blks;
		else
			dblocks += ip->i_delayed_blks;

		/*
		 * Reserve enough quota to handle blocks on disk and reserved
		 * for a delayed allocation.  We'll actually transfer the
		 * delalloc reservation between dquots at chown time, even
		 * though that part is only semi-transactional.
		 */
		error = xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp,
				pdqp, dblocks, 1, qflags);
		if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
			xfs_trans_cancel(tp);
			xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
			retried = true;
			goto retry;
		}
		if (error)
			goto out_cancel;

		/* Do the same for realtime. */
		qflags = XFS_QMOPT_RES_RTBLKS | (qflags & XFS_QMOPT_FORCE_RES);
		error = xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp,
				pdqp, rblocks, 0, qflags);
		if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
			xfs_trans_cancel(tp);
			xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
			retried = true;
			goto retry;
		}
		if (error)
			goto out_cancel;
	}

out:
	*tpp = tp;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	return error;
}

/*
 * Allocate a transaction, lock and join the directory and child inodes to it,
 * and reserve quota for a directory update.  If there isn't sufficient space,
 * @dblocks will be set to zero for a reservationless directory update and
 * @nospace_error will be set to a negative errno describing the space
 * constraint we hit.
 *
 * The caller must ensure that the on-disk dquots attached to this inode have
 * already been allocated and initialized.  The ILOCKs will be dropped when the
 * transaction is committed or cancelled.
 *
 * Caller is responsible for unlocking the inodes manually upon return.
 */
int
xfs_trans_alloc_dir(
	struct xfs_inode	*dp,
	struct xfs_trans_res	*resv,
	struct xfs_inode	*ip,
	unsigned int		*dblocks,
	struct xfs_trans	**tpp,
	int			*nospace_error)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	unsigned int		resblks;
	bool			retried = false;
	int			error;

retry:
	*nospace_error = 0;
	resblks = *dblocks;
	error = xfs_trans_alloc(mp, resv, resblks, 0, 0, &tp);
	if (error == -ENOSPC) {
		*nospace_error = error;
		resblks = 0;
		error = xfs_trans_alloc(mp, resv, resblks, 0, 0, &tp);
	}
	if (error)
		return error;

	xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, dp, 0);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_qm_dqattach_locked(dp, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	error = xfs_qm_dqattach_locked(ip, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	if (resblks == 0)
		goto done;

	error = xfs_trans_reserve_quota_nblks(tp, dp, resblks, 0, false);
	if (error == -EDQUOT || error == -ENOSPC) {
		if (!retried) {
			xfs_trans_cancel(tp);
			xfs_iunlock(dp, XFS_ILOCK_EXCL);
			if (dp != ip)
				xfs_iunlock(ip, XFS_ILOCK_EXCL);
			xfs_blockgc_free_quota(dp, 0);
			retried = true;
			goto retry;
		}

		*nospace_error = error;
		resblks = 0;
		error = 0;
	}
	if (error)
		goto out_cancel;

done:
	*tpp = tp;
	*dblocks = resblks;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(dp, XFS_ILOCK_EXCL);
	if (dp != ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}