// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_icache.h"

struct kmem_cache	*xfs_trans_cache;

#if defined(CONFIG_TRACEPOINTS)
static void
xfs_trans_trace_reservations(
	struct xfs_mount	*mp)
{
	struct xfs_trans_res	*res;
	struct xfs_trans_res	*end_res;
	int			i;

	res = (struct xfs_trans_res *)M_RES(mp);
	end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
	for (i = 0; res < end_res; i++, res++)
		trace_xfs_trans_resv_calc(mp, i, res);
}
#else
# define xfs_trans_trace_reservations(mp)
#endif

/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
	struct xfs_mount	*mp)
{
	xfs_trans_resv_calc(mp, M_RES(mp));
	xfs_trans_trace_reservations(mp);
}

/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
	struct xfs_trans	*tp)
{
	xfs_extent_busy_sort(&tp->t_busy);
	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);

	trace_xfs_trans_free(tp, _RET_IP_);
	xfs_trans_clear_context(tp);
	if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
		sb_end_intwrite(tp->t_mountp->m_super);
	xfs_trans_free_dqinfo(tp);
	kmem_cache_free(xfs_trans_cache, tp);
}

/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction.  The remaining
 * unused block and rt extent reservations are also inherited.  This
 * implies that the original transaction is no longer allowed to allocate
 * blocks.  Locks and log items, however, are not inherited.  They must
 * be added to the new transaction explicitly.
 */
STATIC struct xfs_trans *
xfs_trans_dup(
	struct xfs_trans	*tp)
{
	struct xfs_trans	*ntp;

	trace_xfs_trans_dup(tp, _RET_IP_);

	ntp = kmem_cache_zalloc(xfs_trans_cache, GFP_KERNEL | __GFP_NOFAIL);

	/*
	 * Initialize the new transaction structure.
	 */
	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
	ntp->t_mountp = tp->t_mountp;
	INIT_LIST_HEAD(&ntp->t_items);
	INIT_LIST_HEAD(&ntp->t_busy);
	INIT_LIST_HEAD(&ntp->t_dfops);
	ntp->t_highest_agno = NULLAGNUMBER;

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(tp->t_ticket != NULL);

	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
		       (tp->t_flags & XFS_TRANS_RESERVE) |
		       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT) |
		       (tp->t_flags & XFS_TRANS_RES_FDBLKS);
	/* We gave our writer reference to the new transaction */
	tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);

	ASSERT(tp->t_blk_res >= tp->t_blk_res_used);
	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
	tp->t_blk_res = tp->t_blk_res_used;

	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
	tp->t_rtx_res = tp->t_rtx_res_used;

	xfs_trans_switch_context(tp, ntp);

	/* move deferred ops over to the new tp */
	xfs_defer_move(ntp, tp);

	xfs_trans_dup_dqinfo(tp, ntp);
	return ntp;
}

/*
 * This is called to reserve free disk blocks and log space for the
 * given transaction.  This must be done before allocating any resources
 * within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 * The only valid log reservation flag is XFS_TRANS_PERM_LOG_RES (passed in
 * resp->tr_logflags), which is used by long running transactions.  If any
 * one of the reservations fails then they will all be backed out.
 *
 * This does not do quota reservations. That typically is done by the
 * caller afterwards.
 */
static int
xfs_trans_reserve(
	struct xfs_trans	*tp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents)
{
	struct xfs_mount	*mp = tp->t_mountp;
	int			error = 0;
	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	/*
	 * Attempt to reserve the needed disk blocks by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (blocks > 0) {
		error = xfs_mod_fdblocks(mp, -((int64_t)blocks), rsvd);
		if (error != 0)
			return -ENOSPC;
		tp->t_blk_res += blocks;
	}

	/*
	 * Reserve the log space needed for this transaction.
	 */
	if (resp->tr_logres > 0) {
		bool	permanent = false;

		ASSERT(tp->t_log_res == 0 ||
		       tp->t_log_res == resp->tr_logres);
		ASSERT(tp->t_log_count == 0 ||
		       tp->t_log_count == resp->tr_logcount);

		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
			permanent = true;
		} else {
			ASSERT(tp->t_ticket == NULL);
			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
		}

		if (tp->t_ticket != NULL) {
			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
			error = xfs_log_regrant(mp, tp->t_ticket);
		} else {
			error = xfs_log_reserve(mp, resp->tr_logres,
						resp->tr_logcount,
						&tp->t_ticket, permanent);
		}

		if (error)
			goto undo_blocks;

		tp->t_log_res = resp->tr_logres;
		tp->t_log_count = resp->tr_logcount;
	}

	/*
	 * Attempt to reserve the needed realtime extents by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (rtextents > 0) {
		error = xfs_mod_frextents(mp, -((int64_t)rtextents));
		if (error) {
			error = -ENOSPC;
			goto undo_log;
		}
		tp->t_rtx_res += rtextents;
	}

	return 0;

	/*
	 * Error cases jump to one of these labels to undo any
	 * reservations which have already been performed.
	 */
undo_log:
	if (resp->tr_logres > 0) {
		xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
		tp->t_ticket = NULL;
		tp->t_log_res = 0;
		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
	}

undo_blocks:
	if (blocks > 0) {
		xfs_mod_fdblocks(mp, (int64_t)blocks, rsvd);
		tp->t_blk_res = 0;
	}
	return error;
}

int
xfs_trans_alloc(
	struct xfs_mount	*mp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents,
	uint			flags,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	bool			want_retry = true;
	int			error;

	/*
	 * Allocate the handle before we do our freeze accounting and set up
	 * the GFP_NOFS allocation context so that we avoid lockdep false
	 * positives by doing GFP_KERNEL allocations inside
	 * sb_start_intwrite().
	 */
retry:
	tp = kmem_cache_zalloc(xfs_trans_cache, GFP_KERNEL | __GFP_NOFAIL);
	if (!(flags & XFS_TRANS_NO_WRITECOUNT))
		sb_start_intwrite(mp->m_super);
	xfs_trans_set_context(tp);

	/*
	 * Zero-reservation ("empty") transactions can't modify anything, so
	 * they're allowed to run while we're frozen.
	 */
	WARN_ON(resp->tr_logres > 0 &&
		mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
	ASSERT(!(flags & XFS_TRANS_RES_FDBLKS) ||
	       xfs_has_lazysbcount(mp));

	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
	tp->t_flags = flags;
	tp->t_mountp = mp;
	INIT_LIST_HEAD(&tp->t_items);
	INIT_LIST_HEAD(&tp->t_busy);
	INIT_LIST_HEAD(&tp->t_dfops);
	tp->t_highest_agno = NULLAGNUMBER;

	error = xfs_trans_reserve(tp, resp, blocks, rtextents);
	if (error == -ENOSPC && want_retry) {
		xfs_trans_cancel(tp);

		/*
		 * We weren't able to reserve enough space for the transaction.
		 * Flush the other speculative space allocations to free space.
		 * Do not perform a synchronous scan because callers can hold
		 * other locks.
		 */
		error = xfs_blockgc_flush_all(mp);
		if (error)
			return error;
		want_retry = false;
		goto retry;
	}
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	trace_xfs_trans_alloc(tp, _RET_IP_);

	*tpp = tp;
	return 0;
}
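
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a typical caller pattern for xfs_trans_alloc().  The reservation type
 * (tr_write), the block count and the work done inside the transaction are
 * hypothetical stand-ins; real callers join and log their own items before
 * committing.  Compiled out so it cannot affect the build.
 */
#if 0
static int
xfs_example_trans_alloc_usage(
	struct xfs_mount	*mp)
{
	struct xfs_trans	*tp;
	int			error;

	/* Reserve log space and 16 data blocks up front. */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 16, 0, 0, &tp);
	if (error)
		return error;	/* e.g. -ENOSPC if no blocks are free */

	/* ... xfs_trans_ijoin()/xfs_trans_log_*() calls would go here ... */

	/* Commit makes the changes durable; cancel would back them out. */
	return xfs_trans_commit(tp);
}
#endif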

/*
 * Create an empty transaction with no reservation.  This is a defensive
 * mechanism for routines that query metadata without actually modifying them --
 * if the metadata being queried is somehow cross-linked (think a btree block
 * pointer that points higher in the tree), we risk deadlock.  However, blocks
 * grabbed as part of a transaction can be re-grabbed.  The verifiers will
 * notice the corrupt block and the operation will fail back to userspace
 * without deadlocking.
 *
 * Note the zero-length reservation; this transaction MUST be cancelled without
 * any dirty data.
 *
 * Callers should obtain freeze protection to avoid a conflict with fs freezing
 * where we can be grabbing buffers at the same time that freeze is trying to
 * drain the buffer LRU list.
 */
int
xfs_trans_alloc_empty(
	struct xfs_mount	*mp,
	struct xfs_trans	**tpp)
{
	struct xfs_trans_res	resv = {0};

	return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
}
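
/*
 * Illustrative sketch (editor's addition): how an empty transaction is
 * typically used for a read-only metadata walk.  The inode argument is a
 * hypothetical stand-in; the important part is that the transaction stays
 * clean and is cancelled, never committed.  Compiled out.
 */
#if 0
static int
xfs_example_empty_trans_usage(
	struct xfs_inode	*ip)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc_empty(ip->i_mount, &tp);
	if (error)
		return error;

	/* ... read-only buffer/btree lookups using @tp go here ... */

	/* Nothing was dirtied, so cancelling is the required exit path. */
	xfs_trans_cancel(tp);
	return 0;
}
#endif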

/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 *
 * Because we may not be keeping track of allocated/free inodes and
 * used filesystem blocks in the superblock, we do not mark the
 * superblock dirty in this transaction if we modify these fields.
 * We still need to update the transaction deltas so that they get
 * applied to the incore superblock, but we don't want them to
 * cause the superblock to get locked and logged if these are the
 * only fields in the superblock that the transaction modifies.
 */
void
xfs_trans_mod_sb(
	xfs_trans_t	*tp,
	uint		field,
	int64_t		delta)
{
	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
	xfs_mount_t	*mp = tp->t_mountp;

	switch (field) {
	case XFS_TRANS_SB_ICOUNT:
		tp->t_icount_delta += delta;
		if (xfs_has_lazysbcount(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_IFREE:
		tp->t_ifree_delta += delta;
		if (xfs_has_lazysbcount(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FDBLOCKS:
		/*
		 * Track the number of blocks allocated in the transaction.
		 * Make sure it does not exceed the number reserved. If so,
		 * shutdown as this can lead to accounting inconsistency.
		 */
		if (delta < 0) {
			tp->t_blk_res_used += (uint)-delta;
			if (tp->t_blk_res_used > tp->t_blk_res)
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		} else if (delta > 0 && (tp->t_flags & XFS_TRANS_RES_FDBLKS)) {
			int64_t	blkres_delta;

			/*
			 * Return freed blocks directly to the reservation
			 * instead of the global pool, being careful not to
			 * overflow the trans counter. This is used to preserve
			 * reservation across chains of transaction rolls that
			 * repeatedly free and allocate blocks.
			 */
			blkres_delta = min_t(int64_t, delta,
					     UINT_MAX - tp->t_blk_res);
			tp->t_blk_res += blkres_delta;
			delta -= blkres_delta;
		}
		tp->t_fdblocks_delta += delta;
		if (xfs_has_lazysbcount(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_RES_FDBLOCKS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		tp->t_res_fdblocks_delta += delta;
		if (xfs_has_lazysbcount(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FREXTENTS:
		/*
		 * Track the number of blocks allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_rtx_res_used += (uint)-delta;
			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
		}
		tp->t_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_RES_FREXTENTS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_DBLOCKS:
		tp->t_dblocks_delta += delta;
		break;
	case XFS_TRANS_SB_AGCOUNT:
		ASSERT(delta > 0);
		tp->t_agcount_delta += delta;
		break;
	case XFS_TRANS_SB_IMAXPCT:
		tp->t_imaxpct_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSIZE:
		tp->t_rextsize_delta += delta;
		break;
	case XFS_TRANS_SB_RBMBLOCKS:
		tp->t_rbmblocks_delta += delta;
		break;
	case XFS_TRANS_SB_RBLOCKS:
		tp->t_rblocks_delta += delta;
		break;
	case XFS_TRANS_SB_REXTENTS:
		tp->t_rextents_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSLOG:
		tp->t_rextslog_delta += delta;
		break;
	default:
		ASSERT(0);
		return;
	}

	tp->t_flags |= flags;
}
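
/*
 * Illustrative sketch (editor's addition): recording superblock deltas from
 * within a dirty transaction.  The counts are made up; the point is that
 * allocations pass negative deltas and frees pass positive ones, and nothing
 * touches the superblock buffer until commit time.  Compiled out.
 */
#if 0
static void
xfs_example_mod_sb_usage(
	struct xfs_trans	*tp)
{
	/* "Allocate" four data blocks against the transaction's reservation. */
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -4LL);

	/*
	 * Record a freshly allocated inode chunk: 64 new inodes, all of
	 * which start out free (hence the matching ifree delta).
	 */
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, 64LL);
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 64LL);
}
#endif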

/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
	xfs_trans_t	*tp)
{
	struct xfs_dsb	*sbp;
	struct xfs_buf	*bp;
	int		whole = 0;

	bp = xfs_trans_getsb(tp);
	sbp = bp->b_addr;

	/*
	 * Only update the superblock counters if we are logging them
	 */
	if (!xfs_has_lazysbcount((tp->t_mountp))) {
		if (tp->t_icount_delta)
			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
		if (tp->t_ifree_delta)
			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
		if (tp->t_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
		if (tp->t_res_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
	}

	/*
	 * Updating frextents requires careful handling because it does not
	 * behave like the lazysb counters: we cannot rely on log recovery in
	 * older kernels to recompute the value from the rtbitmap.  This means
	 * that the ondisk frextents must be consistent with the rtbitmap.
	 *
	 * Therefore, log the frextents change to the ondisk superblock and
	 * update the incore superblock so that future calls to xfs_log_sb
	 * write the correct value ondisk.
	 *
	 * Don't touch m_frextents because it includes incore reservations,
	 * and those are handled by the unreserve function.
	 */
	if (tp->t_frextents_delta || tp->t_res_frextents_delta) {
		struct xfs_mount	*mp = tp->t_mountp;
		int64_t			rtxdelta;

		rtxdelta = tp->t_frextents_delta + tp->t_res_frextents_delta;

		spin_lock(&mp->m_sb_lock);
		be64_add_cpu(&sbp->sb_frextents, rtxdelta);
		mp->m_sb.sb_frextents += rtxdelta;
		spin_unlock(&mp->m_sb_lock);
	}

	if (tp->t_dblocks_delta) {
		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
		whole = 1;
	}
	if (tp->t_agcount_delta) {
		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
		whole = 1;
	}
	if (tp->t_imaxpct_delta) {
		sbp->sb_imax_pct += tp->t_imaxpct_delta;
		whole = 1;
	}
	if (tp->t_rextsize_delta) {
		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
		whole = 1;
	}
	if (tp->t_rbmblocks_delta) {
		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
		whole = 1;
	}
	if (tp->t_rblocks_delta) {
		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
		whole = 1;
	}
	if (tp->t_rextents_delta) {
		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
		whole = 1;
	}
	if (tp->t_rextslog_delta) {
		sbp->sb_rextslog += tp->t_rextslog_delta;
		whole = 1;
	}

	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
	if (whole)
		/*
		 * Log the whole thing, the fields are noncontiguous.
		 */
		xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb) - 1);
	else
		/*
		 * Since all the modifiable fields are contiguous, we
		 * can get away with this.
		 */
		xfs_trans_log_buf(tp, bp, offsetof(struct xfs_dsb, sb_icount),
				  offsetof(struct xfs_dsb, sb_frextents) +
				  sizeof(sbp->sb_frextents) - 1);
}

/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
 * and apply superblock counter changes to the in-core superblock.  The
 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 * applied to the in-core superblock.  The idea is that it has already been
 * done.
 *
 * If we are not logging superblock counters, then the inode allocated/free and
 * used block counts are not updated in the on disk superblock. In this case,
 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 * still need to update the incore superblock with the changes.
 *
 * Deltas for the inode count are +/-64, hence we use a large batch size of 128
 * so we don't need to take the counter lock on every update.
 */
#define XFS_ICOUNT_BATCH	128

void
xfs_trans_unreserve_and_mod_sb(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
	int64_t			blkdelta = 0;
	int64_t			rtxdelta = 0;
	int64_t			idelta = 0;
	int64_t			ifreedelta = 0;
	int			error;

	/* calculate deltas */
	if (tp->t_blk_res > 0)
		blkdelta = tp->t_blk_res;
	if ((tp->t_fdblocks_delta != 0) &&
	    (xfs_has_lazysbcount(mp) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
		blkdelta += tp->t_fdblocks_delta;

	if (tp->t_rtx_res > 0)
		rtxdelta = tp->t_rtx_res;
	if ((tp->t_frextents_delta != 0) &&
	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
		rtxdelta += tp->t_frextents_delta;

	if (xfs_has_lazysbcount(mp) ||
	    (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		idelta = tp->t_icount_delta;
		ifreedelta = tp->t_ifree_delta;
	}

	/* apply the per-cpu counters */
	if (blkdelta) {
		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
		ASSERT(!error);
	}

	if (idelta)
		percpu_counter_add_batch(&mp->m_icount, idelta,
					 XFS_ICOUNT_BATCH);

	if (ifreedelta)
		percpu_counter_add(&mp->m_ifree, ifreedelta);

	if (rtxdelta) {
		error = xfs_mod_frextents(mp, rtxdelta);
		ASSERT(!error);
	}

	if (!(tp->t_flags & XFS_TRANS_SB_DIRTY))
		return;

	/* apply remaining deltas */
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_fdblocks += tp->t_fdblocks_delta + tp->t_res_fdblocks_delta;
	mp->m_sb.sb_icount += idelta;
	mp->m_sb.sb_ifree += ifreedelta;
	/*
	 * Do not touch sb_frextents here because we are dealing with incore
	 * reservation.  sb_frextents is not part of the lazy sb counters so it
	 * must be consistent with the ondisk rtbitmap and must never include
	 * incore reservations.
	 */
	mp->m_sb.sb_dblocks += tp->t_dblocks_delta;
	mp->m_sb.sb_agcount += tp->t_agcount_delta;
	mp->m_sb.sb_imax_pct += tp->t_imaxpct_delta;
	mp->m_sb.sb_rextsize += tp->t_rextsize_delta;
	mp->m_sb.sb_rbmblocks += tp->t_rbmblocks_delta;
	mp->m_sb.sb_rblocks += tp->t_rblocks_delta;
	mp->m_sb.sb_rextents += tp->t_rextents_delta;
	mp->m_sb.sb_rextslog += tp->t_rextslog_delta;
	spin_unlock(&mp->m_sb_lock);

	/*
	 * Debug checks outside of the spinlock so they don't lock up the
	 * machine if they fail.
	 */
	ASSERT(mp->m_sb.sb_imax_pct >= 0);
	ASSERT(mp->m_sb.sb_rextslog >= 0);
	return;
}

/* Add the given log item to the transaction's list of log items. */
void
xfs_trans_add_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	ASSERT(lip->li_log == tp->t_mountp->m_log);
	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
	ASSERT(list_empty(&lip->li_trans));
	ASSERT(!test_bit(XFS_LI_DIRTY, &lip->li_flags));

	list_add_tail(&lip->li_trans, &tp->t_items);
	trace_xfs_trans_add_item(tp, _RET_IP_);
}

/*
 * Unlink the log item from the transaction.  The log item is no longer
 * considered dirty in this transaction, as the linked transaction has
 * finished, either by abort or commit completion.
 */
void
xfs_trans_del_item(
	struct xfs_log_item	*lip)
{
	clear_bit(XFS_LI_DIRTY, &lip->li_flags);
	list_del_init(&lip->li_trans);
}

/* Detach and unlock all of the items in a transaction */
static void
xfs_trans_free_items(
	struct xfs_trans	*tp,
	bool			abort)
{
	struct xfs_log_item	*lip, *next;

	trace_xfs_trans_free_items(tp, _RET_IP_);

	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		xfs_trans_del_item(lip);
		if (abort)
			set_bit(XFS_LI_ABORTED, &lip->li_flags);
		if (lip->li_ops->iop_release)
			lip->li_ops->iop_release(lip);
	}
}

static inline void
xfs_log_item_batch_insert(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		commit_lsn)
{
	int	i;

	spin_lock(&ailp->ail_lock);
	/* xfs_trans_ail_update_bulk drops ailp->ail_lock */
	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];

		if (lip->li_ops->iop_unpin)
			lip->li_ops->iop_unpin(lip, 0);
	}
}

/*
 * Bulk operation version of xfs_trans_committed that takes a log vector of
 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 * minimise lock traffic.
 *
 * If we are called with the aborted flag set, it is because a log write during
 * a CIL checkpoint commit has failed. In this case, all the items in the
 * checkpoint have already gone through iop_committed and iop_committing, which
 * means that checkpoint commit abort handling is treated exactly the same
 * as an iclog write error even though we haven't started any IO yet. Hence in
 * this case all we need to do is iop_committed processing, followed by an
 * iop_unpin(aborted) call.
 *
 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 * at the end of the AIL, the insert cursor avoids the need to walk
 * the AIL to find the insertion point on every xfs_log_item_batch_insert()
 * call. This saves a lot of needless list walking and is a net win, even
 * though it slightly increases the amount of AIL lock traffic to set it up
 * and tear it down.
 */
void
xfs_trans_committed_bulk(
	struct xfs_ail		*ailp,
	struct list_head	*lv_chain,
	xfs_lsn_t		commit_lsn,
	bool			aborted)
{
#define LOG_ITEM_BATCH_SIZE	32
	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
	struct xfs_log_vec	*lv;
	struct xfs_ail_cursor	cur;
	int			i = 0;

	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
	spin_unlock(&ailp->ail_lock);

	/* unpin all the log items */
	list_for_each_entry(lv, lv_chain, lv_list) {
		struct xfs_log_item	*lip = lv->lv_item;
		xfs_lsn_t		item_lsn;

		if (aborted)
			set_bit(XFS_LI_ABORTED, &lip->li_flags);

		if (lip->li_ops->flags & XFS_ITEM_RELEASE_WHEN_COMMITTED) {
			lip->li_ops->iop_release(lip);
			continue;
		}

		if (lip->li_ops->iop_committed)
			item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);
		else
			item_lsn = commit_lsn;

		/* item_lsn of -1 means the item needs no further processing */
		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
			continue;

		/*
		 * if we are aborting the operation, no point in inserting the
		 * object into the AIL as we are in a shutdown situation.
		 */
		if (aborted) {
			ASSERT(xlog_is_shutdown(ailp->ail_log));
			if (lip->li_ops->iop_unpin)
				lip->li_ops->iop_unpin(lip, 1);
			continue;
		}

		if (item_lsn != commit_lsn) {

			/*
			 * Not a bulk update option due to unusual item_lsn.
			 * Push into AIL immediately, rechecking the lsn once
			 * we have the ail lock. Then unpin the item. This does
			 * not affect the AIL cursor the bulk insert path is
			 * using.
			 */
			spin_lock(&ailp->ail_lock);
			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
				xfs_trans_ail_update(ailp, lip, item_lsn);
			else
				spin_unlock(&ailp->ail_lock);
			if (lip->li_ops->iop_unpin)
				lip->li_ops->iop_unpin(lip, 0);
			continue;
		}

		/* Item is a candidate for bulk AIL insert.  */
		log_items[i++] = lv->lv_item;
		if (i >= LOG_ITEM_BATCH_SIZE) {
			xfs_log_item_batch_insert(ailp, &cur, log_items,
					LOG_ITEM_BATCH_SIZE, commit_lsn);
			i = 0;
		}
	}

	/* make sure we insert the remainder! */
	if (i)
		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);

	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);
}

/*
 * Sort transaction items prior to running precommit operations. This will
 * attempt to order the items such that they will always be locked in the same
 * order. Items that have no sort function are moved to the end of the list
 * and so are locked last.
 *
 * This may need refinement as different types of objects add sort functions.
 *
 * Function is more complex than it needs to be because we are comparing 64 bit
 * values and the function only returns 32 bit values.
 */
static int
xfs_trans_precommit_sort(
	void			*unused_arg,
	const struct list_head	*a,
	const struct list_head	*b)
{
	struct xfs_log_item	*lia = container_of(a,
					struct xfs_log_item, li_trans);
	struct xfs_log_item	*lib = container_of(b,
					struct xfs_log_item, li_trans);
	int64_t			diff;

	/*
	 * If both items are non-sortable, leave them alone. If only one is
	 * sortable, move the non-sortable item towards the end of the list.
	 */
	if (!lia->li_ops->iop_sort && !lib->li_ops->iop_sort)
		return 0;
	if (!lia->li_ops->iop_sort)
		return 1;
	if (!lib->li_ops->iop_sort)
		return -1;

	diff = lia->li_ops->iop_sort(lia) - lib->li_ops->iop_sort(lib);
	if (diff < 0)
		return -1;
	if (diff > 0)
		return 1;
	return 0;
}
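
/*
 * Illustrative sketch (editor's addition): the shape of an ->iop_sort
 * callback.  It returns a 64-bit ordering key -- here a made-up item type
 * that sorts by the disk address it covers -- which is why the comparison
 * above must collapse a 64-bit difference into a 32-bit return value.
 * Everything below is hypothetical.  Compiled out.
 */
#if 0
struct xfs_example_item {
	struct xfs_log_item	eli_item;
	xfs_daddr_t		eli_daddr;
};

static uint64_t
xfs_example_item_sort(
	struct xfs_log_item	*lip)
{
	struct xfs_example_item	*eip = container_of(lip,
					struct xfs_example_item, eli_item);

	/* Lock order follows disk address order for this item type. */
	return eip->eli_daddr;
}
#endif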

/*
 * Run transaction precommit functions.
 *
 * If there is an error in any of the callouts, then stop immediately and
 * trigger a shutdown to abort the transaction. There is no recovery possible
 * from errors at this point as the transaction is dirty....
 */
static int
xfs_trans_run_precommits(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_log_item	*lip, *n;
	int			error = 0;

	/*
	 * Sort the item list to avoid ABBA deadlocks with other transactions
	 * running precommit operations that lock multiple shared items such as
	 * inode cluster buffers.
	 */
	list_sort(NULL, &tp->t_items, xfs_trans_precommit_sort);

	/*
	 * Precommit operations can remove the log item from the transaction
	 * if the log item exists purely to delay modifications until they
	 * can be ordered against other operations. Hence we have to use
	 * list_for_each_entry_safe() here.
	 */
	list_for_each_entry_safe(lip, n, &tp->t_items, li_trans) {
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;
		if (lip->li_ops->iop_precommit) {
			error = lip->li_ops->iop_precommit(tp, lip);
			if (error)
				break;
		}
	}
	if (error)
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return error;
}
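
/*
 * Illustrative sketch (editor's addition): the shape of an ->iop_precommit
 * callback as driven by the loop above, reusing the hypothetical item type
 * from the sketch before xfs_trans_run_precommits().  A real implementation
 * folds pending changes into the item while it is locked; the validity
 * check here is made up.  Compiled out.
 */
#if 0
static int
xfs_example_item_precommit(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	struct xfs_example_item	*eip = container_of(lip,
					struct xfs_example_item, eli_item);

	/*
	 * Apply deferred updates while the item is locked.  A nonzero
	 * return causes xfs_trans_run_precommits() to shut the filesystem
	 * down.
	 */
	if (eip->eli_daddr < 0)
		return -EFSCORRUPTED;
	return 0;
}
#endif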

/*
 * Commit the given transaction to the log.
 *
 * The XFS disk error handling mechanism is not based on a typical
 * transaction abort mechanism. Logically after the filesystem
 * gets marked 'SHUTDOWN', we can't let any new transactions
 * be durable - ie. committed to disk - because some metadata might
 * be inconsistent. In such cases, this returns an error, and the
 * caller may assume that all locked objects joined to the transaction
 * have already been unlocked as if the commit had succeeded.
 * Do not reference the transaction structure after this call.
 */
static int
__xfs_trans_commit(
	struct xfs_trans	*tp,
	bool			regrant)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xlog		*log = mp->m_log;
	xfs_csn_t		commit_seq = 0;
	int			error = 0;
	int			sync = tp->t_flags & XFS_TRANS_SYNC;

	trace_xfs_trans_commit(tp, _RET_IP_);

	error = xfs_trans_run_precommits(tp);
	if (error) {
		if (tp->t_flags & XFS_TRANS_PERM_LOG_RES)
			xfs_defer_cancel(tp);
		goto out_unreserve;
	}

	/*
	 * Finish deferred items on final commit. Only permanent transactions
	 * should ever have deferred ops.
	 */
	WARN_ON_ONCE(!list_empty(&tp->t_dfops) &&
		     !(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
	if (!regrant && (tp->t_flags & XFS_TRANS_PERM_LOG_RES)) {
		error = xfs_defer_finish_noroll(&tp);
		if (error)
			goto out_unreserve;

		/* Run precommits from final tx in defer chain. */
		error = xfs_trans_run_precommits(tp);
		if (error)
			goto out_unreserve;
	}

	/*
	 * If there is nothing to be logged by the transaction,
	 * then unlock all of the items associated with the
	 * transaction and free the transaction structure.
	 * Also make sure to return any reserved blocks to
	 * the free pool.
	 */
	if (!(tp->t_flags & XFS_TRANS_DIRTY))
		goto out_unreserve;

	/*
	 * We must check against log shutdown here because we cannot abort log
	 * items and leave them dirty, inconsistent and unpinned in memory while
	 * the log is active. This leaves them open to being written back to
	 * disk, and that will lead to on-disk corruption.
	 */
	if (xlog_is_shutdown(log)) {
		error = -EIO;
		goto out_unreserve;
	}

	ASSERT(tp->t_ticket != NULL);

	/*
	 * If we need to update the superblock, then do it now.
	 */
	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
		xfs_trans_apply_sb_deltas(tp);
	xfs_trans_apply_dquot_deltas(tp);

	xlog_cil_commit(log, tp, &commit_seq, regrant);

	xfs_trans_free(tp);

	/*
	 * If the transaction needs to be synchronous, then force the
	 * log out now and wait for it.
	 */
	if (sync) {
		error = xfs_log_force_seq(mp, commit_seq, XFS_LOG_SYNC, NULL);
		XFS_STATS_INC(mp, xs_trans_sync);
	} else {
		XFS_STATS_INC(mp, xs_trans_async);
	}

	return error;

out_unreserve:
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * It is indeed possible for the transaction to be not dirty but
	 * the dqinfo portion to be.  All that means is that we have some
	 * (non-persistent) quota reservations that need to be unreserved.
	 */
	xfs_trans_unreserve_and_mod_dquots(tp);
	if (tp->t_ticket) {
		if (regrant && !xlog_is_shutdown(log))
			xfs_log_ticket_regrant(log, tp->t_ticket);
		else
			xfs_log_ticket_ungrant(log, tp->t_ticket);
		tp->t_ticket = NULL;
	}
	xfs_trans_free_items(tp, !!error);
	xfs_trans_free(tp);

	XFS_STATS_INC(mp, xs_trans_empty);
	return error;
}

int
xfs_trans_commit(
	struct xfs_trans	*tp)
{
	return __xfs_trans_commit(tp, false);
}

/*
 * Unlock all of the transaction's items and free the transaction.  If the
 * transaction is dirty, we must shut down the filesystem because there is no
 * way to restore them to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release it as
 * well.
 *
 * This is a high level function (equivalent to xfs_trans_commit()) and so can
 * be called after the transaction has effectively been aborted due to the mount
 * being shut down. However, if the mount has not been shut down and the
 * transaction is dirty we will shut the mount down and, in doing so, that
 * guarantees that the log is shut down, too. Hence we don't need to be as
 * careful with shutdown state and dirty items here as we need to be in
 * xfs_trans_commit().
 */
void
xfs_trans_cancel(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xlog		*log = mp->m_log;
	bool			dirty = (tp->t_flags & XFS_TRANS_DIRTY);

	trace_xfs_trans_cancel(tp, _RET_IP_);

	/*
	 * It's never valid to cancel a transaction with deferred ops attached,
	 * because the transaction is effectively dirty. Complain about this
	 * loudly before freeing the in-memory defer items and shutting down the
	 * filesystem.
	 */
	if (!list_empty(&tp->t_dfops)) {
		ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
		dirty = true;
		xfs_defer_cancel(tp);
	}

	/*
	 * See if the caller is relying on us to shut down the filesystem. We
	 * only want an error report if there isn't already a shutdown in
	 * progress, so we only need to check against the mount shutdown state
	 * here.
	 */
	if (dirty && !xfs_is_shutdown(mp)) {
		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}
#ifdef DEBUG
	/* Log items need to be consistent until the log is shut down. */
	if (!dirty && !xlog_is_shutdown(log)) {
		struct xfs_log_item *lip;

		list_for_each_entry(lip, &tp->t_items, li_trans)
			ASSERT(!xlog_item_is_intent_done(lip));
	}
#endif
	xfs_trans_unreserve_and_mod_sb(tp);
	xfs_trans_unreserve_and_mod_dquots(tp);

	if (tp->t_ticket) {
		xfs_log_ticket_ungrant(log, tp->t_ticket);
		tp->t_ticket = NULL;
	}

	xfs_trans_free_items(tp, dirty);
	xfs_trans_free(tp);
}

/*
 * Roll from one trans in the sequence of PERMANENT transactions to
 * the next: permanent transactions are only flushed out when
 * committed with xfs_trans_commit(), but we still want to let chunks
 * of it go to the log as soon as possible. So we commit the
 * chunk we've been working on and get a new transaction to continue.
 */
int
xfs_trans_roll(
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*trans = *tpp;
	struct xfs_trans_res	tres;
	int			error;

	trace_xfs_trans_roll(trans, _RET_IP_);

	/*
	 * Copy the critical parameters from one trans to the next.
	 */
	tres.tr_logres = trans->t_log_res;
	tres.tr_logcount = trans->t_log_count;

	*tpp = xfs_trans_dup(trans);

	/*
	 * Commit the current transaction.
	 * If this commit failed, then it'd just unlock those items that
	 * are not marked ihold. That also means that a filesystem shutdown
	 * is in progress. The caller takes the responsibility to cancel
	 * the duplicate transaction that gets returned.
	 */
	error = __xfs_trans_commit(trans, true);
	if (error)
		return error;

	/*
	 * Reserve space in the log for the next transaction.
	 * This also pushes items in the "AIL", the list of logged items,
	 * out to disk if they are taking up space at the tail of the log
	 * that we want to use. This requires that either nothing be locked
	 * across this call, or that anything that is locked be logged in
	 * the prior and the next transactions.
	 */
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	return xfs_trans_reserve(*tpp, &tres, 0, 0);
}
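
/*
 * Illustrative sketch (editor's addition): the canonical caller pattern for
 * xfs_trans_roll().  The work-loop helpers are hypothetical; the key points
 * are that @tpp is replaced on every roll and that items the caller wants to
 * keep locked must be re-joined to the new transaction.  Compiled out.
 */
#if 0
static int
xfs_example_roll_usage(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip)
{
	int	error;

	while (xfs_example_more_work(ip)) {
		xfs_example_do_one_chunk(*tpp, ip);

		/* Commit this chunk and get a fresh transaction. */
		error = xfs_trans_roll(tpp);
		if (error)
			return error;

		/* The inode must be re-joined to the new transaction. */
		xfs_trans_ijoin(*tpp, ip, 0);
	}
	return 0;
}
#endif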

/*
 * Allocate a transaction, lock and join the inode to it, and reserve quota.
 *
 * The caller must ensure that the on-disk dquots attached to this inode have
 * already been allocated and initialized.  The caller is responsible for
 * releasing ILOCK_EXCL if a new transaction is returned.
 */
int
xfs_trans_alloc_inode(
	struct xfs_inode	*ip,
	struct xfs_trans_res	*resv,
	unsigned int		dblocks,
	unsigned int		rblocks,
	bool			force,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, resv, dblocks,
			rblocks / mp->m_sb.sb_rextsize,
			force ? XFS_TRANS_RESERVE : 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_qm_dqattach_locked(ip, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	error = xfs_trans_reserve_quota_nblks(tp, ip, dblocks, rblocks, force);
	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
		xfs_trans_cancel(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_blockgc_free_quota(ip, 0);
		retried = true;
		goto retry;
	}
	if (error)
		goto out_cancel;

	*tpp = tp;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
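
/*
 * Illustrative sketch (editor's addition): a caller of
 * xfs_trans_alloc_inode().  The reservation type and block count are made
 * up; note that on success the inode comes back joined and ILOCK_EXCL'd, and
 * the caller must drop the ILOCK itself after committing.  Compiled out.
 */
#if 0
static int
xfs_example_alloc_inode_usage(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, 8, 0,
			false, &tp);
	if (error)
		return error;

	/* ... modify and log the inode here ... */

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
#endif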

/*
 * Allocate a transaction in preparation for inode creation by reserving quota
 * against the given dquots.  Callers are not required to hold any inode locks.
 */
int
xfs_trans_alloc_icreate(
	struct xfs_mount	*mp,
	struct xfs_trans_res	*resv,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	unsigned int		dblocks,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, resv, dblocks, 0, 0, &tp);
	if (error)
		return error;

	error = xfs_trans_reserve_quota_icreate(tp, udqp, gdqp, pdqp, dblocks);
	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
		xfs_trans_cancel(tp);
		xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
		retried = true;
		goto retry;
	}
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	*tpp = tp;
	return 0;
}

/*
 * Allocate a transaction, lock and join the inode to it, and reserve quota
 * in preparation for inode attribute changes that include uid, gid, or prid
 * changes.
 *
 * The caller must ensure that the on-disk dquots attached to this inode have
 * already been allocated and initialized.  The ILOCK will be dropped when the
 * transaction is committed or cancelled.
 */
int
xfs_trans_alloc_ichange(
	struct xfs_inode	*ip,
	struct xfs_dquot	*new_udqp,
	struct xfs_dquot	*new_gdqp,
	struct xfs_dquot	*new_pdqp,
	bool			force,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*udqp;
	struct xfs_dquot	*gdqp;
	struct xfs_dquot	*pdqp;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	error = xfs_qm_dqattach_locked(ip, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	/*
	 * For each quota type, skip quota reservations if the inode's dquots
	 * now match the ones that came from the caller, or the caller didn't
	 * pass one in.  The inode's dquots can change if we drop the ILOCK to
	 * perform a blockgc scan, so we must preserve the caller's arguments.
	 */
	udqp = (new_udqp != ip->i_udquot) ? new_udqp : NULL;
	gdqp = (new_gdqp != ip->i_gdquot) ? new_gdqp : NULL;
	pdqp = (new_pdqp != ip->i_pdquot) ? new_pdqp : NULL;
	if (udqp || gdqp || pdqp) {
		unsigned int	qflags = XFS_QMOPT_RES_REGBLKS;

		if (force)
			qflags |= XFS_QMOPT_FORCE_RES;

		/*
		 * Reserve enough quota to handle blocks on disk and reserved
		 * for a delayed allocation.  We'll actually transfer the
		 * delalloc reservation between dquots at chown time, even
		 * though that part is only semi-transactional.
		 */
		error = xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp,
				pdqp, ip->i_nblocks + ip->i_delayed_blks,
				1, qflags);
		if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
			xfs_trans_cancel(tp);
			xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
			retried = true;
			goto retry;
		}
		if (error)
			goto out_cancel;
	}

	*tpp = tp;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	return error;
}

/*
 * Allocate a transaction, lock and join the directory and child inodes to it,
 * and reserve quota for a directory update.  If there isn't sufficient space,
 * @dblocks will be set to zero for a reservationless directory update and
 * @nospace_error will be set to a negative errno describing the space
 * constraint we hit.
 *
 * The caller must ensure that the on-disk dquots attached to this inode have
 * already been allocated and initialized.  The ILOCKs will be dropped when the
 * transaction is committed or cancelled.
 */
int
xfs_trans_alloc_dir(
	struct xfs_inode	*dp,
	struct xfs_trans_res	*resv,
	struct xfs_inode	*ip,
	unsigned int		*dblocks,
	struct xfs_trans	**tpp,
	int			*nospace_error)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	unsigned int		resblks;
	bool			retried = false;
	int			error;

retry:
	*nospace_error = 0;
	resblks = *dblocks;
	error = xfs_trans_alloc(mp, resv, resblks, 0, 0, &tp);
	if (error == -ENOSPC) {
		*nospace_error = error;
		resblks = 0;
		error = xfs_trans_alloc(mp, resv, resblks, 0, 0, &tp);
	}
	if (error)
		return error;

	xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	error = xfs_qm_dqattach_locked(dp, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	error = xfs_qm_dqattach_locked(ip, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	if (resblks == 0)
		goto done;

	error = xfs_trans_reserve_quota_nblks(tp, dp, resblks, 0, false);
	if (error == -EDQUOT || error == -ENOSPC) {
		if (!retried) {
			xfs_trans_cancel(tp);
			xfs_blockgc_free_quota(dp, 0);
			retried = true;
			goto retry;
		}

		*nospace_error = error;
		resblks = 0;
		error = 0;
	}
	if (error)
		goto out_cancel;

done:
	*tpp = tp;
	*dblocks = resblks;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	return error;
}
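
/*
 * Illustrative sketch (editor's addition): how a directory-update caller is
 * expected to consume @nospace_error from xfs_trans_alloc_dir().  The update
 * helper is hypothetical; the pattern of deferring the ENOSPC verdict until
 * the reservationless update actually runs out of room is similar to the
 * real unlink/rename callers.  Compiled out.
 */
#if 0
static int
xfs_example_alloc_dir_usage(
	struct xfs_inode	*dp,
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_trans	*tp;
	unsigned int		resblks = XFS_REMOVE_SPACE_RES(mp);
	int			nospace_error = 0;
	int			error;

	error = xfs_trans_alloc_dir(dp, &M_RES(mp)->tr_remove, ip, &resblks,
			&tp, &nospace_error);
	if (error)
		return error;

	/* Attempt the directory update with whatever reservation we got. */
	error = xfs_example_dir_update(tp, dp, ip, resblks);
	if (error == -ENOSPC && nospace_error)
		error = nospace_error;	/* report the original constraint */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	/* Committing the transaction drops both ILOCKs. */
	return xfs_trans_commit(tp);
}
#endif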