// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_log_priv.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_icache.h"

kmem_zone_t	*xfs_trans_zone;

#if defined(CONFIG_TRACEPOINTS)
static void
xfs_trans_trace_reservations(
	struct xfs_mount	*mp)
{
	struct xfs_trans_res	resv;
	struct xfs_trans_res	*res;
	struct xfs_trans_res	*end_res;
	int			i;

	res = (struct xfs_trans_res *)M_RES(mp);
	end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
	for (i = 0; res < end_res; i++, res++)
		trace_xfs_trans_resv_calc(mp, i, res);
	xfs_log_get_max_trans_res(mp, &resv);
	trace_xfs_trans_resv_calc(mp, -1, &resv);
}
#else
# define xfs_trans_trace_reservations(mp)
#endif

/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
	struct xfs_mount	*mp)
{
	xfs_trans_resv_calc(mp, M_RES(mp));
	xfs_trans_trace_reservations(mp);
}

/*
 * Free the transaction structure. If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
	struct xfs_trans	*tp)
{
	xfs_extent_busy_sort(&tp->t_busy);
	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);

	trace_xfs_trans_free(tp, _RET_IP_);
	xfs_trans_clear_context(tp);
	if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
		sb_end_intwrite(tp->t_mountp->m_super);
	xfs_trans_free_dqinfo(tp);
	kmem_cache_free(xfs_trans_zone, tp);
}

/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction. The remaining
 * unused block and rt extent reservations are also inherited. This
 * implies that the original transaction is no longer allowed to allocate
 * blocks. Locks and log items, however, are not inherited. They must
 * be added to the new transaction explicitly.
 */
STATIC struct xfs_trans *
xfs_trans_dup(
	struct xfs_trans	*tp)
{
	struct xfs_trans	*ntp;

	trace_xfs_trans_dup(tp, _RET_IP_);

	ntp = kmem_cache_zalloc(xfs_trans_zone, GFP_KERNEL | __GFP_NOFAIL);

	/*
	 * Initialize the new transaction structure.
	 */
	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
	ntp->t_mountp = tp->t_mountp;
	INIT_LIST_HEAD(&ntp->t_items);
	INIT_LIST_HEAD(&ntp->t_busy);
	INIT_LIST_HEAD(&ntp->t_dfops);
	ntp->t_firstblock = NULLFSBLOCK;

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(tp->t_ticket != NULL);

	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
		       (tp->t_flags & XFS_TRANS_RESERVE) |
		       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT) |
		       (tp->t_flags & XFS_TRANS_RES_FDBLKS);
	/* We gave our writer reference to the new transaction */
	tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);

	ASSERT(tp->t_blk_res >= tp->t_blk_res_used);
	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
	tp->t_blk_res = tp->t_blk_res_used;

	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
	tp->t_rtx_res = tp->t_rtx_res_used;

	xfs_trans_switch_context(tp, ntp);

	/* move deferred ops over to the new tp */
	xfs_defer_move(ntp, tp);

	xfs_trans_dup_dqinfo(tp, ntp);
	return ntp;
}
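/*
 * Illustrative sketch (not part of this file's interfaces): callers do not
 * normally use xfs_trans_dup() directly; they roll a permanent transaction,
 * which duplicates it under the covers:
 *
 *	error = xfs_trans_roll(&tp);	// dup, commit old, re-reserve new
 *	if (error)
 *		goto out_cancel;	// tp now points at the duplicate,
 *					// which the caller must cancel
 */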
/*
 * This is called to reserve free disk blocks and log space for the
 * given transaction. This must be done before allocating any resources
 * within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 * Setting XFS_TRANS_PERM_LOG_RES in resp->tr_logflags marks the log
 * reservation as permanent, which is used by long running transactions.
 * If any one of the reservations fails then they will all be backed out.
 *
 * This does not do quota reservations. That typically is done by the
 * caller afterwards.
 */
static int
xfs_trans_reserve(
	struct xfs_trans	*tp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents)
{
	struct xfs_mount	*mp = tp->t_mountp;
	int			error = 0;
	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	/*
	 * Attempt to reserve the needed disk blocks by decrementing
	 * the number needed from the number available. This will
	 * fail if the count would go below zero.
	 */
	if (blocks > 0) {
		error = xfs_mod_fdblocks(mp, -((int64_t)blocks), rsvd);
		if (error != 0)
			return -ENOSPC;
		tp->t_blk_res += blocks;
	}

	/*
	 * Reserve the log space needed for this transaction.
	 */
	if (resp->tr_logres > 0) {
		bool	permanent = false;

		ASSERT(tp->t_log_res == 0 ||
		       tp->t_log_res == resp->tr_logres);
		ASSERT(tp->t_log_count == 0 ||
		       tp->t_log_count == resp->tr_logcount);

		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
			permanent = true;
		} else {
			ASSERT(tp->t_ticket == NULL);
			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
		}

		if (tp->t_ticket != NULL) {
			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
			error = xfs_log_regrant(mp, tp->t_ticket);
		} else {
			error = xfs_log_reserve(mp,
						resp->tr_logres,
						resp->tr_logcount,
						&tp->t_ticket, XFS_TRANSACTION,
						permanent);
		}

		if (error)
			goto undo_blocks;

		tp->t_log_res = resp->tr_logres;
		tp->t_log_count = resp->tr_logcount;
	}

	/*
	 * Attempt to reserve the needed realtime extents by decrementing
	 * the number needed from the number available. This will
	 * fail if the count would go below zero.
	 */
	if (rtextents > 0) {
		error = xfs_mod_frextents(mp, -((int64_t)rtextents));
		if (error) {
			error = -ENOSPC;
			goto undo_log;
		}
		tp->t_rtx_res += rtextents;
	}

	return 0;

	/*
	 * Error cases jump to one of these labels to undo any
	 * reservations which have already been performed.
	 */
undo_log:
	if (resp->tr_logres > 0) {
		xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
		tp->t_ticket = NULL;
		tp->t_log_res = 0;
		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
	}

undo_blocks:
	if (blocks > 0) {
		xfs_mod_fdblocks(mp, (int64_t)blocks, rsvd);
		tp->t_blk_res = 0;
	}
	return error;
}
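/*
 * Hedged usage sketch of the ordering enforced above, assuming a
 * caller-supplied reservation "tr_example": disk blocks are taken first,
 * then log space, then rt extents, and the undo labels unwind in reverse
 * order, so a failed xfs_trans_reserve() leaves nothing held:
 *
 *	error = xfs_trans_reserve(tp, &tr_example, blocks, rtextents);
 *	if (error)	// commonly -ENOSPC; all partial grants were undone
 *		goto out_cancel;
 */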
int
xfs_trans_alloc(
	struct xfs_mount	*mp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents,
	uint			flags,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	bool			want_retry = true;
	int			error;

	/*
	 * Allocate the handle before we do our freeze accounting and set up
	 * the GFP_NOFS allocation context, so that we avoid lockdep false
	 * positives by doing GFP_KERNEL allocations inside
	 * sb_start_intwrite().
	 */
retry:
	tp = kmem_cache_zalloc(xfs_trans_zone, GFP_KERNEL | __GFP_NOFAIL);
	if (!(flags & XFS_TRANS_NO_WRITECOUNT))
		sb_start_intwrite(mp->m_super);
	xfs_trans_set_context(tp);

	/*
	 * Zero-reservation ("empty") transactions can't modify anything, so
	 * they're allowed to run while we're frozen.
	 */
	WARN_ON(resp->tr_logres > 0 &&
		mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
	ASSERT(!(flags & XFS_TRANS_RES_FDBLKS) ||
	       xfs_sb_version_haslazysbcount(&mp->m_sb));

	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
	tp->t_flags = flags;
	tp->t_mountp = mp;
	INIT_LIST_HEAD(&tp->t_items);
	INIT_LIST_HEAD(&tp->t_busy);
	INIT_LIST_HEAD(&tp->t_dfops);
	tp->t_firstblock = NULLFSBLOCK;

	error = xfs_trans_reserve(tp, resp, blocks, rtextents);
	if (error == -ENOSPC && want_retry) {
		xfs_trans_cancel(tp);

		/*
		 * We weren't able to reserve enough space for the transaction.
		 * Flush the other speculative space allocations to free space.
		 * Do not perform a synchronous scan because callers can hold
		 * other locks.
		 */
		error = xfs_blockgc_free_space(mp, NULL);
		if (error)
			return error;

		want_retry = false;
		goto retry;
	}
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	trace_xfs_trans_alloc(tp, _RET_IP_);

	*tpp = tp;
	return 0;
}

/*
 * Create an empty transaction with no reservation. This is a defensive
 * mechanism for routines that query metadata without actually modifying them --
 * if the metadata being queried is somehow cross-linked (think a btree block
 * pointer that points higher in the tree), we risk deadlock. However, blocks
 * grabbed as part of a transaction can be re-grabbed. The verifiers will
 * notice the corrupt block and the operation will fail back to userspace
 * without deadlocking.
 *
 * Note the zero-length reservation; this transaction MUST be cancelled without
 * any dirty data.
 *
 * Callers should obtain freeze protection to avoid a conflict with fs freezing
 * where we can be grabbing buffers at the same time that freeze is trying to
 * drain the buffer LRU list.
 */
int
xfs_trans_alloc_empty(
	struct xfs_mount	*mp,
	struct xfs_trans	**tpp)
{
	struct xfs_trans_res	resv = {0};

	return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
}
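/*
 * Illustrative usage sketch (hypothetical caller): a read-only metadata walk
 * takes an empty transaction purely so that buffers it already holds can be
 * re-grabbed, and must cancel rather than commit it:
 *
 *	error = xfs_trans_alloc_empty(mp, &tp);
 *	if (error)
 *		return error;
 *	// ... query btrees/metadata with tp, never dirtying anything ...
 *	xfs_trans_cancel(tp);
 */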
/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 *
 * Because we may not be keeping track of allocated/free inodes and
 * used filesystem blocks in the superblock, we do not mark the
 * superblock dirty in this transaction if we modify these fields.
 * We still need to update the transaction deltas so that they get
 * applied to the incore superblock, but we don't want them to
 * cause the superblock to get locked and logged if these are the
 * only fields in the superblock that the transaction modifies.
 */
void
xfs_trans_mod_sb(
	xfs_trans_t	*tp,
	uint		field,
	int64_t		delta)
{
	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
	xfs_mount_t	*mp = tp->t_mountp;

	switch (field) {
	case XFS_TRANS_SB_ICOUNT:
		tp->t_icount_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_IFREE:
		tp->t_ifree_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FDBLOCKS:
		/*
		 * Track the number of blocks allocated in the transaction.
		 * Make sure it does not exceed the number reserved. If so,
		 * shutdown as this can lead to accounting inconsistency.
		 */
		if (delta < 0) {
			tp->t_blk_res_used += (uint)-delta;
			if (tp->t_blk_res_used > tp->t_blk_res)
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		} else if (delta > 0 && (tp->t_flags & XFS_TRANS_RES_FDBLKS)) {
			int64_t	blkres_delta;

			/*
			 * Return freed blocks directly to the reservation
			 * instead of the global pool, being careful not to
			 * overflow the trans counter. This is used to preserve
			 * reservation across chains of transaction rolls that
			 * repeatedly free and allocate blocks.
			 */
			blkres_delta = min_t(int64_t, delta,
					     UINT_MAX - tp->t_blk_res);
			tp->t_blk_res += blkres_delta;
			delta -= blkres_delta;
		}
		tp->t_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_RES_FDBLOCKS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter. This should only
		 * be applied to the on-disk superblock.
		 */
		tp->t_res_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FREXTENTS:
		/*
		 * Track the number of realtime extents allocated in the
		 * transaction. Make sure it does not exceed the number
		 * reserved.
		 */
		if (delta < 0) {
			tp->t_rtx_res_used += (uint)-delta;
			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
		}
		tp->t_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_RES_FREXTENTS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter. This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_DBLOCKS:
		tp->t_dblocks_delta += delta;
		break;
	case XFS_TRANS_SB_AGCOUNT:
		ASSERT(delta > 0);
		tp->t_agcount_delta += delta;
		break;
	case XFS_TRANS_SB_IMAXPCT:
		tp->t_imaxpct_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSIZE:
		tp->t_rextsize_delta += delta;
		break;
	case XFS_TRANS_SB_RBMBLOCKS:
		tp->t_rbmblocks_delta += delta;
		break;
	case XFS_TRANS_SB_RBLOCKS:
		tp->t_rblocks_delta += delta;
		break;
	case XFS_TRANS_SB_REXTENTS:
		tp->t_rextents_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSLOG:
		tp->t_rextslog_delta += delta;
		break;
	default:
		ASSERT(0);
		return;
	}

	tp->t_flags |= flags;
}
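/*
 * Illustrative call sketch: an allocation of @len blocks charged against the
 * transaction's block reservation would be recorded as
 *
 *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -(int64_t)len);
 *
 * With lazy superblock counters enabled this only adjusts the incore
 * counters at commit time; the on-disk superblock is not dirtied.
 */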
/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
	xfs_trans_t	*tp)
{
	xfs_dsb_t	*sbp;
	struct xfs_buf	*bp;
	int		whole = 0;

	bp = xfs_trans_getsb(tp);
	sbp = bp->b_addr;

	/*
	 * Check that superblock mods match the mods made to AGF counters.
	 */
	ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
	       (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
		tp->t_ag_btree_delta));

	/*
	 * Only update the superblock counters if we are logging them
	 */
	if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
		if (tp->t_icount_delta)
			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
		if (tp->t_ifree_delta)
			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
		if (tp->t_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
		if (tp->t_res_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
	}

	if (tp->t_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
	if (tp->t_res_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);

	if (tp->t_dblocks_delta) {
		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
		whole = 1;
	}
	if (tp->t_agcount_delta) {
		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
		whole = 1;
	}
	if (tp->t_imaxpct_delta) {
		sbp->sb_imax_pct += tp->t_imaxpct_delta;
		whole = 1;
	}
	if (tp->t_rextsize_delta) {
		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
		whole = 1;
	}
	if (tp->t_rbmblocks_delta) {
		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
		whole = 1;
	}
	if (tp->t_rblocks_delta) {
		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
		whole = 1;
	}
	if (tp->t_rextents_delta) {
		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
		whole = 1;
	}
	if (tp->t_rextslog_delta) {
		sbp->sb_rextslog += tp->t_rextslog_delta;
		whole = 1;
	}

	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
	if (whole)
		/*
		 * Log the whole thing, the fields are noncontiguous.
		 */
		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
	else
		/*
		 * Since all the modifiable fields are contiguous, we
		 * can get away with this.
		 */
		xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
				  offsetof(xfs_dsb_t, sb_frextents) +
				  sizeof(sbp->sb_frextents) - 1);
}
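/*
 * The partial logging above assumes that sb_icount, sb_ifree, sb_fdblocks
 * and sb_frextents are laid out back to back in the on-disk superblock. A
 * compile-time check along these lines (a sketch; no such check exists in
 * this file) would catch a layout change:
 *
 *	BUILD_BUG_ON(offsetof(xfs_dsb_t, sb_frextents) !=
 *		     offsetof(xfs_dsb_t, sb_icount) + 3 * sizeof(__be64));
 */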
/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
 * and apply superblock counter changes to the in-core superblock. The
 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 * applied to the in-core superblock, the idea being that this has already
 * been done.
 *
 * If we are not logging superblock counters, then the inode allocated/free and
 * used block counts are not updated in the on-disk superblock. In this case,
 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 * still need to update the incore superblock with the changes.
 *
 * Deltas for the inode count are +/-64, hence we use a large batch size of 128
 * so we don't need to take the counter lock on every update.
 */
#define XFS_ICOUNT_BATCH	128

void
xfs_trans_unreserve_and_mod_sb(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
	int64_t			blkdelta = 0;
	int64_t			rtxdelta = 0;
	int64_t			idelta = 0;
	int64_t			ifreedelta = 0;
	int			error;

	/* calculate deltas */
	if (tp->t_blk_res > 0)
		blkdelta = tp->t_blk_res;
	if ((tp->t_fdblocks_delta != 0) &&
	    (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
		blkdelta += tp->t_fdblocks_delta;

	if (tp->t_rtx_res > 0)
		rtxdelta = tp->t_rtx_res;
	if ((tp->t_frextents_delta != 0) &&
	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
		rtxdelta += tp->t_frextents_delta;

	if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	    (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		idelta = tp->t_icount_delta;
		ifreedelta = tp->t_ifree_delta;
	}

	/* apply the per-cpu counters */
	if (blkdelta) {
		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
		ASSERT(!error);
	}

	if (idelta)
		percpu_counter_add_batch(&mp->m_icount, idelta,
					 XFS_ICOUNT_BATCH);

	if (ifreedelta)
		percpu_counter_add(&mp->m_ifree, ifreedelta);

	if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
		return;

	/* apply remaining deltas */
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_frextents += rtxdelta;
	mp->m_sb.sb_dblocks += tp->t_dblocks_delta;
	mp->m_sb.sb_agcount += tp->t_agcount_delta;
	mp->m_sb.sb_imax_pct += tp->t_imaxpct_delta;
	mp->m_sb.sb_rextsize += tp->t_rextsize_delta;
	mp->m_sb.sb_rbmblocks += tp->t_rbmblocks_delta;
	mp->m_sb.sb_rblocks += tp->t_rblocks_delta;
	mp->m_sb.sb_rextents += tp->t_rextents_delta;
	mp->m_sb.sb_rextslog += tp->t_rextslog_delta;
	spin_unlock(&mp->m_sb_lock);

	/*
	 * Debug checks outside of the spinlock so they don't lock up the
	 * machine if they fail.
	 */
	ASSERT(mp->m_sb.sb_imax_pct >= 0);
	ASSERT(mp->m_sb.sb_rextslog >= 0);
}
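/*
 * Illustrative note on XFS_ICOUNT_BATCH: percpu_counter_add_batch() only
 * takes the counter lock once a CPU's local delta reaches the batch size,
 * so with inode deltas of +/-64 the common case stays lock-free:
 *
 *	percpu_counter_add_batch(&mp->m_icount, idelta, XFS_ICOUNT_BATCH);
 */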
/* Add the given log item to the transaction's list of log items. */
void
xfs_trans_add_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	ASSERT(lip->li_mountp == tp->t_mountp);
	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
	ASSERT(list_empty(&lip->li_trans));
	ASSERT(!test_bit(XFS_LI_DIRTY, &lip->li_flags));

	list_add_tail(&lip->li_trans, &tp->t_items);
	trace_xfs_trans_add_item(tp, _RET_IP_);
}

/*
 * Unlink the log item from the transaction. The log item is no longer
 * considered dirty in this transaction, as the linked transaction has
 * finished, either by abort or commit completion.
 */
void
xfs_trans_del_item(
	struct xfs_log_item	*lip)
{
	clear_bit(XFS_LI_DIRTY, &lip->li_flags);
	list_del_init(&lip->li_trans);
}

/* Detach and unlock all of the items in a transaction */
static void
xfs_trans_free_items(
	struct xfs_trans	*tp,
	bool			abort)
{
	struct xfs_log_item	*lip, *next;

	trace_xfs_trans_free_items(tp, _RET_IP_);

	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		xfs_trans_del_item(lip);
		if (abort)
			set_bit(XFS_LI_ABORTED, &lip->li_flags);
		if (lip->li_ops->iop_release)
			lip->li_ops->iop_release(lip);
	}
}

static inline void
xfs_log_item_batch_insert(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		commit_lsn)
{
	int	i;

	spin_lock(&ailp->ail_lock);
	/* xfs_trans_ail_update_bulk drops ailp->ail_lock */
	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];

		if (lip->li_ops->iop_unpin)
			lip->li_ops->iop_unpin(lip, 0);
	}
}
/*
 * Bulk operation version of xfs_trans_committed that takes a log vector of
 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 * minimise lock traffic.
 *
 * If we are called with the aborted flag set, it is because a log write during
 * a CIL checkpoint commit has failed. In this case, all the items in the
 * checkpoint have already gone through iop_committed and iop_committing, which
 * means that checkpoint commit abort handling is treated exactly the same
 * as an iclog write error even though we haven't started any IO yet. Hence in
 * this case all we need to do is iop_committed processing, followed by an
 * iop_unpin(aborted) call.
 *
 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 * at the end of the AIL, the insert cursor avoids the need to walk
 * the AIL to find the insertion point on every xfs_log_item_batch_insert()
 * call. This saves a lot of needless list walking and is a net win, even
 * though it slightly increases the amount of AIL lock traffic to set it up
 * and tear it down.
 */
void
xfs_trans_committed_bulk(
	struct xfs_ail		*ailp,
	struct xfs_log_vec	*log_vector,
	xfs_lsn_t		commit_lsn,
	bool			aborted)
{
#define LOG_ITEM_BATCH_SIZE	32
	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
	struct xfs_log_vec	*lv;
	struct xfs_ail_cursor	cur;
	int			i = 0;

	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
	spin_unlock(&ailp->ail_lock);

	/* unpin all the log items */
	for (lv = log_vector; lv; lv = lv->lv_next) {
		struct xfs_log_item	*lip = lv->lv_item;
		xfs_lsn_t		item_lsn;

		if (aborted)
			set_bit(XFS_LI_ABORTED, &lip->li_flags);

		if (lip->li_ops->flags & XFS_ITEM_RELEASE_WHEN_COMMITTED) {
			lip->li_ops->iop_release(lip);
			continue;
		}

		if (lip->li_ops->iop_committed)
			item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);
		else
			item_lsn = commit_lsn;

		/* item_lsn of -1 means the item needs no further processing */
		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
			continue;

		/*
		 * if we are aborting the operation, no point in inserting the
		 * object into the AIL as we are in a shutdown situation.
		 */
		if (aborted) {
			ASSERT(XFS_FORCED_SHUTDOWN(ailp->ail_mount));
			if (lip->li_ops->iop_unpin)
				lip->li_ops->iop_unpin(lip, 1);
			continue;
		}

		if (item_lsn != commit_lsn) {

			/*
			 * Not a bulk update option due to unusual item_lsn.
			 * Push into AIL immediately, rechecking the lsn once
			 * we have the ail lock. Then unpin the item. This does
			 * not affect the AIL cursor the bulk insert path is
			 * using.
			 */
			spin_lock(&ailp->ail_lock);
			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
				xfs_trans_ail_update(ailp, lip, item_lsn);
			else
				spin_unlock(&ailp->ail_lock);
			if (lip->li_ops->iop_unpin)
				lip->li_ops->iop_unpin(lip, 0);
			continue;
		}

		/* Item is a candidate for bulk AIL insert. */
		log_items[i++] = lv->lv_item;
		if (i >= LOG_ITEM_BATCH_SIZE) {
			xfs_log_item_batch_insert(ailp, &cur, log_items,
					LOG_ITEM_BATCH_SIZE, commit_lsn);
			i = 0;
		}
	}

	/* make sure we insert the remainder! */
	if (i)
		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);

	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);
}
/*
 * Commit the given transaction to the log.
 *
 * XFS's disk error handling mechanism is not based on a typical
 * transaction abort mechanism. Logically after the filesystem
 * gets marked 'SHUTDOWN', we can't let any new transactions
 * be durable - i.e. committed to disk - because some metadata might
 * be inconsistent. In such cases, this returns an error, and the
 * caller may assume that all locked objects joined to the transaction
 * have already been unlocked as if the commit had succeeded.
 * Do not reference the transaction structure after this call.
 */
static int
__xfs_trans_commit(
	struct xfs_trans	*tp,
	bool			regrant)
{
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_lsn_t		commit_lsn = -1;
	int			error = 0;
	int			sync = tp->t_flags & XFS_TRANS_SYNC;

	trace_xfs_trans_commit(tp, _RET_IP_);

	/*
	 * Finish deferred items on final commit. Only permanent transactions
	 * should ever have deferred ops.
	 */
	WARN_ON_ONCE(!list_empty(&tp->t_dfops) &&
		     !(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
	if (!regrant && (tp->t_flags & XFS_TRANS_PERM_LOG_RES)) {
		error = xfs_defer_finish_noroll(&tp);
		if (error)
			goto out_unreserve;
	}

	/*
	 * If there is nothing to be logged by the transaction,
	 * then unlock all of the items associated with the
	 * transaction and free the transaction structure.
	 * Also make sure to return any reserved blocks to
	 * the free pool.
	 */
	if (!(tp->t_flags & XFS_TRANS_DIRTY))
		goto out_unreserve;

	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = -EIO;
		goto out_unreserve;
	}

	ASSERT(tp->t_ticket != NULL);

	/*
	 * If we need to update the superblock, then do it now.
	 */
	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
		xfs_trans_apply_sb_deltas(tp);
	xfs_trans_apply_dquot_deltas(tp);

	xfs_log_commit_cil(mp, tp, &commit_lsn, regrant);

	xfs_trans_free(tp);

	/*
	 * If the transaction needs to be synchronous, then force the
	 * log out now and wait for it.
	 */
	if (sync) {
		error = xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
		XFS_STATS_INC(mp, xs_trans_sync);
	} else {
		XFS_STATS_INC(mp, xs_trans_async);
	}

	return error;

out_unreserve:
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * It is indeed possible for the transaction to be not dirty but
	 * the dqinfo portion to be. All that means is that we have some
	 * (non-persistent) quota reservations that need to be unreserved.
	 */
	xfs_trans_unreserve_and_mod_dquots(tp);
	if (tp->t_ticket) {
		if (regrant && !XLOG_FORCED_SHUTDOWN(mp->m_log))
			xfs_log_ticket_regrant(mp->m_log, tp->t_ticket);
		else
			xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
		tp->t_ticket = NULL;
	}
	xfs_trans_free_items(tp, !!error);
	xfs_trans_free(tp);

	XFS_STATS_INC(mp, xs_trans_empty);
	return error;
}

int
xfs_trans_commit(
	struct xfs_trans	*tp)
{
	return __xfs_trans_commit(tp, false);
}
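/*
 * Illustrative usage sketch (hypothetical caller): a transaction is always
 * paired with exactly one of commit or cancel, and joining the inode with a
 * lock flag hands unlocking over to the transaction:
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
 *	if (error)
 *		return error;
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 *	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 *	error = xfs_trans_commit(tp);	// also drops XFS_ILOCK_EXCL
 */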
/*
 * Unlock all of the transaction's items and free the transaction.
 * The transaction must not have modified any of its items, because
 * there is no way to restore them to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release
 * it as well.
 */
void
xfs_trans_cancel(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	bool			dirty = (tp->t_flags & XFS_TRANS_DIRTY);

	trace_xfs_trans_cancel(tp, _RET_IP_);

	if (tp->t_flags & XFS_TRANS_PERM_LOG_RES)
		xfs_defer_cancel(tp);

	/*
	 * See if the caller is relying on us to shut down the
	 * filesystem. This happens in paths where we detect
	 * corruption and decide to give up.
	 */
	if (dirty && !XFS_FORCED_SHUTDOWN(mp)) {
		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}
#ifdef DEBUG
	if (!dirty && !XFS_FORCED_SHUTDOWN(mp)) {
		struct xfs_log_item *lip;

		list_for_each_entry(lip, &tp->t_items, li_trans)
			ASSERT(!xlog_item_is_intent_done(lip));
	}
#endif
	xfs_trans_unreserve_and_mod_sb(tp);
	xfs_trans_unreserve_and_mod_dquots(tp);

	if (tp->t_ticket) {
		xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
		tp->t_ticket = NULL;
	}

	xfs_trans_free_items(tp, dirty);
	xfs_trans_free(tp);
}

/*
 * Roll from one trans in the sequence of PERMANENT transactions to
 * the next: permanent transactions are only flushed out when
 * committed with xfs_trans_commit(), but we still want to let
 * chunks of it go to the log as soon as possible. So we commit the
 * chunk we've been working on and get a new transaction to continue.
 */
int
xfs_trans_roll(
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*trans = *tpp;
	struct xfs_trans_res	tres;
	int			error;

	trace_xfs_trans_roll(trans, _RET_IP_);

	/*
	 * Copy the critical parameters from one trans to the next.
	 */
	tres.tr_logres = trans->t_log_res;
	tres.tr_logcount = trans->t_log_count;

	*tpp = xfs_trans_dup(trans);

	/*
	 * Commit the current transaction.
	 * If this commit failed, then it'd just unlock those items that
	 * are not marked ihold. That also means that a filesystem shutdown
	 * is in progress. The caller takes the responsibility to cancel
	 * the duplicate transaction that gets returned.
	 */
	error = __xfs_trans_commit(trans, true);
	if (error)
		return error;

	/*
	 * Reserve space in the log for the next transaction.
	 * This also pushes items in the "AIL", the list of logged items,
	 * out to disk if they are taking up space at the tail of the log
	 * that we want to use. This requires that either nothing be locked
	 * across this call, or that anything that is locked be logged in
	 * the prior and the next transactions.
	 */
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	return xfs_trans_reserve(*tpp, &tres, 0, 0);
}
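/*
 * Illustrative rolling sketch (hypothetical loop body): anything held across
 * the roll must be re-joined and logged again, since log items are not
 * inherited by the duplicate transaction:
 *
 *	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 *	error = xfs_trans_roll(&tp);
 *	if (error)
 *		goto out_cancel;	// caller cancels the duplicate tp
 *	xfs_trans_ijoin(tp, ip, 0);
 */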
/*
 * Allocate a transaction, lock and join the inode to it, and reserve quota.
 *
 * The caller must ensure that the on-disk dquots attached to this inode have
 * already been allocated and initialized. The caller is responsible for
 * releasing ILOCK_EXCL if a new transaction is returned.
 */
int
xfs_trans_alloc_inode(
	struct xfs_inode	*ip,
	struct xfs_trans_res	*resv,
	unsigned int		dblocks,
	unsigned int		rblocks,
	bool			force,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, resv, dblocks,
			rblocks / mp->m_sb.sb_rextsize,
			force ? XFS_TRANS_RESERVE : 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_qm_dqattach_locked(ip, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	error = xfs_trans_reserve_quota_nblks(tp, ip, dblocks, rblocks, force);
	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
		xfs_trans_cancel(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_blockgc_free_quota(ip, 0);
		retried = true;
		goto retry;
	}
	if (error)
		goto out_cancel;

	*tpp = tp;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
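/*
 * Illustrative usage sketch (hypothetical caller): on success the inode is
 * returned ILOCK_EXCL'd but joined with lock_flags 0, so the caller drops
 * the lock itself after commit:
 *
 *	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, dblocks, 0,
 *			false, &tp);
 *	if (error)
 *		return error;
 *	// ... map blocks, log the inode ...
 *	error = xfs_trans_commit(tp);
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */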
/*
 * Allocate a transaction in preparation for inode creation by reserving quota
 * against the given dquots. Callers are not required to hold any inode locks.
 */
int
xfs_trans_alloc_icreate(
	struct xfs_mount	*mp,
	struct xfs_trans_res	*resv,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	unsigned int		dblocks,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, resv, dblocks, 0, 0, &tp);
	if (error)
		return error;

	error = xfs_trans_reserve_quota_icreate(tp, udqp, gdqp, pdqp, dblocks);
	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
		xfs_trans_cancel(tp);
		xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
		retried = true;
		goto retry;
	}
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	*tpp = tp;
	return 0;
}

/*
 * Allocate a transaction, lock and join the inode to it, and reserve quota
 * in preparation for inode attribute changes that include uid, gid, or prid
 * changes.
 *
 * The caller must ensure that the on-disk dquots attached to this inode have
 * already been allocated and initialized. The ILOCK will be dropped when the
 * transaction is committed or cancelled.
 */
int
xfs_trans_alloc_ichange(
	struct xfs_inode	*ip,
	struct xfs_dquot	*new_udqp,
	struct xfs_dquot	*new_gdqp,
	struct xfs_dquot	*new_pdqp,
	bool			force,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*udqp;
	struct xfs_dquot	*gdqp;
	struct xfs_dquot	*pdqp;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	error = xfs_qm_dqattach_locked(ip, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	/*
	 * For each quota type, skip quota reservations if the inode's dquots
	 * now match the ones that came from the caller, or the caller didn't
	 * pass one in. The inode's dquots can change if we drop the ILOCK to
	 * perform a blockgc scan, so we must preserve the caller's arguments.
	 */
	udqp = (new_udqp != ip->i_udquot) ? new_udqp : NULL;
	gdqp = (new_gdqp != ip->i_gdquot) ? new_gdqp : NULL;
	pdqp = (new_pdqp != ip->i_pdquot) ? new_pdqp : NULL;
	if (udqp || gdqp || pdqp) {
		unsigned int	qflags = XFS_QMOPT_RES_REGBLKS;

		if (force)
			qflags |= XFS_QMOPT_FORCE_RES;

		/*
		 * Reserve enough quota to handle blocks on disk and reserved
		 * for a delayed allocation. We'll actually transfer the
		 * delalloc reservation between dquots at chown time, even
		 * though that part is only semi-transactional.
		 */
		error = xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp,
				pdqp, ip->i_nblocks + ip->i_delayed_blks,
				1, qflags);
		if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
			xfs_trans_cancel(tp);
			xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
			retried = true;
			goto retry;
		}
		if (error)
			goto out_cancel;
	}

	*tpp = tp;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	return error;
}
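/*
 * Illustrative usage sketch (hypothetical chown path): here the inode was
 * joined with XFS_ILOCK_EXCL, so commit or cancel drops the ILOCK for us:
 *
 *	error = xfs_trans_alloc_ichange(ip, udqp, gdqp, NULL, false, &tp);
 *	if (error)
 *		return error;
 *	// ... swap i_udquot/i_gdquot, log the inode core ...
 *	error = xfs_trans_commit(tp);	// also releases XFS_ILOCK_EXCL
 */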