/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_quota.h"
#include "xfs_trans_priv.h"
#include "xfs_trans_space.h"


STATIC void	xfs_trans_apply_sb_deltas(xfs_trans_t *);
STATIC uint	xfs_trans_count_vecs(xfs_trans_t *);
STATIC void	xfs_trans_fill_vecs(xfs_trans_t *, xfs_log_iovec_t *);
STATIC void	xfs_trans_uncommit(xfs_trans_t *, uint);
STATIC void	xfs_trans_committed(xfs_trans_t *, int);
STATIC void	xfs_trans_chunk_committed(xfs_log_item_chunk_t *, xfs_lsn_t, int);
STATIC void	xfs_trans_free(xfs_trans_t *);

kmem_zone_t	*xfs_trans_zone;


/*
 * Reservation functions here avoid a huge stack in xfs_trans_init
 * due to register overflow from temporaries in the calculations.
 */

STATIC uint
xfs_calc_write_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_WRITE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_itruncate_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_ITRUNCATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_rename_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_RENAME_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_link_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_LINK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_remove_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_REMOVE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_symlink_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_SYMLINK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_create_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_CREATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_mkdir_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_MKDIR_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_ifree_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_IFREE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_ichange_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_ICHANGE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_growdata_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_GROWDATA_LOG_RES(mp);
}

STATIC uint
xfs_calc_growrtalloc_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_GROWRTALLOC_LOG_RES(mp);
}

STATIC uint
xfs_calc_growrtzero_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_GROWRTZERO_LOG_RES(mp);
}

STATIC uint
xfs_calc_growrtfree_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_GROWRTFREE_LOG_RES(mp);
}

STATIC uint
xfs_calc_swrite_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_SWRITE_LOG_RES(mp);
}

STATIC uint
xfs_calc_writeid_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_WRITEID_LOG_RES(mp);
}

STATIC uint
xfs_calc_addafork_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_ADDAFORK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_attrinval_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_ATTRINVAL_LOG_RES(mp);
}

STATIC uint
xfs_calc_attrset_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_ATTRSET_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_attrrm_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_ATTRRM_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_clear_agi_bucket_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_CLEAR_AGI_BUCKET_LOG_RES(mp);
}

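/*
 * Note: the values computed above are normally consumed through per-type
 * macros in xfs_trans.h that simply read them back out of the mount
 * structure, along the lines of (assumed definitions, for illustration):
 *
 *	#define XFS_WRITE_LOG_RES(mp)	((mp)->m_reservations.tr_write)
 *	#define XFS_CREATE_LOG_RES(mp)	((mp)->m_reservations.tr_create)
 *
 * Callers pass the result as the logspace argument to xfs_trans_reserve()
 * below.
 */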
/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
	xfs_mount_t	*mp)
{
	xfs_trans_reservations_t	*resp;

	resp = &(mp->m_reservations);
	resp->tr_write = xfs_calc_write_reservation(mp);
	resp->tr_itruncate = xfs_calc_itruncate_reservation(mp);
	resp->tr_rename = xfs_calc_rename_reservation(mp);
	resp->tr_link = xfs_calc_link_reservation(mp);
	resp->tr_remove = xfs_calc_remove_reservation(mp);
	resp->tr_symlink = xfs_calc_symlink_reservation(mp);
	resp->tr_create = xfs_calc_create_reservation(mp);
	resp->tr_mkdir = xfs_calc_mkdir_reservation(mp);
	resp->tr_ifree = xfs_calc_ifree_reservation(mp);
	resp->tr_ichange = xfs_calc_ichange_reservation(mp);
	resp->tr_growdata = xfs_calc_growdata_reservation(mp);
	resp->tr_swrite = xfs_calc_swrite_reservation(mp);
	resp->tr_writeid = xfs_calc_writeid_reservation(mp);
	resp->tr_addafork = xfs_calc_addafork_reservation(mp);
	resp->tr_attrinval = xfs_calc_attrinval_reservation(mp);
	resp->tr_attrset = xfs_calc_attrset_reservation(mp);
	resp->tr_attrrm = xfs_calc_attrrm_reservation(mp);
	resp->tr_clearagi = xfs_calc_clear_agi_bucket_reservation(mp);
	resp->tr_growrtalloc = xfs_calc_growrtalloc_reservation(mp);
	resp->tr_growrtzero = xfs_calc_growrtzero_reservation(mp);
	resp->tr_growrtfree = xfs_calc_growrtfree_reservation(mp);
}

/*
 * This routine is called to allocate a transaction structure.
 * The type parameter indicates the type of the transaction.  These
 * are enumerated in xfs_trans.h.
 *
 * Dynamically allocate the transaction structure from the transaction
 * zone, initialize it, and return it to the caller.
 */
xfs_trans_t *
xfs_trans_alloc(
	xfs_mount_t	*mp,
	uint		type)
{
	fs_check_frozen(XFS_MTOVFS(mp), SB_FREEZE_TRANS);
	atomic_inc(&mp->m_active_trans);

	return (_xfs_trans_alloc(mp, type));

}

xfs_trans_t *
_xfs_trans_alloc(
	xfs_mount_t	*mp,
	uint		type)
{
	xfs_trans_t	*tp;

	ASSERT(xfs_trans_zone != NULL);
	tp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);

	/*
	 * Initialize the transaction structure.
	 */
	tp->t_magic = XFS_TRANS_MAGIC;
	tp->t_type = type;
	tp->t_mountp = mp;
	tp->t_items_free = XFS_LIC_NUM_SLOTS;
	tp->t_busy_free = XFS_LBC_NUM_SLOTS;
	XFS_LIC_INIT(&(tp->t_items));
	XFS_LBC_INIT(&(tp->t_busy));

	return (tp);
}

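/*
 * A rough sketch of how the allocation, reservation, and commit interfaces
 * in this file fit together (illustrative only; the real call sites live
 * elsewhere in XFS, and the reservation macro and log count names are
 * assumptions):
 *
 *	tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE);
 *	error = xfs_trans_reserve(tp, resblks, XFS_CREATE_LOG_RES(mp), 0,
 *				  XFS_TRANS_PERM_LOG_RES, XFS_CREATE_LOG_COUNT);
 *	if (error) {
 *		xfs_trans_cancel(tp, 0);
 *		return error;
 *	}
 *	... join inodes/buffers to tp and log the changes ...
 *	error = _xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL, NULL);
 */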
/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction.  The remaining
 * unused block and rt extent reservations are also inherited.  This
 * implies that the original transaction is no longer allowed to allocate
 * blocks.  Locks and log items, however, are not inherited.  They must
 * be added to the new transaction explicitly.
 */
xfs_trans_t *
xfs_trans_dup(
	xfs_trans_t	*tp)
{
	xfs_trans_t	*ntp;

	ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);

	/*
	 * Initialize the new transaction structure.
	 */
	ntp->t_magic = XFS_TRANS_MAGIC;
	ntp->t_type = tp->t_type;
	ntp->t_mountp = tp->t_mountp;
	ntp->t_items_free = XFS_LIC_NUM_SLOTS;
	ntp->t_busy_free = XFS_LBC_NUM_SLOTS;
	XFS_LIC_INIT(&(ntp->t_items));
	XFS_LBC_INIT(&(ntp->t_busy));

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(tp->t_ticket != NULL);

	ntp->t_flags = XFS_TRANS_PERM_LOG_RES | (tp->t_flags & XFS_TRANS_RESERVE);
	ntp->t_ticket = tp->t_ticket;
	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
	tp->t_blk_res = tp->t_blk_res_used;
	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
	tp->t_rtx_res = tp->t_rtx_res_used;
	PFLAGS_DUP(&tp->t_pflags, &ntp->t_pflags);

	XFS_TRANS_DUP_DQINFO(tp->t_mountp, tp, ntp);

	atomic_inc(&tp->t_mountp->m_active_trans);
	return ntp;
}

/*
 * This is called to reserve free disk blocks and log space for the
 * given transaction.  This must be done before allocating any resources
 * within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 * The only valid value for the flags parameter is XFS_TRANS_PERM_LOG_RES,
 * which is used by long running transactions.  If any one of the
 * reservations fails then they will all be backed out.
 *
 * This does not do quota reservations.  That typically is done by the
 * caller afterwards.
 */
int
xfs_trans_reserve(
	xfs_trans_t	*tp,
	uint		blocks,
	uint		logspace,
	uint		rtextents,
	uint		flags,
	uint		logcount)
{
	int		log_flags;
	int		error;
	int		rsvd;

	error = 0;
	rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	/* Mark this thread as being in a transaction */
	PFLAGS_SET_FSTRANS(&tp->t_pflags);

	/*
	 * Attempt to reserve the needed disk blocks by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (blocks > 0) {
		error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FDBLOCKS,
					  -blocks, rsvd);
		if (error != 0) {
			PFLAGS_RESTORE_FSTRANS(&tp->t_pflags);
			return (XFS_ERROR(ENOSPC));
		}
		tp->t_blk_res += blocks;
	}

	/*
	 * Reserve the log space needed for this transaction.
	 */
	if (logspace > 0) {
		ASSERT((tp->t_log_res == 0) || (tp->t_log_res == logspace));
		ASSERT((tp->t_log_count == 0) ||
			(tp->t_log_count == logcount));
		if (flags & XFS_TRANS_PERM_LOG_RES) {
			log_flags = XFS_LOG_PERM_RESERV;
			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
		} else {
			ASSERT(tp->t_ticket == NULL);
			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
			log_flags = 0;
		}

		error = xfs_log_reserve(tp->t_mountp, logspace, logcount,
					&tp->t_ticket,
					XFS_TRANSACTION, log_flags, tp->t_type);
		if (error) {
			goto undo_blocks;
		}
		tp->t_log_res = logspace;
		tp->t_log_count = logcount;
	}

	/*
	 * Attempt to reserve the needed realtime extents by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (rtextents > 0) {
		error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FREXTENTS,
					  -rtextents, rsvd);
		if (error) {
			error = XFS_ERROR(ENOSPC);
			goto undo_log;
		}
		tp->t_rtx_res += rtextents;
	}

	return 0;

	/*
	 * Error cases jump to one of these labels to undo any
	 * reservations which have already been performed.
	 */
undo_log:
	if (logspace > 0) {
		if (flags & XFS_TRANS_PERM_LOG_RES) {
			log_flags = XFS_LOG_REL_PERM_RESERV;
		} else {
			log_flags = 0;
		}
		xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, log_flags);
		tp->t_ticket = NULL;
		tp->t_log_res = 0;
		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
	}

undo_blocks:
	if (blocks > 0) {
		(void) xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FDBLOCKS,
					 blocks, rsvd);
		tp->t_blk_res = 0;
	}

	PFLAGS_RESTORE_FSTRANS(&tp->t_pflags);

	return (error);
}


/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 */
void
xfs_trans_mod_sb(
	xfs_trans_t	*tp,
	uint		field,
	long		delta)
{

	switch (field) {
	case XFS_TRANS_SB_ICOUNT:
		tp->t_icount_delta += delta;
		break;
	case XFS_TRANS_SB_IFREE:
		tp->t_ifree_delta += delta;
		break;
	case XFS_TRANS_SB_FDBLOCKS:
		/*
		 * Track the number of blocks allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_blk_res_used += (uint)-delta;
			ASSERT(tp->t_blk_res_used <= tp->t_blk_res);
		}
		tp->t_fdblocks_delta += delta;
		break;
	case XFS_TRANS_SB_RES_FDBLOCKS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_fdblocks_delta += delta;
		break;
	case XFS_TRANS_SB_FREXTENTS:
		/*
		 * Track the number of realtime extents allocated in
		 * the transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_rtx_res_used += (uint)-delta;
			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
		}
		tp->t_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_RES_FREXTENTS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_DBLOCKS:
		ASSERT(delta > 0);
		tp->t_dblocks_delta += delta;
		break;
	case XFS_TRANS_SB_AGCOUNT:
		ASSERT(delta > 0);
		tp->t_agcount_delta += delta;
		break;
	case XFS_TRANS_SB_IMAXPCT:
		tp->t_imaxpct_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSIZE:
		tp->t_rextsize_delta += delta;
		break;
	case XFS_TRANS_SB_RBMBLOCKS:
		tp->t_rbmblocks_delta += delta;
		break;
	case XFS_TRANS_SB_RBLOCKS:
		tp->t_rblocks_delta += delta;
		break;
	case XFS_TRANS_SB_REXTENTS:
		tp->t_rextents_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSLOG:
		tp->t_rextslog_delta += delta;
		break;
	default:
		ASSERT(0);
		return;
	}

	tp->t_flags |= (XFS_TRANS_SB_DIRTY | XFS_TRANS_DIRTY);
}

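/*
 * A sketch of how this routine is typically fed (illustrative only; "len"
 * and "new_dblocks" are made-up names): an allocation of "len" filesystem
 * blocks charged against the transaction's block reservation is recorded as
 *
 *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -((long)len));
 *
 * while a growfs-style change to an absolute superblock counter looks like
 *
 *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS, (long)new_dblocks);
 */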
/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
	xfs_trans_t	*tp)
{
	xfs_sb_t	*sbp;
	xfs_buf_t	*bp;
	int		whole = 0;

	bp = xfs_trans_getsb(tp, tp->t_mountp, 0);
	sbp = XFS_BUF_TO_SBP(bp);

	/*
	 * Check that superblock mods match the mods made to AGF counters.
	 */
	ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
	       (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
		tp->t_ag_btree_delta));

	if (tp->t_icount_delta != 0) {
		INT_MOD(sbp->sb_icount, ARCH_CONVERT, tp->t_icount_delta);
	}
	if (tp->t_ifree_delta != 0) {
		INT_MOD(sbp->sb_ifree, ARCH_CONVERT, tp->t_ifree_delta);
	}

	if (tp->t_fdblocks_delta != 0) {
		INT_MOD(sbp->sb_fdblocks, ARCH_CONVERT, tp->t_fdblocks_delta);
	}
	if (tp->t_res_fdblocks_delta != 0) {
		INT_MOD(sbp->sb_fdblocks, ARCH_CONVERT, tp->t_res_fdblocks_delta);
	}

	if (tp->t_frextents_delta != 0) {
		INT_MOD(sbp->sb_frextents, ARCH_CONVERT, tp->t_frextents_delta);
	}
	if (tp->t_res_frextents_delta != 0) {
		INT_MOD(sbp->sb_frextents, ARCH_CONVERT, tp->t_res_frextents_delta);
	}
	if (tp->t_dblocks_delta != 0) {
		INT_MOD(sbp->sb_dblocks, ARCH_CONVERT, tp->t_dblocks_delta);
		whole = 1;
	}
	if (tp->t_agcount_delta != 0) {
		INT_MOD(sbp->sb_agcount, ARCH_CONVERT, tp->t_agcount_delta);
		whole = 1;
	}
	if (tp->t_imaxpct_delta != 0) {
		INT_MOD(sbp->sb_imax_pct, ARCH_CONVERT, tp->t_imaxpct_delta);
		whole = 1;
	}
	if (tp->t_rextsize_delta != 0) {
		INT_MOD(sbp->sb_rextsize, ARCH_CONVERT, tp->t_rextsize_delta);
		whole = 1;
	}
	if (tp->t_rbmblocks_delta != 0) {
		INT_MOD(sbp->sb_rbmblocks, ARCH_CONVERT, tp->t_rbmblocks_delta);
		whole = 1;
	}
	if (tp->t_rblocks_delta != 0) {
		INT_MOD(sbp->sb_rblocks, ARCH_CONVERT, tp->t_rblocks_delta);
		whole = 1;
	}
	if (tp->t_rextents_delta != 0) {
		INT_MOD(sbp->sb_rextents, ARCH_CONVERT, tp->t_rextents_delta);
		whole = 1;
	}
	if (tp->t_rextslog_delta != 0) {
		INT_MOD(sbp->sb_rextslog, ARCH_CONVERT, tp->t_rextslog_delta);
		whole = 1;
	}

	if (whole)
		/*
		 * Log the whole thing, the fields are noncontiguous.
		 */
		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_sb_t) - 1);
	else
		/*
		 * Since all the modifiable fields are contiguous, we
		 * can get away with this.
		 */
		xfs_trans_log_buf(tp, bp, offsetof(xfs_sb_t, sb_icount),
				  offsetof(xfs_sb_t, sb_frextents) +
				  sizeof(sbp->sb_frextents) - 1);

	XFS_MTOVFS(tp->t_mountp)->vfs_super->s_dirt = 1;
}

/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused
 * reservations and apply superblock counter changes to the in-core
 * superblock.
 *
 * This is done efficiently with a single call to xfs_mod_incore_sb_batch().
 */
STATIC void
xfs_trans_unreserve_and_mod_sb(
	xfs_trans_t	*tp)
{
	xfs_mod_sb_t	msb[14];	/* If you add cases, add entries */
	xfs_mod_sb_t	*msbp;
	/* REFERENCED */
	int		error;
	int		rsvd;

	msbp = msb;
	rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	/*
	 * Release any reserved blocks.  Any that were allocated
	 * will be taken back again by fdblocks_delta below.
	 */
	if (tp->t_blk_res > 0) {
		msbp->msb_field = XFS_SBS_FDBLOCKS;
		msbp->msb_delta = tp->t_blk_res;
		msbp++;
	}

	/*
	 * Release any reserved realtime extents.  Any that were
	 * allocated will be taken back again by frextents_delta below.
	 */
	if (tp->t_rtx_res > 0) {
		msbp->msb_field = XFS_SBS_FREXTENTS;
		msbp->msb_delta = tp->t_rtx_res;
		msbp++;
	}

	/*
	 * Apply any superblock modifications to the in-core version.
	 * The t_res_fdblocks_delta and t_res_frextents_delta fields are
	 * explicitly NOT applied to the in-core superblock.
	 * The idea is that it has already been done.
	 */
	if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
		if (tp->t_icount_delta != 0) {
			msbp->msb_field = XFS_SBS_ICOUNT;
			msbp->msb_delta = (int)tp->t_icount_delta;
			msbp++;
		}
		if (tp->t_ifree_delta != 0) {
			msbp->msb_field = XFS_SBS_IFREE;
			msbp->msb_delta = (int)tp->t_ifree_delta;
			msbp++;
		}
		if (tp->t_fdblocks_delta != 0) {
			msbp->msb_field = XFS_SBS_FDBLOCKS;
			msbp->msb_delta = (int)tp->t_fdblocks_delta;
			msbp++;
		}
		if (tp->t_frextents_delta != 0) {
			msbp->msb_field = XFS_SBS_FREXTENTS;
			msbp->msb_delta = (int)tp->t_frextents_delta;
			msbp++;
		}
		if (tp->t_dblocks_delta != 0) {
			msbp->msb_field = XFS_SBS_DBLOCKS;
			msbp->msb_delta = (int)tp->t_dblocks_delta;
			msbp++;
		}
		if (tp->t_agcount_delta != 0) {
			msbp->msb_field = XFS_SBS_AGCOUNT;
			msbp->msb_delta = (int)tp->t_agcount_delta;
			msbp++;
		}
		if (tp->t_imaxpct_delta != 0) {
			msbp->msb_field = XFS_SBS_IMAX_PCT;
			msbp->msb_delta = (int)tp->t_imaxpct_delta;
			msbp++;
		}
		if (tp->t_rextsize_delta != 0) {
			msbp->msb_field = XFS_SBS_REXTSIZE;
			msbp->msb_delta = (int)tp->t_rextsize_delta;
			msbp++;
		}
		if (tp->t_rbmblocks_delta != 0) {
			msbp->msb_field = XFS_SBS_RBMBLOCKS;
			msbp->msb_delta = (int)tp->t_rbmblocks_delta;
			msbp++;
		}
		if (tp->t_rblocks_delta != 0) {
			msbp->msb_field = XFS_SBS_RBLOCKS;
			msbp->msb_delta = (int)tp->t_rblocks_delta;
			msbp++;
		}
		if (tp->t_rextents_delta != 0) {
			msbp->msb_field = XFS_SBS_REXTENTS;
			msbp->msb_delta = (int)tp->t_rextents_delta;
			msbp++;
		}
		if (tp->t_rextslog_delta != 0) {
			msbp->msb_field = XFS_SBS_REXTSLOG;
			msbp->msb_delta = (int)tp->t_rextslog_delta;
			msbp++;
		}
	}

	/*
	 * If we need to change anything, do it.
	 */
	if (msbp > msb) {
		error = xfs_mod_incore_sb_batch(tp->t_mountp, msb,
			(uint)(msbp - msb), rsvd);
		ASSERT(error == 0);
	}
}


/*
 * xfs_trans_commit
 *
 * Commit the given transaction to the log a/synchronously.
 *
 * The XFS disk error handling mechanism is not based on a typical
 * transaction abort mechanism.  Logically, after the filesystem
 * gets marked 'SHUTDOWN', we can't let any new transactions
 * be durable - ie. committed to disk - because some metadata might
 * be inconsistent.  In such cases, this returns an error, and the
 * caller may assume that all locked objects joined to the transaction
 * have already been unlocked as if the commit had succeeded.
 * Do not reference the transaction structure after this call.
 */
/*ARGSUSED*/
int
_xfs_trans_commit(
	xfs_trans_t	*tp,
	uint		flags,
	xfs_lsn_t	*commit_lsn_p,
	int		*log_flushed)
{
	xfs_log_iovec_t		*log_vector;
	int			nvec;
	xfs_mount_t		*mp;
	xfs_lsn_t		commit_lsn;
	/* REFERENCED */
	int			error;
	int			log_flags;
	int			sync;
#define	XFS_TRANS_LOGVEC_COUNT	16
	xfs_log_iovec_t		log_vector_fast[XFS_TRANS_LOGVEC_COUNT];
	void			*commit_iclog;
	int			shutdown;

	commit_lsn = -1;

	/*
	 * Determine whether this commit is releasing a permanent
	 * log reservation or not.
	 */
	if (flags & XFS_TRANS_RELEASE_LOG_RES) {
		ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
		log_flags = XFS_LOG_REL_PERM_RESERV;
	} else {
		log_flags = 0;
	}
	mp = tp->t_mountp;

	/*
	 * If there is nothing to be logged by the transaction,
	 * then unlock all of the items associated with the
	 * transaction and free the transaction structure.
	 * Also make sure to return any reserved blocks to
	 * the free pool.
	 */
shut_us_down:
	shutdown = XFS_FORCED_SHUTDOWN(mp) ? EIO : 0;
	if (!(tp->t_flags & XFS_TRANS_DIRTY) || shutdown) {
		xfs_trans_unreserve_and_mod_sb(tp);
		/*
		 * It is indeed possible for the transaction to be
		 * not dirty but the dqinfo portion to be.  All that
		 * means is that we have some (non-persistent) quota
		 * reservations that need to be unreserved.
		 */
		XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp);
		if (tp->t_ticket) {
			commit_lsn = xfs_log_done(mp, tp->t_ticket,
							NULL, log_flags);
			if (commit_lsn == -1 && !shutdown)
				shutdown = XFS_ERROR(EIO);
		}
		PFLAGS_RESTORE_FSTRANS(&tp->t_pflags);
		xfs_trans_free_items(tp, shutdown ? XFS_TRANS_ABORT : 0);
		xfs_trans_free_busy(tp);
		xfs_trans_free(tp);
		XFS_STATS_INC(xs_trans_empty);
		if (commit_lsn_p)
			*commit_lsn_p = commit_lsn;
		return (shutdown);
	}
	ASSERT(tp->t_ticket != NULL);

	/*
	 * If we need to update the superblock, then do it now.
	 */
	if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
		xfs_trans_apply_sb_deltas(tp);
	}
	XFS_TRANS_APPLY_DQUOT_DELTAS(mp, tp);

	/*
	 * Ask each log item how many log_vector entries it will
	 * need so we can figure out how many to allocate.
	 * Try to avoid the kmem_alloc() call in the common case
	 * by using a vector from the stack when it fits.
	 */
	nvec = xfs_trans_count_vecs(tp);
	if (nvec == 0) {
		xfs_force_shutdown(mp, XFS_LOG_IO_ERROR);
		goto shut_us_down;
	} else if (nvec <= XFS_TRANS_LOGVEC_COUNT) {
		log_vector = log_vector_fast;
	} else {
		log_vector = (xfs_log_iovec_t *)kmem_alloc(nvec *
						   sizeof(xfs_log_iovec_t),
						   KM_SLEEP);
	}

	/*
	 * Fill in the log_vector and pin the logged items, and
	 * then write the transaction to the log.
	 */
	xfs_trans_fill_vecs(tp, log_vector);

	error = xfs_log_write(mp, log_vector, nvec, tp->t_ticket, &(tp->t_lsn));

	/*
	 * The transaction is committed incore here, and can go out to disk
	 * at any time after this call.  However, all the items associated
	 * with the transaction are still locked and pinned in memory.
	 */
	commit_lsn = xfs_log_done(mp, tp->t_ticket, &commit_iclog, log_flags);

	tp->t_commit_lsn = commit_lsn;
	if (nvec > XFS_TRANS_LOGVEC_COUNT) {
		kmem_free(log_vector, nvec * sizeof(xfs_log_iovec_t));
	}

	if (commit_lsn_p)
		*commit_lsn_p = commit_lsn;

	/*
	 * If we got a log write error, unpin the logitems that we
	 * had pinned, clean up, free trans structure, and return error.
	 */
	if (error || commit_lsn == -1) {
		PFLAGS_RESTORE_FSTRANS(&tp->t_pflags);
		xfs_trans_uncommit(tp, flags|XFS_TRANS_ABORT);
		return XFS_ERROR(EIO);
	}

	/*
	 * Once the transaction has committed, unused
	 * reservations need to be released and changes to
	 * the superblock need to be reflected in the in-core
	 * version.  Do that now.
	 */
	xfs_trans_unreserve_and_mod_sb(tp);

	sync = tp->t_flags & XFS_TRANS_SYNC;

	/*
	 * Tell the LM to call the transaction completion routine
	 * when the log write with LSN commit_lsn completes (e.g.
	 * when the transaction commit really hits the on-disk log).
	 * After this call we cannot reference tp, because the call
	 * can happen at any time and the call will free the transaction
	 * structure pointed to by tp.  The only case where we call
	 * the completion routine (xfs_trans_committed) directly is
	 * if the log is turned off on a debug kernel or we're
	 * running in simulation mode (the log is explicitly turned
	 * off).
	 */
	tp->t_logcb.cb_func = (void(*)(void*, int))xfs_trans_committed;
	tp->t_logcb.cb_arg = tp;

	/*
	 * We need to pass the iclog buffer which was used for the
	 * transaction commit record into this function, and attach
	 * the callback to it.  The callback must be attached before
	 * the items are unlocked to avoid racing with other threads
	 * waiting for an item to unlock.
	 */
	shutdown = xfs_log_notify(mp, commit_iclog, &(tp->t_logcb));

	/*
	 * Mark this thread as no longer being in a transaction
	 */
	PFLAGS_RESTORE_FSTRANS(&tp->t_pflags);

	/*
	 * Once all the items of the transaction have been copied
	 * to the in core log and the callback is attached, the
	 * items can be unlocked.
	 *
	 * This will free descriptors pointing to items which were
	 * not logged since there is nothing more to do with them.
	 * For items which were logged, we will keep pointers to them
	 * so they can be unpinned after the transaction commits to disk.
	 * This will also stamp each modified meta-data item with
	 * the commit lsn of this transaction for dependency tracking
	 * purposes.
	 */
	xfs_trans_unlock_items(tp, commit_lsn);

	/*
	 * If we detected a log error earlier, finish committing
	 * the transaction now (unpin log items, etc).
	 *
	 * Order is critical here, to avoid using the transaction
	 * pointer after it's been freed (by xfs_trans_committed
	 * either here now, or as a callback).  We cannot do this
	 * step inside xfs_log_notify as was done earlier because
	 * of this issue.
	 */
	if (shutdown)
		xfs_trans_committed(tp, XFS_LI_ABORTED);

	/*
	 * Now that the xfs_trans_committed callback has been attached,
	 * and the items are released we can finally allow the iclog to
	 * go to disk.
	 */
	error = xfs_log_release_iclog(mp, commit_iclog);

	/*
	 * If the transaction needs to be synchronous, then force the
	 * log out now and wait for it.
	 */
	if (sync) {
		if (!error) {
			error = _xfs_log_force(mp, commit_lsn,
				      XFS_LOG_FORCE | XFS_LOG_SYNC,
				      log_flushed);
		}
		XFS_STATS_INC(xs_trans_sync);
	} else {
		XFS_STATS_INC(xs_trans_async);
	}

	return (error);
}

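/*
 * For reference, the commit path above proceeds as follows: count the
 * iovecs needed (xfs_trans_count_vecs), format and pin the dirty items
 * (xfs_trans_fill_vecs), write them with xfs_log_write(), get the commit
 * lsn from xfs_log_done(), attach the xfs_trans_committed callback via
 * xfs_log_notify(), unlock the items, and finally release the iclog.
 * The helpers below implement the first two steps and the completion side.
 */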
/*
 * Total up the number of log iovecs needed to commit this
 * transaction.  The transaction itself needs one for the
 * transaction header.  Ask each dirty item in turn how many
 * it needs to get the total.
 */
STATIC uint
xfs_trans_count_vecs(
	xfs_trans_t	*tp)
{
	int			nvecs;
	xfs_log_item_desc_t	*lidp;

	nvecs = 1;
	lidp = xfs_trans_first_item(tp);
	ASSERT(lidp != NULL);

	/* In the non-debug case we need to start bailing out if we
	 * didn't find a log_item here, return zero and let trans_commit
	 * deal with it.
	 */
	if (lidp == NULL)
		return 0;

	while (lidp != NULL) {
		/*
		 * Skip items which aren't dirty in this transaction.
		 */
		if (!(lidp->lid_flags & XFS_LID_DIRTY)) {
			lidp = xfs_trans_next_item(tp, lidp);
			continue;
		}
		lidp->lid_size = IOP_SIZE(lidp->lid_item);
		nvecs += lidp->lid_size;
		lidp = xfs_trans_next_item(tp, lidp);
	}

	return nvecs;
}

/*
 * Called from the trans_commit code when we notice that
 * the filesystem is in the middle of a forced shutdown.
 */
STATIC void
xfs_trans_uncommit(
	xfs_trans_t	*tp,
	uint		flags)
{
	xfs_log_item_desc_t	*lidp;

	for (lidp = xfs_trans_first_item(tp);
	     lidp != NULL;
	     lidp = xfs_trans_next_item(tp, lidp)) {
		/*
		 * Unpin only those items that are dirty.
		 */
		if (lidp->lid_flags & XFS_LID_DIRTY)
			IOP_UNPIN_REMOVE(lidp->lid_item, tp);
	}

	xfs_trans_unreserve_and_mod_sb(tp);
	XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(tp->t_mountp, tp);

	xfs_trans_free_items(tp, flags);
	xfs_trans_free_busy(tp);
	xfs_trans_free(tp);
}

/*
 * Fill in the vector with pointers to data to be logged
 * by this transaction.  The transaction header takes
 * the first vector, and then each dirty item takes the
 * number of vectors it indicated it needed in xfs_trans_count_vecs().
 *
 * As each item fills in the entries it needs, also pin the item
 * so that it cannot be flushed out until the log write completes.
 */
STATIC void
xfs_trans_fill_vecs(
	xfs_trans_t		*tp,
	xfs_log_iovec_t		*log_vector)
{
	xfs_log_item_desc_t	*lidp;
	xfs_log_iovec_t		*vecp;
	uint			nitems;

	/*
	 * Skip over the entry for the transaction header, we'll
	 * fill that in at the end.
	 */
	vecp = log_vector + 1;		/* pointer arithmetic */

	nitems = 0;
	lidp = xfs_trans_first_item(tp);
	ASSERT(lidp != NULL);
	while (lidp != NULL) {
		/*
		 * Skip items which aren't dirty in this transaction.
		 */
		if (!(lidp->lid_flags & XFS_LID_DIRTY)) {
			lidp = xfs_trans_next_item(tp, lidp);
			continue;
		}
		/*
		 * An item may be marked dirty and yet not log anything.
		 * This can be used to get a callback when a transaction
		 * is committed.
		 */
		if (lidp->lid_size) {
			nitems++;
		}
		IOP_FORMAT(lidp->lid_item, vecp);
		vecp += lidp->lid_size;		/* pointer arithmetic */
		IOP_PIN(lidp->lid_item);
		lidp = xfs_trans_next_item(tp, lidp);
	}

	/*
	 * Now that we've counted the number of items in this
	 * transaction, fill in the transaction header.
	 */
	tp->t_header.th_magic = XFS_TRANS_HEADER_MAGIC;
	tp->t_header.th_type = tp->t_type;
	tp->t_header.th_num_items = nitems;
	log_vector->i_addr = (xfs_caddr_t)&tp->t_header;
	log_vector->i_len = sizeof(xfs_trans_header_t);
	XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_TRANSHDR);
}


/*
 * Unlock all of the transaction's items and free the transaction.
 * The transaction must not have modified any of its items, because
 * there is no way to restore them to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release
 * it as well.
 */
void
xfs_trans_cancel(
	xfs_trans_t		*tp,
	int			flags)
{
	int			log_flags;
#ifdef DEBUG
	xfs_log_item_chunk_t	*licp;
	xfs_log_item_desc_t	*lidp;
	xfs_log_item_t		*lip;
	int			i;
#endif
	xfs_mount_t		*mp = tp->t_mountp;

	/*
	 * See if the caller is being too lazy to figure out if
	 * the transaction really needs an abort.
	 */
	if ((flags & XFS_TRANS_ABORT) && !(tp->t_flags & XFS_TRANS_DIRTY))
		flags &= ~XFS_TRANS_ABORT;
	/*
	 * See if the caller is relying on us to shut down the
	 * filesystem.  This happens in paths where we detect
	 * corruption and decide to give up.
	 */
	if ((tp->t_flags & XFS_TRANS_DIRTY) && !XFS_FORCED_SHUTDOWN(mp)) {
		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
		xfs_force_shutdown(mp, XFS_CORRUPT_INCORE);
	}
#ifdef DEBUG
	if (!(flags & XFS_TRANS_ABORT)) {
		licp = &(tp->t_items);
		while (licp != NULL) {
			lidp = licp->lic_descs;
			for (i = 0; i < licp->lic_unused; i++, lidp++) {
				if (XFS_LIC_ISFREE(licp, i)) {
					continue;
				}

				lip = lidp->lid_item;
				if (!XFS_FORCED_SHUTDOWN(mp))
					ASSERT(!(lip->li_type == XFS_LI_EFD));
			}
			licp = licp->lic_next;
		}
	}
#endif
	xfs_trans_unreserve_and_mod_sb(tp);
	XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp);

	if (tp->t_ticket) {
		if (flags & XFS_TRANS_RELEASE_LOG_RES) {
			ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
			log_flags = XFS_LOG_REL_PERM_RESERV;
		} else {
			log_flags = 0;
		}
		xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
	}

	/* mark this thread as no longer being in a transaction */
	PFLAGS_RESTORE_FSTRANS(&tp->t_pflags);

	xfs_trans_free_items(tp, flags);
	xfs_trans_free_busy(tp);
	xfs_trans_free(tp);
}


/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
	xfs_trans_t	*tp)
{
	atomic_dec(&tp->t_mountp->m_active_trans);
	XFS_TRANS_FREE_DQINFO(tp->t_mountp, tp);
	kmem_zone_free(xfs_trans_zone, tp);
}

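/*
 * For reference, the log items of a transaction live in a small list of
 * chunks: the first chunk is embedded in the transaction (tp->t_items) and
 * any overflow chunks are chained through lic_next.  The traversal pattern
 * used by the completion code below looks roughly like this:
 *
 *	licp = &tp->t_items;
 *	while (licp != NULL) {
 *		lidp = licp->lic_descs;
 *		for (i = 0; i < licp->lic_unused; i++, lidp++) {
 *			if (XFS_LIC_ISFREE(licp, i))
 *				continue;
 *			... process lidp->lid_item ...
 *		}
 *		licp = licp->lic_next;
 *	}
 */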
/*
 * THIS SHOULD BE REWRITTEN TO USE xfs_trans_next_item().
 *
 * This is typically called by the LM when a transaction has been fully
 * committed to disk.  It needs to unpin the items which have
 * been logged by the transaction and update their positions
 * in the AIL if necessary.
 * This also gets called when the transactions didn't get written out
 * because of an I/O error.  Abortflag & XFS_LI_ABORTED is set then.
 *
 * Call xfs_trans_chunk_committed() to process the items in
 * each chunk.
 */
STATIC void
xfs_trans_committed(
	xfs_trans_t	*tp,
	int		abortflag)
{
	xfs_log_item_chunk_t	*licp;
	xfs_log_item_chunk_t	*next_licp;
	xfs_log_busy_chunk_t	*lbcp;
	xfs_log_busy_slot_t	*lbsp;
	int			i;

	/*
	 * Call the transaction's completion callback if there
	 * is one.
	 */
	if (tp->t_callback != NULL) {
		tp->t_callback(tp, tp->t_callarg);
	}

	/*
	 * Special case the chunk embedded in the transaction.
	 */
	licp = &(tp->t_items);
	if (!(XFS_LIC_ARE_ALL_FREE(licp))) {
		xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag);
	}

	/*
	 * Process the items in each chunk in turn.
	 */
	licp = licp->lic_next;
	while (licp != NULL) {
		ASSERT(!XFS_LIC_ARE_ALL_FREE(licp));
		xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag);
		next_licp = licp->lic_next;
		kmem_free(licp, sizeof(xfs_log_item_chunk_t));
		licp = next_licp;
	}

	/*
	 * Clear all the per-AG busy list items listed in this transaction.
	 */
	lbcp = &tp->t_busy;
	while (lbcp != NULL) {
		for (i = 0, lbsp = lbcp->lbc_busy; i < lbcp->lbc_unused;
		     i++, lbsp++) {
			if (!XFS_LBC_ISFREE(lbcp, i)) {
				xfs_alloc_clear_busy(tp, lbsp->lbc_ag,
						     lbsp->lbc_idx);
			}
		}
		lbcp = lbcp->lbc_next;
	}
	xfs_trans_free_busy(tp);

	/*
	 * That's it for the transaction structure.  Free it.
	 */
	xfs_trans_free(tp);
}

/*
 * This is called to perform the commit processing for each
 * item described by the given chunk.
 *
 * The commit processing consists of unlocking items which were
 * held locked with the SYNC_UNLOCK attribute, calling the committed
 * routine of each logged item, updating the item's position in the AIL
 * if necessary, and unpinning each item.  If the committed routine
 * returns -1, then do nothing further with the item because it
 * may have been freed.
 *
 * Since items are unlocked when they are copied to the incore
 * log, it is possible for two transactions to be completing
 * and manipulating the same item simultaneously.  The AIL lock
 * will protect the lsn field of each item.  The value of this
 * field can never go backwards.
 *
 * We unpin the items after repositioning them in the AIL, because
 * otherwise they could be immediately flushed and we'd have to race
 * with the flusher trying to pull the item from the AIL as we add it.
 */
STATIC void
xfs_trans_chunk_committed(
	xfs_log_item_chunk_t	*licp,
	xfs_lsn_t		lsn,
	int			aborted)
{
	xfs_log_item_desc_t	*lidp;
	xfs_log_item_t		*lip;
	xfs_lsn_t		item_lsn;
	struct xfs_mount	*mp;
	int			i;
	SPLDECL(s);

	lidp = licp->lic_descs;
	for (i = 0; i < licp->lic_unused; i++, lidp++) {
		if (XFS_LIC_ISFREE(licp, i)) {
			continue;
		}

		lip = lidp->lid_item;
		if (aborted)
			lip->li_flags |= XFS_LI_ABORTED;

		/*
		 * Send in the ABORTED flag to the COMMITTED routine
		 * so that it knows whether the transaction was aborted
		 * or not.
		 */
		item_lsn = IOP_COMMITTED(lip, lsn);

		/*
		 * If the committed routine returns -1, make
		 * no more references to the item.
		 */
		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0) {
			continue;
		}

		/*
		 * If the returned lsn is greater than what it
		 * contained before, update the location of the
		 * item in the AIL.  If it is not, then do nothing.
		 * Items can never move backwards in the AIL.
		 *
		 * While the new lsn should usually be greater, it
		 * is possible that a later transaction completing
		 * simultaneously with an earlier one using the
		 * same item could complete first with a higher lsn.
		 * This would cause the earlier transaction to fail
		 * the test below.
		 */
		mp = lip->li_mountp;
		AIL_LOCK(mp, s);
		if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) {
			/*
			 * This will set the item's lsn to item_lsn
			 * and update the position of the item in
			 * the AIL.
			 *
			 * xfs_trans_update_ail() drops the AIL lock.
			 */
			xfs_trans_update_ail(mp, lip, item_lsn, s);
		} else {
			AIL_UNLOCK(mp, s);
		}

		/*
		 * Now that we've repositioned the item in the AIL,
		 * unpin it so it can be flushed.  Pass information
		 * about buffer stale state down from the log item
		 * flags, if anyone else stales the buffer we do not
		 * want to pay any attention to it.
		 */
		IOP_UNPIN(lip, lidp->lid_flags & XFS_LID_BUF_STALE);
	}
}