/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_quota.h"
#include "xfs_trans_priv.h"
#include "xfs_trans_space.h"


STATIC void	xfs_trans_apply_sb_deltas(xfs_trans_t *);
STATIC uint	xfs_trans_count_vecs(xfs_trans_t *);
STATIC void	xfs_trans_fill_vecs(xfs_trans_t *, xfs_log_iovec_t *);
STATIC void	xfs_trans_uncommit(xfs_trans_t *, uint);
STATIC void	xfs_trans_committed(xfs_trans_t *, int);
STATIC void	xfs_trans_chunk_committed(xfs_log_item_chunk_t *, xfs_lsn_t, int);
STATIC void	xfs_trans_free(xfs_trans_t *);

kmem_zone_t	*xfs_trans_zone;


/*
 * Reservation functions here avoid a huge stack in xfs_trans_init
 * due to register overflow from temporaries in the calculations.
 */

STATIC uint
xfs_calc_write_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_WRITE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_itruncate_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_ITRUNCATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_rename_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_RENAME_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_link_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_LINK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_remove_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_REMOVE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_symlink_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_SYMLINK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_create_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_CREATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_mkdir_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_MKDIR_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_ifree_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_IFREE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_ichange_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_ICHANGE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_growdata_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_GROWDATA_LOG_RES(mp);
}

STATIC uint
xfs_calc_growrtalloc_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_GROWRTALLOC_LOG_RES(mp);
}

STATIC uint
xfs_calc_growrtzero_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_GROWRTZERO_LOG_RES(mp);
}

STATIC uint
xfs_calc_growrtfree_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_GROWRTFREE_LOG_RES(mp);
}

STATIC uint
xfs_calc_swrite_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_SWRITE_LOG_RES(mp);
}

STATIC uint
xfs_calc_writeid_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_WRITEID_LOG_RES(mp);
}

STATIC uint
xfs_calc_addafork_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_ADDAFORK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_attrinval_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_ATTRINVAL_LOG_RES(mp);
}

STATIC uint
xfs_calc_attrset_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_ATTRSET_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_attrrm_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_ATTRRM_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp);
}

STATIC uint
xfs_calc_clear_agi_bucket_reservation(xfs_mount_t *mp)
{
	return XFS_CALC_CLEAR_AGI_BUCKET_LOG_RES(mp);
}
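
/*
 * Note (illustrative, not an interface definition): callers normally do
 * not call the xfs_calc_*_reservation() helpers directly; they consume
 * the values cached by xfs_trans_init() below, typically through the
 * reservation macros in xfs_trans.h (for example, XFS_WRITE_LOG_RES(mp)
 * picks up mp->m_reservations.tr_write).
 */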

/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
	xfs_mount_t	*mp)
{
	xfs_trans_reservations_t	*resp;

	resp = &(mp->m_reservations);
	resp->tr_write = xfs_calc_write_reservation(mp);
	resp->tr_itruncate = xfs_calc_itruncate_reservation(mp);
	resp->tr_rename = xfs_calc_rename_reservation(mp);
	resp->tr_link = xfs_calc_link_reservation(mp);
	resp->tr_remove = xfs_calc_remove_reservation(mp);
	resp->tr_symlink = xfs_calc_symlink_reservation(mp);
	resp->tr_create = xfs_calc_create_reservation(mp);
	resp->tr_mkdir = xfs_calc_mkdir_reservation(mp);
	resp->tr_ifree = xfs_calc_ifree_reservation(mp);
	resp->tr_ichange = xfs_calc_ichange_reservation(mp);
	resp->tr_growdata = xfs_calc_growdata_reservation(mp);
	resp->tr_swrite = xfs_calc_swrite_reservation(mp);
	resp->tr_writeid = xfs_calc_writeid_reservation(mp);
	resp->tr_addafork = xfs_calc_addafork_reservation(mp);
	resp->tr_attrinval = xfs_calc_attrinval_reservation(mp);
	resp->tr_attrset = xfs_calc_attrset_reservation(mp);
	resp->tr_attrrm = xfs_calc_attrrm_reservation(mp);
	resp->tr_clearagi = xfs_calc_clear_agi_bucket_reservation(mp);
	resp->tr_growrtalloc = xfs_calc_growrtalloc_reservation(mp);
	resp->tr_growrtzero = xfs_calc_growrtzero_reservation(mp);
	resp->tr_growrtfree = xfs_calc_growrtfree_reservation(mp);
}

/*
 * This routine is called to allocate a transaction structure.
 * The type parameter indicates the type of the transaction.  These
 * are enumerated in xfs_trans.h.
 *
 * Dynamically allocate the transaction structure from the transaction
 * zone, initialize it, and return it to the caller.
 */
xfs_trans_t *
xfs_trans_alloc(
	xfs_mount_t	*mp,
	uint		type)
{
	vfs_wait_for_freeze(XFS_MTOVFS(mp), SB_FREEZE_TRANS);
	return _xfs_trans_alloc(mp, type);
}

xfs_trans_t *
_xfs_trans_alloc(
	xfs_mount_t	*mp,
	uint		type)
{
	xfs_trans_t	*tp;

	atomic_inc(&mp->m_active_trans);

	tp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);
	tp->t_magic = XFS_TRANS_MAGIC;
	tp->t_type = type;
	tp->t_mountp = mp;
	tp->t_items_free = XFS_LIC_NUM_SLOTS;
	tp->t_busy_free = XFS_LBC_NUM_SLOTS;
	XFS_LIC_INIT(&(tp->t_items));
	XFS_LBC_INIT(&(tp->t_busy));
	return tp;
}
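
/*
 * Usage sketch, for illustration only: a caller that wants a permanent
 * write reservation typically pairs xfs_trans_alloc() and
 * xfs_trans_reserve() roughly as the direct I/O write path does
 * ("nblks" and "error" are caller-local names):
 *
 *	tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
 *	error = xfs_trans_reserve(tp, nblks, XFS_WRITE_LOG_RES(mp), 0,
 *				  XFS_TRANS_PERM_LOG_RES, XFS_WRITE_LOG_COUNT);
 *	if (error) {
 *		xfs_trans_cancel(tp, 0);
 *		return error;
 *	}
 */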

/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction.  The remaining
 * unused block and rt extent reservations are also inherited.  This
 * implies that the original transaction is no longer allowed to allocate
 * blocks.  Locks and log items, however, are not inherited.  They must
 * be added to the new transaction explicitly.
 */
xfs_trans_t *
xfs_trans_dup(
	xfs_trans_t	*tp)
{
	xfs_trans_t	*ntp;

	ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);

	/*
	 * Initialize the new transaction structure.
	 */
	ntp->t_magic = XFS_TRANS_MAGIC;
	ntp->t_type = tp->t_type;
	ntp->t_mountp = tp->t_mountp;
	ntp->t_items_free = XFS_LIC_NUM_SLOTS;
	ntp->t_busy_free = XFS_LBC_NUM_SLOTS;
	XFS_LIC_INIT(&(ntp->t_items));
	XFS_LBC_INIT(&(ntp->t_busy));

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(tp->t_ticket != NULL);

	ntp->t_flags = XFS_TRANS_PERM_LOG_RES | (tp->t_flags & XFS_TRANS_RESERVE);
	ntp->t_ticket = tp->t_ticket;
	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
	tp->t_blk_res = tp->t_blk_res_used;
	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
	tp->t_rtx_res = tp->t_rtx_res_used;
	ntp->t_pflags = tp->t_pflags;

	XFS_TRANS_DUP_DQINFO(tp->t_mountp, tp, ntp);

	atomic_inc(&tp->t_mountp->m_active_trans);
	return ntp;
}

/*
 * This is called to reserve free disk blocks and log space for the
 * given transaction.  This must be done before allocating any resources
 * within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 * The only valid value for the flags parameter is XFS_TRANS_PERM_LOG_RES,
 * which is used by long running transactions.  If any one of the
 * reservations fails then they will all be backed out.
 *
 * This does not do quota reservations.  That typically is done by the
 * caller afterwards.
 */
int
xfs_trans_reserve(
	xfs_trans_t	*tp,
	uint		blocks,
	uint		logspace,
	uint		rtextents,
	uint		flags,
	uint		logcount)
{
	int		log_flags;
	int		error = 0;
	int		rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	/* Mark this thread as being in a transaction */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);

	/*
	 * Attempt to reserve the needed disk blocks by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (blocks > 0) {
		error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FDBLOCKS,
					  -blocks, rsvd);
		if (error != 0) {
			current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
			return (XFS_ERROR(ENOSPC));
		}
		tp->t_blk_res += blocks;
	}

	/*
	 * Reserve the log space needed for this transaction.
	 */
	if (logspace > 0) {
		ASSERT((tp->t_log_res == 0) || (tp->t_log_res == logspace));
		ASSERT((tp->t_log_count == 0) ||
			(tp->t_log_count == logcount));
		if (flags & XFS_TRANS_PERM_LOG_RES) {
			log_flags = XFS_LOG_PERM_RESERV;
			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
		} else {
			ASSERT(tp->t_ticket == NULL);
			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
			log_flags = 0;
		}

		error = xfs_log_reserve(tp->t_mountp, logspace, logcount,
					&tp->t_ticket,
					XFS_TRANSACTION, log_flags, tp->t_type);
		if (error) {
			goto undo_blocks;
		}
		tp->t_log_res = logspace;
		tp->t_log_count = logcount;
	}

	/*
	 * Attempt to reserve the needed realtime extents by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (rtextents > 0) {
		error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FREXTENTS,
					  -rtextents, rsvd);
		if (error) {
			error = XFS_ERROR(ENOSPC);
			goto undo_log;
		}
		tp->t_rtx_res += rtextents;
	}

	return 0;

	/*
	 * Error cases jump to one of these labels to undo any
	 * reservations which have already been performed.
	 */
undo_log:
	if (logspace > 0) {
		if (flags & XFS_TRANS_PERM_LOG_RES) {
			log_flags = XFS_LOG_REL_PERM_RESERV;
		} else {
			log_flags = 0;
		}
		xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, log_flags);
		tp->t_ticket = NULL;
		tp->t_log_res = 0;
		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
	}

undo_blocks:
	if (blocks > 0) {
		(void) xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FDBLOCKS,
					 blocks, rsvd);
		tp->t_blk_res = 0;
	}

	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);

	return error;
}


/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 */
void
xfs_trans_mod_sb(
	xfs_trans_t	*tp,
	uint		field,
	long		delta)
{

	switch (field) {
	case XFS_TRANS_SB_ICOUNT:
		tp->t_icount_delta += delta;
		break;
	case XFS_TRANS_SB_IFREE:
		tp->t_ifree_delta += delta;
		break;
	case XFS_TRANS_SB_FDBLOCKS:
		/*
		 * Track the number of blocks allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_blk_res_used += (uint)-delta;
			ASSERT(tp->t_blk_res_used <= tp->t_blk_res);
		}
		tp->t_fdblocks_delta += delta;
		break;
	case XFS_TRANS_SB_RES_FDBLOCKS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_fdblocks_delta += delta;
		break;
	case XFS_TRANS_SB_FREXTENTS:
		/*
		 * Track the number of blocks allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_rtx_res_used += (uint)-delta;
			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
		}
		tp->t_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_RES_FREXTENTS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_DBLOCKS:
		ASSERT(delta > 0);
		tp->t_dblocks_delta += delta;
		break;
	case XFS_TRANS_SB_AGCOUNT:
		ASSERT(delta > 0);
		tp->t_agcount_delta += delta;
		break;
	case XFS_TRANS_SB_IMAXPCT:
		tp->t_imaxpct_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSIZE:
		tp->t_rextsize_delta += delta;
		break;
	case XFS_TRANS_SB_RBMBLOCKS:
		tp->t_rbmblocks_delta += delta;
		break;
	case XFS_TRANS_SB_RBLOCKS:
		tp->t_rblocks_delta += delta;
		break;
	case XFS_TRANS_SB_REXTENTS:
		tp->t_rextents_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSLOG:
		tp->t_rextslog_delta += delta;
		break;
	default:
		ASSERT(0);
		return;
	}

	tp->t_flags |= (XFS_TRANS_SB_DIRTY | XFS_TRANS_DIRTY);
}
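
/*
 * For example (illustrative only): allocation code that consumes "len"
 * file system blocks inside a transaction accounts for them with
 *
 *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -(long)len);
 *
 * which charges the blocks against the reservation taken in
 * xfs_trans_reserve() and records the delta applied to the superblock
 * at commit time by xfs_trans_apply_sb_deltas() below.
 */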

/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
	xfs_trans_t	*tp)
{
	xfs_sb_t	*sbp;
	xfs_buf_t	*bp;
	int		whole = 0;

	bp = xfs_trans_getsb(tp, tp->t_mountp, 0);
	sbp = XFS_BUF_TO_SBP(bp);

	/*
	 * Check that superblock mods match the mods made to AGF counters.
	 */
	ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
	       (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
		tp->t_ag_btree_delta));

	if (tp->t_icount_delta != 0) {
		INT_MOD(sbp->sb_icount, ARCH_CONVERT, tp->t_icount_delta);
	}
	if (tp->t_ifree_delta != 0) {
		INT_MOD(sbp->sb_ifree, ARCH_CONVERT, tp->t_ifree_delta);
	}

	if (tp->t_fdblocks_delta != 0) {
		INT_MOD(sbp->sb_fdblocks, ARCH_CONVERT, tp->t_fdblocks_delta);
	}
	if (tp->t_res_fdblocks_delta != 0) {
		INT_MOD(sbp->sb_fdblocks, ARCH_CONVERT, tp->t_res_fdblocks_delta);
	}

	if (tp->t_frextents_delta != 0) {
		INT_MOD(sbp->sb_frextents, ARCH_CONVERT, tp->t_frextents_delta);
	}
	if (tp->t_res_frextents_delta != 0) {
		INT_MOD(sbp->sb_frextents, ARCH_CONVERT, tp->t_res_frextents_delta);
	}
	if (tp->t_dblocks_delta != 0) {
		INT_MOD(sbp->sb_dblocks, ARCH_CONVERT, tp->t_dblocks_delta);
		whole = 1;
	}
	if (tp->t_agcount_delta != 0) {
		INT_MOD(sbp->sb_agcount, ARCH_CONVERT, tp->t_agcount_delta);
		whole = 1;
	}
	if (tp->t_imaxpct_delta != 0) {
		INT_MOD(sbp->sb_imax_pct, ARCH_CONVERT, tp->t_imaxpct_delta);
		whole = 1;
	}
	if (tp->t_rextsize_delta != 0) {
		INT_MOD(sbp->sb_rextsize, ARCH_CONVERT, tp->t_rextsize_delta);
		whole = 1;
	}
	if (tp->t_rbmblocks_delta != 0) {
		INT_MOD(sbp->sb_rbmblocks, ARCH_CONVERT, tp->t_rbmblocks_delta);
		whole = 1;
	}
	if (tp->t_rblocks_delta != 0) {
		INT_MOD(sbp->sb_rblocks, ARCH_CONVERT, tp->t_rblocks_delta);
		whole = 1;
	}
	if (tp->t_rextents_delta != 0) {
		INT_MOD(sbp->sb_rextents, ARCH_CONVERT, tp->t_rextents_delta);
		whole = 1;
	}
	if (tp->t_rextslog_delta != 0) {
		INT_MOD(sbp->sb_rextslog, ARCH_CONVERT, tp->t_rextslog_delta);
		whole = 1;
	}

	if (whole)
		/*
		 * Log the whole thing, the fields are noncontiguous.
		 */
		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_sb_t) - 1);
	else
		/*
		 * Since all the modifiable fields are contiguous, we
		 * can get away with this.
		 */
		xfs_trans_log_buf(tp, bp, offsetof(xfs_sb_t, sb_icount),
				  offsetof(xfs_sb_t, sb_frextents) +
				  sizeof(sbp->sb_frextents) - 1);

	XFS_MTOVFS(tp->t_mountp)->vfs_super->s_dirt = 1;
}

/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused
 * reservations and apply superblock counter changes to the in-core
 * superblock.
 *
 * This is done efficiently with a single call to xfs_mod_incore_sb_batch().
 */
STATIC void
xfs_trans_unreserve_and_mod_sb(
	xfs_trans_t	*tp)
{
	xfs_mod_sb_t	msb[14];	/* If you add cases, add entries */
	xfs_mod_sb_t	*msbp;
	/* REFERENCED */
	int		error;
	int		rsvd;

	msbp = msb;
	rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	/*
	 * Release any reserved blocks.  Any that were allocated
	 * will be taken back again by fdblocks_delta below.
	 */
	if (tp->t_blk_res > 0) {
		msbp->msb_field = XFS_SBS_FDBLOCKS;
		msbp->msb_delta = tp->t_blk_res;
		msbp++;
	}

	/*
	 * Release any reserved real time extents.  Any that were
	 * allocated will be taken back again by frextents_delta below.
	 */
	if (tp->t_rtx_res > 0) {
		msbp->msb_field = XFS_SBS_FREXTENTS;
		msbp->msb_delta = tp->t_rtx_res;
		msbp++;
	}

	/*
	 * Apply any superblock modifications to the in-core version.
	 * The t_res_fdblocks_delta and t_res_frextents_delta fields are
	 * explicitly NOT applied to the in-core superblock.
	 * The idea is that that has already been done.
	 */
	if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
		if (tp->t_icount_delta != 0) {
			msbp->msb_field = XFS_SBS_ICOUNT;
			msbp->msb_delta = (int)tp->t_icount_delta;
			msbp++;
		}
		if (tp->t_ifree_delta != 0) {
			msbp->msb_field = XFS_SBS_IFREE;
			msbp->msb_delta = (int)tp->t_ifree_delta;
			msbp++;
		}
		if (tp->t_fdblocks_delta != 0) {
			msbp->msb_field = XFS_SBS_FDBLOCKS;
			msbp->msb_delta = (int)tp->t_fdblocks_delta;
			msbp++;
		}
		if (tp->t_frextents_delta != 0) {
			msbp->msb_field = XFS_SBS_FREXTENTS;
			msbp->msb_delta = (int)tp->t_frextents_delta;
			msbp++;
		}
		if (tp->t_dblocks_delta != 0) {
			msbp->msb_field = XFS_SBS_DBLOCKS;
			msbp->msb_delta = (int)tp->t_dblocks_delta;
			msbp++;
		}
		if (tp->t_agcount_delta != 0) {
			msbp->msb_field = XFS_SBS_AGCOUNT;
			msbp->msb_delta = (int)tp->t_agcount_delta;
			msbp++;
		}
		if (tp->t_imaxpct_delta != 0) {
			msbp->msb_field = XFS_SBS_IMAX_PCT;
			msbp->msb_delta = (int)tp->t_imaxpct_delta;
			msbp++;
		}
		if (tp->t_rextsize_delta != 0) {
			msbp->msb_field = XFS_SBS_REXTSIZE;
			msbp->msb_delta = (int)tp->t_rextsize_delta;
			msbp++;
		}
		if (tp->t_rbmblocks_delta != 0) {
			msbp->msb_field = XFS_SBS_RBMBLOCKS;
			msbp->msb_delta = (int)tp->t_rbmblocks_delta;
			msbp++;
		}
		if (tp->t_rblocks_delta != 0) {
			msbp->msb_field = XFS_SBS_RBLOCKS;
			msbp->msb_delta = (int)tp->t_rblocks_delta;
			msbp++;
		}
		if (tp->t_rextents_delta != 0) {
			msbp->msb_field = XFS_SBS_REXTENTS;
			msbp->msb_delta = (int)tp->t_rextents_delta;
			msbp++;
		}
		if (tp->t_rextslog_delta != 0) {
			msbp->msb_field = XFS_SBS_REXTSLOG;
			msbp->msb_delta = (int)tp->t_rextslog_delta;
			msbp++;
		}
	}

	/*
	 * If we need to change anything, do it.
	 */
	if (msbp > msb) {
		error = xfs_mod_incore_sb_batch(tp->t_mountp, msb,
			(uint)(msbp - msb), rsvd);
		ASSERT(error == 0);
	}
}
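
/*
 * Usage note, for illustration: callers that took a permanent log
 * reservation typically finish with
 *
 *	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
 *
 * and, as described below, must not touch the transaction again
 * afterwards, whether or not an error is returned.
 */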

/*
 * xfs_trans_commit
 *
 * Commit the given transaction to the log a/synchronously.
 *
 * XFS disk error handling mechanism is not based on a typical
 * transaction abort mechanism.  Logically after the filesystem
 * gets marked 'SHUTDOWN', we can't let any new transactions
 * be durable - ie. committed to disk - because some metadata might
 * be inconsistent.  In such cases, this returns an error, and the
 * caller may assume that all locked objects joined to the transaction
 * have already been unlocked as if the commit had succeeded.
 * Do not reference the transaction structure after this call.
 */
/*ARGSUSED*/
int
_xfs_trans_commit(
	xfs_trans_t	*tp,
	uint		flags,
	xfs_lsn_t	*commit_lsn_p,
	int		*log_flushed)
{
	xfs_log_iovec_t		*log_vector;
	int			nvec;
	xfs_mount_t		*mp;
	xfs_lsn_t		commit_lsn;
	/* REFERENCED */
	int			error;
	int			log_flags;
	int			sync;
#define	XFS_TRANS_LOGVEC_COUNT	16
	xfs_log_iovec_t		log_vector_fast[XFS_TRANS_LOGVEC_COUNT];
	void			*commit_iclog;
	int			shutdown;

	commit_lsn = -1;

	/*
	 * Determine whether this commit is releasing a permanent
	 * log reservation or not.
	 */
	if (flags & XFS_TRANS_RELEASE_LOG_RES) {
		ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
		log_flags = XFS_LOG_REL_PERM_RESERV;
	} else {
		log_flags = 0;
	}
	mp = tp->t_mountp;

	/*
	 * If there is nothing to be logged by the transaction,
	 * then unlock all of the items associated with the
	 * transaction and free the transaction structure.
	 * Also make sure to return any reserved blocks to
	 * the free pool.
	 */
shut_us_down:
	shutdown = XFS_FORCED_SHUTDOWN(mp) ? EIO : 0;
	if (!(tp->t_flags & XFS_TRANS_DIRTY) || shutdown) {
		xfs_trans_unreserve_and_mod_sb(tp);
		/*
		 * It is indeed possible for the transaction to be
		 * not dirty but the dqinfo portion to be.  All that
		 * means is that we have some (non-persistent) quota
		 * reservations that need to be unreserved.
		 */
		XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp);
		if (tp->t_ticket) {
			commit_lsn = xfs_log_done(mp, tp->t_ticket,
							NULL, log_flags);
			if (commit_lsn == -1 && !shutdown)
				shutdown = XFS_ERROR(EIO);
		}
		current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
		xfs_trans_free_items(tp, shutdown ? XFS_TRANS_ABORT : 0);
		xfs_trans_free_busy(tp);
		xfs_trans_free(tp);
		XFS_STATS_INC(xs_trans_empty);
		if (commit_lsn_p)
			*commit_lsn_p = commit_lsn;
		return (shutdown);
	}
	ASSERT(tp->t_ticket != NULL);

	/*
	 * If we need to update the superblock, then do it now.
	 */
	if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
		xfs_trans_apply_sb_deltas(tp);
	}
	XFS_TRANS_APPLY_DQUOT_DELTAS(mp, tp);

	/*
	 * Ask each log item how many log_vector entries it will
	 * need so we can figure out how many to allocate.
	 * Try to avoid the kmem_alloc() call in the common case
	 * by using a vector from the stack when it fits.
	 */
	nvec = xfs_trans_count_vecs(tp);
	if (nvec == 0) {
		xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
		goto shut_us_down;
	} else if (nvec <= XFS_TRANS_LOGVEC_COUNT) {
		log_vector = log_vector_fast;
	} else {
		log_vector = (xfs_log_iovec_t *)kmem_alloc(nvec *
						sizeof(xfs_log_iovec_t),
						KM_SLEEP);
	}

	/*
	 * Fill in the log_vector and pin the logged items, and
	 * then write the transaction to the log.
	 */
	xfs_trans_fill_vecs(tp, log_vector);

	error = xfs_log_write(mp, log_vector, nvec, tp->t_ticket, &(tp->t_lsn));

	/*
	 * The transaction is committed incore here, and can go out to disk
	 * at any time after this call.  However, all the items associated
	 * with the transaction are still locked and pinned in memory.
	 */
	commit_lsn = xfs_log_done(mp, tp->t_ticket, &commit_iclog, log_flags);

	tp->t_commit_lsn = commit_lsn;
	if (nvec > XFS_TRANS_LOGVEC_COUNT) {
		kmem_free(log_vector, nvec * sizeof(xfs_log_iovec_t));
	}

	if (commit_lsn_p)
		*commit_lsn_p = commit_lsn;

	/*
	 * If we got a log write error, unpin the log items that we had
	 * pinned, clean up, free the trans structure, and return the error.
	 */
	if (error || commit_lsn == -1) {
		current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
		xfs_trans_uncommit(tp, flags|XFS_TRANS_ABORT);
		return XFS_ERROR(EIO);
	}

	/*
	 * Once the transaction has committed, unused
	 * reservations need to be released and changes to
	 * the superblock need to be reflected in the in-core
	 * version.  Do that now.
	 */
	xfs_trans_unreserve_and_mod_sb(tp);

	sync = tp->t_flags & XFS_TRANS_SYNC;

	/*
	 * Tell the LM to call the transaction completion routine
	 * when the log write with LSN commit_lsn completes (e.g.
	 * when the transaction commit really hits the on-disk log).
	 * After this call we cannot reference tp, because the call
	 * can happen at any time and the call will free the transaction
	 * structure pointed to by tp.  The only case where we call
	 * the completion routine (xfs_trans_committed) directly is
	 * if the log is turned off on a debug kernel or we're
	 * running in simulation mode (the log is explicitly turned
	 * off).
	 */
	tp->t_logcb.cb_func = (void(*)(void*, int))xfs_trans_committed;
	tp->t_logcb.cb_arg = tp;

	/*
	 * We need to pass the iclog buffer which was used for the
	 * transaction commit record into this function, and attach
	 * the callback to it.  The callback must be attached before
	 * the items are unlocked to avoid racing with other threads
	 * waiting for an item to unlock.
	 */
	shutdown = xfs_log_notify(mp, commit_iclog, &(tp->t_logcb));

	/*
	 * Mark this thread as no longer being in a transaction
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);

	/*
	 * Once all the items of the transaction have been copied
	 * to the in core log and the callback is attached, the
	 * items can be unlocked.
	 *
	 * This will free descriptors pointing to items which were
	 * not logged since there is nothing more to do with them.
	 * For items which were logged, we will keep pointers to them
	 * so they can be unpinned after the transaction commits to disk.
	 * This will also stamp each modified meta-data item with
	 * the commit lsn of this transaction for dependency tracking
	 * purposes.
	 */
	xfs_trans_unlock_items(tp, commit_lsn);

	/*
	 * If we detected a log error earlier, finish committing
	 * the transaction now (unpin log items, etc).
	 *
	 * Order is critical here, to avoid using the transaction
	 * pointer after it has been freed (by xfs_trans_committed
	 * either here now, or as a callback).  We cannot do this
	 * step inside xfs_log_notify as was done earlier because
	 * of this issue.
	 */
	if (shutdown)
		xfs_trans_committed(tp, XFS_LI_ABORTED);

	/*
	 * Now that the xfs_trans_committed callback has been attached,
	 * and the items are released we can finally allow the iclog to
	 * go to disk.
	 */
	error = xfs_log_release_iclog(mp, commit_iclog);

	/*
	 * If the transaction needs to be synchronous, then force the
	 * log out now and wait for it.
	 */
	if (sync) {
		if (!error) {
			error = _xfs_log_force(mp, commit_lsn,
				      XFS_LOG_FORCE | XFS_LOG_SYNC,
				      log_flushed);
		}
		XFS_STATS_INC(xs_trans_sync);
	} else {
		XFS_STATS_INC(xs_trans_async);
	}

	return (error);
}


/*
 * Total up the number of log iovecs needed to commit this
 * transaction.  The transaction itself needs one for the
 * transaction header.  Ask each dirty item in turn how many
 * it needs to get the total.
 */
STATIC uint
xfs_trans_count_vecs(
	xfs_trans_t	*tp)
{
	int			nvecs;
	xfs_log_item_desc_t	*lidp;

	nvecs = 1;
	lidp = xfs_trans_first_item(tp);
	ASSERT(lidp != NULL);

	/* In the non-debug case we need to start bailing out if we
	 * didn't find a log_item here, return zero and let trans_commit
	 * deal with it.
	 */
	if (lidp == NULL)
		return 0;

	while (lidp != NULL) {
		/*
		 * Skip items which aren't dirty in this transaction.
		 */
		if (!(lidp->lid_flags & XFS_LID_DIRTY)) {
			lidp = xfs_trans_next_item(tp, lidp);
			continue;
		}
		lidp->lid_size = IOP_SIZE(lidp->lid_item);
		nvecs += lidp->lid_size;
		lidp = xfs_trans_next_item(tp, lidp);
	}

	return nvecs;
}

/*
 * Called from the trans_commit code when we notice that
 * the filesystem is in the middle of a forced shutdown.
 */
STATIC void
xfs_trans_uncommit(
	xfs_trans_t	*tp,
	uint		flags)
{
	xfs_log_item_desc_t	*lidp;

	for (lidp = xfs_trans_first_item(tp);
	     lidp != NULL;
	     lidp = xfs_trans_next_item(tp, lidp)) {
		/*
		 * Unpin only those items that are dirty.
		 */
		if (lidp->lid_flags & XFS_LID_DIRTY)
			IOP_UNPIN_REMOVE(lidp->lid_item, tp);
	}

	xfs_trans_unreserve_and_mod_sb(tp);
	XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(tp->t_mountp, tp);

	xfs_trans_free_items(tp, flags);
	xfs_trans_free_busy(tp);
	xfs_trans_free(tp);
}

/*
 * Fill in the vector with pointers to data to be logged
 * by this transaction.  The transaction header takes
 * the first vector, and then each dirty item takes the
 * number of vectors it indicated it needed in xfs_trans_count_vecs().
 *
 * As each item fills in the entries it needs, also pin the item
 * so that it cannot be flushed out until the log write completes.
 */
STATIC void
xfs_trans_fill_vecs(
	xfs_trans_t		*tp,
	xfs_log_iovec_t		*log_vector)
{
	xfs_log_item_desc_t	*lidp;
	xfs_log_iovec_t		*vecp;
	uint			nitems;

	/*
	 * Skip over the entry for the transaction header, we'll
	 * fill that in at the end.
	 */
	vecp = log_vector + 1;		/* pointer arithmetic */

	nitems = 0;
	lidp = xfs_trans_first_item(tp);
	ASSERT(lidp != NULL);
	while (lidp != NULL) {
		/*
		 * Skip items which aren't dirty in this transaction.
		 */
		if (!(lidp->lid_flags & XFS_LID_DIRTY)) {
			lidp = xfs_trans_next_item(tp, lidp);
			continue;
		}
		/*
		 * The item may be marked dirty but not log anything.
		 * This can be used to get called when a transaction
		 * is committed.
		 */
		if (lidp->lid_size) {
			nitems++;
		}
		IOP_FORMAT(lidp->lid_item, vecp);
		vecp += lidp->lid_size;	/* pointer arithmetic */
		IOP_PIN(lidp->lid_item);
		lidp = xfs_trans_next_item(tp, lidp);
	}

	/*
	 * Now that we've counted the number of items in this
	 * transaction, fill in the transaction header.
	 */
	tp->t_header.th_magic = XFS_TRANS_HEADER_MAGIC;
	tp->t_header.th_type = tp->t_type;
	tp->t_header.th_num_items = nitems;
	log_vector->i_addr = (xfs_caddr_t)&tp->t_header;
	log_vector->i_len = sizeof(xfs_trans_header_t);
	XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_TRANSHDR);
}


/*
 * Unlock all of the transaction's items and free the transaction.
 * The transaction must not have modified any of its items, because
 * there is no way to restore them to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release
 * it as well.
 */
void
xfs_trans_cancel(
	xfs_trans_t	*tp,
	int		flags)
{
	int			log_flags;
#ifdef DEBUG
	xfs_log_item_chunk_t	*licp;
	xfs_log_item_desc_t	*lidp;
	xfs_log_item_t		*lip;
	int			i;
#endif
	xfs_mount_t		*mp = tp->t_mountp;

	/*
	 * See if the caller is being too lazy to figure out if
	 * the transaction really needs an abort.
	 */
	if ((flags & XFS_TRANS_ABORT) && !(tp->t_flags & XFS_TRANS_DIRTY))
		flags &= ~XFS_TRANS_ABORT;
	/*
	 * See if the caller is relying on us to shut down the
	 * filesystem.  This happens in paths where we detect
	 * corruption and decide to give up.
	 */
	if ((tp->t_flags & XFS_TRANS_DIRTY) && !XFS_FORCED_SHUTDOWN(mp)) {
		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}
#ifdef DEBUG
	if (!(flags & XFS_TRANS_ABORT)) {
		licp = &(tp->t_items);
		while (licp != NULL) {
			lidp = licp->lic_descs;
			for (i = 0; i < licp->lic_unused; i++, lidp++) {
				if (XFS_LIC_ISFREE(licp, i)) {
					continue;
				}

				lip = lidp->lid_item;
				if (!XFS_FORCED_SHUTDOWN(mp))
					ASSERT(!(lip->li_type == XFS_LI_EFD));
			}
			licp = licp->lic_next;
		}
	}
#endif
	xfs_trans_unreserve_and_mod_sb(tp);
	XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp);

	if (tp->t_ticket) {
		if (flags & XFS_TRANS_RELEASE_LOG_RES) {
			ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
			log_flags = XFS_LOG_REL_PERM_RESERV;
		} else {
			log_flags = 0;
		}
		xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
	}

	/* mark this thread as no longer being in a transaction */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);

	xfs_trans_free_items(tp, flags);
	xfs_trans_free_busy(tp);
	xfs_trans_free(tp);
}


/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
	xfs_trans_t	*tp)
{
	atomic_dec(&tp->t_mountp->m_active_trans);
	XFS_TRANS_FREE_DQINFO(tp->t_mountp, tp);
	kmem_zone_free(xfs_trans_zone, tp);
}
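
/*
 * Illustrative usage note: error paths that have already dirtied a
 * transaction holding a permanent reservation typically cancel it with
 *
 *	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
 *
 * and rely on the shutdown handling in xfs_trans_cancel() above, since
 * the modified items cannot be restored.
 */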

/*
 * THIS SHOULD BE REWRITTEN TO USE xfs_trans_next_item().
 *
 * This is typically called by the LM when a transaction has been fully
 * committed to disk.  It needs to unpin the items which have
 * been logged by the transaction and update their positions
 * in the AIL if necessary.
 * This also gets called when the transaction didn't get written out
 * because of an I/O error; in that case, abortflag & XFS_LI_ABORTED is set.
 *
 * Call xfs_trans_chunk_committed() to process the items in
 * each chunk.
 */
STATIC void
xfs_trans_committed(
	xfs_trans_t	*tp,
	int		abortflag)
{
	xfs_log_item_chunk_t	*licp;
	xfs_log_item_chunk_t	*next_licp;
	xfs_log_busy_chunk_t	*lbcp;
	xfs_log_busy_slot_t	*lbsp;
	int			i;

	/*
	 * Call the transaction's completion callback if there
	 * is one.
	 */
	if (tp->t_callback != NULL) {
		tp->t_callback(tp, tp->t_callarg);
	}

	/*
	 * Special case the chunk embedded in the transaction.
	 */
	licp = &(tp->t_items);
	if (!(XFS_LIC_ARE_ALL_FREE(licp))) {
		xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag);
	}

	/*
	 * Process the items in each chunk in turn.
	 */
	licp = licp->lic_next;
	while (licp != NULL) {
		ASSERT(!XFS_LIC_ARE_ALL_FREE(licp));
		xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag);
		next_licp = licp->lic_next;
		kmem_free(licp, sizeof(xfs_log_item_chunk_t));
		licp = next_licp;
	}

	/*
	 * Clear all the per-AG busy list items listed in this transaction
	 */
	lbcp = &tp->t_busy;
	while (lbcp != NULL) {
		for (i = 0, lbsp = lbcp->lbc_busy; i < lbcp->lbc_unused;
		     i++, lbsp++) {
			if (!XFS_LBC_ISFREE(lbcp, i)) {
				xfs_alloc_clear_busy(tp, lbsp->lbc_ag,
						     lbsp->lbc_idx);
			}
		}
		lbcp = lbcp->lbc_next;
	}
	xfs_trans_free_busy(tp);

	/*
	 * That's it for the transaction structure.  Free it.
	 */
	xfs_trans_free(tp);
}

/*
 * This is called to perform the commit processing for each
 * item described by the given chunk.
 *
 * The commit processing consists of unlocking items which were
 * held locked with the SYNC_UNLOCK attribute, calling the committed
 * routine of each logged item, updating the item's position in the AIL
 * if necessary, and unpinning each item.  If the committed routine
 * returns -1, then do nothing further with the item because it
 * may have been freed.
 *
 * Since items are unlocked when they are copied to the incore
 * log, it is possible for two transactions to be completing
 * and manipulating the same item simultaneously.  The AIL lock
 * will protect the lsn field of each item.  The value of this
 * field can never go backwards.
 *
 * We unpin the items after repositioning them in the AIL, because
 * otherwise they could be immediately flushed and we'd have to race
 * with the flusher trying to pull the item from the AIL as we add it.
 */
STATIC void
xfs_trans_chunk_committed(
	xfs_log_item_chunk_t	*licp,
	xfs_lsn_t		lsn,
	int			aborted)
{
	xfs_log_item_desc_t	*lidp;
	xfs_log_item_t		*lip;
	xfs_lsn_t		item_lsn;
	struct xfs_mount	*mp;
	int			i;
	SPLDECL(s);

	lidp = licp->lic_descs;
	for (i = 0; i < licp->lic_unused; i++, lidp++) {
		if (XFS_LIC_ISFREE(licp, i)) {
			continue;
		}

		lip = lidp->lid_item;
		if (aborted)
			lip->li_flags |= XFS_LI_ABORTED;

		/*
		 * Send in the ABORTED flag to the COMMITTED routine
		 * so that it knows whether the transaction was aborted
		 * or not.
		 */
		item_lsn = IOP_COMMITTED(lip, lsn);

		/*
		 * If the committed routine returns -1, make
		 * no more references to the item.
		 */
		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0) {
			continue;
		}

		/*
		 * If the returned lsn is greater than what it
		 * contained before, update the location of the
		 * item in the AIL.  If it is not, then do nothing.
		 * Items can never move backwards in the AIL.
		 *
		 * While the new lsn should usually be greater, it
		 * is possible that a later transaction completing
		 * simultaneously with an earlier one using the
		 * same item could complete first with a higher lsn.
		 * This would cause the earlier transaction to fail
		 * the test below.
		 */
		mp = lip->li_mountp;
		AIL_LOCK(mp, s);
		if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) {
			/*
			 * This will set the item's lsn to item_lsn
			 * and update the position of the item in
			 * the AIL.
			 *
			 * xfs_trans_update_ail() drops the AIL lock.
			 */
			xfs_trans_update_ail(mp, lip, item_lsn, s);
		} else {
			AIL_UNLOCK(mp, s);
		}

		/*
		 * Now that we've repositioned the item in the AIL,
		 * unpin it so it can be flushed.  Pass information
		 * about buffer stale state down from the log item
		 * flags, if anyone else stales the buffer we do not
		 * want to pay any attention to it.
		 */
		IOP_UNPIN(lip, lidp->lid_flags & XFS_LID_BUF_STALE);
	}
}