// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_extent_busy.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_defer.h"

kmem_zone_t	*xfs_trans_zone;

#if defined(CONFIG_TRACEPOINTS)
static void
xfs_trans_trace_reservations(
	struct xfs_mount	*mp)
{
	struct xfs_trans_res	resv;
	struct xfs_trans_res	*res;
	struct xfs_trans_res	*end_res;
	int			i;

	res = (struct xfs_trans_res *)M_RES(mp);
	end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
	for (i = 0; res < end_res; i++, res++)
		trace_xfs_trans_resv_calc(mp, i, res);
	xfs_log_get_max_trans_res(mp, &resv);
	trace_xfs_trans_resv_calc(mp, -1, &resv);
}
#else
# define xfs_trans_trace_reservations(mp)
#endif

/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
	struct xfs_mount	*mp)
{
	xfs_trans_resv_calc(mp, M_RES(mp));
	xfs_trans_trace_reservations(mp);
}

/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
	struct xfs_trans	*tp)
{
	xfs_extent_busy_sort(&tp->t_busy);
	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);

	trace_xfs_trans_free(tp, _RET_IP_);
	atomic_dec(&tp->t_mountp->m_active_trans);
	if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
		sb_end_intwrite(tp->t_mountp->m_super);
	xfs_trans_free_dqinfo(tp);
	kmem_zone_free(xfs_trans_zone, tp);
}

/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction.  The remaining
 * unused block and rt extent reservations are also inherited.  This
 * implies that the original transaction is no longer allowed to allocate
 * blocks.  Locks and log items, however, are not inherited.  They must
 * be added to the new transaction explicitly.
 */
STATIC struct xfs_trans *
xfs_trans_dup(
	struct xfs_trans	*tp)
{
	struct xfs_trans	*ntp;

	trace_xfs_trans_dup(tp, _RET_IP_);

	ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);

	/*
	 * Initialize the new transaction structure.
	 */
	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
	ntp->t_mountp = tp->t_mountp;
	INIT_LIST_HEAD(&ntp->t_items);
	INIT_LIST_HEAD(&ntp->t_busy);
	INIT_LIST_HEAD(&ntp->t_dfops);
	ntp->t_firstblock = NULLFSBLOCK;

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(tp->t_ticket != NULL);

	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
		       (tp->t_flags & XFS_TRANS_RESERVE) |
		       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT);
	/* We gave our writer reference to the new transaction */
	tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);

	ASSERT(tp->t_blk_res >= tp->t_blk_res_used);
	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
	tp->t_blk_res = tp->t_blk_res_used;

	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
	tp->t_rtx_res = tp->t_rtx_res_used;
	ntp->t_pflags = tp->t_pflags;

	/* move deferred ops over to the new tp */
	xfs_defer_move(ntp, tp);

	xfs_trans_dup_dqinfo(tp, ntp);

	atomic_inc(&tp->t_mountp->m_active_trans);
	return ntp;
}

/*
 * This is called to reserve free disk blocks and log space for the
 * given transaction.  This must be done before allocating any resources
 * within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 * The only valid flag in resp->tr_logflags is XFS_TRANS_PERM_LOG_RES,
 * which is used by long running transactions.  If any one of the
 * reservations fails then they will all be backed out.
 *
 * This does not do quota reservations.  That typically is done by the
 * caller afterwards.
 */
static int
xfs_trans_reserve(
	struct xfs_trans	*tp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents)
{
	int			error = 0;
	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	/* Mark this thread as being in a transaction */
	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);

	/*
	 * Attempt to reserve the needed disk blocks by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (blocks > 0) {
		error = xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
		if (error != 0) {
			current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
			return -ENOSPC;
		}
		tp->t_blk_res += blocks;
	}

	/*
	 * Reserve the log space needed for this transaction.
	 */
	if (resp->tr_logres > 0) {
		bool	permanent = false;

		ASSERT(tp->t_log_res == 0 ||
		       tp->t_log_res == resp->tr_logres);
		ASSERT(tp->t_log_count == 0 ||
		       tp->t_log_count == resp->tr_logcount);

		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
			permanent = true;
		} else {
			ASSERT(tp->t_ticket == NULL);
			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
		}

		if (tp->t_ticket != NULL) {
			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
			error = xfs_log_regrant(tp->t_mountp, tp->t_ticket);
		} else {
			error = xfs_log_reserve(tp->t_mountp,
						resp->tr_logres,
						resp->tr_logcount,
						&tp->t_ticket, XFS_TRANSACTION,
						permanent);
		}

		if (error)
			goto undo_blocks;

		tp->t_log_res = resp->tr_logres;
		tp->t_log_count = resp->tr_logcount;
	}

	/*
	 * Attempt to reserve the needed realtime extents by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (rtextents > 0) {
		error = xfs_mod_frextents(tp->t_mountp, -((int64_t)rtextents));
		if (error) {
			error = -ENOSPC;
			goto undo_log;
		}
		tp->t_rtx_res += rtextents;
	}

	return 0;

	/*
	 * Error cases jump to one of these labels to undo any
	 * reservations which have already been performed.
	 */
undo_log:
	if (resp->tr_logres > 0) {
		xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, false);
		tp->t_ticket = NULL;
		tp->t_log_res = 0;
		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
	}

undo_blocks:
	if (blocks > 0) {
		xfs_mod_fdblocks(tp->t_mountp, (int64_t)blocks, rsvd);
		tp->t_blk_res = 0;
	}

	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);

	return error;
}

int
xfs_trans_alloc(
	struct xfs_mount	*mp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents,
	uint			flags,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	int			error;

	/*
	 * Allocate the handle before we do our freeze accounting and set up
	 * the GFP_NOFS allocation context so that we avoid lockdep false
	 * positives by doing GFP_KERNEL allocations inside sb_start_intwrite().
	 */
	tp = kmem_zone_zalloc(xfs_trans_zone,
		(flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);

	if (!(flags & XFS_TRANS_NO_WRITECOUNT))
		sb_start_intwrite(mp->m_super);

	/*
	 * Zero-reservation ("empty") transactions can't modify anything, so
	 * they're allowed to run while we're frozen.
	 */
	WARN_ON(resp->tr_logres > 0 &&
		mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
	atomic_inc(&mp->m_active_trans);

	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
	tp->t_flags = flags;
	tp->t_mountp = mp;
	INIT_LIST_HEAD(&tp->t_items);
	INIT_LIST_HEAD(&tp->t_busy);
	INIT_LIST_HEAD(&tp->t_dfops);
	tp->t_firstblock = NULLFSBLOCK;

	error = xfs_trans_reserve(tp, resp, blocks, rtextents);
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	trace_xfs_trans_alloc(tp, _RET_IP_);

	*tpp = tp;
	return 0;
}

/*
 * Create an empty transaction with no reservation.  This is a defensive
 * mechanism for routines that query metadata without actually modifying
 * them -- if the metadata being queried is somehow cross-linked (think a
 * btree block pointer that points higher in the tree), we risk deadlock.
 * However, blocks grabbed as part of a transaction can be re-grabbed.
 * The verifiers will notice the corrupt block and the operation will fail
 * back to userspace without deadlocking.
 *
 * Note the zero-length reservation; this transaction MUST be cancelled
 * without any dirty data.
 */
int
xfs_trans_alloc_empty(
	struct xfs_mount	*mp,
	struct xfs_trans	**tpp)
{
	struct xfs_trans_res		resv = {0};

	return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
}

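/*
 * Illustrative caller pattern (editorial sketch, not part of this file's
 * logic): a typical metadata update allocates a transaction, joins the
 * locked objects, logs the changes, and commits; on any error before the
 * commit the transaction must be cancelled instead.  The reservation used
 * (tr_ichange) and the inode 'ip' are assumptions for the example only.
 *
 *	struct xfs_trans	*tp;
 *	int			error;
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
 *	if (error)
 *		return error;
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 *
 *	// ...modify the inode core...
 *	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 *
 *	error = xfs_trans_commit(tp);	// or xfs_trans_cancel(tp) on error
 *
 * Readers that only need a transaction context use xfs_trans_alloc_empty()
 * and must finish with xfs_trans_cancel(), never xfs_trans_commit(), since
 * nothing may be dirtied in an empty transaction.
 */
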
/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 *
 * Because we may not be keeping track of allocated/free inodes and
 * used filesystem blocks in the superblock, we do not mark the
 * superblock dirty in this transaction if we modify these fields.
 * We still need to update the transaction deltas so that they get
 * applied to the incore superblock, but we don't want them to
 * cause the superblock to get locked and logged if these are the
 * only fields in the superblock that the transaction modifies.
 */
void
xfs_trans_mod_sb(
	xfs_trans_t	*tp,
	uint		field,
	int64_t		delta)
{
	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
	xfs_mount_t	*mp = tp->t_mountp;

	switch (field) {
	case XFS_TRANS_SB_ICOUNT:
		tp->t_icount_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_IFREE:
		tp->t_ifree_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FDBLOCKS:
		/*
		 * Track the number of blocks allocated in the transaction.
		 * Make sure it does not exceed the number reserved.  If so,
		 * shut down as this can lead to accounting inconsistency.
		 */
		if (delta < 0) {
			tp->t_blk_res_used += (uint)-delta;
			if (tp->t_blk_res_used > tp->t_blk_res)
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		}
		tp->t_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_RES_FDBLOCKS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		tp->t_res_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FREXTENTS:
		/*
		 * Track the number of blocks allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_rtx_res_used += (uint)-delta;
			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
		}
		tp->t_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_RES_FREXTENTS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_DBLOCKS:
		ASSERT(delta > 0);
		tp->t_dblocks_delta += delta;
		break;
	case XFS_TRANS_SB_AGCOUNT:
		ASSERT(delta > 0);
		tp->t_agcount_delta += delta;
		break;
	case XFS_TRANS_SB_IMAXPCT:
		tp->t_imaxpct_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSIZE:
		tp->t_rextsize_delta += delta;
		break;
	case XFS_TRANS_SB_RBMBLOCKS:
		tp->t_rbmblocks_delta += delta;
		break;
	case XFS_TRANS_SB_RBLOCKS:
		tp->t_rblocks_delta += delta;
		break;
	case XFS_TRANS_SB_REXTENTS:
		tp->t_rextents_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSLOG:
		tp->t_rextslog_delta += delta;
		break;
	default:
		ASSERT(0);
		return;
	}

	tp->t_flags |= flags;
}

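/*
 * Illustrative use (editorial sketch): block allocation code accounts the
 * blocks it takes from or returns to the filesystem against the superblock
 * counters through this interface.  'len' below is a hypothetical extent
 * length in filesystem blocks.
 *
 *	// an extent of 'len' blocks was allocated in this transaction
 *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -(int64_t)len);
 *
 *	// an extent of 'len' blocks was freed in this transaction
 *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (int64_t)len);
 *
 * With lazy superblock counters enabled this only updates the transaction
 * deltas; the on-disk superblock is not logged for these fields.
 */
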
/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
	xfs_trans_t	*tp)
{
	xfs_dsb_t	*sbp;
	xfs_buf_t	*bp;
	int		whole = 0;

	bp = xfs_trans_getsb(tp, tp->t_mountp);
	sbp = XFS_BUF_TO_SBP(bp);

	/*
	 * Check that superblock mods match the mods made to AGF counters.
	 */
	ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
	       (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
		tp->t_ag_btree_delta));

	/*
	 * Only update the superblock counters if we are logging them
	 */
	if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
		if (tp->t_icount_delta)
			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
		if (tp->t_ifree_delta)
			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
		if (tp->t_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
		if (tp->t_res_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
	}

	if (tp->t_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
	if (tp->t_res_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);

	if (tp->t_dblocks_delta) {
		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
		whole = 1;
	}
	if (tp->t_agcount_delta) {
		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
		whole = 1;
	}
	if (tp->t_imaxpct_delta) {
		sbp->sb_imax_pct += tp->t_imaxpct_delta;
		whole = 1;
	}
	if (tp->t_rextsize_delta) {
		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
		whole = 1;
	}
	if (tp->t_rbmblocks_delta) {
		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
		whole = 1;
	}
	if (tp->t_rblocks_delta) {
		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
		whole = 1;
	}
	if (tp->t_rextents_delta) {
		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
		whole = 1;
	}
	if (tp->t_rextslog_delta) {
		sbp->sb_rextslog += tp->t_rextslog_delta;
		whole = 1;
	}

	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
	if (whole)
		/*
		 * Log the whole thing, the fields are noncontiguous.
		 */
		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
	else
		/*
		 * Since all the modifiable fields are contiguous, we
		 * can get away with this.
		 */
		xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
				  offsetof(xfs_dsb_t, sb_frextents) +
				  sizeof(sbp->sb_frextents) - 1);
}

STATIC int
xfs_sb_mod8(
	uint8_t			*field,
	int8_t			delta)
{
	int8_t			counter = *field;

	counter += delta;
	if (counter < 0) {
		ASSERT(0);
		return -EINVAL;
	}
	*field = counter;
	return 0;
}

STATIC int
xfs_sb_mod32(
	uint32_t		*field,
	int32_t			delta)
{
	int32_t			counter = *field;

	counter += delta;
	if (counter < 0) {
		ASSERT(0);
		return -EINVAL;
	}
	*field = counter;
	return 0;
}

STATIC int
xfs_sb_mod64(
	uint64_t		*field,
	int64_t			delta)
{
	int64_t			counter = *field;

	counter += delta;
	if (counter < 0) {
		ASSERT(0);
		return -EINVAL;
	}
	*field = counter;
	return 0;
}

/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
 * and apply superblock counter changes to the in-core superblock.  The
 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 * applied to the in-core superblock.  The idea is that this has already been
 * done.
 *
 * If we are not logging superblock counters, then the inode allocated/free and
 * used block counts are not updated in the on disk superblock. In this case,
 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 * still need to update the incore superblock with the changes.
 */
void
xfs_trans_unreserve_and_mod_sb(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
	int64_t			blkdelta = 0;
	int64_t			rtxdelta = 0;
	int64_t			idelta = 0;
	int64_t			ifreedelta = 0;
	int			error;

	/* calculate deltas */
	if (tp->t_blk_res > 0)
		blkdelta = tp->t_blk_res;
	if ((tp->t_fdblocks_delta != 0) &&
	    (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
		blkdelta += tp->t_fdblocks_delta;

	if (tp->t_rtx_res > 0)
		rtxdelta = tp->t_rtx_res;
	if ((tp->t_frextents_delta != 0) &&
	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
		rtxdelta += tp->t_frextents_delta;

	if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		idelta = tp->t_icount_delta;
		ifreedelta = tp->t_ifree_delta;
	}

	/* apply the per-cpu counters */
	if (blkdelta) {
		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
		if (error)
			goto out;
	}

	if (idelta) {
		error = xfs_mod_icount(mp, idelta);
		if (error)
			goto out_undo_fdblocks;
	}

	if (ifreedelta) {
		error = xfs_mod_ifree(mp, ifreedelta);
		if (error)
			goto out_undo_icount;
	}

	if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
		return;

	/* apply remaining deltas */
	spin_lock(&mp->m_sb_lock);
	if (rtxdelta) {
		error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta);
		if (error)
			goto out_undo_ifree;
	}

	if (tp->t_dblocks_delta != 0) {
		error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta);
		if (error)
			goto out_undo_frextents;
	}
	if (tp->t_agcount_delta != 0) {
		error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
		if (error)
			goto out_undo_dblocks;
	}
	if (tp->t_imaxpct_delta != 0) {
		error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
		if (error)
			goto out_undo_agcount;
	}
	if (tp->t_rextsize_delta != 0) {
		error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
				     tp->t_rextsize_delta);
		if (error)
			goto out_undo_imaxpct;
	}
	if (tp->t_rbmblocks_delta != 0) {
		error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
				     tp->t_rbmblocks_delta);
		if (error)
			goto out_undo_rextsize;
	}
	if (tp->t_rblocks_delta != 0) {
		error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
		if (error)
			goto out_undo_rbmblocks;
	}
	if (tp->t_rextents_delta != 0) {
		error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
				     tp->t_rextents_delta);
		if (error)
			goto out_undo_rblocks;
	}
	if (tp->t_rextslog_delta != 0) {
		error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
				    tp->t_rextslog_delta);
		if (error)
			goto out_undo_rextents;
	}
	spin_unlock(&mp->m_sb_lock);
	return;

out_undo_rextents:
	if (tp->t_rextents_delta)
		xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
out_undo_rblocks:
	if (tp->t_rblocks_delta)
		xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
out_undo_rbmblocks:
	if (tp->t_rbmblocks_delta)
		xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
out_undo_rextsize:
	if (tp->t_rextsize_delta)
		xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
out_undo_imaxpct:
	if (tp->t_imaxpct_delta)
		xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
out_undo_agcount:
	if (tp->t_agcount_delta)
		xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
out_undo_dblocks:
	if (tp->t_dblocks_delta)
		xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
out_undo_frextents:
	if (rtxdelta)
		xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
out_undo_ifree:
	spin_unlock(&mp->m_sb_lock);
	if (ifreedelta)
		xfs_mod_ifree(mp, -ifreedelta);
out_undo_icount:
	if (idelta)
		xfs_mod_icount(mp, -idelta);
out_undo_fdblocks:
	if (blkdelta)
		xfs_mod_fdblocks(mp, -blkdelta, rsvd);
out:
	ASSERT(error == 0);
	return;
}

/* Add the given log item to the transaction's list of log items. */
void
xfs_trans_add_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	ASSERT(lip->li_mountp == tp->t_mountp);
	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
	ASSERT(list_empty(&lip->li_trans));
	ASSERT(!test_bit(XFS_LI_DIRTY, &lip->li_flags));

	list_add_tail(&lip->li_trans, &tp->t_items);
	trace_xfs_trans_add_item(tp, _RET_IP_);
}

/*
 * Unlink the log item from the transaction.  The log item is no longer
 * considered dirty in this transaction, as the linked transaction has
 * finished, either by abort or commit completion.
 */
void
xfs_trans_del_item(
	struct xfs_log_item	*lip)
{
	clear_bit(XFS_LI_DIRTY, &lip->li_flags);
	list_del_init(&lip->li_trans);
}

/* Detach and unlock all of the items in a transaction */
static void
xfs_trans_free_items(
	struct xfs_trans	*tp,
	bool			abort)
{
	struct xfs_log_item	*lip, *next;

	trace_xfs_trans_free_items(tp, _RET_IP_);

	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		xfs_trans_del_item(lip);
		if (abort)
			set_bit(XFS_LI_ABORTED, &lip->li_flags);
		if (lip->li_ops->iop_release)
			lip->li_ops->iop_release(lip);
	}
}

static inline void
xfs_log_item_batch_insert(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		commit_lsn)
{
	int	i;

	spin_lock(&ailp->ail_lock);
	/* xfs_trans_ail_update_bulk drops ailp->ail_lock */
	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];

		if (lip->li_ops->iop_unpin)
			lip->li_ops->iop_unpin(lip, 0);
	}
}

/*
 * Bulk operation version of xfs_trans_committed that takes a log vector of
 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 * minimise lock traffic.
 *
 * If we are called with the aborted flag set, it is because a log write during
 * a CIL checkpoint commit has failed. In this case, all the items in the
 * checkpoint have already gone through iop_committed and iop_committing, which
 * means that checkpoint commit abort handling is treated exactly the same
 * as an iclog write error even though we haven't started any IO yet. Hence in
 * this case all we need to do is iop_committed processing, followed by an
 * iop_unpin(aborted) call.
 *
 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 * at the end of the AIL, the insert cursor avoids the need to walk
 * the AIL to find the insertion point on every xfs_log_item_batch_insert()
 * call. This saves a lot of needless list walking and is a net win, even
 * though it slightly increases the amount of AIL lock traffic to set it up
 * and tear it down.
 */
void
xfs_trans_committed_bulk(
	struct xfs_ail		*ailp,
	struct xfs_log_vec	*log_vector,
	xfs_lsn_t		commit_lsn,
	bool			aborted)
{
#define LOG_ITEM_BATCH_SIZE	32
	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
	struct xfs_log_vec	*lv;
	struct xfs_ail_cursor	cur;
	int			i = 0;

	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
	spin_unlock(&ailp->ail_lock);

	/* unpin all the log items */
	for (lv = log_vector; lv; lv = lv->lv_next) {
		struct xfs_log_item	*lip = lv->lv_item;
		xfs_lsn_t		item_lsn;

		if (aborted)
			set_bit(XFS_LI_ABORTED, &lip->li_flags);

		if (lip->li_ops->flags & XFS_ITEM_RELEASE_WHEN_COMMITTED) {
			lip->li_ops->iop_release(lip);
			continue;
		}

		if (lip->li_ops->iop_committed)
			item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);
		else
			item_lsn = commit_lsn;

		/* item_lsn of -1 means the item needs no further processing */
		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
			continue;

		/*
		 * if we are aborting the operation, no point in inserting the
		 * object into the AIL as we are in a shutdown situation.
		 */
		if (aborted) {
			ASSERT(XFS_FORCED_SHUTDOWN(ailp->ail_mount));
			if (lip->li_ops->iop_unpin)
				lip->li_ops->iop_unpin(lip, 1);
			continue;
		}

		if (item_lsn != commit_lsn) {

			/*
			 * Not a bulk update option due to unusual item_lsn.
			 * Push into AIL immediately, rechecking the lsn once
			 * we have the ail lock. Then unpin the item. This does
			 * not affect the AIL cursor the bulk insert path is
			 * using.
			 */
			spin_lock(&ailp->ail_lock);
			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
				xfs_trans_ail_update(ailp, lip, item_lsn);
			else
				spin_unlock(&ailp->ail_lock);
			if (lip->li_ops->iop_unpin)
				lip->li_ops->iop_unpin(lip, 0);
			continue;
		}

		/* Item is a candidate for bulk AIL insert. */
		log_items[i++] = lv->lv_item;
		if (i >= LOG_ITEM_BATCH_SIZE) {
			xfs_log_item_batch_insert(ailp, &cur, log_items,
					LOG_ITEM_BATCH_SIZE, commit_lsn);
			i = 0;
		}
	}

	/* make sure we insert the remainder! */
	if (i)
		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);

	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);
}

/*
 * Commit the given transaction to the log.
 *
 * XFS disk error handling mechanism is not based on a typical
 * transaction abort mechanism. Logically after the filesystem
 * gets marked 'SHUTDOWN', we can't let any new transactions
 * be durable - ie. committed to disk - because some metadata might
 * be inconsistent. In such cases, this returns an error, and the
 * caller may assume that all locked objects joined to the transaction
 * have already been unlocked as if the commit had succeeded.
 * Do not reference the transaction structure after this call.
 */
static int
__xfs_trans_commit(
	struct xfs_trans	*tp,
	bool			regrant)
{
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_lsn_t		commit_lsn = -1;
	int			error = 0;
	int			sync = tp->t_flags & XFS_TRANS_SYNC;

	trace_xfs_trans_commit(tp, _RET_IP_);

	/*
	 * Finish deferred items on final commit. Only permanent transactions
	 * should ever have deferred ops.
	 */
	WARN_ON_ONCE(!list_empty(&tp->t_dfops) &&
		     !(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
	if (!regrant && (tp->t_flags & XFS_TRANS_PERM_LOG_RES)) {
		error = xfs_defer_finish_noroll(&tp);
		if (error)
			goto out_unreserve;
	}

	/*
	 * If there is nothing to be logged by the transaction,
	 * then unlock all of the items associated with the
	 * transaction and free the transaction structure.
	 * Also make sure to return any reserved blocks to
	 * the free pool.
9590924378aSDave Chinner */ 960a3ccd2caSChristoph Hellwig if (!(tp->t_flags & XFS_TRANS_DIRTY)) 961a3ccd2caSChristoph Hellwig goto out_unreserve; 962a3ccd2caSChristoph Hellwig 963a3ccd2caSChristoph Hellwig if (XFS_FORCED_SHUTDOWN(mp)) { 9642451337dSDave Chinner error = -EIO; 965a3ccd2caSChristoph Hellwig goto out_unreserve; 9660924378aSDave Chinner } 967a3ccd2caSChristoph Hellwig 9680924378aSDave Chinner ASSERT(tp->t_ticket != NULL); 9690924378aSDave Chinner 9700924378aSDave Chinner /* 9710924378aSDave Chinner * If we need to update the superblock, then do it now. 9720924378aSDave Chinner */ 9730924378aSDave Chinner if (tp->t_flags & XFS_TRANS_SB_DIRTY) 9740924378aSDave Chinner xfs_trans_apply_sb_deltas(tp); 9750924378aSDave Chinner xfs_trans_apply_dquot_deltas(tp); 9760924378aSDave Chinner 97770393313SChristoph Hellwig xfs_log_commit_cil(mp, tp, &commit_lsn, regrant); 9781da177e4SLinus Torvalds 9799070733bSMichal Hocko current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS); 9800244b960SChristoph Hellwig xfs_trans_free(tp); 9810244b960SChristoph Hellwig 9821da177e4SLinus Torvalds /* 9831da177e4SLinus Torvalds * If the transaction needs to be synchronous, then force the 9841da177e4SLinus Torvalds * log out now and wait for it. 9851da177e4SLinus Torvalds */ 9861da177e4SLinus Torvalds if (sync) { 987656de4ffSChristoph Hellwig error = xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL); 988ff6d6af2SBill O'Donnell XFS_STATS_INC(mp, xs_trans_sync); 9891da177e4SLinus Torvalds } else { 990ff6d6af2SBill O'Donnell XFS_STATS_INC(mp, xs_trans_async); 9911da177e4SLinus Torvalds } 9921da177e4SLinus Torvalds 993a3ccd2caSChristoph Hellwig return error; 994a3ccd2caSChristoph Hellwig 995a3ccd2caSChristoph Hellwig out_unreserve: 996a3ccd2caSChristoph Hellwig xfs_trans_unreserve_and_mod_sb(tp); 997a3ccd2caSChristoph Hellwig 998a3ccd2caSChristoph Hellwig /* 999a3ccd2caSChristoph Hellwig * It is indeed possible for the transaction to be not dirty but 1000a3ccd2caSChristoph Hellwig * the dqinfo portion to be. All that means is that we have some 1001a3ccd2caSChristoph Hellwig * (non-persistent) quota reservations that need to be unreserved. 1002a3ccd2caSChristoph Hellwig */ 1003a3ccd2caSChristoph Hellwig xfs_trans_unreserve_and_mod_dquots(tp); 1004a3ccd2caSChristoph Hellwig if (tp->t_ticket) { 1005f78c3901SChristoph Hellwig commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, regrant); 1006a3ccd2caSChristoph Hellwig if (commit_lsn == -1 && !error) 10072451337dSDave Chinner error = -EIO; 1008ba18781bSDave Chinner tp->t_ticket = NULL; 1009a3ccd2caSChristoph Hellwig } 10109070733bSMichal Hocko current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS); 1011195cd83dSChristoph Hellwig xfs_trans_free_items(tp, !!error); 1012a3ccd2caSChristoph Hellwig xfs_trans_free(tp); 1013a3ccd2caSChristoph Hellwig 1014ff6d6af2SBill O'Donnell XFS_STATS_INC(mp, xs_trans_empty); 1015a3ccd2caSChristoph Hellwig return error; 10161da177e4SLinus Torvalds } 10171da177e4SLinus Torvalds 101870393313SChristoph Hellwig int 101970393313SChristoph Hellwig xfs_trans_commit( 102070393313SChristoph Hellwig struct xfs_trans *tp) 102170393313SChristoph Hellwig { 102270393313SChristoph Hellwig return __xfs_trans_commit(tp, false); 102370393313SChristoph Hellwig } 102470393313SChristoph Hellwig 10251da177e4SLinus Torvalds /* 10261da177e4SLinus Torvalds * Unlock all of the transaction's items and free the transaction. 
/*
 * Unlock all of the transaction's items and free the transaction.
 * The transaction must not have modified any of its items, because
 * there is no way to restore them to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release
 * it as well.
 */
void
xfs_trans_cancel(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	bool			dirty = (tp->t_flags & XFS_TRANS_DIRTY);

	trace_xfs_trans_cancel(tp, _RET_IP_);

	if (tp->t_flags & XFS_TRANS_PERM_LOG_RES)
		xfs_defer_cancel(tp);

	/*
	 * See if the caller is relying on us to shut down the
	 * filesystem.  This happens in paths where we detect
	 * corruption and decide to give up.
	 */
	if (dirty && !XFS_FORCED_SHUTDOWN(mp)) {
		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}
#ifdef DEBUG
	if (!dirty && !XFS_FORCED_SHUTDOWN(mp)) {
		struct xfs_log_item *lip;

		list_for_each_entry(lip, &tp->t_items, li_trans)
			ASSERT(!(lip->li_type == XFS_LI_EFD));
	}
#endif
	xfs_trans_unreserve_and_mod_sb(tp);
	xfs_trans_unreserve_and_mod_dquots(tp);

	if (tp->t_ticket) {
		xfs_log_done(mp, tp->t_ticket, NULL, false);
		tp->t_ticket = NULL;
	}

	/* mark this thread as no longer being in a transaction */
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);

	xfs_trans_free_items(tp, dirty);
	xfs_trans_free(tp);
}

/*
 * Roll from one trans in the sequence of PERMANENT transactions to
 * the next: permanent transactions are only flushed out when
 * committed with xfs_trans_commit(), but we still want to let chunks
 * of it go to the log as soon as possible.  So we commit the
 * chunk we've been working on and get a new transaction to continue.
 */
int
xfs_trans_roll(
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*trans = *tpp;
	struct xfs_trans_res	tres;
	int			error;

	trace_xfs_trans_roll(trans, _RET_IP_);

	/*
	 * Copy the critical parameters from one trans to the next.
	 */
	tres.tr_logres = trans->t_log_res;
	tres.tr_logcount = trans->t_log_count;

	*tpp = xfs_trans_dup(trans);

	/*
	 * Commit the current transaction.
	 * If this commit failed, then it'd just unlock those items that
	 * are not marked ihold. That also means that a filesystem shutdown
	 * is in progress. The caller takes the responsibility to cancel
	 * the duplicate transaction that gets returned.
	 */
	error = __xfs_trans_commit(trans, true);
	if (error)
		return error;

	/*
	 * Reserve space in the log for the next transaction.
	 * This also pushes items in the "AIL", the list of logged items,
	 * out to disk if they are taking up space at the tail of the log
	 * that we want to use.  This requires that either nothing be locked
	 * across this call, or that anything that is locked be logged in
	 * the prior and the next transactions.
	 */
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	return xfs_trans_reserve(*tpp, &tres, 0, 0);
}
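
/*
 * Illustrative rolling pattern (editorial sketch): a long running update
 * starts with a permanent log reservation, commits what it has logged so
 * far with xfs_trans_roll() and carries on in the duplicate transaction.
 * Locked objects are not carried across the roll and must be re-joined by
 * the caller.  The loop condition and the inode 'ip' are assumptions for
 * the example only.
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
 *	...
 *	while (more_work_to_do) {
 *		// ...modify and log objects joined to tp...
 *
 *		error = xfs_trans_roll(&tp);
 *		if (error)
 *			break;
 *		xfs_trans_ijoin(tp, ip, 0);	// re-join held objects
 *	}
 *	error = error ? error : xfs_trans_commit(tp);
 */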