// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_icache.h"
#include "xfs_rtbitmap.h"

/* Slab cache for struct xfs_trans allocations. */
struct kmem_cache	*xfs_trans_cache;

#if defined(CONFIG_TRACEPOINTS)
/*
 * Emit one tracepoint per precomputed transaction reservation in the
 * mount's M_RES() table so the calculated values can be inspected.
 */
static void
xfs_trans_trace_reservations(
	struct xfs_mount	*mp)
{
	struct xfs_trans_res	*res;
	struct xfs_trans_res	*end_res;
	int			i;

	/* Walk M_RES(mp) as a flat array of xfs_trans_res entries. */
	res = (struct xfs_trans_res *)M_RES(mp);
	end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
	for (i = 0; res < end_res; i++, res++)
		trace_xfs_trans_resv_calc(mp, i, res);
}
#else
# define xfs_trans_trace_reservations(mp)
#endif

/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
	struct xfs_mount	*mp)
{
	xfs_trans_resv_calc(mp, M_RES(mp));
	xfs_trans_trace_reservations(mp);
}

/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
	struct xfs_trans	*tp)
{
	xfs_extent_busy_sort(&tp->t_busy);
	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);

	trace_xfs_trans_free(tp, _RET_IP_);
	xfs_trans_clear_context(tp);
	/* Drop the superblock intwrite (freeze) reference unless it was
	 * handed off to a duplicated transaction (NO_WRITECOUNT set). */
	if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
		sb_end_intwrite(tp->t_mountp->m_super);
	xfs_trans_free_dqinfo(tp);
	kmem_cache_free(xfs_trans_cache, tp);
}

/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction.  The remaining
 * unused block and rt extent reservations are also inherited.  This
 * implies that the original transaction is no longer allowed to allocate
 * blocks.  Locks and log items, however, are not inherited.  They must
 * be added to the new transaction explicitly.
 */
STATIC struct xfs_trans *
xfs_trans_dup(
	struct xfs_trans	*tp)
{
	struct xfs_trans	*ntp;

	trace_xfs_trans_dup(tp, _RET_IP_);

	ntp = kmem_cache_zalloc(xfs_trans_cache, GFP_KERNEL | __GFP_NOFAIL);

	/*
	 * Initialize the new transaction structure.
	 */
	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
	ntp->t_mountp = tp->t_mountp;
	INIT_LIST_HEAD(&ntp->t_items);
	INIT_LIST_HEAD(&ntp->t_busy);
	INIT_LIST_HEAD(&ntp->t_dfops);
	ntp->t_highest_agno = NULLAGNUMBER;

	/* Only transactions holding a permanent log reservation can roll. */
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(tp->t_ticket != NULL);

	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
		       (tp->t_flags & XFS_TRANS_RESERVE) |
		       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT) |
		       (tp->t_flags & XFS_TRANS_RES_FDBLKS);
	/* We gave our writer reference to the new transaction */
	tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);

	/* The new transaction inherits the unused block/rtx reservation;
	 * the old one keeps only what it has already consumed. */
	ASSERT(tp->t_blk_res >= tp->t_blk_res_used);
	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
	tp->t_blk_res = tp->t_blk_res_used;

	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
	tp->t_rtx_res = tp->t_rtx_res_used;

	xfs_trans_switch_context(tp, ntp);

	/* move deferred ops over to the new tp */
	xfs_defer_move(ntp, tp);

	xfs_trans_dup_dqinfo(tp, ntp);
	return ntp;
}

/*
 * This is called to reserve free disk blocks and log space for the
 * given transaction.  This must be done before allocating any resources
 * within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 * The only valid value for the flags parameter is XFS_RES_LOG_PERM, which
 * is used by long running transactions.  If any one of the reservations
 * fails then they will all be backed out.
 *
 * This does not do quota reservations. That typically is done by the
 * caller afterwards.
 */
static int
xfs_trans_reserve(
	struct xfs_trans	*tp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents)
{
	struct xfs_mount	*mp = tp->t_mountp;
	int			error = 0;
	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	/*
	 * Attempt to reserve the needed disk blocks by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (blocks > 0) {
		error = xfs_mod_fdblocks(mp, -((int64_t)blocks), rsvd);
		if (error != 0)
			return -ENOSPC;
		tp->t_blk_res += blocks;
	}

	/*
	 * Reserve the log space needed for this transaction.
	 */
	if (resp->tr_logres > 0) {
		bool	permanent = false;

		ASSERT(tp->t_log_res == 0 ||
		       tp->t_log_res == resp->tr_logres);
		ASSERT(tp->t_log_count == 0 ||
		       tp->t_log_count == resp->tr_logcount);

		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
			permanent = true;
		} else {
			ASSERT(tp->t_ticket == NULL);
			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
		}

		/* An existing ticket means this is a transaction roll:
		 * regrant on the same ticket instead of a fresh reserve. */
		if (tp->t_ticket != NULL) {
			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
			error = xfs_log_regrant(mp, tp->t_ticket);
		} else {
			error = xfs_log_reserve(mp, resp->tr_logres,
						resp->tr_logcount,
						&tp->t_ticket, permanent);
		}

		if (error)
			goto undo_blocks;

		tp->t_log_res = resp->tr_logres;
		tp->t_log_count = resp->tr_logcount;
	}

	/*
	 * Attempt to reserve the needed realtime extents by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (rtextents > 0) {
		error = xfs_mod_frextents(mp, -((int64_t)rtextents));
		if (error) {
			error = -ENOSPC;
			goto undo_log;
		}
		tp->t_rtx_res += rtextents;
	}

	return 0;

	/*
	 * Error cases jump to one of these labels to undo any
	 * reservations which have already been performed.
	 */
undo_log:
	if (resp->tr_logres > 0) {
		xfs_log_ticket_ungrant(mp->m_log, tp->t_ticket);
		tp->t_ticket = NULL;
		tp->t_log_res = 0;
		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
	}

undo_blocks:
	if (blocks > 0) {
		xfs_mod_fdblocks(mp, (int64_t)blocks, rsvd);
		tp->t_blk_res = 0;
	}
	return error;
}

/*
 * Allocate a transaction, take block/rtextent/log reservations for it, and
 * hand it back to the caller via @tpp.  On ENOSPC the allocation is retried
 * once after flushing speculative preallocations.
 */
int
xfs_trans_alloc(
	struct xfs_mount	*mp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents,
	uint			flags,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	bool			want_retry = true;
	int			error;

	/*
	 * Allocate the handle before we do our freeze accounting and setting up
	 * GFP_NOFS allocation context so that we avoid lockdep false positives
	 * by doing GFP_KERNEL allocations inside sb_start_intwrite().
	 */
retry:
	tp = kmem_cache_zalloc(xfs_trans_cache, GFP_KERNEL | __GFP_NOFAIL);
	if (!(flags & XFS_TRANS_NO_WRITECOUNT))
		sb_start_intwrite(mp->m_super);
	xfs_trans_set_context(tp);

	/*
	 * Zero-reservation ("empty") transactions can't modify anything, so
	 * they're allowed to run while we're frozen.
	 */
	WARN_ON(resp->tr_logres > 0 &&
		mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
	ASSERT(!(flags & XFS_TRANS_RES_FDBLKS) ||
	       xfs_has_lazysbcount(mp));

	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
	tp->t_flags = flags;
	tp->t_mountp = mp;
	INIT_LIST_HEAD(&tp->t_items);
	INIT_LIST_HEAD(&tp->t_busy);
	INIT_LIST_HEAD(&tp->t_dfops);
	tp->t_highest_agno = NULLAGNUMBER;

	error = xfs_trans_reserve(tp, resp, blocks, rtextents);
	if (error == -ENOSPC && want_retry) {
		xfs_trans_cancel(tp);

		/*
		 * We weren't able to reserve enough space for the transaction.
		 * Flush the other speculative space allocations to free space.
		 * Do not perform a synchronous scan because callers can hold
		 * other locks.
		 */
		error = xfs_blockgc_flush_all(mp);
		if (error)
			return error;
		want_retry = false;
		goto retry;
	}
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	trace_xfs_trans_alloc(tp, _RET_IP_);

	*tpp = tp;
	return 0;
}

/*
 * Create an empty transaction with no reservation.  This is a defensive
 * mechanism for routines that query metadata without actually modifying them --
 * if the metadata being queried is somehow cross-linked (think a btree block
 * pointer that points higher in the tree), we risk deadlock.  However, blocks
 * grabbed as part of a transaction can be re-grabbed.  The verifiers will
 * notice the corrupt block and the operation will fail back to userspace
 * without deadlocking.
 *
 * Note the zero-length reservation; this transaction MUST be cancelled without
 * any dirty data.
 *
 * Callers should obtain freeze protection to avoid a conflict with fs freezing
 * where we can be grabbing buffers at the same time that freeze is trying to
 * drain the buffer LRU list.
 */
int
xfs_trans_alloc_empty(
	struct xfs_mount	*mp,
	struct xfs_trans	**tpp)
{
	struct xfs_trans_res	resv = {0};

	return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
}

/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 *
 * Because we may not be keeping track of allocated/free inodes and
 * used filesystem blocks in the superblock, we do not mark the
 * superblock dirty in this transaction if we modify these fields.
 * We still need to update the transaction deltas so that they get
 * applied to the incore superblock, but we don't want them to
 * cause the superblock to get locked and logged if these are the
 * only fields in the superblock that the transaction modifies.
 */
void
xfs_trans_mod_sb(
	xfs_trans_t	*tp,
	uint		field,
	int64_t		delta)
{
	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
	xfs_mount_t	*mp = tp->t_mountp;

	switch (field) {
	case XFS_TRANS_SB_ICOUNT:
		tp->t_icount_delta += delta;
		if (xfs_has_lazysbcount(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_IFREE:
		tp->t_ifree_delta += delta;
		if (xfs_has_lazysbcount(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FDBLOCKS:
		/*
		 * Track the number of blocks allocated in the transaction.
		 * Make sure it does not exceed the number reserved. If so,
		 * shutdown as this can lead to accounting inconsistency.
		 */
		if (delta < 0) {
			tp->t_blk_res_used += (uint)-delta;
			if (tp->t_blk_res_used > tp->t_blk_res)
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		} else if (delta > 0 && (tp->t_flags & XFS_TRANS_RES_FDBLKS)) {
			int64_t	blkres_delta;

			/*
			 * Return freed blocks directly to the reservation
			 * instead of the global pool, being careful not to
			 * overflow the trans counter. This is used to preserve
			 * reservation across chains of transaction rolls that
			 * repeatedly free and allocate blocks.
			 */
			blkres_delta = min_t(int64_t, delta,
					     UINT_MAX - tp->t_blk_res);
			tp->t_blk_res += blkres_delta;
			delta -= blkres_delta;
		}
		tp->t_fdblocks_delta += delta;
		if (xfs_has_lazysbcount(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_RES_FDBLOCKS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		tp->t_res_fdblocks_delta += delta;
		if (xfs_has_lazysbcount(mp))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FREXTENTS:
		/*
		 * Track the number of blocks allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_rtx_res_used += (uint)-delta;
			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
		}
		tp->t_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_RES_FREXTENTS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_DBLOCKS:
		tp->t_dblocks_delta += delta;
		break;
	case XFS_TRANS_SB_AGCOUNT:
		ASSERT(delta > 0);
		tp->t_agcount_delta += delta;
		break;
	case XFS_TRANS_SB_IMAXPCT:
		tp->t_imaxpct_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSIZE:
		tp->t_rextsize_delta += delta;
		break;
	case XFS_TRANS_SB_RBMBLOCKS:
		tp->t_rbmblocks_delta += delta;
		break;
	case XFS_TRANS_SB_RBLOCKS:
		tp->t_rblocks_delta += delta;
		break;
	case XFS_TRANS_SB_REXTENTS:
		tp->t_rextents_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSLOG:
		tp->t_rextslog_delta += delta;
		break;
	default:
		ASSERT(0);
		return;
	}

	tp->t_flags |= flags;
}

/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
	xfs_trans_t	*tp)
{
	struct xfs_dsb	*sbp;
	struct xfs_buf	*bp;
	int		whole = 0;

	bp = xfs_trans_getsb(tp);
	sbp = bp->b_addr;

	/*
	 * Only update the superblock counters if we are logging them
	 */
	if (!xfs_has_lazysbcount((tp->t_mountp))) {
		if (tp->t_icount_delta)
			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
		if (tp->t_ifree_delta)
			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
		if (tp->t_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
		if (tp->t_res_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
	}

	/*
	 * Updating frextents requires careful handling because it does not
	 * behave like the lazysb counters because we cannot rely on log
	 * recovery in older kernels to recompute the value from the rtbitmap.
	 * This means that the ondisk frextents must be consistent with the
	 * rtbitmap.
	 *
	 * Therefore, log the frextents change to the ondisk superblock and
	 * update the incore superblock so that future calls to xfs_log_sb
	 * write the correct value ondisk.
	 *
	 * Don't touch m_frextents because it includes incore reservations,
	 * and those are handled by the unreserve function.
	 */
	if (tp->t_frextents_delta || tp->t_res_frextents_delta) {
		struct xfs_mount	*mp = tp->t_mountp;
		int64_t			rtxdelta;

		rtxdelta = tp->t_frextents_delta + tp->t_res_frextents_delta;

		spin_lock(&mp->m_sb_lock);
		be64_add_cpu(&sbp->sb_frextents, rtxdelta);
		mp->m_sb.sb_frextents += rtxdelta;
		spin_unlock(&mp->m_sb_lock);
	}

	/* Geometry changes below force logging of the whole superblock. */
	if (tp->t_dblocks_delta) {
		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
		whole = 1;
	}
	if (tp->t_agcount_delta) {
		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
		whole = 1;
	}
	if (tp->t_imaxpct_delta) {
		sbp->sb_imax_pct += tp->t_imaxpct_delta;
		whole = 1;
	}
	if (tp->t_rextsize_delta) {
		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
		whole = 1;
	}
	if (tp->t_rbmblocks_delta) {
		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
		whole = 1;
	}
	if (tp->t_rblocks_delta) {
		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
		whole = 1;
	}
	if (tp->t_rextents_delta) {
		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
		whole = 1;
	}
	if (tp->t_rextslog_delta) {
		sbp->sb_rextslog += tp->t_rextslog_delta;
		whole = 1;
	}

	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
	if (whole)
		/*
		 * Log the whole thing, the fields are noncontiguous.
		 */
		xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb) - 1);
	else
		/*
		 * Since all the modifiable fields are contiguous, we
		 * can get away with this.
		 */
		xfs_trans_log_buf(tp, bp, offsetof(struct xfs_dsb, sb_icount),
				  offsetof(struct xfs_dsb, sb_frextents) +
				  sizeof(sbp->sb_frextents) - 1);
}

/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations and
 * apply superblock counter changes to the in-core superblock.  The
 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 * applied to the in-core superblock.  The idea is that that has already been
 * done.
 *
 * If we are not logging superblock counters, then the inode allocated/free and
 * used block counts are not updated in the on disk superblock. In this case,
 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 * still need to update the incore superblock with the changes.
 *
 * Deltas for the inode count are +/-64, hence we use a large batch size of 128
 * so we don't need to take the counter lock on every update.
 */
#define XFS_ICOUNT_BATCH	128

/*
 * Release unused reservations and apply this transaction's counter deltas to
 * the in-core superblock.  Per-cpu counters (fdblocks, icount, ifree,
 * frextents) are updated unconditionally; the remaining fields are only
 * touched under m_sb_lock when the transaction dirtied the superblock.
 */
void
xfs_trans_unreserve_and_mod_sb(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
	int64_t			blkdelta = 0;
	int64_t			rtxdelta = 0;
	int64_t			idelta = 0;
	int64_t			ifreedelta = 0;
	int			error;

	/* calculate deltas */

	/* Start with the unused part of the block reservation... */
	if (tp->t_blk_res > 0)
		blkdelta = tp->t_blk_res;
	/*
	 * ...and add the block-count delta when it must be reflected in the
	 * incore counters (lazy sb counters, or an sb-dirty transaction).
	 */
	if ((tp->t_fdblocks_delta != 0) &&
	    (xfs_has_lazysbcount(mp) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
		blkdelta += tp->t_fdblocks_delta;

	/* Same pattern for realtime extents. */
	if (tp->t_rtx_res > 0)
		rtxdelta = tp->t_rtx_res;
	if ((tp->t_frextents_delta != 0) &&
	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
		rtxdelta += tp->t_frextents_delta;

	if (xfs_has_lazysbcount(mp) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		idelta = tp->t_icount_delta;
		ifreedelta = tp->t_ifree_delta;
	}

	/* apply the per-cpu counters */
	if (blkdelta) {
		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
		/*
		 * Returning a reservation (blkdelta > 0) cannot fail, hence
		 * the assert rather than error handling.
		 */
		ASSERT(!error);
	}

	if (idelta)
		percpu_counter_add_batch(&mp->m_icount, idelta,
					 XFS_ICOUNT_BATCH);

	if (ifreedelta)
		percpu_counter_add(&mp->m_ifree, ifreedelta);

	if (rtxdelta) {
		error = xfs_mod_frextents(mp, rtxdelta);
		ASSERT(!error);
	}

	/* Without a dirty superblock there is nothing further to apply. */
	if (!(tp->t_flags & XFS_TRANS_SB_DIRTY))
		return;

	/* apply remaining deltas */
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_fdblocks += tp->t_fdblocks_delta + tp->t_res_fdblocks_delta;
	mp->m_sb.sb_icount += idelta;
	mp->m_sb.sb_ifree += ifreedelta;
	/*
	 * Do not touch sb_frextents here because we are dealing with incore
	 * reservation.  sb_frextents is not part of the lazy sb counters so it
	 * must be consistent with the ondisk rtbitmap and must never include
	 * incore reservations.
	 */
	mp->m_sb.sb_dblocks += tp->t_dblocks_delta;
	mp->m_sb.sb_agcount += tp->t_agcount_delta;
	mp->m_sb.sb_imax_pct += tp->t_imaxpct_delta;
	mp->m_sb.sb_rextsize += tp->t_rextsize_delta;
	if (tp->t_rextsize_delta) {
		/* Keep the cached rt extent size log/mask in sync. */
		mp->m_rtxblklog = log2_if_power2(mp->m_sb.sb_rextsize);
		mp->m_rtxblkmask = mask64_if_power2(mp->m_sb.sb_rextsize);
	}
	mp->m_sb.sb_rbmblocks += tp->t_rbmblocks_delta;
	mp->m_sb.sb_rblocks += tp->t_rblocks_delta;
	mp->m_sb.sb_rextents += tp->t_rextents_delta;
	mp->m_sb.sb_rextslog += tp->t_rextslog_delta;
	spin_unlock(&mp->m_sb_lock);

	/*
	 * Debug checks outside of the spinlock so they don't lock up the
	 * machine if they fail.
	 */
	ASSERT(mp->m_sb.sb_imax_pct >= 0);
	ASSERT(mp->m_sb.sb_rextslog >= 0);
	return;
}

/* Add the given log item to the transaction's list of log items. */
void
xfs_trans_add_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	/* The item must belong to this mount and not already be attached. */
	ASSERT(lip->li_log == tp->t_mountp->m_log);
	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
	ASSERT(list_empty(&lip->li_trans));
	ASSERT(!test_bit(XFS_LI_DIRTY, &lip->li_flags));

	list_add_tail(&lip->li_trans, &tp->t_items);
	trace_xfs_trans_add_item(tp, _RET_IP_);
}

/*
 * Unlink the log item from the transaction.  The log item is no longer
 * considered dirty in this transaction, as the linked transaction has
 * finished, either by abort or commit completion.
 */
void
xfs_trans_del_item(
	struct xfs_log_item	*lip)
{
	clear_bit(XFS_LI_DIRTY, &lip->li_flags);
	list_del_init(&lip->li_trans);
}

/* Detach and unlock all of the items in a transaction */
static void
xfs_trans_free_items(
	struct xfs_trans	*tp,
	bool			abort)
{
	struct xfs_log_item	*lip, *next;

	trace_xfs_trans_free_items(tp, _RET_IP_);

	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		xfs_trans_del_item(lip);
		/* Mark aborted items so ->iop_release can tell. */
		if (abort)
			set_bit(XFS_LI_ABORTED, &lip->li_flags);
		if (lip->li_ops->iop_release)
			lip->li_ops->iop_release(lip);
	}
}

/*
 * Insert a batch of log items into the AIL at commit_lsn and then drop one
 * pin count on each of them.
 */
static inline void
xfs_log_item_batch_insert(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		commit_lsn)
{
	int	i;

	spin_lock(&ailp->ail_lock);
	/* xfs_trans_ail_update_bulk drops ailp->ail_lock */
	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];

		if (lip->li_ops->iop_unpin)
			lip->li_ops->iop_unpin(lip, 0);
	}
}

/*
 * Bulk operation version of xfs_trans_committed that takes a log vector of
 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 * minimise lock traffic.
 *
 * If we are called with the aborted flag set, it is because a log write during
 * a CIL checkpoint commit has failed. In this case, all the items in the
 * checkpoint have already gone through iop_committed and iop_committing, which
 * means that checkpoint commit abort handling is treated exactly the same
 * as an iclog write error even though we haven't started any IO yet. Hence in
 * this case all we need to do is iop_committed processing, followed by an
 * iop_unpin(aborted) call.
 *
 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 * at the end of the AIL, the insert cursor avoids the need to walk
 * the AIL to find the insertion point on every xfs_log_item_batch_insert()
 * call. This saves a lot of needless list walking and is a net win, even
 * though it slightly increases that amount of AIL lock traffic to set it up
 * and tear it down.
 */
void
xfs_trans_committed_bulk(
	struct xfs_ail		*ailp,
	struct list_head	*lv_chain,
	xfs_lsn_t		commit_lsn,
	bool			aborted)
{
#define LOG_ITEM_BATCH_SIZE	32
	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
	struct xfs_log_vec	*lv;
	struct xfs_ail_cursor	cur;
	int			i = 0;

	/* Set up the insertion cursor at commit_lsn under the AIL lock. */
	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
	spin_unlock(&ailp->ail_lock);

	/* unpin all the log items */
	list_for_each_entry(lv, lv_chain, lv_list) {
		struct xfs_log_item	*lip = lv->lv_item;
		xfs_lsn_t		item_lsn;

		if (aborted)
			set_bit(XFS_LI_ABORTED, &lip->li_flags);

		/* Items that never enter the AIL are simply released. */
		if (lip->li_ops->flags & XFS_ITEM_RELEASE_WHEN_COMMITTED) {
			lip->li_ops->iop_release(lip);
			continue;
		}

		if (lip->li_ops->iop_committed)
			item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);
		else
			item_lsn = commit_lsn;

		/* item_lsn of -1 means the item needs no further processing */
		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
			continue;

		/*
		 * if we are aborting the operation, no point in inserting the
		 * object into the AIL as we are in a shutdown situation.
		 */
		if (aborted) {
			ASSERT(xlog_is_shutdown(ailp->ail_log));
			if (lip->li_ops->iop_unpin)
				lip->li_ops->iop_unpin(lip, 1);
			continue;
		}

		if (item_lsn != commit_lsn) {

			/*
			 * Not a bulk update option due to unusual item_lsn.
			 * Push into AIL immediately, rechecking the lsn once
			 * we have the ail lock. Then unpin the item. This does
			 * not affect the AIL cursor the bulk insert path is
			 * using.
			 */
			spin_lock(&ailp->ail_lock);
			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
				xfs_trans_ail_update(ailp, lip, item_lsn);
			else
				spin_unlock(&ailp->ail_lock);
			if (lip->li_ops->iop_unpin)
				lip->li_ops->iop_unpin(lip, 0);
			continue;
		}

		/* Item is a candidate for bulk AIL insert.  */
		log_items[i++] = lv->lv_item;
		if (i >= LOG_ITEM_BATCH_SIZE) {
			xfs_log_item_batch_insert(ailp, &cur, log_items,
					LOG_ITEM_BATCH_SIZE, commit_lsn);
			i = 0;
		}
	}

	/* make sure we insert the remainder! */
	if (i)
		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);

	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);
}

/*
 * Sort transaction items prior to running precommit operations. This will
 * attempt to order the items such that they will always be locked in the same
 * order. Items that have no sort function are moved to the end of the list
 * and so are locked last.
 *
 * This may need refinement as different types of objects add sort functions.
 *
 * Function is more complex than it needs to be because we are comparing 64 bit
 * values and the function only returns 32 bit values.
 */
static int
xfs_trans_precommit_sort(
	void			*unused_arg,
	const struct list_head	*a,
	const struct list_head	*b)
{
	struct xfs_log_item	*lia = container_of(a,
					struct xfs_log_item, li_trans);
	struct xfs_log_item	*lib = container_of(b,
					struct xfs_log_item, li_trans);
	int64_t			diff;

	/*
	 * If both items are non-sortable, leave them alone. If only one is
	 * sortable, move the non-sortable item towards the end of the list.
	 */
	if (!lia->li_ops->iop_sort && !lib->li_ops->iop_sort)
		return 0;
	if (!lia->li_ops->iop_sort)
		return 1;
	if (!lib->li_ops->iop_sort)
		return -1;

	/* Clamp the 64-bit key difference to the int return {-1, 0, 1}. */
	diff = lia->li_ops->iop_sort(lia) - lib->li_ops->iop_sort(lib);
	if (diff < 0)
		return -1;
	if (diff > 0)
		return 1;
	return 0;
}

/*
 * Run transaction precommit functions.
 *
 * If there is an error in any of the callouts, then stop immediately and
 * trigger a shutdown to abort the transaction. There is no recovery possible
 * from errors at this point as the transaction is dirty....
 */
static int
xfs_trans_run_precommits(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_log_item	*lip, *n;
	int			error = 0;

	/*
	 * Sort the item list to avoid ABBA deadlocks with other transactions
	 * running precommit operations that lock multiple shared items such as
	 * inode cluster buffers.
	 */
	list_sort(NULL, &tp->t_items, xfs_trans_precommit_sort);

	/*
	 * Precommit operations can remove the log item from the transaction
	 * if the log item exists purely to delay modifications until they
	 * can be ordered against other operations. Hence we have to use
	 * list_for_each_entry_safe() here.
	 */
	list_for_each_entry_safe(lip, n, &tp->t_items, li_trans) {
		if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
			continue;
		if (lip->li_ops->iop_precommit) {
			error = lip->li_ops->iop_precommit(tp, lip);
			if (error)
				break;
		}
	}
	if (error)
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return error;
}

/*
 * Commit the given transaction to the log.
 *
 * XFS disk error handling mechanism is not based on a typical
 * transaction abort mechanism. Logically after the filesystem
 * gets marked 'SHUTDOWN', we can't let any new transactions
 * be durable - ie. committed to disk - because some metadata might
 * be inconsistent. In such cases, this returns an error, and the
 * caller may assume that all locked objects joined to the transaction
 * have already been unlocked as if the commit had succeeded.
 * Do not reference the transaction structure after this call.
 */
static int
__xfs_trans_commit(
	struct xfs_trans	*tp,
	bool			regrant)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xlog		*log = mp->m_log;
	xfs_csn_t		commit_seq = 0;
	int			error = 0;
	int			sync = tp->t_flags & XFS_TRANS_SYNC;

	trace_xfs_trans_commit(tp, _RET_IP_);

	error = xfs_trans_run_precommits(tp);
	if (error) {
		if (tp->t_flags & XFS_TRANS_PERM_LOG_RES)
			xfs_defer_cancel(tp);
		goto out_unreserve;
	}

	/*
	 * Finish deferred items on final commit. Only permanent transactions
	 * should ever have deferred ops.
	 */
	WARN_ON_ONCE(!list_empty(&tp->t_dfops) &&
		     !(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
	if (!regrant && (tp->t_flags & XFS_TRANS_PERM_LOG_RES)) {
		/* NOTE: this may roll the transaction, replacing *tp. */
		error = xfs_defer_finish_noroll(&tp);
		if (error)
			goto out_unreserve;

		/* Run precommits from final tx in defer chain. */
		error = xfs_trans_run_precommits(tp);
		if (error)
			goto out_unreserve;
	}

	/*
	 * If there is nothing to be logged by the transaction,
	 * then unlock all of the items associated with the
	 * transaction and free the transaction structure.
	 * Also make sure to return any reserved blocks to
	 * the free pool.
	 */
	if (!(tp->t_flags & XFS_TRANS_DIRTY))
		goto out_unreserve;

	/*
	 * We must check against log shutdown here because we cannot abort log
	 * items and leave them dirty, inconsistent and unpinned in memory while
	 * the log is active. This leaves them open to being written back to
	 * disk, and that will lead to on-disk corruption.
	 */
	if (xlog_is_shutdown(log)) {
		error = -EIO;
		goto out_unreserve;
	}

	ASSERT(tp->t_ticket != NULL);

	/*
	 * If we need to update the superblock, then do it now.
	 */
	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
		xfs_trans_apply_sb_deltas(tp);
	xfs_trans_apply_dquot_deltas(tp);

	/* Hand the transaction to the CIL; it owns the log items from here. */
	xlog_cil_commit(log, tp, &commit_seq, regrant);

	xfs_trans_free(tp);

	/*
	 * If the transaction needs to be synchronous, then force the
	 * log out now and wait for it.
	 */
	if (sync) {
		error = xfs_log_force_seq(mp, commit_seq, XFS_LOG_SYNC, NULL);
		XFS_STATS_INC(mp, xs_trans_sync);
	} else {
		XFS_STATS_INC(mp, xs_trans_async);
	}

	return error;

out_unreserve:
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * It is indeed possible for the transaction to be not dirty but
	 * the dqinfo portion to be. All that means is that we have some
	 * (non-persistent) quota reservations that need to be unreserved.
	 */
	xfs_trans_unreserve_and_mod_dquots(tp);
	if (tp->t_ticket) {
		/* Keep the ticket for a roll only while the log is alive. */
		if (regrant && !xlog_is_shutdown(log))
			xfs_log_ticket_regrant(log, tp->t_ticket);
		else
			xfs_log_ticket_ungrant(log, tp->t_ticket);
		tp->t_ticket = NULL;
	}
	xfs_trans_free_items(tp, !!error);
	xfs_trans_free(tp);

	XFS_STATS_INC(mp, xs_trans_empty);
	return error;
}

/* Commit the transaction for good; callers cannot reference it afterwards. */
int
xfs_trans_commit(
	struct xfs_trans	*tp)
{
	return __xfs_trans_commit(tp, false);
}

/*
 * Unlock all of the transaction's items and free the transaction. If the
 * transaction is dirty, we must shut down the filesystem because there is no
 * way to restore them to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release it as
 * well.
 *
 * This is a high level function (equivalent to xfs_trans_commit()) and so can
 * be called after the transaction has effectively been aborted due to the mount
 * being shut down.
 * However, if the mount has not been shut down and the
 * transaction is dirty we will shut the mount down and, in doing so, that
 * guarantees that the log is shut down, too. Hence we don't need to be as
 * careful with shutdown state and dirty items here as we need to be in
 * xfs_trans_commit().
 */
void
xfs_trans_cancel(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xlog		*log = mp->m_log;
	bool			dirty = (tp->t_flags & XFS_TRANS_DIRTY);

	trace_xfs_trans_cancel(tp, _RET_IP_);

	/*
	 * It's never valid to cancel a transaction with deferred ops attached,
	 * because the transaction is effectively dirty. Complain about this
	 * loudly before freeing the in-memory defer items and shutting down the
	 * filesystem.
	 */
	if (!list_empty(&tp->t_dfops)) {
		ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
		dirty = true;
		xfs_defer_cancel(tp);
	}

	/*
	 * See if the caller is relying on us to shut down the filesystem. We
	 * only want an error report if there isn't already a shutdown in
	 * progress, so we only need to check against the mount shutdown state
	 * here.
	 */
	if (dirty && !xfs_is_shutdown(mp)) {
		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}
#ifdef DEBUG
	/* Log items need to be consistent until the log is shut down. */
	if (!dirty && !xlog_is_shutdown(log)) {
		struct xfs_log_item *lip;

		list_for_each_entry(lip, &tp->t_items, li_trans)
			ASSERT(!xlog_item_is_intent_done(lip));
	}
#endif
	xfs_trans_unreserve_and_mod_sb(tp);
	xfs_trans_unreserve_and_mod_dquots(tp);

	if (tp->t_ticket) {
		xfs_log_ticket_ungrant(log, tp->t_ticket);
		tp->t_ticket = NULL;
	}

	xfs_trans_free_items(tp, dirty);
	xfs_trans_free(tp);
}

/*
 * Roll from one trans in the sequence of PERMANENT transactions to
 * the next: permanent transactions are only flushed out when
 * committed with xfs_trans_commit(), but we still want as soon
 * as possible to let chunks of it go to the log. So we commit the
 * chunk we've been working on and get a new transaction to continue.
 */
int
xfs_trans_roll(
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*trans = *tpp;
	struct xfs_trans_res	tres;
	int			error;

	trace_xfs_trans_roll(trans, _RET_IP_);

	/*
	 * Copy the critical parameters from one trans to the next.
	 */
	tres.tr_logres = trans->t_log_res;
	tres.tr_logcount = trans->t_log_count;

	/* The dup becomes the caller's transaction before we commit ours. */
	*tpp = xfs_trans_dup(trans);

	/*
	 * Commit the current transaction.
	 * If this commit failed, then it'd just unlock those items that
	 * are not marked ihold. That also means that a filesystem shutdown
	 * is in progress. The caller takes the responsibility to cancel
	 * the duplicate transaction that gets returned.
	 */
	error = __xfs_trans_commit(trans, true);
	if (error)
		return error;

	/*
	 * Reserve space in the log for the next transaction.
	 * This also pushes items in the "AIL", the list of logged items,
	 * out to disk if they are taking up space at the tail of the log
	 * that we want to use. This requires that either nothing be locked
	 * across this call, or that anything that is locked be logged in
	 * the prior and the next transactions.
	 */
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	return xfs_trans_reserve(*tpp, &tres, 0, 0);
}

/*
 * Allocate a transaction, lock and join the inode to it, and reserve quota.
 *
 * The caller must ensure that the on-disk dquots attached to this inode have
 * already been allocated and initialized. The caller is responsible for
 * releasing ILOCK_EXCL if a new transaction is returned.
 */
int
xfs_trans_alloc_inode(
	struct xfs_inode	*ip,
	struct xfs_trans_res	*resv,
	unsigned int		dblocks,
	unsigned int		rblocks,
	bool			force,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, resv, dblocks,
			xfs_extlen_to_rtxlen(mp, rblocks),
			force ? XFS_TRANS_RESERVE : 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_qm_dqattach_locked(ip, false);
Wong if (error) { 12143a1af6c3SDarrick J. Wong /* Caller should have allocated the dquots! */ 12153a1af6c3SDarrick J. Wong ASSERT(error != -ENOENT); 12163a1af6c3SDarrick J. Wong goto out_cancel; 12173a1af6c3SDarrick J. Wong } 12183a1af6c3SDarrick J. Wong 12193de4eb10SDarrick J. Wong error = xfs_trans_reserve_quota_nblks(tp, ip, dblocks, rblocks, force); 1220766aabd5SDarrick J. Wong if ((error == -EDQUOT || error == -ENOSPC) && !retried) { 1221766aabd5SDarrick J. Wong xfs_trans_cancel(tp); 1222766aabd5SDarrick J. Wong xfs_iunlock(ip, XFS_ILOCK_EXCL); 1223766aabd5SDarrick J. Wong xfs_blockgc_free_quota(ip, 0); 1224766aabd5SDarrick J. Wong retried = true; 1225766aabd5SDarrick J. Wong goto retry; 1226766aabd5SDarrick J. Wong } 12273a1af6c3SDarrick J. Wong if (error) 12283a1af6c3SDarrick J. Wong goto out_cancel; 12293a1af6c3SDarrick J. Wong 12303a1af6c3SDarrick J. Wong *tpp = tp; 12313a1af6c3SDarrick J. Wong return 0; 12323a1af6c3SDarrick J. Wong 12333a1af6c3SDarrick J. Wong out_cancel: 12343a1af6c3SDarrick J. Wong xfs_trans_cancel(tp); 12353a1af6c3SDarrick J. Wong xfs_iunlock(ip, XFS_ILOCK_EXCL); 12363a1af6c3SDarrick J. Wong return error; 12373a1af6c3SDarrick J. Wong } 1238f2f7b9ffSDarrick J. Wong 1239f2f7b9ffSDarrick J. Wong /* 12408f71bedeSDarrick J. Wong * Try to reserve more blocks for a transaction. 12418f71bedeSDarrick J. Wong * 12428f71bedeSDarrick J. Wong * This is for callers that need to attach resources to a transaction, scan 12438f71bedeSDarrick J. Wong * those resources to determine the space reservation requirements, and then 12448f71bedeSDarrick J. Wong * modify the attached resources. In other words, online repair. This can 12458f71bedeSDarrick J. Wong * fail due to ENOSPC, so the caller must be able to cancel the transaction 12468f71bedeSDarrick J. Wong * without shutting down the fs. 12478f71bedeSDarrick J. Wong */ 12488f71bedeSDarrick J. Wong int 12498f71bedeSDarrick J. Wong xfs_trans_reserve_more( 12508f71bedeSDarrick J. 
	struct xfs_trans	*tp,
	unsigned int		blocks,
	unsigned int		rtextents)
{
	/* Empty res: only add blocks/rtextents to the existing reservation. */
	struct xfs_trans_res	resv = { };

	return xfs_trans_reserve(tp, &resv, blocks, rtextents);
}

/*
 * Try to reserve more blocks and file quota for a transaction.  Same
 * conditions of usage as xfs_trans_reserve_more.
 */
int
xfs_trans_reserve_more_inode(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	unsigned int		dblocks,
	unsigned int		rblocks,
	bool			force_quota)
{
	struct xfs_trans_res	resv = { };
	struct xfs_mount	*mp = ip->i_mount;
	unsigned int		rtx = xfs_extlen_to_rtxlen(mp, rblocks);
	int			error;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);

	/* Grab the additional disk and realtime space first. */
	error = xfs_trans_reserve(tp, &resv, dblocks, rtx);
	if (error)
		return error;

	/* No quota reservation when quota is off or for the quota inodes. */
	if (!XFS_IS_QUOTA_ON(mp) || xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return 0;

	/* A reserve-pool transaction forces the quota reservation too. */
	if (tp->t_flags & XFS_TRANS_RESERVE)
		force_quota = true;

	error = xfs_trans_reserve_quota_nblks(tp, ip, dblocks, rblocks,
			force_quota);
	if (!error)
		return 0;

	/* Quota failed, give back the new reservation. */
	xfs_mod_fdblocks(mp, dblocks, tp->t_flags & XFS_TRANS_RESERVE);
	tp->t_blk_res -= dblocks;
	xfs_mod_frextents(mp, rtx);
	tp->t_rtx_res -= rtx;
	return error;
}

/*
 * Allocate a transaction in preparation for inode creation by reserving quota
 * against the given dquots.  Callers are not required to hold any inode locks.
 */
int
xfs_trans_alloc_icreate(
	struct xfs_mount	*mp,
	struct xfs_trans_res	*resv,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	unsigned int		dblocks,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, resv, dblocks, 0, 0, &tp);
	if (error)
		return error;

	error = xfs_trans_reserve_quota_icreate(tp, udqp, gdqp, pdqp, dblocks);
	/*
	 * First space/quota failure: cancel, run blockgc against the given
	 * dquots to reclaim speculative preallocations, and retry once.
	 */
	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
		xfs_trans_cancel(tp);
		xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
		retried = true;
		goto retry;
	}
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	*tpp = tp;
	return 0;
}

/*
 * Allocate a transaction, lock and join the inode to it, and reserve quota
 * in preparation for inode attribute changes that include uid, gid, or prid
 * changes.
 *
 * The caller must ensure that the on-disk dquots attached to this inode have
 * already been allocated and initialized.  The ILOCK will be dropped when the
 * transaction is committed or cancelled.
 */
int
xfs_trans_alloc_ichange(
	struct xfs_inode	*ip,
	struct xfs_dquot	*new_udqp,
	struct xfs_dquot	*new_gdqp,
	struct xfs_dquot	*new_pdqp,
	bool			force,
	struct xfs_trans	**tpp)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*udqp;
	struct xfs_dquot	*gdqp;
	struct xfs_dquot	*pdqp;
	bool			retried = false;
	int			error;

retry:
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		return error;

	/*
	 * Joining with XFS_ILOCK_EXCL hands the lock to the transaction, so
	 * commit/cancel will release it — no manual unlock on error paths.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	error = xfs_qm_dqattach_locked(ip, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	/*
	 * For each quota type, skip quota reservations if the inode's dquots
	 * now match the ones that came from the caller, or the caller didn't
	 * pass one in.  The inode's dquots can change if we drop the ILOCK to
	 * perform a blockgc scan, so we must preserve the caller's arguments.
	 */
	udqp = (new_udqp != ip->i_udquot) ? new_udqp : NULL;
	gdqp = (new_gdqp != ip->i_gdquot) ? new_gdqp : NULL;
	pdqp = (new_pdqp != ip->i_pdquot) ? new_pdqp : NULL;
	if (udqp || gdqp || pdqp) {
		unsigned int	qflags = XFS_QMOPT_RES_REGBLKS;

		if (force)
			qflags |= XFS_QMOPT_FORCE_RES;

		/*
		 * Reserve enough quota to handle blocks on disk and reserved
		 * for a delayed allocation.  We'll actually transfer the
		 * delalloc reservation between dquots at chown time, even
		 * though that part is only semi-transactional.
		 */
		error = xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp,
				pdqp, ip->i_nblocks + ip->i_delayed_blks,
				1, qflags);
		if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
			xfs_trans_cancel(tp);
			xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, 0);
			retried = true;
			goto retry;
		}
		if (error)
			goto out_cancel;
	}

	*tpp = tp;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	return error;
}

/*
 * Allocate a transaction, lock and join the directory and child inodes to it,
 * and reserve quota for a directory update.  If there isn't sufficient space,
 * @dblocks will be set to zero for a reservationless directory update and
 * @nospace_error will be set to a negative errno describing the space
 * constraint we hit.
 *
 * The caller must ensure that the on-disk dquots attached to this inode have
 * already been allocated and initialized.  The ILOCKs will be dropped when the
 * transaction is committed or cancelled.
 *
 * Caller is responsible for unlocking the inodes manually upon return
 */
int
xfs_trans_alloc_dir(
	struct xfs_inode	*dp,
	struct xfs_trans_res	*resv,
	struct xfs_inode	*ip,
	unsigned int		*dblocks,
	struct xfs_trans	**tpp,
	int			*nospace_error)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	unsigned int		resblks;
	bool			retried = false;
	int			error;

retry:
	*nospace_error = 0;
	resblks = *dblocks;
	error = xfs_trans_alloc(mp, resv, resblks, 0, 0, &tp);
	if (error == -ENOSPC) {
		/*
		 * Out of space: remember the error for the caller and retry
		 * the allocation with no block reservation at all.
		 */
		*nospace_error = error;
		resblks = 0;
		error = xfs_trans_alloc(mp, resv, resblks, 0, 0, &tp);
	}
	if (error)
		return error;

	/* Takes both ILOCKs in the correct order to avoid ABBA deadlocks. */
	xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);

	/* Lock flag 0: ILOCKs stay with the caller, not the transaction. */
	xfs_trans_ijoin(tp, dp, 0);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_qm_dqattach_locked(dp, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	error = xfs_qm_dqattach_locked(ip, false);
	if (error) {
		/* Caller should have allocated the dquots! */
		ASSERT(error != -ENOENT);
		goto out_cancel;
	}

	/* Reservationless update: nothing to charge to quota either. */
	if (resblks == 0)
		goto done;

	error = xfs_trans_reserve_quota_nblks(tp, dp, resblks, 0, false);
	if (error == -EDQUOT || error == -ENOSPC) {
		if (!retried) {
			/*
			 * First failure: drop everything, flush speculative
			 * preallocations charged to dp's quota, and restart
			 * from the top exactly once.
			 */
			xfs_trans_cancel(tp);
			xfs_iunlock(dp, XFS_ILOCK_EXCL);
			if (dp != ip)
				xfs_iunlock(ip, XFS_ILOCK_EXCL);
			xfs_blockgc_free_quota(dp, 0);
			retried = true;
			goto retry;
		}

		/* Fall back to an unreserved update, reporting why. */
		*nospace_error = error;
		resblks = 0;
		error = 0;
	}
	if (error)
		goto out_cancel;

done:
	*tpp = tp;
	*dblocks = resblks;
	return 0;

out_cancel:
	/* Note: inodes remain locked; the caller owns the unlocks. */
	xfs_trans_cancel(tp);
	return error;
}