// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_trace.h"

/*
 * Check to see if a buffer matching the given parameters is already
 * a part of the given transaction.
 */
STATIC struct xfs_buf *
xfs_trans_buf_item_match(
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps)
{
	struct xfs_log_item	*lip;
	struct xfs_buf_log_item	*blip;
	int			len = 0;
	int			i;

	for (i = 0; i < nmaps; i++)
		len += map[i].bm_len;

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		blip = (struct xfs_buf_log_item *)lip;
		if (blip->bli_item.li_type == XFS_LI_BUF &&
		    blip->bli_buf->b_target == target &&
		    XFS_BUF_ADDR(blip->bli_buf) == map[0].bm_bn &&
		    blip->bli_buf->b_length == len) {
			ASSERT(blip->bli_buf->b_map_count == nmaps);
			return blip->bli_buf;
		}
	}

	return NULL;
}

/*
 * Add the locked buffer to the transaction.
 *
 * The buffer must be locked, and it cannot be associated with any
 * transaction.
 *
 * If the buffer does not yet have a buf log item associated with it,
 * then allocate one for it.  Then add the buf item to the transaction.
 */
STATIC void
_xfs_trans_bjoin(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	int			reset_recur)
{
	struct xfs_buf_log_item	*bip;

	ASSERT(bp->b_transp == NULL);

	/*
	 * The xfs_buf_log_item pointer is stored in b_log_item.  If
	 * it doesn't have one yet, then allocate one and initialize it.
	 * The checks to see if one is there are in xfs_buf_item_init().
	 */
	xfs_buf_item_init(bp, tp->t_mountp);
	bip = bp->b_log_item;
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
	if (reset_recur)
		bip->bli_recur = 0;

	/*
	 * Take a reference for this transaction on the buf item.
	 */
	atomic_inc(&bip->bli_refcount);

	/*
	 * Attach the item to the transaction so we can find it in
	 * xfs_trans_get_buf() and friends.
	 */
	xfs_trans_add_item(tp, &bip->bli_item);
	bp->b_transp = tp;
}

void
xfs_trans_bjoin(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	_xfs_trans_bjoin(tp, bp, 0);
	trace_xfs_trans_bjoin(bp->b_log_item);
}

/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it is already locked
 * within the transaction, just increment its lock recursion count
 * and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * get_buf() call.
 */
int
xfs_trans_get_buf_map(
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp)
{
	struct xfs_buf		*bp;
	struct xfs_buf_log_item	*bip;
	int			error;

	*bpp = NULL;
	if (!tp)
		return xfs_buf_get_map(target, map, nmaps, flags, bpp);

	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_transp field, then we know we already
	 * have it locked.  In this case we just increment the lock
	 * recursion count and return the buffer to the caller.
	 */
	bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
	if (bp != NULL) {
		ASSERT(xfs_buf_islocked(bp));
		if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
			xfs_buf_stale(bp);
			bp->b_flags |= XBF_DONE;
		}

		ASSERT(bp->b_transp == tp);
		bip = bp->b_log_item;
		ASSERT(bip != NULL);
		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		bip->bli_recur++;
		trace_xfs_trans_get_buf_recur(bip);
		*bpp = bp;
		return 0;
	}

	error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
	if (error)
		return error;

	ASSERT(!bp->b_error);

	_xfs_trans_bjoin(tp, bp, 1);
	trace_xfs_trans_get_buf(bp->b_log_item);
	*bpp = bp;
	return 0;
}
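
/*
 * Illustrative sketch (not part of the original file): the common pattern a
 * caller follows through the thin xfs_trans_get_buf() wrapper in xfs_trans.h.
 * The transaction, buffer target and block geometry (blkno, numblks) are
 * placeholders supplied by the caller; error handling is trimmed.
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, numblks, 0, &bp);
 *	if (error)
 *		return error;
 *	// ... initialise or modify the buffer contents ...
 *	xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
 *	// the buffer stays locked and attached until the transaction commits
 */
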
/*
 * Get and lock the superblock buffer for the given transaction.
 */
struct xfs_buf *
xfs_trans_getsb(
	struct xfs_trans	*tp)
{
	struct xfs_buf		*bp = tp->t_mountp->m_sb_bp;

	/*
	 * Just increment the lock recursion count if the buffer is already
	 * attached to this transaction.
	 */
	if (bp->b_transp == tp) {
		struct xfs_buf_log_item	*bip = bp->b_log_item;

		ASSERT(bip != NULL);
		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		bip->bli_recur++;

		trace_xfs_trans_getsb_recur(bip);
	} else {
		xfs_buf_lock(bp);
		xfs_buf_hold(bp);
		_xfs_trans_bjoin(tp, bp, 1);

		trace_xfs_trans_getsb(bp->b_log_item);
	}

	return bp;
}

/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it has not yet been
 * read in, read it from disk.  If it is already locked
 * within the transaction and already read in, just increment its
 * lock recursion count and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * read_buf() call.
 */
int
xfs_trans_read_buf_map(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp = NULL;
	struct xfs_buf_log_item	*bip;
	int			error;

	*bpp = NULL;
	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_transp field, then we know we already
	 * have it locked.  If it is already read in we just increment
	 * the lock recursion count and return the buffer to the caller.
	 * If the buffer is not yet read in, then we read it in, increment
	 * the lock recursion count, and return it to the caller.
	 */
	if (tp)
		bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
	if (bp) {
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(bp->b_transp == tp);
		ASSERT(bp->b_log_item != NULL);
		ASSERT(!bp->b_error);
		ASSERT(bp->b_flags & XBF_DONE);

		/*
		 * We never locked this buf ourselves, so we shouldn't
		 * brelse it either.  Just get out.
		 */
		if (XFS_FORCED_SHUTDOWN(mp)) {
			trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
			return -EIO;
		}

		/*
		 * Check if the caller is trying to read a buffer that is
		 * already attached to the transaction yet has no buffer ops
		 * assigned.  Ops are usually attached when the buffer is
		 * attached to the transaction, or by the read caller in
		 * special circumstances.  That didn't happen, which is not
		 * how this is supposed to go.
		 *
		 * If the buffer passes verification we'll let this go, but if
		 * not we have to shut down.  Let the transaction cleanup code
		 * release this buffer when it kills the transaction.
		 */
		ASSERT(bp->b_ops != NULL);
		error = xfs_buf_reverify(bp, ops);
		if (error) {
			xfs_buf_ioerror_alert(bp, __return_address);

			if (tp->t_flags & XFS_TRANS_DIRTY)
				xfs_force_shutdown(tp->t_mountp,
						SHUTDOWN_META_IO_ERROR);

			/* bad CRC means corrupted metadata */
			if (error == -EFSBADCRC)
				error = -EFSCORRUPTED;
			return error;
		}

		bip = bp->b_log_item;
		bip->bli_recur++;

		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		trace_xfs_trans_read_buf_recur(bip);
		ASSERT(bp->b_ops != NULL || ops == NULL);
		*bpp = bp;
		return 0;
	}

	error = xfs_buf_read_map(target, map, nmaps, flags, &bp, ops,
			__return_address);
	switch (error) {
	case 0:
		break;
	default:
		if (tp && (tp->t_flags & XFS_TRANS_DIRTY))
			xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
		/* fall through */
	case -ENOMEM:
	case -EAGAIN:
		return error;
	}

	if (XFS_FORCED_SHUTDOWN(mp)) {
		xfs_buf_relse(bp);
		trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
		return -EIO;
	}

	if (tp) {
		_xfs_trans_bjoin(tp, bp, 1);
		trace_xfs_trans_read_buf(bp->b_log_item);
	}
	ASSERT(bp->b_ops != NULL || ops == NULL);
	*bpp = bp;
	return 0;
}

/* Has this buffer been dirtied by anyone? */
bool
xfs_trans_buf_is_dirty(
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	if (!bip)
		return false;
	ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
	return test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags);
}
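
/*
 * Illustrative sketch (not part of the original file): reading a verified
 * metadata buffer inside a transaction via the xfs_trans_read_buf() wrapper
 * from xfs_trans.h.  The verifier ops pointer (buf_ops) and block geometry
 * are placeholders supplied by the caller.
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, blkno, numblks,
 *			0, &bp, buf_ops);
 *	if (error)
 *		return error;
 *	// bp is now locked, joined to tp and has passed read verification
 */
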
/*
 * Release a buffer previously joined to the transaction.  If the buffer is
 * modified within this transaction, decrement the recursion count but do not
 * release the buffer even if the count goes to 0.  If the buffer is not
 * modified within the transaction, decrement the recursion count and release
 * the buffer if the recursion count goes to 0.
 *
 * If the buffer is to be released and it was not already dirty before this
 * transaction began, then also free the buf_log_item associated with it.
 *
 * If the transaction pointer is NULL, this is a normal xfs_buf_relse() call.
 */
void
xfs_trans_brelse(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);

	if (!tp) {
		xfs_buf_relse(bp);
		return;
	}

	trace_xfs_trans_brelse(bip);
	ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	/*
	 * If the release is for a recursive lookup, then decrement the count
	 * and return.
	 */
	if (bip->bli_recur > 0) {
		bip->bli_recur--;
		return;
	}

	/*
	 * If the buffer is invalidated or dirty in this transaction, we can't
	 * release it until we commit.
	 */
	if (test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags))
		return;
	if (bip->bli_flags & XFS_BLI_STALE)
		return;

	/*
	 * Unlink the log item from the transaction and clear the hold flag, if
	 * set.  We wouldn't want the next user of the buffer to get confused.
	 */
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
	xfs_trans_del_item(&bip->bli_item);
	bip->bli_flags &= ~XFS_BLI_HOLD;

	/* drop the reference to the bli */
	xfs_buf_item_put(bip);

	bp->b_transp = NULL;
	xfs_buf_relse(bp);
}
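
/*
 * Illustrative sketch (not part of the original file): a read-only lookup
 * that releases the buffer once the caller has copied what it needs.  Nothing
 * was logged, so xfs_trans_brelse() drops the buffer from the transaction
 * immediately rather than holding it until commit.
 *
 *	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, blkno, numblks,
 *			0, &bp, buf_ops);
 *	if (error)
 *		return error;
 *	// ... copy the needed fields out of bp->b_addr ...
 *	xfs_trans_brelse(tp, bp);
 */
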
/*
 * Mark the buffer as not needing to be unlocked when the buf item's
 * iop_committing() routine is called.  The buffer must already be locked
 * and associated with the given transaction.
 */
/* ARGSUSED */
void
xfs_trans_bhold(
	xfs_trans_t		*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_HOLD;
	trace_xfs_trans_bhold(bip);
}

/*
 * Cancel the previous buffer hold request made on this buffer
 * for this transaction.
 */
void
xfs_trans_bhold_release(
	xfs_trans_t		*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT(bip->bli_flags & XFS_BLI_HOLD);

	bip->bli_flags &= ~XFS_BLI_HOLD;
	trace_xfs_trans_bhold_release(bip);
}
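
/*
 * Illustrative sketch (not part of the original file): keeping a buffer
 * locked across a transaction roll.  xfs_trans_bhold() stops the commit from
 * unlocking the buffer, and xfs_trans_bjoin() attaches it to the follow-up
 * transaction.  xfs_trans_roll() here stands for whichever commit/allocate
 * sequence the caller uses.
 *
 *	xfs_trans_bhold(tp, bp);
 *	error = xfs_trans_roll(&tp);
 *	if (error)
 *		return error;
 *	xfs_trans_bjoin(tp, bp);
 */
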
/*
 * Mark a buffer dirty in the transaction.
 */
void
xfs_trans_dirty_buf(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);

	/*
	 * Mark the buffer as needing to be written out eventually,
	 * and set its iodone function to remove the buffer's buf log
	 * item from the AIL and free it when the buffer is flushed
	 * to disk.
	 */
	bp->b_flags |= XBF_DONE;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	/*
	 * If we invalidated the buffer within this transaction, then
	 * cancel the invalidation now that we're dirtying the buffer
	 * again.  There are no races with the code in xfs_buf_item_unpin(),
	 * because we have a reference to the buffer this entire time.
	 */
	if (bip->bli_flags & XFS_BLI_STALE) {
		bip->bli_flags &= ~XFS_BLI_STALE;
		ASSERT(bp->b_flags & XBF_STALE);
		bp->b_flags &= ~XBF_STALE;
		bip->__bli_format.blf_flags &= ~XFS_BLF_CANCEL;
	}
	bip->bli_flags |= XFS_BLI_DIRTY | XFS_BLI_LOGGED;

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags);
}

/*
 * This is called to mark bytes first through last inclusive of the given
 * buffer as needing to be logged when the transaction is committed.
 * The buffer must already be associated with the given transaction.
 *
 * First and last are numbers relative to the beginning of this buffer,
 * so the first byte in the buffer is numbered 0 regardless of the
 * value of b_blkno.
 */
void
xfs_trans_log_buf(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	uint			first,
	uint			last)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(first <= last && last < BBTOB(bp->b_length));
	ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED));

	xfs_trans_dirty_buf(tp, bp);

	trace_xfs_trans_log_buf(bip);
	xfs_buf_item_log(bip, first, last);
}
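
/*
 * Illustrative sketch (not part of the original file): callers only need to
 * log the byte range they changed.  Logging just a header structure at the
 * start of the buffer, versus logging the whole buffer, might look like:
 *
 *	xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_some_hdr) - 1);
 *	xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
 *
 * "struct xfs_some_hdr" is a placeholder for whatever on-disk structure the
 * caller modified; first and last are byte offsets relative to the buffer.
 */
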
/*
 * Invalidate a buffer that is being used within a transaction.
 *
 * Typically this is because the blocks in the buffer are being freed, so we
 * need to prevent it from being written out when we're done.  Allowing it
 * to be written again might overwrite data in the free blocks if they are
 * reallocated to a file.
 *
 * We prevent the buffer from being written out by marking it stale.  We can't
 * get rid of the buf log item at this point because the buffer may still be
 * pinned by another transaction.  If that is the case, then we'll wait until
 * the buffer is committed to disk for the last time (we can tell by the ref
 * count) and free it in xfs_buf_item_unpin().  Until that happens we will
 * keep the buffer locked so that the buffer and buf log item are not reused.
 *
 * We also set the XFS_BLF_CANCEL flag in the buf log format structure and log
 * the buf item.  This will be used at recovery time to determine that copies
 * of the buffer in the log before this should not be replayed.
 *
 * We mark the item descriptor and the transaction dirty so that we'll hold
 * the buffer until after the commit.
 *
 * Since we're invalidating the buffer, we also clear the state about which
 * parts of the buffer have been logged.  We also clear the flag indicating
 * that this is an inode buffer since the data in the buffer will no longer
 * be valid.
 *
 * We set the stale bit in the buffer as well since we're getting rid of it.
 */
void
xfs_trans_binval(
	xfs_trans_t		*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	int			i;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_trans_binval(bip);

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * If the buffer is already invalidated, then
		 * just return.
		 */
		ASSERT(bp->b_flags & XBF_STALE);
		ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
		ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_INODE_BUF));
		ASSERT(!(bip->__bli_format.blf_flags & XFS_BLFT_MASK));
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		ASSERT(test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags));
		ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
		return;
	}

	xfs_buf_stale(bp);

	bip->bli_flags |= XFS_BLI_STALE;
	bip->bli_flags &= ~(XFS_BLI_INODE_BUF | XFS_BLI_LOGGED | XFS_BLI_DIRTY);
	bip->__bli_format.blf_flags &= ~XFS_BLF_INODE_BUF;
	bip->__bli_format.blf_flags |= XFS_BLF_CANCEL;
	bip->__bli_format.blf_flags &= ~XFS_BLFT_MASK;
	for (i = 0; i < bip->bli_format_count; i++) {
		memset(bip->bli_formats[i].blf_data_map, 0,
		       (bip->bli_formats[i].blf_map_size * sizeof(uint)));
	}
	set_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags);
	tp->t_flags |= XFS_TRANS_DIRTY;
}
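
/*
 * Illustrative sketch (not part of the original file): when a metadata block
 * is freed inside a transaction, a caller typically grabs the buffer covering
 * it and invalidates it so the stale contents are never written back:
 *
 *	error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, numblks, 0, &bp);
 *	if (error)
 *		return error;
 *	xfs_trans_binval(tp, bp);
 */
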
/*
 * This call is used to indicate that the buffer contains on-disk inodes which
 * must be handled specially during recovery.  They require special handling
 * because only the di_next_unlinked from the inodes in the buffer should be
 * recovered.  The rest of the data in the buffer is logged via the inodes
 * themselves.
 *
 * All we do is set the XFS_BLI_INODE_BUF flag in the item's flags so it can be
 * transferred to the buffer's log format structure so that we'll know what to
 * do at recovery time.
 */
void
xfs_trans_inode_buf(
	xfs_trans_t		*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_INODE_BUF;
	bp->b_flags |= _XBF_INODES;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}

/*
 * This call is used to indicate that the buffer is going to
 * be staled and was an inode buffer.  This means it gets
 * special processing during unpin - where any inodes
 * associated with the buffer should be removed from the AIL.
 * There is also special processing during recovery:
 * any replay of the inodes in the buffer needs to be
 * prevented as the buffer may have been reused.
 */
void
xfs_trans_stale_inode_buf(
	xfs_trans_t		*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_STALE_INODE;
	bp->b_flags |= _XBF_INODES;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}

/*
 * Mark the buffer as being one which contains newly allocated
 * inodes.  We need to make sure that even if this buffer is
 * relogged as an 'inode buf' we still recover all of the inode
 * images in the face of a crash.  This works in coordination with
 * xfs_buf_item_committed() to ensure that the buffer remains in the
 * AIL at its original location even after it has been relogged.
 */
/* ARGSUSED */
void
xfs_trans_inode_alloc_buf(
	xfs_trans_t		*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
	bp->b_flags |= _XBF_INODES;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}

/*
 * Mark the buffer as ordered for this transaction.  This means that the
 * contents of the buffer are not recorded in the transaction but it is tracked
 * in the AIL as though it was.  This allows us to record logical changes in
 * transactions rather than the physical changes we make to the buffer without
 * changing writeback ordering constraints of metadata buffers.
 */
bool
xfs_trans_ordered_buf(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	if (xfs_buf_item_dirty_format(bip))
		return false;

	bip->bli_flags |= XFS_BLI_ORDERED;
	trace_xfs_buf_item_ordered(bip);

	/*
	 * We don't log a dirty range of an ordered buffer, but it still needs
	 * to be marked dirty and flagged as logged.
	 */
	xfs_trans_dirty_buf(tp, bp);
	return true;
}
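
/*
 * Illustrative sketch (not part of the original file): ordering is refused
 * when the buf item already has a dirty region recorded, so a caller falls
 * back to logging the whole buffer in that case:
 *
 *	if (!xfs_trans_ordered_buf(tp, bp))
 *		xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
 */
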
/*
 * Set the type of the buffer for log recovery so that recovery can correctly
 * identify the buffer and hence attach the correct buffer ops to it after
 * replay.
 */
void
xfs_trans_buf_set_type(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	enum xfs_blft		type)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	if (!tp)
		return;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	xfs_blft_to_flags(&bip->__bli_format, type);
}

void
xfs_trans_buf_copy_type(
	struct xfs_buf		*dst_bp,
	struct xfs_buf		*src_bp)
{
	struct xfs_buf_log_item	*sbip = src_bp->b_log_item;
	struct xfs_buf_log_item	*dbip = dst_bp->b_log_item;
	enum xfs_blft		type;

	type = xfs_blft_from_flags(&sbip->__bli_format);
	xfs_blft_to_flags(&dbip->__bli_format, type);
}
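
/*
 * Illustrative sketch (not part of the original file): after initialising a
 * freshly allocated metadata buffer, a caller stamps its type before logging
 * it so recovery can classify it.  XFS_BLFT_AGF_BUF here stands in for
 * whichever xfs_blft value matches the buffer's on-disk format.
 *
 *	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);
 *	xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
 */
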
/*
 * Similar to xfs_trans_inode_buf(), this marks the buffer as a cluster of
 * dquots.  However, unlike in inode buffer recovery, dquot buffers get
 * recovered in their entirety.  (Hence, no XFS_BLI_DQUOT_ALLOC_BUF flag).
 * The only thing that makes dquot buffers different from regular
 * buffers is that we must not replay dquot bufs when recovering
 * if a _corresponding_ quotaoff has happened.  We also have to distinguish
 * between usr dquot bufs and grp dquot bufs, because usr and grp quotas
 * can be turned off independently.
 */
/* ARGSUSED */
void
xfs_trans_dquot_buf(
	xfs_trans_t		*tp,
	struct xfs_buf		*bp,
	uint			type)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(type == XFS_BLF_UDQUOT_BUF ||
	       type == XFS_BLF_PDQUOT_BUF ||
	       type == XFS_BLF_GDQUOT_BUF);

	bip->__bli_format.blf_flags |= type;

	switch (type) {
	case XFS_BLF_UDQUOT_BUF:
		type = XFS_BLFT_UDQUOT_BUF;
		break;
	case XFS_BLF_PDQUOT_BUF:
		type = XFS_BLFT_PDQUOT_BUF;
		break;
	case XFS_BLF_GDQUOT_BUF:
		type = XFS_BLFT_GDQUOT_BUF;
		break;
	default:
		type = XFS_BLFT_UNKNOWN_BUF;
		break;
	}

	bp->b_flags |= _XBF_DQUOTS;
	xfs_trans_buf_set_type(tp, bp, type);
}
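
/*
 * Illustrative sketch (not part of the original file): a user dquot cluster
 * buffer is tagged with its quota type so recovery can skip replaying it if
 * a corresponding quotaoff has been logged since:
 *
 *	xfs_trans_dquot_buf(tp, bp, XFS_BLF_UDQUOT_BUF);
 *	xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
 */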