// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_trace.h"

/*
 * Check to see if a buffer matching the given parameters is already
 * a part of the given transaction.
 */
STATIC struct xfs_buf *
xfs_trans_buf_item_match(
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps)
{
	struct xfs_log_item	*lip;
	struct xfs_buf_log_item	*blip;
	int			len = 0;
	int			i;

	for (i = 0; i < nmaps; i++)
		len += map[i].bm_len;

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		blip = (struct xfs_buf_log_item *)lip;
		if (blip->bli_item.li_type == XFS_LI_BUF &&
		    blip->bli_buf->b_target == target &&
		    xfs_buf_daddr(blip->bli_buf) == map[0].bm_bn &&
		    blip->bli_buf->b_length == len) {
			ASSERT(blip->bli_buf->b_map_count == nmaps);
			return blip->bli_buf;
		}
	}

	return NULL;
}

/*
 * Add the locked buffer to the transaction.
 *
 * The buffer must be locked, and it cannot be associated with any
 * transaction.
 *
 * If the buffer does not yet have a buf log item associated with it,
 * then allocate one for it.  Then add the buf item to the transaction.
 */
STATIC void
_xfs_trans_bjoin(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	int			reset_recur)
{
	struct xfs_buf_log_item	*bip;

	ASSERT(bp->b_transp == NULL);

	/*
	 * The xfs_buf_log_item pointer is stored in b_log_item.  If
	 * it doesn't have one yet, then allocate one and initialize it.
	 * The checks to see if one is there are in xfs_buf_item_init().
	 */
	xfs_buf_item_init(bp, tp->t_mountp);
	bip = bp->b_log_item;
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
	if (reset_recur)
		bip->bli_recur = 0;

	/*
	 * Take a reference for this transaction on the buf item.
	 */
	atomic_inc(&bip->bli_refcount);

	/*
	 * Attach the item to the transaction so we can find it in
	 * xfs_trans_get_buf() and friends.
	 */
	xfs_trans_add_item(tp, &bip->bli_item);
	bp->b_transp = tp;

}

void
xfs_trans_bjoin(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	_xfs_trans_bjoin(tp, bp, 0);
	trace_xfs_trans_bjoin(bp->b_log_item);
}

/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it is already locked
 * within the transaction, just increment its lock recursion count
 * and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * get_buf() call.
 */
int
xfs_trans_get_buf_map(
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp)
{
	struct xfs_buf		*bp;
	struct xfs_buf_log_item	*bip;
	int			error;

	*bpp = NULL;
	if (!tp)
		return xfs_buf_get_map(target, map, nmaps, flags, bpp);

	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_fsprivate2 field, then we know we already
	 * have it locked.  In this case we just increment the lock
	 * recursion count and return the buffer to the caller.
	 */
	bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
	if (bp != NULL) {
		ASSERT(xfs_buf_islocked(bp));
		if (xfs_is_shutdown(tp->t_mountp)) {
			xfs_buf_stale(bp);
			bp->b_flags |= XBF_DONE;
		}

		ASSERT(bp->b_transp == tp);
		bip = bp->b_log_item;
		ASSERT(bip != NULL);
		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		bip->bli_recur++;
		trace_xfs_trans_get_buf_recur(bip);
		*bpp = bp;
		return 0;
	}

	error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
	if (error)
		return error;

	ASSERT(!bp->b_error);

	_xfs_trans_bjoin(tp, bp, 1);
	trace_xfs_trans_get_buf(bp->b_log_item);
	*bpp = bp;
	return 0;
}
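
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * uses the single-map xfs_trans_get_buf() wrapper to initialize a newly
 * allocated metadata block inside a transaction and then logs the range
 * it modified.  The block number and length below are hypothetical.
 *
 *	error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
 *			XFS_FSB_TO_DADDR(mp, fsbno), XFS_FSB_TO_BB(mp, 1),
 *			0, &bp);
 *	if (error)
 *		return error;
 *	... initialize the new block through bp->b_addr ...
 *	xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
 *
 * The buffer remains locked and attached to the transaction until it
 * commits or cancels.
 */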

/*
 * Get and lock the superblock buffer for the given transaction.
 */
struct xfs_buf *
xfs_trans_getsb(
	struct xfs_trans	*tp)
{
	struct xfs_buf		*bp = tp->t_mountp->m_sb_bp;

	/*
	 * Just increment the lock recursion count if the buffer is already
	 * attached to this transaction.
	 */
	if (bp->b_transp == tp) {
		struct xfs_buf_log_item	*bip = bp->b_log_item;

		ASSERT(bip != NULL);
		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		bip->bli_recur++;

		trace_xfs_trans_getsb_recur(bip);
	} else {
		xfs_buf_lock(bp);
		xfs_buf_hold(bp);
		_xfs_trans_bjoin(tp, bp, 1);

		trace_xfs_trans_getsb(bp->b_log_item);
	}

	return bp;
}

/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it has not yet been
 * read in, read it from disk.  If it is already locked
 * within the transaction and already read in, just increment its
 * lock recursion count and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * read_buf() call.
 */
int
xfs_trans_read_buf_map(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp = NULL;
	struct xfs_buf_log_item	*bip;
	int			error;

	*bpp = NULL;
	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_fsprivate2 field, then we know we already
	 * have it locked.  If it is already read in we just increment
	 * the lock recursion count and return the buffer to the caller.
	 * If the buffer is not yet read in, then we read it in, increment
	 * the lock recursion count, and return it to the caller.
	 */
	if (tp)
		bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
	if (bp) {
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(bp->b_transp == tp);
		ASSERT(bp->b_log_item != NULL);
		ASSERT(!bp->b_error);
		ASSERT(bp->b_flags & XBF_DONE);

		/*
		 * We never locked this buf ourselves, so we shouldn't
		 * brelse it either.  Just get out.
		 */
		if (xfs_is_shutdown(mp)) {
			trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
			return -EIO;
		}

		/*
		 * Check if the caller is trying to read a buffer that is
		 * already attached to the transaction yet has no buffer ops
		 * assigned.  Ops are usually attached when the buffer is
		 * attached to the transaction, or by the read caller in
		 * special circumstances.  That didn't happen, which is not
		 * how this is supposed to go.
		 *
		 * If the buffer passes verification we'll let this go, but if
		 * not we have to shut down.  Let the transaction cleanup code
		 * release this buffer when it kills the transaction.
		 */
		ASSERT(bp->b_ops != NULL);
		error = xfs_buf_reverify(bp, ops);
		if (error) {
			xfs_buf_ioerror_alert(bp, __return_address);

			if (tp->t_flags & XFS_TRANS_DIRTY)
				xfs_force_shutdown(tp->t_mountp,
						SHUTDOWN_META_IO_ERROR);

			/* bad CRC means corrupted metadata */
			if (error == -EFSBADCRC)
				error = -EFSCORRUPTED;
			return error;
		}

		bip = bp->b_log_item;
		bip->bli_recur++;

		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		trace_xfs_trans_read_buf_recur(bip);
		ASSERT(bp->b_ops != NULL || ops == NULL);
		*bpp = bp;
		return 0;
	}

	error = xfs_buf_read_map(target, map, nmaps, flags, &bp, ops,
			__return_address);
	switch (error) {
	case 0:
		break;
	default:
		if (tp && (tp->t_flags & XFS_TRANS_DIRTY))
			xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
		fallthrough;
	case -ENOMEM:
	case -EAGAIN:
		return error;
	}

	if (xfs_is_shutdown(mp)) {
		xfs_buf_relse(bp);
		trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
		return -EIO;
	}

	if (tp) {
		_xfs_trans_bjoin(tp, bp, 1);
		trace_xfs_trans_read_buf(bp->b_log_item);
	}
	ASSERT(bp->b_ops != NULL || ops == NULL);
	*bpp = bp;
	return 0;

}

/* Has this buffer been dirtied by anyone? */
bool
xfs_trans_buf_is_dirty(
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	if (!bip)
		return false;
	ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
	return test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags);
}
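
/*
 * Illustrative sketch (not part of the original file): the usual
 * read-modify-log pattern built on the single-map xfs_trans_read_buf()
 * wrapper.  The verifier ops pointer and byte range here are stand-ins
 * for whatever matches the metadata actually being read.
 *
 *	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, daddr,
 *			XFS_FSB_TO_BB(mp, 1), 0, &bp, ops);
 *	if (error)
 *		return error;
 *	... modify the structure at bp->b_addr ...
 *	xfs_trans_log_buf(tp, bp, first_byte, last_byte);
 *
 * On success the buffer is locked and joined to the transaction; a dirty
 * transaction that hits a read error is shut down by the code above.
 */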

/*
 * Release a buffer previously joined to the transaction. If the buffer is
 * modified within this transaction, decrement the recursion count but do not
 * release the buffer even if the count goes to 0. If the buffer is not modified
 * within the transaction, decrement the recursion count and release the buffer
 * if the recursion count goes to 0.
 *
 * If the buffer is to be released and it was not already dirty before this
 * transaction began, then also free the buf_log_item associated with it.
 *
 * If the transaction pointer is NULL, this is a normal xfs_buf_relse() call.
 */
void
xfs_trans_brelse(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);

	if (!tp) {
		xfs_buf_relse(bp);
		return;
	}

	trace_xfs_trans_brelse(bip);
	ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	/*
	 * If the release is for a recursive lookup, then decrement the count
	 * and return.
	 */
	if (bip->bli_recur > 0) {
		bip->bli_recur--;
		return;
	}

	/*
	 * If the buffer is invalidated or dirty in this transaction, we can't
	 * release it until we commit.
	 */
	if (test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags))
		return;
	if (bip->bli_flags & XFS_BLI_STALE)
		return;

	/*
	 * Unlink the log item from the transaction and clear the hold flag, if
	 * set. We wouldn't want the next user of the buffer to get confused.
	 */
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
	xfs_trans_del_item(&bip->bli_item);
	bip->bli_flags &= ~XFS_BLI_HOLD;

	/* drop the reference to the bli */
	xfs_buf_item_put(bip);

	bp->b_transp = NULL;
	xfs_buf_relse(bp);
}
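
/*
 * Illustrative sketch (not part of the original file): read-only access
 * drops the buffer with xfs_trans_brelse() once the caller is done, so a
 * clean buffer does not have to stay attached until commit:
 *
 *	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, daddr,
 *			XFS_FSB_TO_BB(mp, 1), 0, &bp, ops);
 *	if (error)
 *		return error;
 *	... examine bp->b_addr without modifying it ...
 *	xfs_trans_brelse(tp, bp);
 *
 * If the buffer was dirtied or invalidated in this transaction,
 * xfs_trans_brelse() leaves it attached until commit, as the code above
 * shows.
 */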

/*
 * Forcibly detach a buffer previously joined to the transaction.  The caller
 * will retain its locked reference to the buffer after this function returns.
 * The buffer must be completely clean and must not be held to the transaction.
 */
void
xfs_trans_bdetach(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(tp != NULL);
	ASSERT(bp->b_transp == tp);
	ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_trans_bdetach(bip);

	/*
	 * Erase the recursion count, since we're removing this buffer from the
	 * transaction.
	 */
	bip->bli_recur = 0;

	/*
	 * The buffer must be completely clean.  Specifically, it had better
	 * not be dirty, stale, logged, ordered, or held to the transaction.
	 */
	ASSERT(!test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags));
	ASSERT(!(bip->bli_flags & XFS_BLI_DIRTY));
	ASSERT(!(bip->bli_flags & XFS_BLI_HOLD));
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
	ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED));
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

	/* Unlink the log item from the transaction and drop the log item. */
	xfs_trans_del_item(&bip->bli_item);
	xfs_buf_item_put(bip);
	bp->b_transp = NULL;
}

/*
 * Mark the buffer as not needing to be unlocked when the buf item's
 * iop_committing() routine is called.  The buffer must already be locked
 * and associated with the given transaction.
 */
/* ARGSUSED */
void
xfs_trans_bhold(
	xfs_trans_t		*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_HOLD;
	trace_xfs_trans_bhold(bip);
}

/*
 * Cancel the previous buffer hold request made on this buffer
 * for this transaction.
 */
void
xfs_trans_bhold_release(
	xfs_trans_t		*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT(bip->bli_flags & XFS_BLI_HOLD);

	bip->bli_flags &= ~XFS_BLI_HOLD;
	trace_xfs_trans_bhold_release(bip);
}
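
/*
 * Illustrative sketch (not part of the original file): XFS_BLI_HOLD is
 * how callers keep a buffer locked across a transaction roll.  A common
 * shape is:
 *
 *	xfs_trans_bhold(tp, bp);
 *	error = xfs_trans_roll(&tp);
 *	if (error)
 *		return error;
 *	xfs_trans_bjoin(tp, bp);
 *
 * Without the hold, committing the old transaction would unlock and
 * release the buffer before it could be joined to the new one.
 */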

/*
 * Mark a buffer dirty in the transaction.
 */
void
xfs_trans_dirty_buf(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);

	/*
	 * Mark the buffer as needing to be written out eventually,
	 * and set its iodone function to remove the buffer's buf log
	 * item from the AIL and free it when the buffer is flushed
	 * to disk.
	 */
	bp->b_flags |= XBF_DONE;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	/*
	 * If we invalidated the buffer within this transaction, then
	 * cancel the invalidation now that we're dirtying the buffer
	 * again.  There are no races with the code in xfs_buf_item_unpin(),
	 * because we have a reference to the buffer this entire time.
	 */
	if (bip->bli_flags & XFS_BLI_STALE) {
		bip->bli_flags &= ~XFS_BLI_STALE;
		ASSERT(bp->b_flags & XBF_STALE);
		bp->b_flags &= ~XBF_STALE;
		bip->__bli_format.blf_flags &= ~XFS_BLF_CANCEL;
	}
	bip->bli_flags |= XFS_BLI_DIRTY | XFS_BLI_LOGGED;

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags);
}

/*
 * This is called to mark bytes first through last inclusive of the given
 * buffer as needing to be logged when the transaction is committed.
 * The buffer must already be associated with the given transaction.
 *
 * First and last are numbers relative to the beginning of this buffer,
 * so the first byte in the buffer is numbered 0 regardless of the
 * value of b_blkno.
 */
void
xfs_trans_log_buf(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	uint			first,
	uint			last)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(first <= last && last < BBTOB(bp->b_length));
	ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED));

	xfs_trans_dirty_buf(tp, bp);

	trace_xfs_trans_log_buf(bip);
	xfs_buf_item_log(bip, first, last);
}
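
/*
 * Illustrative sketch (not part of the original file): first/last are
 * byte offsets from the start of the buffer, so a caller that changed
 * only one field of an on-disk structure can log just that range.  The
 * structure and field names here are hypothetical.
 *
 *	offset = offsetof(struct xfs_some_ondisk, field);
 *	xfs_trans_log_buf(tp, bp, offset,
 *			offset + sizeof_field(struct xfs_some_ondisk, field) - 1);
 *
 * Logging a smaller range keeps the logged region, and hence the log
 * traffic, down to the chunks that actually changed.
 */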

/*
 * Invalidate a buffer that is being used within a transaction.
 *
 * Typically this is because the blocks in the buffer are being freed, so we
 * need to prevent it from being written out when we're done.  Allowing it
 * to be written again might overwrite data in the free blocks if they are
 * reallocated to a file.
 *
 * We prevent the buffer from being written out by marking it stale.  We can't
 * get rid of the buf log item at this point because the buffer may still be
 * pinned by another transaction.  If that is the case, then we'll wait until
 * the buffer is committed to disk for the last time (we can tell by the ref
 * count) and free it in xfs_buf_item_unpin().  Until that happens we will
 * keep the buffer locked so that the buffer and buf log item are not reused.
 *
 * We also set the XFS_BLF_CANCEL flag in the buf log format structure and log
 * the buf item.  This will be used at recovery time to determine that copies
 * of the buffer in the log before this should not be replayed.
 *
 * We mark the item descriptor and the transaction dirty so that we'll hold
 * the buffer until after the commit.
 *
 * Since we're invalidating the buffer, we also clear the state about which
 * parts of the buffer have been logged.  We also clear the flag indicating
 * that this is an inode buffer since the data in the buffer will no longer
 * be valid.
 *
 * We set the stale bit in the buffer as well since we're getting rid of it.
 */
void
xfs_trans_binval(
	xfs_trans_t		*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	int			i;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_trans_binval(bip);

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * If the buffer is already invalidated, then
		 * just return.
		 */
		ASSERT(bp->b_flags & XBF_STALE);
		ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
		ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_INODE_BUF));
		ASSERT(!(bip->__bli_format.blf_flags & XFS_BLFT_MASK));
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		ASSERT(test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags));
		ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
		return;
	}

	xfs_buf_stale(bp);

	bip->bli_flags |= XFS_BLI_STALE;
	bip->bli_flags &= ~(XFS_BLI_INODE_BUF | XFS_BLI_LOGGED | XFS_BLI_DIRTY);
	bip->__bli_format.blf_flags &= ~XFS_BLF_INODE_BUF;
	bip->__bli_format.blf_flags |= XFS_BLF_CANCEL;
	bip->__bli_format.blf_flags &= ~XFS_BLFT_MASK;
	for (i = 0; i < bip->bli_format_count; i++) {
		memset(bip->bli_formats[i].blf_data_map, 0,
		       (bip->bli_formats[i].blf_map_size * sizeof(uint)));
	}
	set_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags);
	tp->t_flags |= XFS_TRANS_DIRTY;
}
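
/*
 * Illustrative sketch (not part of the original file): when a caller
 * frees the blocks backing a metadata buffer it already holds in the
 * transaction, invalidating the buffer stops it from ever being written
 * over the now-free (and possibly reallocated) blocks:
 *
 *	... free the extent covering the buffer ...
 *	xfs_trans_binval(tp, bp);
 *
 * The buffer stays locked and attached until the transaction commits,
 * for the reasons described above xfs_trans_binval().
 */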

/*
 * This call is used to indicate that the buffer contains on-disk inodes which
 * must be handled specially during recovery.  They require special handling
 * because only the di_next_unlinked from the inodes in the buffer should be
 * recovered.  The rest of the data in the buffer is logged via the inodes
 * themselves.
 *
 * All we do is set the XFS_BLI_INODE_BUF flag in the item's flags so it can be
 * transferred to the buffer's log format structure so that we'll know what to
 * do at recovery time.
 */
void
xfs_trans_inode_buf(
	xfs_trans_t		*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_INODE_BUF;
	bp->b_flags |= _XBF_INODES;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}

/*
 * This call is used to indicate that the buffer is going to
 * be staled and was an inode buffer.  This means it gets
 * special processing during unpin - where any inodes
 * associated with the buffer should be removed from the AIL.
 * There is also special processing during recovery: any replay
 * of the inodes in the buffer needs to be prevented, as the
 * buffer may have been reused.
 */
void
xfs_trans_stale_inode_buf(
	xfs_trans_t		*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_STALE_INODE;
	bp->b_flags |= _XBF_INODES;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}

/*
 * Mark the buffer as being one which contains newly allocated
 * inodes.  We need to make sure that even if this buffer is
 * relogged as an 'inode buf' we still recover all of the inode
 * images in the face of a crash.  This works in coordination with
 * xfs_buf_item_committed() to ensure that the buffer remains in the
 * AIL at its original location even after it has been relogged.
 */
/* ARGSUSED */
void
xfs_trans_inode_alloc_buf(
	xfs_trans_t		*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
	bp->b_flags |= _XBF_INODES;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}

/*
 * Mark the buffer as ordered for this transaction.  This means that the
 * contents of the buffer are not recorded in the transaction but it is tracked
 * in the AIL as though it was.  This allows us to record logical changes in
 * transactions rather than the physical changes we make to the buffer without
 * changing writeback ordering constraints of metadata buffers.
 */
bool
xfs_trans_ordered_buf(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	if (xfs_buf_item_dirty_format(bip))
		return false;

	bip->bli_flags |= XFS_BLI_ORDERED;
	trace_xfs_buf_item_ordered(bip);

	/*
	 * We don't log a dirty range of an ordered buffer but it still needs
	 * to be marked dirty and flagged as logged.
	 */
	xfs_trans_dirty_buf(tp, bp);
	return true;
}

/*
 * Set the type of the buffer for log recovery so that it can correctly
 * identify and hence attach the correct buffer ops to the buffer after replay.
 */
void
xfs_trans_buf_set_type(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	enum xfs_blft		type)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	if (!tp)
		return;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	xfs_blft_to_flags(&bip->__bli_format, type);
}

void
xfs_trans_buf_copy_type(
	struct xfs_buf		*dst_bp,
	struct xfs_buf		*src_bp)
{
	struct xfs_buf_log_item	*sbip = src_bp->b_log_item;
	struct xfs_buf_log_item	*dbip = dst_bp->b_log_item;
	enum xfs_blft		type;

	type = xfs_blft_from_flags(&sbip->__bli_format);
	xfs_blft_to_flags(&dbip->__bli_format, type);
}
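
/*
 * Illustrative sketch (not part of the original file): freshly
 * initialized buffers obtained with xfs_trans_get_buf() carry no type in
 * their log format, so callers stamp one before logging, e.g. for a new
 * btree block:
 *
 *	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_BTREE_BUF);
 *	xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
 *
 * Log recovery uses the recorded type to pick the right verifier for the
 * replayed buffer.
 */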

/*
 * Similar to xfs_trans_inode_buf(), this marks the buffer as a cluster of
 * dquots. However, unlike in inode buffer recovery, dquot buffers get
 * recovered in their entirety. (Hence, no XFS_BLI_DQUOT_ALLOC_BUF flag).
 * The only thing that makes dquot buffers different from regular
 * buffers is that we must not replay dquot bufs when recovering
 * if a _corresponding_ quotaoff has happened. We also have to distinguish
 * between usr dquot bufs and grp dquot bufs, because usr and grp quotas
 * can be turned off independently.
 */
/* ARGSUSED */
void
xfs_trans_dquot_buf(
	xfs_trans_t		*tp,
	struct xfs_buf		*bp,
	uint			type)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	ASSERT(type == XFS_BLF_UDQUOT_BUF ||
	       type == XFS_BLF_PDQUOT_BUF ||
	       type == XFS_BLF_GDQUOT_BUF);

	bip->__bli_format.blf_flags |= type;

	switch (type) {
	case XFS_BLF_UDQUOT_BUF:
		type = XFS_BLFT_UDQUOT_BUF;
		break;
	case XFS_BLF_PDQUOT_BUF:
		type = XFS_BLFT_PDQUOT_BUF;
		break;
	case XFS_BLF_GDQUOT_BUF:
		type = XFS_BLFT_GDQUOT_BUF;
		break;
	default:
		type = XFS_BLFT_UNKNOWN_BUF;
		break;
	}

	bp->b_flags |= _XBF_DQUOTS;
	xfs_trans_buf_set_type(tp, bp, type);
}