// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2022-2023 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#ifndef XFS_DRAIN_H_
#define XFS_DRAIN_H_

struct xfs_perag;

#ifdef CONFIG_XFS_DRAIN_INTENTS
/*
 * Passive drain mechanism. This data structure tracks a count of some items
 * and contains a waitqueue for callers who would like to wake up when the
 * count hits zero.
 */
struct xfs_defer_drain {
	/* Number of items pending in some part of the filesystem. */
	atomic_t		dr_count;

	/* Queue to wait for dr_count to go to zero */
	struct wait_queue_head	dr_waiters;
};

void xfs_defer_drain_init(struct xfs_defer_drain *dr);
void xfs_defer_drain_free(struct xfs_defer_drain *dr);

void xfs_drain_wait_disable(void);
void xfs_drain_wait_enable(void);

/*
 * Deferred Work Intent Drains
 * ===========================
 *
 * When a writer thread executes a chain of log intent items, the AG header
 * buffer locks will cycle during a transaction roll to get from one intent
 * item to the next in a chain. Although scrub takes all AG header buffer
 * locks, this isn't sufficient to guard against scrub checking an AG while
 * that writer thread is in the middle of finishing a chain because there's no
 * higher level locking primitive guarding allocation groups.
 *
 * When there's a collision, cross-referencing between data structures (e.g.
 * rmapbt and refcountbt) yields false corruption events; if repair is running,
 * this results in incorrect repairs, which is catastrophic.
 *
 * The solution is to add to the perag structure a count of active intents and
 * to make scrub wait until it holds both AG header buffer locks and the intent
 * counter reaches zero. It is therefore critical that deferred work threads
 * hold the AGI or AGF buffers when decrementing the intent counter.
 *
 * Given a list of deferred work items, the deferred work manager will complete
 * a work item and all the sub-items that the parent item creates before moving
 * on to the next work item in the list. This is also true for all levels of
 * sub-items. Writer threads are permitted to queue multiple work items
 * targeting the same AG, so a deferred work item (such as a BUI) that creates
 * sub-items (such as RUIs) must bump the intent counter and maintain it until
 * the sub-items can themselves bump the intent counter.
 *
 * Therefore, the intent count tracks entire lifetimes of deferred work items.
 * All functions that create work items must increment the intent counter as
 * soon as the item is added to the transaction and cannot drop the counter
 * until the item is finished or cancelled.
 */
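/*
 * Sketch of the drain idea (illustrative only; the real implementation lives
 * in xfs_drain.c and may differ): pair the atomic count with the waitqueue so
 * that dropping the last reference wakes any waiters, and waiting sleeps
 * until the count hits zero. The helper names below are hypothetical.
 *
 *	static void defer_drain_grab(struct xfs_defer_drain *dr)
 *	{
 *		atomic_inc(&dr->dr_count);
 *	}
 *
 *	static void defer_drain_rele(struct xfs_defer_drain *dr)
 *	{
 *		if (atomic_dec_and_test(&dr->dr_count))
 *			wake_up(&dr->dr_waiters);
 *	}
 *
 *	static int defer_drain_wait(struct xfs_defer_drain *dr)
 *	{
 *		// 0 when drained, -ERESTARTSYS on a fatal signal
 *		return wait_event_killable(dr->dr_waiters,
 *				atomic_read(&dr->dr_count) == 0);
 *	}
 */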
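/*
 * Usage sketch (illustrative only; see the callers of these functions for the
 * real logic): a writer thread pins the intent count for the whole lifetime
 * of a deferred work item, and scrub drains the count after taking the AG
 * header buffer locks.
 *
 *	// writer side, when adding an intent item for this AG to a transaction
 *	pag = xfs_perag_intent_get(mp, agno);
 *	...finish or cancel the item (and any sub-items it creates)...
 *	xfs_perag_intent_put(pag);
 *
 *	// scrub side, after locking the AGI/AGF buffers
 *	while (xfs_perag_intent_busy(pag)) {
 *		...drop the AG header buffer locks...
 *		error = xfs_perag_intent_drain(pag);
 *		if (error)
 *			return error;
 *		...re-take the AG header buffer locks...
 *	}
 */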
struct xfs_perag *xfs_perag_intent_get(struct xfs_mount *mp,
		xfs_agnumber_t agno);
void xfs_perag_intent_put(struct xfs_perag *pag);

void xfs_perag_intent_hold(struct xfs_perag *pag);
void xfs_perag_intent_rele(struct xfs_perag *pag);

int xfs_perag_intent_drain(struct xfs_perag *pag);
bool xfs_perag_intent_busy(struct xfs_perag *pag);
#else
struct xfs_defer_drain { /* empty */ };

#define xfs_defer_drain_free(dr)	((void)0)
#define xfs_defer_drain_init(dr)	((void)0)

#define xfs_perag_intent_get(mp, agno)	xfs_perag_get((mp), (agno))
#define xfs_perag_intent_put(pag)	xfs_perag_put(pag)

static inline void xfs_perag_intent_hold(struct xfs_perag *pag) { }
static inline void xfs_perag_intent_rele(struct xfs_perag *pag) { }

#endif /* CONFIG_XFS_DRAIN_INTENTS */

#endif /* XFS_DRAIN_H_ */