/* xref: /linux/fs/xfs/xfs_drain.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800) */
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2022-2023 Oracle.  All Rights Reserved.
4  * Author: Darrick J. Wong <djwong@kernel.org>
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_trans_resv.h"
11 #include "xfs_mount.h"
12 #include "xfs_ag.h"
13 #include "xfs_trace.h"
14 
/*
 * Use a static key here to reduce the overhead of xfs_drain_rele.  If the
 * compiler supports jump labels, the static branch will be replaced by a nop
 * sled when there are no xfs_drain_wait callers.  Online fsck is currently
 * the only caller, so this is a reasonable tradeoff.
 *
 * Note: Patching the kernel code requires taking the cpu hotplug lock.  Other
 * parts of the kernel allocate memory with that lock held, which means that
 * XFS callers cannot hold any locks that might be used by memory reclaim or
 * writeback when calling the static_branch_{inc,dec} functions.
 */
static DEFINE_STATIC_KEY_FALSE(xfs_drain_waiter_gate);
27 
/*
 * Note that one fewer thread wants to wait on drains.  When the count of
 * enablers drops to zero, the static branch in xfs_defer_drain_rele turns
 * back into a nop sled, restoring the cheap release fast path.
 *
 * Caller must not hold any locks used by memory reclaim or writeback, since
 * patching the jump label takes the cpu hotplug lock (see the note above).
 */
void
xfs_drain_wait_disable(void)
{
	static_branch_dec(&xfs_drain_waiter_gate);
}
33 
/*
 * Announce that a thread may wait on drains, enabling the waiter-check path
 * in xfs_defer_drain_rele via the static branch.
 *
 * Caller must not hold any locks used by memory reclaim or writeback, since
 * patching the jump label takes the cpu hotplug lock (see the note above).
 */
void
xfs_drain_wait_enable(void)
{
	static_branch_inc(&xfs_drain_waiter_gate);
}
39 
40 void
xfs_defer_drain_init(struct xfs_defer_drain * dr)41 xfs_defer_drain_init(
42 	struct xfs_defer_drain	*dr)
43 {
44 	atomic_set(&dr->dr_count, 0);
45 	init_waitqueue_head(&dr->dr_waiters);
46 }
47 
/*
 * Tear down a drain.  There is nothing to release; we only assert that every
 * intent holder has dropped its count before the structure goes away.
 */
void
xfs_defer_drain_free(struct xfs_defer_drain	*dr)
{
	ASSERT(atomic_read(&dr->dr_count) == 0);
}
53 
/* Increase the pending intent count. */
static inline void xfs_defer_drain_grab(struct xfs_defer_drain *dr)
{
	atomic_inc(&dr->dr_count);
}
59 
/*
 * Decide if anyone is sleeping on this waitqueue without taking its lock.
 * Only safe because the barrier below orders our preceding atomic update
 * against the waiter's queue check; do not call without that atomic.
 */
static inline bool has_waiters(struct wait_queue_head *wq_head)
{
	/*
	 * This memory barrier is paired with the one in set_current_state on
	 * the waiting side.
	 */
	smp_mb__after_atomic();
	return waitqueue_active(wq_head);
}
69 
/* Decrease the pending intent count, and wake any waiters, if appropriate. */
static inline void xfs_defer_drain_rele(struct xfs_defer_drain *dr)
{
	/*
	 * Only walk into the waiter check when this drop took the count to
	 * zero AND a waiter has enabled the static gate; with the gate off,
	 * the static branch is a nop sled and the release path stays cheap.
	 */
	if (atomic_dec_and_test(&dr->dr_count) &&
	    static_branch_unlikely(&xfs_drain_waiter_gate) &&
	    has_waiters(&dr->dr_waiters))
		wake_up(&dr->dr_waiters);
}
78 
/* Are there intents pending? */
static inline bool xfs_defer_drain_busy(struct xfs_defer_drain *dr)
{
	return atomic_read(&dr->dr_count) > 0;
}
84 
/*
 * Wait for the pending intent count for a drain to hit zero.
 *
 * Callers must not hold any locks that would prevent intents from being
 * finished.
 *
 * Returns 0 once the count reaches zero, or a negative errno if the sleep
 * is interrupted by a fatal signal (per wait_event_killable semantics).
 */
static inline int xfs_defer_drain_wait(struct xfs_defer_drain *dr)
{
	return wait_event_killable(dr->dr_waiters, !xfs_defer_drain_busy(dr));
}
95 
96 /*
97  * Get a passive reference to the AG that contains a fsbno and declare an intent
98  * to update its metadata.
99  */
100 struct xfs_perag *
xfs_perag_intent_get(struct xfs_mount * mp,xfs_fsblock_t fsbno)101 xfs_perag_intent_get(
102 	struct xfs_mount	*mp,
103 	xfs_fsblock_t		fsbno)
104 {
105 	struct xfs_perag	*pag;
106 
107 	pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, fsbno));
108 	if (!pag)
109 		return NULL;
110 
111 	xfs_perag_intent_hold(pag);
112 	return pag;
113 }
114 
/*
 * Release our intent to update this AG's metadata, and then release our
 * passive ref to the AG.  The intent is dropped first, while the passive
 * reference still keeps the perag (and its drain) alive.
 */
void
xfs_perag_intent_put(
	struct xfs_perag	*pag)
{
	xfs_perag_intent_rele(pag);
	xfs_perag_put(pag);
}
126 
/*
 * Declare an intent to update AG metadata.  Other threads that need exclusive
 * access can decide to back off if they see declared intentions.
 *
 * Pair with xfs_perag_intent_rele once the update is finished.
 */
void
xfs_perag_intent_hold(
	struct xfs_perag	*pag)
{
	trace_xfs_perag_intent_hold(pag, __return_address);
	xfs_defer_drain_grab(&pag->pag_intents_drain);
}
138 
/* Release our intent to update this AG's metadata. */
void
xfs_perag_intent_rele(
	struct xfs_perag	*pag)
{
	trace_xfs_perag_intent_rele(pag, __return_address);
	xfs_defer_drain_rele(&pag->pag_intents_drain);
}
147 
/*
 * Wait for the intent update count for this AG to hit zero.
 * Callers must not hold any AG header buffers.
 *
 * Returns 0 on success, or a negative errno if interrupted by a fatal
 * signal while waiting (see xfs_defer_drain_wait).
 */
int
xfs_perag_intent_drain(
	struct xfs_perag	*pag)
{
	trace_xfs_perag_wait_intents(pag, __return_address);
	return xfs_defer_drain_wait(&pag->pag_intents_drain);
}
159 
/* Has anyone declared an intent to update this AG? */
bool
xfs_perag_intent_busy(
	struct xfs_perag	*pag)
{
	return xfs_defer_drain_busy(&pag->pag_intents_drain);
}
167