// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 Red Hat, Inc.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_extent_busy.h"
#include "xfs_group.h"

/*
 * Groups can have passive and active references.
 *
 * For passive references, the code freeing a group is responsible for cleaning
 * up objects that hold the passive references (e.g. cached buffers).
 * Routines manipulating passive references are xfs_group_get, xfs_group_hold
 * and xfs_group_put.
 *
 * Active references are for short-term access to the group for walking trees
 * or accessing state.  If a group is being shrunk or offlined, the lookup will
 * fail to find that group and return NULL instead.
 * Routines manipulating active references are xfs_group_grab and
 * xfs_group_rele.
 */
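
/*
 * Example (illustrative sketch, not from the original file): typical use of
 * the two reference types.  xfs_group_get/xfs_group_put bracket longer-lived
 * passive users, while xfs_group_grab/xfs_group_rele bracket short-lived
 * active users that must observe a group going away:
 *
 *	struct xfs_group	*xg = xfs_group_grab(mp, index, XG_TYPE_AG);
 *
 *	if (xg) {
 *		// walk trees or read state under the active reference
 *		xfs_group_rele(xg);
 *	}
 */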

/* Look up a group by index and take a passive reference to it. */
struct xfs_group *
xfs_group_get(
	struct xfs_mount	*mp,
	uint32_t		index,
	enum xfs_group_type	type)
{
	struct xfs_group	*xg;

	rcu_read_lock();
	xg = xa_load(&mp->m_groups[type].xa, index);
	if (xg) {
		trace_xfs_group_get(xg, _RET_IP_);
		ASSERT(atomic_read(&xg->xg_ref) >= 0);
		atomic_inc(&xg->xg_ref);
	}
	rcu_read_unlock();
	return xg;
}

/* Take another passive reference to an already referenced group. */
struct xfs_group *
xfs_group_hold(
	struct xfs_group	*xg)
{
	ASSERT(atomic_read(&xg->xg_ref) > 0 ||
	       atomic_read(&xg->xg_active_ref) > 0);

	trace_xfs_group_hold(xg, _RET_IP_);
	atomic_inc(&xg->xg_ref);
	return xg;
}

/* Drop a passive reference to a group. */
void
xfs_group_put(
	struct xfs_group	*xg)
{
	trace_xfs_group_put(xg, _RET_IP_);

	ASSERT(atomic_read(&xg->xg_ref) > 0);
	atomic_dec(&xg->xg_ref);
}

/*
 * Look up a group by index and take an active reference to it, or return
 * NULL if the group is going away.
 */
struct xfs_group *
xfs_group_grab(
	struct xfs_mount	*mp,
	uint32_t		index,
	enum xfs_group_type	type)
{
	struct xfs_group	*xg;

	rcu_read_lock();
	xg = xa_load(&mp->m_groups[type].xa, index);
	if (xg) {
		trace_xfs_group_grab(xg, _RET_IP_);
		if (!atomic_inc_not_zero(&xg->xg_active_ref))
			xg = NULL;
	}
	rcu_read_unlock();
	return xg;
}

/*
 * Iterate to the next group.  To start the iteration at @start_index, pass a
 * %NULL @xg; otherwise pass the previous group returned from this function.
 * The caller should break out of the loop when this returns %NULL.  A caller
 * that breaks out of the loop early must drop the active reference to @xg by
 * calling xfs_group_rele() itself.
 */
struct xfs_group *
xfs_group_next_range(
	struct xfs_mount	*mp,
	struct xfs_group	*xg,
	uint32_t		start_index,
	uint32_t		end_index,
	enum xfs_group_type	type)
{
	uint32_t		index = start_index;

	if (xg) {
		index = xg->xg_gno + 1;
		xfs_group_rele(xg);
	}
	if (index > end_index)
		return NULL;
	return xfs_group_grab(mp, index, type);
}
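
/*
 * Example (illustrative sketch): walking a range of groups with the iterator
 * above.  A NULL @xg starts the walk; the iterator drops the active reference
 * on each step, and the caller drops it on early exit.  should_stop() is a
 * hypothetical predicate standing in for the caller's loop body:
 *
 *	struct xfs_group	*xg = NULL;
 *
 *	while ((xg = xfs_group_next_range(mp, xg, start, end, XG_TYPE_AG))) {
 *		if (should_stop(xg)) {
 *			xfs_group_rele(xg);
 *			break;
 *		}
 *	}
 */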

/*
 * Find the next group after @xg that has @mark set, or the first such group
 * if @xg is NULL.
 */
struct xfs_group *
xfs_group_grab_next_mark(
	struct xfs_mount	*mp,
	struct xfs_group	*xg,
	xa_mark_t		mark,
	enum xfs_group_type	type)
{
	unsigned long		index = 0;

	if (xg) {
		index = xg->xg_gno + 1;
		xfs_group_rele(xg);
	}

	rcu_read_lock();
	xg = xa_find(&mp->m_groups[type].xa, &index, ULONG_MAX, mark);
	if (xg) {
		trace_xfs_group_grab_next_tag(xg, _RET_IP_);
		if (!atomic_inc_not_zero(&xg->xg_active_ref))
			xg = NULL;
	}
	rcu_read_unlock();
	return xg;
}
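
/*
 * Example (illustrative sketch): visiting only groups carrying a given xarray
 * mark.  XA_MARK_0 is the xarray's generic first mark and stands in here for
 * whatever named mark a caller actually uses.  The same NULL-starts/NULL-ends
 * protocol as xfs_group_next_range() applies:
 *
 *	struct xfs_group	*xg = NULL;
 *
 *	while ((xg = xfs_group_grab_next_mark(mp, xg, XA_MARK_0,
 *			XG_TYPE_AG))) {
 *		// process the marked group under its active reference
 *	}
 */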

/* Drop an active reference to a group. */
void
xfs_group_rele(
	struct xfs_group	*xg)
{
	trace_xfs_group_rele(xg, _RET_IP_);
	atomic_dec(&xg->xg_active_ref);
}

/*
 * Remove a group from the lookup structure and free it, calling the optional
 * @uninit callback for type-specific teardown first.
 */
void
xfs_group_free(
	struct xfs_mount	*mp,
	uint32_t		index,
	enum xfs_group_type	type,
	void			(*uninit)(struct xfs_group *xg))
{
	struct xfs_group	*xg = xa_erase(&mp->m_groups[type].xa, index);

	XFS_IS_CORRUPT(mp, atomic_read(&xg->xg_ref) != 0);

	xfs_defer_drain_free(&xg->xg_intents_drain);
#ifdef __KERNEL__
	kfree(xg->xg_busy_extents);
#endif

	if (uninit)
		uninit(xg);

	/* drop the mount's active reference */
	xfs_group_rele(xg);
	XFS_IS_CORRUPT(mp, atomic_read(&xg->xg_active_ref) != 0);
	kfree_rcu_mightsleep(xg);
}

/*
 * Initialize a group and insert it into the lookup structure at @index.  On
 * failure the internal allocations are torn down again, but the caller
 * remains responsible for freeing @xg itself.
 */
int
xfs_group_insert(
	struct xfs_mount	*mp,
	struct xfs_group	*xg,
	uint32_t		index,
	enum xfs_group_type	type)
{
	int			error;

	xg->xg_mount = mp;
	xg->xg_gno = index;
	xg->xg_type = type;

#ifdef __KERNEL__
	xg->xg_busy_extents = xfs_extent_busy_alloc();
	if (!xg->xg_busy_extents)
		return -ENOMEM;
	spin_lock_init(&xg->xg_state_lock);
	xfs_hooks_init(&xg->xg_rmap_update_hooks);
#endif
	xfs_defer_drain_init(&xg->xg_intents_drain);

	/* Active ref owned by mount indicates group is online. */
	atomic_set(&xg->xg_active_ref, 1);

	error = xa_insert(&mp->m_groups[type].xa, index, xg, GFP_KERNEL);
	if (error) {
		WARN_ON_ONCE(error == -EBUSY);
		goto out_drain;
	}

	return 0;
out_drain:
	xfs_defer_drain_free(&xg->xg_intents_drain);
#ifdef __KERNEL__
	kfree(xg->xg_busy_extents);
#endif
	return error;
}
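
/*
 * Example (illustrative sketch): the insert/free lifecycle as seen by a
 * caller that allocates the group structure itself.  my_uninit() is a
 * hypothetical per-type teardown callback:
 *
 *	error = xfs_group_insert(mp, xg, index, XG_TYPE_AG);
 *	if (error)
 *		kfree(xg);	// internals already torn down on failure
 *	...
 *	xfs_group_free(mp, index, XG_TYPE_AG, my_uninit);
 */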

/*
 * Look up the group that contains filesystem block @fsbno and take a passive
 * reference to it.
 */
struct xfs_group *
xfs_group_get_by_fsb(
	struct xfs_mount	*mp,
	xfs_fsblock_t		fsbno,
	enum xfs_group_type	type)
{
	return xfs_group_get(mp, xfs_fsb_to_gno(mp, fsbno, type), type);
}
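
/*
 * Example (illustrative sketch): translating a filesystem block number
 * straight to a passively referenced group, then dropping the reference:
 *
 *	struct xfs_group	*xg = xfs_group_get_by_fsb(mp, fsbno,
 *			XG_TYPE_AG);
 *
 *	if (xg) {
 *		// use the group
 *		xfs_group_put(xg);
 *	}
 */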