xref: /linux/fs/xfs/libxfs/xfs_group.c (revision a8a9fd042e0995ed63d33f507c26baf56031e581)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 Red Hat, Inc.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_extent_busy.h"
#include "xfs_group.h"

/*
 * Groups can have passive and active references.
 *
 * For passive references, the code freeing a group is responsible for cleaning
 * up objects that hold the passive references (e.g. cached buffers).
 * Routines manipulating passive references are xfs_group_get, xfs_group_hold
 * and xfs_group_put.
 *
 * Active references are for short-term access to the group for walking trees
 * or accessing state.  If a group is being shrunk or offlined, the lookup will
 * fail to find that group and return NULL instead.
 * Routines manipulating active references are xfs_group_grab and
 * xfs_group_rele.
 */
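
/*
 * Illustrative sketch (not part of this file) contrasting the two reference
 * types.  The do_short_walk() and cache_group_pointer() helpers are
 * hypothetical stand-ins for caller code:
 *
 *	struct xfs_group *xg;
 *
 *	// active reference: fails (returns NULL) if the group is offline
 *	xg = xfs_group_grab(mp, index, XG_TYPE_AG);
 *	if (xg) {
 *		do_short_walk(xg);
 *		xfs_group_rele(xg);
 *	}
 *
 *	// passive reference: paired with xfs_group_put when the cached
 *	// object holding the pointer is torn down
 *	xg = xfs_group_get(mp, index, XG_TYPE_AG);
 *	if (xg)
 *		cache_group_pointer(xg);
 */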

struct xfs_group *
xfs_group_get(
	struct xfs_mount	*mp,
	uint32_t		index,
	enum xfs_group_type	type)
{
	struct xfs_group	*xg;

	rcu_read_lock();
	xg = xa_load(&mp->m_groups[type].xa, index);
	if (xg) {
		trace_xfs_group_get(xg, _RET_IP_);
		ASSERT(atomic_read(&xg->xg_ref) >= 0);
		atomic_inc(&xg->xg_ref);
	}
	rcu_read_unlock();
	return xg;
}

struct xfs_group *
xfs_group_hold(
	struct xfs_group	*xg)
{
	ASSERT(atomic_read(&xg->xg_ref) > 0 ||
	       atomic_read(&xg->xg_active_ref) > 0);

	trace_xfs_group_hold(xg, _RET_IP_);
	atomic_inc(&xg->xg_ref);
	return xg;
}

void
xfs_group_put(
	struct xfs_group	*xg)
{
	trace_xfs_group_put(xg, _RET_IP_);

	ASSERT(atomic_read(&xg->xg_ref) > 0);
	atomic_dec(&xg->xg_ref);
}

struct xfs_group *
xfs_group_grab(
	struct xfs_mount	*mp,
	uint32_t		index,
	enum xfs_group_type	type)
{
	struct xfs_group	*xg;

	rcu_read_lock();
	xg = xa_load(&mp->m_groups[type].xa, index);
	if (xg) {
		trace_xfs_group_grab(xg, _RET_IP_);
		if (!atomic_inc_not_zero(&xg->xg_active_ref))
			xg = NULL;
	}
	rcu_read_unlock();
	return xg;
}

/*
 * Iterate to the next group.  Pass a %NULL @xg to start the iteration at
 * @start_index, else pass the previous group returned from this function.
 * The caller should break out of the loop when this returns %NULL.  If the
 * caller wants to break out of a loop that did not finish, it must release
 * the active reference to @xg with xfs_group_rele() itself (see the example
 * after this function).
 */
struct xfs_group *
xfs_group_next_range(
	struct xfs_mount	*mp,
	struct xfs_group	*xg,
	uint32_t		start_index,
	uint32_t		end_index,
	enum xfs_group_type	type)
{
	uint32_t		index = start_index;

	if (xg) {
		index = xg->xg_gno + 1;
		xfs_group_rele(xg);
	}
	if (index > end_index)
		return NULL;
	return xfs_group_grab(mp, index, type);
}
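
/*
 * Example usage (sketch, not in this file): walk groups [0, end_index] with
 * active references, releasing the reference ourselves on early exit.  The
 * do_work() helper and end_index bound are illustrative stand-ins:
 *
 *	struct xfs_group	*xg = NULL;
 *	int			error = 0;
 *
 *	while ((xg = xfs_group_next_range(mp, xg, 0, end_index, XG_TYPE_AG))) {
 *		error = do_work(xg);
 *		if (error) {
 *			xfs_group_rele(xg);	// loop did not finish
 *			break;
 *		}
 *	}
 */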

/*
 * Find the next group marked with @mark after @xg, or the first such group
 * if @xg is NULL.
 */
struct xfs_group *
xfs_group_grab_next_mark(
	struct xfs_mount	*mp,
	struct xfs_group	*xg,
	xa_mark_t		mark,
	enum xfs_group_type	type)
{
	unsigned long		index = 0;

	if (xg) {
		index = xg->xg_gno + 1;
		xfs_group_rele(xg);
	}

	rcu_read_lock();
	xg = xa_find(&mp->m_groups[type].xa, &index, ULONG_MAX, mark);
	if (xg) {
		trace_xfs_group_grab_next_tag(xg, _RET_IP_);
		if (!atomic_inc_not_zero(&xg->xg_active_ref))
			xg = NULL;
	}
	rcu_read_unlock();
	return xg;
}
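
/*
 * Example usage (sketch, not in this file): iterate only groups that have a
 * given xarray mark set.  XA_MARK_0 is used purely for illustration; real
 * callers pass whichever xa_mark_t their subsystem maintains, and the
 * process_marked_group() helper is hypothetical:
 *
 *	struct xfs_group	*xg = NULL;
 *
 *	while ((xg = xfs_group_grab_next_mark(mp, xg, XA_MARK_0, XG_TYPE_AG)))
 *		process_marked_group(xg);
 */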

void
xfs_group_rele(
	struct xfs_group	*xg)
{
	trace_xfs_group_rele(xg, _RET_IP_);
	atomic_dec(&xg->xg_active_ref);
}

void
xfs_group_free(
	struct xfs_mount	*mp,
	uint32_t		index,
	enum xfs_group_type	type,
	void			(*uninit)(struct xfs_group *xg))
{
	struct xfs_group	*xg = xa_erase(&mp->m_groups[type].xa, index);

	XFS_IS_CORRUPT(mp, atomic_read(&xg->xg_ref) != 0);

	xfs_defer_drain_free(&xg->xg_intents_drain);
#ifdef __KERNEL__
	if (xfs_group_has_extent_busy(xg->xg_mount, xg->xg_type))
		kfree(xg->xg_busy_extents);
#endif

	if (uninit)
		uninit(xg);

	/* drop the mount's active reference */
	xfs_group_rele(xg);
	XFS_IS_CORRUPT(mp, atomic_read(&xg->xg_active_ref) != 0);
	kfree_rcu_mightsleep(xg);
}

int
xfs_group_insert(
	struct xfs_mount	*mp,
	struct xfs_group	*xg,
	uint32_t		index,
	enum xfs_group_type	type)
{
	int			error;

	xg->xg_mount = mp;
	xg->xg_gno = index;
	xg->xg_type = type;

#ifdef __KERNEL__
	if (xfs_group_has_extent_busy(mp, type)) {
		xg->xg_busy_extents = xfs_extent_busy_alloc();
		if (!xg->xg_busy_extents)
			return -ENOMEM;
	}
	spin_lock_init(&xg->xg_state_lock);
	xfs_hooks_init(&xg->xg_rmap_update_hooks);
#endif
	xfs_defer_drain_init(&xg->xg_intents_drain);

	/* Active ref owned by mount indicates group is online. */
	atomic_set(&xg->xg_active_ref, 1);

	error = xa_insert(&mp->m_groups[type].xa, index, xg, GFP_KERNEL);
	if (error) {
		WARN_ON_ONCE(error == -EBUSY);
		goto out_drain;
	}

	return 0;
out_drain:
	xfs_defer_drain_free(&xg->xg_intents_drain);
#ifdef __KERNEL__
	if (xfs_group_has_extent_busy(xg->xg_mount, xg->xg_type))
		kfree(xg->xg_busy_extents);
#endif
	return error;
}
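
/*
 * Example usage (sketch, not in this file): a group constructor allocates a
 * type-specific structure embedding struct xfs_group, then publishes it via
 * xfs_group_insert().  On failure, xfs_group_insert() has already torn down
 * the state it set up, so the caller only frees its own allocation.  The
 * alloc_my_group()/free_my_group() names are hypothetical:
 *
 *	struct xfs_group	*xg = alloc_my_group();	// kzalloc-backed
 *	int			error;
 *
 *	if (!xg)
 *		return -ENOMEM;
 *	error = xfs_group_insert(mp, xg, index, XG_TYPE_AG);
 *	if (error) {
 *		free_my_group(xg);
 *		return error;
 *	}
 */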

struct xfs_group *
xfs_group_get_by_fsb(
	struct xfs_mount	*mp,
	xfs_fsblock_t		fsbno,
	enum xfs_group_type	type)
{
	return xfs_group_get(mp, xfs_fsb_to_gno(mp, fsbno, type), type);
}
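
/*
 * Example usage (sketch, not in this file): take a passive reference on the
 * group owning a filesystem block, then drop it when done.  The note_group()
 * helper is hypothetical:
 *
 *	struct xfs_group *xg = xfs_group_get_by_fsb(mp, fsbno, XG_TYPE_AG);
 *
 *	if (xg) {
 *		note_group(xg);
 *		xfs_group_put(xg);
 *	}
 */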