xref: /linux/fs/xfs/libxfs/xfs_btree_mem.c (revision e814f3fd16acfb7f9966773953de8f740a1e3202)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (c) 2021-2024 Oracle.  All Rights Reserved.
4  * Author: Darrick J. Wong <djwong@kernel.org>
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_mount.h"
13 #include "xfs_trans.h"
14 #include "xfs_btree.h"
15 #include "xfs_error.h"
16 #include "xfs_buf_mem.h"
17 #include "xfs_btree_mem.h"
18 #include "xfs_ag.h"
19 #include "xfs_buf_item.h"
20 #include "xfs_trace.h"
21 #include "xfs_rtgroup.h"
22 
23 /* Set the root of an in-memory btree. */
24 void
25 xfbtree_set_root(
26 	struct xfs_btree_cur		*cur,
27 	const union xfs_btree_ptr	*ptr,
28 	int				inc)
29 {
30 	ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_MEM);
31 
32 	cur->bc_mem.xfbtree->root = *ptr;
33 	cur->bc_mem.xfbtree->nlevels += inc;
34 }
35 
36 /* Initialize a pointer from the in-memory btree header. */
37 void
38 xfbtree_init_ptr_from_cur(
39 	struct xfs_btree_cur		*cur,
40 	union xfs_btree_ptr		*ptr)
41 {
42 	ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_MEM);
43 
44 	*ptr = cur->bc_mem.xfbtree->root;
45 }
46 
/*
 * Duplicate an in-memory btree cursor.  The new cursor points at the same
 * xfbtree, transaction, and ops as the original.
 */
struct xfs_btree_cur *
xfbtree_dup_cursor(
	struct xfs_btree_cur		*cur)
{
	struct xfs_btree_cur		*ncur;

	ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_MEM);

	/* Allocate a fresh cursor from the same cursor cache. */
	ncur = xfs_btree_alloc_cursor(cur->bc_mp, cur->bc_tp, cur->bc_ops,
			cur->bc_maxlevels, cur->bc_cache);
	/* Mirror the original cursor's state and in-memory btree pointer. */
	ncur->bc_flags = cur->bc_flags;
	ncur->bc_nlevels = cur->bc_nlevels;
	ncur->bc_mem.xfbtree = cur->bc_mem.xfbtree;
	/* The duplicate holds its own group reference. */
	if (cur->bc_group)
		ncur->bc_group = xfs_group_hold(cur->bc_group);
	return ncur;
}
65 
/* Close the btree xfile and release all resources. */
void
xfbtree_destroy(
	struct xfbtree		*xfbt)
{
	/*
	 * Draining the in-memory buftarg releases all cached buffers backing
	 * this btree.  NOTE(review): assumes no other cursor still references
	 * the btree at this point -- confirm against callers.
	 */
	xfs_buftarg_drain(xfbt->target);
}
73 
/* Compute the number of bytes available for records. */
static inline unsigned int
xfbtree_rec_bytes(
	struct xfs_mount		*mp,
	const struct xfs_btree_ops	*ops)
{
	/*
	 * In-memory btree blocks are always one XMBUF_BLOCKSIZE buffer with a
	 * long-format CRC-enabled block header; everything after the header
	 * can hold records.  @mp and @ops are unused here -- presumably the
	 * signature is kept parallel to the on-disk geometry helpers.
	 */
	return XMBUF_BLOCKSIZE - XFS_BTREE_LBLOCK_CRC_LEN;
}
82 
/*
 * Initialize an empty leaf block as the btree root.
 *
 * Claims the next block number from the xfbtree's bump allocator, formats
 * it as an empty level-0 btree block, and records it as the root.
 * Returns 0 or a negative errno from the buffer cache.
 */
STATIC int
xfbtree_init_leaf_block(
	struct xfs_mount		*mp,
	struct xfbtree			*xfbt,
	const struct xfs_btree_ops	*ops)
{
	struct xfs_buf			*bp;
	xfbno_t				bno = xfbt->highest_bno++;
	int				error;

	/* Get (creating if necessary) the buffer backing the new root. */
	error = xfs_buf_get(xfbt->target, xfbno_to_daddr(bno), XFBNO_BBSIZE,
			&bp);
	if (error)
		return error;

	trace_xfbtree_create_root_buf(xfbt, bp);

	/* Format an empty level-0 block with zero records, then release. */
	bp->b_ops = ops->buf_ops;
	xfs_btree_init_buf(mp, bp, ops, 0, 0, xfbt->owner);
	xfs_buf_relse(bp);

	/* Record the new block as the long-format root pointer. */
	xfbt->root.l = cpu_to_be64(bno);
	return 0;
}
108 
/*
 * Create an in-memory btree root that can be used with the given xmbuf.
 * Callers must set xfbt->owner.
 *
 * Returns 0 on success or -EINVAL if the btree ops are incompatible with
 * in-memory operation (non-CRC filesystem or short-format pointers).
 */
int
xfbtree_init(
	struct xfs_mount		*mp,
	struct xfbtree			*xfbt,
	struct xfs_buftarg		*btp,
	const struct xfs_btree_ops	*ops)
{
	unsigned int			blocklen = xfbtree_rec_bytes(mp, ops);
	unsigned int			keyptr_len;
	int				error;

	/* Requires a long-format CRC-format btree */
	if (!xfs_has_crc(mp)) {
		ASSERT(xfs_has_crc(mp));
		return -EINVAL;
	}
	if (ops->ptr_len != XFS_BTREE_LONG_PTR_LEN) {
		ASSERT(ops->ptr_len == XFS_BTREE_LONG_PTR_LEN);
		return -EINVAL;
	}

	memset(xfbt, 0, sizeof(*xfbt));
	xfbt->target = btp;

	/* Set up min/maxrecs for this btree. */
	/* Index 0 covers leaf blocks (records); index 1 covers node blocks
	 * (key/pointer pairs).  minrecs is the usual half-full rule. */
	keyptr_len = ops->key_len + sizeof(__be64);
	xfbt->maxrecs[0] = blocklen / ops->rec_len;
	xfbt->maxrecs[1] = blocklen / keyptr_len;
	xfbt->minrecs[0] = xfbt->maxrecs[0] / 2;
	xfbt->minrecs[1] = xfbt->maxrecs[1] / 2;
	xfbt->highest_bno = 0;
	xfbt->nlevels = 1;

	/* Initialize the empty btree. */
	error = xfbtree_init_leaf_block(mp, xfbt, ops);
	if (error)
		goto err_freesp;

	trace_xfbtree_init(mp, xfbt, ops);

	return 0;

err_freesp:
	/* Drop any buffers created before the failure. */
	xfs_buftarg_drain(xfbt->target);
	return error;
}
159 
/*
 * Allocate a block to our in-memory btree.
 *
 * Blocks are handed out by bumping highest_bno, so allocation cannot fail
 * with ENOSPC; the only failure mode is exceeding the buftarg's maximum
 * addressable block, reported via *stat == 0.
 */
int
xfbtree_alloc_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*start,
	union xfs_btree_ptr		*new,
	int				*stat)
{
	struct xfbtree			*xfbt = cur->bc_mem.xfbtree;
	xfbno_t				bno = xfbt->highest_bno++;

	ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_MEM);

	trace_xfbtree_alloc_block(xfbt, cur, bno);

	/* Fail if the block address exceeds the maximum for the buftarg. */
	/*
	 * NOTE(review): highest_bno has already been incremented when
	 * verification fails, so the out-of-range block number is not given
	 * back -- presumably callers treat this as fatal to the whole btree.
	 * Confirm against the btree core's handling of *stat == 0.
	 */
	if (!xfbtree_verify_bno(xfbt, bno)) {
		ASSERT(xfbtree_verify_bno(xfbt, bno));
		*stat = 0;
		return 0;
	}

	new->l = cpu_to_be64(bno);
	*stat = 1;
	return 0;
}
186 
/*
 * Free a block from our in-memory btree.
 *
 * The bump allocator can only reclaim the most recently allocated block;
 * blocks freed out of order stay allocated to the backing xfile until the
 * btree is destroyed.  Always returns 0.
 */
int
xfbtree_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfbtree		*xfbt = cur->bc_mem.xfbtree;
	xfs_daddr_t		daddr = xfs_buf_daddr(bp);
	xfbno_t			bno = xfs_daddr_to_xfbno(daddr);

	ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_MEM);

	trace_xfbtree_free_block(xfbt, cur, bno);

	/* Roll back the allocator only if this was the last block given out. */
	if (bno + 1 == xfbt->highest_bno)
		xfbt->highest_bno--;

	return 0;
}
206 
207 /* Return the minimum number of records for a btree block. */
208 int
209 xfbtree_get_minrecs(
210 	struct xfs_btree_cur	*cur,
211 	int			level)
212 {
213 	struct xfbtree		*xfbt = cur->bc_mem.xfbtree;
214 
215 	return xfbt->minrecs[level != 0];
216 }
217 
218 /* Return the maximum number of records for a btree block. */
219 int
220 xfbtree_get_maxrecs(
221 	struct xfs_btree_cur	*cur,
222 	int			level)
223 {
224 	struct xfbtree		*xfbt = cur->bc_mem.xfbtree;
225 
226 	return xfbt->maxrecs[level != 0];
227 }
228 
229 /* If this log item is a buffer item that came from the xfbtree, return it. */
230 static inline struct xfs_buf *
231 xfbtree_buf_match(
232 	struct xfbtree			*xfbt,
233 	const struct xfs_log_item	*lip)
234 {
235 	const struct xfs_buf_log_item	*bli;
236 	struct xfs_buf			*bp;
237 
238 	if (lip->li_type != XFS_LI_BUF)
239 		return NULL;
240 
241 	bli = container_of(lip, struct xfs_buf_log_item, bli_item);
242 	bp = bli->bli_buf;
243 	if (bp->b_target != xfbt->target)
244 		return NULL;
245 
246 	return bp;
247 }
248 
249 /*
250  * Commit changes to the incore btree immediately by writing all dirty xfbtree
251  * buffers to the backing xfile.  This detaches all xfbtree buffers from the
252  * transaction, even on failure.  The buffer locks are dropped between the
253  * delwri queue and submit, so the caller must synchronize btree access.
254  *
255  * Normally we'd let the buffers commit with the transaction and get written to
256  * the xfile via the log, but online repair stages ephemeral btrees in memory
257  * and uses the btree_staging functions to write new btrees to disk atomically.
258  * The in-memory btree (and its backing store) are discarded at the end of the
259  * repair phase, which means that xfbtree buffers cannot commit with the rest
260  * of a transaction.
261  *
262  * In other words, online repair only needs the transaction to collect buffer
263  * pointers and to avoid buffer deadlocks, not to guarantee consistency of
264  * updates.
265  */
int
xfbtree_trans_commit(
	struct xfbtree		*xfbt,
	struct xfs_trans	*tp)
{
	struct xfs_log_item	*lip, *n;
	bool			tp_dirty = false;
	int			error = 0;

	/*
	 * For each xfbtree buffer attached to the transaction, write the dirty
	 * buffers to the xfile and release them.
	 */
	list_for_each_entry_safe(lip, n, &tp->t_items, li_trans) {
		struct xfs_buf	*bp = xfbtree_buf_match(xfbt, lip);

		if (!bp) {
			/* Not ours; track whether any remaining item is dirty. */
			if (test_bit(XFS_LI_DIRTY, &lip->li_flags))
				tp_dirty |= true;
			continue;
		}

		trace_xfbtree_trans_commit_buf(xfbt, bp);

		/* Detach the buffer from the transaction unconditionally. */
		xmbuf_trans_bdetach(tp, bp);

		/*
		 * If the buffer fails verification, note the failure but
		 * continue walking the transaction items so that we remove all
		 * ephemeral btree buffers.
		 */
		if (!error)
			error = xmbuf_finalize(bp);

		xfs_buf_relse(bp);
	}

	/*
	 * Reset the transaction's dirty flag to reflect the dirty state of the
	 * log items that are still attached.
	 */
	tp->t_flags = (tp->t_flags & ~XFS_TRANS_DIRTY) |
			(tp_dirty ? XFS_TRANS_DIRTY : 0);

	return error;
}
312 
/*
 * Cancel changes to the incore btree by detaching all the xfbtree buffers.
 * Changes are not undone, so callers must not access the btree ever again.
 */
void
xfbtree_trans_cancel(
	struct xfbtree		*xfbt,
	struct xfs_trans	*tp)
{
	struct xfs_log_item	*lip, *n;
	bool			tp_dirty = false;

	/* Detach and release every xfbtree buffer without finalizing it. */
	list_for_each_entry_safe(lip, n, &tp->t_items, li_trans) {
		struct xfs_buf	*bp = xfbtree_buf_match(xfbt, lip);

		if (!bp) {
			/* Not ours; track whether any remaining item is dirty. */
			if (test_bit(XFS_LI_DIRTY, &lip->li_flags))
				tp_dirty |= true;
			continue;
		}

		trace_xfbtree_trans_cancel_buf(xfbt, bp);

		xmbuf_trans_bdetach(tp, bp);
		xfs_buf_relse(bp);
	}

	/*
	 * Reset the transaction's dirty flag to reflect the dirty state of the
	 * log items that are still attached.
	 */
	tp->t_flags = (tp->t_flags & ~XFS_TRANS_DIRTY) |
			(tp_dirty ? XFS_TRANS_DIRTY : 0);
}
347