xref: /linux/fs/xfs/scrub/bitmap.c (revision 29e31a8ee811f5d85274f0381f13cd6fe650aea4)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2018-2023 Oracle.  All Rights Reserved.
4  * Author: Darrick J. Wong <djwong@kernel.org>
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_bit.h"
10 #include "xfs_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_mount.h"
13 #include "xfs_btree.h"
14 #include "scrub/scrub.h"
15 #include "scrub/bitmap.h"
16 
17 #include <linux/interval_tree_generic.h>
18 
/*
 * One contiguous run of set bits, kept in an augmented interval tree.
 * Runs are non-overlapping and non-adjacent: xbitmap_set() merges
 * touching neighbors, so [bn_start, bn_last] is always maximal.
 */
struct xbitmap_node {
	struct rb_node	bn_rbnode;

	/* First set bit of this interval and subtree. */
	uint64_t	bn_start;

	/* Last set bit of this interval (inclusive). */
	uint64_t	bn_last;

	/*
	 * Last set bit of this subtree.  Maintained by the
	 * INTERVAL_TREE_DEFINE machinery; do not touch this.
	 */
	uint64_t	__bn_subtree_last;
};
31 
/* Define our own interval tree type with uint64_t parameters. */

/* Interval endpoints used by INTERVAL_TREE_DEFINE; both are inclusive. */
#define START(node) ((node)->bn_start)
#define LAST(node)  ((node)->bn_last)

/*
 * These functions are defined by the INTERVAL_TREE_DEFINE macro, but we'll
 * forward-declare them anyway for clarity.
 */
static inline void
xbitmap_tree_insert(struct xbitmap_node *node, struct rb_root_cached *root);

static inline void
xbitmap_tree_remove(struct xbitmap_node *node, struct rb_root_cached *root);

/* Find the first node overlapping [start, last], or NULL. */
static inline struct xbitmap_node *
xbitmap_tree_iter_first(struct rb_root_cached *root, uint64_t start,
			uint64_t last);

/* Find the next node after @node overlapping [start, last], or NULL. */
static inline struct xbitmap_node *
xbitmap_tree_iter_next(struct xbitmap_node *node, uint64_t start,
		       uint64_t last);

/* Instantiate the tree operations declared above. */
INTERVAL_TREE_DEFINE(struct xbitmap_node, bn_rbnode, uint64_t,
		__bn_subtree_last, START, LAST, static inline, xbitmap_tree)
57 
/*
 * Iterate each interval of a bitmap in rbtree (ascending start) order.
 * Do not change the bitmap while iterating; removal would invalidate
 * the cursor node @bn.
 */
#define for_each_xbitmap_extent(bn, bitmap) \
	for ((bn) = rb_entry_safe(rb_first(&(bitmap)->xb_root.rb_root), \
				   struct xbitmap_node, bn_rbnode); \
	     (bn) != NULL; \
	     (bn) = rb_entry_safe(rb_next(&(bn)->bn_rbnode), \
				   struct xbitmap_node, bn_rbnode))
65 
66 /* Clear a range of this bitmap. */
67 int
68 xbitmap_clear(
69 	struct xbitmap		*bitmap,
70 	uint64_t		start,
71 	uint64_t		len)
72 {
73 	struct xbitmap_node	*bn;
74 	struct xbitmap_node	*new_bn;
75 	uint64_t		last = start + len - 1;
76 
77 	while ((bn = xbitmap_tree_iter_first(&bitmap->xb_root, start, last))) {
78 		if (bn->bn_start < start && bn->bn_last > last) {
79 			uint64_t	old_last = bn->bn_last;
80 
81 			/* overlaps with the entire clearing range */
82 			xbitmap_tree_remove(bn, &bitmap->xb_root);
83 			bn->bn_last = start - 1;
84 			xbitmap_tree_insert(bn, &bitmap->xb_root);
85 
86 			/* add an extent */
87 			new_bn = kmalloc(sizeof(struct xbitmap_node),
88 					XCHK_GFP_FLAGS);
89 			if (!new_bn)
90 				return -ENOMEM;
91 			new_bn->bn_start = last + 1;
92 			new_bn->bn_last = old_last;
93 			xbitmap_tree_insert(new_bn, &bitmap->xb_root);
94 		} else if (bn->bn_start < start) {
95 			/* overlaps with the left side of the clearing range */
96 			xbitmap_tree_remove(bn, &bitmap->xb_root);
97 			bn->bn_last = start - 1;
98 			xbitmap_tree_insert(bn, &bitmap->xb_root);
99 		} else if (bn->bn_last > last) {
100 			/* overlaps with the right side of the clearing range */
101 			xbitmap_tree_remove(bn, &bitmap->xb_root);
102 			bn->bn_start = last + 1;
103 			xbitmap_tree_insert(bn, &bitmap->xb_root);
104 			break;
105 		} else {
106 			/* in the middle of the clearing range */
107 			xbitmap_tree_remove(bn, &bitmap->xb_root);
108 			kfree(bn);
109 		}
110 	}
111 
112 	return 0;
113 }
114 
115 /* Set a range of this bitmap. */
116 int
117 xbitmap_set(
118 	struct xbitmap		*bitmap,
119 	uint64_t		start,
120 	uint64_t		len)
121 {
122 	struct xbitmap_node	*left;
123 	struct xbitmap_node	*right;
124 	uint64_t		last = start + len - 1;
125 	int			error;
126 
127 	/* Is this whole range already set? */
128 	left = xbitmap_tree_iter_first(&bitmap->xb_root, start, last);
129 	if (left && left->bn_start <= start && left->bn_last >= last)
130 		return 0;
131 
132 	/* Clear out everything in the range we want to set. */
133 	error = xbitmap_clear(bitmap, start, len);
134 	if (error)
135 		return error;
136 
137 	/* Do we have a left-adjacent extent? */
138 	left = xbitmap_tree_iter_first(&bitmap->xb_root, start - 1, start - 1);
139 	ASSERT(!left || left->bn_last + 1 == start);
140 
141 	/* Do we have a right-adjacent extent? */
142 	right = xbitmap_tree_iter_first(&bitmap->xb_root, last + 1, last + 1);
143 	ASSERT(!right || right->bn_start == last + 1);
144 
145 	if (left && right) {
146 		/* combine left and right adjacent extent */
147 		xbitmap_tree_remove(left, &bitmap->xb_root);
148 		xbitmap_tree_remove(right, &bitmap->xb_root);
149 		left->bn_last = right->bn_last;
150 		xbitmap_tree_insert(left, &bitmap->xb_root);
151 		kfree(right);
152 	} else if (left) {
153 		/* combine with left extent */
154 		xbitmap_tree_remove(left, &bitmap->xb_root);
155 		left->bn_last = last;
156 		xbitmap_tree_insert(left, &bitmap->xb_root);
157 	} else if (right) {
158 		/* combine with right extent */
159 		xbitmap_tree_remove(right, &bitmap->xb_root);
160 		right->bn_start = start;
161 		xbitmap_tree_insert(right, &bitmap->xb_root);
162 	} else {
163 		/* add an extent */
164 		left = kmalloc(sizeof(struct xbitmap_node), XCHK_GFP_FLAGS);
165 		if (!left)
166 			return -ENOMEM;
167 		left->bn_start = start;
168 		left->bn_last = last;
169 		xbitmap_tree_insert(left, &bitmap->xb_root);
170 	}
171 
172 	return 0;
173 }
174 
175 /* Free everything related to this bitmap. */
176 void
177 xbitmap_destroy(
178 	struct xbitmap		*bitmap)
179 {
180 	struct xbitmap_node	*bn;
181 
182 	while ((bn = xbitmap_tree_iter_first(&bitmap->xb_root, 0, -1ULL))) {
183 		xbitmap_tree_remove(bn, &bitmap->xb_root);
184 		kfree(bn);
185 	}
186 }
187 
188 /* Set up a per-AG block bitmap. */
189 void
190 xbitmap_init(
191 	struct xbitmap		*bitmap)
192 {
193 	bitmap->xb_root = RB_ROOT_CACHED;
194 }
195 
196 /*
197  * Remove all the blocks mentioned in @sub from the extents in @bitmap.
198  *
199  * The intent is that callers will iterate the rmapbt for all of its records
200  * for a given owner to generate @bitmap; and iterate all the blocks of the
201  * metadata structures that are not being rebuilt and have the same rmapbt
202  * owner to generate @sub.  This routine subtracts all the extents
203  * mentioned in sub from all the extents linked in @bitmap, which leaves
204  * @bitmap as the list of blocks that are not accounted for, which we assume
205  * are the dead blocks of the old metadata structure.  The blocks mentioned in
206  * @bitmap can be reaped.
207  *
208  * This is the logical equivalent of bitmap &= ~sub.
209  */
210 int
211 xbitmap_disunion(
212 	struct xbitmap		*bitmap,
213 	struct xbitmap		*sub)
214 {
215 	struct xbitmap_node	*bn;
216 	int			error;
217 
218 	if (xbitmap_empty(bitmap) || xbitmap_empty(sub))
219 		return 0;
220 
221 	for_each_xbitmap_extent(bn, sub) {
222 		error = xbitmap_clear(bitmap, bn->bn_start,
223 				bn->bn_last - bn->bn_start + 1);
224 		if (error)
225 			return error;
226 	}
227 
228 	return 0;
229 }
230 
231 /*
232  * Record all btree blocks seen while iterating all records of a btree.
233  *
234  * We know that the btree query_all function starts at the left edge and walks
235  * towards the right edge of the tree.  Therefore, we know that we can walk up
236  * the btree cursor towards the root; if the pointer for a given level points
237  * to the first record/key in that block, we haven't seen this block before;
238  * and therefore we need to remember that we saw this block in the btree.
239  *
240  * So if our btree is:
241  *
242  *    4
243  *  / | \
244  * 1  2  3
245  *
246  * Pretend for this example that each leaf block has 100 btree records.  For
247  * the first btree record, we'll observe that bc_levels[0].ptr == 1, so we
248  * record that we saw block 1.  Then we observe that bc_levels[1].ptr == 1, so
249  * we record block 4.  The list is [1, 4].
250  *
251  * For the second btree record, we see that bc_levels[0].ptr == 2, so we exit
252  * the loop.  The list remains [1, 4].
253  *
254  * For the 101st btree record, we've moved onto leaf block 2.  Now
255  * bc_levels[0].ptr == 1 again, so we record that we saw block 2.  We see that
256  * bc_levels[1].ptr == 2, so we exit the loop.  The list is now [1, 4, 2].
257  *
258  * For the 102nd record, bc_levels[0].ptr == 2, so we continue.
259  *
260  * For the 201st record, we've moved on to leaf block 3.
261  * bc_levels[0].ptr == 1, so we add 3 to the list.  Now it is [1, 4, 2, 3].
262  *
263  * For the 300th record we just exit, with the list being [1, 4, 2, 3].
264  */
265 
266 /* Mark a btree block to the agblock bitmap. */
267 STATIC int
268 xagb_bitmap_visit_btblock(
269 	struct xfs_btree_cur	*cur,
270 	int			level,
271 	void			*priv)
272 {
273 	struct xagb_bitmap	*bitmap = priv;
274 	struct xfs_buf		*bp;
275 	xfs_fsblock_t		fsbno;
276 	xfs_agblock_t		agbno;
277 
278 	xfs_btree_get_block(cur, level, &bp);
279 	if (!bp)
280 		return 0;
281 
282 	fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp));
283 	agbno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno);
284 
285 	return xagb_bitmap_set(bitmap, agbno, 1);
286 }
287 
/*
 * Mark all (per-AG) btree blocks in the agblock bitmap.
 *
 * Walks every block of the btree under @cur, recording each block's
 * AG block number via xagb_bitmap_visit_btblock.  Returns 0 or a
 * negative errno from the walk.
 */
int
xagb_bitmap_set_btblocks(
	struct xagb_bitmap	*bitmap,
	struct xfs_btree_cur	*cur)
{
	return xfs_btree_visit_blocks(cur, xagb_bitmap_visit_btblock,
			XFS_BTREE_VISIT_ALL, bitmap);
}
297 
298 /*
299  * Record all the buffers pointed to by the btree cursor.  Callers already
300  * engaged in a btree walk should call this function to capture the list of
301  * blocks going from the leaf towards the root.
302  */
303 int
304 xbitmap_set_btcur_path(
305 	struct xbitmap		*bitmap,
306 	struct xfs_btree_cur	*cur)
307 {
308 	struct xfs_buf		*bp;
309 	xfs_fsblock_t		fsb;
310 	int			i;
311 	int			error;
312 
313 	for (i = 0; i < cur->bc_nlevels && cur->bc_levels[i].ptr == 1; i++) {
314 		xfs_btree_get_block(cur, i, &bp);
315 		if (!bp)
316 			continue;
317 		fsb = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp));
318 		error = xbitmap_set(bitmap, fsb, 1);
319 		if (error)
320 			return error;
321 	}
322 
323 	return 0;
324 }
325 
326 /* Collect a btree's block in the bitmap. */
327 STATIC int
328 xbitmap_collect_btblock(
329 	struct xfs_btree_cur	*cur,
330 	int			level,
331 	void			*priv)
332 {
333 	struct xbitmap		*bitmap = priv;
334 	struct xfs_buf		*bp;
335 	xfs_fsblock_t		fsbno;
336 
337 	xfs_btree_get_block(cur, level, &bp);
338 	if (!bp)
339 		return 0;
340 
341 	fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp));
342 	return xbitmap_set(bitmap, fsbno, 1);
343 }
344 
/*
 * Walk the btree and mark the bitmap wherever a btree block is found.
 *
 * Each visited block's fsblock number is recorded via
 * xbitmap_collect_btblock.  Returns 0 or a negative errno from the walk.
 */
int
xbitmap_set_btblocks(
	struct xbitmap		*bitmap,
	struct xfs_btree_cur	*cur)
{
	return xfs_btree_visit_blocks(cur, xbitmap_collect_btblock,
			XFS_BTREE_VISIT_ALL, bitmap);
}
354 
355 /* How many bits are set in this bitmap? */
356 uint64_t
357 xbitmap_hweight(
358 	struct xbitmap		*bitmap)
359 {
360 	struct xbitmap_node	*bn;
361 	uint64_t		ret = 0;
362 
363 	for_each_xbitmap_extent(bn, bitmap)
364 		ret += bn->bn_last - bn->bn_start + 1;
365 
366 	return ret;
367 }
368 
369 /* Call a function for every run of set bits in this bitmap. */
370 int
371 xbitmap_walk(
372 	struct xbitmap		*bitmap,
373 	xbitmap_walk_fn		fn,
374 	void			*priv)
375 {
376 	struct xbitmap_node	*bn;
377 	int			error = 0;
378 
379 	for_each_xbitmap_extent(bn, bitmap) {
380 		error = fn(bn->bn_start, bn->bn_last - bn->bn_start + 1, priv);
381 		if (error)
382 			break;
383 	}
384 
385 	return error;
386 }
387 
/* Context for adapting a per-bit callback to the per-run walk. */
struct xbitmap_walk_bits {
	xbitmap_walk_bits_fn	fn;	/* called once per set bit */
	void			*priv;	/* caller's opaque data for fn */
};
392 
393 /* Walk all the bits in a run. */
394 static int
395 xbitmap_walk_bits_in_run(
396 	uint64_t			start,
397 	uint64_t			len,
398 	void				*priv)
399 {
400 	struct xbitmap_walk_bits	*wb = priv;
401 	uint64_t			i;
402 	int				error = 0;
403 
404 	for (i = start; i < start + len; i++) {
405 		error = wb->fn(i, wb->priv);
406 		if (error)
407 			break;
408 	}
409 
410 	return error;
411 }
412 
413 /* Call a function for every set bit in this bitmap. */
414 int
415 xbitmap_walk_bits(
416 	struct xbitmap			*bitmap,
417 	xbitmap_walk_bits_fn		fn,
418 	void				*priv)
419 {
420 	struct xbitmap_walk_bits	wb = {.fn = fn, .priv = priv};
421 
422 	return xbitmap_walk(bitmap, xbitmap_walk_bits_in_run, &wb);
423 }
424 
425 /* Does this bitmap have no bits set at all? */
426 bool
427 xbitmap_empty(
428 	struct xbitmap		*bitmap)
429 {
430 	return bitmap->xb_root.rb_root.rb_node == NULL;
431 }
432 
433 /* Is the start of the range set or clear?  And for how long? */
434 bool
435 xbitmap_test(
436 	struct xbitmap		*bitmap,
437 	uint64_t		start,
438 	uint64_t		*len)
439 {
440 	struct xbitmap_node	*bn;
441 	uint64_t		last = start + *len - 1;
442 
443 	bn = xbitmap_tree_iter_first(&bitmap->xb_root, start, last);
444 	if (!bn)
445 		return false;
446 	if (bn->bn_start <= start) {
447 		if (bn->bn_last < last)
448 			*len = bn->bn_last - start + 1;
449 		return true;
450 	}
451 	*len = bn->bn_start - start;
452 	return false;
453 }
454