xref: /linux/fs/xfs/scrub/bitmap.c (revision 9f2c9170934eace462499ba0bfe042cc72900173)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "scrub/scrub.h"
#include "scrub/bitmap.h"

/*
 * Set a range of this bitmap.  Caller must ensure the range is not set.
 *
 * This is the logical equivalent of bitmap |= mask(start, len).
 */
int
xbitmap_set(
	struct xbitmap		*bitmap,
	uint64_t		start,
	uint64_t		len)
{
	struct xbitmap_range	*bmr;

	bmr = kmalloc(sizeof(struct xbitmap_range), XCHK_GFP_FLAGS);
	if (!bmr)
		return -ENOMEM;

	INIT_LIST_HEAD(&bmr->list);
	bmr->start = start;
	bmr->len = len;
	list_add_tail(&bmr->list, &bitmap->list);

	return 0;
}

/* Free everything related to this bitmap. */
void
xbitmap_destroy(
	struct xbitmap		*bitmap)
{
	struct xbitmap_range	*bmr;
	struct xbitmap_range	*n;

	for_each_xbitmap_extent(bmr, n, bitmap) {
		list_del(&bmr->list);
		kfree(bmr);
	}
}

/* Initialize an empty bitmap. */
void
xbitmap_init(
	struct xbitmap		*bitmap)
{
	INIT_LIST_HEAD(&bitmap->list);
}
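
/*
 * Illustrative sketch (hypothetical helper, not used anywhere): a typical
 * caller initializes the bitmap, marks a few disjoint ranges, and tears
 * everything down when finished.
 */
static inline int
xbitmap_example_lifecycle(void)
{
	struct xbitmap	bits;
	int		error;

	xbitmap_init(&bits);
	error = xbitmap_set(&bits, 0, 10);		/* mark blocks [0, 10) */
	if (!error)
		error = xbitmap_set(&bits, 25, 5);	/* mark blocks [25, 30) */
	xbitmap_destroy(&bits);
	return error;
}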

/* Compare the starting offsets of two bitmap ranges. */
static int
xbitmap_range_cmp(
	void			*priv,
	const struct list_head	*a,
	const struct list_head	*b)
{
	struct xbitmap_range	*ap;
	struct xbitmap_range	*bp;

	ap = container_of(a, struct xbitmap_range, list);
	bp = container_of(b, struct xbitmap_range, list);

	if (ap->start > bp->start)
		return 1;
	if (ap->start < bp->start)
		return -1;
	return 0;
}

/*
 * Remove all the blocks mentioned in @sub from the extents in @bitmap.
 *
 * The intent is that callers will iterate the rmapbt for all of its records
 * for a given owner to generate @bitmap; and iterate all the blocks of the
 * metadata structures that are not being rebuilt and have the same rmapbt
 * owner to generate @sub.  This routine subtracts all the extents
 * mentioned in @sub from all the extents linked in @bitmap, which leaves
 * @bitmap as the list of blocks that are not accounted for; we assume these
 * are the dead blocks of the old metadata structure, and they can be reaped.
 *
 * This is the logical equivalent of bitmap &= ~sub.
 */
#define LEFT_ALIGNED	(1 << 0)
#define RIGHT_ALIGNED	(1 << 1)
int
xbitmap_disunion(
	struct xbitmap		*bitmap,
	struct xbitmap		*sub)
{
	struct list_head	*lp;
	struct xbitmap_range	*br;
	struct xbitmap_range	*new_br;
	struct xbitmap_range	*sub_br;
	uint64_t		sub_start;
	uint64_t		sub_len;
	int			state;
	int			error = 0;

	if (list_empty(&bitmap->list) || list_empty(&sub->list))
		return 0;
	ASSERT(!list_empty(&sub->list));

	list_sort(NULL, &bitmap->list, xbitmap_range_cmp);
	list_sort(NULL, &sub->list, xbitmap_range_cmp);

	/*
	 * Now that we've sorted both lists, we iterate bitmap once, rolling
	 * forward through sub and/or bitmap as necessary until we find an
	 * overlap or reach the end of either list.  We do not reset lp to the
	 * head of bitmap nor do we reset sub_br to the head of sub.  The
	 * list traversal is similar to merge sort, but we're deleting
	 * instead.  In this manner we avoid O(n^2) operations.
	 */
	sub_br = list_first_entry(&sub->list, struct xbitmap_range,
			list);
	lp = bitmap->list.next;
	while (lp != &bitmap->list) {
		br = list_entry(lp, struct xbitmap_range, list);

		/*
		 * Advance sub_br and/or br until we find a pair that
		 * intersects, or we run out of extents.
		 */
		while (sub_br->start + sub_br->len <= br->start) {
			if (list_is_last(&sub_br->list, &sub->list))
				goto out;
			sub_br = list_next_entry(sub_br, list);
		}
		if (sub_br->start >= br->start + br->len) {
			lp = lp->next;
			continue;
		}

		/* trim sub_br to fit the extent we have */
		sub_start = sub_br->start;
		sub_len = sub_br->len;
		if (sub_br->start < br->start) {
			sub_len -= br->start - sub_br->start;
			sub_start = br->start;
		}
		if (sub_len > br->len)
			sub_len = br->len;

		state = 0;
		if (sub_start == br->start)
			state |= LEFT_ALIGNED;
		if (sub_start + sub_len == br->start + br->len)
			state |= RIGHT_ALIGNED;
		switch (state) {
		case LEFT_ALIGNED:
			/* Coincides with only the left. */
			br->start += sub_len;
			br->len -= sub_len;
			break;
		case RIGHT_ALIGNED:
			/* Coincides with only the right. */
			br->len -= sub_len;
			lp = lp->next;
			break;
		case LEFT_ALIGNED | RIGHT_ALIGNED:
			/* Total overlap, just delete br. */
			lp = lp->next;
			list_del(&br->list);
			kfree(br);
			break;
		case 0:
			/*
			 * Deleting from the middle: add the new right extent
			 * and then shrink the left extent.
			 */
			new_br = kmalloc(sizeof(struct xbitmap_range),
					XCHK_GFP_FLAGS);
			if (!new_br) {
				error = -ENOMEM;
				goto out;
			}
			INIT_LIST_HEAD(&new_br->list);
			new_br->start = sub_start + sub_len;
			new_br->len = br->start + br->len - new_br->start;
			list_add(&new_br->list, &br->list);
			br->len = sub_start - br->start;
			lp = lp->next;
			break;
		default:
			ASSERT(0);
			break;
		}
	}

out:
	return error;
}
#undef LEFT_ALIGNED
#undef RIGHT_ALIGNED
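
/*
 * Illustrative sketch (hypothetical helper): subtracting sub = {[44, 48)}
 * from bitmap = {[40, 50)} exercises the "case 0" branch above, leaving
 * bitmap = {[40, 44), [48, 50)}.
 */
static inline int
xbitmap_example_disunion(void)
{
	struct xbitmap	bitmap;
	struct xbitmap	sub;
	int		error;

	xbitmap_init(&bitmap);
	xbitmap_init(&sub);

	error = xbitmap_set(&bitmap, 40, 10);		/* [40, 50) */
	if (!error)
		error = xbitmap_set(&sub, 44, 4);	/* [44, 48) */
	if (!error)
		error = xbitmap_disunion(&bitmap, &sub);

	/* bitmap now holds [40, 44) and [48, 50). */
	xbitmap_destroy(&sub);
	xbitmap_destroy(&bitmap);
	return error;
}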

/*
 * Record all btree blocks seen while iterating all records of a btree.
 *
 * We know that the btree query_all function starts at the left edge and walks
 * towards the right edge of the tree.  Therefore, we know that we can walk up
 * the btree cursor towards the root; if the pointer for a given level points
 * to the first record/key in that block, we haven't seen this block before;
 * and therefore we need to remember that we saw this block in the btree.
 *
 * So if our btree is:
 *
 *    4
 *  / | \
 * 1  2  3
 *
 * Pretend for this example that each leaf block has 100 btree records.  For
 * the first btree record, we'll observe that bc_levels[0].ptr == 1, so we
 * record that we saw block 1.  Then we observe that bc_levels[1].ptr == 1, so
 * we record block 4.  The list is [1, 4].
 *
 * For the second btree record, we see that bc_levels[0].ptr == 2, so we exit
 * the loop.  The list remains [1, 4].
 *
 * For the 101st btree record, we've moved on to leaf block 2.  Now
 * bc_levels[0].ptr == 1 again, so we record that we saw block 2.  We see that
 * bc_levels[1].ptr == 2, so we exit the loop.  The list is now [1, 4, 2].
 *
 * For the 102nd record, bc_levels[0].ptr == 2, so we continue.
 *
 * For the 201st record, we've moved on to leaf block 3.
 * bc_levels[0].ptr == 1, so we add 3 to the list.  Now it is [1, 4, 2, 3].
 *
 * For the 300th record we just exit, with the list being [1, 4, 2, 3].
 */

/*
 * Record all the buffers pointed to by the btree cursor.  Callers already
 * engaged in a btree walk should call this function to capture the list of
 * blocks going from the leaf towards the root.
 */
int
xbitmap_set_btcur_path(
	struct xbitmap		*bitmap,
	struct xfs_btree_cur	*cur)
{
	struct xfs_buf		*bp;
	xfs_fsblock_t		fsb;
	int			i;
	int			error;

	for (i = 0; i < cur->bc_nlevels && cur->bc_levels[i].ptr == 1; i++) {
		xfs_btree_get_block(cur, i, &bp);
		if (!bp)
			continue;
		fsb = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp));
		error = xbitmap_set(bitmap, fsb, 1);
		if (error)
			return error;
	}

	return 0;
}
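
/*
 * Illustrative sketch (hypothetical callback): a query_all callback that
 * records the path to each record it visits, in the manner described by
 * the comment above.  Assumes the xfs_btree_query_range_fn signature from
 * xfs_btree.h.
 */
static int
xbitmap_example_visit_rec(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*rec,
	void				*priv)
{
	struct xbitmap			*bitmap = priv;

	/* Remember each btree block on the path to this record. */
	return xbitmap_set_btcur_path(bitmap, cur);
}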

/* Collect a btree block in the bitmap. */
STATIC int
xbitmap_collect_btblock(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*priv)
{
	struct xbitmap		*bitmap = priv;
	struct xfs_buf		*bp;
	xfs_fsblock_t		fsbno;

	xfs_btree_get_block(cur, level, &bp);
	if (!bp)
		return 0;

	fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp));
	return xbitmap_set(bitmap, fsbno, 1);
}

/* Walk the btree and mark the bitmap wherever a btree block is found. */
int
xbitmap_set_btblocks(
	struct xbitmap		*bitmap,
	struct xfs_btree_cur	*cur)
{
	return xfs_btree_visit_blocks(cur, xbitmap_collect_btblock,
			XFS_BTREE_VISIT_ALL, bitmap);
}

/* How many bits are set in this bitmap? */
uint64_t
xbitmap_hweight(
	struct xbitmap		*bitmap)
{
	struct xbitmap_range	*bmr;
	struct xbitmap_range	*n;
	uint64_t		ret = 0;

	for_each_xbitmap_extent(bmr, n, bitmap)
		ret += bmr->len;

	return ret;
}
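
/*
 * Usage note (illustrative): xbitmap_hweight sums extent lengths, so the
 * "weight" counts blocks, not extents.  With [0, 3) and [10, 12) recorded,
 * it returns 5.
 */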
316