// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013 Fusion IO.  All rights reserved.
 */

#include <linux/slab.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../disk-io.h"
#include "../free-space-cache.h"
#include "../block-group.h"

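/* One bitmap entry holds a full page of bits, each bit covering one sectorsize block. */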
#define BITS_PER_BITMAP		(PAGE_SIZE * 8UL)

/*
 * This test just does basic sanity checking, making sure we can add an extent
 * entry and remove space from either end and the middle, and make sure we can
 * remove space that covers adjacent extent entries.
 */
static int test_extents(struct btrfs_block_group *cache)
{
	int ret = 0;

	test_msg("running extent only tests");

	/* First just make sure we can remove an entire entry */
	ret = btrfs_add_free_space(cache, 0, SZ_4M);
	if (ret) {
		test_err("error adding initial extents %d", ret);
		return ret;
	}

	ret = btrfs_remove_free_space(cache, 0, SZ_4M);
	if (ret) {
		test_err("error removing extent %d", ret);
		return ret;
	}

	if (test_check_exists(cache, 0, SZ_4M)) {
		test_err("full remove left some lingering space");
		return -1;
	}

	/* Ok edge and middle cases now */
	ret = btrfs_add_free_space(cache, 0, SZ_4M);
	if (ret) {
		test_err("error adding half extent %d", ret);
		return ret;
	}

	ret = btrfs_remove_free_space(cache, 3 * SZ_1M, SZ_1M);
	if (ret) {
		test_err("error removing tail end %d", ret);
		return ret;
	}

	ret = btrfs_remove_free_space(cache, 0, SZ_1M);
	if (ret) {
		test_err("error removing front end %d", ret);
		return ret;
	}

	ret = btrfs_remove_free_space(cache, SZ_2M, 4096);
	if (ret) {
		test_err("error removing middle piece %d", ret);
		return ret;
	}

	if (test_check_exists(cache, 0, SZ_1M)) {
		test_err("still have space at the front");
		return -1;
	}

	if (test_check_exists(cache, SZ_2M, 4096)) {
		test_err("still have space in the middle");
		return -1;
	}

	if (test_check_exists(cache, 3 * SZ_1M, SZ_1M)) {
		test_err("still have space at the end");
		return -1;
	}

	/* Cleanup */
	btrfs_remove_free_space_cache(cache);

	return 0;
}

static int test_bitmaps(struct btrfs_block_group *cache, u32 sectorsize)
{
	u64 next_bitmap_offset;
	int ret;

	test_msg("running bitmap only tests");

	ret = test_add_free_space_entry(cache, 0, SZ_4M, 1);
	if (ret) {
		test_err("couldn't create a bitmap entry %d", ret);
		return ret;
	}

	ret = btrfs_remove_free_space(cache, 0, SZ_4M);
	if (ret) {
		test_err("error removing bitmap full range %d", ret);
		return ret;
	}

	if (test_check_exists(cache, 0, SZ_4M)) {
		test_err("left some space in bitmap");
		return -1;
	}

	ret = test_add_free_space_entry(cache, 0, SZ_4M, 1);
	if (ret) {
		test_err("couldn't add to our bitmap entry %d", ret);
		return ret;
	}

	ret = btrfs_remove_free_space(cache, SZ_1M, SZ_2M);
	if (ret) {
		test_err("couldn't remove middle chunk %d", ret);
		return ret;
	}

	/*
	 * The first bitmap we have starts at offset 0 so the next one is just
	 * at the end of the first bitmap.
	 */
	next_bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize);

	/* Test a bit straddling two bitmaps */
	ret = test_add_free_space_entry(cache, next_bitmap_offset - SZ_2M,
					SZ_4M, 1);
	if (ret) {
		test_err("couldn't add space that straddles two bitmaps %d",
				ret);
		return ret;
	}

	ret = btrfs_remove_free_space(cache, next_bitmap_offset - SZ_1M, SZ_2M);
	if (ret) {
		test_err("couldn't remove overlapping space %d", ret);
		return ret;
	}

	if (test_check_exists(cache, next_bitmap_offset - SZ_1M, SZ_2M)) {
		test_err("left some space when removing overlapping");
		return -1;
	}

	btrfs_remove_free_space_cache(cache);

	return 0;
}

/* This is the high grade jackassery */
static int test_bitmaps_and_extents(struct btrfs_block_group *cache,
				    u32 sectorsize)
{
	u64 bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize);
	int ret;

	test_msg("running bitmap and extent tests");

	/*
	 * First let's do something simple, an extent at the same offset as the
	 * bitmap, but the free space completely in the extent and then
	 * completely in the bitmap.
	 */
	ret = test_add_free_space_entry(cache, SZ_4M, SZ_1M, 1);
	if (ret) {
		test_err("couldn't create bitmap entry %d", ret);
		return ret;
	}

	ret = test_add_free_space_entry(cache, 0, SZ_1M, 0);
	if (ret) {
		test_err("couldn't add extent entry %d", ret);
		return ret;
	}

	ret = btrfs_remove_free_space(cache, 0, SZ_1M);
	if (ret) {
		test_err("couldn't remove extent entry %d", ret);
		return ret;
	}

	if (test_check_exists(cache, 0, SZ_1M)) {
		test_err("left remnants after our remove");
		return -1;
	}

	/* Now to add back the extent entry and remove from the bitmap */
	ret = test_add_free_space_entry(cache, 0, SZ_1M, 0);
	if (ret) {
		test_err("couldn't re-add extent entry %d", ret);
		return ret;
	}

	ret = btrfs_remove_free_space(cache, SZ_4M, SZ_1M);
	if (ret) {
		test_err("couldn't remove from bitmap %d", ret);
		return ret;
	}

	if (test_check_exists(cache, SZ_4M, SZ_1M)) {
		test_err("left remnants in the bitmap");
		return -1;
	}

	/*
	 * Ok so a little more evil, extent entry and bitmap at the same offset,
	 * removing an overlapping chunk.
	 */
	ret = test_add_free_space_entry(cache, SZ_1M, SZ_4M, 1);
	if (ret) {
		test_err("couldn't add to a bitmap %d", ret);
		return ret;
	}

	ret = btrfs_remove_free_space(cache, SZ_512K, 3 * SZ_1M);
	if (ret) {
		test_err("couldn't remove overlapping space %d", ret);
		return ret;
	}

	if (test_check_exists(cache, SZ_512K, 3 * SZ_1M)) {
		test_err("left over pieces after removing overlapping");
		return -1;
	}

	btrfs_remove_free_space_cache(cache);

	/* Now with the extent entry offset into the bitmap */
	ret = test_add_free_space_entry(cache, SZ_4M, SZ_4M, 1);
	if (ret) {
		test_err("couldn't add space to the bitmap %d", ret);
		return ret;
	}

	ret = test_add_free_space_entry(cache, SZ_2M, SZ_2M, 0);
	if (ret) {
		test_err("couldn't add extent to the cache %d", ret);
		return ret;
	}

	ret = btrfs_remove_free_space(cache, 3 * SZ_1M, SZ_4M);
	if (ret) {
		test_err("problem removing overlapping space %d", ret);
		return ret;
	}

	if (test_check_exists(cache, 3 * SZ_1M, SZ_4M)) {
		test_err("left something behind when removing space");
		return -1;
	}

	/*
	 * This has blown up in the past, the extent entry starts before the
	 * bitmap entry, but we're trying to remove an offset that falls
	 * completely within the bitmap range and is in both the extent entry
	 * and the bitmap entry, looks like this
	 *
	 *   [ extent ]
	 *      [ bitmap ]
	 *        [ del ]
	 */
	btrfs_remove_free_space_cache(cache);
	ret = test_add_free_space_entry(cache, bitmap_offset + SZ_4M, SZ_4M, 1);
	if (ret) {
		test_err("couldn't add bitmap %d", ret);
		return ret;
	}

	ret = test_add_free_space_entry(cache, bitmap_offset - SZ_1M,
					5 * SZ_1M, 0);
	if (ret) {
		test_err("couldn't add extent entry %d", ret);
		return ret;
	}

	ret = btrfs_remove_free_space(cache, bitmap_offset + SZ_1M, 5 * SZ_1M);
	if (ret) {
		test_err("failed to free our space %d", ret);
		return ret;
	}

	if (test_check_exists(cache, bitmap_offset + SZ_1M, 5 * SZ_1M)) {
		test_err("left stuff over");
		return -1;
	}

	btrfs_remove_free_space_cache(cache);

	/*
	 * This blew up before, we have part of the free space in a bitmap and
	 * then the entirety of the rest of the space in an extent.  This used
	 * to return -EAGAIN back from btrfs_remove_extent, make sure this
	 * doesn't happen.
	 */
	ret = test_add_free_space_entry(cache, SZ_1M, SZ_2M, 1);
	if (ret) {
		test_err("couldn't add bitmap entry %d", ret);
		return ret;
	}

	ret = test_add_free_space_entry(cache, 3 * SZ_1M, SZ_1M, 0);
	if (ret) {
		test_err("couldn't add extent entry %d", ret);
		return ret;
	}

	ret = btrfs_remove_free_space(cache, SZ_1M, 3 * SZ_1M);
	if (ret) {
		test_err("error removing bitmap and extent overlapping %d", ret);
		return ret;
	}

	btrfs_remove_free_space_cache(cache);
	return 0;
}

/* Used by test_steal_space_from_bitmap_to_extent(). */
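/* Force any new free space into a bitmap as soon as at least one extent entry exists. */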
static bool test_use_bitmap(struct btrfs_free_space_ctl *ctl,
			    struct btrfs_free_space *info)
{
	return ctl->free_extents > 0;
}

/* Used by test_steal_space_from_bitmap_to_extent(). */
static int
check_num_extents_and_bitmaps(const struct btrfs_block_group *cache,
			      const int num_extents,
			      const int num_bitmaps)
{
	if (cache->free_space_ctl->free_extents != num_extents) {
		test_err(
		"incorrect # of extent entries in the cache: %d, expected %d",
			 cache->free_space_ctl->free_extents, num_extents);
		return -EINVAL;
	}
	if (cache->free_space_ctl->total_bitmaps != num_bitmaps) {
		test_err(
		"incorrect # of bitmap entries in the cache: %d, expected %d",
			 cache->free_space_ctl->total_bitmaps, num_bitmaps);
		return -EINVAL;
	}
	return 0;
}

/* Used by test_steal_space_from_bitmap_to_extent(). */
static int check_cache_empty(struct btrfs_block_group *cache)
{
	u64 offset;
	u64 max_extent_size;

	/*
	 * Now let's confirm that there's absolutely no free space left to
	 * allocate.
	 */
	if (cache->free_space_ctl->free_space != 0) {
		test_err("cache free space is not 0");
		return -EINVAL;
	}

	/* And any allocation request, no matter how small, should fail now. */
	offset = btrfs_find_space_for_alloc(cache, 0, 4096, 0,
					    &max_extent_size);
	if (offset != 0) {
		test_err("space allocation did not fail, returned offset: %llu",
			 offset);
		return -EINVAL;
	}

	/* And no extent nor bitmap entries in the cache anymore. */
	return check_num_extents_and_bitmaps(cache, 0, 0);
}

/*
 * Before we were able to steal free space from a bitmap entry to an extent
 * entry, we could end up with 2 entries representing a contiguous free space.
 * One would be an extent entry and the other a bitmap entry. Since in order
 * to allocate space to a caller we use only 1 entry, we couldn't return that
 * whole range to the caller if it was requested. This forced the caller to
 * either assume ENOSPC or perform several smaller space allocations, which
 * wasn't optimal as they could be spread all over the block group while under
 * concurrency (extra overhead and fragmentation).
 *
 * This stealing approach is beneficial, since we always prefer to allocate
 * from extent entries, both for clustered and non-clustered allocation
 * requests.
 */
static int
test_steal_space_from_bitmap_to_extent(struct btrfs_block_group *cache,
				       u32 sectorsize)
{
	int ret;
	u64 offset;
	u64 max_extent_size;
	const struct btrfs_free_space_op test_free_space_ops = {
		.use_bitmap = test_use_bitmap,
	};
	const struct btrfs_free_space_op *orig_free_space_ops;

	test_msg("running space stealing from bitmap to extent tests");

	/*
	 * For this test, we want to ensure we end up with an extent entry
	 * immediately adjacent to a bitmap entry, where the bitmap starts
	 * at an offset where the extent entry ends. We keep adding and
	 * removing free space to reach this state, but to get there
	 * we need to reach a point where marking new free space doesn't
	 * result in adding new extent entries or merging the new space
	 * with existing extent entries - the space ends up being marked
	 * in an existing bitmap that covers the new free space range.
	 *
	 * To get there, we need to reach the threshold defined at
	 * cache->free_space_ctl->extents_thresh, which currently is
	 * 256 extents on an x86_64 system at least, and a few other
	 * conditions (check free_space_cache.c). Instead of making the
	 * test much longer and complicated, use a "use_bitmap" operation
	 * that forces use of bitmaps as soon as we have at least 1
	 * extent entry.
	 */
	orig_free_space_ops = cache->free_space_ctl->op;
	cache->free_space_ctl->op = &test_free_space_ops;

	/*
	 * Extent entry covering free space range [128Mb - 256Kb, 128Mb - 128Kb[
	 */
	ret = test_add_free_space_entry(cache, SZ_128M - SZ_256K, SZ_128K, 0);
	if (ret) {
		test_err("couldn't add extent entry %d", ret);
		return ret;
	}

	/* Bitmap entry covering free space range [128Mb + 512Kb, 256Mb[ */
	ret = test_add_free_space_entry(cache, SZ_128M + SZ_512K,
					SZ_128M - SZ_512K, 1);
	if (ret) {
		test_err("couldn't add bitmap entry %d", ret);
		return ret;
	}

	ret = check_num_extents_and_bitmaps(cache, 2, 1);
	if (ret)
		return ret;

	/*
	 * Now make only the first 256Kb of the bitmap marked as free, so that
	 * we end up with only the following ranges marked as free space:
	 *
	 * [128Mb - 256Kb, 128Mb - 128Kb[
	 * [128Mb + 512Kb, 128Mb + 768Kb[
	 */
	ret = btrfs_remove_free_space(cache,
				      SZ_128M + 768 * SZ_1K,
				      SZ_128M - 768 * SZ_1K);
	if (ret) {
		test_err("failed to free part of bitmap space %d", ret);
		return ret;
	}

	/* Confirm that only those 2 ranges are marked as free. */
	if (!test_check_exists(cache, SZ_128M - SZ_256K, SZ_128K)) {
		test_err("free space range missing");
		return -ENOENT;
	}
	if (!test_check_exists(cache, SZ_128M + SZ_512K, SZ_256K)) {
		test_err("free space range missing");
		return -ENOENT;
	}

	/*
	 * Confirm that the bitmap range [128Mb + 768Kb, 256Mb[ isn't marked
	 * as free anymore.
	 */
	if (test_check_exists(cache, SZ_128M + 768 * SZ_1K,
			      SZ_128M - 768 * SZ_1K)) {
		test_err("bitmap region not removed from space cache");
		return -EINVAL;
	}

	/*
	 * Confirm that the region [128Mb + 256Kb, 128Mb + 512Kb[, which is
	 * covered by the bitmap, isn't marked as free.
	 */
	if (test_check_exists(cache, SZ_128M + SZ_256K, SZ_256K)) {
		test_err("invalid bitmap region marked as free");
		return -EINVAL;
	}

	/*
	 * Confirm that the region [128Mb, 128Mb + 256Kb[, which is covered
	 * by the bitmap too, isn't marked as free either.
	 */
	if (test_check_exists(cache, SZ_128M, SZ_256K)) {
		test_err("invalid bitmap region marked as free");
		return -EINVAL;
	}

	/*
	 * Now let's mark the region [128Mb, 128Mb + 512Kb[ as free too. But
	 * let's make sure the free space cache marks it as free in the bitmap,
	 * and doesn't insert a new extent entry to represent this region.
	 */
	ret = btrfs_add_free_space(cache, SZ_128M, SZ_512K);
	if (ret) {
		test_err("error adding free space: %d", ret);
		return ret;
	}
	/* Confirm the region is marked as free. */
	if (!test_check_exists(cache, SZ_128M, SZ_512K)) {
		test_err("bitmap region not marked as free");
		return -ENOENT;
	}

	/*
	 * Confirm that no new extent entries or bitmap entries were added to
	 * the cache after adding that free space region.
	 */
	ret = check_num_extents_and_bitmaps(cache, 2, 1);
	if (ret)
		return ret;

	/*
	 * Now let's add a small free space region to the right of the previous
	 * one, which is not contiguous with it and is part of the bitmap too.
	 * The goal is to test that the bitmap entry space stealing doesn't
	 * steal this space region.
	 */
	ret = btrfs_add_free_space(cache, SZ_128M + SZ_16M, sectorsize);
	if (ret) {
		test_err("error adding free space: %d", ret);
		return ret;
	}

	/*
	 * Confirm that no new extent entries or bitmap entries were added to
	 * the cache after adding that free space region.
	 */
	ret = check_num_extents_and_bitmaps(cache, 2, 1);
	if (ret)
		return ret;

	/*
	 * Now mark the region [128Mb - 128Kb, 128Mb[ as free too. This will
	 * expand the range covered by the existing extent entry that represents
	 * the free space [128Mb - 256Kb, 128Mb - 128Kb[.
	 */
	ret = btrfs_add_free_space(cache, SZ_128M - SZ_128K, SZ_128K);
	if (ret) {
		test_err("error adding free space: %d", ret);
		return ret;
	}
	/* Confirm the region is marked as free. */
	if (!test_check_exists(cache, SZ_128M - SZ_128K, SZ_128K)) {
		test_err("extent region not marked as free");
		return -ENOENT;
	}

	/*
	 * Confirm that our extent entry didn't steal all the free space from
	 * the bitmap, because of the small sectorsize free space region.
	 */
	ret = check_num_extents_and_bitmaps(cache, 2, 1);
	if (ret)
		return ret;

	/*
	 * So now we have the range [128Mb - 256Kb, 128Mb + 768Kb[ as free
	 * space. Without stealing bitmap free space into extent entry space,
	 * we would have all this free space represented by 2 entries in the
	 * cache:
	 *
	 * extent entry covering range: [128Mb - 256Kb, 128Mb[
	 * bitmap entry covering range: [128Mb, 128Mb + 768Kb[
	 *
	 * Attempting to allocate the whole free space (1Mb) would fail, because
	 * we can't allocate from multiple entries.
	 * With the bitmap free space stealing, we get a single extent entry
	 * that represents the 1Mb free space, and therefore we're able to
	 * allocate the whole free space at once.
	 */
	if (!test_check_exists(cache, SZ_128M - SZ_256K, SZ_1M)) {
		test_err("expected region not marked as free");
		return -ENOENT;
	}

	if (cache->free_space_ctl->free_space != (SZ_1M + sectorsize)) {
		test_err("cache free space is not 1Mb + %u", sectorsize);
		return -EINVAL;
	}

	offset = btrfs_find_space_for_alloc(cache,
					    0, SZ_1M, 0,
					    &max_extent_size);
	if (offset != (SZ_128M - SZ_256K)) {
		test_err(
	"failed to allocate 1Mb from space cache, returned offset is: %llu",
			 offset);
		return -EINVAL;
	}

	/*
	 * All that remains is a sectorsize free space region in a bitmap.
	 * Confirm.
	 */
	ret = check_num_extents_and_bitmaps(cache, 1, 1);
	if (ret)
		return ret;

	if (cache->free_space_ctl->free_space != sectorsize) {
		test_err("cache free space is not %u", sectorsize);
		return -EINVAL;
	}

	offset = btrfs_find_space_for_alloc(cache,
					    0, sectorsize, 0,
					    &max_extent_size);
	if (offset != (SZ_128M + SZ_16M)) {
		test_err("failed to allocate %u, returned offset: %llu",
			 sectorsize, offset);
		return -EINVAL;
	}

	ret = check_cache_empty(cache);
	if (ret)
		return ret;

	btrfs_remove_free_space_cache(cache);

	/*
	 * Now test a similar scenario, but where our extent entry is located
	 * to the right of the bitmap entry, so that we can check that stealing
	 * space from a bitmap to the front of an extent entry works.
	 */

	/*
	 * Extent entry covering free space range [128Mb + 128Kb, 128Mb + 256Kb[
	 */
	ret = test_add_free_space_entry(cache, SZ_128M + SZ_128K, SZ_128K, 0);
	if (ret) {
		test_err("couldn't add extent entry %d", ret);
		return ret;
	}

	/* Bitmap entry covering free space range [0, 128Mb - 512Kb[ */
	ret = test_add_free_space_entry(cache, 0, SZ_128M - SZ_512K, 1);
	if (ret) {
		test_err("couldn't add bitmap entry %d", ret);
		return ret;
	}

	ret = check_num_extents_and_bitmaps(cache, 2, 1);
	if (ret)
		return ret;

	/*
	 * Now make only the last 256Kb of the bitmap marked as free, so that
	 * we end up with only the following ranges marked as free space:
	 *
	 * [128Mb + 128Kb, 128Mb + 256Kb[
	 * [128Mb - 768Kb, 128Mb - 512Kb[
	 */
	ret = btrfs_remove_free_space(cache, 0, SZ_128M - 768 * SZ_1K);
	if (ret) {
		test_err("failed to free part of bitmap space %d", ret);
		return ret;
	}

	/* Confirm that only those 2 ranges are marked as free. */
	if (!test_check_exists(cache, SZ_128M + SZ_128K, SZ_128K)) {
		test_err("free space range missing");
		return -ENOENT;
	}
	if (!test_check_exists(cache, SZ_128M - 768 * SZ_1K, SZ_256K)) {
		test_err("free space range missing");
		return -ENOENT;
	}

	/*
	 * Confirm that the bitmap range [0, 128Mb - 768Kb[ isn't marked
	 * as free anymore.
	 */
	if (test_check_exists(cache, 0, SZ_128M - 768 * SZ_1K)) {
		test_err("bitmap region not removed from space cache");
		return -EINVAL;
	}

	/*
	 * Confirm that the region [128Mb - 512Kb, 128Mb[, which is
	 * covered by the bitmap, isn't marked as free.
	 */
	if (test_check_exists(cache, SZ_128M - SZ_512K, SZ_512K)) {
		test_err("invalid bitmap region marked as free");
		return -EINVAL;
	}

	/*
	 * Now let's mark the region [128Mb - 512Kb, 128Mb[ as free too. But
	 * let's make sure the free space cache marks it as free in the bitmap,
	 * and doesn't insert a new extent entry to represent this region.
	 */
	ret = btrfs_add_free_space(cache, SZ_128M - SZ_512K, SZ_512K);
	if (ret) {
		test_err("error adding free space: %d", ret);
		return ret;
	}
	/* Confirm the region is marked as free. */
	if (!test_check_exists(cache, SZ_128M - SZ_512K, SZ_512K)) {
		test_err("bitmap region not marked as free");
		return -ENOENT;
	}

	/*
	 * Confirm that no new extent entries or bitmap entries were added to
	 * the cache after adding that free space region.
	 */
	ret = check_num_extents_and_bitmaps(cache, 2, 1);
	if (ret)
		return ret;

	/*
	 * Now let's add a small free space region to the left of the previous
	 * one, which is not contiguous with it and is part of the bitmap too.
	 * The goal is to test that the bitmap entry space stealing doesn't
	 * steal this space region.
	 */
	ret = btrfs_add_free_space(cache, SZ_32M, 2 * sectorsize);
	if (ret) {
		test_err("error adding free space: %d", ret);
		return ret;
	}

	/*
	 * Now mark the region [128Mb, 128Mb + 128Kb[ as free too. This will
	 * expand the range covered by the existing extent entry that represents
	 * the free space [128Mb + 128Kb, 128Mb + 256Kb[.
	 */
	ret = btrfs_add_free_space(cache, SZ_128M, SZ_128K);
	if (ret) {
		test_err("error adding free space: %d", ret);
		return ret;
	}
	/* Confirm the region is marked as free. */
	if (!test_check_exists(cache, SZ_128M, SZ_128K)) {
		test_err("extent region not marked as free");
		return -ENOENT;
	}

	/*
	 * Confirm that our extent entry didn't steal all the free space from
	 * the bitmap, because of the small 2 * sectorsize free space region.
	 */
	ret = check_num_extents_and_bitmaps(cache, 2, 1);
	if (ret)
		return ret;

	/*
	 * So now we have the range [128Mb - 768Kb, 128Mb + 256Kb[ as free
	 * space. Without stealing bitmap free space into extent entry space,
	 * we would have all this free space represented by 2 entries in the
	 * cache:
	 *
	 * extent entry covering range: [128Mb, 128Mb + 256Kb[
	 * bitmap entry covering range: [128Mb - 768Kb, 128Mb[
	 *
	 * Attempting to allocate the whole free space (1Mb) would fail, because
	 * we can't allocate from multiple entries.
	 * With the bitmap free space stealing, we get a single extent entry
	 * that represents the 1Mb free space, and therefore we're able to
	 * allocate the whole free space at once.
	 */
	if (!test_check_exists(cache, SZ_128M - 768 * SZ_1K, SZ_1M)) {
		test_err("expected region not marked as free");
		return -ENOENT;
	}

	if (cache->free_space_ctl->free_space != (SZ_1M + 2 * sectorsize)) {
		test_err("cache free space is not 1Mb + %u", 2 * sectorsize);
		return -EINVAL;
	}

	offset = btrfs_find_space_for_alloc(cache, 0, SZ_1M, 0,
					    &max_extent_size);
	if (offset != (SZ_128M - 768 * SZ_1K)) {
		test_err(
	"failed to allocate 1Mb from space cache, returned offset is: %llu",
			 offset);
		return -EINVAL;
	}

	/*
	 * All that remains is a 2 * sectorsize free space region
	 * in a bitmap. Confirm.
	 */
	ret = check_num_extents_and_bitmaps(cache, 1, 1);
	if (ret)
		return ret;

	if (cache->free_space_ctl->free_space != 2 * sectorsize) {
		test_err("cache free space is not %u", 2 * sectorsize);
		return -EINVAL;
	}

	offset = btrfs_find_space_for_alloc(cache,
					    0, 2 * sectorsize, 0,
					    &max_extent_size);
	if (offset != SZ_32M) {
		test_err("failed to allocate %u, offset: %llu",
			 2 * sectorsize, offset);
		return -EINVAL;
	}

	ret = check_cache_empty(cache);
	if (ret)
		return ret;

	cache->free_space_ctl->op = orig_free_space_ops;
	btrfs_remove_free_space_cache(cache);

	return 0;
}

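/* Used by test_bytes_index(): force every free space range into a bitmap entry. */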
static bool bytes_index_use_bitmap(struct btrfs_free_space_ctl *ctl,
				   struct btrfs_free_space *info)
{
	return true;
}

static int test_bytes_index(struct btrfs_block_group *cache, u32 sectorsize)
{
	const struct btrfs_free_space_op test_free_space_ops = {
		.use_bitmap = bytes_index_use_bitmap,
	};
	const struct btrfs_free_space_op *orig_free_space_ops;
	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;
	u64 offset, max_extent_size, bytes;
	int ret, i;

	test_msg("running bytes index tests");

	/* First just validate that it does everything in order. */
	offset = 0;
	for (i = 0; i < 10; i++) {
		bytes = (i + 1) * SZ_1M;
		ret = test_add_free_space_entry(cache, offset, bytes, 0);
		if (ret) {
			test_err("couldn't add extent entry %d", ret);
			return ret;
		}
		offset += bytes + sectorsize;
	}

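	/* The bytes index keeps the largest entry first, so walking it should yield 10M down to 1M. */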
	for (node = rb_first_cached(&ctl->free_space_bytes), i = 9; node;
	     node = rb_next(node), i--) {
		entry = rb_entry(node, struct btrfs_free_space, bytes_index);
		bytes = (i + 1) * SZ_1M;
		if (entry->bytes != bytes) {
			test_err("invalid bytes index order, found %llu expected %llu",
				 entry->bytes, bytes);
			return -EINVAL;
		}
	}

	/* Now validate bitmaps do the correct thing. */
	btrfs_remove_free_space_cache(cache);
	for (i = 0; i < 2; i++) {
		offset = i * BITS_PER_BITMAP * sectorsize;
		bytes = (i + 1) * SZ_1M;
		ret = test_add_free_space_entry(cache, offset, bytes, 1);
		if (ret) {
			test_err("couldn't add bitmap entry");
			return ret;
		}
	}

	for (node = rb_first_cached(&ctl->free_space_bytes), i = 1; node;
	     node = rb_next(node), i--) {
		entry = rb_entry(node, struct btrfs_free_space, bytes_index);
		bytes = (i + 1) * SZ_1M;
		if (entry->bytes != bytes) {
			test_err("invalid bytes index order, found %llu expected %llu",
				 entry->bytes, bytes);
			return -EINVAL;
		}
	}

	/* Now validate bitmaps with different ->max_extent_size. */
	btrfs_remove_free_space_cache(cache);
	orig_free_space_ops = cache->free_space_ctl->op;
	cache->free_space_ctl->op = &test_free_space_ops;

	ret = test_add_free_space_entry(cache, 0, sectorsize, 1);
	if (ret) {
		test_err("couldn't add bitmap entry");
		return ret;
	}

	offset = BITS_PER_BITMAP * sectorsize;
	ret = test_add_free_space_entry(cache, offset, sectorsize, 1);
	if (ret) {
		test_err("couldn't add bitmap entry");
		return ret;
	}

	/*
	 * Now set a bunch of sectorsize extents in the first entry so its
	 * ->bytes is large.
	 */
	for (i = 2; i < 20; i += 2) {
		offset = sectorsize * i;
		ret = btrfs_add_free_space(cache, offset, sectorsize);
		if (ret) {
			test_err("error populating sparse bitmap %d", ret);
			return ret;
		}
	}

	/*
	 * Now set a contiguous extent in the second bitmap so its
	 * ->max_extent_size is larger than the first bitmap's.
	 */
	offset = (BITS_PER_BITMAP * sectorsize) + sectorsize;
	ret = btrfs_add_free_space(cache, offset, sectorsize);
	if (ret) {
		test_err("error adding contiguous extent %d", ret);
		return ret;
	}

	/*
	 * Since we don't set ->max_extent_size unless we search, everything
	 * should be indexed on bytes.
	 */
	entry = rb_entry(rb_first_cached(&ctl->free_space_bytes),
			 struct btrfs_free_space, bytes_index);
	if (entry->bytes != (10 * sectorsize)) {
		test_err("error, wrong entry in the first slot in bytes_index");
		return -EINVAL;
	}

	max_extent_size = 0;
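	/*
	 * Ask for three sectors: the largest contiguous run is only the two
	 * sectors in the second bitmap, so this search must fail and report
	 * that run via max_extent_size.
	 */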
	offset = btrfs_find_space_for_alloc(cache, cache->start, sectorsize * 3,
					    0, &max_extent_size);
	if (offset != 0) {
		test_err("found space to alloc even though we don't have enough space");
		return -EINVAL;
	}

	if (max_extent_size != (2 * sectorsize)) {
		test_err("got the wrong max_extent_size %llu expected %llu",
			 max_extent_size, (unsigned long long)(2 * sectorsize));
		return -EINVAL;
	}

	/*
	 * The search should have re-arranged the bytes index to use the
	 * ->max_extent_size, validate it's now what we expect it to be.
	 */
	entry = rb_entry(rb_first_cached(&ctl->free_space_bytes),
			 struct btrfs_free_space, bytes_index);
	if (entry->bytes != (2 * sectorsize)) {
		test_err("error, the bytes index wasn't recalculated properly");
		return -EINVAL;
	}

	/* Add another sectorsize to re-arrange the tree back to ->bytes. */
	offset = (BITS_PER_BITMAP * sectorsize) - sectorsize;
	ret = btrfs_add_free_space(cache, offset, sectorsize);
	if (ret) {
		test_err("error adding extent to the sparse entry %d", ret);
		return ret;
	}

	entry = rb_entry(rb_first_cached(&ctl->free_space_bytes),
			 struct btrfs_free_space, bytes_index);
	if (entry->bytes != (11 * sectorsize)) {
		test_err("error, wrong entry in the first slot in bytes_index");
		return -EINVAL;
	}

	/*
	 * Now make sure we find our correct entry after searching that will
	 * result in a re-arranging of the tree.
	 */
	max_extent_size = 0;
	offset = btrfs_find_space_for_alloc(cache, cache->start, sectorsize * 2,
					    0, &max_extent_size);
	if (offset != (BITS_PER_BITMAP * sectorsize)) {
		test_err("error, found %llu instead of %llu for our alloc",
			 offset,
			 (unsigned long long)(BITS_PER_BITMAP * sectorsize));
		return -EINVAL;
	}

	cache->free_space_ctl->op = orig_free_space_ops;
	btrfs_remove_free_space_cache(cache);
	return 0;
}

int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_block_group *cache;
	struct btrfs_root *root = NULL;
	int ret = -ENOMEM;

	test_msg("running btrfs free space cache tests");
	fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
	if (!fs_info) {
		test_std_err(TEST_ALLOC_FS_INFO);
		return -ENOMEM;
	}

	/*
	 * For ppc64 (with 64k page size), bytes per bitmap might be
	 * larger than 1G.  To keep the bitmap tests working on ppc64,
	 * allocate a dummy block group whose size spans more than one bitmap.
	 */
	cache = btrfs_alloc_dummy_block_group(fs_info,
				      BITS_PER_BITMAP * sectorsize + PAGE_SIZE);
	if (!cache) {
		test_std_err(TEST_ALLOC_BLOCK_GROUP);
		btrfs_free_dummy_fs_info(fs_info);
		return 0;
	}

	root = btrfs_alloc_dummy_root(fs_info);
	if (IS_ERR(root)) {
		test_std_err(TEST_ALLOC_ROOT);
		ret = PTR_ERR(root);
		goto out;
	}

	root->root_key.objectid = BTRFS_EXTENT_TREE_OBJECTID;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = 0;
	btrfs_global_root_insert(root);

	ret = test_extents(cache);
	if (ret)
		goto out;
	ret = test_bitmaps(cache, sectorsize);
	if (ret)
		goto out;
	ret = test_bitmaps_and_extents(cache, sectorsize);
	if (ret)
		goto out;

	ret = test_steal_space_from_bitmap_to_extent(cache, sectorsize);
	if (ret)
		goto out;
	ret = test_bytes_index(cache, sectorsize);
out:
	btrfs_free_dummy_block_group(cache);
	btrfs_free_dummy_root(root);
	btrfs_free_dummy_fs_info(fs_info);
	return ret;
}