xref: /linux/fs/ext4/mballoc-test.c (revision a436a0b847c0fef9ead14f99bc03d8adbf66f15b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * KUnit test of ext4 multiblocks allocation.
4  */
5 
6 #include <kunit/test.h>
7 #include <kunit/static_stub.h>
8 #include <linux/random.h>
9 
10 #include "ext4.h"
11 #include "mballoc.h"
12 
13 struct mbt_grp_ctx {
14 	struct buffer_head bitmap_bh;
15 	/* desc and gd_bh are just the place holders for now */
16 	struct ext4_group_desc desc;
17 	struct buffer_head gd_bh;
18 };
19 
20 struct mbt_ctx {
21 	struct mbt_grp_ctx *grp_ctx;
22 };
23 
24 struct mbt_ext4_super_block {
25 	struct ext4_super_block es;
26 	struct ext4_sb_info sbi;
27 	struct mbt_ctx mbt_ctx;
28 };
29 
30 #define MBT_SB(_sb) (container_of((_sb)->s_fs_info, struct mbt_ext4_super_block, sbi))
31 #define MBT_CTX(_sb) (&MBT_SB(_sb)->mbt_ctx)
32 #define MBT_GRP_CTX(_sb, _group) (&MBT_CTX(_sb)->grp_ctx[_group])
33 
34 static struct inode *mbt_alloc_inode(struct super_block *sb)
35 {
36 	struct ext4_inode_info *ei;
37 
38 	ei = kmalloc_obj(struct ext4_inode_info);
39 	if (!ei)
40 		return NULL;
41 
42 	INIT_LIST_HEAD(&ei->i_orphan);
43 	init_rwsem(&ei->xattr_sem);
44 	init_rwsem(&ei->i_data_sem);
45 	inode_init_once(&ei->vfs_inode);
46 	ext4_fc_init_inode(&ei->vfs_inode);
47 
48 	return &ei->vfs_inode;
49 }
50 
51 static void mbt_free_inode(struct inode *inode)
52 {
53 	kfree(EXT4_I(inode));
54 }
55 
56 static const struct super_operations mbt_sops = {
57 	.alloc_inode	= mbt_alloc_inode,
58 	.free_inode	= mbt_free_inode,
59 };
60 
61 static void mbt_kill_sb(struct super_block *sb)
62 {
63 	generic_shutdown_super(sb);
64 }
65 
66 static struct file_system_type mbt_fs_type = {
67 	.name			= "mballoc test",
68 	.kill_sb		= mbt_kill_sb,
69 };
70 
71 static int mbt_mb_init(struct super_block *sb)
72 {
73 	ext4_fsblk_t block;
74 	int ret;
75 
76 	/* needed by ext4_mb_init->bdev_rot(sb->s_bdev) */
77 	sb->s_bdev = kzalloc_obj(*sb->s_bdev);
78 	if (sb->s_bdev == NULL)
79 		return -ENOMEM;
80 
81 	sb->s_bdev->bd_queue = kzalloc_obj(struct request_queue);
82 	if (sb->s_bdev->bd_queue == NULL) {
83 		kfree(sb->s_bdev);
84 		return -ENOMEM;
85 	}
86 
87 	/*
88 	 * needed by ext4_mb_init->ext4_mb_init_backend-> sbi->s_buddy_cache =
89 	 * new_inode(sb);
90 	 */
91 	INIT_LIST_HEAD(&sb->s_inodes);
92 	sb->s_op = &mbt_sops;
93 
94 	ret = ext4_mb_init(sb);
95 	if (ret != 0)
96 		goto err_out;
97 
98 	block = ext4_count_free_clusters(sb);
99 	ret = percpu_counter_init(&EXT4_SB(sb)->s_freeclusters_counter, block,
100 				  GFP_KERNEL);
101 	if (ret != 0)
102 		goto err_mb_release;
103 
104 	ret = percpu_counter_init(&EXT4_SB(sb)->s_dirtyclusters_counter, 0,
105 				  GFP_KERNEL);
106 	if (ret != 0)
107 		goto err_freeclusters;
108 
109 	return 0;
110 
111 err_freeclusters:
112 	percpu_counter_destroy(&EXT4_SB(sb)->s_freeclusters_counter);
113 err_mb_release:
114 	ext4_mb_release(sb);
115 err_out:
116 	kfree(sb->s_bdev->bd_queue);
117 	kfree(sb->s_bdev);
118 	return ret;
119 }
120 
121 static void mbt_mb_release(struct super_block *sb)
122 {
123 	percpu_counter_destroy(&EXT4_SB(sb)->s_dirtyclusters_counter);
124 	percpu_counter_destroy(&EXT4_SB(sb)->s_freeclusters_counter);
125 	ext4_mb_release(sb);
126 	kfree(sb->s_bdev->bd_queue);
127 	kfree(sb->s_bdev);
128 }
129 
130 static int mbt_set(struct super_block *sb, void *data)
131 {
132 	return 0;
133 }
134 
135 static struct super_block *mbt_ext4_alloc_super_block(void)
136 {
137 	struct mbt_ext4_super_block *fsb;
138 	struct super_block *sb;
139 	struct ext4_sb_info *sbi;
140 
141 	fsb = kzalloc_obj(*fsb);
142 	if (fsb == NULL)
143 		return NULL;
144 
145 	sb = sget(&mbt_fs_type, NULL, mbt_set, 0, NULL);
146 	if (IS_ERR(sb))
147 		goto out;
148 
149 	sbi = &fsb->sbi;
150 
151 	sbi->s_blockgroup_lock =
152 		kzalloc_obj(struct blockgroup_lock);
153 	if (!sbi->s_blockgroup_lock)
154 		goto out_deactivate;
155 
156 	bgl_lock_init(sbi->s_blockgroup_lock);
157 
158 	sbi->s_es = &fsb->es;
159 	sbi->s_sb = sb;
160 	sb->s_fs_info = sbi;
161 
162 	up_write(&sb->s_umount);
163 	return sb;
164 
165 out_deactivate:
166 	deactivate_locked_super(sb);
167 out:
168 	kfree(fsb);
169 	return NULL;
170 }
171 
172 static void mbt_ext4_free_super_block(struct super_block *sb)
173 {
174 	struct mbt_ext4_super_block *fsb = MBT_SB(sb);
175 	struct ext4_sb_info *sbi = EXT4_SB(sb);
176 
177 	kfree(sbi->s_blockgroup_lock);
178 	deactivate_super(sb);
179 	kfree(fsb);
180 }
181 
182 struct mbt_ext4_block_layout {
183 	unsigned char blocksize_bits;
184 	unsigned int cluster_bits;
185 	uint32_t blocks_per_group;
186 	ext4_group_t group_count;
187 	uint16_t desc_size;
188 };
189 
190 static void mbt_init_sb_layout(struct super_block *sb,
191 			       struct mbt_ext4_block_layout *layout)
192 {
193 	struct ext4_sb_info *sbi = EXT4_SB(sb);
194 	struct ext4_super_block *es = sbi->s_es;
195 
196 	sb->s_blocksize = 1UL << layout->blocksize_bits;
197 	sb->s_blocksize_bits = layout->blocksize_bits;
198 
199 	sbi->s_groups_count = layout->group_count;
200 	sbi->s_blocks_per_group = layout->blocks_per_group;
201 	sbi->s_cluster_bits = layout->cluster_bits;
202 	sbi->s_cluster_ratio = 1U << layout->cluster_bits;
203 	sbi->s_clusters_per_group = layout->blocks_per_group >>
204 				    layout->cluster_bits;
205 	sbi->s_desc_size = layout->desc_size;
206 	sbi->s_desc_per_block_bits =
207 		sb->s_blocksize_bits - (fls(layout->desc_size) - 1);
208 	sbi->s_desc_per_block = 1 << sbi->s_desc_per_block_bits;
209 
210 	es->s_first_data_block = cpu_to_le32(0);
211 	es->s_blocks_count_lo = cpu_to_le32(layout->blocks_per_group *
212 					    layout->group_count);
213 }
214 
215 static int mbt_grp_ctx_init(struct super_block *sb,
216 			    struct mbt_grp_ctx *grp_ctx)
217 {
218 	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
219 
220 	grp_ctx->bitmap_bh.b_data = kzalloc(EXT4_BLOCK_SIZE(sb), GFP_KERNEL);
221 	if (grp_ctx->bitmap_bh.b_data == NULL)
222 		return -ENOMEM;
223 	mb_set_bits(grp_ctx->bitmap_bh.b_data, max, sb->s_blocksize * 8 - max);
224 	ext4_free_group_clusters_set(sb, &grp_ctx->desc, max);
225 
226 	return 0;
227 }
228 
229 static void mbt_grp_ctx_release(struct mbt_grp_ctx *grp_ctx)
230 {
231 	kfree(grp_ctx->bitmap_bh.b_data);
232 	grp_ctx->bitmap_bh.b_data = NULL;
233 }
234 
235 static void mbt_ctx_mark_used(struct super_block *sb, ext4_group_t group,
236 			      unsigned int start, unsigned int len)
237 {
238 	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);
239 
240 	mb_set_bits(grp_ctx->bitmap_bh.b_data, start, len);
241 }
242 
243 static void *mbt_ctx_bitmap(struct super_block *sb, ext4_group_t group)
244 {
245 	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);
246 
247 	return grp_ctx->bitmap_bh.b_data;
248 }
249 
250 /* called after mbt_init_sb_layout */
251 static int mbt_ctx_init(struct super_block *sb)
252 {
253 	struct mbt_ctx *ctx = MBT_CTX(sb);
254 	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
255 
256 	ctx->grp_ctx = kzalloc_objs(struct mbt_grp_ctx, ngroups);
257 	if (ctx->grp_ctx == NULL)
258 		return -ENOMEM;
259 
260 	for (i = 0; i < ngroups; i++)
261 		if (mbt_grp_ctx_init(sb, &ctx->grp_ctx[i]))
262 			goto out;
263 
264 	/*
265 	 * first data block(first cluster in first group) is used by
266 	 * metadata, mark it used to avoid to alloc data block at first
267 	 * block which will fail ext4_sb_block_valid check.
268 	 */
269 	mb_set_bits(ctx->grp_ctx[0].bitmap_bh.b_data, 0, 1);
270 	ext4_free_group_clusters_set(sb, &ctx->grp_ctx[0].desc,
271 				     EXT4_CLUSTERS_PER_GROUP(sb) - 1);
272 
273 	return 0;
274 out:
275 	while (i-- > 0)
276 		mbt_grp_ctx_release(&ctx->grp_ctx[i]);
277 	kfree(ctx->grp_ctx);
278 	return -ENOMEM;
279 }
280 
281 static void mbt_ctx_release(struct super_block *sb)
282 {
283 	struct mbt_ctx *ctx = MBT_CTX(sb);
284 	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
285 
286 	for (i = 0; i < ngroups; i++)
287 		mbt_grp_ctx_release(&ctx->grp_ctx[i]);
288 	kfree(ctx->grp_ctx);
289 }
290 
291 static struct buffer_head *
292 ext4_read_block_bitmap_nowait_stub(struct super_block *sb, ext4_group_t block_group,
293 				   bool ignore_locked)
294 {
295 	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, block_group);
296 
297 	/* paired with brelse from caller of ext4_read_block_bitmap_nowait */
298 	get_bh(&grp_ctx->bitmap_bh);
299 	return &grp_ctx->bitmap_bh;
300 }
301 
302 static int ext4_wait_block_bitmap_stub(struct super_block *sb,
303 				       ext4_group_t block_group,
304 				       struct buffer_head *bh)
305 {
306 	/*
307 	 * real ext4_wait_block_bitmap will set these flags and
308 	 * functions like ext4_mb_init_cache will verify the flags.
309 	 */
310 	set_buffer_uptodate(bh);
311 	set_bitmap_uptodate(bh);
312 	set_buffer_verified(bh);
313 	return 0;
314 }
315 
316 static struct ext4_group_desc *
317 ext4_get_group_desc_stub(struct super_block *sb, ext4_group_t block_group,
318 			 struct buffer_head **bh)
319 {
320 	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, block_group);
321 
322 	if (bh != NULL)
323 		*bh = &grp_ctx->gd_bh;
324 
325 	return &grp_ctx->desc;
326 }
327 
328 static int
329 ext4_mb_mark_context_stub(handle_t *handle, struct super_block *sb, bool state,
330 			  ext4_group_t group, ext4_grpblk_t blkoff,
331 			  ext4_grpblk_t len, int flags,
332 			  ext4_grpblk_t *ret_changed)
333 {
334 	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);
335 	struct buffer_head *bitmap_bh = &grp_ctx->bitmap_bh;
336 
337 	if (state)
338 		mb_set_bits(bitmap_bh->b_data, blkoff, len);
339 	else
340 		mb_clear_bits_test(bitmap_bh->b_data, blkoff, len);
341 
342 	return 0;
343 }
344 
345 #define TEST_GOAL_GROUP 1
346 static int mbt_kunit_init(struct kunit *test)
347 {
348 	struct mbt_ext4_block_layout *layout =
349 		(struct mbt_ext4_block_layout *)(test->param_value);
350 	struct super_block *sb;
351 	int ret;
352 
353 	sb = mbt_ext4_alloc_super_block();
354 	if (sb == NULL)
355 		return -ENOMEM;
356 
357 	mbt_init_sb_layout(sb, layout);
358 
359 	ret = mbt_ctx_init(sb);
360 	if (ret != 0) {
361 		mbt_ext4_free_super_block(sb);
362 		return ret;
363 	}
364 
365 	kunit_activate_static_stub(test,
366 				   ext4_read_block_bitmap_nowait,
367 				   ext4_read_block_bitmap_nowait_stub);
368 	kunit_activate_static_stub(test,
369 				   ext4_wait_block_bitmap,
370 				   ext4_wait_block_bitmap_stub);
371 	kunit_activate_static_stub(test,
372 				   ext4_get_group_desc,
373 				   ext4_get_group_desc_stub);
374 	kunit_activate_static_stub(test,
375 				   ext4_mb_mark_context,
376 				   ext4_mb_mark_context_stub);
377 
378 	/* stub function will be called in mbt_mb_init->ext4_mb_init */
379 	if (mbt_mb_init(sb) != 0) {
380 		mbt_ctx_release(sb);
381 		mbt_ext4_free_super_block(sb);
382 		return -ENOMEM;
383 	}
384 
385 	test->priv = sb;
386 
387 	return 0;
388 }
389 
390 static void mbt_kunit_exit(struct kunit *test)
391 {
392 	struct super_block *sb = (struct super_block *)test->priv;
393 
394 	if (!sb)
395 		return;
396 
397 	mbt_mb_release(sb);
398 	mbt_ctx_release(sb);
399 	mbt_ext4_free_super_block(sb);
400 }
401 
402 static void test_new_blocks_simple(struct kunit *test)
403 {
404 	struct super_block *sb = (struct super_block *)test->priv;
405 	struct inode *inode;
406 	struct ext4_allocation_request ar;
407 	ext4_group_t i, goal_group = TEST_GOAL_GROUP;
408 	int err = 0;
409 	ext4_fsblk_t found;
410 	struct ext4_sb_info *sbi = EXT4_SB(sb);
411 
412 	inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
413 	if (!inode)
414 		return;
415 
416 	inode->i_sb = sb;
417 	ar.inode = inode;
418 
419 	/* get block at goal */
420 	ar.goal = ext4_group_first_block_no(sb, goal_group);
421 	found = ext4_mb_new_blocks_simple_test(&ar, &err);
422 	KUNIT_ASSERT_EQ_MSG(test, ar.goal, found,
423 		"failed to alloc block at goal, expected %llu found %llu",
424 		ar.goal, found);
425 
426 	/* get block after goal in goal group */
427 	ar.goal = ext4_group_first_block_no(sb, goal_group);
428 	found = ext4_mb_new_blocks_simple_test(&ar, &err);
429 	KUNIT_ASSERT_EQ_MSG(test, ar.goal + EXT4_C2B(sbi, 1), found,
430 		"failed to alloc block after goal in goal group, expected %llu found %llu",
431 		ar.goal + 1, found);
432 
433 	/* get block after goal group */
434 	mbt_ctx_mark_used(sb, goal_group, 0, EXT4_CLUSTERS_PER_GROUP(sb));
435 	ar.goal = ext4_group_first_block_no(sb, goal_group);
436 	found = ext4_mb_new_blocks_simple_test(&ar, &err);
437 	KUNIT_ASSERT_EQ_MSG(test,
438 		ext4_group_first_block_no(sb, goal_group + 1), found,
439 		"failed to alloc block after goal group, expected %llu found %llu",
440 		ext4_group_first_block_no(sb, goal_group + 1), found);
441 
442 	/* get block before goal group */
443 	for (i = goal_group; i < ext4_get_groups_count(sb); i++)
444 		mbt_ctx_mark_used(sb, i, 0, EXT4_CLUSTERS_PER_GROUP(sb));
445 	ar.goal = ext4_group_first_block_no(sb, goal_group);
446 	found = ext4_mb_new_blocks_simple_test(&ar, &err);
447 	KUNIT_ASSERT_EQ_MSG(test,
448 		ext4_group_first_block_no(sb, 0) + EXT4_C2B(sbi, 1), found,
449 		"failed to alloc block before goal group, expected %llu found %llu",
450 		ext4_group_first_block_no(sb, 0 + EXT4_C2B(sbi, 1)), found);
451 
452 	/* no block available, fail to allocate block */
453 	for (i = 0; i < ext4_get_groups_count(sb); i++)
454 		mbt_ctx_mark_used(sb, i, 0, EXT4_CLUSTERS_PER_GROUP(sb));
455 	ar.goal = ext4_group_first_block_no(sb, goal_group);
456 	found = ext4_mb_new_blocks_simple_test(&ar, &err);
457 	KUNIT_ASSERT_NE_MSG(test, err, 0,
458 		"unexpectedly get block when no block is available");
459 }
460 
461 #define TEST_RANGE_COUNT 8
462 
463 struct test_range {
464 	ext4_grpblk_t start;
465 	ext4_grpblk_t len;
466 };
467 
468 static void
469 mbt_generate_test_ranges(struct super_block *sb, struct test_range *ranges,
470 			 int count)
471 {
472 	ext4_grpblk_t start, len, max;
473 	int i;
474 
475 	max = EXT4_CLUSTERS_PER_GROUP(sb) / count;
476 	for (i = 0; i < count; i++) {
477 		start = get_random_u32() % max;
478 		len = get_random_u32() % max;
479 		len = min(len, max - start);
480 
481 		ranges[i].start = start + i * max;
482 		ranges[i].len = len;
483 	}
484 }
485 
486 static void
487 validate_free_blocks_simple(struct kunit *test, struct super_block *sb,
488 			    ext4_group_t goal_group, ext4_grpblk_t start,
489 			    ext4_grpblk_t len)
490 {
491 	void *bitmap;
492 	ext4_grpblk_t bit, max = EXT4_CLUSTERS_PER_GROUP(sb);
493 	ext4_group_t i;
494 
495 	for (i = 0; i < ext4_get_groups_count(sb); i++) {
496 		if (i == goal_group)
497 			continue;
498 
499 		bitmap = mbt_ctx_bitmap(sb, i);
500 		bit = mb_find_next_zero_bit_test(bitmap, max, 0);
501 		KUNIT_ASSERT_EQ_MSG(test, bit, max,
502 				    "free block on unexpected group %d", i);
503 	}
504 
505 	bitmap = mbt_ctx_bitmap(sb, goal_group);
506 	bit = mb_find_next_zero_bit_test(bitmap, max, 0);
507 	KUNIT_ASSERT_EQ(test, bit, start);
508 
509 	bit = mb_find_next_bit_test(bitmap, max, bit + 1);
510 	KUNIT_ASSERT_EQ(test, bit, start + len);
511 }
512 
513 static void
514 test_free_blocks_simple_range(struct kunit *test, ext4_group_t goal_group,
515 			      ext4_grpblk_t start, ext4_grpblk_t len)
516 {
517 	struct super_block *sb = (struct super_block *)test->priv;
518 	struct ext4_sb_info *sbi = EXT4_SB(sb);
519 	struct inode *inode;
520 	ext4_fsblk_t block;
521 
522 	inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
523 	if (!inode)
524 		return;
525 	inode->i_sb = sb;
526 
527 	if (len == 0)
528 		return;
529 
530 	block = ext4_group_first_block_no(sb, goal_group) +
531 		EXT4_C2B(sbi, start);
532 	ext4_free_blocks_simple_test(inode, block, len);
533 	validate_free_blocks_simple(test, sb, goal_group, start, len);
534 	mbt_ctx_mark_used(sb, goal_group, 0, EXT4_CLUSTERS_PER_GROUP(sb));
535 }
536 
537 static void test_free_blocks_simple(struct kunit *test)
538 {
539 	struct super_block *sb = (struct super_block *)test->priv;
540 	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
541 	ext4_group_t i;
542 	struct test_range ranges[TEST_RANGE_COUNT];
543 
544 	for (i = 0; i < ext4_get_groups_count(sb); i++)
545 		mbt_ctx_mark_used(sb, i, 0, max);
546 
547 	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
548 	for (i = 0; i < TEST_RANGE_COUNT; i++)
549 		test_free_blocks_simple_range(test, TEST_GOAL_GROUP,
550 			ranges[i].start, ranges[i].len);
551 }
552 
553 static void
554 test_mark_diskspace_used_range(struct kunit *test,
555 			       struct ext4_allocation_context *ac,
556 			       ext4_grpblk_t start,
557 			       ext4_grpblk_t len)
558 {
559 	struct super_block *sb = (struct super_block *)test->priv;
560 	int ret;
561 	void *bitmap;
562 	ext4_grpblk_t i, max;
563 
564 	/* ext4_mb_mark_diskspace_used will BUG if len is 0 */
565 	if (len == 0)
566 		return;
567 
568 	ac->ac_b_ex.fe_group = TEST_GOAL_GROUP;
569 	ac->ac_b_ex.fe_start = start;
570 	ac->ac_b_ex.fe_len = len;
571 
572 	bitmap = mbt_ctx_bitmap(sb, TEST_GOAL_GROUP);
573 	memset(bitmap, 0, sb->s_blocksize);
574 	ret = ext4_mb_mark_diskspace_used_test(ac, NULL);
575 	KUNIT_ASSERT_EQ(test, ret, 0);
576 
577 	max = EXT4_CLUSTERS_PER_GROUP(sb);
578 	i = mb_find_next_bit_test(bitmap, max, 0);
579 	KUNIT_ASSERT_EQ(test, i, start);
580 	i = mb_find_next_zero_bit_test(bitmap, max, i + 1);
581 	KUNIT_ASSERT_EQ(test, i, start + len);
582 	i = mb_find_next_bit_test(bitmap, max, i + 1);
583 	KUNIT_ASSERT_EQ(test, max, i);
584 }
585 
586 static void test_mark_diskspace_used(struct kunit *test)
587 {
588 	struct super_block *sb = (struct super_block *)test->priv;
589 	struct inode *inode;
590 	struct ext4_allocation_context ac;
591 	struct test_range ranges[TEST_RANGE_COUNT];
592 	int i;
593 
594 	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
595 
596 	inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
597 	if (!inode)
598 		return;
599 	inode->i_sb = sb;
600 
601 	ac.ac_status = AC_STATUS_FOUND;
602 	ac.ac_sb = sb;
603 	ac.ac_inode = inode;
604 	for (i = 0; i < TEST_RANGE_COUNT; i++)
605 		test_mark_diskspace_used_range(test, &ac, ranges[i].start,
606 					       ranges[i].len);
607 }
608 
609 static void mbt_generate_buddy(struct super_block *sb, void *buddy,
610 			       void *bitmap, struct ext4_group_info *grp)
611 {
612 	struct ext4_sb_info *sbi = EXT4_SB(sb);
613 	uint32_t order, off;
614 	void *bb, *bb_h;
615 	int max;
616 
617 	memset(buddy, 0xff, sb->s_blocksize);
618 	memset(grp, 0, offsetof(struct ext4_group_info,
619 				 bb_counters[MB_NUM_ORDERS(sb)]));
620 
621 	bb = bitmap;
622 	max = EXT4_CLUSTERS_PER_GROUP(sb);
623 	bb_h = buddy + sbi->s_mb_offsets[1];
624 
625 	off = mb_find_next_zero_bit_test(bb, max, 0);
626 	grp->bb_first_free = off;
627 	while (off < max) {
628 		grp->bb_counters[0]++;
629 		grp->bb_free++;
630 
631 		if (!(off & 1) && !mb_test_bit_test(off + 1, bb)) {
632 			grp->bb_free++;
633 			grp->bb_counters[0]--;
634 			mb_clear_bit_test(off >> 1, bb_h);
635 			grp->bb_counters[1]++;
636 			grp->bb_largest_free_order = 1;
637 			off++;
638 		}
639 
640 		off = mb_find_next_zero_bit_test(bb, max, off + 1);
641 	}
642 
643 	for (order = 1; order < MB_NUM_ORDERS(sb) - 1; order++) {
644 		bb = buddy + sbi->s_mb_offsets[order];
645 		bb_h = buddy + sbi->s_mb_offsets[order + 1];
646 		max = max >> 1;
647 		off = mb_find_next_zero_bit_test(bb, max, 0);
648 
649 		while (off < max) {
650 			if (!(off & 1) && !mb_test_bit_test(off + 1, bb)) {
651 				mb_set_bits(bb, off, 2);
652 				grp->bb_counters[order] -= 2;
653 				mb_clear_bit_test(off >> 1, bb_h);
654 				grp->bb_counters[order + 1]++;
655 				grp->bb_largest_free_order = order + 1;
656 				off++;
657 			}
658 
659 			off = mb_find_next_zero_bit_test(bb, max, off + 1);
660 		}
661 	}
662 
663 	max = EXT4_CLUSTERS_PER_GROUP(sb);
664 	off = mb_find_next_zero_bit_test(bitmap, max, 0);
665 	while (off < max) {
666 		grp->bb_fragments++;
667 
668 		off = mb_find_next_bit_test(bitmap, max, off + 1);
669 		if (off + 1 >= max)
670 			break;
671 
672 		off = mb_find_next_zero_bit_test(bitmap, max, off + 1);
673 	}
674 }
675 
676 static void
677 mbt_validate_group_info(struct kunit *test, struct ext4_group_info *grp1,
678 			struct ext4_group_info *grp2)
679 {
680 	struct super_block *sb = (struct super_block *)test->priv;
681 	int i;
682 
683 	KUNIT_ASSERT_EQ(test, grp1->bb_first_free,
684 			grp2->bb_first_free);
685 	KUNIT_ASSERT_EQ(test, grp1->bb_fragments,
686 			grp2->bb_fragments);
687 	KUNIT_ASSERT_EQ(test, grp1->bb_free, grp2->bb_free);
688 	KUNIT_ASSERT_EQ(test, grp1->bb_largest_free_order,
689 			grp2->bb_largest_free_order);
690 
691 	for (i = 1; i < MB_NUM_ORDERS(sb); i++) {
692 		KUNIT_ASSERT_EQ_MSG(test, grp1->bb_counters[i],
693 				    grp2->bb_counters[i],
694 				    "bb_counters[%d] diffs, expected %d, generated %d",
695 				    i, grp1->bb_counters[i],
696 				    grp2->bb_counters[i]);
697 	}
698 }
699 
700 static void
701 do_test_generate_buddy(struct kunit *test, struct super_block *sb, void *bitmap,
702 			   void *mbt_buddy, struct ext4_group_info *mbt_grp,
703 			   void *ext4_buddy, struct ext4_group_info *ext4_grp)
704 {
705 	int i;
706 
707 	mbt_generate_buddy(sb, mbt_buddy, bitmap, mbt_grp);
708 
709 	for (i = 0; i < MB_NUM_ORDERS(sb); i++)
710 		ext4_grp->bb_counters[i] = 0;
711 	/* needed by validation in ext4_mb_generate_buddy */
712 	ext4_grp->bb_free = mbt_grp->bb_free;
713 	memset(ext4_buddy, 0xff, sb->s_blocksize);
714 	ext4_mb_generate_buddy_test(sb, ext4_buddy, bitmap, TEST_GOAL_GROUP,
715 			       ext4_grp);
716 
717 	KUNIT_ASSERT_EQ(test, memcmp(mbt_buddy, ext4_buddy, sb->s_blocksize),
718 			0);
719 	mbt_validate_group_info(test, mbt_grp, ext4_grp);
720 }
721 
722 static void test_mb_generate_buddy(struct kunit *test)
723 {
724 	struct super_block *sb = (struct super_block *)test->priv;
725 	void *bitmap, *expected_bb, *generate_bb;
726 	struct ext4_group_info *expected_grp, *generate_grp;
727 	struct test_range ranges[TEST_RANGE_COUNT];
728 	int i;
729 
730 	bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
731 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
732 	expected_bb = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
733 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_bb);
734 	generate_bb = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
735 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, generate_bb);
736 	expected_grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
737 				bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
738 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_grp);
739 	generate_grp = ext4_get_group_info(sb, TEST_GOAL_GROUP);
740 	KUNIT_ASSERT_NOT_NULL(test, generate_grp);
741 
742 	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
743 	for (i = 0; i < TEST_RANGE_COUNT; i++) {
744 		mb_set_bits(bitmap, ranges[i].start, ranges[i].len);
745 		do_test_generate_buddy(test, sb, bitmap, expected_bb,
746 				       expected_grp, generate_bb, generate_grp);
747 	}
748 }
749 
750 static void
751 test_mb_mark_used_range(struct kunit *test, struct ext4_buddy *e4b,
752 			ext4_grpblk_t start, ext4_grpblk_t len, void *bitmap,
753 			void *buddy, struct ext4_group_info *grp)
754 {
755 	struct super_block *sb = (struct super_block *)test->priv;
756 	struct ext4_free_extent ex;
757 	int i;
758 
759 	/* mb_mark_used only accepts non-zero len */
760 	if (len == 0)
761 		return;
762 
763 	ex.fe_start = start;
764 	ex.fe_len = len;
765 	ex.fe_group = TEST_GOAL_GROUP;
766 
767 	ext4_lock_group(sb, TEST_GOAL_GROUP);
768 	mb_mark_used_test(e4b, &ex);
769 	ext4_unlock_group(sb, TEST_GOAL_GROUP);
770 
771 	mb_set_bits(bitmap, start, len);
772 	/* bypass bb_free validatoin in ext4_mb_generate_buddy */
773 	grp->bb_free -= len;
774 	memset(buddy, 0xff, sb->s_blocksize);
775 	for (i = 0; i < MB_NUM_ORDERS(sb); i++)
776 		grp->bb_counters[i] = 0;
777 	ext4_mb_generate_buddy_test(sb, buddy, bitmap, 0, grp);
778 
779 	KUNIT_ASSERT_EQ(test, memcmp(buddy, e4b->bd_buddy, sb->s_blocksize),
780 			0);
781 	mbt_validate_group_info(test, grp, e4b->bd_info);
782 }
783 
784 static void test_mb_mark_used(struct kunit *test)
785 {
786 	struct ext4_buddy e4b;
787 	struct super_block *sb = (struct super_block *)test->priv;
788 	void *bitmap, *buddy;
789 	struct ext4_group_info *grp;
790 	int ret;
791 	struct test_range ranges[TEST_RANGE_COUNT];
792 	int i;
793 
794 	/* buddy cache assumes that each page contains at least one block */
795 	if (sb->s_blocksize > PAGE_SIZE)
796 		kunit_skip(test, "blocksize exceeds pagesize");
797 
798 	bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
799 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
800 	buddy = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
801 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buddy);
802 	grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
803 				bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
804 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, grp);
805 
806 	ret = ext4_mb_load_buddy_test(sb, TEST_GOAL_GROUP, &e4b);
807 	KUNIT_ASSERT_EQ(test, ret, 0);
808 
809 	grp->bb_free = EXT4_CLUSTERS_PER_GROUP(sb);
810 	grp->bb_largest_free_order = -1;
811 	grp->bb_avg_fragment_size_order = -1;
812 	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
813 	for (i = 0; i < TEST_RANGE_COUNT; i++)
814 		test_mb_mark_used_range(test, &e4b, ranges[i].start,
815 					ranges[i].len, bitmap, buddy, grp);
816 
817 	ext4_mb_unload_buddy_test(&e4b);
818 }
819 
820 static void
821 test_mb_free_blocks_range(struct kunit *test, struct ext4_buddy *e4b,
822 			  ext4_grpblk_t start, ext4_grpblk_t len, void *bitmap,
823 			  void *buddy, struct ext4_group_info *grp)
824 {
825 	struct super_block *sb = (struct super_block *)test->priv;
826 	int i;
827 
828 	/* mb_free_blocks will WARN if len is 0 */
829 	if (len == 0)
830 		return;
831 
832 	ext4_lock_group(sb, e4b->bd_group);
833 	mb_free_blocks_test(NULL, e4b, start, len);
834 	ext4_unlock_group(sb, e4b->bd_group);
835 
836 	mb_clear_bits_test(bitmap, start, len);
837 	/* bypass bb_free validatoin in ext4_mb_generate_buddy */
838 	grp->bb_free += len;
839 	memset(buddy, 0xff, sb->s_blocksize);
840 	for (i = 0; i < MB_NUM_ORDERS(sb); i++)
841 		grp->bb_counters[i] = 0;
842 	ext4_mb_generate_buddy_test(sb, buddy, bitmap, 0, grp);
843 
844 	KUNIT_ASSERT_EQ(test, memcmp(buddy, e4b->bd_buddy, sb->s_blocksize),
845 			0);
846 	mbt_validate_group_info(test, grp, e4b->bd_info);
847 
848 }
849 
850 static void test_mb_free_blocks(struct kunit *test)
851 {
852 	struct ext4_buddy e4b;
853 	struct super_block *sb = (struct super_block *)test->priv;
854 	void *bitmap, *buddy;
855 	struct ext4_group_info *grp;
856 	struct ext4_free_extent ex;
857 	int ret;
858 	int i;
859 	struct test_range ranges[TEST_RANGE_COUNT];
860 
861 	/* buddy cache assumes that each page contains at least one block */
862 	if (sb->s_blocksize > PAGE_SIZE)
863 		kunit_skip(test, "blocksize exceeds pagesize");
864 
865 	bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
866 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
867 	buddy = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
868 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buddy);
869 	grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
870 				bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
871 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, grp);
872 
873 	ret = ext4_mb_load_buddy_test(sb, TEST_GOAL_GROUP, &e4b);
874 	KUNIT_ASSERT_EQ(test, ret, 0);
875 
876 	ex.fe_start = 0;
877 	ex.fe_len = EXT4_CLUSTERS_PER_GROUP(sb);
878 	ex.fe_group = TEST_GOAL_GROUP;
879 
880 	ext4_lock_group(sb, TEST_GOAL_GROUP);
881 	mb_mark_used_test(&e4b, &ex);
882 	ext4_unlock_group(sb, TEST_GOAL_GROUP);
883 
884 	grp->bb_free = 0;
885 	grp->bb_largest_free_order = -1;
886 	grp->bb_avg_fragment_size_order = -1;
887 	memset(bitmap, 0xff, sb->s_blocksize);
888 
889 	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
890 	for (i = 0; i < TEST_RANGE_COUNT; i++)
891 		test_mb_free_blocks_range(test, &e4b, ranges[i].start,
892 					  ranges[i].len, bitmap, buddy, grp);
893 
894 	ext4_mb_unload_buddy_test(&e4b);
895 }
896 
897 #define COUNT_FOR_ESTIMATE 100000
898 static void test_mb_mark_used_cost(struct kunit *test)
899 {
900 	struct ext4_buddy e4b;
901 	struct super_block *sb = (struct super_block *)test->priv;
902 	struct ext4_free_extent ex;
903 	int ret;
904 	struct test_range ranges[TEST_RANGE_COUNT];
905 	int i, j;
906 	unsigned long start, end, all = 0;
907 
908 	/* buddy cache assumes that each page contains at least one block */
909 	if (sb->s_blocksize > PAGE_SIZE)
910 		kunit_skip(test, "blocksize exceeds pagesize");
911 
912 	ret = ext4_mb_load_buddy_test(sb, TEST_GOAL_GROUP, &e4b);
913 	KUNIT_ASSERT_EQ(test, ret, 0);
914 
915 	ex.fe_group = TEST_GOAL_GROUP;
916 	for (j = 0; j < COUNT_FOR_ESTIMATE; j++) {
917 		mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
918 		start = jiffies;
919 		for (i = 0; i < TEST_RANGE_COUNT; i++) {
920 			if (ranges[i].len == 0)
921 				continue;
922 
923 			ex.fe_start = ranges[i].start;
924 			ex.fe_len = ranges[i].len;
925 			ext4_lock_group(sb, TEST_GOAL_GROUP);
926 			mb_mark_used_test(&e4b, &ex);
927 			ext4_unlock_group(sb, TEST_GOAL_GROUP);
928 		}
929 		end = jiffies;
930 		all += (end - start);
931 
932 		for (i = 0; i < TEST_RANGE_COUNT; i++) {
933 			if (ranges[i].len == 0)
934 				continue;
935 
936 			ext4_lock_group(sb, TEST_GOAL_GROUP);
937 			mb_free_blocks_test(NULL, &e4b, ranges[i].start,
938 				       ranges[i].len);
939 			ext4_unlock_group(sb, TEST_GOAL_GROUP);
940 		}
941 	}
942 
943 	kunit_info(test, "costed jiffies %lu\n", all);
944 	ext4_mb_unload_buddy_test(&e4b);
945 }
946 
947 static const struct mbt_ext4_block_layout mbt_test_layouts[] = {
948 	{
949 		.blocksize_bits = 10,
950 		.cluster_bits = 3,
951 		.blocks_per_group = 8192,
952 		.group_count = 4,
953 		.desc_size = 64,
954 	},
955 	{
956 		.blocksize_bits = 12,
957 		.cluster_bits = 3,
958 		.blocks_per_group = 8192,
959 		.group_count = 4,
960 		.desc_size = 64,
961 	},
962 	{
963 		.blocksize_bits = 16,
964 		.cluster_bits = 3,
965 		.blocks_per_group = 8192,
966 		.group_count = 4,
967 		.desc_size = 64,
968 	},
969 };
970 
971 static void mbt_show_layout(const struct mbt_ext4_block_layout *layout,
972 			    char *desc)
973 {
974 	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "block_bits=%d cluster_bits=%d "
975 		 "blocks_per_group=%d group_count=%d desc_size=%d\n",
976 		 layout->blocksize_bits, layout->cluster_bits,
977 		 layout->blocks_per_group, layout->group_count,
978 		 layout->desc_size);
979 }
980 KUNIT_ARRAY_PARAM(mbt_layouts, mbt_test_layouts, mbt_show_layout);
981 
982 static struct kunit_case mbt_test_cases[] = {
983 	KUNIT_CASE_PARAM(test_new_blocks_simple, mbt_layouts_gen_params),
984 	KUNIT_CASE_PARAM(test_free_blocks_simple, mbt_layouts_gen_params),
985 	KUNIT_CASE_PARAM(test_mb_generate_buddy, mbt_layouts_gen_params),
986 	KUNIT_CASE_PARAM(test_mb_mark_used, mbt_layouts_gen_params),
987 	KUNIT_CASE_PARAM(test_mb_free_blocks, mbt_layouts_gen_params),
988 	KUNIT_CASE_PARAM(test_mark_diskspace_used, mbt_layouts_gen_params),
989 	KUNIT_CASE_PARAM_ATTR(test_mb_mark_used_cost, mbt_layouts_gen_params,
990 			      { .speed = KUNIT_SPEED_SLOW }),
991 	{}
992 };
993 
994 static struct kunit_suite mbt_test_suite = {
995 	.name = "ext4_mballoc_test",
996 	.init = mbt_kunit_init,
997 	.exit = mbt_kunit_exit,
998 	.test_cases = mbt_test_cases,
999 };
1000 
1001 kunit_test_suites(&mbt_test_suite);
1002 
1003 MODULE_LICENSE("GPL");
1004