// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit test of ext4 multiblock allocation.
 */

#include <kunit/test.h>
#include <kunit/static_stub.h>
#include <linux/random.h>

#include "ext4.h"

struct mbt_grp_ctx {
	struct buffer_head bitmap_bh;
	/* desc and gd_bh are just placeholders for now */
	struct ext4_group_desc desc;
	struct buffer_head gd_bh;
};

struct mbt_ctx {
	struct mbt_grp_ctx *grp_ctx;
};

struct mbt_ext4_super_block {
	struct ext4_super_block es;
	struct ext4_sb_info sbi;
	struct mbt_ctx mbt_ctx;
};

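/*
 * sb->s_fs_info points at the embedded sbi, so container_of() recovers
 * the enclosing mbt_ext4_super_block and, through it, the test context.
 */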
#define MBT_SB(_sb)		(container_of((_sb)->s_fs_info, \
					      struct mbt_ext4_super_block, sbi))
#define MBT_CTX(_sb)		(&MBT_SB(_sb)->mbt_ctx)
#define MBT_GRP_CTX(_sb, _group)	(&MBT_CTX(_sb)->grp_ctx[_group])

static struct inode *mbt_alloc_inode(struct super_block *sb)
{
	struct ext4_inode_info *ei;

	ei = kmalloc(sizeof(struct ext4_inode_info), GFP_KERNEL);
	if (!ei)
		return NULL;

	INIT_LIST_HEAD(&ei->i_orphan);
	init_rwsem(&ei->xattr_sem);
	init_rwsem(&ei->i_data_sem);
	inode_init_once(&ei->vfs_inode);
	ext4_fc_init_inode(&ei->vfs_inode);

	return &ei->vfs_inode;
}

static void mbt_free_inode(struct inode *inode)
{
	kfree(EXT4_I(inode));
}

static const struct super_operations mbt_sops = {
	.alloc_inode	= mbt_alloc_inode,
	.free_inode	= mbt_free_inode,
};

static void mbt_kill_sb(struct super_block *sb)
{
	generic_shutdown_super(sb);
}

static struct file_system_type mbt_fs_type = {
	.name			= "mballoc test",
	.kill_sb		= mbt_kill_sb,
};

static int mbt_mb_init(struct super_block *sb)
{
	ext4_fsblk_t block;
	int ret;

	/* needed by ext4_mb_init->bdev_nonrot(sb->s_bdev) */
	sb->s_bdev = kzalloc(sizeof(*sb->s_bdev), GFP_KERNEL);
	if (sb->s_bdev == NULL)
		return -ENOMEM;

	sb->s_bdev->bd_queue = kzalloc(sizeof(struct request_queue), GFP_KERNEL);
	if (sb->s_bdev->bd_queue == NULL) {
		kfree(sb->s_bdev);
		return -ENOMEM;
	}

	/*
	 * needed by ext4_mb_init->ext4_mb_init_backend->
	 * sbi->s_buddy_cache = new_inode(sb);
	 */
	INIT_LIST_HEAD(&sb->s_inodes);
	sb->s_op = &mbt_sops;

	ret = ext4_mb_init(sb);
	if (ret != 0)
		goto err_out;

	block = ext4_count_free_clusters(sb);
	ret = percpu_counter_init(&EXT4_SB(sb)->s_freeclusters_counter, block,
				  GFP_KERNEL);
	if (ret != 0)
		goto err_mb_release;

	ret = percpu_counter_init(&EXT4_SB(sb)->s_dirtyclusters_counter, 0,
				  GFP_KERNEL);
	if (ret != 0)
		goto err_freeclusters;

	return 0;

err_freeclusters:
	percpu_counter_destroy(&EXT4_SB(sb)->s_freeclusters_counter);
err_mb_release:
	ext4_mb_release(sb);
err_out:
	kfree(sb->s_bdev->bd_queue);
	kfree(sb->s_bdev);
	return ret;
}

static void mbt_mb_release(struct super_block *sb)
{
	percpu_counter_destroy(&EXT4_SB(sb)->s_dirtyclusters_counter);
	percpu_counter_destroy(&EXT4_SB(sb)->s_freeclusters_counter);
	ext4_mb_release(sb);
	kfree(sb->s_bdev->bd_queue);
	kfree(sb->s_bdev);
}

static int mbt_set(struct super_block *sb, void *data)
{
	return 0;
}

static struct super_block *mbt_ext4_alloc_super_block(void)
{
	struct mbt_ext4_super_block *fsb;
	struct super_block *sb;
	struct ext4_sb_info *sbi;

	fsb = kzalloc(sizeof(*fsb), GFP_KERNEL);
	if (fsb == NULL)
		return NULL;

	sb = sget(&mbt_fs_type, NULL, mbt_set, 0, NULL);
	if (IS_ERR(sb))
		goto out;

	sbi = &fsb->sbi;

	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
	if (!sbi->s_blockgroup_lock)
		goto out_deactivate;

	bgl_lock_init(sbi->s_blockgroup_lock);

	sbi->s_es = &fsb->es;
	sb->s_fs_info = sbi;

	up_write(&sb->s_umount);
	return sb;

out_deactivate:
	deactivate_locked_super(sb);
out:
	kfree(fsb);
	return NULL;
}

static void mbt_ext4_free_super_block(struct super_block *sb)
{
	struct mbt_ext4_super_block *fsb = MBT_SB(sb);
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	kfree(sbi->s_blockgroup_lock);
	deactivate_super(sb);
	kfree(fsb);
}

struct mbt_ext4_block_layout {
	unsigned char blocksize_bits;
	unsigned int cluster_bits;
	uint32_t blocks_per_group;
	ext4_group_t group_count;
	uint16_t desc_size;
};

static void mbt_init_sb_layout(struct super_block *sb,
			       struct mbt_ext4_block_layout *layout)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;

	sb->s_blocksize = 1UL << layout->blocksize_bits;
	sb->s_blocksize_bits = layout->blocksize_bits;

	sbi->s_groups_count = layout->group_count;
	sbi->s_blocks_per_group = layout->blocks_per_group;
	sbi->s_cluster_bits = layout->cluster_bits;
	sbi->s_cluster_ratio = 1U << layout->cluster_bits;
	sbi->s_clusters_per_group = layout->blocks_per_group >>
				    layout->cluster_bits;
	sbi->s_desc_size = layout->desc_size;
	sbi->s_desc_per_block_bits =
		sb->s_blocksize_bits - (fls(layout->desc_size) - 1);
	sbi->s_desc_per_block = 1 << sbi->s_desc_per_block_bits;

	es->s_first_data_block = cpu_to_le32(0);
	es->s_blocks_count_lo = cpu_to_le32(layout->blocks_per_group *
					    layout->group_count);
}

static int mbt_grp_ctx_init(struct super_block *sb,
			    struct mbt_grp_ctx *grp_ctx)
{
	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);

	grp_ctx->bitmap_bh.b_data = kzalloc(EXT4_BLOCK_SIZE(sb), GFP_KERNEL);
	if (grp_ctx->bitmap_bh.b_data == NULL)
		return -ENOMEM;
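	/* mark the padding bits past the last cluster in-use, as on disk */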
	mb_set_bits(grp_ctx->bitmap_bh.b_data, max, sb->s_blocksize * 8 - max);
	ext4_free_group_clusters_set(sb, &grp_ctx->desc, max);

	return 0;
}

static void mbt_grp_ctx_release(struct mbt_grp_ctx *grp_ctx)
{
	kfree(grp_ctx->bitmap_bh.b_data);
	grp_ctx->bitmap_bh.b_data = NULL;
}

static void mbt_ctx_mark_used(struct super_block *sb, ext4_group_t group,
			      unsigned int start, unsigned int len)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);

	mb_set_bits(grp_ctx->bitmap_bh.b_data, start, len);
}

static void *mbt_ctx_bitmap(struct super_block *sb, ext4_group_t group)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);

	return grp_ctx->bitmap_bh.b_data;
}

/* called after mbt_init_sb_layout */
static int mbt_ctx_init(struct super_block *sb)
{
	struct mbt_ctx *ctx = MBT_CTX(sb);
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);

	ctx->grp_ctx = kcalloc(ngroups, sizeof(struct mbt_grp_ctx),
			       GFP_KERNEL);
	if (ctx->grp_ctx == NULL)
		return -ENOMEM;

	for (i = 0; i < ngroups; i++)
		if (mbt_grp_ctx_init(sb, &ctx->grp_ctx[i]))
			goto out;

	/*
	 * The first data block (i.e. the first cluster of the first group)
	 * is used by metadata; mark it used so we never allocate it, which
	 * would fail the ext4_sb_block_valid check.
	 */
	mb_set_bits(ctx->grp_ctx[0].bitmap_bh.b_data, 0, 1);
	ext4_free_group_clusters_set(sb, &ctx->grp_ctx[0].desc,
				     EXT4_CLUSTERS_PER_GROUP(sb) - 1);

	return 0;
out:
	while (i-- > 0)
		mbt_grp_ctx_release(&ctx->grp_ctx[i]);
	kfree(ctx->grp_ctx);
	return -ENOMEM;
}

static void mbt_ctx_release(struct super_block *sb)
{
	struct mbt_ctx *ctx = MBT_CTX(sb);
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);

	for (i = 0; i < ngroups; i++)
		mbt_grp_ctx_release(&ctx->grp_ctx[i]);
	kfree(ctx->grp_ctx);
}

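/*
 * The stubs below replace mballoc's external dependencies (bitmap I/O,
 * group descriptor lookup and on-disk bitmap updates) so the allocator
 * runs entirely against the in-memory bitmaps held in mbt_grp_ctx.
 */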
static struct buffer_head *
ext4_read_block_bitmap_nowait_stub(struct super_block *sb, ext4_group_t block_group,
				   bool ignore_locked)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, block_group);

	/* paired with brelse from caller of ext4_read_block_bitmap_nowait */
	get_bh(&grp_ctx->bitmap_bh);
	return &grp_ctx->bitmap_bh;
}

static int ext4_wait_block_bitmap_stub(struct super_block *sb,
				       ext4_group_t block_group,
				       struct buffer_head *bh)
{
	/*
	 * The real ext4_wait_block_bitmap sets these flags, and functions
	 * like ext4_mb_init_cache verify them.
	 */
	set_buffer_uptodate(bh);
	set_bitmap_uptodate(bh);
	set_buffer_verified(bh);
	return 0;
}

static struct ext4_group_desc *
ext4_get_group_desc_stub(struct super_block *sb, ext4_group_t block_group,
			 struct buffer_head **bh)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, block_group);

	if (bh != NULL)
		*bh = &grp_ctx->gd_bh;

	return &grp_ctx->desc;
}

static int
ext4_mb_mark_context_stub(handle_t *handle, struct super_block *sb, bool state,
			  ext4_group_t group, ext4_grpblk_t blkoff,
			  ext4_grpblk_t len, int flags,
			  ext4_grpblk_t *ret_changed)
{
	struct mbt_grp_ctx *grp_ctx = MBT_GRP_CTX(sb, group);
	struct buffer_head *bitmap_bh = &grp_ctx->bitmap_bh;

	if (state)
		mb_set_bits(bitmap_bh->b_data, blkoff, len);
	else
		mb_clear_bits(bitmap_bh->b_data, blkoff, len);

	return 0;
}

#define TEST_GOAL_GROUP 1
static int mbt_kunit_init(struct kunit *test)
{
	struct mbt_ext4_block_layout *layout =
		(struct mbt_ext4_block_layout *)(test->param_value);
	struct super_block *sb;
	int ret;

	sb = mbt_ext4_alloc_super_block();
	if (sb == NULL)
		return -ENOMEM;

	mbt_init_sb_layout(sb, layout);

	ret = mbt_ctx_init(sb);
	if (ret != 0) {
		mbt_ext4_free_super_block(sb);
		return ret;
	}

	test->priv = sb;
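	/* static stubs are deactivated automatically when the test exits */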
	kunit_activate_static_stub(test,
				   ext4_read_block_bitmap_nowait,
				   ext4_read_block_bitmap_nowait_stub);
	kunit_activate_static_stub(test,
				   ext4_wait_block_bitmap,
				   ext4_wait_block_bitmap_stub);
	kunit_activate_static_stub(test,
				   ext4_get_group_desc,
				   ext4_get_group_desc_stub);
	kunit_activate_static_stub(test,
				   ext4_mb_mark_context,
				   ext4_mb_mark_context_stub);

	/* stub functions will be called in mbt_mb_init->ext4_mb_init */
	if (mbt_mb_init(sb) != 0) {
		mbt_ctx_release(sb);
		mbt_ext4_free_super_block(sb);
		return -ENOMEM;
	}

	return 0;
}

static void mbt_kunit_exit(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;

	mbt_mb_release(sb);
	mbt_ctx_release(sb);
	mbt_ext4_free_super_block(sb);
}

static void test_new_blocks_simple(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;
	struct inode *inode;
	struct ext4_allocation_request ar;
	ext4_group_t i, goal_group = TEST_GOAL_GROUP;
	int err = 0;
	ext4_fsblk_t found;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
	if (!inode)
		return;

	inode->i_sb = sb;
	ar.inode = inode;

	/* get block at goal */
	ar.goal = ext4_group_first_block_no(sb, goal_group);
	found = ext4_mb_new_blocks_simple(&ar, &err);
	KUNIT_ASSERT_EQ_MSG(test, ar.goal, found,
		"failed to alloc block at goal, expected %llu found %llu",
		ar.goal, found);

	/* get block after goal in goal group */
	ar.goal = ext4_group_first_block_no(sb, goal_group);
	found = ext4_mb_new_blocks_simple(&ar, &err);
	KUNIT_ASSERT_EQ_MSG(test, ar.goal + EXT4_C2B(sbi, 1), found,
		"failed to alloc block after goal in goal group, expected %llu found %llu",
		ar.goal + EXT4_C2B(sbi, 1), found);

	/* get block after goal group */
	mbt_ctx_mark_used(sb, goal_group, 0, EXT4_CLUSTERS_PER_GROUP(sb));
	ar.goal = ext4_group_first_block_no(sb, goal_group);
	found = ext4_mb_new_blocks_simple(&ar, &err);
	KUNIT_ASSERT_EQ_MSG(test,
		ext4_group_first_block_no(sb, goal_group + 1), found,
		"failed to alloc block after goal group, expected %llu found %llu",
		ext4_group_first_block_no(sb, goal_group + 1), found);

	/* get block before goal group */
	for (i = goal_group; i < ext4_get_groups_count(sb); i++)
		mbt_ctx_mark_used(sb, i, 0, EXT4_CLUSTERS_PER_GROUP(sb));
	ar.goal = ext4_group_first_block_no(sb, goal_group);
	found = ext4_mb_new_blocks_simple(&ar, &err);
	KUNIT_ASSERT_EQ_MSG(test,
		ext4_group_first_block_no(sb, 0) + EXT4_C2B(sbi, 1), found,
		"failed to alloc block before goal group, expected %llu found %llu",
		ext4_group_first_block_no(sb, 0) + EXT4_C2B(sbi, 1), found);

	/* no block available, fail to allocate block */
	for (i = 0; i < ext4_get_groups_count(sb); i++)
		mbt_ctx_mark_used(sb, i, 0, EXT4_CLUSTERS_PER_GROUP(sb));
	ar.goal = ext4_group_first_block_no(sb, goal_group);
	found = ext4_mb_new_blocks_simple(&ar, &err);
	KUNIT_ASSERT_NE_MSG(test, err, 0,
		"unexpectedly got a block when none is available");
}

#define TEST_RANGE_COUNT 8

struct test_range {
	ext4_grpblk_t start;
	ext4_grpblk_t len;
};

static void
mbt_generate_test_ranges(struct super_block *sb, struct test_range *ranges,
			 int count)
{
	ext4_grpblk_t start, len, max;
	int i;

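	/*
	 * Split the group into 'count' equal windows and pick a random,
	 * possibly empty, extent inside each so the ranges never overlap.
	 */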
	max = EXT4_CLUSTERS_PER_GROUP(sb) / count;
	for (i = 0; i < count; i++) {
		start = get_random_u32() % max;
		len = get_random_u32() % max;
		len = min(len, max - start);

		ranges[i].start = start + i * max;
		ranges[i].len = len;
	}
}

static void
validate_free_blocks_simple(struct kunit *test, struct super_block *sb,
			    ext4_group_t goal_group, ext4_grpblk_t start,
			    ext4_grpblk_t len)
{
	void *bitmap;
	ext4_grpblk_t bit, max = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_group_t i;

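	/* every group except the goal group must still be fully used */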
	for (i = 0; i < ext4_get_groups_count(sb); i++) {
		if (i == goal_group)
			continue;

		bitmap = mbt_ctx_bitmap(sb, i);
		bit = mb_find_next_zero_bit(bitmap, max, 0);
		KUNIT_ASSERT_EQ_MSG(test, bit, max,
				    "free block on unexpected group %d", i);
	}

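	/* the goal group must be free in exactly [start, start + len) */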
	bitmap = mbt_ctx_bitmap(sb, goal_group);
	bit = mb_find_next_zero_bit(bitmap, max, 0);
	KUNIT_ASSERT_EQ(test, bit, start);

	bit = mb_find_next_bit(bitmap, max, bit + 1);
	KUNIT_ASSERT_EQ(test, bit, start + len);
}

static void
test_free_blocks_simple_range(struct kunit *test, ext4_group_t goal_group,
			      ext4_grpblk_t start, ext4_grpblk_t len)
{
	struct super_block *sb = (struct super_block *)test->priv;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct inode *inode;
	ext4_fsblk_t block;

	inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
	if (!inode)
		return;
	inode->i_sb = sb;

	if (len == 0)
		return;

	block = ext4_group_first_block_no(sb, goal_group) +
		EXT4_C2B(sbi, start);
	ext4_free_blocks_simple(inode, block, len);
	validate_free_blocks_simple(test, sb, goal_group, start, len);
	mbt_ctx_mark_used(sb, goal_group, 0, EXT4_CLUSTERS_PER_GROUP(sb));
}

static void test_free_blocks_simple(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;
	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
	ext4_group_t i;
	struct test_range ranges[TEST_RANGE_COUNT];

	for (i = 0; i < ext4_get_groups_count(sb); i++)
		mbt_ctx_mark_used(sb, i, 0, max);

	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
	for (i = 0; i < TEST_RANGE_COUNT; i++)
		test_free_blocks_simple_range(test, TEST_GOAL_GROUP,
					      ranges[i].start, ranges[i].len);
}

static void
test_mark_diskspace_used_range(struct kunit *test,
			       struct ext4_allocation_context *ac,
			       ext4_grpblk_t start,
			       ext4_grpblk_t len)
{
	struct super_block *sb = (struct super_block *)test->priv;
	int ret;
	void *bitmap;
	ext4_grpblk_t i, max;

	/* ext4_mb_mark_diskspace_used will BUG if len is 0 */
	if (len == 0)
		return;

	ac->ac_b_ex.fe_group = TEST_GOAL_GROUP;
	ac->ac_b_ex.fe_start = start;
	ac->ac_b_ex.fe_len = len;

	bitmap = mbt_ctx_bitmap(sb, TEST_GOAL_GROUP);
	memset(bitmap, 0, sb->s_blocksize);
	ret = ext4_mb_mark_diskspace_used(ac, NULL, 0);
	KUNIT_ASSERT_EQ(test, ret, 0);

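	/* the group bitmap should now have exactly [start, start + len) set */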
	max = EXT4_CLUSTERS_PER_GROUP(sb);
	i = mb_find_next_bit(bitmap, max, 0);
	KUNIT_ASSERT_EQ(test, i, start);
	i = mb_find_next_zero_bit(bitmap, max, i + 1);
	KUNIT_ASSERT_EQ(test, i, start + len);
	i = mb_find_next_bit(bitmap, max, i + 1);
	KUNIT_ASSERT_EQ(test, max, i);
}

static void test_mark_diskspace_used(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;
	struct inode *inode;
	struct ext4_allocation_context ac;
	struct test_range ranges[TEST_RANGE_COUNT];
	int i;

	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);

	inode = kunit_kzalloc(test, sizeof(*inode), GFP_KERNEL);
	if (!inode)
		return;
	inode->i_sb = sb;

	ac.ac_status = AC_STATUS_FOUND;
	ac.ac_sb = sb;
	ac.ac_inode = inode;
	for (i = 0; i < TEST_RANGE_COUNT; i++)
		test_mark_diskspace_used_range(test, &ac, ranges[i].start,
					       ranges[i].len);
}

static void mbt_generate_buddy(struct super_block *sb, void *buddy,
			       void *bitmap, struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	uint32_t order, off;
	void *bb, *bb_h;
	int max;

	memset(buddy, 0xff, sb->s_blocksize);
	memset(grp, 0, offsetof(struct ext4_group_info,
				bb_counters[MB_NUM_ORDERS(sb)]));

	bb = bitmap;
	max = EXT4_CLUSTERS_PER_GROUP(sb);
	bb_h = buddy + sbi->s_mb_offsets[1];

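	/*
	 * Order-0 pass: walk every free cluster; whenever an aligned pair of
	 * free clusters is found, account it at order 1 (clear its bit in the
	 * order-1 buddy map) instead of counting two order-0 blocks.
	 */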
	off = mb_find_next_zero_bit(bb, max, 0);
	grp->bb_first_free = off;
	while (off < max) {
		grp->bb_counters[0]++;
		grp->bb_free++;

		if (!(off & 1) && !mb_test_bit(off + 1, bb)) {
			grp->bb_free++;
			grp->bb_counters[0]--;
			mb_clear_bit(off >> 1, bb_h);
			grp->bb_counters[1]++;
			grp->bb_largest_free_order = 1;
			off++;
		}

		off = mb_find_next_zero_bit(bb, max, off + 1);
	}

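	/*
	 * Higher orders: repeatedly merge aligned pairs of free buddies,
	 * marking them used at the current order and free at order + 1.
	 */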
	for (order = 1; order < MB_NUM_ORDERS(sb) - 1; order++) {
		bb = buddy + sbi->s_mb_offsets[order];
		bb_h = buddy + sbi->s_mb_offsets[order + 1];
		max = max >> 1;
		off = mb_find_next_zero_bit(bb, max, 0);

		while (off < max) {
			if (!(off & 1) && !mb_test_bit(off + 1, bb)) {
				mb_set_bits(bb, off, 2);
				grp->bb_counters[order] -= 2;
				mb_clear_bit(off >> 1, bb_h);
				grp->bb_counters[order + 1]++;
				grp->bb_largest_free_order = order + 1;
				off++;
			}

			off = mb_find_next_zero_bit(bb, max, off + 1);
		}
	}

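	/* each maximal run of free clusters in the bitmap is one fragment */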
	max = EXT4_CLUSTERS_PER_GROUP(sb);
	off = mb_find_next_zero_bit(bitmap, max, 0);
	while (off < max) {
		grp->bb_fragments++;

		off = mb_find_next_bit(bitmap, max, off + 1);
		if (off + 1 >= max)
			break;

		off = mb_find_next_zero_bit(bitmap, max, off + 1);
	}
}

static void
mbt_validate_group_info(struct kunit *test, struct ext4_group_info *grp1,
			struct ext4_group_info *grp2)
{
	struct super_block *sb = (struct super_block *)test->priv;
	int i;

	KUNIT_ASSERT_EQ(test, grp1->bb_first_free,
			grp2->bb_first_free);
	KUNIT_ASSERT_EQ(test, grp1->bb_fragments,
			grp2->bb_fragments);
	KUNIT_ASSERT_EQ(test, grp1->bb_free, grp2->bb_free);
	KUNIT_ASSERT_EQ(test, grp1->bb_largest_free_order,
			grp2->bb_largest_free_order);

	for (i = 1; i < MB_NUM_ORDERS(sb); i++) {
		KUNIT_ASSERT_EQ_MSG(test, grp1->bb_counters[i],
				    grp2->bb_counters[i],
				    "bb_counters[%d] differs, expected %d, generated %d",
				    i, grp1->bb_counters[i],
				    grp2->bb_counters[i]);
	}
}

static void
do_test_generate_buddy(struct kunit *test, struct super_block *sb, void *bitmap,
		       void *mbt_buddy, struct ext4_group_info *mbt_grp,
		       void *ext4_buddy, struct ext4_group_info *ext4_grp)
{
	int i;

	mbt_generate_buddy(sb, mbt_buddy, bitmap, mbt_grp);

	for (i = 0; i < MB_NUM_ORDERS(sb); i++)
		ext4_grp->bb_counters[i] = 0;
	/* needed by validation in ext4_mb_generate_buddy */
	ext4_grp->bb_free = mbt_grp->bb_free;
	memset(ext4_buddy, 0xff, sb->s_blocksize);
	ext4_mb_generate_buddy(sb, ext4_buddy, bitmap, TEST_GOAL_GROUP,
			       ext4_grp);

	KUNIT_ASSERT_EQ(test, memcmp(mbt_buddy, ext4_buddy, sb->s_blocksize),
			0);
	mbt_validate_group_info(test, mbt_grp, ext4_grp);
}

static void test_mb_generate_buddy(struct kunit *test)
{
	struct super_block *sb = (struct super_block *)test->priv;
	void *bitmap, *expected_bb, *generate_bb;
	struct ext4_group_info *expected_grp, *generate_grp;
	struct test_range ranges[TEST_RANGE_COUNT];
	int i;

	bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
	expected_bb = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_bb);
	generate_bb = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, generate_bb);
	expected_grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
				     bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, expected_grp);
	generate_grp = ext4_get_group_info(sb, TEST_GOAL_GROUP);
	KUNIT_ASSERT_NOT_NULL(test, generate_grp);

	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
	for (i = 0; i < TEST_RANGE_COUNT; i++) {
		mb_set_bits(bitmap, ranges[i].start, ranges[i].len);
		do_test_generate_buddy(test, sb, bitmap, expected_bb,
				       expected_grp, generate_bb, generate_grp);
	}
}

static void
test_mb_mark_used_range(struct kunit *test, struct ext4_buddy *e4b,
			ext4_grpblk_t start, ext4_grpblk_t len, void *bitmap,
			void *buddy, struct ext4_group_info *grp)
{
	struct super_block *sb = (struct super_block *)test->priv;
	struct ext4_free_extent ex;
	int i;

	/* mb_mark_used only accepts non-zero len */
	if (len == 0)
		return;

	ex.fe_start = start;
	ex.fe_len = len;
	ex.fe_group = TEST_GOAL_GROUP;

	ext4_lock_group(sb, TEST_GOAL_GROUP);
	mb_mark_used(e4b, &ex);
	ext4_unlock_group(sb, TEST_GOAL_GROUP);

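	/*
	 * Regenerate the expected buddy from the updated bitmap and compare
	 * it against the buddy that mb_mark_used updated incrementally.
	 */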
	mb_set_bits(bitmap, start, len);
	/* bypass bb_free validation in ext4_mb_generate_buddy */
	grp->bb_free -= len;
	memset(buddy, 0xff, sb->s_blocksize);
	for (i = 0; i < MB_NUM_ORDERS(sb); i++)
		grp->bb_counters[i] = 0;
	ext4_mb_generate_buddy(sb, buddy, bitmap, 0, grp);

	KUNIT_ASSERT_EQ(test, memcmp(buddy, e4b->bd_buddy, sb->s_blocksize),
			0);
	mbt_validate_group_info(test, grp, e4b->bd_info);
}

static void test_mb_mark_used(struct kunit *test)
{
	struct ext4_buddy e4b;
	struct super_block *sb = (struct super_block *)test->priv;
	void *bitmap, *buddy;
	struct ext4_group_info *grp;
	int ret;
	struct test_range ranges[TEST_RANGE_COUNT];
	int i;

	/* buddy cache assumes that each page contains at least one block */
	if (sb->s_blocksize > PAGE_SIZE)
		kunit_skip(test, "blocksize exceeds pagesize");

	bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
	buddy = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buddy);
	grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
					   bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, grp);

	ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
	KUNIT_ASSERT_EQ(test, ret, 0);

	grp->bb_free = EXT4_CLUSTERS_PER_GROUP(sb);
	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
	for (i = 0; i < TEST_RANGE_COUNT; i++)
		test_mb_mark_used_range(test, &e4b, ranges[i].start,
					ranges[i].len, bitmap, buddy, grp);

	ext4_mb_unload_buddy(&e4b);
}

static void
test_mb_free_blocks_range(struct kunit *test, struct ext4_buddy *e4b,
			  ext4_grpblk_t start, ext4_grpblk_t len, void *bitmap,
			  void *buddy, struct ext4_group_info *grp)
{
	struct super_block *sb = (struct super_block *)test->priv;
	int i;

	/* mb_free_blocks will WARN if len is 0 */
	if (len == 0)
		return;

	ext4_lock_group(sb, e4b->bd_group);
	mb_free_blocks(NULL, e4b, start, len);
	ext4_unlock_group(sb, e4b->bd_group);

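	/* as above, cross-check against a buddy rebuilt from the bitmap */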
	mb_clear_bits(bitmap, start, len);
	/* bypass bb_free validation in ext4_mb_generate_buddy */
	grp->bb_free += len;
	memset(buddy, 0xff, sb->s_blocksize);
	for (i = 0; i < MB_NUM_ORDERS(sb); i++)
		grp->bb_counters[i] = 0;
	ext4_mb_generate_buddy(sb, buddy, bitmap, 0, grp);

	KUNIT_ASSERT_EQ(test, memcmp(buddy, e4b->bd_buddy, sb->s_blocksize),
			0);
	mbt_validate_group_info(test, grp, e4b->bd_info);
}

static void test_mb_free_blocks(struct kunit *test)
{
	struct ext4_buddy e4b;
	struct super_block *sb = (struct super_block *)test->priv;
	void *bitmap, *buddy;
	struct ext4_group_info *grp;
	struct ext4_free_extent ex;
	int ret;
	int i;
	struct test_range ranges[TEST_RANGE_COUNT];

	/* buddy cache assumes that each page contains at least one block */
	if (sb->s_blocksize > PAGE_SIZE)
		kunit_skip(test, "blocksize exceeds pagesize");

	bitmap = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bitmap);
	buddy = kunit_kzalloc(test, sb->s_blocksize, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buddy);
	grp = kunit_kzalloc(test, offsetof(struct ext4_group_info,
					   bb_counters[MB_NUM_ORDERS(sb)]), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, grp);

	ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ex.fe_start = 0;
	ex.fe_len = EXT4_CLUSTERS_PER_GROUP(sb);
	ex.fe_group = TEST_GOAL_GROUP;

	ext4_lock_group(sb, TEST_GOAL_GROUP);
	mb_mark_used(&e4b, &ex);
	ext4_unlock_group(sb, TEST_GOAL_GROUP);

	grp->bb_free = 0;
	memset(bitmap, 0xff, sb->s_blocksize);

	mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
	for (i = 0; i < TEST_RANGE_COUNT; i++)
		test_mb_free_blocks_range(test, &e4b, ranges[i].start,
					  ranges[i].len, bitmap, buddy, grp);

	ext4_mb_unload_buddy(&e4b);
}

#define COUNT_FOR_ESTIMATE 100000
static void test_mb_mark_used_cost(struct kunit *test)
{
	struct ext4_buddy e4b;
	struct super_block *sb = (struct super_block *)test->priv;
	struct ext4_free_extent ex;
	int ret;
	struct test_range ranges[TEST_RANGE_COUNT];
	int i, j;
	unsigned long start, end, all = 0;

	/* buddy cache assumes that each page contains at least one block */
	if (sb->s_blocksize > PAGE_SIZE)
		kunit_skip(test, "blocksize exceeds pagesize");

	ret = ext4_mb_load_buddy(sb, TEST_GOAL_GROUP, &e4b);
	KUNIT_ASSERT_EQ(test, ret, 0);

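	/*
	 * Time only the mb_mark_used() calls; the matching mb_free_blocks()
	 * pass just restores the group for the next iteration.
	 */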
	ex.fe_group = TEST_GOAL_GROUP;
	for (j = 0; j < COUNT_FOR_ESTIMATE; j++) {
		mbt_generate_test_ranges(sb, ranges, TEST_RANGE_COUNT);
		start = jiffies;
		for (i = 0; i < TEST_RANGE_COUNT; i++) {
			if (ranges[i].len == 0)
				continue;

			ex.fe_start = ranges[i].start;
			ex.fe_len = ranges[i].len;
			ext4_lock_group(sb, TEST_GOAL_GROUP);
			mb_mark_used(&e4b, &ex);
			ext4_unlock_group(sb, TEST_GOAL_GROUP);
		}
		end = jiffies;
		all += (end - start);

		for (i = 0; i < TEST_RANGE_COUNT; i++) {
			if (ranges[i].len == 0)
				continue;

			ext4_lock_group(sb, TEST_GOAL_GROUP);
			mb_free_blocks(NULL, &e4b, ranges[i].start,
				       ranges[i].len);
			ext4_unlock_group(sb, TEST_GOAL_GROUP);
		}
	}

	kunit_info(test, "cost %lu jiffies\n", all);
	ext4_mb_unload_buddy(&e4b);
}

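/*
 * Every case runs once per layout below, covering 1K, 4K and 64K block
 * sizes with bigalloc clusters of 8 blocks each.
 */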
static const struct mbt_ext4_block_layout mbt_test_layouts[] = {
	{
		.blocksize_bits = 10,
		.cluster_bits = 3,
		.blocks_per_group = 8192,
		.group_count = 4,
		.desc_size = 64,
	},
	{
		.blocksize_bits = 12,
		.cluster_bits = 3,
		.blocks_per_group = 8192,
		.group_count = 4,
		.desc_size = 64,
	},
	{
		.blocksize_bits = 16,
		.cluster_bits = 3,
		.blocks_per_group = 8192,
		.group_count = 4,
		.desc_size = 64,
	},
};

static void mbt_show_layout(const struct mbt_ext4_block_layout *layout,
			    char *desc)
{
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "block_bits=%d cluster_bits=%d "
		 "blocks_per_group=%d group_count=%d desc_size=%d",
		 layout->blocksize_bits, layout->cluster_bits,
		 layout->blocks_per_group, layout->group_count,
		 layout->desc_size);
}
KUNIT_ARRAY_PARAM(mbt_layouts, mbt_test_layouts, mbt_show_layout);

static struct kunit_case mbt_test_cases[] = {
	KUNIT_CASE_PARAM(test_new_blocks_simple, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_free_blocks_simple, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mb_generate_buddy, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mb_mark_used, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mb_free_blocks, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM(test_mark_diskspace_used, mbt_layouts_gen_params),
	KUNIT_CASE_PARAM_ATTR(test_mb_mark_used_cost, mbt_layouts_gen_params,
			      { .speed = KUNIT_SPEED_SLOW }),
	{}
};

static struct kunit_suite mbt_test_suite = {
	.name = "ext4_mballoc_test",
	.init = mbt_kunit_init,
	.exit = mbt_kunit_exit,
	.test_cases = mbt_test_cases,
};

kunit_test_suites(&mbt_test_suite);

MODULE_LICENSE("GPL");