// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 * Copyright © 2022 Maíra Canal <mairacanal@riseup.net>
 */

#include <kunit/test.h>

#include <linux/prime_numbers.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>

#include <drm/drm_buddy.h>

#include "../lib/drm_random.h"

static unsigned int random_seed;

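/*
 * Convert a buddy order into a size in bytes: a block of a given order
 * covers (1 << order) chunks, e.g. order 3 with a 4K chunk size is 32K.
 */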
static inline u64 get_size(int order, u64 chunk_size)
{
	return (1 << order) * chunk_size;
}

static void drm_test_buddy_alloc_range_bias(struct kunit *test)
{
	u32 mm_size, ps, bias_size, bias_start, bias_end, bias_rem;
	DRM_RND_STATE(prng, random_seed);
	unsigned int i, count, *order;
	struct drm_buddy mm;
	LIST_HEAD(allocated);

	bias_size = SZ_1M;
	ps = roundup_pow_of_two(prandom_u32_state(&prng) % bias_size);
	ps = max(SZ_4K, ps);
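	/*
	 * ps is a random power-of-two page size between 4K and bias_size.
	 * The mm size below is deliberately not a power of two, which forces
	 * drm_buddy_init() to carve the space into multiple root blocks.
	 */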
	mm_size = (SZ_8M - 1) & ~(ps - 1); /* Multiple roots */

	kunit_info(test, "mm_size=%u, ps=%u\n", mm_size, ps);

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
			       "buddy_init failed\n");

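	/* Visit the uniform bias windows in a random permutation. */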
	count = mm_size / bias_size;
	order = drm_random_order(count, &prng);
	KUNIT_ASSERT_TRUE(test, order);

	/*
	 * Idea is to split the address space into uniform bias ranges, and then
	 * in some random order allocate within each bias, using various
	 * patterns within. This should detect if allocations leak out from a
	 * given bias, for example.
	 */

	for (i = 0; i < count; i++) {
		LIST_HEAD(tmp);
		u32 size;

		bias_start = order[i] * bias_size;
		bias_end = bias_start + bias_size;
		bias_rem = bias_size;

		/* internal round_up too big */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start,
							     bias_end, bias_size + ps, bias_size,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start, bias_end, bias_size + ps, bias_size);

		/* size too big */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start,
							     bias_end, bias_size + ps, ps,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start, bias_end, bias_size + ps, ps);

		/* bias range too small for size */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start + ps,
							     bias_end, bias_size, ps,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start + ps, bias_end, bias_size, ps);

		/* bias misaligned */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start + ps,
							     bias_end - ps,
							     bias_size >> 1, bias_size >> 1,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start + ps, bias_end - ps, bias_size >> 1, bias_size >> 1);

		/* single big page */
		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, bias_start,
							      bias_end, bias_size, bias_size,
							      &tmp,
							      DRM_BUDDY_RANGE_ALLOCATION),
				       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
				       bias_start, bias_end, bias_size, bias_size);
		drm_buddy_free_list(&mm, &tmp);

		/* single page with internal round_up */
		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, bias_start,
							      bias_end, ps, bias_size,
							      &tmp,
							      DRM_BUDDY_RANGE_ALLOCATION),
				       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
				       bias_start, bias_end, ps, bias_size);
		drm_buddy_free_list(&mm, &tmp);

		/* random size within */
		size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, bias_start,
							      bias_end, size, ps,
							      &tmp,
							      DRM_BUDDY_RANGE_ALLOCATION),
				       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
				       bias_start, bias_end, size, ps);

		bias_rem -= size;
		/* too big for current avail */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start,
							     bias_end, bias_rem + ps, ps,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start, bias_end, bias_rem + ps, ps);

		if (bias_rem) {
			/* random fill of the remainder */
			size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);

			KUNIT_ASSERT_FALSE_MSG(test,
					       drm_buddy_alloc_blocks(&mm, bias_start,
								      bias_end, size, ps,
								      &allocated,
								      DRM_BUDDY_RANGE_ALLOCATION),
					       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
					       bias_start, bias_end, size, ps);
			/*
			 * Intentionally allow some space to be left
			 * unallocated, and ideally not always on the bias
			 * boundaries.
			 */
			drm_buddy_free_list(&mm, &tmp);
		} else {
			list_splice_tail(&tmp, &allocated);
		}
	}

	kfree(order);
	drm_buddy_free_list(&mm, &allocated);
	drm_buddy_fini(&mm);

	/*
	 * Something more free-form. Idea is to pick a random starting bias
	 * range within the address space and then start filling it up. Also
	 * randomly grow the bias range in both directions as we go along. This
	 * should give us bias start/end which is not always uniform like above,
	 * and in some cases will require the allocator to jump over already
	 * allocated nodes in the middle of the address space.
	 */

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
			       "buddy_init failed\n");

	bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps);
	bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps);
	bias_end = max(bias_end, bias_start + ps);
	bias_rem = bias_end - bias_start;

	do {
		u32 size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);

		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, bias_start,
							      bias_end, size, ps,
							      &allocated,
							      DRM_BUDDY_RANGE_ALLOCATION),
				       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
				       bias_start, bias_end, size, ps);
		bias_rem -= size;

		/*
		 * Try to randomly grow the bias range in both directions, or
		 * only one, or perhaps don't grow at all.
		 */
		do {
			u32 old_bias_start = bias_start;
			u32 old_bias_end = bias_end;

			if (bias_start)
				bias_start -= round_up(prandom_u32_state(&prng) % bias_start, ps);
			if (bias_end != mm_size)
				bias_end += round_up(prandom_u32_state(&prng) % (mm_size - bias_end), ps);

			bias_rem += old_bias_start - bias_start;
			bias_rem += bias_end - old_bias_end;
		} while (!bias_rem && (bias_start || bias_end != mm_size));
	} while (bias_rem);

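	/*
	 * The loop above only exits once the bias window spans the whole
	 * address space with every page in it allocated, so not even a
	 * single extra page should fit anywhere now.
	 */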
	KUNIT_ASSERT_EQ(test, bias_start, 0);
	KUNIT_ASSERT_EQ(test, bias_end, mm_size);
	KUNIT_ASSERT_TRUE_MSG(test,
			      drm_buddy_alloc_blocks(&mm, bias_start, bias_end,
						     ps, ps,
						     &allocated,
						     DRM_BUDDY_RANGE_ALLOCATION),
			      "buddy_alloc passed with bias(%x-%x), size=%u\n",
			      bias_start, bias_end, ps);

	drm_buddy_free_list(&mm, &allocated);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_contiguous(struct kunit *test)
{
	const unsigned long ps = SZ_4K, mm_size = 16 * 3 * SZ_4K;
	unsigned long i, n_pages, total;
	struct drm_buddy_block *block;
	struct drm_buddy mm;
	LIST_HEAD(left);
	LIST_HEAD(middle);
	LIST_HEAD(right);
	LIST_HEAD(allocated);

	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));

	/*
	 * Idea is to fragment the address space by alternating block
	 * allocations between three different lists: one for left, one for
	 * middle and one for right. We can then free a list to simulate
	 * fragmentation. In particular we want to exercise
	 * DRM_BUDDY_CONTIGUOUS_ALLOCATION, including the try_harder path.
	 */

	i = 0;
	n_pages = mm_size / ps;
	do {
		struct list_head *list;
		int slot = i % 3;

		if (slot == 0)
			list = &left;
		else if (slot == 1)
			list = &middle;
		else
			list = &right;
		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, 0, mm_size,
							      ps, ps, list, 0),
				       "buddy_alloc hit an error size=%lu\n",
				       ps);
	} while (++i < n_pages);

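	/*
	 * The mm is now fully allocated, with consecutive pages striped
	 * across the three lists, so any contiguous request must fail.
	 */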
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							   3 * ps, ps, &allocated,
							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			      "buddy_alloc didn't error size=%lu\n", 3 * ps);

	drm_buddy_free_list(&mm, &middle);
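	/*
	 * Only every third page (the middle stripe) is free, so the largest
	 * contiguous run is a single page: both requests below must fail.
	 */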
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							   3 * ps, ps, &allocated,
							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			      "buddy_alloc didn't error size=%lu\n", 3 * ps);
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							   2 * ps, ps, &allocated,
							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			      "buddy_alloc didn't error size=%lu\n", 2 * ps);

	drm_buddy_free_list(&mm, &right);
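	/*
	 * With middle and right freed, two out of every three pages are
	 * free, leaving contiguous runs of exactly two pages.
	 */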
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							   3 * ps, ps, &allocated,
							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			      "buddy_alloc didn't error size=%lu\n", 3 * ps);
	/*
	 * At this point we should have enough contiguous space for 2 blocks,
	 * however they are never buddies (since we freed middle and right) so
	 * will require the try_harder logic to find them.
	 */
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    2 * ps, ps, &allocated,
							    DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 2 * ps);

	drm_buddy_free_list(&mm, &left);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    3 * ps, ps, &allocated,
							    DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 3 * ps);

	total = 0;
	list_for_each_entry(block, &allocated, link)
		total += drm_buddy_block_size(&mm, block);

	KUNIT_ASSERT_EQ(test, total, ps * 2 + ps * 3);

	drm_buddy_free_list(&mm, &allocated);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_pathological(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block;
	const int max_order = 3;
	unsigned long flags = 0;
	int order, top;
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	LIST_HEAD(holes);
	LIST_HEAD(tmp);

	/*
	 * Create a power-of-two sized mm, then allocate one of each possible
	 * order within. This should leave the mm with exactly one
	 * page left. Free the largest block, then whittle down again.
	 * Eventually we will have a fully 50% fragmented mm.
	 */

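	/*
	 * With max_order = 3 and a typical 4K PAGE_SIZE (not guaranteed on
	 * every config) this is a 32K mm of eight pages.
	 */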
	mm_size = PAGE_SIZE << max_order;
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
			       "buddy_init failed\n");

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	for (top = max_order; top; top--) {
		/* Make room by freeing the largest allocated block */
		block = list_first_entry_or_null(&blocks, typeof(*block), link);
		if (block) {
			list_del(&block->link);
			drm_buddy_free_block(&mm, block);
		}

		for (order = top; order--;) {
			size = get_size(order, PAGE_SIZE);
			KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start,
									    mm_size, size, size,
									    &tmp, flags),
					       "buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
					       order, top);

			block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
			KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

			list_move_tail(&block->link, &blocks);
		}

		/* There should be one final page for this sub-allocation */
		size = get_size(0, PAGE_SIZE);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM for hole\n");

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_move_tail(&block->link, &holes);

		size = get_size(top, PAGE_SIZE);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
				      top, max_order);
	}

	drm_buddy_free_list(&mm, &holes);

	/* Nothing larger than blocks of chunk_size now available */
	for (order = 1; order <= max_order; order++) {
		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded at order %d, it should be full!",
				      order);
	}

	list_splice_tail(&holes, &blocks);
	drm_buddy_free_list(&mm, &blocks);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block, *bn;
	const unsigned int max_order = 16;
	unsigned long flags = 0;
	struct drm_buddy mm;
	unsigned int order;
	LIST_HEAD(blocks);
	LIST_HEAD(tmp);

	/*
	 * Create a power-of-two sized mm, then allocate one of each possible
	 * order within. This should leave the mm with exactly one
	 * page left.
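	 * (Orders 0 through max_order - 1 sum to 2^max_order - 1 pages,
	 * one short of the 2^max_order pages in the mm.)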
	 */

	mm_size = PAGE_SIZE << max_order;
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
			       "buddy_init failed\n");

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	for (order = 0; order < max_order; order++) {
		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_move_tail(&block->link, &blocks);
	}

	/* And now the last remaining block available */
	size = get_size(0, PAGE_SIZE);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							    size, size, &tmp, flags),
			       "buddy_alloc hit -ENOMEM on final alloc\n");

	block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
	KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

	list_move_tail(&block->link, &blocks);

	/* Should be completely full! */
	for (order = max_order; order--;) {
		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded, it should be full!");
	}

	block = list_last_entry(&blocks, typeof(*block), link);
	list_del(&block->link);
	drm_buddy_free_block(&mm, block);

	/* As we free in increasing size, we make available larger blocks */
	order = 1;
	list_for_each_entry_safe(block, bn, &blocks, link) {
		list_del(&block->link);
		drm_buddy_free_block(&mm, block);

		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_del(&block->link);
		drm_buddy_free_block(&mm, block);
		order++;
	}

	/* To confirm, now the whole mm should be available */
	size = get_size(max_order, PAGE_SIZE);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							    size, size, &tmp, flags),
			       "buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
			       max_order);

	block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
	KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

	list_del(&block->link);
	drm_buddy_free_block(&mm, block);
	drm_buddy_free_list(&mm, &blocks);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_optimistic(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block;
	unsigned long flags = 0;
	const int max_order = 16;
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	LIST_HEAD(tmp);
	int order;

	/*
	 * Create a mm with one block of each order available, and
	 * try to allocate them all.
	 */

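	/* One block of each order 0..max_order: (2^(max_order + 1) - 1) pages. */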
	mm_size = PAGE_SIZE * ((1 << (max_order + 1)) - 1);

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
			       "buddy_init failed\n");

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	for (order = 0; order <= max_order; order++) {
		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_move_tail(&block->link, &blocks);
	}

	/* Should be completely full! */
	size = get_size(0, PAGE_SIZE);
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							   size, size, &tmp, flags),
			      "buddy_alloc unexpectedly succeeded, it should be full!");

	drm_buddy_free_list(&mm, &blocks);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_limit(struct kunit *test)
{
	u64 size = U64_MAX, start = 0;
	struct drm_buddy_block *block;
	unsigned long flags = 0;
	LIST_HEAD(allocated);
	struct drm_buddy mm;

	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, PAGE_SIZE));

	KUNIT_EXPECT_EQ_MSG(test, mm.max_order, DRM_BUDDY_MAX_ORDER,
			    "mm.max_order(%d) != %d\n", mm.max_order,
			    DRM_BUDDY_MAX_ORDER);

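	/* The largest single block the mm can hand out: one max_order block. */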
	size = mm.chunk_size << mm.max_order;
	KUNIT_EXPECT_FALSE(test, drm_buddy_alloc_blocks(&mm, start, size, size,
							PAGE_SIZE, &allocated, flags));

	block = list_first_entry_or_null(&allocated, struct drm_buddy_block, link);
	KUNIT_EXPECT_TRUE(test, block);

	KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), mm.max_order,
			    "block order(%d) != %d\n",
			    drm_buddy_block_order(block), mm.max_order);

	KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_size(&mm, block),
			    BIT_ULL(mm.max_order) * PAGE_SIZE,
			    "block size(%llu) != %llu\n",
			    drm_buddy_block_size(&mm, block),
			    BIT_ULL(mm.max_order) * PAGE_SIZE);

	drm_buddy_free_list(&mm, &allocated);
	drm_buddy_fini(&mm);
}

static int drm_buddy_suite_init(struct kunit_suite *suite)
{
	while (!random_seed)
		random_seed = get_random_u32();

	kunit_info(suite, "Testing DRM buddy manager, with random_seed=0x%x\n",
		   random_seed);

	return 0;
}

static struct kunit_case drm_buddy_tests[] = {
	KUNIT_CASE(drm_test_buddy_alloc_limit),
	KUNIT_CASE(drm_test_buddy_alloc_optimistic),
	KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
	KUNIT_CASE(drm_test_buddy_alloc_pathological),
	KUNIT_CASE(drm_test_buddy_alloc_contiguous),
	KUNIT_CASE(drm_test_buddy_alloc_range_bias),
	{}
};

static struct kunit_suite drm_buddy_test_suite = {
	.name = "drm_buddy",
	.suite_init = drm_buddy_suite_init,
	.test_cases = drm_buddy_tests,
};

kunit_test_suite(drm_buddy_test_suite);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");