// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 * Copyright © 2022 Maíra Canal <mairacanal@riseup.net>
 */

#include <kunit/test.h>

#include <linux/prime_numbers.h>
#include <linux/sched/signal.h>

#include <drm/drm_buddy.h>

#include "../lib/drm_random.h"

#define TIMEOUT(name__)								\
	unsigned long name__ = jiffies + MAX_SCHEDULE_TIMEOUT

static unsigned int random_seed;

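/* Size in bytes of a block of the given order, in units of @chunk_size */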
static inline u64 get_size(int order, u64 chunk_size)
{
	return (1 << order) * chunk_size;
}

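/*
 * Returns true once @timeout has expired or a signal is pending, optionally
 * logging @fmt; returns false while there is still time remaining.
 */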
__printf(2, 3)
static bool __timeout(unsigned long timeout, const char *fmt, ...)
{
	va_list va;

	if (!signal_pending(current)) {
		cond_resched();
		if (time_before(jiffies, timeout))
			return false;
	}

	if (fmt) {
		va_start(va, fmt);
		vprintk(fmt, va);
		va_end(va);
	}

	return true;
}

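/* Log a block's header, state, order, offset and size for post-mortem debugging */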
static void __dump_block(struct kunit *test, struct drm_buddy *mm,
			 struct drm_buddy_block *block, bool buddy)
{
	kunit_err(test, "block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%d buddy=%d\n",
		  block->header, drm_buddy_block_state(block),
		  drm_buddy_block_order(block), drm_buddy_block_offset(block),
		  drm_buddy_block_size(mm, block), !block->parent, buddy);
}

static void dump_block(struct kunit *test, struct drm_buddy *mm,
		       struct drm_buddy_block *block)
{
	struct drm_buddy_block *buddy;

	__dump_block(test, mm, block, false);

	buddy = drm_get_buddy(block);
	if (buddy)
		__dump_block(test, mm, buddy, true);
}

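/*
 * Sanity-check a single block: valid state, power-of-two size no smaller than
 * the chunk size, properly aligned offset, and a consistent buddy (same size,
 * mirrored offset, never free at the same time as a free block).
 */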
static int check_block(struct kunit *test, struct drm_buddy *mm,
		       struct drm_buddy_block *block)
{
	struct drm_buddy_block *buddy;
	unsigned int block_state;
	u64 block_size;
	u64 offset;
	int err = 0;

	block_state = drm_buddy_block_state(block);

	if (block_state != DRM_BUDDY_ALLOCATED &&
	    block_state != DRM_BUDDY_FREE && block_state != DRM_BUDDY_SPLIT) {
		kunit_err(test, "block state mismatch\n");
		err = -EINVAL;
	}

	block_size = drm_buddy_block_size(mm, block);
	offset = drm_buddy_block_offset(block);

	if (block_size < mm->chunk_size) {
		kunit_err(test, "block size smaller than min size\n");
		err = -EINVAL;
	}

	if (!is_power_of_2(block_size)) {
		kunit_err(test, "block size not power of two\n");
		err = -EINVAL;
	}

	if (!IS_ALIGNED(block_size, mm->chunk_size)) {
		kunit_err(test, "block size not aligned to min size\n");
		err = -EINVAL;
	}

	if (!IS_ALIGNED(offset, mm->chunk_size)) {
		kunit_err(test, "block offset not aligned to min size\n");
		err = -EINVAL;
	}

	if (!IS_ALIGNED(offset, block_size)) {
		kunit_err(test, "block offset not aligned to block size\n");
		err = -EINVAL;
	}

	buddy = drm_get_buddy(block);

	if (!buddy && block->parent) {
		kunit_err(test, "buddy has gone fishing\n");
		err = -EINVAL;
	}

	if (buddy) {
		if (drm_buddy_block_offset(buddy) != (offset ^ block_size)) {
			kunit_err(test, "buddy has wrong offset\n");
			err = -EINVAL;
		}

		if (drm_buddy_block_size(mm, buddy) != block_size) {
			kunit_err(test, "buddy size mismatch\n");
			err = -EINVAL;
		}

		if (drm_buddy_block_state(buddy) == block_state &&
		    block_state == DRM_BUDDY_FREE) {
			kunit_err(test, "block and its buddy are free\n");
			err = -EINVAL;
		}
	}

	return err;
}

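/*
 * Check that every block in @blocks is allocated and internally consistent,
 * optionally that the list is physically contiguous, and that the block sizes
 * sum to @expected_size. Dumps the offending block(s) on failure.
 */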
static int check_blocks(struct kunit *test, struct drm_buddy *mm,
			struct list_head *blocks, u64 expected_size, bool is_contiguous)
{
	struct drm_buddy_block *block;
	struct drm_buddy_block *prev;
	u64 total;
	int err = 0;

	block = NULL;
	prev = NULL;
	total = 0;

	list_for_each_entry(block, blocks, link) {
		err = check_block(test, mm, block);

		if (!drm_buddy_block_is_allocated(block)) {
			kunit_err(test, "block not allocated\n");
			err = -EINVAL;
		}

		if (is_contiguous && prev) {
			u64 prev_block_size;
			u64 prev_offset;
			u64 offset;

			prev_offset = drm_buddy_block_offset(prev);
			prev_block_size = drm_buddy_block_size(mm, prev);
			offset = drm_buddy_block_offset(block);

			if (offset != (prev_offset + prev_block_size)) {
				kunit_err(test, "block offset mismatch\n");
				err = -EINVAL;
			}
		}

		if (err)
			break;

		total += drm_buddy_block_size(mm, block);
		prev = block;
	}

	if (!err) {
		if (total != expected_size) {
			kunit_err(test, "size mismatch, expected=%llx, found=%llx\n",
				  expected_size, total);
			err = -EINVAL;
		}
		return err;
	}

	if (prev) {
		kunit_err(test, "prev block, dump:\n");
		dump_block(test, mm, prev);
	}

	kunit_err(test, "bad block, dump:\n");
	dump_block(test, mm, block);

	return err;
}

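/*
 * Check an idle mm: one free root per set bit in mm->size, the first of
 * max_order, all laid out back to back, each sitting at the head of its free
 * list, and together covering the whole managed range.
 */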
static int check_mm(struct kunit *test, struct drm_buddy *mm)
{
	struct drm_buddy_block *root;
	struct drm_buddy_block *prev;
	unsigned int i;
	u64 total;
	int err = 0;

	if (!mm->n_roots) {
		kunit_err(test, "n_roots is zero\n");
		return -EINVAL;
	}

	if (mm->n_roots != hweight64(mm->size)) {
		kunit_err(test, "n_roots mismatch, n_roots=%u, expected=%lu\n",
			  mm->n_roots, hweight64(mm->size));
		return -EINVAL;
	}

	root = NULL;
	prev = NULL;
	total = 0;

	for (i = 0; i < mm->n_roots; ++i) {
		struct drm_buddy_block *block;
		unsigned int order;

		root = mm->roots[i];
		if (!root) {
			kunit_err(test, "root(%u) is NULL\n", i);
			err = -EINVAL;
			break;
		}

		err = check_block(test, mm, root);

		if (!drm_buddy_block_is_free(root)) {
			kunit_err(test, "root not free\n");
			err = -EINVAL;
		}

		order = drm_buddy_block_order(root);

		if (!i) {
			if (order != mm->max_order) {
				kunit_err(test, "max order root missing\n");
				err = -EINVAL;
			}
		}

		if (prev) {
			u64 prev_block_size;
			u64 prev_offset;
			u64 offset;

			prev_offset = drm_buddy_block_offset(prev);
			prev_block_size = drm_buddy_block_size(mm, prev);
			offset = drm_buddy_block_offset(root);

			if (offset != (prev_offset + prev_block_size)) {
				kunit_err(test, "root offset mismatch\n");
				err = -EINVAL;
			}
		}

		block = list_first_entry_or_null(&mm->free_list[order],
						 struct drm_buddy_block, link);
		if (block != root) {
			kunit_err(test, "root mismatch at order=%u\n", order);
			err = -EINVAL;
		}

		if (err)
			break;

		prev = root;
		total += drm_buddy_block_size(mm, root);
	}

	if (!err) {
		if (total != mm->size) {
			kunit_err(test, "expected mm size=%llx, found=%llx\n",
				  mm->size, total);
			err = -EINVAL;
		}
		return err;
	}

	if (prev) {
		kunit_err(test, "prev root(%u), dump:\n", i - 1);
		dump_block(test, mm, prev);
	}

	if (root) {
		kunit_err(test, "bad root(%u), dump:\n", i);
		dump_block(test, mm, root);
	}

	return err;
}

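/* Pick a pseudo-random mm size and chunk size (in bytes) for the randomised tests */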
static void mm_config(u64 *size, u64 *chunk_size)
{
	DRM_RND_STATE(prng, random_seed);
	u32 s, ms;

	/* Nothing fancy, just try to get an interesting bit pattern */

	prandom_seed_state(&prng, random_seed);

	/* Let size be a random number of pages up to 8 GB (2M pages) */
	s = 1 + drm_prandom_u32_max_state((BIT(33 - 12)) - 1, &prng);
	/* Let the chunk size be a random power of 2 less than size */
	ms = BIT(drm_prandom_u32_max_state(ilog2(s), &prng));
	/* Round size down to the chunk size */
	s &= -ms;

	/* Convert from pages to bytes */
	*chunk_size = (u64)ms << 12;
	*size = (u64)s << 12;
}

static void drm_test_buddy_alloc_pathological(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block;
	const int max_order = 3;
	unsigned long flags = 0;
	int order, top;
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	LIST_HEAD(holes);
	LIST_HEAD(tmp);

	/*
	 * Create a pot-sized mm, then allocate one of each possible
	 * order within. This should leave the mm with exactly one
	 * page left. Free the largest block, then whittle down again.
	 * Eventually we will have a fully 50% fragmented mm.
	 */

	mm_size = PAGE_SIZE << max_order;
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
			       "buddy_init failed\n");

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	for (top = max_order; top; top--) {
		/* Make room by freeing the largest allocated block */
		block = list_first_entry_or_null(&blocks, typeof(*block), link);
		if (block) {
			list_del(&block->link);
			drm_buddy_free_block(&mm, block);
		}

		for (order = top; order--;) {
			size = get_size(order, PAGE_SIZE);
			KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start,
									    mm_size, size, size,
									    &tmp, flags),
					       "buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
					       order, top);

			block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
			KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

			list_move_tail(&block->link, &blocks);
		}

		/* There should be one final page for this sub-allocation */
		size = get_size(0, PAGE_SIZE);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM for hole\n");

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_move_tail(&block->link, &holes);

		size = get_size(top, PAGE_SIZE);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
				      top, max_order);
	}

	drm_buddy_free_list(&mm, &holes);

	/* Nothing larger than blocks of chunk_size now available */
	for (order = 1; order <= max_order; order++) {
		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded at order %d, it should be full!",
				      order);
	}

	list_splice_tail(&holes, &blocks);
	drm_buddy_free_list(&mm, &blocks);
	drm_buddy_fini(&mm);
}

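/*
 * For each possible order (visited in a random sequence), fill the whole mm
 * by repeatedly allocating blocks of that order, falling back to smaller
 * orders once it is exhausted, then verify the allocations and free
 * everything again.
 */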
static void drm_test_buddy_alloc_smoke(struct kunit *test)
{
	u64 mm_size, chunk_size, start = 0;
	unsigned long flags = 0;
	struct drm_buddy mm;
	int *order;
	int i;

	DRM_RND_STATE(prng, random_seed);
	TIMEOUT(end_time);

	mm_config(&mm_size, &chunk_size);

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, chunk_size),
			       "buddy_init failed\n");

	order = drm_random_order(mm.max_order + 1, &prng);
	KUNIT_ASSERT_TRUE(test, order);

	for (i = 0; i <= mm.max_order; ++i) {
		struct drm_buddy_block *block;
		int max_order = order[i];
		bool timeout = false;
		LIST_HEAD(blocks);
		u64 total, size;
		LIST_HEAD(tmp);
		int order, err;

		KUNIT_ASSERT_FALSE_MSG(test, check_mm(test, &mm),
				       "pre-mm check failed, abort\n");

		order = max_order;
		total = 0;

		do {
retry:
			size = get_size(order, chunk_size);
			err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags);
			if (err) {
				if (err == -ENOMEM) {
					KUNIT_FAIL(test, "buddy_alloc hit -ENOMEM with order=%d\n",
						   order);
				} else {
					if (order--) {
						err = 0;
						goto retry;
					}

					KUNIT_FAIL(test, "buddy_alloc with order=%d failed\n",
						   order);
				}

				break;
			}

			block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
			KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

			list_move_tail(&block->link, &blocks);
			KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), order,
					    "buddy_alloc order mismatch\n");

			total += drm_buddy_block_size(&mm, block);

			if (__timeout(end_time, NULL)) {
				timeout = true;
				break;
			}
		} while (total < mm.size);

		if (!err)
			err = check_blocks(test, &mm, &blocks, total, false);

		drm_buddy_free_list(&mm, &blocks);

		if (!err) {
			KUNIT_EXPECT_FALSE_MSG(test, check_mm(test, &mm),
					       "post-mm check failed\n");
		}

		if (err || timeout)
			break;

		cond_resched();
	}

	kfree(order);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block, *bn;
	const unsigned int max_order = 16;
	unsigned long flags = 0;
	struct drm_buddy mm;
	unsigned int order;
	LIST_HEAD(blocks);
	LIST_HEAD(tmp);

	/*
	 * Create a pot-sized mm, then allocate one of each possible
	 * order within. This should leave the mm with exactly one
	 * page left.
	 */

	mm_size = PAGE_SIZE << max_order;
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
			       "buddy_init failed\n");

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	for (order = 0; order < max_order; order++) {
		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_move_tail(&block->link, &blocks);
	}

	/* And now the last remaining block available */
	size = get_size(0, PAGE_SIZE);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							    size, size, &tmp, flags),
			       "buddy_alloc hit -ENOMEM on final alloc\n");

	block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
	KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

	list_move_tail(&block->link, &blocks);

	/* Should be completely full! */
	for (order = max_order; order--;) {
		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded, it should be full!");
	}

	block = list_last_entry(&blocks, typeof(*block), link);
	list_del(&block->link);
	drm_buddy_free_block(&mm, block);

	/* As we free in increasing size, we make available larger blocks */
	order = 1;
	list_for_each_entry_safe(block, bn, &blocks, link) {
		list_del(&block->link);
		drm_buddy_free_block(&mm, block);

		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_del(&block->link);
		drm_buddy_free_block(&mm, block);
		order++;
	}

	/* To confirm, now the whole mm should be available */
	size = get_size(max_order, PAGE_SIZE);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							    size, size, &tmp, flags),
			       "buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
			       max_order);

	block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
	KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

	list_del(&block->link);
	drm_buddy_free_block(&mm, block);
	drm_buddy_free_list(&mm, &blocks);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_optimistic(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block;
	unsigned long flags = 0;
	const int max_order = 16;
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	LIST_HEAD(tmp);
	int order;

	/*
	 * Create a mm with one block of each order available, and
	 * try to allocate them all.
	 */

	mm_size = PAGE_SIZE * ((1 << (max_order + 1)) - 1);

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
			       "buddy_init failed\n");

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	for (order = 0; order <= max_order; order++) {
		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_move_tail(&block->link, &blocks);
	}

	/* Should be completely full! */
	size = get_size(0, PAGE_SIZE);
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							   size, size, &tmp, flags),
			      "buddy_alloc unexpectedly succeeded, it should be full!");

	drm_buddy_free_list(&mm, &blocks);
	drm_buddy_fini(&mm);
}

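/*
 * Walk the whole mm front to back with range-restricted allocations, using a
 * prime number of chunks per request, and check that each allocation starts
 * exactly where the previous one ended and is contiguous.
 */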
static void drm_test_buddy_alloc_range(struct kunit *test)
{
	unsigned long flags = DRM_BUDDY_RANGE_ALLOCATION;
	u64 offset, size, rem, chunk_size, end;
	unsigned long page_num;
	struct drm_buddy mm;
	LIST_HEAD(blocks);

	mm_config(&size, &chunk_size);

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, size, chunk_size),
			       "buddy_init failed");

	KUNIT_ASSERT_FALSE_MSG(test, check_mm(test, &mm),
			       "pre-mm check failed, abort!");

	rem = mm.size;
	offset = 0;

	for_each_prime_number_from(page_num, 1, ULONG_MAX - 1) {
		struct drm_buddy_block *block;
		LIST_HEAD(tmp);

		size = min(page_num * mm.chunk_size, rem);
		end = offset + size;

		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, offset, end,
								    size, mm.chunk_size,
								    &tmp, flags),
				       "alloc_range with offset=%llx, size=%llx failed\n", offset, size);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_range has no blocks\n");

		KUNIT_ASSERT_EQ_MSG(test, drm_buddy_block_offset(block), offset,
				    "alloc_range start offset mismatch, found=%llx, expected=%llx\n",
				    drm_buddy_block_offset(block), offset);

		KUNIT_ASSERT_FALSE(test, check_blocks(test, &mm, &tmp, size, true));

		list_splice_tail(&tmp, &blocks);

		offset += size;

		rem -= size;
		if (!rem)
			break;

		cond_resched();
	}

	drm_buddy_free_list(&mm, &blocks);

	KUNIT_EXPECT_FALSE_MSG(test, check_mm(test, &mm), "post-mm check failed\n");

	drm_buddy_fini(&mm);
}

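/*
 * Initialise the largest possible mm (U64_MAX) and check that max_order is
 * clamped to DRM_BUDDY_MAX_ORDER, then allocate and verify a single block of
 * that maximum order.
 */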
static void drm_test_buddy_alloc_limit(struct kunit *test)
{
	u64 size = U64_MAX, start = 0;
	struct drm_buddy_block *block;
	unsigned long flags = 0;
	LIST_HEAD(allocated);
	struct drm_buddy mm;

	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, PAGE_SIZE));

	KUNIT_EXPECT_EQ_MSG(test, mm.max_order, DRM_BUDDY_MAX_ORDER,
			    "mm.max_order(%d) != %d\n", mm.max_order,
			    DRM_BUDDY_MAX_ORDER);

	size = mm.chunk_size << mm.max_order;
	KUNIT_EXPECT_FALSE(test, drm_buddy_alloc_blocks(&mm, start, size, size,
							PAGE_SIZE, &allocated, flags));

	block = list_first_entry_or_null(&allocated, struct drm_buddy_block, link);
	KUNIT_EXPECT_TRUE(test, block);

	KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), mm.max_order,
			    "block order(%d) != %d\n",
			    drm_buddy_block_order(block), mm.max_order);

	KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_size(&mm, block),
			    BIT_ULL(mm.max_order) * PAGE_SIZE,
			    "block size(%llu) != %llu\n",
			    drm_buddy_block_size(&mm, block),
			    BIT_ULL(mm.max_order) * PAGE_SIZE);

	drm_buddy_free_list(&mm, &allocated);
	drm_buddy_fini(&mm);
}

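/* Suite init: pick a non-zero seed shared by all the randomised tests */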
static int drm_buddy_init_test(struct kunit *test)
{
	while (!random_seed)
		random_seed = get_random_u32();

	return 0;
}

static struct kunit_case drm_buddy_tests[] = {
	KUNIT_CASE(drm_test_buddy_alloc_limit),
	KUNIT_CASE(drm_test_buddy_alloc_range),
	KUNIT_CASE(drm_test_buddy_alloc_optimistic),
	KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
	KUNIT_CASE(drm_test_buddy_alloc_smoke),
	KUNIT_CASE(drm_test_buddy_alloc_pathological),
	{}
};

static struct kunit_suite drm_buddy_test_suite = {
	.name = "drm_buddy",
	.init = drm_buddy_init_test,
	.test_cases = drm_buddy_tests,
};

kunit_test_suite(drm_buddy_test_suite);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");