// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 * Copyright © 2022 Maíra Canal <mairacanal@riseup.net>
 */

#include <kunit/test.h>

#include <linux/prime_numbers.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>

#include <drm/drm_buddy.h>

#include "../lib/drm_random.h"

static unsigned int random_seed;

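/* Size in bytes of one block of the given order, i.e. (2^order) * chunk_size. */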
static inline u64 get_size(int order, u64 chunk_size)
{
	return (1 << order) * chunk_size;
}

static void drm_test_buddy_alloc_range_bias(struct kunit *test)
{
	u32 mm_size, size, ps, bias_size, bias_start, bias_end, bias_rem;
	DRM_RND_STATE(prng, random_seed);
	unsigned int i, count, *order;
	struct drm_buddy_block *block;
	unsigned long flags;
	struct drm_buddy mm;
	LIST_HEAD(allocated);

	bias_size = SZ_1M;
	ps = roundup_pow_of_two(prandom_u32_state(&prng) % bias_size);
	ps = max(SZ_4K, ps);
	mm_size = (SZ_8M-1) & ~(ps-1); /* Multiple roots */

	kunit_info(test, "mm_size=%u, ps=%u\n", mm_size, ps);

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
			       "buddy_init failed\n");

	count = mm_size / bias_size;
	order = drm_random_order(count, &prng);
	KUNIT_EXPECT_TRUE(test, order);

	/*
	 * Idea is to split the address space into uniform bias ranges, and then
	 * in some random order allocate within each bias, using various
	 * patterns within. This should detect if allocations leak out from a
	 * given bias, for example.
	 */

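	/*
	 * mm_size is just under 8M, so this gives seven uniform 1M bias
	 * ranges, visited in the random order[] permutation. Note that
	 * drm_buddy_alloc_blocks() returns 0 on success, so the *_TRUE
	 * asserts below expect the call to fail and the *_FALSE asserts
	 * expect it to succeed.
	 */
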
	for (i = 0; i < count; i++) {
		LIST_HEAD(tmp);
		u32 size;

		bias_start = order[i] * bias_size;
		bias_end = bias_start + bias_size;
		bias_rem = bias_size;

		/* internal round_up too big */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start,
							     bias_end, bias_size + ps, bias_size,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start, bias_end, bias_size + ps, bias_size);

		/* size too big */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start,
							     bias_end, bias_size + ps, ps,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start, bias_end, bias_size + ps, ps);

		/* bias range too small for size */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start + ps,
							     bias_end, bias_size, ps,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start + ps, bias_end, bias_size, ps);

		/* bias misaligned */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start + ps,
							     bias_end - ps,
							     bias_size >> 1, bias_size >> 1,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start + ps, bias_end - ps, bias_size >> 1, bias_size >> 1);

		/* single big page */
		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, bias_start,
							      bias_end, bias_size, bias_size,
							      &tmp,
							      DRM_BUDDY_RANGE_ALLOCATION),
				       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
				       bias_start, bias_end, bias_size, bias_size);
		drm_buddy_free_list(&mm, &tmp, 0);

		/* single page with internal round_up */
		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, bias_start,
							      bias_end, ps, bias_size,
							      &tmp,
							      DRM_BUDDY_RANGE_ALLOCATION),
				       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
				       bias_start, bias_end, ps, bias_size);
		drm_buddy_free_list(&mm, &tmp, 0);

		/* random size within */
		size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
		if (size)
			KUNIT_ASSERT_FALSE_MSG(test,
					       drm_buddy_alloc_blocks(&mm, bias_start,
								      bias_end, size, ps,
								      &tmp,
								      DRM_BUDDY_RANGE_ALLOCATION),
					       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
					       bias_start, bias_end, size, ps);

		bias_rem -= size;
		/* too big for current avail */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start,
							     bias_end, bias_rem + ps, ps,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start, bias_end, bias_rem + ps, ps);

		if (bias_rem) {
			/* random fill of the remainder */
			size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);

			KUNIT_ASSERT_FALSE_MSG(test,
					       drm_buddy_alloc_blocks(&mm, bias_start,
								      bias_end, size, ps,
								      &allocated,
								      DRM_BUDDY_RANGE_ALLOCATION),
					       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
					       bias_start, bias_end, size, ps);
			/*
			 * Intentionally allow some space to be left
			 * unallocated, and ideally not always on the bias
			 * boundaries.
			 */
			drm_buddy_free_list(&mm, &tmp, 0);
		} else {
			list_splice_tail(&tmp, &allocated);
		}
	}

	kfree(order);
	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);

	/*
	 * Something more free-form. Idea is to pick a random starting bias
	 * range within the address space and then start filling it up. Also
	 * randomly grow the bias range in both directions as we go along. This
	 * should give us bias start/end which is not always uniform like above,
	 * and in some cases will require the allocator to jump over already
	 * allocated nodes in the middle of the address space.
	 */

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
			       "buddy_init failed\n");

	bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps);
	bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps);
	bias_end = max(bias_end, bias_start + ps);
	bias_rem = bias_end - bias_start;

	do {
		u32 size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);

		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, bias_start,
							      bias_end, size, ps,
							      &allocated,
							      DRM_BUDDY_RANGE_ALLOCATION),
				       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
				       bias_start, bias_end, size, ps);
		bias_rem -= size;

		/*
		 * Try to randomly grow the bias range in both directions, or
		 * only one, or perhaps don't grow at all.
		 */
		do {
			u32 old_bias_start = bias_start;
			u32 old_bias_end = bias_end;

			if (bias_start)
				bias_start -= round_up(prandom_u32_state(&prng) % bias_start, ps);
			if (bias_end != mm_size)
				bias_end += round_up(prandom_u32_state(&prng) % (mm_size - bias_end), ps);

			bias_rem += old_bias_start - bias_start;
			bias_rem += bias_end - old_bias_end;
		} while (!bias_rem && (bias_start || bias_end != mm_size));
	} while (bias_rem);

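	/*
	 * The loop above only terminates once the bias window has grown to
	 * cover the whole mm and every page within it has been allocated, so
	 * the window must now be exactly [0, mm_size) and one more page-sized
	 * allocation must fail.
	 */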
	KUNIT_ASSERT_EQ(test, bias_start, 0);
	KUNIT_ASSERT_EQ(test, bias_end, mm_size);
	KUNIT_ASSERT_TRUE_MSG(test,
			      drm_buddy_alloc_blocks(&mm, bias_start, bias_end,
						     ps, ps,
						     &allocated,
						     DRM_BUDDY_RANGE_ALLOCATION),
			      "buddy_alloc passed with bias(%x-%x), size=%u\n",
			      bias_start, bias_end, ps);

	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);

	/*
	 * Allocate cleared blocks in the bias range when the DRM buddy's clear avail is
	 * zero. This will validate the bias range allocation in scenarios like system boot
	 * when no cleared blocks are available and exercise the fallback path too. The resulting
	 * blocks should always be dirty.
	 */

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
			       "buddy_init failed\n");

	bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps);
	bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps);
	bias_end = max(bias_end, bias_start + ps);
	bias_rem = bias_end - bias_start;

	flags = DRM_BUDDY_CLEAR_ALLOCATION | DRM_BUDDY_RANGE_ALLOCATION;
	size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);

	KUNIT_ASSERT_FALSE_MSG(test,
			       drm_buddy_alloc_blocks(&mm, bias_start,
						      bias_end, size, ps,
						      &allocated,
						      flags),
			       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
			       bias_start, bias_end, size, ps);

	list_for_each_entry(block, &allocated, link)
		KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);

	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_clear(struct kunit *test)
{
	unsigned long n_pages, total, i = 0;
	DRM_RND_STATE(prng, random_seed);
	const unsigned long ps = SZ_4K;
	struct drm_buddy_block *block;
	const int max_order = 12;
	LIST_HEAD(allocated);
	struct drm_buddy mm;
	unsigned int order;
	u32 mm_size, size;
	LIST_HEAD(dirty);
	LIST_HEAD(clean);

	mm_size = SZ_4K << max_order;
	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	/*
	 * Idea is to allocate and free some random portion of the address space,
	 * returning those pages as non-dirty, then randomly alternate between
	 * requesting dirty and non-dirty pages (without going over the limit
	 * we freed as non-dirty), putting them into two separate lists.
	 * Loop over both lists at the end checking that the dirty list
	 * is indeed all dirty pages and vice versa. Free it all again,
	 * keeping the dirty/clear status.
	 */
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    5 * ps, ps, &allocated,
							    DRM_BUDDY_TOPDOWN_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 5 * ps);
	drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);

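	/*
	 * Freeing those five pages back with DRM_BUDDY_CLEARED marks them as
	 * clear, which is what allows the (at most five) clear-allocation
	 * requests in the loop below to be satisfied from genuinely clear
	 * blocks.
	 */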
	n_pages = 10;
	do {
		unsigned long flags;
		struct list_head *list;
		int slot = i % 2;

		if (slot == 0) {
			list = &dirty;
			flags = 0;
		} else {
			list = &clean;
			flags = DRM_BUDDY_CLEAR_ALLOCATION;
		}

		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
								    ps, ps, list,
								    flags),
				       "buddy_alloc hit an error size=%lu\n", ps);
	} while (++i < n_pages);

	list_for_each_entry(block, &clean, link)
		KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), true);

	list_for_each_entry(block, &dirty, link)
		KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);

	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);

	/*
	 * Trying to go over the clear limit for some allocation.
	 * The allocation should never fail with reasonable page-size.
	 */
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    10 * ps, ps, &clean,
							    DRM_BUDDY_CLEAR_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 10 * ps);

	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
	drm_buddy_free_list(&mm, &dirty, 0);
	drm_buddy_fini(&mm);

	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));

	/*
	 * Create a new mm. Intentionally fragment the address space by creating
	 * two alternating lists. Free both lists, one as dirty the other as clean.
	 * Try to allocate double the previous size with matching min_page_size. The
	 * allocation should never fail as it calls the force_merge. Also check that
	 * the page is always dirty after force_merge. Free the page as dirty, then
	 * repeat the whole thing, incrementing the order until we hit the max_order.
	 */

	i = 0;
	n_pages = mm_size / ps;
	do {
		struct list_head *list;
		int slot = i % 2;

		if (slot == 0)
			list = &dirty;
		else
			list = &clean;

		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
								    ps, ps, list, 0),
				       "buddy_alloc hit an error size=%lu\n", ps);
	} while (++i < n_pages);

	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
	drm_buddy_free_list(&mm, &dirty, 0);

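	/*
	 * The whole mm is now free again, with clear and dirty single pages
	 * interleaved, so any order >= 1 allocation below has to force-merge
	 * mixed blocks; the test expects the merged result to be reported as
	 * dirty (the whole-mm case is skipped in the check).
	 */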
	order = 1;
	do {
		size = SZ_4K << order;

		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
								    size, size, &allocated,
								    DRM_BUDDY_CLEAR_ALLOCATION),
				       "buddy_alloc hit an error size=%u\n", size);
		total = 0;
		list_for_each_entry(block, &allocated, link) {
			if (size != mm_size)
				KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);
			total += drm_buddy_block_size(&mm, block);
		}
		KUNIT_EXPECT_EQ(test, total, size);

		drm_buddy_free_list(&mm, &allocated, 0);
	} while (++order <= max_order);

	drm_buddy_fini(&mm);

	/*
	 * Create a new mm with a non power-of-two size. Allocate a random size, free as
	 * cleared and then call fini. This will ensure the multi-root force merge during
	 * fini.
	 */
	mm_size = 12 * SZ_4K;
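	/*
	 * A 48K mm with 4K chunks should be built from more than one root
	 * (e.g. a 32K and a 16K root), so fini has to force-merge across
	 * roots when verifying the space is idle.
	 */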
	size = max(round_up(prandom_u32_state(&prng) % mm_size, ps), ps);
	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    size, ps, &allocated,
							    DRM_BUDDY_TOPDOWN_ALLOCATION),
			       "buddy_alloc hit an error size=%u\n", size);
	drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_contiguous(struct kunit *test)
{
	const unsigned long ps = SZ_4K, mm_size = 16 * 3 * SZ_4K;
	unsigned long i, n_pages, total;
	struct drm_buddy_block *block;
	struct drm_buddy mm;
	LIST_HEAD(left);
	LIST_HEAD(middle);
	LIST_HEAD(right);
	LIST_HEAD(allocated);

	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));

	/*
	 * Idea is to fragment the address space by alternating block
	 * allocations between three different lists; one for left, middle and
	 * right. We can then free a list to simulate fragmentation. In
	 * particular we want to exercise the DRM_BUDDY_CONTIGUOUS_ALLOCATION,
	 * including the try_harder path.
	 */

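	/*
	 * mm_size is 48 pages; allocating them one at a time round-robin
	 * across the three lists is intended to leave each list holding every
	 * third page, so freeing a single list creates evenly spaced
	 * single-page holes.
	 */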
	i = 0;
	n_pages = mm_size / ps;
	do {
		struct list_head *list;
		int slot = i % 3;

		if (slot == 0)
			list = &left;
		else if (slot == 1)
			list = &middle;
		else
			list = &right;
		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, 0, mm_size,
							      ps, ps, list, 0),
				       "buddy_alloc hit an error size=%lu\n",
				       ps);
	} while (++i < n_pages);

	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							   3 * ps, ps, &allocated,
							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			      "buddy_alloc didn't error size=%lu\n", 3 * ps);

	drm_buddy_free_list(&mm, &middle, 0);
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							   3 * ps, ps, &allocated,
							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			      "buddy_alloc didn't error size=%lu\n", 3 * ps);
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							   2 * ps, ps, &allocated,
							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			      "buddy_alloc didn't error size=%lu\n", 2 * ps);

	drm_buddy_free_list(&mm, &right, 0);
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							   3 * ps, ps, &allocated,
							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			      "buddy_alloc didn't error size=%lu\n", 3 * ps);
	/*
	 * At this point we should have enough contiguous space for 2 blocks,
	 * however they are never buddies (since we freed middle and right) so
	 * will require the try_harder logic to find them.
	 */
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    2 * ps, ps, &allocated,
							    DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 2 * ps);

	drm_buddy_free_list(&mm, &left, 0);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    3 * ps, ps, &allocated,
							    DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 3 * ps);

	total = 0;
	list_for_each_entry(block, &allocated, link)
		total += drm_buddy_block_size(&mm, block);

	KUNIT_ASSERT_EQ(test, total, ps * 2 + ps * 3);

	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_pathological(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block;
	const int max_order = 3;
	unsigned long flags = 0;
	int order, top;
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	LIST_HEAD(holes);
	LIST_HEAD(tmp);

	/*
	 * Create a pot-sized mm, then allocate one of each possible
	 * order within. This should leave the mm with exactly one
	 * page left. Free the largest block, then whittle down again.
	 * Eventually we will have a fully 50% fragmented mm.
	 */

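	/*
	 * With max_order = 3 the mm is 8 pages. Each pass of the loop below
	 * frees the largest block allocated so far, re-fills the space with
	 * one block of each smaller order plus a single spare page, and then
	 * verifies the mm is completely full again.
	 */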
	mm_size = SZ_4K << max_order;
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
			       "buddy_init failed\n");

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	for (top = max_order; top; top--) {
		/* Make room by freeing the largest allocated block */
		block = list_first_entry_or_null(&blocks, typeof(*block), link);
		if (block) {
			list_del(&block->link);
			drm_buddy_free_block(&mm, block);
		}

		for (order = top; order--;) {
			size = get_size(order, mm.chunk_size);
			KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start,
									    mm_size, size, size,
									    &tmp, flags),
					       "buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
					       order, top);

			block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
			KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

			list_move_tail(&block->link, &blocks);
		}

		/* There should be one final page for this sub-allocation */
		size = get_size(0, mm.chunk_size);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM for hole\n");

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_move_tail(&block->link, &holes);

		size = get_size(top, mm.chunk_size);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
				      top, max_order);
	}

	drm_buddy_free_list(&mm, &holes, 0);

	/* Nothing larger than blocks of chunk_size now available */
	for (order = 1; order <= max_order; order++) {
		size = get_size(order, mm.chunk_size);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded at order %d, it should be full!",
				      order);
	}

	list_splice_tail(&holes, &blocks);
	drm_buddy_free_list(&mm, &blocks, 0);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block, *bn;
	const unsigned int max_order = 16;
	unsigned long flags = 0;
	struct drm_buddy mm;
	unsigned int order;
	LIST_HEAD(blocks);
	LIST_HEAD(tmp);

	/*
	 * Create a pot-sized mm, then allocate one of each possible
	 * order within. This should leave the mm with exactly one
	 * page left.
	 */

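	/*
	 * One block of each order 0..(max_order - 1) adds up to
	 * (2^max_order - 1) pages, so after the loop below a single page of
	 * the 2^max_order total remains free.
	 */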
	mm_size = SZ_4K << max_order;
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
			       "buddy_init failed\n");

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	for (order = 0; order < max_order; order++) {
		size = get_size(order, mm.chunk_size);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_move_tail(&block->link, &blocks);
	}

	/* And now the last remaining block available */
	size = get_size(0, mm.chunk_size);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							    size, size, &tmp, flags),
			       "buddy_alloc hit -ENOMEM on final alloc\n");

	block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
	KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

	list_move_tail(&block->link, &blocks);

	/* Should be completely full! */
	for (order = max_order; order--;) {
		size = get_size(order, mm.chunk_size);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded, it should be full!");
	}

	block = list_last_entry(&blocks, typeof(*block), link);
	list_del(&block->link);
	drm_buddy_free_block(&mm, block);

	/* As we free in increasing size, we make available larger blocks */
	order = 1;
	list_for_each_entry_safe(block, bn, &blocks, link) {
		list_del(&block->link);
		drm_buddy_free_block(&mm, block);

		size = get_size(order, mm.chunk_size);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_del(&block->link);
		drm_buddy_free_block(&mm, block);
		order++;
	}

	/* To confirm, now the whole mm should be available */
	size = get_size(max_order, mm.chunk_size);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							    size, size, &tmp, flags),
			       "buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
			       max_order);

	block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
	KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

	list_del(&block->link);
	drm_buddy_free_block(&mm, block);
	drm_buddy_free_list(&mm, &blocks, 0);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_optimistic(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block;
	unsigned long flags = 0;
	const int max_order = 16;
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	LIST_HEAD(tmp);
	int order;

	/*
	 * Create a mm with one block of each order available, and
	 * try to allocate them all.
	 */

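	/*
	 * One block of each order 0..max_order totals (2^(max_order + 1) - 1)
	 * pages, which is exactly the mm_size chosen below, so every
	 * allocation should succeed with nothing left over.
	 */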
	mm_size = SZ_4K * ((1 << (max_order + 1)) - 1);

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
			       "buddy_init failed\n");

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	for (order = 0; order <= max_order; order++) {
		size = get_size(order, mm.chunk_size);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_move_tail(&block->link, &blocks);
	}

	/* Should be completely full! */
	size = get_size(0, mm.chunk_size);
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							   size, size, &tmp, flags),
			      "buddy_alloc unexpectedly succeeded, it should be full!");

	drm_buddy_free_list(&mm, &blocks, 0);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_limit(struct kunit *test)
{
	u64 size = U64_MAX, start = 0;
	struct drm_buddy_block *block;
	unsigned long flags = 0;
	LIST_HEAD(allocated);
	struct drm_buddy mm;

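	/*
	 * With a U64_MAX size the mm's max_order should be capped at
	 * DRM_BUDDY_MAX_ORDER, so the largest single block the allocator can
	 * hand back is chunk_size << max_order; the checks below verify both.
	 */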
	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, SZ_4K));

	KUNIT_EXPECT_EQ_MSG(test, mm.max_order, DRM_BUDDY_MAX_ORDER,
			    "mm.max_order(%d) != %d\n", mm.max_order,
			    DRM_BUDDY_MAX_ORDER);

	size = mm.chunk_size << mm.max_order;
	KUNIT_EXPECT_FALSE(test, drm_buddy_alloc_blocks(&mm, start, size, size,
							mm.chunk_size, &allocated, flags));

	block = list_first_entry_or_null(&allocated, struct drm_buddy_block, link);
	KUNIT_EXPECT_TRUE(test, block);

	KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), mm.max_order,
			    "block order(%d) != %d\n",
			    drm_buddy_block_order(block), mm.max_order);

	KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_size(&mm, block),
			    BIT_ULL(mm.max_order) * mm.chunk_size,
			    "block size(%llu) != %llu\n",
			    drm_buddy_block_size(&mm, block),
			    BIT_ULL(mm.max_order) * mm.chunk_size);

	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);
}

static int drm_buddy_suite_init(struct kunit_suite *suite)
{
	while (!random_seed)
		random_seed = get_random_u32();

	kunit_info(suite, "Testing DRM buddy manager, with random_seed=0x%x\n",
		   random_seed);

	return 0;
}

static struct kunit_case drm_buddy_tests[] = {
	KUNIT_CASE(drm_test_buddy_alloc_limit),
	KUNIT_CASE(drm_test_buddy_alloc_optimistic),
	KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
	KUNIT_CASE(drm_test_buddy_alloc_pathological),
	KUNIT_CASE(drm_test_buddy_alloc_contiguous),
	KUNIT_CASE(drm_test_buddy_alloc_clear),
	KUNIT_CASE(drm_test_buddy_alloc_range_bias),
	{}
};

static struct kunit_suite drm_buddy_test_suite = {
	.name = "drm_buddy",
	.suite_init = drm_buddy_suite_init,
	.test_cases = drm_buddy_tests,
};

kunit_test_suite(drm_buddy_test_suite);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Kunit test for drm_buddy functions");
MODULE_LICENSE("GPL");