// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 * Copyright © 2022 Maíra Canal <mairacanal@riseup.net>
 */

#include <kunit/test.h>

#include <linux/prime_numbers.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>

#include <drm/drm_buddy.h>

#include "../lib/drm_random.h"

static unsigned int random_seed;

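/*
 * A worked example: get_size(4, SZ_4K) == SZ_64K, i.e. an order-n block
 * spans 2^n chunks of chunk_size bytes.
 */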
static inline u64 get_size(int order, u64 chunk_size)
{
	return (1 << order) * chunk_size;
}

static void drm_test_buddy_fragmentation_performance(struct kunit *test)
{
	struct drm_buddy_block *block, *tmp;
	int num_blocks, i, ret, count = 0;
	LIST_HEAD(allocated_blocks);
	unsigned long elapsed_ms;
	LIST_HEAD(reverse_list);
	LIST_HEAD(test_blocks);
	LIST_HEAD(clear_list);
	LIST_HEAD(dirty_list);
	LIST_HEAD(free_list);
	struct drm_buddy mm;
	u64 mm_size = SZ_4G;
	ktime_t start, end;

	/*
	 * Allocation under severe fragmentation
	 *
	 * Create severe fragmentation by allocating the entire 4 GiB address space
	 * as tiny 8 KiB blocks but forcing a 64 KiB alignment. The resulting pattern
	 * leaves many scattered holes. Split the allocations into two groups and
	 * return them with different flags to block coalescing, then repeatedly
	 * allocate and free 64 KiB blocks while timing the loop. This stresses how
	 * quickly the allocator can satisfy larger, aligned requests from a pool of
	 * highly fragmented space.
	 */
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
			       "buddy_init failed\n");

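	/*
	 * With 64 KiB alignment each 8 KiB allocation consumes its own
	 * 64 KiB slot, so exactly SZ_4G / SZ_64K allocations fill the space.
	 */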
	num_blocks = mm_size / SZ_64K;

	start = ktime_get();
	/* Allocate with maximum fragmentation - 8K blocks with 64K alignment */
	for (i = 0; i < num_blocks; i++)
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_8K, SZ_64K,
								    &allocated_blocks, 0),
				       "buddy_alloc hit an error size=%u\n", SZ_8K);

	list_for_each_entry_safe(block, tmp, &allocated_blocks, link) {
		if (count % 4 == 0 || count % 4 == 3)
			list_move_tail(&block->link, &clear_list);
		else
			list_move_tail(&block->link, &dirty_list);
		count++;
	}
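	/*
	 * The count % 4 pattern (0,3 cleared; 1,2 dirty) interleaves the two
	 * lists so cleared and dirty regions alternate across the whole space.
	 */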

	/* Free with different flags to ensure no coalescing */
	drm_buddy_free_list(&mm, &clear_list, DRM_BUDDY_CLEARED);
	drm_buddy_free_list(&mm, &dirty_list, 0);

	for (i = 0; i < num_blocks; i++)
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_64K, SZ_64K,
								    &test_blocks, 0),
				       "buddy_alloc hit an error size=%u\n", SZ_64K);
	drm_buddy_free_list(&mm, &test_blocks, 0);

	end = ktime_get();
	elapsed_ms = ktime_to_ms(ktime_sub(end, start));

	kunit_info(test, "Fragmented allocation took %lu ms\n", elapsed_ms);

	drm_buddy_fini(&mm);

	/*
	 * Reverse free order under fragmentation
	 *
	 * Construct a fragmented 4 GiB space by allocating every 8 KiB block with
	 * 64 KiB alignment, creating a dense scatter of small regions. Half of the
	 * blocks are selectively freed to form sparse gaps, while the remaining
	 * allocations are preserved, reordered in reverse, and released back with
	 * the cleared flag. This models a pathological reverse-ordered free pattern
	 * and measures how quickly the allocator can merge and reclaim space when
	 * deallocation occurs in the opposite order of allocation, exposing the
	 * cost difference between a linear freelist scan and an ordered tree lookup.
	 */
	ret = drm_buddy_init(&mm, mm_size, SZ_4K);
	KUNIT_ASSERT_EQ(test, ret, 0);

	start = ktime_get();
	/* Allocate with maximum fragmentation */
	for (i = 0; i < num_blocks; i++)
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_8K, SZ_64K,
								    &allocated_blocks, 0),
				       "buddy_alloc hit an error size=%u\n", SZ_8K);

	/* Reset the split counter; it still holds the count from the first pass */
	count = 0;
	list_for_each_entry_safe(block, tmp, &allocated_blocks, link) {
		if (count % 2 == 0)
			list_move_tail(&block->link, &free_list);
		count++;
	}
	drm_buddy_free_list(&mm, &free_list, DRM_BUDDY_CLEARED);

	list_for_each_entry_safe_reverse(block, tmp, &allocated_blocks, link)
		list_move(&block->link, &reverse_list);
	drm_buddy_free_list(&mm, &reverse_list, DRM_BUDDY_CLEARED);

	end = ktime_get();
	elapsed_ms = ktime_to_ms(ktime_sub(end, start));

	kunit_info(test, "Reverse-ordered free took %lu ms\n", elapsed_ms);

	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_range_bias(struct kunit *test)
{
	u32 mm_size, size, ps, bias_size, bias_start, bias_end, bias_rem;
	DRM_RND_STATE(prng, random_seed);
	unsigned int i, count, *order;
	struct drm_buddy_block *block;
	unsigned long flags;
	struct drm_buddy mm;
	LIST_HEAD(allocated);

	bias_size = SZ_1M;
	ps = roundup_pow_of_two(prandom_u32_state(&prng) % bias_size);
	ps = max(SZ_4K, ps);
	mm_size = (SZ_8M - 1) & ~(ps - 1); /* Multiple roots */

	kunit_info(test, "mm_size=%u, ps=%u\n", mm_size, ps);

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
			       "buddy_init failed\n");

	count = mm_size / bias_size;
	order = drm_random_order(count, &prng);
	KUNIT_ASSERT_TRUE(test, order);

	/*
	 * Idea is to split the address space into uniform bias ranges, and then
	 * in some random order allocate within each bias, using various
	 * patterns within. This should detect if allocations leak out from a
	 * given bias, for example.
	 */

	for (i = 0; i < count; i++) {
		LIST_HEAD(tmp);
		u32 size;

		bias_start = order[i] * bias_size;
		bias_end = bias_start + bias_size;
		bias_rem = bias_size;

		/* internal round_up too big */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start,
							     bias_end, bias_size + ps, bias_size,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start, bias_end, bias_size + ps, bias_size);

		/* size too big */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start,
							     bias_end, bias_size + ps, ps,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start, bias_end, bias_size + ps, ps);

		/* bias range too small for size */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start + ps,
							     bias_end, bias_size, ps,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start + ps, bias_end, bias_size, ps);

		/*
		 * bias misaligned: both naturally aligned bias_size/2
		 * candidates overlap the ps trimmed from either end
		 */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start + ps,
							     bias_end - ps,
							     bias_size >> 1, bias_size >> 1,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start + ps, bias_end - ps, bias_size >> 1, bias_size >> 1);

		/* single big page */
		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, bias_start,
							      bias_end, bias_size, bias_size,
							      &tmp,
							      DRM_BUDDY_RANGE_ALLOCATION),
				       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
				       bias_start, bias_end, bias_size, bias_size);
		drm_buddy_free_list(&mm, &tmp, 0);

		/* single page with internal round_up */
		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, bias_start,
							      bias_end, ps, bias_size,
							      &tmp,
							      DRM_BUDDY_RANGE_ALLOCATION),
				       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
				       bias_start, bias_end, ps, bias_size);
		drm_buddy_free_list(&mm, &tmp, 0);

		/* random size within; always at least ps, so never zero */
		size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, bias_start,
							      bias_end, size, ps,
							      &tmp,
							      DRM_BUDDY_RANGE_ALLOCATION),
				       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
				       bias_start, bias_end, size, ps);

		bias_rem -= size;
		/* too big for current avail */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start,
							     bias_end, bias_rem + ps, ps,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start, bias_end, bias_rem + ps, ps);

		if (bias_rem) {
			/* random fill of the remainder */
			size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);

			KUNIT_ASSERT_FALSE_MSG(test,
					       drm_buddy_alloc_blocks(&mm, bias_start,
								      bias_end, size, ps,
								      &allocated,
								      DRM_BUDDY_RANGE_ALLOCATION),
					       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
					       bias_start, bias_end, size, ps);
			/*
			 * Intentionally allow some space to be left
			 * unallocated, and ideally not always on the bias
			 * boundaries.
			 */
			drm_buddy_free_list(&mm, &tmp, 0);
		} else {
			list_splice_tail(&tmp, &allocated);
		}
	}

	kfree(order);
	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);

	/*
	 * Something more free-form. Idea is to pick a random starting bias
	 * range within the address space and then start filling it up. Also
	 * randomly grow the bias range in both directions as we go along. This
	 * should give us bias start/end which is not always uniform like above,
	 * and in some cases will require the allocator to jump over already
	 * allocated nodes in the middle of the address space.
	 */

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
			       "buddy_init failed\n");

	bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps);
	bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps);
	bias_end = max(bias_end, bias_start + ps);
	bias_rem = bias_end - bias_start;

	do {
		u32 size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);

		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, bias_start,
							      bias_end, size, ps,
							      &allocated,
							      DRM_BUDDY_RANGE_ALLOCATION),
				       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
				       bias_start, bias_end, size, ps);
		bias_rem -= size;

		/*
		 * Try to randomly grow the bias range in both directions, or
		 * only one, or perhaps don't grow at all.
		 */
		do {
			u32 old_bias_start = bias_start;
			u32 old_bias_end = bias_end;

			if (bias_start)
				bias_start -= round_up(prandom_u32_state(&prng) % bias_start, ps);
			if (bias_end != mm_size)
				bias_end += round_up(prandom_u32_state(&prng) % (mm_size - bias_end), ps);

			bias_rem += old_bias_start - bias_start;
			bias_rem += bias_end - old_bias_end;
		} while (!bias_rem && (bias_start || bias_end != mm_size));
	} while (bias_rem);

	KUNIT_ASSERT_EQ(test, bias_start, 0);
	KUNIT_ASSERT_EQ(test, bias_end, mm_size);
	KUNIT_ASSERT_TRUE_MSG(test,
			      drm_buddy_alloc_blocks(&mm, bias_start, bias_end,
						     ps, ps,
						     &allocated,
						     DRM_BUDDY_RANGE_ALLOCATION),
			      "buddy_alloc didn't fail with bias(%x-%x), size=%u\n",
			      bias_start, bias_end, ps);

	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);

	/*
	 * Allocate cleared blocks in the bias range when the DRM buddy's clear avail is
	 * zero. This will validate the bias range allocation in scenarios like system boot
	 * when no cleared blocks are available and exercise the fallback path too. The resulting
	 * blocks should always be dirty.
	 */

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
			       "buddy_init failed\n");

	bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps);
	bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps);
	bias_end = max(bias_end, bias_start + ps);
	bias_rem = bias_end - bias_start;

	flags = DRM_BUDDY_CLEAR_ALLOCATION | DRM_BUDDY_RANGE_ALLOCATION;
	size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);

	KUNIT_ASSERT_FALSE_MSG(test,
			       drm_buddy_alloc_blocks(&mm, bias_start,
						      bias_end, size, ps,
						      &allocated,
						      flags),
			       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
			       bias_start, bias_end, size, ps);

	list_for_each_entry(block, &allocated, link)
		KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);

	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_clear(struct kunit *test)
{
	unsigned long n_pages, total, i = 0;
	const unsigned long ps = SZ_4K;
	struct drm_buddy_block *block;
	const int max_order = 12;
	LIST_HEAD(allocated);
	struct drm_buddy mm;
	unsigned int order;
	u32 mm_size, size;
	LIST_HEAD(dirty);
	LIST_HEAD(clean);

	mm_size = SZ_4K << max_order;
	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	/*
	 * Idea is to allocate and free some random portion of the address space,
	 * returning those pages as non-dirty and randomly alternate between
	 * requesting dirty and non-dirty pages (not going over the limit
	 * we freed as non-dirty), putting that into two separate lists.
	 * Loop over both lists at the end checking that the dirty list
	 * is indeed all dirty pages and vice versa. Free it all again,
	 * keeping the dirty/clear status.
	 */
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    5 * ps, ps, &allocated,
							    DRM_BUDDY_TOPDOWN_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 5 * ps);
	drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);

	n_pages = 10;
	do {
		unsigned long flags;
		struct list_head *list;
		int slot = i % 2;

		if (slot == 0) {
			list = &dirty;
			flags = 0;
		} else {
			list = &clean;
			flags = DRM_BUDDY_CLEAR_ALLOCATION;
		}

		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
								    ps, ps, list,
								    flags),
				       "buddy_alloc hit an error size=%lu\n", ps);
	} while (++i < n_pages);
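	/*
	 * With n_pages = 10 and alternating slots, only five clear requests
	 * are made, staying within the 5 * ps freed as DRM_BUDDY_CLEARED.
	 */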

	list_for_each_entry(block, &clean, link)
		KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), true);

	list_for_each_entry(block, &dirty, link)
		KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);

	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);

	/*
	 * Trying to go over the clear limit for some allocation.
	 * The allocation should never fail with reasonable page-size.
	 */
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    10 * ps, ps, &clean,
							    DRM_BUDDY_CLEAR_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 10 * ps);

	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
	drm_buddy_free_list(&mm, &dirty, 0);
	drm_buddy_fini(&mm);

	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));

	/*
	 * Create a new mm. Intentionally fragment the address space by creating
	 * two alternating lists. Free both lists, one as dirty the other as clean.
	 * Try to allocate double the previous size with matching min_page_size. The
	 * allocation should never fail as it calls the force_merge. Also check that
	 * the page is always dirty after force_merge. Free the page as dirty, then
	 * repeat the whole thing, incrementing the order until we hit the max_order.
	 */

	i = 0;
	n_pages = mm_size / ps;
	do {
		struct list_head *list;
		int slot = i % 2;

		if (slot == 0)
			list = &dirty;
		else
			list = &clean;

		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
								    ps, ps, list, 0),
				       "buddy_alloc hit an error size=%lu\n", ps);
	} while (++i < n_pages);

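	/*
	 * Buddy pages now alternate between the dirty and clean lists; once
	 * both are freed with different flags below, no order > 0 block can
	 * form without force-merging across the cleared/dirty boundary.
	 */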
	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
	drm_buddy_free_list(&mm, &dirty, 0);

	order = 1;
	do {
		size = SZ_4K << order;

		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
								    size, size, &allocated,
								    DRM_BUDDY_CLEAR_ALLOCATION),
				       "buddy_alloc hit an error size=%u\n", size);
		total = 0;
		list_for_each_entry(block, &allocated, link) {
			if (size != mm_size)
				KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);
			total += drm_buddy_block_size(&mm, block);
		}
		KUNIT_EXPECT_EQ(test, total, size);

		drm_buddy_free_list(&mm, &allocated, 0);
	} while (++order <= max_order);

	drm_buddy_fini(&mm);

	/*
	 * Create a new mm with a non power-of-two size. Allocate a random size from each
	 * root, free as cleared and then call fini. This will ensure the multi-root
	 * force merge during fini.
	 */
	mm_size = (SZ_4K << max_order) + (SZ_4K << (max_order - 2));
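	/* This yields two roots: one of order max_order, one of order max_order - 2 */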

	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, SZ_4K << max_order,
							    4 * ps, ps, &allocated,
							    DRM_BUDDY_RANGE_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 4 * ps);
	drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, SZ_4K << max_order,
							    2 * ps, ps, &allocated,
							    DRM_BUDDY_CLEAR_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 2 * ps);
	drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, SZ_4K << max_order, mm_size,
							    ps, ps, &allocated,
							    DRM_BUDDY_RANGE_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", ps);
	drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_contiguous(struct kunit *test)
{
	const unsigned long ps = SZ_4K, mm_size = 16 * 3 * SZ_4K;
	unsigned long i, n_pages, total;
	struct drm_buddy_block *block;
	struct drm_buddy mm;
	LIST_HEAD(left);
	LIST_HEAD(middle);
	LIST_HEAD(right);
	LIST_HEAD(allocated);

	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));

	/*
	 * Idea is to fragment the address space by alternating block
	 * allocations between three different lists; one for left, middle and
	 * right. We can then free a list to simulate fragmentation. In
	 * particular we want to exercise the DRM_BUDDY_CONTIGUOUS_ALLOCATION,
	 * including the try_harder path.
	 */

	i = 0;
	n_pages = mm_size / ps;
	do {
		struct list_head *list;
		int slot = i % 3;

		if (slot == 0)
			list = &left;
		else if (slot == 1)
			list = &middle;
		else
			list = &right;
		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, 0, mm_size,
							      ps, ps, list, 0),
				       "buddy_alloc hit an error size=%lu\n",
				       ps);
	} while (++i < n_pages);
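	/*
	 * Pages now cycle left, middle, right across the space, so freeing
	 * any single list later opens holes with a 3 * ps stride.
	 */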

	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							   3 * ps, ps, &allocated,
							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			      "buddy_alloc didn't error size=%lu\n", 3 * ps);

	drm_buddy_free_list(&mm, &middle, 0);
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							   3 * ps, ps, &allocated,
							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			      "buddy_alloc didn't error size=%lu\n", 3 * ps);
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							   2 * ps, ps, &allocated,
							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			      "buddy_alloc didn't error size=%lu\n", 2 * ps);

	drm_buddy_free_list(&mm, &right, 0);
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							   3 * ps, ps, &allocated,
							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			      "buddy_alloc didn't error size=%lu\n", 3 * ps);
	/*
	 * At this point we should have enough contiguous space for 2 blocks,
	 * however they are never buddies (since we freed middle and right) so
	 * will require the try_harder logic to find them.
	 */
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    2 * ps, ps, &allocated,
							    DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 2 * ps);

	drm_buddy_free_list(&mm, &left, 0);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    3 * ps, ps, &allocated,
							    DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 3 * ps);

	total = 0;
	list_for_each_entry(block, &allocated, link)
		total += drm_buddy_block_size(&mm, block);

	KUNIT_ASSERT_EQ(test, total, ps * 2 + ps * 3);

	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_pathological(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block;
	const int max_order = 3;
	unsigned long flags = 0;
	int order, top;
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	LIST_HEAD(holes);
	LIST_HEAD(tmp);

	/*
	 * Create a pot-sized mm, then allocate one of each possible
	 * order within. This should leave the mm with exactly one
	 * page left. Free the largest block, then whittle down again.
	 * Eventually we will have a fully 50% fragmented mm.
	 */

	mm_size = SZ_4K << max_order;
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
			       "buddy_init failed\n");

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	for (top = max_order; top; top--) {
		/* Make room by freeing the largest allocated block */
		block = list_first_entry_or_null(&blocks, typeof(*block), link);
		if (block) {
			list_del(&block->link);
			drm_buddy_free_block(&mm, block);
		}

		for (order = top; order--;) {
			size = get_size(order, mm.chunk_size);
			KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start,
									    mm_size, size, size,
									    &tmp, flags),
					       "buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
					       order, top);

			block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
			KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

			list_move_tail(&block->link, &blocks);
		}

		/* There should be one final page for this sub-allocation */
		size = get_size(0, mm.chunk_size);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM for hole\n");

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_move_tail(&block->link, &holes);

		size = get_size(top, mm.chunk_size);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
				      top, max_order);
	}

	drm_buddy_free_list(&mm, &holes, 0);

	/* Nothing larger than blocks of chunk_size now available */
	for (order = 1; order <= max_order; order++) {
		size = get_size(order, mm.chunk_size);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded at order %d, it should be full!",
				      order);
	}

	list_splice_tail(&holes, &blocks);
	drm_buddy_free_list(&mm, &blocks, 0);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block, *bn;
	const unsigned int max_order = 16;
	unsigned long flags = 0;
	struct drm_buddy mm;
	unsigned int order;
	LIST_HEAD(blocks);
	LIST_HEAD(tmp);

	/*
	 * Create a pot-sized mm, then allocate one of each possible
	 * order within. This should leave the mm with exactly one
	 * page left.
	 */

	mm_size = SZ_4K << max_order;
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
			       "buddy_init failed\n");

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	for (order = 0; order < max_order; order++) {
		size = get_size(order, mm.chunk_size);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_move_tail(&block->link, &blocks);
	}
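	/*
	 * Orders 0..max_order-1 consume 2^max_order - 1 pages of the
	 * 2^max_order total, so exactly one chunk-size page remains.
	 */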

	/* And now the last remaining block available */
	size = get_size(0, mm.chunk_size);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							    size, size, &tmp, flags),
			       "buddy_alloc hit -ENOMEM on final alloc\n");

	block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
	KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

	list_move_tail(&block->link, &blocks);

	/* Should be completely full! */
	for (order = max_order; order--;) {
		size = get_size(order, mm.chunk_size);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded, it should be full!");
	}

	block = list_last_entry(&blocks, typeof(*block), link);
	list_del(&block->link);
	drm_buddy_free_block(&mm, block);

	/* As we free in increasing size, we make available larger blocks */
	order = 1;
	list_for_each_entry_safe(block, bn, &blocks, link) {
		list_del(&block->link);
		drm_buddy_free_block(&mm, block);

		size = get_size(order, mm.chunk_size);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_del(&block->link);
		drm_buddy_free_block(&mm, block);
		order++;
	}

	/* To confirm, now the whole mm should be available */
	size = get_size(max_order, mm.chunk_size);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							    size, size, &tmp, flags),
			       "buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
			       max_order);

	block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
	KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

	list_del(&block->link);
	drm_buddy_free_block(&mm, block);
	drm_buddy_free_list(&mm, &blocks, 0);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_optimistic(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block;
	unsigned long flags = 0;
	const int max_order = 16;
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	LIST_HEAD(tmp);
	int order;

	/*
	 * Create a mm with one block of each order available, and
	 * try to allocate them all.
	 */

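	/* 2^0 + 2^1 + ... + 2^max_order pages = 2^(max_order + 1) - 1 */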
	mm_size = SZ_4K * ((1 << (max_order + 1)) - 1);

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
			       "buddy_init failed\n");

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	for (order = 0; order <= max_order; order++) {
		size = get_size(order, mm.chunk_size);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_move_tail(&block->link, &blocks);
	}

	/* Should be completely full! */
	size = get_size(0, mm.chunk_size);
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							   size, size, &tmp, flags),
			      "buddy_alloc unexpectedly succeeded, it should be full!");

	drm_buddy_free_list(&mm, &blocks, 0);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_limit(struct kunit *test)
{
	u64 size = U64_MAX, start = 0;
	struct drm_buddy_block *block;
	unsigned long flags = 0;
	LIST_HEAD(allocated);
	struct drm_buddy mm;

	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, SZ_4K));

	KUNIT_EXPECT_EQ_MSG(test, mm.max_order, DRM_BUDDY_MAX_ORDER,
			    "mm.max_order(%d) != %d\n", mm.max_order,
			    DRM_BUDDY_MAX_ORDER);
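	/* A U64_MAX-sized mm clamps max_order to the DRM_BUDDY_MAX_ORDER cap */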

	size = mm.chunk_size << mm.max_order;
	KUNIT_EXPECT_FALSE(test, drm_buddy_alloc_blocks(&mm, start, size, size,
							mm.chunk_size, &allocated, flags));

	block = list_first_entry_or_null(&allocated, struct drm_buddy_block, link);
	KUNIT_EXPECT_TRUE(test, block);

	KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), mm.max_order,
			    "block order(%d) != %d\n",
			    drm_buddy_block_order(block), mm.max_order);

	KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_size(&mm, block),
			    BIT_ULL(mm.max_order) * mm.chunk_size,
			    "block size(%llu) != %llu\n",
			    drm_buddy_block_size(&mm, block),
			    BIT_ULL(mm.max_order) * mm.chunk_size);

	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);
}

static int drm_buddy_suite_init(struct kunit_suite *suite)
{
	while (!random_seed)
		random_seed = get_random_u32();

	kunit_info(suite, "Testing DRM buddy manager, with random_seed=0x%x\n",
		   random_seed);

	return 0;
}

static struct kunit_case drm_buddy_tests[] = {
	KUNIT_CASE(drm_test_buddy_alloc_limit),
	KUNIT_CASE(drm_test_buddy_alloc_optimistic),
	KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
	KUNIT_CASE(drm_test_buddy_alloc_pathological),
	KUNIT_CASE(drm_test_buddy_alloc_contiguous),
	KUNIT_CASE(drm_test_buddy_alloc_clear),
	KUNIT_CASE(drm_test_buddy_alloc_range_bias),
	KUNIT_CASE(drm_test_buddy_fragmentation_performance),
	{}
};

static struct kunit_suite drm_buddy_test_suite = {
	.name = "drm_buddy",
	.suite_init = drm_buddy_suite_init,
	.test_cases = drm_buddy_tests,
};

kunit_test_suite(drm_buddy_test_suite);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Kunit test for drm_buddy functions");
MODULE_LICENSE("GPL");