xref: /linux/drivers/gpu/tests/gpu_buddy_test.c (revision 4a9671a03f2be13acde0cb15c5208767a9cc56e4)
1*4a9671a0SJoel Fernandes // SPDX-License-Identifier: MIT
2*4a9671a0SJoel Fernandes /*
3*4a9671a0SJoel Fernandes  * Copyright © 2019 Intel Corporation
4*4a9671a0SJoel Fernandes  * Copyright © 2022 Maíra Canal <mairacanal@riseup.net>
5*4a9671a0SJoel Fernandes  */
6*4a9671a0SJoel Fernandes 
7*4a9671a0SJoel Fernandes #include <kunit/test.h>
8*4a9671a0SJoel Fernandes 
9*4a9671a0SJoel Fernandes #include <linux/prime_numbers.h>
10*4a9671a0SJoel Fernandes #include <linux/sched/signal.h>
11*4a9671a0SJoel Fernandes #include <linux/sizes.h>
12*4a9671a0SJoel Fernandes 
13*4a9671a0SJoel Fernandes #include <linux/gpu_buddy.h>
14*4a9671a0SJoel Fernandes 
15*4a9671a0SJoel Fernandes #include "gpu_random.h"
16*4a9671a0SJoel Fernandes 
/* Seed for the DRM_RND_STATE() pseudo-random streams used by these tests. */
static unsigned int random_seed;
18*4a9671a0SJoel Fernandes 
/*
 * get_size - byte size of a buddy allocation of the given order.
 * @order: buddy order (number of doublings of the minimum chunk).
 * @chunk_size: minimum chunk size in bytes.
 *
 * Shift the 64-bit @chunk_size rather than computing (1 << order) in a
 * 32-bit int: the latter is undefined behaviour for order >= 31 and would
 * truncate the result before the widening multiply.
 */
static inline u64 get_size(int order, u64 chunk_size)
{
	return chunk_size << order;
}
23*4a9671a0SJoel Fernandes 
/*
 * Timed stress test: measure allocator throughput under (1) heavy
 * fragmentation with coalescing blocked by mixed clear/dirty freeing, and
 * (2) a pathological reverse-ordered free.  Results are reported via
 * kunit_info(); the test only hard-fails if an allocation errors out.
 */
static void drm_test_buddy_fragmentation_performance(struct kunit *test)
{
	struct drm_buddy_block *block, *tmp;
	int num_blocks, i, ret, count = 0;
	LIST_HEAD(allocated_blocks);
	unsigned long elapsed_ms;
	LIST_HEAD(reverse_list);
	LIST_HEAD(test_blocks);
	LIST_HEAD(clear_list);
	LIST_HEAD(dirty_list);
	LIST_HEAD(free_list);
	struct drm_buddy mm;
	u64 mm_size = SZ_4G;
	ktime_t start, end;

	/*
	 * Allocation under severe fragmentation
	 *
	 * Create severe fragmentation by allocating the entire 4 GiB address space
	 * as tiny 8 KiB blocks but forcing a 64 KiB alignment. The resulting pattern
	 * leaves many scattered holes. Split the allocations into two groups and
	 * return them with different flags to block coalescing, then repeatedly
	 * allocate and free 64 KiB blocks while timing the loop. This stresses how
	 * quickly the allocator can satisfy larger, aligned requests from a pool of
	 * highly fragmented space.
	 */
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
			       "buddy_init failed\n");

	/* One 64 KiB-aligned slot per iteration below. */
	num_blocks = mm_size / SZ_64K;

	start = ktime_get();
	/* Allocate with maximum fragmentation - 8K blocks with 64K alignment */
	for (i = 0; i < num_blocks; i++)
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_8K, SZ_64K,
								    &allocated_blocks, 0),
					"buddy_alloc hit an error size=%u\n", SZ_8K);

	/*
	 * Split into two groups in a C,D,D,C repeating pattern so that
	 * adjacent buddies land in different lists and are freed with
	 * different clear state, defeating buddy coalescing.
	 */
	list_for_each_entry_safe(block, tmp, &allocated_blocks, link) {
		if (count % 4 == 0 || count % 4 == 3)
			list_move_tail(&block->link, &clear_list);
		else
			list_move_tail(&block->link, &dirty_list);
		count++;
	}

	/* Free with different flags to ensure no coalescing */
	drm_buddy_free_list(&mm, &clear_list, DRM_BUDDY_CLEARED);
	drm_buddy_free_list(&mm, &dirty_list, 0);

	/* Timed phase: satisfy 64 KiB aligned requests from the fragmented pool. */
	for (i = 0; i < num_blocks; i++)
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_64K, SZ_64K,
								    &test_blocks, 0),
					"buddy_alloc hit an error size=%u\n", SZ_64K);
	drm_buddy_free_list(&mm, &test_blocks, 0);

	end = ktime_get();
	elapsed_ms = ktime_to_ms(ktime_sub(end, start));

	kunit_info(test, "Fragmented allocation took %lu ms\n", elapsed_ms);

	drm_buddy_fini(&mm);

	/*
	 * Reverse free order under fragmentation
	 *
	 * Construct a fragmented 4 GiB space by allocating every 8 KiB block with
	 * 64 KiB alignment, creating a dense scatter of small regions. Half of the
	 * blocks are selectively freed to form sparse gaps, while the remaining
	 * allocations are preserved, reordered in reverse, and released back with
	 * the cleared flag. This models a pathological reverse-ordered free pattern
	 * and measures how quickly the allocator can merge and reclaim space when
	 * deallocation occurs in the opposite order of allocation, exposing the
	 * cost difference between a linear freelist scan and an ordered tree lookup.
	 */
	ret = drm_buddy_init(&mm, mm_size, SZ_4K);
	KUNIT_ASSERT_EQ(test, ret, 0);

	start = ktime_get();
	/* Allocate maximum fragmentation */
	for (i = 0; i < num_blocks; i++)
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_8K, SZ_64K,
								    &allocated_blocks, 0),
					"buddy_alloc hit an error size=%u\n", SZ_8K);

	/*
	 * NOTE(review): count is not reset here, it carries over from the
	 * first phase. The parity split below still alternates as intended
	 * because num_blocks (mm_size / SZ_64K) is even — confirm this was
	 * deliberate rather than relying on that coincidence.
	 */
	list_for_each_entry_safe(block, tmp, &allocated_blocks, link) {
		if (count % 2 == 0)
			list_move_tail(&block->link, &free_list);
		count++;
	}
	drm_buddy_free_list(&mm, &free_list, DRM_BUDDY_CLEARED);

	/* Release the survivors in reverse allocation order. */
	list_for_each_entry_safe_reverse(block, tmp, &allocated_blocks, link)
		list_move(&block->link, &reverse_list);
	drm_buddy_free_list(&mm, &reverse_list, DRM_BUDDY_CLEARED);

	end = ktime_get();
	elapsed_ms = ktime_to_ms(ktime_sub(end, start));

	kunit_info(test, "Reverse-ordered free took %lu ms\n", elapsed_ms);

	drm_buddy_fini(&mm);
}
127*4a9671a0SJoel Fernandes 
/*
 * Exercise DRM_BUDDY_RANGE_ALLOCATION: allocations must stay inside the
 * requested [bias_start, bias_end) window, fail when the request cannot
 * fit or align within it, and fall back to dirty blocks when a cleared
 * range allocation is requested but no cleared space exists.
 */
static void drm_test_buddy_alloc_range_bias(struct kunit *test)
{
	u32 mm_size, size, ps, bias_size, bias_start, bias_end, bias_rem;
	DRM_RND_STATE(prng, random_seed);
	unsigned int i, count, *order;
	struct drm_buddy_block *block;
	unsigned long flags;
	struct drm_buddy mm;
	LIST_HEAD(allocated);

	bias_size = SZ_1M;
	/* Random page size in [SZ_4K, bias_size], always a power of two. */
	ps = roundup_pow_of_two(prandom_u32_state(&prng) % bias_size);
	ps = max(SZ_4K, ps);
	mm_size = (SZ_8M-1) & ~(ps-1); /* Multiple roots */

	kunit_info(test, "mm_size=%u, ps=%u\n", mm_size, ps);

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
			       "buddy_init failed\n");

	count = mm_size / bias_size;
	order = drm_random_order(count, &prng);
	KUNIT_EXPECT_TRUE(test, order);

	/*
	 * Idea is to split the address space into uniform bias ranges, and then
	 * in some random order allocate within each bias, using various
	 * patterns within. This should detect if allocations leak out from a
	 * given bias, for example.
	 */

	for (i = 0; i < count; i++) {
		LIST_HEAD(tmp);
		/*
		 * NOTE(review): this inner `size` shadows the function-scope
		 * `size` declared above; all uses inside the loop refer to
		 * this one. Consider renaming to avoid the shadow.
		 */
		u32 size;

		bias_start = order[i] * bias_size;
		bias_end = bias_start + bias_size;
		bias_rem = bias_size;

		/* internal round_up too big */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start,
							     bias_end, bias_size + ps, bias_size,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start, bias_end, bias_size, bias_size);

		/* size too big */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start,
							     bias_end, bias_size + ps, ps,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start, bias_end, bias_size + ps, ps);

		/* bias range too small for size */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start + ps,
							     bias_end, bias_size, ps,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start + ps, bias_end, bias_size, ps);

		/* bias misaligned */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start + ps,
							     bias_end - ps,
							     bias_size >> 1, bias_size >> 1,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc h didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start + ps, bias_end - ps, bias_size >> 1, bias_size >> 1);

		/* single big page */
		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, bias_start,
							      bias_end, bias_size, bias_size,
							      &tmp,
							      DRM_BUDDY_RANGE_ALLOCATION),
				       "buddy_alloc i failed with bias(%x-%x), size=%u, ps=%u\n",
				       bias_start, bias_end, bias_size, bias_size);
		drm_buddy_free_list(&mm, &tmp, 0);

		/* single page with internal round_up */
		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, bias_start,
							      bias_end, ps, bias_size,
							      &tmp,
							      DRM_BUDDY_RANGE_ALLOCATION),
				       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
				       bias_start, bias_end, ps, bias_size);
		drm_buddy_free_list(&mm, &tmp, 0);

		/* random size within */
		/* NOTE(review): size >= ps here, so the `if (size)` below is always true. */
		size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
		if (size)
			KUNIT_ASSERT_FALSE_MSG(test,
					       drm_buddy_alloc_blocks(&mm, bias_start,
								      bias_end, size, ps,
								      &tmp,
								      DRM_BUDDY_RANGE_ALLOCATION),
					       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
					       bias_start, bias_end, size, ps);

		bias_rem -= size;
		/* too big for current avail */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start,
							     bias_end, bias_rem + ps, ps,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start, bias_end, bias_rem + ps, ps);

		if (bias_rem) {
			/* random fill of the remainder */
			size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
			/* NOTE(review): redundant — size is already >= ps from the line above. */
			size = max(size, ps);

			KUNIT_ASSERT_FALSE_MSG(test,
					       drm_buddy_alloc_blocks(&mm, bias_start,
								      bias_end, size, ps,
								      &allocated,
								      DRM_BUDDY_RANGE_ALLOCATION),
					       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
					       bias_start, bias_end, size, ps);
			/*
			 * Intentionally allow some space to be left
			 * unallocated, and ideally not always on the bias
			 * boundaries.
			 */
			drm_buddy_free_list(&mm, &tmp, 0);
		} else {
			list_splice_tail(&tmp, &allocated);
		}
	}

	kfree(order);
	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);

	/*
	 * Something more free-form. Idea is to pick a random starting bias
	 * range within the address space and then start filling it up. Also
	 * randomly grow the bias range in both directions as we go along. This
	 * should give us bias start/end which is not always uniform like above,
	 * and in some cases will require the allocator to jump over already
	 * allocated nodes in the middle of the address space.
	 */

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
			       "buddy_init failed\n");

	bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps);
	bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps);
	bias_end = max(bias_end, bias_start + ps);
	bias_rem = bias_end - bias_start;

	do {
		u32 size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);

		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, bias_start,
							      bias_end, size, ps,
							      &allocated,
							      DRM_BUDDY_RANGE_ALLOCATION),
				       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
				       bias_start, bias_end, size, ps);
		bias_rem -= size;

		/*
		 * Try to randomly grow the bias range in both directions, or
		 * only one, or perhaps don't grow at all.
		 */
		do {
			u32 old_bias_start = bias_start;
			u32 old_bias_end = bias_end;

			if (bias_start)
				bias_start -= round_up(prandom_u32_state(&prng) % bias_start, ps);
			if (bias_end != mm_size)
				bias_end += round_up(prandom_u32_state(&prng) % (mm_size - bias_end), ps);

			/* Account the growth back into the unallocated remainder. */
			bias_rem += old_bias_start - bias_start;
			bias_rem += bias_end - old_bias_end;
		} while (!bias_rem && (bias_start || bias_end != mm_size));
	} while (bias_rem);

	/* Loop only terminates once the window spans the whole mm and is full. */
	KUNIT_ASSERT_EQ(test, bias_start, 0);
	KUNIT_ASSERT_EQ(test, bias_end, mm_size);
	KUNIT_ASSERT_TRUE_MSG(test,
			      drm_buddy_alloc_blocks(&mm, bias_start, bias_end,
						     ps, ps,
						     &allocated,
						     DRM_BUDDY_RANGE_ALLOCATION),
			      "buddy_alloc passed with bias(%x-%x), size=%u\n",
			      bias_start, bias_end, ps);

	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);

	/*
	 * Allocate cleared blocks in the bias range when the DRM buddy's clear avail is
	 * zero. This will validate the bias range allocation in scenarios like system boot
	 * when no cleared blocks are available and exercise the fallback path too. The resulting
	 * blocks should always be dirty.
	 */

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
			       "buddy_init failed\n");

	bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps);
	bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps);
	bias_end = max(bias_end, bias_start + ps);
	bias_rem = bias_end - bias_start;

	flags = DRM_BUDDY_CLEAR_ALLOCATION | DRM_BUDDY_RANGE_ALLOCATION;
	size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);

	KUNIT_ASSERT_FALSE_MSG(test,
			       drm_buddy_alloc_blocks(&mm, bias_start,
						      bias_end, size, ps,
						      &allocated,
						      flags),
			       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
			       bias_start, bias_end, size, ps);

	/* Fallback path: no cleared space exists, so every block must be dirty. */
	list_for_each_entry(block, &allocated, link)
		KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);

	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);
}
364*4a9671a0SJoel Fernandes 
/*
 * Exercise clear/dirty block tracking: DRM_BUDDY_CLEAR_ALLOCATION must hand
 * back cleared blocks while they last, force_merge must produce dirty blocks,
 * and a multi-root mm must force-merge cleanly at fini time.
 */
static void drm_test_buddy_alloc_clear(struct kunit *test)
{
	unsigned long n_pages, total, i = 0;
	const unsigned long ps = SZ_4K;
	struct drm_buddy_block *block;
	const int max_order = 12;
	LIST_HEAD(allocated);
	struct drm_buddy mm;
	unsigned int order;
	u32 mm_size, size;
	LIST_HEAD(dirty);
	LIST_HEAD(clean);

	mm_size = SZ_4K << max_order;
	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	/*
	 * Idea is to allocate and free some random portion of the address space,
	 * returning those pages as non-dirty and randomly alternate between
	 * requesting dirty and non-dirty pages (not going over the limit
	 * we freed as non-dirty), putting that into two separate lists.
	 * Loop over both lists at the end checking that the dirty list
	 * is indeed all dirty pages and vice versa. Free it all again,
	 * keeping the dirty/clear status.
	 */
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    5 * ps, ps, &allocated,
							    DRM_BUDDY_TOPDOWN_ALLOCATION),
				"buddy_alloc hit an error size=%lu\n", 5 * ps);
	/* Seed the clear pool: return these 5 pages as cleared. */
	drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);

	/* Alternate dirty/clean requests; stays within the 5 cleared pages. */
	n_pages = 10;
	do {
		unsigned long flags;
		struct list_head *list;
		int slot = i % 2;

		if (slot == 0) {
			list = &dirty;
			flags = 0;
		} else {
			list = &clean;
			flags = DRM_BUDDY_CLEAR_ALLOCATION;
		}

		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
								    ps, ps, list,
								    flags),
					"buddy_alloc hit an error size=%lu\n", ps);
	} while (++i < n_pages);

	list_for_each_entry(block, &clean, link)
		KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), true);

	list_for_each_entry(block, &dirty, link)
		KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);

	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);

	/*
	 * Trying to go over the clear limit for some allocation.
	 * The allocation should never fail with reasonable page-size.
	 */
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    10 * ps, ps, &clean,
							    DRM_BUDDY_CLEAR_ALLOCATION),
				"buddy_alloc hit an error size=%lu\n", 10 * ps);

	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
	drm_buddy_free_list(&mm, &dirty, 0);
	drm_buddy_fini(&mm);

	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));

	/*
	 * Create a new mm. Intentionally fragment the address space by creating
	 * two alternating lists. Free both lists, one as dirty the other as clean.
	 * Try to allocate double the previous size with matching min_page_size. The
	 * allocation should never fail as it calls the force_merge. Also check that
	 * the page is always dirty after force_merge. Free the page as dirty, then
	 * repeat the whole thing, increment the order until we hit the max_order.
	 */

	i = 0;
	n_pages = mm_size / ps;
	do {
		struct list_head *list;
		int slot = i % 2;

		if (slot == 0)
			list = &dirty;
		else
			list = &clean;

		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
								    ps, ps, list, 0),
					"buddy_alloc hit an error size=%lu\n", ps);
	} while (++i < n_pages);

	/* Alternating clear/dirty frees: buddies cannot merge on their own. */
	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
	drm_buddy_free_list(&mm, &dirty, 0);

	order = 1;
	do {
		size = SZ_4K << order;

		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
								    size, size, &allocated,
								    DRM_BUDDY_CLEAR_ALLOCATION),
					"buddy_alloc hit an error size=%u\n", size);
		total = 0;
		list_for_each_entry(block, &allocated, link) {
			/* force_merge mixes clear+dirty buddies, result must be dirty. */
			if (size != mm_size)
				KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);
			total += drm_buddy_block_size(&mm, block);
		}
		KUNIT_EXPECT_EQ(test, total, size);

		drm_buddy_free_list(&mm, &allocated, 0);
	} while (++order <= max_order);

	drm_buddy_fini(&mm);

	/*
	 * Create a new mm with a non power-of-two size. Allocate a random size from each
	 * root, free as cleared and then call fini. This will ensure the multi-root
	 * force merge during fini.
	 */
	mm_size = (SZ_4K << max_order) + (SZ_4K << (max_order - 2));

	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, SZ_4K << max_order,
							    4 * ps, ps, &allocated,
							    DRM_BUDDY_RANGE_ALLOCATION),
				"buddy_alloc hit an error size=%lu\n", 4 * ps);
	drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, SZ_4K << max_order,
							    2 * ps, ps, &allocated,
							    DRM_BUDDY_CLEAR_ALLOCATION),
				"buddy_alloc hit an error size=%lu\n", 2 * ps);
	drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
	/* Touch the second (smaller) root explicitly. */
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, SZ_4K << max_order, mm_size,
							    ps, ps, &allocated,
							    DRM_BUDDY_RANGE_ALLOCATION),
				"buddy_alloc hit an error size=%lu\n", ps);
	drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
	drm_buddy_fini(&mm);
}
516*4a9671a0SJoel Fernandes 
517*4a9671a0SJoel Fernandes static void drm_test_buddy_alloc_contiguous(struct kunit *test)
518*4a9671a0SJoel Fernandes {
519*4a9671a0SJoel Fernandes 	const unsigned long ps = SZ_4K, mm_size = 16 * 3 * SZ_4K;
520*4a9671a0SJoel Fernandes 	unsigned long i, n_pages, total;
521*4a9671a0SJoel Fernandes 	struct drm_buddy_block *block;
522*4a9671a0SJoel Fernandes 	struct drm_buddy mm;
523*4a9671a0SJoel Fernandes 	LIST_HEAD(left);
524*4a9671a0SJoel Fernandes 	LIST_HEAD(middle);
525*4a9671a0SJoel Fernandes 	LIST_HEAD(right);
526*4a9671a0SJoel Fernandes 	LIST_HEAD(allocated);
527*4a9671a0SJoel Fernandes 
528*4a9671a0SJoel Fernandes 	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
529*4a9671a0SJoel Fernandes 
530*4a9671a0SJoel Fernandes 	/*
531*4a9671a0SJoel Fernandes 	 * Idea is to fragment the address space by alternating block
532*4a9671a0SJoel Fernandes 	 * allocations between three different lists; one for left, middle and
533*4a9671a0SJoel Fernandes 	 * right. We can then free a list to simulate fragmentation. In
534*4a9671a0SJoel Fernandes 	 * particular we want to exercise the DRM_BUDDY_CONTIGUOUS_ALLOCATION,
535*4a9671a0SJoel Fernandes 	 * including the try_harder path.
536*4a9671a0SJoel Fernandes 	 */
537*4a9671a0SJoel Fernandes 
538*4a9671a0SJoel Fernandes 	i = 0;
539*4a9671a0SJoel Fernandes 	n_pages = mm_size / ps;
540*4a9671a0SJoel Fernandes 	do {
541*4a9671a0SJoel Fernandes 		struct list_head *list;
542*4a9671a0SJoel Fernandes 		int slot = i % 3;
543*4a9671a0SJoel Fernandes 
544*4a9671a0SJoel Fernandes 		if (slot == 0)
545*4a9671a0SJoel Fernandes 			list = &left;
546*4a9671a0SJoel Fernandes 		else if (slot == 1)
547*4a9671a0SJoel Fernandes 			list = &middle;
548*4a9671a0SJoel Fernandes 		else
549*4a9671a0SJoel Fernandes 			list = &right;
550*4a9671a0SJoel Fernandes 		KUNIT_ASSERT_FALSE_MSG(test,
551*4a9671a0SJoel Fernandes 				       drm_buddy_alloc_blocks(&mm, 0, mm_size,
552*4a9671a0SJoel Fernandes 							      ps, ps, list, 0),
553*4a9671a0SJoel Fernandes 				       "buddy_alloc hit an error size=%lu\n",
554*4a9671a0SJoel Fernandes 				       ps);
555*4a9671a0SJoel Fernandes 	} while (++i < n_pages);
556*4a9671a0SJoel Fernandes 
557*4a9671a0SJoel Fernandes 	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
558*4a9671a0SJoel Fernandes 							   3 * ps, ps, &allocated,
559*4a9671a0SJoel Fernandes 							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
560*4a9671a0SJoel Fernandes 			       "buddy_alloc didn't error size=%lu\n", 3 * ps);
561*4a9671a0SJoel Fernandes 
562*4a9671a0SJoel Fernandes 	drm_buddy_free_list(&mm, &middle, 0);
563*4a9671a0SJoel Fernandes 	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
564*4a9671a0SJoel Fernandes 							   3 * ps, ps, &allocated,
565*4a9671a0SJoel Fernandes 							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
566*4a9671a0SJoel Fernandes 			       "buddy_alloc didn't error size=%lu\n", 3 * ps);
567*4a9671a0SJoel Fernandes 	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
568*4a9671a0SJoel Fernandes 							   2 * ps, ps, &allocated,
569*4a9671a0SJoel Fernandes 							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
570*4a9671a0SJoel Fernandes 			       "buddy_alloc didn't error size=%lu\n", 2 * ps);
571*4a9671a0SJoel Fernandes 
572*4a9671a0SJoel Fernandes 	drm_buddy_free_list(&mm, &right, 0);
573*4a9671a0SJoel Fernandes 	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
574*4a9671a0SJoel Fernandes 							   3 * ps, ps, &allocated,
575*4a9671a0SJoel Fernandes 							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
576*4a9671a0SJoel Fernandes 			       "buddy_alloc didn't error size=%lu\n", 3 * ps);
577*4a9671a0SJoel Fernandes 	/*
578*4a9671a0SJoel Fernandes 	 * At this point we should have enough contiguous space for 2 blocks,
579*4a9671a0SJoel Fernandes 	 * however they are never buddies (since we freed middle and right) so
580*4a9671a0SJoel Fernandes 	 * will require the try_harder logic to find them.
581*4a9671a0SJoel Fernandes 	 */
582*4a9671a0SJoel Fernandes 	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
583*4a9671a0SJoel Fernandes 							    2 * ps, ps, &allocated,
584*4a9671a0SJoel Fernandes 							    DRM_BUDDY_CONTIGUOUS_ALLOCATION),
585*4a9671a0SJoel Fernandes 			       "buddy_alloc hit an error size=%lu\n", 2 * ps);
586*4a9671a0SJoel Fernandes 
587*4a9671a0SJoel Fernandes 	drm_buddy_free_list(&mm, &left, 0);
588*4a9671a0SJoel Fernandes 	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
589*4a9671a0SJoel Fernandes 							    3 * ps, ps, &allocated,
590*4a9671a0SJoel Fernandes 							    DRM_BUDDY_CONTIGUOUS_ALLOCATION),
591*4a9671a0SJoel Fernandes 			       "buddy_alloc hit an error size=%lu\n", 3 * ps);
592*4a9671a0SJoel Fernandes 
593*4a9671a0SJoel Fernandes 	total = 0;
594*4a9671a0SJoel Fernandes 	list_for_each_entry(block, &allocated, link)
595*4a9671a0SJoel Fernandes 		total += drm_buddy_block_size(&mm, block);
596*4a9671a0SJoel Fernandes 
597*4a9671a0SJoel Fernandes 	KUNIT_ASSERT_EQ(test, total, ps * 2 + ps * 3);
598*4a9671a0SJoel Fernandes 
599*4a9671a0SJoel Fernandes 	drm_buddy_free_list(&mm, &allocated, 0);
600*4a9671a0SJoel Fernandes 	drm_buddy_fini(&mm);
601*4a9671a0SJoel Fernandes }
602*4a9671a0SJoel Fernandes 
603*4a9671a0SJoel Fernandes static void drm_test_buddy_alloc_pathological(struct kunit *test)
604*4a9671a0SJoel Fernandes {
605*4a9671a0SJoel Fernandes 	u64 mm_size, size, start = 0;
606*4a9671a0SJoel Fernandes 	struct drm_buddy_block *block;
607*4a9671a0SJoel Fernandes 	const int max_order = 3;
608*4a9671a0SJoel Fernandes 	unsigned long flags = 0;
609*4a9671a0SJoel Fernandes 	int order, top;
610*4a9671a0SJoel Fernandes 	struct drm_buddy mm;
611*4a9671a0SJoel Fernandes 	LIST_HEAD(blocks);
612*4a9671a0SJoel Fernandes 	LIST_HEAD(holes);
613*4a9671a0SJoel Fernandes 	LIST_HEAD(tmp);
614*4a9671a0SJoel Fernandes 
615*4a9671a0SJoel Fernandes 	/*
616*4a9671a0SJoel Fernandes 	 * Create a pot-sized mm, then allocate one of each possible
617*4a9671a0SJoel Fernandes 	 * order within. This should leave the mm with exactly one
618*4a9671a0SJoel Fernandes 	 * page left. Free the largest block, then whittle down again.
619*4a9671a0SJoel Fernandes 	 * Eventually we will have a fully 50% fragmented mm.
620*4a9671a0SJoel Fernandes 	 */
621*4a9671a0SJoel Fernandes 
622*4a9671a0SJoel Fernandes 	mm_size = SZ_4K << max_order;
623*4a9671a0SJoel Fernandes 	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
624*4a9671a0SJoel Fernandes 			       "buddy_init failed\n");
625*4a9671a0SJoel Fernandes 
626*4a9671a0SJoel Fernandes 	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
627*4a9671a0SJoel Fernandes 
628*4a9671a0SJoel Fernandes 	for (top = max_order; top; top--) {
629*4a9671a0SJoel Fernandes 		/* Make room by freeing the largest allocated block */
630*4a9671a0SJoel Fernandes 		block = list_first_entry_or_null(&blocks, typeof(*block), link);
631*4a9671a0SJoel Fernandes 		if (block) {
632*4a9671a0SJoel Fernandes 			list_del(&block->link);
633*4a9671a0SJoel Fernandes 			drm_buddy_free_block(&mm, block);
634*4a9671a0SJoel Fernandes 		}
635*4a9671a0SJoel Fernandes 
636*4a9671a0SJoel Fernandes 		for (order = top; order--;) {
637*4a9671a0SJoel Fernandes 			size = get_size(order, mm.chunk_size);
638*4a9671a0SJoel Fernandes 			KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start,
639*4a9671a0SJoel Fernandes 									    mm_size, size, size,
640*4a9671a0SJoel Fernandes 										&tmp, flags),
641*4a9671a0SJoel Fernandes 					"buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
642*4a9671a0SJoel Fernandes 					order, top);
643*4a9671a0SJoel Fernandes 
644*4a9671a0SJoel Fernandes 			block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
645*4a9671a0SJoel Fernandes 			KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
646*4a9671a0SJoel Fernandes 
647*4a9671a0SJoel Fernandes 			list_move_tail(&block->link, &blocks);
648*4a9671a0SJoel Fernandes 		}
649*4a9671a0SJoel Fernandes 
650*4a9671a0SJoel Fernandes 		/* There should be one final page for this sub-allocation */
651*4a9671a0SJoel Fernandes 		size = get_size(0, mm.chunk_size);
652*4a9671a0SJoel Fernandes 		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
653*4a9671a0SJoel Fernandes 								    size, size, &tmp, flags),
654*4a9671a0SJoel Fernandes 							   "buddy_alloc hit -ENOMEM for hole\n");
655*4a9671a0SJoel Fernandes 
656*4a9671a0SJoel Fernandes 		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
657*4a9671a0SJoel Fernandes 		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
658*4a9671a0SJoel Fernandes 
659*4a9671a0SJoel Fernandes 		list_move_tail(&block->link, &holes);
660*4a9671a0SJoel Fernandes 
661*4a9671a0SJoel Fernandes 		size = get_size(top, mm.chunk_size);
662*4a9671a0SJoel Fernandes 		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
663*4a9671a0SJoel Fernandes 								   size, size, &tmp, flags),
664*4a9671a0SJoel Fernandes 							  "buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
665*4a9671a0SJoel Fernandes 							  top, max_order);
666*4a9671a0SJoel Fernandes 	}
667*4a9671a0SJoel Fernandes 
668*4a9671a0SJoel Fernandes 	drm_buddy_free_list(&mm, &holes, 0);
669*4a9671a0SJoel Fernandes 
670*4a9671a0SJoel Fernandes 	/* Nothing larger than blocks of chunk_size now available */
671*4a9671a0SJoel Fernandes 	for (order = 1; order <= max_order; order++) {
672*4a9671a0SJoel Fernandes 		size = get_size(order, mm.chunk_size);
673*4a9671a0SJoel Fernandes 		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
674*4a9671a0SJoel Fernandes 								   size, size, &tmp, flags),
675*4a9671a0SJoel Fernandes 							  "buddy_alloc unexpectedly succeeded at order %d, it should be full!",
676*4a9671a0SJoel Fernandes 							  order);
677*4a9671a0SJoel Fernandes 	}
678*4a9671a0SJoel Fernandes 
679*4a9671a0SJoel Fernandes 	list_splice_tail(&holes, &blocks);
680*4a9671a0SJoel Fernandes 	drm_buddy_free_list(&mm, &blocks, 0);
681*4a9671a0SJoel Fernandes 	drm_buddy_fini(&mm);
682*4a9671a0SJoel Fernandes }
683*4a9671a0SJoel Fernandes 
684*4a9671a0SJoel Fernandes static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
685*4a9671a0SJoel Fernandes {
686*4a9671a0SJoel Fernandes 	u64 mm_size, size, start = 0;
687*4a9671a0SJoel Fernandes 	struct drm_buddy_block *block, *bn;
688*4a9671a0SJoel Fernandes 	const unsigned int max_order = 16;
689*4a9671a0SJoel Fernandes 	unsigned long flags = 0;
690*4a9671a0SJoel Fernandes 	struct drm_buddy mm;
691*4a9671a0SJoel Fernandes 	unsigned int order;
692*4a9671a0SJoel Fernandes 	LIST_HEAD(blocks);
693*4a9671a0SJoel Fernandes 	LIST_HEAD(tmp);
694*4a9671a0SJoel Fernandes 
695*4a9671a0SJoel Fernandes 	/*
696*4a9671a0SJoel Fernandes 	 * Create a pot-sized mm, then allocate one of each possible
697*4a9671a0SJoel Fernandes 	 * order within. This should leave the mm with exactly one
698*4a9671a0SJoel Fernandes 	 * page left.
699*4a9671a0SJoel Fernandes 	 */
700*4a9671a0SJoel Fernandes 
701*4a9671a0SJoel Fernandes 	mm_size = SZ_4K << max_order;
702*4a9671a0SJoel Fernandes 	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
703*4a9671a0SJoel Fernandes 			       "buddy_init failed\n");
704*4a9671a0SJoel Fernandes 
705*4a9671a0SJoel Fernandes 	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
706*4a9671a0SJoel Fernandes 
707*4a9671a0SJoel Fernandes 	for (order = 0; order < max_order; order++) {
708*4a9671a0SJoel Fernandes 		size = get_size(order, mm.chunk_size);
709*4a9671a0SJoel Fernandes 		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
710*4a9671a0SJoel Fernandes 								    size, size, &tmp, flags),
711*4a9671a0SJoel Fernandes 							   "buddy_alloc hit -ENOMEM with order=%d\n",
712*4a9671a0SJoel Fernandes 							   order);
713*4a9671a0SJoel Fernandes 
714*4a9671a0SJoel Fernandes 		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
715*4a9671a0SJoel Fernandes 		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
716*4a9671a0SJoel Fernandes 
717*4a9671a0SJoel Fernandes 		list_move_tail(&block->link, &blocks);
718*4a9671a0SJoel Fernandes 	}
719*4a9671a0SJoel Fernandes 
720*4a9671a0SJoel Fernandes 	/* And now the last remaining block available */
721*4a9671a0SJoel Fernandes 	size = get_size(0, mm.chunk_size);
722*4a9671a0SJoel Fernandes 	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
723*4a9671a0SJoel Fernandes 							    size, size, &tmp, flags),
724*4a9671a0SJoel Fernandes 						   "buddy_alloc hit -ENOMEM on final alloc\n");
725*4a9671a0SJoel Fernandes 
726*4a9671a0SJoel Fernandes 	block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
727*4a9671a0SJoel Fernandes 	KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
728*4a9671a0SJoel Fernandes 
729*4a9671a0SJoel Fernandes 	list_move_tail(&block->link, &blocks);
730*4a9671a0SJoel Fernandes 
731*4a9671a0SJoel Fernandes 	/* Should be completely full! */
732*4a9671a0SJoel Fernandes 	for (order = max_order; order--;) {
733*4a9671a0SJoel Fernandes 		size = get_size(order, mm.chunk_size);
734*4a9671a0SJoel Fernandes 		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
735*4a9671a0SJoel Fernandes 								   size, size, &tmp, flags),
736*4a9671a0SJoel Fernandes 							  "buddy_alloc unexpectedly succeeded, it should be full!");
737*4a9671a0SJoel Fernandes 	}
738*4a9671a0SJoel Fernandes 
739*4a9671a0SJoel Fernandes 	block = list_last_entry(&blocks, typeof(*block), link);
740*4a9671a0SJoel Fernandes 	list_del(&block->link);
741*4a9671a0SJoel Fernandes 	drm_buddy_free_block(&mm, block);
742*4a9671a0SJoel Fernandes 
743*4a9671a0SJoel Fernandes 	/* As we free in increasing size, we make available larger blocks */
744*4a9671a0SJoel Fernandes 	order = 1;
745*4a9671a0SJoel Fernandes 	list_for_each_entry_safe(block, bn, &blocks, link) {
746*4a9671a0SJoel Fernandes 		list_del(&block->link);
747*4a9671a0SJoel Fernandes 		drm_buddy_free_block(&mm, block);
748*4a9671a0SJoel Fernandes 
749*4a9671a0SJoel Fernandes 		size = get_size(order, mm.chunk_size);
750*4a9671a0SJoel Fernandes 		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
751*4a9671a0SJoel Fernandes 								    size, size, &tmp, flags),
752*4a9671a0SJoel Fernandes 							   "buddy_alloc hit -ENOMEM with order=%d\n",
753*4a9671a0SJoel Fernandes 							   order);
754*4a9671a0SJoel Fernandes 
755*4a9671a0SJoel Fernandes 		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
756*4a9671a0SJoel Fernandes 		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
757*4a9671a0SJoel Fernandes 
758*4a9671a0SJoel Fernandes 		list_del(&block->link);
759*4a9671a0SJoel Fernandes 		drm_buddy_free_block(&mm, block);
760*4a9671a0SJoel Fernandes 		order++;
761*4a9671a0SJoel Fernandes 	}
762*4a9671a0SJoel Fernandes 
763*4a9671a0SJoel Fernandes 	/* To confirm, now the whole mm should be available */
764*4a9671a0SJoel Fernandes 	size = get_size(max_order, mm.chunk_size);
765*4a9671a0SJoel Fernandes 	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
766*4a9671a0SJoel Fernandes 							    size, size, &tmp, flags),
767*4a9671a0SJoel Fernandes 						   "buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
768*4a9671a0SJoel Fernandes 						   max_order);
769*4a9671a0SJoel Fernandes 
770*4a9671a0SJoel Fernandes 	block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
771*4a9671a0SJoel Fernandes 	KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
772*4a9671a0SJoel Fernandes 
773*4a9671a0SJoel Fernandes 	list_del(&block->link);
774*4a9671a0SJoel Fernandes 	drm_buddy_free_block(&mm, block);
775*4a9671a0SJoel Fernandes 	drm_buddy_free_list(&mm, &blocks, 0);
776*4a9671a0SJoel Fernandes 	drm_buddy_fini(&mm);
777*4a9671a0SJoel Fernandes }
778*4a9671a0SJoel Fernandes 
779*4a9671a0SJoel Fernandes static void drm_test_buddy_alloc_optimistic(struct kunit *test)
780*4a9671a0SJoel Fernandes {
781*4a9671a0SJoel Fernandes 	u64 mm_size, size, start = 0;
782*4a9671a0SJoel Fernandes 	struct drm_buddy_block *block;
783*4a9671a0SJoel Fernandes 	unsigned long flags = 0;
784*4a9671a0SJoel Fernandes 	const int max_order = 16;
785*4a9671a0SJoel Fernandes 	struct drm_buddy mm;
786*4a9671a0SJoel Fernandes 	LIST_HEAD(blocks);
787*4a9671a0SJoel Fernandes 	LIST_HEAD(tmp);
788*4a9671a0SJoel Fernandes 	int order;
789*4a9671a0SJoel Fernandes 
790*4a9671a0SJoel Fernandes 	/*
791*4a9671a0SJoel Fernandes 	 * Create a mm with one block of each order available, and
792*4a9671a0SJoel Fernandes 	 * try to allocate them all.
793*4a9671a0SJoel Fernandes 	 */
794*4a9671a0SJoel Fernandes 
795*4a9671a0SJoel Fernandes 	mm_size = SZ_4K * ((1 << (max_order + 1)) - 1);
796*4a9671a0SJoel Fernandes 
797*4a9671a0SJoel Fernandes 	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
798*4a9671a0SJoel Fernandes 			       "buddy_init failed\n");
799*4a9671a0SJoel Fernandes 
800*4a9671a0SJoel Fernandes 	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
801*4a9671a0SJoel Fernandes 
802*4a9671a0SJoel Fernandes 	for (order = 0; order <= max_order; order++) {
803*4a9671a0SJoel Fernandes 		size = get_size(order, mm.chunk_size);
804*4a9671a0SJoel Fernandes 		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
805*4a9671a0SJoel Fernandes 								    size, size, &tmp, flags),
806*4a9671a0SJoel Fernandes 							   "buddy_alloc hit -ENOMEM with order=%d\n",
807*4a9671a0SJoel Fernandes 							   order);
808*4a9671a0SJoel Fernandes 
809*4a9671a0SJoel Fernandes 		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
810*4a9671a0SJoel Fernandes 		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
811*4a9671a0SJoel Fernandes 
812*4a9671a0SJoel Fernandes 		list_move_tail(&block->link, &blocks);
813*4a9671a0SJoel Fernandes 	}
814*4a9671a0SJoel Fernandes 
815*4a9671a0SJoel Fernandes 	/* Should be completely full! */
816*4a9671a0SJoel Fernandes 	size = get_size(0, mm.chunk_size);
817*4a9671a0SJoel Fernandes 	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
818*4a9671a0SJoel Fernandes 							   size, size, &tmp, flags),
819*4a9671a0SJoel Fernandes 						  "buddy_alloc unexpectedly succeeded, it should be full!");
820*4a9671a0SJoel Fernandes 
821*4a9671a0SJoel Fernandes 	drm_buddy_free_list(&mm, &blocks, 0);
822*4a9671a0SJoel Fernandes 	drm_buddy_fini(&mm);
823*4a9671a0SJoel Fernandes }
824*4a9671a0SJoel Fernandes 
825*4a9671a0SJoel Fernandes static void drm_test_buddy_alloc_limit(struct kunit *test)
826*4a9671a0SJoel Fernandes {
827*4a9671a0SJoel Fernandes 	u64 size = U64_MAX, start = 0;
828*4a9671a0SJoel Fernandes 	struct drm_buddy_block *block;
829*4a9671a0SJoel Fernandes 	unsigned long flags = 0;
830*4a9671a0SJoel Fernandes 	LIST_HEAD(allocated);
831*4a9671a0SJoel Fernandes 	struct drm_buddy mm;
832*4a9671a0SJoel Fernandes 
833*4a9671a0SJoel Fernandes 	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, SZ_4K));
834*4a9671a0SJoel Fernandes 
835*4a9671a0SJoel Fernandes 	KUNIT_EXPECT_EQ_MSG(test, mm.max_order, DRM_BUDDY_MAX_ORDER,
836*4a9671a0SJoel Fernandes 			    "mm.max_order(%d) != %d\n", mm.max_order,
837*4a9671a0SJoel Fernandes 						DRM_BUDDY_MAX_ORDER);
838*4a9671a0SJoel Fernandes 
839*4a9671a0SJoel Fernandes 	size = mm.chunk_size << mm.max_order;
840*4a9671a0SJoel Fernandes 	KUNIT_EXPECT_FALSE(test, drm_buddy_alloc_blocks(&mm, start, size, size,
841*4a9671a0SJoel Fernandes 							mm.chunk_size, &allocated, flags));
842*4a9671a0SJoel Fernandes 
843*4a9671a0SJoel Fernandes 	block = list_first_entry_or_null(&allocated, struct drm_buddy_block, link);
844*4a9671a0SJoel Fernandes 	KUNIT_EXPECT_TRUE(test, block);
845*4a9671a0SJoel Fernandes 
846*4a9671a0SJoel Fernandes 	KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), mm.max_order,
847*4a9671a0SJoel Fernandes 			    "block order(%d) != %d\n",
848*4a9671a0SJoel Fernandes 						drm_buddy_block_order(block), mm.max_order);
849*4a9671a0SJoel Fernandes 
850*4a9671a0SJoel Fernandes 	KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_size(&mm, block),
851*4a9671a0SJoel Fernandes 			    BIT_ULL(mm.max_order) * mm.chunk_size,
852*4a9671a0SJoel Fernandes 						"block size(%llu) != %llu\n",
853*4a9671a0SJoel Fernandes 						drm_buddy_block_size(&mm, block),
854*4a9671a0SJoel Fernandes 						BIT_ULL(mm.max_order) * mm.chunk_size);
855*4a9671a0SJoel Fernandes 
856*4a9671a0SJoel Fernandes 	drm_buddy_free_list(&mm, &allocated, 0);
857*4a9671a0SJoel Fernandes 	drm_buddy_fini(&mm);
858*4a9671a0SJoel Fernandes }
859*4a9671a0SJoel Fernandes 
860*4a9671a0SJoel Fernandes static void drm_test_buddy_alloc_exceeds_max_order(struct kunit *test)
861*4a9671a0SJoel Fernandes {
862*4a9671a0SJoel Fernandes 	u64 mm_size = SZ_8G + SZ_2G, size = SZ_8G + SZ_1G, min_block_size = SZ_8G;
863*4a9671a0SJoel Fernandes 	struct drm_buddy mm;
864*4a9671a0SJoel Fernandes 	LIST_HEAD(blocks);
865*4a9671a0SJoel Fernandes 	int err;
866*4a9671a0SJoel Fernandes 
867*4a9671a0SJoel Fernandes 	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
868*4a9671a0SJoel Fernandes 			       "buddy_init failed\n");
869*4a9671a0SJoel Fernandes 
870*4a9671a0SJoel Fernandes 	/* CONTIGUOUS allocation should succeed via try_harder fallback */
871*4a9671a0SJoel Fernandes 	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, size,
872*4a9671a0SJoel Fernandes 							    SZ_4K, &blocks,
873*4a9671a0SJoel Fernandes 							    DRM_BUDDY_CONTIGUOUS_ALLOCATION),
874*4a9671a0SJoel Fernandes 			       "buddy_alloc hit an error size=%llu\n", size);
875*4a9671a0SJoel Fernandes 	drm_buddy_free_list(&mm, &blocks, 0);
876*4a9671a0SJoel Fernandes 
877*4a9671a0SJoel Fernandes 	/* Non-CONTIGUOUS with large min_block_size should return -EINVAL */
878*4a9671a0SJoel Fernandes 	err = drm_buddy_alloc_blocks(&mm, 0, mm_size, size, min_block_size, &blocks, 0);
879*4a9671a0SJoel Fernandes 	KUNIT_EXPECT_EQ(test, err, -EINVAL);
880*4a9671a0SJoel Fernandes 
881*4a9671a0SJoel Fernandes 	/* Non-CONTIGUOUS + RANGE with large min_block_size should return -EINVAL */
882*4a9671a0SJoel Fernandes 	err = drm_buddy_alloc_blocks(&mm, 0, mm_size, size, min_block_size, &blocks,
883*4a9671a0SJoel Fernandes 				     DRM_BUDDY_RANGE_ALLOCATION);
884*4a9671a0SJoel Fernandes 	KUNIT_EXPECT_EQ(test, err, -EINVAL);
885*4a9671a0SJoel Fernandes 
886*4a9671a0SJoel Fernandes 	/* CONTIGUOUS + RANGE should return -EINVAL (no try_harder for RANGE) */
887*4a9671a0SJoel Fernandes 	err = drm_buddy_alloc_blocks(&mm, 0, mm_size, size, SZ_4K, &blocks,
888*4a9671a0SJoel Fernandes 				     DRM_BUDDY_CONTIGUOUS_ALLOCATION | DRM_BUDDY_RANGE_ALLOCATION);
889*4a9671a0SJoel Fernandes 	KUNIT_EXPECT_EQ(test, err, -EINVAL);
890*4a9671a0SJoel Fernandes 
891*4a9671a0SJoel Fernandes 	drm_buddy_fini(&mm);
892*4a9671a0SJoel Fernandes }
893*4a9671a0SJoel Fernandes 
894*4a9671a0SJoel Fernandes static int drm_buddy_suite_init(struct kunit_suite *suite)
895*4a9671a0SJoel Fernandes {
896*4a9671a0SJoel Fernandes 	while (!random_seed)
897*4a9671a0SJoel Fernandes 		random_seed = get_random_u32();
898*4a9671a0SJoel Fernandes 
899*4a9671a0SJoel Fernandes 	kunit_info(suite, "Testing DRM buddy manager, with random_seed=0x%x\n",
900*4a9671a0SJoel Fernandes 		   random_seed);
901*4a9671a0SJoel Fernandes 
902*4a9671a0SJoel Fernandes 	return 0;
903*4a9671a0SJoel Fernandes }
904*4a9671a0SJoel Fernandes 
905*4a9671a0SJoel Fernandes static struct kunit_case drm_buddy_tests[] = {
906*4a9671a0SJoel Fernandes 	KUNIT_CASE(drm_test_buddy_alloc_limit),
907*4a9671a0SJoel Fernandes 	KUNIT_CASE(drm_test_buddy_alloc_optimistic),
908*4a9671a0SJoel Fernandes 	KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
909*4a9671a0SJoel Fernandes 	KUNIT_CASE(drm_test_buddy_alloc_pathological),
910*4a9671a0SJoel Fernandes 	KUNIT_CASE(drm_test_buddy_alloc_contiguous),
911*4a9671a0SJoel Fernandes 	KUNIT_CASE(drm_test_buddy_alloc_clear),
912*4a9671a0SJoel Fernandes 	KUNIT_CASE(drm_test_buddy_alloc_range_bias),
913*4a9671a0SJoel Fernandes 	KUNIT_CASE(drm_test_buddy_fragmentation_performance),
914*4a9671a0SJoel Fernandes 	KUNIT_CASE(drm_test_buddy_alloc_exceeds_max_order),
915*4a9671a0SJoel Fernandes 	{}
916*4a9671a0SJoel Fernandes };
917*4a9671a0SJoel Fernandes 
918*4a9671a0SJoel Fernandes static struct kunit_suite drm_buddy_test_suite = {
919*4a9671a0SJoel Fernandes 	.name = "drm_buddy",
920*4a9671a0SJoel Fernandes 	.suite_init = drm_buddy_suite_init,
921*4a9671a0SJoel Fernandes 	.test_cases = drm_buddy_tests,
922*4a9671a0SJoel Fernandes };
923*4a9671a0SJoel Fernandes 
924*4a9671a0SJoel Fernandes kunit_test_suite(drm_buddy_test_suite);
925*4a9671a0SJoel Fernandes 
926*4a9671a0SJoel Fernandes MODULE_AUTHOR("Intel Corporation");
927*4a9671a0SJoel Fernandes MODULE_DESCRIPTION("Kunit test for drm_buddy functions");
928*4a9671a0SJoel Fernandes MODULE_LICENSE("GPL");
929