xref: /linux/drivers/gpu/drm/tests/drm_buddy_test.c (revision 508ecc78b6c983a7921bee2f4bd22682f9f0396e)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  * Copyright © 2022 Maíra Canal <mairacanal@riseup.net>
5  */
6 
7 #include <kunit/test.h>
8 
9 #include <linux/prime_numbers.h>
10 #include <linux/sched/signal.h>
11 #include <linux/sizes.h>
12 
13 #include <drm/drm_buddy.h>
14 
15 #include "../lib/drm_random.h"
16 
17 static inline u64 get_size(int order, u64 chunk_size)
18 {
19 	return (1 << order) * chunk_size;
20 }
21 
22 static void drm_test_buddy_alloc_contiguous(struct kunit *test)
23 {
24 	u64 mm_size, ps = SZ_4K, i, n_pages, total;
25 	struct drm_buddy_block *block;
26 	struct drm_buddy mm;
27 	LIST_HEAD(left);
28 	LIST_HEAD(middle);
29 	LIST_HEAD(right);
30 	LIST_HEAD(allocated);
31 
32 	mm_size = 16 * 3 * SZ_4K;
33 
34 	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
35 
36 	/*
37 	 * Idea is to fragment the address space by alternating block
38 	 * allocations between three different lists; one for left, middle and
39 	 * right. We can then free a list to simulate fragmentation. In
40 	 * particular we want to exercise the DRM_BUDDY_CONTIGUOUS_ALLOCATION,
41 	 * including the try_harder path.
42 	 */
43 
44 	i = 0;
45 	n_pages = mm_size / ps;
46 	do {
47 		struct list_head *list;
48 		int slot = i % 3;
49 
50 		if (slot == 0)
51 			list = &left;
52 		else if (slot == 1)
53 			list = &middle;
54 		else
55 			list = &right;
56 		KUNIT_ASSERT_FALSE_MSG(test,
57 				       drm_buddy_alloc_blocks(&mm, 0, mm_size,
58 							      ps, ps, list, 0),
59 				       "buddy_alloc hit an error size=%d\n",
60 				       ps);
61 	} while (++i < n_pages);
62 
63 	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
64 							   3 * ps, ps, &allocated,
65 							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
66 			       "buddy_alloc didn't error size=%d\n", 3 * ps);
67 
68 	drm_buddy_free_list(&mm, &middle);
69 	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
70 							   3 * ps, ps, &allocated,
71 							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
72 			       "buddy_alloc didn't error size=%llu\n", 3 * ps);
73 	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
74 							   2 * ps, ps, &allocated,
75 							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
76 			       "buddy_alloc didn't error size=%llu\n", 2 * ps);
77 
78 	drm_buddy_free_list(&mm, &right);
79 	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
80 							   3 * ps, ps, &allocated,
81 							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
82 			       "buddy_alloc didn't error size=%llu\n", 3 * ps);
83 	/*
84 	 * At this point we should have enough contiguous space for 2 blocks,
85 	 * however they are never buddies (since we freed middle and right) so
86 	 * will require the try_harder logic to find them.
87 	 */
88 	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
89 							    2 * ps, ps, &allocated,
90 							    DRM_BUDDY_CONTIGUOUS_ALLOCATION),
91 			       "buddy_alloc hit an error size=%d\n", 2 * ps);
92 
93 	drm_buddy_free_list(&mm, &left);
94 	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
95 							    3 * ps, ps, &allocated,
96 							    DRM_BUDDY_CONTIGUOUS_ALLOCATION),
97 			       "buddy_alloc hit an error size=%d\n", 3 * ps);
98 
99 	total = 0;
100 	list_for_each_entry(block, &allocated, link)
101 		total += drm_buddy_block_size(&mm, block);
102 
103 	KUNIT_ASSERT_EQ(test, total, ps * 2 + ps * 3);
104 
105 	drm_buddy_free_list(&mm, &allocated);
106 	drm_buddy_fini(&mm);
107 }
108 
109 static void drm_test_buddy_alloc_pathological(struct kunit *test)
110 {
111 	u64 mm_size, size, start = 0;
112 	struct drm_buddy_block *block;
113 	const int max_order = 3;
114 	unsigned long flags = 0;
115 	int order, top;
116 	struct drm_buddy mm;
117 	LIST_HEAD(blocks);
118 	LIST_HEAD(holes);
119 	LIST_HEAD(tmp);
120 
121 	/*
122 	 * Create a pot-sized mm, then allocate one of each possible
123 	 * order within. This should leave the mm with exactly one
124 	 * page left. Free the largest block, then whittle down again.
125 	 * Eventually we will have a fully 50% fragmented mm.
126 	 */
127 
128 	mm_size = PAGE_SIZE << max_order;
129 	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
130 			       "buddy_init failed\n");
131 
132 	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
133 
134 	for (top = max_order; top; top--) {
135 		/* Make room by freeing the largest allocated block */
136 		block = list_first_entry_or_null(&blocks, typeof(*block), link);
137 		if (block) {
138 			list_del(&block->link);
139 			drm_buddy_free_block(&mm, block);
140 		}
141 
142 		for (order = top; order--;) {
143 			size = get_size(order, PAGE_SIZE);
144 			KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start,
145 									    mm_size, size, size,
146 										&tmp, flags),
147 					"buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
148 					order, top);
149 
150 			block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
151 			KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
152 
153 			list_move_tail(&block->link, &blocks);
154 		}
155 
156 		/* There should be one final page for this sub-allocation */
157 		size = get_size(0, PAGE_SIZE);
158 		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
159 								    size, size, &tmp, flags),
160 							   "buddy_alloc hit -ENOMEM for hole\n");
161 
162 		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
163 		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
164 
165 		list_move_tail(&block->link, &holes);
166 
167 		size = get_size(top, PAGE_SIZE);
168 		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
169 								   size, size, &tmp, flags),
170 							  "buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
171 							  top, max_order);
172 	}
173 
174 	drm_buddy_free_list(&mm, &holes);
175 
176 	/* Nothing larger than blocks of chunk_size now available */
177 	for (order = 1; order <= max_order; order++) {
178 		size = get_size(order, PAGE_SIZE);
179 		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
180 								   size, size, &tmp, flags),
181 							  "buddy_alloc unexpectedly succeeded at order %d, it should be full!",
182 							  order);
183 	}
184 
185 	list_splice_tail(&holes, &blocks);
186 	drm_buddy_free_list(&mm, &blocks);
187 	drm_buddy_fini(&mm);
188 }
189 
190 static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
191 {
192 	u64 mm_size, size, start = 0;
193 	struct drm_buddy_block *block, *bn;
194 	const unsigned int max_order = 16;
195 	unsigned long flags = 0;
196 	struct drm_buddy mm;
197 	unsigned int order;
198 	LIST_HEAD(blocks);
199 	LIST_HEAD(tmp);
200 
201 	/*
202 	 * Create a pot-sized mm, then allocate one of each possible
203 	 * order within. This should leave the mm with exactly one
204 	 * page left.
205 	 */
206 
207 	mm_size = PAGE_SIZE << max_order;
208 	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
209 			       "buddy_init failed\n");
210 
211 	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
212 
213 	for (order = 0; order < max_order; order++) {
214 		size = get_size(order, PAGE_SIZE);
215 		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
216 								    size, size, &tmp, flags),
217 							   "buddy_alloc hit -ENOMEM with order=%d\n",
218 							   order);
219 
220 		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
221 		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
222 
223 		list_move_tail(&block->link, &blocks);
224 	}
225 
226 	/* And now the last remaining block available */
227 	size = get_size(0, PAGE_SIZE);
228 	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
229 							    size, size, &tmp, flags),
230 						   "buddy_alloc hit -ENOMEM on final alloc\n");
231 
232 	block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
233 	KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
234 
235 	list_move_tail(&block->link, &blocks);
236 
237 	/* Should be completely full! */
238 	for (order = max_order; order--;) {
239 		size = get_size(order, PAGE_SIZE);
240 		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
241 								   size, size, &tmp, flags),
242 							  "buddy_alloc unexpectedly succeeded, it should be full!");
243 	}
244 
245 	block = list_last_entry(&blocks, typeof(*block), link);
246 	list_del(&block->link);
247 	drm_buddy_free_block(&mm, block);
248 
249 	/* As we free in increasing size, we make available larger blocks */
250 	order = 1;
251 	list_for_each_entry_safe(block, bn, &blocks, link) {
252 		list_del(&block->link);
253 		drm_buddy_free_block(&mm, block);
254 
255 		size = get_size(order, PAGE_SIZE);
256 		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
257 								    size, size, &tmp, flags),
258 							   "buddy_alloc hit -ENOMEM with order=%d\n",
259 							   order);
260 
261 		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
262 		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
263 
264 		list_del(&block->link);
265 		drm_buddy_free_block(&mm, block);
266 		order++;
267 	}
268 
269 	/* To confirm, now the whole mm should be available */
270 	size = get_size(max_order, PAGE_SIZE);
271 	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
272 							    size, size, &tmp, flags),
273 						   "buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
274 						   max_order);
275 
276 	block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
277 	KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
278 
279 	list_del(&block->link);
280 	drm_buddy_free_block(&mm, block);
281 	drm_buddy_free_list(&mm, &blocks);
282 	drm_buddy_fini(&mm);
283 }
284 
285 static void drm_test_buddy_alloc_optimistic(struct kunit *test)
286 {
287 	u64 mm_size, size, start = 0;
288 	struct drm_buddy_block *block;
289 	unsigned long flags = 0;
290 	const int max_order = 16;
291 	struct drm_buddy mm;
292 	LIST_HEAD(blocks);
293 	LIST_HEAD(tmp);
294 	int order;
295 
296 	/*
297 	 * Create a mm with one block of each order available, and
298 	 * try to allocate them all.
299 	 */
300 
301 	mm_size = PAGE_SIZE * ((1 << (max_order + 1)) - 1);
302 
303 	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
304 			       "buddy_init failed\n");
305 
306 	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
307 
308 	for (order = 0; order <= max_order; order++) {
309 		size = get_size(order, PAGE_SIZE);
310 		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
311 								    size, size, &tmp, flags),
312 							   "buddy_alloc hit -ENOMEM with order=%d\n",
313 							   order);
314 
315 		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
316 		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
317 
318 		list_move_tail(&block->link, &blocks);
319 	}
320 
321 	/* Should be completely full! */
322 	size = get_size(0, PAGE_SIZE);
323 	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
324 							   size, size, &tmp, flags),
325 						  "buddy_alloc unexpectedly succeeded, it should be full!");
326 
327 	drm_buddy_free_list(&mm, &blocks);
328 	drm_buddy_fini(&mm);
329 }
330 
331 static void drm_test_buddy_alloc_limit(struct kunit *test)
332 {
333 	u64 size = U64_MAX, start = 0;
334 	struct drm_buddy_block *block;
335 	unsigned long flags = 0;
336 	LIST_HEAD(allocated);
337 	struct drm_buddy mm;
338 
339 	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, PAGE_SIZE));
340 
341 	KUNIT_EXPECT_EQ_MSG(test, mm.max_order, DRM_BUDDY_MAX_ORDER,
342 			    "mm.max_order(%d) != %d\n", mm.max_order,
343 						DRM_BUDDY_MAX_ORDER);
344 
345 	size = mm.chunk_size << mm.max_order;
346 	KUNIT_EXPECT_FALSE(test, drm_buddy_alloc_blocks(&mm, start, size, size,
347 							PAGE_SIZE, &allocated, flags));
348 
349 	block = list_first_entry_or_null(&allocated, struct drm_buddy_block, link);
350 	KUNIT_EXPECT_TRUE(test, block);
351 
352 	KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), mm.max_order,
353 			    "block order(%d) != %d\n",
354 						drm_buddy_block_order(block), mm.max_order);
355 
356 	KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_size(&mm, block),
357 			    BIT_ULL(mm.max_order) * PAGE_SIZE,
358 						"block size(%llu) != %llu\n",
359 						drm_buddy_block_size(&mm, block),
360 						BIT_ULL(mm.max_order) * PAGE_SIZE);
361 
362 	drm_buddy_free_list(&mm, &allocated);
363 	drm_buddy_fini(&mm);
364 }
365 
/* Cases registered with KUnit; they run in the order listed here. */
static struct kunit_case drm_buddy_tests[] = {
	KUNIT_CASE(drm_test_buddy_alloc_limit),
	KUNIT_CASE(drm_test_buddy_alloc_optimistic),
	KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
	KUNIT_CASE(drm_test_buddy_alloc_pathological),
	KUNIT_CASE(drm_test_buddy_alloc_contiguous),
	{}	/* sentinel terminating the case array */
};

static struct kunit_suite drm_buddy_test_suite = {
	.name = "drm_buddy",
	.test_cases = drm_buddy_tests,
};

kunit_test_suite(drm_buddy_test_suite);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
384