xref: /linux/drivers/gpu/drm/tests/drm_buddy_test.c (revision c435bce6af9b2a277662698875a689c389358f17)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  * Copyright © 2022 Maíra Canal <mairacanal@riseup.net>
5  */
6 
7 #include <kunit/test.h>
8 
9 #include <linux/prime_numbers.h>
10 #include <linux/sched/signal.h>
11 #include <linux/sizes.h>
12 
13 #include <drm/drm_buddy.h>
14 
15 #include "../lib/drm_random.h"
16 
17 static inline u64 get_size(int order, u64 chunk_size)
18 {
19 	return (1 << order) * chunk_size;
20 }
21 
22 static void drm_test_buddy_alloc_contiguous(struct kunit *test)
23 {
24 	const unsigned long ps = SZ_4K, mm_size = 16 * 3 * SZ_4K;
25 	unsigned long i, n_pages, total;
26 	struct drm_buddy_block *block;
27 	struct drm_buddy mm;
28 	LIST_HEAD(left);
29 	LIST_HEAD(middle);
30 	LIST_HEAD(right);
31 	LIST_HEAD(allocated);
32 
33 	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
34 
35 	/*
36 	 * Idea is to fragment the address space by alternating block
37 	 * allocations between three different lists; one for left, middle and
38 	 * right. We can then free a list to simulate fragmentation. In
39 	 * particular we want to exercise the DRM_BUDDY_CONTIGUOUS_ALLOCATION,
40 	 * including the try_harder path.
41 	 */
42 
43 	i = 0;
44 	n_pages = mm_size / ps;
45 	do {
46 		struct list_head *list;
47 		int slot = i % 3;
48 
49 		if (slot == 0)
50 			list = &left;
51 		else if (slot == 1)
52 			list = &middle;
53 		else
54 			list = &right;
55 		KUNIT_ASSERT_FALSE_MSG(test,
56 				       drm_buddy_alloc_blocks(&mm, 0, mm_size,
57 							      ps, ps, list, 0),
58 				       "buddy_alloc hit an error size=%u\n",
59 				       ps);
60 	} while (++i < n_pages);
61 
62 	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
63 							   3 * ps, ps, &allocated,
64 							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
65 			       "buddy_alloc didn't error size=%u\n", 3 * ps);
66 
67 	drm_buddy_free_list(&mm, &middle);
68 	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
69 							   3 * ps, ps, &allocated,
70 							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
71 			       "buddy_alloc didn't error size=%u\n", 3 * ps);
72 	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
73 							   2 * ps, ps, &allocated,
74 							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
75 			       "buddy_alloc didn't error size=%u\n", 2 * ps);
76 
77 	drm_buddy_free_list(&mm, &right);
78 	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
79 							   3 * ps, ps, &allocated,
80 							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
81 			       "buddy_alloc didn't error size=%u\n", 3 * ps);
82 	/*
83 	 * At this point we should have enough contiguous space for 2 blocks,
84 	 * however they are never buddies (since we freed middle and right) so
85 	 * will require the try_harder logic to find them.
86 	 */
87 	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
88 							    2 * ps, ps, &allocated,
89 							    DRM_BUDDY_CONTIGUOUS_ALLOCATION),
90 			       "buddy_alloc hit an error size=%u\n", 2 * ps);
91 
92 	drm_buddy_free_list(&mm, &left);
93 	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
94 							    3 * ps, ps, &allocated,
95 							    DRM_BUDDY_CONTIGUOUS_ALLOCATION),
96 			       "buddy_alloc hit an error size=%u\n", 3 * ps);
97 
98 	total = 0;
99 	list_for_each_entry(block, &allocated, link)
100 		total += drm_buddy_block_size(&mm, block);
101 
102 	KUNIT_ASSERT_EQ(test, total, ps * 2 + ps * 3);
103 
104 	drm_buddy_free_list(&mm, &allocated);
105 	drm_buddy_fini(&mm);
106 }
107 
108 static void drm_test_buddy_alloc_pathological(struct kunit *test)
109 {
110 	u64 mm_size, size, start = 0;
111 	struct drm_buddy_block *block;
112 	const int max_order = 3;
113 	unsigned long flags = 0;
114 	int order, top;
115 	struct drm_buddy mm;
116 	LIST_HEAD(blocks);
117 	LIST_HEAD(holes);
118 	LIST_HEAD(tmp);
119 
120 	/*
121 	 * Create a pot-sized mm, then allocate one of each possible
122 	 * order within. This should leave the mm with exactly one
123 	 * page left. Free the largest block, then whittle down again.
124 	 * Eventually we will have a fully 50% fragmented mm.
125 	 */
126 
127 	mm_size = PAGE_SIZE << max_order;
128 	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
129 			       "buddy_init failed\n");
130 
131 	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
132 
133 	for (top = max_order; top; top--) {
134 		/* Make room by freeing the largest allocated block */
135 		block = list_first_entry_or_null(&blocks, typeof(*block), link);
136 		if (block) {
137 			list_del(&block->link);
138 			drm_buddy_free_block(&mm, block);
139 		}
140 
141 		for (order = top; order--;) {
142 			size = get_size(order, PAGE_SIZE);
143 			KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start,
144 									    mm_size, size, size,
145 										&tmp, flags),
146 					"buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
147 					order, top);
148 
149 			block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
150 			KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
151 
152 			list_move_tail(&block->link, &blocks);
153 		}
154 
155 		/* There should be one final page for this sub-allocation */
156 		size = get_size(0, PAGE_SIZE);
157 		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
158 								    size, size, &tmp, flags),
159 							   "buddy_alloc hit -ENOMEM for hole\n");
160 
161 		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
162 		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
163 
164 		list_move_tail(&block->link, &holes);
165 
166 		size = get_size(top, PAGE_SIZE);
167 		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
168 								   size, size, &tmp, flags),
169 							  "buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
170 							  top, max_order);
171 	}
172 
173 	drm_buddy_free_list(&mm, &holes);
174 
175 	/* Nothing larger than blocks of chunk_size now available */
176 	for (order = 1; order <= max_order; order++) {
177 		size = get_size(order, PAGE_SIZE);
178 		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
179 								   size, size, &tmp, flags),
180 							  "buddy_alloc unexpectedly succeeded at order %d, it should be full!",
181 							  order);
182 	}
183 
184 	list_splice_tail(&holes, &blocks);
185 	drm_buddy_free_list(&mm, &blocks);
186 	drm_buddy_fini(&mm);
187 }
188 
/*
 * Fill a pot-sized mm completely, verify nothing more can be allocated,
 * then free the blocks back in increasing order size, checking at every
 * step that the just-freed space can immediately be re-allocated. Finally
 * confirm the fully drained mm can satisfy one max_order allocation.
 */
static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block, *bn;
	const unsigned int max_order = 16;
	unsigned long flags = 0;
	struct drm_buddy mm;
	unsigned int order;
	LIST_HEAD(blocks);
	LIST_HEAD(tmp);

	/*
	 * Create a pot-sized mm, then allocate one of each possible
	 * order within. This should leave the mm with exactly one
	 * page left.
	 */

	mm_size = PAGE_SIZE << max_order;
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
			       "buddy_init failed\n");

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	/*
	 * One block of each order 0..max_order-1; the sum of those sizes is
	 * mm_size minus a single page.
	 */
	for (order = 0; order < max_order; order++) {
		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
							   "buddy_alloc hit -ENOMEM with order=%d\n",
							   order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		/* Keep blocks in ascending-order-size on &blocks. */
		list_move_tail(&block->link, &blocks);
	}

	/* And now the last remaining block available */
	size = get_size(0, PAGE_SIZE);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							    size, size, &tmp, flags),
						   "buddy_alloc hit -ENOMEM on final alloc\n");

	block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
	KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

	list_move_tail(&block->link, &blocks);

	/* Should be completely full! */
	for (order = max_order; order--;) {
		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
							  "buddy_alloc unexpectedly succeeded, it should be full!");
	}

	/*
	 * Free the trailing order-0 page first, so the space freed below
	 * always merges with its neighbour into the next order up.
	 */
	block = list_last_entry(&blocks, typeof(*block), link);
	list_del(&block->link);
	drm_buddy_free_block(&mm, block);

	/* As we free in increasing size, we make available larger blocks */
	order = 1;
	list_for_each_entry_safe(block, bn, &blocks, link) {
		list_del(&block->link);
		drm_buddy_free_block(&mm, block);

		/* The freed order-(order-1) pair must now satisfy order. */
		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
							   "buddy_alloc hit -ENOMEM with order=%d\n",
							   order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		/* Return it straight away so the next round sees the space. */
		list_del(&block->link);
		drm_buddy_free_block(&mm, block);
		order++;
	}

	/* To confirm, now the whole mm should be available */
	size = get_size(max_order, PAGE_SIZE);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							    size, size, &tmp, flags),
						   "buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
						   max_order);

	block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
	KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

	list_del(&block->link);
	drm_buddy_free_block(&mm, block);
	drm_buddy_free_list(&mm, &blocks);
	drm_buddy_fini(&mm);
}
283 
284 static void drm_test_buddy_alloc_optimistic(struct kunit *test)
285 {
286 	u64 mm_size, size, start = 0;
287 	struct drm_buddy_block *block;
288 	unsigned long flags = 0;
289 	const int max_order = 16;
290 	struct drm_buddy mm;
291 	LIST_HEAD(blocks);
292 	LIST_HEAD(tmp);
293 	int order;
294 
295 	/*
296 	 * Create a mm with one block of each order available, and
297 	 * try to allocate them all.
298 	 */
299 
300 	mm_size = PAGE_SIZE * ((1 << (max_order + 1)) - 1);
301 
302 	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
303 			       "buddy_init failed\n");
304 
305 	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
306 
307 	for (order = 0; order <= max_order; order++) {
308 		size = get_size(order, PAGE_SIZE);
309 		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
310 								    size, size, &tmp, flags),
311 							   "buddy_alloc hit -ENOMEM with order=%d\n",
312 							   order);
313 
314 		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
315 		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
316 
317 		list_move_tail(&block->link, &blocks);
318 	}
319 
320 	/* Should be completely full! */
321 	size = get_size(0, PAGE_SIZE);
322 	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
323 							   size, size, &tmp, flags),
324 						  "buddy_alloc unexpectedly succeeded, it should be full!");
325 
326 	drm_buddy_free_list(&mm, &blocks);
327 	drm_buddy_fini(&mm);
328 }
329 
330 static void drm_test_buddy_alloc_limit(struct kunit *test)
331 {
332 	u64 size = U64_MAX, start = 0;
333 	struct drm_buddy_block *block;
334 	unsigned long flags = 0;
335 	LIST_HEAD(allocated);
336 	struct drm_buddy mm;
337 
338 	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, PAGE_SIZE));
339 
340 	KUNIT_EXPECT_EQ_MSG(test, mm.max_order, DRM_BUDDY_MAX_ORDER,
341 			    "mm.max_order(%d) != %d\n", mm.max_order,
342 						DRM_BUDDY_MAX_ORDER);
343 
344 	size = mm.chunk_size << mm.max_order;
345 	KUNIT_EXPECT_FALSE(test, drm_buddy_alloc_blocks(&mm, start, size, size,
346 							PAGE_SIZE, &allocated, flags));
347 
348 	block = list_first_entry_or_null(&allocated, struct drm_buddy_block, link);
349 	KUNIT_EXPECT_TRUE(test, block);
350 
351 	KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), mm.max_order,
352 			    "block order(%d) != %d\n",
353 						drm_buddy_block_order(block), mm.max_order);
354 
355 	KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_size(&mm, block),
356 			    BIT_ULL(mm.max_order) * PAGE_SIZE,
357 						"block size(%llu) != %llu\n",
358 						drm_buddy_block_size(&mm, block),
359 						BIT_ULL(mm.max_order) * PAGE_SIZE);
360 
361 	drm_buddy_free_list(&mm, &allocated);
362 	drm_buddy_fini(&mm);
363 }
364 
/*
 * Test-case table: cases run in the order listed, cheapest sanity check
 * (limit) first, the fragmentation-heavy contiguous test last.
 */
static struct kunit_case drm_buddy_tests[] = {
	KUNIT_CASE(drm_test_buddy_alloc_limit),
	KUNIT_CASE(drm_test_buddy_alloc_optimistic),
	KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
	KUNIT_CASE(drm_test_buddy_alloc_pathological),
	KUNIT_CASE(drm_test_buddy_alloc_contiguous),
	{}	/* sentinel */
};

static struct kunit_suite drm_buddy_test_suite = {
	.name = "drm_buddy",
	.test_cases = drm_buddy_tests,
};

/* Register the suite with the KUnit framework at module load. */
kunit_test_suite(drm_buddy_test_suite);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
383