// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 * Copyright © 2022 Maíra Canal <mairacanal@riseup.net>
 */

#include <kunit/test.h>

#include <linux/prime_numbers.h>
#include <linux/sched/signal.h>

#include <drm/drm_buddy.h>

#include "../lib/drm_random.h"

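/* Byte size of a block of the given order, i.e. chunk_size * 2^order */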
static inline u64 get_size(int order, u64 chunk_size)
{
	return (1 << order) * chunk_size;
}

static void drm_test_buddy_alloc_pathological(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block;
	const int max_order = 3;
	unsigned long flags = 0;
	int order, top;
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	LIST_HEAD(holes);
	LIST_HEAD(tmp);

	/*
	 * Create a pot-sized mm, then allocate one of each possible
	 * order within. This should leave the mm with exactly one
	 * page left. Free the largest block, then whittle down again.
	 * Eventually we will have a fully 50% fragmented mm.
	 */

	mm_size = PAGE_SIZE << max_order;
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
			       "buddy_init failed\n");

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	for (top = max_order; top; top--) {
		/* Make room by freeing the largest allocated block */
		block = list_first_entry_or_null(&blocks, typeof(*block), link);
		if (block) {
			list_del(&block->link);
			drm_buddy_free_block(&mm, block);
		}

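		/* Allocate one block of every order below the current top */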
		for (order = top; order--;) {
			size = get_size(order, PAGE_SIZE);
			KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start,
									    mm_size, size, size,
									    &tmp, flags),
					       "buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
					       order, top);

			block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
			KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

			list_move_tail(&block->link, &blocks);
		}

		/* There should be one final page for this sub-allocation */
		size = get_size(0, PAGE_SIZE);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM for hole\n");

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_move_tail(&block->link, &holes);

		size = get_size(top, PAGE_SIZE);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
				      top, max_order);
	}

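	/*
	 * Release the single pages held back in each pass; the remaining
	 * free space now consists only of isolated chunk_size holes, as
	 * the loop below verifies.
	 */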
	drm_buddy_free_list(&mm, &holes);

	/* Nothing larger than blocks of chunk_size now available */
	for (order = 1; order <= max_order; order++) {
		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded at order %d, it should be full!",
				      order);
	}

	list_splice_tail(&holes, &blocks);
	drm_buddy_free_list(&mm, &blocks);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block, *bn;
	const unsigned int max_order = 16;
	unsigned long flags = 0;
	struct drm_buddy mm;
	unsigned int order;
	LIST_HEAD(blocks);
	LIST_HEAD(tmp);

	/*
	 * Create a pot-sized mm, then allocate one of each possible
	 * order within. This should leave the mm with exactly one
	 * page left.
	 */

	mm_size = PAGE_SIZE << max_order;
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
			       "buddy_init failed\n");

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	for (order = 0; order < max_order; order++) {
		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_move_tail(&block->link, &blocks);
	}

	/* And now the last remaining block available */
	size = get_size(0, PAGE_SIZE);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							    size, size, &tmp, flags),
			       "buddy_alloc hit -ENOMEM on final alloc\n");

	block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
	KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

	list_move_tail(&block->link, &blocks);

	/* Should be completely full! */
	for (order = max_order; order--;) {
		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded, it should be full!");
	}

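	/*
	 * Return the final single page first, then free the remaining
	 * blocks smallest first; the allocations below check that each
	 * free makes the next larger order available again.
	 */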
	block = list_last_entry(&blocks, typeof(*block), link);
	list_del(&block->link);
	drm_buddy_free_block(&mm, block);

	/* As we free in increasing size, we make available larger blocks */
	order = 1;
	list_for_each_entry_safe(block, bn, &blocks, link) {
		list_del(&block->link);
		drm_buddy_free_block(&mm, block);

		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_del(&block->link);
		drm_buddy_free_block(&mm, block);
		order++;
	}

	/* To confirm, now the whole mm should be available */
	size = get_size(max_order, PAGE_SIZE);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							    size, size, &tmp, flags),
			       "buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
			       max_order);

	block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
	KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

	list_del(&block->link);
	drm_buddy_free_block(&mm, block);
	drm_buddy_free_list(&mm, &blocks);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_optimistic(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block;
	unsigned long flags = 0;
	const int max_order = 16;
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	LIST_HEAD(tmp);
	int order;

	/*
	 * Create a mm with one block of each order available, and
	 * try to allocate them all.
	 */

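	/* 1 + 2 + ... + 2^max_order pages, i.e. exactly one block per order */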
	mm_size = PAGE_SIZE * ((1 << (max_order + 1)) - 1);

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
			       "buddy_init failed\n");

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	for (order = 0; order <= max_order; order++) {
		size = get_size(order, PAGE_SIZE);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_move_tail(&block->link, &blocks);
	}

	/* Should be completely full! */
	size = get_size(0, PAGE_SIZE);
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							   size, size, &tmp, flags),
			      "buddy_alloc unexpectedly succeeded, it should be full!");

	drm_buddy_free_list(&mm, &blocks);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_limit(struct kunit *test)
{
	u64 size = U64_MAX, start = 0;
	struct drm_buddy_block *block;
	unsigned long flags = 0;
	LIST_HEAD(allocated);
	struct drm_buddy mm;

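	/*
	 * Initialise with the largest possible size: max_order should come
	 * out as DRM_BUDDY_MAX_ORDER, and the largest block we can allocate
	 * should have exactly that order and size.
	 */
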
	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, PAGE_SIZE));

	KUNIT_EXPECT_EQ_MSG(test, mm.max_order, DRM_BUDDY_MAX_ORDER,
			    "mm.max_order(%d) != %d\n", mm.max_order,
			    DRM_BUDDY_MAX_ORDER);

	size = mm.chunk_size << mm.max_order;
	KUNIT_EXPECT_FALSE(test, drm_buddy_alloc_blocks(&mm, start, size, size,
							PAGE_SIZE, &allocated, flags));

	block = list_first_entry_or_null(&allocated, struct drm_buddy_block, link);
	KUNIT_EXPECT_TRUE(test, block);

	KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), mm.max_order,
			    "block order(%d) != %d\n",
			    drm_buddy_block_order(block), mm.max_order);

	KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_size(&mm, block),
			    BIT_ULL(mm.max_order) * PAGE_SIZE,
			    "block size(%llu) != %llu\n",
			    drm_buddy_block_size(&mm, block),
			    BIT_ULL(mm.max_order) * PAGE_SIZE);

	drm_buddy_free_list(&mm, &allocated);
	drm_buddy_fini(&mm);
}

static struct kunit_case drm_buddy_tests[] = {
	KUNIT_CASE(drm_test_buddy_alloc_limit),
	KUNIT_CASE(drm_test_buddy_alloc_optimistic),
	KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
	KUNIT_CASE(drm_test_buddy_alloc_pathological),
	{}
};

static struct kunit_suite drm_buddy_test_suite = {
	.name = "drm_buddy",
	.test_cases = drm_buddy_tests,
};

kunit_test_suite(drm_buddy_test_suite);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");