xref: /linux/drivers/gpu/drm/ttm/tests/ttm_pool_test.c (revision d53adc244fbf965d7efeefb278ff8f2664bbe20e)
1 // SPDX-License-Identifier: GPL-2.0 AND MIT
2 /*
3  * Copyright © 2023 Intel Corporation
4  */
5 #include <linux/mm.h>
6 
7 #include <drm/ttm/ttm_tt.h>
8 #include <drm/ttm/ttm_pool.h>
9 
10 #include "ttm_kunit_helpers.h"
11 #include "../ttm_pool_internal.h"
12 
/* One parameterized case for the basic alloc tests. */
struct ttm_pool_test_case {
	/* Human-readable case name reported by KUnit. */
	const char *description;
	/* Allocation order: the test allocates 1 << order pages. */
	unsigned int order;
	/* Initialize the pool with coherent DMA allocations enabled. */
	bool use_dma_alloc;
};
18 
/* Per-test state, allocated in ttm_pool_test_init() and stored in test->priv. */
struct ttm_pool_test_priv {
	/* Mock TTM device set, created by ttm_test_devices_basic(). */
	struct ttm_test_devices *devs;

	/* Used to create mock ttm_tts */
	struct ttm_buffer_object *mock_bo;
};
25 
/* Default operation context shared by all allocations in this suite. */
static struct ttm_operation_ctx simple_ctx = {
	.interruptible = true,
	.no_wait_gpu = false,
};
30 
31 static int ttm_pool_test_init(struct kunit *test)
32 {
33 	struct ttm_pool_test_priv *priv;
34 
35 	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
36 	KUNIT_ASSERT_NOT_NULL(test, priv);
37 
38 	priv->devs = ttm_test_devices_basic(test);
39 	test->priv = priv;
40 
41 	return 0;
42 }
43 
44 static void ttm_pool_test_fini(struct kunit *test)
45 {
46 	struct ttm_pool_test_priv *priv = test->priv;
47 
48 	ttm_test_devices_put(test, priv->devs);
49 }
50 
51 static struct ttm_tt *ttm_tt_kunit_init(struct kunit *test,
52 					u32 page_flags,
53 					enum ttm_caching caching,
54 					size_t size)
55 {
56 	struct ttm_pool_test_priv *priv = test->priv;
57 	struct ttm_buffer_object *bo;
58 	struct ttm_tt *tt;
59 	int err;
60 
61 	bo = ttm_bo_kunit_init(test, priv->devs, size, NULL);
62 	KUNIT_ASSERT_NOT_NULL(test, bo);
63 	priv->mock_bo = bo;
64 
65 	tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
66 	KUNIT_ASSERT_NOT_NULL(test, tt);
67 
68 	err = ttm_tt_init(tt, priv->mock_bo, page_flags, caching, 0);
69 	KUNIT_ASSERT_EQ(test, err, 0);
70 
71 	return tt;
72 }
73 
74 static struct ttm_pool *ttm_pool_pre_populated(struct kunit *test,
75 					       size_t size,
76 					       enum ttm_caching caching)
77 {
78 	struct ttm_pool_test_priv *priv = test->priv;
79 	struct ttm_test_devices *devs = priv->devs;
80 	struct ttm_pool *pool;
81 	struct ttm_tt *tt;
82 	int err;
83 
84 	tt = ttm_tt_kunit_init(test, 0, caching, size);
85 	KUNIT_ASSERT_NOT_NULL(test, tt);
86 
87 	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
88 	KUNIT_ASSERT_NOT_NULL(test, pool);
89 
90 	ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
91 
92 	err = ttm_pool_alloc(pool, tt, &simple_ctx);
93 	KUNIT_ASSERT_EQ(test, err, 0);
94 
95 	ttm_pool_free(pool, tt);
96 	ttm_tt_fini(tt);
97 
98 	return pool;
99 }
100 
/*
 * Parameters for the basic alloc tests: allocation order plus whether
 * the pool uses coherent DMA allocations. Orders above MAX_PAGE_ORDER
 * exercise splitting the request into multiple smaller blocks.
 */
static const struct ttm_pool_test_case ttm_pool_basic_cases[] = {
	{
		.description = "One page",
		.order = 0,
	},
	{
		.description = "More than one page",
		.order = 2,
	},
	{
		.description = "Above the allocation limit",
		.order = MAX_PAGE_ORDER + 1,
	},
	{
		.description = "One page, with coherent DMA mappings enabled",
		.order = 0,
		.use_dma_alloc = true,
	},
	{
		.description = "Above the allocation limit, with coherent DMA mappings enabled",
		.order = MAX_PAGE_ORDER + 1,
		.use_dma_alloc = true,
	},
};
125 
/* Copy a test case's description into the KUnit parameter name buffer. */
static void ttm_pool_alloc_case_desc(const struct ttm_pool_test_case *t,
				     char *desc)
{
	strscpy(desc, t->description, KUNIT_PARAM_DESC_SIZE);
}

/* Generates ttm_pool_alloc_basic_gen_params over ttm_pool_basic_cases. */
KUNIT_ARRAY_PARAM(ttm_pool_alloc_basic, ttm_pool_basic_cases,
		  ttm_pool_alloc_case_desc);
134 
/*
 * Core allocation test: allocate 1 << order pages from a fresh pool and
 * check how the backing pages were produced, both below and above
 * MAX_PAGE_ORDER, with and without coherent DMA mappings.
 */
static void ttm_pool_alloc_basic(struct kunit *test)
{
	struct ttm_pool_test_priv *priv = test->priv;
	struct ttm_test_devices *devs = priv->devs;
	const struct ttm_pool_test_case *params = test->param_value;
	struct ttm_tt *tt;
	struct ttm_pool *pool;
	struct page *fst_page, *last_page;
	enum ttm_caching caching = ttm_uncached;
	unsigned int expected_num_pages = 1 << params->order;
	size_t size = expected_num_pages * PAGE_SIZE;
	int err;

	tt = ttm_tt_kunit_init(test, 0, caching, size);
	KUNIT_ASSERT_NOT_NULL(test, tt);

	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, pool);

	ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, params->use_dma_alloc,
		      false);

	/* ttm_pool_init() must have faithfully recorded its arguments. */
	KUNIT_ASSERT_PTR_EQ(test, pool->dev, devs->dev);
	KUNIT_ASSERT_EQ(test, pool->nid, NUMA_NO_NODE);
	KUNIT_ASSERT_EQ(test, ttm_pool_uses_dma_alloc(pool),
			params->use_dma_alloc);

	err = ttm_pool_alloc(pool, tt, &simple_ctx);
	KUNIT_ASSERT_EQ(test, err, 0);
	KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);

	/* Only the first and last pages are inspected below. */
	fst_page = tt->pages[0];
	last_page = tt->pages[tt->num_pages - 1];

	if (params->order <= MAX_PAGE_ORDER) {
		if (params->use_dma_alloc) {
			/* With dma_alloc, ->private is non-NULL on every page. */
			KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
			KUNIT_ASSERT_NOT_NULL(test, (void *)last_page->private);
		} else {
			/* System pages record the allocation order instead. */
			KUNIT_ASSERT_EQ(test, fst_page->private, params->order);
		}
	} else {
		if (params->use_dma_alloc) {
			KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
			KUNIT_ASSERT_NULL(test, (void *)last_page->private);
		} else {
			/*
			 * We expect to alloc one big block, followed by
			 * order 0 blocks
			 */
			KUNIT_ASSERT_EQ(test, fst_page->private,
					min_t(unsigned int, MAX_PAGE_ORDER,
					      params->order));
			KUNIT_ASSERT_EQ(test, last_page->private, 0);
		}
	}

	ttm_pool_free(pool, tt);
	ttm_tt_fini(tt);
	ttm_pool_fini(pool);
}
196 
197 static void ttm_pool_alloc_basic_dma_addr(struct kunit *test)
198 {
199 	struct ttm_pool_test_priv *priv = test->priv;
200 	struct ttm_test_devices *devs = priv->devs;
201 	const struct ttm_pool_test_case *params = test->param_value;
202 	struct ttm_tt *tt;
203 	struct ttm_pool *pool;
204 	struct ttm_buffer_object *bo;
205 	dma_addr_t dma1, dma2;
206 	enum ttm_caching caching = ttm_uncached;
207 	unsigned int expected_num_pages = 1 << params->order;
208 	size_t size = expected_num_pages * PAGE_SIZE;
209 	int err;
210 
211 	tt = kunit_kzalloc(test, sizeof(*tt), GFP_KERNEL);
212 	KUNIT_ASSERT_NOT_NULL(test, tt);
213 
214 	bo = ttm_bo_kunit_init(test, devs, size, NULL);
215 	KUNIT_ASSERT_NOT_NULL(test, bo);
216 
217 	err = ttm_sg_tt_init(tt, bo, 0, caching);
218 	KUNIT_ASSERT_EQ(test, err, 0);
219 
220 	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
221 	KUNIT_ASSERT_NOT_NULL(test, pool);
222 
223 	ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
224 
225 	err = ttm_pool_alloc(pool, tt, &simple_ctx);
226 	KUNIT_ASSERT_EQ(test, err, 0);
227 	KUNIT_ASSERT_EQ(test, tt->num_pages, expected_num_pages);
228 
229 	dma1 = tt->dma_address[0];
230 	dma2 = tt->dma_address[tt->num_pages - 1];
231 
232 	KUNIT_ASSERT_NOT_NULL(test, (void *)(uintptr_t)dma1);
233 	KUNIT_ASSERT_NOT_NULL(test, (void *)(uintptr_t)dma2);
234 
235 	ttm_pool_free(pool, tt);
236 	ttm_tt_fini(tt);
237 	ttm_pool_fini(pool);
238 }
239 
240 static void ttm_pool_alloc_order_caching_match(struct kunit *test)
241 {
242 	struct ttm_tt *tt;
243 	struct ttm_pool *pool;
244 	struct ttm_pool_type *pt;
245 	enum ttm_caching caching = ttm_uncached;
246 	unsigned int order = 0;
247 	size_t size = PAGE_SIZE;
248 	int err;
249 
250 	pool = ttm_pool_pre_populated(test, size, caching);
251 
252 	pt = &pool->caching[caching].orders[order];
253 	KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
254 
255 	tt = ttm_tt_kunit_init(test, 0, caching, size);
256 	KUNIT_ASSERT_NOT_NULL(test, tt);
257 
258 	err = ttm_pool_alloc(pool, tt, &simple_ctx);
259 	KUNIT_ASSERT_EQ(test, err, 0);
260 
261 	KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
262 
263 	ttm_pool_free(pool, tt);
264 	ttm_tt_fini(tt);
265 	ttm_pool_fini(pool);
266 }
267 
268 static void ttm_pool_alloc_caching_mismatch(struct kunit *test)
269 {
270 	struct ttm_tt *tt;
271 	struct ttm_pool *pool;
272 	struct ttm_pool_type *pt_pool, *pt_tt;
273 	enum ttm_caching tt_caching = ttm_uncached;
274 	enum ttm_caching pool_caching = ttm_cached;
275 	size_t size = PAGE_SIZE;
276 	unsigned int order = 0;
277 	int err;
278 
279 	pool = ttm_pool_pre_populated(test, size, pool_caching);
280 
281 	pt_pool = &pool->caching[pool_caching].orders[order];
282 	pt_tt = &pool->caching[tt_caching].orders[order];
283 
284 	tt = ttm_tt_kunit_init(test, 0, tt_caching, size);
285 	KUNIT_ASSERT_NOT_NULL(test, tt);
286 
287 	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
288 	KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
289 
290 	err = ttm_pool_alloc(pool, tt, &simple_ctx);
291 	KUNIT_ASSERT_EQ(test, err, 0);
292 
293 	ttm_pool_free(pool, tt);
294 	ttm_tt_fini(tt);
295 
296 	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
297 	KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
298 
299 	ttm_pool_fini(pool);
300 }
301 
302 static void ttm_pool_alloc_order_mismatch(struct kunit *test)
303 {
304 	struct ttm_tt *tt;
305 	struct ttm_pool *pool;
306 	struct ttm_pool_type *pt_pool, *pt_tt;
307 	enum ttm_caching caching = ttm_uncached;
308 	unsigned int order = 2;
309 	size_t fst_size = (1 << order) * PAGE_SIZE;
310 	size_t snd_size = PAGE_SIZE;
311 	int err;
312 
313 	pool = ttm_pool_pre_populated(test, fst_size, caching);
314 
315 	pt_pool = &pool->caching[caching].orders[order];
316 	pt_tt = &pool->caching[caching].orders[0];
317 
318 	tt = ttm_tt_kunit_init(test, 0, caching, snd_size);
319 	KUNIT_ASSERT_NOT_NULL(test, tt);
320 
321 	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
322 	KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
323 
324 	err = ttm_pool_alloc(pool, tt, &simple_ctx);
325 	KUNIT_ASSERT_EQ(test, err, 0);
326 
327 	ttm_pool_free(pool, tt);
328 	ttm_tt_fini(tt);
329 
330 	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
331 	KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
332 
333 	ttm_pool_fini(pool);
334 }
335 
336 static void ttm_pool_free_dma_alloc(struct kunit *test)
337 {
338 	struct ttm_pool_test_priv *priv = test->priv;
339 	struct ttm_test_devices *devs = priv->devs;
340 	struct ttm_tt *tt;
341 	struct ttm_pool *pool;
342 	struct ttm_pool_type *pt;
343 	enum ttm_caching caching = ttm_uncached;
344 	unsigned int order = 2;
345 	size_t size = (1 << order) * PAGE_SIZE;
346 
347 	tt = ttm_tt_kunit_init(test, 0, caching, size);
348 	KUNIT_ASSERT_NOT_NULL(test, tt);
349 
350 	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
351 	KUNIT_ASSERT_NOT_NULL(test, pool);
352 
353 	ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
354 	ttm_pool_alloc(pool, tt, &simple_ctx);
355 
356 	pt = &pool->caching[caching].orders[order];
357 	KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
358 
359 	ttm_pool_free(pool, tt);
360 	ttm_tt_fini(tt);
361 
362 	KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
363 
364 	ttm_pool_fini(pool);
365 }
366 
367 static void ttm_pool_free_no_dma_alloc(struct kunit *test)
368 {
369 	struct ttm_pool_test_priv *priv = test->priv;
370 	struct ttm_test_devices *devs = priv->devs;
371 	struct ttm_tt *tt;
372 	struct ttm_pool *pool;
373 	struct ttm_pool_type *pt;
374 	enum ttm_caching caching = ttm_uncached;
375 	unsigned int order = 2;
376 	size_t size = (1 << order) * PAGE_SIZE;
377 
378 	tt = ttm_tt_kunit_init(test, 0, caching, size);
379 	KUNIT_ASSERT_NOT_NULL(test, tt);
380 
381 	pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
382 	KUNIT_ASSERT_NOT_NULL(test, pool);
383 
384 	ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, false, false);
385 	ttm_pool_alloc(pool, tt, &simple_ctx);
386 
387 	pt = &pool->caching[caching].orders[order];
388 	KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
389 
390 	ttm_pool_free(pool, tt);
391 	ttm_tt_fini(tt);
392 
393 	KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
394 
395 	ttm_pool_fini(pool);
396 }
397 
398 static void ttm_pool_fini_basic(struct kunit *test)
399 {
400 	struct ttm_pool *pool;
401 	struct ttm_pool_type *pt;
402 	enum ttm_caching caching = ttm_uncached;
403 	unsigned int order = 0;
404 	size_t size = PAGE_SIZE;
405 
406 	pool = ttm_pool_pre_populated(test, size, caching);
407 	pt = &pool->caching[caching].orders[order];
408 
409 	KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
410 
411 	ttm_pool_fini(pool);
412 
413 	KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
414 }
415 
/* All pool tests; the first two run once per ttm_pool_basic_cases entry. */
static struct kunit_case ttm_pool_test_cases[] = {
	KUNIT_CASE_PARAM(ttm_pool_alloc_basic, ttm_pool_alloc_basic_gen_params),
	KUNIT_CASE_PARAM(ttm_pool_alloc_basic_dma_addr,
			 ttm_pool_alloc_basic_gen_params),
	KUNIT_CASE(ttm_pool_alloc_order_caching_match),
	KUNIT_CASE(ttm_pool_alloc_caching_mismatch),
	KUNIT_CASE(ttm_pool_alloc_order_mismatch),
	KUNIT_CASE(ttm_pool_free_dma_alloc),
	KUNIT_CASE(ttm_pool_free_no_dma_alloc),
	KUNIT_CASE(ttm_pool_fini_basic),
	{}
};
428 
/* Suite definition: init/exit run around every individual test case. */
static struct kunit_suite ttm_pool_test_suite = {
	.name = "ttm_pool",
	.init = ttm_pool_test_init,
	.exit = ttm_pool_test_fini,
	.test_cases = ttm_pool_test_cases,
};

kunit_test_suites(&ttm_pool_test_suite);

MODULE_DESCRIPTION("KUnit tests for ttm_pool APIs");
MODULE_LICENSE("GPL and additional rights");
440