// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit test suite for GEM objects backed by shmem buffers
 *
 * Copyright (C) 2023 Red Hat, Inc.
 *
 * Author: Marco Pagani <marpagan@redhat.com>
 */

#include <linux/dma-buf.h>
#include <linux/iosys-map.h>
#include <linux/sizes.h>

#include <kunit/test.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_kunit_helpers.h>

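/*
 * The whole suite can be run with the KUnit wrapper, for example
 * (assuming the in-tree config fragment enabling CONFIG_DRM_KUNIT_TEST):
 *
 *   ./tools/testing/kunit/kunit.py run --kunitconfig=drivers/gpu/drm/tests
 */
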
#define TEST_SIZE		SZ_1M
#define TEST_BYTE		0xae

/*
 * Wrappers to avoid explicit type casting when passing action
 * functions to kunit_add_action().
 */
static void kfree_wrapper(void *ptr)
{
	const void *obj = ptr;

	kfree(obj);
}

static void sg_free_table_wrapper(void *ptr)
{
	struct sg_table *sgt = ptr;

	sg_free_table(sgt);
}

static void drm_gem_shmem_free_wrapper(void *ptr)
{
	struct drm_gem_shmem_object *shmem = ptr;

	drm_gem_shmem_free(shmem);
}

/*
 * Test creating a GEM object backed by a shmem buffer. The test case
 * succeeds if the GEM object is allocated with the shmem file node and
 * the object functions attributes set, and its size attribute matches
 * the requested size.
 */
static void drm_gem_shmem_test_obj_create(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
	KUNIT_EXPECT_EQ(test, shmem->base.size, TEST_SIZE);
	KUNIT_EXPECT_NOT_NULL(test, shmem->base.filp);
	KUNIT_EXPECT_NOT_NULL(test, shmem->base.funcs);

	drm_gem_shmem_free(shmem);
}

/*
 * Test creating a shmem GEM object from a scatter/gather table exported
 * via a DMA-BUF. The test case succeeds if the GEM object is created
 * with its shmem file node attribute equal to NULL and its sgt
 * attribute pointing to the imported scatter/gather table.
 */
static void drm_gem_shmem_test_obj_create_private(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *gem_obj;
	struct dma_buf buf_mock;
	struct dma_buf_attachment attach_mock;
	struct sg_table *sgt;
	char *buf;
	int ret;

	/* Create a mock scatter/gather table */
	buf = kunit_kzalloc(test, TEST_SIZE, GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, buf);

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, sgt);

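	/*
	 * Deferred actions registered with kunit_add_action_or_reset() run
	 * automatically when the test exits, even if a later assertion
	 * aborts it early, so the allocations above cannot leak.
	 */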
	ret = kunit_add_action_or_reset(test, kfree_wrapper, sgt);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
	KUNIT_ASSERT_EQ(test, ret, 0);

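	/* Point the single sg entry at the whole mock buffer */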
	sg_init_one(sgt->sgl, buf, TEST_SIZE);

	/*
	 * Set the DMA mask to 64 bits and map the sg_table; otherwise
	 * drm_gem_shmem_free() will cause a warning on debug kernels.
	 */
	ret = dma_set_mask(drm_dev->dev, DMA_BIT_MASK(64));
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = dma_map_sgtable(drm_dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* Init a mock DMA-BUF; the import helper only uses its size */
	buf_mock.size = TEST_SIZE;
	attach_mock.dmabuf = &buf_mock;

	gem_obj = drm_gem_shmem_prime_import_sg_table(drm_dev, &attach_mock, sgt);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gem_obj);
	KUNIT_EXPECT_EQ(test, gem_obj->size, TEST_SIZE);
	KUNIT_EXPECT_NULL(test, gem_obj->filp);
	KUNIT_EXPECT_NOT_NULL(test, gem_obj->funcs);

	/* The scatter/gather table will be freed by drm_gem_shmem_free() */
	kunit_remove_action(test, sg_free_table_wrapper, sgt);
	kunit_remove_action(test, kfree_wrapper, sgt);

	shmem = to_drm_gem_shmem_obj(gem_obj);
	KUNIT_EXPECT_PTR_EQ(test, shmem->sgt, sgt);

	drm_gem_shmem_free(shmem);
}

/*
 * Test pinning backing pages for a shmem GEM object. The test case
 * succeeds if a suitable number of backing pages are allocated, and
 * the pages_use_count counter is incremented.
 */
static void drm_gem_shmem_test_pin_pages(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	int i, ret;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
	KUNIT_EXPECT_NULL(test, shmem->pages);
	KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = drm_gem_shmem_pin(shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);
	KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
	KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1);

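	/* Check that every backing page was actually allocated */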
	for (i = 0; i < (shmem->base.size >> PAGE_SHIFT); i++)
		KUNIT_ASSERT_NOT_NULL(test, shmem->pages[i]);

	drm_gem_shmem_unpin(shmem);
	KUNIT_EXPECT_NULL(test, shmem->pages);
	KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0);
}

/*
 * Test creating a virtual mapping for a shmem GEM object. The test
 * case succeeds if the backing memory is mapped and the vmap_use_count
 * reference counter is incremented. Moreover, the test case writes and
 * then reads back a test pattern over the mapped memory.
 */
static void drm_gem_shmem_test_vmap(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	struct iosys_map map;
	int ret, i;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
	KUNIT_EXPECT_NULL(test, shmem->vaddr);
	KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = drm_gem_shmem_vmap(shmem, &map);
	KUNIT_ASSERT_EQ(test, ret, 0);
	KUNIT_ASSERT_NOT_NULL(test, shmem->vaddr);
	KUNIT_ASSERT_FALSE(test, iosys_map_is_null(&map));
	KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 1);

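	/* Fill the mapping with a test pattern, then read it back byte by byte */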
	iosys_map_memset(&map, 0, TEST_BYTE, TEST_SIZE);
	for (i = 0; i < TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, iosys_map_rd(&map, i, u8), TEST_BYTE);

	drm_gem_shmem_vunmap(shmem, &map);
	KUNIT_EXPECT_NULL(test, shmem->vaddr);
	KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
}

/*
 * Test exporting a scatter/gather table of pinned pages suitable for
 * PRIME usage from a shmem GEM object. The test case succeeds if a
 * scatter/gather table large enough to accommodate the backing memory
 * is successfully exported.
 */
static void drm_gem_shmem_test_get_pages_sgt(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	struct sg_table *sgt;
	struct scatterlist *sg;
	unsigned int si, len = 0;
	int ret;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = drm_gem_shmem_pin(shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
	KUNIT_EXPECT_NULL(test, shmem->sgt);

	ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
	KUNIT_ASSERT_EQ(test, ret, 0);

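	/* Walk the table and make sure the entries cover the whole buffer */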
	for_each_sgtable_sg(sgt, sg, si) {
		KUNIT_EXPECT_NOT_NULL(test, sg);
		len += sg->length;
	}

	KUNIT_EXPECT_GE(test, len, TEST_SIZE);
}

/*
 * Test pinning pages and exporting a scatter/gather table suitable for
 * driver usage from a shmem GEM object. The test case succeeds if the
 * backing pages are pinned and a scatter/gather table large enough to
 * accommodate the backing memory is successfully exported.
 */
static void drm_gem_shmem_test_get_sg_table(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	struct sg_table *sgt;
	struct scatterlist *sg;
	unsigned int si, len = 0;
	int ret;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* The scatter/gather table will be freed by drm_gem_shmem_free() */
	sgt = drm_gem_shmem_get_pages_sgt(shmem);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
	KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
	KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1);
	KUNIT_EXPECT_PTR_EQ(test, sgt, shmem->sgt);

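	/* As above, the combined entry lengths must span the whole object */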
	for_each_sgtable_sg(sgt, sg, si) {
		KUNIT_EXPECT_NOT_NULL(test, sg);
		len += sg->length;
	}

	KUNIT_EXPECT_GE(test, len, TEST_SIZE);
}

/*
 * Test updating the madvise state of a shmem GEM object. The test
 * case checks that the function for setting madv updates it only if
 * its current value is greater than or equal to zero and returns
 * false if it has a negative value.
 */
static void drm_gem_shmem_test_madvise(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
	KUNIT_ASSERT_EQ(test, shmem->madv, 0);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = drm_gem_shmem_madvise(shmem, 1);
	KUNIT_EXPECT_TRUE(test, ret);
	KUNIT_ASSERT_EQ(test, shmem->madv, 1);

	/* Set madv to a negative value */
	ret = drm_gem_shmem_madvise(shmem, -1);
	KUNIT_EXPECT_FALSE(test, ret);
	KUNIT_ASSERT_EQ(test, shmem->madv, -1);

	/* Check that madv cannot be set back to a positive value */
	ret = drm_gem_shmem_madvise(shmem, 0);
	KUNIT_EXPECT_FALSE(test, ret);
	KUNIT_ASSERT_EQ(test, shmem->madv, -1);
}

/*
 * Test purging a shmem GEM object. First, assert that a newly created
 * shmem GEM object is not purgeable. Then, set madvise to a positive
 * value and call drm_gem_shmem_get_pages_sgt() to pin and dma-map the
 * backing pages. Finally, assert that the shmem GEM object is now
 * purgeable and purge it.
 */
static void drm_gem_shmem_test_purge(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	struct sg_table *sgt;
	int ret;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = drm_gem_shmem_is_purgeable(shmem);
	KUNIT_EXPECT_FALSE(test, ret);

	ret = drm_gem_shmem_madvise(shmem, 1);
	KUNIT_EXPECT_TRUE(test, ret);

	/* The scatter/gather table will be freed by drm_gem_shmem_free() */
	sgt = drm_gem_shmem_get_pages_sgt(shmem);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);

	ret = drm_gem_shmem_is_purgeable(shmem);
	KUNIT_EXPECT_TRUE(test, ret);

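	/* Purging must drop the pages and sg_table and leave madv negative */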
	drm_gem_shmem_purge(shmem);
	KUNIT_EXPECT_NULL(test, shmem->pages);
	KUNIT_EXPECT_NULL(test, shmem->sgt);
	KUNIT_EXPECT_EQ(test, shmem->madv, -1);
}

static int drm_gem_shmem_test_init(struct kunit *test)
{
	struct device *dev;
	struct drm_device *drm_dev;

	/* Allocate a parent device */
	dev = drm_kunit_helper_alloc_device(test);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);

	/*
	 * The DRM core will automatically initialize the GEM core and create
	 * a DRM Memory Manager object which provides an address space pool
	 * for GEM object allocation.
	 */
	drm_dev = __drm_kunit_helper_alloc_drm_device(test, dev, sizeof(*drm_dev),
						      0, DRIVER_GEM);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, drm_dev);

	test->priv = drm_dev;

	return 0;
}

static struct kunit_case drm_gem_shmem_test_cases[] = {
	KUNIT_CASE(drm_gem_shmem_test_obj_create),
	KUNIT_CASE(drm_gem_shmem_test_obj_create_private),
	KUNIT_CASE(drm_gem_shmem_test_pin_pages),
	KUNIT_CASE(drm_gem_shmem_test_vmap),
	KUNIT_CASE(drm_gem_shmem_test_get_pages_sgt),
	KUNIT_CASE(drm_gem_shmem_test_get_sg_table),
	KUNIT_CASE(drm_gem_shmem_test_madvise),
	KUNIT_CASE(drm_gem_shmem_test_purge),
	{}
};

static struct kunit_suite drm_gem_shmem_suite = {
	.name = "drm_gem_shmem",
	.init = drm_gem_shmem_test_init,
	.test_cases = drm_gem_shmem_test_cases
};

kunit_test_suite(drm_gem_shmem_suite);

MODULE_DESCRIPTION("KUnit test suite for GEM objects backed by shmem buffers");
MODULE_LICENSE("GPL");