// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit test suite for GEM objects backed by shmem buffers
 *
 * Copyright (C) 2023 Red Hat, Inc.
 *
 * Author: Marco Pagani <marpagan@redhat.com>
 */

#include <linux/dma-buf.h>
#include <linux/iosys-map.h>
#include <linux/sizes.h>

#include <kunit/test.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_kunit_helpers.h>

#define TEST_SIZE		SZ_1M
#define TEST_BYTE		0xae

/*
 * Wrappers to avoid cast warnings when passing action functions
 * directly to kunit_add_action().
 */
KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *);

KUNIT_DEFINE_ACTION_WRAPPER(sg_free_table_wrapper, sg_free_table,
			    struct sg_table *);

KUNIT_DEFINE_ACTION_WRAPPER(drm_gem_shmem_free_wrapper, drm_gem_shmem_free,
			    struct drm_gem_shmem_object *);

/*
 * Test creating a GEM object backed by a shmem buffer. The test case
 * succeeds if the GEM object is successfully allocated with its shmem
 * file node and object functions attributes set, and its size
 * attribute equal to the requested size.
 */
static void drm_gem_shmem_test_obj_create(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
	KUNIT_EXPECT_EQ(test, shmem->base.size, TEST_SIZE);
	KUNIT_EXPECT_NOT_NULL(test, shmem->base.filp);
	KUNIT_EXPECT_NOT_NULL(test, shmem->base.funcs);

	drm_gem_shmem_free(shmem);
}

/*
 * Test creating a shmem GEM object from a scatter/gather table exported
 * via a DMA-BUF. The test case succeeds if the GEM object is successfully
 * created with the shmem file node attribute equal to NULL and the sgt
 * attribute pointing to the scatter/gather table that has been imported.
 */
static void drm_gem_shmem_test_obj_create_private(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *gem_obj;
	struct dma_buf buf_mock;
	struct dma_buf_attachment attach_mock;
	struct sg_table *sgt;
	char *buf;
	int ret;

	/* Create a mock scatter/gather table */
	buf = kunit_kzalloc(test, TEST_SIZE, GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, buf);

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, sgt);

	ret = kunit_add_action_or_reset(test, kfree_wrapper, sgt);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
	KUNIT_ASSERT_EQ(test, ret, 0);

	sg_init_one(sgt->sgl, buf, TEST_SIZE);

	/*
	 * Set the DMA mask to 64 bits and map the sg table; otherwise,
	 * drm_gem_shmem_free() will trigger a warning on debug kernels.
	 */
	ret = dma_set_mask(drm_dev->dev, DMA_BIT_MASK(64));
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = dma_map_sgtable(drm_dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* Init a mock DMA-BUF */
	buf_mock.size = TEST_SIZE;
	attach_mock.dmabuf = &buf_mock;

	gem_obj = drm_gem_shmem_prime_import_sg_table(drm_dev, &attach_mock, sgt);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gem_obj);
	KUNIT_EXPECT_EQ(test, gem_obj->size, TEST_SIZE);
	KUNIT_EXPECT_NULL(test, gem_obj->filp);
	KUNIT_EXPECT_NOT_NULL(test, gem_obj->funcs);

	/* The scatter/gather table will be freed by drm_gem_shmem_free */
	kunit_remove_action(test, sg_free_table_wrapper, sgt);
	kunit_remove_action(test, kfree_wrapper, sgt);

	shmem = to_drm_gem_shmem_obj(gem_obj);
	KUNIT_EXPECT_PTR_EQ(test, shmem->sgt, sgt);

	drm_gem_shmem_free(shmem);
}

/*
 * Test pinning backing pages for a shmem GEM object. The test case
 * succeeds if a suitable number of backing pages are allocated, and
 * the pages use counter is increased by one.
 */
static void drm_gem_shmem_test_pin_pages(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	int i, ret;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
	KUNIT_EXPECT_NULL(test, shmem->pages);
	KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

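	/* Pinning should allocate the backing pages and raise the use count to one */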
	ret = drm_gem_shmem_pin(shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);
	KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
	KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1);

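	/* Every page-sized chunk of the object must have a backing page */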
	for (i = 0; i < (shmem->base.size >> PAGE_SHIFT); i++)
		KUNIT_ASSERT_NOT_NULL(test, shmem->pages[i]);

	drm_gem_shmem_unpin(shmem);
	KUNIT_EXPECT_NULL(test, shmem->pages);
	KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0);
}

/*
 * Test creating a virtual mapping for a shmem GEM object. The test
 * case succeeds if the backing memory is mapped and the reference
 * counter for virtual mapping is increased by one. Moreover, the test
 * case writes and then reads a test pattern over the mapped memory.
 */
static void drm_gem_shmem_test_vmap(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	struct iosys_map map;
	int ret, i;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
	KUNIT_EXPECT_NULL(test, shmem->vaddr);
	KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = drm_gem_shmem_vmap(shmem, &map);
	KUNIT_ASSERT_EQ(test, ret, 0);
	KUNIT_ASSERT_NOT_NULL(test, shmem->vaddr);
	KUNIT_ASSERT_FALSE(test, iosys_map_is_null(&map));
	KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 1);

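	/* Fill the mapping with a test pattern and verify it reads back */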
	iosys_map_memset(&map, 0, TEST_BYTE, TEST_SIZE);
	for (i = 0; i < TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, iosys_map_rd(&map, i, u8), TEST_BYTE);

	drm_gem_shmem_vunmap(shmem, &map);
	KUNIT_EXPECT_NULL(test, shmem->vaddr);
	KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0);
}

/*
 * Test exporting a scatter/gather table of pinned pages suitable for
 * PRIME usage from a shmem GEM object. The test case succeeds if a
 * scatter/gather table large enough to accommodate the backing memory
 * is successfully exported.
 */
static void drm_gem_shmem_test_get_pages_sgt(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	struct sg_table *sgt;
	struct scatterlist *sg;
	unsigned int si, len = 0;
	int ret;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

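	/* Pin first: the sg table is built from the already-allocated pages */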
	ret = drm_gem_shmem_pin(shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

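	/* The exported table is a new one and must not be cached in shmem->sgt */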
	sgt = drm_gem_shmem_get_sg_table(shmem);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
	KUNIT_EXPECT_NULL(test, shmem->sgt);

	ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
	KUNIT_ASSERT_EQ(test, ret, 0);

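	/* Sum the segment lengths; together they must cover the whole buffer */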
	for_each_sgtable_sg(sgt, sg, si) {
		KUNIT_EXPECT_NOT_NULL(test, sg);
		len += sg->length;
	}

	KUNIT_EXPECT_GE(test, len, TEST_SIZE);
}

/*
 * Test pinning pages and exporting a scatter/gather table suitable for
 * driver usage from a shmem GEM object. The test case succeeds if the
 * backing pages are pinned and a scatter/gather table large enough to
 * accommodate the backing memory is successfully exported.
 */
static void drm_gem_shmem_test_get_sg_table(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	struct sg_table *sgt;
	struct scatterlist *sg;
	unsigned int si, len = 0;
	int ret;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* The scatter/gather table will be freed by drm_gem_shmem_free */
	sgt = drm_gem_shmem_get_pages_sgt(shmem);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
	KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
	KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1);
	KUNIT_EXPECT_PTR_EQ(test, sgt, shmem->sgt);

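	/* The table segments, taken together, must span the entire backing memory */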
	for_each_sgtable_sg(sgt, sg, si) {
		KUNIT_EXPECT_NOT_NULL(test, sg);
		len += sg->length;
	}

	KUNIT_EXPECT_GE(test, len, TEST_SIZE);
}

/*
 * Test updating the madvise state of a shmem GEM object. The test
 * case checks that the function for setting madv updates it only if
 * its current value is greater than or equal to zero and returns
 * false if it has a negative value.
 */
static void drm_gem_shmem_test_madvise(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
	KUNIT_ASSERT_EQ(test, shmem->madv, 0);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

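	/* madv starts at zero, so setting a positive value must succeed */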
	ret = drm_gem_shmem_madvise(shmem, 1);
	KUNIT_EXPECT_TRUE(test, ret);
	KUNIT_ASSERT_EQ(test, shmem->madv, 1);

	/* Set madv to a negative value */
	ret = drm_gem_shmem_madvise(shmem, -1);
	KUNIT_EXPECT_FALSE(test, ret);
	KUNIT_ASSERT_EQ(test, shmem->madv, -1);

	/* Check that madv cannot be set back to a non-negative value */
	ret = drm_gem_shmem_madvise(shmem, 0);
	KUNIT_EXPECT_FALSE(test, ret);
	KUNIT_ASSERT_EQ(test, shmem->madv, -1);
}

/*
 * Test purging a shmem GEM object. First, assert that a newly created
 * shmem GEM object is not purgeable. Then, set madvise to a positive
 * value and call drm_gem_shmem_get_pages_sgt() to pin and dma-map the
 * backing pages. Finally, assert that the shmem GEM object is now
 * purgeable and purge it.
 */
static void drm_gem_shmem_test_purge(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	struct sg_table *sgt;
	int ret;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

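	/* Not purgeable yet: madv is zero and no sgt has been set up */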
	ret = drm_gem_shmem_is_purgeable(shmem);
	KUNIT_EXPECT_FALSE(test, ret);

	ret = drm_gem_shmem_madvise(shmem, 1);
	KUNIT_EXPECT_TRUE(test, ret);

	/* The scatter/gather table will be freed by drm_gem_shmem_free */
	sgt = drm_gem_shmem_get_pages_sgt(shmem);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);

	ret = drm_gem_shmem_is_purgeable(shmem);
	KUNIT_EXPECT_TRUE(test, ret);

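	/* Purging must release pages and sgt, and mark the object as purged */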
	drm_gem_shmem_purge(shmem);
	KUNIT_EXPECT_NULL(test, shmem->pages);
	KUNIT_EXPECT_NULL(test, shmem->sgt);
	KUNIT_EXPECT_EQ(test, shmem->madv, -1);
}

static int drm_gem_shmem_test_init(struct kunit *test)
{
	struct device *dev;
	struct drm_device *drm_dev;

	/* Allocate a parent device */
	dev = drm_kunit_helper_alloc_device(test);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);

	/*
	 * The DRM core will automatically initialize the GEM core and create
	 * a DRM Memory Manager object, which provides an address space pool
	 * for GEM object allocation.
	 */
	drm_dev = __drm_kunit_helper_alloc_drm_device(test, dev, sizeof(*drm_dev),
						      0, DRIVER_GEM);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, drm_dev);

	test->priv = drm_dev;

	return 0;
}

static struct kunit_case drm_gem_shmem_test_cases[] = {
	KUNIT_CASE(drm_gem_shmem_test_obj_create),
	KUNIT_CASE(drm_gem_shmem_test_obj_create_private),
	KUNIT_CASE(drm_gem_shmem_test_pin_pages),
	KUNIT_CASE(drm_gem_shmem_test_vmap),
	KUNIT_CASE(drm_gem_shmem_test_get_pages_sgt),
	KUNIT_CASE(drm_gem_shmem_test_get_sg_table),
	KUNIT_CASE(drm_gem_shmem_test_madvise),
	KUNIT_CASE(drm_gem_shmem_test_purge),
	{}
};

static struct kunit_suite drm_gem_shmem_suite = {
	.name = "drm_gem_shmem",
	.init = drm_gem_shmem_test_init,
	.test_cases = drm_gem_shmem_test_cases
};

kunit_test_suite(drm_gem_shmem_suite);

MODULE_DESCRIPTION("KUnit test suite for GEM objects backed by shmem buffers");
MODULE_LICENSE("GPL");