// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit test suite for GEM objects backed by shmem buffers
 *
 * Copyright (C) 2023 Red Hat, Inc.
 *
 * Author: Marco Pagani <marpagan@redhat.com>
 */

#include <linux/dma-buf.h>
#include <linux/iosys-map.h>
#include <linux/sizes.h>

#include <kunit/test.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_kunit_helpers.h>

#define TEST_SIZE		SZ_1M
#define TEST_BYTE		0xae

/*
 * Wrappers to avoid cast warnings when passing action functions
 * directly to kunit_add_action().
 */
KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *);

KUNIT_DEFINE_ACTION_WRAPPER(sg_free_table_wrapper, sg_free_table,
			    struct sg_table *);

KUNIT_DEFINE_ACTION_WRAPPER(drm_gem_shmem_free_wrapper, drm_gem_shmem_free,
			    struct drm_gem_shmem_object *);

/*
 * Test creating a shmem GEM object backed by a shmem buffer. The test
 * case succeeds if the GEM object is successfully allocated with the
 * shmem file node and object functions attributes set, and the size
 * attribute is equal to the correct size.
 */
static void drm_gem_shmem_test_obj_create(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
	KUNIT_EXPECT_EQ(test, shmem->base.size, TEST_SIZE);
	KUNIT_EXPECT_NOT_NULL(test, shmem->base.filp);
	KUNIT_EXPECT_NOT_NULL(test, shmem->base.funcs);

	drm_gem_shmem_free(shmem);
}

/*
 * Test creating a shmem GEM object from a scatter/gather table exported
 * via a DMA-BUF. The test case succeeds if the GEM object is successfully
 * created with the shmem file node attribute equal to NULL and the sgt
 * attribute pointing to the scatter/gather table that has been imported.
 */
static void drm_gem_shmem_test_obj_create_private(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *gem_obj;
	struct dma_buf buf_mock;
	struct dma_buf_attachment attach_mock;
	struct sg_table *sgt;
	char *buf;
	int ret;

	/* Create a mock scatter/gather table */
	buf = kunit_kzalloc(test, TEST_SIZE, GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, buf);

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, sgt);

	ret = kunit_add_action_or_reset(test, kfree_wrapper, sgt);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
	KUNIT_ASSERT_EQ(test, ret, 0);

	sg_init_one(sgt->sgl, buf, TEST_SIZE);

	/*
	 * Set the DMA mask to 64 bits and map the sg table, otherwise
	 * drm_gem_shmem_free() will cause a warning on debug kernels.
	 */
	ret = dma_set_mask(drm_dev->dev, DMA_BIT_MASK(64));
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = dma_map_sgtable(drm_dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* Init a mock DMA-BUF */
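	/* Only the size field is expected to be read by the import helper */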
	buf_mock.size = TEST_SIZE;
	attach_mock.dmabuf = &buf_mock;

	gem_obj = drm_gem_shmem_prime_import_sg_table(drm_dev, &attach_mock, sgt);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gem_obj);
	KUNIT_EXPECT_EQ(test, gem_obj->size, TEST_SIZE);
	KUNIT_EXPECT_NULL(test, gem_obj->filp);
	KUNIT_EXPECT_NOT_NULL(test, gem_obj->funcs);

	/* The scatter/gather table will be freed by drm_gem_shmem_free */
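	/* Remove the deferred cleanup actions to avoid a double free */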
	kunit_remove_action(test, sg_free_table_wrapper, sgt);
	kunit_remove_action(test, kfree_wrapper, sgt);

	shmem = to_drm_gem_shmem_obj(gem_obj);
	KUNIT_EXPECT_PTR_EQ(test, shmem->sgt, sgt);

	drm_gem_shmem_free(shmem);
}

/*
 * Test pinning backing pages for a shmem GEM object. The test case
 * succeeds if a suitable number of backing pages are allocated, and
 * the pages use count is increased by one.
 */
static void drm_gem_shmem_test_pin_pages(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	int i, ret;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
	KUNIT_EXPECT_NULL(test, shmem->pages);
	KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 0);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = drm_gem_shmem_pin(shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);
	KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
	KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 1);

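	/* Every page backing the buffer should now be allocated */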
	for (i = 0; i < (shmem->base.size >> PAGE_SHIFT); i++)
		KUNIT_ASSERT_NOT_NULL(test, shmem->pages[i]);

	drm_gem_shmem_unpin(shmem);
	KUNIT_EXPECT_NULL(test, shmem->pages);
	KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 0);
}

/*
 * Test creating a virtual mapping for a shmem GEM object. The test
 * case succeeds if the backing memory is mapped and the reference
 * counter for virtual mapping is increased by one. Moreover, the test
 * case writes and then reads a test pattern over the mapped memory.
 */
static void drm_gem_shmem_test_vmap(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	struct iosys_map map;
	int ret, i;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
	KUNIT_EXPECT_NULL(test, shmem->vaddr);
	KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 0);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = drm_gem_shmem_vmap_locked(shmem, &map);
	KUNIT_ASSERT_EQ(test, ret, 0);
	KUNIT_ASSERT_NOT_NULL(test, shmem->vaddr);
	KUNIT_ASSERT_FALSE(test, iosys_map_is_null(&map));
	KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 1);

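	/* Write a test pattern and read it back through the mapping */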
	iosys_map_memset(&map, 0, TEST_BYTE, TEST_SIZE);
	for (i = 0; i < TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, iosys_map_rd(&map, i, u8), TEST_BYTE);

	drm_gem_shmem_vunmap_locked(shmem, &map);
	KUNIT_EXPECT_NULL(test, shmem->vaddr);
	KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 0);
}

/*
 * Test exporting a scatter/gather table of pinned pages suitable for
 * PRIME usage from a shmem GEM object. The test case succeeds if a
 * scatter/gather table large enough to accommodate the backing memory
 * is successfully exported.
 */
static void drm_gem_shmem_test_get_pages_sgt(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	struct sg_table *sgt;
	struct scatterlist *sg;
	unsigned int si, len = 0;
	int ret;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = drm_gem_shmem_pin(shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
	KUNIT_EXPECT_NULL(test, shmem->sgt);

	ret = kunit_add_action_or_reset(test, kfree_wrapper, sgt);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
	KUNIT_ASSERT_EQ(test, ret, 0);

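	/* The summed segment lengths must cover the whole backing buffer */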
	for_each_sgtable_sg(sgt, sg, si) {
		KUNIT_EXPECT_NOT_NULL(test, sg);
		len += sg->length;
	}

	KUNIT_EXPECT_GE(test, len, TEST_SIZE);
}

/*
 * Test pinning pages and exporting a scatter/gather table suitable for
 * driver usage from a shmem GEM object. The test case succeeds if the
 * backing pages are pinned and a scatter/gather table large enough to
 * accommodate the backing memory is successfully exported.
 */
static void drm_gem_shmem_test_get_sg_table(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	struct sg_table *sgt;
	struct scatterlist *sg;
	unsigned int si, len = 0;
	int ret;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* The scatter/gather table will be freed by drm_gem_shmem_free */
	sgt = drm_gem_shmem_get_pages_sgt(shmem);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
	KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
	KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 1);
	KUNIT_EXPECT_PTR_EQ(test, sgt, shmem->sgt);

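	/* The exported table must span the entire backing buffer */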
	for_each_sgtable_sg(sgt, sg, si) {
		KUNIT_EXPECT_NOT_NULL(test, sg);
		len += sg->length;
	}

	KUNIT_EXPECT_GE(test, len, TEST_SIZE);
}

/*
 * Test updating the madvise state of a shmem GEM object. The test
 * case checks that the function for setting madv updates it only if
 * its current value is greater than or equal to zero and returns
 * false if it has a negative value.
 */
static void drm_gem_shmem_test_madvise(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
	KUNIT_ASSERT_EQ(test, shmem->madv, 0);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

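	/* madv can be updated while its current value is non-negative */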
	ret = drm_gem_shmem_madvise_locked(shmem, 1);
	KUNIT_EXPECT_TRUE(test, ret);
	KUNIT_ASSERT_EQ(test, shmem->madv, 1);

	/* Set madv to a negative value */
	ret = drm_gem_shmem_madvise_locked(shmem, -1);
	KUNIT_EXPECT_FALSE(test, ret);
	KUNIT_ASSERT_EQ(test, shmem->madv, -1);

	/* Check that madv cannot be set back to a positive value */
	ret = drm_gem_shmem_madvise_locked(shmem, 0);
	KUNIT_EXPECT_FALSE(test, ret);
	KUNIT_ASSERT_EQ(test, shmem->madv, -1);
}

/*
 * Test purging a shmem GEM object. First, assert that a newly created
 * shmem GEM object is not purgeable. Then, set madvise to a positive
 * value and call drm_gem_shmem_get_pages_sgt() to pin and dma-map the
 * backing pages. Finally, assert that the shmem GEM object is now
 * purgeable and purge it.
 */
static void drm_gem_shmem_test_purge(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	struct sg_table *sgt;
	int ret;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = drm_gem_shmem_is_purgeable(shmem);
	KUNIT_EXPECT_FALSE(test, ret);

	ret = drm_gem_shmem_madvise_locked(shmem, 1);
	KUNIT_EXPECT_TRUE(test, ret);

	/* The scatter/gather table will be freed by drm_gem_shmem_free */
	sgt = drm_gem_shmem_get_pages_sgt(shmem);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);

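	/* With madv > 0 and backing pages mapped, purging is now allowed */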
	ret = drm_gem_shmem_is_purgeable(shmem);
	KUNIT_EXPECT_TRUE(test, ret);

	drm_gem_shmem_purge_locked(shmem);
	KUNIT_EXPECT_NULL(test, shmem->pages);
	KUNIT_EXPECT_NULL(test, shmem->sgt);
	KUNIT_EXPECT_EQ(test, shmem->madv, -1);
}

static int drm_gem_shmem_test_init(struct kunit *test)
{
	struct device *dev;
	struct drm_device *drm_dev;

	/* Allocate a parent device */
	dev = drm_kunit_helper_alloc_device(test);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);

	/*
	 * The DRM core will automatically initialize the GEM core and create
	 * a DRM Memory Manager object which provides an address space pool
	 * for GEM object allocation.
	 */
	drm_dev = __drm_kunit_helper_alloc_drm_device(test, dev, sizeof(*drm_dev),
						      0, DRIVER_GEM);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, drm_dev);

	test->priv = drm_dev;

	return 0;
}

static struct kunit_case drm_gem_shmem_test_cases[] = {
	KUNIT_CASE(drm_gem_shmem_test_obj_create),
	KUNIT_CASE(drm_gem_shmem_test_obj_create_private),
	KUNIT_CASE(drm_gem_shmem_test_pin_pages),
	KUNIT_CASE(drm_gem_shmem_test_vmap),
	KUNIT_CASE(drm_gem_shmem_test_get_pages_sgt),
	KUNIT_CASE(drm_gem_shmem_test_get_sg_table),
	KUNIT_CASE(drm_gem_shmem_test_madvise),
	KUNIT_CASE(drm_gem_shmem_test_purge),
	{}
};

static struct kunit_suite drm_gem_shmem_suite = {
	.name = "drm_gem_shmem",
	.init = drm_gem_shmem_test_init,
	.test_cases = drm_gem_shmem_test_cases
};

kunit_test_suite(drm_gem_shmem_suite);

MODULE_DESCRIPTION("KUnit test suite for GEM objects backed by shmem buffers");
MODULE_LICENSE("GPL");