// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit test suite for GEM objects backed by shmem buffers
 *
 * Copyright (C) 2023 Red Hat, Inc.
 *
 * Author: Marco Pagani <marpagan@redhat.com>
 */

#include <linux/dma-buf.h>
#include <linux/iosys-map.h>
#include <linux/sizes.h>

#include <kunit/test.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_kunit_helpers.h>

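/*
 * Import the namespace used by EXPORT_SYMBOL_IF_KUNIT() (see
 * <kunit/visibility.h>) so this suite can call helpers that are
 * exported only for KUnit testing.
 */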
MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");

#define TEST_SIZE SZ_1M
#define TEST_BYTE 0xae

/*
 * Wrappers to avoid cast warnings when passing action functions
 * directly to kunit_add_action().
 */
KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *);

KUNIT_DEFINE_ACTION_WRAPPER(sg_free_table_wrapper, sg_free_table,
			    struct sg_table *);

KUNIT_DEFINE_ACTION_WRAPPER(drm_gem_shmem_free_wrapper, drm_gem_shmem_free,
			    struct drm_gem_shmem_object *);

KUNIT_DEFINE_ACTION_WRAPPER(drm_gem_shmem_unpin_wrapper, drm_gem_shmem_unpin,
			    struct drm_gem_shmem_object *);
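
/*
 * Each wrapper expands to a function with the kunit_action_t
 * signature, roughly (a sketch of the generated code):
 *
 *	static void kfree_wrapper(void *in)
 *	{
 *		kfree((const void *)in);
 *	}
 */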

/*
 * Test creating a GEM object backed by a shmem buffer. The test case
 * succeeds if the GEM object is successfully allocated with the shmem
 * file node and object functions attributes set, and its size
 * attribute equals the requested size.
 */
static void drm_gem_shmem_test_obj_create(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
	KUNIT_EXPECT_EQ(test, shmem->base.size, TEST_SIZE);
	KUNIT_EXPECT_NOT_NULL(test, shmem->base.filp);
	KUNIT_EXPECT_NOT_NULL(test, shmem->base.funcs);

	drm_gem_shmem_free(shmem);
}

/*
 * Test creating a shmem GEM object from a scatter/gather table exported
 * via a DMA-BUF. The test case succeeds if the GEM object is successfully
 * created with the shmem file node attribute equal to NULL and the sgt
 * attribute pointing to the scatter/gather table that has been imported.
 */
static void drm_gem_shmem_test_obj_create_private(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *gem_obj;
	struct dma_buf buf_mock;
	struct dma_buf_attachment attach_mock;
	struct sg_table *sgt;
	char *buf;
	int ret;

	/* Create a mock scatter/gather table */
	buf = kunit_kzalloc(test, TEST_SIZE, GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, buf);

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, sgt);

	ret = kunit_add_action_or_reset(test, kfree_wrapper, sgt);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
	KUNIT_ASSERT_EQ(test, ret, 0);

	sg_init_one(sgt->sgl, buf, TEST_SIZE);

	/*
	 * Set the DMA mask to 64 bits and map the sg table;
	 * otherwise drm_gem_shmem_free() will cause a warning
	 * on debug kernels.
	 */
	ret = dma_set_mask(drm_dev->dev, DMA_BIT_MASK(64));
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = dma_map_sgtable(drm_dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* Init a mock DMA-BUF */
	buf_mock.size = TEST_SIZE;
	attach_mock.dmabuf = &buf_mock;

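	/*
	 * Only the size field of these stack-allocated mocks needs to
	 * be valid: the import helper is expected to dereference nothing
	 * beyond attach->dmabuf->size.
	 */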
	gem_obj = drm_gem_shmem_prime_import_sg_table(drm_dev, &attach_mock, sgt);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gem_obj);
	KUNIT_EXPECT_EQ(test, gem_obj->size, TEST_SIZE);
	KUNIT_EXPECT_NULL(test, gem_obj->filp);
	KUNIT_EXPECT_NOT_NULL(test, gem_obj->funcs);

	/* The scatter/gather table will be freed by drm_gem_shmem_free() */
	kunit_remove_action(test, sg_free_table_wrapper, sgt);
	kunit_remove_action(test, kfree_wrapper, sgt);

	shmem = to_drm_gem_shmem_obj(gem_obj);
	KUNIT_EXPECT_PTR_EQ(test, shmem->sgt, sgt);

	drm_gem_shmem_free(shmem);
}

/*
 * Test pinning backing pages for a shmem GEM object. The test case
 * succeeds if a suitable number of backing pages are allocated, and
 * the pages use count is increased by one.
 */
static void drm_gem_shmem_test_pin_pages(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	int i, ret;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
	KUNIT_EXPECT_NULL(test, shmem->pages);
	KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 0);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = drm_gem_shmem_pin(shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);
	KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
	KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 1);

	for (i = 0; i < (shmem->base.size >> PAGE_SHIFT); i++)
		KUNIT_ASSERT_NOT_NULL(test, shmem->pages[i]);

	drm_gem_shmem_unpin(shmem);
	KUNIT_EXPECT_NULL(test, shmem->pages);
	KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 0);
}

/*
 * Test creating a virtual mapping for a shmem GEM object. The test
 * case succeeds if the backing memory is mapped and the reference
 * counter for virtual mapping is increased by one. Moreover, the test
 * case writes and then reads a test pattern over the mapped memory.
 */
static void drm_gem_shmem_test_vmap(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	struct iosys_map map;
	int ret, i;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
	KUNIT_EXPECT_NULL(test, shmem->vaddr);
	KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 0);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = drm_gem_shmem_vmap(shmem, &map);
	KUNIT_ASSERT_EQ(test, ret, 0);
	KUNIT_ASSERT_NOT_NULL(test, shmem->vaddr);
	KUNIT_ASSERT_FALSE(test, iosys_map_is_null(&map));
	KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 1);

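	/*
	 * The iosys_map accessors below work whether the mapping lives
	 * in system or I/O memory; a shmem vmap is in system memory, so
	 * here they reduce to plain writes and reads through the vaddr.
	 */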
	iosys_map_memset(&map, 0, TEST_BYTE, TEST_SIZE);
	for (i = 0; i < TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, iosys_map_rd(&map, i, u8), TEST_BYTE);

	drm_gem_shmem_vunmap(shmem, &map);
	KUNIT_EXPECT_NULL(test, shmem->vaddr);
	KUNIT_EXPECT_EQ(test, refcount_read(&shmem->vmap_use_count), 0);
}

/*
 * Test exporting a scatter/gather table of pinned pages suitable for
 * PRIME usage from a shmem GEM object. The test case succeeds if a
 * scatter/gather table large enough to accommodate the backing memory
 * is successfully exported.
 */
static void drm_gem_shmem_test_get_sg_table(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	struct sg_table *sgt;
	struct scatterlist *sg;
	unsigned int si, len = 0;
	int ret;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = drm_gem_shmem_pin(shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_unpin_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	sgt = drm_gem_shmem_get_sg_table(shmem);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
	KUNIT_EXPECT_NULL(test, shmem->sgt);
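
	/*
	 * Unlike drm_gem_shmem_get_pages_sgt(), the returned table is
	 * not cached in shmem->sgt: the caller owns it, hence the
	 * cleanup actions registered below.
	 */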
	ret = kunit_add_action_or_reset(test, kfree_wrapper, sgt);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = kunit_add_action_or_reset(test, sg_free_table_wrapper, sgt);
	KUNIT_ASSERT_EQ(test, ret, 0);

	for_each_sgtable_sg(sgt, sg, si) {
		KUNIT_EXPECT_NOT_NULL(test, sg);
		len += sg->length;
	}

	KUNIT_EXPECT_GE(test, len, TEST_SIZE);
}

/*
 * Test pinning pages and exporting a scatter/gather table suitable for
 * driver usage from a shmem GEM object. The test case succeeds if the
 * backing pages are pinned and a scatter/gather table large enough to
 * accommodate the backing memory is successfully exported.
 */
static void drm_gem_shmem_test_get_pages_sgt(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	struct sg_table *sgt;
	struct scatterlist *sg;
	unsigned int si, len = 0;
	int ret;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	/* The scatter/gather table will be freed by drm_gem_shmem_free() */
	sgt = drm_gem_shmem_get_pages_sgt(shmem);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);
	KUNIT_ASSERT_NOT_NULL(test, shmem->pages);
	KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 1);
	KUNIT_EXPECT_PTR_EQ(test, sgt, shmem->sgt);

	for_each_sgtable_sg(sgt, sg, si) {
		KUNIT_EXPECT_NOT_NULL(test, sg);
		len += sg->length;
	}

	KUNIT_EXPECT_GE(test, len, TEST_SIZE);
}

/*
 * Test updating the madvise state of a shmem GEM object. The test
 * case checks that the function for setting madv updates it only if
 * its current value is greater than or equal to zero and returns
 * false if it has a negative value.
 */
static void drm_gem_shmem_test_madvise(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);
	KUNIT_ASSERT_EQ(test, shmem->madv, 0);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = drm_gem_shmem_madvise(shmem, 1);
	KUNIT_EXPECT_TRUE(test, ret);
	KUNIT_ASSERT_EQ(test, shmem->madv, 1);

	/* Set madv to a negative value */
	ret = drm_gem_shmem_madvise(shmem, -1);
	KUNIT_EXPECT_FALSE(test, ret);
	KUNIT_ASSERT_EQ(test, shmem->madv, -1);

	/* Check that madv cannot be set back to a positive value */
	ret = drm_gem_shmem_madvise(shmem, 0);
	KUNIT_EXPECT_FALSE(test, ret);
	KUNIT_ASSERT_EQ(test, shmem->madv, -1);
}

/*
 * Test purging a shmem GEM object. First, assert that a newly created
 * shmem GEM object is not purgeable. Then, set madvise to a positive
 * value and call drm_gem_shmem_get_pages_sgt() to pin and dma-map the
 * backing pages. Finally, assert that the shmem GEM object is now
 * purgeable and purge it.
 */
static void drm_gem_shmem_test_purge(struct kunit *test)
{
	struct drm_device *drm_dev = test->priv;
	struct drm_gem_shmem_object *shmem;
	struct sg_table *sgt;
	int ret;

	shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem);

	ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	ret = drm_gem_shmem_is_purgeable(shmem);
	KUNIT_EXPECT_FALSE(test, ret);

	ret = drm_gem_shmem_madvise(shmem, 1);
	KUNIT_EXPECT_TRUE(test, ret);

	/* The scatter/gather table will be freed by drm_gem_shmem_free() */
	sgt = drm_gem_shmem_get_pages_sgt(shmem);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt);

	ret = drm_gem_shmem_is_purgeable(shmem);
	KUNIT_EXPECT_TRUE(test, ret);

	ret = drm_gem_shmem_purge(shmem);
	KUNIT_ASSERT_EQ(test, ret, 0);

	KUNIT_EXPECT_NULL(test, shmem->pages);
	KUNIT_EXPECT_NULL(test, shmem->sgt);
	KUNIT_EXPECT_EQ(test, shmem->madv, -1);
}

static int drm_gem_shmem_test_init(struct kunit *test)
{
	struct device *dev;
	struct drm_device *drm_dev;

	/* Allocate a parent device */
	dev = drm_kunit_helper_alloc_device(test);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);

	/*
	 * The DRM core will automatically initialize the GEM core and create
	 * a DRM Memory Manager object which provides an address space pool
	 * for GEM objects allocation.
	 */
	drm_dev = __drm_kunit_helper_alloc_drm_device(test, dev, sizeof(*drm_dev),
						      0, DRIVER_GEM);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, drm_dev);

	test->priv = drm_dev;

	return 0;
}

static struct kunit_case drm_gem_shmem_test_cases[] = {
	KUNIT_CASE(drm_gem_shmem_test_obj_create),
	KUNIT_CASE(drm_gem_shmem_test_obj_create_private),
	KUNIT_CASE(drm_gem_shmem_test_pin_pages),
	KUNIT_CASE(drm_gem_shmem_test_vmap),
	KUNIT_CASE(drm_gem_shmem_test_get_sg_table),
	KUNIT_CASE(drm_gem_shmem_test_get_pages_sgt),
	KUNIT_CASE(drm_gem_shmem_test_madvise),
	KUNIT_CASE(drm_gem_shmem_test_purge),
	{}
};

static struct kunit_suite drm_gem_shmem_suite = {
	.name = "drm_gem_shmem",
	.init = drm_gem_shmem_test_init,
	.test_cases = drm_gem_shmem_test_cases,
};

kunit_test_suite(drm_gem_shmem_suite);

MODULE_DESCRIPTION("KUnit test suite for GEM objects backed by shmem buffers");
MODULE_LICENSE("GPL");