xref: /linux/drivers/gpu/drm/xe/tests/xe_dma_buf.c (revision aec2f682d47c54ef434b2d440992626d80b1ebdc)
1 // SPDX-License-Identifier: GPL-2.0 AND MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include <uapi/drm/xe_drm.h>
7 
8 #include <kunit/test.h>
9 #include <kunit/visibility.h>
10 
11 #include "tests/xe_kunit_helpers.h"
12 #include "tests/xe_pci_test.h"
13 
14 #include "xe_pci.h"
15 #include "xe_pm.h"
16 
17 static bool p2p_enabled(struct dma_buf_test_params *params)
18 {
19 	return IS_ENABLED(CONFIG_PCI_P2PDMA) && params->attach_ops &&
20 		params->attach_ops->allow_peer2peer;
21 }
22 
23 static bool is_dynamic(struct dma_buf_test_params *params)
24 {
25 	return params->attach_ops && params->attach_ops->invalidate_mappings;
26 }
27 
/*
 * Verify that the exported and imported bos sit in the memory type
 * expected for the given test params, then exercise eviction of the
 * exporter, propagation of that eviction to the importer, and
 * re-validation of the importer. Caller must hold both bos' locks
 * (asserted below).
 */
static void check_residency(struct kunit *test, struct xe_bo *exported,
			    struct xe_bo *imported, struct dma_buf *dmabuf,
			    struct drm_exec *exec)
{
	struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
	struct dma_buf_attachment *attach;
	u32 mem_type;
	int ret;

	xe_bo_assert_held(exported);
	xe_bo_assert_held(imported);

	/*
	 * Expected exporter placement: VRAM by default, but TT when VRAM
	 * is not allowed, when cross-device import lacks p2p, or when a
	 * non-dynamic cross-device attachment with SYSTEM allowed forces
	 * a pin migration.
	 */
	mem_type = XE_PL_VRAM0;
	if (!(params->mem_mask & XE_BO_FLAG_VRAM0))
		/* No VRAM allowed */
		mem_type = XE_PL_TT;
	else if (params->force_different_devices && !p2p_enabled(params))
		/* No P2P */
		mem_type = XE_PL_TT;
	else if (params->force_different_devices && !is_dynamic(params) &&
		 (params->mem_mask & XE_BO_FLAG_SYSTEM))
		/* Pin migrated to TT on non-dynamic attachments. */
		mem_type = XE_PL_TT;

	if (!xe_bo_is_mem_type(exported, mem_type)) {
		KUNIT_FAIL(test, "Exported bo was not in expected memory type.\n");
		return;
	}

	/* A pinned exporter can't be evicted; skip the eviction checks. */
	if (xe_bo_is_pinned(exported))
		return;

	/*
	 * Evict exporter. Evicting the exported bo will
	 * evict also the imported bo through the invalidate_mappings() functionality if
	 * importer is on a different device. If they're on the same device,
	 * the exporter and the importer should be the same bo.
	 */
	ret = xe_bo_evict(exported, exec);
	if (ret) {
		/* Signal interruption is not a test failure. */
		if (ret != -EINTR && ret != -ERESTARTSYS)
			KUNIT_FAIL(test, "Evicting exporter failed with err=%d.\n",
				   ret);
		return;
	}

	/* Verify that also importer has been evicted to SYSTEM */
	if (exported != imported && !xe_bo_is_mem_type(imported, XE_PL_SYSTEM)) {
		KUNIT_FAIL(test, "Importer wasn't properly evicted.\n");
		return;
	}

	/* Re-validate the importer. This should move also exporter in. */
	ret = xe_bo_validate(imported, NULL, false, exec);
	if (ret) {
		/* Signal interruption is not a test failure. */
		if (ret != -EINTR && ret != -ERESTARTSYS)
			KUNIT_FAIL(test, "Validating importer failed with err=%d.\n",
				   ret);
		return;
	}

	KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));

	/* Check that we can pin without migrating. */
	attach = list_first_entry_or_null(&dmabuf->attachments, typeof(*attach), node);
	if (attach) {
		int err = dma_buf_pin(attach);

		if (!err) {
			/* Placement must be unchanged after the pin. */
			KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));
			dma_buf_unpin(attach);
		}
		KUNIT_EXPECT_EQ(test, err, 0);
	}

	/*
	 * Cross-device imports should end up in TT; same-device imports
	 * should simply reuse the exporter's gem object.
	 */
	if (params->force_different_devices)
		KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(imported, XE_PL_TT));
	else
		KUNIT_EXPECT_TRUE(test, exported == imported);
}
108 
/*
 * Create a user bo on @xe, export it as a dma-buf, and import it back
 * through xe_gem_prime_import() on the same device. Depending on the
 * test params (residency mask, attach ops, faked different devices),
 * the import is expected to either fail with a specific error or to
 * succeed, in which case residency is further checked via
 * check_residency().
 */
static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
{
	struct kunit *test = kunit_get_current_test();
	struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
	struct drm_gem_object *import;
	struct dma_buf *dmabuf;
	struct xe_bo *bo;
	struct xe_bo *bo;
	size_t size;

	/* No VRAM on this device? */
	if (!ttm_manager_type(&xe->ttm, XE_PL_VRAM0) &&
	    (params->mem_mask & XE_BO_FLAG_VRAM0))
		return;

	/* Some devices require 64K granularity for VRAM allocations. */
	size = PAGE_SIZE;
	if ((params->mem_mask & XE_BO_FLAG_VRAM0) &&
	    xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
		size = SZ_64K;

	kunit_info(test, "running %s\n", __func__);
	bo = xe_bo_create_user(xe, NULL, size, DRM_XE_GEM_CPU_CACHING_WC,
			       params->mem_mask, NULL);
	if (IS_ERR(bo)) {
		KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
			   PTR_ERR(bo));
		return;
	}

	dmabuf = xe_gem_prime_export(&bo->ttm.base, 0);
	if (IS_ERR(dmabuf)) {
		KUNIT_FAIL(test, "xe_gem_prime_export() failed with err=%ld\n",
			   PTR_ERR(dmabuf));
		goto out;
	}
	/* Link the exported dma_buf to the gem object for the import below. */
	bo->ttm.base.dma_buf = dmabuf;

	import = xe_gem_prime_import(&xe->drm, dmabuf);
	if (!IS_ERR(import)) {
		struct xe_bo *import_bo = gem_to_xe_bo(import);

		/*
		 * Did import succeed when it shouldn't due to lack of p2p support?
		 */
		if (params->force_different_devices &&
		    !p2p_enabled(params) &&
		    !(params->mem_mask & XE_BO_FLAG_SYSTEM)) {
			KUNIT_FAIL(test,
				   "xe_gem_prime_import() succeeded when it shouldn't have\n");
		} else {
			struct drm_exec *exec = XE_VALIDATION_OPT_OUT;
			int err;

			/* Is everything where we expect it to be? */
			xe_bo_lock(import_bo, false);
			err = xe_bo_validate(import_bo, NULL, false, exec);

			/* Pinning in VRAM is not allowed for non-dynamic attachments */
			if (!is_dynamic(params) &&
			    params->force_different_devices &&
			    !(params->mem_mask & XE_BO_FLAG_SYSTEM))
				KUNIT_EXPECT_EQ(test, err, -EINVAL);
			/* Otherwise only expect interrupts or success. */
			else if (err && err != -EINTR && err != -ERESTARTSYS)
				/*
				 * NOTE(review): the guarding condition makes
				 * this expectation always false here; it only
				 * serves to report the unexpected error.
				 */
				KUNIT_EXPECT_TRUE(test, !err || err == -EINTR ||
						  err == -ERESTARTSYS);

			if (!err)
				check_residency(test, bo, import_bo, dmabuf, exec);
			xe_bo_unlock(import_bo);
		}
		drm_gem_object_put(import);
	} else if (PTR_ERR(import) != -EOPNOTSUPP) {
		/* Unexpected error code. */
		KUNIT_FAIL(test,
			   "xe_gem_prime_import failed with the wrong err=%ld\n",
			   PTR_ERR(import));
	} else if (!params->force_different_devices ||
		   p2p_enabled(params) ||
		   (params->mem_mask & XE_BO_FLAG_SYSTEM)) {
		/* Shouldn't fail if we can reuse same bo, use p2p or use system */
		KUNIT_FAIL(test, "dynamic p2p attachment failed with err=%ld\n",
			   PTR_ERR(import));
	}
	/* Undo the export linkage before dropping our references. */
	bo->ttm.base.dma_buf = NULL;
	dma_buf_put(dmabuf);
out:
	drm_gem_object_put(&bo->ttm.base);
}
197 
/*
 * Attach ops modelling a dynamic importer (provides invalidate_mappings)
 * that explicitly disallows peer-to-peer transfers.
 */
static const struct dma_buf_attach_ops nop2p_attach_ops = {
	.allow_peer2peer = false,
	.invalidate_mappings = xe_dma_buf_move_notify
};
202 
/*
 * We test the implementation with bos of different residency and with
 * importers with different capabilities; some lacking p2p support and some
 * lacking dynamic capabilities (attach_ops == NULL). We also fake
 * different devices avoiding the import shortcut that just reuses the same
 * gem object. The table is terminated by an entry with an empty mem_mask.
 */
static const struct dma_buf_test_params test_params[] = {
	/* VRAM-only bos, driver's own (dynamic) attach ops. */
	{.mem_mask = XE_BO_FLAG_VRAM0,
	 .attach_ops = &xe_dma_buf_attach_ops},
	{.mem_mask = XE_BO_FLAG_VRAM0 | XE_BO_FLAG_NEEDS_CPU_ACCESS,
	 .attach_ops = &xe_dma_buf_attach_ops,
	 .force_different_devices = true},

	/* VRAM-only bos, dynamic importer without p2p. */
	{.mem_mask = XE_BO_FLAG_VRAM0,
	 .attach_ops = &nop2p_attach_ops},
	{.mem_mask = XE_BO_FLAG_VRAM0,
	 .attach_ops = &nop2p_attach_ops,
	 .force_different_devices = true},

	/* VRAM-only bos, non-dynamic importer (attach_ops == NULL). */
	{.mem_mask = XE_BO_FLAG_VRAM0},
	{.mem_mask = XE_BO_FLAG_VRAM0,
	 .force_different_devices = true},

	/* System-only bos, driver's own (dynamic) attach ops. */
	{.mem_mask = XE_BO_FLAG_SYSTEM,
	 .attach_ops = &xe_dma_buf_attach_ops},
	{.mem_mask = XE_BO_FLAG_SYSTEM,
	 .attach_ops = &xe_dma_buf_attach_ops,
	 .force_different_devices = true},

	/* System-only bos, dynamic importer without p2p. */
	{.mem_mask = XE_BO_FLAG_SYSTEM,
	 .attach_ops = &nop2p_attach_ops},
	{.mem_mask = XE_BO_FLAG_SYSTEM,
	 .attach_ops = &nop2p_attach_ops,
	 .force_different_devices = true},

	/* System-only bos, non-dynamic importer. */
	{.mem_mask = XE_BO_FLAG_SYSTEM},
	{.mem_mask = XE_BO_FLAG_SYSTEM,
	 .force_different_devices = true},

	/* System + VRAM bos, driver's own (dynamic) attach ops. */
	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
	 .attach_ops = &xe_dma_buf_attach_ops},
	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0 |
		     XE_BO_FLAG_NEEDS_CPU_ACCESS,
	 .attach_ops = &xe_dma_buf_attach_ops,
	 .force_different_devices = true},

	/* System + VRAM bos, dynamic importer without p2p. */
	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
	 .attach_ops = &nop2p_attach_ops},
	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
	 .attach_ops = &nop2p_attach_ops,
	 .force_different_devices = true},

	/* System + VRAM bos, non-dynamic importer. */
	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0},
	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
	 .force_different_devices = true},

	{}
};
262 
263 static int dma_buf_run_device(struct xe_device *xe)
264 {
265 	const struct dma_buf_test_params *params;
266 	struct kunit *test = kunit_get_current_test();
267 
268 	guard(xe_pm_runtime)(xe);
269 	for (params = test_params; params->mem_mask; ++params) {
270 		struct dma_buf_test_params p = *params;
271 
272 		p.base.id = XE_TEST_LIVE_DMA_BUF;
273 		test->priv = &p;
274 		xe_test_dmabuf_import_same_driver(xe);
275 	}
276 
277 	/* A non-zero return would halt iteration over driver devices */
278 	return 0;
279 }
280 
281 static void xe_dma_buf_kunit(struct kunit *test)
282 {
283 	struct xe_device *xe = test->priv;
284 
285 	dma_buf_run_device(xe);
286 }
287 
/* Single case, parameterized over live devices via xe_pci_live_device_gen_param. */
static struct kunit_case xe_dma_buf_tests[] = {
	KUNIT_CASE_PARAM(xe_dma_buf_kunit, xe_pci_live_device_gen_param),
	{}
};
292 
/* Live test suite; init places the xe device in test->priv. */
VISIBLE_IF_KUNIT
struct kunit_suite xe_dma_buf_test_suite = {
	.name = "xe_dma_buf",
	.test_cases = xe_dma_buf_tests,
	.init = xe_kunit_helper_xe_device_live_test_init,
};
EXPORT_SYMBOL_IF_KUNIT(xe_dma_buf_test_suite);
300