// SPDX-License-Identifier: GPL-2.0 AND MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include <uapi/drm/xe_drm.h>

#include <kunit/test.h>
#include <kunit/visibility.h>

#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"

#include "xe_pci.h"
#include "xe_pm.h"

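/*
 * True iff the kernel supports PCI P2PDMA and the importer's attach ops
 * allow peer-to-peer DMA.
 */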
static bool p2p_enabled(struct dma_buf_test_params *params)
{
	return IS_ENABLED(CONFIG_PCI_P2PDMA) && params->attach_ops &&
		params->attach_ops->allow_peer2peer;
}

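/*
 * True iff the attachment is dynamic, i.e. the importer implements
 * move_notify() and move notification is enabled in the kernel config.
 */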
static bool is_dynamic(struct dma_buf_test_params *params)
{
	return IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY) && params->attach_ops &&
		params->attach_ops->move_notify;
}

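/*
 * Check that the exporter sits in the memory type expected for the given
 * test parameters, and that evicting the exporter propagates to the
 * importer where applicable. Called with both bos locked.
 */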
static void check_residency(struct kunit *test, struct xe_bo *exported,
			    struct xe_bo *imported, struct dma_buf *dmabuf,
			    struct drm_exec *exec)
{
	struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
	struct dma_buf_attachment *attach;
	u32 mem_type;
	int ret;

	xe_bo_assert_held(exported);
	xe_bo_assert_held(imported);

	mem_type = XE_PL_VRAM0;
	if (!(params->mem_mask & XE_BO_FLAG_VRAM0))
		/* No VRAM allowed */
		mem_type = XE_PL_TT;
	else if (params->force_different_devices && !p2p_enabled(params))
		/* No P2P */
		mem_type = XE_PL_TT;
	else if (params->force_different_devices && !is_dynamic(params) &&
		 (params->mem_mask & XE_BO_FLAG_SYSTEM))
		/* Pin migrated to TT on non-dynamic attachments. */
		mem_type = XE_PL_TT;

	if (!xe_bo_is_mem_type(exported, mem_type)) {
		KUNIT_FAIL(test, "Exported bo was not in expected memory type.\n");
		return;
	}

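	/* A pinned bo can't be evicted, so there is nothing more to check. */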
	if (xe_bo_is_pinned(exported))
		return;

	/*
	 * Evict the exporter. If the importer is on a different device,
	 * evicting the exported bo will also evict the imported bo through
	 * the move_notify() functionality. If they're on the same device,
	 * the exporter and the importer should be the same bo.
	 */
	ret = xe_bo_evict(exported, exec);
	if (ret) {
		if (ret != -EINTR && ret != -ERESTARTSYS)
			KUNIT_FAIL(test, "Evicting exporter failed with err=%d.\n",
				   ret);
		return;
	}

	/* Verify that the importer has also been evicted to SYSTEM */
	if (exported != imported && !xe_bo_is_mem_type(imported, XE_PL_SYSTEM)) {
		KUNIT_FAIL(test, "Importer wasn't properly evicted.\n");
		return;
	}

	/* Re-validate the importer. This should also move the exporter back in. */
	ret = xe_bo_validate(imported, NULL, false, exec);
	if (ret) {
		if (ret != -EINTR && ret != -ERESTARTSYS)
			KUNIT_FAIL(test, "Validating importer failed with err=%d.\n",
				   ret);
		return;
	}

	KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));

	/* Check that we can pin without migrating. */
	attach = list_first_entry_or_null(&dmabuf->attachments, typeof(*attach), node);
	if (attach) {
		int err = dma_buf_pin(attach);

		if (!err) {
			KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));
			dma_buf_unpin(attach);
		}
		KUNIT_EXPECT_EQ(test, err, 0);
	}

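	/*
	 * When faking different devices, the import is a separate bo that
	 * is expected to end up in TT, mapping the exporter's pages. On the
	 * same device, the import shortcut hands back the exported bo itself.
	 */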
	if (params->force_different_devices)
		KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(imported, XE_PL_TT));
	else
		KUNIT_EXPECT_TRUE(test, exported == imported);
}

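/*
 * Export a bo, import it again through the same driver, and verify the
 * expected import outcome, placement and eviction behaviour for the
 * current set of test parameters.
 */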
static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
{
	struct kunit *test = kunit_get_current_test();
	struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
	struct drm_gem_object *import;
	struct dma_buf *dmabuf;
	struct xe_bo *bo;
	size_t size;

	/* No VRAM on this device? */
	if (!ttm_manager_type(&xe->ttm, XE_PL_VRAM0) &&
	    (params->mem_mask & XE_BO_FLAG_VRAM0))
		return;

	size = PAGE_SIZE;
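	/*
	 * Devices with XE_VRAM_FLAGS_NEED64K require a 64K minimum page
	 * size for VRAM allocations, so bump the bo size accordingly.
	 */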
	if ((params->mem_mask & XE_BO_FLAG_VRAM0) &&
	    xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
		size = SZ_64K;

	kunit_info(test, "running %s\n", __func__);
	bo = xe_bo_create_user(xe, NULL, size, DRM_XE_GEM_CPU_CACHING_WC,
			       params->mem_mask, NULL);
	if (IS_ERR(bo)) {
		KUNIT_FAIL(test, "xe_bo_create_user() failed with err=%ld\n",
			   PTR_ERR(bo));
		return;
	}

	dmabuf = xe_gem_prime_export(&bo->ttm.base, 0);
	if (IS_ERR(dmabuf)) {
		KUNIT_FAIL(test, "xe_gem_prime_export() failed with err=%ld\n",
			   PTR_ERR(dmabuf));
		goto out;
	}
	bo->ttm.base.dma_buf = dmabuf;

	import = xe_gem_prime_import(&xe->drm, dmabuf);
	if (!IS_ERR(import)) {
		struct xe_bo *import_bo = gem_to_xe_bo(import);

		/*
		 * Did import succeed when it shouldn't have due to lack of
		 * p2p support?
		 */
		if (params->force_different_devices &&
		    !p2p_enabled(params) &&
		    !(params->mem_mask & XE_BO_FLAG_SYSTEM)) {
			KUNIT_FAIL(test,
				   "xe_gem_prime_import() succeeded when it shouldn't have\n");
		} else {
			struct drm_exec *exec = XE_VALIDATION_OPT_OUT;
			int err;

			/* Is everything where we expect it to be? */
			xe_bo_lock(import_bo, false);
			err = xe_bo_validate(import_bo, NULL, false, exec);

			/* Pinning in VRAM is not allowed for non-dynamic attachments */
			if (!is_dynamic(params) &&
			    params->force_different_devices &&
			    !(params->mem_mask & XE_BO_FLAG_SYSTEM))
				KUNIT_EXPECT_EQ(test, err, -EINVAL);
			/*
			 * Otherwise only expect interrupts or success. The
			 * expectation below is known to be false when reached;
			 * it serves to flag the unexpected error.
			 */
			else if (err && err != -EINTR && err != -ERESTARTSYS)
				KUNIT_EXPECT_TRUE(test, !err || err == -EINTR ||
						  err == -ERESTARTSYS);

			if (!err)
				check_residency(test, bo, import_bo, dmabuf, exec);
			xe_bo_unlock(import_bo);
		}
		drm_gem_object_put(import);
	} else if (PTR_ERR(import) != -EOPNOTSUPP) {
		/* Unexpected error code. */
		KUNIT_FAIL(test,
			   "xe_gem_prime_import failed with the wrong err=%ld\n",
			   PTR_ERR(import));
	} else if (!params->force_different_devices ||
		   p2p_enabled(params) ||
		   (params->mem_mask & XE_BO_FLAG_SYSTEM)) {
		/* Shouldn't fail if we can reuse the same bo, use p2p or use system */
		KUNIT_FAIL(test, "dynamic p2p attachment failed with err=%ld\n",
			   PTR_ERR(import));
	}
	bo->ttm.base.dma_buf = NULL;
	dma_buf_put(dmabuf);
out:
	drm_gem_object_put(&bo->ttm.base);
}

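/* Attach ops for a dynamic importer that doesn't allow peer-to-peer. */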
static const struct dma_buf_attach_ops nop2p_attach_ops = {
	.allow_peer2peer = false,
	.move_notify = xe_dma_buf_move_notify
};

/*
 * We test the implementation with bos of different residency and with
 * importers of different capabilities: some lacking p2p support and some
 * lacking dynamic capabilities (attach_ops == NULL). We also fake
 * different devices, avoiding the import shortcut that just reuses the
 * same gem object.
 */
static const struct dma_buf_test_params test_params[] = {
	{.mem_mask = XE_BO_FLAG_VRAM0,
	 .attach_ops = &xe_dma_buf_attach_ops},
	{.mem_mask = XE_BO_FLAG_VRAM0 | XE_BO_FLAG_NEEDS_CPU_ACCESS,
	 .attach_ops = &xe_dma_buf_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_FLAG_VRAM0,
	 .attach_ops = &nop2p_attach_ops},
	{.mem_mask = XE_BO_FLAG_VRAM0,
	 .attach_ops = &nop2p_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_FLAG_VRAM0},
	{.mem_mask = XE_BO_FLAG_VRAM0,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_FLAG_SYSTEM,
	 .attach_ops = &xe_dma_buf_attach_ops},
	{.mem_mask = XE_BO_FLAG_SYSTEM,
	 .attach_ops = &xe_dma_buf_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_FLAG_SYSTEM,
	 .attach_ops = &nop2p_attach_ops},
	{.mem_mask = XE_BO_FLAG_SYSTEM,
	 .attach_ops = &nop2p_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_FLAG_SYSTEM},
	{.mem_mask = XE_BO_FLAG_SYSTEM,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
	 .attach_ops = &xe_dma_buf_attach_ops},
	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0 |
		     XE_BO_FLAG_NEEDS_CPU_ACCESS,
	 .attach_ops = &xe_dma_buf_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
	 .attach_ops = &nop2p_attach_ops},
	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
	 .attach_ops = &nop2p_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0},
	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
	 .force_different_devices = true},

	{}
};

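/*
 * Run all test parameter combinations against a single device. Each
 * iteration copies the (const) parameters to the stack so the test id
 * can be set before handing them to the test via test->priv.
 */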
static int dma_buf_run_device(struct xe_device *xe)
{
	const struct dma_buf_test_params *params;
	struct kunit *test = kunit_get_current_test();

	xe_pm_runtime_get(xe);
	for (params = test_params; params->mem_mask; ++params) {
		struct dma_buf_test_params p = *params;

		p.base.id = XE_TEST_LIVE_DMA_BUF;
		test->priv = &p;
		xe_test_dmabuf_import_same_driver(xe);
	}
	xe_pm_runtime_put(xe);

	/* A non-zero return would halt iteration over driver devices */
	return 0;
}

static void xe_dma_buf_kunit(struct kunit *test)
{
	struct xe_device *xe = test->priv;

	dma_buf_run_device(xe);
}

static struct kunit_case xe_dma_buf_tests[] = {
	KUNIT_CASE_PARAM(xe_dma_buf_kunit, xe_pci_live_device_gen_param),
	{}
};

VISIBLE_IF_KUNIT
struct kunit_suite xe_dma_buf_test_suite = {
	.name = "xe_dma_buf",
	.test_cases = xe_dma_buf_tests,
	.init = xe_kunit_helper_xe_device_live_test_init,
};
EXPORT_SYMBOL_IF_KUNIT(xe_dma_buf_test_suite);