xref: /linux/drivers/gpu/drm/xe/tests/xe_dma_buf.c (revision 6f17ab9a63e670bd62a287f95e3982f99eafd77e)
1 // SPDX-License-Identifier: GPL-2.0 AND MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include <uapi/drm/xe_drm.h>
7 
8 #include <kunit/test.h>
9 #include <kunit/visibility.h>
10 
11 #include "tests/xe_kunit_helpers.h"
12 #include "tests/xe_pci_test.h"
13 
14 #include "xe_pci.h"
15 #include "xe_pm.h"
16 
17 static bool p2p_enabled(struct dma_buf_test_params *params)
18 {
19 	return IS_ENABLED(CONFIG_PCI_P2PDMA) && params->attach_ops &&
20 		params->attach_ops->allow_peer2peer;
21 }
22 
23 static bool is_dynamic(struct dma_buf_test_params *params)
24 {
25 	return IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY) && params->attach_ops &&
26 		params->attach_ops->move_notify;
27 }
28 
29 static void check_residency(struct kunit *test, struct xe_bo *exported,
30 			    struct xe_bo *imported, struct dma_buf *dmabuf)
31 {
32 	struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
33 	u32 mem_type;
34 	int ret;
35 
36 	xe_bo_assert_held(exported);
37 	xe_bo_assert_held(imported);
38 
39 	mem_type = XE_PL_VRAM0;
40 	if (!(params->mem_mask & XE_BO_FLAG_VRAM0))
41 		/* No VRAM allowed */
42 		mem_type = XE_PL_TT;
43 	else if (params->force_different_devices && !p2p_enabled(params))
44 		/* No P2P */
45 		mem_type = XE_PL_TT;
46 	else if (params->force_different_devices && !is_dynamic(params) &&
47 		 (params->mem_mask & XE_BO_FLAG_SYSTEM))
48 		/* Pin migrated to TT */
49 		mem_type = XE_PL_TT;
50 
51 	if (!xe_bo_is_mem_type(exported, mem_type)) {
52 		KUNIT_FAIL(test, "Exported bo was not in expected memory type.\n");
53 		return;
54 	}
55 
56 	if (xe_bo_is_pinned(exported))
57 		return;
58 
59 	/*
60 	 * Evict exporter. Evicting the exported bo will
61 	 * evict also the imported bo through the move_notify() functionality if
62 	 * importer is on a different device. If they're on the same device,
63 	 * the exporter and the importer should be the same bo.
64 	 */
65 	ret = xe_bo_evict(exported);
66 	if (ret) {
67 		if (ret != -EINTR && ret != -ERESTARTSYS)
68 			KUNIT_FAIL(test, "Evicting exporter failed with err=%d.\n",
69 				   ret);
70 		return;
71 	}
72 
73 	/* Verify that also importer has been evicted to SYSTEM */
74 	if (exported != imported && !xe_bo_is_mem_type(imported, XE_PL_SYSTEM)) {
75 		KUNIT_FAIL(test, "Importer wasn't properly evicted.\n");
76 		return;
77 	}
78 
79 	/* Re-validate the importer. This should move also exporter in. */
80 	ret = xe_bo_validate(imported, NULL, false);
81 	if (ret) {
82 		if (ret != -EINTR && ret != -ERESTARTSYS)
83 			KUNIT_FAIL(test, "Validating importer failed with err=%d.\n",
84 				   ret);
85 		return;
86 	}
87 
88 	KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));
89 
90 	if (params->force_different_devices)
91 		KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(imported, XE_PL_TT));
92 	else
93 		KUNIT_EXPECT_TRUE(test, exported == imported);
94 }
95 
96 static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
97 {
98 	struct kunit *test = kunit_get_current_test();
99 	struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
100 	struct drm_gem_object *import;
101 	struct dma_buf *dmabuf;
102 	struct xe_bo *bo;
103 	size_t size;
104 
105 	/* No VRAM on this device? */
106 	if (!ttm_manager_type(&xe->ttm, XE_PL_VRAM0) &&
107 	    (params->mem_mask & XE_BO_FLAG_VRAM0))
108 		return;
109 
110 	size = PAGE_SIZE;
111 	if ((params->mem_mask & XE_BO_FLAG_VRAM0) &&
112 	    xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
113 		size = SZ_64K;
114 
115 	kunit_info(test, "running %s\n", __func__);
116 	bo = xe_bo_create_user(xe, NULL, NULL, size, DRM_XE_GEM_CPU_CACHING_WC,
117 			       params->mem_mask);
118 	if (IS_ERR(bo)) {
119 		KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
120 			   PTR_ERR(bo));
121 		return;
122 	}
123 
124 	dmabuf = xe_gem_prime_export(&bo->ttm.base, 0);
125 	if (IS_ERR(dmabuf)) {
126 		KUNIT_FAIL(test, "xe_gem_prime_export() failed with err=%ld\n",
127 			   PTR_ERR(dmabuf));
128 		goto out;
129 	}
130 	bo->ttm.base.dma_buf = dmabuf;
131 
132 	import = xe_gem_prime_import(&xe->drm, dmabuf);
133 	if (!IS_ERR(import)) {
134 		struct xe_bo *import_bo = gem_to_xe_bo(import);
135 
136 		/*
137 		 * Did import succeed when it shouldn't due to lack of p2p support?
138 		 */
139 		if (params->force_different_devices &&
140 		    !p2p_enabled(params) &&
141 		    !(params->mem_mask & XE_BO_FLAG_SYSTEM)) {
142 			KUNIT_FAIL(test,
143 				   "xe_gem_prime_import() succeeded when it shouldn't have\n");
144 		} else {
145 			int err;
146 
147 			/* Is everything where we expect it to be? */
148 			xe_bo_lock(import_bo, false);
149 			err = xe_bo_validate(import_bo, NULL, false);
150 
151 			/* Pinning in VRAM is not allowed. */
152 			if (!is_dynamic(params) &&
153 			    params->force_different_devices &&
154 			    !(params->mem_mask & XE_BO_FLAG_SYSTEM))
155 				KUNIT_EXPECT_EQ(test, err, -EINVAL);
156 			/* Otherwise only expect interrupts or success. */
157 			else if (err && err != -EINTR && err != -ERESTARTSYS)
158 				KUNIT_EXPECT_TRUE(test, !err || err == -EINTR ||
159 						  err == -ERESTARTSYS);
160 
161 			if (!err)
162 				check_residency(test, bo, import_bo, dmabuf);
163 			xe_bo_unlock(import_bo);
164 		}
165 		drm_gem_object_put(import);
166 	} else if (PTR_ERR(import) != -EOPNOTSUPP) {
167 		/* Unexpected error code. */
168 		KUNIT_FAIL(test,
169 			   "xe_gem_prime_import failed with the wrong err=%ld\n",
170 			   PTR_ERR(import));
171 	} else if (!params->force_different_devices ||
172 		   p2p_enabled(params) ||
173 		   (params->mem_mask & XE_BO_FLAG_SYSTEM)) {
174 		/* Shouldn't fail if we can reuse same bo, use p2p or use system */
175 		KUNIT_FAIL(test, "dynamic p2p attachment failed with err=%ld\n",
176 			   PTR_ERR(import));
177 	}
178 	bo->ttm.base.dma_buf = NULL;
179 	dma_buf_put(dmabuf);
180 out:
181 	drm_gem_object_put(&bo->ttm.base);
182 }
183 
/*
 * Dynamic importer ops (move_notify provided) that nevertheless deny
 * peer2peer, used to exercise the no-P2P fallback paths.
 */
static const struct dma_buf_attach_ops nop2p_attach_ops = {
	.allow_peer2peer = false,
	.move_notify = xe_dma_buf_move_notify
};
188 
/*
 * We test the implementation with bos of different residency and with
 * importers with different capabilities; some lacking p2p support and some
 * lacking dynamic capabilities (attach_ops == NULL). We also fake
 * different devices avoiding the import shortcut that just reuses the same
 * gem object.
 *
 * The zero-initialized terminator entry (mem_mask == 0) ends iteration
 * in dma_buf_run_device().
 */
static const struct dma_buf_test_params test_params[] = {
	/* VRAM-only exporter */
	{.mem_mask = XE_BO_FLAG_VRAM0,
	 .attach_ops = &xe_dma_buf_attach_ops},
	{.mem_mask = XE_BO_FLAG_VRAM0 | XE_BO_FLAG_NEEDS_CPU_ACCESS,
	 .attach_ops = &xe_dma_buf_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_FLAG_VRAM0,
	 .attach_ops = &nop2p_attach_ops},
	{.mem_mask = XE_BO_FLAG_VRAM0,
	 .attach_ops = &nop2p_attach_ops,
	 .force_different_devices = true},

	/* attach_ops == NULL: non-dynamic importer */
	{.mem_mask = XE_BO_FLAG_VRAM0},
	{.mem_mask = XE_BO_FLAG_VRAM0,
	 .force_different_devices = true},

	/* System-memory-only exporter */
	{.mem_mask = XE_BO_FLAG_SYSTEM,
	 .attach_ops = &xe_dma_buf_attach_ops},
	{.mem_mask = XE_BO_FLAG_SYSTEM,
	 .attach_ops = &xe_dma_buf_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_FLAG_SYSTEM,
	 .attach_ops = &nop2p_attach_ops},
	{.mem_mask = XE_BO_FLAG_SYSTEM,
	 .attach_ops = &nop2p_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_FLAG_SYSTEM},
	{.mem_mask = XE_BO_FLAG_SYSTEM,
	 .force_different_devices = true},

	/* Exporter allowed in either system memory or VRAM */
	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
	 .attach_ops = &xe_dma_buf_attach_ops},
	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0 |
		     XE_BO_FLAG_NEEDS_CPU_ACCESS,
	 .attach_ops = &xe_dma_buf_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
	 .attach_ops = &nop2p_attach_ops},
	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
	 .attach_ops = &nop2p_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0},
	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
	 .force_different_devices = true},

	{}
};
248 
249 static int dma_buf_run_device(struct xe_device *xe)
250 {
251 	const struct dma_buf_test_params *params;
252 	struct kunit *test = kunit_get_current_test();
253 
254 	xe_pm_runtime_get(xe);
255 	for (params = test_params; params->mem_mask; ++params) {
256 		struct dma_buf_test_params p = *params;
257 
258 		p.base.id = XE_TEST_LIVE_DMA_BUF;
259 		test->priv = &p;
260 		xe_test_dmabuf_import_same_driver(xe);
261 	}
262 	xe_pm_runtime_put(xe);
263 
264 	/* A non-zero return would halt iteration over driver devices */
265 	return 0;
266 }
267 
268 static void xe_dma_buf_kunit(struct kunit *test)
269 {
270 	struct xe_device *xe = test->priv;
271 
272 	dma_buf_run_device(xe);
273 }
274 
/* Test cases, parameterized over all live xe PCI devices. */
static struct kunit_case xe_dma_buf_tests[] = {
	KUNIT_CASE_PARAM(xe_dma_buf_kunit, xe_pci_live_device_gen_param),
	{}
};
279 
/* Suite definition; exported only when KUnit is enabled. */
VISIBLE_IF_KUNIT
struct kunit_suite xe_dma_buf_test_suite = {
	.name = "xe_dma_buf",
	.test_cases = xe_dma_buf_tests,
	.init = xe_kunit_helper_xe_device_live_test_init,
};
EXPORT_SYMBOL_IF_KUNIT(xe_dma_buf_test_suite);
287