xref: /linux/drivers/gpu/drm/xe/tests/xe_dma_buf.c (revision 8cdcef1c2f82d207aa8b2a02298fbc17191c6261)
1 // SPDX-License-Identifier: GPL-2.0 AND MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include <kunit/test.h>
7 #include <kunit/visibility.h>
8 
9 #include "tests/xe_dma_buf_test.h"
10 #include "tests/xe_pci_test.h"
11 
12 #include "xe_pci.h"
13 
14 static bool p2p_enabled(struct dma_buf_test_params *params)
15 {
16 	return IS_ENABLED(CONFIG_PCI_P2PDMA) && params->attach_ops &&
17 		params->attach_ops->allow_peer2peer;
18 }
19 
20 static bool is_dynamic(struct dma_buf_test_params *params)
21 {
22 	return IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY) && params->attach_ops &&
23 		params->attach_ops->move_notify;
24 }
25 
/*
 * check_residency() - Check exporter and importer bo placement, then verify
 * that evicting the exporter also evicts the importer, and that re-validating
 * the importer moves the exporter back in.
 * @test: The kunit test we're running in.
 * @exported: The exporter bo. Must be locked by the caller.
 * @imported: The importer bo. Must be locked by the caller.
 * @dmabuf: The dma-buf connecting exporter and importer.
 */
static void check_residency(struct kunit *test, struct xe_bo *exported,
			    struct xe_bo *imported, struct dma_buf *dmabuf)
{
	struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
	u32 mem_type;
	int ret;

	xe_bo_assert_held(exported);
	xe_bo_assert_held(imported);

	/* Work out the placement the exporter should currently have. */
	mem_type = XE_PL_VRAM0;
	if (!(params->mem_mask & XE_BO_CREATE_VRAM0_BIT))
		/* No VRAM allowed */
		mem_type = XE_PL_TT;
	else if (params->force_different_devices && !p2p_enabled(params))
		/* No P2P */
		mem_type = XE_PL_TT;
	else if (params->force_different_devices && !is_dynamic(params) &&
		 (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT))
		/* Pin migrated to TT */
		mem_type = XE_PL_TT;

	if (!xe_bo_is_mem_type(exported, mem_type)) {
		KUNIT_FAIL(test, "Exported bo was not in expected memory type.\n");
		return;
	}

	/* A pinned exporter can't be evicted; the checks below don't apply. */
	if (xe_bo_is_pinned(exported))
		return;

	/*
	 * Evict exporter. Note that the gem object dma_buf member isn't
	 * set from xe_gem_prime_export(), and it's needed for the move_notify()
	 * functionality, so hack that up here. Evicting the exported bo will
	 * evict also the imported bo through the move_notify() functionality if
	 * importer is on a different device. If they're on the same device,
	 * the exporter and the importer should be the same bo.
	 */
	swap(exported->ttm.base.dma_buf, dmabuf);
	ret = xe_bo_evict(exported, true);
	swap(exported->ttm.base.dma_buf, dmabuf);
	if (ret) {
		/* An interrupted eviction is not a test failure. */
		if (ret != -EINTR && ret != -ERESTARTSYS)
			KUNIT_FAIL(test, "Evicting exporter failed with err=%d.\n",
				   ret);
		return;
	}

	/* Verify that also importer has been evicted to SYSTEM */
	if (exported != imported && !xe_bo_is_mem_type(imported, XE_PL_SYSTEM)) {
		KUNIT_FAIL(test, "Importer wasn't properly evicted.\n");
		return;
	}

	/* Re-validate the importer. This should move also exporter in. */
	ret = xe_bo_validate(imported, NULL, false);
	if (ret) {
		/* An interrupted validation is not a test failure. */
		if (ret != -EINTR && ret != -ERESTARTSYS)
			KUNIT_FAIL(test, "Validating importer failed with err=%d.\n",
				   ret);
		return;
	}

	/*
	 * If on different devices, the exporter is kept in system if
	 * possible, saving a migration step as the transfer is about
	 * as fast from system memory.
	 */
	if (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)
		KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, XE_PL_TT));
	else
		KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));

	if (params->force_different_devices)
		KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(imported, XE_PL_TT));
	else
		KUNIT_EXPECT_TRUE(test, exported == imported);
}
104 
105 static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
106 {
107 	struct kunit *test = xe_cur_kunit();
108 	struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
109 	struct drm_gem_object *import;
110 	struct dma_buf *dmabuf;
111 	struct xe_bo *bo;
112 
113 	/* No VRAM on this device? */
114 	if (!ttm_manager_type(&xe->ttm, XE_PL_VRAM0) &&
115 	    (params->mem_mask & XE_BO_CREATE_VRAM0_BIT))
116 		return;
117 
118 	kunit_info(test, "running %s\n", __func__);
119 	bo = xe_bo_create_user(xe, NULL, NULL, PAGE_SIZE, DRM_XE_GEM_CPU_CACHING_WC,
120 			       ttm_bo_type_device, params->mem_mask);
121 	if (IS_ERR(bo)) {
122 		KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
123 			   PTR_ERR(bo));
124 		return;
125 	}
126 
127 	dmabuf = xe_gem_prime_export(&bo->ttm.base, 0);
128 	if (IS_ERR(dmabuf)) {
129 		KUNIT_FAIL(test, "xe_gem_prime_export() failed with err=%ld\n",
130 			   PTR_ERR(dmabuf));
131 		goto out;
132 	}
133 
134 	import = xe_gem_prime_import(&xe->drm, dmabuf);
135 	if (!IS_ERR(import)) {
136 		struct xe_bo *import_bo = gem_to_xe_bo(import);
137 
138 		/*
139 		 * Did import succeed when it shouldn't due to lack of p2p support?
140 		 */
141 		if (params->force_different_devices &&
142 		    !p2p_enabled(params) &&
143 		    !(params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) {
144 			KUNIT_FAIL(test,
145 				   "xe_gem_prime_import() succeeded when it shouldn't have\n");
146 		} else {
147 			int err;
148 
149 			/* Is everything where we expect it to be? */
150 			xe_bo_lock(import_bo, false);
151 			err = xe_bo_validate(import_bo, NULL, false);
152 
153 			/* Pinning in VRAM is not allowed. */
154 			if (!is_dynamic(params) &&
155 			    params->force_different_devices &&
156 			    !(params->mem_mask & XE_BO_CREATE_SYSTEM_BIT))
157 				KUNIT_EXPECT_EQ(test, err, -EINVAL);
158 			/* Otherwise only expect interrupts or success. */
159 			else if (err && err != -EINTR && err != -ERESTARTSYS)
160 				KUNIT_EXPECT_TRUE(test, !err || err == -EINTR ||
161 						  err == -ERESTARTSYS);
162 
163 			if (!err)
164 				check_residency(test, bo, import_bo, dmabuf);
165 			xe_bo_unlock(import_bo);
166 		}
167 		drm_gem_object_put(import);
168 	} else if (PTR_ERR(import) != -EOPNOTSUPP) {
169 		/* Unexpected error code. */
170 		KUNIT_FAIL(test,
171 			   "xe_gem_prime_import failed with the wrong err=%ld\n",
172 			   PTR_ERR(import));
173 	} else if (!params->force_different_devices ||
174 		   p2p_enabled(params) ||
175 		   (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) {
176 		/* Shouldn't fail if we can reuse same bo, use p2p or use system */
177 		KUNIT_FAIL(test, "dynamic p2p attachment failed with err=%ld\n",
178 			   PTR_ERR(import));
179 	}
180 	dma_buf_put(dmabuf);
181 out:
182 	drm_gem_object_put(&bo->ttm.base);
183 }
184 
/*
 * Dynamic attach ops (move_notify provided) with peer2peer explicitly
 * disabled, used to exercise the import paths when P2P is unavailable.
 */
static const struct dma_buf_attach_ops nop2p_attach_ops = {
	.allow_peer2peer = false,
	.move_notify = xe_dma_buf_move_notify
};
189 
/*
 * We test the implementation with bos of different residency and with
 * importers with different capabilities; some lacking p2p support and some
 * lacking dynamic capabilities (attach_ops == NULL). We also fake
 * different devices avoiding the import shortcut that just reuses the same
 * gem object.
 */
static const struct dma_buf_test_params test_params[] = {
	/* VRAM-only exporter. */
	{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &xe_dma_buf_attach_ops},
	{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &xe_dma_buf_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &nop2p_attach_ops},
	{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &nop2p_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_CREATE_VRAM0_BIT},
	{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
	 .force_different_devices = true},

	/* System-memory-only exporter. */
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
	 .attach_ops = &xe_dma_buf_attach_ops},
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
	 .attach_ops = &xe_dma_buf_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
	 .attach_ops = &nop2p_attach_ops},
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
	 .attach_ops = &nop2p_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT},
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
	 .force_different_devices = true},

	/* Exporter allowed in either system memory or VRAM. */
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &xe_dma_buf_attach_ops},
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &xe_dma_buf_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &nop2p_attach_ops},
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &nop2p_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT},
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
	 .force_different_devices = true},

	/* Zero mem_mask terminates the table (see dma_buf_run_device()). */
	{}
};
248 
249 static int dma_buf_run_device(struct xe_device *xe)
250 {
251 	const struct dma_buf_test_params *params;
252 	struct kunit *test = xe_cur_kunit();
253 
254 	for (params = test_params; params->mem_mask; ++params) {
255 		struct dma_buf_test_params p = *params;
256 
257 		p.base.id = XE_TEST_LIVE_DMA_BUF;
258 		test->priv = &p;
259 		xe_test_dmabuf_import_same_driver(xe);
260 	}
261 
262 	/* A non-zero return would halt iteration over driver devices */
263 	return 0;
264 }
265 
/*
 * Kunit entry point: run the dma-buf tests on every device bound to the
 * driver. The @test argument is unused here; callees fetch the current
 * test via xe_cur_kunit().
 */
void xe_dma_buf_kunit(struct kunit *test)
{
	xe_call_for_each_device(dma_buf_run_device);
}
EXPORT_SYMBOL_IF_KUNIT(xe_dma_buf_kunit);
271