// SPDX-License-Identifier: GPL-2.0 AND MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include <drm/xe_drm.h>

#include <kunit/test.h>
#include <kunit/visibility.h>

#include "tests/xe_dma_buf_test.h"
#include "tests/xe_pci_test.h"

#include "xe_pci.h"

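/* True if p2p DMA support is compiled in and the importer's attach_ops allow it. */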
static bool p2p_enabled(struct dma_buf_test_params *params)
{
	return IS_ENABLED(CONFIG_PCI_P2PDMA) && params->attach_ops &&
		params->attach_ops->allow_peer2peer;
}

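/* True if move notification is compiled in and the importer implements move_notify(). */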
static bool is_dynamic(struct dma_buf_test_params *params)
{
	return IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY) && params->attach_ops &&
		params->attach_ops->move_notify;
}

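/*
 * Verify that the exported and imported bos are resident in the memory type
 * the test parameters call for, then exercise eviction of the exporter and
 * re-validation of the importer. Both bos must be held by the caller.
 */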
static void check_residency(struct kunit *test, struct xe_bo *exported,
			    struct xe_bo *imported, struct dma_buf *dmabuf)
{
	struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
	u32 mem_type;
	int ret;

	xe_bo_assert_held(exported);
	xe_bo_assert_held(imported);

	mem_type = XE_PL_VRAM0;
	if (!(params->mem_mask & XE_BO_CREATE_VRAM0_BIT))
		/* No VRAM allowed */
		mem_type = XE_PL_TT;
	else if (params->force_different_devices && !p2p_enabled(params))
		/* No P2P */
		mem_type = XE_PL_TT;
	else if (params->force_different_devices && !is_dynamic(params) &&
		 (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT))
		/* Pin migrated to TT */
		mem_type = XE_PL_TT;

	if (!xe_bo_is_mem_type(exported, mem_type)) {
		KUNIT_FAIL(test, "Exported bo was not in expected memory type.\n");
		return;
	}

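	/* A pinned exporter can't be evicted, so there is nothing more to check. */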
	if (xe_bo_is_pinned(exported))
		return;

	/*
	 * Evict the exporter. Note that the gem object dma_buf member isn't
	 * set from xe_gem_prime_export(), and it's needed for the move_notify()
	 * functionality, so hack that up here. Evicting the exported bo will
	 * also evict the imported bo through the move_notify() functionality if
	 * the importer is on a different device. If they're on the same device,
	 * the exporter and the importer should be the same bo.
	 */
	swap(exported->ttm.base.dma_buf, dmabuf);
	ret = xe_bo_evict(exported, true);
	swap(exported->ttm.base.dma_buf, dmabuf);
	if (ret) {
		if (ret != -EINTR && ret != -ERESTARTSYS)
			KUNIT_FAIL(test, "Evicting exporter failed with err=%d.\n",
				   ret);
		return;
	}

	/* Verify that the importer has also been evicted to SYSTEM */
	if (exported != imported && !xe_bo_is_mem_type(imported, XE_PL_SYSTEM)) {
		KUNIT_FAIL(test, "Importer wasn't properly evicted.\n");
		return;
	}

	/* Re-validate the importer. This should also move the exporter in. */
	ret = xe_bo_validate(imported, NULL, false);
	if (ret) {
		if (ret != -EINTR && ret != -ERESTARTSYS)
			KUNIT_FAIL(test, "Validating importer failed with err=%d.\n",
				   ret);
		return;
	}

	/*
	 * If on different devices, the exporter is kept in system memory if
	 * possible, saving a migration step as the transfer is just as fast
	 * from system memory.
	 */
	if (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)
		KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, XE_PL_TT));
	else
		KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));

	if (params->force_different_devices)
		KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(imported, XE_PL_TT));
	else
		KUNIT_EXPECT_TRUE(test, exported == imported);
}

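/*
 * Export a bo and import it again through the same driver, optionally
 * pretending the importer is a different device, then check placement,
 * residency and the error paths the parameters make us expect.
 */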
static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
{
	struct kunit *test = xe_cur_kunit();
	struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
	struct drm_gem_object *import;
	struct dma_buf *dmabuf;
	struct xe_bo *bo;
	size_t size;

	/* No VRAM on this device? */
	if (!ttm_manager_type(&xe->ttm, XE_PL_VRAM0) &&
	    (params->mem_mask & XE_BO_CREATE_VRAM0_BIT))
		return;

	size = PAGE_SIZE;
	if ((params->mem_mask & XE_BO_CREATE_VRAM0_BIT) &&
	    xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
		size = SZ_64K;

	kunit_info(test, "running %s\n", __func__);
	bo = xe_bo_create_user(xe, NULL, NULL, size, DRM_XE_GEM_CPU_CACHING_WC,
			       ttm_bo_type_device, XE_BO_CREATE_USER_BIT | params->mem_mask);
	if (IS_ERR(bo)) {
		KUNIT_FAIL(test, "xe_bo_create_user() failed with err=%ld\n",
			   PTR_ERR(bo));
		return;
	}

	dmabuf = xe_gem_prime_export(&bo->ttm.base, 0);
	if (IS_ERR(dmabuf)) {
		KUNIT_FAIL(test, "xe_gem_prime_export() failed with err=%ld\n",
			   PTR_ERR(dmabuf));
		goto out;
	}

	import = xe_gem_prime_import(&xe->drm, dmabuf);
	if (!IS_ERR(import)) {
		struct xe_bo *import_bo = gem_to_xe_bo(import);

		/*
		 * Did import succeed when it shouldn't due to lack of p2p support?
		 */
		if (params->force_different_devices &&
		    !p2p_enabled(params) &&
		    !(params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) {
			KUNIT_FAIL(test,
				   "xe_gem_prime_import() succeeded when it shouldn't have\n");
		} else {
			int err;

			/* Is everything where we expect it to be? */
			xe_bo_lock(import_bo, false);
			err = xe_bo_validate(import_bo, NULL, false);

			/* Pinning in VRAM is not allowed. */
			if (!is_dynamic(params) &&
			    params->force_different_devices &&
			    !(params->mem_mask & XE_BO_CREATE_SYSTEM_BIT))
				KUNIT_EXPECT_EQ(test, err, -EINVAL);
			/* Otherwise only expect interrupts or success. */
			else if (err && err != -EINTR && err != -ERESTARTSYS)
				KUNIT_EXPECT_TRUE(test, !err || err == -EINTR ||
						  err == -ERESTARTSYS);

			if (!err)
				check_residency(test, bo, import_bo, dmabuf);
			xe_bo_unlock(import_bo);
		}
		drm_gem_object_put(import);
	} else if (PTR_ERR(import) != -EOPNOTSUPP) {
		/* Unexpected error code. */
		KUNIT_FAIL(test,
			   "xe_gem_prime_import failed with the wrong err=%ld\n",
			   PTR_ERR(import));
	} else if (!params->force_different_devices ||
		   p2p_enabled(params) ||
		   (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) {
		/* Shouldn't fail if we can reuse the same bo, use p2p or use system memory */
		KUNIT_FAIL(test, "dynamic p2p attachment failed with err=%ld\n",
			   PTR_ERR(import));
	}
	dma_buf_put(dmabuf);
out:
	drm_gem_object_put(&bo->ttm.base);
}

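/* Dynamic attachment ops with p2p disabled, modelling an importer lacking p2p support. */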
static const struct dma_buf_attach_ops nop2p_attach_ops = {
	.allow_peer2peer = false,
	.move_notify = xe_dma_buf_move_notify
};

/*
 * We test the implementation with bos of different residency and with
 * importers of different capabilities: some lacking p2p support and some
 * lacking dynamic capabilities (attach_ops == NULL). We also fake
 * different devices, avoiding the import shortcut that just reuses the
 * same gem object.
 */
static const struct dma_buf_test_params test_params[] = {
	{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &xe_dma_buf_attach_ops},
	{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &xe_dma_buf_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &nop2p_attach_ops},
	{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &nop2p_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_CREATE_VRAM0_BIT},
	{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
	 .attach_ops = &xe_dma_buf_attach_ops},
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
	 .attach_ops = &xe_dma_buf_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
	 .attach_ops = &nop2p_attach_ops},
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
	 .attach_ops = &nop2p_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT},
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &xe_dma_buf_attach_ops},
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &xe_dma_buf_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &nop2p_attach_ops},
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &nop2p_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT},
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
	 .force_different_devices = true},

	{}
};

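/* Run the import test once for each entry of test_params on the given device. */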
static int dma_buf_run_device(struct xe_device *xe)
{
	const struct dma_buf_test_params *params;
	struct kunit *test = xe_cur_kunit();

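	/*
	 * The zero-initialized sentinel entry terminates the table. Each
	 * entry is copied so the test id can be set without modifying the
	 * const table.
	 */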
	for (params = test_params; params->mem_mask; ++params) {
		struct dma_buf_test_params p = *params;

		p.base.id = XE_TEST_LIVE_DMA_BUF;
		test->priv = &p;
		xe_test_dmabuf_import_same_driver(xe);
	}

	/* A non-zero return would halt iteration over driver devices */
	return 0;
}

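/* KUnit entry point: run the dma-buf tests on each device bound to the driver. */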
void xe_dma_buf_kunit(struct kunit *test)
{
	xe_call_for_each_device(dma_buf_run_device);
}
EXPORT_SYMBOL_IF_KUNIT(xe_dma_buf_kunit);