xref: /linux/drivers/gpu/drm/xe/tests/xe_dma_buf.c (revision 08516de501fae647fb29bf3b62718de56cc24014)
1 // SPDX-License-Identifier: GPL-2.0 AND MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include <kunit/test.h>
7 #include <kunit/visibility.h>
8 
9 #include "tests/xe_dma_buf_test.h"
10 #include "tests/xe_pci_test.h"
11 
12 #include "xe_pci.h"
13 
14 static bool p2p_enabled(struct dma_buf_test_params *params)
15 {
16 	return IS_ENABLED(CONFIG_PCI_P2PDMA) && params->attach_ops &&
17 		params->attach_ops->allow_peer2peer;
18 }
19 
20 static bool is_dynamic(struct dma_buf_test_params *params)
21 {
22 	return IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY) && params->attach_ops &&
23 		params->attach_ops->move_notify;
24 }
25 
/*
 * check_residency() - Verify placement and eviction coupling of an
 * exported / imported bo pair.
 * @test: The kunit test; test->priv holds the dma_buf_test_params.
 * @exported: The exporting bo. Caller must hold its reservation.
 * @imported: The importing bo. Caller must hold its reservation.
 * @dmabuf: The dma-buf created from @exported.
 *
 * Computes the memory type the exporter is expected to reside in given
 * the test parameters, checks it, then evicts the exporter and verifies
 * that the importer is evicted along with it (via move_notify() when the
 * devices differ, or trivially when exporter and importer are the same
 * bo). Finally re-validates the importer and checks that both bos end up
 * in the expected placements.
 */
static void check_residency(struct kunit *test, struct xe_bo *exported,
			    struct xe_bo *imported, struct dma_buf *dmabuf)
{
	struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
	u32 mem_type;
	int ret;

	xe_bo_assert_held(exported);
	xe_bo_assert_held(imported);

	/* Expected exporter placement: VRAM0 unless the params rule it out. */
	mem_type = XE_PL_VRAM0;
	if (!(params->mem_mask & XE_BO_CREATE_VRAM0_BIT))
		/* No VRAM allowed */
		mem_type = XE_PL_TT;
	else if (params->force_different_devices && !p2p_enabled(params))
		/* No P2P */
		mem_type = XE_PL_TT;
	else if (params->force_different_devices && !is_dynamic(params) &&
		 (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT))
		/* Pin migrated to TT */
		mem_type = XE_PL_TT;

	if (!xe_bo_is_mem_type(exported, mem_type)) {
		KUNIT_FAIL(test, "Exported bo was not in expected memory type.\n");
		return;
	}

	/* A pinned bo can't be evicted, so there is nothing more to check. */
	if (xe_bo_is_pinned(exported))
		return;

	/*
	 * Evict exporter. Note that the gem object dma_buf member isn't
	 * set from xe_gem_prime_export(), and it's needed for the move_notify()
	 * functionality, so hack that up here. Evicting the exported bo will
	 * evict also the imported bo through the move_notify() functionality if
	 * importer is on a different device. If they're on the same device,
	 * the exporter and the importer should be the same bo.
	 */
	swap(exported->ttm.base.dma_buf, dmabuf);
	ret = xe_bo_evict(exported, true);
	swap(exported->ttm.base.dma_buf, dmabuf);
	if (ret) {
		/* An interrupted eviction is not a test failure. */
		if (ret != -EINTR && ret != -ERESTARTSYS)
			KUNIT_FAIL(test, "Evicting exporter failed with err=%d.\n",
				   ret);
		return;
	}

	/* Verify that also importer has been evicted to SYSTEM */
	if (!xe_bo_is_mem_type(imported, XE_PL_SYSTEM)) {
		KUNIT_FAIL(test, "Importer wasn't properly evicted.\n");
		return;
	}

	/* Re-validate the importer. This should move also exporter in. */
	ret = xe_bo_validate(imported, NULL, false);
	if (ret) {
		/* An interrupted validation is not a test failure. */
		if (ret != -EINTR && ret != -ERESTARTSYS)
			KUNIT_FAIL(test, "Validating importer failed with err=%d.\n",
				   ret);
		return;
	}

	/*
	 * If on different devices, the exporter is kept in system if
	 * possible, saving a migration step as the transfer is likely
	 * just as fast from system memory.
	 */
	if (params->force_different_devices &&
	    params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)
		KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, XE_PL_TT));
	else
		KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));

	if (params->force_different_devices)
		KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(imported, XE_PL_TT));
	else
		KUNIT_EXPECT_TRUE(test, exported == imported);
}
105 
/*
 * xe_test_dmabuf_import_same_driver() - Export a bo as a dma-buf and
 * import it back through the same driver, checking success/failure and
 * residency against the current dma_buf_test_params.
 * @xe: The xe device to run the test on.
 *
 * Creates a bo according to params->mem_mask, exports it, imports the
 * resulting dma-buf, and then either expects the import to fail (different
 * devices, no p2p, VRAM-only) or validates the import and calls
 * check_residency() on the pair. All references taken are dropped before
 * returning.
 */
static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
{
	struct kunit *test = xe_cur_kunit();
	struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
	struct drm_gem_object *import;
	struct dma_buf *dmabuf;
	struct xe_bo *bo;

	/* No VRAM on this device? */
	if (!ttm_manager_type(&xe->ttm, XE_PL_VRAM0) &&
	    (params->mem_mask & XE_BO_CREATE_VRAM0_BIT))
		return;

	kunit_info(test, "running %s\n", __func__);
	bo = xe_bo_create(xe, NULL, NULL, PAGE_SIZE, ttm_bo_type_device,
			  XE_BO_CREATE_USER_BIT | params->mem_mask);
	if (IS_ERR(bo)) {
		KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
			   PTR_ERR(bo));
		return;
	}

	dmabuf = xe_gem_prime_export(&bo->ttm.base, 0);
	if (IS_ERR(dmabuf)) {
		KUNIT_FAIL(test, "xe_gem_prime_export() failed with err=%ld\n",
			   PTR_ERR(dmabuf));
		goto out;
	}

	import = xe_gem_prime_import(&xe->drm, dmabuf);
	if (!IS_ERR(import)) {
		struct xe_bo *import_bo = gem_to_xe_bo(import);

		/*
		 * Did import succeed when it shouldn't due to lack of p2p support?
		 */
		if (params->force_different_devices &&
		    !p2p_enabled(params) &&
		    !(params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) {
			KUNIT_FAIL(test,
				   "xe_gem_prime_import() succeeded when it shouldn't have\n");
		} else {
			int err;

			/* Is everything where we expect it to be? */
			xe_bo_lock_no_vm(import_bo, NULL);
			err = xe_bo_validate(import_bo, NULL, false);
			/* An interrupted validation is not a test failure. */
			if (err && err != -EINTR && err != -ERESTARTSYS)
				KUNIT_FAIL(test,
					   "xe_bo_validate() failed with err=%d\n", err);

			check_residency(test, bo, import_bo, dmabuf);
			xe_bo_unlock_no_vm(import_bo);
		}
		drm_gem_object_put(import);
	} else if (PTR_ERR(import) != -EOPNOTSUPP) {
		/* Unexpected error code. */
		KUNIT_FAIL(test,
			   "xe_gem_prime_import failed with the wrong err=%ld\n",
			   PTR_ERR(import));
	} else if (!params->force_different_devices ||
		   p2p_enabled(params) ||
		   (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) {
		/* Shouldn't fail if we can reuse same bo, use p2p or use system */
		KUNIT_FAIL(test, "dynamic p2p attachment failed with err=%ld\n",
			   PTR_ERR(import));
	}
	dma_buf_put(dmabuf);
out:
	drm_gem_object_put(&bo->ttm.base);
}
177 
/*
 * Importer attach ops that are dynamic (move_notify() provided) but
 * explicitly disallow peer-to-peer DMA.
 */
static const struct dma_buf_attach_ops nop2p_attach_ops = {
	.allow_peer2peer = false,
	.move_notify = xe_dma_buf_move_notify
};
182 
/*
 * We test the implementation with bos of different residency and with
 * importers with different capabilities; some lacking p2p support and some
 * lacking dynamic capabilities (attach_ops == NULL). We also fake
 * different devices avoiding the import shortcut that just reuses the same
 * gem object.
 *
 * The table is zero-terminated (mem_mask == 0 ends iteration).
 */
static const struct dma_buf_test_params test_params[] = {
	/* VRAM-only exporter. */
	{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &xe_dma_buf_attach_ops},
	{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &xe_dma_buf_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &nop2p_attach_ops},
	{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &nop2p_attach_ops,
	 .force_different_devices = true},

	/* attach_ops == NULL: non-dynamic importer. */
	{.mem_mask = XE_BO_CREATE_VRAM0_BIT},
	{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
	 .force_different_devices = true},

	/* System-memory-only exporter. */
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
	 .attach_ops = &xe_dma_buf_attach_ops},
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
	 .attach_ops = &xe_dma_buf_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
	 .attach_ops = &nop2p_attach_ops},
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
	 .attach_ops = &nop2p_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT},
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
	 .force_different_devices = true},

	/* Exporter allowed in either system memory or VRAM. */
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &xe_dma_buf_attach_ops},
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &xe_dma_buf_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &nop2p_attach_ops},
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &nop2p_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT},
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
	 .force_different_devices = true},

	{}
};
241 
242 static int dma_buf_run_device(struct xe_device *xe)
243 {
244 	const struct dma_buf_test_params *params;
245 	struct kunit *test = xe_cur_kunit();
246 
247 	for (params = test_params; params->mem_mask; ++params) {
248 		struct dma_buf_test_params p = *params;
249 
250 		p.base.id = XE_TEST_LIVE_DMA_BUF;
251 		test->priv = &p;
252 		xe_test_dmabuf_import_same_driver(xe);
253 	}
254 
255 	/* A non-zero return would halt iteration over driver devices */
256 	return 0;
257 }
258 
/*
 * xe_dma_buf_kunit() - KUnit entry point: run the dma-buf live test on
 * every device bound to this driver.
 * @test: The kunit test. Not used directly here; dma_buf_run_device()
 *        fetches it via xe_cur_kunit() and stores per-run parameters in
 *        test->priv.
 */
void xe_dma_buf_kunit(struct kunit *test)
{
	xe_call_for_each_device(dma_buf_run_device);
}
EXPORT_SYMBOL_IF_KUNIT(xe_dma_buf_kunit);
264