xref: /linux/drivers/gpu/drm/xe/tests/xe_dma_buf.c (revision dd08ebf6c3525a7ea2186e636df064ea47281987)
1 // SPDX-License-Identifier: GPL-2.0 AND MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include <kunit/test.h>
7 
8 #include "xe_pci.h"
9 
10 static bool p2p_enabled(struct dma_buf_test_params *params)
11 {
12 	return IS_ENABLED(CONFIG_PCI_P2PDMA) && params->attach_ops &&
13 		params->attach_ops->allow_peer2peer;
14 }
15 
16 static bool is_dynamic(struct dma_buf_test_params *params)
17 {
18 	return IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY) && params->attach_ops &&
19 		params->attach_ops->move_notify;
20 }
21 
/*
 * check_residency() - Verify exporter/importer placement and eviction
 * interaction for a shared dma-buf.
 * @test: The kunit test in which the check runs.
 * @exported: The exporting buffer object.
 * @imported: The importing buffer object (same bo as @exported if import
 *            took the same-device shortcut).
 * @dmabuf: The dma-buf connecting exporter and importer.
 *
 * Checks that the exporter sits in the memory type the test parameters
 * dictate, then evicts the exporter and verifies that the importer gets
 * evicted along with it (via move_notify()), and finally re-validates
 * the importer and checks the resulting placements. Both bos must be
 * locked by the caller (asserted below).
 */
static void check_residency(struct kunit *test, struct xe_bo *exported,
			    struct xe_bo *imported, struct dma_buf *dmabuf)
{
	struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
	u32 mem_type;
	int ret;

	xe_bo_assert_held(exported);
	xe_bo_assert_held(imported);

	/*
	 * Expected exporter placement: VRAM0 by default, falling back to
	 * TT when VRAM isn't allowed, when a cross-device import can't use
	 * P2P, or when a non-dynamic cross-device importer forced a pinned
	 * migration to TT.
	 */
	mem_type = XE_PL_VRAM0;
	if (!(params->mem_mask & XE_BO_CREATE_VRAM0_BIT))
		/* No VRAM allowed */
		mem_type = XE_PL_TT;
	else if (params->force_different_devices && !p2p_enabled(params))
		/* No P2P */
		mem_type = XE_PL_TT;
	else if (params->force_different_devices && !is_dynamic(params) &&
		 (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT))
		/* Pin migrated to TT */
		mem_type = XE_PL_TT;

	if (!xe_bo_is_mem_type(exported, mem_type)) {
		KUNIT_FAIL(test, "Exported bo was not in expected memory type.\n");
		return;
	}

	/* A pinned exporter can't be evicted; nothing more to check. */
	if (xe_bo_is_pinned(exported))
		return;

	/*
	 * Evict exporter. Note that the gem object dma_buf member isn't
	 * set from xe_gem_prime_export(), and it's needed for the move_notify()
	 * functionality, so hack that up here. Evicting the exported bo will
	 * evict also the imported bo through the move_notify() functionality if
	 * importer is on a different device. If they're on the same device,
	 * the exporter and the importer should be the same bo.
	 */
	swap(exported->ttm.base.dma_buf, dmabuf);
	ret = xe_bo_evict(exported, true);
	swap(exported->ttm.base.dma_buf, dmabuf);
	if (ret) {
		/* Interrupted eviction isn't a test failure, just bail. */
		if (ret != -EINTR && ret != -ERESTARTSYS)
			KUNIT_FAIL(test, "Evicting exporter failed with err=%d.\n",
				   ret);
		return;
	}

	/* Verify that also importer has been evicted to SYSTEM */
	if (!xe_bo_is_mem_type(imported, XE_PL_SYSTEM)) {
		KUNIT_FAIL(test, "Importer wasn't properly evicted.\n");
		return;
	}

	/* Re-validate the importer. This should move also exporter in. */
	ret = xe_bo_validate(imported, NULL, false);
	if (ret) {
		/* Interrupted validation isn't a test failure, just bail. */
		if (ret != -EINTR && ret != -ERESTARTSYS)
			KUNIT_FAIL(test, "Validating importer failed with err=%d.\n",
				   ret);
		return;
	}

	/*
	 * If on different devices, the exporter is kept in system memory
	 * if possible, saving a migration step as the transfer is likely
	 * just as fast from system memory.
	 */
	if (params->force_different_devices &&
	    params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)
		KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, XE_PL_TT));
	else
		KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));

	/* Same-device imports must have reused the very same bo. */
	if (params->force_different_devices)
		KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(imported, XE_PL_TT));
	else
		KUNIT_EXPECT_TRUE(test, exported == imported);
}
101 
/*
 * xe_test_dmabuf_import_same_driver() - Export a bo as a dma-buf and
 * import it back through the same driver, then check the outcome
 * against the current test parameters (test->priv).
 * @xe: The xe device to test against.
 *
 * Both expected-success and expected-failure import paths are checked:
 * a cross-device import without P2P and without a SYSTEM placement must
 * fail with -EOPNOTSUPP; anything else that fails is a test failure.
 */
static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
{
	struct kunit *test = xe_cur_kunit();
	struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
	struct drm_gem_object *import;
	struct dma_buf *dmabuf;
	struct xe_bo *bo;

	/* No VRAM on this device? */
	if (!ttm_manager_type(&xe->ttm, XE_PL_VRAM0) &&
	    (params->mem_mask & XE_BO_CREATE_VRAM0_BIT))
		return;

	kunit_info(test, "running %s\n", __func__);
	bo = xe_bo_create(xe, NULL, NULL, PAGE_SIZE, ttm_bo_type_device,
			  XE_BO_CREATE_USER_BIT | params->mem_mask);
	if (IS_ERR(bo)) {
		KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
			   PTR_ERR(bo));
		return;
	}

	dmabuf = xe_gem_prime_export(&bo->ttm.base, 0);
	if (IS_ERR(dmabuf)) {
		KUNIT_FAIL(test, "xe_gem_prime_export() failed with err=%ld\n",
			   PTR_ERR(dmabuf));
		goto out;
	}

	import = xe_gem_prime_import(&xe->drm, dmabuf);
	if (!IS_ERR(import)) {
		struct xe_bo *import_bo = gem_to_xe_bo(import);

		/*
		 * Did import succeed when it shouldn't due to lack of p2p support?
		 */
		if (params->force_different_devices &&
		    !p2p_enabled(params) &&
		    !(params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) {
			KUNIT_FAIL(test,
				   "xe_gem_prime_import() succeeded when it shouldn't have\n");
		} else {
			int err;

			/* Is everything where we expect it to be? */
			xe_bo_lock_no_vm(import_bo, NULL);
			err = xe_bo_validate(import_bo, NULL, false);
			/* Interrupted validation is not a test failure. */
			if (err && err != -EINTR && err != -ERESTARTSYS)
				KUNIT_FAIL(test,
					   "xe_bo_validate() failed with err=%d\n", err);

			check_residency(test, bo, import_bo, dmabuf);
			xe_bo_unlock_no_vm(import_bo);
		}
		drm_gem_object_put(import);
	} else if (PTR_ERR(import) != -EOPNOTSUPP) {
		/* Unexpected error code. */
		KUNIT_FAIL(test,
			   "xe_gem_prime_import failed with the wrong err=%ld\n",
			   PTR_ERR(import));
	} else if (!params->force_different_devices ||
		   p2p_enabled(params) ||
		   (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) {
		/* Shouldn't fail if we can reuse same bo, use p2p or use system */
		KUNIT_FAIL(test, "dynamic p2p attachment failed with err=%ld\n",
			   PTR_ERR(import));
	}
	dma_buf_put(dmabuf);
out:
	drm_gem_object_put(&bo->ttm.base);
}
173 
/*
 * Dynamic importer attach ops that deny peer-to-peer access. Used to
 * exercise the no-P2P import paths while still providing move_notify().
 */
static const struct dma_buf_attach_ops nop2p_attach_ops = {
	.allow_peer2peer = false,
	.move_notify = xe_dma_buf_move_notify
};
178 
179 /*
180  * We test the implementation with bos of different residency and with
181  * importers with different capabilities; some lacking p2p support and some
182  * lacking dynamic capabilities (attach_ops == NULL). We also fake
183  * different devices avoiding the import shortcut that just reuses the same
184  * gem object.
185  */
/*
 * The table is terminated by an all-zero sentinel entry; iteration in
 * dma_buf_run_device() stops at the first entry with mem_mask == 0.
 */
static const struct dma_buf_test_params test_params[] = {
	/* VRAM-only exporter, fully capable (P2P + dynamic) importer. */
	{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &xe_dma_buf_attach_ops},
	{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &xe_dma_buf_attach_ops,
	 .force_different_devices = true},

	/* VRAM-only exporter, dynamic importer without P2P. */
	{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &nop2p_attach_ops},
	{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &nop2p_attach_ops,
	 .force_different_devices = true},

	/* VRAM-only exporter, non-dynamic importer (attach_ops == NULL). */
	{.mem_mask = XE_BO_CREATE_VRAM0_BIT},
	{.mem_mask = XE_BO_CREATE_VRAM0_BIT,
	 .force_different_devices = true},

	/* System-only exporter, fully capable importer. */
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
	 .attach_ops = &xe_dma_buf_attach_ops},
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
	 .attach_ops = &xe_dma_buf_attach_ops,
	 .force_different_devices = true},

	/* System-only exporter, dynamic importer without P2P. */
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
	 .attach_ops = &nop2p_attach_ops},
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
	 .attach_ops = &nop2p_attach_ops,
	 .force_different_devices = true},

	/* System-only exporter, non-dynamic importer. */
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT},
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT,
	 .force_different_devices = true},

	/* System-or-VRAM exporter, fully capable importer. */
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &xe_dma_buf_attach_ops},
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &xe_dma_buf_attach_ops,
	 .force_different_devices = true},

	/* System-or-VRAM exporter, dynamic importer without P2P. */
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &nop2p_attach_ops},
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
	 .attach_ops = &nop2p_attach_ops,
	 .force_different_devices = true},

	/* System-or-VRAM exporter, non-dynamic importer. */
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT},
	{.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT,
	 .force_different_devices = true},

	{}
};
237 
238 static int dma_buf_run_device(struct xe_device *xe)
239 {
240 	const struct dma_buf_test_params *params;
241 	struct kunit *test = xe_cur_kunit();
242 
243 	for (params = test_params; params->mem_mask; ++params) {
244 		struct dma_buf_test_params p = *params;
245 
246 		p.base.id = XE_TEST_LIVE_DMA_BUF;
247 		test->priv = &p;
248 		xe_test_dmabuf_import_same_driver(xe);
249 	}
250 
251 	/* A non-zero return would halt iteration over driver devices */
252 	return 0;
253 }
254 
/*
 * xe_dma_buf_kunit() - Entry point for the live dma-buf tests.
 * @test: The kunit test (unused here; each run re-fetches it via
 *        xe_cur_kunit() — presumably the same test; confirm against
 *        the xe_pci test plumbing).
 *
 * Runs dma_buf_run_device() for each bound xe device.
 */
void xe_dma_buf_kunit(struct kunit *test)
{
	xe_call_for_each_device(dma_buf_run_device);
}
EXPORT_SYMBOL(xe_dma_buf_kunit);
260