// SPDX-License-Identifier: GPL-2.0 AND MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include <uapi/drm/xe_drm.h>

#include <kunit/test.h>
#include <kunit/visibility.h>

#include "tests/xe_kunit_helpers.h"
#include "tests/xe_pci_test.h"

#include "xe_pci.h"
#include "xe_pm.h"

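/*
 * Helpers for interpreting the test parameters: peer-to-peer is only
 * expected when the kernel is built with CONFIG_PCI_P2PDMA and the
 * importer's attach_ops allow it; likewise, a dynamic importer requires
 * CONFIG_DMABUF_MOVE_NOTIFY and a move_notify() callback.
 */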
static bool p2p_enabled(struct dma_buf_test_params *params)
{
	return IS_ENABLED(CONFIG_PCI_P2PDMA) && params->attach_ops &&
		params->attach_ops->allow_peer2peer;
}

static bool is_dynamic(struct dma_buf_test_params *params)
{
	return IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY) && params->attach_ops &&
		params->attach_ops->move_notify;
}

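/*
 * Check that the exported and imported bos sit in the placements expected
 * for the current parameter set, then exercise eviction of the exporter
 * and re-validation of the importer.
 */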
static void check_residency(struct kunit *test, struct xe_bo *exported,
			    struct xe_bo *imported, struct dma_buf *dmabuf,
			    struct drm_exec *exec)
{
	struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
	u32 mem_type;
	int ret;

	xe_bo_assert_held(exported);
	xe_bo_assert_held(imported);

	mem_type = XE_PL_VRAM0;
	if (!(params->mem_mask & XE_BO_FLAG_VRAM0))
		/* No VRAM allowed */
		mem_type = XE_PL_TT;
	else if (params->force_different_devices && !p2p_enabled(params))
		/* No P2P */
		mem_type = XE_PL_TT;
	else if (params->force_different_devices && !is_dynamic(params) &&
		 (params->mem_mask & XE_BO_FLAG_SYSTEM))
		/* Pin migrated to TT */
		mem_type = XE_PL_TT;

	if (!xe_bo_is_mem_type(exported, mem_type)) {
		KUNIT_FAIL(test, "Exported bo was not in expected memory type.\n");
		return;
	}

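	/* A pinned bo can't be evicted, so there is nothing more to check. */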
	if (xe_bo_is_pinned(exported))
		return;

	/*
	 * Evict the exporter. If the importer is on a different device,
	 * evicting the exported bo will also evict the imported bo through
	 * the move_notify() functionality. If they're on the same device,
	 * the exporter and the importer should be the same bo.
	 */
	ret = xe_bo_evict(exported, exec);
	if (ret) {
		if (ret != -EINTR && ret != -ERESTARTSYS)
			KUNIT_FAIL(test, "Evicting exporter failed with err=%d.\n",
				   ret);
		return;
	}

	/* Verify that the importer has also been evicted to SYSTEM */
	if (exported != imported && !xe_bo_is_mem_type(imported, XE_PL_SYSTEM)) {
		KUNIT_FAIL(test, "Importer wasn't properly evicted.\n");
		return;
	}

	/* Re-validate the importer. This should also move the exporter back in. */
	ret = xe_bo_validate(imported, NULL, false, exec);
	if (ret) {
		if (ret != -EINTR && ret != -ERESTARTSYS)
			KUNIT_FAIL(test, "Validating importer failed with err=%d.\n",
				   ret);
		return;
	}

	KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));

	if (params->force_different_devices)
		KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(imported, XE_PL_TT));
	else
		KUNIT_EXPECT_TRUE(test, exported == imported);
}

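/*
 * Export a bo, import it back into the same driver, and verify the
 * placement, validation and eviction behaviour expected for the current
 * parameter set.
 */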
static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
{
	struct kunit *test = kunit_get_current_test();
	struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
	struct drm_gem_object *import;
	struct dma_buf *dmabuf;
	struct xe_bo *bo;
	size_t size;

	/* No VRAM on this device? */
	if (!ttm_manager_type(&xe->ttm, XE_PL_VRAM0) &&
	    (params->mem_mask & XE_BO_FLAG_VRAM0))
		return;

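	/* Devices flagged XE_VRAM_FLAGS_NEED64K use a 64K minimum VRAM page. */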
	size = PAGE_SIZE;
	if ((params->mem_mask & XE_BO_FLAG_VRAM0) &&
	    xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
		size = SZ_64K;

	kunit_info(test, "running %s\n", __func__);
	bo = xe_bo_create_user(xe, NULL, size, DRM_XE_GEM_CPU_CACHING_WC,
			       params->mem_mask, NULL);
	if (IS_ERR(bo)) {
		KUNIT_FAIL(test, "xe_bo_create_user() failed with err=%ld\n",
			   PTR_ERR(bo));
		return;
	}

	dmabuf = xe_gem_prime_export(&bo->ttm.base, 0);
	if (IS_ERR(dmabuf)) {
		KUNIT_FAIL(test, "xe_gem_prime_export() failed with err=%ld\n",
			   PTR_ERR(dmabuf));
		goto out;
	}
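	/*
	 * The prime export ioctl path would normally set this pointer; since
	 * the test calls the export function directly, set it by hand.
	 */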
	bo->ttm.base.dma_buf = dmabuf;

	import = xe_gem_prime_import(&xe->drm, dmabuf);
	if (!IS_ERR(import)) {
		struct xe_bo *import_bo = gem_to_xe_bo(import);

		/*
		 * Did import succeed when it shouldn't due to lack of p2p support?
		 */
		if (params->force_different_devices &&
		    !p2p_enabled(params) &&
		    !(params->mem_mask & XE_BO_FLAG_SYSTEM)) {
			KUNIT_FAIL(test,
				   "xe_gem_prime_import() succeeded when it shouldn't have\n");
		} else {
			struct drm_exec *exec = XE_VALIDATION_OPT_OUT;
			int err;

			/* Is everything where we expect it to be? */
			xe_bo_lock(import_bo, false);
			err = xe_bo_validate(import_bo, NULL, false, exec);

			/* Pinning in VRAM is not allowed. */
			if (!is_dynamic(params) &&
			    params->force_different_devices &&
			    !(params->mem_mask & XE_BO_FLAG_SYSTEM))
				KUNIT_EXPECT_EQ(test, err, -EINVAL);
			/* Otherwise only expect interrupts or success. */
			else if (err && err != -EINTR && err != -ERESTARTSYS)
				KUNIT_EXPECT_TRUE(test, !err || err == -EINTR ||
						  err == -ERESTARTSYS);

			if (!err)
				check_residency(test, bo, import_bo, dmabuf, exec);
			xe_bo_unlock(import_bo);
		}
		drm_gem_object_put(import);
	} else if (PTR_ERR(import) != -EOPNOTSUPP) {
		/* Unexpected error code. */
		KUNIT_FAIL(test,
			   "xe_gem_prime_import failed with the wrong err=%ld\n",
			   PTR_ERR(import));
	} else if (!params->force_different_devices ||
		   p2p_enabled(params) ||
		   (params->mem_mask & XE_BO_FLAG_SYSTEM)) {
		/* Shouldn't fail if we can reuse the same bo, use p2p, or use system memory */
		KUNIT_FAIL(test, "dynamic p2p attachment failed with err=%ld\n",
			   PTR_ERR(import));
	}
	bo->ttm.base.dma_buf = NULL;
	dma_buf_put(dmabuf);
out:
	drm_gem_object_put(&bo->ttm.base);
}

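/* Dynamic attach ops (move_notify) that deliberately disallow peer-to-peer. */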
static const struct dma_buf_attach_ops nop2p_attach_ops = {
	.allow_peer2peer = false,
	.move_notify = xe_dma_buf_move_notify
};

/*
 * We test the implementation with bos of different residency and with
 * importers of different capabilities; some lacking p2p support and some
 * lacking dynamic capabilities (attach_ops == NULL). We also fake
 * different devices, avoiding the import shortcut that just reuses the
 * same gem object.
 */
static const struct dma_buf_test_params test_params[] = {
	{.mem_mask = XE_BO_FLAG_VRAM0,
	 .attach_ops = &xe_dma_buf_attach_ops},
	{.mem_mask = XE_BO_FLAG_VRAM0 | XE_BO_FLAG_NEEDS_CPU_ACCESS,
	 .attach_ops = &xe_dma_buf_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_FLAG_VRAM0,
	 .attach_ops = &nop2p_attach_ops},
	{.mem_mask = XE_BO_FLAG_VRAM0,
	 .attach_ops = &nop2p_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_FLAG_VRAM0},
	{.mem_mask = XE_BO_FLAG_VRAM0,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_FLAG_SYSTEM,
	 .attach_ops = &xe_dma_buf_attach_ops},
	{.mem_mask = XE_BO_FLAG_SYSTEM,
	 .attach_ops = &xe_dma_buf_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_FLAG_SYSTEM,
	 .attach_ops = &nop2p_attach_ops},
	{.mem_mask = XE_BO_FLAG_SYSTEM,
	 .attach_ops = &nop2p_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_FLAG_SYSTEM},
	{.mem_mask = XE_BO_FLAG_SYSTEM,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
	 .attach_ops = &xe_dma_buf_attach_ops},
	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0 |
		     XE_BO_FLAG_NEEDS_CPU_ACCESS,
	 .attach_ops = &xe_dma_buf_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
	 .attach_ops = &nop2p_attach_ops},
	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
	 .attach_ops = &nop2p_attach_ops,
	 .force_different_devices = true},

	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0},
	{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
	 .force_different_devices = true},

	{}
};

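/*
 * Run the import test once per entry in test_params[], holding a runtime
 * PM reference on the device for the duration.
 */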
static int dma_buf_run_device(struct xe_device *xe)
{
	const struct dma_buf_test_params *params;
	struct kunit *test = kunit_get_current_test();

	xe_pm_runtime_get(xe);
	for (params = test_params; params->mem_mask; ++params) {
		struct dma_buf_test_params p = *params;

		p.base.id = XE_TEST_LIVE_DMA_BUF;
		test->priv = &p;
		xe_test_dmabuf_import_same_driver(xe);
	}
	xe_pm_runtime_put(xe);

	/* A non-zero return would halt iteration over driver devices */
	return 0;
}

static void xe_dma_buf_kunit(struct kunit *test)
{
	struct xe_device *xe = test->priv;

	dma_buf_run_device(xe);
}

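/* Parameterized so the case runs once for each live xe device found. */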
static struct kunit_case xe_dma_buf_tests[] = {
	KUNIT_CASE_PARAM(xe_dma_buf_kunit, xe_pci_live_device_gen_param),
	{}
};

VISIBLE_IF_KUNIT
struct kunit_suite xe_dma_buf_test_suite = {
	.name = "xe_dma_buf",
	.test_cases = xe_dma_buf_tests,
	.init = xe_kunit_helper_xe_device_live_test_init,
};
EXPORT_SYMBOL_IF_KUNIT(xe_dma_buf_test_suite);