// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_dma_buf.h"

#include <kunit/test.h>
#include <linux/dma-buf.h>
#include <linux/pci-p2pdma.h>

#include <drm/drm_device.h>
#include <drm/drm_prime.h>
#include <drm/ttm/ttm_tt.h>

#include "tests/xe_test.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_pm.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_vm.h"

MODULE_IMPORT_NS(DMA_BUF);

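/**
 * xe_dma_buf_attach() - dma_buf attach callback
 * @dmabuf: The exported dma-buf.
 * @attach: The new attachment.
 *
 * Downgrade peer2peer if there is no P2P DMA path between the exporting
 * device and the importer, and reject the attachment entirely if the
 * buffer can then neither be accessed via P2P nor migrated to TT (system)
 * memory. On success, take a runtime PM reference for the lifetime of the
 * attachment.
 *
 * Return: 0 on success, -EOPNOTSUPP if the importer cannot access the
 * buffer.
 */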
static int xe_dma_buf_attach(struct dma_buf *dmabuf,
			     struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;

	if (attach->peer2peer &&
	    pci_p2pdma_distance(to_pci_dev(obj->dev->dev), attach->dev, false) < 0)
		attach->peer2peer = false;

	if (!attach->peer2peer && !xe_bo_can_migrate(gem_to_xe_bo(obj), XE_PL_TT))
		return -EOPNOTSUPP;

	xe_pm_runtime_get(to_xe_device(obj->dev));
	return 0;
}

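/**
 * xe_dma_buf_detach() - dma_buf detach callback
 * @dmabuf: The exported dma-buf.
 * @attach: The attachment being removed.
 *
 * Release the runtime PM reference taken in xe_dma_buf_attach().
 */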
static void xe_dma_buf_detach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;

	xe_pm_runtime_put(to_xe_device(obj->dev));
}

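/**
 * xe_dma_buf_pin() - dma_buf pin callback
 * @attach: The attachment to pin the buffer for.
 *
 * Migrate the buffer to TT memory and pin it there, so that non-dynamic
 * importers get a stable mapping in a placement they can access. A buffer
 * already pinned in another placement cannot be migrated and is rejected.
 *
 * Return: 0 on success, -EINVAL if the buffer is pinned elsewhere, or the
 * error from the migration attempt.
 */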
static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct xe_device *xe = xe_bo_device(bo);
	int ret;

	/*
	 * For now only support pinning in TT memory, for two reasons:
	 * 1) Avoid pinning in a placement not accessible to some importers.
	 * 2) Pinning in VRAM requires PIN accounting which is a to-do.
	 */
	if (xe_bo_is_pinned(bo) && bo->ttm.resource->placement != XE_PL_TT) {
		drm_dbg(&xe->drm, "Can't migrate pinned bo for dma-buf pin.\n");
		return -EINVAL;
	}

	ret = xe_bo_migrate(bo, XE_PL_TT);
	if (ret) {
		if (ret != -EINTR && ret != -ERESTARTSYS)
			drm_dbg(&xe->drm,
				"Failed migrating dma-buf to TT memory: %pe\n",
				ERR_PTR(ret));
		return ret;
	}

	ret = xe_bo_pin_external(bo);
	xe_assert(xe, !ret);

	return 0;
}

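/**
 * xe_dma_buf_unpin() - dma_buf unpin callback
 * @attach: The attachment to unpin the buffer for.
 *
 * Drop the pin taken in xe_dma_buf_pin().
 */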
static void xe_dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);

	xe_bo_unpin_external(bo);
}

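/**
 * xe_dma_buf_map() - dma_buf map_dma_buf callback
 * @attach: The attachment to map the buffer for.
 * @dir: The DMA direction of the access.
 *
 * If the buffer isn't pinned, first make it resident: migrate it to TT
 * memory for importers without P2P access, or validate it in its current
 * placement otherwise. Then build the sg_table from the TT pages, or, for
 * VRAM placements, from the VRAM manager's P2P address ranges.
 *
 * Return: The mapped sg_table, or an ERR_PTR() on failure.
 */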
static struct sg_table *xe_dma_buf_map(struct dma_buf_attachment *attach,
				       enum dma_data_direction dir)
{
	struct dma_buf *dma_buf = attach->dmabuf;
	struct drm_gem_object *obj = dma_buf->priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct sg_table *sgt;
	int r = 0;

	if (!attach->peer2peer && !xe_bo_can_migrate(bo, XE_PL_TT))
		return ERR_PTR(-EOPNOTSUPP);

	if (!xe_bo_is_pinned(bo)) {
		if (!attach->peer2peer)
			r = xe_bo_migrate(bo, XE_PL_TT);
		else
			r = xe_bo_validate(bo, NULL, false);
		if (r)
			return ERR_PTR(r);
	}

	switch (bo->ttm.resource->mem_type) {
	case XE_PL_TT:
		sgt = drm_prime_pages_to_sg(obj->dev,
					    bo->ttm.ttm->pages,
					    bo->ttm.ttm->num_pages);
		if (IS_ERR(sgt))
			return sgt;

		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC))
			goto error_free;
		break;

	case XE_PL_VRAM0:
	case XE_PL_VRAM1:
		r = xe_ttm_vram_mgr_alloc_sgt(xe_bo_device(bo),
					      bo->ttm.resource, 0,
					      bo->ttm.base.size, attach->dev,
					      dir, &sgt);
		if (r)
			return ERR_PTR(r);
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	return sgt;

error_free:
	sg_free_table(sgt);
	kfree(sgt);
	return ERR_PTR(-EBUSY);
}

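/**
 * xe_dma_buf_unmap() - dma_buf unmap_dma_buf callback
 * @attach: The attachment the buffer was mapped for.
 * @sgt: The sg_table returned by xe_dma_buf_map().
 * @dir: The DMA direction the mapping was created with.
 *
 * Unmap and free the sg_table along the path matching the buffer's
 * current placement (system pages vs. VRAM).
 */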
static void xe_dma_buf_unmap(struct dma_buf_attachment *attach,
			     struct sg_table *sgt,
			     enum dma_data_direction dir)
{
	struct dma_buf *dma_buf = attach->dmabuf;
	struct xe_bo *bo = gem_to_xe_bo(dma_buf->priv);

	if (!xe_bo_is_vram(bo)) {
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		xe_ttm_vram_mgr_free_sgt(attach->dev, dir, sgt);
	}
}

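/**
 * xe_dma_buf_begin_cpu_access() - dma_buf begin_cpu_access callback
 * @dma_buf: The exported dma-buf.
 * @direction: The direction of the pending CPU access.
 *
 * For accesses that read from the buffer, migrate it to TT memory so the
 * CPU sees the current content; write-only access needs no migration.
 *
 * Return: Always 0; a failed migration is deliberately ignored and the
 * access proceeds against the current placement.
 */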
static int xe_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
				       enum dma_data_direction direction)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads)
		return 0;

	/* Can we do an interruptible lock here? */
	xe_bo_lock(bo, false);
	(void)xe_bo_migrate(bo, XE_PL_TT);
	xe_bo_unlock(bo);

	return 0;
}

static const struct dma_buf_ops xe_dmabuf_ops = {
	.attach = xe_dma_buf_attach,
	.detach = xe_dma_buf_detach,
	.pin = xe_dma_buf_pin,
	.unpin = xe_dma_buf_unpin,
	.map_dma_buf = xe_dma_buf_map,
	.unmap_dma_buf = xe_dma_buf_unmap,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = xe_dma_buf_begin_cpu_access,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

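/**
 * xe_gem_prime_export() - GEM prime export callback
 * @obj: The GEM object to export.
 * @flags: The flags (such as O_CLOEXEC) to create the dma-buf file with.
 *
 * Export the object as a dma-buf using the xe-specific dma_buf_ops.
 * Objects belonging to a private VM cannot be exported.
 *
 * Return: The new dma-buf, or an ERR_PTR() on failure.
 */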
struct dma_buf *xe_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct dma_buf *buf;

	if (bo->vm)
		return ERR_PTR(-EPERM);

	buf = drm_gem_prime_export(obj, flags);
	if (!IS_ERR(buf))
		buf->ops = &xe_dmabuf_ops;

	return buf;
}

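/**
 * xe_dma_buf_init_obj() - Initialize a bo backing an imported dma-buf
 * @dev: The importing drm device.
 * @storage: Pre-allocated storage for the new bo.
 * @dma_buf: The dma-buf being imported.
 *
 * Create an sg-backed system-memory bo that shares the dma-buf's
 * reservation object, holding that lock across the creation.
 *
 * Return: The new GEM object, or an ERR_PTR() on failure.
 */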
static struct drm_gem_object *
xe_dma_buf_init_obj(struct drm_device *dev, struct xe_bo *storage,
		    struct dma_buf *dma_buf)
{
	struct dma_resv *resv = dma_buf->resv;
	struct xe_device *xe = to_xe_device(dev);
	struct xe_bo *bo;
	int ret;

	dma_resv_lock(resv, NULL);
	bo = ___xe_bo_create_locked(xe, storage, NULL, resv, NULL, dma_buf->size,
				    0, /* Will require 1way or 2way for vm_bind */
				    ttm_bo_type_sg, XE_BO_FLAG_SYSTEM);
	if (IS_ERR(bo)) {
		ret = PTR_ERR(bo);
		goto error;
	}
	dma_resv_unlock(resv);

	return &bo->ttm.base;

error:
	dma_resv_unlock(resv);
	return ERR_PTR(ret);
}

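/**
 * xe_dma_buf_move_notify() - Importer move_notify callback
 * @attach: The attachment whose backing storage is about to move.
 *
 * Invoked by the exporter with the shared reservation object locked;
 * evict the local bo so its mappings are invalidated and rebuilt on next
 * use.
 */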
static void xe_dma_buf_move_notify(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->importer_priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);

	XE_WARN_ON(xe_bo_evict(bo, false));
}

static const struct dma_buf_attach_ops xe_dma_buf_attach_ops = {
	.allow_peer2peer = true,
	.move_notify = xe_dma_buf_move_notify
};

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)

struct dma_buf_test_params {
	struct xe_test_priv base;
	const struct dma_buf_attach_ops *attach_ops;
	bool force_different_devices;
	u32 mem_mask;
};

#define to_dma_buf_test_params(_priv) \
	container_of(_priv, struct dma_buf_test_params, base)
#endif

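/**
 * xe_gem_prime_import() - GEM prime import callback
 * @dev: The importing drm device.
 * @dma_buf: The dma-buf to import.
 *
 * A self-import of one of our own dma-bufs resolves directly to the
 * underlying GEM object. Anything else is dynamically attached to and
 * wrapped in a new bo; KUnit live tests may substitute their own attach
 * ops to exercise both the P2P and the migration paths.
 *
 * Return: The GEM object representing the dma-buf, or an ERR_PTR() on
 * failure.
 */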
struct drm_gem_object *xe_gem_prime_import(struct drm_device *dev,
					   struct dma_buf *dma_buf)
{
	XE_TEST_DECLARE(struct dma_buf_test_params *test =
			to_dma_buf_test_params
			(xe_cur_kunit_priv(XE_TEST_LIVE_DMA_BUF));)
	const struct dma_buf_attach_ops *attach_ops;
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct xe_bo *bo;

	if (dma_buf->ops == &xe_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev &&
		    !XE_TEST_ONLY(test && test->force_different_devices)) {
			/*
			 * Importing a dma-buf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dma-buf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	/*
	 * Don't publish the bo until we have a valid attachment, and a
	 * valid attachment needs the bo address. So pre-create a bo before
	 * creating the attachment and publish it afterwards.
	 */
	bo = xe_bo_alloc();
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	attach_ops = &xe_dma_buf_attach_ops;
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
	if (test)
		attach_ops = test->attach_ops;
#endif

	attach = dma_buf_dynamic_attach(dma_buf, dev->dev, attach_ops, &bo->ttm.base);
	if (IS_ERR(attach)) {
		obj = ERR_CAST(attach);
		goto out_err;
	}

	/* Errors here will take care of freeing the bo. */
	obj = xe_dma_buf_init_obj(dev, bo, dma_buf);
	if (IS_ERR(obj))
		return obj;

	get_dma_buf(dma_buf);
	obj->import_attach = attach;
	return obj;

out_err:
	xe_bo_free(bo);

	return obj;
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_dma_buf.c"
#endif