// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_dma_buf.h"

#include <kunit/test.h>
#include <linux/dma-buf.h>
#include <linux/pci-p2pdma.h>

#include <drm/drm_device.h>
#include <drm/drm_prime.h>
#include <drm/ttm/ttm_tt.h>

#include "tests/xe_test.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_vm.h"

MODULE_IMPORT_NS(DMA_BUF);

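/*
 * Attachment is allowed if the importer can reach us via PCI P2P DMA, or
 * if the BO can be migrated to system memory (TT) where any importer can
 * reach it. A device memory-access reference is held for the lifetime of
 * the attachment.
 */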
static int xe_dma_buf_attach(struct dma_buf *dmabuf,
			     struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;

	if (attach->peer2peer &&
	    pci_p2pdma_distance(to_pci_dev(obj->dev->dev), attach->dev, false) < 0)
		attach->peer2peer = false;

	if (!attach->peer2peer && !xe_bo_can_migrate(gem_to_xe_bo(obj), XE_PL_TT))
		return -EOPNOTSUPP;

	xe_device_mem_access_get(to_xe_device(obj->dev));
	return 0;
}

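/* Drop the memory-access reference taken in xe_dma_buf_attach(). */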
static void xe_dma_buf_detach(struct dma_buf *dmabuf,
			      struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;

	xe_device_mem_access_put(to_xe_device(obj->dev));
}

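/*
 * Pin callback for non-dynamic importers: migrate to TT where possible
 * so importers without P2P support can still map the buffer, then pin
 * the BO as an externally exported object.
 */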
static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);

	/*
	 * Migrate to TT first to increase the chance that non-p2p clients
	 * can attach.
	 */
	(void)xe_bo_migrate(bo, XE_PL_TT);

	return xe_bo_pin_external(bo);
}

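/* Counterpart of xe_dma_buf_pin(): drop the external pin. */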
static void xe_dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);

	xe_bo_unpin_external(bo);
}

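/*
 * Produce an sg_table for the importer. Unpinned BOs are first moved to
 * a placement the importer can reach: TT for non-P2P importers, or
 * validated in place when P2P access to VRAM is possible. TT-resident
 * BOs are mapped through their page array, VRAM-resident ones through
 * the VRAM manager's P2P sg-table helper.
 */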
static struct sg_table *xe_dma_buf_map(struct dma_buf_attachment *attach,
				       enum dma_data_direction dir)
{
	struct dma_buf *dma_buf = attach->dmabuf;
	struct drm_gem_object *obj = dma_buf->priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct sg_table *sgt;
	int r = 0;

	if (!attach->peer2peer && !xe_bo_can_migrate(bo, XE_PL_TT))
		return ERR_PTR(-EOPNOTSUPP);

	if (!xe_bo_is_pinned(bo)) {
		if (!attach->peer2peer ||
		    bo->ttm.resource->mem_type == XE_PL_SYSTEM) {
			if (xe_bo_can_migrate(bo, XE_PL_TT))
				r = xe_bo_migrate(bo, XE_PL_TT);
			else
				r = xe_bo_validate(bo, NULL, false);
		}
		if (r)
			return ERR_PTR(r);
	}

	switch (bo->ttm.resource->mem_type) {
	case XE_PL_TT:
		sgt = drm_prime_pages_to_sg(obj->dev,
					    bo->ttm.ttm->pages,
					    bo->ttm.ttm->num_pages);
		if (IS_ERR(sgt))
			return sgt;

		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC))
			goto error_free;
		break;

	case XE_PL_VRAM0:
	case XE_PL_VRAM1:
		r = xe_ttm_vram_mgr_alloc_sgt(xe_bo_device(bo),
					      bo->ttm.resource, 0,
					      bo->ttm.base.size, attach->dev,
					      dir, &sgt);
		if (r)
			return ERR_PTR(r);
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	return sgt;

error_free:
	sg_free_table(sgt);
	kfree(sgt);
	return ERR_PTR(-EBUSY);
}

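/*
 * Tear down a mapping created by xe_dma_buf_map(): DMA-unmap and free
 * system-memory sg_tables, or hand VRAM sg_tables back to the VRAM
 * manager.
 */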
static void xe_dma_buf_unmap(struct dma_buf_attachment *attach,
			     struct sg_table *sgt,
			     enum dma_data_direction dir)
{
	struct dma_buf *dma_buf = attach->dmabuf;
	struct xe_bo *bo = gem_to_xe_bo(dma_buf->priv);

	if (!xe_bo_is_vram(bo)) {
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		xe_ttm_vram_mgr_free_sgt(attach->dev, dir, sgt);
	}
}

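/*
 * CPU reads need the buffer contents to be CPU-visible, so migrate the
 * BO to TT first. Write-only CPU access needs no migration here.
 */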
static int xe_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
				       enum dma_data_direction direction)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads)
		return 0;

	xe_bo_lock_no_vm(bo, NULL);
	(void)xe_bo_migrate(bo, XE_PL_TT);
	xe_bo_unlock_no_vm(bo);

	return 0;
}

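/* dma-buf ops used for every BO exported by xe. */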
const struct dma_buf_ops xe_dmabuf_ops = {
	.attach = xe_dma_buf_attach,
	.detach = xe_dma_buf_detach,
	.pin = xe_dma_buf_pin,
	.unpin = xe_dma_buf_unpin,
	.map_dma_buf = xe_dma_buf_map,
	.unmap_dma_buf = xe_dma_buf_unmap,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = xe_dma_buf_begin_cpu_access,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

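/**
 * xe_gem_prime_export() - Export an xe GEM object as a dma-buf
 * @obj: The GEM object to export.
 * @flags: Flags for the dma-buf file.
 *
 * BOs that are private to a VM cannot be exported. On success the
 * default PRIME dma-buf ops are replaced with xe's own.
 *
 * Return: The new dma-buf, or an ERR_PTR() on failure.
 */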
struct dma_buf *xe_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	struct xe_bo *bo = gem_to_xe_bo(obj);
	struct dma_buf *buf;

	if (bo->vm)
		return ERR_PTR(-EPERM);

	buf = drm_gem_prime_export(obj, flags);
	if (!IS_ERR(buf))
		buf->ops = &xe_dmabuf_ops;

	return buf;
}

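/*
 * Create the backing BO for an imported dma-buf. The BO shares the
 * dma-buf's reservation object so exporter and importer stay in sync.
 */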
static struct drm_gem_object *
xe_dma_buf_init_obj(struct drm_device *dev, struct xe_bo *storage,
		    struct dma_buf *dma_buf)
{
	struct dma_resv *resv = dma_buf->resv;
	struct xe_device *xe = to_xe_device(dev);
	struct xe_bo *bo;
	int ret;

	dma_resv_lock(resv, NULL);
	bo = __xe_bo_create_locked(xe, storage, NULL, resv, dma_buf->size,
				   ttm_bo_type_sg, XE_BO_CREATE_SYSTEM_BIT);
	if (IS_ERR(bo)) {
		ret = PTR_ERR(bo);
		goto error;
	}
	dma_resv_unlock(resv);

	return &bo->ttm.base;

error:
	dma_resv_unlock(resv);
	return ERR_PTR(ret);
}

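/*
 * The exporter is moving the buffer: evict our local copy so the next
 * use revalidates against the exporter's new placement.
 */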
static void xe_dma_buf_move_notify(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->importer_priv;
	struct xe_bo *bo = gem_to_xe_bo(obj);

	XE_WARN_ON(xe_bo_evict(bo, false));
}

static const struct dma_buf_attach_ops xe_dma_buf_attach_ops = {
	.allow_peer2peer = true,
	.move_notify = xe_dma_buf_move_notify
};

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)

struct dma_buf_test_params {
	struct xe_test_priv base;
	const struct dma_buf_attach_ops *attach_ops;
	bool force_different_devices;
	u32 mem_mask;
};

#define to_dma_buf_test_params(_priv) \
	container_of(_priv, struct dma_buf_test_params, base)
#endif

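/**
 * xe_gem_prime_import() - Import a dma-buf as a GEM object
 * @dev: The importing DRM device.
 * @dma_buf: The dma-buf to import.
 *
 * Importing a dma-buf that we ourselves exported on the same device
 * short-circuits to a reference on the original GEM object. Anything
 * else gets a dynamic attachment backed by a newly created BO.
 *
 * Return: The GEM object, or an ERR_PTR() on failure.
 */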
struct drm_gem_object *xe_gem_prime_import(struct drm_device *dev,
					   struct dma_buf *dma_buf)
{
	XE_TEST_DECLARE(struct dma_buf_test_params *test =
			to_dma_buf_test_params
			(xe_cur_kunit_priv(XE_TEST_LIVE_DMA_BUF));)
	const struct dma_buf_attach_ops *attach_ops;
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct xe_bo *bo;

	if (dma_buf->ops == &xe_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev &&
		    !XE_TEST_ONLY(test && test->force_different_devices)) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	/*
	 * Don't publish the bo until we have a valid attachment, and a
	 * valid attachment needs the bo address. So pre-create the bo
	 * before creating the attachment, and publish it afterwards.
	 */
	bo = xe_bo_alloc();
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	attach_ops = &xe_dma_buf_attach_ops;
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
	if (test)
		attach_ops = test->attach_ops;
#endif

	attach = dma_buf_dynamic_attach(dma_buf, dev->dev, attach_ops, &bo->ttm.base);
	if (IS_ERR(attach)) {
		obj = ERR_CAST(attach);
		goto out_err;
	}

	/* Errors here will take care of freeing the bo. */
	obj = xe_dma_buf_init_obj(dev, bo, dma_buf);
	if (IS_ERR(obj))
		return obj;

	get_dma_buf(dma_buf);
	obj->import_attach = attach;
	return obj;

out_err:
	xe_bo_free(bo);

	return obj;
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_dma_buf.c"
#endif
307