xref: /linux/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c (revision b8265621f4888af9494e1d685620871ec81bc33d)
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright 2012 Red Hat Inc
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/dma-resv.h>

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

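/*
 * Exporter callback: pin the object's backing pages, duplicate its
 * scatterlist so the importer gets an independent mapping, and DMA-map
 * that copy for the importer's device.
 */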
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin_pages;
	}

	ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->mm.pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->mm.pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg_attrs(attachment->dev,
			      st->sgl, st->nents, dir,
			      DMA_ATTR_SKIP_CPU_SYNC)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin_pages:
	i915_gem_object_unpin_pages(obj);
err:
	return ERR_PTR(ret);
}

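/*
 * Exporter callback: undo i915_gem_map_dma_buf() - unmap the importer's
 * DMA addresses, free the duplicated scatterlist and drop the page pin.
 */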
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *sg,
				   enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

	dma_unmap_sg_attrs(attachment->dev,
			   sg->sgl, sg->nents, dir,
			   DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sg);
	kfree(sg);

	i915_gem_object_unpin_pages(obj);
}

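/*
 * Exporter callback: return a pinned, write-back cached kernel mapping
 * of the whole object for CPU access by the importer.
 */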
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	return i915_gem_object_pin_map(obj, I915_MAP_WB);
}

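/*
 * Exporter callback: flush any CPU writes through the kernel mapping and
 * release the pin taken by i915_gem_dmabuf_vmap().
 */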
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);
}

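/*
 * Exporter callback: let userspace mmap the dma-buf by forwarding to the
 * underlying shmem file's mmap and swapping vma->vm_file to that file.
 * Only shmem-backed objects (obj->base.filp set) support this.
 */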
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int ret;

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!obj->base.filp)
		return -ENODEV;

	ret = call_mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	fput(vma->vm_file);
	vma->vm_file = get_file(obj->base.filp);

	return 0;
}

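/*
 * Exporter callback: before the importer accesses the buffer with the CPU,
 * move the object to the CPU domain; the direction only decides whether
 * the access counts as a write.
 */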
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj);
	if (err)
		goto out;

	err = i915_gem_object_set_to_cpu_domain(obj, write);
	i915_gem_object_unlock(obj);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

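/*
 * Exporter callback: CPU access by the importer is finished; move the
 * object back to the GTT domain.
 */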
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	int err;

	err = i915_gem_object_pin_pages(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj);
	if (err)
		goto out;

	err = i915_gem_object_set_to_gtt_domain(obj, false);
	i915_gem_object_unlock(obj);

out:
	i915_gem_object_unpin_pages(obj);
	return err;
}

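/* dma-buf callbacks used when i915 is the exporter. */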
static const struct dma_buf_ops i915_dmabuf_ops = {
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = i915_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};

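/*
 * Export an i915 GEM object as a dma-buf. The exported buffer shares the
 * object's reservation object and uses i915_dmabuf_ops above; an object
 * may veto or prepare for export via its dmabuf_export() hook.
 */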
struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;
	exp_info.resv = obj->base.resv;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);

		if (ret)
			return ERR_PTR(ret);
	}

	return drm_gem_dmabuf_export(gem_obj->dev, &exp_info);
}

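/*
 * Importer side: back the GEM object with the pages of the attached
 * dma-buf by mapping the attachment and handing the resulting sg_table
 * to the object.
 */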
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	unsigned int sg_page_sizes;

	pages = dma_buf_map_attachment(obj->base.import_attach,
				       DMA_BIDIRECTIONAL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	sg_page_sizes = i915_sg_page_sizes(pages->sgl);

	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

	return 0;
}

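/* Importer side: release the DMA mapping obtained in get_pages. */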
static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	dma_buf_unmap_attachment(obj->base.import_attach, pages,
				 DMA_BIDIRECTIONAL);
}

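/* Page providers for GEM objects that wrap an imported dma-buf. */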
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

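/*
 * Import a dma-buf as an i915 GEM object. Re-importing a buffer we exported
 * from the same device is short-circuited to the original GEM object;
 * foreign buffers are wrapped in a new object that maps the attachment
 * on demand.
 */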
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	static struct lock_class_key lock_class;
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem object itself
			 * instead of the f_count of the dmabuf.
			 */
			return &i915_gem_object_get(obj)->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc();
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class);
	obj->base.import_attach = attach;
	obj->base.resv = dma_buf->resv;

	/* We use GTT as shorthand for a coherent domain, one that is
	 * neither in the GPU cache nor in the CPU cache, where all
	 * writes are immediately visible in memory. (That's not strictly
	 * true, but it's close! There are internal buffers such as the
	 * write-combined buffer or a delay through the chipset for GTT
	 * writes that do require us to treat GTT as a separate cache domain.)
	 */
	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif