xref: /linux/drivers/gpu/drm/virtio/virtgpu_prime.c (revision 785151f50ddacac06c7a3c5f3d31642794507fdf)
/*
 * Copyright 2014 Canonical
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Andreas Pokorny
 */

#include <drm/drm_prime.h>
#include <linux/virtio_dma_buf.h>

#include "virtgpu_drv.h"

MODULE_IMPORT_NS("DMA_BUF");

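/*
 * virtio_dma_buf_ops.get_uuid callback: sleep until the host has answered
 * (or failed) the pending ASSIGN_UUID request for this object, then hand
 * the resource UUID back to the caller.
 */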
static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
				   uuid_t *uuid)
{
	struct drm_gem_object *obj = buf->priv;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;

	wait_event(vgdev->resp_wq, bo->uuid_state != STATE_INITIALIZING);
	if (bo->uuid_state != STATE_OK)
		return -ENODEV;

	uuid_copy(uuid, &bo->uuid);

	return 0;
}

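/*
 * VRAM-backed objects are mapped with the driver's own helper; shmem-backed
 * objects go through the generic GEM dma-buf path.
 */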
static struct sg_table *
virtgpu_gem_map_dma_buf(struct dma_buf_attachment *attach,
			enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);

	if (virtio_gpu_is_vram(bo))
		return virtio_gpu_vram_map_dma_buf(bo, attach->dev, dir);

	return drm_gem_map_dma_buf(attach, dir);
}

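/* Mirror of virtgpu_gem_map_dma_buf() for the unmap direction. */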
static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
				      struct sg_table *sgt,
				      enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);

	if (virtio_gpu_is_vram(bo)) {
		virtio_gpu_vram_unmap_dma_buf(attach->dev, sgt, dir);
		return;
	}

	drm_gem_unmap_dma_buf(attach, sgt, dir);
}

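/*
 * Export ops. This is a struct virtio_dma_buf_ops rather than plain
 * dma_buf_ops so that importers can also query the per-resource UUID.
 */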
static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
	.ops = {
		.attach = virtio_dma_buf_attach,
		.detach = drm_gem_map_detach,
		.map_dma_buf = virtgpu_gem_map_dma_buf,
		.unmap_dma_buf = virtgpu_gem_unmap_dma_buf,
		.release = drm_gem_dmabuf_release,
		.mmap = drm_gem_dmabuf_mmap,
		.vmap = drm_gem_dmabuf_vmap,
		.vunmap = drm_gem_dmabuf_vunmap,
	},
	.device_attach = drm_gem_map_attach,
	.get_uuid = virtgpu_virtio_get_uuid,
};

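/*
 * Ask the host to assign a UUID to @bo so the resource can be identified
 * across virtio devices. The command is queued here; the reply updates
 * bo->uuid_state asynchronously and wakes waiters on vgdev->resp_wq.
 */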
int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo)
{
	struct virtio_gpu_object_array *objs;

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return -ENOMEM;

	virtio_gpu_array_add_obj(objs, &bo->base.base);

	return virtio_gpu_cmd_resource_assign_uuid(vgdev, objs);
}

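/*
 * Export path. Non-blob resources need a host-assigned UUID to be shared
 * across devices; blob resources must have been created with the
 * CROSS_DEVICE flag. In all other cases uuid_state is set to STATE_ERR,
 * and get_uuid() on the exported dma-buf will return -ENODEV.
 */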
struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
					 int flags)
{
	struct dma_buf *buf;
	struct drm_device *dev = obj->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	int ret = 0;
	bool blob = bo->host3d_blob || bo->guest_blob;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	if (!blob) {
		if (vgdev->has_resource_assign_uuid) {
			ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
			if (ret)
				return ERR_PTR(ret);

			virtio_gpu_notify(vgdev);
		} else {
			bo->uuid_state = STATE_ERR;
		}
	} else if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)) {
		bo->uuid_state = STATE_ERR;
	}

	exp_info.ops = &virtgpu_dmabuf_ops.ops;
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;
	exp_info.resv = obj->resv;

	buf = virtio_dma_buf_export(&exp_info);
	if (IS_ERR(buf))
		return buf;

	drm_dev_get(dev);
	drm_gem_object_get(obj);

	return buf;
}

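/*
 * Map the attachment and translate the resulting DMA addresses into the
 * virtio_gpu_mem_entry array the host expects. The caller must hold the
 * dma-buf's reservation lock; pending kernel fences are waited on first
 * so the pages are safe to hand to the device.
 */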
int virtgpu_dma_buf_import_sgt(struct virtio_gpu_mem_entry **ents,
			       unsigned int *nents,
			       struct virtio_gpu_object *bo,
			       struct dma_buf_attachment *attach)
{
	struct scatterlist *sl;
	struct sg_table *sgt;
	long i, ret;

	dma_resv_assert_held(attach->dmabuf->resv);

	ret = dma_resv_wait_timeout(attach->dmabuf->resv,
				    DMA_RESV_USAGE_KERNEL,
				    false, MAX_SCHEDULE_TIMEOUT);
	if (ret <= 0)
		return ret < 0 ? ret : -ETIMEDOUT;

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	*ents = kvmalloc_array(sgt->nents,
			       sizeof(struct virtio_gpu_mem_entry),
			       GFP_KERNEL);
	if (!(*ents)) {
		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
		return -ENOMEM;
	}

	*nents = sgt->nents;
	for_each_sgtable_dma_sg(sgt, sl, i) {
		(*ents)[i].addr = cpu_to_le64(sg_dma_address(sl));
		(*ents)[i].length = cpu_to_le32(sg_dma_len(sl));
		(*ents)[i].padding = 0;
	}

	bo->sgt = sgt;
	return 0;
}

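/*
 * Detach the resource on the host (fenced, so in-flight device work
 * completes first) and drop our DMA mapping of the imported buffer.
 */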
static void virtgpu_dma_buf_unmap(struct virtio_gpu_object *bo)
{
	struct dma_buf_attachment *attach = bo->base.base.import_attach;

	dma_resv_assert_held(attach->dmabuf->resv);

	if (bo->created) {
		virtio_gpu_detach_object_fenced(bo);

		if (bo->sgt)
			dma_buf_unmap_attachment(attach, bo->sgt,
						 DMA_BIDIRECTIONAL);

		bo->sgt = NULL;
	}
}

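/*
 * .free callback for imported objects: unmap and detach the dma-buf, then
 * either ask the host to unref the resource or, if it was never created
 * host-side, clean the object up locally.
 */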
static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;

	if (drm_gem_is_imported(obj)) {
		struct dma_buf *dmabuf = obj->dma_buf;

		dma_resv_lock(dmabuf->resv, NULL);
		virtgpu_dma_buf_unmap(bo);
		dma_resv_unlock(dmabuf->resv);

		dma_buf_detach(dmabuf, obj->import_attach);
		dma_buf_put(dmabuf);
	}

	if (bo->created) {
		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);
		return;
	}
	virtio_gpu_cleanup_object(bo);
}

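/*
 * Create a host blob resource backed by the imported dma-buf's pages. The
 * buffer is pinned only while the mem-entry list is built; once
 * CREATE_BLOB has been queued the exporter may move the buffer again, in
 * which case move_notify tears the mapping down.
 */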
static int virtgpu_dma_buf_init_obj(struct drm_device *dev,
				    struct virtio_gpu_object *bo,
				    struct dma_buf_attachment *attach)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object_params params = { 0 };
	struct dma_resv *resv = attach->dmabuf->resv;
	struct virtio_gpu_mem_entry *ents = NULL;
	unsigned int nents;
	int ret;

	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
	if (ret) {
		virtgpu_dma_buf_free_obj(&bo->base.base);
		return ret;
	}

	dma_resv_lock(resv, NULL);

	ret = dma_buf_pin(attach);
	if (ret)
		goto err_pin;

	ret = virtgpu_dma_buf_import_sgt(&ents, &nents, bo, attach);
	if (ret)
		goto err_import;

	params.blob = true;
	params.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
	params.blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
	params.size = attach->dmabuf->size;

	virtio_gpu_cmd_resource_create_blob(vgdev, bo, &params,
					    ents, nents);
	bo->guest_blob = true;

	dma_buf_unpin(attach);
	dma_resv_unlock(resv);

	return 0;

err_import:
	dma_buf_unpin(attach);
err_pin:
	dma_resv_unlock(resv);
	virtgpu_dma_buf_free_obj(&bo->base.base);
	return ret;
}

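/* Imported objects only need a driver-specific free hook. */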
static const struct drm_gem_object_funcs virtgpu_gem_dma_buf_funcs = {
	.free = virtgpu_dma_buf_free_obj,
};

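/*
 * dma-buf move notification: the exporter is about to move the buffer, so
 * detach the resource from the host and drop the now-stale DMA mapping.
 */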
static void virtgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->importer_priv;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);

	virtgpu_dma_buf_unmap(bo);
}

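/* Dynamic importer: we handle moves via move_notify and allow P2P DMA. */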
static const struct dma_buf_attach_ops virtgpu_dma_buf_attach_ops = {
	.allow_peer2peer = true,
	.move_notify = virtgpu_dma_buf_move_notify
};

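/*
 * Import path. Self-imports just take a GEM reference. Foreign buffers are
 * wrapped in a guest blob resource when the host supports blob resources
 * and virgl is off; otherwise fall back to the generic PRIME import.
 */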
struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
						struct dma_buf *buf)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct dma_buf_attachment *attach;
	struct virtio_gpu_object *bo;
	struct drm_gem_object *obj;
	int ret;

	if (buf->ops == &virtgpu_dmabuf_ops.ops) {
		obj = buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead
			 * of the f_count of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	if (!vgdev->has_resource_blob || vgdev->has_virgl_3d)
		return drm_gem_prime_import(dev, buf);

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	obj = &bo->base.base;
	obj->resv = buf->resv;
	obj->funcs = &virtgpu_gem_dma_buf_funcs;
	drm_gem_private_object_init(dev, obj, buf->size);

	attach = dma_buf_dynamic_attach(buf, dev->dev,
					&virtgpu_dma_buf_attach_ops, obj);
	if (IS_ERR(attach)) {
		kfree(bo);
		return ERR_CAST(attach);
	}

	obj->import_attach = attach;
	get_dma_buf(buf);

	ret = virtgpu_dma_buf_init_obj(dev, bo, attach);
	if (ret < 0)
		return ERR_PTR(ret);

	return obj;
}

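/*
 * Unused: importing always goes through virtgpu_gem_prime_import() above,
 * never through the sg-table helper.
 */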
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
	struct drm_device *dev, struct dma_buf_attachment *attach,
	struct sg_table *table)
{
	return ERR_PTR(-ENODEV);
}
349