/*
 * Copyright 2014 Canonical
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Andreas Pokorny
 */

#include <drm/drm_prime.h>
#include <linux/virtio_dma_buf.h>

#include "virtgpu_drv.h"

MODULE_IMPORT_NS("DMA_BUF");

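/*
 * virtio dma-buf op: wait until the host has finished assigning a UUID
 * to the exported resource, then copy it to the caller.  Returns -ENODEV
 * if UUID assignment failed or is unsupported.
 */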
static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
				   uuid_t *uuid)
{
	struct drm_gem_object *obj = buf->priv;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;

	wait_event(vgdev->resp_wq, bo->uuid_state != STATE_INITIALIZING);
	if (bo->uuid_state != STATE_OK)
		return -ENODEV;

	uuid_copy(uuid, &bo->uuid);

	return 0;
}

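/*
 * Map the buffer for an importer: VRAM objects go through the driver's
 * own mapping helper, everything else through the generic GEM path.
 */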
static struct sg_table *
virtgpu_gem_map_dma_buf(struct dma_buf_attachment *attach,
			enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);

	if (virtio_gpu_is_vram(bo))
		return virtio_gpu_vram_map_dma_buf(bo, attach->dev, dir);

	return drm_gem_map_dma_buf(attach, dir);
}

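/* Undo virtgpu_gem_map_dma_buf(), again dispatching on the object type. */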
static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
				      struct sg_table *sgt,
				      enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);

	if (virtio_gpu_is_vram(bo)) {
		virtio_gpu_vram_unmap_dma_buf(attach->dev, sgt, dir);
		return;
	}

	drm_gem_unmap_dma_buf(attach, sgt, dir);
}

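/*
 * Export ops: mostly the generic GEM dma-buf helpers, wrapped in
 * struct virtio_dma_buf_ops so importers can query the resource UUID.
 */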
static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
	.ops = {
		.attach = virtio_dma_buf_attach,
		.detach = drm_gem_map_detach,
		.map_dma_buf = virtgpu_gem_map_dma_buf,
		.unmap_dma_buf = virtgpu_gem_unmap_dma_buf,
		.release = drm_gem_dmabuf_release,
		.mmap = drm_gem_dmabuf_mmap,
		.vmap = drm_gem_dmabuf_vmap,
		.vunmap = drm_gem_dmabuf_vunmap,
	},
	.device_attach = drm_gem_map_attach,
	.get_uuid = virtgpu_virtio_get_uuid,
};

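/*
 * Ask the host to assign a UUID to @bo; the command response updates
 * bo->uuid_state, which virtgpu_virtio_get_uuid() waits on.
 */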
int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo)
{
	struct virtio_gpu_object_array *objs;

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return -ENOMEM;

	virtio_gpu_array_add_obj(objs, &bo->base.base);

	return virtio_gpu_cmd_resource_assign_uuid(vgdev, objs);
}

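/*
 * Export a virtio-gpu object as a dma-buf.  Non-blob resources get a
 * host-assigned UUID when the feature is available; blob resources only
 * keep a valid UUID if they were created with the CROSS_DEVICE flag.
 */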
struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
					 int flags)
{
	struct dma_buf *buf;
	struct drm_device *dev = obj->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	int ret = 0;
	bool blob = bo->host3d_blob || bo->guest_blob;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	if (!blob) {
		if (vgdev->has_resource_assign_uuid) {
			ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
			if (ret)
				return ERR_PTR(ret);

			virtio_gpu_notify(vgdev);
		} else {
			bo->uuid_state = STATE_ERR;
		}
	} else if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)) {
		bo->uuid_state = STATE_ERR;
	}

	exp_info.ops = &virtgpu_dmabuf_ops.ops;
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;
	exp_info.resv = obj->resv;

	buf = virtio_dma_buf_export(&exp_info);
	if (IS_ERR(buf))
		return buf;

	drm_dev_get(dev);
	drm_gem_object_get(obj);

	return buf;
}

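/*
 * Map the attachment and translate the resulting sg_table into the
 * virtio_gpu_mem_entry array the host expects.  Waits for kernel fences
 * on the dma-buf first; the caller must hold the reservation lock.
 */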
int virtgpu_dma_buf_import_sgt(struct virtio_gpu_mem_entry **ents,
			       unsigned int *nents,
			       struct virtio_gpu_object *bo,
			       struct dma_buf_attachment *attach)
{
	struct scatterlist *sl;
	struct sg_table *sgt;
	long i, ret;

	dma_resv_assert_held(attach->dmabuf->resv);

	ret = dma_resv_wait_timeout(attach->dmabuf->resv,
				    DMA_RESV_USAGE_KERNEL,
				    false, MAX_SCHEDULE_TIMEOUT);
	if (ret <= 0)
		return ret < 0 ? ret : -ETIMEDOUT;

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	*ents = kvmalloc_array(sgt->nents,
			       sizeof(struct virtio_gpu_mem_entry),
			       GFP_KERNEL);
	if (!(*ents)) {
		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
		return -ENOMEM;
	}

	*nents = sgt->nents;
	for_each_sgtable_dma_sg(sgt, sl, i) {
		(*ents)[i].addr = cpu_to_le64(sg_dma_address(sl));
		(*ents)[i].length = cpu_to_le32(sg_dma_len(sl));
		(*ents)[i].padding = 0;
	}

	bo->sgt = sgt;
	return 0;
}

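/*
 * Tear down the guest mapping of an imported buffer: detach the host
 * resource (fenced) and unmap the attachment.  The dma-buf's reservation
 * lock must be held.
 */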
static void virtgpu_dma_buf_unmap(struct virtio_gpu_object *bo)
{
	struct dma_buf_attachment *attach = bo->base.base.import_attach;

	dma_resv_assert_held(attach->dmabuf->resv);

	if (bo->created) {
		virtio_gpu_detach_object_fenced(bo);

		if (bo->sgt)
			dma_buf_unmap_attachment(attach, bo->sgt,
						 DMA_BIDIRECTIONAL);

		bo->sgt = NULL;
	}
}

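/*
 * GEM free callback for imported objects: unmap and detach the dma-buf,
 * then release the host resource (or just clean up locally if it was
 * never created).
 */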
static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct dma_buf_attachment *attach = obj->import_attach;

	if (drm_gem_is_imported(obj)) {
		struct dma_buf *dmabuf = attach->dmabuf;

		dma_resv_lock(dmabuf->resv, NULL);
		virtgpu_dma_buf_unmap(bo);
		dma_resv_unlock(dmabuf->resv);

		dma_buf_detach(dmabuf, attach);
		dma_buf_put(dmabuf);
	}

	if (bo->created) {
		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);
		return;
	}
	virtio_gpu_cleanup_object(bo);
}

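/*
 * Back a freshly imported object with a host blob resource: pin the
 * buffer, build the mem-entry array from its sg_table and issue a
 * RESOURCE_CREATE_BLOB command.
 */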
static int virtgpu_dma_buf_init_obj(struct drm_device *dev,
				    struct virtio_gpu_object *bo,
				    struct dma_buf_attachment *attach)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object_params params = { 0 };
	struct dma_resv *resv = attach->dmabuf->resv;
	struct virtio_gpu_mem_entry *ents = NULL;
	unsigned int nents;
	int ret;

	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
	if (ret) {
		virtgpu_dma_buf_free_obj(&bo->base.base);
		return ret;
	}

	dma_resv_lock(resv, NULL);

	ret = dma_buf_pin(attach);
	if (ret)
		goto err_pin;

	ret = virtgpu_dma_buf_import_sgt(&ents, &nents, bo, attach);
	if (ret)
		goto err_import;

	params.blob = true;
	params.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
	params.blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
	params.size = attach->dmabuf->size;

	virtio_gpu_cmd_resource_create_blob(vgdev, bo, &params,
					    ents, nents);
	bo->guest_blob = true;

	dma_buf_unpin(attach);
	dma_resv_unlock(resv);

	return 0;

err_import:
	dma_buf_unpin(attach);
err_pin:
	dma_resv_unlock(resv);
	virtgpu_dma_buf_free_obj(&bo->base.base);
	return ret;
}

static const struct drm_gem_object_funcs virtgpu_gem_dma_buf_funcs = {
	.free = virtgpu_dma_buf_free_obj,
};

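/*
 * Dynamic importer callback: the exporter is about to move the buffer,
 * so drop the host attachment and our cached sg_table mapping.
 */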
static void virtgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->importer_priv;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);

	virtgpu_dma_buf_unmap(bo);
}

static const struct dma_buf_attach_ops virtgpu_dma_buf_attach_ops = {
	.allow_peer2peer = true,
	.move_notify = virtgpu_dma_buf_move_notify
};

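/*
 * PRIME import: re-importing one of our own exports just takes a GEM
 * reference.  With blob-resource support (and no virgl 3D), a foreign
 * buffer is attached dynamically and mirrored as a guest blob; otherwise
 * fall back to the generic PRIME path.
 */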
struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
						struct dma_buf *buf)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct dma_buf_attachment *attach;
	struct virtio_gpu_object *bo;
	struct drm_gem_object *obj;
	int ret;

	if (buf->ops == &virtgpu_dmabuf_ops.ops) {
		obj = buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dma-buf exported from our own GEM
			 * increases the refcount on the GEM object itself
			 * instead of the f_count of the dma-buf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	if (!vgdev->has_resource_blob || vgdev->has_virgl_3d)
		return drm_gem_prime_import(dev, buf);

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	obj = &bo->base.base;
	obj->resv = buf->resv;
	obj->funcs = &virtgpu_gem_dma_buf_funcs;
	drm_gem_private_object_init(dev, obj, buf->size);

	attach = dma_buf_dynamic_attach(buf, dev->dev,
					&virtgpu_dma_buf_attach_ops, obj);
	if (IS_ERR(attach)) {
		kfree(bo);
		return ERR_CAST(attach);
	}

	obj->import_attach = attach;
	get_dma_buf(buf);

	ret = virtgpu_dma_buf_init_obj(dev, bo, attach);
	if (ret < 0)
		return ERR_PTR(ret);

	return obj;
}

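/*
 * virtio-gpu cannot import foreign buffers through the sg-table path,
 * so the generic PRIME fallback fails with -ENODEV.
 */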
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
	struct drm_device *dev, struct dma_buf_attachment *attach,
	struct sg_table *table)
{
	return ERR_PTR(-ENODEV);
}