/*
 * Copyright 2014 Canonical
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Andreas Pokorny
 */

#include <drm/drm_prime.h>
#include <linux/virtio_dma_buf.h>

#include "virtgpu_drv.h"

MODULE_IMPORT_NS("DMA_BUF");

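/*
 * Resolve the UUID the host assigned to the exported resource. The
 * assign-uuid command completes asynchronously, so wait until the reply
 * has moved uuid_state out of STATE_INITIALIZING before copying the
 * result out.
 */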
static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
				   uuid_t *uuid)
{
	struct drm_gem_object *obj = buf->priv;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;

	wait_event(vgdev->resp_wq, bo->uuid_state != STATE_INITIALIZING);
	if (bo->uuid_state != STATE_OK)
		return -ENODEV;

	uuid_copy(uuid, &bo->uuid);

	return 0;
}

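/*
 * VRAM-backed objects have no guest pages and need the driver's own
 * mapping helper; everything else goes through the generic GEM
 * sg-table path.
 */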
static struct sg_table *
virtgpu_gem_map_dma_buf(struct dma_buf_attachment *attach,
			enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);

	if (virtio_gpu_is_vram(bo))
		return virtio_gpu_vram_map_dma_buf(bo, attach->dev, dir);

	return drm_gem_map_dma_buf(attach, dir);
}

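/* Undo virtgpu_gem_map_dma_buf(), again special-casing VRAM objects. */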
static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
				      struct sg_table *sgt,
				      enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);

	if (virtio_gpu_is_vram(bo)) {
		virtio_gpu_vram_unmap_dma_buf(attach->dev, sgt, dir);
		return;
	}

	drm_gem_unmap_dma_buf(attach, sgt, dir);
}

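/*
 * dma-buf ops for buffers exported by virtio-gpu: mostly the generic
 * GEM PRIME helpers, plus the virtio-specific get_uuid hook.
 */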
static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
	.ops = {
		.cache_sgt_mapping = true,
		.attach = virtio_dma_buf_attach,
		.detach = drm_gem_map_detach,
		.map_dma_buf = virtgpu_gem_map_dma_buf,
		.unmap_dma_buf = virtgpu_gem_unmap_dma_buf,
		.release = drm_gem_dmabuf_release,
		.mmap = drm_gem_dmabuf_mmap,
		.vmap = drm_gem_dmabuf_vmap,
		.vunmap = drm_gem_dmabuf_vunmap,
	},
	.device_attach = drm_gem_map_attach,
	.get_uuid = virtgpu_virtio_get_uuid,
};

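/*
 * Ask the host to attach a UUID to @bo so the resource can be
 * identified by other virtio devices. The reply is handled
 * asynchronously and updates bo->uuid_state.
 */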
int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo)
{
	struct virtio_gpu_object_array *objs;

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return -ENOMEM;

	virtio_gpu_array_add_obj(objs, &bo->base.base);

	return virtio_gpu_cmd_resource_assign_uuid(vgdev, objs);
}

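/*
 * Export a virtio-gpu GEM object as a dma-buf. Non-blob resources get
 * a host-assigned UUID when the feature is available; blob resources
 * are only shareable across devices when created with
 * VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE, otherwise get_uuid will fail
 * with -ENODEV.
 */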
struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
					 int flags)
{
	struct dma_buf *buf;
	struct drm_device *dev = obj->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	int ret = 0;
	bool blob = bo->host3d_blob || bo->guest_blob;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	if (!blob) {
		if (vgdev->has_resource_assign_uuid) {
			ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
			if (ret)
				return ERR_PTR(ret);

			virtio_gpu_notify(vgdev);
		} else {
			bo->uuid_state = STATE_ERR;
		}
	} else if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)) {
		bo->uuid_state = STATE_ERR;
	}

	exp_info.ops = &virtgpu_dmabuf_ops.ops;
	exp_info.size = obj->size;
	exp_info.flags = flags;
	exp_info.priv = obj;
	exp_info.resv = obj->resv;

	buf = virtio_dma_buf_export(&exp_info);
	if (IS_ERR(buf))
		return buf;

	drm_dev_get(dev);
	drm_gem_object_get(obj);

	return buf;
}

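/*
 * Map the attachment and translate the resulting sg-table into the
 * little-endian address/length entries the host expects. Must be
 * called with the dma-buf's reservation lock held; waits for pending
 * kernel fences before mapping.
 */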
int virtgpu_dma_buf_import_sgt(struct virtio_gpu_mem_entry **ents,
			       unsigned int *nents,
			       struct virtio_gpu_object *bo,
			       struct dma_buf_attachment *attach)
{
	struct scatterlist *sl;
	struct sg_table *sgt;
	long i, ret;

	dma_resv_assert_held(attach->dmabuf->resv);

	ret = dma_resv_wait_timeout(attach->dmabuf->resv,
				    DMA_RESV_USAGE_KERNEL,
				    false, MAX_SCHEDULE_TIMEOUT);
	if (ret <= 0)
		return ret < 0 ? ret : -ETIMEDOUT;

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	*ents = kvmalloc_array(sgt->nents,
			       sizeof(struct virtio_gpu_mem_entry),
			       GFP_KERNEL);
	if (!(*ents)) {
		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
		return -ENOMEM;
	}

	*nents = sgt->nents;
	for_each_sgtable_dma_sg(sgt, sl, i) {
		(*ents)[i].addr = cpu_to_le64(sg_dma_address(sl));
		(*ents)[i].length = cpu_to_le32(sg_dma_len(sl));
		(*ents)[i].padding = 0;
	}

	bo->sgt = sgt;
	return 0;
}

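/*
 * Free an imported GEM object: detach the resource from the host
 * (fenced, so in-flight work completes first), unmap and drop the
 * attachment, then release the host resource or clean up the object.
 */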
static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct dma_buf_attachment *attach = obj->import_attach;

	if (attach) {
		struct dma_buf *dmabuf = attach->dmabuf;

		dma_resv_lock(dmabuf->resv, NULL);

		virtio_gpu_detach_object_fenced(bo);

		if (bo->sgt)
			dma_buf_unmap_attachment(attach, bo->sgt,
						 DMA_BIDIRECTIONAL);

		dma_resv_unlock(dmabuf->resv);

		dma_buf_detach(dmabuf, attach);
		dma_buf_put(dmabuf);
	}

	if (bo->created) {
		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);
		return;
	}
	virtio_gpu_cleanup_object(bo);
}

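/*
 * Turn an imported dma-buf into a guest blob resource: allocate a
 * resource id, pin and map the buffer, and create a host blob backed
 * by the mapped pages. The pin is dropped again once the create
 * command has been queued; move_notify handles later relocations.
 */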
static int virtgpu_dma_buf_init_obj(struct drm_device *dev,
				    struct virtio_gpu_object *bo,
				    struct dma_buf_attachment *attach)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object_params params = { 0 };
	struct dma_resv *resv = attach->dmabuf->resv;
	struct virtio_gpu_mem_entry *ents = NULL;
	unsigned int nents;
	int ret;

	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
	if (ret) {
		virtgpu_dma_buf_free_obj(&bo->base.base);
		return ret;
	}

	dma_resv_lock(resv, NULL);

	ret = dma_buf_pin(attach);
	if (ret)
		goto err_pin;

	ret = virtgpu_dma_buf_import_sgt(&ents, &nents, bo, attach);
	if (ret)
		goto err_import;

	params.blob = true;
	params.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
	params.blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
	params.size = attach->dmabuf->size;

	virtio_gpu_cmd_resource_create_blob(vgdev, bo, &params,
					    ents, nents);
	bo->guest_blob = true;
	bo->attached = true;

	dma_buf_unpin(attach);
	dma_resv_unlock(resv);

	return 0;

err_import:
	dma_buf_unpin(attach);
err_pin:
	dma_resv_unlock(resv);
	virtgpu_dma_buf_free_obj(&bo->base.base);
	return ret;
}

static const struct drm_gem_object_funcs virtgpu_gem_dma_buf_funcs = {
	.free = virtgpu_dma_buf_free_obj,
};

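/*
 * The exporter is about to move the buffer: detach the resource from
 * the host and drop the cached mapping so the next use re-imports it.
 */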
static void virtgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->importer_priv;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);

	if (bo->created && kref_read(&obj->refcount)) {
		virtio_gpu_detach_object_fenced(bo);

		if (bo->sgt)
			dma_buf_unmap_attachment(attach, bo->sgt,
						 DMA_BIDIRECTIONAL);

		bo->sgt = NULL;
	}
}

static const struct dma_buf_attach_ops virtgpu_dma_buf_attach_ops = {
	.allow_peer2peer = true,
	.move_notify = virtgpu_dma_buf_move_notify
};

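/*
 * Import a dma-buf. Self-imports just take a reference on the
 * underlying GEM object. Foreign buffers are attached dynamically and
 * wrapped in a guest blob resource when blob support is available;
 * otherwise fall back to the generic PRIME import path.
 */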
struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
						struct dma_buf *buf)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct dma_buf_attachment *attach;
	struct virtio_gpu_object *bo;
	struct drm_gem_object *obj;
	int ret;

	if (buf->ops == &virtgpu_dmabuf_ops.ops) {
		obj = buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing dmabuf exported from our own gem increases
			 * refcount on gem itself instead of f_count of dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	if (!vgdev->has_resource_blob || vgdev->has_virgl_3d)
		return drm_gem_prime_import(dev, buf);

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	obj = &bo->base.base;
	obj->funcs = &virtgpu_gem_dma_buf_funcs;
	drm_gem_private_object_init(dev, obj, buf->size);

	attach = dma_buf_dynamic_attach(buf, dev->dev,
					&virtgpu_dma_buf_attach_ops, obj);
	if (IS_ERR(attach)) {
		kfree(bo);
		return ERR_CAST(attach);
	}

	obj->import_attach = attach;
	get_dma_buf(buf);

	ret = virtgpu_dma_buf_init_obj(dev, bo, attach);
	if (ret < 0)
		return ERR_PTR(ret);

	return obj;
}

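/*
 * The generic sg-table import path is not supported; imports are
 * handled in virtgpu_gem_prime_import() above.
 */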
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
	struct drm_device *dev, struct dma_buf_attachment *attach,
	struct sg_table *table)
{
	return ERR_PTR(-ENODEV);
}