/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include <drm/drm_edid.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		  \
			       + MAX_INLINE_RESP_SIZE)

static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

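/*
 * Virtqueue callbacks for the control and cursor queues.  They only
 * schedule the dequeue work; the actual processing happens in the
 * virtio_gpu_dequeue_*_func() work handlers below.
 */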
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

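/*
 * Allocate a vbuffer from the slab cache.  The command is always stored
 * inline (it must fit in MAX_INLINE_CMD_SIZE); responses up to
 * MAX_INLINE_RESP_SIZE use the inline area right after the command,
 * larger ones use the caller-provided resp_buf.
 */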
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);

	BUG_ON(size > MAX_INLINE_CMD_SIZE ||
	       size < sizeof(struct virtio_gpu_ctrl_hdr));
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static struct virtio_gpu_ctrl_hdr *
virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
	/* this assumes a vbuf contains a command that starts with a
	 * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
	 * virtqueues.
	 */
	return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer **vbuffer_p,
				     int size,
				     virtio_gpu_resp_cb cb)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kvfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

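/* Move all completed vbuffers from the virtqueue onto reclaim_list. */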
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

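/*
 * Work handler for the control virtqueue: reclaim completed buffers,
 * log error responses, process fence completions and invoke per-buffer
 * response callbacks before freeing the vbuffers.
 */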
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry(entry, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp, entry->seqno);

		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
				struct virtio_gpu_ctrl_hdr *cmd;
				cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
				DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
						      le32_to_cpu(resp->type),
						      le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			fence_id = le64_to_cpu(resp->fence_id);
			virtio_gpu_fence_event_process(vgdev, fence_id);
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		if (entry->objs)
			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
}

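/*
 * Work handler for the cursor virtqueue.  Cursor commands carry no
 * response payload, so completed buffers are just traced and freed.
 */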
void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		struct virtio_gpu_ctrl_hdr *resp =
			(struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->cursorq.vq, resp, entry->seqno);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
	int ret, s, i;
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct page *pg;

	if (WARN_ON(!PAGE_ALIGNED(data)))
		return NULL;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return NULL;
	}

	for_each_sgtable_sg(sgt, sg, i) {
		pg = vmalloc_to_page(data);
		if (!pg) {
			sg_free_table(sgt);
			kfree(sgt);
			return NULL;
		}

		s = min_t(int, PAGE_SIZE, size);
		sg_set_page(sg, pg, s, 0);

		size -= s;
		data += s;
	}

	return sgt;
}

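/*
 * Add an already prepared scatterlist array to the control virtqueue.
 * Waits until enough descriptors are free, emits the fence (if any) only
 * once the buffer is about to be queued, and leaves the host notification
 * to a later virtio_gpu_notify() call.
 */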
static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf,
				     struct virtio_gpu_fence *fence,
				     int elemcnt,
				     struct scatterlist **sgs,
				     int outcnt,
				     int incnt)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int ret, idx;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		if (fence && vbuf->objs)
			virtio_gpu_array_unlock_resv(vbuf->objs);
		free_vbuf(vgdev, vbuf);
		return -ENODEV;
	}

	if (vgdev->has_indirect)
		elemcnt = 1;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	if (vq->num_free < elemcnt) {
		spin_unlock(&vgdev->ctrlq.qlock);
		virtio_gpu_notify(vgdev);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
		goto again;
	}

	/* now that the position of the vbuf in the virtqueue is known, we can
	 * finally set the fence id
	 */
	if (fence) {
		virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
				      fence);
		if (vbuf->objs) {
			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
			virtio_gpu_array_unlock_resv(vbuf->objs);
		}
	}

	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	WARN_ON(ret);

	vbuf->seqno = ++vgdev->ctrlq.seqno;
	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf), vbuf->seqno);

	atomic_inc(&vgdev->pending_commands);

	spin_unlock(&vgdev->ctrlq.qlock);

	drm_dev_exit(idx);
	return 0;
}

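/*
 * Build the scatterlists for a vbuffer (command, optional data payload,
 * optional response buffer) and submit them via virtio_gpu_queue_ctrl_sgs().
 * A vmalloc'd data payload is wrapped in a temporary sg_table first.
 */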
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_fence *fence)
{
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	struct sg_table *sgt = NULL;
	int elemcnt = 0, outcnt = 0, incnt = 0, ret;

	/* set up vcmd */
	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	elemcnt++;
	sgs[outcnt] = &vcmd;
	outcnt++;

	/* set up vout */
	if (vbuf->data_size) {
		if (is_vmalloc_addr(vbuf->data_buf)) {
			int sg_ents;
			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
					     &sg_ents);
			if (!sgt) {
				if (fence && vbuf->objs)
					virtio_gpu_array_unlock_resv(vbuf->objs);
				return -ENOMEM;
			}

			elemcnt += sg_ents;
			sgs[outcnt] = sgt->sgl;
		} else {
			sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
			elemcnt++;
			sgs[outcnt] = &vout;
		}
		outcnt++;
	}

	/* set up vresp */
	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		elemcnt++;
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

	ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
					incnt);

	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
	return ret;
}

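/*
 * Kick the host once for all control commands queued since the last
 * notification, instead of notifying for every single command.
 */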
void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
	bool notify;

	if (!atomic_read(&vgdev->pending_commands))
		return;

	spin_lock(&vgdev->ctrlq.qlock);
	atomic_set(&vgdev->pending_commands, 0);
	notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
	spin_unlock(&vgdev->ctrlq.qlock);

	if (notify)
		virtqueue_notify(vgdev->ctrlq.vq);
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}

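/*
 * Queue a cursor command on the dedicated cursor virtqueue and notify the
 * host right away; cursor updates are not batched like control commands.
 */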
static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int idx, ret, outcnt;
	bool notify;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		free_vbuf(vgdev, vbuf);
		return;
	}

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		vbuf->seqno = ++vgdev->cursorq.seqno;
		trace_virtio_gpu_cmd_queue(vq,
					   virtio_gpu_vbuf_ctrl_hdr(vbuf),
					   vbuf->seqno);

		notify = virtqueue_kick_prepare(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (notify)
		virtqueue_notify(vq);

	drm_dev_exit(idx);
}

/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_object_array *objs,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	bo->created = true;
}

static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo;

	bo = vbuf->resp_cb_data;
	vbuf->resp_cb_data = NULL;

	virtio_gpu_cleanup_object(bo);
}

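/*
 * Ask the host to destroy a resource.  The driver-side object is cleaned
 * up from the response callback above, or immediately if queueing failed.
 */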
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int ret;

	cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
					virtio_gpu_cmd_unref_cb);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->resp_cb_data = bo;
	ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	if (ret < 0)
		virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height,
				   struct virtio_gpu_object_array *objs,
				   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint64_t offset,
					uint32_t width, uint32_t height,
					uint32_t x, uint32_t y,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);

	if (virtio_gpu_is_shmem(bo) && use_dma_api)
		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
					    bo->base.sgt, DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void
virtio_gpu_cmd_resource_detach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_detach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

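/*
 * Response callback for VIRTIO_GPU_CMD_GET_DISPLAY_INFO: store the
 * per-scanout mode info, wake up waiters and report a hotplug event.
 */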
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	if (vgdev->capsets) {
		vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
		vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
		vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	} else {
		DRM_ERROR("invalid capset memory.");
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}

static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -EINVAL;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	const struct drm_edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_edid_read_custom(&output->conn, virtio_get_edid_block, resp);
	drm_edid_connector_update(&output->conn, new_edid);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->drm_edid;
	output->drm_edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);

	drm_edid_free(old_edid);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

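/*
 * Request the contents of a capability set from the host.  The result is
 * cached in vgdev->cap_cache; if a matching entry already exists it is
 * returned instead of issuing a new command.
 */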
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t context_init, uint32_t nlen,
				   const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	cmd_p->context_init = cpu_to_le32(context_init);
	strscpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name));
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);

	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					uint32_t stride,
					uint32_t layer_stride,
					struct drm_virtgpu_3d_box *box,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);

	if (virtio_gpu_is_shmem(bo) && use_dma_api)
		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
					    bo->base.sgt, DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);
	cmd_p->stride = cpu_to_le32(stride);
	cmd_p->layer_stride = cpu_to_le32(layer_stride);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  uint32_t stride,
					  uint32_t layer_stride,
					  struct drm_virtgpu_3d_box *box,
					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);
	cmd_p->stride = cpu_to_le32(stride);
	cmd_p->layer_stride = cpu_to_le32(layer_stride);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id,
			   struct virtio_gpu_object_array *objs,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

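/*
 * Attach or detach the guest backing pages of an object on the host side.
 * obj->attached tracks the current state so each transition is only sent
 * once.
 */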
void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj,
			      struct virtio_gpu_mem_entry *ents,
			      unsigned int nents)
{
	if (obj->attached)
		return;

	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents, NULL);

	obj->attached = true;
}

void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj,
			      struct virtio_gpu_fence *fence)
{
	if (!obj->attached)
		return;

	virtio_gpu_cmd_resource_detach_backing(vgdev, obj->hw_res_handle,
					       fence);

	obj->attached = false;
}

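/* Send the current cursor state of an output to the host. */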
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}

static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
					    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *obj =
		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
	struct virtio_gpu_resp_resource_uuid *resp =
		(struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf;
	uint32_t resp_type = le32_to_cpu(resp->hdr.type);

	spin_lock(&vgdev->resource_export_lock);
	WARN_ON(obj->uuid_state != STATE_INITIALIZING);

	if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
	    obj->uuid_state == STATE_INITIALIZING) {
		import_uuid(&obj->uuid, resp->uuid);
		obj->uuid_state = STATE_OK;
	} else {
		obj->uuid_state = STATE_ERR;
	}
	spin_unlock(&vgdev->resource_export_lock);

	wake_up_all(&vgdev->resp_wq);
}

int
virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_resource_assign_uuid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_resp_resource_uuid *resp_buf;

	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
	if (!resp_buf) {
		spin_lock(&vgdev->resource_export_lock);
		bo->uuid_state = STATE_ERR;
		spin_unlock(&vgdev->resource_export_lock);
		virtio_gpu_array_put_free(objs);
		return -ENOMEM;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->objs = objs;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev,
					   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo =
		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
	struct virtio_gpu_resp_map_info *resp =
		(struct virtio_gpu_resp_map_info *)vbuf->resp_buf;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	uint32_t resp_type = le32_to_cpu(resp->hdr.type);

	spin_lock(&vgdev->host_visible_lock);

	if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) {
		vram->map_info = resp->map_info;
		vram->map_state = STATE_OK;
	} else {
		vram->map_state = STATE_ERR;
	}

	spin_unlock(&vgdev->host_visible_lock);
	wake_up_all(&vgdev->resp_wq);
}

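/*
 * Ask the host to map a blob resource at the given offset in the
 * host-visible region.  The mapping info is stored by the response
 * callback above.
 */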
int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
		       struct virtio_gpu_object_array *objs, uint64_t offset)
{
	struct virtio_gpu_resource_map_blob *cmd_p;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_resp_map_info *resp_buf;

	resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_map_info), resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	vbuf->objs = objs;

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
			  struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unmap_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_mem_entry *ents,
				    uint32_t nents)
{
	struct virtio_gpu_resource_create_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB);
	cmd_p->hdr.ctx_id = cpu_to_le32(params->ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->blob_mem = cpu_to_le32(params->blob_mem);
	cmd_p->blob_flags = cpu_to_le32(params->blob_flags);
	cmd_p->blob_id = cpu_to_le64(params->blob_id);
	cmd_p->size = cpu_to_le64(params->size);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	bo->created = true;
}

void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
				     uint32_t scanout_id,
				     struct virtio_gpu_object *bo,
				     struct drm_framebuffer *fb,
				     uint32_t width, uint32_t height,
				     uint32_t x, uint32_t y)
{
	uint32_t i;
	struct virtio_gpu_set_scanout_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	uint32_t format = virtio_gpu_translate_format(fb->format->format);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);

	cmd_p->format = cpu_to_le32(format);
	cmd_p->width = cpu_to_le32(fb->width);
	cmd_p->height = cpu_to_le32(fb->height);

	for (i = 0; i < 4; i++) {
		cmd_p->strides[i] = cpu_to_le32(fb->pitches[i]);
		cmd_p->offsets[i] = cpu_to_le32(fb->offsets[i]);
	}

	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}