/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include <drm/drm_edid.h>
#include <drm/drm_print.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE \
			       + MAX_INLINE_RESP_SIZE)

static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

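/*
 * Virtqueue "used buffer" callbacks: interrupt context only schedules the
 * per-queue dequeue work, which does the actual reclaim.
 */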
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

/* For drm_panic */
static struct virtio_gpu_vbuffer*
virtio_gpu_panic_get_vbuf(struct virtio_gpu_device *vgdev, int size)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_ATOMIC);

	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;
	vbuf->resp_cb = NULL;
	vbuf->resp_size = sizeof(struct virtio_gpu_ctrl_hdr);
	vbuf->resp_buf = (void *)vbuf->buf + size;
	return vbuf;
}

static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);

	BUG_ON(size > MAX_INLINE_CMD_SIZE ||
	       size < sizeof(struct virtio_gpu_ctrl_hdr));
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static struct virtio_gpu_ctrl_hdr *
virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
	/* this assumes a vbuf contains a command that starts with a
	 * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
	 * virtqueues.
	 */
	return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

/* For drm_panic */
static void *virtio_gpu_panic_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
					     struct virtio_gpu_vbuffer **vbuffer_p,
					     int cmd_size)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_panic_get_vbuf(vgdev, cmd_size);
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
					virtio_gpu_resp_cb cb,
					struct virtio_gpu_vbuffer **vbuffer_p,
					int cmd_size, int resp_size,
					void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer **vbuffer_p,
				     int size,
				     virtio_gpu_resp_cb cb)
{
	return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
					 sizeof(struct virtio_gpu_ctrl_hdr),
					 NULL);
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kvfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

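/* Move all completed buffers off @vq onto @reclaim_list (queue lock held). */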
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry(entry, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp, entry->seqno);

		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
				struct virtio_gpu_ctrl_hdr *cmd;

				cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
				DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
						      le32_to_cpu(resp->type),
						      le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			fence_id = le64_to_cpu(resp->fence_id);
			virtio_gpu_fence_event_process(vgdev, fence_id);
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		if (entry->objs)
			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		struct virtio_gpu_ctrl_hdr *resp =
			(struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->cursorq.vq, resp, entry->seqno);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
	int ret, s, i;
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct page *pg;

	if (WARN_ON(!PAGE_ALIGNED(data)))
		return NULL;

	sgt = kmalloc_obj(*sgt, GFP_KERNEL);
	if (!sgt)
		return NULL;

	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return NULL;
	}

	for_each_sgtable_sg(sgt, sg, i) {
		pg = vmalloc_to_page(data);
		if (!pg) {
			sg_free_table(sgt);
			kfree(sgt);
			return NULL;
		}

		s = min_t(int, PAGE_SIZE, size);
		sg_set_page(sg, pg, s, 0);

		size -= s;
		data += s;
	}

	return sgt;
}

/* For drm_panic */
static int virtio_gpu_panic_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
					   struct virtio_gpu_vbuffer *vbuf,
					   int elemcnt,
					   struct scatterlist **sgs,
					   int outcnt,
					   int incnt)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int ret;

	if (vgdev->has_indirect)
		elemcnt = 1;

	if (vq->num_free < elemcnt)
		return -ENOMEM;

	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	WARN_ON(ret);

	vbuf->seqno = ++vgdev->ctrlq.seqno;
	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf), vbuf->seqno);

	atomic_inc(&vgdev->pending_commands);

	return 0;
}

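/*
 * Queue a command on the control virtqueue: wait for enough free
 * descriptors, emit the fence (if any) under the queue lock so fence ids
 * match submission order, then add the scatterlists.  The host is not
 * kicked here; see virtio_gpu_notify().
 */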
static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf,
				     struct virtio_gpu_fence *fence,
				     int elemcnt,
				     struct scatterlist **sgs,
				     int outcnt,
				     int incnt)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int ret, idx;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		if (fence && vbuf->objs)
			virtio_gpu_array_unlock_resv(vbuf->objs);
		free_vbuf(vgdev, vbuf);
		return -ENODEV;
	}

	if (vgdev->has_indirect)
		elemcnt = 1;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	if (vq->num_free < elemcnt) {
		spin_unlock(&vgdev->ctrlq.qlock);
		virtio_gpu_notify(vgdev);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
		goto again;
	}

	/* now that the position of the vbuf in the virtqueue is known, we can
	 * finally set the fence id
	 */
	if (fence) {
		virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
				      fence);
		if (vbuf->objs) {
			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
			virtio_gpu_array_unlock_resv(vbuf->objs);
		}
	}

	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	WARN_ON(ret);

	vbuf->seqno = ++vgdev->ctrlq.seqno;
	trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf), vbuf->seqno);

	atomic_inc(&vgdev->pending_commands);

	spin_unlock(&vgdev->ctrlq.qlock);

	drm_dev_exit(idx);
	return 0;
}

/* For drm_panic */
static int virtio_gpu_panic_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct scatterlist *sgs[3], vcmd, vresp;
	int elemcnt = 0, outcnt = 0, incnt = 0;

	/* set up vcmd */
	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	elemcnt++;
	sgs[outcnt] = &vcmd;
	outcnt++;

	/* set up vresp */
	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		elemcnt++;
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

	return virtio_gpu_panic_queue_ctrl_sgs(vgdev, vbuf,
					       elemcnt, sgs,
					       outcnt, incnt);
}

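/*
 * Build the scatterlists for a command: the command itself, an optional
 * data payload (vmalloc'ed payloads are converted to an sg_table first)
 * and an optional response buffer, then hand them to the control queue.
 */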
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_fence *fence)
{
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	struct sg_table *sgt = NULL;
	int elemcnt = 0, outcnt = 0, incnt = 0, ret;

	/* set up vcmd */
	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	elemcnt++;
	sgs[outcnt] = &vcmd;
	outcnt++;

	/* set up vout */
	if (vbuf->data_size) {
		if (is_vmalloc_addr(vbuf->data_buf)) {
			int sg_ents;

			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
					     &sg_ents);
			if (!sgt) {
				if (fence && vbuf->objs)
					virtio_gpu_array_unlock_resv(vbuf->objs);
				return -ENOMEM;
			}

			elemcnt += sg_ents;
			sgs[outcnt] = sgt->sgl;
		} else {
			sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
			elemcnt++;
			sgs[outcnt] = &vout;
		}
		outcnt++;
	}

	/* set up vresp */
	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		elemcnt++;
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

	ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
					incnt);

	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
	return ret;
}

/* For drm_panic */
void virtio_gpu_panic_notify(struct virtio_gpu_device *vgdev)
{
	bool notify;

	if (!atomic_read(&vgdev->pending_commands))
		return;

	atomic_set(&vgdev->pending_commands, 0);
	notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);

	if (notify)
		virtqueue_notify(vgdev->ctrlq.vq);
}

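/*
 * Kick the host once for everything queued since the last notify.
 * Queueing and notification are decoupled so that a burst of commands
 * needs only a single kick.
 */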
void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
	bool notify;

	if (!atomic_read(&vgdev->pending_commands))
		return;

	spin_lock(&vgdev->ctrlq.qlock);
	atomic_set(&vgdev->pending_commands, 0);
	notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
	spin_unlock(&vgdev->ctrlq.qlock);

	if (notify)
		virtqueue_notify(vgdev->ctrlq.vq);
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}

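/*
 * Cursor updates go on the separate cursor queue and are kicked
 * immediately; there is no notify batching on this queue.
 */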
static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int idx, ret, outcnt;
	bool notify;

	if (!drm_dev_enter(vgdev->ddev, &idx)) {
		free_vbuf(vgdev, vbuf);
		return;
	}

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		vbuf->seqno = ++vgdev->cursorq.seqno;
		trace_virtio_gpu_cmd_queue(vq,
					   virtio_gpu_vbuf_ctrl_hdr(vbuf),
					   vbuf->seqno);

		notify = virtqueue_kick_prepare(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (notify)
		virtqueue_notify(vq);

	drm_dev_exit(idx);
}

/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_object_array *objs,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
	bo->created = true;
}

static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo;

	bo = vbuf->resp_cb_data;
	vbuf->resp_cb_data = NULL;

	virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int ret;

	cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
					virtio_gpu_cmd_unref_cb);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->resp_cb_data = bo;
	ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	if (ret < 0)
		virtio_gpu_cleanup_object(bo);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

/* For drm_panic */
void virtio_gpu_panic_cmd_resource_flush(struct virtio_gpu_device *vgdev,
					 uint32_t resource_id,
					 uint32_t x, uint32_t y,
					 uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_panic_alloc_cmd_resp(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = NULL;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_panic_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height,
				   struct virtio_gpu_object_array *objs,
				   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

/* For drm_panic */
int virtio_gpu_panic_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					     uint64_t offset,
					     uint32_t width, uint32_t height,
					     uint32_t x, uint32_t y,
					     struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);

	if (virtio_gpu_is_shmem(bo) && use_dma_api)
		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
					    bo->base.sgt, DMA_TO_DEVICE);

	cmd_p = virtio_gpu_panic_alloc_cmd_resp(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	return virtio_gpu_panic_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint64_t offset,
					uint32_t width, uint32_t height,
					uint32_t x, uint32_t y,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);

	if (virtio_gpu_is_shmem(bo) && use_dma_api)
		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
					    bo->base.sgt, DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void
virtio_gpu_cmd_resource_detach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_detach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	if (vgdev->capsets) {
		vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
		vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
		vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	} else {
		DRM_ERROR("invalid capset memory.");
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}

static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -EINVAL;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	const struct drm_edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_edid_read_custom(&output->conn, virtio_get_edid_block, resp);
	drm_edid_connector_update(&output->conn, new_edid);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->drm_edid;
	output->drm_edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);

	drm_edid_free(old_edid);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc_obj(struct virtio_gpu_resp_display_info, GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc_obj(struct virtio_gpu_resp_capset_info, GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

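/*
 * Fetch one capset from the host and cache it.  If another task already
 * added a matching cache entry while this one was being set up, reuse the
 * existing entry and free the new one.
 */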
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc_obj(*cache_ent, GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc_obj(struct virtio_gpu_resp_edid, GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t context_init, uint32_t nlen,
				   const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	cmd_p->context_init = cpu_to_le32(context_init);
	strscpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name));
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);

	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					uint32_t stride,
					uint32_t layer_stride,
					struct drm_virtgpu_3d_box *box,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);

	if (virtio_gpu_is_shmem(bo) && use_dma_api)
		dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
					    bo->base.sgt, DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);
	cmd_p->stride = cpu_to_le32(stride);
	cmd_p->layer_stride = cpu_to_le32(layer_stride);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  uint32_t stride,
					  uint32_t layer_stride,
					  struct drm_virtgpu_3d_box *box,
					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	convert_to_hw_box(&cmd_p->box, box);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);
	cmd_p->stride = cpu_to_le32(stride);
	cmd_p->layer_stride = cpu_to_le32(layer_stride);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id,
			   struct virtio_gpu_object_array *objs,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}

void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj,
			      struct virtio_gpu_mem_entry *ents,
			      unsigned int nents)
{
	if (obj->attached)
		return;

	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents, NULL);

	obj->attached = true;
}

void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj,
			      struct virtio_gpu_fence *fence)
{
	if (!obj->attached)
		return;

	virtio_gpu_cmd_resource_detach_backing(vgdev, obj->hw_res_handle,
					       fence);

	obj->attached = false;
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}

static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
					    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *obj =
		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
	struct virtio_gpu_resp_resource_uuid *resp =
		(struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf;
	uint32_t resp_type = le32_to_cpu(resp->hdr.type);

	spin_lock(&vgdev->resource_export_lock);
	WARN_ON(obj->uuid_state != STATE_INITIALIZING);

	if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
	    obj->uuid_state == STATE_INITIALIZING) {
		import_uuid(&obj->uuid, resp->uuid);
		obj->uuid_state = STATE_OK;
	} else {
		obj->uuid_state = STATE_ERR;
	}
	spin_unlock(&vgdev->resource_export_lock);

	wake_up_all(&vgdev->resp_wq);
}

int
virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_resource_assign_uuid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_resp_resource_uuid *resp_buf;

	resp_buf = kzalloc_obj(*resp_buf, GFP_KERNEL);
	if (!resp_buf) {
		spin_lock(&vgdev->resource_export_lock);
		bo->uuid_state = STATE_ERR;
		spin_unlock(&vgdev->resource_export_lock);
		virtio_gpu_array_put_free(objs);
		return -ENOMEM;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	vbuf->objs = objs;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev,
					   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_object *bo =
		gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
	struct virtio_gpu_resp_map_info *resp =
		(struct virtio_gpu_resp_map_info *)vbuf->resp_buf;
	struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
	uint32_t resp_type = le32_to_cpu(resp->hdr.type);

	spin_lock(&vgdev->host_visible_lock);

	if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) {
		vram->map_info = resp->map_info;
		vram->map_state = STATE_OK;
	} else {
		vram->map_state = STATE_ERR;
	}

	spin_unlock(&vgdev->host_visible_lock);
	wake_up_all(&vgdev->resp_wq);
}

int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
		       struct virtio_gpu_object_array *objs, uint64_t offset)
{
	struct virtio_gpu_resource_map_blob *cmd_p;
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_resp_map_info *resp_buf;

	resp_buf = kzalloc_obj(*resp_buf, GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_map_info), resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	vbuf->objs = objs;

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
			  struct virtio_gpu_object *bo)
{
	struct virtio_gpu_resource_unmap_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_mem_entry *ents,
				    uint32_t nents)
{
	struct virtio_gpu_resource_create_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB);
	cmd_p->hdr.ctx_id = cpu_to_le32(params->ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->blob_mem = cpu_to_le32(params->blob_mem);
	cmd_p->blob_flags = cpu_to_le32(params->blob_flags);
	cmd_p->blob_id = cpu_to_le64(params->blob_id);
	cmd_p->size = cpu_to_le64(params->size);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	bo->created = true;

	if (nents)
		bo->attached = true;
}

void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
				     uint32_t scanout_id,
				     struct virtio_gpu_object *bo,
				     struct drm_framebuffer *fb,
				     uint32_t width, uint32_t height,
				     uint32_t x, uint32_t y)
{
	uint32_t i;
	struct virtio_gpu_set_scanout_blob *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	uint32_t format = virtio_gpu_translate_format(fb->format->format);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT_BLOB);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);

	cmd_p->format = cpu_to_le32(format);
	cmd_p->width = cpu_to_le32(fb->width);
	cmd_p->height = cpu_to_le32(fb->height);

	for (i = 0; i < 4; i++) {
		cmd_p->strides[i] = cpu_to_le32(fb->pitches[i]);
		cmd_p->offsets[i] = cpu_to_le32(fb->offsets[i]);
	}

	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}