xref: /freebsd/sys/dev/virtio/gpu/virtio_gpu.c (revision 1ea0721e1a566fdb552b0a919c22667844a894d9)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 * Copyright (c) 2023, Arm Ltd
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO GPU device. */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/fbio.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sglist.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/resource.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/gpu/virtio_gpu.h>

#include <dev/vt/vt.h>
#include <dev/vt/hw/fb/vt_fb.h>
#include <dev/vt/colors/vt_termcolors.h>

#include "fb_if.h"

#define VTGPU_FEATURES	0

/* The guest can allocate resource IDs; we only need one */
#define	VTGPU_RESOURCE_ID	1

struct vtgpu_softc {
	/* Must be first so we can cast from info -> softc */
	struct fb_info		 vtgpu_fb_info;
	struct virtio_gpu_config vtgpu_gpucfg;

	device_t		 vtgpu_dev;
	uint64_t		 vtgpu_features;

	struct virtqueue	*vtgpu_ctrl_vq;

	uint64_t		 vtgpu_next_fence;

	bool			 vtgpu_have_fb_info;
};

static int	vtgpu_modevent(module_t, int, void *);

static int	vtgpu_probe(device_t);
static int	vtgpu_attach(device_t);
static int	vtgpu_detach(device_t);

static int	vtgpu_negotiate_features(struct vtgpu_softc *);
static int	vtgpu_setup_features(struct vtgpu_softc *);
static void	vtgpu_read_config(struct vtgpu_softc *,
		    struct virtio_gpu_config *);
static int	vtgpu_alloc_virtqueue(struct vtgpu_softc *);
static int	vtgpu_get_display_info(struct vtgpu_softc *);
static int	vtgpu_create_2d(struct vtgpu_softc *);
static int	vtgpu_attach_backing(struct vtgpu_softc *);
static int	vtgpu_set_scanout(struct vtgpu_softc *, uint32_t, uint32_t,
		    uint32_t, uint32_t);
static int	vtgpu_transfer_to_host_2d(struct vtgpu_softc *, uint32_t,
		    uint32_t, uint32_t, uint32_t);
static int	vtgpu_resource_flush(struct vtgpu_softc *, uint32_t, uint32_t,
		    uint32_t, uint32_t);

static vd_blank_t		vtgpu_fb_blank;
static vd_bitblt_text_t		vtgpu_fb_bitblt_text;
static vd_bitblt_bmp_t		vtgpu_fb_bitblt_bitmap;
static vd_drawrect_t		vtgpu_fb_drawrect;
static vd_setpixel_t		vtgpu_fb_setpixel;
static vd_bitblt_argb_t		vtgpu_fb_bitblt_argb;

static struct vt_driver vtgpu_fb_driver = {
	.vd_name = "virtio_gpu",
	.vd_init = vt_fb_init,
	.vd_fini = vt_fb_fini,
	.vd_blank = vtgpu_fb_blank,
	.vd_bitblt_text = vtgpu_fb_bitblt_text,
	.vd_invalidate_text = vt_fb_invalidate_text,
	.vd_bitblt_bmp = vtgpu_fb_bitblt_bitmap,
	.vd_bitblt_argb = vtgpu_fb_bitblt_argb,
	.vd_drawrect = vtgpu_fb_drawrect,
	.vd_setpixel = vtgpu_fb_setpixel,
	.vd_postswitch = vt_fb_postswitch,
	.vd_priority = VD_PRIORITY_GENERIC+10,
	.vd_fb_ioctl = vt_fb_ioctl,
	.vd_fb_mmap = NULL,	/* No mmap as we need to signal the host */
	.vd_suspend = vt_fb_suspend,
	.vd_resume = vt_fb_resume,
};

VT_DRIVER_DECLARE(vt_vtgpu, vtgpu_fb_driver);

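/*
 * vt(4) driver callbacks.  The drawing callbacks below wrap the generic
 * vt_fb routines: they render into the guest framebuffer first, then use
 * VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D and VIRTIO_GPU_CMD_RESOURCE_FLUSH to
 * make the host redraw the affected rectangle.
 */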
static void
vtgpu_fb_blank(struct vt_device *vd, term_color_t color)
{
	struct vtgpu_softc *sc;
	struct fb_info *info;

	info = vd->vd_softc;
	sc = (struct vtgpu_softc *)info;

	vt_fb_blank(vd, color);

	vtgpu_transfer_to_host_2d(sc, 0, 0, sc->vtgpu_fb_info.fb_width,
	    sc->vtgpu_fb_info.fb_height);
	vtgpu_resource_flush(sc, 0, 0, sc->vtgpu_fb_info.fb_width,
	    sc->vtgpu_fb_info.fb_height);
}

static void
vtgpu_fb_bitblt_text(struct vt_device *vd, const struct vt_window *vw,
    const term_rect_t *area)
{
	struct vtgpu_softc *sc;
	struct fb_info *info;
	int x, y, width, height;

	info = vd->vd_softc;
	sc = (struct vtgpu_softc *)info;

	vt_fb_bitblt_text(vd, vw, area);

	x = area->tr_begin.tp_col * vw->vw_font->vf_width +
	    vw->vw_draw_area.tr_begin.tp_col;
	y = area->tr_begin.tp_row * vw->vw_font->vf_height +
	    vw->vw_draw_area.tr_begin.tp_row;
	width = area->tr_end.tp_col * vw->vw_font->vf_width +
	    vw->vw_draw_area.tr_begin.tp_col - x;
	height = area->tr_end.tp_row * vw->vw_font->vf_height +
	    vw->vw_draw_area.tr_begin.tp_row - y;

	vtgpu_transfer_to_host_2d(sc, x, y, width, height);
	vtgpu_resource_flush(sc, x, y, width, height);
}

static void
vtgpu_fb_bitblt_bitmap(struct vt_device *vd, const struct vt_window *vw,
    const uint8_t *pattern, const uint8_t *mask,
    unsigned int width, unsigned int height,
    unsigned int x, unsigned int y, term_color_t fg, term_color_t bg)
{
	struct vtgpu_softc *sc;
	struct fb_info *info;

	info = vd->vd_softc;
	sc = (struct vtgpu_softc *)info;

	vt_fb_bitblt_bitmap(vd, vw, pattern, mask, width, height, x, y, fg, bg);

	vtgpu_transfer_to_host_2d(sc, x, y, width, height);
	vtgpu_resource_flush(sc, x, y, width, height);
}

static int
vtgpu_fb_bitblt_argb(struct vt_device *vd, const struct vt_window *vw,
    const uint8_t *argb,
    unsigned int width, unsigned int height,
    unsigned int x, unsigned int y)
{

	return (EOPNOTSUPP);
}

static void
vtgpu_fb_drawrect(struct vt_device *vd, int x1, int y1, int x2, int y2,
    int fill, term_color_t color)
{
	struct vtgpu_softc *sc;
	struct fb_info *info;
	int width, height;

	info = vd->vd_softc;
	sc = (struct vtgpu_softc *)info;

	vt_fb_drawrect(vd, x1, y1, x2, y2, fill, color);

	width = x2 - x1 + 1;
	height = y2 - y1 + 1;
	vtgpu_transfer_to_host_2d(sc, x1, y1, width, height);
	vtgpu_resource_flush(sc, x1, y1, width, height);
}

static void
vtgpu_fb_setpixel(struct vt_device *vd, int x, int y, term_color_t color)
{
	struct vtgpu_softc *sc;
	struct fb_info *info;

	info = vd->vd_softc;
	sc = (struct vtgpu_softc *)info;

	vt_fb_setpixel(vd, x, y, color);

	vtgpu_transfer_to_host_2d(sc, x, y, 1, 1);
	vtgpu_resource_flush(sc, x, y, 1, 1);
}

static struct virtio_feature_desc vtgpu_feature_desc[] = {
	{ VIRTIO_GPU_F_VIRGL,		"VirGL"		},
	{ VIRTIO_GPU_F_EDID,		"EDID"		},
	{ VIRTIO_GPU_F_RESOURCE_UUID,	"ResUUID"	},
	{ VIRTIO_GPU_F_RESOURCE_BLOB,	"ResBlob"	},
	{ VIRTIO_GPU_F_CONTEXT_INIT,	"ContextInit"	},
	{ 0, NULL }
};

static device_method_t vtgpu_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtgpu_probe),
	DEVMETHOD(device_attach,	vtgpu_attach),
	DEVMETHOD(device_detach,	vtgpu_detach),

	DEVMETHOD_END
};

static driver_t vtgpu_driver = {
	"vtgpu",
	vtgpu_methods,
	sizeof(struct vtgpu_softc)
};

VIRTIO_DRIVER_MODULE(virtio_gpu, vtgpu_driver, vtgpu_modevent, NULL);
MODULE_VERSION(virtio_gpu, 1);
MODULE_DEPEND(virtio_gpu, virtio, 1, 1, 1);

VIRTIO_SIMPLE_PNPINFO(virtio_gpu, VIRTIO_ID_GPU,
    "VirtIO GPU");

static int
vtgpu_modevent(module_t mod, int type, void *unused)
{
	int error;

	switch (type) {
	case MOD_LOAD:
	case MOD_QUIESCE:
	case MOD_UNLOAD:
	case MOD_SHUTDOWN:
		error = 0;
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

static int
vtgpu_probe(device_t dev)
{
	return (VIRTIO_SIMPLE_PROBE(dev, virtio_gpu));
}

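/*
 * Bring up the device: negotiate features, read the device config, set up
 * the control virtqueue, size the framebuffer from the host's display
 * info, and wire a 2D resource backed by guest memory to scanout 0 before
 * handing the framebuffer to vt(4).
 */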
static int
vtgpu_attach(device_t dev)
{
	struct vtgpu_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->vtgpu_have_fb_info = false;
	sc->vtgpu_dev = dev;
	sc->vtgpu_next_fence = 1;
	virtio_set_feature_desc(dev, vtgpu_feature_desc);

	error = vtgpu_setup_features(sc);
	if (error != 0) {
		device_printf(dev, "cannot setup features\n");
		goto fail;
	}

	vtgpu_read_config(sc, &sc->vtgpu_gpucfg);

	error = vtgpu_alloc_virtqueue(sc);
	if (error != 0) {
		device_printf(dev, "cannot allocate virtqueue\n");
		goto fail;
	}

	virtio_setup_intr(dev, INTR_TYPE_TTY);

	/* Read the device info to get the display size */
	error = vtgpu_get_display_info(sc);
	if (error != 0) {
		goto fail;
	}

	/*
	 * TODO: This doesn't need to be contigmalloc as we
	 * can use scatter-gather lists.
	 */
	sc->vtgpu_fb_info.fb_vbase = (vm_offset_t)contigmalloc(
	    sc->vtgpu_fb_info.fb_size, M_DEVBUF, M_WAITOK|M_ZERO, 0, ~0, 4, 0);
	sc->vtgpu_fb_info.fb_pbase = pmap_kextract(sc->vtgpu_fb_info.fb_vbase);

	/* Create the 2d resource */
	error = vtgpu_create_2d(sc);
	if (error != 0) {
		goto fail;
	}

	/* Attach the backing memory */
	error = vtgpu_attach_backing(sc);
	if (error != 0) {
		goto fail;
	}

	/* Set the scanout to link the framebuffer to the display scanout */
	error = vtgpu_set_scanout(sc, 0, 0, sc->vtgpu_fb_info.fb_width,
	    sc->vtgpu_fb_info.fb_height);
	if (error != 0) {
		goto fail;
	}

	vt_allocate(&vtgpu_fb_driver, &sc->vtgpu_fb_info);
	sc->vtgpu_have_fb_info = true;

	error = vtgpu_transfer_to_host_2d(sc, 0, 0, sc->vtgpu_fb_info.fb_width,
	    sc->vtgpu_fb_info.fb_height);
	if (error != 0)
		goto fail;
	error = vtgpu_resource_flush(sc, 0, 0, sc->vtgpu_fb_info.fb_width,
	    sc->vtgpu_fb_info.fb_height);

fail:
	if (error != 0)
		vtgpu_detach(dev);

	return (error);
}

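/*
 * Tear down the vt(4) binding and release the guest framebuffer.  The
 * host-side resource is left in place for now (see the TODO below).
 */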
static int
vtgpu_detach(device_t dev)
{
	struct vtgpu_softc *sc;

	sc = device_get_softc(dev);
	if (sc->vtgpu_have_fb_info)
		vt_deallocate(&vtgpu_fb_driver, &sc->vtgpu_fb_info);
	if (sc->vtgpu_fb_info.fb_vbase != 0) {
		MPASS(sc->vtgpu_fb_info.fb_size != 0);
		free((void *)sc->vtgpu_fb_info.fb_vbase,
		    M_DEVBUF);
	}

	/* TODO: Tell the host we are detaching */

	return (0);
}

static int
vtgpu_negotiate_features(struct vtgpu_softc *sc)
{
	device_t dev;
	uint64_t features;

	dev = sc->vtgpu_dev;
	features = VTGPU_FEATURES;

	sc->vtgpu_features = virtio_negotiate_features(dev, features);
	return (virtio_finalize_features(dev));
}

static int
vtgpu_setup_features(struct vtgpu_softc *sc)
{
	int error;

	error = vtgpu_negotiate_features(sc);
	if (error != 0)
		return (error);

	return (0);
}

static void
vtgpu_read_config(struct vtgpu_softc *sc,
    struct virtio_gpu_config *gpucfg)
{
	device_t dev;

	dev = sc->vtgpu_dev;

	bzero(gpucfg, sizeof(struct virtio_gpu_config));

#define VTGPU_GET_CONFIG(_dev, _field, _cfg)			\
	virtio_read_device_config(_dev,				\
	    offsetof(struct virtio_gpu_config, _field),	\
	    &(_cfg)->_field, sizeof((_cfg)->_field))

	VTGPU_GET_CONFIG(dev, events_read, gpucfg);
	VTGPU_GET_CONFIG(dev, events_clear, gpucfg);
	VTGPU_GET_CONFIG(dev, num_scanouts, gpucfg);
	VTGPU_GET_CONFIG(dev, num_capsets, gpucfg);

#undef VTGPU_GET_CONFIG
}

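/*
 * The virtio-gpu device exposes a control queue and a cursor queue; only
 * the control queue is needed here, so a single virtqueue is allocated.
 */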
static int
vtgpu_alloc_virtqueue(struct vtgpu_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info[2];
	int nvqs;

	dev = sc->vtgpu_dev;
	nvqs = 1;

	VQ_ALLOC_INFO_INIT(&vq_info[0], 0, NULL, sc, &sc->vtgpu_ctrl_vq,
	    "%s control", device_get_nameunit(dev));

	return (virtio_alloc_virtqueues(dev, nvqs, vq_info));
}

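/*
 * Send a request split across up to two readable segments, followed by a
 * single writable response buffer, on the control queue.  The call blocks:
 * the queue is notified and then polled until the host has consumed the
 * descriptor chain.
 */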
static int
vtgpu_req_resp2(struct vtgpu_softc *sc, void *req1, size_t req1len,
    void *req2, size_t req2len, void *resp, size_t resplen)
{
	struct sglist sg;
	struct sglist_seg segs[3];
	int error, rcount;

	sglist_init(&sg, 3, segs);

	rcount = 1;
	error = sglist_append(&sg, req1, req1len);
	if (error != 0) {
		device_printf(sc->vtgpu_dev,
		    "Unable to append the request to the sglist: %d\n",
		    error);
		return (error);
	}
	if (req2 != NULL) {
		error = sglist_append(&sg, req2, req2len);
		if (error != 0) {
			device_printf(sc->vtgpu_dev,
			    "Unable to append the request to the sglist: %d\n",
			    error);
			return (error);
		}
		rcount++;
	}
	error = sglist_append(&sg, resp, resplen);
	if (error != 0) {
		device_printf(sc->vtgpu_dev,
		    "Unable to append the response buffer to the sglist: %d\n",
		    error);
		return (error);
	}
	error = virtqueue_enqueue(sc->vtgpu_ctrl_vq, resp, &sg, rcount, 1);
	if (error != 0) {
		device_printf(sc->vtgpu_dev, "Enqueue failed: %d\n", error);
		return (error);
	}

	virtqueue_notify(sc->vtgpu_ctrl_vq);
	virtqueue_poll(sc->vtgpu_ctrl_vq, NULL);

	return (0);
}

static int
vtgpu_req_resp(struct vtgpu_softc *sc, void *req, size_t reqlen,
    void *resp, size_t resplen)
{
	return (vtgpu_req_resp2(sc, req, reqlen, NULL, 0, resp, resplen));
}

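/*
 * Query the host for its scanout configuration and size the framebuffer
 * from the first enabled scanout, using 32 bits per pixel.
 */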
502 vtgpu_get_display_info(struct vtgpu_softc *sc)
503 {
504 	struct {
505 		struct virtio_gpu_ctrl_hdr req;
506 		char pad;
507 		struct virtio_gpu_resp_display_info resp;
508 	} s = { 0 };
509 	int error;
510 
511 	s.req.type = htole32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
512 	s.req.flags = htole32(VIRTIO_GPU_FLAG_FENCE);
513 	s.req.fence_id = htole64(atomic_fetchadd_64(&sc->vtgpu_next_fence, 1));
514 
515 	error = vtgpu_req_resp(sc, &s.req, sizeof(s.req), &s.resp,
516 	    sizeof(s.resp));
517 	if (error != 0)
518 		return (error);
519 
	for (int i = 0; i < sc->vtgpu_gpucfg.num_scanouts; i++) {
		if (s.resp.pmodes[i].enabled != 0) {
			MPASS(i == 0);
			sc->vtgpu_fb_info.fb_name =
			    device_get_nameunit(sc->vtgpu_dev);

			sc->vtgpu_fb_info.fb_width =
			    le32toh(s.resp.pmodes[i].r.width);
			sc->vtgpu_fb_info.fb_height =
			    le32toh(s.resp.pmodes[i].r.height);
			/* 32 bits per pixel */
			sc->vtgpu_fb_info.fb_bpp = 32;
			sc->vtgpu_fb_info.fb_depth = 32;
			sc->vtgpu_fb_info.fb_size = sc->vtgpu_fb_info.fb_width *
			    sc->vtgpu_fb_info.fb_height * 4;
			sc->vtgpu_fb_info.fb_stride =
			    sc->vtgpu_fb_info.fb_width * 4;
			return (0);
		}
	}

	return (ENXIO);
}

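/*
 * Create the single host-side 2D resource that backs the console, using a
 * 32-bit BGRX format that matches the framebuffer layout set up above.
 */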
static int
vtgpu_create_2d(struct vtgpu_softc *sc)
{
	struct {
		struct virtio_gpu_resource_create_2d req;
		char pad;
		struct virtio_gpu_ctrl_hdr resp;
	} s = { 0 };
	int error;

	s.req.hdr.type = htole32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	s.req.hdr.flags = htole32(VIRTIO_GPU_FLAG_FENCE);
	s.req.hdr.fence_id = htole64(
	    atomic_fetchadd_64(&sc->vtgpu_next_fence, 1));

	s.req.resource_id = htole32(VTGPU_RESOURCE_ID);
	s.req.format = htole32(VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM);
	s.req.width = htole32(sc->vtgpu_fb_info.fb_width);
	s.req.height = htole32(sc->vtgpu_fb_info.fb_height);

	error = vtgpu_req_resp(sc, &s.req, sizeof(s.req), &s.resp,
	    sizeof(s.resp));
	if (error != 0)
		return (error);

	if (s.resp.type != htole32(VIRTIO_GPU_RESP_OK_NODATA)) {
		device_printf(sc->vtgpu_dev, "Invalid response type %x\n",
		    le32toh(s.resp.type));
		return (EINVAL);
	}

	return (0);
}

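/*
 * Attach the physically contiguous guest framebuffer as the single backing
 * entry of the 2D resource, so the host can read pixel data from it.
 */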
static int
vtgpu_attach_backing(struct vtgpu_softc *sc)
{
	struct {
		/*
		 * Split the backing and mem request arguments as some
		 * hypervisors, e.g. Parallels Desktop, don't work when
		 * they are enqueued together.
		 */
		struct {
			struct virtio_gpu_resource_attach_backing backing;
			char pad;
			struct virtio_gpu_mem_entry mem;
		} req;
		char pad;
		struct virtio_gpu_ctrl_hdr resp;
	} s = { 0 };
	int error;

	s.req.backing.hdr.type =
	    htole32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	s.req.backing.hdr.flags = htole32(VIRTIO_GPU_FLAG_FENCE);
	s.req.backing.hdr.fence_id = htole64(
	    atomic_fetchadd_64(&sc->vtgpu_next_fence, 1));

	s.req.backing.resource_id = htole32(VTGPU_RESOURCE_ID);
	s.req.backing.nr_entries = htole32(1);

	s.req.mem.addr = htole64(sc->vtgpu_fb_info.fb_pbase);
	s.req.mem.length = htole32(sc->vtgpu_fb_info.fb_size);

	error = vtgpu_req_resp2(sc, &s.req.backing, sizeof(s.req.backing),
	    &s.req.mem, sizeof(s.req.mem), &s.resp, sizeof(s.resp));
	if (error != 0)
		return (error);

	if (s.resp.type != htole32(VIRTIO_GPU_RESP_OK_NODATA)) {
		device_printf(sc->vtgpu_dev, "Invalid response type %x\n",
		    le32toh(s.resp.type));
		return (EINVAL);
	}

	return (0);
}

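/*
 * Bind the 2D resource to scanout 0 so that the host displays the
 * framebuffer contents.
 */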
static int
vtgpu_set_scanout(struct vtgpu_softc *sc, uint32_t x, uint32_t y,
    uint32_t width, uint32_t height)
{
	struct {
		struct virtio_gpu_set_scanout req;
		char pad;
		struct virtio_gpu_ctrl_hdr resp;
	} s = { 0 };
	int error;

	s.req.hdr.type = htole32(VIRTIO_GPU_CMD_SET_SCANOUT);
	s.req.hdr.flags = htole32(VIRTIO_GPU_FLAG_FENCE);
	s.req.hdr.fence_id = htole64(
	    atomic_fetchadd_64(&sc->vtgpu_next_fence, 1));

	s.req.r.x = htole32(x);
	s.req.r.y = htole32(y);
	s.req.r.width = htole32(width);
	s.req.r.height = htole32(height);

	s.req.scanout_id = 0;
	s.req.resource_id = htole32(VTGPU_RESOURCE_ID);

	error = vtgpu_req_resp(sc, &s.req, sizeof(s.req), &s.resp,
	    sizeof(s.resp));
	if (error != 0)
		return (error);

	if (s.resp.type != htole32(VIRTIO_GPU_RESP_OK_NODATA)) {
		device_printf(sc->vtgpu_dev, "Invalid response type %x\n",
		    le32toh(s.resp.type));
		return (EINVAL);
	}

	return (0);
}

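/*
 * Copy the given rectangle from the guest framebuffer into the host
 * resource.  The offset is the byte position of (x, y) within the
 * framebuffer, so only the dirty region needs to be transferred.
 */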
static int
vtgpu_transfer_to_host_2d(struct vtgpu_softc *sc, uint32_t x, uint32_t y,
    uint32_t width, uint32_t height)
{
	struct {
		struct virtio_gpu_transfer_to_host_2d req;
		char pad;
		struct virtio_gpu_ctrl_hdr resp;
	} s = { 0 };
	int error;

	s.req.hdr.type = htole32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	s.req.hdr.flags = htole32(VIRTIO_GPU_FLAG_FENCE);
	s.req.hdr.fence_id = htole64(
	    atomic_fetchadd_64(&sc->vtgpu_next_fence, 1));

	s.req.r.x = htole32(x);
	s.req.r.y = htole32(y);
	s.req.r.width = htole32(width);
	s.req.r.height = htole32(height);

	s.req.offset = htole64((y * sc->vtgpu_fb_info.fb_width + x) *
	    (sc->vtgpu_fb_info.fb_bpp / 8));
	s.req.resource_id = htole32(VTGPU_RESOURCE_ID);

	error = vtgpu_req_resp(sc, &s.req, sizeof(s.req), &s.resp,
	    sizeof(s.resp));
	if (error != 0)
		return (error);

	if (s.resp.type != htole32(VIRTIO_GPU_RESP_OK_NODATA)) {
		device_printf(sc->vtgpu_dev, "Invalid response type %x\n",
		    le32toh(s.resp.type));
		return (EINVAL);
	}

	return (0);
}

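/*
 * Ask the host to redraw the given rectangle of the resource on the
 * display.
 */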
static int
vtgpu_resource_flush(struct vtgpu_softc *sc, uint32_t x, uint32_t y,
    uint32_t width, uint32_t height)
{
	struct {
		struct virtio_gpu_resource_flush req;
		char pad;
		struct virtio_gpu_ctrl_hdr resp;
	} s = { 0 };
	int error;

	s.req.hdr.type = htole32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	s.req.hdr.flags = htole32(VIRTIO_GPU_FLAG_FENCE);
	s.req.hdr.fence_id = htole64(
	    atomic_fetchadd_64(&sc->vtgpu_next_fence, 1));

	s.req.r.x = htole32(x);
	s.req.r.y = htole32(y);
	s.req.r.width = htole32(width);
	s.req.r.height = htole32(height);

	s.req.resource_id = htole32(VTGPU_RESOURCE_ID);

	error = vtgpu_req_resp(sc, &s.req, sizeof(s.req), &s.resp,
	    sizeof(s.resp));
	if (error != 0)
		return (error);

	if (s.resp.type != htole32(VIRTIO_GPU_RESP_OK_NODATA)) {
		device_printf(sc->vtgpu_dev, "Invalid response type %x\n",
		    le32toh(s.resp.type));
		return (EINVAL);
	}

	return (0);
}
735