/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 * Copyright (c) 2023, Arm Ltd
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO GPU device. */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/fbio.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sglist.h>

#include <machine/atomic.h>
#include <machine/bus.h>
#include <machine/resource.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/gpu/virtio_gpu.h>

#include <dev/vt/vt.h>
#include <dev/vt/hw/fb/vt_fb.h>
#include <dev/vt/colors/vt_termcolors.h>

#include "fb_if.h"

#define VTGPU_FEATURES	0

/* The guest can allocate resource IDs, we only need one */
#define VTGPU_RESOURCE_ID	1

struct vtgpu_softc {
        /* Must be first so we can cast from info -> softc */
        struct fb_info vtgpu_fb_info;
        struct virtio_gpu_config vtgpu_gpucfg;

        device_t vtgpu_dev;
        uint64_t vtgpu_features;

        struct virtqueue *vtgpu_ctrl_vq;

        uint64_t vtgpu_next_fence;

        bool vtgpu_have_fb_info;
};

static int vtgpu_modevent(module_t, int, void *);

static int vtgpu_probe(device_t);
static int vtgpu_attach(device_t);
static int vtgpu_detach(device_t);

static int vtgpu_negotiate_features(struct vtgpu_softc *);
static int vtgpu_setup_features(struct vtgpu_softc *);
static void vtgpu_read_config(struct vtgpu_softc *,
    struct virtio_gpu_config *);
static int vtgpu_alloc_virtqueue(struct vtgpu_softc *);
static int vtgpu_get_display_info(struct vtgpu_softc *);
static int vtgpu_create_2d(struct vtgpu_softc *);
static int vtgpu_attach_backing(struct vtgpu_softc *);
static int vtgpu_set_scanout(struct vtgpu_softc *, uint32_t, uint32_t,
    uint32_t, uint32_t);
static int vtgpu_transfer_to_host_2d(struct vtgpu_softc *, uint32_t,
    uint32_t, uint32_t, uint32_t);
static int vtgpu_resource_flush(struct vtgpu_softc *, uint32_t, uint32_t,
    uint32_t, uint32_t);

static vd_blank_t vtgpu_fb_blank;
static vd_bitblt_text_t vtgpu_fb_bitblt_text;
static vd_bitblt_bmp_t vtgpu_fb_bitblt_bitmap;
static vd_drawrect_t vtgpu_fb_drawrect;
static vd_setpixel_t vtgpu_fb_setpixel;

static struct vt_driver vtgpu_fb_driver = {
        .vd_name = "virtio_gpu",
        .vd_init = vt_fb_init,
        .vd_fini = vt_fb_fini,
        .vd_blank = vtgpu_fb_blank,
        .vd_bitblt_text = vtgpu_fb_bitblt_text,
        .vd_invalidate_text = vt_fb_invalidate_text,
        .vd_bitblt_bmp = vtgpu_fb_bitblt_bitmap,
        .vd_drawrect = vtgpu_fb_drawrect,
        .vd_setpixel = vtgpu_fb_setpixel,
        .vd_postswitch = vt_fb_postswitch,
        .vd_priority = VD_PRIORITY_GENERIC + 10,
        .vd_fb_ioctl = vt_fb_ioctl,
        .vd_fb_mmap = NULL,	/* No mmap as we need to signal the host */
        .vd_suspend = vt_fb_suspend,
        .vd_resume = vt_fb_resume,
};

VT_DRIVER_DECLARE(vt_vtgpu, vtgpu_fb_driver);

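/*
 * vt(4) callback wrappers.  Each drawing operation is performed by the
 * generic vt_fb routine on the guest-side framebuffer, and the affected
 * rectangle is then pushed to the host with a TRANSFER_TO_HOST_2D
 * command followed by a RESOURCE_FLUSH.  Userspace mmap of the
 * framebuffer is disabled above because the host must be told about
 * every update.
 */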
static void
vtgpu_fb_blank(struct vt_device *vd, term_color_t color)
{
        struct vtgpu_softc *sc;
        struct fb_info *info;

        info = vd->vd_softc;
        sc = (struct vtgpu_softc *)info;

        vt_fb_blank(vd, color);

        vtgpu_transfer_to_host_2d(sc, 0, 0, sc->vtgpu_fb_info.fb_width,
            sc->vtgpu_fb_info.fb_height);
        vtgpu_resource_flush(sc, 0, 0, sc->vtgpu_fb_info.fb_width,
            sc->vtgpu_fb_info.fb_height);
}

static void
vtgpu_fb_bitblt_text(struct vt_device *vd, const struct vt_window *vw,
    const term_rect_t *area)
{
        struct vtgpu_softc *sc;
        struct fb_info *info;
        int x, y, width, height;

        info = vd->vd_softc;
        sc = (struct vtgpu_softc *)info;

        vt_fb_bitblt_text(vd, vw, area);

        x = area->tr_begin.tp_col * vw->vw_font->vf_width +
            vw->vw_draw_area.tr_begin.tp_col;
        y = area->tr_begin.tp_row * vw->vw_font->vf_height +
            vw->vw_draw_area.tr_begin.tp_row;
        width = area->tr_end.tp_col * vw->vw_font->vf_width +
            vw->vw_draw_area.tr_begin.tp_col - x;
        height = area->tr_end.tp_row * vw->vw_font->vf_height +
            vw->vw_draw_area.tr_begin.tp_row - y;

        vtgpu_transfer_to_host_2d(sc, x, y, width, height);
        vtgpu_resource_flush(sc, x, y, width, height);
}

static void
vtgpu_fb_bitblt_bitmap(struct vt_device *vd, const struct vt_window *vw,
    const uint8_t *pattern, const uint8_t *mask,
    unsigned int width, unsigned int height,
    unsigned int x, unsigned int y, term_color_t fg, term_color_t bg)
{
        struct vtgpu_softc *sc;
        struct fb_info *info;

        info = vd->vd_softc;
        sc = (struct vtgpu_softc *)info;

        vt_fb_bitblt_bitmap(vd, vw, pattern, mask, width, height, x, y, fg, bg);

        vtgpu_transfer_to_host_2d(sc, x, y, width, height);
        vtgpu_resource_flush(sc, x, y, width, height);
}

static void
vtgpu_fb_drawrect(struct vt_device *vd, int x1, int y1, int x2, int y2,
    int fill, term_color_t color)
{
        struct vtgpu_softc *sc;
        struct fb_info *info;
        int width, height;

        info = vd->vd_softc;
        sc = (struct vtgpu_softc *)info;

        vt_fb_drawrect(vd, x1, y1, x2, y2, fill, color);

        width = x2 - x1 + 1;
        height = y2 - y1 + 1;
        vtgpu_transfer_to_host_2d(sc, x1, y1, width, height);
        vtgpu_resource_flush(sc, x1, y1, width, height);
}

static void
vtgpu_fb_setpixel(struct vt_device *vd, int x, int y, term_color_t color)
{
        struct vtgpu_softc *sc;
        struct fb_info *info;

        info = vd->vd_softc;
        sc = (struct vtgpu_softc *)info;

        vt_fb_setpixel(vd, x, y, color);

        vtgpu_transfer_to_host_2d(sc, x, y, 1, 1);
        vtgpu_resource_flush(sc, x, y, 1, 1);
}

static struct virtio_feature_desc vtgpu_feature_desc[] = {
        { VIRTIO_GPU_F_VIRGL, "VirGL" },
        { VIRTIO_GPU_F_EDID, "EDID" },
        { VIRTIO_GPU_F_RESOURCE_UUID, "ResUUID" },
        { VIRTIO_GPU_F_RESOURCE_BLOB, "ResBlob" },
        { VIRTIO_GPU_F_CONTEXT_INIT, "ContextInit" },
        { 0, NULL }
};

static device_method_t vtgpu_methods[] = {
        /* Device methods. */
        DEVMETHOD(device_probe, vtgpu_probe),
        DEVMETHOD(device_attach, vtgpu_attach),
        DEVMETHOD(device_detach, vtgpu_detach),

        DEVMETHOD_END
};

static driver_t vtgpu_driver = {
        "vtgpu",
        vtgpu_methods,
        sizeof(struct vtgpu_softc)
};

VIRTIO_DRIVER_MODULE(virtio_gpu, vtgpu_driver, vtgpu_modevent, NULL);
MODULE_VERSION(virtio_gpu, 1);
MODULE_DEPEND(virtio_gpu, virtio, 1, 1, 1);

VIRTIO_SIMPLE_PNPINFO(virtio_gpu, VIRTIO_ID_GPU,
    "VirtIO GPU");

static int
vtgpu_modevent(module_t mod, int type, void *unused)
{
        int error;

        switch (type) {
        case MOD_LOAD:
        case MOD_QUIESCE:
        case MOD_UNLOAD:
        case MOD_SHUTDOWN:
                error = 0;
                break;
        default:
                error = EOPNOTSUPP;
                break;
        }

        return (error);
}

static int
vtgpu_probe(device_t dev)
{
        return (VIRTIO_SIMPLE_PROBE(dev, virtio_gpu));
}

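/*
 * Attach: negotiate features, read the device configuration, set up the
 * control virtqueue, query the display geometry, allocate a physically
 * contiguous framebuffer, and bind it to the host as 2D resource 1
 * (create, attach backing, set scanout) before registering the
 * framebuffer with vt(4).
 */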
static int
vtgpu_attach(device_t dev)
{
        struct vtgpu_softc *sc;
        int error;

        sc = device_get_softc(dev);
        sc->vtgpu_have_fb_info = false;
        sc->vtgpu_dev = dev;
        sc->vtgpu_next_fence = 1;
        virtio_set_feature_desc(dev, vtgpu_feature_desc);

        error = vtgpu_setup_features(sc);
        if (error != 0) {
                device_printf(dev, "cannot setup features\n");
                goto fail;
        }

        vtgpu_read_config(sc, &sc->vtgpu_gpucfg);

        error = vtgpu_alloc_virtqueue(sc);
        if (error != 0) {
                device_printf(dev, "cannot allocate virtqueue\n");
                goto fail;
        }

        virtio_setup_intr(dev, INTR_TYPE_TTY);

        /* Read the device info to get the display size */
        error = vtgpu_get_display_info(sc);
        if (error != 0) {
                goto fail;
        }

        /*
         * TODO: This doesn't need to be contigmalloc as we
         * can use scatter-gather lists.
         */
        sc->vtgpu_fb_info.fb_vbase = (vm_offset_t)contigmalloc(
            sc->vtgpu_fb_info.fb_size, M_DEVBUF, M_WAITOK|M_ZERO, 0, ~0, 4, 0);
        sc->vtgpu_fb_info.fb_pbase = pmap_kextract(sc->vtgpu_fb_info.fb_vbase);

        /* Create the 2d resource */
        error = vtgpu_create_2d(sc);
        if (error != 0) {
                goto fail;
        }

        /* Attach the backing memory */
        error = vtgpu_attach_backing(sc);
        if (error != 0) {
                goto fail;
        }

        /* Set the scanout to link the framebuffer to the display scanout */
        error = vtgpu_set_scanout(sc, 0, 0, sc->vtgpu_fb_info.fb_width,
            sc->vtgpu_fb_info.fb_height);
        if (error != 0) {
                goto fail;
        }

        vt_allocate(&vtgpu_fb_driver, &sc->vtgpu_fb_info);
        sc->vtgpu_have_fb_info = true;

        error = vtgpu_transfer_to_host_2d(sc, 0, 0, sc->vtgpu_fb_info.fb_width,
            sc->vtgpu_fb_info.fb_height);
        if (error != 0)
                goto fail;
        error = vtgpu_resource_flush(sc, 0, 0, sc->vtgpu_fb_info.fb_width,
            sc->vtgpu_fb_info.fb_height);

fail:
        if (error != 0)
                vtgpu_detach(dev);

        return (error);
}

static int
vtgpu_detach(device_t dev)
{
        struct vtgpu_softc *sc;

        sc = device_get_softc(dev);
        if (sc->vtgpu_have_fb_info)
                vt_deallocate(&vtgpu_fb_driver, &sc->vtgpu_fb_info);
        if (sc->vtgpu_fb_info.fb_vbase != 0) {
                MPASS(sc->vtgpu_fb_info.fb_size != 0);
                contigfree((void *)sc->vtgpu_fb_info.fb_vbase,
                    sc->vtgpu_fb_info.fb_size, M_DEVBUF);
        }

        /* TODO: Tell the host we are detaching */

        return (0);
}

static int
vtgpu_negotiate_features(struct vtgpu_softc *sc)
{
        device_t dev;
        uint64_t features;

        dev = sc->vtgpu_dev;
        features = VTGPU_FEATURES;

        sc->vtgpu_features = virtio_negotiate_features(dev, features);
        return (virtio_finalize_features(dev));
}

static int
vtgpu_setup_features(struct vtgpu_softc *sc)
{
        int error;

        error = vtgpu_negotiate_features(sc);
        if (error != 0)
                return (error);

        return (0);
}

static void
vtgpu_read_config(struct vtgpu_softc *sc,
    struct virtio_gpu_config *gpucfg)
{
        device_t dev;

        dev = sc->vtgpu_dev;

        bzero(gpucfg, sizeof(struct virtio_gpu_config));

#define VTGPU_GET_CONFIG(_dev, _field, _cfg)			\
	virtio_read_device_config(_dev,				\
	    offsetof(struct virtio_gpu_config, _field),		\
	    &(_cfg)->_field, sizeof((_cfg)->_field))

        VTGPU_GET_CONFIG(dev, events_read, gpucfg);
        VTGPU_GET_CONFIG(dev, events_clear, gpucfg);
        VTGPU_GET_CONFIG(dev, num_scanouts, gpucfg);
        VTGPU_GET_CONFIG(dev, num_capsets, gpucfg);

#undef VTGPU_GET_CONFIG
}

static int
vtgpu_alloc_virtqueue(struct vtgpu_softc *sc)
{
        device_t dev;
        struct vq_alloc_info vq_info[2];
        int nvqs;

        dev = sc->vtgpu_dev;
        nvqs = 1;

        VQ_ALLOC_INFO_INIT(&vq_info[0], 0, NULL, sc, &sc->vtgpu_ctrl_vq,
            "%s control", device_get_nameunit(dev));

        return (virtio_alloc_virtqueues(dev, nvqs, vq_info));
}

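/*
 * Send a request on the control queue and wait synchronously for the
 * response.  The request is added as a device-readable segment and the
 * response buffer as a device-writable segment of the same
 * scatter/gather list; the queue is then notified and polled until the
 * device has completed the command, so no completion interrupt is used.
 */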
static int
vtgpu_req_resp(struct vtgpu_softc *sc, void *req, size_t reqlen,
    void *resp, size_t resplen)
{
        struct sglist sg;
        struct sglist_seg segs[2];
        int error;

        sglist_init(&sg, 2, segs);

        error = sglist_append(&sg, req, reqlen);
        if (error != 0) {
                device_printf(sc->vtgpu_dev,
                    "Unable to append the request to the sglist: %d\n", error);
                return (error);
        }
        error = sglist_append(&sg, resp, resplen);
        if (error != 0) {
                device_printf(sc->vtgpu_dev,
                    "Unable to append the response buffer to the sglist: %d\n",
                    error);
                return (error);
        }
        error = virtqueue_enqueue(sc->vtgpu_ctrl_vq, resp, &sg, 1, 1);
        if (error != 0) {
                device_printf(sc->vtgpu_dev, "Enqueue failed: %d\n", error);
                return (error);
        }

        virtqueue_notify(sc->vtgpu_ctrl_vq);
        virtqueue_poll(sc->vtgpu_ctrl_vq, NULL);

        return (0);
}

static int
vtgpu_get_display_info(struct vtgpu_softc *sc)
{
        struct {
                struct virtio_gpu_ctrl_hdr req;
                char pad;
                struct virtio_gpu_resp_display_info resp;
        } s = { 0 };
        int error;

        s.req.type = htole32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
        s.req.flags = htole32(VIRTIO_GPU_FLAG_FENCE);
        s.req.fence_id = htole64(atomic_fetchadd_64(&sc->vtgpu_next_fence, 1));

        error = vtgpu_req_resp(sc, &s.req, sizeof(s.req), &s.resp,
            sizeof(s.resp));
        if (error != 0)
                return (error);

        for (int i = 0; i < sc->vtgpu_gpucfg.num_scanouts; i++) {
                if (s.resp.pmodes[i].enabled != 0)
                        MPASS(i == 0);
                sc->vtgpu_fb_info.fb_name =
                    device_get_nameunit(sc->vtgpu_dev);

                sc->vtgpu_fb_info.fb_width =
                    le32toh(s.resp.pmodes[i].r.width);
                sc->vtgpu_fb_info.fb_height =
                    le32toh(s.resp.pmodes[i].r.height);
                /* 32 bits per pixel */
                sc->vtgpu_fb_info.fb_bpp = 32;
                sc->vtgpu_fb_info.fb_depth = 32;
                sc->vtgpu_fb_info.fb_size = sc->vtgpu_fb_info.fb_width *
                    sc->vtgpu_fb_info.fb_height * 4;
                sc->vtgpu_fb_info.fb_stride =
                    sc->vtgpu_fb_info.fb_width * 4;
                return (0);
        }

        return (ENXIO);
}

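/*
 * Create the single host 2D resource (VTGPU_RESOURCE_ID) backing the
 * console.  The B8G8R8X8 format matches the 32 bits-per-pixel layout of
 * the framebuffer reported to vt(4) above.
 */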
static int
vtgpu_create_2d(struct vtgpu_softc *sc)
{
        struct {
                struct virtio_gpu_resource_create_2d req;
                char pad;
                struct virtio_gpu_ctrl_hdr resp;
        } s = { 0 };
        int error;

        s.req.hdr.type = htole32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
        s.req.hdr.flags = htole32(VIRTIO_GPU_FLAG_FENCE);
        s.req.hdr.fence_id = htole64(
            atomic_fetchadd_64(&sc->vtgpu_next_fence, 1));

        s.req.resource_id = htole32(VTGPU_RESOURCE_ID);
        s.req.format = htole32(VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM);
        s.req.width = htole32(sc->vtgpu_fb_info.fb_width);
        s.req.height = htole32(sc->vtgpu_fb_info.fb_height);

        error = vtgpu_req_resp(sc, &s.req, sizeof(s.req), &s.resp,
            sizeof(s.resp));
        if (error != 0)
                return (error);

        if (s.resp.type != htole32(VIRTIO_GPU_RESP_OK_NODATA)) {
                device_printf(sc->vtgpu_dev, "Invalid response type %x\n",
                    le32toh(s.resp.type));
                return (EINVAL);
        }

        return (0);
}

static int
vtgpu_attach_backing(struct vtgpu_softc *sc)
{
        struct {
                struct {
                        struct virtio_gpu_resource_attach_backing backing;
                        struct virtio_gpu_mem_entry mem[1];
                } req;
                char pad;
                struct virtio_gpu_ctrl_hdr resp;
        } s = { 0 };
        int error;

        s.req.backing.hdr.type =
            htole32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
        s.req.backing.hdr.flags = htole32(VIRTIO_GPU_FLAG_FENCE);
        s.req.backing.hdr.fence_id = htole64(
            atomic_fetchadd_64(&sc->vtgpu_next_fence, 1));

        s.req.backing.resource_id = htole32(VTGPU_RESOURCE_ID);
        s.req.backing.nr_entries = htole32(1);

        s.req.mem[0].addr = htole64(sc->vtgpu_fb_info.fb_pbase);
        s.req.mem[0].length = htole32(sc->vtgpu_fb_info.fb_size);

        error = vtgpu_req_resp(sc, &s.req, sizeof(s.req), &s.resp,
            sizeof(s.resp));
        if (error != 0)
                return (error);

        if (s.resp.type != htole32(VIRTIO_GPU_RESP_OK_NODATA)) {
                device_printf(sc->vtgpu_dev, "Invalid response type %x\n",
                    le32toh(s.resp.type));
                return (EINVAL);
        }

        return (0);
}

static int
vtgpu_set_scanout(struct vtgpu_softc *sc, uint32_t x, uint32_t y,
    uint32_t width, uint32_t height)
{
        struct {
                struct virtio_gpu_set_scanout req;
                char pad;
                struct virtio_gpu_ctrl_hdr resp;
        } s = { 0 };
        int error;

        s.req.hdr.type = htole32(VIRTIO_GPU_CMD_SET_SCANOUT);
        s.req.hdr.flags = htole32(VIRTIO_GPU_FLAG_FENCE);
        s.req.hdr.fence_id = htole64(
            atomic_fetchadd_64(&sc->vtgpu_next_fence, 1));

        s.req.r.x = htole32(x);
        s.req.r.y = htole32(y);
        s.req.r.width = htole32(width);
        s.req.r.height = htole32(height);

        s.req.scanout_id = 0;
        s.req.resource_id = htole32(VTGPU_RESOURCE_ID);

        error = vtgpu_req_resp(sc, &s.req, sizeof(s.req), &s.resp,
            sizeof(s.resp));
        if (error != 0)
                return (error);

        if (s.resp.type != htole32(VIRTIO_GPU_RESP_OK_NODATA)) {
                device_printf(sc->vtgpu_dev, "Invalid response type %x\n",
                    le32toh(s.resp.type));
                return (EINVAL);
        }

        return (0);
}

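/*
 * Copy a rectangle of the guest framebuffer into the host resource.
 * The offset is the byte position of (x, y) within the backing store,
 * so the rectangle and offset describe the same pixels on both sides.
 * A RESOURCE_FLUSH must follow to make the update visible on the
 * scanout.
 */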
static int
vtgpu_transfer_to_host_2d(struct vtgpu_softc *sc, uint32_t x, uint32_t y,
    uint32_t width, uint32_t height)
{
        struct {
                struct virtio_gpu_transfer_to_host_2d req;
                char pad;
                struct virtio_gpu_ctrl_hdr resp;
        } s = { 0 };
        int error;

        s.req.hdr.type = htole32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
        s.req.hdr.flags = htole32(VIRTIO_GPU_FLAG_FENCE);
        s.req.hdr.fence_id = htole64(
            atomic_fetchadd_64(&sc->vtgpu_next_fence, 1));

        s.req.r.x = htole32(x);
        s.req.r.y = htole32(y);
        s.req.r.width = htole32(width);
        s.req.r.height = htole32(height);

        s.req.offset = htole64((y * sc->vtgpu_fb_info.fb_width + x)
            * (sc->vtgpu_fb_info.fb_bpp / 8));
        s.req.resource_id = htole32(VTGPU_RESOURCE_ID);

        error = vtgpu_req_resp(sc, &s.req, sizeof(s.req), &s.resp,
            sizeof(s.resp));
        if (error != 0)
                return (error);

        if (s.resp.type != htole32(VIRTIO_GPU_RESP_OK_NODATA)) {
                device_printf(sc->vtgpu_dev, "Invalid response type %x\n",
                    le32toh(s.resp.type));
                return (EINVAL);
        }

        return (0);
}

static int
vtgpu_resource_flush(struct vtgpu_softc *sc, uint32_t x, uint32_t y,
    uint32_t width, uint32_t height)
{
        struct {
                struct virtio_gpu_resource_flush req;
                char pad;
                struct virtio_gpu_ctrl_hdr resp;
        } s = { 0 };
        int error;

        s.req.hdr.type = htole32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
        s.req.hdr.flags = htole32(VIRTIO_GPU_FLAG_FENCE);
        s.req.hdr.fence_id = htole64(
            atomic_fetchadd_64(&sc->vtgpu_next_fence, 1));

        s.req.r.x = htole32(x);
        s.req.r.y = htole32(y);
        s.req.r.width = htole32(width);
        s.req.r.height = htole32(height);

        s.req.resource_id = htole32(VTGPU_RESOURCE_ID);

        error = vtgpu_req_resp(sc, &s.req, sizeof(s.req), &s.resp,
            sizeof(s.resp));
        if (error != 0)
                return (error);

        if (s.resp.type != htole32(VIRTIO_GPU_RESP_OK_NODATA)) {
                device_printf(sc->vtgpu_dev, "Invalid response type %x\n",
                    le32toh(s.resp.type));
                return (EINVAL);
        }

        return (0);
}