/*-
 * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Portions of this software were developed by Andrew Turner
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * VirtIO MMIO interface.
 * This driver is heavily based on the VirtIO PCI interface driver.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/rman.h>
#include <sys/endian.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/mmio/virtio_mmio.h>

#include "virtio_mmio_if.h"
#include "virtio_bus_if.h"
#include "virtio_if.h"

struct vtmmio_virtqueue {
	struct virtqueue	*vtv_vq;
	int			 vtv_no_intr;
};

static int	vtmmio_detach(device_t);
static int	vtmmio_suspend(device_t);
static int	vtmmio_resume(device_t);
static int	vtmmio_shutdown(device_t);
static void	vtmmio_driver_added(device_t, driver_t *);
static void	vtmmio_child_detached(device_t, device_t);
static int	vtmmio_read_ivar(device_t, device_t, int, uintptr_t *);
static int	vtmmio_write_ivar(device_t, device_t, int, uintptr_t);
static uint64_t	vtmmio_negotiate_features(device_t, uint64_t);
static int	vtmmio_with_feature(device_t, uint64_t);
static void	vtmmio_set_virtqueue(struct vtmmio_softc *sc,
		    struct virtqueue *vq, uint32_t size);
static int	vtmmio_alloc_virtqueues(device_t, int, int,
		    struct vq_alloc_info *);
static int	vtmmio_setup_intr(device_t, enum intr_type);
static void	vtmmio_stop(device_t);
static void	vtmmio_poll(device_t);
static int	vtmmio_reinit(device_t, uint64_t);
static void	vtmmio_reinit_complete(device_t);
static void	vtmmio_notify_virtqueue(device_t, uint16_t, bus_size_t);
static uint8_t	vtmmio_get_status(device_t);
static void	vtmmio_set_status(device_t, uint8_t);
static void	vtmmio_read_dev_config(device_t, bus_size_t, void *, int);
static void	vtmmio_write_dev_config(device_t, bus_size_t, void *, int);
static void	vtmmio_describe_features(struct vtmmio_softc *, const char *,
		    uint64_t);
static void	vtmmio_probe_and_attach_child(struct vtmmio_softc *);
static int	vtmmio_reinit_virtqueue(struct vtmmio_softc *, int);
static void	vtmmio_free_interrupts(struct vtmmio_softc *);
static void	vtmmio_free_virtqueues(struct vtmmio_softc *);
static void	vtmmio_release_child_resources(struct vtmmio_softc *);
static void	vtmmio_reset(struct vtmmio_softc *);
static void	vtmmio_select_virtqueue(struct vtmmio_softc *, int);
static void	vtmmio_vq_intr(void *);

/*
 * Low-level register read/write wrappers.
 */
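
/*
 * These wrappers are the only place the driver touches its register window
 * (sc->res[0]).  When a platform back-end has been registered in
 * sc->platform, every write is bracketed by the VIRTIO_MMIO_PREWRITE() and
 * VIRTIO_MMIO_NOTE() methods (see virtio_mmio_if.h) so the platform can
 * shadow or otherwise post-process the access.  A typical caller elsewhere
 * in this file looks like:
 *
 *	vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_SEL, idx);
 *	size = vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX);
 */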
#define	vtmmio_write_config_1(sc, o, v)				\
do {								\
	if (sc->platform != NULL)				\
		VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v));	\
	bus_write_1((sc)->res[0], (o), (v));			\
	if (sc->platform != NULL)				\
		VIRTIO_MMIO_NOTE(sc->platform, (o), (v));	\
} while (0)
#define	vtmmio_write_config_2(sc, o, v)				\
do {								\
	if (sc->platform != NULL)				\
		VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v));	\
	bus_write_2((sc)->res[0], (o), (v));			\
	if (sc->platform != NULL)				\
		VIRTIO_MMIO_NOTE(sc->platform, (o), (v));	\
} while (0)
#define	vtmmio_write_config_4(sc, o, v)				\
do {								\
	if (sc->platform != NULL)				\
		VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v));	\
	bus_write_4((sc)->res[0], (o), (v));			\
	if (sc->platform != NULL)				\
		VIRTIO_MMIO_NOTE(sc->platform, (o), (v));	\
} while (0)

#define	vtmmio_read_config_1(sc, o) \
	bus_read_1((sc)->res[0], (o))
#define	vtmmio_read_config_2(sc, o) \
	bus_read_2((sc)->res[0], (o))
#define	vtmmio_read_config_4(sc, o) \
	bus_read_4((sc)->res[0], (o))

static device_method_t vtmmio_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_attach,	vtmmio_attach),
	DEVMETHOD(device_detach,	vtmmio_detach),
	DEVMETHOD(device_suspend,	vtmmio_suspend),
	DEVMETHOD(device_resume,	vtmmio_resume),
	DEVMETHOD(device_shutdown,	vtmmio_shutdown),

	/* Bus interface. */
	DEVMETHOD(bus_driver_added,	vtmmio_driver_added),
	DEVMETHOD(bus_child_detached,	vtmmio_child_detached),
	DEVMETHOD(bus_child_pnpinfo,	virtio_child_pnpinfo),
	DEVMETHOD(bus_read_ivar,	vtmmio_read_ivar),
	DEVMETHOD(bus_write_ivar,	vtmmio_write_ivar),

	/* VirtIO bus interface. */
	DEVMETHOD(virtio_bus_negotiate_features, vtmmio_negotiate_features),
	DEVMETHOD(virtio_bus_with_feature,	vtmmio_with_feature),
	DEVMETHOD(virtio_bus_alloc_virtqueues,	vtmmio_alloc_virtqueues),
	DEVMETHOD(virtio_bus_setup_intr,	vtmmio_setup_intr),
	DEVMETHOD(virtio_bus_stop,		vtmmio_stop),
	DEVMETHOD(virtio_bus_poll,		vtmmio_poll),
	DEVMETHOD(virtio_bus_reinit,		vtmmio_reinit),
	DEVMETHOD(virtio_bus_reinit_complete,	vtmmio_reinit_complete),
	DEVMETHOD(virtio_bus_notify_vq,		vtmmio_notify_virtqueue),
	DEVMETHOD(virtio_bus_read_device_config, vtmmio_read_dev_config),
	DEVMETHOD(virtio_bus_write_device_config, vtmmio_write_dev_config),

	DEVMETHOD_END
};

DEFINE_CLASS_0(virtio_mmio, vtmmio_driver, vtmmio_methods,
    sizeof(struct vtmmio_softc));

MODULE_VERSION(virtio_mmio, 1);
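
/*
 * vtmmio_probe() and vtmmio_attach() are deliberately not static: this file
 * only provides the transport core, and bus-specific front ends (e.g. an
 * FDT or ACPI attachment) are expected to subclass vtmmio_driver and reuse
 * these entry points.  A hypothetical front end might look roughly like the
 * sketch below; the "foo" names are illustrative only and not part of this
 * driver.
 *
 *	static int
 *	vtmmio_foo_probe(device_t dev)
 *	{
 *		if (!foo_bus_is_virtio_mmio(dev))
 *			return (ENXIO);
 *		return (vtmmio_probe(dev));
 *	}
 *
 *	static device_method_t vtmmio_foo_methods[] = {
 *		DEVMETHOD(device_probe, vtmmio_foo_probe),
 *		DEVMETHOD_END
 *	};
 *
 *	DEFINE_CLASS_1(virtio_mmio, vtmmio_foo_driver, vtmmio_foo_methods,
 *	    sizeof(struct vtmmio_softc), vtmmio_driver);
 */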

int
vtmmio_probe(device_t dev)
{
	struct vtmmio_softc *sc;
	int rid;
	uint32_t magic, version;

	sc = device_get_softc(dev);

	rid = 0;
	sc->res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->res[0] == NULL) {
		device_printf(dev, "Cannot allocate memory window.\n");
		return (ENXIO);
	}

	magic = vtmmio_read_config_4(sc, VIRTIO_MMIO_MAGIC_VALUE);
	if (magic != VIRTIO_MMIO_MAGIC_VIRT) {
		device_printf(dev, "Bad magic value %#x\n", magic);
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res[0]);
		return (ENXIO);
	}

	version = vtmmio_read_config_4(sc, VIRTIO_MMIO_VERSION);
	if (version < 1 || version > 2) {
		device_printf(dev, "Unsupported version: %#x\n", version);
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res[0]);
		return (ENXIO);
	}

	if (vtmmio_read_config_4(sc, VIRTIO_MMIO_DEVICE_ID) == 0) {
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res[0]);
		return (ENXIO);
	}

	bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res[0]);

	device_set_desc(dev, "VirtIO MMIO adapter");
	return (BUS_PROBE_DEFAULT);
}

static int
vtmmio_setup_intr(device_t dev, enum intr_type type)
{
	struct vtmmio_softc *sc;
	int rid;
	int err;

	sc = device_get_softc(dev);

	if (sc->platform != NULL) {
		err = VIRTIO_MMIO_SETUP_INTR(sc->platform, sc->dev,
		    vtmmio_vq_intr, sc);
		if (err == 0) {
			/* Okay we have backend-specific interrupts */
			return (0);
		}
	}

	rid = 0;
	sc->res[1] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (!sc->res[1]) {
		device_printf(dev, "Can't allocate interrupt\n");
		return (ENXIO);
	}

	if (bus_setup_intr(dev, sc->res[1], type | INTR_MPSAFE,
	    NULL, vtmmio_vq_intr, sc, &sc->ih)) {
		device_printf(dev, "Can't setup the interrupt\n");
		return (ENXIO);
	}

	return (0);
}

int
vtmmio_attach(device_t dev)
{
	struct vtmmio_softc *sc;
	device_t child;
	int rid;

	sc = device_get_softc(dev);
	sc->dev = dev;

	rid = 0;
	sc->res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->res[0] == NULL) {
		device_printf(dev, "Cannot allocate memory window.\n");
		return (ENXIO);
	}

	sc->vtmmio_version = vtmmio_read_config_4(sc, VIRTIO_MMIO_VERSION);

	vtmmio_reset(sc);

	/* Tell the host we've noticed this device. */
	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);

	if ((child = device_add_child(dev, NULL, -1)) == NULL) {
		device_printf(dev, "Cannot create child device.\n");
		vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
		vtmmio_detach(dev);
		return (ENOMEM);
	}

	sc->vtmmio_child_dev = child;
	vtmmio_probe_and_attach_child(sc);

	return (0);
}

static int
vtmmio_detach(device_t dev)
{
	struct vtmmio_softc *sc;
	device_t child;
	int error;

	sc = device_get_softc(dev);

	if ((child = sc->vtmmio_child_dev) != NULL) {
		error = device_delete_child(dev, child);
		if (error)
			return (error);
		sc->vtmmio_child_dev = NULL;
	}

	vtmmio_reset(sc);

	if (sc->res[0] != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, 0,
		    sc->res[0]);
		sc->res[0] = NULL;
	}

	return (0);
}

static int
vtmmio_suspend(device_t dev)
{

	return (bus_generic_suspend(dev));
}

static int
vtmmio_resume(device_t dev)
{

	return (bus_generic_resume(dev));
}

static int
vtmmio_shutdown(device_t dev)
{

	(void) bus_generic_shutdown(dev);

	/* Forcibly stop the host device. */
	vtmmio_stop(dev);

	return (0);
}

static void
vtmmio_driver_added(device_t dev, driver_t *driver)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	vtmmio_probe_and_attach_child(sc);
}

static void
vtmmio_child_detached(device_t dev, device_t child)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	vtmmio_reset(sc);
	vtmmio_release_child_resources(sc);
}

static int
vtmmio_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	if (sc->vtmmio_child_dev != child)
		return (ENOENT);

	switch (index) {
	case VIRTIO_IVAR_DEVTYPE:
	case VIRTIO_IVAR_SUBDEVICE:
		*result = vtmmio_read_config_4(sc, VIRTIO_MMIO_DEVICE_ID);
		break;
	case VIRTIO_IVAR_VENDOR:
		*result = vtmmio_read_config_4(sc, VIRTIO_MMIO_VENDOR_ID);
		break;
	case VIRTIO_IVAR_SUBVENDOR:
	case VIRTIO_IVAR_DEVICE:
		/*
		 * Dummy value for fields not present in this bus. Used by
		 * bus-agnostic virtio_child_pnpinfo.
		 */
		*result = 0;
		break;
	case VIRTIO_IVAR_MODERN:
		/*
		 * There are several modern (aka MMIO v2) spec compliance
		 * issues with this driver, but keep the status quo.
		 */
		*result = sc->vtmmio_version > 1;
		break;
	default:
		return (ENOENT);
	}

	return (0);
}

static int
vtmmio_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	if (sc->vtmmio_child_dev != child)
		return (ENOENT);

	switch (index) {
	case VIRTIO_IVAR_FEATURE_DESC:
		sc->vtmmio_child_feat_desc = (void *) value;
		break;
	default:
		return (ENOENT);
	}

	return (0);
}
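
/*
 * Feature negotiation below operates on a 64-bit feature word even though
 * each MMIO register is only 32 bits wide: the *_FEATURES_SEL register
 * selects which half the *_FEATURES register exposes (1 = bits 63:32,
 * 0 = bits 31:0).  For example, VIRTIO_F_VERSION_1 is feature bit 32, so
 * under MMIO v2 it travels in the upper half:
 *
 *	vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES_SEL, 1);
 *	vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES, features >> 32);
 */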

static uint64_t
vtmmio_negotiate_features(device_t dev, uint64_t child_features)
{
	struct vtmmio_softc *sc;
	uint64_t host_features, features;

	sc = device_get_softc(dev);

	if (sc->vtmmio_version > 1) {
		child_features |= VIRTIO_F_VERSION_1;
	}

	vtmmio_write_config_4(sc, VIRTIO_MMIO_HOST_FEATURES_SEL, 1);
	host_features = vtmmio_read_config_4(sc, VIRTIO_MMIO_HOST_FEATURES);
	host_features <<= 32;

	vtmmio_write_config_4(sc, VIRTIO_MMIO_HOST_FEATURES_SEL, 0);
	host_features |= vtmmio_read_config_4(sc, VIRTIO_MMIO_HOST_FEATURES);

	vtmmio_describe_features(sc, "host", host_features);

	/*
	 * Limit negotiated features to what the driver, virtqueue, and
	 * host all support.
	 */
	features = host_features & child_features;
	features = virtio_filter_transport_features(features);
	sc->vtmmio_features = features;

	vtmmio_describe_features(sc, "negotiated", features);

	vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES_SEL, 1);
	vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES, features >> 32);

	vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES_SEL, 0);
	vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES, features);

	return (features);
}

static int
vtmmio_with_feature(device_t dev, uint64_t feature)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	return ((sc->vtmmio_features & feature) != 0);
}
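
/*
 * Ring programming differs between the two MMIO revisions handled below:
 * the legacy (v1) layout takes a single page frame number for the whole
 * ring (QUEUE_PFN, after QUEUE_ALIGN and GUEST_PAGE_SIZE have been set),
 * while the modern (v2) layout takes separate 64-bit physical addresses
 * for the descriptor, available and used rings as *_LOW/*_HIGH register
 * pairs and is armed by writing 1 to QUEUE_READY.
 */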

static void
vtmmio_set_virtqueue(struct vtmmio_softc *sc, struct virtqueue *vq,
    uint32_t size)
{
	vm_paddr_t paddr;

	vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_NUM, size);

	if (sc->vtmmio_version == 1) {
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_ALIGN,
		    VIRTIO_MMIO_VRING_ALIGN);
		paddr = virtqueue_paddr(vq);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN,
		    paddr >> PAGE_SHIFT);
	} else {
		paddr = virtqueue_desc_paddr(vq);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_DESC_LOW,
		    paddr);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_DESC_HIGH,
		    ((uint64_t)paddr) >> 32);

		paddr = virtqueue_avail_paddr(vq);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_AVAIL_LOW,
		    paddr);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_AVAIL_HIGH,
		    ((uint64_t)paddr) >> 32);

		paddr = virtqueue_used_paddr(vq);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_USED_LOW,
		    paddr);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_USED_HIGH,
		    ((uint64_t)paddr) >> 32);

		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_READY, 1);
	}
}

static int
vtmmio_alloc_virtqueues(device_t dev, int flags, int nvqs,
    struct vq_alloc_info *vq_info)
{
	struct vtmmio_virtqueue *vqx;
	struct vq_alloc_info *info;
	struct vtmmio_softc *sc;
	struct virtqueue *vq;
	uint32_t size;
	int idx, error;

	sc = device_get_softc(dev);

	if (sc->vtmmio_nvqs != 0)
		return (EALREADY);
	if (nvqs <= 0)
		return (EINVAL);

	sc->vtmmio_vqs = malloc(nvqs * sizeof(struct vtmmio_virtqueue),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->vtmmio_vqs == NULL)
		return (ENOMEM);

	if (sc->vtmmio_version == 1) {
		vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_PAGE_SIZE,
		    (1 << PAGE_SHIFT));
	}

	for (idx = 0; idx < nvqs; idx++) {
		vqx = &sc->vtmmio_vqs[idx];
		info = &vq_info[idx];

		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_SEL, idx);

		vtmmio_select_virtqueue(sc, idx);
		size = vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX);

		error = virtqueue_alloc(dev, idx, size,
		    VIRTIO_MMIO_QUEUE_NOTIFY, VIRTIO_MMIO_VRING_ALIGN,
		    ~(vm_paddr_t)0, info, &vq);
		if (error) {
			device_printf(dev,
			    "cannot allocate virtqueue %d: %d\n",
			    idx, error);
			break;
		}

		vtmmio_set_virtqueue(sc, vq, size);

		vqx->vtv_vq = *info->vqai_vq = vq;
		vqx->vtv_no_intr = info->vqai_intr == NULL;

		sc->vtmmio_nvqs++;
	}

	if (error)
		vtmmio_free_virtqueues(sc);

	return (error);
}

static void
vtmmio_stop(device_t dev)
{

	vtmmio_reset(device_get_softc(dev));
}

static void
vtmmio_poll(device_t dev)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	if (sc->platform != NULL)
		VIRTIO_MMIO_POLL(sc->platform);
}
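
/*
 * vtmmio_reinit() backs the reinit path a child driver takes after it has
 * reset its device (see the virtio_reinit()/virtio_reinit_complete()
 * accessors in dev/virtio/virtio.h): the status byte is walked back through
 * ACK and DRIVER, the previously accepted features are re-negotiated, and
 * every allocated virtqueue is re-programmed.  The device only becomes
 * usable again once the child finishes with vtmmio_reinit_complete(), which
 * sets DRIVER_OK.
 */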

static int
vtmmio_reinit(device_t dev, uint64_t features)
{
	struct vtmmio_softc *sc;
	int idx, error;

	sc = device_get_softc(dev);

	if (vtmmio_get_status(dev) != VIRTIO_CONFIG_STATUS_RESET)
		vtmmio_stop(dev);

	/*
	 * Quickly drive the status through ACK and DRIVER. The device
	 * does not become usable again until vtmmio_reinit_complete().
	 */
	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);

	vtmmio_negotiate_features(dev, features);

	if (sc->vtmmio_version == 1) {
		vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_PAGE_SIZE,
		    (1 << PAGE_SHIFT));
	}

	for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
		error = vtmmio_reinit_virtqueue(sc, idx);
		if (error)
			return (error);
	}

	return (0);
}

static void
vtmmio_reinit_complete(device_t dev)
{

	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}

static void
vtmmio_notify_virtqueue(device_t dev, uint16_t queue, bus_size_t offset)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);
	MPASS(offset == VIRTIO_MMIO_QUEUE_NOTIFY);

	vtmmio_write_config_4(sc, offset, queue);
}

static uint8_t
vtmmio_get_status(device_t dev)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	return (vtmmio_read_config_4(sc, VIRTIO_MMIO_STATUS));
}

static void
vtmmio_set_status(device_t dev, uint8_t status)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	if (status != VIRTIO_CONFIG_STATUS_RESET)
		status |= vtmmio_get_status(dev);

	vtmmio_write_config_4(sc, VIRTIO_MMIO_STATUS, status);
}

static void
vtmmio_read_dev_config(device_t dev, bus_size_t offset,
    void *dst, int length)
{
	struct vtmmio_softc *sc;
	bus_size_t off;
	uint8_t *d;
	int size;
	uint64_t low32, high32;

	sc = device_get_softc(dev);
	off = VIRTIO_MMIO_CONFIG + offset;

	/*
	 * The non-legacy MMIO specification adds the following restriction:
	 *
	 * 4.2.2.2: For the device-specific configuration space, the driver
	 * MUST use 8 bit wide accesses for 8 bit wide fields, 16 bit wide
	 * and aligned accesses for 16 bit wide fields and 32 bit wide and
	 * aligned accesses for 32 and 64 bit wide fields.
	 *
	 * The endianness also varies between non-legacy and legacy:
	 *
	 * 2.4: Note: The device configuration space uses the little-endian
	 * format for multi-byte fields.
	 *
	 * 2.4.3: Note that for legacy interfaces, device configuration space
	 * is generally the guest’s native endian, rather than PCI’s
	 * little-endian. The correct endian-ness is documented for each
	 * device.
	 */
	if (sc->vtmmio_version > 1) {
		switch (length) {
		case 1:
			*(uint8_t *)dst = vtmmio_read_config_1(sc, off);
			break;
		case 2:
			*(uint16_t *)dst =
			    le16toh(vtmmio_read_config_2(sc, off));
			break;
		case 4:
			*(uint32_t *)dst =
			    le32toh(vtmmio_read_config_4(sc, off));
			break;
		case 8:
			low32 = le32toh(vtmmio_read_config_4(sc, off));
			high32 = le32toh(vtmmio_read_config_4(sc, off + 4));
			*(uint64_t *)dst = (high32 << 32) | low32;
			break;
		default:
			panic("%s: invalid length %d\n", __func__, length);
		}

		return;
	}

	for (d = dst; length > 0; d += size, off += size, length -= size) {
#ifdef ALLOW_WORD_ALIGNED_ACCESS
		if (length >= 4) {
			size = 4;
			*(uint32_t *)d = vtmmio_read_config_4(sc, off);
		} else if (length >= 2) {
			size = 2;
			*(uint16_t *)d = vtmmio_read_config_2(sc, off);
		} else
#endif
		{
			size = 1;
			*d = vtmmio_read_config_1(sc, off);
		}
	}
}
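
/*
 * Child drivers are not expected to call the config accessors directly;
 * they go through the virtio_bus interface methods registered above
 * (virtio_bus_read_device_config/virtio_bus_write_device_config), normally
 * via the wrappers in dev/virtio/virtio.h.  As a sketch, assuming a device
 * config layout with a 32-bit field at offset 8, a child read would look
 * roughly like:
 *
 *	uint32_t val;
 *
 *	virtio_read_device_config(child, 8, &val, sizeof(val));
 */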

static void
vtmmio_write_dev_config(device_t dev, bus_size_t offset,
    void *src, int length)
{
	struct vtmmio_softc *sc;
	bus_size_t off;
	uint8_t *s;
	int size;

	sc = device_get_softc(dev);
	off = VIRTIO_MMIO_CONFIG + offset;

	/*
	 * The non-legacy MMIO specification adds size and alignment
	 * restrictions. It also changes the endianness from native-endian
	 * to little-endian. See vtmmio_read_dev_config.
	 */
	if (sc->vtmmio_version > 1) {
		switch (length) {
		case 1:
			vtmmio_write_config_1(sc, off, *(uint8_t *)src);
			break;
		case 2:
			vtmmio_write_config_2(sc, off,
			    htole16(*(uint16_t *)src));
			break;
		case 4:
			vtmmio_write_config_4(sc, off,
			    htole32(*(uint32_t *)src));
			break;
		case 8:
			vtmmio_write_config_4(sc, off,
			    htole32(*(uint64_t *)src));
			vtmmio_write_config_4(sc, off + 4,
			    htole32((*(uint64_t *)src) >> 32));
			break;
		default:
			panic("%s: invalid length %d\n", __func__, length);
		}

		return;
	}

	for (s = src; length > 0; s += size, off += size, length -= size) {
#ifdef ALLOW_WORD_ALIGNED_ACCESS
		if (length >= 4) {
			size = 4;
			vtmmio_write_config_4(sc, off, *(uint32_t *)s);
		} else if (length >= 2) {
			size = 2;
			vtmmio_write_config_2(sc, off, *(uint16_t *)s);
		} else
#endif
		{
			size = 1;
			vtmmio_write_config_1(sc, off, *s);
		}
	}
}

static void
vtmmio_describe_features(struct vtmmio_softc *sc, const char *msg,
    uint64_t features)
{
	device_t dev, child;

	dev = sc->dev;
	child = sc->vtmmio_child_dev;

	if (device_is_attached(child) || bootverbose == 0)
		return;

	virtio_describe(dev, msg, features, sc->vtmmio_child_feat_desc);
}
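
/*
 * vtmmio_probe_and_attach_child() below drives the standard VirtIO status
 * handshake on behalf of the child: the device was already ACKed at attach
 * time, DRIVER is set before the child attaches, and DRIVER_OK only once
 * the attach succeeds.  On failure the device is marked FAILED, reset, and
 * re-ACKed so a later driver load can try again.
 */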

static void
vtmmio_probe_and_attach_child(struct vtmmio_softc *sc)
{
	device_t dev, child;

	dev = sc->dev;
	child = sc->vtmmio_child_dev;

	if (child == NULL)
		return;

	if (device_get_state(child) != DS_NOTPRESENT) {
		return;
	}

	if (device_probe(child) != 0) {
		return;
	}

	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
	if (device_attach(child) != 0) {
		vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
		vtmmio_reset(sc);
		vtmmio_release_child_resources(sc);
		/* Reset status for future attempt. */
		vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
	} else {
		vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
		VIRTIO_ATTACH_COMPLETED(child);
	}
}

static int
vtmmio_reinit_virtqueue(struct vtmmio_softc *sc, int idx)
{
	struct vtmmio_virtqueue *vqx;
	struct virtqueue *vq;
	int error;
	uint16_t size;

	vqx = &sc->vtmmio_vqs[idx];
	vq = vqx->vtv_vq;

	KASSERT(vq != NULL, ("%s: vq %d not allocated", __func__, idx));

	vtmmio_select_virtqueue(sc, idx);
	size = vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX);

	error = virtqueue_reinit(vq, size);
	if (error)
		return (error);

	vtmmio_set_virtqueue(sc, vq, size);

	return (0);
}

static void
vtmmio_free_interrupts(struct vtmmio_softc *sc)
{

	if (sc->ih != NULL)
		bus_teardown_intr(sc->dev, sc->res[1], sc->ih);

	if (sc->res[1] != NULL)
		bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->res[1]);
}

static void
vtmmio_free_virtqueues(struct vtmmio_softc *sc)
{
	struct vtmmio_virtqueue *vqx;
	int idx;

	for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
		vqx = &sc->vtmmio_vqs[idx];

		vtmmio_select_virtqueue(sc, idx);
		if (sc->vtmmio_version == 1)
			vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN, 0);
		else
			vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_READY, 0);

		virtqueue_free(vqx->vtv_vq);
		vqx->vtv_vq = NULL;
	}

	free(sc->vtmmio_vqs, M_DEVBUF);
	sc->vtmmio_vqs = NULL;
	sc->vtmmio_nvqs = 0;
}

static void
vtmmio_release_child_resources(struct vtmmio_softc *sc)
{

	vtmmio_free_interrupts(sc);
	vtmmio_free_virtqueues(sc);
}

static void
vtmmio_reset(struct vtmmio_softc *sc)
{

	/*
	 * Setting the status to RESET sets the host device to
	 * the original, uninitialized state.
	 */
	vtmmio_set_status(sc->dev, VIRTIO_CONFIG_STATUS_RESET);
}

static void
vtmmio_select_virtqueue(struct vtmmio_softc *sc, int idx)
{

	vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_SEL, idx);
}

static void
vtmmio_vq_intr(void *arg)
{
	struct vtmmio_virtqueue *vqx;
	struct vtmmio_softc *sc;
	struct virtqueue *vq;
	uint32_t status;
	int idx;

	sc = arg;

	status = vtmmio_read_config_4(sc, VIRTIO_MMIO_INTERRUPT_STATUS);
	vtmmio_write_config_4(sc, VIRTIO_MMIO_INTERRUPT_ACK, status);

	/* The config changed */
	if (status & VIRTIO_MMIO_INT_CONFIG)
		if (sc->vtmmio_child_dev != NULL)
			VIRTIO_CONFIG_CHANGE(sc->vtmmio_child_dev);

	/* Notify all virtqueues. */
	if (status & VIRTIO_MMIO_INT_VRING) {
		for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
			vqx = &sc->vtmmio_vqs[idx];
			if (vqx->vtv_no_intr == 0) {
				vq = vqx->vtv_vq;
				virtqueue_intr(vq);
			}
		}
	}
}