/*-
 * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Portions of this software were developed by Andrew Turner
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * VirtIO MMIO interface.
 * This driver is heavily based on the VirtIO PCI interface driver.
 */
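
/*
 * This file implements the bus-independent part of the transport; the
 * bus-specific front ends (e.g. the FDT and ACPI attachments) subclass the
 * virtio_mmio driver class defined below and provide device_probe.
 */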

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/rman.h>
#include <sys/endian.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/mmio/virtio_mmio.h>

#include "virtio_mmio_if.h"
#include "virtio_bus_if.h"
#include "virtio_if.h"

struct vtmmio_virtqueue {
        struct virtqueue *vtv_vq;
        int               vtv_no_intr;
};

static int      vtmmio_detach(device_t);
static int      vtmmio_suspend(device_t);
static int      vtmmio_resume(device_t);
static int      vtmmio_shutdown(device_t);
static void     vtmmio_driver_added(device_t, driver_t *);
static void     vtmmio_child_detached(device_t, device_t);
static int      vtmmio_read_ivar(device_t, device_t, int, uintptr_t *);
static int      vtmmio_write_ivar(device_t, device_t, int, uintptr_t);
static uint64_t vtmmio_negotiate_features(device_t, uint64_t);
static int      vtmmio_with_feature(device_t, uint64_t);
static void     vtmmio_set_virtqueue(struct vtmmio_softc *sc,
                    struct virtqueue *vq, uint32_t size);
static int      vtmmio_alloc_virtqueues(device_t, int, int,
                    struct vq_alloc_info *);
static int      vtmmio_setup_intr(device_t, enum intr_type);
static void     vtmmio_stop(device_t);
static void     vtmmio_poll(device_t);
static int      vtmmio_reinit(device_t, uint64_t);
static void     vtmmio_reinit_complete(device_t);
static void     vtmmio_notify_virtqueue(device_t, uint16_t);
static uint8_t  vtmmio_get_status(device_t);
static void     vtmmio_set_status(device_t, uint8_t);
static void     vtmmio_read_dev_config(device_t, bus_size_t, void *, int);
static void     vtmmio_write_dev_config(device_t, bus_size_t, void *, int);
static void     vtmmio_describe_features(struct vtmmio_softc *, const char *,
                    uint64_t);
static void     vtmmio_probe_and_attach_child(struct vtmmio_softc *);
static int      vtmmio_reinit_virtqueue(struct vtmmio_softc *, int);
static void     vtmmio_free_interrupts(struct vtmmio_softc *);
static void     vtmmio_free_virtqueues(struct vtmmio_softc *);
static void     vtmmio_release_child_resources(struct vtmmio_softc *);
static void     vtmmio_reset(struct vtmmio_softc *);
static void     vtmmio_select_virtqueue(struct vtmmio_softc *, int);
static void     vtmmio_vq_intr(void *);

/*
 * MMIO register read/write wrappers.
 */
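
/*
 * Some bus attachments register a platform companion device in sc->platform.
 * When present, its VIRTIO_MMIO_PREWRITE and VIRTIO_MMIO_NOTE methods are
 * invoked around every register write so the backend can shadow or react to
 * the access; reads go straight to the register window.
 */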

#define vtmmio_write_config_1(sc, o, v)                         \
do {                                                            \
        if (sc->platform != NULL)                               \
                VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v));   \
        bus_write_1((sc)->res[0], (o), (v));                    \
        if (sc->platform != NULL)                               \
                VIRTIO_MMIO_NOTE(sc->platform, (o), (v));       \
} while (0)
#define vtmmio_write_config_2(sc, o, v)                         \
do {                                                            \
        if (sc->platform != NULL)                               \
                VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v));   \
        bus_write_2((sc)->res[0], (o), (v));                    \
        if (sc->platform != NULL)                               \
                VIRTIO_MMIO_NOTE(sc->platform, (o), (v));       \
} while (0)
#define vtmmio_write_config_4(sc, o, v)                         \
do {                                                            \
        if (sc->platform != NULL)                               \
                VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v));   \
        bus_write_4((sc)->res[0], (o), (v));                    \
        if (sc->platform != NULL)                               \
                VIRTIO_MMIO_NOTE(sc->platform, (o), (v));       \
} while (0)

#define vtmmio_read_config_1(sc, o) \
        bus_read_1((sc)->res[0], (o))
#define vtmmio_read_config_2(sc, o) \
        bus_read_2((sc)->res[0], (o))
#define vtmmio_read_config_4(sc, o) \
        bus_read_4((sc)->res[0], (o))

static device_method_t vtmmio_methods[] = {
        /* Device interface. */
        DEVMETHOD(device_attach,        vtmmio_attach),
        DEVMETHOD(device_detach,        vtmmio_detach),
        DEVMETHOD(device_suspend,       vtmmio_suspend),
        DEVMETHOD(device_resume,        vtmmio_resume),
        DEVMETHOD(device_shutdown,      vtmmio_shutdown),

        /* Bus interface. */
        DEVMETHOD(bus_driver_added,     vtmmio_driver_added),
        DEVMETHOD(bus_child_detached,   vtmmio_child_detached),
        DEVMETHOD(bus_child_pnpinfo_str, virtio_child_pnpinfo_str),
        DEVMETHOD(bus_read_ivar,        vtmmio_read_ivar),
        DEVMETHOD(bus_write_ivar,       vtmmio_write_ivar),

        /* VirtIO bus interface. */
        DEVMETHOD(virtio_bus_negotiate_features, vtmmio_negotiate_features),
        DEVMETHOD(virtio_bus_with_feature,      vtmmio_with_feature),
        DEVMETHOD(virtio_bus_alloc_virtqueues,  vtmmio_alloc_virtqueues),
        DEVMETHOD(virtio_bus_setup_intr,        vtmmio_setup_intr),
        DEVMETHOD(virtio_bus_stop,              vtmmio_stop),
        DEVMETHOD(virtio_bus_poll,              vtmmio_poll),
        DEVMETHOD(virtio_bus_reinit,            vtmmio_reinit),
        DEVMETHOD(virtio_bus_reinit_complete,   vtmmio_reinit_complete),
        DEVMETHOD(virtio_bus_notify_vq,         vtmmio_notify_virtqueue),
        DEVMETHOD(virtio_bus_read_device_config, vtmmio_read_dev_config),
        DEVMETHOD(virtio_bus_write_device_config, vtmmio_write_dev_config),

        DEVMETHOD_END
};

DEFINE_CLASS_0(virtio_mmio, vtmmio_driver, vtmmio_methods,
    sizeof(struct vtmmio_softc));

MODULE_VERSION(virtio_mmio, 1);

static int
vtmmio_setup_intr(device_t dev, enum intr_type type)
{
        struct vtmmio_softc *sc;
        int rid;
        int err;

        sc = device_get_softc(dev);

        if (sc->platform != NULL) {
                err = VIRTIO_MMIO_SETUP_INTR(sc->platform, sc->dev,
                    vtmmio_vq_intr, sc);
                if (err == 0) {
                        /* Okay, we have backend-specific interrupts. */
                        return (0);
                }
        }

        rid = 0;
        sc->res[1] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            RF_ACTIVE);
        if (!sc->res[1]) {
                device_printf(dev, "Can't allocate interrupt\n");
                return (ENXIO);
        }

        if (bus_setup_intr(dev, sc->res[1], type | INTR_MPSAFE,
            NULL, vtmmio_vq_intr, sc, &sc->ih)) {
                device_printf(dev, "Can't setup the interrupt\n");
                return (ENXIO);
        }

        return (0);
}
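
/*
 * Common attach path used by the bus-specific front ends: map the register
 * window, verify the device version, reset the device, acknowledge it, and
 * add the child device that a matching VirtIO driver will later claim.
 */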

int
vtmmio_attach(device_t dev)
{
        struct vtmmio_softc *sc;
        device_t child;
        int rid;

        sc = device_get_softc(dev);
        sc->dev = dev;

        rid = 0;
        sc->res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
            RF_ACTIVE);
        if (!sc->res[0]) {
                device_printf(dev, "Cannot allocate memory window.\n");
                return (ENXIO);
        }

        sc->vtmmio_version = vtmmio_read_config_4(sc, VIRTIO_MMIO_VERSION);
        if (sc->vtmmio_version < 1 || sc->vtmmio_version > 2) {
                device_printf(dev, "Unsupported version: %x\n",
                    sc->vtmmio_version);
                bus_release_resource(dev, SYS_RES_MEMORY, 0,
                    sc->res[0]);
                sc->res[0] = NULL;
                return (ENXIO);
        }

        vtmmio_reset(sc);

        /* Tell the host we've noticed this device. */
        vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);

        if ((child = device_add_child(dev, NULL, -1)) == NULL) {
                device_printf(dev, "Cannot create child device.\n");
                vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
                vtmmio_detach(dev);
                return (ENOMEM);
        }

        sc->vtmmio_child_dev = child;
        vtmmio_probe_and_attach_child(sc);

        return (0);
}

static int
vtmmio_detach(device_t dev)
{
        struct vtmmio_softc *sc;
        device_t child;
        int error;

        sc = device_get_softc(dev);

        if ((child = sc->vtmmio_child_dev) != NULL) {
                error = device_delete_child(dev, child);
                if (error)
                        return (error);
                sc->vtmmio_child_dev = NULL;
        }

        vtmmio_reset(sc);

        if (sc->res[0] != NULL) {
                bus_release_resource(dev, SYS_RES_MEMORY, 0,
                    sc->res[0]);
                sc->res[0] = NULL;
        }

        return (0);
}

static int
vtmmio_suspend(device_t dev)
{

        return (bus_generic_suspend(dev));
}

static int
vtmmio_resume(device_t dev)
{

        return (bus_generic_resume(dev));
}

static int
vtmmio_shutdown(device_t dev)
{

        (void) bus_generic_shutdown(dev);

        /* Forcibly stop the host device. */
        vtmmio_stop(dev);

        return (0);
}

static void
vtmmio_driver_added(device_t dev, driver_t *driver)
{
        struct vtmmio_softc *sc;

        sc = device_get_softc(dev);

        vtmmio_probe_and_attach_child(sc);
}

static void
vtmmio_child_detached(device_t dev, device_t child)
{
        struct vtmmio_softc *sc;

        sc = device_get_softc(dev);

        vtmmio_reset(sc);
        vtmmio_release_child_resources(sc);
}
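
/*
 * Instance-variable accessors used by the VirtIO child to query the device
 * type and vendor and to register its feature description table.
 */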

static int
vtmmio_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
        struct vtmmio_softc *sc;

        sc = device_get_softc(dev);

        if (sc->vtmmio_child_dev != child)
                return (ENOENT);

        switch (index) {
        case VIRTIO_IVAR_DEVTYPE:
        case VIRTIO_IVAR_SUBDEVICE:
                *result = vtmmio_read_config_4(sc, VIRTIO_MMIO_DEVICE_ID);
                break;
        case VIRTIO_IVAR_VENDOR:
                *result = vtmmio_read_config_4(sc, VIRTIO_MMIO_VENDOR_ID);
                break;
        case VIRTIO_IVAR_SUBVENDOR:
        case VIRTIO_IVAR_DEVICE:
                /*
                 * Dummy value for fields not present in this bus.  Used by
                 * bus-agnostic virtio_child_pnpinfo_str.
                 */
                *result = 0;
                break;
        default:
                return (ENOENT);
        }

        return (0);
}

static int
vtmmio_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
{
        struct vtmmio_softc *sc;

        sc = device_get_softc(dev);

        if (sc->vtmmio_child_dev != child)
                return (ENOENT);

        switch (index) {
        case VIRTIO_IVAR_FEATURE_DESC:
                sc->vtmmio_child_feat_desc = (void *) value;
                break;
        default:
                return (ENOENT);
        }

        return (0);
}

static uint64_t
vtmmio_negotiate_features(device_t dev, uint64_t child_features)
{
        struct vtmmio_softc *sc;
        uint64_t host_features, features;

        sc = device_get_softc(dev);

        vtmmio_write_config_4(sc, VIRTIO_MMIO_HOST_FEATURES_SEL, 1);
        host_features = vtmmio_read_config_4(sc, VIRTIO_MMIO_HOST_FEATURES);
        host_features <<= 32;

        vtmmio_write_config_4(sc, VIRTIO_MMIO_HOST_FEATURES_SEL, 0);
        host_features |= vtmmio_read_config_4(sc, VIRTIO_MMIO_HOST_FEATURES);

        vtmmio_describe_features(sc, "host", host_features);

        /*
         * Limit negotiated features to what the driver, virtqueue, and
         * host all support.
         */
        features = host_features & child_features;
        features = virtqueue_filter_features(features);
        sc->vtmmio_features = features;

        vtmmio_describe_features(sc, "negotiated", features);

        vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES_SEL, 1);
        vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES, features >> 32);

        vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES_SEL, 0);
        vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES, features);

        return (features);
}

static int
vtmmio_with_feature(device_t dev, uint64_t feature)
{
        struct vtmmio_softc *sc;

        sc = device_get_softc(dev);

        return ((sc->vtmmio_features & feature) != 0);
}

static void
vtmmio_set_virtqueue(struct vtmmio_softc *sc, struct virtqueue *vq,
    uint32_t size)
{
        vm_paddr_t paddr;

        vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_NUM, size);
#if 0
        device_printf(dev, "virtqueue paddr 0x%08lx\n",
            (uint64_t)paddr);
#endif
        if (sc->vtmmio_version == 1) {
                vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_ALIGN,
                    VIRTIO_MMIO_VRING_ALIGN);
                paddr = virtqueue_paddr(vq);
                vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN,
                    paddr >> PAGE_SHIFT);
        } else {
                paddr = virtqueue_desc_paddr(vq);
                vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_DESC_LOW,
                    paddr);
                vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_DESC_HIGH,
                    ((uint64_t)paddr) >> 32);

                paddr = virtqueue_avail_paddr(vq);
                vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_AVAIL_LOW,
                    paddr);
                vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_AVAIL_HIGH,
                    ((uint64_t)paddr) >> 32);

                paddr = virtqueue_used_paddr(vq);
                vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_USED_LOW,
                    paddr);
                vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_USED_HIGH,
                    ((uint64_t)paddr) >> 32);

                vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_READY, 1);
        }
}
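
/*
 * Allocate the virtqueues requested by the child driver.  Each queue is
 * selected, sized from the maximum advertised by the device, allocated in
 * guest memory, and then programmed into the device via
 * vtmmio_set_virtqueue().
 */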

static int
vtmmio_alloc_virtqueues(device_t dev, int flags, int nvqs,
    struct vq_alloc_info *vq_info)
{
        struct vtmmio_virtqueue *vqx;
        struct vq_alloc_info *info;
        struct vtmmio_softc *sc;
        struct virtqueue *vq;
        uint32_t size;
        int idx, error;

        sc = device_get_softc(dev);

        if (sc->vtmmio_nvqs != 0)
                return (EALREADY);
        if (nvqs <= 0)
                return (EINVAL);

        sc->vtmmio_vqs = malloc(nvqs * sizeof(struct vtmmio_virtqueue),
            M_DEVBUF, M_NOWAIT | M_ZERO);
        if (sc->vtmmio_vqs == NULL)
                return (ENOMEM);

        if (sc->vtmmio_version == 1) {
                vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_PAGE_SIZE,
                    (1 << PAGE_SHIFT));
        }

        for (idx = 0; idx < nvqs; idx++) {
                vqx = &sc->vtmmio_vqs[idx];
                info = &vq_info[idx];

                vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_SEL, idx);

                vtmmio_select_virtqueue(sc, idx);
                size = vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX);

                error = virtqueue_alloc(dev, idx, size,
                    VIRTIO_MMIO_VRING_ALIGN, ~(vm_paddr_t)0, info, &vq);
                if (error) {
                        device_printf(dev,
                            "cannot allocate virtqueue %d: %d\n",
                            idx, error);
                        break;
                }

                vtmmio_set_virtqueue(sc, vq, size);

                vqx->vtv_vq = *info->vqai_vq = vq;
                vqx->vtv_no_intr = info->vqai_intr == NULL;

                sc->vtmmio_nvqs++;
        }

        if (error)
                vtmmio_free_virtqueues(sc);

        return (error);
}

static void
vtmmio_stop(device_t dev)
{

        vtmmio_reset(device_get_softc(dev));
}

static void
vtmmio_poll(device_t dev)
{
        struct vtmmio_softc *sc;

        sc = device_get_softc(dev);

        if (sc->platform != NULL)
                VIRTIO_MMIO_POLL(sc->platform);
}

static int
vtmmio_reinit(device_t dev, uint64_t features)
{
        struct vtmmio_softc *sc;
        int idx, error;

        sc = device_get_softc(dev);

        if (vtmmio_get_status(dev) != VIRTIO_CONFIG_STATUS_RESET)
                vtmmio_stop(dev);

        /*
         * Quickly drive the status through ACK and DRIVER.  The device
         * does not become usable again until vtmmio_reinit_complete().
         */
        vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
        vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);

        vtmmio_negotiate_features(dev, features);

        if (sc->vtmmio_version == 1) {
                vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_PAGE_SIZE,
                    (1 << PAGE_SHIFT));
        }

        for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
                error = vtmmio_reinit_virtqueue(sc, idx);
                if (error)
                        return (error);
        }

        return (0);
}

static void
vtmmio_reinit_complete(device_t dev)
{

        vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}

static void
vtmmio_notify_virtqueue(device_t dev, uint16_t queue)
{
        struct vtmmio_softc *sc;

        sc = device_get_softc(dev);

        vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_NOTIFY, queue);
}
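
/*
 * Status register helpers.  Writing zero resets the device; any other value
 * is OR-ed with the bits already set, so ACK, DRIVER, and DRIVER_OK
 * accumulate as initialization progresses.
 */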

static uint8_t
vtmmio_get_status(device_t dev)
{
        struct vtmmio_softc *sc;

        sc = device_get_softc(dev);

        return (vtmmio_read_config_4(sc, VIRTIO_MMIO_STATUS));
}

static void
vtmmio_set_status(device_t dev, uint8_t status)
{
        struct vtmmio_softc *sc;

        sc = device_get_softc(dev);

        if (status != VIRTIO_CONFIG_STATUS_RESET)
                status |= vtmmio_get_status(dev);

        vtmmio_write_config_4(sc, VIRTIO_MMIO_STATUS, status);
}

static void
vtmmio_read_dev_config(device_t dev, bus_size_t offset,
    void *dst, int length)
{
        struct vtmmio_softc *sc;
        bus_size_t off;
        uint8_t *d;
        int size;
        uint64_t low32, high32;

        sc = device_get_softc(dev);
        off = VIRTIO_MMIO_CONFIG + offset;

        /*
         * The non-legacy MMIO specification adds the following restriction:
         *
         * 4.2.2.2: For the device-specific configuration space, the driver
         * MUST use 8 bit wide accesses for 8 bit wide fields, 16 bit wide
         * and aligned accesses for 16 bit wide fields and 32 bit wide and
         * aligned accesses for 32 and 64 bit wide fields.
         *
         * The endianness also varies between non-legacy and legacy:
         *
         * 2.4: Note: The device configuration space uses the little-endian
         * format for multi-byte fields.
         *
         * 2.4.3: Note that for legacy interfaces, device configuration space
         * is generally the guest's native endian, rather than PCI's
         * little-endian. The correct endian-ness is documented for each
         * device.
         */
        if (sc->vtmmio_version > 1) {
                switch (length) {
                case 1:
                        *(uint8_t *)dst = vtmmio_read_config_1(sc, off);
                        break;
                case 2:
                        *(uint16_t *)dst =
                            le16toh(vtmmio_read_config_2(sc, off));
                        break;
                case 4:
                        *(uint32_t *)dst =
                            le32toh(vtmmio_read_config_4(sc, off));
                        break;
                case 8:
                        low32 = le32toh(vtmmio_read_config_4(sc, off));
                        high32 = le32toh(vtmmio_read_config_4(sc, off + 4));
                        *(uint64_t *)dst = (high32 << 32) | low32;
                        break;
                default:
                        panic("%s: invalid length %d\n", __func__, length);
                }

                return;
        }

        for (d = dst; length > 0; d += size, off += size, length -= size) {
#ifdef ALLOW_WORD_ALIGNED_ACCESS
                if (length >= 4) {
                        size = 4;
                        *(uint32_t *)d = vtmmio_read_config_4(sc, off);
                } else if (length >= 2) {
                        size = 2;
                        *(uint16_t *)d = vtmmio_read_config_2(sc, off);
                } else
#endif
                {
                        size = 1;
                        *d = vtmmio_read_config_1(sc, off);
                }
        }
}

static void
vtmmio_write_dev_config(device_t dev, bus_size_t offset,
    void *src, int length)
{
        struct vtmmio_softc *sc;
        bus_size_t off;
        uint8_t *s;
        int size;

        sc = device_get_softc(dev);
        off = VIRTIO_MMIO_CONFIG + offset;

        /*
         * The non-legacy MMIO specification adds size and alignment
         * restrictions.  It also changes the endianness from native-endian
         * to little-endian.  See vtmmio_read_dev_config.
         */
        if (sc->vtmmio_version > 1) {
                switch (length) {
                case 1:
                        vtmmio_write_config_1(sc, off, *(uint8_t *)src);
                        break;
                case 2:
                        vtmmio_write_config_2(sc, off,
                            htole16(*(uint16_t *)src));
                        break;
                case 4:
                        vtmmio_write_config_4(sc, off,
                            htole32(*(uint32_t *)src));
                        break;
                case 8:
                        vtmmio_write_config_4(sc, off,
                            htole32(*(uint64_t *)src));
                        vtmmio_write_config_4(sc, off + 4,
                            htole32((*(uint64_t *)src) >> 32));
                        break;
                default:
                        panic("%s: invalid length %d\n", __func__, length);
                }

                return;
        }

        for (s = src; length > 0; s += size, off += size, length -= size) {
#ifdef ALLOW_WORD_ALIGNED_ACCESS
                if (length >= 4) {
                        size = 4;
                        vtmmio_write_config_4(sc, off, *(uint32_t *)s);
                } else if (length >= 2) {
                        size = 2;
                        vtmmio_write_config_2(sc, off, *(uint16_t *)s);
                } else
#endif
                {
                        size = 1;
                        vtmmio_write_config_1(sc, off, *s);
                }
        }
}

static void
vtmmio_describe_features(struct vtmmio_softc *sc, const char *msg,
    uint64_t features)
{
        device_t dev, child;

        dev = sc->dev;
        child = sc->vtmmio_child_dev;

        if (device_is_attached(child) || bootverbose == 0)
                return;

        virtio_describe(dev, msg, features, sc->vtmmio_child_feat_desc);
}
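
/*
 * Probe and attach the VirtIO child once a matching driver is available.
 * On failure the device is reset and its resources are released, but the
 * status is restored to ACK so a later driver load can retry.
 */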

static void
vtmmio_probe_and_attach_child(struct vtmmio_softc *sc)
{
        device_t dev, child;

        dev = sc->dev;
        child = sc->vtmmio_child_dev;

        if (child == NULL)
                return;

        if (device_get_state(child) != DS_NOTPRESENT) {
                return;
        }

        if (device_probe(child) != 0) {
                return;
        }

        vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
        if (device_attach(child) != 0) {
                vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
                vtmmio_reset(sc);
                vtmmio_release_child_resources(sc);
                /* Reset status for future attempt. */
                vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
        } else {
                vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
                VIRTIO_ATTACH_COMPLETED(child);
        }
}

static int
vtmmio_reinit_virtqueue(struct vtmmio_softc *sc, int idx)
{
        struct vtmmio_virtqueue *vqx;
        struct virtqueue *vq;
        int error;
        uint16_t size;

        vqx = &sc->vtmmio_vqs[idx];
        vq = vqx->vtv_vq;

        KASSERT(vq != NULL, ("%s: vq %d not allocated", __func__, idx));

        vtmmio_select_virtqueue(sc, idx);
        size = vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX);

        error = virtqueue_reinit(vq, size);
        if (error)
                return (error);

        vtmmio_set_virtqueue(sc, vq, size);

        return (0);
}

static void
vtmmio_free_interrupts(struct vtmmio_softc *sc)
{

        if (sc->ih != NULL)
                bus_teardown_intr(sc->dev, sc->res[1], sc->ih);

        if (sc->res[1] != NULL)
                bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->res[1]);
}

static void
vtmmio_free_virtqueues(struct vtmmio_softc *sc)
{
        struct vtmmio_virtqueue *vqx;
        int idx;

        for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
                vqx = &sc->vtmmio_vqs[idx];

                vtmmio_select_virtqueue(sc, idx);
                if (sc->vtmmio_version == 1)
                        vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN, 0);
                else
                        vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_READY, 0);

                virtqueue_free(vqx->vtv_vq);
                vqx->vtv_vq = NULL;
        }

        free(sc->vtmmio_vqs, M_DEVBUF);
        sc->vtmmio_vqs = NULL;
        sc->vtmmio_nvqs = 0;
}

static void
vtmmio_release_child_resources(struct vtmmio_softc *sc)
{

        vtmmio_free_interrupts(sc);
        vtmmio_free_virtqueues(sc);
}

static void
vtmmio_reset(struct vtmmio_softc *sc)
{

        /*
         * Setting the status to RESET sets the host device to
         * the original, uninitialized state.
         */
        vtmmio_set_status(sc->dev, VIRTIO_CONFIG_STATUS_RESET);
}

static void
vtmmio_select_virtqueue(struct vtmmio_softc *sc, int idx)
{

        vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_SEL, idx);
}

static void
vtmmio_vq_intr(void *arg)
{
        struct vtmmio_virtqueue *vqx;
        struct vtmmio_softc *sc;
        struct virtqueue *vq;
        uint32_t status;
        int idx;

        sc = arg;

        status = vtmmio_read_config_4(sc, VIRTIO_MMIO_INTERRUPT_STATUS);
        vtmmio_write_config_4(sc, VIRTIO_MMIO_INTERRUPT_ACK, status);

        /* The config changed. */
        if (status & VIRTIO_MMIO_INT_CONFIG)
                if (sc->vtmmio_child_dev != NULL)
                        VIRTIO_CONFIG_CHANGE(sc->vtmmio_child_dev);

        /* Notify all virtqueues. */
        if (status & VIRTIO_MMIO_INT_VRING) {
                for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
                        vqx = &sc->vtmmio_vqs[idx];
                        if (vqx->vtv_no_intr == 0) {
                                vq = vqx->vtv_vq;
                                virtqueue_intr(vq);
                        }
                }
        }
}