/*-
 * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Portions of this software were developed by Andrew Turner
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * VirtIO MMIO interface.
 * This driver is heavily based on the VirtIO PCI interface driver.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/rman.h>
#include <sys/endian.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/mmio/virtio_mmio.h>

#include "virtio_mmio_if.h"
#include "virtio_bus_if.h"
#include "virtio_if.h"

#define	PAGE_SHIFT	12

struct vtmmio_virtqueue {
	struct virtqueue	*vtv_vq;
	int			 vtv_no_intr;
};

static int	vtmmio_detach(device_t);
static int	vtmmio_suspend(device_t);
static int	vtmmio_resume(device_t);
static int	vtmmio_shutdown(device_t);
static void	vtmmio_driver_added(device_t, driver_t *);
static void	vtmmio_child_detached(device_t, device_t);
static int	vtmmio_read_ivar(device_t, device_t, int, uintptr_t *);
static int	vtmmio_write_ivar(device_t, device_t, int, uintptr_t);
static uint64_t	vtmmio_negotiate_features(device_t, uint64_t);
static int	vtmmio_with_feature(device_t, uint64_t);
static void	vtmmio_set_virtqueue(struct vtmmio_softc *sc,
		    struct virtqueue *vq, uint32_t size);
static int	vtmmio_alloc_virtqueues(device_t, int, int,
		    struct vq_alloc_info *);
static int	vtmmio_setup_intr(device_t, enum intr_type);
static void	vtmmio_stop(device_t);
static void	vtmmio_poll(device_t);
static int	vtmmio_reinit(device_t, uint64_t);
static void	vtmmio_reinit_complete(device_t);
static void	vtmmio_notify_virtqueue(device_t, uint16_t);
static uint8_t	vtmmio_get_status(device_t);
static void	vtmmio_set_status(device_t, uint8_t);
static void	vtmmio_read_dev_config(device_t, bus_size_t, void *, int);
static void	vtmmio_write_dev_config(device_t, bus_size_t, void *, int);
static void	vtmmio_describe_features(struct vtmmio_softc *, const char *,
		    uint64_t);
static void	vtmmio_probe_and_attach_child(struct vtmmio_softc *);
static int	vtmmio_reinit_virtqueue(struct vtmmio_softc *, int);
static void	vtmmio_free_interrupts(struct vtmmio_softc *);
static void	vtmmio_free_virtqueues(struct vtmmio_softc *);
static void	vtmmio_release_child_resources(struct vtmmio_softc *);
static void	vtmmio_reset(struct vtmmio_softc *);
static void	vtmmio_select_virtqueue(struct vtmmio_softc *, int);
static void	vtmmio_vq_intr(void *);
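
/*
 * Optional platform hooks: when sc->platform is set, the write wrappers
 * below bracket each register write with the VIRTIO_MMIO_PREWRITE() and
 * VIRTIO_MMIO_NOTE() methods from virtio_mmio_if.h, and interrupt setup and
 * polling are first offered to the platform driver.  This lets a
 * platform-specific backend observe and react to accesses to the
 * virtio-mmio register window.
 */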

/*
 * Low-level register read/write wrappers.
 */
#define	vtmmio_write_config_1(sc, o, v)					\
do {									\
	if (sc->platform != NULL)					\
		VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v));		\
	bus_write_1((sc)->res[0], (o), (v));				\
	if (sc->platform != NULL)					\
		VIRTIO_MMIO_NOTE(sc->platform, (o), (v));		\
} while (0)
#define	vtmmio_write_config_2(sc, o, v)					\
do {									\
	if (sc->platform != NULL)					\
		VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v));		\
	bus_write_2((sc)->res[0], (o), (v));				\
	if (sc->platform != NULL)					\
		VIRTIO_MMIO_NOTE(sc->platform, (o), (v));		\
} while (0)
#define	vtmmio_write_config_4(sc, o, v)					\
do {									\
	if (sc->platform != NULL)					\
		VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v));		\
	bus_write_4((sc)->res[0], (o), (v));				\
	if (sc->platform != NULL)					\
		VIRTIO_MMIO_NOTE(sc->platform, (o), (v));		\
} while (0)

#define	vtmmio_read_config_1(sc, o) \
	bus_read_1((sc)->res[0], (o))
#define	vtmmio_read_config_2(sc, o) \
	bus_read_2((sc)->res[0], (o))
#define	vtmmio_read_config_4(sc, o) \
	bus_read_4((sc)->res[0], (o))

static device_method_t vtmmio_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_attach,	vtmmio_attach),
	DEVMETHOD(device_detach,	vtmmio_detach),
	DEVMETHOD(device_suspend,	vtmmio_suspend),
	DEVMETHOD(device_resume,	vtmmio_resume),
	DEVMETHOD(device_shutdown,	vtmmio_shutdown),

	/* Bus interface. */
	DEVMETHOD(bus_driver_added,	vtmmio_driver_added),
	DEVMETHOD(bus_child_detached,	vtmmio_child_detached),
	DEVMETHOD(bus_child_pnpinfo_str, virtio_child_pnpinfo_str),
	DEVMETHOD(bus_read_ivar,	vtmmio_read_ivar),
	DEVMETHOD(bus_write_ivar,	vtmmio_write_ivar),

	/* VirtIO bus interface. */
	DEVMETHOD(virtio_bus_negotiate_features, vtmmio_negotiate_features),
	DEVMETHOD(virtio_bus_with_feature,	vtmmio_with_feature),
	DEVMETHOD(virtio_bus_alloc_virtqueues,	vtmmio_alloc_virtqueues),
	DEVMETHOD(virtio_bus_setup_intr,	vtmmio_setup_intr),
	DEVMETHOD(virtio_bus_stop,		vtmmio_stop),
	DEVMETHOD(virtio_bus_poll,		vtmmio_poll),
	DEVMETHOD(virtio_bus_reinit,		vtmmio_reinit),
	DEVMETHOD(virtio_bus_reinit_complete,	vtmmio_reinit_complete),
	DEVMETHOD(virtio_bus_notify_vq,		vtmmio_notify_virtqueue),
	DEVMETHOD(virtio_bus_read_device_config, vtmmio_read_dev_config),
	DEVMETHOD(virtio_bus_write_device_config, vtmmio_write_dev_config),

	DEVMETHOD_END
};

DEFINE_CLASS_0(virtio_mmio, vtmmio_driver, vtmmio_methods,
    sizeof(struct vtmmio_softc));

MODULE_VERSION(virtio_mmio, 1);

static int
vtmmio_setup_intr(device_t dev, enum intr_type type)
{
	struct vtmmio_softc *sc;
	int rid;
	int err;

	sc = device_get_softc(dev);

	if (sc->platform != NULL) {
		err = VIRTIO_MMIO_SETUP_INTR(sc->platform, sc->dev,
		    vtmmio_vq_intr, sc);
		if (err == 0) {
			/* Okay, we have backend-specific interrupts. */
			return (0);
		}
	}

	rid = 0;
	sc->res[1] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (!sc->res[1]) {
		device_printf(dev, "Can't allocate interrupt\n");
		return (ENXIO);
	}

	if (bus_setup_intr(dev, sc->res[1], type | INTR_MPSAFE,
	    NULL, vtmmio_vq_intr, sc, &sc->ih)) {
		device_printf(dev, "Can't setup the interrupt\n");
		return (ENXIO);
	}

	return (0);
}
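
/*
 * Bus attachment: map the register window, verify the device version (only
 * versions 1 and 2 are handled), reset the device and acknowledge it, then
 * add a single child device.  The child is probed and attached either here
 * or later from vtmmio_driver_added() once a matching driver is loaded.
 */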

int
vtmmio_attach(device_t dev)
{
	struct vtmmio_softc *sc;
	device_t child;
	int rid;

	sc = device_get_softc(dev);
	sc->dev = dev;

	rid = 0;
	sc->res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (!sc->res[0]) {
		device_printf(dev, "Cannot allocate memory window.\n");
		return (ENXIO);
	}

	sc->vtmmio_version = vtmmio_read_config_4(sc, VIRTIO_MMIO_VERSION);
	if (sc->vtmmio_version < 1 || sc->vtmmio_version > 2) {
		device_printf(dev, "Unsupported version: %x\n",
		    sc->vtmmio_version);
		bus_release_resource(dev, SYS_RES_MEMORY, 0,
		    sc->res[0]);
		sc->res[0] = NULL;
		return (ENXIO);
	}

	vtmmio_reset(sc);

	/* Tell the host we've noticed this device. */
	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);

	if ((child = device_add_child(dev, NULL, -1)) == NULL) {
		device_printf(dev, "Cannot create child device.\n");
		vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
		vtmmio_detach(dev);
		return (ENOMEM);
	}

	sc->vtmmio_child_dev = child;
	vtmmio_probe_and_attach_child(sc);

	return (0);
}

static int
vtmmio_detach(device_t dev)
{
	struct vtmmio_softc *sc;
	device_t child;
	int error;

	sc = device_get_softc(dev);

	if ((child = sc->vtmmio_child_dev) != NULL) {
		error = device_delete_child(dev, child);
		if (error)
			return (error);
		sc->vtmmio_child_dev = NULL;
	}

	vtmmio_reset(sc);

	if (sc->res[0] != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, 0,
		    sc->res[0]);
		sc->res[0] = NULL;
	}

	return (0);
}

static int
vtmmio_suspend(device_t dev)
{

	return (bus_generic_suspend(dev));
}

static int
vtmmio_resume(device_t dev)
{

	return (bus_generic_resume(dev));
}

static int
vtmmio_shutdown(device_t dev)
{

	(void) bus_generic_shutdown(dev);

	/* Forcibly stop the host device. */
	vtmmio_stop(dev);

	return (0);
}

static void
vtmmio_driver_added(device_t dev, driver_t *driver)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	vtmmio_probe_and_attach_child(sc);
}

static void
vtmmio_child_detached(device_t dev, device_t child)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	vtmmio_reset(sc);
	vtmmio_release_child_resources(sc);
}
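
/*
 * Instance variables exported to the child virtio device driver.  The MMIO
 * transport has no PCI-style vendor/device IDs, so both DEVTYPE and
 * SUBDEVICE report the DEVICE_ID register, and fields with no MMIO
 * equivalent read as zero so that the bus-agnostic
 * virtio_child_pnpinfo_str() keeps working unchanged.
 */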

static int
vtmmio_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	if (sc->vtmmio_child_dev != child)
		return (ENOENT);

	switch (index) {
	case VIRTIO_IVAR_DEVTYPE:
	case VIRTIO_IVAR_SUBDEVICE:
		*result = vtmmio_read_config_4(sc, VIRTIO_MMIO_DEVICE_ID);
		break;
	case VIRTIO_IVAR_VENDOR:
		*result = vtmmio_read_config_4(sc, VIRTIO_MMIO_VENDOR_ID);
		break;
	case VIRTIO_IVAR_SUBVENDOR:
	case VIRTIO_IVAR_DEVICE:
		/*
		 * Dummy value for fields not present in this bus.  Used by
		 * bus-agnostic virtio_child_pnpinfo_str.
		 */
		*result = 0;
		break;
	default:
		return (ENOENT);
	}

	return (0);
}

static int
vtmmio_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	if (sc->vtmmio_child_dev != child)
		return (ENOENT);

	switch (index) {
	case VIRTIO_IVAR_FEATURE_DESC:
		sc->vtmmio_child_feat_desc = (void *) value;
		break;
	default:
		return (ENOENT);
	}

	return (0);
}

static uint64_t
vtmmio_negotiate_features(device_t dev, uint64_t child_features)
{
	struct vtmmio_softc *sc;
	uint64_t host_features, features;

	sc = device_get_softc(dev);

	vtmmio_write_config_4(sc, VIRTIO_MMIO_HOST_FEATURES_SEL, 1);
	host_features = vtmmio_read_config_4(sc, VIRTIO_MMIO_HOST_FEATURES);
	host_features <<= 32;

	vtmmio_write_config_4(sc, VIRTIO_MMIO_HOST_FEATURES_SEL, 0);
	host_features |= vtmmio_read_config_4(sc, VIRTIO_MMIO_HOST_FEATURES);

	vtmmio_describe_features(sc, "host", host_features);

	/*
	 * Limit negotiated features to what the driver, virtqueue, and
	 * host all support.
	 */
	features = host_features & child_features;
	features = virtqueue_filter_features(features);
	sc->vtmmio_features = features;

	vtmmio_describe_features(sc, "negotiated", features);

	/*
	 * Write the accepted driver features back, selecting each 32-bit
	 * half with the guest (driver) features selector.
	 */
	vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES_SEL, 1);
	vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES, features >> 32);

	vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES_SEL, 0);
	vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES, features);

	return (features);
}

static int
vtmmio_with_feature(device_t dev, uint64_t feature)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	return ((sc->vtmmio_features & feature) != 0);
}

static void
vtmmio_set_virtqueue(struct vtmmio_softc *sc, struct virtqueue *vq,
    uint32_t size)
{
	vm_paddr_t paddr;

	vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_NUM, size);
#if 0
	device_printf(dev, "virtqueue paddr 0x%08lx\n",
	    (uint64_t)paddr);
#endif
	if (sc->vtmmio_version == 1) {
		/* Legacy interface: a single page-aligned ring given by PFN. */
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_ALIGN,
		    VIRTIO_MMIO_VRING_ALIGN);
		paddr = virtqueue_paddr(vq);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN,
		    paddr >> PAGE_SHIFT);
	} else {
		/* Modern interface: separate desc/avail/used addresses. */
		paddr = virtqueue_desc_paddr(vq);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_DESC_LOW,
		    paddr);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_DESC_HIGH,
		    ((uint64_t)paddr) >> 32);

		paddr = virtqueue_avail_paddr(vq);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_AVAIL_LOW,
		    paddr);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_AVAIL_HIGH,
		    ((uint64_t)paddr) >> 32);

		paddr = virtqueue_used_paddr(vq);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_USED_LOW,
		    paddr);
		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_USED_HIGH,
		    ((uint64_t)paddr) >> 32);

		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_READY, 1);
	}
}
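
/*
 * Virtqueue allocation is requested by the child device driver, typically
 * from its attach routine, via virtio_alloc_virtqueues().  A rough sketch
 * (the vtfoo_* names are hypothetical, not part of this driver):
 *
 *	struct vq_alloc_info vq_info;
 *
 *	VQ_ALLOC_INFO_INIT(&vq_info, 0, vtfoo_vq_intr, sc, &sc->vtfoo_vq,
 *	    "%s request", device_get_nameunit(dev));
 *	error = virtio_alloc_virtqueues(dev, 0, 1, &vq_info);
 *
 * Each queue is sized from QUEUE_NUM_MAX and handed to the device in
 * vtmmio_set_virtqueue() above.
 */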

static int
vtmmio_alloc_virtqueues(device_t dev, int flags, int nvqs,
    struct vq_alloc_info *vq_info)
{
	struct vtmmio_virtqueue *vqx;
	struct vq_alloc_info *info;
	struct vtmmio_softc *sc;
	struct virtqueue *vq;
	uint32_t size;
	int idx, error;

	sc = device_get_softc(dev);

	if (sc->vtmmio_nvqs != 0)
		return (EALREADY);
	if (nvqs <= 0)
		return (EINVAL);

	sc->vtmmio_vqs = malloc(nvqs * sizeof(struct vtmmio_virtqueue),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->vtmmio_vqs == NULL)
		return (ENOMEM);

	if (sc->vtmmio_version == 1) {
		vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_PAGE_SIZE,
		    (1 << PAGE_SHIFT));
	}

	for (idx = 0; idx < nvqs; idx++) {
		vqx = &sc->vtmmio_vqs[idx];
		info = &vq_info[idx];

		vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_SEL, idx);

		vtmmio_select_virtqueue(sc, idx);
		size = vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX);

		error = virtqueue_alloc(dev, idx, size,
		    VIRTIO_MMIO_VRING_ALIGN, ~(vm_paddr_t)0, info, &vq);
		if (error) {
			device_printf(dev,
			    "cannot allocate virtqueue %d: %d\n",
			    idx, error);
			break;
		}

		vtmmio_set_virtqueue(sc, vq, size);

		vqx->vtv_vq = *info->vqai_vq = vq;
		vqx->vtv_no_intr = info->vqai_intr == NULL;

		sc->vtmmio_nvqs++;
	}

	if (error)
		vtmmio_free_virtqueues(sc);

	return (error);
}

static void
vtmmio_stop(device_t dev)
{

	vtmmio_reset(device_get_softc(dev));
}

static void
vtmmio_poll(device_t dev)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	if (sc->platform != NULL)
		VIRTIO_MMIO_POLL(sc->platform);
}

static int
vtmmio_reinit(device_t dev, uint64_t features)
{
	struct vtmmio_softc *sc;
	int idx, error;

	sc = device_get_softc(dev);

	if (vtmmio_get_status(dev) != VIRTIO_CONFIG_STATUS_RESET)
		vtmmio_stop(dev);

	/*
	 * Quickly drive the status through ACK and DRIVER. The device
	 * does not become usable again until vtmmio_reinit_complete().
	 */
	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);

	vtmmio_negotiate_features(dev, features);

	if (sc->vtmmio_version == 1) {
		vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_PAGE_SIZE,
		    (1 << PAGE_SHIFT));
	}

	for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
		error = vtmmio_reinit_virtqueue(sc, idx);
		if (error)
			return (error);
	}

	return (0);
}

static void
vtmmio_reinit_complete(device_t dev)
{

	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}

static void
vtmmio_notify_virtqueue(device_t dev, uint16_t queue)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_NOTIFY, queue);
}

static uint8_t
vtmmio_get_status(device_t dev)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	return (vtmmio_read_config_4(sc, VIRTIO_MMIO_STATUS));
}

static void
vtmmio_set_status(device_t dev, uint8_t status)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	if (status != VIRTIO_CONFIG_STATUS_RESET)
		status |= vtmmio_get_status(dev);

	vtmmio_write_config_4(sc, VIRTIO_MMIO_STATUS, status);
}
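
/*
 * Device-specific configuration space accessors.  Child drivers reach these
 * through virtio_read_device_config() and virtio_write_device_config(); as
 * a rough sketch (the struct and field names are taken from the virtio
 * block device, not from this file):
 *
 *	uint64_t capacity;
 *
 *	virtio_read_device_config(dev,
 *	    offsetof(struct virtio_blk_config, capacity),
 *	    &capacity, sizeof(capacity));
 */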

static void
vtmmio_read_dev_config(device_t dev, bus_size_t offset,
    void *dst, int length)
{
	struct vtmmio_softc *sc;
	bus_size_t off;
	uint8_t *d;
	int size;
	uint64_t low32, high32;

	sc = device_get_softc(dev);
	off = VIRTIO_MMIO_CONFIG + offset;

	/*
	 * The non-legacy MMIO specification adds the following restriction:
	 *
	 * 4.2.2.2: For the device-specific configuration space, the driver
	 * MUST use 8 bit wide accesses for 8 bit wide fields, 16 bit wide
	 * and aligned accesses for 16 bit wide fields and 32 bit wide and
	 * aligned accesses for 32 and 64 bit wide fields.
	 *
	 * The endianness also varies between non-legacy and legacy:
	 *
	 * 2.4: Note: The device configuration space uses the little-endian
	 * format for multi-byte fields.
	 *
	 * 2.4.3: Note that for legacy interfaces, device configuration space
	 * is generally the guest's native endian, rather than PCI's
	 * little-endian. The correct endian-ness is documented for each
	 * device.
	 */
	if (sc->vtmmio_version > 1) {
		switch (length) {
		case 1:
			*(uint8_t *)dst = vtmmio_read_config_1(sc, off);
			break;
		case 2:
			*(uint16_t *)dst =
			    le16toh(vtmmio_read_config_2(sc, off));
			break;
		case 4:
			*(uint32_t *)dst =
			    le32toh(vtmmio_read_config_4(sc, off));
			break;
		case 8:
			low32 = le32toh(vtmmio_read_config_4(sc, off));
			high32 = le32toh(vtmmio_read_config_4(sc, off + 4));
			*(uint64_t *)dst = (high32 << 32) | low32;
			break;
		default:
			panic("%s: invalid length %d\n", __func__, length);
		}

		return;
	}

	for (d = dst; length > 0; d += size, off += size, length -= size) {
#ifdef ALLOW_WORD_ALIGNED_ACCESS
		if (length >= 4) {
			size = 4;
			*(uint32_t *)d = vtmmio_read_config_4(sc, off);
		} else if (length >= 2) {
			size = 2;
			*(uint16_t *)d = vtmmio_read_config_2(sc, off);
		} else
#endif
		{
			size = 1;
			*d = vtmmio_read_config_1(sc, off);
		}
	}
}

static void
vtmmio_write_dev_config(device_t dev, bus_size_t offset,
    void *src, int length)
{
	struct vtmmio_softc *sc;
	bus_size_t off;
	uint8_t *s;
	int size;

	sc = device_get_softc(dev);
	off = VIRTIO_MMIO_CONFIG + offset;

	/*
	 * The non-legacy MMIO specification adds size and alignment
	 * restrictions. It also changes the endianness from native-endian
	 * to little-endian. See vtmmio_read_dev_config.
	 */
	if (sc->vtmmio_version > 1) {
		switch (length) {
		case 1:
			vtmmio_write_config_1(sc, off, *(uint8_t *)src);
			break;
		case 2:
			vtmmio_write_config_2(sc, off,
			    htole16(*(uint16_t *)src));
			break;
		case 4:
			vtmmio_write_config_4(sc, off,
			    htole32(*(uint32_t *)src));
			break;
		case 8:
			vtmmio_write_config_4(sc, off,
			    htole32(*(uint64_t *)src));
			vtmmio_write_config_4(sc, off + 4,
			    htole32((*(uint64_t *)src) >> 32));
			break;
		default:
			panic("%s: invalid length %d\n", __func__, length);
		}

		return;
	}

	for (s = src; length > 0; s += size, off += size, length -= size) {
#ifdef ALLOW_WORD_ALIGNED_ACCESS
		if (length >= 4) {
			size = 4;
			vtmmio_write_config_4(sc, off, *(uint32_t *)s);
		} else if (length >= 2) {
			size = 2;
			vtmmio_write_config_2(sc, off, *(uint16_t *)s);
		} else
#endif
		{
			size = 1;
			vtmmio_write_config_1(sc, off, *s);
		}
	}
}

static void
vtmmio_describe_features(struct vtmmio_softc *sc, const char *msg,
    uint64_t features)
{
	device_t dev, child;

	dev = sc->dev;
	child = sc->vtmmio_child_dev;

	if (device_is_attached(child) || bootverbose == 0)
		return;

	virtio_describe(dev, msg, features, sc->vtmmio_child_feat_desc);
}
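
/*
 * Child probe/attach glue.  The virtio status register is advanced in step
 * with the child driver: DRIVER is set before device_attach(), DRIVER_OK on
 * success, and FAILED (followed by a reset back to ACK) when the attach
 * fails, so that a later attempt can start over cleanly.
 */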

static void
vtmmio_probe_and_attach_child(struct vtmmio_softc *sc)
{
	device_t dev, child;

	dev = sc->dev;
	child = sc->vtmmio_child_dev;

	if (child == NULL)
		return;

	if (device_get_state(child) != DS_NOTPRESENT)
		return;

	if (device_probe(child) != 0)
		return;

	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
	if (device_attach(child) != 0) {
		vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
		vtmmio_reset(sc);
		vtmmio_release_child_resources(sc);
		/* Reset status for future attempt. */
		vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
	} else {
		vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
		VIRTIO_ATTACH_COMPLETED(child);
	}
}

static int
vtmmio_reinit_virtqueue(struct vtmmio_softc *sc, int idx)
{
	struct vtmmio_virtqueue *vqx;
	struct virtqueue *vq;
	int error;
	uint16_t size;

	vqx = &sc->vtmmio_vqs[idx];
	vq = vqx->vtv_vq;

	KASSERT(vq != NULL, ("%s: vq %d not allocated", __func__, idx));

	vtmmio_select_virtqueue(sc, idx);
	size = vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX);

	error = virtqueue_reinit(vq, size);
	if (error)
		return (error);

	vtmmio_set_virtqueue(sc, vq, size);

	return (0);
}

static void
vtmmio_free_interrupts(struct vtmmio_softc *sc)
{

	if (sc->ih != NULL)
		bus_teardown_intr(sc->dev, sc->res[1], sc->ih);

	if (sc->res[1] != NULL)
		bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->res[1]);
}

static void
vtmmio_free_virtqueues(struct vtmmio_softc *sc)
{
	struct vtmmio_virtqueue *vqx;
	int idx;

	for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
		vqx = &sc->vtmmio_vqs[idx];

		vtmmio_select_virtqueue(sc, idx);
		if (sc->vtmmio_version == 1)
			vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN, 0);
		else
			vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_READY, 0);

		virtqueue_free(vqx->vtv_vq);
		vqx->vtv_vq = NULL;
	}

	free(sc->vtmmio_vqs, M_DEVBUF);
	sc->vtmmio_vqs = NULL;
	sc->vtmmio_nvqs = 0;
}

static void
vtmmio_release_child_resources(struct vtmmio_softc *sc)
{

	vtmmio_free_interrupts(sc);
	vtmmio_free_virtqueues(sc);
}

static void
vtmmio_reset(struct vtmmio_softc *sc)
{

	/*
	 * Setting the status to RESET sets the host device to
	 * the original, uninitialized state.
	 */
	vtmmio_set_status(sc->dev, VIRTIO_CONFIG_STATUS_RESET);
}

static void
vtmmio_select_virtqueue(struct vtmmio_softc *sc, int idx)
{

	vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_SEL, idx);
}

static void
vtmmio_vq_intr(void *arg)
{
	struct vtmmio_virtqueue *vqx;
	struct vtmmio_softc *sc;
	struct virtqueue *vq;
	uint32_t status;
	int idx;

	sc = arg;

	status = vtmmio_read_config_4(sc, VIRTIO_MMIO_INTERRUPT_STATUS);
	vtmmio_write_config_4(sc, VIRTIO_MMIO_INTERRUPT_ACK, status);

	/* The config changed */
	if (status & VIRTIO_MMIO_INT_CONFIG)
		if (sc->vtmmio_child_dev != NULL)
			VIRTIO_CONFIG_CHANGE(sc->vtmmio_child_dev);

	/* Notify all virtqueues. */
	if (status & VIRTIO_MMIO_INT_VRING) {
		for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
			vqx = &sc->vtmmio_vqs[idx];
			if (vqx->vtv_no_intr == 0) {
				vq = vqx->vtv_vq;
				virtqueue_intr(vq);
			}
		}
	}
}
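
/*
 * No device_probe method and no DRIVER_MODULE() are provided here: a
 * bus-specific front-end (e.g. FDT or ACPI glue) is expected to subclass
 * vtmmio_driver, supply the probe routine and resource discovery, and
 * register itself with its parent bus.  A hypothetical FDT front-end would
 * look roughly like:
 *
 *	DEFINE_CLASS_1(virtio_mmio, vtmmio_fdt_driver, vtmmio_fdt_methods,
 *	    sizeof(struct vtmmio_softc), vtmmio_driver);
 *	DRIVER_MODULE(virtio_mmio, simplebus, vtmmio_fdt_driver,
 *	    vtmmio_fdt_devclass, 0, 0);
 */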