/*-
 * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Portions of this software were developed by Andrew Turner
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * VirtIO MMIO interface.
 * This driver is heavily based on the VirtIO PCI interface driver.
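 * It supports both the legacy (MMIO version 1) and the modern (MMIO
 * version 2) register layouts.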
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/rman.h>
#include <sys/endian.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/mmio/virtio_mmio.h>

#include "virtio_mmio_if.h"
#include "virtio_bus_if.h"
#include "virtio_if.h"

struct vtmmio_virtqueue {
	struct virtqueue *vtv_vq;
	int vtv_no_intr;
};

static int	vtmmio_detach(device_t);
static int	vtmmio_suspend(device_t);
static int	vtmmio_resume(device_t);
static int	vtmmio_shutdown(device_t);
static void	vtmmio_driver_added(device_t, driver_t *);
static void	vtmmio_child_detached(device_t, device_t);
static int	vtmmio_read_ivar(device_t, device_t, int, uintptr_t *);
static int	vtmmio_write_ivar(device_t, device_t, int, uintptr_t);
static uint64_t	vtmmio_negotiate_features(device_t, uint64_t);
static int	vtmmio_finalize_features(device_t);
static int	vtmmio_with_feature(device_t, uint64_t);
static void	vtmmio_set_virtqueue(struct vtmmio_softc *sc,
		    struct virtqueue *vq, uint32_t size);
static int	vtmmio_alloc_virtqueues(device_t, int, int,
		    struct vq_alloc_info *);
static int	vtmmio_setup_intr(device_t, enum intr_type);
static void	vtmmio_stop(device_t);
static void	vtmmio_poll(device_t);
static int	vtmmio_reinit(device_t, uint64_t);
static void	vtmmio_reinit_complete(device_t);
static void	vtmmio_notify_virtqueue(device_t, uint16_t, bus_size_t);
static int	vtmmio_config_generation(device_t);
static uint8_t	vtmmio_get_status(device_t);
static void	vtmmio_set_status(device_t, uint8_t);
static void	vtmmio_read_dev_config(device_t, bus_size_t, void *, int);
static uint64_t	vtmmio_read_dev_config_8(struct vtmmio_softc *, bus_size_t);
static void	vtmmio_write_dev_config(device_t, bus_size_t, const void *, int);
static void	vtmmio_describe_features(struct vtmmio_softc *, const char *,
		    uint64_t);
static void	vtmmio_probe_and_attach_child(struct vtmmio_softc *);
static int	vtmmio_reinit_virtqueue(struct vtmmio_softc *, int);
static void	vtmmio_free_interrupts(struct vtmmio_softc *);
static void	vtmmio_free_virtqueues(struct vtmmio_softc *);
static void	vtmmio_release_child_resources(struct vtmmio_softc *);
static void	vtmmio_reset(struct vtmmio_softc *);
static void	vtmmio_select_virtqueue(struct vtmmio_softc *, int);
static void	vtmmio_vq_intr(void *);

/*
 * I/O port read/write wrappers.
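 * These wrap bus_space accesses to the device's memory-mapped register
 * window (res[0]); the write variants also invoke the optional platform
 * PREWRITE/NOTE hooks around each register write.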
 */
#define	vtmmio_write_config_1(sc, o, v)				\
do {								\
	if (sc->platform != NULL)				\
		VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v));	\
	bus_write_1((sc)->res[0], (o), (v));			\
	if (sc->platform != NULL)				\
		VIRTIO_MMIO_NOTE(sc->platform, (o), (v));	\
} while (0)
#define	vtmmio_write_config_2(sc, o, v)				\
do {								\
	if (sc->platform != NULL)				\
		VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v));	\
	bus_write_2((sc)->res[0], (o), (v));			\
	if (sc->platform != NULL)				\
		VIRTIO_MMIO_NOTE(sc->platform, (o), (v));	\
} while (0)
#define	vtmmio_write_config_4(sc, o, v)				\
do {								\
	if (sc->platform != NULL)				\
		VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v));	\
	bus_write_4((sc)->res[0], (o), (v));			\
	if (sc->platform != NULL)				\
		VIRTIO_MMIO_NOTE(sc->platform, (o), (v));	\
} while (0)

#define	vtmmio_read_config_1(sc, o) \
	bus_read_1((sc)->res[0], (o))
#define	vtmmio_read_config_2(sc, o) \
	bus_read_2((sc)->res[0], (o))
#define	vtmmio_read_config_4(sc, o) \
	bus_read_4((sc)->res[0], (o))

static device_method_t vtmmio_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_attach,	vtmmio_attach),
	DEVMETHOD(device_detach,	vtmmio_detach),
	DEVMETHOD(device_suspend,	vtmmio_suspend),
	DEVMETHOD(device_resume,	vtmmio_resume),
	DEVMETHOD(device_shutdown,	vtmmio_shutdown),

	/* Bus interface. */
	DEVMETHOD(bus_driver_added,	vtmmio_driver_added),
	DEVMETHOD(bus_child_detached,	vtmmio_child_detached),
	DEVMETHOD(bus_child_pnpinfo,	virtio_child_pnpinfo),
	DEVMETHOD(bus_read_ivar,	vtmmio_read_ivar),
	DEVMETHOD(bus_write_ivar,	vtmmio_write_ivar),

	/* VirtIO bus interface. */
	DEVMETHOD(virtio_bus_negotiate_features,  vtmmio_negotiate_features),
	DEVMETHOD(virtio_bus_finalize_features,   vtmmio_finalize_features),
	DEVMETHOD(virtio_bus_with_feature,        vtmmio_with_feature),
	DEVMETHOD(virtio_bus_alloc_virtqueues,    vtmmio_alloc_virtqueues),
	DEVMETHOD(virtio_bus_setup_intr,          vtmmio_setup_intr),
	DEVMETHOD(virtio_bus_stop,                vtmmio_stop),
	DEVMETHOD(virtio_bus_poll,                vtmmio_poll),
	DEVMETHOD(virtio_bus_reinit,              vtmmio_reinit),
	DEVMETHOD(virtio_bus_reinit_complete,     vtmmio_reinit_complete),
	DEVMETHOD(virtio_bus_notify_vq,           vtmmio_notify_virtqueue),
	DEVMETHOD(virtio_bus_config_generation,   vtmmio_config_generation),
	DEVMETHOD(virtio_bus_read_device_config,  vtmmio_read_dev_config),
	DEVMETHOD(virtio_bus_write_device_config, vtmmio_write_dev_config),

	DEVMETHOD_END
};

DEFINE_CLASS_0(virtio_mmio, vtmmio_driver, vtmmio_methods,
    sizeof(struct vtmmio_softc));

MODULE_VERSION(virtio_mmio, 1);

int
vtmmio_probe(device_t dev)
{
	struct vtmmio_softc *sc;
	int rid;
	uint32_t magic, version;

	sc = device_get_softc(dev);

	rid = 0;
	sc->res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->res[0] == NULL) {
		device_printf(dev, "Cannot allocate memory window.\n");
		return (ENXIO);
	}

	magic = vtmmio_read_config_4(sc, VIRTIO_MMIO_MAGIC_VALUE);
	if (magic != VIRTIO_MMIO_MAGIC_VIRT) {
		device_printf(dev, "Bad magic value %#x\n", magic);
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res[0]);
		return (ENXIO);
	}

	version = vtmmio_read_config_4(sc, VIRTIO_MMIO_VERSION);
	if (version < 1 || version > 2) {
		device_printf(dev, "Unsupported version: %#x\n", version);
		bus_release_resource(dev, SYS_RES_MEMORY, rid,
		    sc->res[0]);
		return (ENXIO);
	}

	if (vtmmio_read_config_4(sc, VIRTIO_MMIO_DEVICE_ID) == 0) {
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res[0]);
		return (ENXIO);
	}

	bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res[0]);

	device_set_desc(dev, "VirtIO MMIO adapter");
	return (BUS_PROBE_DEFAULT);
}

static int
vtmmio_setup_intr(device_t dev, enum intr_type type)
{
	struct vtmmio_softc *sc;
	int rid;
	int err;

	sc = device_get_softc(dev);

	if (sc->platform != NULL) {
		err = VIRTIO_MMIO_SETUP_INTR(sc->platform, sc->dev,
		    vtmmio_vq_intr, sc);
		if (err == 0) {
			/* Okay, we have backend-specific interrupts. */
			return (0);
		}
	}

	rid = 0;
	sc->res[1] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (!sc->res[1]) {
		device_printf(dev, "Can't allocate interrupt\n");
		return (ENXIO);
	}

	if (bus_setup_intr(dev, sc->res[1], type | INTR_MPSAFE,
	    NULL, vtmmio_vq_intr, sc, &sc->ih)) {
		device_printf(dev, "Can't setup the interrupt\n");
		return (ENXIO);
	}

	return (0);
}

int
vtmmio_attach(device_t dev)
{
	struct vtmmio_softc *sc;
	device_t child;
	int rid;

	sc = device_get_softc(dev);
	sc->dev = dev;

	rid = 0;
	sc->res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->res[0] == NULL) {
		device_printf(dev, "Cannot allocate memory window.\n");
		return (ENXIO);
	}

	sc->vtmmio_version = vtmmio_read_config_4(sc, VIRTIO_MMIO_VERSION);

	vtmmio_reset(sc);

	/* Tell the host we've noticed this device. */
	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);

	if ((child = device_add_child(dev, NULL, -1)) == NULL) {
		device_printf(dev, "Cannot create child device.\n");
		vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
		vtmmio_detach(dev);
		return (ENOMEM);
	}

	sc->vtmmio_child_dev = child;
	vtmmio_probe_and_attach_child(sc);

	return (0);
}

static int
vtmmio_detach(device_t dev)
{
	struct vtmmio_softc *sc;
	device_t child;
	int error;

	sc = device_get_softc(dev);

	if ((child = sc->vtmmio_child_dev) != NULL) {
		error = device_delete_child(dev, child);
		if (error)
			return (error);
		sc->vtmmio_child_dev = NULL;
	}

	vtmmio_reset(sc);

	if (sc->res[0] != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, 0,
		    sc->res[0]);
		sc->res[0] = NULL;
	}

	return (0);
}

static int
vtmmio_suspend(device_t dev)
{

	return (bus_generic_suspend(dev));
}

static int
vtmmio_resume(device_t dev)
{

	return (bus_generic_resume(dev));
}

static int
vtmmio_shutdown(device_t dev)
{

	(void) bus_generic_shutdown(dev);

	/*
	 * Forcibly stop the host device.
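	 * Resetting it here ensures the host side stops using the
	 * virtqueues before the system goes down.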
	 */
	vtmmio_stop(dev);

	return (0);
}

static void
vtmmio_driver_added(device_t dev, driver_t *driver)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	vtmmio_probe_and_attach_child(sc);
}

static void
vtmmio_child_detached(device_t dev, device_t child)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	vtmmio_reset(sc);
	vtmmio_release_child_resources(sc);
}

static int
vtmmio_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	if (sc->vtmmio_child_dev != child)
		return (ENOENT);

	switch (index) {
	case VIRTIO_IVAR_DEVTYPE:
	case VIRTIO_IVAR_SUBDEVICE:
		*result = vtmmio_read_config_4(sc, VIRTIO_MMIO_DEVICE_ID);
		break;
	case VIRTIO_IVAR_VENDOR:
		*result = vtmmio_read_config_4(sc, VIRTIO_MMIO_VENDOR_ID);
		break;
	case VIRTIO_IVAR_SUBVENDOR:
	case VIRTIO_IVAR_DEVICE:
		/*
		 * Dummy value for fields not present in this bus. Used by
		 * bus-agnostic virtio_child_pnpinfo.
		 */
		*result = 0;
		break;
	case VIRTIO_IVAR_MODERN:
		/*
		 * There are several modern (aka MMIO v2) spec compliance
		 * issues with this driver, but keep the status quo.
		 */
		*result = sc->vtmmio_version > 1;
		break;
	default:
		return (ENOENT);
	}

	return (0);
}

static int
vtmmio_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	if (sc->vtmmio_child_dev != child)
		return (ENOENT);

	switch (index) {
	case VIRTIO_IVAR_FEATURE_DESC:
		sc->vtmmio_child_feat_desc = (void *) value;
		break;
	default:
		return (ENOENT);
	}

	return (0);
}

static uint64_t
vtmmio_negotiate_features(device_t dev, uint64_t child_features)
{
	struct vtmmio_softc *sc;
	uint64_t host_features, features;

	sc = device_get_softc(dev);

	if (sc->vtmmio_version > 1) {
		child_features |= VIRTIO_F_VERSION_1;
	}

	vtmmio_write_config_4(sc, VIRTIO_MMIO_HOST_FEATURES_SEL, 1);
	host_features = vtmmio_read_config_4(sc, VIRTIO_MMIO_HOST_FEATURES);
	host_features <<= 32;

	vtmmio_write_config_4(sc, VIRTIO_MMIO_HOST_FEATURES_SEL, 0);
	host_features |= vtmmio_read_config_4(sc, VIRTIO_MMIO_HOST_FEATURES);

	vtmmio_describe_features(sc, "host", host_features);

	/*
	 * Limit negotiated features to what the driver, virtqueue, and
	 * host all support.
	 */
	features = host_features & child_features;
	features = virtio_filter_transport_features(features);
	sc->vtmmio_features = features;

	vtmmio_describe_features(sc, "negotiated", features);

	vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES_SEL, 1);
	vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES, features >> 32);

	vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES_SEL, 0);
	vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES, features);

	return (features);
}

static int
vtmmio_finalize_features(device_t dev)
{
	struct vtmmio_softc *sc;
	uint8_t status;

	sc = device_get_softc(dev);

	if (sc->vtmmio_version > 1) {
		/*
		 * Must re-read the status after setting it to verify the
		 * negotiated features were accepted by the device.
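		 * This is the FEATURES_OK handshake of the modern (virtio
		 * 1.0) interface; if the device clears the bit, it has
		 * rejected the feature subset we offered.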
481 */ 482 vtmmio_set_status(dev, VIRTIO_CONFIG_S_FEATURES_OK); 483 484 status = vtmmio_get_status(dev); 485 if ((status & VIRTIO_CONFIG_S_FEATURES_OK) == 0) { 486 device_printf(dev, "desired features were not accepted\n"); 487 return (ENOTSUP); 488 } 489 } 490 491 return (0); 492 } 493 494 static int 495 vtmmio_with_feature(device_t dev, uint64_t feature) 496 { 497 struct vtmmio_softc *sc; 498 499 sc = device_get_softc(dev); 500 501 return ((sc->vtmmio_features & feature) != 0); 502 } 503 504 static void 505 vtmmio_set_virtqueue(struct vtmmio_softc *sc, struct virtqueue *vq, 506 uint32_t size) 507 { 508 vm_paddr_t paddr; 509 510 vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_NUM, size); 511 512 if (sc->vtmmio_version == 1) { 513 vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_ALIGN, 514 VIRTIO_MMIO_VRING_ALIGN); 515 paddr = virtqueue_paddr(vq); 516 vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN, 517 paddr >> PAGE_SHIFT); 518 } else { 519 paddr = virtqueue_desc_paddr(vq); 520 vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_DESC_LOW, 521 paddr); 522 vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_DESC_HIGH, 523 ((uint64_t)paddr) >> 32); 524 525 paddr = virtqueue_avail_paddr(vq); 526 vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_AVAIL_LOW, 527 paddr); 528 vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_AVAIL_HIGH, 529 ((uint64_t)paddr) >> 32); 530 531 paddr = virtqueue_used_paddr(vq); 532 vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_USED_LOW, 533 paddr); 534 vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_USED_HIGH, 535 ((uint64_t)paddr) >> 32); 536 537 vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_READY, 1); 538 } 539 } 540 541 static int 542 vtmmio_alloc_virtqueues(device_t dev, int flags, int nvqs, 543 struct vq_alloc_info *vq_info) 544 { 545 struct vtmmio_virtqueue *vqx; 546 struct vq_alloc_info *info; 547 struct vtmmio_softc *sc; 548 struct virtqueue *vq; 549 uint32_t size; 550 int idx, error; 551 552 sc = device_get_softc(dev); 553 554 if (sc->vtmmio_nvqs != 0) 555 return (EALREADY); 556 if (nvqs <= 0) 557 return (EINVAL); 558 559 sc->vtmmio_vqs = malloc(nvqs * sizeof(struct vtmmio_virtqueue), 560 M_DEVBUF, M_NOWAIT | M_ZERO); 561 if (sc->vtmmio_vqs == NULL) 562 return (ENOMEM); 563 564 if (sc->vtmmio_version == 1) { 565 vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_PAGE_SIZE, 566 (1 << PAGE_SHIFT)); 567 } 568 569 for (idx = 0; idx < nvqs; idx++) { 570 vqx = &sc->vtmmio_vqs[idx]; 571 info = &vq_info[idx]; 572 573 vtmmio_select_virtqueue(sc, idx); 574 size = vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX); 575 576 error = virtqueue_alloc(dev, idx, size, 577 VIRTIO_MMIO_QUEUE_NOTIFY, VIRTIO_MMIO_VRING_ALIGN, 578 ~(vm_paddr_t)0, info, &vq); 579 if (error) { 580 device_printf(dev, 581 "cannot allocate virtqueue %d: %d\n", 582 idx, error); 583 break; 584 } 585 586 vtmmio_set_virtqueue(sc, vq, size); 587 588 vqx->vtv_vq = *info->vqai_vq = vq; 589 vqx->vtv_no_intr = info->vqai_intr == NULL; 590 591 sc->vtmmio_nvqs++; 592 } 593 594 if (error) 595 vtmmio_free_virtqueues(sc); 596 597 return (error); 598 } 599 600 static void 601 vtmmio_stop(device_t dev) 602 { 603 604 vtmmio_reset(device_get_softc(dev)); 605 } 606 607 static void 608 vtmmio_poll(device_t dev) 609 { 610 struct vtmmio_softc *sc; 611 612 sc = device_get_softc(dev); 613 614 if (sc->platform != NULL) 615 VIRTIO_MMIO_POLL(sc->platform); 616 } 617 618 static int 619 vtmmio_reinit(device_t dev, uint64_t features) 620 { 621 struct vtmmio_softc *sc; 622 int idx, error; 623 624 sc = device_get_softc(dev); 625 626 if (vtmmio_get_status(dev) != 
	    VIRTIO_CONFIG_STATUS_RESET)
		vtmmio_stop(dev);

	/*
	 * Quickly drive the status through ACK and DRIVER. The device
	 * does not become usable again until vtmmio_reinit_complete().
	 */
	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);

	/*
	 * TODO: Check that no features are added beyond what was
	 * originally negotiated.
	 */
	vtmmio_negotiate_features(dev, features);
	error = vtmmio_finalize_features(dev);
	if (error) {
		device_printf(dev, "cannot finalize features during reinit\n");
		return (error);
	}

	if (sc->vtmmio_version == 1) {
		vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_PAGE_SIZE,
		    (1 << PAGE_SHIFT));
	}

	for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
		error = vtmmio_reinit_virtqueue(sc, idx);
		if (error)
			return (error);
	}

	return (0);
}

static void
vtmmio_reinit_complete(device_t dev)
{

	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}

static void
vtmmio_notify_virtqueue(device_t dev, uint16_t queue, bus_size_t offset)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);
	MPASS(offset == VIRTIO_MMIO_QUEUE_NOTIFY);

	vtmmio_write_config_4(sc, offset, queue);
}

static int
vtmmio_config_generation(device_t dev)
{
	struct vtmmio_softc *sc;
	uint32_t gen;

	sc = device_get_softc(dev);

	if (sc->vtmmio_version > 1)
		gen = vtmmio_read_config_4(sc, VIRTIO_MMIO_CONFIG_GENERATION);
	else
		gen = 0;

	return (gen);
}

static uint8_t
vtmmio_get_status(device_t dev)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	return (vtmmio_read_config_4(sc, VIRTIO_MMIO_STATUS));
}

static void
vtmmio_set_status(device_t dev, uint8_t status)
{
	struct vtmmio_softc *sc;

	sc = device_get_softc(dev);

	if (status != VIRTIO_CONFIG_STATUS_RESET)
		status |= vtmmio_get_status(dev);

	vtmmio_write_config_4(sc, VIRTIO_MMIO_STATUS, status);
}

static void
vtmmio_read_dev_config(device_t dev, bus_size_t offset,
    void *dst, int length)
{
	struct vtmmio_softc *sc;
	bus_size_t off;
	uint8_t *d;
	int size;

	sc = device_get_softc(dev);
	off = VIRTIO_MMIO_CONFIG + offset;

	/*
	 * The non-legacy MMIO specification adds the following restriction:
	 *
	 *   4.2.2.2: For the device-specific configuration space, the driver
	 *   MUST use 8 bit wide accesses for 8 bit wide fields, 16 bit wide
	 *   and aligned accesses for 16 bit wide fields and 32 bit wide and
	 *   aligned accesses for 32 and 64 bit wide fields.
	 *
	 * The endianness also varies between non-legacy and legacy:
	 *
	 *   2.4: Note: The device configuration space uses the little-endian
	 *   format for multi-byte fields.
	 *
	 *   2.4.3: Note that for legacy interfaces, device configuration space
	 *   is generally the guest's native endian, rather than PCI's
	 *   little-endian. The correct endian-ness is documented for each
	 *   device.
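	 *
	 * Accordingly, the modern path below uses naturally sized accesses
	 * and converts from little-endian, while the legacy path copies the
	 * configuration space byte by byte (or in larger native-endian
	 * chunks when ALLOW_WORD_ALIGNED_ACCESS is defined).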
	 */
	if (sc->vtmmio_version > 1) {
		switch (length) {
		case 1:
			*(uint8_t *)dst = vtmmio_read_config_1(sc, off);
			break;
		case 2:
			*(uint16_t *)dst =
			    le16toh(vtmmio_read_config_2(sc, off));
			break;
		case 4:
			*(uint32_t *)dst =
			    le32toh(vtmmio_read_config_4(sc, off));
			break;
		case 8:
			*(uint64_t *)dst = vtmmio_read_dev_config_8(sc, off);
			break;
		default:
			panic("%s: invalid length %d\n", __func__, length);
		}

		return;
	}

	for (d = dst; length > 0; d += size, off += size, length -= size) {
#ifdef ALLOW_WORD_ALIGNED_ACCESS
		if (length >= 4) {
			size = 4;
			*(uint32_t *)d = vtmmio_read_config_4(sc, off);
		} else if (length >= 2) {
			size = 2;
			*(uint16_t *)d = vtmmio_read_config_2(sc, off);
		} else
#endif
		{
			size = 1;
			*d = vtmmio_read_config_1(sc, off);
		}
	}
}

static uint64_t
vtmmio_read_dev_config_8(struct vtmmio_softc *sc, bus_size_t off)
{
	device_t dev;
	int gen;
	uint32_t val0, val1;

	dev = sc->dev;

	do {
		gen = vtmmio_config_generation(dev);
		val0 = le32toh(vtmmio_read_config_4(sc, off));
		val1 = le32toh(vtmmio_read_config_4(sc, off + 4));
	} while (gen != vtmmio_config_generation(dev));

	return (((uint64_t) val1 << 32) | val0);
}

static void
vtmmio_write_dev_config(device_t dev, bus_size_t offset,
    const void *src, int length)
{
	struct vtmmio_softc *sc;
	bus_size_t off;
	const uint8_t *s;
	int size;

	sc = device_get_softc(dev);
	off = VIRTIO_MMIO_CONFIG + offset;

	/*
	 * The non-legacy MMIO specification adds size and alignment
	 * restrictions. It also changes the endianness from native-endian to
	 * little-endian. See vtmmio_read_dev_config.
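	 *
	 * There is no 8-byte register access here, so 64-bit fields are
	 * written as two little-endian 32-bit halves, low word first.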
	 */
	if (sc->vtmmio_version > 1) {
		switch (length) {
		case 1:
			vtmmio_write_config_1(sc, off, *(const uint8_t *)src);
			break;
		case 2:
			vtmmio_write_config_2(sc, off,
			    htole16(*(const uint16_t *)src));
			break;
		case 4:
			vtmmio_write_config_4(sc, off,
			    htole32(*(const uint32_t *)src));
			break;
		case 8:
			vtmmio_write_config_4(sc, off,
			    htole32(*(const uint64_t *)src));
			vtmmio_write_config_4(sc, off + 4,
			    htole32((*(const uint64_t *)src) >> 32));
			break;
		default:
			panic("%s: invalid length %d\n", __func__, length);
		}

		return;
	}

	for (s = src; length > 0; s += size, off += size, length -= size) {
#ifdef ALLOW_WORD_ALIGNED_ACCESS
		if (length >= 4) {
			size = 4;
			vtmmio_write_config_4(sc, off, *(uint32_t *)s);
		} else if (length >= 2) {
			size = 2;
			vtmmio_write_config_2(sc, off, *(uint16_t *)s);
		} else
#endif
		{
			size = 1;
			vtmmio_write_config_1(sc, off, *s);
		}
	}
}

static void
vtmmio_describe_features(struct vtmmio_softc *sc, const char *msg,
    uint64_t features)
{
	device_t dev, child;

	dev = sc->dev;
	child = sc->vtmmio_child_dev;

	if (device_is_attached(child) || bootverbose == 0)
		return;

	virtio_describe(dev, msg, features, sc->vtmmio_child_feat_desc);
}

static void
vtmmio_probe_and_attach_child(struct vtmmio_softc *sc)
{
	device_t dev, child;

	dev = sc->dev;
	child = sc->vtmmio_child_dev;

	if (child == NULL)
		return;

	if (device_get_state(child) != DS_NOTPRESENT) {
		return;
	}

	if (device_probe(child) != 0) {
		return;
	}

	vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER);
	if (device_attach(child) != 0) {
		vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
		vtmmio_reset(sc);
		vtmmio_release_child_resources(sc);
		/*
		 * Reset status for future attempt.
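		 * A later driver load retries the attach via
		 * vtmmio_driver_added().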
		 */
		vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
	} else {
		vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
		VIRTIO_ATTACH_COMPLETED(child);
	}
}

static int
vtmmio_reinit_virtqueue(struct vtmmio_softc *sc, int idx)
{
	struct vtmmio_virtqueue *vqx;
	struct virtqueue *vq;
	int error;
	uint16_t size;

	vqx = &sc->vtmmio_vqs[idx];
	vq = vqx->vtv_vq;

	KASSERT(vq != NULL, ("%s: vq %d not allocated", __func__, idx));

	vtmmio_select_virtqueue(sc, idx);
	size = vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX);

	error = virtqueue_reinit(vq, size);
	if (error)
		return (error);

	vtmmio_set_virtqueue(sc, vq, size);

	return (0);
}

static void
vtmmio_free_interrupts(struct vtmmio_softc *sc)
{

	if (sc->ih != NULL)
		bus_teardown_intr(sc->dev, sc->res[1], sc->ih);

	if (sc->res[1] != NULL)
		bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->res[1]);
}

static void
vtmmio_free_virtqueues(struct vtmmio_softc *sc)
{
	struct vtmmio_virtqueue *vqx;
	int idx;

	for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
		vqx = &sc->vtmmio_vqs[idx];

		vtmmio_select_virtqueue(sc, idx);
		if (sc->vtmmio_version > 1) {
			vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_READY, 0);
			vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_READY);
		} else
			vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN, 0);

		virtqueue_free(vqx->vtv_vq);
		vqx->vtv_vq = NULL;
	}

	free(sc->vtmmio_vqs, M_DEVBUF);
	sc->vtmmio_vqs = NULL;
	sc->vtmmio_nvqs = 0;
}

static void
vtmmio_release_child_resources(struct vtmmio_softc *sc)
{

	vtmmio_free_interrupts(sc);
	vtmmio_free_virtqueues(sc);
}

static void
vtmmio_reset(struct vtmmio_softc *sc)
{

	/*
	 * Setting the status to RESET sets the host device to
	 * the original, uninitialized state.
	 */
	vtmmio_set_status(sc->dev, VIRTIO_CONFIG_STATUS_RESET);
}

static void
vtmmio_select_virtqueue(struct vtmmio_softc *sc, int idx)
{

	vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_SEL, idx);
}

static void
vtmmio_vq_intr(void *arg)
{
	struct vtmmio_virtqueue *vqx;
	struct vtmmio_softc *sc;
	struct virtqueue *vq;
	uint32_t status;
	int idx;

	sc = arg;

	status = vtmmio_read_config_4(sc, VIRTIO_MMIO_INTERRUPT_STATUS);
	vtmmio_write_config_4(sc, VIRTIO_MMIO_INTERRUPT_ACK, status);

	/* The config changed. */
	if (status & VIRTIO_MMIO_INT_CONFIG)
		if (sc->vtmmio_child_dev != NULL)
			VIRTIO_CONFIG_CHANGE(sc->vtmmio_child_dev);

	/* Notify all virtqueues. */
	if (status & VIRTIO_MMIO_INT_VRING) {
		for (idx = 0; idx < sc->vtmmio_nvqs; idx++) {
			vqx = &sc->vtmmio_vqs[idx];
			if (vqx->vtv_no_intr == 0) {
				vq = vqx->vtv_vq;
				virtqueue_intr(vq);
			}
		}
	}
}