/*-
 * Copyright (c) 2009-2012,2016-2017 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 * Copyright (c) 2012 Citrix Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * VM Bus Driver Implementation
 */
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#if defined(__aarch64__)
#include <dev/psci/smccc.h>
#include <dev/hyperv/vmbus/aarch64/hyperv_machdep.h>
#include <dev/hyperv/vmbus/aarch64/hyperv_reg.h>
#else
#include <dev/hyperv/vmbus/x86/hyperv_machdep.h>
#include <dev/hyperv/vmbus/x86/hyperv_reg.h>
#include <machine/intr_machdep.h>
#include <x86/include/apicvar.h>
#endif
#include <machine/metadata.h>
#include <machine/md_var.h>
#include <machine/resource.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>

#include <dev/hyperv/include/hyperv.h>
#include <dev/hyperv/include/vmbus_xact.h>
#include <dev/hyperv/vmbus/hyperv_var.h>
#include <dev/hyperv/vmbus/vmbus_reg.h>
#include <dev/hyperv/vmbus/vmbus_var.h>
#include <dev/hyperv/vmbus/vmbus_chanvar.h>
#include <dev/hyperv/vmbus/hyperv_common_reg.h>
#include "acpi_if.h"
#include "pcib_if.h"
#include "vmbus_if.h"

#define VMBUS_GPADL_START		0xe1e10

struct vmbus_msghc {
	struct vmbus_xact		*mh_xact;
	struct hypercall_postmsg_in	mh_inprm_save;
};

static void			vmbus_identify(driver_t *, device_t);
static int			vmbus_probe(device_t);
static int			vmbus_attach(device_t);
static int			vmbus_detach(device_t);
static int			vmbus_read_ivar(device_t, device_t, int,
				    uintptr_t *);
static int			vmbus_child_pnpinfo(device_t, device_t,
				    struct sbuf *);
static struct resource		*vmbus_alloc_resource(device_t dev,
				    device_t child, int type, int *rid,
				    rman_res_t start, rman_res_t end,
				    rman_res_t count, u_int flags);
static int			vmbus_alloc_msi(device_t bus, device_t dev,
				    int count, int maxcount, int *irqs);
static int			vmbus_release_msi(device_t bus, device_t dev,
				    int count, int *irqs);
static int			vmbus_alloc_msix(device_t bus, device_t dev,
				    int *irq);
static int			vmbus_release_msix(device_t bus, device_t dev,
				    int irq);
static int			vmbus_map_msi(device_t bus, device_t dev,
				    int irq, uint64_t *addr, uint32_t *data);
static uint32_t			vmbus_get_version_method(device_t, device_t);
static int			vmbus_probe_guid_method(device_t, device_t,
				    const struct hyperv_guid *);
static uint32_t			vmbus_get_vcpu_id_method(device_t bus,
				    device_t dev, int cpu);
static struct taskqueue		*vmbus_get_eventtq_method(device_t, device_t,
				    int);
#if defined(EARLY_AP_STARTUP)
static void			vmbus_intrhook(void *);
#endif

static int			vmbus_init(struct vmbus_softc *);
static int			vmbus_connect(struct vmbus_softc *, uint32_t);
static int			vmbus_req_channels(struct vmbus_softc *sc);
static void			vmbus_disconnect(struct vmbus_softc *);
static int			vmbus_scan(struct vmbus_softc *);
static void			vmbus_scan_teardown(struct vmbus_softc *);
static void			vmbus_scan_done(struct vmbus_softc *,
				    const struct vmbus_message *);
static void			vmbus_chanmsg_handle(struct vmbus_softc *,
				    const struct vmbus_message *);
static void			vmbus_msg_task(void *, int);
static void			vmbus_synic_setup(void *);
static void			vmbus_synic_teardown(void *);
static int			vmbus_sysctl_version(SYSCTL_HANDLER_ARGS);
static int			vmbus_dma_alloc(struct vmbus_softc *);
static void			vmbus_dma_free(struct vmbus_softc *);
static int			vmbus_intr_setup(struct vmbus_softc *);
static void			vmbus_intr_teardown(struct vmbus_softc *);
static int			vmbus_doattach(struct vmbus_softc *);
static void			vmbus_event_proc_dummy(struct vmbus_softc *,
				    int);
static bus_dma_tag_t		vmbus_get_dma_tag(device_t parent, device_t child);
static struct vmbus_softc	*vmbus_sc;

SYSCTL_NODE(_hw, OID_AUTO, vmbus, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "Hyper-V vmbus");

static int			vmbus_pin_evttask = 1;
SYSCTL_INT(_hw_vmbus, OID_AUTO, pin_evttask, CTLFLAG_RDTUN,
    &vmbus_pin_evttask, 0, "Pin event tasks to their respective CPU");
uint32_t			vmbus_current_version;

static const uint32_t		vmbus_version[] = {
	VMBUS_VERSION_WIN10,
	VMBUS_VERSION_WIN8_1,
	VMBUS_VERSION_WIN8,
	VMBUS_VERSION_WIN7,
	VMBUS_VERSION_WS2008
};

static const vmbus_chanmsg_proc_t
vmbus_chanmsg_handlers[VMBUS_CHANMSG_TYPE_MAX] = {
	VMBUS_CHANMSG_PROC(CHOFFER_DONE, vmbus_scan_done),
	VMBUS_CHANMSG_PROC_WAKEUP(CONNECT_RESP)
};

static device_method_t vmbus_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,		vmbus_identify),
	DEVMETHOD(device_probe,			vmbus_probe),
	DEVMETHOD(device_attach,		vmbus_attach),
	DEVMETHOD(device_detach,		vmbus_detach),
	DEVMETHOD(device_shutdown,		bus_generic_shutdown),
	DEVMETHOD(device_suspend,		bus_generic_suspend),
	DEVMETHOD(device_resume,		bus_generic_resume),

	/* Bus interface */
	DEVMETHOD(bus_add_child,		bus_generic_add_child),
	DEVMETHOD(bus_print_child,		bus_generic_print_child),
	DEVMETHOD(bus_read_ivar,		vmbus_read_ivar),
	DEVMETHOD(bus_child_pnpinfo,		vmbus_child_pnpinfo),
	DEVMETHOD(bus_alloc_resource,		vmbus_alloc_resource),
	DEVMETHOD(bus_release_resource,		bus_generic_release_resource),
	DEVMETHOD(bus_activate_resource,	bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource,	bus_generic_deactivate_resource),
	DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,		bus_generic_teardown_intr),
	DEVMETHOD(bus_get_cpus,			bus_generic_get_cpus),
	DEVMETHOD(bus_get_dma_tag,		vmbus_get_dma_tag),

	/* pcib interface */
	DEVMETHOD(pcib_alloc_msi,		vmbus_alloc_msi),
	DEVMETHOD(pcib_release_msi,		vmbus_release_msi),
	DEVMETHOD(pcib_alloc_msix,		vmbus_alloc_msix),
	DEVMETHOD(pcib_release_msix,		vmbus_release_msix),
	DEVMETHOD(pcib_map_msi,			vmbus_map_msi),

	/* Vmbus interface */
	DEVMETHOD(vmbus_get_version,		vmbus_get_version_method),
	DEVMETHOD(vmbus_probe_guid,		vmbus_probe_guid_method),
	DEVMETHOD(vmbus_get_vcpu_id,		vmbus_get_vcpu_id_method),
	DEVMETHOD(vmbus_get_event_taskq,	vmbus_get_eventtq_method),

	DEVMETHOD_END
};

static driver_t vmbus_driver = {
	"vmbus",
	vmbus_methods,
	sizeof(struct vmbus_softc)
};

DRIVER_MODULE(vmbus, pcib, vmbus_driver, NULL, NULL);
DRIVER_MODULE(vmbus, acpi_syscontainer, vmbus_driver, NULL, NULL);

MODULE_DEPEND(vmbus, acpi, 1, 1, 1);
MODULE_DEPEND(vmbus, pci, 1, 1, 1);
MODULE_VERSION(vmbus, 1);

static __inline struct vmbus_softc *
vmbus_get_softc(void)
{
	return vmbus_sc;
}

static bus_dma_tag_t
vmbus_get_dma_tag(device_t dev, device_t child)
{
	struct vmbus_softc *sc = vmbus_get_softc();

	return (sc->dmat);
}

void
vmbus_msghc_reset(struct vmbus_msghc *mh, size_t dsize)
{
	struct hypercall_postmsg_in *inprm;

	if (dsize > HYPERCALL_POSTMSGIN_DSIZE_MAX)
		panic("invalid data size %zu", dsize);

	inprm = vmbus_xact_req_data(mh->mh_xact);
	memset(inprm, 0, HYPERCALL_POSTMSGIN_SIZE);
	inprm->hc_connid = VMBUS_CONNID_MESSAGE;
	inprm->hc_msgtype = HYPERV_MSGTYPE_CHANNEL;
	inprm->hc_dsize = dsize;
}

struct vmbus_msghc *
vmbus_msghc_get(struct vmbus_softc *sc, size_t dsize)
{
	struct vmbus_msghc *mh;
	struct vmbus_xact *xact;

	if (dsize > HYPERCALL_POSTMSGIN_DSIZE_MAX)
		panic("invalid data size %zu", dsize);

	xact = vmbus_xact_get(sc->vmbus_xc,
	    dsize + __offsetof(struct hypercall_postmsg_in, hc_data[0]));
	if (xact == NULL)
		return (NULL);

	mh = vmbus_xact_priv(xact, sizeof(*mh));
	mh->mh_xact = xact;

	vmbus_msghc_reset(mh, dsize);
	return (mh);
}

void
vmbus_msghc_put(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
{

	vmbus_xact_put(mh->mh_xact);
}

void *
vmbus_msghc_dataptr(struct vmbus_msghc *mh)
{
	struct hypercall_postmsg_in *inprm;

	inprm = vmbus_xact_req_data(mh->mh_xact);
	return (inprm->hc_data);
}

int
vmbus_msghc_exec_noresult(struct vmbus_msghc *mh)
{
	sbintime_t time = SBT_1MS;
	struct hypercall_postmsg_in *inprm;
	bus_addr_t inprm_paddr;
	int i;

	inprm = vmbus_xact_req_data(mh->mh_xact);
	inprm_paddr = vmbus_xact_req_paddr(mh->mh_xact);

	/*
	 * Save the input parameter so that we can restore it if the
	 * Hypercall fails.
	 *
	 * XXX
	 * Is this really necessary?!  i.e. Will the Hypercall ever
	 * overwrite the input parameter?
	 */
	memcpy(&mh->mh_inprm_save, inprm, HYPERCALL_POSTMSGIN_SIZE);

	/*
	 * In order to cope with transient failures, e.g. insufficient
	 * resources on host side, we retry the post message Hypercall
	 * several times.  20 retries seem sufficient.
	 */
#define HC_RETRY_MAX	20

	for (i = 0; i < HC_RETRY_MAX; ++i) {
		uint64_t status;

		status = hypercall_post_message(inprm_paddr);
		if (status == HYPERCALL_STATUS_SUCCESS)
			return 0;

		pause_sbt("hcpmsg", time, 0, C_HARDCLOCK);
		if (time < SBT_1S * 2)
			time *= 2;

		/* Restore input parameter and try again */
		memcpy(inprm, &mh->mh_inprm_save, HYPERCALL_POSTMSGIN_SIZE);
	}

#undef HC_RETRY_MAX

	return EIO;
}

int
vmbus_msghc_exec(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
{
	int error;

	vmbus_xact_activate(mh->mh_xact);
	error = vmbus_msghc_exec_noresult(mh);
	if (error)
		vmbus_xact_deactivate(mh->mh_xact);
	return error;
}

void
vmbus_msghc_exec_cancel(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
{

	vmbus_xact_deactivate(mh->mh_xact);
}

const struct vmbus_message *
vmbus_msghc_wait_result(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
{
	size_t resp_len;

	return (vmbus_xact_wait(mh->mh_xact, &resp_len));
}

const struct vmbus_message *
vmbus_msghc_poll_result(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
{
	size_t resp_len;

	return (vmbus_xact_poll(mh->mh_xact, &resp_len));
}

void
vmbus_msghc_wakeup(struct vmbus_softc *sc, const struct vmbus_message *msg)
{

	vmbus_xact_ctx_wakeup(sc->vmbus_xc, msg, sizeof(*msg));
}

uint32_t
vmbus_gpadl_alloc(struct vmbus_softc *sc)
{
	uint32_t gpadl;

again:
	gpadl = atomic_fetchadd_int(&sc->vmbus_gpadl, 1);
	if (gpadl == 0)
		goto again;
	return (gpadl);
}

/* Used for Hyper-V socket when guest client connects to host */
int
vmbus_req_tl_connect(struct hyperv_guid *guest_srv_id,
    struct hyperv_guid *host_srv_id)
{
	struct vmbus_softc *sc = vmbus_get_softc();
	struct vmbus_chanmsg_tl_connect *req;
	struct vmbus_msghc *mh;
	int error;

	if (!sc)
		return ENXIO;

	mh = vmbus_msghc_get(sc, sizeof(*req));
	if (mh == NULL) {
		device_printf(sc->vmbus_dev,
		    "can not get msg hypercall for tl connect\n");
		return ENXIO;
	}

	req = vmbus_msghc_dataptr(mh);
	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_TL_CONN;
	req->guest_endpoint_id = *guest_srv_id;
	req->host_service_id = *host_srv_id;

	error = vmbus_msghc_exec_noresult(mh);
	vmbus_msghc_put(sc, mh);

	if (error) {
		device_printf(sc->vmbus_dev,
		    "tl connect msg hypercall failed\n");
	}

	return error;
}

static int
vmbus_connect(struct vmbus_softc *sc, uint32_t version)
{
	struct vmbus_chanmsg_connect *req;
	const struct vmbus_message *msg;
	struct vmbus_msghc *mh;
	int error, done = 0;

	mh = vmbus_msghc_get(sc, sizeof(*req));
	if (mh == NULL)
		return ENXIO;

	req = vmbus_msghc_dataptr(mh);
	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CONNECT;
	req->chm_ver = version;
	req->chm_evtflags = pmap_kextract((vm_offset_t)sc->vmbus_evtflags);
	req->chm_mnf1 = pmap_kextract((vm_offset_t)sc->vmbus_mnf1);
	req->chm_mnf2 = pmap_kextract((vm_offset_t)sc->vmbus_mnf2);

	error = vmbus_msghc_exec(sc, mh);
	if (error) {
		vmbus_msghc_put(sc, mh);
		return error;
	}

	msg = vmbus_msghc_wait_result(sc, mh);
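	/*
	 * NOTE:
	 * The wait above is satisfied once the host's CONNECT_RESP channel
	 * message arrives; vmbus_chanmsg_handle() dispatches it through the
	 * VMBUS_CHANMSG_PROC_WAKEUP(CONNECT_RESP) entry registered above,
	 * which completes this transaction via vmbus_msghc_wakeup().
	 */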
	done = ((const struct vmbus_chanmsg_connect_resp *)
	    msg->msg_data)->chm_done;

	vmbus_msghc_put(sc, mh);

	return (done ? 0 : EOPNOTSUPP);
}

static int
vmbus_init(struct vmbus_softc *sc)
{
	int i;

	for (i = 0; i < nitems(vmbus_version); ++i) {
		int error;

		error = vmbus_connect(sc, vmbus_version[i]);
		if (!error) {
			vmbus_current_version = vmbus_version[i];
			sc->vmbus_version = vmbus_version[i];
			device_printf(sc->vmbus_dev, "version %u.%u\n",
			    VMBUS_VERSION_MAJOR(sc->vmbus_version),
			    VMBUS_VERSION_MINOR(sc->vmbus_version));
			return 0;
		}
	}
	return ENXIO;
}

static void
vmbus_disconnect(struct vmbus_softc *sc)
{
	struct vmbus_chanmsg_disconnect *req;
	struct vmbus_msghc *mh;
	int error;

	mh = vmbus_msghc_get(sc, sizeof(*req));
	if (mh == NULL) {
		device_printf(sc->vmbus_dev,
		    "can not get msg hypercall for disconnect\n");
		return;
	}

	req = vmbus_msghc_dataptr(mh);
	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_DISCONNECT;

	error = vmbus_msghc_exec_noresult(mh);
	vmbus_msghc_put(sc, mh);

	if (error) {
		device_printf(sc->vmbus_dev,
		    "disconnect msg hypercall failed\n");
	}
}

static int
vmbus_req_channels(struct vmbus_softc *sc)
{
	struct vmbus_chanmsg_chrequest *req;
	struct vmbus_msghc *mh;
	int error;

	mh = vmbus_msghc_get(sc, sizeof(*req));
	if (mh == NULL)
		return ENXIO;

	req = vmbus_msghc_dataptr(mh);
	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHREQUEST;

	error = vmbus_msghc_exec_noresult(mh);
	vmbus_msghc_put(sc, mh);

	return error;
}

static void
vmbus_scan_done_task(void *xsc, int pending __unused)
{
	struct vmbus_softc *sc = xsc;

	bus_topo_lock();
	sc->vmbus_scandone = true;
	bus_topo_unlock();
	wakeup(&sc->vmbus_scandone);
}

static void
vmbus_scan_done(struct vmbus_softc *sc,
    const struct vmbus_message *msg __unused)
{

	taskqueue_enqueue(sc->vmbus_devtq, &sc->vmbus_scandone_task);
}

static int
vmbus_scan(struct vmbus_softc *sc)
{
	int error;

	/*
	 * Identify, probe and attach the non-channel devices.
	 */
	bus_generic_probe(sc->vmbus_dev);
	bus_generic_attach(sc->vmbus_dev);

	/*
	 * This taskqueue serializes vmbus devices' attach and detach
	 * for channel offer and rescind messages.
	 */
	sc->vmbus_devtq = taskqueue_create("vmbus dev", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->vmbus_devtq);
	taskqueue_start_threads(&sc->vmbus_devtq, 1, PI_NET, "vmbusdev");
	TASK_INIT(&sc->vmbus_scandone_task, 0, vmbus_scan_done_task, sc);

	/*
	 * This taskqueue handles sub-channel detach, so that a vmbus
	 * device's detach running in vmbus_devtq can drain its sub-
	 * channels.
	 */
	sc->vmbus_subchtq = taskqueue_create("vmbus subch", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->vmbus_subchtq);
	taskqueue_start_threads(&sc->vmbus_subchtq, 1, PI_NET, "vmbussch");

	/*
	 * Start vmbus scanning.
	 */
	error = vmbus_req_channels(sc);
	if (error) {
		device_printf(sc->vmbus_dev, "channel request failed: %d\n",
		    error);
		return (error);
	}

	/*
	 * Wait for all vmbus devices from the initial channel offers to be
	 * attached.
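	 *
	 * NOTE:
	 * vmbus_scandone is set by vmbus_scan_done_task(), which
	 * vmbus_scan_done() enqueues on vmbus_devtq when the host reports
	 * CHOFFER_DONE; since vmbus_devtq also runs the per-offer attach
	 * work, the flag only flips after the initial offers have been
	 * handled.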
	 */
	bus_topo_assert();
	while (!sc->vmbus_scandone)
		mtx_sleep(&sc->vmbus_scandone, bus_topo_mtx(), 0, "vmbusdev", 0);

	if (bootverbose) {
		device_printf(sc->vmbus_dev, "device scan, probe and attach "
		    "done\n");
	}
	return (0);
}

static void
vmbus_scan_teardown(struct vmbus_softc *sc)
{

	bus_topo_assert();
	if (sc->vmbus_devtq != NULL) {
		bus_topo_unlock();
		taskqueue_free(sc->vmbus_devtq);
		bus_topo_lock();
		sc->vmbus_devtq = NULL;
	}
	if (sc->vmbus_subchtq != NULL) {
		bus_topo_unlock();
		taskqueue_free(sc->vmbus_subchtq);
		bus_topo_lock();
		sc->vmbus_subchtq = NULL;
	}
}

static void
vmbus_chanmsg_handle(struct vmbus_softc *sc, const struct vmbus_message *msg)
{
	vmbus_chanmsg_proc_t msg_proc;
	uint32_t msg_type;

	msg_type = ((const struct vmbus_chanmsg_hdr *)msg->msg_data)->chm_type;
	if (msg_type >= VMBUS_CHANMSG_TYPE_MAX) {
		device_printf(sc->vmbus_dev, "unknown message type 0x%x\n",
		    msg_type);
		return;
	}

	msg_proc = vmbus_chanmsg_handlers[msg_type];
	if (msg_proc != NULL)
		msg_proc(sc, msg);

	/* Channel specific processing */
	vmbus_chan_msgproc(sc, msg);
}

static void
vmbus_msg_task(void *xsc, int pending __unused)
{
	struct vmbus_softc *sc = xsc;
	volatile struct vmbus_message *msg;

	msg = VMBUS_PCPU_GET(sc, message, curcpu) + VMBUS_SINT_MESSAGE;
	for (;;) {
		if (msg->msg_type == HYPERV_MSGTYPE_NONE) {
			/* No message */
			break;
		} else if (msg->msg_type == HYPERV_MSGTYPE_CHANNEL) {
			/* Channel message */
			vmbus_chanmsg_handle(sc,
			    __DEVOLATILE(const struct vmbus_message *, msg));
		}

		msg->msg_type = HYPERV_MSGTYPE_NONE;
		/*
		 * Make sure the write to msg_type (i.e. setting it to
		 * HYPERV_MSGTYPE_NONE) happens before we read msg_flags
		 * and write the EOM.  Otherwise, the EOM will not deliver
		 * any more messages, since there is no empty slot.
		 *
		 * NOTE:
		 * mb() is used here, since atomic_thread_fence_seq_cst()
		 * will become a compiler fence on UP kernels.
		 */
		mb();
		if (msg->msg_flags & VMBUS_MSGFLAG_PENDING) {
			/*
			 * This will cause a message queue rescan, which
			 * possibly delivers another msg from the hypervisor.
			 */
			WRMSR(MSR_HV_EOM, 0);
		}
	}
}

static __inline int
vmbus_handle_intr1(struct vmbus_softc *sc, struct trapframe *frame, int cpu)
{
	volatile struct vmbus_message *msg;
	struct vmbus_message *msg_base;

	msg_base = VMBUS_PCPU_GET(sc, message, cpu);

	/*
	 * Check the event timer.
	 *
	 * TODO: move this to an independent IDT vector.
	 */
	vmbus_handle_timer_intr1(msg_base, frame);

	/*
	 * Check events.  Hot path for network and storage I/O data; high rate.
	 *
	 * NOTE:
	 * As recommended by the Windows guest fellows, we check events before
	 * checking messages.
	 */
	sc->vmbus_event_proc(sc, cpu);

	/*
	 * Check messages.  Mainly management stuff; ultra low rate.
	 */
	msg = msg_base + VMBUS_SINT_MESSAGE;
	if (__predict_false(msg->msg_type != HYPERV_MSGTYPE_NONE)) {
		taskqueue_enqueue(VMBUS_PCPU_GET(sc, message_tq, cpu),
		    VMBUS_PCPU_PTR(sc, message_task, cpu));
	}

	return (FILTER_HANDLED);
}

void
vmbus_handle_intr(struct trapframe *trap_frame)
{
	struct vmbus_softc *sc = vmbus_get_softc();
	int cpu = curcpu;

	/*
	 * Disable preemption.
	 */
	critical_enter();

	/*
	 * Do a little interrupt counting.  This uses the x86-specific
	 * intrcnt_add() function.
	 */
#if !defined(__aarch64__)
	(*VMBUS_PCPU_GET(sc, intr_cnt, cpu))++;
#endif /* not for aarch64 */

	vmbus_handle_intr1(sc, trap_frame, cpu);

	/*
	 * Enable preemption.
	 */
	critical_exit();
}

static void
vmbus_synic_setup(void *xsc)
{
	struct vmbus_softc *sc = xsc;
	int cpu = curcpu;
	uint64_t val, orig;
	uint32_t sint;

	if (hyperv_features & CPUID_HV_MSR_VP_INDEX) {
		/* Save the virtual processor id. */
		VMBUS_PCPU_GET(sc, vcpuid, cpu) = RDMSR(MSR_HV_VP_INDEX);
	} else {
		/* Set the virtual processor id to 0 for compatibility. */
		VMBUS_PCPU_GET(sc, vcpuid, cpu) = 0;
	}

	/*
	 * Setup the SynIC message.
	 */
	orig = RDMSR(MSR_HV_SIMP);
	val = pmap_kextract((vm_offset_t)VMBUS_PCPU_GET(sc, message, cpu)) &
	    MSR_HV_SIMP_PGMASK;
	val |= MSR_HV_SIMP_ENABLE | (orig & MSR_HV_SIMP_RSVD_MASK);
	WRMSR(MSR_HV_SIMP, val);

	/*
	 * Setup the SynIC event flags.
	 */
	orig = RDMSR(MSR_HV_SIEFP);
	val = pmap_kextract((vm_offset_t)VMBUS_PCPU_GET(sc, event_flags, cpu)) &
	    MSR_HV_SIMP_PGMASK;
	val |= MSR_HV_SIEFP_ENABLE | (orig & MSR_HV_SIEFP_RSVD_MASK);
	WRMSR(MSR_HV_SIEFP, val);

	/*
	 * Configure and unmask the SINT for message and event flags.
	 */
	sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE;
	orig = RDMSR(sint);
	val = sc->vmbus_idtvec | MSR_HV_SINT_AUTOEOI |
	    (orig & MSR_HV_SINT_RSVD_MASK);
	WRMSR(sint, val);

	/*
	 * Configure and unmask the SINT for the timer.
	 */
	vmbus_synic_setup1(sc);

	/*
	 * All done; enable SynIC.
	 */
	orig = RDMSR(MSR_HV_SCONTROL);
	val = MSR_HV_SCTRL_ENABLE | (orig & MSR_HV_SCTRL_RSVD_MASK);
	WRMSR(MSR_HV_SCONTROL, val);
}

static void
vmbus_synic_teardown(void *arg)
{
	uint64_t orig;
	uint32_t sint;

	/*
	 * Disable SynIC.
	 */
	orig = RDMSR(MSR_HV_SCONTROL);
	WRMSR(MSR_HV_SCONTROL, (orig & MSR_HV_SCTRL_RSVD_MASK));

	/*
	 * Mask the message and event flags SINT.
	 */
	sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE;
	orig = RDMSR(sint);
	WRMSR(sint, orig | MSR_HV_SINT_MASKED);

	/*
	 * Mask the timer SINT.
	 */
	vmbus_synic_teardown1();

	/*
	 * Teardown the SynIC message.
	 */
	orig = RDMSR(MSR_HV_SIMP);
	WRMSR(MSR_HV_SIMP, (orig & MSR_HV_SIMP_RSVD_MASK));

	/*
	 * Teardown the SynIC event flags.
	 */
	orig = RDMSR(MSR_HV_SIEFP);
	WRMSR(MSR_HV_SIEFP, (orig & MSR_HV_SIEFP_RSVD_MASK));
}

static int
vmbus_dma_alloc(struct vmbus_softc *sc)
{
	uint8_t *evtflags;
	int cpu;

	CPU_FOREACH(cpu) {
		void *ptr;

		/*
		 * Per-cpu messages and event flags.
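		 * Each one is a single page handed to the hypervisor by
		 * physical address (via MSR_HV_SIMP and MSR_HV_SIEFP in
		 * vmbus_synic_setup()), hence the contigmalloc() calls
		 * below with PAGE_SIZE alignment.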
		 */
		ptr = contigmalloc(PAGE_SIZE, M_DEVBUF, M_WAITOK | M_ZERO,
		    0ul, ~0ul, PAGE_SIZE, 0);
		if (ptr == NULL)
			return ENOMEM;
		VMBUS_PCPU_GET(sc, message, cpu) = ptr;

		ptr = contigmalloc(PAGE_SIZE, M_DEVBUF, M_WAITOK | M_ZERO,
		    0ul, ~0ul, PAGE_SIZE, 0);
		if (ptr == NULL)
			return ENOMEM;
		VMBUS_PCPU_GET(sc, event_flags, cpu) = ptr;
	}

	evtflags = contigmalloc(PAGE_SIZE, M_DEVBUF, M_WAITOK | M_ZERO,
	    0ul, ~0ul, PAGE_SIZE, 0);
	if (evtflags == NULL)
		return ENOMEM;
	sc->vmbus_rx_evtflags = (u_long *)evtflags;
	sc->vmbus_tx_evtflags = (u_long *)(evtflags + (PAGE_SIZE / 2));
	sc->vmbus_evtflags = evtflags;

	sc->vmbus_mnf1 = contigmalloc(PAGE_SIZE, M_DEVBUF, M_WAITOK | M_ZERO,
	    0ul, ~0ul, PAGE_SIZE, 0);
	if (sc->vmbus_mnf1 == NULL)
		return ENOMEM;

	sc->vmbus_mnf2 = contigmalloc(sizeof(struct vmbus_mnf), M_DEVBUF,
	    M_WAITOK | M_ZERO, 0ul, ~0ul, PAGE_SIZE, 0);
	if (sc->vmbus_mnf2 == NULL)
		return ENOMEM;

	return 0;
}

static void
vmbus_dma_free(struct vmbus_softc *sc)
{
	int cpu;

	if (sc->vmbus_evtflags != NULL) {
		contigfree(sc->vmbus_evtflags, PAGE_SIZE, M_DEVBUF);
		sc->vmbus_evtflags = NULL;
		sc->vmbus_rx_evtflags = NULL;
		sc->vmbus_tx_evtflags = NULL;
	}
	if (sc->vmbus_mnf1 != NULL) {
		contigfree(sc->vmbus_mnf1, PAGE_SIZE, M_DEVBUF);
		sc->vmbus_mnf1 = NULL;
	}
	if (sc->vmbus_mnf2 != NULL) {
		contigfree(sc->vmbus_mnf2, sizeof(struct vmbus_mnf), M_DEVBUF);
		sc->vmbus_mnf2 = NULL;
	}

	CPU_FOREACH(cpu) {
		if (VMBUS_PCPU_GET(sc, message, cpu) != NULL) {
			contigfree(VMBUS_PCPU_GET(sc, message, cpu), PAGE_SIZE,
			    M_DEVBUF);
			VMBUS_PCPU_GET(sc, message, cpu) = NULL;
		}
		if (VMBUS_PCPU_GET(sc, event_flags, cpu) != NULL) {
			contigfree(VMBUS_PCPU_GET(sc, event_flags, cpu),
			    PAGE_SIZE, M_DEVBUF);
			VMBUS_PCPU_GET(sc, event_flags, cpu) = NULL;
		}
	}
}

static int
vmbus_intr_setup(struct vmbus_softc *sc)
{
	int cpu;

	CPU_FOREACH(cpu) {
		char buf[MAXCOMLEN + 1];
		cpuset_t cpu_mask;

		/* Allocate an interrupt counter for the Hyper-V interrupt. */
		snprintf(buf, sizeof(buf), "cpu%d:hyperv", cpu);
#if !defined(__aarch64__)
		intrcnt_add(buf, VMBUS_PCPU_PTR(sc, intr_cnt, cpu));
#endif /* not for aarch64 */

		/*
		 * Setup the taskqueue to handle events.  Tasks will be
		 * per-channel.
		 */
		VMBUS_PCPU_GET(sc, event_tq, cpu) = taskqueue_create_fast(
		    "hyperv event", M_WAITOK, taskqueue_thread_enqueue,
		    VMBUS_PCPU_PTR(sc, event_tq, cpu));
		if (vmbus_pin_evttask) {
			CPU_SETOF(cpu, &cpu_mask);
			taskqueue_start_threads_cpuset(
			    VMBUS_PCPU_PTR(sc, event_tq, cpu), 1, PI_NET,
			    &cpu_mask, "hvevent%d", cpu);
		} else {
			taskqueue_start_threads(
			    VMBUS_PCPU_PTR(sc, event_tq, cpu), 1, PI_NET,
			    "hvevent%d", cpu);
		}

		/*
		 * Setup tasks and taskqueues to handle messages.
		 */
		VMBUS_PCPU_GET(sc, message_tq, cpu) = taskqueue_create_fast(
		    "hyperv msg", M_WAITOK, taskqueue_thread_enqueue,
		    VMBUS_PCPU_PTR(sc, message_tq, cpu));
		CPU_SETOF(cpu, &cpu_mask);
		taskqueue_start_threads_cpuset(
		    VMBUS_PCPU_PTR(sc, message_tq, cpu), 1, PI_NET, &cpu_mask,
		    "hvmsg%d", cpu);
		TASK_INIT(VMBUS_PCPU_PTR(sc, message_task, cpu), 0,
		    vmbus_msg_task, sc);
	}
	return (vmbus_setup_intr1(sc));
}

static void
vmbus_intr_teardown(struct vmbus_softc *sc)
{
	vmbus_intr_teardown1(sc);
}

static int
vmbus_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
	return (ENOENT);
}

static int
vmbus_child_pnpinfo(device_t dev, device_t child, struct sbuf *sb)
{
	const struct vmbus_channel *chan;
	char guidbuf[HYPERV_GUID_STRLEN];

	chan = vmbus_get_channel(child);
	if (chan == NULL) {
		/* Event timer device, which does not belong to a channel */
		return (0);
	}

	hyperv_guid2str(&chan->ch_guid_type, guidbuf, sizeof(guidbuf));
	sbuf_printf(sb, "classid=%s", guidbuf);

	hyperv_guid2str(&chan->ch_guid_inst, guidbuf, sizeof(guidbuf));
	sbuf_printf(sb, " deviceid=%s", guidbuf);

	return (0);
}

int
vmbus_add_child(struct vmbus_channel *chan)
{
	struct vmbus_softc *sc = chan->ch_vmbus;
	device_t parent = sc->vmbus_dev;

	bus_topo_lock();
	chan->ch_dev = device_add_child(parent, NULL, -1);
	if (chan->ch_dev == NULL) {
		bus_topo_unlock();
		device_printf(parent, "device_add_child for chan%u failed\n",
		    chan->ch_id);
		return (ENXIO);
	}
	device_set_ivars(chan->ch_dev, chan);
	device_probe_and_attach(chan->ch_dev);
	bus_topo_unlock();

	return (0);
}

int
vmbus_delete_child(struct vmbus_channel *chan)
{
	int error = 0;

	bus_topo_lock();
	if (chan->ch_dev != NULL) {
		error = device_delete_child(chan->ch_vmbus->vmbus_dev,
		    chan->ch_dev);
		chan->ch_dev = NULL;
	}
	bus_topo_unlock();
	return (error);
}

static int
vmbus_sysctl_version(SYSCTL_HANDLER_ARGS)
{
	struct vmbus_softc *sc = arg1;
	char verstr[16];

	snprintf(verstr, sizeof(verstr), "%u.%u",
	    VMBUS_VERSION_MAJOR(sc->vmbus_version),
	    VMBUS_VERSION_MINOR(sc->vmbus_version));
	return sysctl_handle_string(oidp, verstr, sizeof(verstr), req);
}

/*
 * We need this function to make sure that the MMIO resource is allocated
 * from the ranges found in _CRS.
 *
 * For the release function, we can use bus_generic_release_resource().
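 *
 * The _CRS ranges themselves are collected into sc->vmbus_mmio_res by
 * vmbus_get_mmio_res() during attach, before any child device requests
 * MMIO space.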
 */
static struct resource *
vmbus_alloc_resource(device_t dev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	device_t parent = device_get_parent(dev);
	struct resource *res;

#ifdef NEW_PCIB
	if (type == SYS_RES_MEMORY) {
		struct vmbus_softc *sc = device_get_softc(dev);

		res = pcib_host_res_alloc(&sc->vmbus_mmio_res, child, type,
		    rid, start, end, count, flags);
	} else
#endif
	{
		res = BUS_ALLOC_RESOURCE(parent, child, type, rid, start,
		    end, count, flags);
	}

	return (res);
}

static int
vmbus_alloc_msi(device_t bus, device_t dev, int count, int maxcount, int *irqs)
{

	return (PCIB_ALLOC_MSI(device_get_parent(bus), dev, count, maxcount,
	    irqs));
}

static int
vmbus_release_msi(device_t bus, device_t dev, int count, int *irqs)
{

	return (PCIB_RELEASE_MSI(device_get_parent(bus), dev, count, irqs));
}

static int
vmbus_alloc_msix(device_t bus, device_t dev, int *irq)
{

	return (PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq));
}

static int
vmbus_release_msix(device_t bus, device_t dev, int irq)
{

	return (PCIB_RELEASE_MSIX(device_get_parent(bus), dev, irq));
}

static int
vmbus_map_msi(device_t bus, device_t dev, int irq, uint64_t *addr,
    uint32_t *data)
{

	return (PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data));
}

static uint32_t
vmbus_get_version_method(device_t bus, device_t dev)
{
	struct vmbus_softc *sc = device_get_softc(bus);

	return sc->vmbus_version;
}

static int
vmbus_probe_guid_method(device_t bus, device_t dev,
    const struct hyperv_guid *guid)
{
	const struct vmbus_channel *chan = vmbus_get_channel(dev);

	if (memcmp(&chan->ch_guid_type, guid, sizeof(struct hyperv_guid)) == 0)
		return 0;
	return ENXIO;
}

static uint32_t
vmbus_get_vcpu_id_method(device_t bus, device_t dev, int cpu)
{
	const struct vmbus_softc *sc = device_get_softc(bus);

	return (VMBUS_PCPU_GET(sc, vcpuid, cpu));
}

static struct taskqueue *
vmbus_get_eventtq_method(device_t bus, device_t dev __unused, int cpu)
{
	const struct vmbus_softc *sc = device_get_softc(bus);

	KASSERT(cpu >= 0 && cpu < mp_ncpus, ("invalid cpu%d", cpu));
	return (VMBUS_PCPU_GET(sc, event_tq, cpu));
}

#ifdef NEW_PCIB
#define VTPM_BASE_ADDR		0xfed40000
#define FOUR_GB			(1ULL << 32)

enum parse_pass { parse_64, parse_32 };

struct parse_context {
	device_t	vmbus_dev;
	enum parse_pass	pass;
};

static ACPI_STATUS
parse_crs(ACPI_RESOURCE *res, void *ctx)
{
	const struct parse_context *pc = ctx;
	device_t vmbus_dev = pc->vmbus_dev;
	struct vmbus_softc *sc = device_get_softc(vmbus_dev);
	UINT64 start, end;

	switch (res->Type) {
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		start = res->Data.Address32.Address.Minimum;
		end = res->Data.Address32.Address.Maximum;
		break;

	case ACPI_RESOURCE_TYPE_ADDRESS64:
		start = res->Data.Address64.Address.Minimum;
		end = res->Data.Address64.Address.Maximum;
		break;

	default:
		/* Unused types. */
		return (AE_OK);
	}

	/*
	 * We don't use <1MB addresses.
	 */
	if (end < 0x100000)
		return (AE_OK);

	/* Don't conflict with vTPM. */
	if (end >= VTPM_BASE_ADDR && start < VTPM_BASE_ADDR)
		end = VTPM_BASE_ADDR - 1;

	if ((pc->pass == parse_32 && start < FOUR_GB) ||
	    (pc->pass == parse_64 && start >= FOUR_GB))
		pcib_host_res_decodes(&sc->vmbus_mmio_res, SYS_RES_MEMORY,
		    start, end, 0);

	return (AE_OK);
}

static void
vmbus_get_crs(device_t dev, device_t vmbus_dev, enum parse_pass pass)
{
	struct parse_context pc;
	ACPI_STATUS status;

	if (bootverbose)
		device_printf(dev, "walking _CRS, pass=%d\n", pass);

	pc.vmbus_dev = vmbus_dev;
	pc.pass = pass;
	status = AcpiWalkResources(acpi_get_handle(dev), "_CRS",
	    parse_crs, &pc);

	if (bootverbose && ACPI_FAILURE(status))
		device_printf(dev, "_CRS: not found, pass=%d\n", pass);
}

static void
vmbus_get_mmio_res_pass(device_t dev, enum parse_pass pass)
{
	device_t acpi0, parent;

	parent = device_get_parent(dev);

	acpi0 = device_get_parent(parent);
	if (strcmp("acpi0", device_get_nameunit(acpi0)) == 0) {
		device_t *children;
		int count;

		/*
		 * Try to locate VMBUS resources and find _CRS on them.
		 */
		if (device_get_children(acpi0, &children, &count) == 0) {
			int i;

			for (i = 0; i < count; ++i) {
				if (!device_is_attached(children[i]))
					continue;

				if (strcmp("vmbus_res",
				    device_get_name(children[i])) == 0)
					vmbus_get_crs(children[i], dev, pass);
			}
			free(children, M_TEMP);
		}

		/*
		 * Try to find _CRS on acpi.
		 */
		vmbus_get_crs(acpi0, dev, pass);
	} else {
		device_printf(dev, "not grandchild of acpi\n");
	}

	/*
	 * Try to find _CRS on the parent.
	 */
	vmbus_get_crs(parent, dev, pass);
}

static void
vmbus_get_mmio_res(device_t dev)
{
	struct vmbus_softc *sc = device_get_softc(dev);

	/*
	 * We walk the resources twice to make sure that, in the resource
	 * list, the 32-bit resources appear behind the 64-bit resources.
	 * NB: resource_list_add() uses INSERT_TAIL.  This way, when we
	 * iterate through the list to find a range for a 64-bit BAR in
	 * vmbus_alloc_resource(), we can make sure we try to use >4GB
	 * ranges first.
	 */
	pcib_host_res_init(dev, &sc->vmbus_mmio_res);

	vmbus_get_mmio_res_pass(dev, parse_64);
	vmbus_get_mmio_res_pass(dev, parse_32);
}

/*
 * On Gen2 VMs, Hyper-V provides MMIO space for the framebuffer.
 * This MMIO address range is not usable by other PCI devices.
 * Currently only the efifb and vbefb drivers use this range without
 * reserving it from the system.
 * Therefore, the vmbus driver reserves it before any other PCI device
 * drivers start to request MMIO addresses.
 */
static struct resource *hv_fb_res;

static void
vmbus_fb_mmio_res(device_t dev)
{
	struct efi_fb *efifb;
#if !defined(__aarch64__)
	struct vbe_fb *vbefb;
#endif /* not for aarch64 */
	rman_res_t fb_start, fb_end, fb_count;
	int fb_height, fb_width;
	caddr_t kmdp;

	struct vmbus_softc *sc = device_get_softc(dev);
	int rid = 0;

	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");
	efifb = (struct efi_fb *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_FB);
#if !defined(__aarch64__)
	vbefb = (struct vbe_fb *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_VBE_FB);
#endif /* not for aarch64 */
	if (efifb != NULL) {
		fb_start = efifb->fb_addr;
		fb_end = efifb->fb_addr + efifb->fb_size;
		fb_count = efifb->fb_size;
		fb_height = efifb->fb_height;
		fb_width = efifb->fb_width;
	}
#if !defined(__aarch64__)
	else if (vbefb != NULL) {
		fb_start = vbefb->fb_addr;
		fb_end = vbefb->fb_addr + vbefb->fb_size;
		fb_count = vbefb->fb_size;
		fb_height = vbefb->fb_height;
		fb_width = vbefb->fb_width;
	}
#endif /* not for aarch64 */
	else {
		if (bootverbose)
			device_printf(dev,
			    "no preloaded kernel fb information\n");
		/* We are on a Gen1 VM, just return. */
		return;
	}

	if (bootverbose)
		device_printf(dev,
		    "fb: fb_addr: %#jx, size: %#jx, "
		    "actual size needed: 0x%x\n",
		    fb_start, fb_count, fb_height * fb_width);

	hv_fb_res = pcib_host_res_alloc(&sc->vmbus_mmio_res, dev,
	    SYS_RES_MEMORY, &rid, fb_start, fb_end, fb_count,
	    RF_ACTIVE | rman_make_alignment_flags(PAGE_SIZE));

	if (hv_fb_res && bootverbose)
		device_printf(dev,
		    "successfully reserved memory for framebuffer "
		    "starting at %#jx, size %#jx\n",
		    fb_start, fb_count);
}

static void
vmbus_free_mmio_res(device_t dev)
{
	struct vmbus_softc *sc = device_get_softc(dev);

	pcib_host_res_free(dev, &sc->vmbus_mmio_res);

	if (hv_fb_res)
		hv_fb_res = NULL;
}
#endif /* NEW_PCIB */

static void
vmbus_identify(driver_t *driver, device_t parent)
{

	if (device_get_unit(parent) != 0 || vm_guest != VM_GUEST_HV ||
	    (hyperv_features & CPUID_HV_MSR_SYNIC) == 0)
		return;
	device_add_child(parent, "vmbus", -1);
}

static int
vmbus_probe(device_t dev)
{

	if (device_get_unit(dev) != 0 || vm_guest != VM_GUEST_HV ||
	    (hyperv_features & CPUID_HV_MSR_SYNIC) == 0)
		return (ENXIO);

	device_set_desc(dev, "Hyper-V Vmbus");
	return (BUS_PROBE_DEFAULT);
}

/**
 * @brief Main vmbus driver initialization routine.
 *
 * Here, we
 * - initialize the vmbus driver context
 * - setup various driver entry points
 * - invoke the vmbus hv main init routine
 * - get the irq resource
 * - invoke the vmbus to add the vmbus root device
 * - setup the vmbus root device
 * - retrieve the channel offers
 */
static int
vmbus_doattach(struct vmbus_softc *sc)
{
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;
	int ret;
	device_t dev_res;
	ACPI_HANDLE handle;
	unsigned int coherent = 0;

	if (sc->vmbus_flags & VMBUS_FLAG_ATTACHED)
		return (0);

#ifdef NEW_PCIB
	vmbus_get_mmio_res(sc->vmbus_dev);
	vmbus_fb_mmio_res(sc->vmbus_dev);
#endif

	sc->vmbus_flags |= VMBUS_FLAG_ATTACHED;

	sc->vmbus_gpadl = VMBUS_GPADL_START;
	mtx_init(&sc->vmbus_prichan_lock, "vmbus prichan", NULL, MTX_DEF);
	TAILQ_INIT(&sc->vmbus_prichans);
	mtx_init(&sc->vmbus_chan_lock, "vmbus channel", NULL, MTX_DEF);
	TAILQ_INIT(&sc->vmbus_chans);
	sc->vmbus_chmap = malloc(
	    sizeof(struct vmbus_channel *) * VMBUS_CHAN_MAX, M_DEVBUF,
	    M_WAITOK | M_ZERO);

	/* Coherency attribute */
	dev_res = devclass_get_device(devclass_find("vmbus_res"), 0);
	if (dev_res != NULL) {
		handle = acpi_get_handle(dev_res);

		if (ACPI_FAILURE(acpi_GetInteger(handle, "_CCA", &coherent)))
			coherent = 0;
	}
	if (bootverbose)
		device_printf(sc->vmbus_dev, "Bus is%s cache-coherent\n",
		    coherent ? "" : " not");

	bus_dma_tag_create(bus_get_dma_tag(sc->vmbus_dev),
	    1, 0,
	    BUS_SPACE_MAXADDR,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    BUS_SPACE_MAXSIZE,
	    BUS_SPACE_UNRESTRICTED,
	    BUS_SPACE_MAXSIZE,
	    coherent ? BUS_DMA_COHERENT : 0,
	    NULL, NULL,
	    &sc->dmat);

	/*
	 * Create the context for "post message" Hypercalls.
	 */
	sc->vmbus_xc = vmbus_xact_ctx_create(bus_get_dma_tag(sc->vmbus_dev),
	    HYPERCALL_POSTMSGIN_SIZE, VMBUS_MSG_SIZE,
	    sizeof(struct vmbus_msghc));
	if (sc->vmbus_xc == NULL) {
		ret = ENXIO;
		goto cleanup;
	}

	/*
	 * Allocate DMA stuff.
	 */
	ret = vmbus_dma_alloc(sc);
	if (ret != 0)
		goto cleanup;

	/*
	 * Setup the interrupt.
	 */
	ret = vmbus_intr_setup(sc);
	if (ret != 0)
		goto cleanup;

	/*
	 * Setup SynIC.
	 */
	if (bootverbose)
		device_printf(sc->vmbus_dev, "smp_started = %d\n", smp_started);
	smp_rendezvous(NULL, vmbus_synic_setup, NULL, sc);
	sc->vmbus_flags |= VMBUS_FLAG_SYNIC;

	/*
	 * Initialize vmbus, e.g. connect to the Hypervisor.
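	 * vmbus_init() tries each protocol version in vmbus_version[],
	 * from the newest to the oldest, until the host accepts one.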
	 */
	ret = vmbus_init(sc);
	if (ret != 0)
		goto cleanup;

	if (sc->vmbus_version == VMBUS_VERSION_WS2008 ||
	    sc->vmbus_version == VMBUS_VERSION_WIN7)
		sc->vmbus_event_proc = vmbus_event_proc_compat;
	else
		sc->vmbus_event_proc = vmbus_event_proc;

	ret = vmbus_scan(sc);
	if (ret != 0)
		goto cleanup;

	ctx = device_get_sysctl_ctx(sc->vmbus_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vmbus_dev));
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "version",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    vmbus_sysctl_version, "A", "vmbus version");

	return (ret);

cleanup:
	vmbus_scan_teardown(sc);
	vmbus_intr_teardown(sc);
	vmbus_dma_free(sc);
	if (sc->vmbus_xc != NULL) {
		vmbus_xact_ctx_destroy(sc->vmbus_xc);
		sc->vmbus_xc = NULL;
	}
	free(__DEVOLATILE(void *, sc->vmbus_chmap), M_DEVBUF);
	mtx_destroy(&sc->vmbus_prichan_lock);
	mtx_destroy(&sc->vmbus_chan_lock);

	return (ret);
}

static void
vmbus_event_proc_dummy(struct vmbus_softc *sc __unused, int cpu __unused)
{
}

#if defined(EARLY_AP_STARTUP)

static void
vmbus_intrhook(void *xsc)
{
	struct vmbus_softc *sc = xsc;

	if (bootverbose)
		device_printf(sc->vmbus_dev, "intrhook\n");
	vmbus_doattach(sc);
	config_intrhook_disestablish(&sc->vmbus_intrhook);
}

#endif /* EARLY_AP_STARTUP */

static int
vmbus_attach(device_t dev)
{
	vmbus_sc = device_get_softc(dev);
	vmbus_sc->vmbus_dev = dev;
	vmbus_sc->vmbus_idtvec = -1;

	/*
	 * The event processing logic will be configured:
	 * - After the vmbus protocol version negotiation.
	 * - Before we request channel offers.
	 */
	vmbus_sc->vmbus_event_proc = vmbus_event_proc_dummy;

#if defined(EARLY_AP_STARTUP)
	/*
	 * Defer the real attach until pause(9) works as expected.
	 */
	vmbus_sc->vmbus_intrhook.ich_func = vmbus_intrhook;
	vmbus_sc->vmbus_intrhook.ich_arg = vmbus_sc;
	config_intrhook_establish(&vmbus_sc->vmbus_intrhook);
#endif /* EARLY_AP_STARTUP */

	return (0);
}

static int
vmbus_detach(device_t dev)
{
	struct vmbus_softc *sc = device_get_softc(dev);

	bus_generic_detach(dev);
	vmbus_chan_destroy_all(sc);

	vmbus_scan_teardown(sc);

	vmbus_disconnect(sc);

	if (sc->vmbus_flags & VMBUS_FLAG_SYNIC) {
		sc->vmbus_flags &= ~VMBUS_FLAG_SYNIC;
		smp_rendezvous(NULL, vmbus_synic_teardown, NULL, NULL);
	}

	vmbus_intr_teardown(sc);
	vmbus_dma_free(sc);

	if (sc->vmbus_xc != NULL) {
		vmbus_xact_ctx_destroy(sc->vmbus_xc);
		sc->vmbus_xc = NULL;
	}

	free(__DEVOLATILE(void *, sc->vmbus_chmap), M_DEVBUF);
	mtx_destroy(&sc->vmbus_prichan_lock);
	mtx_destroy(&sc->vmbus_chan_lock);

#ifdef NEW_PCIB
	vmbus_free_mmio_res(dev);
#endif

#if defined(__aarch64__)
	bus_release_resource(device_get_parent(dev), SYS_RES_IRQ, sc->vector,
	    sc->ires);
#endif
	return (0);
}

#if !defined(EARLY_AP_STARTUP)

static void
vmbus_sysinit(void *arg __unused)
{
	struct vmbus_softc *sc = vmbus_get_softc();

	if (vm_guest != VM_GUEST_HV || sc == NULL)
		return;

	vmbus_doattach(sc);
}

/*
 * NOTE:
 * We have to start as the last step of SI_SUB_SMP, i.e. after SMP is
 * initialized.
 */
SYSINIT(vmbus_initialize, SI_SUB_SMP, SI_ORDER_ANY, vmbus_sysinit, NULL);

#endif /* !EARLY_AP_STARTUP */