/*-
 * Copyright (c) 2009-2012,2016-2017 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 * Copyright (c) 2012 Citrix Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * VM Bus Driver Implementation
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/intr_machdep.h>
#include <machine/metadata.h>
#include <machine/md_var.h>
#include <machine/resource.h>
#include <x86/include/apicvar.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>

#include <dev/hyperv/include/hyperv.h>
#include <dev/hyperv/include/vmbus_xact.h>
#include <dev/hyperv/vmbus/hyperv_reg.h>
#include <dev/hyperv/vmbus/hyperv_var.h>
#include <dev/hyperv/vmbus/vmbus_reg.h>
#include <dev/hyperv/vmbus/vmbus_var.h>
#include <dev/hyperv/vmbus/vmbus_chanvar.h>

#include "acpi_if.h"
#include "pcib_if.h"
#include "vmbus_if.h"

#define VMBUS_GPADL_START	0xe1e10

struct vmbus_msghc {
	struct vmbus_xact		*mh_xact;
	struct hypercall_postmsg_in	mh_inprm_save;
};

static void		vmbus_identify(driver_t *, device_t);
static int		vmbus_probe(device_t);
static int		vmbus_attach(device_t);
static int		vmbus_detach(device_t);
static int		vmbus_read_ivar(device_t, device_t, int,
			    uintptr_t *);
static int		vmbus_child_pnpinfo_str(device_t, device_t,
			    char *, size_t);
static struct resource	*vmbus_alloc_resource(device_t dev,
			    device_t child, int type, int *rid,
			    rman_res_t start, rman_res_t end,
			    rman_res_t count, u_int flags);
static int		vmbus_alloc_msi(device_t bus, device_t dev,
			    int count, int maxcount, int *irqs);
static int		vmbus_release_msi(device_t bus, device_t dev,
			    int count, int *irqs);
static int		vmbus_alloc_msix(device_t bus, device_t dev,
			    int *irq);
static int		vmbus_release_msix(device_t bus, device_t dev,
			    int irq);
static int		vmbus_map_msi(device_t bus, device_t dev,
			    int irq, uint64_t *addr, uint32_t *data);
static uint32_t		vmbus_get_version_method(device_t, device_t);
static int		vmbus_probe_guid_method(device_t, device_t,
			    const struct hyperv_guid *);
static uint32_t		vmbus_get_vcpu_id_method(device_t bus,
			    device_t dev, int cpu);
static struct taskqueue	*vmbus_get_eventtq_method(device_t, device_t,
			    int);
#ifdef EARLY_AP_STARTUP
static void		vmbus_intrhook(void *);
#endif

static int		vmbus_init(struct vmbus_softc *);
static int		vmbus_connect(struct vmbus_softc *, uint32_t);
static int		vmbus_req_channels(struct vmbus_softc *sc);
static void		vmbus_disconnect(struct vmbus_softc *);
static int		vmbus_scan(struct vmbus_softc *);
static void		vmbus_scan_teardown(struct vmbus_softc *);
static void		vmbus_scan_done(struct vmbus_softc *,
			    const struct vmbus_message *);
static void		vmbus_chanmsg_handle(struct vmbus_softc *,
			    const struct vmbus_message *);
static void		vmbus_msg_task(void *, int);
static void		vmbus_synic_setup(void *);
static void		vmbus_synic_teardown(void *);
static int		vmbus_sysctl_version(SYSCTL_HANDLER_ARGS);
static int		vmbus_dma_alloc(struct vmbus_softc *);
static void		vmbus_dma_free(struct vmbus_softc *);
static int		vmbus_intr_setup(struct vmbus_softc *);
static void		vmbus_intr_teardown(struct vmbus_softc *);
static int		vmbus_doattach(struct vmbus_softc *);
static void		vmbus_event_proc_dummy(struct vmbus_softc *,
			    int);

static struct vmbus_softc	*vmbus_sc;

SYSCTL_NODE(_hw, OID_AUTO, vmbus, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "Hyper-V vmbus");

static int		vmbus_pin_evttask = 1;
SYSCTL_INT(_hw_vmbus, OID_AUTO, pin_evttask, CTLFLAG_RDTUN,
    &vmbus_pin_evttask, 0, "Pin event tasks to their respective CPU");

extern inthand_t IDTVEC(vmbus_isr), IDTVEC(vmbus_isr_pti);
#define VMBUS_ISR_ADDR	trunc_page((uintptr_t)IDTVEC(vmbus_isr_pti))

uint32_t		vmbus_current_version;

static const uint32_t	vmbus_version[] = {
	VMBUS_VERSION_WIN10,
	VMBUS_VERSION_WIN8_1,
	VMBUS_VERSION_WIN8,
	VMBUS_VERSION_WIN7,
	VMBUS_VERSION_WS2008
};

static const vmbus_chanmsg_proc_t
vmbus_chanmsg_handlers[VMBUS_CHANMSG_TYPE_MAX] = {
	VMBUS_CHANMSG_PROC(CHOFFER_DONE, vmbus_scan_done),
	VMBUS_CHANMSG_PROC_WAKEUP(CONNECT_RESP)
};

static device_method_t vmbus_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,		vmbus_identify),
	DEVMETHOD(device_probe,			vmbus_probe),
	DEVMETHOD(device_attach,		vmbus_attach),
	DEVMETHOD(device_detach,		vmbus_detach),
	DEVMETHOD(device_shutdown,		bus_generic_shutdown),
	DEVMETHOD(device_suspend,		bus_generic_suspend),
	DEVMETHOD(device_resume,		bus_generic_resume),

	/* Bus interface */
	DEVMETHOD(bus_add_child,		bus_generic_add_child),
	DEVMETHOD(bus_print_child,		bus_generic_print_child),
	DEVMETHOD(bus_read_ivar,		vmbus_read_ivar),
	DEVMETHOD(bus_child_pnpinfo_str,	vmbus_child_pnpinfo_str),
	DEVMETHOD(bus_alloc_resource,		vmbus_alloc_resource),
	DEVMETHOD(bus_release_resource,		bus_generic_release_resource),
	DEVMETHOD(bus_activate_resource,	bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource,	bus_generic_deactivate_resource),
	DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,		bus_generic_teardown_intr),
#if __FreeBSD_version >= 1100000
	DEVMETHOD(bus_get_cpus,			bus_generic_get_cpus),
#endif

	/* pcib interface */
	DEVMETHOD(pcib_alloc_msi,		vmbus_alloc_msi),
	DEVMETHOD(pcib_release_msi,		vmbus_release_msi),
	DEVMETHOD(pcib_alloc_msix,		vmbus_alloc_msix),
	DEVMETHOD(pcib_release_msix,		vmbus_release_msix),
	DEVMETHOD(pcib_map_msi,			vmbus_map_msi),

	/* Vmbus interface */
	DEVMETHOD(vmbus_get_version,		vmbus_get_version_method),
	DEVMETHOD(vmbus_probe_guid,		vmbus_probe_guid_method),
	DEVMETHOD(vmbus_get_vcpu_id,		vmbus_get_vcpu_id_method),
	DEVMETHOD(vmbus_get_event_taskq,	vmbus_get_eventtq_method),

	DEVMETHOD_END
};

static driver_t vmbus_driver = {
	"vmbus",
	vmbus_methods,
	sizeof(struct vmbus_softc)
};

static devclass_t vmbus_devclass;

DRIVER_MODULE(vmbus, pcib, vmbus_driver, vmbus_devclass, NULL, NULL);
DRIVER_MODULE(vmbus, acpi_syscontainer, vmbus_driver, vmbus_devclass,
    NULL, NULL);

MODULE_DEPEND(vmbus, acpi, 1, 1, 1);
MODULE_DEPEND(vmbus, pci, 1, 1, 1);
MODULE_VERSION(vmbus, 1);

static __inline struct vmbus_softc *
vmbus_get_softc(void)
{
	return vmbus_sc;
}

void
vmbus_msghc_reset(struct vmbus_msghc *mh, size_t dsize)
{
	struct hypercall_postmsg_in *inprm;

	if (dsize > HYPERCALL_POSTMSGIN_DSIZE_MAX)
		panic("invalid data size %zu", dsize);

	inprm = vmbus_xact_req_data(mh->mh_xact);
	memset(inprm, 0, HYPERCALL_POSTMSGIN_SIZE);
	inprm->hc_connid = VMBUS_CONNID_MESSAGE;
	inprm->hc_msgtype = HYPERV_MSGTYPE_CHANNEL;
	inprm->hc_dsize = dsize;
}

struct vmbus_msghc *
vmbus_msghc_get(struct vmbus_softc *sc, size_t dsize)
{
	struct vmbus_msghc *mh;
	struct vmbus_xact *xact;

	if (dsize > HYPERCALL_POSTMSGIN_DSIZE_MAX)
		panic("invalid data size %zu", dsize);

	xact = vmbus_xact_get(sc->vmbus_xc,
	    dsize + __offsetof(struct hypercall_postmsg_in, hc_data[0]));
	if (xact == NULL)
		return (NULL);

	mh = vmbus_xact_priv(xact, sizeof(*mh));
	mh->mh_xact = xact;

	vmbus_msghc_reset(mh, dsize);
	return (mh);
}

void
vmbus_msghc_put(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
{

	vmbus_xact_put(mh->mh_xact);
}

void *
vmbus_msghc_dataptr(struct vmbus_msghc *mh)
{
	struct hypercall_postmsg_in *inprm;

	inprm = vmbus_xact_req_data(mh->mh_xact);
	return (inprm->hc_data);
}

int
vmbus_msghc_exec_noresult(struct vmbus_msghc *mh)
{
	sbintime_t time = SBT_1MS;
	struct hypercall_postmsg_in *inprm;
	bus_addr_t inprm_paddr;
	int i;

	inprm = vmbus_xact_req_data(mh->mh_xact);
	inprm_paddr = vmbus_xact_req_paddr(mh->mh_xact);

	/*
	 * Save the input parameter so that we could restore the input
	 * parameter if the Hypercall failed.
	 *
	 * XXX
	 * Is this really necessary?!  i.e. Will the Hypercall ever
	 * overwrite the input parameter?
	 */
	memcpy(&mh->mh_inprm_save, inprm, HYPERCALL_POSTMSGIN_SIZE);

	/*
	 * In order to cope with transient failures, e.g. insufficient
	 * resources on host side, we retry the post message Hypercall
	 * several times.  20 retries seem sufficient.
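	 * The pause between attempts starts at 1ms and is doubled after
	 * each failure, capped at roughly 2 seconds (see pause_sbt() below).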
	 */
#define HC_RETRY_MAX	20

	for (i = 0; i < HC_RETRY_MAX; ++i) {
		uint64_t status;

		status = hypercall_post_message(inprm_paddr);
		if (status == HYPERCALL_STATUS_SUCCESS)
			return 0;

		pause_sbt("hcpmsg", time, 0, C_HARDCLOCK);
		if (time < SBT_1S * 2)
			time *= 2;

		/* Restore input parameter and try again */
		memcpy(inprm, &mh->mh_inprm_save, HYPERCALL_POSTMSGIN_SIZE);
	}

#undef HC_RETRY_MAX

	return EIO;
}

int
vmbus_msghc_exec(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
{
	int error;

	vmbus_xact_activate(mh->mh_xact);
	error = vmbus_msghc_exec_noresult(mh);
	if (error)
		vmbus_xact_deactivate(mh->mh_xact);
	return error;
}

void
vmbus_msghc_exec_cancel(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
{

	vmbus_xact_deactivate(mh->mh_xact);
}

const struct vmbus_message *
vmbus_msghc_wait_result(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
{
	size_t resp_len;

	return (vmbus_xact_wait(mh->mh_xact, &resp_len));
}

const struct vmbus_message *
vmbus_msghc_poll_result(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
{
	size_t resp_len;

	return (vmbus_xact_poll(mh->mh_xact, &resp_len));
}

void
vmbus_msghc_wakeup(struct vmbus_softc *sc, const struct vmbus_message *msg)
{

	vmbus_xact_ctx_wakeup(sc->vmbus_xc, msg, sizeof(*msg));
}

uint32_t
vmbus_gpadl_alloc(struct vmbus_softc *sc)
{
	uint32_t gpadl;

again:
	gpadl = atomic_fetchadd_int(&sc->vmbus_gpadl, 1);
	if (gpadl == 0)
		goto again;
	return (gpadl);
}

/* Used for Hyper-V socket when guest client connects to host */
int
vmbus_req_tl_connect(struct hyperv_guid *guest_srv_id,
    struct hyperv_guid *host_srv_id)
{
	struct vmbus_softc *sc = vmbus_get_softc();
	struct vmbus_chanmsg_tl_connect *req;
	struct vmbus_msghc *mh;
	int error;

	if (!sc)
		return ENXIO;

	mh = vmbus_msghc_get(sc, sizeof(*req));
	if (mh == NULL) {
		device_printf(sc->vmbus_dev,
		    "can not get msg hypercall for tl connect\n");
		return ENXIO;
	}

	req = vmbus_msghc_dataptr(mh);
	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_TL_CONN;
	req->guest_endpoint_id = *guest_srv_id;
	req->host_service_id = *host_srv_id;

	error = vmbus_msghc_exec_noresult(mh);
	vmbus_msghc_put(sc, mh);

	if (error) {
		device_printf(sc->vmbus_dev,
		    "tl connect msg hypercall failed\n");
	}

	return error;
}

static int
vmbus_connect(struct vmbus_softc *sc, uint32_t version)
{
	struct vmbus_chanmsg_connect *req;
	const struct vmbus_message *msg;
	struct vmbus_msghc *mh;
	int error, done = 0;

	mh = vmbus_msghc_get(sc, sizeof(*req));
	if (mh == NULL)
		return ENXIO;

	req = vmbus_msghc_dataptr(mh);
	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CONNECT;
	req->chm_ver = version;
	req->chm_evtflags = sc->vmbus_evtflags_dma.hv_paddr;
	req->chm_mnf1 = sc->vmbus_mnf1_dma.hv_paddr;
	req->chm_mnf2 = sc->vmbus_mnf2_dma.hv_paddr;

	error = vmbus_msghc_exec(sc, mh);
	if (error) {
		vmbus_msghc_put(sc, mh);
		return error;
	}

	msg = vmbus_msghc_wait_result(sc, mh);
	done = ((const struct vmbus_chanmsg_connect_resp *)
	    msg->msg_data)->chm_done;

	vmbus_msghc_put(sc, mh);

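	/*
	 * A zero chm_done means the host did not accept this protocol
	 * version; the caller (vmbus_init()) will retry with an older one.
	 */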
	return (done ? 0 : EOPNOTSUPP);
}

static int
vmbus_init(struct vmbus_softc *sc)
{
	int i;

	for (i = 0; i < nitems(vmbus_version); ++i) {
		int error;

		error = vmbus_connect(sc, vmbus_version[i]);
		if (!error) {
			vmbus_current_version = vmbus_version[i];
			sc->vmbus_version = vmbus_version[i];
			device_printf(sc->vmbus_dev, "version %u.%u\n",
			    VMBUS_VERSION_MAJOR(sc->vmbus_version),
			    VMBUS_VERSION_MINOR(sc->vmbus_version));
			return 0;
		}
	}
	return ENXIO;
}

static void
vmbus_disconnect(struct vmbus_softc *sc)
{
	struct vmbus_chanmsg_disconnect *req;
	struct vmbus_msghc *mh;
	int error;

	mh = vmbus_msghc_get(sc, sizeof(*req));
	if (mh == NULL) {
		device_printf(sc->vmbus_dev,
		    "can not get msg hypercall for disconnect\n");
		return;
	}

	req = vmbus_msghc_dataptr(mh);
	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_DISCONNECT;

	error = vmbus_msghc_exec_noresult(mh);
	vmbus_msghc_put(sc, mh);

	if (error) {
		device_printf(sc->vmbus_dev,
		    "disconnect msg hypercall failed\n");
	}
}

static int
vmbus_req_channels(struct vmbus_softc *sc)
{
	struct vmbus_chanmsg_chrequest *req;
	struct vmbus_msghc *mh;
	int error;

	mh = vmbus_msghc_get(sc, sizeof(*req));
	if (mh == NULL)
		return ENXIO;

	req = vmbus_msghc_dataptr(mh);
	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHREQUEST;

	error = vmbus_msghc_exec_noresult(mh);
	vmbus_msghc_put(sc, mh);

	return error;
}

static void
vmbus_scan_done_task(void *xsc, int pending __unused)
{
	struct vmbus_softc *sc = xsc;

	mtx_lock(&Giant);
	sc->vmbus_scandone = true;
	mtx_unlock(&Giant);
	wakeup(&sc->vmbus_scandone);
}

static void
vmbus_scan_done(struct vmbus_softc *sc,
    const struct vmbus_message *msg __unused)
{

	taskqueue_enqueue(sc->vmbus_devtq, &sc->vmbus_scandone_task);
}

static int
vmbus_scan(struct vmbus_softc *sc)
{
	int error;

	/*
	 * Identify, probe and attach for non-channel devices.
	 */
	bus_generic_probe(sc->vmbus_dev);
	bus_generic_attach(sc->vmbus_dev);

	/*
	 * This taskqueue serializes vmbus devices' attach and detach
	 * for channel offer and rescind messages.
	 */
	sc->vmbus_devtq = taskqueue_create("vmbus dev", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->vmbus_devtq);
	taskqueue_start_threads(&sc->vmbus_devtq, 1, PI_NET, "vmbusdev");
	TASK_INIT(&sc->vmbus_scandone_task, 0, vmbus_scan_done_task, sc);

	/*
	 * This taskqueue handles sub-channel detach, so that vmbus
	 * device's detach running in vmbus_devtq can drain its sub-
	 * channels.
	 */
	sc->vmbus_subchtq = taskqueue_create("vmbus subch", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->vmbus_subchtq);
	taskqueue_start_threads(&sc->vmbus_subchtq, 1, PI_NET, "vmbussch");

	/*
	 * Start vmbus scanning.
	 */
	error = vmbus_req_channels(sc);
	if (error) {
		device_printf(sc->vmbus_dev, "channel request failed: %d\n",
		    error);
		return (error);
	}

	/*
	 * Wait for all vmbus devices from the initial channel offers to be
	 * attached.
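	 * vmbus_scandone is set by vmbus_scan_done_task(), which is enqueued
	 * on vmbus_devtq once the CHOFFER_DONE message is received.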
	 */
	GIANT_REQUIRED;
	while (!sc->vmbus_scandone)
		mtx_sleep(&sc->vmbus_scandone, &Giant, 0, "vmbusdev", 0);

	if (bootverbose) {
		device_printf(sc->vmbus_dev, "device scan, probe and attach "
		    "done\n");
	}
	return (0);
}

static void
vmbus_scan_teardown(struct vmbus_softc *sc)
{

	GIANT_REQUIRED;
	if (sc->vmbus_devtq != NULL) {
		mtx_unlock(&Giant);
		taskqueue_free(sc->vmbus_devtq);
		mtx_lock(&Giant);
		sc->vmbus_devtq = NULL;
	}
	if (sc->vmbus_subchtq != NULL) {
		mtx_unlock(&Giant);
		taskqueue_free(sc->vmbus_subchtq);
		mtx_lock(&Giant);
		sc->vmbus_subchtq = NULL;
	}
}

static void
vmbus_chanmsg_handle(struct vmbus_softc *sc, const struct vmbus_message *msg)
{
	vmbus_chanmsg_proc_t msg_proc;
	uint32_t msg_type;

	msg_type = ((const struct vmbus_chanmsg_hdr *)msg->msg_data)->chm_type;
	if (msg_type >= VMBUS_CHANMSG_TYPE_MAX) {
		device_printf(sc->vmbus_dev, "unknown message type 0x%x\n",
		    msg_type);
		return;
	}

	msg_proc = vmbus_chanmsg_handlers[msg_type];
	if (msg_proc != NULL)
		msg_proc(sc, msg);

	/* Channel specific processing */
	vmbus_chan_msgproc(sc, msg);
}

static void
vmbus_msg_task(void *xsc, int pending __unused)
{
	struct vmbus_softc *sc = xsc;
	volatile struct vmbus_message *msg;

	msg = VMBUS_PCPU_GET(sc, message, curcpu) + VMBUS_SINT_MESSAGE;
	for (;;) {
		if (msg->msg_type == HYPERV_MSGTYPE_NONE) {
			/* No message */
			break;
		} else if (msg->msg_type == HYPERV_MSGTYPE_CHANNEL) {
			/* Channel message */
			vmbus_chanmsg_handle(sc,
			    __DEVOLATILE(const struct vmbus_message *, msg));
		}

		msg->msg_type = HYPERV_MSGTYPE_NONE;
		/*
		 * Make sure the write to msg_type (i.e. set to
		 * HYPERV_MSGTYPE_NONE) happens before we read the
		 * msg_flags and EOMing. Otherwise, the EOMing will
		 * not deliver any more messages since there is no
		 * empty slot
		 *
		 * NOTE:
		 * mb() is used here, since atomic_thread_fence_seq_cst()
		 * will become compiler fence on UP kernel.
		 */
		mb();
		if (msg->msg_flags & VMBUS_MSGFLAG_PENDING) {
			/*
			 * This will cause message queue rescan to possibly
			 * deliver another msg from the hypervisor
			 */
			wrmsr(MSR_HV_EOM, 0);
		}
	}
}

static __inline int
vmbus_handle_intr1(struct vmbus_softc *sc, struct trapframe *frame, int cpu)
{
	volatile struct vmbus_message *msg;
	struct vmbus_message *msg_base;

	msg_base = VMBUS_PCPU_GET(sc, message, cpu);

	/*
	 * Check event timer.
	 *
	 * TODO: move this to independent IDT vector.
	 */
	msg = msg_base + VMBUS_SINT_TIMER;
	if (msg->msg_type == HYPERV_MSGTYPE_TIMER_EXPIRED) {
		msg->msg_type = HYPERV_MSGTYPE_NONE;

		vmbus_et_intr(frame);

		/*
		 * Make sure the write to msg_type (i.e. set to
		 * HYPERV_MSGTYPE_NONE) happens before we read the
		 * msg_flags and EOMing. Otherwise, the EOMing will
		 * not deliver any more messages since there is no
		 * empty slot
		 *
		 * NOTE:
		 * mb() is used here, since atomic_thread_fence_seq_cst()
		 * will become compiler fence on UP kernel.
		 */
		mb();
		if (msg->msg_flags & VMBUS_MSGFLAG_PENDING) {
			/*
			 * This will cause message queue rescan to possibly
			 * deliver another msg from the hypervisor
			 */
			wrmsr(MSR_HV_EOM, 0);
		}
	}

	/*
	 * Check events.  Hot path for network and storage I/O data; high rate.
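	 * The handler is vmbus_event_proc() or vmbus_event_proc_compat(),
	 * chosen in vmbus_doattach() based on the negotiated vmbus version.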
	 *
	 * NOTE:
	 * As recommended by the Windows guest fellows, we check events before
	 * checking messages.
	 */
	sc->vmbus_event_proc(sc, cpu);

	/*
	 * Check messages.  Mainly management stuff; ultra low rate.
	 */
	msg = msg_base + VMBUS_SINT_MESSAGE;
	if (__predict_false(msg->msg_type != HYPERV_MSGTYPE_NONE)) {
		taskqueue_enqueue(VMBUS_PCPU_GET(sc, message_tq, cpu),
		    VMBUS_PCPU_PTR(sc, message_task, cpu));
	}

	return (FILTER_HANDLED);
}

void
vmbus_handle_intr(struct trapframe *trap_frame)
{
	struct vmbus_softc *sc = vmbus_get_softc();
	int cpu = curcpu;

	/*
	 * Disable preemption.
	 */
	critical_enter();

	/*
	 * Do a little interrupt counting.
	 */
	(*VMBUS_PCPU_GET(sc, intr_cnt, cpu))++;

	vmbus_handle_intr1(sc, trap_frame, cpu);

	/*
	 * Enable preemption.
	 */
	critical_exit();
}

static void
vmbus_synic_setup(void *xsc)
{
	struct vmbus_softc *sc = xsc;
	int cpu = curcpu;
	uint64_t val, orig;
	uint32_t sint;

	if (hyperv_features & CPUID_HV_MSR_VP_INDEX) {
		/* Save virtual processor id. */
		VMBUS_PCPU_GET(sc, vcpuid, cpu) = rdmsr(MSR_HV_VP_INDEX);
	} else {
		/* Set virtual processor id to 0 for compatibility. */
		VMBUS_PCPU_GET(sc, vcpuid, cpu) = 0;
	}

	/*
	 * Setup the SynIC message.
	 */
	orig = rdmsr(MSR_HV_SIMP);
	val = MSR_HV_SIMP_ENABLE | (orig & MSR_HV_SIMP_RSVD_MASK) |
	    ((VMBUS_PCPU_GET(sc, message_dma.hv_paddr, cpu) >> PAGE_SHIFT) <<
	     MSR_HV_SIMP_PGSHIFT);
	wrmsr(MSR_HV_SIMP, val);

	/*
	 * Setup the SynIC event flags.
	 */
	orig = rdmsr(MSR_HV_SIEFP);
	val = MSR_HV_SIEFP_ENABLE | (orig & MSR_HV_SIEFP_RSVD_MASK) |
	    ((VMBUS_PCPU_GET(sc, event_flags_dma.hv_paddr, cpu)
	      >> PAGE_SHIFT) << MSR_HV_SIEFP_PGSHIFT);
	wrmsr(MSR_HV_SIEFP, val);

	/*
	 * Configure and unmask SINT for message and event flags.
	 */
	sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE;
	orig = rdmsr(sint);
	val = sc->vmbus_idtvec | MSR_HV_SINT_AUTOEOI |
	    (orig & MSR_HV_SINT_RSVD_MASK);
	wrmsr(sint, val);

	/*
	 * Configure and unmask SINT for timer.
	 */
	sint = MSR_HV_SINT0 + VMBUS_SINT_TIMER;
	orig = rdmsr(sint);
	val = sc->vmbus_idtvec | MSR_HV_SINT_AUTOEOI |
	    (orig & MSR_HV_SINT_RSVD_MASK);
	wrmsr(sint, val);

	/*
	 * All done; enable SynIC.
	 */
	orig = rdmsr(MSR_HV_SCONTROL);
	val = MSR_HV_SCTRL_ENABLE | (orig & MSR_HV_SCTRL_RSVD_MASK);
	wrmsr(MSR_HV_SCONTROL, val);
}

static void
vmbus_synic_teardown(void *arg)
{
	uint64_t orig;
	uint32_t sint;

	/*
	 * Disable SynIC.
	 */
	orig = rdmsr(MSR_HV_SCONTROL);
	wrmsr(MSR_HV_SCONTROL, (orig & MSR_HV_SCTRL_RSVD_MASK));

	/*
	 * Mask message and event flags SINT.
	 */
	sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE;
	orig = rdmsr(sint);
	wrmsr(sint, orig | MSR_HV_SINT_MASKED);

	/*
	 * Mask timer SINT.
	 */
	sint = MSR_HV_SINT0 + VMBUS_SINT_TIMER;
	orig = rdmsr(sint);
	wrmsr(sint, orig | MSR_HV_SINT_MASKED);

	/*
	 * Teardown SynIC message.
	 */
	orig = rdmsr(MSR_HV_SIMP);
	wrmsr(MSR_HV_SIMP, (orig & MSR_HV_SIMP_RSVD_MASK));

	/*
	 * Teardown SynIC event flags.
	 */
	orig = rdmsr(MSR_HV_SIEFP);
	wrmsr(MSR_HV_SIEFP, (orig & MSR_HV_SIEFP_RSVD_MASK));
}

static int
vmbus_dma_alloc(struct vmbus_softc *sc)
{
	bus_dma_tag_t parent_dtag;
	uint8_t *evtflags;
	int cpu;

	parent_dtag = bus_get_dma_tag(sc->vmbus_dev);
	CPU_FOREACH(cpu) {
		void *ptr;

		/*
		 * Per-cpu messages and event flags.
		 */
		ptr = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
		    PAGE_SIZE, VMBUS_PCPU_PTR(sc, message_dma, cpu),
		    BUS_DMA_WAITOK | BUS_DMA_ZERO);
		if (ptr == NULL)
			return ENOMEM;
		VMBUS_PCPU_GET(sc, message, cpu) = ptr;

		ptr = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
		    PAGE_SIZE, VMBUS_PCPU_PTR(sc, event_flags_dma, cpu),
		    BUS_DMA_WAITOK | BUS_DMA_ZERO);
		if (ptr == NULL)
			return ENOMEM;
		VMBUS_PCPU_GET(sc, event_flags, cpu) = ptr;
	}

	evtflags = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
	    PAGE_SIZE, &sc->vmbus_evtflags_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (evtflags == NULL)
		return ENOMEM;
	sc->vmbus_rx_evtflags = (u_long *)evtflags;
	sc->vmbus_tx_evtflags = (u_long *)(evtflags + (PAGE_SIZE / 2));
	sc->vmbus_evtflags = evtflags;

	sc->vmbus_mnf1 = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
	    PAGE_SIZE, &sc->vmbus_mnf1_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (sc->vmbus_mnf1 == NULL)
		return ENOMEM;

	sc->vmbus_mnf2 = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
	    sizeof(struct vmbus_mnf), &sc->vmbus_mnf2_dma,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (sc->vmbus_mnf2 == NULL)
		return ENOMEM;

	return 0;
}

static void
vmbus_dma_free(struct vmbus_softc *sc)
{
	int cpu;

	if (sc->vmbus_evtflags != NULL) {
		hyperv_dmamem_free(&sc->vmbus_evtflags_dma, sc->vmbus_evtflags);
		sc->vmbus_evtflags = NULL;
		sc->vmbus_rx_evtflags = NULL;
		sc->vmbus_tx_evtflags = NULL;
	}
	if (sc->vmbus_mnf1 != NULL) {
		hyperv_dmamem_free(&sc->vmbus_mnf1_dma, sc->vmbus_mnf1);
		sc->vmbus_mnf1 = NULL;
	}
	if (sc->vmbus_mnf2 != NULL) {
		hyperv_dmamem_free(&sc->vmbus_mnf2_dma, sc->vmbus_mnf2);
		sc->vmbus_mnf2 = NULL;
	}

	CPU_FOREACH(cpu) {
		if (VMBUS_PCPU_GET(sc, message, cpu) != NULL) {
			hyperv_dmamem_free(
			    VMBUS_PCPU_PTR(sc, message_dma, cpu),
			    VMBUS_PCPU_GET(sc, message, cpu));
			VMBUS_PCPU_GET(sc, message, cpu) = NULL;
		}
		if (VMBUS_PCPU_GET(sc, event_flags, cpu) != NULL) {
			hyperv_dmamem_free(
			    VMBUS_PCPU_PTR(sc, event_flags_dma, cpu),
			    VMBUS_PCPU_GET(sc, event_flags, cpu));
			VMBUS_PCPU_GET(sc, event_flags, cpu) = NULL;
		}
	}
}

static int
vmbus_intr_setup(struct vmbus_softc *sc)
{
	int cpu;

	CPU_FOREACH(cpu) {
		char buf[MAXCOMLEN + 1];
		cpuset_t cpu_mask;

		/* Allocate an interrupt counter for Hyper-V interrupt */
		snprintf(buf, sizeof(buf), "cpu%d:hyperv", cpu);
		intrcnt_add(buf, VMBUS_PCPU_PTR(sc, intr_cnt, cpu));

		/*
		 * Setup taskqueue to handle events.  Task will be per-
		 * channel.
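		 * The taskqueue thread is pinned to this CPU unless the
		 * hw.vmbus.pin_evttask tunable is set to 0.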
		 */
		VMBUS_PCPU_GET(sc, event_tq, cpu) = taskqueue_create_fast(
		    "hyperv event", M_WAITOK, taskqueue_thread_enqueue,
		    VMBUS_PCPU_PTR(sc, event_tq, cpu));
		if (vmbus_pin_evttask) {
			CPU_SETOF(cpu, &cpu_mask);
			taskqueue_start_threads_cpuset(
			    VMBUS_PCPU_PTR(sc, event_tq, cpu), 1, PI_NET,
			    &cpu_mask, "hvevent%d", cpu);
		} else {
			taskqueue_start_threads(
			    VMBUS_PCPU_PTR(sc, event_tq, cpu), 1, PI_NET,
			    "hvevent%d", cpu);
		}

		/*
		 * Setup tasks and taskqueues to handle messages.
		 */
		VMBUS_PCPU_GET(sc, message_tq, cpu) = taskqueue_create_fast(
		    "hyperv msg", M_WAITOK, taskqueue_thread_enqueue,
		    VMBUS_PCPU_PTR(sc, message_tq, cpu));
		CPU_SETOF(cpu, &cpu_mask);
		taskqueue_start_threads_cpuset(
		    VMBUS_PCPU_PTR(sc, message_tq, cpu), 1, PI_NET, &cpu_mask,
		    "hvmsg%d", cpu);
		TASK_INIT(VMBUS_PCPU_PTR(sc, message_task, cpu), 0,
		    vmbus_msg_task, sc);
	}

#if defined(__amd64__) && defined(KLD_MODULE)
	pmap_pti_add_kva(VMBUS_ISR_ADDR, VMBUS_ISR_ADDR + PAGE_SIZE, true);
#endif

	/*
	 * All Hyper-V ISR required resources are setup, now let's find a
	 * free IDT vector for Hyper-V ISR and set it up.
	 */
	sc->vmbus_idtvec = lapic_ipi_alloc(pti ? IDTVEC(vmbus_isr_pti) :
	    IDTVEC(vmbus_isr));
	if (sc->vmbus_idtvec < 0) {
#if defined(__amd64__) && defined(KLD_MODULE)
		pmap_pti_remove_kva(VMBUS_ISR_ADDR, VMBUS_ISR_ADDR + PAGE_SIZE);
#endif
		device_printf(sc->vmbus_dev, "cannot find free IDT vector\n");
		return ENXIO;
	}
	if (bootverbose) {
		device_printf(sc->vmbus_dev, "vmbus IDT vector %d\n",
		    sc->vmbus_idtvec);
	}
	return 0;
}

static void
vmbus_intr_teardown(struct vmbus_softc *sc)
{
	int cpu;

	if (sc->vmbus_idtvec >= 0) {
		lapic_ipi_free(sc->vmbus_idtvec);
		sc->vmbus_idtvec = -1;
	}

#if defined(__amd64__) && defined(KLD_MODULE)
	pmap_pti_remove_kva(VMBUS_ISR_ADDR, VMBUS_ISR_ADDR + PAGE_SIZE);
#endif

	CPU_FOREACH(cpu) {
		if (VMBUS_PCPU_GET(sc, event_tq, cpu) != NULL) {
			taskqueue_free(VMBUS_PCPU_GET(sc, event_tq, cpu));
			VMBUS_PCPU_GET(sc, event_tq, cpu) = NULL;
		}
		if (VMBUS_PCPU_GET(sc, message_tq, cpu) != NULL) {
			taskqueue_drain(VMBUS_PCPU_GET(sc, message_tq, cpu),
			    VMBUS_PCPU_PTR(sc, message_task, cpu));
			taskqueue_free(VMBUS_PCPU_GET(sc, message_tq, cpu));
			VMBUS_PCPU_GET(sc, message_tq, cpu) = NULL;
		}
	}
}

static int
vmbus_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
	return (ENOENT);
}

static int
vmbus_child_pnpinfo_str(device_t dev, device_t child, char *buf, size_t buflen)
{
	const struct vmbus_channel *chan;
	char guidbuf[HYPERV_GUID_STRLEN];

	chan = vmbus_get_channel(child);
	if (chan == NULL) {
		/* Event timer device, which does not belong to a channel */
		return (0);
	}

	strlcat(buf, "classid=", buflen);
	hyperv_guid2str(&chan->ch_guid_type, guidbuf, sizeof(guidbuf));
	strlcat(buf, guidbuf, buflen);

	strlcat(buf, " deviceid=", buflen);
	hyperv_guid2str(&chan->ch_guid_inst, guidbuf, sizeof(guidbuf));
	strlcat(buf, guidbuf, buflen);

	return (0);
}

int
vmbus_add_child(struct vmbus_channel *chan)
{
	struct vmbus_softc *sc = chan->ch_vmbus;
	device_t parent = sc->vmbus_dev;

	mtx_lock(&Giant);

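	/*
	 * Add a wildcard child (NULL name); the proper driver is selected
	 * when the child drivers probe against the channel's type GUID.
	 */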
	chan->ch_dev = device_add_child(parent, NULL, -1);
	if (chan->ch_dev == NULL) {
		mtx_unlock(&Giant);
		device_printf(parent, "device_add_child for chan%u failed\n",
		    chan->ch_id);
		return (ENXIO);
	}
	device_set_ivars(chan->ch_dev, chan);
	device_probe_and_attach(chan->ch_dev);

	mtx_unlock(&Giant);
	return (0);
}

int
vmbus_delete_child(struct vmbus_channel *chan)
{
	int error = 0;

	mtx_lock(&Giant);
	if (chan->ch_dev != NULL) {
		error = device_delete_child(chan->ch_vmbus->vmbus_dev,
		    chan->ch_dev);
		chan->ch_dev = NULL;
	}
	mtx_unlock(&Giant);
	return (error);
}

static int
vmbus_sysctl_version(SYSCTL_HANDLER_ARGS)
{
	struct vmbus_softc *sc = arg1;
	char verstr[16];

	snprintf(verstr, sizeof(verstr), "%u.%u",
	    VMBUS_VERSION_MAJOR(sc->vmbus_version),
	    VMBUS_VERSION_MINOR(sc->vmbus_version));
	return sysctl_handle_string(oidp, verstr, sizeof(verstr), req);
}

/*
 * We need the function to make sure the MMIO resource is allocated from the
 * ranges found in _CRS.
 *
 * For the release function, we can use bus_generic_release_resource().
 */
static struct resource *
vmbus_alloc_resource(device_t dev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	device_t parent = device_get_parent(dev);
	struct resource *res;

#ifdef NEW_PCIB
	if (type == SYS_RES_MEMORY) {
		struct vmbus_softc *sc = device_get_softc(dev);

		res = pcib_host_res_alloc(&sc->vmbus_mmio_res, child, type,
		    rid, start, end, count, flags);
	} else
#endif
	{
		res = BUS_ALLOC_RESOURCE(parent, child, type, rid, start,
		    end, count, flags);
	}

	return (res);
}

static int
vmbus_alloc_msi(device_t bus, device_t dev, int count, int maxcount, int *irqs)
{

	return (PCIB_ALLOC_MSI(device_get_parent(bus), dev, count, maxcount,
	    irqs));
}

static int
vmbus_release_msi(device_t bus, device_t dev, int count, int *irqs)
{

	return (PCIB_RELEASE_MSI(device_get_parent(bus), dev, count, irqs));
}

static int
vmbus_alloc_msix(device_t bus, device_t dev, int *irq)
{

	return (PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq));
}

static int
vmbus_release_msix(device_t bus, device_t dev, int irq)
{

	return (PCIB_RELEASE_MSIX(device_get_parent(bus), dev, irq));
}

static int
vmbus_map_msi(device_t bus, device_t dev, int irq, uint64_t *addr,
    uint32_t *data)
{

	return (PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data));
}

static uint32_t
vmbus_get_version_method(device_t bus, device_t dev)
{
	struct vmbus_softc *sc = device_get_softc(bus);

	return sc->vmbus_version;
}

static int
vmbus_probe_guid_method(device_t bus, device_t dev,
    const struct hyperv_guid *guid)
{
	const struct vmbus_channel *chan = vmbus_get_channel(dev);

	if (memcmp(&chan->ch_guid_type, guid, sizeof(struct hyperv_guid)) == 0)
		return 0;
	return ENXIO;
}

static uint32_t
vmbus_get_vcpu_id_method(device_t bus, device_t dev, int cpu)
{
	const struct vmbus_softc *sc = device_get_softc(bus);

	return (VMBUS_PCPU_GET(sc, vcpuid, cpu));
}

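/* Return the per-cpu event taskqueue created in vmbus_intr_setup(). */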
static struct taskqueue *
vmbus_get_eventtq_method(device_t bus, device_t dev __unused, int cpu)
{
	const struct vmbus_softc *sc = device_get_softc(bus);

	KASSERT(cpu >= 0 && cpu < mp_ncpus, ("invalid cpu%d", cpu));
	return (VMBUS_PCPU_GET(sc, event_tq, cpu));
}

#ifdef NEW_PCIB
#define VTPM_BASE_ADDR 0xfed40000
#define FOUR_GB (1ULL << 32)

enum parse_pass { parse_64, parse_32 };

struct parse_context {
	device_t vmbus_dev;
	enum parse_pass pass;
};

static ACPI_STATUS
parse_crs(ACPI_RESOURCE *res, void *ctx)
{
	const struct parse_context *pc = ctx;
	device_t vmbus_dev = pc->vmbus_dev;

	struct vmbus_softc *sc = device_get_softc(vmbus_dev);
	UINT64 start, end;

	switch (res->Type) {
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		start = res->Data.Address32.Address.Minimum;
		end = res->Data.Address32.Address.Maximum;
		break;

	case ACPI_RESOURCE_TYPE_ADDRESS64:
		start = res->Data.Address64.Address.Minimum;
		end = res->Data.Address64.Address.Maximum;
		break;

	default:
		/* Unused types. */
		return (AE_OK);
	}

	/*
	 * We don't use <1MB addresses.
	 */
	if (end < 0x100000)
		return (AE_OK);

	/* Don't conflict with vTPM. */
	if (end >= VTPM_BASE_ADDR && start < VTPM_BASE_ADDR)
		end = VTPM_BASE_ADDR - 1;

	if ((pc->pass == parse_32 && start < FOUR_GB) ||
	    (pc->pass == parse_64 && start >= FOUR_GB))
		pcib_host_res_decodes(&sc->vmbus_mmio_res, SYS_RES_MEMORY,
		    start, end, 0);

	return (AE_OK);
}

static void
vmbus_get_crs(device_t dev, device_t vmbus_dev, enum parse_pass pass)
{
	struct parse_context pc;
	ACPI_STATUS status;

	if (bootverbose)
		device_printf(dev, "walking _CRS, pass=%d\n", pass);

	pc.vmbus_dev = vmbus_dev;
	pc.pass = pass;
	status = AcpiWalkResources(acpi_get_handle(dev), "_CRS",
	    parse_crs, &pc);

	if (bootverbose && ACPI_FAILURE(status))
		device_printf(dev, "_CRS: not found, pass=%d\n", pass);
}

static void
vmbus_get_mmio_res_pass(device_t dev, enum parse_pass pass)
{
	device_t acpi0, parent;

	parent = device_get_parent(dev);

	acpi0 = device_get_parent(parent);
	if (strcmp("acpi0", device_get_nameunit(acpi0)) == 0) {
		device_t *children;
		int count;

		/*
		 * Try to locate VMBUS resources and find _CRS on them.
		 */
		if (device_get_children(acpi0, &children, &count) == 0) {
			int i;

			for (i = 0; i < count; ++i) {
				if (!device_is_attached(children[i]))
					continue;

				if (strcmp("vmbus_res",
				    device_get_name(children[i])) == 0)
					vmbus_get_crs(children[i], dev, pass);
			}
			free(children, M_TEMP);
		}

		/*
		 * Try to find _CRS on acpi.
		 */
		vmbus_get_crs(acpi0, dev, pass);
	} else {
		device_printf(dev, "not grandchild of acpi\n");
	}

	/*
	 * Try to find _CRS on parent.
	 */
	vmbus_get_crs(parent, dev, pass);
}

static void
vmbus_get_mmio_res(device_t dev)
{
	struct vmbus_softc *sc = device_get_softc(dev);
	/*
	 * We walk the resources twice to make sure that: in the resource
	 * list, the 32-bit resources appear behind the 64-bit resources.
	 * NB: resource_list_add() uses INSERT_TAIL.
	 * This way, when we iterate through the list to find a range for a
	 * 64-bit BAR in vmbus_alloc_resource(), we can make sure we try to
	 * use >4GB ranges first.
	 */
	pcib_host_res_init(dev, &sc->vmbus_mmio_res);

	vmbus_get_mmio_res_pass(dev, parse_64);
	vmbus_get_mmio_res_pass(dev, parse_32);
}

/*
 * On Gen2 VMs, Hyper-V provides mmio space for framebuffer.
 * This mmio address range is not useable for other PCI devices.
 * Currently only efifb and vbefb drivers are using this range without
 * reserving it from system.
 * Therefore, vmbus driver reserves it before any other PCI device
 * drivers start to request mmio addresses.
 */
static struct resource *hv_fb_res;

static void
vmbus_fb_mmio_res(device_t dev)
{
	struct efi_fb *efifb;
	struct vbe_fb *vbefb;
	rman_res_t fb_start, fb_end, fb_count;
	int fb_height, fb_width;
	caddr_t kmdp;

	struct vmbus_softc *sc = device_get_softc(dev);
	int rid = 0;

	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");
	efifb = (struct efi_fb *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_FB);
	vbefb = (struct vbe_fb *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_VBE_FB);
	if (efifb != NULL) {
		fb_start = efifb->fb_addr;
		fb_end = efifb->fb_addr + efifb->fb_size;
		fb_count = efifb->fb_size;
		fb_height = efifb->fb_height;
		fb_width = efifb->fb_width;
	} else if (vbefb != NULL) {
		fb_start = vbefb->fb_addr;
		fb_end = vbefb->fb_addr + vbefb->fb_size;
		fb_count = vbefb->fb_size;
		fb_height = vbefb->fb_height;
		fb_width = vbefb->fb_width;
	} else {
		if (bootverbose)
			device_printf(dev,
			    "no preloaded kernel fb information\n");
		/* We are on Gen1 VM, just return. */
		return;
	}

	if (bootverbose)
		device_printf(dev,
		    "fb: fb_addr: %#jx, size: %#jx, "
		    "actual size needed: 0x%x\n",
		    fb_start, fb_count, fb_height * fb_width);

	hv_fb_res = pcib_host_res_alloc(&sc->vmbus_mmio_res, dev,
	    SYS_RES_MEMORY, &rid, fb_start, fb_end, fb_count,
	    RF_ACTIVE | rman_make_alignment_flags(PAGE_SIZE));

	if (hv_fb_res && bootverbose)
		device_printf(dev,
		    "successfully reserved memory for framebuffer "
		    "starting at %#jx, size %#jx\n",
		    fb_start, fb_count);
}

static void
vmbus_free_mmio_res(device_t dev)
{
	struct vmbus_softc *sc = device_get_softc(dev);

	pcib_host_res_free(dev, &sc->vmbus_mmio_res);

	if (hv_fb_res)
		hv_fb_res = NULL;
}
#endif	/* NEW_PCIB */

static void
vmbus_identify(driver_t *driver, device_t parent)
{

	if (device_get_unit(parent) != 0 || vm_guest != VM_GUEST_HV ||
	    (hyperv_features & CPUID_HV_MSR_SYNIC) == 0)
		return;
	device_add_child(parent, "vmbus", -1);
}

static int
vmbus_probe(device_t dev)
{

	if (device_get_unit(dev) != 0 || vm_guest != VM_GUEST_HV ||
	    (hyperv_features & CPUID_HV_MSR_SYNIC) == 0)
		return (ENXIO);

	device_set_desc(dev, "Hyper-V Vmbus");
	return (BUS_PROBE_DEFAULT);
}

/**
 * @brief Main vmbus driver initialization routine.
 *
 * Here, we
 * - initialize the vmbus driver context
 * - setup various driver entry points
 * - invoke the vmbus hv main init routine
 * - get the irq resource
 * - invoke the vmbus to add the vmbus root device
 * - setup the vmbus root device
 * - retrieve the channel offers
 */
static int
vmbus_doattach(struct vmbus_softc *sc)
{
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;
	int ret;

	if (sc->vmbus_flags & VMBUS_FLAG_ATTACHED)
		return (0);

#ifdef NEW_PCIB
	vmbus_get_mmio_res(sc->vmbus_dev);
	vmbus_fb_mmio_res(sc->vmbus_dev);
#endif

	sc->vmbus_flags |= VMBUS_FLAG_ATTACHED;

	sc->vmbus_gpadl = VMBUS_GPADL_START;
	mtx_init(&sc->vmbus_prichan_lock, "vmbus prichan", NULL, MTX_DEF);
	TAILQ_INIT(&sc->vmbus_prichans);
	mtx_init(&sc->vmbus_chan_lock, "vmbus channel", NULL, MTX_DEF);
	TAILQ_INIT(&sc->vmbus_chans);
	sc->vmbus_chmap = malloc(
	    sizeof(struct vmbus_channel *) * VMBUS_CHAN_MAX, M_DEVBUF,
	    M_WAITOK | M_ZERO);

	/*
	 * Create context for "post message" Hypercalls
	 */
	sc->vmbus_xc = vmbus_xact_ctx_create(bus_get_dma_tag(sc->vmbus_dev),
	    HYPERCALL_POSTMSGIN_SIZE, VMBUS_MSG_SIZE,
	    sizeof(struct vmbus_msghc));
	if (sc->vmbus_xc == NULL) {
		ret = ENXIO;
		goto cleanup;
	}

	/*
	 * Allocate DMA stuffs.
	 */
	ret = vmbus_dma_alloc(sc);
	if (ret != 0)
		goto cleanup;

	/*
	 * Setup interrupt.
	 */
	ret = vmbus_intr_setup(sc);
	if (ret != 0)
		goto cleanup;

	/*
	 * Setup SynIC.
	 */
	if (bootverbose)
		device_printf(sc->vmbus_dev, "smp_started = %d\n", smp_started);
	smp_rendezvous(NULL, vmbus_synic_setup, NULL, sc);
	sc->vmbus_flags |= VMBUS_FLAG_SYNIC;

	/*
	 * Initialize vmbus, e.g. connect to Hypervisor.
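	 * vmbus_init() negotiates the protocol version, trying the entries
	 * of vmbus_version[] from newest to oldest until one is accepted.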
	 */
	ret = vmbus_init(sc);
	if (ret != 0)
		goto cleanup;

	if (sc->vmbus_version == VMBUS_VERSION_WS2008 ||
	    sc->vmbus_version == VMBUS_VERSION_WIN7)
		sc->vmbus_event_proc = vmbus_event_proc_compat;
	else
		sc->vmbus_event_proc = vmbus_event_proc;

	ret = vmbus_scan(sc);
	if (ret != 0)
		goto cleanup;

	ctx = device_get_sysctl_ctx(sc->vmbus_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vmbus_dev));
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "version",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    vmbus_sysctl_version, "A", "vmbus version");

	return (ret);

cleanup:
	vmbus_scan_teardown(sc);
	vmbus_intr_teardown(sc);
	vmbus_dma_free(sc);
	if (sc->vmbus_xc != NULL) {
		vmbus_xact_ctx_destroy(sc->vmbus_xc);
		sc->vmbus_xc = NULL;
	}
	free(__DEVOLATILE(void *, sc->vmbus_chmap), M_DEVBUF);
	mtx_destroy(&sc->vmbus_prichan_lock);
	mtx_destroy(&sc->vmbus_chan_lock);

	return (ret);
}

static void
vmbus_event_proc_dummy(struct vmbus_softc *sc __unused, int cpu __unused)
{
}

#ifdef EARLY_AP_STARTUP

static void
vmbus_intrhook(void *xsc)
{
	struct vmbus_softc *sc = xsc;

	if (bootverbose)
		device_printf(sc->vmbus_dev, "intrhook\n");
	vmbus_doattach(sc);
	config_intrhook_disestablish(&sc->vmbus_intrhook);
}

#endif	/* EARLY_AP_STARTUP */

static int
vmbus_attach(device_t dev)
{
	vmbus_sc = device_get_softc(dev);
	vmbus_sc->vmbus_dev = dev;
	vmbus_sc->vmbus_idtvec = -1;

	/*
	 * Event processing logic will be configured:
	 * - After the vmbus protocol version negotiation.
	 * - Before we request channel offers.
	 */
	vmbus_sc->vmbus_event_proc = vmbus_event_proc_dummy;

#ifdef EARLY_AP_STARTUP
	/*
	 * Defer the real attach until the pause(9) works as expected.
	 */
	vmbus_sc->vmbus_intrhook.ich_func = vmbus_intrhook;
	vmbus_sc->vmbus_intrhook.ich_arg = vmbus_sc;
	config_intrhook_establish(&vmbus_sc->vmbus_intrhook);
#else	/* !EARLY_AP_STARTUP */
	/*
	 * If the system has already booted and thread
	 * scheduling is possible, as indicated by the
	 * global cold set to zero, we just call the driver
	 * initialization directly.
	 */
	if (!cold)
		vmbus_doattach(vmbus_sc);
#endif	/* EARLY_AP_STARTUP */

	return (0);
}

static int
vmbus_detach(device_t dev)
{
	struct vmbus_softc *sc = device_get_softc(dev);

	bus_generic_detach(dev);
	vmbus_chan_destroy_all(sc);

	vmbus_scan_teardown(sc);

	vmbus_disconnect(sc);

	if (sc->vmbus_flags & VMBUS_FLAG_SYNIC) {
		sc->vmbus_flags &= ~VMBUS_FLAG_SYNIC;
		smp_rendezvous(NULL, vmbus_synic_teardown, NULL, NULL);
	}

	vmbus_intr_teardown(sc);
	vmbus_dma_free(sc);

	if (sc->vmbus_xc != NULL) {
		vmbus_xact_ctx_destroy(sc->vmbus_xc);
		sc->vmbus_xc = NULL;
	}

	free(__DEVOLATILE(void *, sc->vmbus_chmap), M_DEVBUF);
	mtx_destroy(&sc->vmbus_prichan_lock);
	mtx_destroy(&sc->vmbus_chan_lock);

#ifdef NEW_PCIB
	vmbus_free_mmio_res(dev);
#endif

	return (0);
}

#ifndef EARLY_AP_STARTUP

static void
vmbus_sysinit(void *arg __unused)
{
	struct vmbus_softc *sc = vmbus_get_softc();

	if (vm_guest != VM_GUEST_HV || sc == NULL)
		return;

	/*
	 * If the system has already booted and thread
	 * scheduling is possible, as indicated by the
	 * global cold set to zero, we just call the driver
	 * initialization directly.
	 */
	if (!cold)
		vmbus_doattach(sc);
}
/*
 * NOTE:
 * We have to start as the last step of SI_SUB_SMP, i.e. after SMP is
 * initialized.
 */
SYSINIT(vmbus_initialize, SI_SUB_SMP, SI_ORDER_ANY, vmbus_sysinit, NULL);

#endif	/* !EARLY_AP_STARTUP */