1 /*- 2 * Copyright (c) 2009-2012,2016-2017 Microsoft Corp. 3 * Copyright (c) 2012 NetApp Inc. 4 * Copyright (c) 2012 Citrix Inc. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice unmodified, this list of conditions, and the following 12 * disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 /* 30 * VM Bus Driver Implementation 31 */ 32 #include <sys/cdefs.h> 33 __FBSDID("$FreeBSD$"); 34 35 #include <sys/param.h> 36 #include <sys/bus.h> 37 #include <sys/kernel.h> 38 #include <sys/lock.h> 39 #include <sys/malloc.h> 40 #include <sys/module.h> 41 #include <sys/mutex.h> 42 #include <sys/smp.h> 43 #include <sys/sysctl.h> 44 #include <sys/systm.h> 45 #include <sys/taskqueue.h> 46 47 #include <machine/bus.h> 48 #include <machine/intr_machdep.h> 49 #include <machine/resource.h> 50 #include <x86/include/apicvar.h> 51 52 #include <contrib/dev/acpica/include/acpi.h> 53 #include <dev/acpica/acpivar.h> 54 55 #include <dev/hyperv/include/hyperv.h> 56 #include <dev/hyperv/include/vmbus_xact.h> 57 #include <dev/hyperv/vmbus/hyperv_reg.h> 58 #include <dev/hyperv/vmbus/hyperv_var.h> 59 #include <dev/hyperv/vmbus/vmbus_reg.h> 60 #include <dev/hyperv/vmbus/vmbus_var.h> 61 #include <dev/hyperv/vmbus/vmbus_chanvar.h> 62 63 #include "acpi_if.h" 64 #include "pcib_if.h" 65 #include "vmbus_if.h" 66 67 #define VMBUS_GPADL_START 0xe1e10 68 69 struct vmbus_msghc { 70 struct vmbus_xact *mh_xact; 71 struct hypercall_postmsg_in mh_inprm_save; 72 }; 73 74 static void vmbus_identify(driver_t *, device_t); 75 static int vmbus_probe(device_t); 76 static int vmbus_attach(device_t); 77 static int vmbus_detach(device_t); 78 static int vmbus_read_ivar(device_t, device_t, int, 79 uintptr_t *); 80 static int vmbus_child_pnpinfo_str(device_t, device_t, 81 char *, size_t); 82 static struct resource *vmbus_alloc_resource(device_t dev, 83 device_t child, int type, int *rid, 84 rman_res_t start, rman_res_t end, 85 rman_res_t count, u_int flags); 86 static int vmbus_alloc_msi(device_t bus, device_t dev, 87 int count, int maxcount, int *irqs); 88 static int vmbus_release_msi(device_t bus, device_t dev, 89 int count, int *irqs); 90 static int vmbus_alloc_msix(device_t bus, device_t dev, 91 int *irq); 92 static int vmbus_release_msix(device_t bus, device_t dev, 93 int irq); 94 static int vmbus_map_msi(device_t bus, device_t dev, 95 int irq, uint64_t *addr, uint32_t *data); 96 static uint32_t vmbus_get_version_method(device_t, device_t); 97 static int vmbus_probe_guid_method(device_t, device_t, 98 const struct hyperv_guid *); 99 static uint32_t vmbus_get_vcpu_id_method(device_t bus, 100 device_t dev, int cpu); 101 static struct taskqueue *vmbus_get_eventtq_method(device_t, device_t, 102 int); 103 #ifdef EARLY_AP_STARTUP 104 static void vmbus_intrhook(void *); 105 #endif 106 107 static int vmbus_init(struct vmbus_softc *); 108 static int vmbus_connect(struct vmbus_softc *, uint32_t); 109 static int vmbus_req_channels(struct vmbus_softc *sc); 110 static void vmbus_disconnect(struct vmbus_softc *); 111 static int vmbus_scan(struct vmbus_softc *); 112 static void vmbus_scan_teardown(struct vmbus_softc *); 113 static void vmbus_scan_done(struct vmbus_softc *, 114 const struct vmbus_message *); 115 static void vmbus_chanmsg_handle(struct vmbus_softc *, 116 const struct vmbus_message *); 117 static void vmbus_msg_task(void *, int); 118 static void vmbus_synic_setup(void *); 119 static void vmbus_synic_teardown(void *); 120 static int vmbus_sysctl_version(SYSCTL_HANDLER_ARGS); 121 static int vmbus_dma_alloc(struct vmbus_softc *); 122 static void vmbus_dma_free(struct vmbus_softc *); 123 static int vmbus_intr_setup(struct vmbus_softc *); 124 static void vmbus_intr_teardown(struct vmbus_softc *); 125 static int vmbus_doattach(struct vmbus_softc *); 126 static void vmbus_event_proc_dummy(struct vmbus_softc *, 127 int); 128 129 static struct vmbus_softc *vmbus_sc; 130 131 SYSCTL_NODE(_hw, OID_AUTO, vmbus, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 132 "Hyper-V vmbus"); 133 134 static int vmbus_pin_evttask = 1; 135 SYSCTL_INT(_hw_vmbus, OID_AUTO, pin_evttask, CTLFLAG_RDTUN, 136 &vmbus_pin_evttask, 0, "Pin event tasks to their respective CPU"); 137 138 extern inthand_t IDTVEC(vmbus_isr); 139 140 static const uint32_t vmbus_version[] = { 141 VMBUS_VERSION_WIN8_1, 142 VMBUS_VERSION_WIN8, 143 VMBUS_VERSION_WIN7, 144 VMBUS_VERSION_WS2008 145 }; 146 147 static const vmbus_chanmsg_proc_t 148 vmbus_chanmsg_handlers[VMBUS_CHANMSG_TYPE_MAX] = { 149 VMBUS_CHANMSG_PROC(CHOFFER_DONE, vmbus_scan_done), 150 VMBUS_CHANMSG_PROC_WAKEUP(CONNECT_RESP) 151 }; 152 153 static device_method_t vmbus_methods[] = { 154 /* Device interface */ 155 DEVMETHOD(device_identify, vmbus_identify), 156 DEVMETHOD(device_probe, vmbus_probe), 157 DEVMETHOD(device_attach, vmbus_attach), 158 DEVMETHOD(device_detach, vmbus_detach), 159 DEVMETHOD(device_shutdown, bus_generic_shutdown), 160 DEVMETHOD(device_suspend, bus_generic_suspend), 161 DEVMETHOD(device_resume, bus_generic_resume), 162 163 /* Bus interface */ 164 DEVMETHOD(bus_add_child, bus_generic_add_child), 165 DEVMETHOD(bus_print_child, bus_generic_print_child), 166 DEVMETHOD(bus_read_ivar, vmbus_read_ivar), 167 DEVMETHOD(bus_child_pnpinfo_str, vmbus_child_pnpinfo_str), 168 DEVMETHOD(bus_alloc_resource, vmbus_alloc_resource), 169 DEVMETHOD(bus_release_resource, bus_generic_release_resource), 170 DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), 171 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), 172 DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), 173 DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), 174 #if __FreeBSD_version >= 1100000 175 DEVMETHOD(bus_get_cpus, bus_generic_get_cpus), 176 #endif 177 178 /* pcib interface */ 179 DEVMETHOD(pcib_alloc_msi, vmbus_alloc_msi), 180 DEVMETHOD(pcib_release_msi, vmbus_release_msi), 181 DEVMETHOD(pcib_alloc_msix, vmbus_alloc_msix), 182 DEVMETHOD(pcib_release_msix, vmbus_release_msix), 183 DEVMETHOD(pcib_map_msi, vmbus_map_msi), 184 185 /* Vmbus interface */ 186 DEVMETHOD(vmbus_get_version, vmbus_get_version_method), 187 DEVMETHOD(vmbus_probe_guid, vmbus_probe_guid_method), 188 DEVMETHOD(vmbus_get_vcpu_id, vmbus_get_vcpu_id_method), 189 DEVMETHOD(vmbus_get_event_taskq, vmbus_get_eventtq_method), 190 191 DEVMETHOD_END 192 }; 193 194 static driver_t vmbus_driver = { 195 "vmbus", 196 vmbus_methods, 197 sizeof(struct vmbus_softc) 198 }; 199 200 static devclass_t vmbus_devclass; 201 202 DRIVER_MODULE(vmbus, pcib, vmbus_driver, vmbus_devclass, NULL, NULL); 203 DRIVER_MODULE(vmbus, acpi_syscontainer, vmbus_driver, vmbus_devclass, 204 NULL, NULL); 205 206 MODULE_DEPEND(vmbus, acpi, 1, 1, 1); 207 MODULE_DEPEND(vmbus, pci, 1, 1, 1); 208 MODULE_VERSION(vmbus, 1); 209 210 static __inline struct vmbus_softc * 211 vmbus_get_softc(void) 212 { 213 return vmbus_sc; 214 } 215 216 void 217 vmbus_msghc_reset(struct vmbus_msghc *mh, size_t dsize) 218 { 219 struct hypercall_postmsg_in *inprm; 220 221 if (dsize > HYPERCALL_POSTMSGIN_DSIZE_MAX) 222 panic("invalid data size %zu", dsize); 223 224 inprm = vmbus_xact_req_data(mh->mh_xact); 225 memset(inprm, 0, HYPERCALL_POSTMSGIN_SIZE); 226 inprm->hc_connid = VMBUS_CONNID_MESSAGE; 227 inprm->hc_msgtype = HYPERV_MSGTYPE_CHANNEL; 228 inprm->hc_dsize = dsize; 229 } 230 231 struct vmbus_msghc * 232 vmbus_msghc_get(struct vmbus_softc *sc, size_t dsize) 233 { 234 struct vmbus_msghc *mh; 235 struct vmbus_xact *xact; 236 237 if (dsize > HYPERCALL_POSTMSGIN_DSIZE_MAX) 238 panic("invalid data size %zu", dsize); 239 240 xact = vmbus_xact_get(sc->vmbus_xc, 241 dsize + __offsetof(struct hypercall_postmsg_in, hc_data[0])); 242 if (xact == NULL) 243 return (NULL); 244 245 mh = vmbus_xact_priv(xact, sizeof(*mh)); 246 mh->mh_xact = xact; 247 248 vmbus_msghc_reset(mh, dsize); 249 return (mh); 250 } 251 252 void 253 vmbus_msghc_put(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh) 254 { 255 256 vmbus_xact_put(mh->mh_xact); 257 } 258 259 void * 260 vmbus_msghc_dataptr(struct vmbus_msghc *mh) 261 { 262 struct hypercall_postmsg_in *inprm; 263 264 inprm = vmbus_xact_req_data(mh->mh_xact); 265 return (inprm->hc_data); 266 } 267 268 int 269 vmbus_msghc_exec_noresult(struct vmbus_msghc *mh) 270 { 271 sbintime_t time = SBT_1MS; 272 struct hypercall_postmsg_in *inprm; 273 bus_addr_t inprm_paddr; 274 int i; 275 276 inprm = vmbus_xact_req_data(mh->mh_xact); 277 inprm_paddr = vmbus_xact_req_paddr(mh->mh_xact); 278 279 /* 280 * Save the input parameter so that we could restore the input 281 * parameter if the Hypercall failed. 282 * 283 * XXX 284 * Is this really necessary?! i.e. Will the Hypercall ever 285 * overwrite the input parameter? 286 */ 287 memcpy(&mh->mh_inprm_save, inprm, HYPERCALL_POSTMSGIN_SIZE); 288 289 /* 290 * In order to cope with transient failures, e.g. insufficient 291 * resources on host side, we retry the post message Hypercall 292 * several times. 20 retries seem sufficient. 293 */ 294 #define HC_RETRY_MAX 20 295 296 for (i = 0; i < HC_RETRY_MAX; ++i) { 297 uint64_t status; 298 299 status = hypercall_post_message(inprm_paddr); 300 if (status == HYPERCALL_STATUS_SUCCESS) 301 return 0; 302 303 pause_sbt("hcpmsg", time, 0, C_HARDCLOCK); 304 if (time < SBT_1S * 2) 305 time *= 2; 306 307 /* Restore input parameter and try again */ 308 memcpy(inprm, &mh->mh_inprm_save, HYPERCALL_POSTMSGIN_SIZE); 309 } 310 311 #undef HC_RETRY_MAX 312 313 return EIO; 314 } 315 316 int 317 vmbus_msghc_exec(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh) 318 { 319 int error; 320 321 vmbus_xact_activate(mh->mh_xact); 322 error = vmbus_msghc_exec_noresult(mh); 323 if (error) 324 vmbus_xact_deactivate(mh->mh_xact); 325 return error; 326 } 327 328 void 329 vmbus_msghc_exec_cancel(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh) 330 { 331 332 vmbus_xact_deactivate(mh->mh_xact); 333 } 334 335 const struct vmbus_message * 336 vmbus_msghc_wait_result(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh) 337 { 338 size_t resp_len; 339 340 return (vmbus_xact_wait(mh->mh_xact, &resp_len)); 341 } 342 343 const struct vmbus_message * 344 vmbus_msghc_poll_result(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh) 345 { 346 size_t resp_len; 347 348 return (vmbus_xact_poll(mh->mh_xact, &resp_len)); 349 } 350 351 void 352 vmbus_msghc_wakeup(struct vmbus_softc *sc, const struct vmbus_message *msg) 353 { 354 355 vmbus_xact_ctx_wakeup(sc->vmbus_xc, msg, sizeof(*msg)); 356 } 357 358 uint32_t 359 vmbus_gpadl_alloc(struct vmbus_softc *sc) 360 { 361 uint32_t gpadl; 362 363 again: 364 gpadl = atomic_fetchadd_int(&sc->vmbus_gpadl, 1); 365 if (gpadl == 0) 366 goto again; 367 return (gpadl); 368 } 369 370 static int 371 vmbus_connect(struct vmbus_softc *sc, uint32_t version) 372 { 373 struct vmbus_chanmsg_connect *req; 374 const struct vmbus_message *msg; 375 struct vmbus_msghc *mh; 376 int error, done = 0; 377 378 mh = vmbus_msghc_get(sc, sizeof(*req)); 379 if (mh == NULL) 380 return ENXIO; 381 382 req = vmbus_msghc_dataptr(mh); 383 req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CONNECT; 384 req->chm_ver = version; 385 req->chm_evtflags = sc->vmbus_evtflags_dma.hv_paddr; 386 req->chm_mnf1 = sc->vmbus_mnf1_dma.hv_paddr; 387 req->chm_mnf2 = sc->vmbus_mnf2_dma.hv_paddr; 388 389 error = vmbus_msghc_exec(sc, mh); 390 if (error) { 391 vmbus_msghc_put(sc, mh); 392 return error; 393 } 394 395 msg = vmbus_msghc_wait_result(sc, mh); 396 done = ((const struct vmbus_chanmsg_connect_resp *) 397 msg->msg_data)->chm_done; 398 399 vmbus_msghc_put(sc, mh); 400 401 return (done ? 0 : EOPNOTSUPP); 402 } 403 404 static int 405 vmbus_init(struct vmbus_softc *sc) 406 { 407 int i; 408 409 for (i = 0; i < nitems(vmbus_version); ++i) { 410 int error; 411 412 error = vmbus_connect(sc, vmbus_version[i]); 413 if (!error) { 414 sc->vmbus_version = vmbus_version[i]; 415 device_printf(sc->vmbus_dev, "version %u.%u\n", 416 VMBUS_VERSION_MAJOR(sc->vmbus_version), 417 VMBUS_VERSION_MINOR(sc->vmbus_version)); 418 return 0; 419 } 420 } 421 return ENXIO; 422 } 423 424 static void 425 vmbus_disconnect(struct vmbus_softc *sc) 426 { 427 struct vmbus_chanmsg_disconnect *req; 428 struct vmbus_msghc *mh; 429 int error; 430 431 mh = vmbus_msghc_get(sc, sizeof(*req)); 432 if (mh == NULL) { 433 device_printf(sc->vmbus_dev, 434 "can not get msg hypercall for disconnect\n"); 435 return; 436 } 437 438 req = vmbus_msghc_dataptr(mh); 439 req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_DISCONNECT; 440 441 error = vmbus_msghc_exec_noresult(mh); 442 vmbus_msghc_put(sc, mh); 443 444 if (error) { 445 device_printf(sc->vmbus_dev, 446 "disconnect msg hypercall failed\n"); 447 } 448 } 449 450 static int 451 vmbus_req_channels(struct vmbus_softc *sc) 452 { 453 struct vmbus_chanmsg_chrequest *req; 454 struct vmbus_msghc *mh; 455 int error; 456 457 mh = vmbus_msghc_get(sc, sizeof(*req)); 458 if (mh == NULL) 459 return ENXIO; 460 461 req = vmbus_msghc_dataptr(mh); 462 req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHREQUEST; 463 464 error = vmbus_msghc_exec_noresult(mh); 465 vmbus_msghc_put(sc, mh); 466 467 return error; 468 } 469 470 static void 471 vmbus_scan_done_task(void *xsc, int pending __unused) 472 { 473 struct vmbus_softc *sc = xsc; 474 475 mtx_lock(&Giant); 476 sc->vmbus_scandone = true; 477 mtx_unlock(&Giant); 478 wakeup(&sc->vmbus_scandone); 479 } 480 481 static void 482 vmbus_scan_done(struct vmbus_softc *sc, 483 const struct vmbus_message *msg __unused) 484 { 485 486 taskqueue_enqueue(sc->vmbus_devtq, &sc->vmbus_scandone_task); 487 } 488 489 static int 490 vmbus_scan(struct vmbus_softc *sc) 491 { 492 int error; 493 494 /* 495 * Identify, probe and attach for non-channel devices. 496 */ 497 bus_generic_probe(sc->vmbus_dev); 498 bus_generic_attach(sc->vmbus_dev); 499 500 /* 501 * This taskqueue serializes vmbus devices' attach and detach 502 * for channel offer and rescind messages. 503 */ 504 sc->vmbus_devtq = taskqueue_create("vmbus dev", M_WAITOK, 505 taskqueue_thread_enqueue, &sc->vmbus_devtq); 506 taskqueue_start_threads(&sc->vmbus_devtq, 1, PI_NET, "vmbusdev"); 507 TASK_INIT(&sc->vmbus_scandone_task, 0, vmbus_scan_done_task, sc); 508 509 /* 510 * This taskqueue handles sub-channel detach, so that vmbus 511 * device's detach running in vmbus_devtq can drain its sub- 512 * channels. 513 */ 514 sc->vmbus_subchtq = taskqueue_create("vmbus subch", M_WAITOK, 515 taskqueue_thread_enqueue, &sc->vmbus_subchtq); 516 taskqueue_start_threads(&sc->vmbus_subchtq, 1, PI_NET, "vmbussch"); 517 518 /* 519 * Start vmbus scanning. 520 */ 521 error = vmbus_req_channels(sc); 522 if (error) { 523 device_printf(sc->vmbus_dev, "channel request failed: %d\n", 524 error); 525 return (error); 526 } 527 528 /* 529 * Wait for all vmbus devices from the initial channel offers to be 530 * attached. 531 */ 532 GIANT_REQUIRED; 533 while (!sc->vmbus_scandone) 534 mtx_sleep(&sc->vmbus_scandone, &Giant, 0, "vmbusdev", 0); 535 536 if (bootverbose) { 537 device_printf(sc->vmbus_dev, "device scan, probe and attach " 538 "done\n"); 539 } 540 return (0); 541 } 542 543 static void 544 vmbus_scan_teardown(struct vmbus_softc *sc) 545 { 546 547 GIANT_REQUIRED; 548 if (sc->vmbus_devtq != NULL) { 549 mtx_unlock(&Giant); 550 taskqueue_free(sc->vmbus_devtq); 551 mtx_lock(&Giant); 552 sc->vmbus_devtq = NULL; 553 } 554 if (sc->vmbus_subchtq != NULL) { 555 mtx_unlock(&Giant); 556 taskqueue_free(sc->vmbus_subchtq); 557 mtx_lock(&Giant); 558 sc->vmbus_subchtq = NULL; 559 } 560 } 561 562 static void 563 vmbus_chanmsg_handle(struct vmbus_softc *sc, const struct vmbus_message *msg) 564 { 565 vmbus_chanmsg_proc_t msg_proc; 566 uint32_t msg_type; 567 568 msg_type = ((const struct vmbus_chanmsg_hdr *)msg->msg_data)->chm_type; 569 if (msg_type >= VMBUS_CHANMSG_TYPE_MAX) { 570 device_printf(sc->vmbus_dev, "unknown message type 0x%x\n", 571 msg_type); 572 return; 573 } 574 575 msg_proc = vmbus_chanmsg_handlers[msg_type]; 576 if (msg_proc != NULL) 577 msg_proc(sc, msg); 578 579 /* Channel specific processing */ 580 vmbus_chan_msgproc(sc, msg); 581 } 582 583 static void 584 vmbus_msg_task(void *xsc, int pending __unused) 585 { 586 struct vmbus_softc *sc = xsc; 587 volatile struct vmbus_message *msg; 588 589 msg = VMBUS_PCPU_GET(sc, message, curcpu) + VMBUS_SINT_MESSAGE; 590 for (;;) { 591 if (msg->msg_type == HYPERV_MSGTYPE_NONE) { 592 /* No message */ 593 break; 594 } else if (msg->msg_type == HYPERV_MSGTYPE_CHANNEL) { 595 /* Channel message */ 596 vmbus_chanmsg_handle(sc, 597 __DEVOLATILE(const struct vmbus_message *, msg)); 598 } 599 600 msg->msg_type = HYPERV_MSGTYPE_NONE; 601 /* 602 * Make sure the write to msg_type (i.e. set to 603 * HYPERV_MSGTYPE_NONE) happens before we read the 604 * msg_flags and EOMing. Otherwise, the EOMing will 605 * not deliver any more messages since there is no 606 * empty slot 607 * 608 * NOTE: 609 * mb() is used here, since atomic_thread_fence_seq_cst() 610 * will become compiler fence on UP kernel. 611 */ 612 mb(); 613 if (msg->msg_flags & VMBUS_MSGFLAG_PENDING) { 614 /* 615 * This will cause message queue rescan to possibly 616 * deliver another msg from the hypervisor 617 */ 618 wrmsr(MSR_HV_EOM, 0); 619 } 620 } 621 } 622 623 static __inline int 624 vmbus_handle_intr1(struct vmbus_softc *sc, struct trapframe *frame, int cpu) 625 { 626 volatile struct vmbus_message *msg; 627 struct vmbus_message *msg_base; 628 629 msg_base = VMBUS_PCPU_GET(sc, message, cpu); 630 631 /* 632 * Check event timer. 633 * 634 * TODO: move this to independent IDT vector. 635 */ 636 msg = msg_base + VMBUS_SINT_TIMER; 637 if (msg->msg_type == HYPERV_MSGTYPE_TIMER_EXPIRED) { 638 msg->msg_type = HYPERV_MSGTYPE_NONE; 639 640 vmbus_et_intr(frame); 641 642 /* 643 * Make sure the write to msg_type (i.e. set to 644 * HYPERV_MSGTYPE_NONE) happens before we read the 645 * msg_flags and EOMing. Otherwise, the EOMing will 646 * not deliver any more messages since there is no 647 * empty slot 648 * 649 * NOTE: 650 * mb() is used here, since atomic_thread_fence_seq_cst() 651 * will become compiler fence on UP kernel. 652 */ 653 mb(); 654 if (msg->msg_flags & VMBUS_MSGFLAG_PENDING) { 655 /* 656 * This will cause message queue rescan to possibly 657 * deliver another msg from the hypervisor 658 */ 659 wrmsr(MSR_HV_EOM, 0); 660 } 661 } 662 663 /* 664 * Check events. Hot path for network and storage I/O data; high rate. 665 * 666 * NOTE: 667 * As recommended by the Windows guest fellows, we check events before 668 * checking messages. 669 */ 670 sc->vmbus_event_proc(sc, cpu); 671 672 /* 673 * Check messages. Mainly management stuffs; ultra low rate. 674 */ 675 msg = msg_base + VMBUS_SINT_MESSAGE; 676 if (__predict_false(msg->msg_type != HYPERV_MSGTYPE_NONE)) { 677 taskqueue_enqueue(VMBUS_PCPU_GET(sc, message_tq, cpu), 678 VMBUS_PCPU_PTR(sc, message_task, cpu)); 679 } 680 681 return (FILTER_HANDLED); 682 } 683 684 void 685 vmbus_handle_intr(struct trapframe *trap_frame) 686 { 687 struct vmbus_softc *sc = vmbus_get_softc(); 688 int cpu = curcpu; 689 690 /* 691 * Disable preemption. 692 */ 693 critical_enter(); 694 695 /* 696 * Do a little interrupt counting. 697 */ 698 (*VMBUS_PCPU_GET(sc, intr_cnt, cpu))++; 699 700 vmbus_handle_intr1(sc, trap_frame, cpu); 701 702 /* 703 * Enable preemption. 704 */ 705 critical_exit(); 706 } 707 708 static void 709 vmbus_synic_setup(void *xsc) 710 { 711 struct vmbus_softc *sc = xsc; 712 int cpu = curcpu; 713 uint64_t val, orig; 714 uint32_t sint; 715 716 if (hyperv_features & CPUID_HV_MSR_VP_INDEX) { 717 /* Save virtual processor id. */ 718 VMBUS_PCPU_GET(sc, vcpuid, cpu) = rdmsr(MSR_HV_VP_INDEX); 719 } else { 720 /* Set virtual processor id to 0 for compatibility. */ 721 VMBUS_PCPU_GET(sc, vcpuid, cpu) = 0; 722 } 723 724 /* 725 * Setup the SynIC message. 726 */ 727 orig = rdmsr(MSR_HV_SIMP); 728 val = MSR_HV_SIMP_ENABLE | (orig & MSR_HV_SIMP_RSVD_MASK) | 729 ((VMBUS_PCPU_GET(sc, message_dma.hv_paddr, cpu) >> PAGE_SHIFT) << 730 MSR_HV_SIMP_PGSHIFT); 731 wrmsr(MSR_HV_SIMP, val); 732 733 /* 734 * Setup the SynIC event flags. 735 */ 736 orig = rdmsr(MSR_HV_SIEFP); 737 val = MSR_HV_SIEFP_ENABLE | (orig & MSR_HV_SIEFP_RSVD_MASK) | 738 ((VMBUS_PCPU_GET(sc, event_flags_dma.hv_paddr, cpu) 739 >> PAGE_SHIFT) << MSR_HV_SIEFP_PGSHIFT); 740 wrmsr(MSR_HV_SIEFP, val); 741 742 743 /* 744 * Configure and unmask SINT for message and event flags. 745 */ 746 sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE; 747 orig = rdmsr(sint); 748 val = sc->vmbus_idtvec | MSR_HV_SINT_AUTOEOI | 749 (orig & MSR_HV_SINT_RSVD_MASK); 750 wrmsr(sint, val); 751 752 /* 753 * Configure and unmask SINT for timer. 754 */ 755 sint = MSR_HV_SINT0 + VMBUS_SINT_TIMER; 756 orig = rdmsr(sint); 757 val = sc->vmbus_idtvec | MSR_HV_SINT_AUTOEOI | 758 (orig & MSR_HV_SINT_RSVD_MASK); 759 wrmsr(sint, val); 760 761 /* 762 * All done; enable SynIC. 763 */ 764 orig = rdmsr(MSR_HV_SCONTROL); 765 val = MSR_HV_SCTRL_ENABLE | (orig & MSR_HV_SCTRL_RSVD_MASK); 766 wrmsr(MSR_HV_SCONTROL, val); 767 } 768 769 static void 770 vmbus_synic_teardown(void *arg) 771 { 772 uint64_t orig; 773 uint32_t sint; 774 775 /* 776 * Disable SynIC. 777 */ 778 orig = rdmsr(MSR_HV_SCONTROL); 779 wrmsr(MSR_HV_SCONTROL, (orig & MSR_HV_SCTRL_RSVD_MASK)); 780 781 /* 782 * Mask message and event flags SINT. 783 */ 784 sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE; 785 orig = rdmsr(sint); 786 wrmsr(sint, orig | MSR_HV_SINT_MASKED); 787 788 /* 789 * Mask timer SINT. 790 */ 791 sint = MSR_HV_SINT0 + VMBUS_SINT_TIMER; 792 orig = rdmsr(sint); 793 wrmsr(sint, orig | MSR_HV_SINT_MASKED); 794 795 /* 796 * Teardown SynIC message. 797 */ 798 orig = rdmsr(MSR_HV_SIMP); 799 wrmsr(MSR_HV_SIMP, (orig & MSR_HV_SIMP_RSVD_MASK)); 800 801 /* 802 * Teardown SynIC event flags. 803 */ 804 orig = rdmsr(MSR_HV_SIEFP); 805 wrmsr(MSR_HV_SIEFP, (orig & MSR_HV_SIEFP_RSVD_MASK)); 806 } 807 808 static int 809 vmbus_dma_alloc(struct vmbus_softc *sc) 810 { 811 bus_dma_tag_t parent_dtag; 812 uint8_t *evtflags; 813 int cpu; 814 815 parent_dtag = bus_get_dma_tag(sc->vmbus_dev); 816 CPU_FOREACH(cpu) { 817 void *ptr; 818 819 /* 820 * Per-cpu messages and event flags. 821 */ 822 ptr = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0, 823 PAGE_SIZE, VMBUS_PCPU_PTR(sc, message_dma, cpu), 824 BUS_DMA_WAITOK | BUS_DMA_ZERO); 825 if (ptr == NULL) 826 return ENOMEM; 827 VMBUS_PCPU_GET(sc, message, cpu) = ptr; 828 829 ptr = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0, 830 PAGE_SIZE, VMBUS_PCPU_PTR(sc, event_flags_dma, cpu), 831 BUS_DMA_WAITOK | BUS_DMA_ZERO); 832 if (ptr == NULL) 833 return ENOMEM; 834 VMBUS_PCPU_GET(sc, event_flags, cpu) = ptr; 835 } 836 837 evtflags = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0, 838 PAGE_SIZE, &sc->vmbus_evtflags_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO); 839 if (evtflags == NULL) 840 return ENOMEM; 841 sc->vmbus_rx_evtflags = (u_long *)evtflags; 842 sc->vmbus_tx_evtflags = (u_long *)(evtflags + (PAGE_SIZE / 2)); 843 sc->vmbus_evtflags = evtflags; 844 845 sc->vmbus_mnf1 = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0, 846 PAGE_SIZE, &sc->vmbus_mnf1_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO); 847 if (sc->vmbus_mnf1 == NULL) 848 return ENOMEM; 849 850 sc->vmbus_mnf2 = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0, 851 sizeof(struct vmbus_mnf), &sc->vmbus_mnf2_dma, 852 BUS_DMA_WAITOK | BUS_DMA_ZERO); 853 if (sc->vmbus_mnf2 == NULL) 854 return ENOMEM; 855 856 return 0; 857 } 858 859 static void 860 vmbus_dma_free(struct vmbus_softc *sc) 861 { 862 int cpu; 863 864 if (sc->vmbus_evtflags != NULL) { 865 hyperv_dmamem_free(&sc->vmbus_evtflags_dma, sc->vmbus_evtflags); 866 sc->vmbus_evtflags = NULL; 867 sc->vmbus_rx_evtflags = NULL; 868 sc->vmbus_tx_evtflags = NULL; 869 } 870 if (sc->vmbus_mnf1 != NULL) { 871 hyperv_dmamem_free(&sc->vmbus_mnf1_dma, sc->vmbus_mnf1); 872 sc->vmbus_mnf1 = NULL; 873 } 874 if (sc->vmbus_mnf2 != NULL) { 875 hyperv_dmamem_free(&sc->vmbus_mnf2_dma, sc->vmbus_mnf2); 876 sc->vmbus_mnf2 = NULL; 877 } 878 879 CPU_FOREACH(cpu) { 880 if (VMBUS_PCPU_GET(sc, message, cpu) != NULL) { 881 hyperv_dmamem_free( 882 VMBUS_PCPU_PTR(sc, message_dma, cpu), 883 VMBUS_PCPU_GET(sc, message, cpu)); 884 VMBUS_PCPU_GET(sc, message, cpu) = NULL; 885 } 886 if (VMBUS_PCPU_GET(sc, event_flags, cpu) != NULL) { 887 hyperv_dmamem_free( 888 VMBUS_PCPU_PTR(sc, event_flags_dma, cpu), 889 VMBUS_PCPU_GET(sc, event_flags, cpu)); 890 VMBUS_PCPU_GET(sc, event_flags, cpu) = NULL; 891 } 892 } 893 } 894 895 static int 896 vmbus_intr_setup(struct vmbus_softc *sc) 897 { 898 int cpu; 899 900 CPU_FOREACH(cpu) { 901 char buf[MAXCOMLEN + 1]; 902 cpuset_t cpu_mask; 903 904 /* Allocate an interrupt counter for Hyper-V interrupt */ 905 snprintf(buf, sizeof(buf), "cpu%d:hyperv", cpu); 906 intrcnt_add(buf, VMBUS_PCPU_PTR(sc, intr_cnt, cpu)); 907 908 /* 909 * Setup taskqueue to handle events. Task will be per- 910 * channel. 911 */ 912 VMBUS_PCPU_GET(sc, event_tq, cpu) = taskqueue_create_fast( 913 "hyperv event", M_WAITOK, taskqueue_thread_enqueue, 914 VMBUS_PCPU_PTR(sc, event_tq, cpu)); 915 if (vmbus_pin_evttask) { 916 CPU_SETOF(cpu, &cpu_mask); 917 taskqueue_start_threads_cpuset( 918 VMBUS_PCPU_PTR(sc, event_tq, cpu), 1, PI_NET, 919 &cpu_mask, "hvevent%d", cpu); 920 } else { 921 taskqueue_start_threads( 922 VMBUS_PCPU_PTR(sc, event_tq, cpu), 1, PI_NET, 923 "hvevent%d", cpu); 924 } 925 926 /* 927 * Setup tasks and taskqueues to handle messages. 928 */ 929 VMBUS_PCPU_GET(sc, message_tq, cpu) = taskqueue_create_fast( 930 "hyperv msg", M_WAITOK, taskqueue_thread_enqueue, 931 VMBUS_PCPU_PTR(sc, message_tq, cpu)); 932 CPU_SETOF(cpu, &cpu_mask); 933 taskqueue_start_threads_cpuset( 934 VMBUS_PCPU_PTR(sc, message_tq, cpu), 1, PI_NET, &cpu_mask, 935 "hvmsg%d", cpu); 936 TASK_INIT(VMBUS_PCPU_PTR(sc, message_task, cpu), 0, 937 vmbus_msg_task, sc); 938 } 939 940 /* 941 * All Hyper-V ISR required resources are setup, now let's find a 942 * free IDT vector for Hyper-V ISR and set it up. 943 */ 944 sc->vmbus_idtvec = lapic_ipi_alloc(IDTVEC(vmbus_isr)); 945 if (sc->vmbus_idtvec < 0) { 946 device_printf(sc->vmbus_dev, "cannot find free IDT vector\n"); 947 return ENXIO; 948 } 949 if (bootverbose) { 950 device_printf(sc->vmbus_dev, "vmbus IDT vector %d\n", 951 sc->vmbus_idtvec); 952 } 953 return 0; 954 } 955 956 static void 957 vmbus_intr_teardown(struct vmbus_softc *sc) 958 { 959 int cpu; 960 961 if (sc->vmbus_idtvec >= 0) { 962 lapic_ipi_free(sc->vmbus_idtvec); 963 sc->vmbus_idtvec = -1; 964 } 965 966 CPU_FOREACH(cpu) { 967 if (VMBUS_PCPU_GET(sc, event_tq, cpu) != NULL) { 968 taskqueue_free(VMBUS_PCPU_GET(sc, event_tq, cpu)); 969 VMBUS_PCPU_GET(sc, event_tq, cpu) = NULL; 970 } 971 if (VMBUS_PCPU_GET(sc, message_tq, cpu) != NULL) { 972 taskqueue_drain(VMBUS_PCPU_GET(sc, message_tq, cpu), 973 VMBUS_PCPU_PTR(sc, message_task, cpu)); 974 taskqueue_free(VMBUS_PCPU_GET(sc, message_tq, cpu)); 975 VMBUS_PCPU_GET(sc, message_tq, cpu) = NULL; 976 } 977 } 978 } 979 980 static int 981 vmbus_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) 982 { 983 return (ENOENT); 984 } 985 986 static int 987 vmbus_child_pnpinfo_str(device_t dev, device_t child, char *buf, size_t buflen) 988 { 989 const struct vmbus_channel *chan; 990 char guidbuf[HYPERV_GUID_STRLEN]; 991 992 chan = vmbus_get_channel(child); 993 if (chan == NULL) { 994 /* Event timer device, which does not belong to a channel */ 995 return (0); 996 } 997 998 strlcat(buf, "classid=", buflen); 999 hyperv_guid2str(&chan->ch_guid_type, guidbuf, sizeof(guidbuf)); 1000 strlcat(buf, guidbuf, buflen); 1001 1002 strlcat(buf, " deviceid=", buflen); 1003 hyperv_guid2str(&chan->ch_guid_inst, guidbuf, sizeof(guidbuf)); 1004 strlcat(buf, guidbuf, buflen); 1005 1006 return (0); 1007 } 1008 1009 int 1010 vmbus_add_child(struct vmbus_channel *chan) 1011 { 1012 struct vmbus_softc *sc = chan->ch_vmbus; 1013 device_t parent = sc->vmbus_dev; 1014 1015 mtx_lock(&Giant); 1016 1017 chan->ch_dev = device_add_child(parent, NULL, -1); 1018 if (chan->ch_dev == NULL) { 1019 mtx_unlock(&Giant); 1020 device_printf(parent, "device_add_child for chan%u failed\n", 1021 chan->ch_id); 1022 return (ENXIO); 1023 } 1024 device_set_ivars(chan->ch_dev, chan); 1025 device_probe_and_attach(chan->ch_dev); 1026 1027 mtx_unlock(&Giant); 1028 return (0); 1029 } 1030 1031 int 1032 vmbus_delete_child(struct vmbus_channel *chan) 1033 { 1034 int error = 0; 1035 1036 mtx_lock(&Giant); 1037 if (chan->ch_dev != NULL) { 1038 error = device_delete_child(chan->ch_vmbus->vmbus_dev, 1039 chan->ch_dev); 1040 chan->ch_dev = NULL; 1041 } 1042 mtx_unlock(&Giant); 1043 return (error); 1044 } 1045 1046 static int 1047 vmbus_sysctl_version(SYSCTL_HANDLER_ARGS) 1048 { 1049 struct vmbus_softc *sc = arg1; 1050 char verstr[16]; 1051 1052 snprintf(verstr, sizeof(verstr), "%u.%u", 1053 VMBUS_VERSION_MAJOR(sc->vmbus_version), 1054 VMBUS_VERSION_MINOR(sc->vmbus_version)); 1055 return sysctl_handle_string(oidp, verstr, sizeof(verstr), req); 1056 } 1057 1058 /* 1059 * We need the function to make sure the MMIO resource is allocated from the 1060 * ranges found in _CRS. 1061 * 1062 * For the release function, we can use bus_generic_release_resource(). 1063 */ 1064 static struct resource * 1065 vmbus_alloc_resource(device_t dev, device_t child, int type, int *rid, 1066 rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) 1067 { 1068 device_t parent = device_get_parent(dev); 1069 struct resource *res; 1070 1071 #ifdef NEW_PCIB 1072 if (type == SYS_RES_MEMORY) { 1073 struct vmbus_softc *sc = device_get_softc(dev); 1074 1075 res = pcib_host_res_alloc(&sc->vmbus_mmio_res, child, type, 1076 rid, start, end, count, flags); 1077 } else 1078 #endif 1079 { 1080 res = BUS_ALLOC_RESOURCE(parent, child, type, rid, start, 1081 end, count, flags); 1082 } 1083 1084 return (res); 1085 } 1086 1087 static int 1088 vmbus_alloc_msi(device_t bus, device_t dev, int count, int maxcount, int *irqs) 1089 { 1090 1091 return (PCIB_ALLOC_MSI(device_get_parent(bus), dev, count, maxcount, 1092 irqs)); 1093 } 1094 1095 static int 1096 vmbus_release_msi(device_t bus, device_t dev, int count, int *irqs) 1097 { 1098 1099 return (PCIB_RELEASE_MSI(device_get_parent(bus), dev, count, irqs)); 1100 } 1101 1102 static int 1103 vmbus_alloc_msix(device_t bus, device_t dev, int *irq) 1104 { 1105 1106 return (PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq)); 1107 } 1108 1109 static int 1110 vmbus_release_msix(device_t bus, device_t dev, int irq) 1111 { 1112 1113 return (PCIB_RELEASE_MSIX(device_get_parent(bus), dev, irq)); 1114 } 1115 1116 static int 1117 vmbus_map_msi(device_t bus, device_t dev, int irq, uint64_t *addr, 1118 uint32_t *data) 1119 { 1120 1121 return (PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data)); 1122 } 1123 1124 static uint32_t 1125 vmbus_get_version_method(device_t bus, device_t dev) 1126 { 1127 struct vmbus_softc *sc = device_get_softc(bus); 1128 1129 return sc->vmbus_version; 1130 } 1131 1132 static int 1133 vmbus_probe_guid_method(device_t bus, device_t dev, 1134 const struct hyperv_guid *guid) 1135 { 1136 const struct vmbus_channel *chan = vmbus_get_channel(dev); 1137 1138 if (memcmp(&chan->ch_guid_type, guid, sizeof(struct hyperv_guid)) == 0) 1139 return 0; 1140 return ENXIO; 1141 } 1142 1143 static uint32_t 1144 vmbus_get_vcpu_id_method(device_t bus, device_t dev, int cpu) 1145 { 1146 const struct vmbus_softc *sc = device_get_softc(bus); 1147 1148 return (VMBUS_PCPU_GET(sc, vcpuid, cpu)); 1149 } 1150 1151 static struct taskqueue * 1152 vmbus_get_eventtq_method(device_t bus, device_t dev __unused, int cpu) 1153 { 1154 const struct vmbus_softc *sc = device_get_softc(bus); 1155 1156 KASSERT(cpu >= 0 && cpu < mp_ncpus, ("invalid cpu%d", cpu)); 1157 return (VMBUS_PCPU_GET(sc, event_tq, cpu)); 1158 } 1159 1160 #ifdef NEW_PCIB 1161 #define VTPM_BASE_ADDR 0xfed40000 1162 #define FOUR_GB (1ULL << 32) 1163 1164 enum parse_pass { parse_64, parse_32 }; 1165 1166 struct parse_context { 1167 device_t vmbus_dev; 1168 enum parse_pass pass; 1169 }; 1170 1171 static ACPI_STATUS 1172 parse_crs(ACPI_RESOURCE *res, void *ctx) 1173 { 1174 const struct parse_context *pc = ctx; 1175 device_t vmbus_dev = pc->vmbus_dev; 1176 1177 struct vmbus_softc *sc = device_get_softc(vmbus_dev); 1178 UINT64 start, end; 1179 1180 switch (res->Type) { 1181 case ACPI_RESOURCE_TYPE_ADDRESS32: 1182 start = res->Data.Address32.Address.Minimum; 1183 end = res->Data.Address32.Address.Maximum; 1184 break; 1185 1186 case ACPI_RESOURCE_TYPE_ADDRESS64: 1187 start = res->Data.Address64.Address.Minimum; 1188 end = res->Data.Address64.Address.Maximum; 1189 break; 1190 1191 default: 1192 /* Unused types. */ 1193 return (AE_OK); 1194 } 1195 1196 /* 1197 * We don't use <1MB addresses. 1198 */ 1199 if (end < 0x100000) 1200 return (AE_OK); 1201 1202 /* Don't conflict with vTPM. */ 1203 if (end >= VTPM_BASE_ADDR && start < VTPM_BASE_ADDR) 1204 end = VTPM_BASE_ADDR - 1; 1205 1206 if ((pc->pass == parse_32 && start < FOUR_GB) || 1207 (pc->pass == parse_64 && start >= FOUR_GB)) 1208 pcib_host_res_decodes(&sc->vmbus_mmio_res, SYS_RES_MEMORY, 1209 start, end, 0); 1210 1211 return (AE_OK); 1212 } 1213 1214 static void 1215 vmbus_get_crs(device_t dev, device_t vmbus_dev, enum parse_pass pass) 1216 { 1217 struct parse_context pc; 1218 ACPI_STATUS status; 1219 1220 if (bootverbose) 1221 device_printf(dev, "walking _CRS, pass=%d\n", pass); 1222 1223 pc.vmbus_dev = vmbus_dev; 1224 pc.pass = pass; 1225 status = AcpiWalkResources(acpi_get_handle(dev), "_CRS", 1226 parse_crs, &pc); 1227 1228 if (bootverbose && ACPI_FAILURE(status)) 1229 device_printf(dev, "_CRS: not found, pass=%d\n", pass); 1230 } 1231 1232 static void 1233 vmbus_get_mmio_res_pass(device_t dev, enum parse_pass pass) 1234 { 1235 device_t acpi0, parent; 1236 1237 parent = device_get_parent(dev); 1238 1239 acpi0 = device_get_parent(parent); 1240 if (strcmp("acpi0", device_get_nameunit(acpi0)) == 0) { 1241 device_t *children; 1242 int count; 1243 1244 /* 1245 * Try to locate VMBUS resources and find _CRS on them. 1246 */ 1247 if (device_get_children(acpi0, &children, &count) == 0) { 1248 int i; 1249 1250 for (i = 0; i < count; ++i) { 1251 if (!device_is_attached(children[i])) 1252 continue; 1253 1254 if (strcmp("vmbus_res", 1255 device_get_name(children[i])) == 0) 1256 vmbus_get_crs(children[i], dev, pass); 1257 } 1258 free(children, M_TEMP); 1259 } 1260 1261 /* 1262 * Try to find _CRS on acpi. 1263 */ 1264 vmbus_get_crs(acpi0, dev, pass); 1265 } else { 1266 device_printf(dev, "not grandchild of acpi\n"); 1267 } 1268 1269 /* 1270 * Try to find _CRS on parent. 1271 */ 1272 vmbus_get_crs(parent, dev, pass); 1273 } 1274 1275 static void 1276 vmbus_get_mmio_res(device_t dev) 1277 { 1278 struct vmbus_softc *sc = device_get_softc(dev); 1279 /* 1280 * We walk the resources twice to make sure that: in the resource 1281 * list, the 32-bit resources appear behind the 64-bit resources. 1282 * NB: resource_list_add() uses INSERT_TAIL. This way, when we 1283 * iterate through the list to find a range for a 64-bit BAR in 1284 * vmbus_alloc_resource(), we can make sure we try to use >4GB 1285 * ranges first. 1286 */ 1287 pcib_host_res_init(dev, &sc->vmbus_mmio_res); 1288 1289 vmbus_get_mmio_res_pass(dev, parse_64); 1290 vmbus_get_mmio_res_pass(dev, parse_32); 1291 } 1292 1293 static void 1294 vmbus_free_mmio_res(device_t dev) 1295 { 1296 struct vmbus_softc *sc = device_get_softc(dev); 1297 1298 pcib_host_res_free(dev, &sc->vmbus_mmio_res); 1299 } 1300 #endif /* NEW_PCIB */ 1301 1302 static void 1303 vmbus_identify(driver_t *driver, device_t parent) 1304 { 1305 1306 if (device_get_unit(parent) != 0 || vm_guest != VM_GUEST_HV || 1307 (hyperv_features & CPUID_HV_MSR_SYNIC) == 0) 1308 return; 1309 device_add_child(parent, "vmbus", -1); 1310 } 1311 1312 static int 1313 vmbus_probe(device_t dev) 1314 { 1315 1316 if (device_get_unit(dev) != 0 || vm_guest != VM_GUEST_HV || 1317 (hyperv_features & CPUID_HV_MSR_SYNIC) == 0) 1318 return (ENXIO); 1319 1320 device_set_desc(dev, "Hyper-V Vmbus"); 1321 return (BUS_PROBE_DEFAULT); 1322 } 1323 1324 /** 1325 * @brief Main vmbus driver initialization routine. 1326 * 1327 * Here, we 1328 * - initialize the vmbus driver context 1329 * - setup various driver entry points 1330 * - invoke the vmbus hv main init routine 1331 * - get the irq resource 1332 * - invoke the vmbus to add the vmbus root device 1333 * - setup the vmbus root device 1334 * - retrieve the channel offers 1335 */ 1336 static int 1337 vmbus_doattach(struct vmbus_softc *sc) 1338 { 1339 struct sysctl_oid_list *child; 1340 struct sysctl_ctx_list *ctx; 1341 int ret; 1342 1343 if (sc->vmbus_flags & VMBUS_FLAG_ATTACHED) 1344 return (0); 1345 1346 #ifdef NEW_PCIB 1347 vmbus_get_mmio_res(sc->vmbus_dev); 1348 #endif 1349 1350 sc->vmbus_flags |= VMBUS_FLAG_ATTACHED; 1351 1352 sc->vmbus_gpadl = VMBUS_GPADL_START; 1353 mtx_init(&sc->vmbus_prichan_lock, "vmbus prichan", NULL, MTX_DEF); 1354 TAILQ_INIT(&sc->vmbus_prichans); 1355 mtx_init(&sc->vmbus_chan_lock, "vmbus channel", NULL, MTX_DEF); 1356 TAILQ_INIT(&sc->vmbus_chans); 1357 sc->vmbus_chmap = malloc( 1358 sizeof(struct vmbus_channel *) * VMBUS_CHAN_MAX, M_DEVBUF, 1359 M_WAITOK | M_ZERO); 1360 1361 /* 1362 * Create context for "post message" Hypercalls 1363 */ 1364 sc->vmbus_xc = vmbus_xact_ctx_create(bus_get_dma_tag(sc->vmbus_dev), 1365 HYPERCALL_POSTMSGIN_SIZE, VMBUS_MSG_SIZE, 1366 sizeof(struct vmbus_msghc)); 1367 if (sc->vmbus_xc == NULL) { 1368 ret = ENXIO; 1369 goto cleanup; 1370 } 1371 1372 /* 1373 * Allocate DMA stuffs. 1374 */ 1375 ret = vmbus_dma_alloc(sc); 1376 if (ret != 0) 1377 goto cleanup; 1378 1379 /* 1380 * Setup interrupt. 1381 */ 1382 ret = vmbus_intr_setup(sc); 1383 if (ret != 0) 1384 goto cleanup; 1385 1386 /* 1387 * Setup SynIC. 1388 */ 1389 if (bootverbose) 1390 device_printf(sc->vmbus_dev, "smp_started = %d\n", smp_started); 1391 smp_rendezvous(NULL, vmbus_synic_setup, NULL, sc); 1392 sc->vmbus_flags |= VMBUS_FLAG_SYNIC; 1393 1394 /* 1395 * Initialize vmbus, e.g. connect to Hypervisor. 1396 */ 1397 ret = vmbus_init(sc); 1398 if (ret != 0) 1399 goto cleanup; 1400 1401 if (sc->vmbus_version == VMBUS_VERSION_WS2008 || 1402 sc->vmbus_version == VMBUS_VERSION_WIN7) 1403 sc->vmbus_event_proc = vmbus_event_proc_compat; 1404 else 1405 sc->vmbus_event_proc = vmbus_event_proc; 1406 1407 ret = vmbus_scan(sc); 1408 if (ret != 0) 1409 goto cleanup; 1410 1411 ctx = device_get_sysctl_ctx(sc->vmbus_dev); 1412 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vmbus_dev)); 1413 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "version", 1414 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, 1415 vmbus_sysctl_version, "A", "vmbus version"); 1416 1417 return (ret); 1418 1419 cleanup: 1420 vmbus_scan_teardown(sc); 1421 vmbus_intr_teardown(sc); 1422 vmbus_dma_free(sc); 1423 if (sc->vmbus_xc != NULL) { 1424 vmbus_xact_ctx_destroy(sc->vmbus_xc); 1425 sc->vmbus_xc = NULL; 1426 } 1427 free(__DEVOLATILE(void *, sc->vmbus_chmap), M_DEVBUF); 1428 mtx_destroy(&sc->vmbus_prichan_lock); 1429 mtx_destroy(&sc->vmbus_chan_lock); 1430 1431 return (ret); 1432 } 1433 1434 static void 1435 vmbus_event_proc_dummy(struct vmbus_softc *sc __unused, int cpu __unused) 1436 { 1437 } 1438 1439 #ifdef EARLY_AP_STARTUP 1440 1441 static void 1442 vmbus_intrhook(void *xsc) 1443 { 1444 struct vmbus_softc *sc = xsc; 1445 1446 if (bootverbose) 1447 device_printf(sc->vmbus_dev, "intrhook\n"); 1448 vmbus_doattach(sc); 1449 config_intrhook_disestablish(&sc->vmbus_intrhook); 1450 } 1451 1452 #endif /* EARLY_AP_STARTUP */ 1453 1454 static int 1455 vmbus_attach(device_t dev) 1456 { 1457 vmbus_sc = device_get_softc(dev); 1458 vmbus_sc->vmbus_dev = dev; 1459 vmbus_sc->vmbus_idtvec = -1; 1460 1461 /* 1462 * Event processing logic will be configured: 1463 * - After the vmbus protocol version negotiation. 1464 * - Before we request channel offers. 1465 */ 1466 vmbus_sc->vmbus_event_proc = vmbus_event_proc_dummy; 1467 1468 #ifdef EARLY_AP_STARTUP 1469 /* 1470 * Defer the real attach until the pause(9) works as expected. 1471 */ 1472 vmbus_sc->vmbus_intrhook.ich_func = vmbus_intrhook; 1473 vmbus_sc->vmbus_intrhook.ich_arg = vmbus_sc; 1474 config_intrhook_establish(&vmbus_sc->vmbus_intrhook); 1475 #else /* !EARLY_AP_STARTUP */ 1476 /* 1477 * If the system has already booted and thread 1478 * scheduling is possible indicated by the global 1479 * cold set to zero, we just call the driver 1480 * initialization directly. 1481 */ 1482 if (!cold) 1483 vmbus_doattach(vmbus_sc); 1484 #endif /* EARLY_AP_STARTUP */ 1485 1486 return (0); 1487 } 1488 1489 static int 1490 vmbus_detach(device_t dev) 1491 { 1492 struct vmbus_softc *sc = device_get_softc(dev); 1493 1494 bus_generic_detach(dev); 1495 vmbus_chan_destroy_all(sc); 1496 1497 vmbus_scan_teardown(sc); 1498 1499 vmbus_disconnect(sc); 1500 1501 if (sc->vmbus_flags & VMBUS_FLAG_SYNIC) { 1502 sc->vmbus_flags &= ~VMBUS_FLAG_SYNIC; 1503 smp_rendezvous(NULL, vmbus_synic_teardown, NULL, NULL); 1504 } 1505 1506 vmbus_intr_teardown(sc); 1507 vmbus_dma_free(sc); 1508 1509 if (sc->vmbus_xc != NULL) { 1510 vmbus_xact_ctx_destroy(sc->vmbus_xc); 1511 sc->vmbus_xc = NULL; 1512 } 1513 1514 free(__DEVOLATILE(void *, sc->vmbus_chmap), M_DEVBUF); 1515 mtx_destroy(&sc->vmbus_prichan_lock); 1516 mtx_destroy(&sc->vmbus_chan_lock); 1517 1518 #ifdef NEW_PCIB 1519 vmbus_free_mmio_res(dev); 1520 #endif 1521 1522 return (0); 1523 } 1524 1525 #ifndef EARLY_AP_STARTUP 1526 1527 static void 1528 vmbus_sysinit(void *arg __unused) 1529 { 1530 struct vmbus_softc *sc = vmbus_get_softc(); 1531 1532 if (vm_guest != VM_GUEST_HV || sc == NULL) 1533 return; 1534 1535 /* 1536 * If the system has already booted and thread 1537 * scheduling is possible, as indicated by the 1538 * global cold set to zero, we just call the driver 1539 * initialization directly. 1540 */ 1541 if (!cold) 1542 vmbus_doattach(sc); 1543 } 1544 /* 1545 * NOTE: 1546 * We have to start as the last step of SI_SUB_SMP, i.e. after SMP is 1547 * initialized. 1548 */ 1549 SYSINIT(vmbus_initialize, SI_SUB_SMP, SI_ORDER_ANY, vmbus_sysinit, NULL); 1550 1551 #endif /* !EARLY_AP_STARTUP */ 1552