1 /*- 2 * Copyright (c) 2009-2012,2016 Microsoft Corp. 3 * Copyright (c) 2012 NetApp Inc. 4 * Copyright (c) 2012 Citrix Inc. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice unmodified, this list of conditions, and the following 12 * disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

/*
 * VM Bus Driver Implementation
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>
#include <machine/intr_machdep.h>
#include <machine/resource.h>
#include <x86/include/apicvar.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>

#include <dev/hyperv/include/hyperv.h>
#include <dev/hyperv/include/vmbus_xact.h>
#include <dev/hyperv/vmbus/hyperv_reg.h>
#include <dev/hyperv/vmbus/hyperv_var.h>
#include <dev/hyperv/vmbus/vmbus_reg.h>
#include <dev/hyperv/vmbus/vmbus_var.h>
#include <dev/hyperv/vmbus/vmbus_chanvar.h>

#include "acpi_if.h"
#include "pcib_if.h"
#include "vmbus_if.h"

/* First GPADL handle handed out by vmbus_gpadl_alloc(). */
#define VMBUS_GPADL_START		0xe1e10

/*
 * Post-message hypercall context: pairs the vmbus transaction with a
 * saved copy of the hypercall input parameters, so the input can be
 * restored if the hypercall has to be retried.
 */
struct vmbus_msghc {
	struct vmbus_xact		*mh_xact;
	struct hypercall_postmsg_in	mh_inprm_save;
};

static int			vmbus_probe(device_t);
static int			vmbus_attach(device_t);
static int			vmbus_detach(device_t);
static int			vmbus_read_ivar(device_t, device_t, int,
				    uintptr_t *);
static int			vmbus_child_pnpinfo_str(device_t, device_t,
				    char *, size_t);
static struct resource		*vmbus_alloc_resource(device_t dev,
				    device_t child, int type, int *rid,
				    rman_res_t start, rman_res_t end,
				    rman_res_t count, u_int flags);
static int			vmbus_alloc_msi(device_t bus, device_t dev,
				    int count, int maxcount, int *irqs);
static int			vmbus_release_msi(device_t bus, device_t dev,
				    int count, int *irqs);
static int			vmbus_alloc_msix(device_t bus, device_t dev,
				    int *irq);
static int			vmbus_release_msix(device_t bus, device_t dev,
				    int irq);
static int			vmbus_map_msi(device_t bus, device_t dev,
				    int irq,
uint64_t *addr, uint32_t *data);
static uint32_t			vmbus_get_version_method(device_t, device_t);
static int			vmbus_probe_guid_method(device_t, device_t,
				    const struct hyperv_guid *);
static uint32_t			vmbus_get_vcpu_id_method(device_t bus,
				    device_t dev, int cpu);

static int			vmbus_init(struct vmbus_softc *);
static int			vmbus_connect(struct vmbus_softc *, uint32_t);
static int			vmbus_req_channels(struct vmbus_softc *sc);
static void			vmbus_disconnect(struct vmbus_softc *);
static int			vmbus_scan(struct vmbus_softc *);
static void			vmbus_scan_teardown(struct vmbus_softc *);
static void			vmbus_scan_done(struct vmbus_softc *,
				    const struct vmbus_message *);
static void			vmbus_chanmsg_handle(struct vmbus_softc *,
				    const struct vmbus_message *);
static void			vmbus_msg_task(void *, int);
static void			vmbus_synic_setup(void *);
static void			vmbus_synic_teardown(void *);
static int			vmbus_sysctl_version(SYSCTL_HANDLER_ARGS);
static int			vmbus_dma_alloc(struct vmbus_softc *);
static void			vmbus_dma_free(struct vmbus_softc *);
static int			vmbus_intr_setup(struct vmbus_softc *);
static void			vmbus_intr_teardown(struct vmbus_softc *);
static int			vmbus_doattach(struct vmbus_softc *);
static void			vmbus_event_proc_dummy(struct vmbus_softc *,
				    int);

/* Singleton softc; set at attach time, read via vmbus_get_softc(). */
static struct vmbus_softc	*vmbus_sc;

extern inthand_t IDTVEC(vmbus_isr);

/* Protocol versions to negotiate, newest first (see vmbus_init()). */
static const uint32_t		vmbus_version[] = {
	VMBUS_VERSION_WIN8_1,
	VMBUS_VERSION_WIN8,
	VMBUS_VERSION_WIN7,
	VMBUS_VERSION_WS2008
};

/* Channel message dispatch table, indexed by channel message type. */
static const vmbus_chanmsg_proc_t
vmbus_chanmsg_handlers[VMBUS_CHANMSG_TYPE_MAX] = {
	VMBUS_CHANMSG_PROC(CHOFFER_DONE, vmbus_scan_done),
	VMBUS_CHANMSG_PROC_WAKEUP(CONNECT_RESP)
};

static device_method_t vmbus_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,			vmbus_probe),
	DEVMETHOD(device_attach,		vmbus_attach),
	DEVMETHOD(device_detach,		vmbus_detach),
	DEVMETHOD(device_shutdown,		bus_generic_shutdown),
	DEVMETHOD(device_suspend,		bus_generic_suspend),
	DEVMETHOD(device_resume,		bus_generic_resume),

	/* Bus interface */
	DEVMETHOD(bus_add_child,		bus_generic_add_child),
	DEVMETHOD(bus_print_child,		bus_generic_print_child),
	DEVMETHOD(bus_read_ivar,		vmbus_read_ivar),
	DEVMETHOD(bus_child_pnpinfo_str,	vmbus_child_pnpinfo_str),
	DEVMETHOD(bus_alloc_resource,		vmbus_alloc_resource),
	DEVMETHOD(bus_release_resource,		bus_generic_release_resource),
	DEVMETHOD(bus_activate_resource,	bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource,	bus_generic_deactivate_resource),
	DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,		bus_generic_teardown_intr),
#if __FreeBSD_version >= 1100000
	DEVMETHOD(bus_get_cpus,			bus_generic_get_cpus),
#endif

	/* pcib interface */
	DEVMETHOD(pcib_alloc_msi,		vmbus_alloc_msi),
	DEVMETHOD(pcib_release_msi,		vmbus_release_msi),
	DEVMETHOD(pcib_alloc_msix,		vmbus_alloc_msix),
	DEVMETHOD(pcib_release_msix,		vmbus_release_msix),
	DEVMETHOD(pcib_map_msi,			vmbus_map_msi),

	/* Vmbus interface */
	DEVMETHOD(vmbus_get_version,		vmbus_get_version_method),
	DEVMETHOD(vmbus_probe_guid,		vmbus_probe_guid_method),
	DEVMETHOD(vmbus_get_vcpu_id,		vmbus_get_vcpu_id_method),

	DEVMETHOD_END
};

static driver_t vmbus_driver = {
	"vmbus",
	vmbus_methods,
	sizeof(struct vmbus_softc)
};

static devclass_t vmbus_devclass;

DRIVER_MODULE(vmbus, acpi, vmbus_driver, vmbus_devclass, NULL, NULL);
MODULE_DEPEND(vmbus, acpi, 1, 1, 1);
MODULE_DEPEND(vmbus, pci, 1, 1, 1);
MODULE_VERSION(vmbus, 1);

/* Return the singleton vmbus softc. */
static __inline struct vmbus_softc *
vmbus_get_softc(void)
{
	return vmbus_sc;
}

/*
 * Re-initialize the hypercall input parameters of a message hypercall
 * context for a message carrying 'dsize' bytes of payload.
 */
void
vmbus_msghc_reset(struct vmbus_msghc *mh, size_t dsize)
{
	struct hypercall_postmsg_in *inprm;

	if (dsize >
HYPERCALL_POSTMSGIN_DSIZE_MAX)
		panic("invalid data size %zu", dsize);

	inprm = vmbus_xact_req_data(mh->mh_xact);
	memset(inprm, 0, HYPERCALL_POSTMSGIN_SIZE);
	inprm->hc_connid = VMBUS_CONNID_MESSAGE;
	inprm->hc_msgtype = HYPERV_MSGTYPE_CHANNEL;
	inprm->hc_dsize = dsize;
}

/*
 * Acquire a message hypercall context whose payload can hold 'dsize'
 * bytes.  Returns NULL if no transaction is available.
 */
struct vmbus_msghc *
vmbus_msghc_get(struct vmbus_softc *sc, size_t dsize)
{
	struct vmbus_msghc *mh;
	struct vmbus_xact *xact;

	if (dsize > HYPERCALL_POSTMSGIN_DSIZE_MAX)
		panic("invalid data size %zu", dsize);

	xact = vmbus_xact_get(sc->vmbus_xc,
	    dsize + __offsetof(struct hypercall_postmsg_in, hc_data[0]));
	if (xact == NULL)
		return (NULL);

	mh = vmbus_xact_priv(xact, sizeof(*mh));
	mh->mh_xact = xact;

	vmbus_msghc_reset(mh, dsize);
	return (mh);
}

/* Release a message hypercall context acquired by vmbus_msghc_get(). */
void
vmbus_msghc_put(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
{

	vmbus_xact_put(mh->mh_xact);
}

/* Return a pointer to the hypercall message payload area. */
void *
vmbus_msghc_dataptr(struct vmbus_msghc *mh)
{
	struct hypercall_postmsg_in *inprm;

	inprm = vmbus_xact_req_data(mh->mh_xact);
	return (inprm->hc_data);
}

/*
 * Post the message to the hypervisor without waiting for a response.
 * Transient failures are retried with exponentially increasing delays;
 * returns 0 on success, EIO after all retries fail.
 */
int
vmbus_msghc_exec_noresult(struct vmbus_msghc *mh)
{
	sbintime_t time = SBT_1MS;
	struct hypercall_postmsg_in *inprm;
	bus_addr_t inprm_paddr;
	int i;

	inprm = vmbus_xact_req_data(mh->mh_xact);
	inprm_paddr = vmbus_xact_req_paddr(mh->mh_xact);

	/*
	 * Save the input parameter so that we could restore the input
	 * parameter if the Hypercall failed.
	 *
	 * XXX
	 * Is this really necessary?!  i.e. Will the Hypercall ever
	 * overwrite the input parameter?
	 */
	memcpy(&mh->mh_inprm_save, inprm, HYPERCALL_POSTMSGIN_SIZE);

	/*
	 * In order to cope with transient failures, e.g. insufficient
	 * resources on host side, we retry the post message Hypercall
	 * several times.  20 retries seem sufficient.
	 */
#define HC_RETRY_MAX	20

	for (i = 0; i < HC_RETRY_MAX; ++i) {
		uint64_t status;

		status = hypercall_post_message(inprm_paddr);
		if (status == HYPERCALL_STATUS_SUCCESS)
			return 0;

		/* Back off; the delay doubles up to 2 seconds. */
		pause_sbt("hcpmsg", time, 0, C_HARDCLOCK);
		if (time < SBT_1S * 2)
			time *= 2;

		/* Restore input parameter and try again */
		memcpy(inprm, &mh->mh_inprm_save, HYPERCALL_POSTMSGIN_SIZE);
	}

#undef HC_RETRY_MAX

	return EIO;
}

/*
 * Activate the transaction and post the message, so that the response
 * can later be collected with vmbus_msghc_wait_result().
 */
int
vmbus_msghc_exec(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
{
	int error;

	vmbus_xact_activate(mh->mh_xact);
	error = vmbus_msghc_exec_noresult(mh);
	if (error)
		vmbus_xact_deactivate(mh->mh_xact);
	return error;
}

/* Block until the hypervisor's response for this transaction arrives. */
const struct vmbus_message *
vmbus_msghc_wait_result(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
{
	size_t resp_len;

	return (vmbus_xact_wait(mh->mh_xact, &resp_len));
}

/* Wake up the waiter of the transaction matching 'msg'. */
void
vmbus_msghc_wakeup(struct vmbus_softc *sc, const struct vmbus_message *msg)
{

	vmbus_xact_ctx_wakeup(sc->vmbus_xc, msg, sizeof(*msg));
}

/* Hand out the next GPADL handle (atomic post-increment). */
uint32_t
vmbus_gpadl_alloc(struct vmbus_softc *sc)
{
	return atomic_fetchadd_int(&sc->vmbus_gpadl, 1);
}

/*
 * Try to establish a vmbus connection with the hypervisor using the
 * given protocol version.  Returns 0 on success, EOPNOTSUPP if the
 * host rejected this version, or another errno on failure.
 */
static int
vmbus_connect(struct vmbus_softc *sc, uint32_t version)
{
	struct vmbus_chanmsg_connect *req;
	const struct vmbus_message *msg;
	struct vmbus_msghc *mh;
	int error, done = 0;

	mh = vmbus_msghc_get(sc, sizeof(*req));
	if (mh == NULL)
		return ENXIO;

	req = vmbus_msghc_dataptr(mh);
	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CONNECT;
	req->chm_ver = version;
	req->chm_evtflags = sc->vmbus_evtflags_dma.hv_paddr;
	req->chm_mnf1 = sc->vmbus_mnf1_dma.hv_paddr;
	req->chm_mnf2 = sc->vmbus_mnf2_dma.hv_paddr;

	error = vmbus_msghc_exec(sc, mh);
	if (error) {
		vmbus_msghc_put(sc, mh);
		return error;
	}

	msg = vmbus_msghc_wait_result(sc,
mh);
	done = ((const struct vmbus_chanmsg_connect_resp *)
	    msg->msg_data)->chm_done;

	vmbus_msghc_put(sc, mh);

	return (done ? 0 : EOPNOTSUPP);
}

/*
 * Negotiate the vmbus protocol: try each supported version, newest
 * first, until the host accepts one.  Returns ENXIO if none works.
 */
static int
vmbus_init(struct vmbus_softc *sc)
{
	int i;

	for (i = 0; i < nitems(vmbus_version); ++i) {
		int error;

		error = vmbus_connect(sc, vmbus_version[i]);
		if (!error) {
			sc->vmbus_version = vmbus_version[i];
			device_printf(sc->vmbus_dev, "version %u.%u\n",
			    VMBUS_VERSION_MAJOR(sc->vmbus_version),
			    VMBUS_VERSION_MINOR(sc->vmbus_version));
			return 0;
		}
	}
	return ENXIO;
}

/* Tell the hypervisor to tear down the vmbus connection (best effort). */
static void
vmbus_disconnect(struct vmbus_softc *sc)
{
	struct vmbus_chanmsg_disconnect *req;
	struct vmbus_msghc *mh;
	int error;

	mh = vmbus_msghc_get(sc, sizeof(*req));
	if (mh == NULL) {
		device_printf(sc->vmbus_dev,
		    "can not get msg hypercall for disconnect\n");
		return;
	}

	req = vmbus_msghc_dataptr(mh);
	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_DISCONNECT;

	error = vmbus_msghc_exec_noresult(mh);
	vmbus_msghc_put(sc, mh);

	if (error) {
		device_printf(sc->vmbus_dev,
		    "disconnect msg hypercall failed\n");
	}
}

/* Ask the hypervisor to send the initial channel offers. */
static int
vmbus_req_channels(struct vmbus_softc *sc)
{
	struct vmbus_chanmsg_chrequest *req;
	struct vmbus_msghc *mh;
	int error;

	mh = vmbus_msghc_get(sc, sizeof(*req));
	if (mh == NULL)
		return ENXIO;

	req = vmbus_msghc_dataptr(mh);
	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHREQUEST;

	error = vmbus_msghc_exec_noresult(mh);
	vmbus_msghc_put(sc, mh);

	return error;
}

/*
 * Runs on vmbus_devtq: mark the initial scan done and wake up
 * vmbus_scan(), which sleeps on vmbus_scandone under Giant.
 */
static void
vmbus_scan_done_task(void *xsc, int pending __unused)
{
	struct vmbus_softc *sc = xsc;

	mtx_lock(&Giant);
	sc->vmbus_scandone = true;
	mtx_unlock(&Giant);
	wakeup(&sc->vmbus_scandone);
}

/* CHOFFER_DONE handler: defer completion handling to vmbus_devtq. */
static void
vmbus_scan_done(struct vmbus_softc *sc,
    const struct vmbus_message *msg __unused)
{

	taskqueue_enqueue(sc->vmbus_devtq, &sc->vmbus_scandone_task);
}

/*
 * Probe/attach non-channel children, create the device and sub-channel
 * taskqueues, then request channel offers and wait (under Giant) for
 * the initial scan to complete.
 */
static int
vmbus_scan(struct vmbus_softc *sc)
{
	int error;

	/*
	 * Identify, probe and attach for non-channel devices.
	 */
	bus_generic_probe(sc->vmbus_dev);
	bus_generic_attach(sc->vmbus_dev);

	/*
	 * This taskqueue serializes vmbus devices' attach and detach
	 * for channel offer and rescind messages.
	 */
	sc->vmbus_devtq = taskqueue_create("vmbus dev", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->vmbus_devtq);
	taskqueue_start_threads(&sc->vmbus_devtq, 1, PI_NET, "vmbusdev");
	TASK_INIT(&sc->vmbus_scandone_task, 0, vmbus_scan_done_task, sc);

	/*
	 * This taskqueue handles sub-channel detach, so that vmbus
	 * device's detach running in vmbus_devtq can drain its sub-
	 * channels.
	 */
	sc->vmbus_subchtq = taskqueue_create("vmbus subch", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->vmbus_subchtq);
	taskqueue_start_threads(&sc->vmbus_subchtq, 1, PI_NET, "vmbussch");

	/*
	 * Start vmbus scanning.
	 */
	error = vmbus_req_channels(sc);
	if (error) {
		device_printf(sc->vmbus_dev, "channel request failed: %d\n",
		    error);
		return (error);
	}

	/*
	 * Wait for all vmbus devices from the initial channel offers to be
	 * attached.
	 */
	GIANT_REQUIRED;
	while (!sc->vmbus_scandone)
		mtx_sleep(&sc->vmbus_scandone, &Giant, 0, "vmbusdev", 0);

	if (bootverbose) {
		device_printf(sc->vmbus_dev, "device scan, probe and attach "
		    "done\n");
	}
	return (0);
}

/*
 * Free the device and sub-channel taskqueues.  Giant is dropped around
 * taskqueue_free(), which drains the queue and may sleep.
 */
static void
vmbus_scan_teardown(struct vmbus_softc *sc)
{

	GIANT_REQUIRED;
	if (sc->vmbus_devtq != NULL) {
		mtx_unlock(&Giant);
		taskqueue_free(sc->vmbus_devtq);
		mtx_lock(&Giant);
		sc->vmbus_devtq = NULL;
	}
	if (sc->vmbus_subchtq != NULL) {
		mtx_unlock(&Giant);
		taskqueue_free(sc->vmbus_subchtq);
		mtx_lock(&Giant);
		sc->vmbus_subchtq = NULL;
	}
}

/*
 * Dispatch a channel message to its handler in
 * vmbus_chanmsg_handlers[], then let the channel layer process it.
 */
static void
vmbus_chanmsg_handle(struct vmbus_softc *sc, const struct vmbus_message *msg)
{
	vmbus_chanmsg_proc_t msg_proc;
	uint32_t msg_type;

	msg_type = ((const struct vmbus_chanmsg_hdr *)msg->msg_data)->chm_type;
	if (msg_type >= VMBUS_CHANMSG_TYPE_MAX) {
		device_printf(sc->vmbus_dev, "unknown message type 0x%x\n",
		    msg_type);
		return;
	}

	msg_proc = vmbus_chanmsg_handlers[msg_type];
	if (msg_proc != NULL)
		msg_proc(sc, msg);

	/* Channel specific processing */
	vmbus_chan_msgproc(sc, msg);
}

/*
 * Per-cpu message taskqueue handler: drain this cpu's SynIC message
 * slot, EOMing when the hypervisor indicates more messages pending.
 */
static void
vmbus_msg_task(void *xsc, int pending __unused)
{
	struct vmbus_softc *sc = xsc;
	volatile struct vmbus_message *msg;

	msg = VMBUS_PCPU_GET(sc, message, curcpu) + VMBUS_SINT_MESSAGE;
	for (;;) {
		if (msg->msg_type == HYPERV_MSGTYPE_NONE) {
			/* No message */
			break;
		} else if (msg->msg_type == HYPERV_MSGTYPE_CHANNEL) {
			/* Channel message */
			vmbus_chanmsg_handle(sc,
			    __DEVOLATILE(const struct vmbus_message *, msg));
		}

		msg->msg_type = HYPERV_MSGTYPE_NONE;
		/*
		 * Make sure the write to msg_type (i.e. set to
		 * HYPERV_MSGTYPE_NONE) happens before we read the
		 * msg_flags and EOMing.  Otherwise, the EOMing will
		 * not deliver any more messages since there is no
		 * empty slot
		 *
		 * NOTE:
		 * mb() is used here, since atomic_thread_fence_seq_cst()
		 * will become compiler fence on UP kernel.
		 */
		mb();
		if (msg->msg_flags & VMBUS_MSGFLAG_PENDING) {
			/*
			 * This will cause message queue rescan to possibly
			 * deliver another msg from the hypervisor
			 */
			wrmsr(MSR_HV_EOM, 0);
		}
	}
}

/*
 * Interrupt handler body: handle the event timer message, process
 * per-cpu events, and schedule the message task if a message is
 * pending on this cpu.
 */
static __inline int
vmbus_handle_intr1(struct vmbus_softc *sc, struct trapframe *frame, int cpu)
{
	volatile struct vmbus_message *msg;
	struct vmbus_message *msg_base;

	msg_base = VMBUS_PCPU_GET(sc, message, cpu);

	/*
	 * Check event timer.
	 *
	 * TODO: move this to independent IDT vector.
	 */
	msg = msg_base + VMBUS_SINT_TIMER;
	if (msg->msg_type == HYPERV_MSGTYPE_TIMER_EXPIRED) {
		msg->msg_type = HYPERV_MSGTYPE_NONE;

		vmbus_et_intr(frame);

		/*
		 * Make sure the write to msg_type (i.e. set to
		 * HYPERV_MSGTYPE_NONE) happens before we read the
		 * msg_flags and EOMing.  Otherwise, the EOMing will
		 * not deliver any more messages since there is no
		 * empty slot
		 *
		 * NOTE:
		 * mb() is used here, since atomic_thread_fence_seq_cst()
		 * will become compiler fence on UP kernel.
		 */
		mb();
		if (msg->msg_flags & VMBUS_MSGFLAG_PENDING) {
			/*
			 * This will cause message queue rescan to possibly
			 * deliver another msg from the hypervisor
			 */
			wrmsr(MSR_HV_EOM, 0);
		}
	}

	/*
	 * Check events.  Hot path for network and storage I/O data; high rate.
	 *
	 * NOTE:
	 * As recommended by the Windows guest fellows, we check events before
	 * checking messages.
	 */
	sc->vmbus_event_proc(sc, cpu);

	/*
	 * Check messages.  Mainly management stuffs; ultra low rate.
	 */
	msg = msg_base + VMBUS_SINT_MESSAGE;
	if (__predict_false(msg->msg_type != HYPERV_MSGTYPE_NONE)) {
		taskqueue_enqueue(VMBUS_PCPU_GET(sc, message_tq, cpu),
		    VMBUS_PCPU_PTR(sc, message_task, cpu));
	}

	return (FILTER_HANDLED);
}

/*
 * Entry point from the vmbus IDT vector; runs inside a critical
 * section so the per-cpu state cannot migrate under us.
 */
void
vmbus_handle_intr(struct trapframe *trap_frame)
{
	struct vmbus_softc *sc = vmbus_get_softc();
	int cpu = curcpu;

	/*
	 * Disable preemption.
	 */
	critical_enter();

	/*
	 * Do a little interrupt counting.
	 */
	(*VMBUS_PCPU_GET(sc, intr_cnt, cpu))++;

	vmbus_handle_intr1(sc, trap_frame, cpu);

	/*
	 * Enable preemption.
	 */
	critical_exit();
}

/*
 * Per-cpu SynIC setup: record the virtual processor id, program the
 * message and event-flags pages, unmask the message/timer SINTs with
 * our IDT vector, and finally enable the SynIC.
 */
static void
vmbus_synic_setup(void *xsc)
{
	struct vmbus_softc *sc = xsc;
	int cpu = curcpu;
	uint64_t val, orig;
	uint32_t sint;

	if (hyperv_features & CPUID_HV_MSR_VP_INDEX) {
		/* Save virtual processor id. */
		VMBUS_PCPU_GET(sc, vcpuid, cpu) = rdmsr(MSR_HV_VP_INDEX);
	} else {
		/* Set virtual processor id to 0 for compatibility. */
		VMBUS_PCPU_GET(sc, vcpuid, cpu) = 0;
	}

	/*
	 * Setup the SynIC message.
	 */
	orig = rdmsr(MSR_HV_SIMP);
	val = MSR_HV_SIMP_ENABLE | (orig & MSR_HV_SIMP_RSVD_MASK) |
	    ((VMBUS_PCPU_GET(sc, message_dma.hv_paddr, cpu) >> PAGE_SHIFT) <<
	     MSR_HV_SIMP_PGSHIFT);
	wrmsr(MSR_HV_SIMP, val);

	/*
	 * Setup the SynIC event flags.
	 */
	orig = rdmsr(MSR_HV_SIEFP);
	val = MSR_HV_SIEFP_ENABLE | (orig & MSR_HV_SIEFP_RSVD_MASK) |
	    ((VMBUS_PCPU_GET(sc, event_flags_dma.hv_paddr, cpu)
	      >> PAGE_SHIFT) << MSR_HV_SIEFP_PGSHIFT);
	wrmsr(MSR_HV_SIEFP, val);


	/*
	 * Configure and unmask SINT for message and event flags.
	 */
	sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE;
	orig = rdmsr(sint);
	val = sc->vmbus_idtvec | MSR_HV_SINT_AUTOEOI |
	    (orig & MSR_HV_SINT_RSVD_MASK);
	wrmsr(sint, val);

	/*
	 * Configure and unmask SINT for timer.
	 */
	sint = MSR_HV_SINT0 + VMBUS_SINT_TIMER;
	orig = rdmsr(sint);
	val = sc->vmbus_idtvec | MSR_HV_SINT_AUTOEOI |
	    (orig & MSR_HV_SINT_RSVD_MASK);
	wrmsr(sint, val);

	/*
	 * All done; enable SynIC.
	 */
	orig = rdmsr(MSR_HV_SCONTROL);
	val = MSR_HV_SCTRL_ENABLE | (orig & MSR_HV_SCTRL_RSVD_MASK);
	wrmsr(MSR_HV_SCONTROL, val);
}

/*
 * Per-cpu SynIC teardown: reverse of vmbus_synic_setup() — disable the
 * SynIC, mask both SINTs and clear the message/event-flags pages.
 */
static void
vmbus_synic_teardown(void *arg)
{
	uint64_t orig;
	uint32_t sint;

	/*
	 * Disable SynIC.
	 */
	orig = rdmsr(MSR_HV_SCONTROL);
	wrmsr(MSR_HV_SCONTROL, (orig & MSR_HV_SCTRL_RSVD_MASK));

	/*
	 * Mask message and event flags SINT.
	 */
	sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE;
	orig = rdmsr(sint);
	wrmsr(sint, orig | MSR_HV_SINT_MASKED);

	/*
	 * Mask timer SINT.
	 */
	sint = MSR_HV_SINT0 + VMBUS_SINT_TIMER;
	orig = rdmsr(sint);
	wrmsr(sint, orig | MSR_HV_SINT_MASKED);

	/*
	 * Teardown SynIC message.
	 */
	orig = rdmsr(MSR_HV_SIMP);
	wrmsr(MSR_HV_SIMP, (orig & MSR_HV_SIMP_RSVD_MASK));

	/*
	 * Teardown SynIC event flags.
	 */
	orig = rdmsr(MSR_HV_SIEFP);
	wrmsr(MSR_HV_SIEFP, (orig & MSR_HV_SIEFP_RSVD_MASK));
}

/*
 * Allocate the per-cpu message/event-flags pages plus the global event
 * flags page and the two MNF pages.  Returns ENOMEM on any failure;
 * the caller cleans up via vmbus_dma_free().
 */
static int
vmbus_dma_alloc(struct vmbus_softc *sc)
{
	bus_dma_tag_t parent_dtag;
	uint8_t *evtflags;
	int cpu;

	parent_dtag = bus_get_dma_tag(sc->vmbus_dev);
	CPU_FOREACH(cpu) {
		void *ptr;

		/*
		 * Per-cpu messages and event flags.
		 */
		ptr = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
		    PAGE_SIZE, VMBUS_PCPU_PTR(sc, message_dma, cpu),
		    BUS_DMA_WAITOK | BUS_DMA_ZERO);
		if (ptr == NULL)
			return ENOMEM;
		VMBUS_PCPU_GET(sc, message, cpu) = ptr;

		ptr = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
		    PAGE_SIZE, VMBUS_PCPU_PTR(sc, event_flags_dma, cpu),
		    BUS_DMA_WAITOK | BUS_DMA_ZERO);
		if (ptr == NULL)
			return ENOMEM;
		VMBUS_PCPU_GET(sc, event_flags, cpu) = ptr;
	}

	/* Global event flags page: rx flags first half, tx second half. */
	evtflags = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
	    PAGE_SIZE, &sc->vmbus_evtflags_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (evtflags == NULL)
		return ENOMEM;
	sc->vmbus_rx_evtflags = (u_long *)evtflags;
	sc->vmbus_tx_evtflags = (u_long *)(evtflags + (PAGE_SIZE / 2));
	sc->vmbus_evtflags = evtflags;

	sc->vmbus_mnf1 = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
	    PAGE_SIZE, &sc->vmbus_mnf1_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (sc->vmbus_mnf1 == NULL)
		return ENOMEM;

	sc->vmbus_mnf2 = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
	    sizeof(struct vmbus_mnf), &sc->vmbus_mnf2_dma,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (sc->vmbus_mnf2 == NULL)
		return ENOMEM;

	return 0;
}

/*
 * Free everything allocated by vmbus_dma_alloc(); safe to call on a
 * partially-allocated softc (NULL checks throughout).
 */
static void
vmbus_dma_free(struct vmbus_softc *sc)
{
	int cpu;

	if (sc->vmbus_evtflags != NULL) {
		hyperv_dmamem_free(&sc->vmbus_evtflags_dma, sc->vmbus_evtflags);
		sc->vmbus_evtflags = NULL;
		sc->vmbus_rx_evtflags = NULL;
		sc->vmbus_tx_evtflags = NULL;
	}
	if (sc->vmbus_mnf1 != NULL) {
		hyperv_dmamem_free(&sc->vmbus_mnf1_dma, sc->vmbus_mnf1);
		sc->vmbus_mnf1 = NULL;
	}
	if (sc->vmbus_mnf2 != NULL) {
		hyperv_dmamem_free(&sc->vmbus_mnf2_dma, sc->vmbus_mnf2);
		sc->vmbus_mnf2 = NULL;
	}

	CPU_FOREACH(cpu) {
		if (VMBUS_PCPU_GET(sc, message, cpu) != NULL) {
			hyperv_dmamem_free(
			    VMBUS_PCPU_PTR(sc, message_dma, cpu),
			    VMBUS_PCPU_GET(sc, message, cpu));
			VMBUS_PCPU_GET(sc, message, cpu) = NULL;
		}
		if (VMBUS_PCPU_GET(sc, event_flags, cpu) != NULL) {
			hyperv_dmamem_free(
			    VMBUS_PCPU_PTR(sc, event_flags_dma, cpu),
			    VMBUS_PCPU_GET(sc, event_flags, cpu));
			VMBUS_PCPU_GET(sc, event_flags, cpu) = NULL;
		}
	}
}

/*
 * Set up per-cpu interrupt counters and the per-cpu event/message
 * taskqueues (each pinned to its cpu), then allocate a free IDT
 * vector for the vmbus ISR.
 */
static int
vmbus_intr_setup(struct vmbus_softc *sc)
{
	int cpu;

	CPU_FOREACH(cpu) {
		char buf[MAXCOMLEN + 1];
		cpuset_t cpu_mask;

		/* Allocate an interrupt counter for Hyper-V interrupt */
		snprintf(buf, sizeof(buf), "cpu%d:hyperv", cpu);
		intrcnt_add(buf, VMBUS_PCPU_PTR(sc, intr_cnt, cpu));

		/*
		 * Setup taskqueue to handle events.  Task will be per-
		 * channel.
		 */
		VMBUS_PCPU_GET(sc, event_tq, cpu) = taskqueue_create_fast(
		    "hyperv event", M_WAITOK, taskqueue_thread_enqueue,
		    VMBUS_PCPU_PTR(sc, event_tq, cpu));
		CPU_SETOF(cpu, &cpu_mask);
		taskqueue_start_threads_cpuset(
		    VMBUS_PCPU_PTR(sc, event_tq, cpu), 1, PI_NET, &cpu_mask,
		    "hvevent%d", cpu);

		/*
		 * Setup tasks and taskqueues to handle messages.
		 */
		VMBUS_PCPU_GET(sc, message_tq, cpu) = taskqueue_create_fast(
		    "hyperv msg", M_WAITOK, taskqueue_thread_enqueue,
		    VMBUS_PCPU_PTR(sc, message_tq, cpu));
		CPU_SETOF(cpu, &cpu_mask);
		taskqueue_start_threads_cpuset(
		    VMBUS_PCPU_PTR(sc, message_tq, cpu), 1, PI_NET, &cpu_mask,
		    "hvmsg%d", cpu);
		TASK_INIT(VMBUS_PCPU_PTR(sc, message_task, cpu), 0,
		    vmbus_msg_task, sc);
	}

	/*
	 * All Hyper-V ISR required resources are setup, now let's find a
	 * free IDT vector for Hyper-V ISR and set it up.
	 */
	sc->vmbus_idtvec = lapic_ipi_alloc(IDTVEC(vmbus_isr));
	if (sc->vmbus_idtvec < 0) {
		device_printf(sc->vmbus_dev, "cannot find free IDT vector\n");
		return ENXIO;
	}
	if (bootverbose) {
		device_printf(sc->vmbus_dev, "vmbus IDT vector %d\n",
		    sc->vmbus_idtvec);
	}
	return 0;
}

/*
 * Release the IDT vector and drain/free the per-cpu taskqueues set up
 * by vmbus_intr_setup().
 */
static void
vmbus_intr_teardown(struct vmbus_softc *sc)
{
	int cpu;

	if (sc->vmbus_idtvec >= 0) {
		lapic_ipi_free(sc->vmbus_idtvec);
		sc->vmbus_idtvec = -1;
	}

	CPU_FOREACH(cpu) {
		if (VMBUS_PCPU_GET(sc, event_tq, cpu) != NULL) {
			taskqueue_free(VMBUS_PCPU_GET(sc, event_tq, cpu));
			VMBUS_PCPU_GET(sc, event_tq, cpu) = NULL;
		}
		if (VMBUS_PCPU_GET(sc, message_tq, cpu) != NULL) {
			taskqueue_drain(VMBUS_PCPU_GET(sc, message_tq, cpu),
			    VMBUS_PCPU_PTR(sc, message_task, cpu));
			taskqueue_free(VMBUS_PCPU_GET(sc, message_tq, cpu));
			VMBUS_PCPU_GET(sc, message_tq, cpu) = NULL;
		}
	}
}

/* No instance variables are exported to children. */
static int
vmbus_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
	return (ENOENT);
}

/*
 * Format "classid=<guid> deviceid=<guid>" pnp info for a channel
 * child; the event timer child has no channel and gets nothing.
 */
static int
vmbus_child_pnpinfo_str(device_t dev, device_t child, char *buf, size_t buflen)
{
	const struct vmbus_channel *chan;
	char guidbuf[HYPERV_GUID_STRLEN];

	chan = vmbus_get_channel(child);
	if (chan == NULL) {
		/* Event timer device, which does not belong to a channel */
		return (0);
	}

	strlcat(buf, "classid=", buflen);
	hyperv_guid2str(&chan->ch_guid_type, guidbuf, sizeof(guidbuf));
	strlcat(buf, guidbuf, buflen);

	strlcat(buf, " deviceid=", buflen);
	hyperv_guid2str(&chan->ch_guid_inst, guidbuf, sizeof(guidbuf));
	strlcat(buf, guidbuf, buflen);

	return (0);
}

/*
 * Create, configure and probe/attach a device_t for the channel;
 * Giant is held across the newbus operations.
 */
int
vmbus_add_child(struct vmbus_channel *chan)
{
	struct vmbus_softc *sc = chan->ch_vmbus;
	device_t parent = sc->vmbus_dev;

	mtx_lock(&Giant);

	chan->ch_dev = device_add_child(parent, NULL, -1);
	if (chan->ch_dev == NULL) {
		mtx_unlock(&Giant);
		device_printf(parent, "device_add_child for chan%u failed\n",
		    chan->ch_id);
		return (ENXIO);
	}
	device_set_ivars(chan->ch_dev, chan);
	device_probe_and_attach(chan->ch_dev);

	mtx_unlock(&Giant);
	return (0);
}

/* Delete the channel's device_t, if it has one; Giant held. */
int
vmbus_delete_child(struct vmbus_channel *chan)
{
	int error = 0;

	mtx_lock(&Giant);
	if (chan->ch_dev != NULL) {
		error = device_delete_child(chan->ch_vmbus->vmbus_dev,
		    chan->ch_dev);
		chan->ch_dev = NULL;
	}
	mtx_unlock(&Giant);
	return (error);
}

/* sysctl handler reporting the negotiated version as "major.minor". */
static int
vmbus_sysctl_version(SYSCTL_HANDLER_ARGS)
{
	struct vmbus_softc *sc = arg1;
	char verstr[16];

	snprintf(verstr, sizeof(verstr), "%u.%u",
	    VMBUS_VERSION_MAJOR(sc->vmbus_version),
	    VMBUS_VERSION_MINOR(sc->vmbus_version));
	return sysctl_handle_string(oidp, verstr, sizeof(verstr), req);
}

/*
 * We need the function to make sure the MMIO resource is allocated from the
 * ranges found in _CRS.
 *
 * For the release function, we can use bus_generic_release_resource().
 */
/*
 * Bus method: allocate a resource on behalf of a child device.
 * SYS_RES_MEMORY requests are satisfied from the MMIO ranges harvested
 * from ACPI _CRS (see vmbus_get_mmio_res() below); all other resource
 * types are simply forwarded to the parent bus.
 */
static struct resource *
vmbus_alloc_resource(device_t dev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct vmbus_softc *sc = device_get_softc(dev);
	device_t parent = device_get_parent(dev);
	struct resource *res;

	if (type != SYS_RES_MEMORY)
		res = BUS_ALLOC_RESOURCE(parent, child, type, rid, start,
		    end, count, flags);
	else
		res = pcib_host_res_alloc(&sc->vmbus_mmio_res, child, type,
		    rid, start, end, count, flags);

	return (res);
}

/*
 * Return the nexus device, i.e. the grandparent of the vmbus device
 * (vmbus hangs off acpi, which hangs off the nexus).  The nexus is the
 * target of the PCIB MSI/MSI-X method forwards below.
 */
static device_t
get_nexus(device_t vmbus)
{
	device_t acpi = device_get_parent(vmbus);
	device_t nexus = device_get_parent(acpi);
	return (nexus);
}

/* Bus method: forward MSI allocation for a child to the nexus. */
static int
vmbus_alloc_msi(device_t bus, device_t dev, int count, int maxcount, int *irqs)
{
	return (PCIB_ALLOC_MSI(get_nexus(bus), dev, count, maxcount, irqs));
}

/* Bus method: forward MSI release for a child to the nexus. */
static int
vmbus_release_msi(device_t bus, device_t dev, int count, int *irqs)
{
	return (PCIB_RELEASE_MSI(get_nexus(bus), dev, count, irqs));
}

/* Bus method: forward MSI-X allocation for a child to the nexus. */
static int
vmbus_alloc_msix(device_t bus, device_t dev, int *irq)
{
	return (PCIB_ALLOC_MSIX(get_nexus(bus), dev, irq));
}

/* Bus method: forward MSI-X release for a child to the nexus. */
static int
vmbus_release_msix(device_t bus, device_t dev, int irq)
{
	return (PCIB_RELEASE_MSIX(get_nexus(bus), dev, irq));
}

/* Bus method: forward MSI address/data mapping for a child to the nexus. */
static int
vmbus_map_msi(device_t bus, device_t dev, int irq, uint64_t *addr,
    uint32_t *data)
{
	return (PCIB_MAP_MSI(get_nexus(bus), dev, irq, addr, data));
}

/*
 * VMBUS_GET_VERSION method: return the VMBus protocol version that was
 * negotiated with the hypervisor (recorded in the softc by vmbus_init()).
 */
static uint32_t
vmbus_get_version_method(device_t bus, device_t dev)
{
	struct vmbus_softc *sc = device_get_softc(bus);

	return sc->vmbus_version;
}

/*
 * VMBUS_PROBE_GUID method: check whether the child's channel carries the
 * given device type GUID.  Returns 0 on a match, ENXIO otherwise; used by
 * child drivers during probe.
 */
static int
vmbus_probe_guid_method(device_t bus, device_t dev,
    const struct hyperv_guid *guid)
{
	const struct vmbus_channel *chan = vmbus_get_channel(dev);

	if (memcmp(&chan->ch_guid_type, guid, sizeof(struct hyperv_guid)) == 0)
		return 0;
	return ENXIO;
}

/*
 * VMBUS_GET_VCPU_ID method: map a FreeBSD CPU id to the hypervisor's
 * virtual processor id, cached per-CPU in the softc.
 */
static uint32_t
vmbus_get_vcpu_id_method(device_t bus, device_t dev, int cpu)
{
	const struct vmbus_softc *sc = device_get_softc(bus);

	return (VMBUS_PCPU_GET(sc, vcpuid, cpu));
}

#define VTPM_BASE_ADDR 0xfed40000
#define FOUR_GB (1ULL << 32)

/*
 * The _CRS resources are walked twice: once collecting only >=4GB ranges
 * (parse_64) and once collecting only <4GB ranges (parse_32).  See the
 * comment in vmbus_get_mmio_res() for why the order matters.
 */
enum parse_pass { parse_64, parse_32 };

struct parse_context {
	device_t vmbus_dev;	/* device whose MMIO pool is being filled */
	enum parse_pass pass;	/* which address range this pass accepts */
};

/*
 * AcpiWalkResources() callback: harvest 32/64-bit address ranges from a
 * _CRS object into the vmbus MMIO resource pool, restricted to the
 * address class selected by the current pass.
 */
static ACPI_STATUS
parse_crs(ACPI_RESOURCE *res, void *ctx)
{
	const struct parse_context *pc = ctx;
	device_t vmbus_dev = pc->vmbus_dev;

	struct vmbus_softc *sc = device_get_softc(vmbus_dev);
	UINT64 start, end;

	switch (res->Type) {
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		start = res->Data.Address32.Address.Minimum;
		end = res->Data.Address32.Address.Maximum;
		break;

	case ACPI_RESOURCE_TYPE_ADDRESS64:
		start = res->Data.Address64.Address.Minimum;
		end = res->Data.Address64.Address.Maximum;
		break;

	default:
		/* Unused types. */
		return (AE_OK);
	}

	/*
	 * We don't use <1MB addresses.
	 */
	if (end < 0x100000)
		return (AE_OK);

	/* Don't conflict with vTPM. */
	if (end >= VTPM_BASE_ADDR && start < VTPM_BASE_ADDR)
		end = VTPM_BASE_ADDR - 1;

	/* Only record ranges belonging to the current pass' address class. */
	if ((pc->pass == parse_32 && start < FOUR_GB) ||
	    (pc->pass == parse_64 && start >= FOUR_GB))
		pcib_host_res_decodes(&sc->vmbus_mmio_res, SYS_RES_MEMORY,
		    start, end, 0);

	return (AE_OK);
}

/*
 * Walk the _CRS object of 'dev' (which may be the vmbus device itself,
 * its ACPI parent, or pcib0), feeding each resource to parse_crs() to
 * populate 'vmbus_dev's MMIO pool for the given pass.
 */
static void
vmbus_get_crs(device_t dev, device_t vmbus_dev, enum parse_pass pass)
{
	struct parse_context pc;
	ACPI_STATUS status;

	if (bootverbose)
		device_printf(dev, "walking _CRS, pass=%d\n", pass);

	pc.vmbus_dev = vmbus_dev;
	pc.pass = pass;
	status = AcpiWalkResources(acpi_get_handle(dev), "_CRS",
	    parse_crs, &pc);

	if (bootverbose && ACPI_FAILURE(status))
		device_printf(dev, "_CRS: not found, pass=%d\n", pass);
}

/*
 * One harvesting pass: look for usable _CRS objects on the vmbus device,
 * on its ACPI parent, and on pcib0 (located by scanning the ACPI
 * parent's attached children).
 */
static void
vmbus_get_mmio_res_pass(device_t dev, enum parse_pass pass)
{
	device_t acpi0, pcib0 = NULL;
	device_t *children;
	int i, count;

	/* Try to find _CRS on VMBus device */
	vmbus_get_crs(dev, dev, pass);

	/* Try to find _CRS on VMBus device's parent */
	acpi0 = device_get_parent(dev);
	vmbus_get_crs(acpi0, dev, pass);

	/* Try to locate pcib0 and find _CRS on it */
	if (device_get_children(acpi0, &children, &count) != 0)
		return;

	for (i = 0; i < count; i++) {
		if (!device_is_attached(children[i]))
			continue;

		if (strcmp("pcib0", device_get_nameunit(children[i])))
			continue;

		pcib0 = children[i];
		break;
	}

	if (pcib0)
		vmbus_get_crs(pcib0, dev, pass);

	free(children, M_TEMP);
}

/*
 * Build the MMIO resource pool used by vmbus_alloc_resource() to satisfy
 * SYS_RES_MEMORY requests from child devices.
 */
static void
vmbus_get_mmio_res(device_t dev)
{
	struct vmbus_softc *sc = device_get_softc(dev);
	/*
	 * We walk the resources twice to make sure that: in the resource
	 * list, the 32-bit resources appear behind the 64-bit resources.
	 * NB: resource_list_add() uses INSERT_TAIL.  This way, when we
	 * iterate through the list to find a range for a 64-bit BAR in
	 * vmbus_alloc_resource(), we can make sure we try to use >4GB
	 * ranges first.
	 */
	pcib_host_res_init(dev, &sc->vmbus_mmio_res);

	vmbus_get_mmio_res_pass(dev, parse_64);
	vmbus_get_mmio_res_pass(dev, parse_32);
}

/* Release the MMIO resource pool built by vmbus_get_mmio_res(). */
static void
vmbus_free_mmio_res(device_t dev)
{
	struct vmbus_softc *sc = device_get_softc(dev);

	pcib_host_res_free(dev, &sc->vmbus_mmio_res);
}

/*
 * Device probe: match the ACPI "VMBUS" id, but only for unit 0, only when
 * running as a Hyper-V guest, and only when the hypervisor advertises
 * SynIC MSR support (required for the interrupt/message machinery).
 */
static int
vmbus_probe(device_t dev)
{
	char *id[] = { "VMBUS", NULL };

	if (ACPI_ID_PROBE(device_get_parent(dev), dev, id) == NULL ||
	    device_get_unit(dev) != 0 || vm_guest != VM_GUEST_HV ||
	    (hyperv_features & CPUID_HV_MSR_SYNIC) == 0)
		return (ENXIO);

	device_set_desc(dev, "Hyper-V Vmbus");

	return (BUS_PROBE_DEFAULT);
}

/**
 * @brief Main vmbus driver initialization routine.
 *
 * Here, we
 * - initialize the vmbus driver context
 * - setup various driver entry points
 * - invoke the vmbus hv main init routine
 * - get the irq resource
 * - invoke the vmbus to add the vmbus root device
 * - setup the vmbus root device
 * - retrieve the channel offers
 */
static int
vmbus_doattach(struct vmbus_softc *sc)
{
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;
	int ret;

	/* Idempotent: may be called from attach and again from sysinit. */
	if (sc->vmbus_flags & VMBUS_FLAG_ATTACHED)
		return (0);

	vmbus_get_mmio_res(sc->vmbus_dev);

	sc->vmbus_flags |= VMBUS_FLAG_ATTACHED;

	sc->vmbus_gpadl = VMBUS_GPADL_START;
	mtx_init(&sc->vmbus_prichan_lock, "vmbus prichan", NULL, MTX_DEF);
	TAILQ_INIT(&sc->vmbus_prichans);
	mtx_init(&sc->vmbus_chan_lock, "vmbus channel", NULL, MTX_DEF);
	TAILQ_INIT(&sc->vmbus_chans);
	sc->vmbus_chmap = malloc(
	    sizeof(struct vmbus_channel *) * VMBUS_CHAN_MAX, M_DEVBUF,
	    M_WAITOK | M_ZERO);

	/*
	 * Create context for "post message" Hypercalls
	 */
	sc->vmbus_xc = vmbus_xact_ctx_create(bus_get_dma_tag(sc->vmbus_dev),
	    HYPERCALL_POSTMSGIN_SIZE, VMBUS_MSG_SIZE,
	    sizeof(struct vmbus_msghc));
	if (sc->vmbus_xc == NULL) {
		ret = ENXIO;
		goto cleanup;
	}

	/*
	 * Allocate DMA stuffs.
	 */
	ret = vmbus_dma_alloc(sc);
	if (ret != 0)
		goto cleanup;

	/*
	 * Setup interrupt.
	 */
	ret = vmbus_intr_setup(sc);
	if (ret != 0)
		goto cleanup;

	/*
	 * Setup SynIC.
	 */
	if (bootverbose)
		device_printf(sc->vmbus_dev, "smp_started = %d\n", smp_started);
	/* Program the SynIC MSRs on every CPU simultaneously. */
	smp_rendezvous(NULL, vmbus_synic_setup, NULL, sc);
	sc->vmbus_flags |= VMBUS_FLAG_SYNIC;

	/*
	 * Initialize vmbus, e.g. connect to Hypervisor.
	 */
	ret = vmbus_init(sc);
	if (ret != 0)
		goto cleanup;

	/* Pre-WS2012 hosts use a different event notification layout. */
	if (sc->vmbus_version == VMBUS_VERSION_WS2008 ||
	    sc->vmbus_version == VMBUS_VERSION_WIN7)
		sc->vmbus_event_proc = vmbus_event_proc_compat;
	else
		sc->vmbus_event_proc = vmbus_event_proc;

	ret = vmbus_scan(sc);
	if (ret != 0)
		goto cleanup;

	ctx = device_get_sysctl_ctx(sc->vmbus_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vmbus_dev));
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "version",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    vmbus_sysctl_version, "A", "vmbus version");

	return (ret);

cleanup:
	/* Unwind in reverse order of the setup above. */
	vmbus_scan_teardown(sc);
	vmbus_intr_teardown(sc);
	vmbus_dma_free(sc);
	if (sc->vmbus_xc != NULL) {
		vmbus_xact_ctx_destroy(sc->vmbus_xc);
		sc->vmbus_xc = NULL;
	}
	free(sc->vmbus_chmap, M_DEVBUF);
	mtx_destroy(&sc->vmbus_prichan_lock);
	mtx_destroy(&sc->vmbus_chan_lock);

	return (ret);
}

/*
 * Placeholder event handler installed before protocol version negotiation
 * (vmbus_doattach() replaces it with the version-appropriate handler).
 */
static void
vmbus_event_proc_dummy(struct vmbus_softc *sc __unused, int cpu __unused)
{
}

static int
vmbus_attach(device_t dev)
{
	/* Record the (singleton) softc for vmbus_get_softc()/vmbus_sysinit(). */
	vmbus_sc = device_get_softc(dev);
	vmbus_sc->vmbus_dev = dev;
	vmbus_sc->vmbus_idtvec = -1;	/* no IDT vector allocated yet */

	/*
	 * Event processing logic will be configured:
	 * - After the vmbus protocol version negotiation.
	 * - Before we request channel offers.
	 */
	vmbus_sc->vmbus_event_proc = vmbus_event_proc_dummy;

#ifndef EARLY_AP_STARTUP
	/*
	 * If the system has already booted and thread
	 * scheduling is possible indicated by the global
	 * cold set to zero, we just call the driver
	 * initialization directly.
	 */
	if (!cold)
#endif
		/*
		 * With EARLY_AP_STARTUP the real attach always runs here;
		 * otherwise a cold boot defers it to vmbus_sysinit() below.
		 */
		vmbus_doattach(vmbus_sc);

	return (0);
}

/*
 * Device detach: tear down in the reverse order of vmbus_doattach() —
 * children, channels, scan state, host connection, SynIC, interrupts,
 * DMA, xact context, channel map/locks, and finally the MMIO pool.
 */
static int
vmbus_detach(device_t dev)
{
	struct vmbus_softc *sc = device_get_softc(dev);

	bus_generic_detach(dev);
	vmbus_chan_destroy_all(sc);

	vmbus_scan_teardown(sc);

	vmbus_disconnect(sc);

	if (sc->vmbus_flags & VMBUS_FLAG_SYNIC) {
		sc->vmbus_flags &= ~VMBUS_FLAG_SYNIC;
		/* Disable the SynIC MSRs on every CPU simultaneously. */
		smp_rendezvous(NULL, vmbus_synic_teardown, NULL, NULL);
	}

	vmbus_intr_teardown(sc);
	vmbus_dma_free(sc);

	if (sc->vmbus_xc != NULL) {
		vmbus_xact_ctx_destroy(sc->vmbus_xc);
		sc->vmbus_xc = NULL;
	}

	free(sc->vmbus_chmap, M_DEVBUF);
	mtx_destroy(&sc->vmbus_prichan_lock);
	mtx_destroy(&sc->vmbus_chan_lock);

	vmbus_free_mmio_res(dev);

	return (0);
}

#ifndef EARLY_AP_STARTUP

/*
 * Deferred attach hook for cold boots (see vmbus_attach()): runs the
 * real initialization once SMP is up, but only on Hyper-V guests whose
 * vmbus device actually attached (sc != NULL).
 */
static void
vmbus_sysinit(void *arg __unused)
{
	struct vmbus_softc *sc = vmbus_get_softc();

	if (vm_guest != VM_GUEST_HV || sc == NULL)
		return;

	/*
	 * If the system has already booted and thread
	 * scheduling is possible, as indicated by the
	 * global cold set to zero, we just call the driver
	 * initialization directly.
	 */
	if (!cold)
		vmbus_doattach(sc);
}
/*
 * NOTE:
 * We have to start as the last step of SI_SUB_SMP, i.e. after SMP is
 * initialized.
 */
SYSINIT(vmbus_initialize, SI_SUB_SMP, SI_ORDER_ANY, vmbus_sysinit, NULL);

#endif /* !EARLY_AP_STARTUP */