1 /*- 2 * Copyright (c) 2009-2012,2016 Microsoft Corp. 3 * Copyright (c) 2012 NetApp Inc. 4 * Copyright (c) 2012 Citrix Inc. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice unmodified, this list of conditions, and the following 12 * disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 /* 30 * VM Bus Driver Implementation 31 */ 32 #include <sys/cdefs.h> 33 __FBSDID("$FreeBSD$"); 34 35 #include <sys/param.h> 36 #include <sys/bus.h> 37 #include <sys/kernel.h> 38 #include <sys/lock.h> 39 #include <sys/malloc.h> 40 #include <sys/module.h> 41 #include <sys/proc.h> 42 #include <sys/sysctl.h> 43 #include <sys/syslog.h> 44 #include <sys/systm.h> 45 #include <sys/rtprio.h> 46 #include <sys/interrupt.h> 47 #include <sys/sx.h> 48 #include <sys/taskqueue.h> 49 #include <sys/mutex.h> 50 #include <sys/smp.h> 51 52 #include <machine/resource.h> 53 #include <sys/rman.h> 54 55 #include <machine/stdarg.h> 56 #include <machine/intr_machdep.h> 57 #include <machine/md_var.h> 58 #include <machine/segments.h> 59 #include <sys/pcpu.h> 60 #include <x86/apicvar.h> 61 62 #include <dev/hyperv/include/hyperv.h> 63 #include <dev/hyperv/vmbus/hv_vmbus_priv.h> 64 #include <dev/hyperv/vmbus/hyperv_reg.h> 65 #include <dev/hyperv/vmbus/hyperv_var.h> 66 #include <dev/hyperv/vmbus/vmbus_reg.h> 67 #include <dev/hyperv/vmbus/vmbus_var.h> 68 69 #include <contrib/dev/acpica/include/acpi.h> 70 #include "acpi_if.h" 71 #include "vmbus_if.h" 72 73 #define VMBUS_GPADL_START 0xe1e10 74 75 struct vmbus_msghc { 76 struct hypercall_postmsg_in *mh_inprm; 77 struct hypercall_postmsg_in mh_inprm_save; 78 struct hyperv_dma mh_inprm_dma; 79 80 struct vmbus_message *mh_resp; 81 struct vmbus_message mh_resp0; 82 }; 83 84 struct vmbus_msghc_ctx { 85 struct vmbus_msghc *mhc_free; 86 struct mtx mhc_free_lock; 87 uint32_t mhc_flags; 88 89 struct vmbus_msghc *mhc_active; 90 struct mtx mhc_active_lock; 91 }; 92 93 #define VMBUS_MSGHC_CTXF_DESTROY 0x0001 94 95 static int vmbus_init(struct vmbus_softc *); 96 static int vmbus_connect(struct vmbus_softc *, uint32_t); 97 static int vmbus_req_channels(struct vmbus_softc *sc); 98 static void vmbus_disconnect(struct vmbus_softc *); 99 static int vmbus_scan(struct vmbus_softc *); 100 static void vmbus_scan_wait(struct vmbus_softc *); 101 static void vmbus_scan_newdev(struct vmbus_softc *); 102 103 static int vmbus_sysctl_version(SYSCTL_HANDLER_ARGS); 104 105 static struct vmbus_msghc_ctx *vmbus_msghc_ctx_create(bus_dma_tag_t); 106 static void vmbus_msghc_ctx_destroy( 107 struct vmbus_msghc_ctx *); 108 static void vmbus_msghc_ctx_free(struct vmbus_msghc_ctx *); 109 static struct vmbus_msghc *vmbus_msghc_alloc(bus_dma_tag_t); 110 static void vmbus_msghc_free(struct vmbus_msghc *); 111 static struct vmbus_msghc *vmbus_msghc_get1(struct vmbus_msghc_ctx *, 112 uint32_t); 113 114 struct vmbus_softc *vmbus_sc; 115 116 extern inthand_t IDTVEC(vmbus_isr); 117 118 static const uint32_t vmbus_version[] = { 119 VMBUS_VERSION_WIN8_1, 120 VMBUS_VERSION_WIN8, 121 VMBUS_VERSION_WIN7, 122 VMBUS_VERSION_WS2008 123 }; 124 125 static struct vmbus_msghc * 126 vmbus_msghc_alloc(bus_dma_tag_t parent_dtag) 127 { 128 struct vmbus_msghc *mh; 129 130 mh = malloc(sizeof(*mh), M_DEVBUF, M_WAITOK | M_ZERO); 131 132 mh->mh_inprm = hyperv_dmamem_alloc(parent_dtag, 133 HYPERCALL_POSTMSGIN_ALIGN, 0, HYPERCALL_POSTMSGIN_SIZE, 134 &mh->mh_inprm_dma, BUS_DMA_WAITOK); 135 if (mh->mh_inprm == NULL) { 136 free(mh, M_DEVBUF); 137 return NULL; 138 } 139 return mh; 140 } 141 142 static void 143 vmbus_msghc_free(struct vmbus_msghc *mh) 144 { 145 hyperv_dmamem_free(&mh->mh_inprm_dma, mh->mh_inprm); 146 free(mh, M_DEVBUF); 147 } 148 149 static void 150 vmbus_msghc_ctx_free(struct vmbus_msghc_ctx *mhc) 151 { 152 KASSERT(mhc->mhc_active == NULL, ("still have active msg hypercall")); 153 KASSERT(mhc->mhc_free == NULL, ("still have hypercall msg")); 154 155 mtx_destroy(&mhc->mhc_free_lock); 156 mtx_destroy(&mhc->mhc_active_lock); 157 free(mhc, M_DEVBUF); 158 } 159 160 static struct vmbus_msghc_ctx * 161 vmbus_msghc_ctx_create(bus_dma_tag_t parent_dtag) 162 { 163 struct vmbus_msghc_ctx *mhc; 164 165 mhc = malloc(sizeof(*mhc), M_DEVBUF, M_WAITOK | M_ZERO); 166 mtx_init(&mhc->mhc_free_lock, "vmbus msghc free", NULL, MTX_DEF); 167 mtx_init(&mhc->mhc_active_lock, "vmbus msghc act", NULL, MTX_DEF); 168 169 mhc->mhc_free = vmbus_msghc_alloc(parent_dtag); 170 if (mhc->mhc_free == NULL) { 171 vmbus_msghc_ctx_free(mhc); 172 return NULL; 173 } 174 return mhc; 175 } 176 177 static struct vmbus_msghc * 178 vmbus_msghc_get1(struct vmbus_msghc_ctx *mhc, uint32_t dtor_flag) 179 { 180 struct vmbus_msghc *mh; 181 182 mtx_lock(&mhc->mhc_free_lock); 183 184 while ((mhc->mhc_flags & dtor_flag) == 0 && mhc->mhc_free == NULL) { 185 mtx_sleep(&mhc->mhc_free, &mhc->mhc_free_lock, 0, 186 "gmsghc", 0); 187 } 188 if (mhc->mhc_flags & dtor_flag) { 189 /* Being destroyed */ 190 mh = NULL; 191 } else { 192 mh = mhc->mhc_free; 193 KASSERT(mh != NULL, ("no free hypercall msg")); 194 KASSERT(mh->mh_resp == NULL, 195 ("hypercall msg has pending response")); 196 mhc->mhc_free = NULL; 197 } 198 199 mtx_unlock(&mhc->mhc_free_lock); 200 201 return mh; 202 } 203 204 void 205 vmbus_msghc_reset(struct vmbus_msghc *mh, size_t dsize) 206 { 207 struct hypercall_postmsg_in *inprm; 208 209 if (dsize > HYPERCALL_POSTMSGIN_DSIZE_MAX) 210 panic("invalid data size %zu", dsize); 211 212 inprm = mh->mh_inprm; 213 memset(inprm, 0, HYPERCALL_POSTMSGIN_SIZE); 214 inprm->hc_connid = VMBUS_CONNID_MESSAGE; 215 inprm->hc_msgtype = HYPERV_MSGTYPE_CHANNEL; 216 inprm->hc_dsize = dsize; 217 } 218 219 struct vmbus_msghc * 220 vmbus_msghc_get(struct vmbus_softc *sc, size_t dsize) 221 { 222 struct vmbus_msghc *mh; 223 224 if (dsize > HYPERCALL_POSTMSGIN_DSIZE_MAX) 225 panic("invalid data size %zu", dsize); 226 227 mh = vmbus_msghc_get1(sc->vmbus_msg_hc, VMBUS_MSGHC_CTXF_DESTROY); 228 if (mh == NULL) 229 return NULL; 230 231 vmbus_msghc_reset(mh, dsize); 232 return mh; 233 } 234 235 void 236 vmbus_msghc_put(struct vmbus_softc *sc, struct vmbus_msghc *mh) 237 { 238 struct vmbus_msghc_ctx *mhc = sc->vmbus_msg_hc; 239 240 KASSERT(mhc->mhc_active == NULL, ("msg hypercall is active")); 241 mh->mh_resp = NULL; 242 243 mtx_lock(&mhc->mhc_free_lock); 244 KASSERT(mhc->mhc_free == NULL, ("has free hypercall msg")); 245 mhc->mhc_free = mh; 246 mtx_unlock(&mhc->mhc_free_lock); 247 wakeup(&mhc->mhc_free); 248 } 249 250 void * 251 vmbus_msghc_dataptr(struct vmbus_msghc *mh) 252 { 253 return mh->mh_inprm->hc_data; 254 } 255 256 static void 257 vmbus_msghc_ctx_destroy(struct vmbus_msghc_ctx *mhc) 258 { 259 struct vmbus_msghc *mh; 260 261 mtx_lock(&mhc->mhc_free_lock); 262 mhc->mhc_flags |= VMBUS_MSGHC_CTXF_DESTROY; 263 mtx_unlock(&mhc->mhc_free_lock); 264 wakeup(&mhc->mhc_free); 265 266 mh = vmbus_msghc_get1(mhc, 0); 267 if (mh == NULL) 268 panic("can't get msghc"); 269 270 vmbus_msghc_free(mh); 271 vmbus_msghc_ctx_free(mhc); 272 } 273 274 int 275 vmbus_msghc_exec_noresult(struct vmbus_msghc *mh) 276 { 277 sbintime_t time = SBT_1MS; 278 int i; 279 280 /* 281 * Save the input parameter so that we could restore the input 282 * parameter if the Hypercall failed. 283 * 284 * XXX 285 * Is this really necessary?! i.e. Will the Hypercall ever 286 * overwrite the input parameter? 287 */ 288 memcpy(&mh->mh_inprm_save, mh->mh_inprm, HYPERCALL_POSTMSGIN_SIZE); 289 290 /* 291 * In order to cope with transient failures, e.g. insufficient 292 * resources on host side, we retry the post message Hypercall 293 * several times. 20 retries seem sufficient. 294 */ 295 #define HC_RETRY_MAX 20 296 297 for (i = 0; i < HC_RETRY_MAX; ++i) { 298 uint64_t status; 299 300 status = hypercall_post_message(mh->mh_inprm_dma.hv_paddr); 301 if (status == HYPERCALL_STATUS_SUCCESS) 302 return 0; 303 304 pause_sbt("hcpmsg", time, 0, C_HARDCLOCK); 305 if (time < SBT_1S * 2) 306 time *= 2; 307 308 /* Restore input parameter and try again */ 309 memcpy(mh->mh_inprm, &mh->mh_inprm_save, 310 HYPERCALL_POSTMSGIN_SIZE); 311 } 312 313 #undef HC_RETRY_MAX 314 315 return EIO; 316 } 317 318 int 319 vmbus_msghc_exec(struct vmbus_softc *sc, struct vmbus_msghc *mh) 320 { 321 struct vmbus_msghc_ctx *mhc = sc->vmbus_msg_hc; 322 int error; 323 324 KASSERT(mh->mh_resp == NULL, ("hypercall msg has pending response")); 325 326 mtx_lock(&mhc->mhc_active_lock); 327 KASSERT(mhc->mhc_active == NULL, ("pending active msg hypercall")); 328 mhc->mhc_active = mh; 329 mtx_unlock(&mhc->mhc_active_lock); 330 331 error = vmbus_msghc_exec_noresult(mh); 332 if (error) { 333 mtx_lock(&mhc->mhc_active_lock); 334 KASSERT(mhc->mhc_active == mh, ("msghc mismatch")); 335 mhc->mhc_active = NULL; 336 mtx_unlock(&mhc->mhc_active_lock); 337 } 338 return error; 339 } 340 341 const struct vmbus_message * 342 vmbus_msghc_wait_result(struct vmbus_softc *sc, struct vmbus_msghc *mh) 343 { 344 struct vmbus_msghc_ctx *mhc = sc->vmbus_msg_hc; 345 346 mtx_lock(&mhc->mhc_active_lock); 347 348 KASSERT(mhc->mhc_active == mh, ("msghc mismatch")); 349 while (mh->mh_resp == NULL) { 350 mtx_sleep(&mhc->mhc_active, &mhc->mhc_active_lock, 0, 351 "wmsghc", 0); 352 } 353 mhc->mhc_active = NULL; 354 355 mtx_unlock(&mhc->mhc_active_lock); 356 357 return mh->mh_resp; 358 } 359 360 void 361 vmbus_msghc_wakeup(struct vmbus_softc *sc, const struct vmbus_message *msg) 362 { 363 struct vmbus_msghc_ctx *mhc = sc->vmbus_msg_hc; 364 struct vmbus_msghc *mh; 365 366 mtx_lock(&mhc->mhc_active_lock); 367 368 mh = mhc->mhc_active; 369 KASSERT(mh != NULL, ("no pending msg hypercall")); 370 memcpy(&mh->mh_resp0, msg, sizeof(mh->mh_resp0)); 371 mh->mh_resp = &mh->mh_resp0; 372 373 mtx_unlock(&mhc->mhc_active_lock); 374 wakeup(&mhc->mhc_active); 375 } 376 377 uint32_t 378 vmbus_gpadl_alloc(struct vmbus_softc *sc) 379 { 380 return atomic_fetchadd_int(&sc->vmbus_gpadl, 1); 381 } 382 383 static int 384 vmbus_connect(struct vmbus_softc *sc, uint32_t version) 385 { 386 struct vmbus_chanmsg_connect *req; 387 const struct vmbus_message *msg; 388 struct vmbus_msghc *mh; 389 int error, done = 0; 390 391 mh = vmbus_msghc_get(sc, sizeof(*req)); 392 if (mh == NULL) 393 return ENXIO; 394 395 req = vmbus_msghc_dataptr(mh); 396 req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CONNECT; 397 req->chm_ver = version; 398 req->chm_evtflags = sc->vmbus_evtflags_dma.hv_paddr; 399 req->chm_mnf1 = sc->vmbus_mnf1_dma.hv_paddr; 400 req->chm_mnf2 = sc->vmbus_mnf2_dma.hv_paddr; 401 402 error = vmbus_msghc_exec(sc, mh); 403 if (error) { 404 vmbus_msghc_put(sc, mh); 405 return error; 406 } 407 408 msg = vmbus_msghc_wait_result(sc, mh); 409 done = ((const struct vmbus_chanmsg_connect_resp *) 410 msg->msg_data)->chm_done; 411 412 vmbus_msghc_put(sc, mh); 413 414 return (done ? 0 : EOPNOTSUPP); 415 } 416 417 static int 418 vmbus_init(struct vmbus_softc *sc) 419 { 420 int i; 421 422 for (i = 0; i < nitems(vmbus_version); ++i) { 423 int error; 424 425 error = vmbus_connect(sc, vmbus_version[i]); 426 if (!error) { 427 sc->vmbus_version = vmbus_version[i]; 428 device_printf(sc->vmbus_dev, "version %u.%u\n", 429 VMBUS_VERSION_MAJOR(sc->vmbus_version), 430 VMBUS_VERSION_MINOR(sc->vmbus_version)); 431 return 0; 432 } 433 } 434 return ENXIO; 435 } 436 437 static void 438 vmbus_disconnect(struct vmbus_softc *sc) 439 { 440 struct vmbus_chanmsg_disconnect *req; 441 struct vmbus_msghc *mh; 442 int error; 443 444 mh = vmbus_msghc_get(sc, sizeof(*req)); 445 if (mh == NULL) { 446 device_printf(sc->vmbus_dev, 447 "can not get msg hypercall for disconnect\n"); 448 return; 449 } 450 451 req = vmbus_msghc_dataptr(mh); 452 req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_DISCONNECT; 453 454 error = vmbus_msghc_exec_noresult(mh); 455 vmbus_msghc_put(sc, mh); 456 457 if (error) { 458 device_printf(sc->vmbus_dev, 459 "disconnect msg hypercall failed\n"); 460 } 461 } 462 463 static int 464 vmbus_req_channels(struct vmbus_softc *sc) 465 { 466 struct vmbus_chanmsg_chrequest *req; 467 struct vmbus_msghc *mh; 468 int error; 469 470 mh = vmbus_msghc_get(sc, sizeof(*req)); 471 if (mh == NULL) 472 return ENXIO; 473 474 req = vmbus_msghc_dataptr(mh); 475 req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHREQUEST; 476 477 error = vmbus_msghc_exec_noresult(mh); 478 vmbus_msghc_put(sc, mh); 479 480 return error; 481 } 482 483 void 484 vmbus_scan_newchan(struct vmbus_softc *sc) 485 { 486 mtx_lock(&sc->vmbus_scan_lock); 487 if ((sc->vmbus_scan_chcnt & VMBUS_SCAN_CHCNT_DONE) == 0) 488 sc->vmbus_scan_chcnt++; 489 mtx_unlock(&sc->vmbus_scan_lock); 490 } 491 492 void 493 vmbus_scan_done(struct vmbus_softc *sc) 494 { 495 mtx_lock(&sc->vmbus_scan_lock); 496 sc->vmbus_scan_chcnt |= VMBUS_SCAN_CHCNT_DONE; 497 mtx_unlock(&sc->vmbus_scan_lock); 498 wakeup(&sc->vmbus_scan_chcnt); 499 } 500 501 static void 502 vmbus_scan_newdev(struct vmbus_softc *sc) 503 { 504 mtx_lock(&sc->vmbus_scan_lock); 505 sc->vmbus_scan_devcnt++; 506 mtx_unlock(&sc->vmbus_scan_lock); 507 wakeup(&sc->vmbus_scan_devcnt); 508 } 509 510 static void 511 vmbus_scan_wait(struct vmbus_softc *sc) 512 { 513 uint32_t chancnt; 514 515 mtx_lock(&sc->vmbus_scan_lock); 516 while ((sc->vmbus_scan_chcnt & VMBUS_SCAN_CHCNT_DONE) == 0) { 517 mtx_sleep(&sc->vmbus_scan_chcnt, &sc->vmbus_scan_lock, 0, 518 "waitch", 0); 519 } 520 chancnt = sc->vmbus_scan_chcnt & ~VMBUS_SCAN_CHCNT_DONE; 521 522 while (sc->vmbus_scan_devcnt != chancnt) { 523 mtx_sleep(&sc->vmbus_scan_devcnt, &sc->vmbus_scan_lock, 0, 524 "waitdev", 0); 525 } 526 mtx_unlock(&sc->vmbus_scan_lock); 527 } 528 529 static int 530 vmbus_scan(struct vmbus_softc *sc) 531 { 532 int error; 533 534 /* 535 * Start vmbus scanning. 536 */ 537 error = vmbus_req_channels(sc); 538 if (error) { 539 device_printf(sc->vmbus_dev, "channel request failed: %d\n", 540 error); 541 return error; 542 } 543 544 /* 545 * Wait for all devices are added to vmbus. 546 */ 547 vmbus_scan_wait(sc); 548 549 /* 550 * Identify, probe and attach. 551 */ 552 bus_generic_probe(sc->vmbus_dev); 553 bus_generic_attach(sc->vmbus_dev); 554 555 if (bootverbose) { 556 device_printf(sc->vmbus_dev, "device scan, probe and attach " 557 "done\n"); 558 } 559 return 0; 560 } 561 562 static void 563 vmbus_msg_task(void *xsc, int pending __unused) 564 { 565 struct vmbus_softc *sc = xsc; 566 volatile struct vmbus_message *msg; 567 568 msg = VMBUS_PCPU_GET(sc, message, curcpu) + VMBUS_SINT_MESSAGE; 569 for (;;) { 570 if (msg->msg_type == HYPERV_MSGTYPE_NONE) { 571 /* No message */ 572 break; 573 } else if (msg->msg_type == HYPERV_MSGTYPE_CHANNEL) { 574 /* Channel message */ 575 vmbus_chan_msgproc(sc, 576 __DEVOLATILE(const struct vmbus_message *, msg)); 577 } 578 579 msg->msg_type = HYPERV_MSGTYPE_NONE; 580 /* 581 * Make sure the write to msg_type (i.e. set to 582 * HYPERV_MSGTYPE_NONE) happens before we read the 583 * msg_flags and EOMing. Otherwise, the EOMing will 584 * not deliver any more messages since there is no 585 * empty slot 586 * 587 * NOTE: 588 * mb() is used here, since atomic_thread_fence_seq_cst() 589 * will become compiler fence on UP kernel. 590 */ 591 mb(); 592 if (msg->msg_flags & VMBUS_MSGFLAG_PENDING) { 593 /* 594 * This will cause message queue rescan to possibly 595 * deliver another msg from the hypervisor 596 */ 597 wrmsr(MSR_HV_EOM, 0); 598 } 599 } 600 } 601 602 static __inline int 603 vmbus_handle_intr1(struct vmbus_softc *sc, struct trapframe *frame, int cpu) 604 { 605 volatile struct vmbus_message *msg; 606 struct vmbus_message *msg_base; 607 608 msg_base = VMBUS_PCPU_GET(sc, message, cpu); 609 610 /* 611 * Check event timer. 612 * 613 * TODO: move this to independent IDT vector. 614 */ 615 msg = msg_base + VMBUS_SINT_TIMER; 616 if (msg->msg_type == HYPERV_MSGTYPE_TIMER_EXPIRED) { 617 msg->msg_type = HYPERV_MSGTYPE_NONE; 618 619 vmbus_et_intr(frame); 620 621 /* 622 * Make sure the write to msg_type (i.e. set to 623 * HYPERV_MSGTYPE_NONE) happens before we read the 624 * msg_flags and EOMing. Otherwise, the EOMing will 625 * not deliver any more messages since there is no 626 * empty slot 627 * 628 * NOTE: 629 * mb() is used here, since atomic_thread_fence_seq_cst() 630 * will become compiler fence on UP kernel. 631 */ 632 mb(); 633 if (msg->msg_flags & VMBUS_MSGFLAG_PENDING) { 634 /* 635 * This will cause message queue rescan to possibly 636 * deliver another msg from the hypervisor 637 */ 638 wrmsr(MSR_HV_EOM, 0); 639 } 640 } 641 642 /* 643 * Check events. Hot path for network and storage I/O data; high rate. 644 * 645 * NOTE: 646 * As recommended by the Windows guest fellows, we check events before 647 * checking messages. 648 */ 649 sc->vmbus_event_proc(sc, cpu); 650 651 /* 652 * Check messages. Mainly management stuffs; ultra low rate. 653 */ 654 msg = msg_base + VMBUS_SINT_MESSAGE; 655 if (__predict_false(msg->msg_type != HYPERV_MSGTYPE_NONE)) { 656 taskqueue_enqueue(VMBUS_PCPU_GET(sc, message_tq, cpu), 657 VMBUS_PCPU_PTR(sc, message_task, cpu)); 658 } 659 660 return (FILTER_HANDLED); 661 } 662 663 void 664 vmbus_handle_intr(struct trapframe *trap_frame) 665 { 666 struct vmbus_softc *sc = vmbus_get_softc(); 667 int cpu = curcpu; 668 669 /* 670 * Disable preemption. 671 */ 672 critical_enter(); 673 674 /* 675 * Do a little interrupt counting. 676 */ 677 (*VMBUS_PCPU_GET(sc, intr_cnt, cpu))++; 678 679 vmbus_handle_intr1(sc, trap_frame, cpu); 680 681 /* 682 * Enable preemption. 683 */ 684 critical_exit(); 685 } 686 687 static void 688 vmbus_synic_setup(void *xsc) 689 { 690 struct vmbus_softc *sc = xsc; 691 int cpu = curcpu; 692 uint64_t val, orig; 693 uint32_t sint; 694 695 if (hyperv_features & CPUID_HV_MSR_VP_INDEX) { 696 /* 697 * Save virtual processor id. 698 */ 699 VMBUS_PCPU_GET(sc, vcpuid, cpu) = rdmsr(MSR_HV_VP_INDEX); 700 } else { 701 /* 702 * XXX 703 * Virtual processoor id is only used by a pretty broken 704 * channel selection code from storvsc. It's nothing 705 * critical even if CPUID_HV_MSR_VP_INDEX is not set; keep 706 * moving on. 707 */ 708 VMBUS_PCPU_GET(sc, vcpuid, cpu) = cpu; 709 } 710 711 /* 712 * Setup the SynIC message. 713 */ 714 orig = rdmsr(MSR_HV_SIMP); 715 val = MSR_HV_SIMP_ENABLE | (orig & MSR_HV_SIMP_RSVD_MASK) | 716 ((VMBUS_PCPU_GET(sc, message_dma.hv_paddr, cpu) >> PAGE_SHIFT) << 717 MSR_HV_SIMP_PGSHIFT); 718 wrmsr(MSR_HV_SIMP, val); 719 720 /* 721 * Setup the SynIC event flags. 722 */ 723 orig = rdmsr(MSR_HV_SIEFP); 724 val = MSR_HV_SIEFP_ENABLE | (orig & MSR_HV_SIEFP_RSVD_MASK) | 725 ((VMBUS_PCPU_GET(sc, event_flags_dma.hv_paddr, cpu) 726 >> PAGE_SHIFT) << MSR_HV_SIEFP_PGSHIFT); 727 wrmsr(MSR_HV_SIEFP, val); 728 729 730 /* 731 * Configure and unmask SINT for message and event flags. 732 */ 733 sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE; 734 orig = rdmsr(sint); 735 val = sc->vmbus_idtvec | MSR_HV_SINT_AUTOEOI | 736 (orig & MSR_HV_SINT_RSVD_MASK); 737 wrmsr(sint, val); 738 739 /* 740 * Configure and unmask SINT for timer. 741 */ 742 sint = MSR_HV_SINT0 + VMBUS_SINT_TIMER; 743 orig = rdmsr(sint); 744 val = sc->vmbus_idtvec | MSR_HV_SINT_AUTOEOI | 745 (orig & MSR_HV_SINT_RSVD_MASK); 746 wrmsr(sint, val); 747 748 /* 749 * All done; enable SynIC. 750 */ 751 orig = rdmsr(MSR_HV_SCONTROL); 752 val = MSR_HV_SCTRL_ENABLE | (orig & MSR_HV_SCTRL_RSVD_MASK); 753 wrmsr(MSR_HV_SCONTROL, val); 754 } 755 756 static void 757 vmbus_synic_teardown(void *arg) 758 { 759 uint64_t orig; 760 uint32_t sint; 761 762 /* 763 * Disable SynIC. 764 */ 765 orig = rdmsr(MSR_HV_SCONTROL); 766 wrmsr(MSR_HV_SCONTROL, (orig & MSR_HV_SCTRL_RSVD_MASK)); 767 768 /* 769 * Mask message and event flags SINT. 770 */ 771 sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE; 772 orig = rdmsr(sint); 773 wrmsr(sint, orig | MSR_HV_SINT_MASKED); 774 775 /* 776 * Mask timer SINT. 777 */ 778 sint = MSR_HV_SINT0 + VMBUS_SINT_TIMER; 779 orig = rdmsr(sint); 780 wrmsr(sint, orig | MSR_HV_SINT_MASKED); 781 782 /* 783 * Teardown SynIC message. 784 */ 785 orig = rdmsr(MSR_HV_SIMP); 786 wrmsr(MSR_HV_SIMP, (orig & MSR_HV_SIMP_RSVD_MASK)); 787 788 /* 789 * Teardown SynIC event flags. 790 */ 791 orig = rdmsr(MSR_HV_SIEFP); 792 wrmsr(MSR_HV_SIEFP, (orig & MSR_HV_SIEFP_RSVD_MASK)); 793 } 794 795 static int 796 vmbus_dma_alloc(struct vmbus_softc *sc) 797 { 798 bus_dma_tag_t parent_dtag; 799 uint8_t *evtflags; 800 int cpu; 801 802 parent_dtag = bus_get_dma_tag(sc->vmbus_dev); 803 CPU_FOREACH(cpu) { 804 void *ptr; 805 806 /* 807 * Per-cpu messages and event flags. 808 */ 809 ptr = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0, 810 PAGE_SIZE, VMBUS_PCPU_PTR(sc, message_dma, cpu), 811 BUS_DMA_WAITOK | BUS_DMA_ZERO); 812 if (ptr == NULL) 813 return ENOMEM; 814 VMBUS_PCPU_GET(sc, message, cpu) = ptr; 815 816 ptr = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0, 817 PAGE_SIZE, VMBUS_PCPU_PTR(sc, event_flags_dma, cpu), 818 BUS_DMA_WAITOK | BUS_DMA_ZERO); 819 if (ptr == NULL) 820 return ENOMEM; 821 VMBUS_PCPU_GET(sc, event_flags, cpu) = ptr; 822 } 823 824 evtflags = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0, 825 PAGE_SIZE, &sc->vmbus_evtflags_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO); 826 if (evtflags == NULL) 827 return ENOMEM; 828 sc->vmbus_rx_evtflags = (u_long *)evtflags; 829 sc->vmbus_tx_evtflags = (u_long *)(evtflags + (PAGE_SIZE / 2)); 830 sc->vmbus_evtflags = evtflags; 831 832 sc->vmbus_mnf1 = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0, 833 PAGE_SIZE, &sc->vmbus_mnf1_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO); 834 if (sc->vmbus_mnf1 == NULL) 835 return ENOMEM; 836 837 sc->vmbus_mnf2 = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0, 838 PAGE_SIZE, &sc->vmbus_mnf2_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO); 839 if (sc->vmbus_mnf2 == NULL) 840 return ENOMEM; 841 842 return 0; 843 } 844 845 static void 846 vmbus_dma_free(struct vmbus_softc *sc) 847 { 848 int cpu; 849 850 if (sc->vmbus_evtflags != NULL) { 851 hyperv_dmamem_free(&sc->vmbus_evtflags_dma, sc->vmbus_evtflags); 852 sc->vmbus_evtflags = NULL; 853 sc->vmbus_rx_evtflags = NULL; 854 sc->vmbus_tx_evtflags = NULL; 855 } 856 if (sc->vmbus_mnf1 != NULL) { 857 hyperv_dmamem_free(&sc->vmbus_mnf1_dma, sc->vmbus_mnf1); 858 sc->vmbus_mnf1 = NULL; 859 } 860 if (sc->vmbus_mnf2 != NULL) { 861 hyperv_dmamem_free(&sc->vmbus_mnf2_dma, sc->vmbus_mnf2); 862 sc->vmbus_mnf2 = NULL; 863 } 864 865 CPU_FOREACH(cpu) { 866 if (VMBUS_PCPU_GET(sc, message, cpu) != NULL) { 867 hyperv_dmamem_free( 868 VMBUS_PCPU_PTR(sc, message_dma, cpu), 869 VMBUS_PCPU_GET(sc, message, cpu)); 870 VMBUS_PCPU_GET(sc, message, cpu) = NULL; 871 } 872 if (VMBUS_PCPU_GET(sc, event_flags, cpu) != NULL) { 873 hyperv_dmamem_free( 874 VMBUS_PCPU_PTR(sc, event_flags_dma, cpu), 875 VMBUS_PCPU_GET(sc, event_flags, cpu)); 876 VMBUS_PCPU_GET(sc, event_flags, cpu) = NULL; 877 } 878 } 879 } 880 881 static int 882 vmbus_intr_setup(struct vmbus_softc *sc) 883 { 884 int cpu; 885 886 CPU_FOREACH(cpu) { 887 char buf[MAXCOMLEN + 1]; 888 cpuset_t cpu_mask; 889 890 /* Allocate an interrupt counter for Hyper-V interrupt */ 891 snprintf(buf, sizeof(buf), "cpu%d:hyperv", cpu); 892 intrcnt_add(buf, VMBUS_PCPU_PTR(sc, intr_cnt, cpu)); 893 894 /* 895 * Setup taskqueue to handle events. Task will be per- 896 * channel. 897 */ 898 VMBUS_PCPU_GET(sc, event_tq, cpu) = taskqueue_create_fast( 899 "hyperv event", M_WAITOK, taskqueue_thread_enqueue, 900 VMBUS_PCPU_PTR(sc, event_tq, cpu)); 901 CPU_SETOF(cpu, &cpu_mask); 902 taskqueue_start_threads_cpuset( 903 VMBUS_PCPU_PTR(sc, event_tq, cpu), 1, PI_NET, &cpu_mask, 904 "hvevent%d", cpu); 905 906 /* 907 * Setup tasks and taskqueues to handle messages. 908 */ 909 VMBUS_PCPU_GET(sc, message_tq, cpu) = taskqueue_create_fast( 910 "hyperv msg", M_WAITOK, taskqueue_thread_enqueue, 911 VMBUS_PCPU_PTR(sc, message_tq, cpu)); 912 CPU_SETOF(cpu, &cpu_mask); 913 taskqueue_start_threads_cpuset( 914 VMBUS_PCPU_PTR(sc, message_tq, cpu), 1, PI_NET, &cpu_mask, 915 "hvmsg%d", cpu); 916 TASK_INIT(VMBUS_PCPU_PTR(sc, message_task, cpu), 0, 917 vmbus_msg_task, sc); 918 } 919 920 /* 921 * All Hyper-V ISR required resources are setup, now let's find a 922 * free IDT vector for Hyper-V ISR and set it up. 923 */ 924 sc->vmbus_idtvec = lapic_ipi_alloc(IDTVEC(vmbus_isr)); 925 if (sc->vmbus_idtvec < 0) { 926 device_printf(sc->vmbus_dev, "cannot find free IDT vector\n"); 927 return ENXIO; 928 } 929 if(bootverbose) { 930 device_printf(sc->vmbus_dev, "vmbus IDT vector %d\n", 931 sc->vmbus_idtvec); 932 } 933 return 0; 934 } 935 936 static void 937 vmbus_intr_teardown(struct vmbus_softc *sc) 938 { 939 int cpu; 940 941 if (sc->vmbus_idtvec >= 0) { 942 lapic_ipi_free(sc->vmbus_idtvec); 943 sc->vmbus_idtvec = -1; 944 } 945 946 CPU_FOREACH(cpu) { 947 if (VMBUS_PCPU_GET(sc, event_tq, cpu) != NULL) { 948 taskqueue_free(VMBUS_PCPU_GET(sc, event_tq, cpu)); 949 VMBUS_PCPU_GET(sc, event_tq, cpu) = NULL; 950 } 951 if (VMBUS_PCPU_GET(sc, message_tq, cpu) != NULL) { 952 taskqueue_drain(VMBUS_PCPU_GET(sc, message_tq, cpu), 953 VMBUS_PCPU_PTR(sc, message_task, cpu)); 954 taskqueue_free(VMBUS_PCPU_GET(sc, message_tq, cpu)); 955 VMBUS_PCPU_GET(sc, message_tq, cpu) = NULL; 956 } 957 } 958 } 959 960 static int 961 vmbus_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) 962 { 963 struct hv_device *child_dev_ctx = device_get_ivars(child); 964 965 switch (index) { 966 case HV_VMBUS_IVAR_TYPE: 967 *result = (uintptr_t)&child_dev_ctx->class_id; 968 return (0); 969 970 case HV_VMBUS_IVAR_INSTANCE: 971 *result = (uintptr_t)&child_dev_ctx->device_id; 972 return (0); 973 974 case HV_VMBUS_IVAR_DEVCTX: 975 *result = (uintptr_t)child_dev_ctx; 976 return (0); 977 978 case HV_VMBUS_IVAR_NODE: 979 *result = (uintptr_t)child_dev_ctx->device; 980 return (0); 981 } 982 return (ENOENT); 983 } 984 985 static int 986 vmbus_write_ivar(device_t dev, device_t child, int index, uintptr_t value) 987 { 988 switch (index) { 989 case HV_VMBUS_IVAR_TYPE: 990 case HV_VMBUS_IVAR_INSTANCE: 991 case HV_VMBUS_IVAR_DEVCTX: 992 case HV_VMBUS_IVAR_NODE: 993 /* read-only */ 994 return (EINVAL); 995 } 996 return (ENOENT); 997 } 998 999 static int 1000 vmbus_child_pnpinfo_str(device_t dev, device_t child, char *buf, size_t buflen) 1001 { 1002 struct hv_device *dev_ctx = device_get_ivars(child); 1003 char guidbuf[HYPERV_GUID_STRLEN]; 1004 1005 if (dev_ctx == NULL) 1006 return (0); 1007 1008 strlcat(buf, "classid=", buflen); 1009 hyperv_guid2str(&dev_ctx->class_id, guidbuf, sizeof(guidbuf)); 1010 strlcat(buf, guidbuf, buflen); 1011 1012 strlcat(buf, " deviceid=", buflen); 1013 hyperv_guid2str(&dev_ctx->device_id, guidbuf, sizeof(guidbuf)); 1014 strlcat(buf, guidbuf, buflen); 1015 1016 return (0); 1017 } 1018 1019 struct hv_device * 1020 hv_vmbus_child_device_create(struct hv_vmbus_channel *channel) 1021 { 1022 hv_device *child_dev; 1023 1024 /* 1025 * Allocate the new child device 1026 */ 1027 child_dev = malloc(sizeof(hv_device), M_DEVBUF, M_WAITOK | M_ZERO); 1028 1029 child_dev->channel = channel; 1030 child_dev->class_id = channel->ch_guid_type; 1031 child_dev->device_id = channel->ch_guid_inst; 1032 1033 return (child_dev); 1034 } 1035 1036 void 1037 hv_vmbus_child_device_register(struct vmbus_softc *sc, 1038 struct hv_device *child_dev) 1039 { 1040 device_t child, parent; 1041 1042 parent = sc->vmbus_dev; 1043 if (bootverbose) { 1044 char name[HYPERV_GUID_STRLEN]; 1045 1046 hyperv_guid2str(&child_dev->class_id, name, sizeof(name)); 1047 device_printf(parent, "add device, classid: %s\n", name); 1048 } 1049 1050 child = device_add_child(parent, NULL, -1); 1051 child_dev->device = child; 1052 device_set_ivars(child, child_dev); 1053 1054 /* New device was added to vmbus */ 1055 vmbus_scan_newdev(sc); 1056 } 1057 1058 int 1059 hv_vmbus_child_device_unregister(struct hv_device *child_dev) 1060 { 1061 int ret = 0; 1062 /* 1063 * XXXKYS: Ensure that this is the opposite of 1064 * device_add_child() 1065 */ 1066 mtx_lock(&Giant); 1067 ret = device_delete_child(vmbus_get_device(), child_dev->device); 1068 mtx_unlock(&Giant); 1069 return(ret); 1070 } 1071 1072 static int 1073 vmbus_sysctl_version(SYSCTL_HANDLER_ARGS) 1074 { 1075 struct vmbus_softc *sc = arg1; 1076 char verstr[16]; 1077 1078 snprintf(verstr, sizeof(verstr), "%u.%u", 1079 VMBUS_VERSION_MAJOR(sc->vmbus_version), 1080 VMBUS_VERSION_MINOR(sc->vmbus_version)); 1081 return sysctl_handle_string(oidp, verstr, sizeof(verstr), req); 1082 } 1083 1084 static uint32_t 1085 vmbus_get_version_method(device_t bus, device_t dev) 1086 { 1087 struct vmbus_softc *sc = device_get_softc(bus); 1088 1089 return sc->vmbus_version; 1090 } 1091 1092 static int 1093 vmbus_probe_guid_method(device_t bus, device_t dev, const struct hv_guid *guid) 1094 { 1095 struct hv_device *hv_dev = device_get_ivars(dev); 1096 1097 if (memcmp(&hv_dev->class_id, guid, sizeof(struct hv_guid)) == 0) 1098 return 0; 1099 return ENXIO; 1100 } 1101 1102 static int 1103 vmbus_probe(device_t dev) 1104 { 1105 char *id[] = { "VMBUS", NULL }; 1106 1107 if (ACPI_ID_PROBE(device_get_parent(dev), dev, id) == NULL || 1108 device_get_unit(dev) != 0 || vm_guest != VM_GUEST_HV || 1109 (hyperv_features & CPUID_HV_MSR_SYNIC) == 0) 1110 return (ENXIO); 1111 1112 device_set_desc(dev, "Hyper-V Vmbus"); 1113 1114 return (BUS_PROBE_DEFAULT); 1115 } 1116 1117 /** 1118 * @brief Main vmbus driver initialization routine. 1119 * 1120 * Here, we 1121 * - initialize the vmbus driver context 1122 * - setup various driver entry points 1123 * - invoke the vmbus hv main init routine 1124 * - get the irq resource 1125 * - invoke the vmbus to add the vmbus root device 1126 * - setup the vmbus root device 1127 * - retrieve the channel offers 1128 */ 1129 static int 1130 vmbus_doattach(struct vmbus_softc *sc) 1131 { 1132 struct sysctl_oid_list *child; 1133 struct sysctl_ctx_list *ctx; 1134 int ret; 1135 1136 if (sc->vmbus_flags & VMBUS_FLAG_ATTACHED) 1137 return (0); 1138 sc->vmbus_flags |= VMBUS_FLAG_ATTACHED; 1139 1140 mtx_init(&sc->vmbus_scan_lock, "vmbus scan", NULL, MTX_DEF); 1141 sc->vmbus_gpadl = VMBUS_GPADL_START; 1142 mtx_init(&sc->vmbus_chlist_lock, "vmbus chlist", NULL, MTX_DEF); 1143 TAILQ_INIT(&sc->vmbus_chlist); 1144 sc->vmbus_chmap = malloc( 1145 sizeof(struct hv_vmbus_channel *) * VMBUS_CHAN_MAX, M_DEVBUF, 1146 M_WAITOK | M_ZERO); 1147 1148 /* 1149 * Create context for "post message" Hypercalls 1150 */ 1151 sc->vmbus_msg_hc = vmbus_msghc_ctx_create( 1152 bus_get_dma_tag(sc->vmbus_dev)); 1153 if (sc->vmbus_msg_hc == NULL) { 1154 ret = ENXIO; 1155 goto cleanup; 1156 } 1157 1158 /* 1159 * Allocate DMA stuffs. 1160 */ 1161 ret = vmbus_dma_alloc(sc); 1162 if (ret != 0) 1163 goto cleanup; 1164 1165 /* 1166 * Setup interrupt. 1167 */ 1168 ret = vmbus_intr_setup(sc); 1169 if (ret != 0) 1170 goto cleanup; 1171 1172 /* 1173 * Setup SynIC. 1174 */ 1175 if (bootverbose) 1176 device_printf(sc->vmbus_dev, "smp_started = %d\n", smp_started); 1177 smp_rendezvous(NULL, vmbus_synic_setup, NULL, sc); 1178 sc->vmbus_flags |= VMBUS_FLAG_SYNIC; 1179 1180 /* 1181 * Initialize vmbus, e.g. connect to Hypervisor. 1182 */ 1183 ret = vmbus_init(sc); 1184 if (ret != 0) 1185 goto cleanup; 1186 1187 if (sc->vmbus_version == VMBUS_VERSION_WS2008 || 1188 sc->vmbus_version == VMBUS_VERSION_WIN7) 1189 sc->vmbus_event_proc = vmbus_event_proc_compat; 1190 else 1191 sc->vmbus_event_proc = vmbus_event_proc; 1192 1193 ret = vmbus_scan(sc); 1194 if (ret != 0) 1195 goto cleanup; 1196 1197 ctx = device_get_sysctl_ctx(sc->vmbus_dev); 1198 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vmbus_dev)); 1199 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "version", 1200 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, 1201 vmbus_sysctl_version, "A", "vmbus version"); 1202 1203 return (ret); 1204 1205 cleanup: 1206 vmbus_intr_teardown(sc); 1207 vmbus_dma_free(sc); 1208 if (sc->vmbus_msg_hc != NULL) { 1209 vmbus_msghc_ctx_destroy(sc->vmbus_msg_hc); 1210 sc->vmbus_msg_hc = NULL; 1211 } 1212 free(sc->vmbus_chmap, M_DEVBUF); 1213 mtx_destroy(&sc->vmbus_scan_lock); 1214 mtx_destroy(&sc->vmbus_chlist_lock); 1215 1216 return (ret); 1217 } 1218 1219 static void 1220 vmbus_event_proc_dummy(struct vmbus_softc *sc __unused, int cpu __unused) 1221 { 1222 } 1223 1224 static int 1225 vmbus_attach(device_t dev) 1226 { 1227 vmbus_sc = device_get_softc(dev); 1228 vmbus_sc->vmbus_dev = dev; 1229 vmbus_sc->vmbus_idtvec = -1; 1230 1231 /* 1232 * Event processing logic will be configured: 1233 * - After the vmbus protocol version negotiation. 1234 * - Before we request channel offers. 1235 */ 1236 vmbus_sc->vmbus_event_proc = vmbus_event_proc_dummy; 1237 1238 #ifndef EARLY_AP_STARTUP 1239 /* 1240 * If the system has already booted and thread 1241 * scheduling is possible indicated by the global 1242 * cold set to zero, we just call the driver 1243 * initialization directly. 1244 */ 1245 if (!cold) 1246 #endif 1247 vmbus_doattach(vmbus_sc); 1248 1249 return (0); 1250 } 1251 1252 static void 1253 vmbus_sysinit(void *arg __unused) 1254 { 1255 struct vmbus_softc *sc = vmbus_get_softc(); 1256 1257 if (vm_guest != VM_GUEST_HV || sc == NULL) 1258 return; 1259 1260 #ifndef EARLY_AP_STARTUP 1261 /* 1262 * If the system has already booted and thread 1263 * scheduling is possible, as indicated by the 1264 * global cold set to zero, we just call the driver 1265 * initialization directly. 1266 */ 1267 if (!cold) 1268 #endif 1269 vmbus_doattach(sc); 1270 } 1271 1272 static int 1273 vmbus_detach(device_t dev) 1274 { 1275 struct vmbus_softc *sc = device_get_softc(dev); 1276 1277 hv_vmbus_release_unattached_channels(sc); 1278 1279 vmbus_disconnect(sc); 1280 1281 if (sc->vmbus_flags & VMBUS_FLAG_SYNIC) { 1282 sc->vmbus_flags &= ~VMBUS_FLAG_SYNIC; 1283 smp_rendezvous(NULL, vmbus_synic_teardown, NULL, NULL); 1284 } 1285 1286 vmbus_intr_teardown(sc); 1287 vmbus_dma_free(sc); 1288 1289 if (sc->vmbus_msg_hc != NULL) { 1290 vmbus_msghc_ctx_destroy(sc->vmbus_msg_hc); 1291 sc->vmbus_msg_hc = NULL; 1292 } 1293 1294 free(sc->vmbus_chmap, M_DEVBUF); 1295 mtx_destroy(&sc->vmbus_scan_lock); 1296 mtx_destroy(&sc->vmbus_chlist_lock); 1297 1298 return (0); 1299 } 1300 1301 static device_method_t vmbus_methods[] = { 1302 /* Device interface */ 1303 DEVMETHOD(device_probe, vmbus_probe), 1304 DEVMETHOD(device_attach, vmbus_attach), 1305 DEVMETHOD(device_detach, vmbus_detach), 1306 DEVMETHOD(device_shutdown, bus_generic_shutdown), 1307 DEVMETHOD(device_suspend, bus_generic_suspend), 1308 DEVMETHOD(device_resume, bus_generic_resume), 1309 1310 /* Bus interface */ 1311 DEVMETHOD(bus_add_child, bus_generic_add_child), 1312 DEVMETHOD(bus_print_child, bus_generic_print_child), 1313 DEVMETHOD(bus_read_ivar, vmbus_read_ivar), 1314 DEVMETHOD(bus_write_ivar, vmbus_write_ivar), 1315 DEVMETHOD(bus_child_pnpinfo_str, vmbus_child_pnpinfo_str), 1316 1317 /* Vmbus interface */ 1318 DEVMETHOD(vmbus_get_version, vmbus_get_version_method), 1319 DEVMETHOD(vmbus_probe_guid, vmbus_probe_guid_method), 1320 1321 DEVMETHOD_END 1322 }; 1323 1324 static driver_t vmbus_driver = { 1325 "vmbus", 1326 vmbus_methods, 1327 sizeof(struct vmbus_softc) 1328 }; 1329 1330 static devclass_t vmbus_devclass; 1331 1332 DRIVER_MODULE(vmbus, acpi, vmbus_driver, vmbus_devclass, NULL, NULL); 1333 MODULE_DEPEND(vmbus, acpi, 1, 1, 1); 1334 MODULE_VERSION(vmbus, 1); 1335 1336 #ifndef EARLY_AP_STARTUP 1337 /* 1338 * NOTE: 1339 * We have to start as the last step of SI_SUB_SMP, i.e. after SMP is 1340 * initialized. 1341 */ 1342 SYSINIT(vmbus_initialize, SI_SUB_SMP, SI_ORDER_ANY, vmbus_sysinit, NULL); 1343 #endif 1344