/*-
 * Copyright (c) 2009-2012,2016 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 * Copyright (c) 2012 Citrix Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * VM Bus Driver Implementation
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/rtprio.h>
#include <sys/interrupt.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <sys/mutex.h>
#include <sys/smp.h>

#include <machine/resource.h>
#include <sys/rman.h>

#include <machine/stdarg.h>
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/segments.h>
#include <sys/pcpu.h>
#include <x86/apicvar.h>

#include <dev/hyperv/include/hyperv.h>
#include <dev/hyperv/vmbus/hv_vmbus_priv.h>
#include <dev/hyperv/vmbus/hyperv_reg.h>
#include <dev/hyperv/vmbus/hyperv_var.h>
#include <dev/hyperv/vmbus/vmbus_reg.h>
#include <dev/hyperv/vmbus/vmbus_var.h>

#include <contrib/dev/acpica/include/acpi.h>
#include "acpi_if.h"

struct vmbus_softc	*vmbus_sc;

extern inthand_t IDTVEC(vmbus_isr);

static void
vmbus_msg_task(void *xsc, int pending __unused)
{
	struct vmbus_softc *sc = xsc;
	volatile struct vmbus_message *msg;

	msg = VMBUS_PCPU_GET(sc, message, curcpu) + VMBUS_SINT_MESSAGE;
	for (;;) {
		const hv_vmbus_channel_msg_table_entry *entry;
		hv_vmbus_channel_msg_header *hdr;
		hv_vmbus_channel_msg_type msg_type;

		if (msg->msg_type == VMBUS_MSGTYPE_NONE)
			break;	/* no message */

		/* XXX: update messageHandler interface */
		hdr = __DEVOLATILE(hv_vmbus_channel_msg_header *,
		    msg->msg_data);
		msg_type = hdr->message_type;

		if (msg_type >= HV_CHANNEL_MESSAGE_COUNT) {
			printf("VMBUS: unknown message type = %d\n", msg_type);
			goto handled;
		}

		entry = &g_channel_message_table[msg_type];
		if (entry->messageHandler)
			entry->messageHandler(hdr);
handled:
		msg->msg_type = VMBUS_MSGTYPE_NONE;
		/*
		 * Make sure the write to msg_type (i.e. set to
		 * VMBUS_MSGTYPE_NONE) happens before we read the
		 * msg_flags and EOMing.  Otherwise, the EOMing will
		 * not deliver any more messages since there is no
		 * empty slot.
		 *
		 * NOTE:
		 * mb() is used here, since atomic_thread_fence_seq_cst()
		 * will become a compiler fence on a UP kernel.
		 */
		mb();
		if (msg->msg_flags & VMBUS_MSGFLAG_PENDING) {
			/*
			 * This will cause message queue rescan to possibly
			 * deliver another msg from the hypervisor
			 */
			wrmsr(MSR_HV_EOM, 0);
		}
	}
}

static __inline int
vmbus_handle_intr1(struct vmbus_softc *sc, struct trapframe *frame, int cpu)
{
	volatile struct vmbus_message *msg;
	struct vmbus_message *msg_base;

	msg_base = VMBUS_PCPU_GET(sc, message, cpu);

	/*
	 * Check event timer.
	 *
	 * TODO: move this to independent IDT vector.
	 */
	msg = msg_base + VMBUS_SINT_TIMER;
	if (msg->msg_type == VMBUS_MSGTYPE_TIMER_EXPIRED) {
		msg->msg_type = VMBUS_MSGTYPE_NONE;

		vmbus_et_intr(frame);

		/*
		 * Make sure the write to msg_type (i.e. set to
		 * VMBUS_MSGTYPE_NONE) happens before we read the
		 * msg_flags and EOMing.  Otherwise, the EOMing will
		 * not deliver any more messages since there is no
		 * empty slot.
		 *
		 * NOTE:
		 * mb() is used here, since atomic_thread_fence_seq_cst()
		 * will become a compiler fence on a UP kernel.
		 */
		mb();
		if (msg->msg_flags & VMBUS_MSGFLAG_PENDING) {
			/*
			 * This will cause message queue rescan to possibly
			 * deliver another msg from the hypervisor
			 */
			wrmsr(MSR_HV_EOM, 0);
		}
	}

	/*
	 * Check events.  Hot path for network and storage I/O data; high rate.
	 *
	 * NOTE:
	 * As recommended by the Windows guest fellows, we check events before
	 * checking messages.
	 */
	sc->vmbus_event_proc(sc, cpu);

	/*
	 * Check messages.  Mainly management stuff; ultra low rate.
	 */
	msg = msg_base + VMBUS_SINT_MESSAGE;
	if (__predict_false(msg->msg_type != VMBUS_MSGTYPE_NONE)) {
		taskqueue_enqueue(VMBUS_PCPU_GET(sc, message_tq, cpu),
		    VMBUS_PCPU_PTR(sc, message_task, cpu));
	}

	return (FILTER_HANDLED);
}

void
vmbus_handle_intr(struct trapframe *trap_frame)
{
	struct vmbus_softc *sc = vmbus_get_softc();
	int cpu = curcpu;

	/*
	 * Disable preemption.
	 */
	critical_enter();

	/*
	 * Do a little interrupt counting.
	 */
	(*VMBUS_PCPU_GET(sc, intr_cnt, cpu))++;

	vmbus_handle_intr1(sc, trap_frame, cpu);

	/*
	 * Enable preemption.
	 */
	critical_exit();
}

static void
vmbus_synic_setup(void *xsc)
{
	struct vmbus_softc *sc = xsc;
	int cpu = curcpu;
	uint64_t val, orig;
	uint32_t sint;

	if (hyperv_features & CPUID_HV_MSR_VP_INDEX) {
		/*
		 * Save virtual processor id.
		 */
		VMBUS_PCPU_GET(sc, vcpuid, cpu) = rdmsr(MSR_HV_VP_INDEX);
	} else {
		/*
		 * XXX
		 * Virtual processor id is only used by a pretty broken
		 * channel selection code from storvsc.  It's nothing
		 * critical even if CPUID_HV_MSR_VP_INDEX is not set; keep
		 * moving on.
		 */
		VMBUS_PCPU_GET(sc, vcpuid, cpu) = cpu;
	}

	/*
	 * Setup the SynIC message page.
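	 *
	 * MSR_HV_SIMP takes the page frame number of the per-cpu message
	 * page allocated in vmbus_dma_alloc(); once the enable bit is set,
	 * the hypervisor delivers SynIC messages into that page, one slot
	 * per SINT (e.g. VMBUS_SINT_MESSAGE and VMBUS_SINT_TIMER above).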
	 */
	orig = rdmsr(MSR_HV_SIMP);
	val = MSR_HV_SIMP_ENABLE | (orig & MSR_HV_SIMP_RSVD_MASK) |
	    ((VMBUS_PCPU_GET(sc, message_dma.hv_paddr, cpu) >> PAGE_SHIFT) <<
	     MSR_HV_SIMP_PGSHIFT);
	wrmsr(MSR_HV_SIMP, val);

	/*
	 * Setup the SynIC event flags.
	 */
	orig = rdmsr(MSR_HV_SIEFP);
	val = MSR_HV_SIEFP_ENABLE | (orig & MSR_HV_SIEFP_RSVD_MASK) |
	    ((VMBUS_PCPU_GET(sc, event_flags_dma.hv_paddr, cpu)
	      >> PAGE_SHIFT) << MSR_HV_SIEFP_PGSHIFT);
	wrmsr(MSR_HV_SIEFP, val);

	/*
	 * Configure and unmask SINT for message and event flags.
	 */
	sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE;
	orig = rdmsr(sint);
	val = sc->vmbus_idtvec | MSR_HV_SINT_AUTOEOI |
	    (orig & MSR_HV_SINT_RSVD_MASK);
	wrmsr(sint, val);

	/*
	 * Configure and unmask SINT for timer.
	 */
	sint = MSR_HV_SINT0 + VMBUS_SINT_TIMER;
	orig = rdmsr(sint);
	val = sc->vmbus_idtvec | MSR_HV_SINT_AUTOEOI |
	    (orig & MSR_HV_SINT_RSVD_MASK);
	wrmsr(sint, val);

	/*
	 * All done; enable SynIC.
	 */
	orig = rdmsr(MSR_HV_SCONTROL);
	val = MSR_HV_SCTRL_ENABLE | (orig & MSR_HV_SCTRL_RSVD_MASK);
	wrmsr(MSR_HV_SCONTROL, val);
}

static void
vmbus_synic_teardown(void *arg)
{
	uint64_t orig;
	uint32_t sint;

	/*
	 * Disable SynIC.
	 */
	orig = rdmsr(MSR_HV_SCONTROL);
	wrmsr(MSR_HV_SCONTROL, (orig & MSR_HV_SCTRL_RSVD_MASK));

	/*
	 * Mask message and event flags SINT.
	 */
	sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE;
	orig = rdmsr(sint);
	wrmsr(sint, orig | MSR_HV_SINT_MASKED);

	/*
	 * Mask timer SINT.
	 */
	sint = MSR_HV_SINT0 + VMBUS_SINT_TIMER;
	orig = rdmsr(sint);
	wrmsr(sint, orig | MSR_HV_SINT_MASKED);

	/*
	 * Teardown SynIC message.
	 */
	orig = rdmsr(MSR_HV_SIMP);
	wrmsr(MSR_HV_SIMP, (orig & MSR_HV_SIMP_RSVD_MASK));

	/*
	 * Teardown SynIC event flags.
	 */
	orig = rdmsr(MSR_HV_SIEFP);
	wrmsr(MSR_HV_SIEFP, (orig & MSR_HV_SIEFP_RSVD_MASK));
}

static int
vmbus_dma_alloc(struct vmbus_softc *sc)
{
	int cpu;

	CPU_FOREACH(cpu) {
		void *ptr;

		/*
		 * Per-cpu messages and event flags.
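		 *
		 * Both are single, page-aligned pages; their physical
		 * addresses are programmed into MSR_HV_SIMP and
		 * MSR_HV_SIEFP respectively by vmbus_synic_setup().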
		 */
		ptr = hyperv_dmamem_alloc(bus_get_dma_tag(sc->vmbus_dev),
		    PAGE_SIZE, 0, PAGE_SIZE,
		    VMBUS_PCPU_PTR(sc, message_dma, cpu),
		    BUS_DMA_WAITOK | BUS_DMA_ZERO);
		if (ptr == NULL)
			return ENOMEM;
		VMBUS_PCPU_GET(sc, message, cpu) = ptr;

		ptr = hyperv_dmamem_alloc(bus_get_dma_tag(sc->vmbus_dev),
		    PAGE_SIZE, 0, PAGE_SIZE,
		    VMBUS_PCPU_PTR(sc, event_flags_dma, cpu),
		    BUS_DMA_WAITOK | BUS_DMA_ZERO);
		if (ptr == NULL)
			return ENOMEM;
		VMBUS_PCPU_GET(sc, event_flags, cpu) = ptr;
	}
	return 0;
}

static void
vmbus_dma_free(struct vmbus_softc *sc)
{
	int cpu;

	CPU_FOREACH(cpu) {
		if (VMBUS_PCPU_GET(sc, message, cpu) != NULL) {
			hyperv_dmamem_free(
			    VMBUS_PCPU_PTR(sc, message_dma, cpu),
			    VMBUS_PCPU_GET(sc, message, cpu));
			VMBUS_PCPU_GET(sc, message, cpu) = NULL;
		}
		if (VMBUS_PCPU_GET(sc, event_flags, cpu) != NULL) {
			hyperv_dmamem_free(
			    VMBUS_PCPU_PTR(sc, event_flags_dma, cpu),
			    VMBUS_PCPU_GET(sc, event_flags, cpu));
			VMBUS_PCPU_GET(sc, event_flags, cpu) = NULL;
		}
	}
}

static int
vmbus_intr_setup(struct vmbus_softc *sc)
{
	int cpu;

	CPU_FOREACH(cpu) {
		char buf[MAXCOMLEN + 1];
		cpuset_t cpu_mask;

		/* Allocate an interrupt counter for Hyper-V interrupt */
		snprintf(buf, sizeof(buf), "cpu%d:hyperv", cpu);
		intrcnt_add(buf, VMBUS_PCPU_PTR(sc, intr_cnt, cpu));

		/*
		 * Setup taskqueue to handle events.  Task will be per-
		 * channel.
		 */
		VMBUS_PCPU_GET(sc, event_tq, cpu) = taskqueue_create_fast(
		    "hyperv event", M_WAITOK, taskqueue_thread_enqueue,
		    VMBUS_PCPU_PTR(sc, event_tq, cpu));
		CPU_SETOF(cpu, &cpu_mask);
		taskqueue_start_threads_cpuset(
		    VMBUS_PCPU_PTR(sc, event_tq, cpu), 1, PI_NET, &cpu_mask,
		    "hvevent%d", cpu);

		/*
		 * Setup tasks and taskqueues to handle messages.
		 */
		VMBUS_PCPU_GET(sc, message_tq, cpu) = taskqueue_create_fast(
		    "hyperv msg", M_WAITOK, taskqueue_thread_enqueue,
		    VMBUS_PCPU_PTR(sc, message_tq, cpu));
		CPU_SETOF(cpu, &cpu_mask);
		taskqueue_start_threads_cpuset(
		    VMBUS_PCPU_PTR(sc, message_tq, cpu), 1, PI_NET, &cpu_mask,
		    "hvmsg%d", cpu);
		TASK_INIT(VMBUS_PCPU_PTR(sc, message_task, cpu), 0,
		    vmbus_msg_task, sc);
	}

	/*
	 * All Hyper-V ISR required resources are setup, now let's find a
	 * free IDT vector for Hyper-V ISR and set it up.
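	 *
	 * lapic_ipi_alloc() installs vmbus_isr into an unused IPI slot of
	 * the IDT and returns the chosen vector (negative on failure);
	 * vmbus_synic_setup() later programs this vector into the per-cpu
	 * SINT MSRs so the hypervisor can interrupt us through it.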
	 */
	sc->vmbus_idtvec = lapic_ipi_alloc(IDTVEC(vmbus_isr));
	if (sc->vmbus_idtvec < 0) {
		device_printf(sc->vmbus_dev, "cannot find free IDT vector\n");
		return ENXIO;
	}
	if (bootverbose) {
		device_printf(sc->vmbus_dev, "vmbus IDT vector %d\n",
		    sc->vmbus_idtvec);
	}
	return 0;
}

static void
vmbus_intr_teardown(struct vmbus_softc *sc)
{
	int cpu;

	if (sc->vmbus_idtvec >= 0) {
		lapic_ipi_free(sc->vmbus_idtvec);
		sc->vmbus_idtvec = -1;
	}

	CPU_FOREACH(cpu) {
		if (VMBUS_PCPU_GET(sc, event_tq, cpu) != NULL) {
			taskqueue_free(VMBUS_PCPU_GET(sc, event_tq, cpu));
			VMBUS_PCPU_GET(sc, event_tq, cpu) = NULL;
		}
		if (VMBUS_PCPU_GET(sc, message_tq, cpu) != NULL) {
			taskqueue_drain(VMBUS_PCPU_GET(sc, message_tq, cpu),
			    VMBUS_PCPU_PTR(sc, message_task, cpu));
			taskqueue_free(VMBUS_PCPU_GET(sc, message_tq, cpu));
			VMBUS_PCPU_GET(sc, message_tq, cpu) = NULL;
		}
	}
}

static int
vmbus_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
	struct hv_device *child_dev_ctx = device_get_ivars(child);

	switch (index) {
	case HV_VMBUS_IVAR_TYPE:
		*result = (uintptr_t)&child_dev_ctx->class_id;
		return (0);

	case HV_VMBUS_IVAR_INSTANCE:
		*result = (uintptr_t)&child_dev_ctx->device_id;
		return (0);

	case HV_VMBUS_IVAR_DEVCTX:
		*result = (uintptr_t)child_dev_ctx;
		return (0);

	case HV_VMBUS_IVAR_NODE:
		*result = (uintptr_t)child_dev_ctx->device;
		return (0);
	}
	return (ENOENT);
}

static int
vmbus_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
{
	switch (index) {
	case HV_VMBUS_IVAR_TYPE:
	case HV_VMBUS_IVAR_INSTANCE:
	case HV_VMBUS_IVAR_DEVCTX:
	case HV_VMBUS_IVAR_NODE:
		/* read-only */
		return (EINVAL);
	}
	return (ENOENT);
}

static int
vmbus_child_pnpinfo_str(device_t dev, device_t child, char *buf, size_t buflen)
{
	struct hv_device *dev_ctx = device_get_ivars(child);
	char guidbuf[HYPERV_GUID_STRLEN];

	if (dev_ctx == NULL)
		return (0);

	strlcat(buf, "classid=", buflen);
	hyperv_guid2str(&dev_ctx->class_id, guidbuf, sizeof(guidbuf));
	strlcat(buf, guidbuf, buflen);

	strlcat(buf, " deviceid=", buflen);
	hyperv_guid2str(&dev_ctx->device_id, guidbuf, sizeof(guidbuf));
	strlcat(buf, guidbuf, buflen);

	return (0);
}

struct hv_device *
hv_vmbus_child_device_create(hv_guid type, hv_guid instance,
    hv_vmbus_channel *channel)
{
	hv_device *child_dev;

	/*
	 * Allocate the new child device
	 */
	child_dev = malloc(sizeof(hv_device), M_DEVBUF, M_WAITOK | M_ZERO);

	child_dev->channel = channel;
	memcpy(&child_dev->class_id, &type, sizeof(hv_guid));
	memcpy(&child_dev->device_id, &instance, sizeof(hv_guid));

	return (child_dev);
}

int
hv_vmbus_child_device_register(struct hv_device *child_dev)
{
	device_t child, parent;

	parent = vmbus_get_device();
	if (bootverbose) {
		char name[HYPERV_GUID_STRLEN];

		hyperv_guid2str(&child_dev->class_id, name, sizeof(name));
		device_printf(parent, "add device, classid: %s\n", name);
	}

	child = device_add_child(parent, NULL, -1);
	child_dev->device = child;
	device_set_ivars(child, child_dev);

	return (0);
}

int
hv_vmbus_child_device_unregister(struct hv_device *child_dev)
{
	int ret = 0;
	/*
	 * XXXKYS: Ensure that this is the opposite of
	 * device_add_child()
	 */
	mtx_lock(&Giant);
	ret = device_delete_child(vmbus_get_device(), child_dev->device);
	mtx_unlock(&Giant);
	return (ret);
}

static int
vmbus_probe(device_t dev)
{
	char *id[] = { "VMBUS", NULL };

	if (ACPI_ID_PROBE(device_get_parent(dev), dev, id) == NULL ||
	    device_get_unit(dev) != 0 || vm_guest != VM_GUEST_HV ||
	    (hyperv_features & CPUID_HV_MSR_SYNIC) == 0)
		return (ENXIO);

	device_set_desc(dev, "Hyper-V Vmbus");

	return (BUS_PROBE_DEFAULT);
}

/**
 * @brief Main vmbus driver initialization routine.
 *
 * Here, we
 * - initialize the vmbus driver context
 * - setup various driver entry points
 * - invoke the vmbus hv main init routine
 * - get the irq resource
 * - invoke the vmbus to add the vmbus root device
 * - setup the vmbus root device
 * - retrieve the channel offers
 */
static int
vmbus_bus_init(void)
{
	struct vmbus_softc *sc = vmbus_get_softc();
	int ret;

	if (sc->vmbus_flags & VMBUS_FLAG_ATTACHED)
		return (0);
	sc->vmbus_flags |= VMBUS_FLAG_ATTACHED;

	/*
	 * Allocate DMA resources.
	 */
	ret = vmbus_dma_alloc(sc);
	if (ret != 0)
		goto cleanup;

	/*
	 * Setup interrupt.
	 */
	ret = vmbus_intr_setup(sc);
	if (ret != 0)
		goto cleanup;

	/*
	 * Setup SynIC.
	 */
	if (bootverbose)
		device_printf(sc->vmbus_dev, "smp_started = %d\n", smp_started);
	smp_rendezvous(NULL, vmbus_synic_setup, NULL, sc);
	sc->vmbus_flags |= VMBUS_FLAG_SYNIC;

	/*
	 * Connect to VMBus in the root partition.
	 */
	ret = hv_vmbus_connect();
	if (ret != 0)
		goto cleanup;

	if (hv_vmbus_protocal_version == HV_VMBUS_VERSION_WS2008 ||
	    hv_vmbus_protocal_version == HV_VMBUS_VERSION_WIN7)
		sc->vmbus_event_proc = vmbus_event_proc_compat;
	else
		sc->vmbus_event_proc = vmbus_event_proc;

	hv_vmbus_request_channel_offers();

	vmbus_scan();
	bus_generic_attach(sc->vmbus_dev);
	device_printf(sc->vmbus_dev, "device scan, probe and attach done\n");

	return (ret);

cleanup:
	vmbus_intr_teardown(sc);
	vmbus_dma_free(sc);

	return (ret);
}

static void
vmbus_event_proc_dummy(struct vmbus_softc *sc __unused, int cpu __unused)
{
}

static int
vmbus_attach(device_t dev)
{
	vmbus_sc = device_get_softc(dev);
	vmbus_sc->vmbus_dev = dev;
	vmbus_sc->vmbus_idtvec = -1;

	/*
	 * Event processing logic will be configured:
	 * - After the vmbus protocol version negotiation.
	 * - Before we request channel offers.
	 */
	vmbus_sc->vmbus_event_proc = vmbus_event_proc_dummy;

#ifndef EARLY_AP_STARTUP
	/*
	 * If the system has already booted and thread
	 * scheduling is possible, as indicated by the
	 * global cold set to zero, we just call the driver
	 * initialization directly.
	 */
	if (!cold)
#endif
		vmbus_bus_init();

	bus_generic_probe(dev);
	return (0);
}

static void
vmbus_sysinit(void *arg __unused)
{
	if (vm_guest != VM_GUEST_HV || vmbus_get_softc() == NULL)
		return;

#ifndef EARLY_AP_STARTUP
	/*
	 * If the system has already booted and thread
	 * scheduling is possible, as indicated by the
	 * global cold set to zero, we just call the driver
	 * initialization directly.
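	 *
	 * (During a cold boot, vmbus_attach() defers to this SYSINIT,
	 * which runs at SI_SUB_SMP, i.e. after the APs have been
	 * started; see the NOTE at the bottom of this file.)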
	 */
	if (!cold)
#endif
		vmbus_bus_init();
}

static int
vmbus_detach(device_t dev)
{
	struct vmbus_softc *sc = device_get_softc(dev);

	hv_vmbus_release_unattached_channels();
	hv_vmbus_disconnect();

	if (sc->vmbus_flags & VMBUS_FLAG_SYNIC) {
		sc->vmbus_flags &= ~VMBUS_FLAG_SYNIC;
		smp_rendezvous(NULL, vmbus_synic_teardown, NULL, NULL);
	}

	vmbus_intr_teardown(sc);
	vmbus_dma_free(sc);

	return (0);
}

static device_method_t vmbus_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,			vmbus_probe),
	DEVMETHOD(device_attach,		vmbus_attach),
	DEVMETHOD(device_detach,		vmbus_detach),
	DEVMETHOD(device_shutdown,		bus_generic_shutdown),
	DEVMETHOD(device_suspend,		bus_generic_suspend),
	DEVMETHOD(device_resume,		bus_generic_resume),

	/* Bus interface */
	DEVMETHOD(bus_add_child,		bus_generic_add_child),
	DEVMETHOD(bus_print_child,		bus_generic_print_child),
	DEVMETHOD(bus_read_ivar,		vmbus_read_ivar),
	DEVMETHOD(bus_write_ivar,		vmbus_write_ivar),
	DEVMETHOD(bus_child_pnpinfo_str,	vmbus_child_pnpinfo_str),

	DEVMETHOD_END
};

static driver_t vmbus_driver = {
	"vmbus",
	vmbus_methods,
	sizeof(struct vmbus_softc)
};

static devclass_t vmbus_devclass;

DRIVER_MODULE(vmbus, acpi, vmbus_driver, vmbus_devclass, NULL, NULL);
MODULE_DEPEND(vmbus, acpi, 1, 1, 1);
MODULE_VERSION(vmbus, 1);

#ifndef EARLY_AP_STARTUP
/*
 * NOTE:
 * We have to start as the last step of SI_SUB_SMP, i.e. after SMP is
 * initialized.
 */
SYSINIT(vmbus_initialize, SI_SUB_SMP, SI_ORDER_ANY, vmbus_sysinit, NULL);
#endif