/*-
 * Copyright (c) 2009-2012,2016-2017 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 * Copyright (c) 2012 Citrix Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * VM Bus Driver Implementation
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#if defined(__aarch64__)
#include <dev/psci/smccc.h>
#include <dev/hyperv/vmbus/aarch64/hyperv_machdep.h>
#include <dev/hyperv/vmbus/aarch64/hyperv_reg.h>
#else
#include <dev/hyperv/vmbus/x86/hyperv_machdep.h>
#include <dev/hyperv/vmbus/x86/hyperv_reg.h>
#include <machine/intr_machdep.h>
#include <x86/include/apicvar.h>
#endif
#include <machine/metadata.h>
#include <machine/md_var.h>
#include <machine/resource.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>

#include <dev/hyperv/include/hyperv.h>
#include <dev/hyperv/include/vmbus_xact.h>
#include <dev/hyperv/vmbus/hyperv_var.h>
#include <dev/hyperv/vmbus/vmbus_reg.h>
#include <dev/hyperv/vmbus/vmbus_var.h>
#include <dev/hyperv/vmbus/vmbus_chanvar.h>
#include <dev/hyperv/vmbus/hyperv_common_reg.h>
#include "acpi_if.h"
#include "pcib_if.h"
#include "vmbus_if.h"

#define VMBUS_GPADL_START		0xe1e10

struct vmbus_msghc {
	struct vmbus_xact		*mh_xact;
	struct hypercall_postmsg_in	mh_inprm_save;
};

static void		vmbus_identify(driver_t *, device_t);
static int		vmbus_probe(device_t);
static int		vmbus_attach(device_t);
static int		vmbus_detach(device_t);
static int		vmbus_read_ivar(device_t, device_t, int,
			    uintptr_t *);
static int		vmbus_child_pnpinfo(device_t, device_t, struct sbuf *);
static struct resource	*vmbus_alloc_resource(device_t dev,
			    device_t child, int type, int *rid,
			    rman_res_t start, rman_res_t end,
			    rman_res_t count, u_int flags);
static int		vmbus_alloc_msi(device_t bus, device_t dev,
			    int count, int maxcount, int *irqs);
static int		vmbus_release_msi(device_t bus, device_t dev,
			    int count, int *irqs);
static int		vmbus_alloc_msix(device_t bus, device_t dev,
			    int *irq);
static int		vmbus_release_msix(device_t bus, device_t dev,
			    int irq);
static int		vmbus_map_msi(device_t bus, device_t dev,
			    int irq, uint64_t *addr, uint32_t *data);
static uint32_t		vmbus_get_version_method(device_t, device_t);
static int		vmbus_probe_guid_method(device_t, device_t,
			    const struct hyperv_guid *);
static uint32_t		vmbus_get_vcpu_id_method(device_t bus,
			    device_t dev, int cpu);
static struct taskqueue	*vmbus_get_eventtq_method(device_t, device_t,
			    int);
#if defined(EARLY_AP_STARTUP) || defined(__aarch64__)
static void		vmbus_intrhook(void *);
#endif

static int		vmbus_init(struct vmbus_softc *);
static int		vmbus_connect(struct vmbus_softc *, uint32_t);
static int		vmbus_req_channels(struct vmbus_softc *sc);
static void		vmbus_disconnect(struct vmbus_softc *);
static int		vmbus_scan(struct vmbus_softc *);
static void		vmbus_scan_teardown(struct vmbus_softc *);
static void		vmbus_scan_done(struct vmbus_softc *,
			    const struct vmbus_message *);
static void		vmbus_chanmsg_handle(struct vmbus_softc *,
			    const struct vmbus_message *);
static void		vmbus_msg_task(void *, int);
static void		vmbus_synic_setup(void *);
static void		vmbus_synic_teardown(void *);
static int		vmbus_sysctl_version(SYSCTL_HANDLER_ARGS);
static int		vmbus_dma_alloc(struct vmbus_softc *);
static void		vmbus_dma_free(struct vmbus_softc *);
static int		vmbus_intr_setup(struct vmbus_softc *);
static void		vmbus_intr_teardown(struct vmbus_softc *);
static int		vmbus_doattach(struct vmbus_softc *);
static void		vmbus_event_proc_dummy(struct vmbus_softc *,
			    int);

static struct vmbus_softc	*vmbus_sc;

SYSCTL_NODE(_hw, OID_AUTO, vmbus, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "Hyper-V vmbus");

static int		vmbus_pin_evttask = 1;
SYSCTL_INT(_hw_vmbus, OID_AUTO, pin_evttask, CTLFLAG_RDTUN,
    &vmbus_pin_evttask, 0, "Pin event tasks to their respective CPU");

uint32_t		vmbus_current_version;

static const uint32_t	vmbus_version[] = {
	VMBUS_VERSION_WIN10,
	VMBUS_VERSION_WIN8_1,
	VMBUS_VERSION_WIN8,
	VMBUS_VERSION_WIN7,
	VMBUS_VERSION_WS2008
};

static const vmbus_chanmsg_proc_t
vmbus_chanmsg_handlers[VMBUS_CHANMSG_TYPE_MAX] = {
	VMBUS_CHANMSG_PROC(CHOFFER_DONE, vmbus_scan_done),
	VMBUS_CHANMSG_PROC_WAKEUP(CONNECT_RESP)
};

static device_method_t vmbus_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,		vmbus_identify),
	DEVMETHOD(device_probe,			vmbus_probe),
	DEVMETHOD(device_attach,		vmbus_attach),
	DEVMETHOD(device_detach,		vmbus_detach),
	DEVMETHOD(device_shutdown,		bus_generic_shutdown),
	DEVMETHOD(device_suspend,		bus_generic_suspend),
	DEVMETHOD(device_resume,		bus_generic_resume),

	/* Bus interface */
	DEVMETHOD(bus_add_child,		bus_generic_add_child),
	DEVMETHOD(bus_print_child,		bus_generic_print_child),
	DEVMETHOD(bus_read_ivar,		vmbus_read_ivar),
	DEVMETHOD(bus_child_pnpinfo,		vmbus_child_pnpinfo),
	DEVMETHOD(bus_alloc_resource,		vmbus_alloc_resource),
	DEVMETHOD(bus_release_resource,		bus_generic_release_resource),
	DEVMETHOD(bus_activate_resource,	bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource,	bus_generic_deactivate_resource),
	DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,		bus_generic_teardown_intr),
	DEVMETHOD(bus_get_cpus,			bus_generic_get_cpus),

	/* pcib interface */
	DEVMETHOD(pcib_alloc_msi,		vmbus_alloc_msi),
	DEVMETHOD(pcib_release_msi,		vmbus_release_msi),
	DEVMETHOD(pcib_alloc_msix,		vmbus_alloc_msix),
	DEVMETHOD(pcib_release_msix,		vmbus_release_msix),
	DEVMETHOD(pcib_map_msi,			vmbus_map_msi),

	/* Vmbus interface */
	DEVMETHOD(vmbus_get_version,		vmbus_get_version_method),
	DEVMETHOD(vmbus_probe_guid,		vmbus_probe_guid_method),
	DEVMETHOD(vmbus_get_vcpu_id,		vmbus_get_vcpu_id_method),
	DEVMETHOD(vmbus_get_event_taskq,	vmbus_get_eventtq_method),

	DEVMETHOD_END
};

static driver_t vmbus_driver = {
	"vmbus",
	vmbus_methods,
	sizeof(struct vmbus_softc)
};

DRIVER_MODULE(vmbus, pcib, vmbus_driver, NULL, NULL);
DRIVER_MODULE(vmbus, acpi_syscontainer, vmbus_driver, NULL, NULL);

MODULE_DEPEND(vmbus, acpi, 1, 1, 1);
MODULE_DEPEND(vmbus, pci, 1, 1, 1);
MODULE_VERSION(vmbus, 1);

static __inline struct vmbus_softc *
vmbus_get_softc(void)
{
	return vmbus_sc;
}

void
vmbus_msghc_reset(struct vmbus_msghc *mh, size_t dsize)
{
	struct hypercall_postmsg_in *inprm;

	if (dsize > HYPERCALL_POSTMSGIN_DSIZE_MAX)
		panic("invalid data size %zu", dsize);

	inprm = vmbus_xact_req_data(mh->mh_xact);
	memset(inprm, 0, HYPERCALL_POSTMSGIN_SIZE);
	inprm->hc_connid = VMBUS_CONNID_MESSAGE;
	inprm->hc_msgtype = HYPERV_MSGTYPE_CHANNEL;
	inprm->hc_dsize = dsize;
}

struct vmbus_msghc *
vmbus_msghc_get(struct vmbus_softc *sc, size_t dsize)
{
	struct vmbus_msghc *mh;
	struct vmbus_xact *xact;

	if (dsize > HYPERCALL_POSTMSGIN_DSIZE_MAX)
		panic("invalid data size %zu", dsize);

	xact = vmbus_xact_get(sc->vmbus_xc,
	    dsize + __offsetof(struct hypercall_postmsg_in, hc_data[0]));
	if (xact == NULL)
		return (NULL);

	mh = vmbus_xact_priv(xact, sizeof(*mh));
	mh->mh_xact = xact;

	vmbus_msghc_reset(mh, dsize);
	return (mh);
}

void
vmbus_msghc_put(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
{

	vmbus_xact_put(mh->mh_xact);
}

void *
vmbus_msghc_dataptr(struct vmbus_msghc *mh)
{
	struct hypercall_postmsg_in *inprm;

	inprm = vmbus_xact_req_data(mh->mh_xact);
	return (inprm->hc_data);
}

int
vmbus_msghc_exec_noresult(struct vmbus_msghc *mh)
{
	sbintime_t time = SBT_1MS;
	struct hypercall_postmsg_in *inprm;
	bus_addr_t inprm_paddr;
	int i;

	inprm = vmbus_xact_req_data(mh->mh_xact);
	inprm_paddr = vmbus_xact_req_paddr(mh->mh_xact);

	/*
	 * Save the input parameter, so that we can restore it if the
	 * hypercall fails.
	 *
	 * XXX
	 * Is this really necessary?!  i.e. Will the Hypercall ever
	 * overwrite the input parameter?
	 */
	memcpy(&mh->mh_inprm_save, inprm, HYPERCALL_POSTMSGIN_SIZE);

	/*
	 * To cope with transient failures, e.g. insufficient resources
	 * on the host side, we retry the post-message hypercall several
	 * times.  20 retries seem sufficient.
	 */
#define HC_RETRY_MAX	20

	for (i = 0; i < HC_RETRY_MAX; ++i) {
		uint64_t status;

		status = hypercall_post_message(inprm_paddr);
		if (status == HYPERCALL_STATUS_SUCCESS)
			return 0;

		pause_sbt("hcpmsg", time, 0, C_HARDCLOCK);
		if (time < SBT_1S * 2)
			time *= 2;

		/* Restore input parameter and try again */
		memcpy(inprm, &mh->mh_inprm_save, HYPERCALL_POSTMSGIN_SIZE);
	}

#undef HC_RETRY_MAX

	return EIO;
}

int
vmbus_msghc_exec(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
{
	int error;

	vmbus_xact_activate(mh->mh_xact);
	error = vmbus_msghc_exec_noresult(mh);
	if (error)
		vmbus_xact_deactivate(mh->mh_xact);
	return error;
}

void
vmbus_msghc_exec_cancel(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
{

	vmbus_xact_deactivate(mh->mh_xact);
}

const struct vmbus_message *
vmbus_msghc_wait_result(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
{
	size_t resp_len;

	return (vmbus_xact_wait(mh->mh_xact, &resp_len));
}

const struct vmbus_message *
vmbus_msghc_poll_result(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
{
	size_t resp_len;

	return (vmbus_xact_poll(mh->mh_xact, &resp_len));
}

void
vmbus_msghc_wakeup(struct vmbus_softc *sc, const struct vmbus_message *msg)
{

	vmbus_xact_ctx_wakeup(sc->vmbus_xc, msg, sizeof(*msg));
}

uint32_t
vmbus_gpadl_alloc(struct vmbus_softc *sc)
{
	uint32_t gpadl;

again:
	gpadl = atomic_fetchadd_int(&sc->vmbus_gpadl, 1);
	if (gpadl == 0)
		goto again;
	return (gpadl);
}

/* Used for Hyper-V socket when guest client connects to host */
int
vmbus_req_tl_connect(struct hyperv_guid *guest_srv_id,
    struct hyperv_guid *host_srv_id)
{
	struct vmbus_softc *sc = vmbus_get_softc();
	struct vmbus_chanmsg_tl_connect *req;
	struct vmbus_msghc *mh;
	int error;

	if (!sc)
		return ENXIO;

	mh = vmbus_msghc_get(sc, sizeof(*req));
	if (mh == NULL) {
		device_printf(sc->vmbus_dev,
		    "can not get msg hypercall for tl connect\n");
		return ENXIO;
	}

	req = vmbus_msghc_dataptr(mh);
	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_TL_CONN;
	req->guest_endpoint_id = *guest_srv_id;
	req->host_service_id = *host_srv_id;

	error = vmbus_msghc_exec_noresult(mh);
	vmbus_msghc_put(sc, mh);

	if (error) {
		device_printf(sc->vmbus_dev,
		    "tl connect msg hypercall failed\n");
	}

	return error;
}

static int
vmbus_connect(struct vmbus_softc *sc, uint32_t version)
{
	struct vmbus_chanmsg_connect *req;
	const struct vmbus_message *msg;
	struct vmbus_msghc *mh;
	int error, done = 0;

	mh = vmbus_msghc_get(sc, sizeof(*req));
	if (mh == NULL)
		return ENXIO;

	req = vmbus_msghc_dataptr(mh);
	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CONNECT;
	req->chm_ver = version;
	req->chm_evtflags = sc->vmbus_evtflags_dma.hv_paddr;
	req->chm_mnf1 = sc->vmbus_mnf1_dma.hv_paddr;
	req->chm_mnf2 = sc->vmbus_mnf2_dma.hv_paddr;

	error = vmbus_msghc_exec(sc, mh);
	if (error) {
		vmbus_msghc_put(sc, mh);
		return error;
	}

	msg = vmbus_msghc_wait_result(sc, mh);
	done = ((const struct vmbus_chanmsg_connect_resp *)
	    msg->msg_data)->chm_done;

	vmbus_msghc_put(sc, mh);

	return (done ? 0 : EOPNOTSUPP);
}
static int
vmbus_init(struct vmbus_softc *sc)
{
	int i;

	for (i = 0; i < nitems(vmbus_version); ++i) {
		int error;

		error = vmbus_connect(sc, vmbus_version[i]);
		if (!error) {
			vmbus_current_version = vmbus_version[i];
			sc->vmbus_version = vmbus_version[i];
			device_printf(sc->vmbus_dev, "version %u.%u\n",
			    VMBUS_VERSION_MAJOR(sc->vmbus_version),
			    VMBUS_VERSION_MINOR(sc->vmbus_version));
			return 0;
		}
	}
	return ENXIO;
}

static void
vmbus_disconnect(struct vmbus_softc *sc)
{
	struct vmbus_chanmsg_disconnect *req;
	struct vmbus_msghc *mh;
	int error;

	mh = vmbus_msghc_get(sc, sizeof(*req));
	if (mh == NULL) {
		device_printf(sc->vmbus_dev,
		    "can not get msg hypercall for disconnect\n");
		return;
	}

	req = vmbus_msghc_dataptr(mh);
	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_DISCONNECT;

	error = vmbus_msghc_exec_noresult(mh);
	vmbus_msghc_put(sc, mh);

	if (error) {
		device_printf(sc->vmbus_dev,
		    "disconnect msg hypercall failed\n");
	}
}

static int
vmbus_req_channels(struct vmbus_softc *sc)
{
	struct vmbus_chanmsg_chrequest *req;
	struct vmbus_msghc *mh;
	int error;

	mh = vmbus_msghc_get(sc, sizeof(*req));
	if (mh == NULL)
		return ENXIO;

	req = vmbus_msghc_dataptr(mh);
	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHREQUEST;

	error = vmbus_msghc_exec_noresult(mh);
	vmbus_msghc_put(sc, mh);

	return error;
}

static void
vmbus_scan_done_task(void *xsc, int pending __unused)
{
	struct vmbus_softc *sc = xsc;

	bus_topo_lock();
	sc->vmbus_scandone = true;
	bus_topo_unlock();
	wakeup(&sc->vmbus_scandone);
}

static void
vmbus_scan_done(struct vmbus_softc *sc,
    const struct vmbus_message *msg __unused)
{

	taskqueue_enqueue(sc->vmbus_devtq, &sc->vmbus_scandone_task);
}

static int
vmbus_scan(struct vmbus_softc *sc)
{
	int error;

	/*
	 * Identify, probe and attach for non-channel devices.
	 */
	bus_generic_probe(sc->vmbus_dev);
	bus_generic_attach(sc->vmbus_dev);

	/*
	 * This taskqueue serializes vmbus devices' attach and detach
	 * for channel offer and rescind messages.
	 */
	sc->vmbus_devtq = taskqueue_create("vmbus dev", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->vmbus_devtq);
	taskqueue_start_threads(&sc->vmbus_devtq, 1, PI_NET, "vmbusdev");
	TASK_INIT(&sc->vmbus_scandone_task, 0, vmbus_scan_done_task, sc);

	/*
	 * This taskqueue handles sub-channel detach, so that vmbus
	 * device's detach running in vmbus_devtq can drain its sub-
	 * channels.
	 */
	sc->vmbus_subchtq = taskqueue_create("vmbus subch", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->vmbus_subchtq);
	taskqueue_start_threads(&sc->vmbus_subchtq, 1, PI_NET, "vmbussch");

	/*
	 * Start vmbus scanning.
	 */
	error = vmbus_req_channels(sc);
	if (error) {
		device_printf(sc->vmbus_dev, "channel request failed: %d\n",
		    error);
		return (error);
	}

	/*
	 * Wait for all vmbus devices from the initial channel offers to be
	 * attached.
	 */
	bus_topo_assert();
	while (!sc->vmbus_scandone)
		mtx_sleep(&sc->vmbus_scandone, bus_topo_mtx(), 0, "vmbusdev", 0);

	if (bootverbose) {
		device_printf(sc->vmbus_dev, "device scan, probe and attach "
		    "done\n");
	}
	return (0);
}

static void
vmbus_scan_teardown(struct vmbus_softc *sc)
{

	bus_topo_assert();
	if (sc->vmbus_devtq != NULL) {
		bus_topo_unlock();
		taskqueue_free(sc->vmbus_devtq);
		bus_topo_lock();
		sc->vmbus_devtq = NULL;
	}
	if (sc->vmbus_subchtq != NULL) {
		bus_topo_unlock();
		taskqueue_free(sc->vmbus_subchtq);
		bus_topo_lock();
		sc->vmbus_subchtq = NULL;
	}
}

static void
vmbus_chanmsg_handle(struct vmbus_softc *sc, const struct vmbus_message *msg)
{
	vmbus_chanmsg_proc_t msg_proc;
	uint32_t msg_type;

	msg_type = ((const struct vmbus_chanmsg_hdr *)msg->msg_data)->chm_type;
	if (msg_type >= VMBUS_CHANMSG_TYPE_MAX) {
		device_printf(sc->vmbus_dev, "unknown message type 0x%x\n",
		    msg_type);
		return;
	}

	msg_proc = vmbus_chanmsg_handlers[msg_type];
	if (msg_proc != NULL)
		msg_proc(sc, msg);

	/* Channel specific processing */
	vmbus_chan_msgproc(sc, msg);
}

static void
vmbus_msg_task(void *xsc, int pending __unused)
{
	struct vmbus_softc *sc = xsc;
	volatile struct vmbus_message *msg;

	msg = VMBUS_PCPU_GET(sc, message, curcpu) + VMBUS_SINT_MESSAGE;
	for (;;) {
		if (msg->msg_type == HYPERV_MSGTYPE_NONE) {
			/* No message */
			break;
		} else if (msg->msg_type == HYPERV_MSGTYPE_CHANNEL) {
			/* Channel message */
			vmbus_chanmsg_handle(sc,
			    __DEVOLATILE(const struct vmbus_message *, msg));
		}

		msg->msg_type = HYPERV_MSGTYPE_NONE;
		/*
		 * Make sure the write to msg_type (i.e. setting it to
		 * HYPERV_MSGTYPE_NONE) happens before we read msg_flags
		 * and EOM.  Otherwise, the EOM will not deliver any more
		 * messages, since there is no empty slot.
		 *
		 * NOTE:
		 * mb() is used here, since atomic_thread_fence_seq_cst()
		 * will become a compiler fence on UP kernels.
		 */
		mb();
		if (msg->msg_flags & VMBUS_MSGFLAG_PENDING) {
			/*
			 * This will cause a message queue rescan to
			 * possibly deliver another msg from the hypervisor.
			 */
			WRMSR(MSR_HV_EOM, 0);
		}
	}
}

static __inline int
vmbus_handle_intr1(struct vmbus_softc *sc, struct trapframe *frame, int cpu)
{
	volatile struct vmbus_message *msg;
	struct vmbus_message *msg_base;

	msg_base = VMBUS_PCPU_GET(sc, message, cpu);

	/*
	 * Check event timer.
	 *
	 * TODO: move this to an independent IDT vector.
	 */
	vmbus_handle_timer_intr1(msg_base, frame);

	/*
	 * Check events.  Hot path for network and storage I/O data; high rate.
	 *
	 * NOTE:
	 * As recommended by the Windows guest fellows, we check events before
	 * checking messages.
	 */
	sc->vmbus_event_proc(sc, cpu);

	/*
	 * Check messages.  Mainly management tasks; ultra low rate.
	 */
	msg = msg_base + VMBUS_SINT_MESSAGE;
	if (__predict_false(msg->msg_type != HYPERV_MSGTYPE_NONE)) {
		taskqueue_enqueue(VMBUS_PCPU_GET(sc, message_tq, cpu),
		    VMBUS_PCPU_PTR(sc, message_task, cpu));
	}

	return (FILTER_HANDLED);
}

void
vmbus_handle_intr(struct trapframe *trap_frame)
{
	struct vmbus_softc *sc = vmbus_get_softc();
	int cpu = curcpu;

	/*
	 * Disable preemption.
	 */
	critical_enter();

	/*
	 * Do a little interrupt counting.  This uses the x86-specific
	 * intrcnt_add() function.
	 */
#if !defined(__aarch64__)
	(*VMBUS_PCPU_GET(sc, intr_cnt, cpu))++;
#endif /* not for aarch64 */

	vmbus_handle_intr1(sc, trap_frame, cpu);

	/*
	 * Enable preemption.
	 */
	critical_exit();
}

static void
vmbus_synic_setup(void *xsc)
{
	struct vmbus_softc *sc = xsc;
	int cpu = curcpu;
	uint64_t val, orig;
	uint32_t sint;

	if (hyperv_features & CPUID_HV_MSR_VP_INDEX) {
		/* Save virtual processor id. */
		VMBUS_PCPU_GET(sc, vcpuid, cpu) = RDMSR(MSR_HV_VP_INDEX);
	} else {
		/* Set virtual processor id to 0 for compatibility. */
		VMBUS_PCPU_GET(sc, vcpuid, cpu) = 0;
	}

	/*
	 * Setup the SynIC message.
	 */
	orig = RDMSR(MSR_HV_SIMP);
	val = MSR_HV_SIMP_ENABLE | (orig & MSR_HV_SIMP_RSVD_MASK) |
	    ((VMBUS_PCPU_GET(sc, message_dma.hv_paddr, cpu) >> PAGE_SHIFT)
	     << MSR_HV_SIMP_PGSHIFT);
	WRMSR(MSR_HV_SIMP, val);

	/*
	 * Setup the SynIC event flags.
	 */
	orig = RDMSR(MSR_HV_SIEFP);
	val = MSR_HV_SIEFP_ENABLE | (orig & MSR_HV_SIEFP_RSVD_MASK) |
	    ((VMBUS_PCPU_GET(sc, event_flags_dma.hv_paddr, cpu) >> PAGE_SHIFT)
	     << MSR_HV_SIEFP_PGSHIFT);
	WRMSR(MSR_HV_SIEFP, val);

	/*
	 * Configure and unmask SINT for message and event flags.
	 */
	sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE;
	orig = RDMSR(sint);
	val = sc->vmbus_idtvec | MSR_HV_SINT_AUTOEOI |
	    (orig & MSR_HV_SINT_RSVD_MASK);
	WRMSR(sint, val);

	/*
	 * Configure and unmask SINT for timer.
	 */
	vmbus_synic_setup1(sc);

	/*
	 * All done; enable SynIC.
	 */
	orig = RDMSR(MSR_HV_SCONTROL);
	val = MSR_HV_SCTRL_ENABLE | (orig & MSR_HV_SCTRL_RSVD_MASK);
	WRMSR(MSR_HV_SCONTROL, val);
}

static void
vmbus_synic_teardown(void *arg)
{
	uint64_t orig;
	uint32_t sint;

	/*
	 * Disable SynIC.
	 */
	orig = RDMSR(MSR_HV_SCONTROL);
	WRMSR(MSR_HV_SCONTROL, (orig & MSR_HV_SCTRL_RSVD_MASK));

	/*
	 * Mask message and event flags SINT.
	 */
	sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE;
	orig = RDMSR(sint);
	WRMSR(sint, orig | MSR_HV_SINT_MASKED);

	/*
	 * Mask timer SINT.
	 */
	vmbus_synic_teardown1();

	/*
	 * Teardown SynIC message.
	 */
	orig = RDMSR(MSR_HV_SIMP);
	WRMSR(MSR_HV_SIMP, (orig & MSR_HV_SIMP_RSVD_MASK));

	/*
	 * Teardown SynIC event flags.
	 */
	orig = RDMSR(MSR_HV_SIEFP);
	WRMSR(MSR_HV_SIEFP, (orig & MSR_HV_SIEFP_RSVD_MASK));
}

static int
vmbus_dma_alloc(struct vmbus_softc *sc)
{
	bus_dma_tag_t parent_dtag;
	uint8_t *evtflags;
	int cpu;

	parent_dtag = bus_get_dma_tag(sc->vmbus_dev);
	CPU_FOREACH(cpu) {
		void *ptr;

		/*
		 * Per-cpu messages and event flags.
		 */
		ptr = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
		    PAGE_SIZE, VMBUS_PCPU_PTR(sc, message_dma, cpu),
		    BUS_DMA_WAITOK | BUS_DMA_ZERO);
		if (ptr == NULL)
			return ENOMEM;
		VMBUS_PCPU_GET(sc, message, cpu) = ptr;

		ptr = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
		    PAGE_SIZE, VMBUS_PCPU_PTR(sc, event_flags_dma, cpu),
		    BUS_DMA_WAITOK | BUS_DMA_ZERO);
		if (ptr == NULL)
			return ENOMEM;
		VMBUS_PCPU_GET(sc, event_flags, cpu) = ptr;
	}

	evtflags = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
	    PAGE_SIZE, &sc->vmbus_evtflags_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (evtflags == NULL)
		return ENOMEM;
	sc->vmbus_rx_evtflags = (u_long *)evtflags;
	sc->vmbus_tx_evtflags = (u_long *)(evtflags + (PAGE_SIZE / 2));
	sc->vmbus_evtflags = evtflags;

	sc->vmbus_mnf1 = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
	    PAGE_SIZE, &sc->vmbus_mnf1_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (sc->vmbus_mnf1 == NULL)
		return ENOMEM;

	sc->vmbus_mnf2 = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
	    sizeof(struct vmbus_mnf), &sc->vmbus_mnf2_dma,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (sc->vmbus_mnf2 == NULL)
		return ENOMEM;

	return 0;
}

static void
vmbus_dma_free(struct vmbus_softc *sc)
{
	int cpu;

	if (sc->vmbus_evtflags != NULL) {
		hyperv_dmamem_free(&sc->vmbus_evtflags_dma, sc->vmbus_evtflags);
		sc->vmbus_evtflags = NULL;
		sc->vmbus_rx_evtflags = NULL;
		sc->vmbus_tx_evtflags = NULL;
	}
	if (sc->vmbus_mnf1 != NULL) {
		hyperv_dmamem_free(&sc->vmbus_mnf1_dma, sc->vmbus_mnf1);
		sc->vmbus_mnf1 = NULL;
	}
	if (sc->vmbus_mnf2 != NULL) {
		hyperv_dmamem_free(&sc->vmbus_mnf2_dma, sc->vmbus_mnf2);
		sc->vmbus_mnf2 = NULL;
	}

	CPU_FOREACH(cpu) {
		if (VMBUS_PCPU_GET(sc, message, cpu) != NULL) {
			hyperv_dmamem_free(
			    VMBUS_PCPU_PTR(sc, message_dma, cpu),
			    VMBUS_PCPU_GET(sc, message, cpu));
			VMBUS_PCPU_GET(sc, message, cpu) = NULL;
		}
		if (VMBUS_PCPU_GET(sc, event_flags, cpu) != NULL) {
			hyperv_dmamem_free(
			    VMBUS_PCPU_PTR(sc, event_flags_dma, cpu),
			    VMBUS_PCPU_GET(sc, event_flags, cpu));
			VMBUS_PCPU_GET(sc, event_flags, cpu) = NULL;
		}
	}
}

static int
vmbus_intr_setup(struct vmbus_softc *sc)
{
	int cpu;

	CPU_FOREACH(cpu) {
		char buf[MAXCOMLEN + 1];
		cpuset_t cpu_mask;

		/* Allocate an interrupt counter for Hyper-V interrupt */
		snprintf(buf, sizeof(buf), "cpu%d:hyperv", cpu);
#if !defined(__aarch64__)
		intrcnt_add(buf, VMBUS_PCPU_PTR(sc, intr_cnt, cpu));
#endif /* not for aarch64 */

		/*
		 * Setup taskqueue to handle events.  Task will be per-
		 * channel.
		 */
		VMBUS_PCPU_GET(sc, event_tq, cpu) = taskqueue_create_fast(
		    "hyperv event", M_WAITOK, taskqueue_thread_enqueue,
		    VMBUS_PCPU_PTR(sc, event_tq, cpu));
		if (vmbus_pin_evttask) {
			CPU_SETOF(cpu, &cpu_mask);
			taskqueue_start_threads_cpuset(
			    VMBUS_PCPU_PTR(sc, event_tq, cpu), 1, PI_NET,
			    &cpu_mask, "hvevent%d", cpu);
		} else {
			taskqueue_start_threads(
			    VMBUS_PCPU_PTR(sc, event_tq, cpu), 1, PI_NET,
			    "hvevent%d", cpu);
		}

		/*
		 * Setup tasks and taskqueues to handle messages.
		 */
		VMBUS_PCPU_GET(sc, message_tq, cpu) = taskqueue_create_fast(
		    "hyperv msg", M_WAITOK, taskqueue_thread_enqueue,
		    VMBUS_PCPU_PTR(sc, message_tq, cpu));
		CPU_SETOF(cpu, &cpu_mask);
		taskqueue_start_threads_cpuset(
		    VMBUS_PCPU_PTR(sc, message_tq, cpu), 1, PI_NET, &cpu_mask,
		    "hvmsg%d", cpu);
		TASK_INIT(VMBUS_PCPU_PTR(sc, message_task, cpu), 0,
		    vmbus_msg_task, sc);
	}
	return (vmbus_setup_intr1(sc));
}

static void
vmbus_intr_teardown(struct vmbus_softc *sc)
{
	vmbus_intr_teardown1(sc);
}

static int
vmbus_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
	return (ENOENT);
}

static int
vmbus_child_pnpinfo(device_t dev, device_t child, struct sbuf *sb)
{
	const struct vmbus_channel *chan;
	char guidbuf[HYPERV_GUID_STRLEN];

	chan = vmbus_get_channel(child);
	if (chan == NULL) {
		/* Event timer device, which does not belong to a channel */
		return (0);
	}

	hyperv_guid2str(&chan->ch_guid_type, guidbuf, sizeof(guidbuf));
	sbuf_printf(sb, "classid=%s", guidbuf);

	hyperv_guid2str(&chan->ch_guid_inst, guidbuf, sizeof(guidbuf));
	sbuf_printf(sb, " deviceid=%s", guidbuf);

	return (0);
}

int
vmbus_add_child(struct vmbus_channel *chan)
{
	struct vmbus_softc *sc = chan->ch_vmbus;
	device_t parent = sc->vmbus_dev;

	bus_topo_lock();
	chan->ch_dev = device_add_child(parent, NULL, -1);
	if (chan->ch_dev == NULL) {
		bus_topo_unlock();
		device_printf(parent, "device_add_child for chan%u failed\n",
		    chan->ch_id);
		return (ENXIO);
	}
	device_set_ivars(chan->ch_dev, chan);
	device_probe_and_attach(chan->ch_dev);
	bus_topo_unlock();

	return (0);
}

int
vmbus_delete_child(struct vmbus_channel *chan)
{
	int error = 0;

	bus_topo_lock();
	if (chan->ch_dev != NULL) {
		error = device_delete_child(chan->ch_vmbus->vmbus_dev,
		    chan->ch_dev);
		chan->ch_dev = NULL;
	}
	bus_topo_unlock();
	return (error);
}

static int
vmbus_sysctl_version(SYSCTL_HANDLER_ARGS)
{
	struct vmbus_softc *sc = arg1;
	char verstr[16];

	snprintf(verstr, sizeof(verstr), "%u.%u",
	    VMBUS_VERSION_MAJOR(sc->vmbus_version),
	    VMBUS_VERSION_MINOR(sc->vmbus_version));
	return sysctl_handle_string(oidp, verstr, sizeof(verstr), req);
}

/*
 * We need this function to make sure the MMIO resource is allocated from
 * the ranges found in _CRS.
 *
 * For the release function, we can use bus_generic_release_resource().
 */
static struct resource *
vmbus_alloc_resource(device_t dev, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	device_t parent = device_get_parent(dev);
	struct resource *res;

#ifdef NEW_PCIB
	if (type == SYS_RES_MEMORY) {
		struct vmbus_softc *sc = device_get_softc(dev);

		res = pcib_host_res_alloc(&sc->vmbus_mmio_res, child, type,
		    rid, start, end, count, flags);
	} else
#endif
	{
		res = BUS_ALLOC_RESOURCE(parent, child, type, rid, start,
		    end, count, flags);
	}

	return (res);
}

static int
vmbus_alloc_msi(device_t bus, device_t dev, int count, int maxcount, int *irqs)
{

	return (PCIB_ALLOC_MSI(device_get_parent(bus), dev, count, maxcount,
	    irqs));
}

static int
vmbus_release_msi(device_t bus, device_t dev, int count, int *irqs)
{

	return (PCIB_RELEASE_MSI(device_get_parent(bus), dev, count, irqs));
}

static int
vmbus_alloc_msix(device_t bus, device_t dev, int *irq)
{

	return (PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq));
}

static int
vmbus_release_msix(device_t bus, device_t dev, int irq)
{

	return (PCIB_RELEASE_MSIX(device_get_parent(bus), dev, irq));
}

static int
vmbus_map_msi(device_t bus, device_t dev, int irq, uint64_t *addr,
    uint32_t *data)
{

	return (PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data));
}

static uint32_t
vmbus_get_version_method(device_t bus, device_t dev)
{
	struct vmbus_softc *sc = device_get_softc(bus);

	return sc->vmbus_version;
}

static int
vmbus_probe_guid_method(device_t bus, device_t dev,
    const struct hyperv_guid *guid)
{
	const struct vmbus_channel *chan = vmbus_get_channel(dev);

	if (memcmp(&chan->ch_guid_type, guid, sizeof(struct hyperv_guid)) == 0)
		return 0;
	return ENXIO;
}

static uint32_t
vmbus_get_vcpu_id_method(device_t bus, device_t dev, int cpu)
{
	const struct vmbus_softc *sc = device_get_softc(bus);

	return (VMBUS_PCPU_GET(sc, vcpuid, cpu));
}

static struct taskqueue *
vmbus_get_eventtq_method(device_t bus, device_t dev __unused, int cpu)
{
	const struct vmbus_softc *sc = device_get_softc(bus);

	KASSERT(cpu >= 0 && cpu < mp_ncpus, ("invalid cpu%d", cpu));
	return (VMBUS_PCPU_GET(sc, event_tq, cpu));
}

#ifdef NEW_PCIB
#define VTPM_BASE_ADDR	0xfed40000
#define FOUR_GB		(1ULL << 32)

enum parse_pass { parse_64, parse_32 };

struct parse_context {
	device_t vmbus_dev;
	enum parse_pass pass;
};

static ACPI_STATUS
parse_crs(ACPI_RESOURCE *res, void *ctx)
{
	const struct parse_context *pc = ctx;
	device_t vmbus_dev = pc->vmbus_dev;

	struct vmbus_softc *sc = device_get_softc(vmbus_dev);
	UINT64 start, end;

	switch (res->Type) {
	case ACPI_RESOURCE_TYPE_ADDRESS32:
		start = res->Data.Address32.Address.Minimum;
		end = res->Data.Address32.Address.Maximum;
		break;

	case ACPI_RESOURCE_TYPE_ADDRESS64:
		start = res->Data.Address64.Address.Minimum;
		end = res->Data.Address64.Address.Maximum;
		break;

	default:
		/* Unused types. */
		return (AE_OK);
	}

	/*
	 * We don't use <1MB addresses.
	 */
	if (end < 0x100000)
		return (AE_OK);

	/* Don't conflict with vTPM. */
	if (end >= VTPM_BASE_ADDR && start < VTPM_BASE_ADDR)
		end = VTPM_BASE_ADDR - 1;

	if ((pc->pass == parse_32 && start < FOUR_GB) ||
	    (pc->pass == parse_64 && start >= FOUR_GB))
		pcib_host_res_decodes(&sc->vmbus_mmio_res, SYS_RES_MEMORY,
		    start, end, 0);

	return (AE_OK);
}

static void
vmbus_get_crs(device_t dev, device_t vmbus_dev, enum parse_pass pass)
{
	struct parse_context pc;
	ACPI_STATUS status;

	if (bootverbose)
		device_printf(dev, "walking _CRS, pass=%d\n", pass);

	pc.vmbus_dev = vmbus_dev;
	pc.pass = pass;
	status = AcpiWalkResources(acpi_get_handle(dev), "_CRS",
	    parse_crs, &pc);

	if (bootverbose && ACPI_FAILURE(status))
		device_printf(dev, "_CRS: not found, pass=%d\n", pass);
}

static void
vmbus_get_mmio_res_pass(device_t dev, enum parse_pass pass)
{
	device_t acpi0, parent;

	parent = device_get_parent(dev);

	acpi0 = device_get_parent(parent);
	if (strcmp("acpi0", device_get_nameunit(acpi0)) == 0) {
		device_t *children;
		int count;

		/*
		 * Try to locate VMBUS resources and find _CRS on them.
		 */
		if (device_get_children(acpi0, &children, &count) == 0) {
			int i;

			for (i = 0; i < count; ++i) {
				if (!device_is_attached(children[i]))
					continue;

				if (strcmp("vmbus_res",
				    device_get_name(children[i])) == 0)
					vmbus_get_crs(children[i], dev, pass);
			}
			free(children, M_TEMP);
		}

		/*
		 * Try to find _CRS on acpi.
		 */
		vmbus_get_crs(acpi0, dev, pass);
	} else {
		device_printf(dev, "not grandchild of acpi\n");
	}

	/*
	 * Try to find _CRS on parent.
	 */
	vmbus_get_crs(parent, dev, pass);
}

static void
vmbus_get_mmio_res(device_t dev)
{
	struct vmbus_softc *sc = device_get_softc(dev);

	/*
	 * We walk the resources twice to make sure that, in the resource
	 * list, the 32-bit resources appear after the 64-bit resources.
	 * NB: resource_list_add() uses INSERT_TAIL.  This way, when we
	 * iterate through the list to find a range for a 64-bit BAR in
	 * vmbus_alloc_resource(), we can make sure we try to use >4GB
	 * ranges first.
	 */
	pcib_host_res_init(dev, &sc->vmbus_mmio_res);

	vmbus_get_mmio_res_pass(dev, parse_64);
	vmbus_get_mmio_res_pass(dev, parse_32);
}

/*
 * On Gen2 VMs, Hyper-V provides MMIO space for the framebuffer.
 * This MMIO address range is not usable by other PCI devices.
 * Currently only the efifb and vbefb drivers use this range, without
 * reserving it from the system.
 * Therefore, the vmbus driver reserves it before any other PCI device
 * drivers start to request MMIO addresses.
 */
static struct resource *hv_fb_res;

static void
vmbus_fb_mmio_res(device_t dev)
{
	struct efi_fb *efifb;
#if !defined(__aarch64__)
	struct vbe_fb *vbefb;
#endif /* aarch64 */
	rman_res_t fb_start, fb_end, fb_count;
	int fb_height, fb_width;
	caddr_t kmdp;

	struct vmbus_softc *sc = device_get_softc(dev);
	int rid = 0;

	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");
	efifb = (struct efi_fb *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_FB);
#if !defined(__aarch64__)
	vbefb = (struct vbe_fb *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_VBE_FB);
#endif /* aarch64 */
	if (efifb != NULL) {
		fb_start = efifb->fb_addr;
		fb_end = efifb->fb_addr + efifb->fb_size;
		fb_count = efifb->fb_size;
		fb_height = efifb->fb_height;
		fb_width = efifb->fb_width;
	}
#if !defined(__aarch64__)
	else if (vbefb != NULL) {
		fb_start = vbefb->fb_addr;
		fb_end = vbefb->fb_addr + vbefb->fb_size;
		fb_count = vbefb->fb_size;
		fb_height = vbefb->fb_height;
		fb_width = vbefb->fb_width;
	}
#endif /* aarch64 */
	else {
		if (bootverbose)
			device_printf(dev,
			    "no preloaded kernel fb information\n");
		/* We are on Gen1 VM, just return. */
		return;
	}

	if (bootverbose)
		device_printf(dev,
		    "fb: fb_addr: %#jx, size: %#jx, "
		    "actual size needed: 0x%x\n",
		    fb_start, fb_count, fb_height * fb_width);

	hv_fb_res = pcib_host_res_alloc(&sc->vmbus_mmio_res, dev,
	    SYS_RES_MEMORY, &rid, fb_start, fb_end, fb_count,
	    RF_ACTIVE | rman_make_alignment_flags(PAGE_SIZE));

	if (hv_fb_res && bootverbose)
		device_printf(dev,
		    "successfully reserved memory for framebuffer "
		    "starting at %#jx, size %#jx\n",
		    fb_start, fb_count);
}

static void
vmbus_free_mmio_res(device_t dev)
{
	struct vmbus_softc *sc = device_get_softc(dev);

	pcib_host_res_free(dev, &sc->vmbus_mmio_res);

	if (hv_fb_res)
		hv_fb_res = NULL;
}
#endif /* NEW_PCIB */

static void
vmbus_identify(driver_t *driver, device_t parent)
{

	if (device_get_unit(parent) != 0 || vm_guest != VM_GUEST_HV ||
	    (hyperv_features & CPUID_HV_MSR_SYNIC) == 0)
		return;
	device_add_child(parent, "vmbus", -1);
}

static int
vmbus_probe(device_t dev)
{

	if (device_get_unit(dev) != 0 || vm_guest != VM_GUEST_HV ||
	    (hyperv_features & CPUID_HV_MSR_SYNIC) == 0)
		return (ENXIO);

	device_set_desc(dev, "Hyper-V Vmbus");
	return (BUS_PROBE_DEFAULT);
}

/**
 * @brief Main vmbus driver initialization routine.
 *
 * Here, we
 * - initialize the vmbus driver context
 * - setup various driver entry points
 * - invoke the vmbus hv main init routine
 * - get the irq resource
 * - invoke the vmbus to add the vmbus root device
 * - setup the vmbus root device
 * - retrieve the channel offers
 */
static int
vmbus_doattach(struct vmbus_softc *sc)
{
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;
	int ret;

	if (sc->vmbus_flags & VMBUS_FLAG_ATTACHED)
		return (0);

#ifdef NEW_PCIB
	vmbus_get_mmio_res(sc->vmbus_dev);
	vmbus_fb_mmio_res(sc->vmbus_dev);
#endif

	sc->vmbus_flags |= VMBUS_FLAG_ATTACHED;

	sc->vmbus_gpadl = VMBUS_GPADL_START;
	mtx_init(&sc->vmbus_prichan_lock, "vmbus prichan", NULL, MTX_DEF);
	TAILQ_INIT(&sc->vmbus_prichans);
	mtx_init(&sc->vmbus_chan_lock, "vmbus channel", NULL, MTX_DEF);
	TAILQ_INIT(&sc->vmbus_chans);
	sc->vmbus_chmap = malloc(
	    sizeof(struct vmbus_channel *) * VMBUS_CHAN_MAX, M_DEVBUF,
	    M_WAITOK | M_ZERO);

	/*
	 * Create context for "post message" Hypercalls
	 */
	sc->vmbus_xc = vmbus_xact_ctx_create(bus_get_dma_tag(sc->vmbus_dev),
	    HYPERCALL_POSTMSGIN_SIZE, VMBUS_MSG_SIZE,
	    sizeof(struct vmbus_msghc));
	if (sc->vmbus_xc == NULL) {
		ret = ENXIO;
		goto cleanup;
	}

	/*
	 * Allocate DMA resources.
	 */
	ret = vmbus_dma_alloc(sc);
	if (ret != 0)
		goto cleanup;

	/*
	 * Setup interrupt.
	 */
	ret = vmbus_intr_setup(sc);
	if (ret != 0)
		goto cleanup;

	/*
	 * Setup SynIC.
	 */
	if (bootverbose)
		device_printf(sc->vmbus_dev, "smp_started = %d\n", smp_started);
	smp_rendezvous(NULL, vmbus_synic_setup, NULL, sc);
	sc->vmbus_flags |= VMBUS_FLAG_SYNIC;

	/*
	 * Initialize vmbus, e.g. connect to Hypervisor.
	 */
	ret = vmbus_init(sc);
	if (ret != 0)
		goto cleanup;

	if (sc->vmbus_version == VMBUS_VERSION_WS2008 ||
	    sc->vmbus_version == VMBUS_VERSION_WIN7)
		sc->vmbus_event_proc = vmbus_event_proc_compat;
	else
		sc->vmbus_event_proc = vmbus_event_proc;

	ret = vmbus_scan(sc);
	if (ret != 0)
		goto cleanup;

	ctx = device_get_sysctl_ctx(sc->vmbus_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vmbus_dev));
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "version",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    vmbus_sysctl_version, "A", "vmbus version");

	return (ret);

cleanup:
	vmbus_scan_teardown(sc);
	vmbus_intr_teardown(sc);
	vmbus_dma_free(sc);
	if (sc->vmbus_xc != NULL) {
		vmbus_xact_ctx_destroy(sc->vmbus_xc);
		sc->vmbus_xc = NULL;
	}
	free(__DEVOLATILE(void *, sc->vmbus_chmap), M_DEVBUF);
	mtx_destroy(&sc->vmbus_prichan_lock);
	mtx_destroy(&sc->vmbus_chan_lock);

	return (ret);
}

static void
vmbus_event_proc_dummy(struct vmbus_softc *sc __unused, int cpu __unused)
{
}

#if defined(EARLY_AP_STARTUP) || defined(__aarch64__)

static void
vmbus_intrhook(void *xsc)
{
	struct vmbus_softc *sc = xsc;

	if (bootverbose)
		device_printf(sc->vmbus_dev, "intrhook\n");
	vmbus_doattach(sc);
	config_intrhook_disestablish(&sc->vmbus_intrhook);
}

#endif /* EARLY_AP_STARTUP aarch64 */

static int
vmbus_attach(device_t dev)
{
	vmbus_sc = device_get_softc(dev);
	vmbus_sc->vmbus_dev = dev;
	vmbus_sc->vmbus_idtvec = -1;

	/*
	 * Event processing logic will be configured:
	 * - After the vmbus protocol version negotiation.
	 * - Before we request channel offers.
	 */
	vmbus_sc->vmbus_event_proc = vmbus_event_proc_dummy;

#if defined(EARLY_AP_STARTUP) || defined(__aarch64__)
	/*
	 * Defer the real attach until pause(9) works as expected.
	 */
	vmbus_sc->vmbus_intrhook.ich_func = vmbus_intrhook;
	vmbus_sc->vmbus_intrhook.ich_arg = vmbus_sc;
	config_intrhook_establish(&vmbus_sc->vmbus_intrhook);
#else /* !EARLY_AP_STARTUP */
	/*
	 * If the system has already booted and thread scheduling is
	 * possible, as indicated by the global cold set to zero, we
	 * just call the driver initialization directly.
	 */
	if (!cold)
		vmbus_doattach(vmbus_sc);
#endif /* EARLY_AP_STARTUP and aarch64 */

	return (0);
}

static int
vmbus_detach(device_t dev)
{
	struct vmbus_softc *sc = device_get_softc(dev);

	bus_generic_detach(dev);
	vmbus_chan_destroy_all(sc);

	vmbus_scan_teardown(sc);

	vmbus_disconnect(sc);

	if (sc->vmbus_flags & VMBUS_FLAG_SYNIC) {
		sc->vmbus_flags &= ~VMBUS_FLAG_SYNIC;
		smp_rendezvous(NULL, vmbus_synic_teardown, NULL, NULL);
	}

	vmbus_intr_teardown(sc);
	vmbus_dma_free(sc);

	if (sc->vmbus_xc != NULL) {
		vmbus_xact_ctx_destroy(sc->vmbus_xc);
		sc->vmbus_xc = NULL;
	}

	free(__DEVOLATILE(void *, sc->vmbus_chmap), M_DEVBUF);
	mtx_destroy(&sc->vmbus_prichan_lock);
	mtx_destroy(&sc->vmbus_chan_lock);

#ifdef NEW_PCIB
	vmbus_free_mmio_res(dev);
#endif

#if defined(__aarch64__)
	bus_release_resource(device_get_parent(dev), SYS_RES_IRQ, sc->vector,
	    sc->ires);
#endif
	return (0);
}

#if !defined(EARLY_AP_STARTUP) && !defined(__aarch64__)

static void
vmbus_sysinit(void *arg __unused)
{
	struct vmbus_softc *sc = vmbus_get_softc();

	if (vm_guest != VM_GUEST_HV || sc == NULL)
		return;

	/*
	 * If the system has already booted and thread scheduling is
	 * possible, as indicated by the global cold set to zero, we
	 * just call the driver initialization directly.
	 */
	if (!cold)
		vmbus_doattach(sc);
}

/*
 * NOTE:
 * We have to start as the last step of SI_SUB_SMP, i.e. after SMP is
 * initialized.
 */
SYSINIT(vmbus_initialize, SI_SUB_SMP, SI_ORDER_ANY, vmbus_sysinit, NULL);
#endif /* !EARLY_AP_STARTUP */