xref: /freebsd/sys/dev/hyperv/vmbus/vmbus.c (revision 7ef62cebc2f965b0f640263e179276928885e33d)
1 /*-
2  * Copyright (c) 2009-2012,2016-2017 Microsoft Corp.
3  * Copyright (c) 2012 NetApp Inc.
4  * Copyright (c) 2012 Citrix Inc.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice unmodified, this list of conditions, and the following
12  *    disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 /*
30  * VM Bus Driver Implementation
31  */
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include <sys/param.h>
36 #include <sys/bus.h>
37 #include <sys/kernel.h>
38 #include <sys/linker.h>
39 #include <sys/lock.h>
40 #include <sys/malloc.h>
41 #include <sys/module.h>
42 #include <sys/mutex.h>
43 #include <sys/sbuf.h>
44 #include <sys/smp.h>
45 #include <sys/sysctl.h>
46 #include <sys/systm.h>
47 #include <sys/taskqueue.h>
48 
49 #include <vm/vm.h>
50 #include <vm/vm_extern.h>
51 #include <vm/vm_param.h>
52 #include <vm/pmap.h>
53 
54 #include <machine/bus.h>
55 #if defined(__aarch64__)
56 #include <dev/psci/smccc.h>
57 #include <dev/hyperv/vmbus/aarch64/hyperv_machdep.h>
58 #include <dev/hyperv/vmbus/aarch64/hyperv_reg.h>
59 #else
60 #include <dev/hyperv/vmbus/x86/hyperv_machdep.h>
61 #include <dev/hyperv/vmbus/x86/hyperv_reg.h>
62 #include <machine/intr_machdep.h>
63 #include <x86/include/apicvar.h>
64 #endif
65 #include <machine/metadata.h>
66 #include <machine/md_var.h>
67 #include <machine/resource.h>
68 #include <contrib/dev/acpica/include/acpi.h>
69 #include <dev/acpica/acpivar.h>
70 
71 #include <dev/hyperv/include/hyperv.h>
72 #include <dev/hyperv/include/vmbus_xact.h>
73 #include <dev/hyperv/vmbus/hyperv_var.h>
74 #include <dev/hyperv/vmbus/vmbus_reg.h>
75 #include <dev/hyperv/vmbus/vmbus_var.h>
76 #include <dev/hyperv/vmbus/vmbus_chanvar.h>
77 #include <dev/hyperv/vmbus/hyperv_common_reg.h>
78 #include "acpi_if.h"
79 #include "pcib_if.h"
80 #include "vmbus_if.h"
81 
82 #define VMBUS_GPADL_START		0xe1e10
83 
84 struct vmbus_msghc {
85 	struct vmbus_xact		*mh_xact;
86 	struct hypercall_postmsg_in	mh_inprm_save;
87 };
88 
89 static void			vmbus_identify(driver_t *, device_t);
90 static int			vmbus_probe(device_t);
91 static int			vmbus_attach(device_t);
92 static int			vmbus_detach(device_t);
93 static int			vmbus_read_ivar(device_t, device_t, int,
94 				    uintptr_t *);
95 static int			vmbus_child_pnpinfo(device_t, device_t, struct sbuf *);
96 static struct resource		*vmbus_alloc_resource(device_t dev,
97 				    device_t child, int type, int *rid,
98 				    rman_res_t start, rman_res_t end,
99 				    rman_res_t count, u_int flags);
100 static int			vmbus_alloc_msi(device_t bus, device_t dev,
101 				    int count, int maxcount, int *irqs);
102 static int			vmbus_release_msi(device_t bus, device_t dev,
103 				    int count, int *irqs);
104 static int			vmbus_alloc_msix(device_t bus, device_t dev,
105 				    int *irq);
106 static int			vmbus_release_msix(device_t bus, device_t dev,
107 				    int irq);
108 static int			vmbus_map_msi(device_t bus, device_t dev,
109 				    int irq, uint64_t *addr, uint32_t *data);
110 static uint32_t			vmbus_get_version_method(device_t, device_t);
111 static int			vmbus_probe_guid_method(device_t, device_t,
112 				    const struct hyperv_guid *);
113 static uint32_t			vmbus_get_vcpu_id_method(device_t bus,
114 				    device_t dev, int cpu);
115 static struct taskqueue		*vmbus_get_eventtq_method(device_t, device_t,
116 				    int);
117 #if defined(EARLY_AP_STARTUP)
118 static void			vmbus_intrhook(void *);
119 #endif
120 
121 static int			vmbus_init(struct vmbus_softc *);
122 static int			vmbus_connect(struct vmbus_softc *, uint32_t);
123 static int			vmbus_req_channels(struct vmbus_softc *sc);
124 static void			vmbus_disconnect(struct vmbus_softc *);
125 static int			vmbus_scan(struct vmbus_softc *);
126 static void			vmbus_scan_teardown(struct vmbus_softc *);
127 static void			vmbus_scan_done(struct vmbus_softc *,
128 				    const struct vmbus_message *);
129 static void			vmbus_chanmsg_handle(struct vmbus_softc *,
130 				    const struct vmbus_message *);
131 static void			vmbus_msg_task(void *, int);
132 static void			vmbus_synic_setup(void *);
133 static void			vmbus_synic_teardown(void *);
134 static int			vmbus_sysctl_version(SYSCTL_HANDLER_ARGS);
135 static int			vmbus_dma_alloc(struct vmbus_softc *);
136 static void			vmbus_dma_free(struct vmbus_softc *);
137 static int			vmbus_intr_setup(struct vmbus_softc *);
138 static void			vmbus_intr_teardown(struct vmbus_softc *);
139 static int			vmbus_doattach(struct vmbus_softc *);
140 static void			vmbus_event_proc_dummy(struct vmbus_softc *,
141 				    int);
142 static struct vmbus_softc	*vmbus_sc;
143 
144 SYSCTL_NODE(_hw, OID_AUTO, vmbus, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
145     "Hyper-V vmbus");
146 
147 static int			vmbus_pin_evttask = 1;
148 SYSCTL_INT(_hw_vmbus, OID_AUTO, pin_evttask, CTLFLAG_RDTUN,
149     &vmbus_pin_evttask, 0, "Pin event tasks to their respective CPU");
150 uint32_t			vmbus_current_version;
151 
152 static const uint32_t		vmbus_version[] = {
153 	VMBUS_VERSION_WIN10,
154 	VMBUS_VERSION_WIN8_1,
155 	VMBUS_VERSION_WIN8,
156 	VMBUS_VERSION_WIN7,
157 	VMBUS_VERSION_WS2008
158 };
159 
160 static const vmbus_chanmsg_proc_t
161 vmbus_chanmsg_handlers[VMBUS_CHANMSG_TYPE_MAX] = {
162 	VMBUS_CHANMSG_PROC(CHOFFER_DONE, vmbus_scan_done),
163 	VMBUS_CHANMSG_PROC_WAKEUP(CONNECT_RESP)
164 };
165 
166 static device_method_t vmbus_methods[] = {
167 	/* Device interface */
168 	DEVMETHOD(device_identify,		vmbus_identify),
169 	DEVMETHOD(device_probe,			vmbus_probe),
170 	DEVMETHOD(device_attach,		vmbus_attach),
171 	DEVMETHOD(device_detach,		vmbus_detach),
172 	DEVMETHOD(device_shutdown,		bus_generic_shutdown),
173 	DEVMETHOD(device_suspend,		bus_generic_suspend),
174 	DEVMETHOD(device_resume,		bus_generic_resume),
175 
176 	/* Bus interface */
177 	DEVMETHOD(bus_add_child,		bus_generic_add_child),
178 	DEVMETHOD(bus_print_child,		bus_generic_print_child),
179 	DEVMETHOD(bus_read_ivar,		vmbus_read_ivar),
180 	DEVMETHOD(bus_child_pnpinfo,		vmbus_child_pnpinfo),
181 	DEVMETHOD(bus_alloc_resource,		vmbus_alloc_resource),
182 	DEVMETHOD(bus_release_resource,		bus_generic_release_resource),
183 	DEVMETHOD(bus_activate_resource,	bus_generic_activate_resource),
184 	DEVMETHOD(bus_deactivate_resource,	bus_generic_deactivate_resource),
185 	DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
186 	DEVMETHOD(bus_teardown_intr,		bus_generic_teardown_intr),
187 	DEVMETHOD(bus_get_cpus,			bus_generic_get_cpus),
188 
189 	/* pcib interface */
190 	DEVMETHOD(pcib_alloc_msi,		vmbus_alloc_msi),
191 	DEVMETHOD(pcib_release_msi,		vmbus_release_msi),
192 	DEVMETHOD(pcib_alloc_msix,		vmbus_alloc_msix),
193 	DEVMETHOD(pcib_release_msix,		vmbus_release_msix),
194 	DEVMETHOD(pcib_map_msi,			vmbus_map_msi),
195 
196 	/* Vmbus interface */
197 	DEVMETHOD(vmbus_get_version,		vmbus_get_version_method),
198 	DEVMETHOD(vmbus_probe_guid,		vmbus_probe_guid_method),
199 	DEVMETHOD(vmbus_get_vcpu_id,		vmbus_get_vcpu_id_method),
200 	DEVMETHOD(vmbus_get_event_taskq,	vmbus_get_eventtq_method),
201 
202 	DEVMETHOD_END
203 };
204 
205 static driver_t vmbus_driver = {
206 	"vmbus",
207 	vmbus_methods,
208 	sizeof(struct vmbus_softc)
209 };
210 
211 DRIVER_MODULE(vmbus, pcib, vmbus_driver, NULL, NULL);
212 DRIVER_MODULE(vmbus, acpi_syscontainer, vmbus_driver, NULL, NULL);
213 
214 MODULE_DEPEND(vmbus, acpi, 1, 1, 1);
215 MODULE_DEPEND(vmbus, pci, 1, 1, 1);
216 MODULE_VERSION(vmbus, 1);
217 
218 static __inline struct vmbus_softc *
219 vmbus_get_softc(void)
220 {
221 	return vmbus_sc;
222 }
223 
224 void
225 vmbus_msghc_reset(struct vmbus_msghc *mh, size_t dsize)
226 {
227 	struct hypercall_postmsg_in *inprm;
228 
229 	if (dsize > HYPERCALL_POSTMSGIN_DSIZE_MAX)
230 		panic("invalid data size %zu", dsize);
231 
232 	inprm = vmbus_xact_req_data(mh->mh_xact);
233 	memset(inprm, 0, HYPERCALL_POSTMSGIN_SIZE);
234 	inprm->hc_connid = VMBUS_CONNID_MESSAGE;
235 	inprm->hc_msgtype = HYPERV_MSGTYPE_CHANNEL;
236 	inprm->hc_dsize = dsize;
237 }
238 
239 struct vmbus_msghc *
240 vmbus_msghc_get(struct vmbus_softc *sc, size_t dsize)
241 {
242 	struct vmbus_msghc *mh;
243 	struct vmbus_xact *xact;
244 
245 	if (dsize > HYPERCALL_POSTMSGIN_DSIZE_MAX)
246 		panic("invalid data size %zu", dsize);
247 
248 	xact = vmbus_xact_get(sc->vmbus_xc,
249 	    dsize + __offsetof(struct hypercall_postmsg_in, hc_data[0]));
250 	if (xact == NULL)
251 		return (NULL);
252 
253 	mh = vmbus_xact_priv(xact, sizeof(*mh));
254 	mh->mh_xact = xact;
255 
256 	vmbus_msghc_reset(mh, dsize);
257 	return (mh);
258 }
259 
260 void
261 vmbus_msghc_put(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
262 {
263 
264 	vmbus_xact_put(mh->mh_xact);
265 }
266 
267 void *
268 vmbus_msghc_dataptr(struct vmbus_msghc *mh)
269 {
270 	struct hypercall_postmsg_in *inprm;
271 
272 	inprm = vmbus_xact_req_data(mh->mh_xact);
273 	return (inprm->hc_data);
274 }
275 
276 int
277 vmbus_msghc_exec_noresult(struct vmbus_msghc *mh)
278 {
279 	sbintime_t time = SBT_1MS;
280 	struct hypercall_postmsg_in *inprm;
281 	bus_addr_t inprm_paddr;
282 	int i;
283 
284 	inprm = vmbus_xact_req_data(mh->mh_xact);
285 	inprm_paddr = vmbus_xact_req_paddr(mh->mh_xact);
286 
287 	/*
288 	 * Save the input parameter so that we could restore the input
289 	 * parameter if the Hypercall failed.
290 	 *
291 	 * XXX
292 	 * Is this really necessary?!  i.e. Will the Hypercall ever
293 	 * overwrite the input parameter?
294 	 */
295 	memcpy(&mh->mh_inprm_save, inprm, HYPERCALL_POSTMSGIN_SIZE);
296 
297 	/*
298 	 * In order to cope with transient failures, e.g. insufficient
299 	 * resources on host side, we retry the post message Hypercall
300 	 * several times.  20 retries seem sufficient.
301 	 */
302 #define HC_RETRY_MAX	20
303 
304 	for (i = 0; i < HC_RETRY_MAX; ++i) {
305 		uint64_t status;
306 
307 		status = hypercall_post_message(inprm_paddr);
308 		if (status == HYPERCALL_STATUS_SUCCESS)
309 			return 0;
310 
311 		pause_sbt("hcpmsg", time, 0, C_HARDCLOCK);
312 		if (time < SBT_1S * 2)
313 			time *= 2;
314 
315 		/* Restore input parameter and try again */
316 		memcpy(inprm, &mh->mh_inprm_save, HYPERCALL_POSTMSGIN_SIZE);
317 	}
318 
319 #undef HC_RETRY_MAX
320 
321 	return EIO;
322 }
323 
324 int
325 vmbus_msghc_exec(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
326 {
327 	int error;
328 
329 	vmbus_xact_activate(mh->mh_xact);
330 	error = vmbus_msghc_exec_noresult(mh);
331 	if (error)
332 		vmbus_xact_deactivate(mh->mh_xact);
333 	return error;
334 }
335 
336 void
337 vmbus_msghc_exec_cancel(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
338 {
339 
340 	vmbus_xact_deactivate(mh->mh_xact);
341 }
342 
343 const struct vmbus_message *
344 vmbus_msghc_wait_result(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
345 {
346 	size_t resp_len;
347 
348 	return (vmbus_xact_wait(mh->mh_xact, &resp_len));
349 }
350 
351 const struct vmbus_message *
352 vmbus_msghc_poll_result(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
353 {
354 	size_t resp_len;
355 
356 	return (vmbus_xact_poll(mh->mh_xact, &resp_len));
357 }
358 
359 void
360 vmbus_msghc_wakeup(struct vmbus_softc *sc, const struct vmbus_message *msg)
361 {
362 
363 	vmbus_xact_ctx_wakeup(sc->vmbus_xc, msg, sizeof(*msg));
364 }
365 
366 uint32_t
367 vmbus_gpadl_alloc(struct vmbus_softc *sc)
368 {
369 	uint32_t gpadl;
370 
371 again:
372 	gpadl = atomic_fetchadd_int(&sc->vmbus_gpadl, 1);
373 	if (gpadl == 0)
374 		goto again;
375 	return (gpadl);
376 }
377 
378 /* Used for Hyper-V socket when guest client connects to host */
379 int
380 vmbus_req_tl_connect(struct hyperv_guid *guest_srv_id,
381     struct hyperv_guid *host_srv_id)
382 {
383 	struct vmbus_softc *sc = vmbus_get_softc();
384 	struct vmbus_chanmsg_tl_connect *req;
385 	struct vmbus_msghc *mh;
386 	int error;
387 
388 	if (!sc)
389 		return ENXIO;
390 
391 	mh = vmbus_msghc_get(sc, sizeof(*req));
392 	if (mh == NULL) {
393 		device_printf(sc->vmbus_dev,
394 		    "can not get msg hypercall for tl connect\n");
395 		return ENXIO;
396 	}
397 
398 	req = vmbus_msghc_dataptr(mh);
399 	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_TL_CONN;
400 	req->guest_endpoint_id = *guest_srv_id;
401 	req->host_service_id = *host_srv_id;
402 
403 	error = vmbus_msghc_exec_noresult(mh);
404 	vmbus_msghc_put(sc, mh);
405 
406 	if (error) {
407 		device_printf(sc->vmbus_dev,
408 		    "tl connect msg hypercall failed\n");
409 	}
410 
411 	return error;
412 }
413 
414 static int
415 vmbus_connect(struct vmbus_softc *sc, uint32_t version)
416 {
417 	struct vmbus_chanmsg_connect *req;
418 	const struct vmbus_message *msg;
419 	struct vmbus_msghc *mh;
420 	int error, done = 0;
421 
422 	mh = vmbus_msghc_get(sc, sizeof(*req));
423 	if (mh == NULL)
424 		return ENXIO;
425 
426 	req = vmbus_msghc_dataptr(mh);
427 	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CONNECT;
428 	req->chm_ver = version;
429 	req->chm_evtflags = pmap_kextract((vm_offset_t)sc->vmbus_evtflags);
430 	req->chm_mnf1 = pmap_kextract((vm_offset_t)sc->vmbus_mnf1);
431 	req->chm_mnf2 = pmap_kextract((vm_offset_t)sc->vmbus_mnf2);
432 
433 	error = vmbus_msghc_exec(sc, mh);
434 	if (error) {
435 		vmbus_msghc_put(sc, mh);
436 		return error;
437 	}
438 
439 	msg = vmbus_msghc_wait_result(sc, mh);
440 	done = ((const struct vmbus_chanmsg_connect_resp *)
441 	    msg->msg_data)->chm_done;
442 
443 	vmbus_msghc_put(sc, mh);
444 
445 	return (done ? 0 : EOPNOTSUPP);
446 }
447 
448 static int
449 vmbus_init(struct vmbus_softc *sc)
450 {
451 	int i;
452 
453 	for (i = 0; i < nitems(vmbus_version); ++i) {
454 		int error;
455 
456 		error = vmbus_connect(sc, vmbus_version[i]);
457 		if (!error) {
458 			vmbus_current_version = vmbus_version[i];
459 			sc->vmbus_version = vmbus_version[i];
460 			device_printf(sc->vmbus_dev, "version %u.%u\n",
461 			    VMBUS_VERSION_MAJOR(sc->vmbus_version),
462 			    VMBUS_VERSION_MINOR(sc->vmbus_version));
463 			return 0;
464 		}
465 	}
466 	return ENXIO;
467 }
468 
469 static void
470 vmbus_disconnect(struct vmbus_softc *sc)
471 {
472 	struct vmbus_chanmsg_disconnect *req;
473 	struct vmbus_msghc *mh;
474 	int error;
475 
476 	mh = vmbus_msghc_get(sc, sizeof(*req));
477 	if (mh == NULL) {
478 		device_printf(sc->vmbus_dev,
479 		    "can not get msg hypercall for disconnect\n");
480 		return;
481 	}
482 
483 	req = vmbus_msghc_dataptr(mh);
484 	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_DISCONNECT;
485 
486 	error = vmbus_msghc_exec_noresult(mh);
487 	vmbus_msghc_put(sc, mh);
488 
489 	if (error) {
490 		device_printf(sc->vmbus_dev,
491 		    "disconnect msg hypercall failed\n");
492 	}
493 }
494 
495 static int
496 vmbus_req_channels(struct vmbus_softc *sc)
497 {
498 	struct vmbus_chanmsg_chrequest *req;
499 	struct vmbus_msghc *mh;
500 	int error;
501 
502 	mh = vmbus_msghc_get(sc, sizeof(*req));
503 	if (mh == NULL)
504 		return ENXIO;
505 
506 	req = vmbus_msghc_dataptr(mh);
507 	req->chm_hdr.chm_type = VMBUS_CHANMSG_TYPE_CHREQUEST;
508 
509 	error = vmbus_msghc_exec_noresult(mh);
510 	vmbus_msghc_put(sc, mh);
511 
512 	return error;
513 }
514 
515 static void
516 vmbus_scan_done_task(void *xsc, int pending __unused)
517 {
518 	struct vmbus_softc *sc = xsc;
519 
520 	bus_topo_lock();
521 	sc->vmbus_scandone = true;
522 	bus_topo_unlock();
523 	wakeup(&sc->vmbus_scandone);
524 }
525 
526 static void
527 vmbus_scan_done(struct vmbus_softc *sc,
528     const struct vmbus_message *msg __unused)
529 {
530 
531 	taskqueue_enqueue(sc->vmbus_devtq, &sc->vmbus_scandone_task);
532 }
533 
534 static int
535 vmbus_scan(struct vmbus_softc *sc)
536 {
537 	int error;
538 
539 	/*
540 	 * Identify, probe and attach for non-channel devices.
541 	 */
542 	bus_generic_probe(sc->vmbus_dev);
543 	bus_generic_attach(sc->vmbus_dev);
544 
545 	/*
546 	 * This taskqueue serializes vmbus devices' attach and detach
547 	 * for channel offer and rescind messages.
548 	 */
549 	sc->vmbus_devtq = taskqueue_create("vmbus dev", M_WAITOK,
550 	    taskqueue_thread_enqueue, &sc->vmbus_devtq);
551 	taskqueue_start_threads(&sc->vmbus_devtq, 1, PI_NET, "vmbusdev");
552 	TASK_INIT(&sc->vmbus_scandone_task, 0, vmbus_scan_done_task, sc);
553 
554 	/*
555 	 * This taskqueue handles sub-channel detach, so that vmbus
556 	 * device's detach running in vmbus_devtq can drain its sub-
557 	 * channels.
558 	 */
559 	sc->vmbus_subchtq = taskqueue_create("vmbus subch", M_WAITOK,
560 	    taskqueue_thread_enqueue, &sc->vmbus_subchtq);
561 	taskqueue_start_threads(&sc->vmbus_subchtq, 1, PI_NET, "vmbussch");
562 
563 	/*
564 	 * Start vmbus scanning.
565 	 */
566 	error = vmbus_req_channels(sc);
567 	if (error) {
568 		device_printf(sc->vmbus_dev, "channel request failed: %d\n",
569 		    error);
570 		return (error);
571 	}
572 
573 	/*
574 	 * Wait for all vmbus devices from the initial channel offers to be
575 	 * attached.
576 	 */
577 	bus_topo_assert();
578 	while (!sc->vmbus_scandone)
579 		mtx_sleep(&sc->vmbus_scandone, bus_topo_mtx(), 0, "vmbusdev", 0);
580 
581 	if (bootverbose) {
582 		device_printf(sc->vmbus_dev, "device scan, probe and attach "
583 		    "done\n");
584 	}
585 	return (0);
586 }
587 
588 static void
589 vmbus_scan_teardown(struct vmbus_softc *sc)
590 {
591 
592 	bus_topo_assert();
593 	if (sc->vmbus_devtq != NULL) {
594 		bus_topo_unlock();
595 		taskqueue_free(sc->vmbus_devtq);
596 		bus_topo_lock();
597 		sc->vmbus_devtq = NULL;
598 	}
599 	if (sc->vmbus_subchtq != NULL) {
600 		bus_topo_unlock();
601 		taskqueue_free(sc->vmbus_subchtq);
602 		bus_topo_lock();
603 		sc->vmbus_subchtq = NULL;
604 	}
605 }
606 
607 static void
608 vmbus_chanmsg_handle(struct vmbus_softc *sc, const struct vmbus_message *msg)
609 {
610 	vmbus_chanmsg_proc_t msg_proc;
611 	uint32_t msg_type;
612 
613 	msg_type = ((const struct vmbus_chanmsg_hdr *)msg->msg_data)->chm_type;
614 	if (msg_type >= VMBUS_CHANMSG_TYPE_MAX) {
615 		device_printf(sc->vmbus_dev, "unknown message type 0x%x\n",
616 		    msg_type);
617 		return;
618 	}
619 
620 	msg_proc = vmbus_chanmsg_handlers[msg_type];
621 	if (msg_proc != NULL)
622 		msg_proc(sc, msg);
623 
624 	/* Channel specific processing */
625 	vmbus_chan_msgproc(sc, msg);
626 }
627 
628 static void
629 vmbus_msg_task(void *xsc, int pending __unused)
630 {
631 	struct vmbus_softc *sc = xsc;
632 	volatile struct vmbus_message *msg;
633 
634 	msg = VMBUS_PCPU_GET(sc, message, curcpu) + VMBUS_SINT_MESSAGE;
635 	for (;;) {
636 		if (msg->msg_type == HYPERV_MSGTYPE_NONE) {
637 			/* No message */
638 			break;
639 		} else if (msg->msg_type == HYPERV_MSGTYPE_CHANNEL) {
640 			/* Channel message */
641 			vmbus_chanmsg_handle(sc,
642 			    __DEVOLATILE(const struct vmbus_message *, msg));
643 		}
644 
645 		msg->msg_type = HYPERV_MSGTYPE_NONE;
646 		/*
647 		 * Make sure the write to msg_type (i.e. set to
648 		 * HYPERV_MSGTYPE_NONE) happens before we read the
649 		 * msg_flags and EOMing. Otherwise, the EOMing will
650 		 * not deliver any more messages since there is no
651 		 * empty slot
652 		 *
653 		 * NOTE:
654 		 * mb() is used here, since atomic_thread_fence_seq_cst()
655 		 * will become compiler fence on UP kernel.
656 		 */
657 		mb();
658 		if (msg->msg_flags & VMBUS_MSGFLAG_PENDING) {
659 			/*
660 			 * This will cause message queue rescan to possibly
661 			 * deliver another msg from the hypervisor
662 			 */
663 			WRMSR(MSR_HV_EOM, 0);
664 		}
665 	}
666 }
667 static __inline int
668 vmbus_handle_intr1(struct vmbus_softc *sc, struct trapframe *frame, int cpu)
669 {
670 	volatile struct vmbus_message *msg;
671 	struct vmbus_message *msg_base;
672 
673 	msg_base = VMBUS_PCPU_GET(sc, message, cpu);
674 
675 	/*
676 	 * Check event timer.
677 	 *
678 	 * TODO: move this to independent IDT vector.
679 	 */
680 	vmbus_handle_timer_intr1(msg_base, frame);
681 	/*
682 	 * Check events.  Hot path for network and storage I/O data; high rate.
683 	 *
684 	 * NOTE:
685 	 * As recommended by the Windows guest fellows, we check events before
686 	 * checking messages.
687 	 */
688 	sc->vmbus_event_proc(sc, cpu);
689 
690 	/*
691 	 * Check messages.  Mainly management stuffs; ultra low rate.
692 	 */
693 	msg = msg_base + VMBUS_SINT_MESSAGE;
694 	if (__predict_false(msg->msg_type != HYPERV_MSGTYPE_NONE)) {
695 		taskqueue_enqueue(VMBUS_PCPU_GET(sc, message_tq, cpu),
696 		    VMBUS_PCPU_PTR(sc, message_task, cpu));
697 	}
698 
699 	return (FILTER_HANDLED);
700 }
701 
702 void
703 vmbus_handle_intr(struct trapframe *trap_frame)
704 {
705 	struct vmbus_softc *sc = vmbus_get_softc();
706 	int cpu = curcpu;
707 
708 	/*
709 	 * Disable preemption.
710 	 */
711 	critical_enter();
712 
713 	/*
714 	 * Do a little interrupt counting. This used x86 specific
715 	 * intrcnt_add function
716 	 */
717 #if !defined(__aarch64__)
718 	(*VMBUS_PCPU_GET(sc, intr_cnt, cpu))++;
719 #endif /* not for aarch64 */
720 	vmbus_handle_intr1(sc, trap_frame, cpu);
721 
722 	/*
723 	 * Enable preemption.
724 	 */
725 	critical_exit();
726 }
727 
728 static void
729 vmbus_synic_setup(void *xsc)
730 {
731 	struct vmbus_softc *sc = xsc;
732 	int cpu = curcpu;
733 	uint64_t val, orig;
734 	uint32_t sint;
735 
736 	if (hyperv_features & CPUID_HV_MSR_VP_INDEX) {
737 		/* Save virtual processor id. */
738 		VMBUS_PCPU_GET(sc, vcpuid, cpu) = RDMSR(MSR_HV_VP_INDEX);
739 	} else {
740 		/* Set virtual processor id to 0 for compatibility. */
741 		VMBUS_PCPU_GET(sc, vcpuid, cpu) = 0;
742 	}
743 
744 	/*
745 	 * Setup the SynIC message.
746 	 */
747 	orig = RDMSR(MSR_HV_SIMP);
748 	val = pmap_kextract((vm_offset_t)VMBUS_PCPU_GET(sc, message, cpu)) &
749 	    MSR_HV_SIMP_PGMASK;
750 	val |= MSR_HV_SIMP_ENABLE | (orig & MSR_HV_SIMP_RSVD_MASK);
751 	WRMSR(MSR_HV_SIMP, val);
752 	/*
753 	 * Setup the SynIC event flags.
754 	 */
755 	orig = RDMSR(MSR_HV_SIEFP);
756 	val = pmap_kextract((vm_offset_t)VMBUS_PCPU_GET(sc, event_flags, cpu)) &
757 	    MSR_HV_SIMP_PGMASK;
758 	val |= MSR_HV_SIEFP_ENABLE | (orig & MSR_HV_SIEFP_RSVD_MASK);
759 	WRMSR(MSR_HV_SIEFP, val);
760 
761 	/*
762 	 * Configure and unmask SINT for message and event flags.
763 	 */
764 	sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE;
765 	orig = RDMSR(sint);
766 	val = sc->vmbus_idtvec | MSR_HV_SINT_AUTOEOI |
767 	    (orig & MSR_HV_SINT_RSVD_MASK);
768 	WRMSR(sint, val);
769 
770 	/*
771 	 * Configure and unmask SINT for timer.
772 	 */
773 	vmbus_synic_setup1(sc);
774 	/*
775 	 * All done; enable SynIC.
776 	 */
777 	orig = RDMSR(MSR_HV_SCONTROL);
778 	val = MSR_HV_SCTRL_ENABLE | (orig & MSR_HV_SCTRL_RSVD_MASK);
779 	WRMSR(MSR_HV_SCONTROL, val);
780 }
781 
782 static void
783 vmbus_synic_teardown(void *arg)
784 {
785 	uint64_t orig;
786 	uint32_t sint;
787 
788 	/*
789 	 * Disable SynIC.
790 	 */
791 	orig = RDMSR(MSR_HV_SCONTROL);
792 	WRMSR(MSR_HV_SCONTROL, (orig & MSR_HV_SCTRL_RSVD_MASK));
793 
794 	/*
795 	 * Mask message and event flags SINT.
796 	 */
797 	sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE;
798 	orig = RDMSR(sint);
799 	WRMSR(sint, orig | MSR_HV_SINT_MASKED);
800 
801 	/*
802 	 * Mask timer SINT.
803 	 */
804 	vmbus_synic_teardown1();
805 	/*
806 	 * Teardown SynIC message.
807 	 */
808 	orig = RDMSR(MSR_HV_SIMP);
809 	WRMSR(MSR_HV_SIMP, (orig & MSR_HV_SIMP_RSVD_MASK));
810 
811 	/*
812 	 * Teardown SynIC event flags.
813 	 */
814 	orig = RDMSR(MSR_HV_SIEFP);
815 	WRMSR(MSR_HV_SIEFP, (orig & MSR_HV_SIEFP_RSVD_MASK));
816 }
817 
818 static int
819 vmbus_dma_alloc(struct vmbus_softc *sc)
820 {
821 	uint8_t *evtflags;
822 	int cpu;
823 
824 	CPU_FOREACH(cpu) {
825 		void *ptr;
826 
827 		/*
828 		 * Per-cpu messages and event flags.
829 		 */
830 		ptr = contigmalloc(PAGE_SIZE, M_DEVBUF, M_WAITOK | M_ZERO,
831 		    0ul, ~0ul, PAGE_SIZE, 0);
832 		if (ptr == NULL)
833 			return ENOMEM;
834 		VMBUS_PCPU_GET(sc, message, cpu) = ptr;
835 
836 		ptr = contigmalloc(PAGE_SIZE, M_DEVBUF, M_WAITOK | M_ZERO,
837 		    0ul, ~0ul, PAGE_SIZE, 0);
838 		if (ptr == NULL)
839 			return ENOMEM;
840 		VMBUS_PCPU_GET(sc, event_flags, cpu) = ptr;
841 	}
842 
843 	evtflags = contigmalloc(PAGE_SIZE, M_DEVBUF, M_WAITOK | M_ZERO,
844 	    0ul, ~0ul, PAGE_SIZE, 0);
845 	if (evtflags == NULL)
846 		return ENOMEM;
847 	sc->vmbus_rx_evtflags = (u_long *)evtflags;
848 	sc->vmbus_tx_evtflags = (u_long *)(evtflags + (PAGE_SIZE / 2));
849 	sc->vmbus_evtflags = evtflags;
850 
851 	sc->vmbus_mnf1 = contigmalloc(PAGE_SIZE, M_DEVBUF, M_WAITOK | M_ZERO,
852 	    0ul, ~0ul, PAGE_SIZE, 0);
853 	if (sc->vmbus_mnf1 == NULL)
854 		return ENOMEM;
855 
856 	sc->vmbus_mnf2 = contigmalloc(sizeof(struct vmbus_mnf), M_DEVBUF,
857 	    M_WAITOK | M_ZERO, 0ul, ~0ul, PAGE_SIZE, 0);
858 	if (sc->vmbus_mnf2 == NULL)
859 		return ENOMEM;
860 
861 	return 0;
862 }
863 
864 static void
865 vmbus_dma_free(struct vmbus_softc *sc)
866 {
867 	int cpu;
868 
869 	if (sc->vmbus_evtflags != NULL) {
870 		contigfree(sc->vmbus_evtflags, PAGE_SIZE, M_DEVBUF);
871 		sc->vmbus_evtflags = NULL;
872 		sc->vmbus_rx_evtflags = NULL;
873 		sc->vmbus_tx_evtflags = NULL;
874 	}
875 	if (sc->vmbus_mnf1 != NULL) {
876 		contigfree(sc->vmbus_mnf1, PAGE_SIZE, M_DEVBUF);
877 		sc->vmbus_mnf1 = NULL;
878 	}
879 	if (sc->vmbus_mnf2 != NULL) {
880 		contigfree(sc->vmbus_mnf2, sizeof(struct vmbus_mnf), M_DEVBUF);
881 		sc->vmbus_mnf2 = NULL;
882 	}
883 
884 	CPU_FOREACH(cpu) {
885 		if (VMBUS_PCPU_GET(sc, message, cpu) != NULL) {
886 			contigfree(VMBUS_PCPU_GET(sc, message, cpu), PAGE_SIZE,
887 			    M_DEVBUF);
888 			VMBUS_PCPU_GET(sc, message, cpu) = NULL;
889 		}
890 		if (VMBUS_PCPU_GET(sc, event_flags, cpu) != NULL) {
891 			contigfree(VMBUS_PCPU_GET(sc, event_flags, cpu),
892 			    PAGE_SIZE, M_DEVBUF);
893 			VMBUS_PCPU_GET(sc, event_flags, cpu) = NULL;
894 		}
895 	}
896 }
897 
898 static int
899 vmbus_intr_setup(struct vmbus_softc *sc)
900 {
901 	int cpu;
902 
903 	CPU_FOREACH(cpu) {
904 		char buf[MAXCOMLEN + 1];
905 		cpuset_t cpu_mask;
906 
907 		/* Allocate an interrupt counter for Hyper-V interrupt */
908 		snprintf(buf, sizeof(buf), "cpu%d:hyperv", cpu);
909 #if !defined(__aarch64__)
910 		intrcnt_add(buf, VMBUS_PCPU_PTR(sc, intr_cnt, cpu));
911 #endif /* not for aarch64 */
912 		/*
913 		 * Setup taskqueue to handle events.  Task will be per-
914 		 * channel.
915 		 */
916 		VMBUS_PCPU_GET(sc, event_tq, cpu) = taskqueue_create_fast(
917 		    "hyperv event", M_WAITOK, taskqueue_thread_enqueue,
918 		    VMBUS_PCPU_PTR(sc, event_tq, cpu));
919 		if (vmbus_pin_evttask) {
920 			CPU_SETOF(cpu, &cpu_mask);
921 			taskqueue_start_threads_cpuset(
922 			    VMBUS_PCPU_PTR(sc, event_tq, cpu), 1, PI_NET,
923 			    &cpu_mask, "hvevent%d", cpu);
924 		} else {
925 			taskqueue_start_threads(
926 			    VMBUS_PCPU_PTR(sc, event_tq, cpu), 1, PI_NET,
927 			    "hvevent%d", cpu);
928 		}
929 
930 		/*
931 		 * Setup tasks and taskqueues to handle messages.
932 		 */
933 		VMBUS_PCPU_GET(sc, message_tq, cpu) = taskqueue_create_fast(
934 		    "hyperv msg", M_WAITOK, taskqueue_thread_enqueue,
935 		    VMBUS_PCPU_PTR(sc, message_tq, cpu));
936 		CPU_SETOF(cpu, &cpu_mask);
937 		taskqueue_start_threads_cpuset(
938 		    VMBUS_PCPU_PTR(sc, message_tq, cpu), 1, PI_NET, &cpu_mask,
939 		    "hvmsg%d", cpu);
940 		TASK_INIT(VMBUS_PCPU_PTR(sc, message_task, cpu), 0,
941 		    vmbus_msg_task, sc);
942 	}
943 	return (vmbus_setup_intr1(sc));
944 }
945 static void
946 vmbus_intr_teardown(struct vmbus_softc *sc)
947 {
948 	vmbus_intr_teardown1(sc);
949 }
950 
951 static int
952 vmbus_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
953 {
954 	return (ENOENT);
955 }
956 
957 static int
958 vmbus_child_pnpinfo(device_t dev, device_t child, struct sbuf *sb)
959 {
960 	const struct vmbus_channel *chan;
961 	char guidbuf[HYPERV_GUID_STRLEN];
962 
963 	chan = vmbus_get_channel(child);
964 	if (chan == NULL) {
965 		/* Event timer device, which does not belong to a channel */
966 		return (0);
967 	}
968 
969 	hyperv_guid2str(&chan->ch_guid_type, guidbuf, sizeof(guidbuf));
970 	sbuf_printf(sb, "classid=%s", guidbuf);
971 
972 	hyperv_guid2str(&chan->ch_guid_inst, guidbuf, sizeof(guidbuf));
973 	sbuf_printf(sb, " deviceid=%s", guidbuf);
974 
975 	return (0);
976 }
977 
978 int
979 vmbus_add_child(struct vmbus_channel *chan)
980 {
981 	struct vmbus_softc *sc = chan->ch_vmbus;
982 	device_t parent = sc->vmbus_dev;
983 
984 	bus_topo_lock();
985 	chan->ch_dev = device_add_child(parent, NULL, -1);
986 	if (chan->ch_dev == NULL) {
987 		bus_topo_unlock();
988 		device_printf(parent, "device_add_child for chan%u failed\n",
989 		    chan->ch_id);
990 		return (ENXIO);
991 	}
992 	device_set_ivars(chan->ch_dev, chan);
993 	device_probe_and_attach(chan->ch_dev);
994 	bus_topo_unlock();
995 
996 	return (0);
997 }
998 
999 int
1000 vmbus_delete_child(struct vmbus_channel *chan)
1001 {
1002 	int error = 0;
1003 
1004 	bus_topo_lock();
1005 	if (chan->ch_dev != NULL) {
1006 		error = device_delete_child(chan->ch_vmbus->vmbus_dev,
1007 		    chan->ch_dev);
1008 		chan->ch_dev = NULL;
1009 	}
1010 	bus_topo_unlock();
1011 	return (error);
1012 }
1013 
1014 static int
1015 vmbus_sysctl_version(SYSCTL_HANDLER_ARGS)
1016 {
1017 	struct vmbus_softc *sc = arg1;
1018 	char verstr[16];
1019 
1020 	snprintf(verstr, sizeof(verstr), "%u.%u",
1021 	    VMBUS_VERSION_MAJOR(sc->vmbus_version),
1022 	    VMBUS_VERSION_MINOR(sc->vmbus_version));
1023 	return sysctl_handle_string(oidp, verstr, sizeof(verstr), req);
1024 }
1025 
1026 /*
1027  * We need the function to make sure the MMIO resource is allocated from the
1028  * ranges found in _CRS.
1029  *
1030  * For the release function, we can use bus_generic_release_resource().
1031  */
1032 static struct resource *
1033 vmbus_alloc_resource(device_t dev, device_t child, int type, int *rid,
1034     rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
1035 {
1036 	device_t parent = device_get_parent(dev);
1037 	struct resource *res;
1038 
1039 #ifdef NEW_PCIB
1040 	if (type == SYS_RES_MEMORY) {
1041 		struct vmbus_softc *sc = device_get_softc(dev);
1042 
1043 		res = pcib_host_res_alloc(&sc->vmbus_mmio_res, child, type,
1044 		    rid, start, end, count, flags);
1045 	} else
1046 #endif
1047 	{
1048 		res = BUS_ALLOC_RESOURCE(parent, child, type, rid, start,
1049 		    end, count, flags);
1050 	}
1051 
1052 	return (res);
1053 }
1054 
1055 static int
1056 vmbus_alloc_msi(device_t bus, device_t dev, int count, int maxcount, int *irqs)
1057 {
1058 
1059 	return (PCIB_ALLOC_MSI(device_get_parent(bus), dev, count, maxcount,
1060 	    irqs));
1061 }
1062 
1063 static int
1064 vmbus_release_msi(device_t bus, device_t dev, int count, int *irqs)
1065 {
1066 
1067 	return (PCIB_RELEASE_MSI(device_get_parent(bus), dev, count, irqs));
1068 }
1069 
1070 static int
1071 vmbus_alloc_msix(device_t bus, device_t dev, int *irq)
1072 {
1073 
1074 	return (PCIB_ALLOC_MSIX(device_get_parent(bus), dev, irq));
1075 }
1076 
1077 static int
1078 vmbus_release_msix(device_t bus, device_t dev, int irq)
1079 {
1080 
1081 	return (PCIB_RELEASE_MSIX(device_get_parent(bus), dev, irq));
1082 }
1083 
1084 static int
1085 vmbus_map_msi(device_t bus, device_t dev, int irq, uint64_t *addr,
1086 	uint32_t *data)
1087 {
1088 
1089 	return (PCIB_MAP_MSI(device_get_parent(bus), dev, irq, addr, data));
1090 }
1091 
1092 static uint32_t
1093 vmbus_get_version_method(device_t bus, device_t dev)
1094 {
1095 	struct vmbus_softc *sc = device_get_softc(bus);
1096 
1097 	return sc->vmbus_version;
1098 }
1099 
1100 static int
1101 vmbus_probe_guid_method(device_t bus, device_t dev,
1102     const struct hyperv_guid *guid)
1103 {
1104 	const struct vmbus_channel *chan = vmbus_get_channel(dev);
1105 
1106 	if (memcmp(&chan->ch_guid_type, guid, sizeof(struct hyperv_guid)) == 0)
1107 		return 0;
1108 	return ENXIO;
1109 }
1110 
1111 static uint32_t
1112 vmbus_get_vcpu_id_method(device_t bus, device_t dev, int cpu)
1113 {
1114 	const struct vmbus_softc *sc = device_get_softc(bus);
1115 
1116 	return (VMBUS_PCPU_GET(sc, vcpuid, cpu));
1117 }
1118 
1119 static struct taskqueue *
1120 vmbus_get_eventtq_method(device_t bus, device_t dev __unused, int cpu)
1121 {
1122 	const struct vmbus_softc *sc = device_get_softc(bus);
1123 
1124 	KASSERT(cpu >= 0 && cpu < mp_ncpus, ("invalid cpu%d", cpu));
1125 	return (VMBUS_PCPU_GET(sc, event_tq, cpu));
1126 }
1127 
1128 #ifdef NEW_PCIB
1129 #define VTPM_BASE_ADDR 0xfed40000
1130 #define FOUR_GB (1ULL << 32)
1131 
1132 enum parse_pass { parse_64, parse_32 };
1133 
1134 struct parse_context {
1135 	device_t vmbus_dev;
1136 	enum parse_pass pass;
1137 };
1138 
1139 static ACPI_STATUS
1140 parse_crs(ACPI_RESOURCE *res, void *ctx)
1141 {
1142 	const struct parse_context *pc = ctx;
1143 	device_t vmbus_dev = pc->vmbus_dev;
1144 
1145 	struct vmbus_softc *sc = device_get_softc(vmbus_dev);
1146 	UINT64 start, end;
1147 
1148 	switch (res->Type) {
1149 	case ACPI_RESOURCE_TYPE_ADDRESS32:
1150 		start = res->Data.Address32.Address.Minimum;
1151 		end = res->Data.Address32.Address.Maximum;
1152 		break;
1153 
1154 	case ACPI_RESOURCE_TYPE_ADDRESS64:
1155 		start = res->Data.Address64.Address.Minimum;
1156 		end = res->Data.Address64.Address.Maximum;
1157 		break;
1158 
1159 	default:
1160 		/* Unused types. */
1161 		return (AE_OK);
1162 	}
1163 
1164 	/*
1165 	 * We don't use <1MB addresses.
1166 	 */
1167 	if (end < 0x100000)
1168 		return (AE_OK);
1169 
1170 	/* Don't conflict with vTPM. */
1171 	if (end >= VTPM_BASE_ADDR && start < VTPM_BASE_ADDR)
1172 		end = VTPM_BASE_ADDR - 1;
1173 
1174 	if ((pc->pass == parse_32 && start < FOUR_GB) ||
1175 	    (pc->pass == parse_64 && start >= FOUR_GB))
1176 		pcib_host_res_decodes(&sc->vmbus_mmio_res, SYS_RES_MEMORY,
1177 		    start, end, 0);
1178 
1179 	return (AE_OK);
1180 }
1181 
1182 static void
1183 vmbus_get_crs(device_t dev, device_t vmbus_dev, enum parse_pass pass)
1184 {
1185 	struct parse_context pc;
1186 	ACPI_STATUS status;
1187 
1188 	if (bootverbose)
1189 		device_printf(dev, "walking _CRS, pass=%d\n", pass);
1190 
1191 	pc.vmbus_dev = vmbus_dev;
1192 	pc.pass = pass;
1193 	status = AcpiWalkResources(acpi_get_handle(dev), "_CRS",
1194 			parse_crs, &pc);
1195 
1196 	if (bootverbose && ACPI_FAILURE(status))
1197 		device_printf(dev, "_CRS: not found, pass=%d\n", pass);
1198 }
1199 
1200 static void
1201 vmbus_get_mmio_res_pass(device_t dev, enum parse_pass pass)
1202 {
1203 	device_t acpi0, parent;
1204 
1205 	parent = device_get_parent(dev);
1206 
1207 	acpi0 = device_get_parent(parent);
1208 	if (strcmp("acpi0", device_get_nameunit(acpi0)) == 0) {
1209 		device_t *children;
1210 		int count;
1211 
1212 		/*
1213 		 * Try to locate VMBUS resources and find _CRS on them.
1214 		 */
1215 		if (device_get_children(acpi0, &children, &count) == 0) {
1216 			int i;
1217 
1218 			for (i = 0; i < count; ++i) {
1219 				if (!device_is_attached(children[i]))
1220 					continue;
1221 
1222 				if (strcmp("vmbus_res",
1223 				    device_get_name(children[i])) == 0)
1224 					vmbus_get_crs(children[i], dev, pass);
1225 			}
1226 			free(children, M_TEMP);
1227 		}
1228 
1229 		/*
1230 		 * Try to find _CRS on acpi.
1231 		 */
1232 		vmbus_get_crs(acpi0, dev, pass);
1233 	} else {
1234 		device_printf(dev, "not grandchild of acpi\n");
1235 	}
1236 
1237 	/*
1238 	 * Try to find _CRS on parent.
1239 	 */
1240 	vmbus_get_crs(parent, dev, pass);
1241 }
1242 
1243 static void
1244 vmbus_get_mmio_res(device_t dev)
1245 {
1246 	struct vmbus_softc *sc = device_get_softc(dev);
1247 	/*
1248 	 * We walk the resources twice to make sure that: in the resource
1249 	 * list, the 32-bit resources appear behind the 64-bit resources.
1250 	 * NB: resource_list_add() uses INSERT_TAIL. This way, when we
1251 	 * iterate through the list to find a range for a 64-bit BAR in
1252 	 * vmbus_alloc_resource(), we can make sure we try to use >4GB
1253 	 * ranges first.
1254 	 */
1255 	pcib_host_res_init(dev, &sc->vmbus_mmio_res);
1256 
1257 	vmbus_get_mmio_res_pass(dev, parse_64);
1258 	vmbus_get_mmio_res_pass(dev, parse_32);
1259 }
1260 
1261 /*
1262  * On Gen2 VMs, Hyper-V provides mmio space for framebuffer.
1263  * This mmio address range is not useable for other PCI devices.
1264  * Currently only efifb and vbefb drivers are using this range without
1265  * reserving it from system.
1266  * Therefore, vmbus driver reserves it before any other PCI device
1267  * drivers start to request mmio addresses.
1268  */
1269 static struct resource *hv_fb_res;
1270 
1271 static void
1272 vmbus_fb_mmio_res(device_t dev)
1273 {
1274 	struct efi_fb *efifb;
1275 #if !defined(__aarch64__)
1276 	struct vbe_fb *vbefb;
1277 #endif /* aarch64 */
1278 	rman_res_t fb_start, fb_end, fb_count;
1279 	int fb_height, fb_width;
1280 	caddr_t kmdp;
1281 
1282 	struct vmbus_softc *sc = device_get_softc(dev);
1283 	int rid = 0;
1284 
1285 	kmdp = preload_search_by_type("elf kernel");
1286 	if (kmdp == NULL)
1287 		kmdp = preload_search_by_type("elf64 kernel");
1288 	efifb = (struct efi_fb *)preload_search_info(kmdp,
1289 	    MODINFO_METADATA | MODINFOMD_EFI_FB);
1290 #if !defined(__aarch64__)
1291 	vbefb = (struct vbe_fb *)preload_search_info(kmdp,
1292 	    MODINFO_METADATA | MODINFOMD_VBE_FB);
1293 #endif /* aarch64 */
1294 	if (efifb != NULL) {
1295 		fb_start = efifb->fb_addr;
1296 		fb_end = efifb->fb_addr + efifb->fb_size;
1297 		fb_count = efifb->fb_size;
1298 		fb_height = efifb->fb_height;
1299 		fb_width = efifb->fb_width;
1300 	}
1301 #if !defined(__aarch64__)
1302 	else if (vbefb != NULL) {
1303 		fb_start = vbefb->fb_addr;
1304 		fb_end = vbefb->fb_addr + vbefb->fb_size;
1305 		fb_count = vbefb->fb_size;
1306 		fb_height = vbefb->fb_height;
1307 		fb_width = vbefb->fb_width;
1308 	}
1309 #endif /* aarch64 */
1310 	else {
1311 		if (bootverbose)
1312 			device_printf(dev,
1313 			    "no preloaded kernel fb information\n");
1314 		/* We are on Gen1 VM, just return. */
1315 		return;
1316 	}
1317 
1318 	if (bootverbose)
1319 		device_printf(dev,
1320 		    "fb: fb_addr: %#jx, size: %#jx, "
1321 		    "actual size needed: 0x%x\n",
1322 		    fb_start, fb_count, fb_height * fb_width);
1323 
1324 	hv_fb_res = pcib_host_res_alloc(&sc->vmbus_mmio_res, dev,
1325 	    SYS_RES_MEMORY, &rid, fb_start, fb_end, fb_count,
1326 	    RF_ACTIVE | rman_make_alignment_flags(PAGE_SIZE));
1327 
1328 	if (hv_fb_res && bootverbose)
1329 		device_printf(dev,
1330 		    "successfully reserved memory for framebuffer "
1331 		    "starting at %#jx, size %#jx\n",
1332 		    fb_start, fb_count);
1333 }
1334 
1335 static void
1336 vmbus_free_mmio_res(device_t dev)
1337 {
1338 	struct vmbus_softc *sc = device_get_softc(dev);
1339 
1340 	pcib_host_res_free(dev, &sc->vmbus_mmio_res);
1341 
1342 	if (hv_fb_res)
1343 		hv_fb_res = NULL;
1344 }
1345 #endif	/* NEW_PCIB */
1346 
1347 static void
1348 vmbus_identify(driver_t *driver, device_t parent)
1349 {
1350 
1351 	if (device_get_unit(parent) != 0 || vm_guest != VM_GUEST_HV ||
1352 	    (hyperv_features & CPUID_HV_MSR_SYNIC) == 0)
1353 		return;
1354 	device_add_child(parent, "vmbus", -1);
1355 }
1356 
1357 static int
1358 vmbus_probe(device_t dev)
1359 {
1360 
1361 	if (device_get_unit(dev) != 0 || vm_guest != VM_GUEST_HV ||
1362 	    (hyperv_features & CPUID_HV_MSR_SYNIC) == 0)
1363 		return (ENXIO);
1364 
1365 	device_set_desc(dev, "Hyper-V Vmbus");
1366 	return (BUS_PROBE_DEFAULT);
1367 }
1368 
1369 /**
1370  * @brief Main vmbus driver initialization routine.
1371  *
1372  * Here, we
1373  * - initialize the vmbus driver context
1374  * - setup various driver entry points
1375  * - invoke the vmbus hv main init routine
1376  * - get the irq resource
1377  * - invoke the vmbus to add the vmbus root device
1378  * - setup the vmbus root device
1379  * - retrieve the channel offers
1380  */
1381 static int
1382 vmbus_doattach(struct vmbus_softc *sc)
1383 {
1384 	struct sysctl_oid_list *child;
1385 	struct sysctl_ctx_list *ctx;
1386 	int ret;
1387 
1388 	if (sc->vmbus_flags & VMBUS_FLAG_ATTACHED)
1389 		return (0);
1390 
1391 #ifdef NEW_PCIB
1392 	vmbus_get_mmio_res(sc->vmbus_dev);
1393 	vmbus_fb_mmio_res(sc->vmbus_dev);
1394 #endif
1395 
1396 	sc->vmbus_flags |= VMBUS_FLAG_ATTACHED;
1397 
1398 	sc->vmbus_gpadl = VMBUS_GPADL_START;
1399 	mtx_init(&sc->vmbus_prichan_lock, "vmbus prichan", NULL, MTX_DEF);
1400 	TAILQ_INIT(&sc->vmbus_prichans);
1401 	mtx_init(&sc->vmbus_chan_lock, "vmbus channel", NULL, MTX_DEF);
1402 	TAILQ_INIT(&sc->vmbus_chans);
1403 	sc->vmbus_chmap = malloc(
1404 	    sizeof(struct vmbus_channel *) * VMBUS_CHAN_MAX, M_DEVBUF,
1405 	    M_WAITOK | M_ZERO);
1406 
1407 	/*
1408 	 * Create context for "post message" Hypercalls
1409 	 */
1410 	sc->vmbus_xc = vmbus_xact_ctx_create(bus_get_dma_tag(sc->vmbus_dev),
1411 	    HYPERCALL_POSTMSGIN_SIZE, VMBUS_MSG_SIZE,
1412 	    sizeof(struct vmbus_msghc));
1413 	if (sc->vmbus_xc == NULL) {
1414 		ret = ENXIO;
1415 		goto cleanup;
1416 	}
1417 
1418 	/*
1419 	 * Allocate DMA stuffs.
1420 	 */
1421 	ret = vmbus_dma_alloc(sc);
1422 	if (ret != 0)
1423 		goto cleanup;
1424 
1425 	/*
1426 	 * Setup interrupt.
1427 	 */
1428 	ret = vmbus_intr_setup(sc);
1429 	if (ret != 0)
1430 		goto cleanup;
1431 
1432 	/*
1433 	 * Setup SynIC.
1434 	 */
1435 	if (bootverbose)
1436 		device_printf(sc->vmbus_dev, "smp_started = %d\n", smp_started);
1437 	smp_rendezvous(NULL, vmbus_synic_setup, NULL, sc);
1438 	sc->vmbus_flags |= VMBUS_FLAG_SYNIC;
1439 
1440 	/*
1441 	 * Initialize vmbus, e.g. connect to Hypervisor.
1442 	 */
1443 	ret = vmbus_init(sc);
1444 	if (ret != 0)
1445 		goto cleanup;
1446 
1447 	if (sc->vmbus_version == VMBUS_VERSION_WS2008 ||
1448 	    sc->vmbus_version == VMBUS_VERSION_WIN7)
1449 		sc->vmbus_event_proc = vmbus_event_proc_compat;
1450 	else
1451 		sc->vmbus_event_proc = vmbus_event_proc;
1452 
1453 	ret = vmbus_scan(sc);
1454 	if (ret != 0)
1455 		goto cleanup;
1456 
1457 	ctx = device_get_sysctl_ctx(sc->vmbus_dev);
1458 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vmbus_dev));
1459 	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "version",
1460 	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
1461 	    vmbus_sysctl_version, "A", "vmbus version");
1462 
1463 	return (ret);
1464 
1465 cleanup:
1466 	vmbus_scan_teardown(sc);
1467 	vmbus_intr_teardown(sc);
1468 	vmbus_dma_free(sc);
1469 	if (sc->vmbus_xc != NULL) {
1470 		vmbus_xact_ctx_destroy(sc->vmbus_xc);
1471 		sc->vmbus_xc = NULL;
1472 	}
1473 	free(__DEVOLATILE(void *, sc->vmbus_chmap), M_DEVBUF);
1474 	mtx_destroy(&sc->vmbus_prichan_lock);
1475 	mtx_destroy(&sc->vmbus_chan_lock);
1476 
1477 	return (ret);
1478 }
1479 
1480 static void
1481 vmbus_event_proc_dummy(struct vmbus_softc *sc __unused, int cpu __unused)
1482 {
1483 }
1484 
1485 #if defined(EARLY_AP_STARTUP)
1486 
1487 static void
1488 vmbus_intrhook(void *xsc)
1489 {
1490 	struct vmbus_softc *sc = xsc;
1491 
1492 	if (bootverbose)
1493 		device_printf(sc->vmbus_dev, "intrhook\n");
1494 	vmbus_doattach(sc);
1495 	config_intrhook_disestablish(&sc->vmbus_intrhook);
1496 }
1497 
1498 #endif /* EARLY_AP_STARTUP */
1499 
1500 static int
1501 vmbus_attach(device_t dev)
1502 {
1503 	vmbus_sc = device_get_softc(dev);
1504 	vmbus_sc->vmbus_dev = dev;
1505 	vmbus_sc->vmbus_idtvec = -1;
1506 
1507 	/*
1508 	 * Event processing logic will be configured:
1509 	 * - After the vmbus protocol version negotiation.
1510 	 * - Before we request channel offers.
1511 	 */
1512 	vmbus_sc->vmbus_event_proc = vmbus_event_proc_dummy;
1513 
1514 #if defined(EARLY_AP_STARTUP)
1515 	/*
1516 	 * Defer the real attach until the pause(9) works as expected.
1517 	 */
1518 	vmbus_sc->vmbus_intrhook.ich_func = vmbus_intrhook;
1519 	vmbus_sc->vmbus_intrhook.ich_arg = vmbus_sc;
1520 	config_intrhook_establish(&vmbus_sc->vmbus_intrhook);
1521 #endif /* EARLY_AP_STARTUP  and aarch64 */
1522 
1523 	return (0);
1524 }
1525 
1526 static int
1527 vmbus_detach(device_t dev)
1528 {
1529 	struct vmbus_softc *sc = device_get_softc(dev);
1530 
1531 	bus_generic_detach(dev);
1532 	vmbus_chan_destroy_all(sc);
1533 
1534 	vmbus_scan_teardown(sc);
1535 
1536 	vmbus_disconnect(sc);
1537 
1538 	if (sc->vmbus_flags & VMBUS_FLAG_SYNIC) {
1539 		sc->vmbus_flags &= ~VMBUS_FLAG_SYNIC;
1540 		smp_rendezvous(NULL, vmbus_synic_teardown, NULL, NULL);
1541 	}
1542 
1543 	vmbus_intr_teardown(sc);
1544 	vmbus_dma_free(sc);
1545 
1546 	if (sc->vmbus_xc != NULL) {
1547 		vmbus_xact_ctx_destroy(sc->vmbus_xc);
1548 		sc->vmbus_xc = NULL;
1549 	}
1550 
1551 	free(__DEVOLATILE(void *, sc->vmbus_chmap), M_DEVBUF);
1552 	mtx_destroy(&sc->vmbus_prichan_lock);
1553 	mtx_destroy(&sc->vmbus_chan_lock);
1554 
1555 #ifdef NEW_PCIB
1556 	vmbus_free_mmio_res(dev);
1557 #endif
1558 
1559 #if defined(__aarch64__)
1560 	bus_release_resource(device_get_parent(dev), SYS_RES_IRQ, sc->vector,
1561 	    sc->ires);
1562 #endif
1563 	return (0);
1564 }
1565 
1566 #if !defined(EARLY_AP_STARTUP)
1567 
1568 static void
1569 vmbus_sysinit(void *arg __unused)
1570 {
1571 	struct vmbus_softc *sc = vmbus_get_softc();
1572 
1573 	if (vm_guest != VM_GUEST_HV || sc == NULL)
1574 		return;
1575 
1576 	vmbus_doattach(sc);
1577 }
1578 /*
1579  * NOTE:
1580  * We have to start as the last step of SI_SUB_SMP, i.e. after SMP is
1581  * initialized.
1582  */
1583 SYSINIT(vmbus_initialize, SI_SUB_SMP, SI_ORDER_ANY, vmbus_sysinit, NULL);
1584 #endif	/* !EARLY_AP_STARTUP */
1585