/*-
 * Copyright (c) 2009-2012,2016 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 * Copyright (c) 2012 Citrix Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * VM Bus Driver Implementation
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/rtprio.h>
#include <sys/interrupt.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <sys/mutex.h>
#include <sys/smp.h>

#include <machine/resource.h>
#include <sys/rman.h>

#include <machine/stdarg.h>
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/segments.h>
#include <sys/pcpu.h>
#include <x86/apicvar.h>

#include <dev/hyperv/include/hyperv.h>
#include <dev/hyperv/vmbus/hv_vmbus_priv.h>
#include <dev/hyperv/vmbus/hyperv_reg.h>
#include <dev/hyperv/vmbus/hyperv_var.h>
#include <dev/hyperv/vmbus/vmbus_reg.h>
#include <dev/hyperv/vmbus/vmbus_var.h>

#include <contrib/dev/acpica/include/acpi.h>
#include "acpi_if.h"

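/*
 * Softc of the single vmbus instance in the system; set in
 * vmbus_attach() and fetched through vmbus_get_softc().
 */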
struct vmbus_softc	*vmbus_sc;

extern inthand_t IDTVEC(vmbus_isr);

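/*
 * Drain the per-cpu SynIC message slot used for channel messages.
 * This runs from the per-cpu "hyperv msg" taskqueue; it keeps
 * consuming messages until the slot is empty, writing MSR_HV_EOM
 * whenever the hypervisor indicates that more messages are pending.
 */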
static void
vmbus_msg_task(void *xsc, int pending __unused)
{
	struct vmbus_softc *sc = xsc;
	volatile struct vmbus_message *msg;

	msg = VMBUS_PCPU_GET(sc, message, curcpu) + VMBUS_SINT_MESSAGE;
	for (;;) {
		if (msg->msg_type == VMBUS_MSGTYPE_NONE) {
			/* No message */
			break;
		} else if (msg->msg_type == VMBUS_MSGTYPE_CHANNEL) {
			/* Channel message */
			vmbus_chan_msgproc(sc,
			    __DEVOLATILE(const struct vmbus_message *, msg));
		}

		msg->msg_type = VMBUS_MSGTYPE_NONE;
		/*
		 * Make sure the write to msg_type (i.e. set to
		 * VMBUS_MSGTYPE_NONE) happens before we read the
		 * msg_flags and EOMing. Otherwise, the EOMing will
		 * not deliver any more messages since there is no
		 * empty slot.
		 *
		 * NOTE:
		 * mb() is used here, since atomic_thread_fence_seq_cst()
		 * will become a compiler fence on UP kernels.
		 */
		mb();
		if (msg->msg_flags & VMBUS_MSGFLAG_PENDING) {
			/*
			 * This will cause message queue rescan to possibly
			 * deliver another msg from the hypervisor
			 */
			wrmsr(MSR_HV_EOM, 0);
		}
	}
}

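/*
 * Interrupt handler body, running on the interrupted CPU with
 * preemption disabled by the caller.  It handles, in order: the event
 * timer message, channel events and, last, channel messages; the
 * latter are punted to the per-cpu message taskqueue.
 */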
static __inline int
vmbus_handle_intr1(struct vmbus_softc *sc, struct trapframe *frame, int cpu)
{
	volatile struct vmbus_message *msg;
	struct vmbus_message *msg_base;

	msg_base = VMBUS_PCPU_GET(sc, message, cpu);

	/*
	 * Check event timer.
	 *
	 * TODO: move this to an independent IDT vector.
	 */
	msg = msg_base + VMBUS_SINT_TIMER;
	if (msg->msg_type == VMBUS_MSGTYPE_TIMER_EXPIRED) {
		msg->msg_type = VMBUS_MSGTYPE_NONE;

		vmbus_et_intr(frame);

		/*
		 * Make sure the write to msg_type (i.e. set to
		 * VMBUS_MSGTYPE_NONE) happens before we read the
		 * msg_flags and EOMing. Otherwise, the EOMing will
		 * not deliver any more messages since there is no
		 * empty slot.
		 *
		 * NOTE:
		 * mb() is used here, since atomic_thread_fence_seq_cst()
		 * will become a compiler fence on UP kernels.
		 */
		mb();
		if (msg->msg_flags & VMBUS_MSGFLAG_PENDING) {
			/*
			 * This will cause message queue rescan to possibly
			 * deliver another msg from the hypervisor
			 */
			wrmsr(MSR_HV_EOM, 0);
		}
	}

	/*
	 * Check events.  Hot path for network and storage I/O data; high rate.
	 *
	 * NOTE:
	 * As recommended by the Windows guest fellows, we check events before
	 * checking messages.
	 */
	sc->vmbus_event_proc(sc, cpu);

	/*
	 * Check messages.  Mainly management stuff; ultra low rate.
	 */
	msg = msg_base + VMBUS_SINT_MESSAGE;
	if (__predict_false(msg->msg_type != VMBUS_MSGTYPE_NONE)) {
		taskqueue_enqueue(VMBUS_PCPU_GET(sc, message_tq, cpu),
		    VMBUS_PCPU_PTR(sc, message_task, cpu));
	}

	return (FILTER_HANDLED);
}

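/*
 * Interrupt entry, called from the low-level handler installed at the
 * IDT vector allocated for IDTVEC(vmbus_isr); it counts the interrupt
 * and runs vmbus_handle_intr1() for the current CPU inside a critical
 * section.
 */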
void
vmbus_handle_intr(struct trapframe *trap_frame)
{
	struct vmbus_softc *sc = vmbus_get_softc();
	int cpu = curcpu;

	/*
	 * Disable preemption.
	 */
	critical_enter();

	/*
	 * Do a little interrupt counting.
	 */
	(*VMBUS_PCPU_GET(sc, intr_cnt, cpu))++;

	vmbus_handle_intr1(sc, trap_frame, cpu);

	/*
	 * Enable preemption.
	 */
	critical_exit();
}

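/*
 * Per-cpu SynIC setup; executed on each CPU through smp_rendezvous().
 * It records the virtual processor id, points SIMP/SIEFP at the
 * per-cpu message and event flag pages, unmasks the message and timer
 * SINTs towards the shared IDT vector, and finally enables the SynIC.
 */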
static void
vmbus_synic_setup(void *xsc)
{
	struct vmbus_softc *sc = xsc;
	int cpu = curcpu;
	uint64_t val, orig;
	uint32_t sint;

	if (hyperv_features & CPUID_HV_MSR_VP_INDEX) {
		/*
		 * Save virtual processor id.
		 */
		VMBUS_PCPU_GET(sc, vcpuid, cpu) = rdmsr(MSR_HV_VP_INDEX);
	} else {
		/*
		 * XXX
		 * Virtual processor id is only used by a pretty broken
		 * channel selection code from storvsc.  It's nothing
		 * critical even if CPUID_HV_MSR_VP_INDEX is not set; keep
		 * moving on.
		 */
		VMBUS_PCPU_GET(sc, vcpuid, cpu) = cpu;
	}

	/*
	 * Setup the SynIC message.
	 */
	orig = rdmsr(MSR_HV_SIMP);
	val = MSR_HV_SIMP_ENABLE | (orig & MSR_HV_SIMP_RSVD_MASK) |
	    ((VMBUS_PCPU_GET(sc, message_dma.hv_paddr, cpu) >> PAGE_SHIFT) <<
	     MSR_HV_SIMP_PGSHIFT);
	wrmsr(MSR_HV_SIMP, val);

	/*
	 * Setup the SynIC event flags.
	 */
	orig = rdmsr(MSR_HV_SIEFP);
	val = MSR_HV_SIEFP_ENABLE | (orig & MSR_HV_SIEFP_RSVD_MASK) |
	    ((VMBUS_PCPU_GET(sc, event_flags_dma.hv_paddr, cpu)
	      >> PAGE_SHIFT) << MSR_HV_SIEFP_PGSHIFT);
	wrmsr(MSR_HV_SIEFP, val);

	/*
	 * Configure and unmask SINT for message and event flags.
	 */
	sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE;
	orig = rdmsr(sint);
	val = sc->vmbus_idtvec | MSR_HV_SINT_AUTOEOI |
	    (orig & MSR_HV_SINT_RSVD_MASK);
	wrmsr(sint, val);

	/*
	 * Configure and unmask SINT for timer.
	 */
	sint = MSR_HV_SINT0 + VMBUS_SINT_TIMER;
	orig = rdmsr(sint);
	val = sc->vmbus_idtvec | MSR_HV_SINT_AUTOEOI |
	    (orig & MSR_HV_SINT_RSVD_MASK);
	wrmsr(sint, val);

	/*
	 * All done; enable SynIC.
	 */
	orig = rdmsr(MSR_HV_SCONTROL);
	val = MSR_HV_SCTRL_ENABLE | (orig & MSR_HV_SCTRL_RSVD_MASK);
	wrmsr(MSR_HV_SCONTROL, val);
}

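/*
 * Undo vmbus_synic_setup() on the calling CPU: disable the SynIC,
 * mask the message and timer SINTs, and clear the SIMP/SIEFP base
 * registers.  Also run on each CPU through smp_rendezvous(), see
 * vmbus_detach().
 */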
static void
vmbus_synic_teardown(void *arg)
{
	uint64_t orig;
	uint32_t sint;

	/*
	 * Disable SynIC.
	 */
	orig = rdmsr(MSR_HV_SCONTROL);
	wrmsr(MSR_HV_SCONTROL, (orig & MSR_HV_SCTRL_RSVD_MASK));

	/*
	 * Mask message and event flags SINT.
	 */
	sint = MSR_HV_SINT0 + VMBUS_SINT_MESSAGE;
	orig = rdmsr(sint);
	wrmsr(sint, orig | MSR_HV_SINT_MASKED);

	/*
	 * Mask timer SINT.
	 */
	sint = MSR_HV_SINT0 + VMBUS_SINT_TIMER;
	orig = rdmsr(sint);
	wrmsr(sint, orig | MSR_HV_SINT_MASKED);

	/*
	 * Teardown SynIC message.
	 */
	orig = rdmsr(MSR_HV_SIMP);
	wrmsr(MSR_HV_SIMP, (orig & MSR_HV_SIMP_RSVD_MASK));

	/*
	 * Teardown SynIC event flags.
	 */
	orig = rdmsr(MSR_HV_SIEFP);
	wrmsr(MSR_HV_SIEFP, (orig & MSR_HV_SIEFP_RSVD_MASK));
}

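/*
 * Allocate the busdma memory vmbus needs: one SynIC message page and
 * one SynIC event flag page per cpu, plus the connection-wide event
 * flag page (rx/tx halves) and the two MNF pages.  Everything is page
 * sized, page aligned and zeroed.
 */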
static int
vmbus_dma_alloc(struct vmbus_softc *sc)
{
	bus_dma_tag_t parent_dtag;
	uint8_t *evtflags;
	int cpu;

	parent_dtag = bus_get_dma_tag(sc->vmbus_dev);
	CPU_FOREACH(cpu) {
		void *ptr;

		/*
		 * Per-cpu messages and event flags.
		 */
		ptr = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
		    PAGE_SIZE, VMBUS_PCPU_PTR(sc, message_dma, cpu),
		    BUS_DMA_WAITOK | BUS_DMA_ZERO);
		if (ptr == NULL)
			return ENOMEM;
		VMBUS_PCPU_GET(sc, message, cpu) = ptr;

		ptr = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
		    PAGE_SIZE, VMBUS_PCPU_PTR(sc, event_flags_dma, cpu),
		    BUS_DMA_WAITOK | BUS_DMA_ZERO);
		if (ptr == NULL)
			return ENOMEM;
		VMBUS_PCPU_GET(sc, event_flags, cpu) = ptr;
	}

	evtflags = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
	    PAGE_SIZE, &sc->vmbus_evtflags_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (evtflags == NULL)
		return ENOMEM;
	sc->vmbus_rx_evtflags = (u_long *)evtflags;
	sc->vmbus_tx_evtflags = (u_long *)(evtflags + (PAGE_SIZE / 2));
	sc->vmbus_evtflags = evtflags;

	sc->vmbus_mnf1 = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
	    PAGE_SIZE, &sc->vmbus_mnf1_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (sc->vmbus_mnf1 == NULL)
		return ENOMEM;

	sc->vmbus_mnf2 = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
	    PAGE_SIZE, &sc->vmbus_mnf2_dma, BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (sc->vmbus_mnf2 == NULL)
		return ENOMEM;

	return 0;
}

static void
vmbus_dma_free(struct vmbus_softc *sc)
{
	int cpu;

	if (sc->vmbus_evtflags != NULL) {
		hyperv_dmamem_free(&sc->vmbus_evtflags_dma, sc->vmbus_evtflags);
		sc->vmbus_evtflags = NULL;
		sc->vmbus_rx_evtflags = NULL;
		sc->vmbus_tx_evtflags = NULL;
	}
	if (sc->vmbus_mnf1 != NULL) {
		hyperv_dmamem_free(&sc->vmbus_mnf1_dma, sc->vmbus_mnf1);
		sc->vmbus_mnf1 = NULL;
	}
	if (sc->vmbus_mnf2 != NULL) {
		hyperv_dmamem_free(&sc->vmbus_mnf2_dma, sc->vmbus_mnf2);
		sc->vmbus_mnf2 = NULL;
	}

	CPU_FOREACH(cpu) {
		if (VMBUS_PCPU_GET(sc, message, cpu) != NULL) {
			hyperv_dmamem_free(
			    VMBUS_PCPU_PTR(sc, message_dma, cpu),
			    VMBUS_PCPU_GET(sc, message, cpu));
			VMBUS_PCPU_GET(sc, message, cpu) = NULL;
		}
		if (VMBUS_PCPU_GET(sc, event_flags, cpu) != NULL) {
			hyperv_dmamem_free(
			    VMBUS_PCPU_PTR(sc, event_flags_dma, cpu),
			    VMBUS_PCPU_GET(sc, event_flags, cpu));
			VMBUS_PCPU_GET(sc, event_flags, cpu) = NULL;
		}
	}
}

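/*
 * Set up everything the ISR needs: a per-cpu interrupt counter, a
 * per-cpu "hyperv event" taskqueue, a per-cpu "hyperv msg" taskqueue
 * with its message task, and a free IDT vector shared by all CPUs,
 * allocated through lapic_ipi_alloc().
 */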
static int
vmbus_intr_setup(struct vmbus_softc *sc)
{
	int cpu;

	CPU_FOREACH(cpu) {
		char buf[MAXCOMLEN + 1];
		cpuset_t cpu_mask;

		/* Allocate an interrupt counter for Hyper-V interrupt */
		snprintf(buf, sizeof(buf), "cpu%d:hyperv", cpu);
		intrcnt_add(buf, VMBUS_PCPU_PTR(sc, intr_cnt, cpu));

		/*
		 * Setup taskqueue to handle events.  Task will be per-
		 * channel.
		 */
		VMBUS_PCPU_GET(sc, event_tq, cpu) = taskqueue_create_fast(
		    "hyperv event", M_WAITOK, taskqueue_thread_enqueue,
		    VMBUS_PCPU_PTR(sc, event_tq, cpu));
		CPU_SETOF(cpu, &cpu_mask);
		taskqueue_start_threads_cpuset(
		    VMBUS_PCPU_PTR(sc, event_tq, cpu), 1, PI_NET, &cpu_mask,
		    "hvevent%d", cpu);

		/*
		 * Setup tasks and taskqueues to handle messages.
		 */
		VMBUS_PCPU_GET(sc, message_tq, cpu) = taskqueue_create_fast(
		    "hyperv msg", M_WAITOK, taskqueue_thread_enqueue,
		    VMBUS_PCPU_PTR(sc, message_tq, cpu));
		CPU_SETOF(cpu, &cpu_mask);
		taskqueue_start_threads_cpuset(
		    VMBUS_PCPU_PTR(sc, message_tq, cpu), 1, PI_NET, &cpu_mask,
		    "hvmsg%d", cpu);
		TASK_INIT(VMBUS_PCPU_PTR(sc, message_task, cpu), 0,
		    vmbus_msg_task, sc);
	}

	/*
	 * All resources required by the Hyper-V ISR are set up; now find
	 * a free IDT vector for the Hyper-V ISR and install it.
	 */
	sc->vmbus_idtvec = lapic_ipi_alloc(IDTVEC(vmbus_isr));
	if (sc->vmbus_idtvec < 0) {
		device_printf(sc->vmbus_dev, "cannot find free IDT vector\n");
		return ENXIO;
	}
	if (bootverbose) {
		device_printf(sc->vmbus_dev, "vmbus IDT vector %d\n",
		    sc->vmbus_idtvec);
	}
	return 0;
}

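/*
 * Release the IDT vector and the per-cpu taskqueues; pending message
 * tasks are drained before their taskqueue is freed.
 */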
static void
vmbus_intr_teardown(struct vmbus_softc *sc)
{
	int cpu;

	if (sc->vmbus_idtvec >= 0) {
		lapic_ipi_free(sc->vmbus_idtvec);
		sc->vmbus_idtvec = -1;
	}

	CPU_FOREACH(cpu) {
		if (VMBUS_PCPU_GET(sc, event_tq, cpu) != NULL) {
			taskqueue_free(VMBUS_PCPU_GET(sc, event_tq, cpu));
			VMBUS_PCPU_GET(sc, event_tq, cpu) = NULL;
		}
		if (VMBUS_PCPU_GET(sc, message_tq, cpu) != NULL) {
			taskqueue_drain(VMBUS_PCPU_GET(sc, message_tq, cpu),
			    VMBUS_PCPU_PTR(sc, message_task, cpu));
			taskqueue_free(VMBUS_PCPU_GET(sc, message_tq, cpu));
			VMBUS_PCPU_GET(sc, message_tq, cpu) = NULL;
		}
	}
}

static int
vmbus_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
	struct hv_device *child_dev_ctx = device_get_ivars(child);

	switch (index) {
	case HV_VMBUS_IVAR_TYPE:
		*result = (uintptr_t)&child_dev_ctx->class_id;
		return (0);

	case HV_VMBUS_IVAR_INSTANCE:
		*result = (uintptr_t)&child_dev_ctx->device_id;
		return (0);

	case HV_VMBUS_IVAR_DEVCTX:
		*result = (uintptr_t)child_dev_ctx;
		return (0);

	case HV_VMBUS_IVAR_NODE:
		*result = (uintptr_t)child_dev_ctx->device;
		return (0);
	}
	return (ENOENT);
}

static int
vmbus_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
{
	switch (index) {
	case HV_VMBUS_IVAR_TYPE:
	case HV_VMBUS_IVAR_INSTANCE:
	case HV_VMBUS_IVAR_DEVCTX:
	case HV_VMBUS_IVAR_NODE:
		/* read-only */
		return (EINVAL);
	}
	return (ENOENT);
}

static int
vmbus_child_pnpinfo_str(device_t dev, device_t child, char *buf, size_t buflen)
{
	struct hv_device *dev_ctx = device_get_ivars(child);
	char guidbuf[HYPERV_GUID_STRLEN];

	if (dev_ctx == NULL)
		return (0);

	strlcat(buf, "classid=", buflen);
	hyperv_guid2str(&dev_ctx->class_id, guidbuf, sizeof(guidbuf));
	strlcat(buf, guidbuf, buflen);

	strlcat(buf, " deviceid=", buflen);
	hyperv_guid2str(&dev_ctx->device_id, guidbuf, sizeof(guidbuf));
	strlcat(buf, guidbuf, buflen);

	return (0);
}

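/*
 * Allocate and fill in the hv_device context for a newly offered
 * channel; hv_vmbus_child_device_register() below attaches it to the
 * vmbus device tree as a new-bus child.
 */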
struct hv_device *
hv_vmbus_child_device_create(hv_guid type, hv_guid instance,
    hv_vmbus_channel *channel)
{
	hv_device *child_dev;

	/*
	 * Allocate the new child device
	 */
	child_dev = malloc(sizeof(hv_device), M_DEVBUF, M_WAITOK | M_ZERO);

	child_dev->channel = channel;
	memcpy(&child_dev->class_id, &type, sizeof(hv_guid));
	memcpy(&child_dev->device_id, &instance, sizeof(hv_guid));

	return (child_dev);
}

int
hv_vmbus_child_device_register(struct hv_device *child_dev)
{
	device_t child, parent;

	parent = vmbus_get_device();
	if (bootverbose) {
		char name[HYPERV_GUID_STRLEN];

		hyperv_guid2str(&child_dev->class_id, name, sizeof(name));
		device_printf(parent, "add device, classid: %s\n", name);
	}

	child = device_add_child(parent, NULL, -1);
	child_dev->device = child;
	device_set_ivars(child, child_dev);

	return (0);
}

int
hv_vmbus_child_device_unregister(struct hv_device *child_dev)
{
	int ret = 0;
	/*
	 * XXXKYS: Ensure that this is the opposite of
	 * device_add_child()
	 */
	mtx_lock(&Giant);
	ret = device_delete_child(vmbus_get_device(), child_dev->device);
	mtx_unlock(&Giant);
	return (ret);
}

static int
vmbus_probe(device_t dev)
{
	char *id[] = { "VMBUS", NULL };

	if (ACPI_ID_PROBE(device_get_parent(dev), dev, id) == NULL ||
	    device_get_unit(dev) != 0 || vm_guest != VM_GUEST_HV ||
	    (hyperv_features & CPUID_HV_MSR_SYNIC) == 0)
		return (ENXIO);

	device_set_desc(dev, "Hyper-V Vmbus");

	return (BUS_PROBE_DEFAULT);
}

/**
 * @brief Main vmbus driver initialization routine.
 *
 * Here, we
 * - allocate per-cpu and connection-wide DMA memory
 * - set up per-cpu taskqueues and the Hyper-V IDT vector
 * - set up the SynIC on all CPUs
 * - connect to the VMBus of the root partition
 * - select the event processing routine for the negotiated protocol
 * - request the channel offers and scan for child devices
 */
static int
vmbus_bus_init(void)
{
	struct vmbus_softc *sc = vmbus_get_softc();
	int ret;

	if (sc->vmbus_flags & VMBUS_FLAG_ATTACHED)
		return (0);
	sc->vmbus_flags |= VMBUS_FLAG_ATTACHED;

	/*
	 * Allocate DMA stuff.
	 */
	ret = vmbus_dma_alloc(sc);
	if (ret != 0)
		goto cleanup;

	/*
	 * Setup interrupt.
	 */
	ret = vmbus_intr_setup(sc);
	if (ret != 0)
		goto cleanup;

	/*
	 * Setup SynIC.
	 */
	if (bootverbose)
		device_printf(sc->vmbus_dev, "smp_started = %d\n", smp_started);
	smp_rendezvous(NULL, vmbus_synic_setup, NULL, sc);
	sc->vmbus_flags |= VMBUS_FLAG_SYNIC;

	/*
	 * Connect to VMBus in the root partition.
	 */
	ret = hv_vmbus_connect(sc);
	if (ret != 0)
		goto cleanup;

	if (hv_vmbus_protocal_version == HV_VMBUS_VERSION_WS2008 ||
	    hv_vmbus_protocal_version == HV_VMBUS_VERSION_WIN7)
		sc->vmbus_event_proc = vmbus_event_proc_compat;
	else
		sc->vmbus_event_proc = vmbus_event_proc;

	hv_vmbus_request_channel_offers();

	vmbus_scan();
	bus_generic_attach(sc->vmbus_dev);
	device_printf(sc->vmbus_dev, "device scan, probe and attach done\n");

	return (ret);

cleanup:
	vmbus_intr_teardown(sc);
	vmbus_dma_free(sc);

	return (ret);
}

static void
vmbus_event_proc_dummy(struct vmbus_softc *sc __unused, int cpu __unused)
{
}

static int
vmbus_attach(device_t dev)
{
	vmbus_sc = device_get_softc(dev);
	vmbus_sc->vmbus_dev = dev;
	vmbus_sc->vmbus_idtvec = -1;

	/*
	 * Event processing logic will be configured:
	 * - After the vmbus protocol version negotiation.
	 * - Before we request channel offers.
	 */
	vmbus_sc->vmbus_event_proc = vmbus_event_proc_dummy;

#ifndef EARLY_AP_STARTUP
	/*
	 * If the system has already booted and thread
	 * scheduling is possible, as indicated by the
	 * global cold being set to zero, we just call
	 * the driver initialization directly.
	 */
	if (!cold)
#endif
		vmbus_bus_init();

	bus_generic_probe(dev);
	return (0);
}

static void
vmbus_sysinit(void *arg __unused)
{
	if (vm_guest != VM_GUEST_HV || vmbus_get_softc() == NULL)
		return;

#ifndef EARLY_AP_STARTUP
	/*
	 * If the system has already booted and thread
	 * scheduling is possible, as indicated by the
	 * global cold being set to zero, we just call
	 * the driver initialization directly.
	 */
	if (!cold)
#endif
		vmbus_bus_init();
}

static int
vmbus_detach(device_t dev)
{
	struct vmbus_softc *sc = device_get_softc(dev);

	hv_vmbus_release_unattached_channels();
	hv_vmbus_disconnect();

	if (sc->vmbus_flags & VMBUS_FLAG_SYNIC) {
		sc->vmbus_flags &= ~VMBUS_FLAG_SYNIC;
		smp_rendezvous(NULL, vmbus_synic_teardown, NULL, NULL);
	}

	vmbus_intr_teardown(sc);
	vmbus_dma_free(sc);

	return (0);
}

static device_method_t vmbus_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,			vmbus_probe),
	DEVMETHOD(device_attach,		vmbus_attach),
	DEVMETHOD(device_detach,		vmbus_detach),
	DEVMETHOD(device_shutdown,		bus_generic_shutdown),
	DEVMETHOD(device_suspend,		bus_generic_suspend),
	DEVMETHOD(device_resume,		bus_generic_resume),

	/* Bus interface */
	DEVMETHOD(bus_add_child,		bus_generic_add_child),
	DEVMETHOD(bus_print_child,		bus_generic_print_child),
	DEVMETHOD(bus_read_ivar,		vmbus_read_ivar),
	DEVMETHOD(bus_write_ivar,		vmbus_write_ivar),
	DEVMETHOD(bus_child_pnpinfo_str,	vmbus_child_pnpinfo_str),

	DEVMETHOD_END
};

static driver_t vmbus_driver = {
	"vmbus",
	vmbus_methods,
	sizeof(struct vmbus_softc)
};

static devclass_t vmbus_devclass;

DRIVER_MODULE(vmbus, acpi, vmbus_driver, vmbus_devclass, NULL, NULL);
MODULE_DEPEND(vmbus, acpi, 1, 1, 1);
MODULE_VERSION(vmbus, 1);

#ifndef EARLY_AP_STARTUP
/*
 * NOTE:
 * We have to start as the last step of SI_SUB_SMP, i.e. after SMP is
 * initialized.
 */
SYSINIT(vmbus_initialize, SI_SUB_SMP, SI_ORDER_ANY, vmbus_sysinit, NULL);
#endif