xref: /freebsd/sys/x86/xen/hvm.c (revision 0a10f22a30d61a6f32777a236a82d461129538cc)
1 /*
2  * Copyright (c) 2008, 2013 Citrix Systems, Inc.
3  * Copyright (c) 2012 Spectra Logic Corporation
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include <sys/param.h>
32 #include <sys/bus.h>
33 #include <sys/kernel.h>
34 #include <sys/malloc.h>
35 #include <sys/proc.h>
36 #include <sys/smp.h>
37 #include <sys/systm.h>
38 
39 #include <vm/vm.h>
40 #include <vm/pmap.h>
41 
42 #include <dev/pci/pcivar.h>
43 
44 #include <machine/cpufunc.h>
45 #include <machine/cpu.h>
46 #include <machine/smp.h>
47 
48 #include <x86/apicreg.h>
49 
50 #include <xen/xen-os.h>
51 #include <xen/features.h>
52 #include <xen/gnttab.h>
53 #include <xen/hypervisor.h>
54 #include <xen/hvm.h>
55 #include <xen/xen_intr.h>
56 
57 #include <xen/interface/hvm/params.h>
58 #include <xen/interface/vcpu.h>
59 
60 /*--------------------------- Forward Declarations ---------------------------*/
61 static driver_filter_t xen_smp_rendezvous_action;
62 static driver_filter_t xen_invltlb;
63 static driver_filter_t xen_invlpg;
64 static driver_filter_t xen_invlrng;
65 static driver_filter_t xen_invlcache;
66 #ifdef __i386__
67 static driver_filter_t xen_lazypmap;
68 #endif
69 static driver_filter_t xen_ipi_bitmap_handler;
70 static driver_filter_t xen_cpustop_handler;
71 static driver_filter_t xen_cpususpend_handler;
72 static driver_filter_t xen_cpustophard_handler;
73 
74 /*---------------------------- Extern Declarations ---------------------------*/
75 /* Variables used by mp_machdep to perform the MMU related IPIs */
76 extern volatile int smp_tlb_wait;
77 extern vm_offset_t smp_tlb_addr2;
78 #ifdef __i386__
79 extern vm_offset_t smp_tlb_addr1;
80 #else
81 extern struct invpcid_descr smp_tlb_invpcid;
82 extern uint64_t pcid_cr3;
83 extern int invpcid_works;
84 extern int pmap_pcid_enabled;
85 extern pmap_t smp_tlb_pmap;
86 #endif
87 
88 #ifdef __i386__
89 extern void pmap_lazyfix_action(void);
90 #endif
91 
92 /*---------------------------------- Macros ----------------------------------*/
93 #define	IPI_TO_IDX(ipi) ((ipi) - APIC_IPI_INTS)
94 
95 /*-------------------------------- Local Types -------------------------------*/
96 struct xen_ipi_handler
97 {
98 	driver_filter_t	*filter;
99 	const char	*description;
100 };
101 
102 /*-------------------------------- Global Data -------------------------------*/
103 enum xen_domain_type xen_domain_type = XEN_NATIVE;
104 
105 static MALLOC_DEFINE(M_XENHVM, "xen_hvm", "Xen HVM PV Support");
106 
/*
 * Table of PV IPI filter routines, indexed by (IPI vector - APIC_IPI_INTS).
 * One event channel per non-NULL entry is bound to each CPU in
 * xen_cpu_ipi_init(); the short strings are used as channel descriptions.
 */
static struct xen_ipi_handler xen_ipis[] =
{
	[IPI_TO_IDX(IPI_RENDEZVOUS)]	= { xen_smp_rendezvous_action,	"r"   },
	[IPI_TO_IDX(IPI_INVLTLB)]	= { xen_invltlb,		"itlb"},
	[IPI_TO_IDX(IPI_INVLPG)]	= { xen_invlpg,			"ipg" },
	[IPI_TO_IDX(IPI_INVLRNG)]	= { xen_invlrng,		"irg" },
	[IPI_TO_IDX(IPI_INVLCACHE)]	= { xen_invlcache,		"ic"  },
#ifdef __i386__
	[IPI_TO_IDX(IPI_LAZYPMAP)]	= { xen_lazypmap,		"lp"  },
#endif
	[IPI_TO_IDX(IPI_BITMAP_VECTOR)] = { xen_ipi_bitmap_handler,	"b"   },
	[IPI_TO_IDX(IPI_STOP)]		= { xen_cpustop_handler,	"st"  },
	[IPI_TO_IDX(IPI_SUSPEND)]	= { xen_cpususpend_handler,	"sp"  },
	[IPI_TO_IDX(IPI_STOP_HARD)]	= { xen_cpustophard_handler,	"sth" },
};
122 
123 /**
124  * If non-zero, the hypervisor has been configured to use a direct
125  * IDT event callback for interrupt injection.
126  */
127 int xen_vector_callback_enabled;
128 
129 /*------------------------------- Per-CPU Data -------------------------------*/
130 DPCPU_DEFINE(struct vcpu_info, vcpu_local_info);
131 DPCPU_DEFINE(struct vcpu_info *, vcpu_info);
132 DPCPU_DEFINE(xen_intr_handle_t, ipi_handle[nitems(xen_ipis)]);
133 
134 /*------------------ Hypervisor Access Shared Memory Regions -----------------*/
135 /** Hypercall table accessed via HYPERVISOR_*_op() methods. */
136 char *hypercall_stubs;
137 shared_info_t *HYPERVISOR_shared_info;
138 
139 /*---------------------------- XEN PV IPI Handlers ---------------------------*/
/*
 * These are C clones of the ASM functions found in apic_vector.s
 */
143 static int
144 xen_ipi_bitmap_handler(void *arg)
145 {
146 	struct trapframe *frame;
147 
148 	frame = arg;
149 	ipi_bitmap_handler(*frame);
150 	return (FILTER_HANDLED);
151 }
152 
153 static int
154 xen_smp_rendezvous_action(void *arg)
155 {
156 #ifdef COUNT_IPIS
157 	int cpu;
158 
159 	cpu = PCPU_GET(cpuid);
160 	(*ipi_rendezvous_counts[cpu])++;
161 #endif /* COUNT_IPIS */
162 
163 	smp_rendezvous_action();
164 	return (FILTER_HANDLED);
165 }
166 
167 static int
168 xen_invltlb(void *arg)
169 {
170 #if defined(COUNT_XINVLTLB_HITS) || defined(COUNT_IPIS)
171 	int cpu;
172 
173 	cpu = PCPU_GET(cpuid);
174 #ifdef COUNT_XINVLTLB_HITS
175 	xhits_gbl[cpu]++;
176 #endif /* COUNT_XINVLTLB_HITS */
177 #ifdef COUNT_IPIS
178 	(*ipi_invltlb_counts[cpu])++;
179 #endif /* COUNT_IPIS */
180 #endif /* COUNT_XINVLTLB_HITS || COUNT_IPIS */
181 
182 	invltlb();
183 	atomic_add_int(&smp_tlb_wait, 1);
184 	return (FILTER_HANDLED);
185 }
186 
#ifdef __amd64__
/*
 * Event channel filter: full TLB invalidation when PCID is in use.
 * Mirrors the native invltlb_pcid handler; acknowledges completion via
 * smp_tlb_wait.
 */
static int
xen_invltlb_pcid(void *arg)
{
	uint64_t cr3;
#if defined(COUNT_XINVLTLB_HITS) || defined(COUNT_IPIS)
	int cpu;

	cpu = PCPU_GET(cpuid);
#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[cpu]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[cpu])++;
#endif /* COUNT_IPIS */
#endif /* COUNT_XINVLTLB_HITS || COUNT_IPIS */

	cr3 = rcr3();
	if (smp_tlb_invpcid.pcid != (uint64_t)-1 &&
	    smp_tlb_invpcid.pcid != 0) {
		/* A specific non-kernel PCID: flush just that context. */

		if (invpcid_works) {
			invpcid(&smp_tlb_invpcid, INVPCID_CTX);
		} else {
			/* Otherwise reload %cr3 twice. */
			if (cr3 != pcid_cr3) {
				load_cr3(pcid_cr3);
				/* Avoid flushing the saved context on return. */
				cr3 |= CR3_PCID_SAVE;
			}
			load_cr3(cr3);
		}
	} else {
		/* pcid of 0 (kernel) or -1 (all contexts): global flush. */
		invltlb_globpcid();
	}
	/* Drop this CPU from the pmap's set of CPUs with cached entries. */
	if (smp_tlb_pmap != NULL)
		CPU_CLR_ATOMIC(PCPU_GET(cpuid), &smp_tlb_pmap->pm_save);

	atomic_add_int(&smp_tlb_wait, 1);
	return (FILTER_HANDLED);
}
#endif
228 
229 static int
230 xen_invlpg(void *arg)
231 {
232 #if defined(COUNT_XINVLTLB_HITS) || defined(COUNT_IPIS)
233 	int cpu;
234 
235 	cpu = PCPU_GET(cpuid);
236 #ifdef COUNT_XINVLTLB_HITS
237 	xhits_pg[cpu]++;
238 #endif /* COUNT_XINVLTLB_HITS */
239 #ifdef COUNT_IPIS
240 	(*ipi_invlpg_counts[cpu])++;
241 #endif /* COUNT_IPIS */
242 #endif /* COUNT_XINVLTLB_HITS || COUNT_IPIS */
243 
244 #ifdef __i386__
245 	invlpg(smp_tlb_addr1);
246 #else
247 	invlpg(smp_tlb_invpcid.addr);
248 #endif
249 	atomic_add_int(&smp_tlb_wait, 1);
250 	return (FILTER_HANDLED);
251 }
252 
#ifdef __amd64__
/*
 * Event channel filter: invalidate a single page mapping when PCID is
 * in use.  Mirrors the native invlpg_pcid handler; acknowledges
 * completion via smp_tlb_wait.
 */
static int
xen_invlpg_pcid(void *arg)
{
#if defined(COUNT_XINVLTLB_HITS) || defined(COUNT_IPIS)
	int cpu;

	cpu = PCPU_GET(cpuid);
#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[cpu]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[cpu])++;
#endif /* COUNT_IPIS */
#endif /* COUNT_XINVLTLB_HITS || COUNT_IPIS */

	if (invpcid_works) {
		/* Targeted single-address invalidation for the given PCID. */
		invpcid(&smp_tlb_invpcid, INVPCID_ADDR);
	} else if (smp_tlb_invpcid.pcid == 0) {
		/* Kernel PCID: a plain invlpg suffices. */
		invlpg(smp_tlb_invpcid.addr);
	} else if (smp_tlb_invpcid.pcid == (uint64_t)-1) {
		/* "All contexts" request: fall back to a full global flush. */
		invltlb_globpcid();
	} else {
		uint64_t cr3;

		/*
		 * PCID supported, but INVPCID is not.
		 * Temporarily switch to the target address
		 * space and do INVLPG.
		 */
		cr3 = rcr3();
		if (cr3 != pcid_cr3)
			load_cr3(pcid_cr3 | CR3_PCID_SAVE);
		invlpg(smp_tlb_invpcid.addr);
		/* CR3_PCID_SAVE keeps the restored context's TLB entries. */
		load_cr3(cr3 | CR3_PCID_SAVE);
	}

	atomic_add_int(&smp_tlb_wait, 1);
	return (FILTER_HANDLED);
}
#endif
294 
295 static inline void
296 invlpg_range(vm_offset_t start, vm_offset_t end)
297 {
298 	do {
299 		invlpg(start);
300 		start += PAGE_SIZE;
301 	} while (start < end);
302 }
303 
/*
 * Event channel filter: invalidate a range of page mappings
 * [addr, smp_tlb_addr2).  Mirrors the native invlrng handler;
 * acknowledges completion via smp_tlb_wait.
 */
static int
xen_invlrng(void *arg)
{
	vm_offset_t addr;
#if defined(COUNT_XINVLTLB_HITS) || defined(COUNT_IPIS)
	int cpu;

	cpu = PCPU_GET(cpuid);
#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[cpu]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[cpu])++;
#endif /* COUNT_IPIS */
#endif /* COUNT_XINVLTLB_HITS || COUNT_IPIS */

#ifdef __i386__
	addr = smp_tlb_addr1;
	invlpg_range(addr, smp_tlb_addr2);
#else
	addr = smp_tlb_invpcid.addr;
	if (pmap_pcid_enabled) {
		if (invpcid_works) {
			struct invpcid_descr d;

			/* Walk the range with per-address INVPCID. */
			d = smp_tlb_invpcid;
			do {
				invpcid(&d, INVPCID_ADDR);
				d.addr += PAGE_SIZE;
			} while (d.addr < smp_tlb_addr2);
		} else if (smp_tlb_invpcid.pcid == 0) {
			/*
			 * kernel pmap - use invlpg to invalidate
			 * global mapping.
			 */
			invlpg_range(addr, smp_tlb_addr2);
		} else if (smp_tlb_invpcid.pcid != (uint64_t)-1) {
			/*
			 * NOTE(review): this branch tests "!= -1" where the
			 * sibling handlers above treat pcid == -1 as the
			 * global-flush case — confirm against the native
			 * invlrng_handler that the inversion is intentional.
			 */
			invltlb_globpcid();
			if (smp_tlb_pmap != NULL) {
				CPU_CLR_ATOMIC(PCPU_GET(cpuid),
				    &smp_tlb_pmap->pm_save);
			}
		} else {
			uint64_t cr3;

			/* Switch to the target context, invalidate, return. */
			cr3 = rcr3();
			if (cr3 != pcid_cr3)
				load_cr3(pcid_cr3 | CR3_PCID_SAVE);
			invlpg_range(addr, smp_tlb_addr2);
			load_cr3(cr3 | CR3_PCID_SAVE);
		}
	} else {
		invlpg_range(addr, smp_tlb_addr2);
	}
#endif

	atomic_add_int(&smp_tlb_wait, 1);
	return (FILTER_HANDLED);
}
363 
364 static int
365 xen_invlcache(void *arg)
366 {
367 #ifdef COUNT_IPIS
368 	int cpu = PCPU_GET(cpuid);
369 
370 	cpu = PCPU_GET(cpuid);
371 	(*ipi_invlcache_counts[cpu])++;
372 #endif /* COUNT_IPIS */
373 
374 	wbinvd();
375 	atomic_add_int(&smp_tlb_wait, 1);
376 	return (FILTER_HANDLED);
377 }
378 
#ifdef __i386__
/*
 * Event channel filter: run the deferred (lazy) pmap fixup action on
 * this CPU.  i386 only.
 */
static int
xen_lazypmap(void *arg)
{

	pmap_lazyfix_action();
	return (FILTER_HANDLED);
}
#endif
388 
/*
 * Event channel filter for IPI_STOP: park this CPU via the native
 * cpustop_handler().
 */
static int
xen_cpustop_handler(void *arg)
{

	cpustop_handler();
	return (FILTER_HANDLED);
}
396 
/*
 * Event channel filter for IPI_SUSPEND: suspend this CPU via the native
 * cpususpend_handler().
 */
static int
xen_cpususpend_handler(void *arg)
{

	cpususpend_handler();
	return (FILTER_HANDLED);
}
404 
/*
 * Event channel filter for IPI_STOP_HARD: invoke the NMI stop handler.
 */
static int
xen_cpustophard_handler(void *arg)
{

	ipi_nmi_handler();
	return (FILTER_HANDLED);
}
412 
413 /* Xen PV IPI sender */
414 static void
415 xen_ipi_vectored(u_int vector, int dest)
416 {
417 	xen_intr_handle_t *ipi_handle;
418 	int ipi_idx, to_cpu, self;
419 
420 	ipi_idx = IPI_TO_IDX(vector);
421 	if (ipi_idx > nitems(xen_ipis))
422 		panic("IPI out of range");
423 
424 	switch(dest) {
425 	case APIC_IPI_DEST_SELF:
426 		ipi_handle = DPCPU_GET(ipi_handle);
427 		xen_intr_signal(ipi_handle[ipi_idx]);
428 		break;
429 	case APIC_IPI_DEST_ALL:
430 		CPU_FOREACH(to_cpu) {
431 			ipi_handle = DPCPU_ID_GET(to_cpu, ipi_handle);
432 			xen_intr_signal(ipi_handle[ipi_idx]);
433 		}
434 		break;
435 	case APIC_IPI_DEST_OTHERS:
436 		self = PCPU_GET(cpuid);
437 		CPU_FOREACH(to_cpu) {
438 			if (to_cpu != self) {
439 				ipi_handle = DPCPU_ID_GET(to_cpu, ipi_handle);
440 				xen_intr_signal(ipi_handle[ipi_idx]);
441 			}
442 		}
443 		break;
444 	default:
445 		to_cpu = apic_cpuid(dest);
446 		ipi_handle = DPCPU_ID_GET(to_cpu, ipi_handle);
447 		xen_intr_signal(ipi_handle[ipi_idx]);
448 		break;
449 	}
450 }
451 
452 static void
453 xen_cpu_ipi_init(int cpu)
454 {
455 	xen_intr_handle_t *ipi_handle;
456 	const struct xen_ipi_handler *ipi;
457 	device_t dev;
458 	int idx, rc;
459 
460 	ipi_handle = DPCPU_ID_GET(cpu, ipi_handle);
461 	dev = pcpu_find(cpu)->pc_device;
462 	KASSERT((dev != NULL), ("NULL pcpu device_t"));
463 
464 	for (ipi = xen_ipis, idx = 0; idx < nitems(xen_ipis); ipi++, idx++) {
465 
466 		if (ipi->filter == NULL) {
467 			ipi_handle[idx] = NULL;
468 			continue;
469 		}
470 
471 		rc = xen_intr_alloc_and_bind_ipi(dev, cpu, ipi->filter,
472 		    INTR_TYPE_TTY, &ipi_handle[idx]);
473 		if (rc != 0)
474 			panic("Unable to allocate a XEN IPI port");
475 		xen_intr_describe(ipi_handle[idx], "%s", ipi->description);
476 	}
477 }
478 
479 static void
480 xen_init_ipis(void)
481 {
482 	int i;
483 
484 	if (!xen_hvm_domain() || !xen_vector_callback_enabled)
485 		return;
486 
487 #ifdef __amd64__
488 	if (pmap_pcid_enabled) {
489 		xen_ipis[IPI_TO_IDX(IPI_INVLTLB)].filter = xen_invltlb_pcid;
490 		xen_ipis[IPI_TO_IDX(IPI_INVLPG)].filter = xen_invlpg_pcid;
491 	}
492 #endif
493 	CPU_FOREACH(i)
494 		xen_cpu_ipi_init(i);
495 
496 	/* Set the xen pv ipi ops to replace the native ones */
497 	cpu_ops.ipi_vectored = xen_ipi_vectored;
498 }
499 
500 /*---------------------- XEN Hypervisor Probe and Setup ----------------------*/
501 static uint32_t
502 xen_hvm_cpuid_base(void)
503 {
504 	uint32_t base, regs[4];
505 
506 	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
507 		do_cpuid(base, regs);
508 		if (!memcmp("XenVMMXenVMM", &regs[1], 12)
509 		    && (regs[0] - base) >= 2)
510 			return (base);
511 	}
512 	return (0);
513 }
514 
/*
 * Allocate and fill in the hypercall page(s).
 *
 * The region is allocated once and reused across suspend/resume; the
 * hypervisor rewrites the stubs each time via the MSR protocol below.
 * Returns 0 on success or ENXIO if Xen is not detected via cpuid.
 */
static int
xen_hvm_init_hypercall_stubs(void)
{
	uint32_t base, regs[4];
	int i;

	base = xen_hvm_cpuid_base();
	if (base == 0)
		return (ENXIO);

	/* Report the hypervisor version only on first (cold) init. */
	if (hypercall_stubs == NULL) {
		do_cpuid(base + 1, regs);
		printf("XEN: Hypervisor version %d.%d detected.\n",
		    regs[0] >> 16, regs[0] & 0xffff);
	}

	/*
	 * Find the hypercall pages.
	 */
	do_cpuid(base + 2, regs);

	if (hypercall_stubs == NULL) {
		size_t call_region_size;

		/* regs[0]: number of pages; regs[1]: MSR to program. */
		call_region_size = regs[0] * PAGE_SIZE;
		hypercall_stubs = malloc(call_region_size, M_XENHVM, M_NOWAIT);
		if (hypercall_stubs == NULL)
			panic("Unable to allocate Xen hypercall region");
	}

	/*
	 * Writing the physical address of each page (page index in the
	 * low bits) to the MSR makes the hypervisor fill in the stubs.
	 */
	for (i = 0; i < regs[0]; i++)
		wrmsr(regs[1], vtophys(hypercall_stubs + i * PAGE_SIZE) + i);

	return (0);
}
553 
554 static void
555 xen_hvm_init_shared_info_page(void)
556 {
557 	struct xen_add_to_physmap xatp;
558 
559 	if (HYPERVISOR_shared_info == NULL) {
560 		HYPERVISOR_shared_info = malloc(PAGE_SIZE, M_XENHVM, M_NOWAIT);
561 		if (HYPERVISOR_shared_info == NULL)
562 			panic("Unable to allocate Xen shared info page");
563 	}
564 
565 	xatp.domid = DOMID_SELF;
566 	xatp.idx = 0;
567 	xatp.space = XENMAPSPACE_shared_info;
568 	xatp.gpfn = vtophys(HYPERVISOR_shared_info) >> PAGE_SHIFT;
569 	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
570 		panic("HYPERVISOR_memory_op failed");
571 }
572 
/*
 * Tell the hypervisor how to contact us for event channel callbacks.
 *
 * Preference order: a direct IDT vector callback (when the hypervisor
 * supports it), then a GSI or PCI INTx interrupt derived from the
 * xenpci device.  'dev' is NULL when called before the xenpci device
 * attaches (early boot/resume); in that case only the vector method is
 * attempted and xenpci retries later.
 */
void
xen_hvm_set_callback(device_t dev)
{
	struct xen_hvm_param xhp;
	int irq;

	xhp.domid = DOMID_SELF;
	xhp.index = HVM_PARAM_CALLBACK_IRQ;
	if (xen_feature(XENFEAT_hvm_callback_vector) != 0) {
		int error;

		xhp.value = HVM_CALLBACK_VECTOR(IDT_EVTCHN);
		error = HYPERVISOR_hvm_op(HVMOP_set_param, &xhp);
		if (error == 0) {
			xen_vector_callback_enabled = 1;
			return;
		}
		printf("Xen HVM callback vector registration failed (%d). "
		    "Falling back to emulated device interrupt\n", error);
	}
	xen_vector_callback_enabled = 0;
	if (dev == NULL) {
		/*
		 * Called from early boot or resume.
		 * xenpci will invoke us again later.
		 */
		return;
	}

	irq = pci_get_irq(dev);
	if (irq < 16) {
		/* Legacy ISA-range IRQ: register it as a GSI callback. */
		xhp.value = HVM_CALLBACK_GSI(irq);
	} else {
		u_int slot;
		u_int pin;

		/* Otherwise identify the interrupt by PCI slot and pin. */
		slot = pci_get_slot(dev);
		pin = pci_get_intpin(dev) - 1;
		xhp.value = HVM_CALLBACK_PCI_INTX(slot, pin);
	}

	if (HYPERVISOR_hvm_op(HVMOP_set_param, &xhp) != 0)
		panic("Can't set evtchn callback");
}
620 
/*
 * QEMU's Xen "unplug" protocol: reading XMI_MAGIC from the magic I/O
 * port confirms support, and writing the XMI_UNPLUG_* mask disconnects
 * the corresponding emulated devices so PV drivers can take over.
 */
#define	XEN_MAGIC_IOPORT 0x10
enum {
	XMI_MAGIC			 = 0x49d2,
	XMI_UNPLUG_IDE_DISKS		 = 0x01,
	XMI_UNPLUG_NICS			 = 0x02,
	XMI_UNPLUG_IDE_EXCEPT_PRI_MASTER = 0x04
};
628 
629 static void
630 xen_hvm_disable_emulated_devices(void)
631 {
632 	if (inw(XEN_MAGIC_IOPORT) != XMI_MAGIC)
633 		return;
634 
635 	if (bootverbose)
636 		printf("XEN: Disabling emulated block and network devices\n");
637 	outw(XEN_MAGIC_IOPORT, XMI_UNPLUG_IDE_DISKS|XMI_UNPLUG_NICS);
638 }
639 
/*
 * Suspend hook: intentionally empty — all hypervisor state is
 * re-established by xen_hvm_resume().
 */
void
xen_hvm_suspend(void)
{
}
644 
/*
 * Resume hook: re-register the hypercall region and the shared info
 * page mapping with the (possibly new) hypervisor instance.  Order
 * matters — hypercalls must work before the shared info mapping call.
 */
void
xen_hvm_resume(void)
{

	xen_hvm_init_hypercall_stubs();
	xen_hvm_init_shared_info_page();
}
652 
/*
 * SYSINIT hook: detect Xen and perform one-time HVM setup.  Silently
 * returns on bare metal (hypercall stub init fails).  The callback is
 * registered with dev == NULL here; xenpci completes it later.
 */
static void
xen_hvm_init(void *dummy __unused)
{

	if (xen_hvm_init_hypercall_stubs() != 0)
		return;

	xen_domain_type = XEN_HVM_DOMAIN;
	setup_xen_features();
	xen_hvm_init_shared_info_page();
	xen_hvm_set_callback(NULL);
	xen_hvm_disable_emulated_devices();
}
666 
667 void xen_hvm_init_cpu(void)
668 {
669 	struct vcpu_register_vcpu_info info;
670 	struct vcpu_info *vcpu_info;
671 	int cpu, rc;
672 
673 	cpu = PCPU_GET(acpi_id);
674 	vcpu_info = DPCPU_PTR(vcpu_local_info);
675 	info.mfn = vtophys(vcpu_info) >> PAGE_SHIFT;
676 	info.offset = vtophys(vcpu_info) - trunc_page(vtophys(vcpu_info));
677 
678 	rc = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
679 	if (rc != 0)
680 		DPCPU_SET(vcpu_info, &HYPERVISOR_shared_info->vcpu_info[cpu]);
681 	else
682 		DPCPU_SET(vcpu_info, vcpu_info);
683 }
684 
/*
 * Ordering: detect Xen early (hypervisor stage), switch to PV IPIs when
 * SMP comes up, and register per-CPU vcpu_info at interrupt setup.
 */
SYSINIT(xen_hvm_init, SI_SUB_HYPERVISOR, SI_ORDER_FIRST, xen_hvm_init, NULL);
SYSINIT(xen_init_ipis, SI_SUB_SMP, SI_ORDER_FIRST, xen_init_ipis, NULL);
SYSINIT(xen_hvm_init_cpu, SI_SUB_INTR, SI_ORDER_FIRST, xen_hvm_init_cpu, NULL);
688