/*
 * Copyright (c) 2008, 2013 Citrix Systems, Inc.
 * Copyright (c) 2012 Spectra Logic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/pci/pcivar.h>

#include <machine/cpufunc.h>
#include <machine/cpu.h>
#include <machine/smp.h>

#include <x86/apicreg.h>

#include <xen/xen-os.h>
#include <xen/features.h>
#include <xen/gnttab.h>
#include <xen/hypervisor.h>
#include <xen/hvm.h>
#include <xen/xen_intr.h>

#include <xen/interface/hvm/params.h>
#include <xen/interface/vcpu.h>

/*--------------------------- Forward Declarations ---------------------------*/
#ifdef SMP
static driver_filter_t xen_smp_rendezvous_action;
static driver_filter_t xen_invltlb;
static driver_filter_t xen_invlpg;
static driver_filter_t xen_invlrng;
static driver_filter_t xen_invlcache;
#ifdef __i386__
static driver_filter_t xen_lazypmap;
#endif
static driver_filter_t xen_ipi_bitmap_handler;
static driver_filter_t xen_cpustop_handler;
static driver_filter_t xen_cpususpend_handler;
static driver_filter_t xen_cpustophard_handler;
#endif
static void xen_ipi_vectored(u_int vector, int dest);
static void xen_hvm_cpu_resume(void);
static void xen_hvm_cpu_init(void);

/*---------------------------- Extern Declarations ---------------------------*/
/* Variables used by mp_machdep to perform the MMU-related IPIs */
extern volatile int smp_tlb_wait;
extern vm_offset_t smp_tlb_addr2;
#ifdef __i386__
extern vm_offset_t smp_tlb_addr1;
#else
extern struct invpcid_descr smp_tlb_invpcid;
extern uint64_t pcid_cr3;
extern int invpcid_works;
extern int pmap_pcid_enabled;
extern pmap_t smp_tlb_pmap;
#endif

#ifdef __i386__
extern void pmap_lazyfix_action(void);
#endif

/* Variables used by mp_machdep to perform the bitmap IPI */
extern volatile u_int cpu_ipi_pending[MAXCPU];

/*---------------------------------- Macros ----------------------------------*/
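/*
 * IPI vectors are numbered upwards from APIC_IPI_INTS; rebase them to zero
 * so that they can be used as indexes into the xen_ipis[] table below.
 */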
#define	IPI_TO_IDX(ipi) ((ipi) - APIC_IPI_INTS)

/*-------------------------------- Local Types -------------------------------*/
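/*
 * The ways we can enter xen_hvm_init(): a cold boot, a resume after the
 * suspend was cancelled (our pre-suspend state is still valid), or a real
 * resume, including migration to a new host.
 */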
enum xen_hvm_init_type {
	XEN_HVM_INIT_COLD,
	XEN_HVM_INIT_CANCELLED_SUSPEND,
	XEN_HVM_INIT_RESUME
};

struct xen_ipi_handler
{
	driver_filter_t	*filter;
	const char	*description;
};

/*-------------------------------- Global Data -------------------------------*/
enum xen_domain_type xen_domain_type = XEN_NATIVE;

struct cpu_ops xen_hvm_cpu_ops = {
	.ipi_vectored	= lapic_ipi_vectored,
	.cpu_init	= xen_hvm_cpu_init,
	.cpu_resume	= xen_hvm_cpu_resume
};

static MALLOC_DEFINE(M_XENHVM, "xen_hvm", "Xen HVM PV Support");

#ifdef SMP
static struct xen_ipi_handler xen_ipis[] =
{
	[IPI_TO_IDX(IPI_RENDEZVOUS)]	= { xen_smp_rendezvous_action,	"r"   },
	[IPI_TO_IDX(IPI_INVLTLB)]	= { xen_invltlb,		"itlb"},
	[IPI_TO_IDX(IPI_INVLPG)]	= { xen_invlpg,			"ipg" },
	[IPI_TO_IDX(IPI_INVLRNG)]	= { xen_invlrng,		"irg" },
	[IPI_TO_IDX(IPI_INVLCACHE)]	= { xen_invlcache,		"ic"  },
#ifdef __i386__
	[IPI_TO_IDX(IPI_LAZYPMAP)]	= { xen_lazypmap,		"lp"  },
#endif
	[IPI_TO_IDX(IPI_BITMAP_VECTOR)] = { xen_ipi_bitmap_handler,	"b"   },
	[IPI_TO_IDX(IPI_STOP)]		= { xen_cpustop_handler,	"st"  },
	[IPI_TO_IDX(IPI_SUSPEND)]	= { xen_cpususpend_handler,	"sp"  },
	[IPI_TO_IDX(IPI_STOP_HARD)]	= { xen_cpustophard_handler,	"sth" },
};
#endif

/**
 * If non-zero, the hypervisor has been configured to use a direct
 * IDT event callback for interrupt injection.
 */
int xen_vector_callback_enabled;

/*------------------------------- Per-CPU Data -------------------------------*/
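/*
 * vcpu_local_info provides per-CPU backing storage that we try to register
 * with the hypervisor; vcpu_info points either at it or, if registration
 * fails, at this CPU's slot in the legacy shared info page.
 */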
DPCPU_DEFINE(struct vcpu_info, vcpu_local_info);
DPCPU_DEFINE(struct vcpu_info *, vcpu_info);
#ifdef SMP
DPCPU_DEFINE(xen_intr_handle_t, ipi_handle[nitems(xen_ipis)]);
#endif

/*------------------ Hypervisor Access Shared Memory Regions -----------------*/
/** Hypercall table accessed via HYPERVISOR_*_op() methods. */
char *hypercall_stubs;
shared_info_t *HYPERVISOR_shared_info;

#ifdef SMP
/*---------------------------- XEN PV IPI Handlers ---------------------------*/
/*
 * These are C clones of the ASM functions found in apic_vector.s.
 */
static int
xen_ipi_bitmap_handler(void *arg)
{
	struct trapframe *frame;

	frame = arg;
	ipi_bitmap_handler(*frame);
	return (FILTER_HANDLED);
}

static int
xen_smp_rendezvous_action(void *arg)
{
#ifdef COUNT_IPIS
	int cpu;

	cpu = PCPU_GET(cpuid);
	(*ipi_rendezvous_counts[cpu])++;
#endif /* COUNT_IPIS */

	smp_rendezvous_action();
	return (FILTER_HANDLED);
}

static int
xen_invltlb(void *arg)
{
#if defined(COUNT_XINVLTLB_HITS) || defined(COUNT_IPIS)
	int cpu;

	cpu = PCPU_GET(cpuid);
#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[cpu]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[cpu])++;
#endif /* COUNT_IPIS */
#endif /* COUNT_XINVLTLB_HITS || COUNT_IPIS */

	invltlb();
	atomic_add_int(&smp_tlb_wait, 1);
	return (FILTER_HANDLED);
}

#ifdef __amd64__
static int
xen_invltlb_pcid(void *arg)
{
	uint64_t cr3;
#if defined(COUNT_XINVLTLB_HITS) || defined(COUNT_IPIS)
	int cpu;

	cpu = PCPU_GET(cpuid);
#ifdef COUNT_XINVLTLB_HITS
	xhits_gbl[cpu]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invltlb_counts[cpu])++;
#endif /* COUNT_IPIS */
#endif /* COUNT_XINVLTLB_HITS || COUNT_IPIS */

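	/*
	 * A regular, non-kernel PCID names a single address space, so only
	 * that context needs to be flushed.  For the kernel pmap (PCID 0)
	 * or the wildcard value -1, flush the entire TLB, including global
	 * entries.
	 */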
	cr3 = rcr3();
	if (smp_tlb_invpcid.pcid != (uint64_t)-1 &&
	    smp_tlb_invpcid.pcid != 0) {
		if (invpcid_works) {
			invpcid(&smp_tlb_invpcid, INVPCID_CTX);
		} else {
			/* Otherwise reload %cr3 twice. */
			if (cr3 != pcid_cr3) {
				load_cr3(pcid_cr3);
				cr3 |= CR3_PCID_SAVE;
			}
			load_cr3(cr3);
		}
	} else {
		invltlb_globpcid();
	}
	if (smp_tlb_pmap != NULL)
		CPU_CLR_ATOMIC(PCPU_GET(cpuid), &smp_tlb_pmap->pm_save);

	atomic_add_int(&smp_tlb_wait, 1);
	return (FILTER_HANDLED);
}
#endif


static int
xen_invlpg(void *arg)
{
#if defined(COUNT_XINVLTLB_HITS) || defined(COUNT_IPIS)
	int cpu;

	cpu = PCPU_GET(cpuid);
#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[cpu]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[cpu])++;
#endif /* COUNT_IPIS */
#endif /* COUNT_XINVLTLB_HITS || COUNT_IPIS */

#ifdef __i386__
	invlpg(smp_tlb_addr1);
#else
	invlpg(smp_tlb_invpcid.addr);
#endif
	atomic_add_int(&smp_tlb_wait, 1);
	return (FILTER_HANDLED);
}

#ifdef __amd64__
static int
xen_invlpg_pcid(void *arg)
{
#if defined(COUNT_XINVLTLB_HITS) || defined(COUNT_IPIS)
	int cpu;

	cpu = PCPU_GET(cpuid);
#ifdef COUNT_XINVLTLB_HITS
	xhits_pg[cpu]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlpg_counts[cpu])++;
#endif /* COUNT_IPIS */
#endif /* COUNT_XINVLTLB_HITS || COUNT_IPIS */

	if (invpcid_works) {
		invpcid(&smp_tlb_invpcid, INVPCID_ADDR);
	} else if (smp_tlb_invpcid.pcid == 0) {
		invlpg(smp_tlb_invpcid.addr);
	} else if (smp_tlb_invpcid.pcid == (uint64_t)-1) {
		invltlb_globpcid();
	} else {
		uint64_t cr3;

		/*
		 * PCID supported, but INVPCID is not.
		 * Temporarily switch to the target address
		 * space and do INVLPG.
		 */
		cr3 = rcr3();
		if (cr3 != pcid_cr3)
			load_cr3(pcid_cr3 | CR3_PCID_SAVE);
		invlpg(smp_tlb_invpcid.addr);
		load_cr3(cr3 | CR3_PCID_SAVE);
	}

	atomic_add_int(&smp_tlb_wait, 1);
	return (FILTER_HANDLED);
}
#endif

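/* Invalidate every page in the range [start, end) from the local TLB. */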
static inline void
invlpg_range(vm_offset_t start, vm_offset_t end)
{
	do {
		invlpg(start);
		start += PAGE_SIZE;
	} while (start < end);
}

static int
xen_invlrng(void *arg)
{
	vm_offset_t addr;
#if defined(COUNT_XINVLTLB_HITS) || defined(COUNT_IPIS)
	int cpu;

	cpu = PCPU_GET(cpuid);
#ifdef COUNT_XINVLTLB_HITS
	xhits_rng[cpu]++;
#endif /* COUNT_XINVLTLB_HITS */
#ifdef COUNT_IPIS
	(*ipi_invlrng_counts[cpu])++;
#endif /* COUNT_IPIS */
#endif /* COUNT_XINVLTLB_HITS || COUNT_IPIS */

#ifdef __i386__
	addr = smp_tlb_addr1;
	invlpg_range(addr, smp_tlb_addr2);
#else
	addr = smp_tlb_invpcid.addr;
	if (pmap_pcid_enabled) {
		if (invpcid_works) {
			struct invpcid_descr d;

			d = smp_tlb_invpcid;
			do {
				invpcid(&d, INVPCID_ADDR);
				d.addr += PAGE_SIZE;
			} while (d.addr < smp_tlb_addr2);
		} else if (smp_tlb_invpcid.pcid == 0) {
			/*
			 * kernel pmap - use invlpg to invalidate
			 * global mapping.
			 */
			invlpg_range(addr, smp_tlb_addr2);
		} else if (smp_tlb_invpcid.pcid != (uint64_t)-1) {
			invltlb_globpcid();
			if (smp_tlb_pmap != NULL) {
				CPU_CLR_ATOMIC(PCPU_GET(cpuid),
				    &smp_tlb_pmap->pm_save);
			}
		} else {
			uint64_t cr3;

			cr3 = rcr3();
			if (cr3 != pcid_cr3)
				load_cr3(pcid_cr3 | CR3_PCID_SAVE);
			invlpg_range(addr, smp_tlb_addr2);
			load_cr3(cr3 | CR3_PCID_SAVE);
		}
	} else {
		invlpg_range(addr, smp_tlb_addr2);
	}
#endif

	atomic_add_int(&smp_tlb_wait, 1);
	return (FILTER_HANDLED);
}

static int
xen_invlcache(void *arg)
{
#ifdef COUNT_IPIS
	int cpu;

	cpu = PCPU_GET(cpuid);
	(*ipi_invlcache_counts[cpu])++;
#endif /* COUNT_IPIS */

	wbinvd();
	atomic_add_int(&smp_tlb_wait, 1);
	return (FILTER_HANDLED);
}

#ifdef __i386__
static int
xen_lazypmap(void *arg)
{

	pmap_lazyfix_action();
	return (FILTER_HANDLED);
}
#endif

static int
xen_cpustop_handler(void *arg)
{

	cpustop_handler();
	return (FILTER_HANDLED);
}

static int
xen_cpususpend_handler(void *arg)
{

	cpususpend_handler();
	return (FILTER_HANDLED);
}

static int
xen_cpustophard_handler(void *arg)
{

	ipi_nmi_handler();
	return (FILTER_HANDLED);
}

/* Xen PV IPI sender */
static void
xen_ipi_vectored(u_int vector, int dest)
{
	xen_intr_handle_t *ipi_handle;
	int ipi_idx, to_cpu, self;

	ipi_idx = IPI_TO_IDX(vector);
	if (ipi_idx >= nitems(xen_ipis))
		panic("IPI out of range");

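	/*
	 * Each (CPU, IPI) pair is bound to its own event channel; signaling
	 * the channel makes the target vCPU run the corresponding filter.
	 */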
	switch (dest) {
	case APIC_IPI_DEST_SELF:
		ipi_handle = DPCPU_GET(ipi_handle);
		xen_intr_signal(ipi_handle[ipi_idx]);
		break;
	case APIC_IPI_DEST_ALL:
		CPU_FOREACH(to_cpu) {
			ipi_handle = DPCPU_ID_GET(to_cpu, ipi_handle);
			xen_intr_signal(ipi_handle[ipi_idx]);
		}
		break;
	case APIC_IPI_DEST_OTHERS:
		self = PCPU_GET(cpuid);
		CPU_FOREACH(to_cpu) {
			if (to_cpu != self) {
				ipi_handle = DPCPU_ID_GET(to_cpu, ipi_handle);
				xen_intr_signal(ipi_handle[ipi_idx]);
			}
		}
		break;
	default:
		to_cpu = apic_cpuid(dest);
		ipi_handle = DPCPU_ID_GET(to_cpu, ipi_handle);
		xen_intr_signal(ipi_handle[ipi_idx]);
		break;
	}
}

/* Xen CPU operations that diverge from the native implementations */
static void
xen_hvm_cpu_resume(void)
{
	u_int cpuid = PCPU_GET(cpuid);

	/*
	 * Reset pending bitmap IPIs, because Xen doesn't preserve pending
	 * event channels on migration.
	 */
	cpu_ipi_pending[cpuid] = 0;

	/* register vcpu_info area */
	xen_hvm_cpu_init();
}

static void
xen_cpu_ipi_init(int cpu)
{
	xen_intr_handle_t *ipi_handle;
	const struct xen_ipi_handler *ipi;
	device_t dev;
	int idx, rc;

	ipi_handle = DPCPU_ID_GET(cpu, ipi_handle);
	dev = pcpu_find(cpu)->pc_device;
	KASSERT((dev != NULL), ("NULL pcpu device_t"));

	for (ipi = xen_ipis, idx = 0; idx < nitems(xen_ipis); ipi++, idx++) {
		if (ipi->filter == NULL) {
			ipi_handle[idx] = NULL;
			continue;
		}

		rc = xen_intr_alloc_and_bind_ipi(dev, cpu, ipi->filter,
		    INTR_TYPE_TTY, &ipi_handle[idx]);
		if (rc != 0)
			panic("Unable to allocate a XEN IPI port");
		xen_intr_describe(ipi_handle[idx], "%s", ipi->description);
	}
}

static void
xen_setup_cpus(void)
{
	int i;

	if (!xen_hvm_domain() || !xen_vector_callback_enabled)
		return;

#ifdef __amd64__
	if (pmap_pcid_enabled) {
		xen_ipis[IPI_TO_IDX(IPI_INVLTLB)].filter = xen_invltlb_pcid;
		xen_ipis[IPI_TO_IDX(IPI_INVLPG)].filter = xen_invlpg_pcid;
	}
#endif
	CPU_FOREACH(i)
		xen_cpu_ipi_init(i);

	/* Set the Xen PV IPI ops to replace the native ones. */
	cpu_ops.ipi_vectored = xen_ipi_vectored;
}
#endif

/*---------------------- XEN Hypervisor Probe and Setup ----------------------*/
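/*
 * Xen advertises itself through CPUID leaves in the 0x40000000-0x4000ffff
 * range, spaced 0x100 apart so that multiple hypervisor interfaces can
 * coexist.  A Xen leaf returns the "XenVMMXenVMM" signature in
 * %ebx/%ecx/%edx and the highest supported leaf in %eax; we require at
 * least two sub-leaves (version and hypercall page information).
 */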
static uint32_t
xen_hvm_cpuid_base(void)
{
	uint32_t base, regs[4];

	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		do_cpuid(base, regs);
		if (!memcmp("XenVMMXenVMM", &regs[1], 12) &&
		    (regs[0] - base) >= 2)
			return (base);
	}
	return (0);
}

/*
 * Allocate and fill in the hypercall pages.
 */
static int
xen_hvm_init_hypercall_stubs(void)
{
	uint32_t base, regs[4];
	int i;

	base = xen_hvm_cpuid_base();
	if (base == 0)
		return (ENXIO);

	if (hypercall_stubs == NULL) {
		do_cpuid(base + 1, regs);
		printf("XEN: Hypervisor version %d.%d detected.\n",
		    regs[0] >> 16, regs[0] & 0xffff);
	}

	/*
	 * Find the hypercall pages.
	 */
	do_cpuid(base + 2, regs);

	if (hypercall_stubs == NULL) {
		size_t call_region_size;

		call_region_size = regs[0] * PAGE_SIZE;
		hypercall_stubs = malloc(call_region_size, M_XENHVM, M_NOWAIT);
		if (hypercall_stubs == NULL)
			panic("Unable to allocate Xen hypercall region");
	}

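	/*
	 * CPUID leaf base+2 reports the number of hypercall pages in %eax
	 * and the MSR used to install them in %ebx.  Writing a page's
	 * physical address, with the page index encoded in the low bits,
	 * asks the hypervisor to fill that page with hypercall trampolines.
	 */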
	for (i = 0; i < regs[0]; i++)
		wrmsr(regs[1], vtophys(hypercall_stubs + i * PAGE_SIZE) + i);

	return (0);
}

static void
xen_hvm_init_shared_info_page(void)
{
	struct xen_add_to_physmap xatp;

	if (HYPERVISOR_shared_info == NULL) {
		HYPERVISOR_shared_info = malloc(PAGE_SIZE, M_XENHVM, M_NOWAIT);
		if (HYPERVISOR_shared_info == NULL)
			panic("Unable to allocate Xen shared info page");
	}

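	/*
	 * Ask the hypervisor to back the frame at this GPFN with its shared
	 * info page.  The kernel virtual address is unchanged, but the
	 * memory behind it is now provided by the hypervisor.
	 */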
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = vtophys(HYPERVISOR_shared_info) >> PAGE_SHIFT;
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		panic("HYPERVISOR_memory_op failed");
}

/*
 * Tell the hypervisor how to contact us for event channel callbacks.
 */
void
xen_hvm_set_callback(device_t dev)
{
	struct xen_hvm_param xhp;
	int irq;

	if (xen_vector_callback_enabled)
		return;

	xhp.domid = DOMID_SELF;
	xhp.index = HVM_PARAM_CALLBACK_IRQ;
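	/*
	 * Prefer a direct IDT vector callback: it can be delivered on any
	 * vCPU without a round trip through the emulated interrupt
	 * controller.  If the hypervisor lacks support, fall back to a GSI
	 * or PCI INTx interrupt routed through the xenpci device.
	 */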
	if (xen_feature(XENFEAT_hvm_callback_vector) != 0) {
		int error;

		xhp.value = HVM_CALLBACK_VECTOR(IDT_EVTCHN);
		error = HYPERVISOR_hvm_op(HVMOP_set_param, &xhp);
		if (error == 0) {
			xen_vector_callback_enabled = 1;
			return;
		}
		printf("Xen HVM callback vector registration failed (%d). "
		    "Falling back to emulated device interrupt\n", error);
	}
	xen_vector_callback_enabled = 0;
	if (dev == NULL) {
		/*
		 * Called from early boot or resume.
		 * xenpci will invoke us again later.
		 */
		return;
	}

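	/*
	 * IRQs below 16 can be named directly as ISA-style GSIs; higher
	 * ones are identified by the emulated PCI device's slot and INTx
	 * pin instead.
	 */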
	irq = pci_get_irq(dev);
	if (irq < 16) {
		xhp.value = HVM_CALLBACK_GSI(irq);
	} else {
		u_int slot;
		u_int pin;

		slot = pci_get_slot(dev);
		pin = pci_get_intpin(dev) - 1;
		xhp.value = HVM_CALLBACK_PCI_INTX(slot, pin);
	}

	if (HYPERVISOR_hvm_op(HVMOP_set_param, &xhp) != 0)
		panic("Can't set evtchn callback");
}

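/*
 * QEMU's PV device unplug protocol: reading the magic I/O port returns
 * XMI_MAGIC when the device model supports unplugging, and writing a mask
 * of XMI_UNPLUG_* bits removes the matching emulated devices so that the
 * PV disk and network drivers can attach without conflicting with them.
 */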
#define	XEN_MAGIC_IOPORT 0x10
enum {
	XMI_MAGIC			 = 0x49d2,
	XMI_UNPLUG_IDE_DISKS		 = 0x01,
	XMI_UNPLUG_NICS			 = 0x02,
	XMI_UNPLUG_IDE_EXCEPT_PRI_MASTER = 0x04
};

static void
xen_hvm_disable_emulated_devices(void)
{
	if (inw(XEN_MAGIC_IOPORT) != XMI_MAGIC)
		return;

	if (bootverbose)
		printf("XEN: Disabling emulated block and network devices\n");
	outw(XEN_MAGIC_IOPORT, XMI_UNPLUG_IDE_DISKS|XMI_UNPLUG_NICS);
}

static void
xen_hvm_init(enum xen_hvm_init_type init_type)
{
	int error;
	int i;

	if (init_type == XEN_HVM_INIT_CANCELLED_SUSPEND)
		return;

	error = xen_hvm_init_hypercall_stubs();

	switch (init_type) {
	case XEN_HVM_INIT_COLD:
		if (error != 0)
			return;

		setup_xen_features();
		cpu_ops = xen_hvm_cpu_ops;
		vm_guest = VM_GUEST_XEN;
		break;
	case XEN_HVM_INIT_RESUME:
		if (error != 0)
			panic("Unable to init Xen hypercall stubs on resume");

		/* Clear stale vcpu_info. */
		CPU_FOREACH(i)
			DPCPU_ID_SET(i, vcpu_info, NULL);
		break;
	default:
		panic("Unsupported HVM initialization type");
	}

	xen_vector_callback_enabled = 0;
	xen_domain_type = XEN_HVM_DOMAIN;
	xen_hvm_init_shared_info_page();
	xen_hvm_set_callback(NULL);
	xen_hvm_disable_emulated_devices();
}

void
xen_hvm_suspend(void)
{
}

void
xen_hvm_resume(bool suspend_cancelled)
{

	xen_hvm_init(suspend_cancelled ?
	    XEN_HVM_INIT_CANCELLED_SUSPEND : XEN_HVM_INIT_RESUME);

	/* Register vcpu_info area for CPU#0. */
	xen_hvm_cpu_init();
}

static void
xen_hvm_sysinit(void *arg __unused)
{
	xen_hvm_init(XEN_HVM_INIT_COLD);
}

static void
xen_set_vcpu_id(void)
{
	struct pcpu *pc;
	int i;

	/*
	 * Set vcpu_id to acpi_id: for HVM guests the toolstack assigns
	 * ACPI processor IDs that match the Xen vCPU IDs.
	 */
	CPU_FOREACH(i) {
		pc = pcpu_find(i);
		pc->pc_vcpu_id = pc->pc_acpi_id;
		if (bootverbose)
			printf("XEN: CPU %u has VCPU ID %u\n",
			       i, pc->pc_vcpu_id);
	}
}

static void
xen_hvm_cpu_init(void)
{
	struct vcpu_register_vcpu_info info;
	struct vcpu_info *vcpu_info;
	int cpu, rc;

	if (!xen_domain())
		return;

	if (DPCPU_GET(vcpu_info) != NULL) {
		/*
		 * vcpu_info is already set.  We're resuming
		 * from a failed migration and our pre-suspend
		 * configuration is still valid.
		 */
		return;
	}

	vcpu_info = DPCPU_PTR(vcpu_local_info);
	cpu = PCPU_GET(vcpu_id);
	info.mfn = vtophys(vcpu_info) >> PAGE_SHIFT;
	info.offset = vtophys(vcpu_info) - trunc_page(vtophys(vcpu_info));

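	/*
	 * Try to place this vCPU's vcpu_info in our own per-CPU memory.  If
	 * the hypervisor refuses (e.g. it predates VCPUOP_register_vcpu_info),
	 * fall back to the fixed array in the shared info page, which only
	 * has slots for the first 32 vCPUs.
	 */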
	rc = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
	if (rc != 0)
		DPCPU_SET(vcpu_info, &HYPERVISOR_shared_info->vcpu_info[cpu]);
	else
		DPCPU_SET(vcpu_info, vcpu_info);
}

SYSINIT(xen_hvm_init, SI_SUB_HYPERVISOR, SI_ORDER_FIRST, xen_hvm_sysinit, NULL);
#ifdef SMP
SYSINIT(xen_setup_cpus, SI_SUB_SMP, SI_ORDER_FIRST, xen_setup_cpus, NULL);
#endif
SYSINIT(xen_hvm_cpu_init, SI_SUB_INTR, SI_ORDER_FIRST, xen_hvm_cpu_init, NULL);
SYSINIT(xen_set_vcpu_id, SI_SUB_CPU, SI_ORDER_ANY, xen_set_vcpu_id, NULL);