xref: /freebsd/sys/x86/xen/hvm.c (revision 97bd480fe38abb5950359b9d864a62037b4ab5f7)
1 /*
2  * Copyright (c) 2008, 2013 Citrix Systems, Inc.
3  * Copyright (c) 2012 Spectra Logic Corporation
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include <sys/param.h>
32 #include <sys/bus.h>
33 #include <sys/kernel.h>
34 #include <sys/malloc.h>
35 #include <sys/proc.h>
36 #include <sys/smp.h>
37 #include <sys/systm.h>
38 
39 #include <vm/vm.h>
40 #include <vm/pmap.h>
41 
42 #include <dev/pci/pcivar.h>
43 
44 #include <machine/cpufunc.h>
45 #include <machine/cpu.h>
46 #include <machine/smp.h>
47 
48 #include <x86/apicreg.h>
49 
50 #include <xen/xen-os.h>
51 #include <xen/features.h>
52 #include <xen/gnttab.h>
53 #include <xen/hypervisor.h>
54 #include <xen/hvm.h>
55 #include <xen/xen_intr.h>
56 
57 #include <xen/interface/hvm/params.h>
58 #include <xen/interface/vcpu.h>
59 
60 /*--------------------------- Forward Declarations ---------------------------*/
61 #ifdef SMP
62 static driver_filter_t xen_smp_rendezvous_action;
63 static driver_filter_t xen_invltlb;
64 static driver_filter_t xen_invlpg;
65 static driver_filter_t xen_invlrng;
66 static driver_filter_t xen_invlcache;
67 #ifdef __i386__
68 static driver_filter_t xen_lazypmap;
69 #endif
70 static driver_filter_t xen_ipi_bitmap_handler;
71 static driver_filter_t xen_cpustop_handler;
72 static driver_filter_t xen_cpususpend_handler;
73 static driver_filter_t xen_cpustophard_handler;
74 #endif
75 static void xen_ipi_vectored(u_int vector, int dest);
76 static void xen_hvm_cpu_resume(void);
77 static void xen_hvm_cpu_init(void);
78 
79 /*---------------------------- Extern Declarations ---------------------------*/
80 #ifdef __i386__
81 extern void pmap_lazyfix_action(void);
82 #endif
83 #ifdef __amd64__
84 extern int pmap_pcid_enabled;
85 #endif
86 
87 /* Variables used by mp_machdep to perform the bitmap IPI */
88 extern volatile u_int cpu_ipi_pending[MAXCPU];
89 
90 /*---------------------------------- Macros ----------------------------------*/
91 #define	IPI_TO_IDX(ipi) ((ipi) - APIC_IPI_INTS)
92 
93 /*-------------------------------- Local Types -------------------------------*/
94 enum xen_hvm_init_type {
95 	XEN_HVM_INIT_COLD,
96 	XEN_HVM_INIT_CANCELLED_SUSPEND,
97 	XEN_HVM_INIT_RESUME
98 };
99 
100 struct xen_ipi_handler
101 {
102 	driver_filter_t	*filter;
103 	const char	*description;
104 };
105 
106 /*-------------------------------- Global Data -------------------------------*/
107 enum xen_domain_type xen_domain_type = XEN_NATIVE;
108 
109 struct cpu_ops xen_hvm_cpu_ops = {
110 	.ipi_vectored	= lapic_ipi_vectored,
111 	.cpu_init	= xen_hvm_cpu_init,
112 	.cpu_resume	= xen_hvm_cpu_resume
113 };
114 
115 static MALLOC_DEFINE(M_XENHVM, "xen_hvm", "Xen HVM PV Support");
116 
117 #ifdef SMP
118 static struct xen_ipi_handler xen_ipis[] =
119 {
120 	[IPI_TO_IDX(IPI_RENDEZVOUS)]	= { xen_smp_rendezvous_action,	"r"   },
121 	[IPI_TO_IDX(IPI_INVLTLB)]	= { xen_invltlb,		"itlb"},
122 	[IPI_TO_IDX(IPI_INVLPG)]	= { xen_invlpg,			"ipg" },
123 	[IPI_TO_IDX(IPI_INVLRNG)]	= { xen_invlrng,		"irg" },
124 	[IPI_TO_IDX(IPI_INVLCACHE)]	= { xen_invlcache,		"ic"  },
125 #ifdef __i386__
126 	[IPI_TO_IDX(IPI_LAZYPMAP)]	= { xen_lazypmap,		"lp"  },
127 #endif
128 	[IPI_TO_IDX(IPI_BITMAP_VECTOR)] = { xen_ipi_bitmap_handler,	"b"   },
129 	[IPI_TO_IDX(IPI_STOP)]		= { xen_cpustop_handler,	"st"  },
130 	[IPI_TO_IDX(IPI_SUSPEND)]	= { xen_cpususpend_handler,	"sp"  },
131 	[IPI_TO_IDX(IPI_STOP_HARD)]	= { xen_cpustophard_handler,	"sth" },
132 };
133 #endif
134 
135 /**
136  * If non-zero, the hypervisor has been configured to use a direct
137  * IDT event callback for interrupt injection.
138  */
139 int xen_vector_callback_enabled;
140 
141 /*------------------------------- Per-CPU Data -------------------------------*/
142 DPCPU_DEFINE(struct vcpu_info, vcpu_local_info);
143 DPCPU_DEFINE(struct vcpu_info *, vcpu_info);
144 #ifdef SMP
145 DPCPU_DEFINE(xen_intr_handle_t, ipi_handle[nitems(xen_ipis)]);
146 #endif
147 
148 /*------------------ Hypervisor Access Shared Memory Regions -----------------*/
149 /** Hypercall table accessed via HYPERVISOR_*_op() methods. */
150 extern char *hypercall_page;
151 shared_info_t *HYPERVISOR_shared_info;
152 start_info_t *HYPERVISOR_start_info;
153 
154 #ifdef SMP
155 /*---------------------------- XEN PV IPI Handlers ---------------------------*/
/*
 * These are C clones of the ASM functions found in apic_vector.s.
 */
/* Dispatch a bitmap IPI; the filter argument is the interrupted trapframe. */
static int
xen_ipi_bitmap_handler(void *arg)
{
	struct trapframe *frame;

	frame = arg;
	ipi_bitmap_handler(*frame);
	return (FILTER_HANDLED);
}
168 
/* IPI_RENDEZVOUS handler: run the native SMP rendezvous action. */
static int
xen_smp_rendezvous_action(void *arg)
{
#ifdef COUNT_IPIS
	(*ipi_rendezvous_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	smp_rendezvous_action();
	return (FILTER_HANDLED);
}
179 
/* IPI_INVLTLB handler: full TLB invalidation (non-PCID path). */
static int
xen_invltlb(void *arg)
{

	invltlb_handler();
	return (FILTER_HANDLED);
}
187 
#ifdef __amd64__
/* IPI_INVLTLB handler used instead of xen_invltlb when PCIDs are enabled. */
static int
xen_invltlb_pcid(void *arg)
{

	invltlb_pcid_handler();
	return (FILTER_HANDLED);
}
#endif
197 
/* IPI_INVLPG handler: single-page TLB invalidation (non-PCID path). */
static int
xen_invlpg(void *arg)
{

	invlpg_handler();
	return (FILTER_HANDLED);
}
205 
#ifdef __amd64__
/* IPI_INVLPG handler used instead of xen_invlpg when PCIDs are enabled. */
static int
xen_invlpg_pcid(void *arg)
{

	invlpg_pcid_handler();
	return (FILTER_HANDLED);
}
#endif
215 
/* IPI_INVLRNG handler: invalidate a range of pages from the TLB. */
static int
xen_invlrng(void *arg)
{

	invlrng_handler();
	return (FILTER_HANDLED);
}
223 
/* IPI_INVLCACHE handler: cache invalidation. */
static int
xen_invlcache(void *arg)
{

	invlcache_handler();
	return (FILTER_HANDLED);
}
231 
#ifdef __i386__
/* IPI_LAZYPMAP handler (i386 only): lazy pmap deactivation. */
static int
xen_lazypmap(void *arg)
{

	pmap_lazyfix_action();
	return (FILTER_HANDLED);
}
#endif
241 
/* IPI_STOP handler: park this CPU via the native stop logic. */
static int
xen_cpustop_handler(void *arg)
{

	cpustop_handler();
	return (FILTER_HANDLED);
}
249 
/* IPI_SUSPEND handler: suspend this CPU via the native suspend logic. */
static int
xen_cpususpend_handler(void *arg)
{

	cpususpend_handler();
	return (FILTER_HANDLED);
}
257 
/* IPI_STOP_HARD handler: reuses the NMI stop logic. */
static int
xen_cpustophard_handler(void *arg)
{

	ipi_nmi_handler();
	return (FILTER_HANDLED);
}
265 
266 /* Xen PV IPI sender */
267 static void
268 xen_ipi_vectored(u_int vector, int dest)
269 {
270 	xen_intr_handle_t *ipi_handle;
271 	int ipi_idx, to_cpu, self;
272 
273 	ipi_idx = IPI_TO_IDX(vector);
274 	if (ipi_idx > nitems(xen_ipis))
275 		panic("IPI out of range");
276 
277 	switch(dest) {
278 	case APIC_IPI_DEST_SELF:
279 		ipi_handle = DPCPU_GET(ipi_handle);
280 		xen_intr_signal(ipi_handle[ipi_idx]);
281 		break;
282 	case APIC_IPI_DEST_ALL:
283 		CPU_FOREACH(to_cpu) {
284 			ipi_handle = DPCPU_ID_GET(to_cpu, ipi_handle);
285 			xen_intr_signal(ipi_handle[ipi_idx]);
286 		}
287 		break;
288 	case APIC_IPI_DEST_OTHERS:
289 		self = PCPU_GET(cpuid);
290 		CPU_FOREACH(to_cpu) {
291 			if (to_cpu != self) {
292 				ipi_handle = DPCPU_ID_GET(to_cpu, ipi_handle);
293 				xen_intr_signal(ipi_handle[ipi_idx]);
294 			}
295 		}
296 		break;
297 	default:
298 		to_cpu = apic_cpuid(dest);
299 		ipi_handle = DPCPU_ID_GET(to_cpu, ipi_handle);
300 		xen_intr_signal(ipi_handle[ipi_idx]);
301 		break;
302 	}
303 }
304 
305 /* XEN diverged cpu operations */
/*
 * Per-CPU resume hook (installed as cpu_ops.cpu_resume).
 */
static void
xen_hvm_cpu_resume(void)
{
	u_int cpuid = PCPU_GET(cpuid);

	/*
	 * Reset pending bitmap IPIs, because Xen doesn't preserve pending
	 * event channels on migration.
	 */
	cpu_ipi_pending[cpuid] = 0;

	/* register vcpu_info area */
	xen_hvm_cpu_init();
}
320 
/*
 * Bind one event channel per populated xen_ipis[] entry on 'cpu',
 * storing the handles in that CPU's DPCPU ipi_handle[] array.
 * Panics if a port cannot be allocated, since IPIs are required for
 * SMP operation.
 */
static void
xen_cpu_ipi_init(int cpu)
{
	xen_intr_handle_t *ipi_handle;
	const struct xen_ipi_handler *ipi;
	device_t dev;
	int idx, rc;

	ipi_handle = DPCPU_ID_GET(cpu, ipi_handle);
	dev = pcpu_find(cpu)->pc_device;
	KASSERT((dev != NULL), ("NULL pcpu device_t"));

	for (ipi = xen_ipis, idx = 0; idx < nitems(xen_ipis); ipi++, idx++) {

		/* Gaps in the sparse table (e.g. arch-specific IPIs). */
		if (ipi->filter == NULL) {
			ipi_handle[idx] = NULL;
			continue;
		}

		rc = xen_intr_alloc_and_bind_ipi(dev, cpu, ipi->filter,
		    INTR_TYPE_TTY, &ipi_handle[idx]);
		if (rc != 0)
			panic("Unable to allocate a XEN IPI port");
		xen_intr_describe(ipi_handle[idx], "%s", ipi->description);
	}
}
347 
/*
 * Install the Xen PV IPI infrastructure: substitute PCID-aware TLB
 * handlers when PCID is in use (amd64), bind per-CPU event channels for
 * every IPI, and switch cpu_ops to the event-channel based IPI sender.
 * Runs from a SYSINIT before the APs are launched.
 */
static void
xen_setup_cpus(void)
{
	int i;

	/* Without the vector callback we cannot use event-channel IPIs. */
	if (!xen_vector_callback_enabled)
		return;

#ifdef __amd64__
	if (pmap_pcid_enabled) {
		xen_ipis[IPI_TO_IDX(IPI_INVLTLB)].filter = xen_invltlb_pcid;
		xen_ipis[IPI_TO_IDX(IPI_INVLPG)].filter = xen_invlpg_pcid;
	}
#endif
	CPU_FOREACH(i)
		xen_cpu_ipi_init(i);

	/* Set the xen pv ipi ops to replace the native ones */
	cpu_ops.ipi_vectored = xen_ipi_vectored;
}
368 #endif
369 
370 /*---------------------- XEN Hypervisor Probe and Setup ----------------------*/
371 static uint32_t
372 xen_hvm_cpuid_base(void)
373 {
374 	uint32_t base, regs[4];
375 
376 	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
377 		do_cpuid(base, regs);
378 		if (!memcmp("XenVMMXenVMM", &regs[1], 12)
379 		    && (regs[0] - base) >= 2)
380 			return (base);
381 	}
382 	return (0);
383 }
384 
/*
 * Allocate and fill in the hypercall page.
 *
 * Returns 0 on success, ENXIO if the Xen CPUID signature is not found.
 */
static int
xen_hvm_init_hypercall_stubs(enum xen_hvm_init_type init_type)
{
	uint32_t base, regs[4];
	int i;

	if (xen_pv_domain()) {
		/* hypercall page is already set in the PV case */
		return (0);
	}

	base = xen_hvm_cpuid_base();
	if (base == 0)
		return (ENXIO);

	/* Only announce the hypervisor version on a cold boot. */
	if (init_type == XEN_HVM_INIT_COLD) {
		do_cpuid(base + 1, regs);
		printf("XEN: Hypervisor version %d.%d detected.\n",
		    regs[0] >> 16, regs[0] & 0xffff);
	}

	/*
	 * Find the hypercall pages: leaf base+2 yields the page count in
	 * regs[0] and the registration MSR in regs[1]; writing the page's
	 * physical address plus the page index asks Xen to fill it in.
	 *
	 * NOTE(review): hypercall_page is declared 'char *' but is really a
	 * page-aligned asm/linker symbol, so '&hypercall_page' is the page's
	 * address — the extra '&' is intentional.  However, the '+ i *
	 * PAGE_SIZE' arithmetic is done on a 'char **' and therefore scales
	 * by sizeof(char *), not bytes; presumably harmless because Xen
	 * advertises a single hypercall page (i == 0) — confirm.
	 */
	do_cpuid(base + 2, regs);

	for (i = 0; i < regs[0]; i++)
		wrmsr(regs[1], vtophys(&hypercall_page + i * PAGE_SIZE) + i);

	return (0);
}
419 
/*
 * Allocate (once) a page for the hypervisor shared info area and ask
 * Xen to back it.  Panics on failure since the shared info page is
 * required for event channel operation.
 */
static void
xen_hvm_init_shared_info_page(void)
{
	struct xen_add_to_physmap xatp;

	if (xen_pv_domain()) {
		/*
		 * Already setup in the PV case, shared_info is passed inside
		 * of the start_info struct at start of day.
		 */
		return;
	}

	/* Reuse the existing page across resume cycles. */
	if (HYPERVISOR_shared_info == NULL) {
		/*
		 * NOTE(review): relies on kernel malloc(PAGE_SIZE) returning
		 * page-aligned storage (true for power-of-two sizes per
		 * malloc(9)).
		 */
		HYPERVISOR_shared_info = malloc(PAGE_SIZE, M_XENHVM, M_NOWAIT);
		if (HYPERVISOR_shared_info == NULL)
			panic("Unable to allocate Xen shared info page");
	}

	/* Ask Xen to map the shared_info page at this guest frame. */
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = vtophys(HYPERVISOR_shared_info) >> PAGE_SHIFT;
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		panic("HYPERVISOR_memory_op failed");
}
446 
/*
 * Tell the hypervisor how to contact us for event channel callbacks.
 *
 * Preference order: the HVM vector callback (injected directly through
 * the IDT), then a GSI or PCI INTx interrupt routed through 'dev' (the
 * xenpci device).  'dev' may be NULL when called before xenpci attaches
 * (early boot or resume); xenpci calls back in again later.
 */
void
xen_hvm_set_callback(device_t dev)
{
	struct xen_hvm_param xhp;
	int irq;

	/* A vector callback is already registered; nothing to do. */
	if (xen_vector_callback_enabled)
		return;

	xhp.domid = DOMID_SELF;
	xhp.index = HVM_PARAM_CALLBACK_IRQ;
	if (xen_feature(XENFEAT_hvm_callback_vector) != 0) {
		int error;

		xhp.value = HVM_CALLBACK_VECTOR(IDT_EVTCHN);
		error = HYPERVISOR_hvm_op(HVMOP_set_param, &xhp);
		if (error == 0) {
			xen_vector_callback_enabled = 1;
			return;
		}
		printf("Xen HVM callback vector registration failed (%d). "
		    "Falling back to emulated device interrupt\n", error);
	}
	xen_vector_callback_enabled = 0;
	if (dev == NULL) {
		/*
		 * Called from early boot or resume.
		 * xenpci will invoke us again later.
		 */
		return;
	}

	irq = pci_get_irq(dev);
	if (irq < 16) {
		/* ISA-range IRQ: register the callback as a GSI. */
		xhp.value = HVM_CALLBACK_GSI(irq);
	} else {
		u_int slot;
		u_int pin;

		/* Otherwise identify the callback by PCI slot and INTx pin. */
		slot = pci_get_slot(dev);
		pin = pci_get_intpin(dev) - 1;
		xhp.value = HVM_CALLBACK_PCI_INTX(slot, pin);
	}

	if (HYPERVISOR_hvm_op(HVMOP_set_param, &xhp) != 0)
		panic("Can't set evtchn callback");
}
497 
498 #define	XEN_MAGIC_IOPORT 0x10
499 enum {
500 	XMI_MAGIC			 = 0x49d2,
501 	XMI_UNPLUG_IDE_DISKS		 = 0x01,
502 	XMI_UNPLUG_NICS			 = 0x02,
503 	XMI_UNPLUG_IDE_EXCEPT_PRI_MASTER = 0x04
504 };
505 
506 static void
507 xen_hvm_disable_emulated_devices(void)
508 {
509 
510 	if (xen_pv_domain()) {
511 		/*
512 		 * No emulated devices in the PV case, so no need to unplug
513 		 * anything.
514 		 */
515 		return;
516 	}
517 
518 	if (inw(XEN_MAGIC_IOPORT) != XMI_MAGIC)
519 		return;
520 
521 	if (bootverbose)
522 		printf("XEN: Disabling emulated block and network devices\n");
523 	outw(XEN_MAGIC_IOPORT, XMI_UNPLUG_IDE_DISKS|XMI_UNPLUG_NICS);
524 }
525 
/*
 * Main HVM/PVH initialization, shared between cold boot (via SYSINIT)
 * and resume: hypercall stubs, guest type, cpu_ops, event channel
 * callback, shared info page, and emulated-device unplug.
 */
static void
xen_hvm_init(enum xen_hvm_init_type init_type)
{
	int error;
	int i;

	/* A cancelled suspend tore nothing down; keep existing state. */
	if (init_type == XEN_HVM_INIT_CANCELLED_SUSPEND)
		return;

	error = xen_hvm_init_hypercall_stubs(init_type);

	switch (init_type) {
	case XEN_HVM_INIT_COLD:
		/* Not running on Xen (or probe failed): bail quietly. */
		if (error != 0)
			return;

		/*
		 * If xen_domain_type is not set at this point
		 * it means we are inside a (PV)HVM guest, because
		 * for PVH the guest type is set much earlier
		 * (see hammer_time_xen).
		 */
		if (!xen_domain()) {
			xen_domain_type = XEN_HVM_DOMAIN;
			vm_guest = VM_GUEST_XEN;
		}

		setup_xen_features();
		cpu_ops = xen_hvm_cpu_ops;
		break;
	case XEN_HVM_INIT_RESUME:
		if (error != 0)
			panic("Unable to init Xen hypercall stubs on resume");

		/* Clear stale vcpu_info. */
		CPU_FOREACH(i)
			DPCPU_ID_SET(i, vcpu_info, NULL);
		break;
	default:
		panic("Unsupported HVM initialization type");
	}

	/* Re-register the event channel callback from scratch. */
	xen_vector_callback_enabled = 0;
	xen_hvm_set_callback(NULL);

	/*
	 * On (PV)HVM domains we need to request the hypervisor to
	 * fill the shared info page; for PVH guests the shared_info page
	 * is passed inside the start_info struct and is already set, so
	 * these functions are no-ops.
	 */
	xen_hvm_init_shared_info_page();
	xen_hvm_disable_emulated_devices();
}
580 
/*
 * System suspend hook: nothing needs to be torn down here — all
 * re-initialization is handled in xen_hvm_resume().
 */
void
xen_hvm_suspend(void)
{
}
585 
/*
 * System resume hook.  A cancelled suspend keeps all pre-suspend state;
 * a real resume re-runs HVM initialization and re-registers the BSP's
 * vcpu_info (APs redo theirs via the cpu_resume hook).
 */
void
xen_hvm_resume(bool suspend_cancelled)
{

	xen_hvm_init(suspend_cancelled ?
	    XEN_HVM_INIT_CANCELLED_SUSPEND : XEN_HVM_INIT_RESUME);

	/* Register vcpu_info area for CPU#0. */
	xen_hvm_cpu_init();
}
596 
/* SYSINIT wrapper: cold-boot entry point for xen_hvm_init(). */
static void
xen_hvm_sysinit(void *arg __unused)
{
	xen_hvm_init(XEN_HVM_INIT_COLD);
}
602 
603 static void
604 xen_set_vcpu_id(void)
605 {
606 	struct pcpu *pc;
607 	int i;
608 
609 	if (!xen_hvm_domain())
610 		return;
611 
612 	/* Set vcpu_id to acpi_id */
613 	CPU_FOREACH(i) {
614 		pc = pcpu_find(i);
615 		pc->pc_vcpu_id = pc->pc_acpi_id;
616 		if (bootverbose)
617 			printf("XEN: CPU %u has VCPU ID %u\n",
618 			       i, pc->pc_vcpu_id);
619 	}
620 }
621 
/*
 * Register this CPU's per-CPU vcpu_info area with the hypervisor,
 * falling back to the legacy slot inside the shared info page if the
 * hypercall is unsupported.  Called for each CPU at boot and again
 * after migration/resume.
 */
static void
xen_hvm_cpu_init(void)
{
	struct vcpu_register_vcpu_info info;
	struct vcpu_info *vcpu_info;
	int cpu, rc;

	if (!xen_domain())
		return;

	if (DPCPU_GET(vcpu_info) != NULL) {
		/*
		 * vcpu_info is already set.  We're resuming
		 * from a failed migration and our pre-suspend
		 * configuration is still valid.
		 */
		return;
	}

	/* Describe the per-CPU area by its frame number and page offset. */
	vcpu_info = DPCPU_PTR(vcpu_local_info);
	cpu = PCPU_GET(vcpu_id);
	info.mfn = vtophys(vcpu_info) >> PAGE_SHIFT;
	info.offset = vtophys(vcpu_info) - trunc_page(vtophys(vcpu_info));

	rc = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
	if (rc != 0)
		/* Fall back to the fixed array in the shared info page. */
		DPCPU_SET(vcpu_info, &HYPERVISOR_shared_info->vcpu_info[cpu]);
	else
		DPCPU_SET(vcpu_info, vcpu_info);
}
652 
/* Probe the hypervisor and set up hypercalls as early as possible. */
SYSINIT(xen_hvm_init, SI_SUB_HYPERVISOR, SI_ORDER_FIRST, xen_hvm_sysinit, NULL);
#ifdef SMP
/* We need to set up IPIs before APs are started */
SYSINIT(xen_setup_cpus, SI_SUB_SMP-1, SI_ORDER_FIRST, xen_setup_cpus, NULL);
#endif
/* Register the BSP's vcpu_info when interrupt setup begins. */
SYSINIT(xen_hvm_cpu_init, SI_SUB_INTR, SI_ORDER_FIRST, xen_hvm_cpu_init, NULL);
/* Set VCPU IDs after CPU enumeration (SI_SUB_CPU). */
SYSINIT(xen_set_vcpu_id, SI_SUB_CPU, SI_ORDER_ANY, xen_set_vcpu_id, NULL);
660