xref: /freebsd/sys/x86/xen/xen_apic.c (revision f0cf86c075cda435c1481fe59440d6c4bbf855dd)
/*
 * Copyright (c) 2014 Roger Pau Monné <roger.pau@citrix.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/cpufunc.h>
#include <machine/cpu.h>
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/smp.h>

#include <x86/apicreg.h>
#include <x86/apicvar.h>

#include <xen/xen-os.h>
#include <xen/features.h>
#include <xen/gnttab.h>
#include <xen/hypervisor.h>
#include <xen/hvm.h>
#include <xen/xen_intr.h>

#include <contrib/xen/arch-x86/cpuid.h>
#include <contrib/xen/vcpu.h>

/*--------------------------- Forward Declarations ---------------------------*/
static driver_filter_t xen_smp_rendezvous_action;
#ifdef __amd64__
static driver_filter_t xen_invlop;
#else
static driver_filter_t xen_invltlb;
static driver_filter_t xen_invlpg;
static driver_filter_t xen_invlrng;
static driver_filter_t xen_invlcache;
#endif
static driver_filter_t xen_ipi_bitmap_handler;
static driver_filter_t xen_cpustop_handler;
static driver_filter_t xen_cpususpend_handler;
static driver_filter_t xen_ipi_swi_handler;

/*---------------------------------- Macros ----------------------------------*/
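/* Convert an IPI vector number into an index into the xen_ipis[] table. */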
#define	IPI_TO_IDX(ipi) ((ipi) - APIC_IPI_INTS)

/*--------------------------------- Xen IPIs ---------------------------------*/
struct xen_ipi_handler
{
	driver_filter_t	*filter;
	const char	*description;
};

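/*
 * Table of IPI handlers, indexed by IPI_TO_IDX(vector).  Each filter services
 * one IPI type when its event channel fires; the description string is used
 * to name the bound interrupt source.
 */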
static struct xen_ipi_handler xen_ipis[] =
{
	[IPI_TO_IDX(IPI_RENDEZVOUS)]	= { xen_smp_rendezvous_action,	"r"   },
#ifdef __amd64__
	[IPI_TO_IDX(IPI_INVLOP)]	= { xen_invlop,			"itlb"},
#else
	[IPI_TO_IDX(IPI_INVLTLB)]	= { xen_invltlb,		"itlb"},
	[IPI_TO_IDX(IPI_INVLPG)]	= { xen_invlpg,			"ipg" },
	[IPI_TO_IDX(IPI_INVLRNG)]	= { xen_invlrng,		"irg" },
	[IPI_TO_IDX(IPI_INVLCACHE)]	= { xen_invlcache,		"ic"  },
#endif
	[IPI_TO_IDX(IPI_BITMAP_VECTOR)] = { xen_ipi_bitmap_handler,	"b"   },
	[IPI_TO_IDX(IPI_STOP)]		= { xen_cpustop_handler,	"st"  },
	[IPI_TO_IDX(IPI_SUSPEND)]	= { xen_cpususpend_handler,	"sp"  },
	[IPI_TO_IDX(IPI_SWI)]		= { xen_ipi_swi_handler,	"sw"  },
};

/*
 * Save the previous (native) handler as a fallback.  Xen < 4.7 doesn't
 * support VCPUOP_send_nmi for HVM guests, so a fallback is needed in that
 * case:
 *
 * https://lists.freebsd.org/archives/freebsd-xen/2022-January/000032.html
 */
void (*native_ipi_vectored)(u_int, int);

/*------------------------------- Per-CPU Data -------------------------------*/
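/* Per-CPU array of event channel handles, one per entry in xen_ipis[]. */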
DPCPU_DEFINE(xen_intr_handle_t, ipi_handle[nitems(xen_ipis)]);

/*------------------------------- Xen PV APIC --------------------------------*/

#define PCPU_ID_GET(id, field) (pcpu_find(id)->pc_##field)
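/*
 * Send an NMI to the vCPU(s) selected by dest using the VCPUOP_send_nmi
 * hypercall.  Returns 0 on success, or the first error reported by Xen.
 */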
static int
send_nmi(int dest)
{
	unsigned int cpu;
	int rc = 0;

	/*
	 * NMIs are not routed over event channels; they are delivered as on
	 * native hardware, using exception vector #2.  They can be triggered
	 * either through the local APIC or, as done below, with a hypercall
	 * used as a shortcut.
	 */
	switch(dest) {
	case APIC_IPI_DEST_SELF:
		rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, PCPU_GET(vcpu_id), NULL);
		break;
	case APIC_IPI_DEST_ALL:
		CPU_FOREACH(cpu) {
			rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi,
			    PCPU_ID_GET(cpu, vcpu_id), NULL);
			if (rc != 0)
				break;
		}
		break;
	case APIC_IPI_DEST_OTHERS:
		CPU_FOREACH(cpu) {
			if (cpu != PCPU_GET(cpuid)) {
				rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi,
				    PCPU_ID_GET(cpu, vcpu_id), NULL);
				if (rc != 0)
					break;
			}
		}
		break;
	default:
		rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi,
		    PCPU_ID_GET(apic_cpuid(dest), vcpu_id), NULL);
		break;
	}

	return rc;
}
#undef PCPU_ID_GET

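/*
 * Replacement for the native ipi_vectored handler.  NMI-class vectors are
 * sent with send_nmi() (falling back to the native handler on failure);
 * all other IPIs are signaled over the per-CPU event channels.
 */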
static void
xen_pv_lapic_ipi_vectored(u_int vector, int dest)
{
	xen_intr_handle_t *ipi_handle;
	int ipi_idx, to_cpu, self;
	static bool pvnmi = true;

	if (vector >= IPI_NMI_FIRST) {
		if (pvnmi) {
			int rc = send_nmi(dest);

			if (rc != 0) {
				printf(
    "Sending NMI using hypercall failed (%d), switching to APIC\n", rc);
				pvnmi = false;
				native_ipi_vectored(vector, dest);
			}
		} else
			native_ipi_vectored(vector, dest);

		return;
	}

	ipi_idx = IPI_TO_IDX(vector);
	if (ipi_idx >= nitems(xen_ipis))
		panic("IPI out of range");

	switch(dest) {
	case APIC_IPI_DEST_SELF:
		ipi_handle = DPCPU_GET(ipi_handle);
		xen_intr_signal(ipi_handle[ipi_idx]);
		break;
	case APIC_IPI_DEST_ALL:
		CPU_FOREACH(to_cpu) {
			ipi_handle = DPCPU_ID_GET(to_cpu, ipi_handle);
			xen_intr_signal(ipi_handle[ipi_idx]);
		}
		break;
	case APIC_IPI_DEST_OTHERS:
		self = PCPU_GET(cpuid);
		CPU_FOREACH(to_cpu) {
			if (to_cpu != self) {
				ipi_handle = DPCPU_ID_GET(to_cpu, ipi_handle);
				xen_intr_signal(ipi_handle[ipi_idx]);
			}
		}
		break;
	default:
		to_cpu = apic_cpuid(dest);
		ipi_handle = DPCPU_ID_GET(to_cpu, ipi_handle);
		xen_intr_signal(ipi_handle[ipi_idx]);
		break;
	}
}

/*---------------------------- XEN PV IPI Handlers ---------------------------*/
/*
 * These are C clones of the ASM functions found in apic_vector.
 */
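/*
 * These filters run from the Xen event channel interrupt, so handlers that
 * need the interrupted context fetch it from curthread->td_intr_frame rather
 * than receiving a trap frame as the native IPI vectors do.
 */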
static int
xen_ipi_bitmap_handler(void *arg)
{

	ipi_bitmap_handler(*curthread->td_intr_frame);
	return (FILTER_HANDLED);
}

static int
xen_smp_rendezvous_action(void *arg)
{
#ifdef COUNT_IPIS
	(*ipi_rendezvous_counts[PCPU_GET(cpuid)])++;
#endif /* COUNT_IPIS */

	smp_rendezvous_action();
	return (FILTER_HANDLED);
}

#ifdef __amd64__
static int
xen_invlop(void *arg)
{

	invlop_handler();
	return (FILTER_HANDLED);
}

#else /* __i386__ */

static int
xen_invltlb(void *arg)
{

	invltlb_handler();
	return (FILTER_HANDLED);
}

static int
xen_invlpg(void *arg)
{

	invlpg_handler();
	return (FILTER_HANDLED);
}

static int
xen_invlrng(void *arg)
{

	invlrng_handler();
	return (FILTER_HANDLED);
}

static int
xen_invlcache(void *arg)
{

	invlcache_handler();
	return (FILTER_HANDLED);
}
#endif /* __amd64__ */

static int
xen_cpustop_handler(void *arg)
{

	cpustop_handler();
	return (FILTER_HANDLED);
}

static int
xen_cpususpend_handler(void *arg)
{

	cpususpend_handler();
	return (FILTER_HANDLED);
}

static int
xen_ipi_swi_handler(void *arg)
{

	ipi_swi_handler(*curthread->td_intr_frame);
	return (FILTER_HANDLED);
}

/*----------------------------- XEN PV IPI setup -----------------------------*/
/*
 * These functions are provided outside of the Xen PV APIC implementation so
 * that PVHVM guests can also use PV IPIs without having an actual Xen PV
 * APIC; on PVHVM the LAPIC is emulated by Xen.
 */
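/*
 * Allocate and bind one IPI event channel per xen_ipis[] entry for the given
 * CPU, storing the handles in the CPU's ipi_handle DPCPU array.
 */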
static void
xen_cpu_ipi_init(int cpu)
{
	xen_intr_handle_t *ipi_handle;
	const struct xen_ipi_handler *ipi;
	int idx, rc;

	ipi_handle = DPCPU_ID_GET(cpu, ipi_handle);

	for (ipi = xen_ipis, idx = 0; idx < nitems(xen_ipis); ipi++, idx++) {
		if (ipi->filter == NULL) {
			ipi_handle[idx] = NULL;
			continue;
		}

		rc = xen_intr_alloc_and_bind_ipi(cpu, ipi->filter,
		    INTR_TYPE_TTY, &ipi_handle[idx]);
		if (rc != 0)
			panic("Unable to allocate a XEN IPI port");
		xen_intr_describe(ipi_handle[idx], "%s", ipi->description);
	}
}

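/*
 * SYSINIT hook: bind the PV IPI event channels on every CPU and install the
 * PV IPI handler, unless APIC virtualization is hardware assisted, in which
 * case the (virtualized) local APIC is kept as the IPI mechanism.
 */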
static void
xen_setup_cpus(void)
{
	uint32_t regs[4];
	int i;

	if (!xen_vector_callback_enabled)
		return;

	/*
	 * Check whether the APIC virtualization is hardware assisted, as
	 * that's faster than using event channels because it avoids the VM
	 * exit.
	 */
	KASSERT(hv_base != 0, ("Invalid base Xen CPUID leaf"));
	cpuid_count(hv_base + 4, 0, regs);
	if ((x2apic_mode && (regs[0] & XEN_HVM_CPUID_X2APIC_VIRT)) ||
	    (!x2apic_mode && (regs[0] & XEN_HVM_CPUID_APIC_ACCESS_VIRT)))
		return;

	CPU_FOREACH(i)
		xen_cpu_ipi_init(i);

	/* Set the xen pv ipi ops to replace the native ones */
	native_ipi_vectored = ipi_vectored;
	ipi_vectored = xen_pv_lapic_ipi_vectored;
}

/* Switch to using PV IPIs as soon as the vcpu_id is set. */
SYSINIT(xen_setup_cpus, SI_SUB_SMP, SI_ORDER_SECOND, xen_setup_cpus, NULL);