xref: /freebsd/sys/x86/xen/xen_arch_intr.c (revision 783d3ff6d7fae619db8a7990b8a6387de0c677b5)
/*-
 * SPDX-License-Identifier: MIT OR GPL-2.0-only
 *
 * Copyright © 2015 Julien Grall
 * Copyright © 2013 Spectra Logic Corporation
 * Copyright © 2018 John Baldwin/The FreeBSD Foundation
 * Copyright © 2019 Roger Pau Monné/Citrix Systems R&D
 * Copyright © 2021 Elliott Mitchell
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/interrupt.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/stddef.h>

#include <xen/xen-os.h>
#include <xen/xen_intr.h>
#include <machine/xen/arch-intr.h>

#include <x86/apicvar.h>

/************************ Xen x86 interrupt interface ************************/

/*
 * Per-CPU pointers to the Xen upcall interrupt counters
 */
DPCPU_DEFINE_STATIC(u_long *, pintrcnt);

static void
xen_intrcnt_init(void *dummy __unused)
{
	unsigned int i;

	if (!xen_domain())
		return;

	CPU_FOREACH(i) {
		char buf[MAXCOMLEN + 1];

		snprintf(buf, sizeof(buf), "cpu%d:xen", i);
		intrcnt_add(buf, DPCPU_ID_PTR(i, pintrcnt));
	}
}
SYSINIT(xen_intrcnt_init, SI_SUB_INTR, SI_ORDER_MIDDLE, xen_intrcnt_init, NULL);

/*
 * Transition point from assembly, called from
 * sys/{amd64/amd64|i386/i386}/apic_vector.S
 */
extern void xen_arch_intr_handle_upcall(struct trapframe *);
void
xen_arch_intr_handle_upcall(struct trapframe *trap_frame)
{
	struct trapframe *old;

	/*
	 * Disable preemption in order to always check and fire events
	 * on the right vCPU
	 */
	critical_enter();

	++*DPCPU_GET(pintrcnt);

	++curthread->td_intr_nesting_level;
	old = curthread->td_intr_frame;
	curthread->td_intr_frame = trap_frame;

	xen_intr_handle_upcall(NULL);

	curthread->td_intr_frame = old;
	--curthread->td_intr_nesting_level;

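	/*
	 * Some upcall delivery methods inject the event through the local
	 * APIC and therefore require an explicit end-of-interrupt;
	 * xen_evtchn_needs_ack is set when that is the case.
	 */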
	if (xen_evtchn_needs_ack)
		lapic_eoi();

	critical_exit();
}

/******************************** EVTCHN PIC *********************************/

static MALLOC_DEFINE(M_XENINTR, "xen_intr", "Xen Interrupt Services");

/*
 * Lock for x86-related structures.  Notably, modifying
 * xen_intr_auto_vector_count and allocating interrupts require this lock to
 * be held.
 */
static struct mtx	xen_intr_x86_lock;

static u_int		first_evtchn_irq;

static u_int		xen_intr_auto_vector_count;

/*
 * List of released isrcs.  This is meant to overlay struct xenisrc, with
 * only the xen_arch_isrc_t portion being preserved; everything else can be
 * wiped.
 */
struct avail_list {
	xen_arch_isrc_t preserve;
	SLIST_ENTRY(avail_list) free;
};
static SLIST_HEAD(free, avail_list) avail_list =
    SLIST_HEAD_INITIALIZER(avail_list);
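
/*
 * The recycling works by reinterpreting the storage of a released xenisrc
 * as a struct avail_list entry: the leading xen_arch_isrc_t (the registered
 * intsrc and its vector) survives, while the rest of the xenisrc is treated
 * as scratch space for the SLIST linkage.  The _Static_asserts in
 * xen_arch_intr_release() verify that this overlay is safe.
 */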

void
xen_intr_alloc_irqs(void)
{

	if (num_io_irqs > UINT_MAX - NR_EVENT_CHANNELS)
		panic("IRQ allocation overflow (num_msi_irqs too high?)");
	first_evtchn_irq = num_io_irqs;
	num_io_irqs += NR_EVENT_CHANNELS;
}
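
/*
 * Example with illustrative values only: if num_io_irqs is 64 when the
 * function above runs and NR_EVENT_CHANNELS is 4096, then event-channel
 * interrupt sources are numbered 64 through 4159 and num_io_irqs becomes
 * 4160.  The actual values depend on the platform configuration.
 */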

static void
xen_intr_pic_enable_source(struct intsrc *isrc)
{

	_Static_assert(offsetof(struct xenisrc, xi_arch.intsrc) == 0,
	    "xi_arch MUST be at top of xenisrc for x86");
	xen_intr_enable_source((struct xenisrc *)isrc);
}

/*
 * Mask an interrupt source.
 *
 * \param isrc  The interrupt source to mask.
 * \param eoi   Whether to also perform an end-of-interrupt; unused, as
 *              event channels need no EOI.
 */
static void
xen_intr_pic_disable_source(struct intsrc *isrc, int eoi)
{

	_Static_assert(offsetof(struct xenisrc, xi_arch.intsrc) == 0,
	    "xi_arch MUST be at top of xenisrc for x86");
	xen_intr_disable_source((struct xenisrc *)isrc);
}

static void
xen_intr_pic_eoi_source(struct intsrc *isrc)
{

	/* Nothing to do on end-of-interrupt */
}

static void
xen_intr_pic_enable_intr(struct intsrc *isrc)
{

	_Static_assert(offsetof(struct xenisrc, xi_arch.intsrc) == 0,
	    "xi_arch MUST be at top of xenisrc for x86");
	xen_intr_enable_intr((struct xenisrc *)isrc);
}

static void
xen_intr_pic_disable_intr(struct intsrc *isrc)
{

	_Static_assert(offsetof(struct xenisrc, xi_arch.intsrc) == 0,
	    "xi_arch MUST be at top of xenisrc for x86");
	xen_intr_disable_intr((struct xenisrc *)isrc);
}

/**
 * Determine the global interrupt vector number for
 * a Xen interrupt source.
 *
 * \param isrc  The interrupt source to query.
 *
 * \return  The vector number corresponding to the given interrupt source.
 */
static int
xen_intr_pic_vector(struct intsrc *isrc)
{

	_Static_assert(offsetof(struct xenisrc, xi_arch.intsrc) == 0,
	    "xi_arch MUST be at top of xenisrc for x86");

	return (((struct xenisrc *)isrc)->xi_arch.vector);
}

/**
 * Determine whether or not interrupt events are pending on the
 * given interrupt source.
 *
 * \param isrc  The interrupt source to query.
 *
 * \returns  0 if no events are pending, otherwise non-zero.
 */
static int
xen_intr_pic_source_pending(struct intsrc *isrc)
{
	/*
	 * Event channels are edge-triggered and never masked.
	 * There can be no pending events.
	 */
	return (0);
}

/**
 * Prepare this PIC for system suspension.
 */
static void
xen_intr_pic_suspend(struct pic *pic)
{

	/* Nothing to do on suspend */
}

static void
xen_intr_pic_resume(struct pic *pic, bool suspend_cancelled)
{

	if (!suspend_cancelled)
		xen_intr_resume();
}

/**
 * Perform configuration of an interrupt source.
 *
 * \param isrc  The interrupt source to configure.
 * \param trig  Edge or level.
 * \param pol   Active high or low.
 *
 * \returns  0 on success, otherwise an errno.
 */
static int
xen_intr_pic_config_intr(struct intsrc *isrc, enum intr_trigger trig,
    enum intr_polarity pol)
{
	/* Configuration is only possible via the evtchn apis. */
	return (ENODEV);
}

static int
xen_intr_pic_assign_cpu(struct intsrc *isrc, u_int apic_id)
{

	_Static_assert(offsetof(struct xenisrc, xi_arch.intsrc) == 0,
	    "xi_arch MUST be at top of xenisrc for x86");
	return (xen_intr_assign_cpu((struct xenisrc *)isrc,
	    apic_cpuid(apic_id)));
}

/**
 * PIC interface for all event channel port types except physical IRQs.
 */
static struct pic xen_intr_pic = {
	.pic_enable_source  = xen_intr_pic_enable_source,
	.pic_disable_source = xen_intr_pic_disable_source,
	.pic_eoi_source     = xen_intr_pic_eoi_source,
	.pic_enable_intr    = xen_intr_pic_enable_intr,
	.pic_disable_intr   = xen_intr_pic_disable_intr,
	.pic_vector         = xen_intr_pic_vector,
	.pic_source_pending = xen_intr_pic_source_pending,
	.pic_suspend        = xen_intr_pic_suspend,
	.pic_resume         = xen_intr_pic_resume,
	.pic_config_intr    = xen_intr_pic_config_intr,
	.pic_assign_cpu     = xen_intr_pic_assign_cpu,
};

/******************************* ARCH wrappers *******************************/

void
xen_arch_intr_init(void)
{
	int error;

	mtx_init(&xen_intr_x86_lock, "xen-x86-table-lock", NULL, MTX_DEF);

	error = intr_register_pic(&xen_intr_pic);
	if (error != 0)
		panic("%s(): failed registering Xen/x86 PIC, error=%d\n",
		    __func__, error);
}

/**
 * Allocate a Xen interrupt source object.
 *
 * \return  A pointer to a newly allocated Xen interrupt source object, or
 *          NULL if the event-channel vector space is exhausted.
 */
struct xenisrc *
xen_arch_intr_alloc(void)
{
	static int warned;
	struct xenisrc *isrc;
	unsigned int vector;
	int error;

	mtx_lock(&xen_intr_x86_lock);
	isrc = (struct xenisrc *)SLIST_FIRST(&avail_list);
	if (isrc != NULL) {
		SLIST_REMOVE_HEAD(&avail_list, free);
		mtx_unlock(&xen_intr_x86_lock);

		KASSERT(isrc->xi_arch.intsrc.is_pic == &xen_intr_pic,
		    ("interrupt not owned by Xen code?"));

		KASSERT(isrc->xi_arch.intsrc.is_handlers == 0,
		    ("Free evtchn still has handlers"));

		return (isrc);
	}

	if (xen_intr_auto_vector_count >= NR_EVENT_CHANNELS) {
		if (!warned) {
			warned = 1;
			printf("%s: Xen interrupts exhausted.\n", __func__);
		}
		mtx_unlock(&xen_intr_x86_lock);
		return (NULL);
	}

	vector = first_evtchn_irq + xen_intr_auto_vector_count;
	xen_intr_auto_vector_count++;

	KASSERT((intr_lookup_source(vector) == NULL),
	    ("Trying to use an already allocated vector"));

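	/*
	 * The vector number is reserved at this point, so the lock can be
	 * dropped before the sleeping allocation and the registration of
	 * the interrupt source.
	 */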
	mtx_unlock(&xen_intr_x86_lock);
	isrc = malloc(sizeof(*isrc), M_XENINTR, M_WAITOK | M_ZERO);
	isrc->xi_arch.intsrc.is_pic = &xen_intr_pic;
	isrc->xi_arch.vector = vector;
	error = intr_register_source(&isrc->xi_arch.intsrc);
	if (error != 0)
		panic("%s(): failed registering interrupt %u, error=%d\n",
		    __func__, vector, error);

	return (isrc);
}

void
xen_arch_intr_release(struct xenisrc *isrc)
{

	KASSERT(isrc->xi_arch.intsrc.is_handlers == 0,
	    ("Release called, but xenisrc still in use"));

	_Static_assert(sizeof(struct xenisrc) >= sizeof(struct avail_list),
	    "unused structure MUST be no larger than in-use structure");
	_Static_assert(offsetof(struct xenisrc, xi_arch) ==
	    offsetof(struct avail_list, preserve),
	    "unused structure does not properly overlay in-use structure");

	mtx_lock(&xen_intr_x86_lock);
	SLIST_INSERT_HEAD(&avail_list, (struct avail_list *)isrc, free);
	mtx_unlock(&xen_intr_x86_lock);
}
383