xref: /freebsd/sys/dev/xen/bus/xen_intr.c (revision 5e2183dab87140a0107f0d8c761d6c01ba9e25cd)
1*5e2183daSJulien Grall /******************************************************************************
2*5e2183daSJulien Grall  * xen_intr.c
3*5e2183daSJulien Grall  *
4*5e2183daSJulien Grall  * Xen event and interrupt services for x86 HVM guests.
5*5e2183daSJulien Grall  *
6*5e2183daSJulien Grall  * Copyright (c) 2002-2005, K A Fraser
7*5e2183daSJulien Grall  * Copyright (c) 2005, Intel Corporation <xiaofeng.ling@intel.com>
8*5e2183daSJulien Grall  * Copyright (c) 2012, Spectra Logic Corporation
9*5e2183daSJulien Grall  * Copyright © 2021-2023, Elliott Mitchell
10*5e2183daSJulien Grall  *
11*5e2183daSJulien Grall  * This file may be distributed separately from the Linux kernel, or
12*5e2183daSJulien Grall  * incorporated into other software packages, subject to the following license:
13*5e2183daSJulien Grall  *
14*5e2183daSJulien Grall  * Permission is hereby granted, free of charge, to any person obtaining a copy
15*5e2183daSJulien Grall  * of this source file (the "Software"), to deal in the Software without
16*5e2183daSJulien Grall  * restriction, including without limitation the rights to use, copy, modify,
17*5e2183daSJulien Grall  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18*5e2183daSJulien Grall  * and to permit persons to whom the Software is furnished to do so, subject to
19*5e2183daSJulien Grall  * the following conditions:
20*5e2183daSJulien Grall  *
21*5e2183daSJulien Grall  * The above copyright notice and this permission notice shall be included in
22*5e2183daSJulien Grall  * all copies or substantial portions of the Software.
23*5e2183daSJulien Grall  *
24*5e2183daSJulien Grall  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25*5e2183daSJulien Grall  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26*5e2183daSJulien Grall  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27*5e2183daSJulien Grall  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28*5e2183daSJulien Grall  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29*5e2183daSJulien Grall  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30*5e2183daSJulien Grall  * IN THE SOFTWARE.
31*5e2183daSJulien Grall  */
32*5e2183daSJulien Grall 
33*5e2183daSJulien Grall #include <sys/cdefs.h>
34*5e2183daSJulien Grall __FBSDID("$FreeBSD$");
35*5e2183daSJulien Grall 
36*5e2183daSJulien Grall #include "opt_ddb.h"
37*5e2183daSJulien Grall 
38*5e2183daSJulien Grall #include <sys/param.h>
39*5e2183daSJulien Grall #include <sys/systm.h>
40*5e2183daSJulien Grall #include <sys/bus.h>
41*5e2183daSJulien Grall #include <sys/kernel.h>
42*5e2183daSJulien Grall #include <sys/limits.h>
43*5e2183daSJulien Grall #include <sys/lock.h>
44*5e2183daSJulien Grall #include <sys/mutex.h>
45*5e2183daSJulien Grall #include <sys/interrupt.h>
46*5e2183daSJulien Grall #include <sys/pcpu.h>
47*5e2183daSJulien Grall #include <sys/proc.h>
48*5e2183daSJulien Grall #include <sys/smp.h>
49*5e2183daSJulien Grall #include <sys/refcount.h>
50*5e2183daSJulien Grall 
51*5e2183daSJulien Grall #include <vm/vm.h>
52*5e2183daSJulien Grall #include <vm/pmap.h>
53*5e2183daSJulien Grall 
54*5e2183daSJulien Grall #include <machine/smp.h>
55*5e2183daSJulien Grall #include <machine/stdarg.h>
56*5e2183daSJulien Grall 
57*5e2183daSJulien Grall #include <xen/xen-os.h>
58*5e2183daSJulien Grall #include <xen/hypervisor.h>
59*5e2183daSJulien Grall #include <xen/xen_intr.h>
60*5e2183daSJulien Grall #include <xen/evtchn/evtchnvar.h>
61*5e2183daSJulien Grall 
62*5e2183daSJulien Grall #include <dev/xen/xenpci/xenpcivar.h>
63*5e2183daSJulien Grall #include <dev/pci/pcivar.h>
64*5e2183daSJulien Grall #include <machine/xen/arch-intr.h>
65*5e2183daSJulien Grall 
66*5e2183daSJulien Grall #ifdef DDB
67*5e2183daSJulien Grall #include <ddb/ddb.h>
68*5e2183daSJulien Grall #endif
69*5e2183daSJulien Grall 
/**
 * Per-cpu event channel processing state.
 */
struct xen_intr_pcpu_data {
	/**
	 * The last event channel bitmap section (level one bit) processed.
	 * This is used to ensure we scan all ports before
	 * servicing an already serviced port again.
	 */
	u_int	last_processed_l1i;

	/**
	 * The last event channel processed within the event channel
	 * bitmap being scanned.
	 */
	u_int	last_processed_l2i;

	/**
	 * A bitmap of ports that can be serviced from this CPU.
	 * A set bit means interrupt handling is enabled.
	 *
	 * Sized as LONG_BIT (sizeof(u_long) * 8) words of LONG_BIT bits
	 * each, matching the two-level (l1i/l2i) scan in
	 * xen_intr_handle_upcall().
	 */
	u_long	evtchn_enabled[sizeof(u_long) * 8];
};
93*5e2183daSJulien Grall 
/*
 * Start the scan at port 0 by initializing the last scanned
 * location as the highest numbered event channel port.
 *
 * evtchn_enabled[] is implicitly zero-filled (static storage);
 * xen_intr_init() enables all channels on CPU#0.
 */
DPCPU_DEFINE_STATIC(struct xen_intr_pcpu_data, xen_intr_pcpu) = {
	.last_processed_l1i = LONG_BIT - 1,
	.last_processed_l2i = LONG_BIT - 1
};
102*5e2183daSJulien Grall 
103*5e2183daSJulien Grall DPCPU_DECLARE(struct vcpu_info *, vcpu_info);
104*5e2183daSJulien Grall 
105*5e2183daSJulien Grall #define	INVALID_EVTCHN		(~(evtchn_port_t)0) /* Invalid event channel */
106*5e2183daSJulien Grall #define	is_valid_evtchn(x)	((uintmax_t)(x) < NR_EVENT_CHANNELS)
107*5e2183daSJulien Grall 
108*5e2183daSJulien Grall /*
109*5e2183daSJulien Grall  * Lock for interrupt core data.
110*5e2183daSJulien Grall  *
111*5e2183daSJulien Grall  * Modifying xen_intr_port_to_isrc[], or isrc->xi_port (implies the former)
112*5e2183daSJulien Grall  * requires this lock be held.  Any time this lock is not held, the condition
 * `!xen_intr_port_to_isrc[i] || (xen_intr_port_to_isrc[i]->xi_port == i)`
 * MUST be true for all values of i which are valid indices of the array.
115*5e2183daSJulien Grall  *
116*5e2183daSJulien Grall  * Acquire/release operations for isrc->xi_refcount require this lock be held.
117*5e2183daSJulien Grall  */
118*5e2183daSJulien Grall static struct mtx	 xen_intr_isrc_lock;
119*5e2183daSJulien Grall static struct xenisrc	*xen_intr_port_to_isrc[NR_EVENT_CHANNELS];
120*5e2183daSJulien Grall 
121*5e2183daSJulien Grall /*------------------------- Private Functions --------------------------------*/
122*5e2183daSJulien Grall 
123*5e2183daSJulien Grall /**
124*5e2183daSJulien Grall  * Retrieve a handle for a Xen interrupt source.
125*5e2183daSJulien Grall  *
126*5e2183daSJulien Grall  * \param isrc  A valid Xen interrupt source structure.
127*5e2183daSJulien Grall  *
128*5e2183daSJulien Grall  * \returns  A handle suitable for use with xen_intr_isrc_from_handle()
129*5e2183daSJulien Grall  *           to retrieve the original Xen interrupt source structure.
130*5e2183daSJulien Grall  */
131*5e2183daSJulien Grall 
132*5e2183daSJulien Grall static inline xen_intr_handle_t
133*5e2183daSJulien Grall xen_intr_handle_from_isrc(struct xenisrc *isrc)
134*5e2183daSJulien Grall {
135*5e2183daSJulien Grall 	return (isrc);
136*5e2183daSJulien Grall }
137*5e2183daSJulien Grall 
138*5e2183daSJulien Grall /**
139*5e2183daSJulien Grall  * Lookup a Xen interrupt source object given an interrupt binding handle.
140*5e2183daSJulien Grall  *
141*5e2183daSJulien Grall  * \param handle  A handle initialized by a previous call to
142*5e2183daSJulien Grall  *                xen_intr_bind_isrc().
143*5e2183daSJulien Grall  *
144*5e2183daSJulien Grall  * \returns  A pointer to the Xen interrupt source object associated
145*5e2183daSJulien Grall  *           with the given interrupt handle.  NULL if no association
146*5e2183daSJulien Grall  *           currently exists.
147*5e2183daSJulien Grall  */
148*5e2183daSJulien Grall static inline struct xenisrc *
149*5e2183daSJulien Grall xen_intr_isrc_from_handle(xen_intr_handle_t handle)
150*5e2183daSJulien Grall {
151*5e2183daSJulien Grall 	return ((struct xenisrc *)handle);
152*5e2183daSJulien Grall }
153*5e2183daSJulien Grall 
154*5e2183daSJulien Grall /**
155*5e2183daSJulien Grall  * Disable signal delivery for an event channel port on the
156*5e2183daSJulien Grall  * specified CPU.
157*5e2183daSJulien Grall  *
158*5e2183daSJulien Grall  * \param port  The event channel port to mask.
159*5e2183daSJulien Grall  *
160*5e2183daSJulien Grall  * This API is used to manage the port<=>CPU binding of event
161*5e2183daSJulien Grall  * channel handlers.
162*5e2183daSJulien Grall  *
163*5e2183daSJulien Grall  * \note  This operation does not preclude reception of an event
164*5e2183daSJulien Grall  *        for this event channel on another CPU.  To mask the
165*5e2183daSJulien Grall  *        event channel globally, use evtchn_mask().
166*5e2183daSJulien Grall  */
167*5e2183daSJulien Grall static inline void
168*5e2183daSJulien Grall evtchn_cpu_mask_port(u_int cpu, evtchn_port_t port)
169*5e2183daSJulien Grall {
170*5e2183daSJulien Grall 	struct xen_intr_pcpu_data *pcpu;
171*5e2183daSJulien Grall 
172*5e2183daSJulien Grall 	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
173*5e2183daSJulien Grall 	xen_clear_bit(port, pcpu->evtchn_enabled);
174*5e2183daSJulien Grall }
175*5e2183daSJulien Grall 
176*5e2183daSJulien Grall /**
177*5e2183daSJulien Grall  * Enable signal delivery for an event channel port on the
178*5e2183daSJulien Grall  * specified CPU.
179*5e2183daSJulien Grall  *
180*5e2183daSJulien Grall  * \param port  The event channel port to unmask.
181*5e2183daSJulien Grall  *
182*5e2183daSJulien Grall  * This API is used to manage the port<=>CPU binding of event
183*5e2183daSJulien Grall  * channel handlers.
184*5e2183daSJulien Grall  *
185*5e2183daSJulien Grall  * \note  This operation does not guarantee that event delivery
186*5e2183daSJulien Grall  *        is enabled for this event channel port.  The port must
187*5e2183daSJulien Grall  *        also be globally enabled.  See evtchn_unmask().
188*5e2183daSJulien Grall  */
189*5e2183daSJulien Grall static inline void
190*5e2183daSJulien Grall evtchn_cpu_unmask_port(u_int cpu, evtchn_port_t port)
191*5e2183daSJulien Grall {
192*5e2183daSJulien Grall 	struct xen_intr_pcpu_data *pcpu;
193*5e2183daSJulien Grall 
194*5e2183daSJulien Grall 	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
195*5e2183daSJulien Grall 	xen_set_bit(port, pcpu->evtchn_enabled);
196*5e2183daSJulien Grall }
197*5e2183daSJulien Grall 
/**
 * Attempt to free an active Xen interrupt source object.
 *
 * \param isrc  The interrupt source object to release.
 *
 * \returns  0 in all cases; the current implementation always releases
 *           the source.  (The historical EBUSY-when-in-use return no
 *           longer occurs here.)
 */
static int
xen_intr_release_isrc(struct xenisrc *isrc)
{

	mtx_lock(&xen_intr_isrc_lock);
	if (is_valid_evtchn(isrc->xi_port)) {
		/* Stop further delivery and discard any pending event. */
		evtchn_mask_port(isrc->xi_port);
		evtchn_clear_port(isrc->xi_port);

		/* Rebind port to CPU 0. */
		evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
		evtchn_cpu_unmask_port(0, isrc->xi_port);

		/* Close the channel only if this end owns it (xi_close). */
		if (isrc->xi_close != 0) {
			struct evtchn_close close = { .port = isrc->xi_port };

			if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
				panic("EVTCHNOP_close failed");
		}

		xen_intr_port_to_isrc[isrc->xi_port] = NULL;
	}
	/* not reachable from xen_intr_port_to_isrc[], unlock */
	mtx_unlock(&xen_intr_isrc_lock);

	xen_arch_intr_release(isrc);
	return (0);
}
233*5e2183daSJulien Grall 
/**
 * Associate an interrupt handler with an already allocated local Xen
 * event channel port.
 *
 * \param isrcp        The returned Xen interrupt object associated with
 *                     the specified local port.
 * \param local_port   The event channel to bind.
 * \param type         The event channel type of local_port.
 * \param intr_owner   The device making this bind request.
 * \param filter       An interrupt filter handler.  Specify NULL
 *                     to always dispatch to the ithread handler.
 * \param handler      An interrupt ithread handler.  Optional (can
 *                     specify NULL) if all necessary event actions
 *                     are performed by filter.
 * \param arg          Argument to present to both filter and handler.
 * \param flags        Interrupt handler flags.  See sys/bus.h.
 * \param port_handlep Pointer to an opaque handle used to manage this
 *                     registration.
 *
 * \returns  0 on success, otherwise an errno.
 */
static int
xen_intr_bind_isrc(struct xenisrc **isrcp, evtchn_port_t local_port,
    enum evtchn_type type, const char *intr_owner, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags,
    xen_intr_handle_t *const port_handlep)
{
	struct xenisrc *isrc;
	int error;

	*isrcp = NULL;
	if (port_handlep == NULL) {
		printf("%s: %s: Bad event handle\n", intr_owner, __func__);
		return (EINVAL);
	}
	*port_handlep = NULL;

	isrc = xen_arch_intr_alloc();
	if (isrc == NULL)
		return (ENOSPC);

	isrc->xi_cookie = NULL;
	isrc->xi_type = type;
	isrc->xi_port = local_port;
	isrc->xi_close = false;
	isrc->xi_cpu = 0;
	refcount_init(&isrc->xi_refcount, 1);
	/* Publishing in xen_intr_port_to_isrc[] requires the isrc lock. */
	mtx_lock(&xen_intr_isrc_lock);
	xen_intr_port_to_isrc[isrc->xi_port] = isrc;
	mtx_unlock(&xen_intr_isrc_lock);

#ifdef SMP
	if (type == EVTCHN_TYPE_PORT) {
		/*
		 * By default all interrupts are assigned to vCPU#0
		 * unless specified otherwise, so shuffle them to balance
		 * the interrupt load.
		 */
		xen_intr_assign_cpu(isrc, xen_arch_intr_next_cpu(isrc));
	}
#endif

	/*
	 * If a filter or handler function is provided, add it to the event.
	 * Otherwise the event channel is left masked and without a handler,
	 * the caller is in charge of setting that up.
	 */
	if (filter != NULL || handler != NULL) {
		error = xen_intr_add_handler(intr_owner, filter, handler, arg,
		    flags, xen_intr_handle_from_isrc(isrc));
		if (error != 0) {
			/* Undo the allocation and table entry above. */
			xen_intr_release_isrc(isrc);
			return (error);
		}
	}

	*isrcp = isrc;
	/* Assign the opaque handler */
	*port_handlep = xen_intr_handle_from_isrc(isrc);
	return (0);
}
315*5e2183daSJulien Grall 
/**
 * Determine the event channel ports at the given section of the
 * event port bitmap which have pending events for the given cpu.
 *
 * \param pcpu  The Xen interrupt pcpu data for the cpu being queried.
 * \param idx   The index of the section of the event channel bitmap to
 *              inspect.
 *
 * \returns  A u_long with bits set for every event channel with pending
 *           events.
 */
static inline u_long
xen_intr_active_ports(const struct xen_intr_pcpu_data *const pcpu,
    const u_int idx)
{
	volatile const shared_info_t *const sh = HYPERVISOR_shared_info;

	/* All three bitmaps must share one layout for the AND below. */
	CTASSERT(sizeof(sh->evtchn_mask[0]) == sizeof(sh->evtchn_pending[0]));
	CTASSERT(sizeof(sh->evtchn_mask[0]) == sizeof(pcpu->evtchn_enabled[0]));
	CTASSERT(sizeof(sh->evtchn_mask) == sizeof(sh->evtchn_pending));
	CTASSERT(sizeof(sh->evtchn_mask) == sizeof(pcpu->evtchn_enabled));
	/* Pending, not globally masked, and enabled on this CPU. */
	return (sh->evtchn_pending[idx]
	      & ~sh->evtchn_mask[idx]
	      & pcpu->evtchn_enabled[idx]);
}
342*5e2183daSJulien Grall 
/**
 * Interrupt handler for processing all Xen event channel events.
 *
 * The trap frame is taken from curthread; the argument exists only to
 * satisfy the filter interface.
 *
 * \param unused  Ignored.
 *
 * \returns  FILTER_HANDLED.
 */
int
xen_intr_handle_upcall(void *unused __unused)
{
	struct trapframe *trap_frame = curthread->td_intr_frame;
	u_int l1i, l2i, port, cpu __diagused;
	u_long masked_l1, masked_l2;
	struct xenisrc *isrc;
	vcpu_info_t *v;
	struct xen_intr_pcpu_data *pc;
	u_long l1, l2;

	/* We must remain on the same vCPU during this function */
	CRITICAL_ASSERT(curthread);

	cpu = PCPU_GET(cpuid);
	pc  = DPCPU_PTR(xen_intr_pcpu);
	v   = DPCPU_GET(vcpu_info);

	if (!xen_has_percpu_evtchn()) {
		KASSERT((cpu == 0), ("Fired PCI event callback on wrong CPU"));
	}

	v->evtchn_upcall_pending = 0;
/* No need for a barrier on x86 -- XCHG is a barrier on x86. */
#if !defined(__amd64__) && !defined(__i386__)
	/* Clear master flag /before/ clearing selector flag. */
	wmb();
#endif
	l1 = atomic_readandclear_long(&v->evtchn_pending_sel);

	/*
	 * Resume scanning where the previous invocation left off so a
	 * busy low-numbered port cannot starve the ports after it.
	 */
	l1i = pc->last_processed_l1i;
	l2i = pc->last_processed_l2i;

	/* l1 is the level-one selector: one bit per bitmap section. */
	while (l1 != 0) {
		l1i = (l1i + 1) % LONG_BIT;
		masked_l1 = l1 & ((~0UL) << l1i);

		if (masked_l1 == 0) {
			/*
			 * if we masked out all events, wrap around
			 * to the beginning.
			 */
			l1i = LONG_BIT - 1;
			l2i = LONG_BIT - 1;
			continue;
		}
		l1i = ffsl(masked_l1) - 1;

		do {
			l2 = xen_intr_active_ports(pc, l1i);

			l2i = (l2i + 1) % LONG_BIT;
			masked_l2 = l2 & ((~0UL) << l2i);

			if (masked_l2 == 0) {
				/* if we masked out all events, move on */
				l2i = LONG_BIT - 1;
				break;
			}
			l2i = ffsl(masked_l2) - 1;

			/* process port */
			port = (l1i * LONG_BIT) + l2i;
			evtchn_clear_port(port);

			isrc = xen_intr_port_to_isrc[port];
			if (__predict_false(isrc == NULL))
				continue;

			/* Make sure we are firing on the right vCPU */
			KASSERT((isrc->xi_cpu == PCPU_GET(cpuid)),
				("Received unexpected event on vCPU#%u, event bound to vCPU#%u",
				PCPU_GET(cpuid), isrc->xi_cpu));

			xen_arch_intr_execute_handlers(isrc, trap_frame);

			/*
			 * If this is the final port processed,
			 * we'll pick up here+1 next time.
			 */
			pc->last_processed_l1i = l1i;
			pc->last_processed_l2i = l2i;

		} while (l2i != LONG_BIT - 1);

		/* Re-read: new events may have arrived during handling. */
		l2 = xen_intr_active_ports(pc, l1i);
		if (l2 == 0) {
			/*
			 * We handled all ports, so we can clear the
			 * selector bit.
			 */
			l1 &= ~(1UL << l1i);
		}
	}

	return (FILTER_HANDLED);
}
445*5e2183daSJulien Grall 
446*5e2183daSJulien Grall static int
447*5e2183daSJulien Grall xen_intr_init(void *dummy __unused)
448*5e2183daSJulien Grall {
449*5e2183daSJulien Grall 	shared_info_t *s = HYPERVISOR_shared_info;
450*5e2183daSJulien Grall 	struct xen_intr_pcpu_data *pcpu;
451*5e2183daSJulien Grall 	int i;
452*5e2183daSJulien Grall 
453*5e2183daSJulien Grall 	if (!xen_domain())
454*5e2183daSJulien Grall 		return (0);
455*5e2183daSJulien Grall 
456*5e2183daSJulien Grall 	_Static_assert(is_valid_evtchn(0),
457*5e2183daSJulien Grall 	    "is_valid_evtchn(0) fails (unused by Xen, but valid by interface");
458*5e2183daSJulien Grall 	_Static_assert(is_valid_evtchn(NR_EVENT_CHANNELS - 1),
459*5e2183daSJulien Grall 	    "is_valid_evtchn(max) fails (is a valid channel)");
460*5e2183daSJulien Grall 	_Static_assert(!is_valid_evtchn(NR_EVENT_CHANNELS),
461*5e2183daSJulien Grall 	    "is_valid_evtchn(>max) fails (NOT a valid channel)");
462*5e2183daSJulien Grall 	_Static_assert(!is_valid_evtchn(~(evtchn_port_t)0),
463*5e2183daSJulien Grall 	    "is_valid_evtchn(maxint) fails (overflow?)");
464*5e2183daSJulien Grall 	_Static_assert(!is_valid_evtchn(INVALID_EVTCHN),
465*5e2183daSJulien Grall 	    "is_valid_evtchn(INVALID_EVTCHN) fails (must be invalid!)");
466*5e2183daSJulien Grall 	_Static_assert(!is_valid_evtchn(-1),
467*5e2183daSJulien Grall 	    "is_valid_evtchn(-1) fails (negative are invalid)");
468*5e2183daSJulien Grall 
469*5e2183daSJulien Grall 	mtx_init(&xen_intr_isrc_lock, "xen-irq-lock", NULL, MTX_DEF);
470*5e2183daSJulien Grall 
471*5e2183daSJulien Grall 	/*
472*5e2183daSJulien Grall 	 * Set the per-cpu mask of CPU#0 to enable all, since by default all
473*5e2183daSJulien Grall 	 * event channels are bound to CPU#0.
474*5e2183daSJulien Grall 	 */
475*5e2183daSJulien Grall 	CPU_FOREACH(i) {
476*5e2183daSJulien Grall 		pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
477*5e2183daSJulien Grall 		memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
478*5e2183daSJulien Grall 		    sizeof(pcpu->evtchn_enabled));
479*5e2183daSJulien Grall 	}
480*5e2183daSJulien Grall 
481*5e2183daSJulien Grall 	for (i = 0; i < nitems(s->evtchn_mask); i++)
482*5e2183daSJulien Grall 		atomic_store_rel_long(&s->evtchn_mask[i], ~0);
483*5e2183daSJulien Grall 
484*5e2183daSJulien Grall 	xen_arch_intr_init();
485*5e2183daSJulien Grall 
486*5e2183daSJulien Grall 	if (bootverbose)
487*5e2183daSJulien Grall 		printf("Xen interrupt system initialized\n");
488*5e2183daSJulien Grall 
489*5e2183daSJulien Grall 	return (0);
490*5e2183daSJulien Grall }
491*5e2183daSJulien Grall SYSINIT(xen_intr_init, SI_SUB_INTR, SI_ORDER_SECOND, xen_intr_init, NULL);
492*5e2183daSJulien Grall 
493*5e2183daSJulien Grall /*--------------------------- Common PIC Functions ---------------------------*/
494*5e2183daSJulien Grall 
/**
 * Re-establish an IPI event channel binding (e.g. after resume).
 *
 * \param isrc  The IPI interrupt source to rebind; on success its
 *              xi_port is updated to the newly bound port.
 *
 * Panics if the hypercall fails, or if called on a UP kernel.
 */
static void
xen_rebind_ipi(struct xenisrc *isrc)
{
#ifdef SMP
	u_int cpu = isrc->xi_cpu;
	u_int vcpu_id = XEN_CPUID_TO_VCPUID(cpu);
	int error;
	struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };

	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
	                                    &bind_ipi);
	if (error != 0)
		panic("unable to rebind xen IPI: %d", error);

	isrc->xi_port = bind_ipi.port;
#else
	panic("Resume IPI event channel on UP");
#endif
}
514*5e2183daSJulien Grall 
/**
 * Re-establish a VIRQ event channel binding (e.g. after resume).
 *
 * \param isrc  The VIRQ interrupt source to rebind; on success its
 *              xi_port is updated to the newly bound port.
 *
 * Panics if the hypercall fails.
 */
static void
xen_rebind_virq(struct xenisrc *isrc)
{
	u_int cpu = isrc->xi_cpu;
	u_int vcpu_id = XEN_CPUID_TO_VCPUID(cpu);
	int error;
	struct evtchn_bind_virq bind_virq = { .virq = isrc->xi_virq,
	                                      .vcpu = vcpu_id };

	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
	                                    &bind_virq);
	if (error != 0)
		panic("unable to rebind xen VIRQ#%u: %d", isrc->xi_virq, error);

	isrc->xi_port = bind_virq.port;
}
531*5e2183daSJulien Grall 
/**
 * Rebind an IPI or VIRQ interrupt source to a fresh event channel port
 * and re-enter it into the port lookup table.
 *
 * \param isrc  The interrupt source to rebind.
 *
 * \returns  The isrc previously occupying the new port's slot in
 *           xen_intr_port_to_isrc[] (a stale entry the caller must also
 *           rebind), or NULL when the slot was empty or the source type
 *           is neither IPI nor VIRQ.
 */
static struct xenisrc *
xen_intr_rebind_isrc(struct xenisrc *isrc)
{
#ifdef SMP
	u_int cpu = isrc->xi_cpu;
	int error;
#endif
	struct xenisrc *prev;

	switch (isrc->xi_type) {
	case EVTCHN_TYPE_IPI:
		xen_rebind_ipi(isrc);
		break;
	case EVTCHN_TYPE_VIRQ:
		xen_rebind_virq(isrc);
		break;
	default:
		return (NULL);
	}

	/* Displace whatever stale entry occupied the new port's slot. */
	prev = xen_intr_port_to_isrc[isrc->xi_port];
	xen_intr_port_to_isrc[isrc->xi_port] = isrc;

#ifdef SMP
	/* The hypercall bound to vCPU#0; migrate back to the saved CPU. */
	isrc->xi_cpu = 0;
	error = xen_intr_assign_cpu(isrc, cpu);
	if (error)
		panic("%s(): unable to rebind Xen channel %u to vCPU%u: %d",
		    __func__, isrc->xi_port, cpu, error);
#endif

	evtchn_unmask_port(isrc->xi_port);

	return (prev);
}
567*5e2183daSJulien Grall 
/**
 * Return this PIC to service after being suspended.
 *
 * Resets the per-CPU scan masks and global mask to their boot-time
 * state, invalidates every recorded port binding, then rebinds each
 * in-use IPI/VIRQ source to its new port, following chains of displaced
 * entries as slots are reused.
 */
void
xen_intr_resume(void)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	u_int isrc_idx;
	int i;

	/* Reset the per-CPU masks */
	CPU_FOREACH(i) {
		struct xen_intr_pcpu_data *pcpu;

		pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
		memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
		    sizeof(pcpu->evtchn_enabled));
	}

	/* Mask all event channels. */
	for (i = 0; i < nitems(s->evtchn_mask); i++)
		atomic_store_rel_long(&s->evtchn_mask[i], ~0);

	/* Clear existing port mappings */
	for (isrc_idx = 0; isrc_idx < NR_EVENT_CHANNELS; ++isrc_idx)
		if (xen_intr_port_to_isrc[isrc_idx] != NULL)
			xen_intr_port_to_isrc[isrc_idx]->xi_port =
			    INVALID_EVTCHN;

	/* Remap in-use isrcs, using xen_intr_port_to_isrc as listing */
	for (isrc_idx = 0; isrc_idx < NR_EVENT_CHANNELS; ++isrc_idx) {
		struct xenisrc *cur = xen_intr_port_to_isrc[isrc_idx];

		/* empty or entry already taken care of */
		if (cur == NULL || cur->xi_port == isrc_idx)
			continue;

		xen_intr_port_to_isrc[isrc_idx] = NULL;

		/* Rebind cur, then any entry it displaced, and so on. */
		do {
			KASSERT(!is_valid_evtchn(cur->xi_port),
			    ("%s(): Multiple channels on single intr?",
			    __func__));

			cur = xen_intr_rebind_isrc(cur);
		} while (cur != NULL);
	}
}
616*5e2183daSJulien Grall 
617*5e2183daSJulien Grall /**
618*5e2183daSJulien Grall  * Disable a Xen interrupt source.
619*5e2183daSJulien Grall  *
620*5e2183daSJulien Grall  * \param isrc  The interrupt source to disable.
621*5e2183daSJulien Grall  */
622*5e2183daSJulien Grall void
623*5e2183daSJulien Grall xen_intr_disable_intr(struct xenisrc *isrc)
624*5e2183daSJulien Grall {
625*5e2183daSJulien Grall 
626*5e2183daSJulien Grall 	evtchn_mask_port(isrc->xi_port);
627*5e2183daSJulien Grall }
628*5e2183daSJulien Grall 
629*5e2183daSJulien Grall /**
630*5e2183daSJulien Grall  * Configure CPU affinity for interrupt source event delivery.
631*5e2183daSJulien Grall  *
632*5e2183daSJulien Grall  * \param isrc     The interrupt source to configure.
633*5e2183daSJulien Grall  * \param to_cpu   The id of the CPU for handling future events.
634*5e2183daSJulien Grall  *
635*5e2183daSJulien Grall  * \returns  0 if successful, otherwise an errno.
636*5e2183daSJulien Grall  */
637*5e2183daSJulien Grall int
638*5e2183daSJulien Grall xen_intr_assign_cpu(struct xenisrc *isrc, u_int to_cpu)
639*5e2183daSJulien Grall {
640*5e2183daSJulien Grall #ifdef SMP
641*5e2183daSJulien Grall 	struct evtchn_bind_vcpu bind_vcpu;
642*5e2183daSJulien Grall 	u_int vcpu_id = XEN_CPUID_TO_VCPUID(to_cpu);
643*5e2183daSJulien Grall 	int error, masked;
644*5e2183daSJulien Grall 
645*5e2183daSJulien Grall 	if (!xen_has_percpu_evtchn())
646*5e2183daSJulien Grall 		return (EOPNOTSUPP);
647*5e2183daSJulien Grall 
648*5e2183daSJulien Grall 	mtx_lock(&xen_intr_isrc_lock);
649*5e2183daSJulien Grall 	if (!is_valid_evtchn(isrc->xi_port)) {
650*5e2183daSJulien Grall 		mtx_unlock(&xen_intr_isrc_lock);
651*5e2183daSJulien Grall 		return (EINVAL);
652*5e2183daSJulien Grall 	}
653*5e2183daSJulien Grall 
654*5e2183daSJulien Grall 	/*
655*5e2183daSJulien Grall 	 * Mask the event channel while binding it to prevent interrupt
656*5e2183daSJulien Grall 	 * delivery with an inconsistent state in isrc->xi_cpu.
657*5e2183daSJulien Grall 	 */
658*5e2183daSJulien Grall 	masked = evtchn_test_and_set_mask(isrc->xi_port);
659*5e2183daSJulien Grall 	if ((isrc->xi_type == EVTCHN_TYPE_VIRQ) ||
660*5e2183daSJulien Grall 		(isrc->xi_type == EVTCHN_TYPE_IPI)) {
661*5e2183daSJulien Grall 		/*
662*5e2183daSJulien Grall 		 * Virtual IRQs are associated with a cpu by
663*5e2183daSJulien Grall 		 * the Hypervisor at evtchn_bind_virq time, so
664*5e2183daSJulien Grall 		 * all we need to do is update the per-CPU masks.
665*5e2183daSJulien Grall 		 */
666*5e2183daSJulien Grall 		evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
667*5e2183daSJulien Grall 		isrc->xi_cpu = to_cpu;
668*5e2183daSJulien Grall 		evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
669*5e2183daSJulien Grall 		goto out;
670*5e2183daSJulien Grall 	}
671*5e2183daSJulien Grall 
672*5e2183daSJulien Grall 	bind_vcpu.port = isrc->xi_port;
673*5e2183daSJulien Grall 	bind_vcpu.vcpu = vcpu_id;
674*5e2183daSJulien Grall 
675*5e2183daSJulien Grall 	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu);
676*5e2183daSJulien Grall 	if (isrc->xi_cpu != to_cpu) {
677*5e2183daSJulien Grall 		if (error == 0) {
678*5e2183daSJulien Grall 			/* Commit to new binding by removing the old one. */
679*5e2183daSJulien Grall 			evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
680*5e2183daSJulien Grall 			isrc->xi_cpu = to_cpu;
681*5e2183daSJulien Grall 			evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
682*5e2183daSJulien Grall 		}
683*5e2183daSJulien Grall 	}
684*5e2183daSJulien Grall 
685*5e2183daSJulien Grall out:
686*5e2183daSJulien Grall 	if (masked == 0)
687*5e2183daSJulien Grall 		evtchn_unmask_port(isrc->xi_port);
688*5e2183daSJulien Grall 	mtx_unlock(&xen_intr_isrc_lock);
689*5e2183daSJulien Grall 	return (0);
690*5e2183daSJulien Grall #else
691*5e2183daSJulien Grall 	return (EOPNOTSUPP);
692*5e2183daSJulien Grall #endif
693*5e2183daSJulien Grall }
694*5e2183daSJulien Grall 
695*5e2183daSJulien Grall /*------------------- Virtual Interrupt Source PIC Functions -----------------*/
696*5e2183daSJulien Grall /*
697*5e2183daSJulien Grall  * Mask a level triggered interrupt source.
698*5e2183daSJulien Grall  *
699*5e2183daSJulien Grall  * \param isrc  The interrupt source to mask (if necessary).
700*5e2183daSJulien Grall  */
701*5e2183daSJulien Grall void
702*5e2183daSJulien Grall xen_intr_disable_source(struct xenisrc *isrc)
703*5e2183daSJulien Grall {
704*5e2183daSJulien Grall 
705*5e2183daSJulien Grall 	/*
706*5e2183daSJulien Grall 	 * NB: checking if the event channel is already masked is
707*5e2183daSJulien Grall 	 * needed because the event channel user-space device
708*5e2183daSJulien Grall 	 * masks event channels on its filter as part of its
709*5e2183daSJulien Grall 	 * normal operation, and those shouldn't be automatically
710*5e2183daSJulien Grall 	 * unmasked by the generic interrupt code. The event channel
711*5e2183daSJulien Grall 	 * device will unmask them when needed.
712*5e2183daSJulien Grall 	 */
713*5e2183daSJulien Grall 	isrc->xi_masked = !!evtchn_test_and_set_mask(isrc->xi_port);
714*5e2183daSJulien Grall }
715*5e2183daSJulien Grall 
716*5e2183daSJulien Grall /*
717*5e2183daSJulien Grall  * Unmask a level triggered interrupt source.
718*5e2183daSJulien Grall  *
719*5e2183daSJulien Grall  * \param isrc  The interrupt source to unmask (if necessary).
720*5e2183daSJulien Grall  */
721*5e2183daSJulien Grall void
722*5e2183daSJulien Grall xen_intr_enable_source(struct xenisrc *isrc)
723*5e2183daSJulien Grall {
724*5e2183daSJulien Grall 
725*5e2183daSJulien Grall 	if (isrc->xi_masked == 0)
726*5e2183daSJulien Grall 		evtchn_unmask_port(isrc->xi_port);
727*5e2183daSJulien Grall }
728*5e2183daSJulien Grall 
729*5e2183daSJulien Grall /*
730*5e2183daSJulien Grall  * Enable and unmask the interrupt source.
731*5e2183daSJulien Grall  *
732*5e2183daSJulien Grall  * \param isrc  The interrupt source to enable.
733*5e2183daSJulien Grall  */
734*5e2183daSJulien Grall void
735*5e2183daSJulien Grall xen_intr_enable_intr(struct xenisrc *isrc)
736*5e2183daSJulien Grall {
737*5e2183daSJulien Grall 
738*5e2183daSJulien Grall 	evtchn_unmask_port(isrc->xi_port);
739*5e2183daSJulien Grall }
740*5e2183daSJulien Grall 
741*5e2183daSJulien Grall /*--------------------------- Public Functions -------------------------------*/
742*5e2183daSJulien Grall /*------- API comments for these methods can be found in xen/xenintr.h -------*/
743*5e2183daSJulien Grall int
744*5e2183daSJulien Grall xen_intr_bind_local_port(device_t dev, evtchn_port_t local_port,
745*5e2183daSJulien Grall     driver_filter_t filter, driver_intr_t handler, void *arg,
746*5e2183daSJulien Grall     enum intr_type flags, xen_intr_handle_t *port_handlep)
747*5e2183daSJulien Grall {
748*5e2183daSJulien Grall 	struct xenisrc *isrc;
749*5e2183daSJulien Grall 	int error;
750*5e2183daSJulien Grall 
751*5e2183daSJulien Grall 	error = xen_intr_bind_isrc(&isrc, local_port, EVTCHN_TYPE_PORT,
752*5e2183daSJulien Grall 	    device_get_nameunit(dev), filter, handler, arg, flags,
753*5e2183daSJulien Grall 	    port_handlep);
754*5e2183daSJulien Grall 	if (error != 0)
755*5e2183daSJulien Grall 		return (error);
756*5e2183daSJulien Grall 
757*5e2183daSJulien Grall 	/*
758*5e2183daSJulien Grall 	 * The Event Channel API didn't open this port, so it is not
759*5e2183daSJulien Grall 	 * responsible for closing it automatically on unbind.
760*5e2183daSJulien Grall 	 */
761*5e2183daSJulien Grall 	isrc->xi_close = 0;
762*5e2183daSJulien Grall 	return (0);
763*5e2183daSJulien Grall }
764*5e2183daSJulien Grall 
765*5e2183daSJulien Grall int
766*5e2183daSJulien Grall xen_intr_alloc_and_bind_local_port(device_t dev, u_int remote_domain,
767*5e2183daSJulien Grall     driver_filter_t filter, driver_intr_t handler, void *arg,
768*5e2183daSJulien Grall     enum intr_type flags, xen_intr_handle_t *port_handlep)
769*5e2183daSJulien Grall {
770*5e2183daSJulien Grall 	struct xenisrc *isrc;
771*5e2183daSJulien Grall 	struct evtchn_alloc_unbound alloc_unbound;
772*5e2183daSJulien Grall 	int error;
773*5e2183daSJulien Grall 
774*5e2183daSJulien Grall 	alloc_unbound.dom        = DOMID_SELF;
775*5e2183daSJulien Grall 	alloc_unbound.remote_dom = remote_domain;
776*5e2183daSJulien Grall 	error = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
777*5e2183daSJulien Grall 		    &alloc_unbound);
778*5e2183daSJulien Grall 	if (error != 0) {
779*5e2183daSJulien Grall 		/*
780*5e2183daSJulien Grall 		 * XXX Trap Hypercall error code Linuxisms in
781*5e2183daSJulien Grall 		 *     the HYPERCALL layer.
782*5e2183daSJulien Grall 		 */
783*5e2183daSJulien Grall 		return (-error);
784*5e2183daSJulien Grall 	}
785*5e2183daSJulien Grall 
786*5e2183daSJulien Grall 	error = xen_intr_bind_isrc(&isrc, alloc_unbound.port, EVTCHN_TYPE_PORT,
787*5e2183daSJulien Grall 	    device_get_nameunit(dev), filter, handler, arg, flags,
788*5e2183daSJulien Grall 	    port_handlep);
789*5e2183daSJulien Grall 	if (error != 0) {
790*5e2183daSJulien Grall 		evtchn_close_t close = { .port = alloc_unbound.port };
791*5e2183daSJulien Grall 		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
792*5e2183daSJulien Grall 			panic("EVTCHNOP_close failed");
793*5e2183daSJulien Grall 		return (error);
794*5e2183daSJulien Grall 	}
795*5e2183daSJulien Grall 
796*5e2183daSJulien Grall 	isrc->xi_close = 1;
797*5e2183daSJulien Grall 	return (0);
798*5e2183daSJulien Grall }
799*5e2183daSJulien Grall 
800*5e2183daSJulien Grall int
801*5e2183daSJulien Grall xen_intr_bind_remote_port(device_t dev, u_int remote_domain,
802*5e2183daSJulien Grall     u_int remote_port, driver_filter_t filter, driver_intr_t handler,
803*5e2183daSJulien Grall     void *arg, enum intr_type flags, xen_intr_handle_t *port_handlep)
804*5e2183daSJulien Grall {
805*5e2183daSJulien Grall 	struct xenisrc *isrc;
806*5e2183daSJulien Grall 	struct evtchn_bind_interdomain bind_interdomain;
807*5e2183daSJulien Grall 	int error;
808*5e2183daSJulien Grall 
809*5e2183daSJulien Grall 	bind_interdomain.remote_dom  = remote_domain;
810*5e2183daSJulien Grall 	bind_interdomain.remote_port = remote_port;
811*5e2183daSJulien Grall 	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
812*5e2183daSJulien Grall 					    &bind_interdomain);
813*5e2183daSJulien Grall 	if (error != 0) {
814*5e2183daSJulien Grall 		/*
815*5e2183daSJulien Grall 		 * XXX Trap Hypercall error code Linuxisms in
816*5e2183daSJulien Grall 		 *     the HYPERCALL layer.
817*5e2183daSJulien Grall 		 */
818*5e2183daSJulien Grall 		return (-error);
819*5e2183daSJulien Grall 	}
820*5e2183daSJulien Grall 
821*5e2183daSJulien Grall 	error = xen_intr_bind_isrc(&isrc, bind_interdomain.local_port,
822*5e2183daSJulien Grall 	    EVTCHN_TYPE_PORT, device_get_nameunit(dev), filter, handler, arg,
823*5e2183daSJulien Grall 	    flags, port_handlep);
824*5e2183daSJulien Grall 	if (error) {
825*5e2183daSJulien Grall 		evtchn_close_t close = { .port = bind_interdomain.local_port };
826*5e2183daSJulien Grall 		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
827*5e2183daSJulien Grall 			panic("EVTCHNOP_close failed");
828*5e2183daSJulien Grall 		return (error);
829*5e2183daSJulien Grall 	}
830*5e2183daSJulien Grall 
831*5e2183daSJulien Grall 	/*
832*5e2183daSJulien Grall 	 * The Event Channel API opened this port, so it is
833*5e2183daSJulien Grall 	 * responsible for closing it automatically on unbind.
834*5e2183daSJulien Grall 	 */
835*5e2183daSJulien Grall 	isrc->xi_close = 1;
836*5e2183daSJulien Grall 	return (0);
837*5e2183daSJulien Grall }
838*5e2183daSJulien Grall 
839*5e2183daSJulien Grall int
840*5e2183daSJulien Grall xen_intr_bind_virq(device_t dev, u_int virq, u_int cpu,
841*5e2183daSJulien Grall     driver_filter_t filter, driver_intr_t handler, void *arg,
842*5e2183daSJulien Grall     enum intr_type flags, xen_intr_handle_t *port_handlep)
843*5e2183daSJulien Grall {
844*5e2183daSJulien Grall 	u_int vcpu_id = XEN_CPUID_TO_VCPUID(cpu);
845*5e2183daSJulien Grall 	struct xenisrc *isrc;
846*5e2183daSJulien Grall 	struct evtchn_bind_virq bind_virq = { .virq = virq, .vcpu = vcpu_id };
847*5e2183daSJulien Grall 	int error;
848*5e2183daSJulien Grall 
849*5e2183daSJulien Grall 	isrc = NULL;
850*5e2183daSJulien Grall 	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq);
851*5e2183daSJulien Grall 	if (error != 0) {
852*5e2183daSJulien Grall 		/*
853*5e2183daSJulien Grall 		 * XXX Trap Hypercall error code Linuxisms in
854*5e2183daSJulien Grall 		 *     the HYPERCALL layer.
855*5e2183daSJulien Grall 		 */
856*5e2183daSJulien Grall 		return (-error);
857*5e2183daSJulien Grall 	}
858*5e2183daSJulien Grall 
859*5e2183daSJulien Grall 	error = xen_intr_bind_isrc(&isrc, bind_virq.port, EVTCHN_TYPE_VIRQ,
860*5e2183daSJulien Grall 	    device_get_nameunit(dev), filter, handler, arg, flags,
861*5e2183daSJulien Grall 	    port_handlep);
862*5e2183daSJulien Grall 
863*5e2183daSJulien Grall #ifdef SMP
864*5e2183daSJulien Grall 	if (error == 0)
865*5e2183daSJulien Grall 		error = xen_arch_intr_event_bind(isrc, cpu);
866*5e2183daSJulien Grall #endif
867*5e2183daSJulien Grall 
868*5e2183daSJulien Grall 	if (error != 0) {
869*5e2183daSJulien Grall 		evtchn_close_t close = { .port = bind_virq.port };
870*5e2183daSJulien Grall 
871*5e2183daSJulien Grall 		xen_intr_unbind(*port_handlep);
872*5e2183daSJulien Grall 		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
873*5e2183daSJulien Grall 			panic("EVTCHNOP_close failed");
874*5e2183daSJulien Grall 		return (error);
875*5e2183daSJulien Grall 	}
876*5e2183daSJulien Grall 
877*5e2183daSJulien Grall #ifdef SMP
878*5e2183daSJulien Grall 	if (isrc->xi_cpu != cpu) {
879*5e2183daSJulien Grall 		/*
880*5e2183daSJulien Grall 		 * Too early in the boot process for the generic interrupt
881*5e2183daSJulien Grall 		 * code to perform the binding.  Update our event channel
882*5e2183daSJulien Grall 		 * masks manually so events can't fire on the wrong cpu
883*5e2183daSJulien Grall 		 * during AP startup.
884*5e2183daSJulien Grall 		 */
885*5e2183daSJulien Grall 		xen_intr_assign_cpu(isrc, cpu);
886*5e2183daSJulien Grall 	}
887*5e2183daSJulien Grall #endif
888*5e2183daSJulien Grall 
889*5e2183daSJulien Grall 	/*
890*5e2183daSJulien Grall 	 * The Event Channel API opened this port, so it is
891*5e2183daSJulien Grall 	 * responsible for closing it automatically on unbind.
892*5e2183daSJulien Grall 	 */
893*5e2183daSJulien Grall 	isrc->xi_close = 1;
894*5e2183daSJulien Grall 	isrc->xi_virq = virq;
895*5e2183daSJulien Grall 
896*5e2183daSJulien Grall 	return (0);
897*5e2183daSJulien Grall }
898*5e2183daSJulien Grall 
899*5e2183daSJulien Grall int
900*5e2183daSJulien Grall xen_intr_alloc_and_bind_ipi(u_int cpu, driver_filter_t filter,
901*5e2183daSJulien Grall     enum intr_type flags, xen_intr_handle_t *port_handlep)
902*5e2183daSJulien Grall {
903*5e2183daSJulien Grall #ifdef SMP
904*5e2183daSJulien Grall 	u_int vcpu_id = XEN_CPUID_TO_VCPUID(cpu);
905*5e2183daSJulien Grall 	struct xenisrc *isrc;
906*5e2183daSJulien Grall 	struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };
907*5e2183daSJulien Grall 	/* Same size as the one used by intr_handler->ih_name. */
908*5e2183daSJulien Grall 	char name[MAXCOMLEN + 1];
909*5e2183daSJulien Grall 	int error;
910*5e2183daSJulien Grall 
911*5e2183daSJulien Grall 	isrc = NULL;
912*5e2183daSJulien Grall 	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi);
913*5e2183daSJulien Grall 	if (error != 0) {
914*5e2183daSJulien Grall 		/*
915*5e2183daSJulien Grall 		 * XXX Trap Hypercall error code Linuxisms in
916*5e2183daSJulien Grall 		 *     the HYPERCALL layer.
917*5e2183daSJulien Grall 		 */
918*5e2183daSJulien Grall 		return (-error);
919*5e2183daSJulien Grall 	}
920*5e2183daSJulien Grall 
921*5e2183daSJulien Grall 	snprintf(name, sizeof(name), "cpu%u", cpu);
922*5e2183daSJulien Grall 
923*5e2183daSJulien Grall 	error = xen_intr_bind_isrc(&isrc, bind_ipi.port, EVTCHN_TYPE_IPI,
924*5e2183daSJulien Grall 	    name, filter, NULL, NULL, flags, port_handlep);
925*5e2183daSJulien Grall 	if (error != 0) {
926*5e2183daSJulien Grall 		evtchn_close_t close = { .port = bind_ipi.port };
927*5e2183daSJulien Grall 
928*5e2183daSJulien Grall 		xen_intr_unbind(*port_handlep);
929*5e2183daSJulien Grall 		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
930*5e2183daSJulien Grall 			panic("EVTCHNOP_close failed");
931*5e2183daSJulien Grall 		return (error);
932*5e2183daSJulien Grall 	}
933*5e2183daSJulien Grall 
934*5e2183daSJulien Grall 	if (isrc->xi_cpu != cpu) {
935*5e2183daSJulien Grall 		/*
936*5e2183daSJulien Grall 		 * Too early in the boot process for the generic interrupt
937*5e2183daSJulien Grall 		 * code to perform the binding.  Update our event channel
938*5e2183daSJulien Grall 		 * masks manually so events can't fire on the wrong cpu
939*5e2183daSJulien Grall 		 * during AP startup.
940*5e2183daSJulien Grall 		 */
941*5e2183daSJulien Grall 		xen_intr_assign_cpu(isrc, cpu);
942*5e2183daSJulien Grall 	}
943*5e2183daSJulien Grall 
944*5e2183daSJulien Grall 	/*
945*5e2183daSJulien Grall 	 * The Event Channel API opened this port, so it is
946*5e2183daSJulien Grall 	 * responsible for closing it automatically on unbind.
947*5e2183daSJulien Grall 	 */
948*5e2183daSJulien Grall 	isrc->xi_close = 1;
949*5e2183daSJulien Grall 	return (0);
950*5e2183daSJulien Grall #else
951*5e2183daSJulien Grall 	return (EOPNOTSUPP);
952*5e2183daSJulien Grall #endif
953*5e2183daSJulien Grall }
954*5e2183daSJulien Grall 
955*5e2183daSJulien Grall int
956*5e2183daSJulien Grall xen_intr_describe(xen_intr_handle_t port_handle, const char *fmt, ...)
957*5e2183daSJulien Grall {
958*5e2183daSJulien Grall 	char descr[MAXCOMLEN + 1];
959*5e2183daSJulien Grall 	struct xenisrc *isrc;
960*5e2183daSJulien Grall 	va_list ap;
961*5e2183daSJulien Grall 
962*5e2183daSJulien Grall 	isrc = xen_intr_isrc_from_handle(port_handle);
963*5e2183daSJulien Grall 	if (isrc == NULL)
964*5e2183daSJulien Grall 		return (EINVAL);
965*5e2183daSJulien Grall 
966*5e2183daSJulien Grall 	va_start(ap, fmt);
967*5e2183daSJulien Grall 	vsnprintf(descr, sizeof(descr), fmt, ap);
968*5e2183daSJulien Grall 	va_end(ap);
969*5e2183daSJulien Grall 	return (xen_arch_intr_describe(isrc, isrc->xi_cookie, descr));
970*5e2183daSJulien Grall }
971*5e2183daSJulien Grall 
972*5e2183daSJulien Grall void
973*5e2183daSJulien Grall xen_intr_unbind(xen_intr_handle_t *port_handlep)
974*5e2183daSJulien Grall {
975*5e2183daSJulien Grall 	struct xenisrc *isrc;
976*5e2183daSJulien Grall 
977*5e2183daSJulien Grall 	KASSERT(port_handlep != NULL,
978*5e2183daSJulien Grall 	    ("NULL xen_intr_handle_t passed to %s", __func__));
979*5e2183daSJulien Grall 
980*5e2183daSJulien Grall 	isrc = xen_intr_isrc_from_handle(*port_handlep);
981*5e2183daSJulien Grall 	*port_handlep = NULL;
982*5e2183daSJulien Grall 	if (isrc == NULL)
983*5e2183daSJulien Grall 		return;
984*5e2183daSJulien Grall 
985*5e2183daSJulien Grall 	mtx_lock(&xen_intr_isrc_lock);
986*5e2183daSJulien Grall 	if (refcount_release(&isrc->xi_refcount) == 0) {
987*5e2183daSJulien Grall 		mtx_unlock(&xen_intr_isrc_lock);
988*5e2183daSJulien Grall 		return;
989*5e2183daSJulien Grall 	}
990*5e2183daSJulien Grall 	mtx_unlock(&xen_intr_isrc_lock);
991*5e2183daSJulien Grall 
992*5e2183daSJulien Grall 	if (isrc->xi_cookie != NULL)
993*5e2183daSJulien Grall 		xen_arch_intr_remove_handler(isrc, isrc->xi_cookie);
994*5e2183daSJulien Grall 	xen_intr_release_isrc(isrc);
995*5e2183daSJulien Grall }
996*5e2183daSJulien Grall 
997*5e2183daSJulien Grall void
998*5e2183daSJulien Grall xen_intr_signal(xen_intr_handle_t handle)
999*5e2183daSJulien Grall {
1000*5e2183daSJulien Grall 	struct xenisrc *isrc;
1001*5e2183daSJulien Grall 
1002*5e2183daSJulien Grall 	isrc = xen_intr_isrc_from_handle(handle);
1003*5e2183daSJulien Grall 	if (isrc != NULL) {
1004*5e2183daSJulien Grall 		KASSERT(isrc->xi_type == EVTCHN_TYPE_PORT ||
1005*5e2183daSJulien Grall 			isrc->xi_type == EVTCHN_TYPE_IPI,
1006*5e2183daSJulien Grall 			("evtchn_signal on something other than a local port"));
1007*5e2183daSJulien Grall 		struct evtchn_send send = { .port = isrc->xi_port };
1008*5e2183daSJulien Grall 		(void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
1009*5e2183daSJulien Grall 	}
1010*5e2183daSJulien Grall }
1011*5e2183daSJulien Grall 
1012*5e2183daSJulien Grall evtchn_port_t
1013*5e2183daSJulien Grall xen_intr_port(xen_intr_handle_t handle)
1014*5e2183daSJulien Grall {
1015*5e2183daSJulien Grall 	struct xenisrc *isrc;
1016*5e2183daSJulien Grall 
1017*5e2183daSJulien Grall 	isrc = xen_intr_isrc_from_handle(handle);
1018*5e2183daSJulien Grall 	if (isrc == NULL)
1019*5e2183daSJulien Grall 		return (0);
1020*5e2183daSJulien Grall 
1021*5e2183daSJulien Grall 	return (isrc->xi_port);
1022*5e2183daSJulien Grall }
1023*5e2183daSJulien Grall 
1024*5e2183daSJulien Grall int
1025*5e2183daSJulien Grall xen_intr_add_handler(const char *name, driver_filter_t filter,
1026*5e2183daSJulien Grall     driver_intr_t handler, void *arg, enum intr_type flags,
1027*5e2183daSJulien Grall     xen_intr_handle_t handle)
1028*5e2183daSJulien Grall {
1029*5e2183daSJulien Grall 	struct xenisrc *isrc;
1030*5e2183daSJulien Grall 	int error;
1031*5e2183daSJulien Grall 
1032*5e2183daSJulien Grall 	isrc = xen_intr_isrc_from_handle(handle);
1033*5e2183daSJulien Grall 	if (isrc == NULL || isrc->xi_cookie != NULL)
1034*5e2183daSJulien Grall 		return (EINVAL);
1035*5e2183daSJulien Grall 
1036*5e2183daSJulien Grall 	error = xen_arch_intr_add_handler(name, filter, handler, arg,
1037*5e2183daSJulien Grall 	    flags | INTR_EXCL, isrc, &isrc->xi_cookie);
1038*5e2183daSJulien Grall 	if (error != 0)
1039*5e2183daSJulien Grall 		printf("%s: %s: add handler failed: %d\n", name, __func__,
1040*5e2183daSJulien Grall 		    error);
1041*5e2183daSJulien Grall 
1042*5e2183daSJulien Grall 	return (error);
1043*5e2183daSJulien Grall }
1044*5e2183daSJulien Grall 
1045*5e2183daSJulien Grall int
1046*5e2183daSJulien Grall xen_intr_get_evtchn_from_port(evtchn_port_t port, xen_intr_handle_t *handlep)
1047*5e2183daSJulien Grall {
1048*5e2183daSJulien Grall 
1049*5e2183daSJulien Grall 	if (!is_valid_evtchn(port))
1050*5e2183daSJulien Grall 		return (EINVAL);
1051*5e2183daSJulien Grall 
1052*5e2183daSJulien Grall 	if (handlep == NULL) {
1053*5e2183daSJulien Grall 		return (EINVAL);
1054*5e2183daSJulien Grall 	}
1055*5e2183daSJulien Grall 
1056*5e2183daSJulien Grall 	mtx_lock(&xen_intr_isrc_lock);
1057*5e2183daSJulien Grall 	if (xen_intr_port_to_isrc[port] == NULL) {
1058*5e2183daSJulien Grall 		mtx_unlock(&xen_intr_isrc_lock);
1059*5e2183daSJulien Grall 		return (EINVAL);
1060*5e2183daSJulien Grall 	}
1061*5e2183daSJulien Grall 	refcount_acquire(&xen_intr_port_to_isrc[port]->xi_refcount);
1062*5e2183daSJulien Grall 	mtx_unlock(&xen_intr_isrc_lock);
1063*5e2183daSJulien Grall 
1064*5e2183daSJulien Grall 	/* Assign the opaque handler */
1065*5e2183daSJulien Grall 	*handlep = xen_intr_handle_from_isrc(xen_intr_port_to_isrc[port]);
1066*5e2183daSJulien Grall 
1067*5e2183daSJulien Grall 	return (0);
1068*5e2183daSJulien Grall }
1069*5e2183daSJulien Grall 
1070*5e2183daSJulien Grall #ifdef DDB
1071*5e2183daSJulien Grall static const char *
1072*5e2183daSJulien Grall xen_intr_print_type(enum evtchn_type type)
1073*5e2183daSJulien Grall {
1074*5e2183daSJulien Grall 	static const char *evtchn_type_to_string[EVTCHN_TYPE_COUNT] = {
1075*5e2183daSJulien Grall 		[EVTCHN_TYPE_UNBOUND]	= "UNBOUND",
1076*5e2183daSJulien Grall 		[EVTCHN_TYPE_VIRQ]	= "VIRQ",
1077*5e2183daSJulien Grall 		[EVTCHN_TYPE_IPI]	= "IPI",
1078*5e2183daSJulien Grall 		[EVTCHN_TYPE_PORT]	= "PORT",
1079*5e2183daSJulien Grall 	};
1080*5e2183daSJulien Grall 
1081*5e2183daSJulien Grall 	if (type >= EVTCHN_TYPE_COUNT)
1082*5e2183daSJulien Grall 		return ("UNKNOWN");
1083*5e2183daSJulien Grall 
1084*5e2183daSJulien Grall 	return (evtchn_type_to_string[type]);
1085*5e2183daSJulien Grall }
1086*5e2183daSJulien Grall 
1087*5e2183daSJulien Grall static void
1088*5e2183daSJulien Grall xen_intr_dump_port(struct xenisrc *isrc)
1089*5e2183daSJulien Grall {
1090*5e2183daSJulien Grall 	struct xen_intr_pcpu_data *pcpu;
1091*5e2183daSJulien Grall 	shared_info_t *s = HYPERVISOR_shared_info;
1092*5e2183daSJulien Grall 	u_int i;
1093*5e2183daSJulien Grall 
1094*5e2183daSJulien Grall 	db_printf("Port %d Type: %s\n",
1095*5e2183daSJulien Grall 	    isrc->xi_port, xen_intr_print_type(isrc->xi_type));
1096*5e2183daSJulien Grall 	if (isrc->xi_type == EVTCHN_TYPE_VIRQ)
1097*5e2183daSJulien Grall 		db_printf("\tVirq: %u\n", isrc->xi_virq);
1098*5e2183daSJulien Grall 
1099*5e2183daSJulien Grall 	db_printf("\tMasked: %d Pending: %d\n",
1100*5e2183daSJulien Grall 	    !!xen_test_bit(isrc->xi_port, &s->evtchn_mask[0]),
1101*5e2183daSJulien Grall 	    !!xen_test_bit(isrc->xi_port, &s->evtchn_pending[0]));
1102*5e2183daSJulien Grall 
1103*5e2183daSJulien Grall 	db_printf("\tPer-CPU Masks: ");
1104*5e2183daSJulien Grall 	CPU_FOREACH(i) {
1105*5e2183daSJulien Grall 		pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
1106*5e2183daSJulien Grall 		db_printf("cpu#%u: %d ", i,
1107*5e2183daSJulien Grall 		    !!xen_test_bit(isrc->xi_port, pcpu->evtchn_enabled));
1108*5e2183daSJulien Grall 	}
1109*5e2183daSJulien Grall 	db_printf("\n");
1110*5e2183daSJulien Grall }
1111*5e2183daSJulien Grall 
1112*5e2183daSJulien Grall DB_SHOW_COMMAND(xen_evtchn, db_show_xen_evtchn)
1113*5e2183daSJulien Grall {
1114*5e2183daSJulien Grall 	u_int i;
1115*5e2183daSJulien Grall 
1116*5e2183daSJulien Grall 	if (!xen_domain()) {
1117*5e2183daSJulien Grall 		db_printf("Only available on Xen guests\n");
1118*5e2183daSJulien Grall 		return;
1119*5e2183daSJulien Grall 	}
1120*5e2183daSJulien Grall 
1121*5e2183daSJulien Grall 	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
1122*5e2183daSJulien Grall 		struct xenisrc *isrc;
1123*5e2183daSJulien Grall 
1124*5e2183daSJulien Grall 		isrc = xen_intr_port_to_isrc[i];
1125*5e2183daSJulien Grall 		if (isrc == NULL)
1126*5e2183daSJulien Grall 			continue;
1127*5e2183daSJulien Grall 
1128*5e2183daSJulien Grall 		xen_intr_dump_port(isrc);
1129*5e2183daSJulien Grall 	}
1130*5e2183daSJulien Grall }
1131*5e2183daSJulien Grall #endif /* DDB */
1132