xref: /freebsd/sys/dev/xen/bus/xen_intr.c (revision c8e7f78a3d28ff6e6223ed136ada8e1e2f34965e)
1 /******************************************************************************
2  * xen_intr.c
3  *
4  * Xen event and interrupt services for x86 HVM guests.
5  *
6  * Copyright (c) 2002-2005, K A Fraser
7  * Copyright (c) 2005, Intel Corporation <xiaofeng.ling@intel.com>
8  * Copyright (c) 2012, Spectra Logic Corporation
9  * Copyright © 2021-2023, Elliott Mitchell
10  *
11  * This file may be distributed separately from the Linux kernel, or
12  * incorporated into other software packages, subject to the following license:
13  *
14  * Permission is hereby granted, free of charge, to any person obtaining a copy
15  * of this source file (the "Software"), to deal in the Software without
16  * restriction, including without limitation the rights to use, copy, modify,
17  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18  * and to permit persons to whom the Software is furnished to do so, subject to
19  * the following conditions:
20  *
21  * The above copyright notice and this permission notice shall be included in
22  * all copies or substantial portions of the Software.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30  * IN THE SOFTWARE.
31  */
32 
33 #include <sys/cdefs.h>
34 #include "opt_ddb.h"
35 
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/bus.h>
39 #include <sys/kernel.h>
40 #include <sys/limits.h>
41 #include <sys/lock.h>
42 #include <sys/mutex.h>
43 #include <sys/interrupt.h>
44 #include <sys/pcpu.h>
45 #include <sys/proc.h>
46 #include <sys/smp.h>
47 #include <sys/refcount.h>
48 
49 #include <vm/vm.h>
50 #include <vm/pmap.h>
51 
52 #include <machine/smp.h>
53 #include <machine/stdarg.h>
54 
55 #include <xen/xen-os.h>
56 #include <xen/hypervisor.h>
57 #include <xen/xen_intr.h>
58 #include <xen/evtchn/evtchnvar.h>
59 
60 #include <machine/xen/arch-intr.h>
61 
62 #ifdef DDB
63 #include <ddb/ddb.h>
64 #endif
65 
66 /**
67  * Per-cpu event channel processing state.
68  */
69 struct xen_intr_pcpu_data {
70 	/**
71 	 * The last event channel bitmap section (level one bit) processed.
72 	 * This is used to ensure we scan all ports before
73 	 * servicing an already serviced port again.
74 	 */
75 	u_int	last_processed_l1i;
76 
77 	/**
78 	 * The last event channel processed within the event channel
79 	 * bitmap being scanned.
80 	 */
81 	u_int	last_processed_l2i;
82 
83 	/**
84 	 * A bitmap of ports that can be serviced from this CPU.
85 	 * A set bit means interrupt handling is enabled.
86 	 */
87 	xen_ulong_t	evtchn_enabled[sizeof(xen_ulong_t) * 8];
88 };
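
/*
 * Illustrative sketch (not compiled here): how an event channel port
 * number relates to the two-level scan state above.  With LONG_BIT == 64,
 * port 97 lives in level-one section 1 at bit 33:
 *
 *	u_int l1i = port / LONG_BIT;	// bitmap section (level one index)
 *	u_int l2i = port % LONG_BIT;	// bit within that section
 *	port = (l1i * LONG_BIT) + l2i;	// recombined, as the upcall does
 */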
89 
90 /*
91  * Start the scan at port 0 by initializing the last scanned
92  * location as the highest numbered event channel port.
93  */
94 DPCPU_DEFINE_STATIC(struct xen_intr_pcpu_data, xen_intr_pcpu) = {
95 	.last_processed_l1i = LONG_BIT - 1,
96 	.last_processed_l2i = LONG_BIT - 1
97 };
98 
99 DPCPU_DECLARE(struct vcpu_info *, vcpu_info);
100 
101 #define	INVALID_EVTCHN		(~(evtchn_port_t)0) /* Invalid event channel */
102 #define	is_valid_evtchn(x)	((uintmax_t)(x) < NR_EVENT_CHANNELS)
103 
104 /*
105  * Lock for interrupt core data.
106  *
107  * Modifying xen_intr_port_to_isrc[], or isrc->xi_port (implies the former)
108  * requires this lock be held.  Any time this lock is not held, the condition
109  * `!xen_intr_port_to_isrc[i] || (xen_intr_port_to_isrc[i]->xi_port == i)`
110  * MUST be true for all values of i which are valid indices of the array.
111  *
112  * Acquire/release operations for isrc->xi_refcount require this lock be held.
113  */
114 static struct mtx	 xen_intr_isrc_lock;
115 static struct xenisrc	*xen_intr_port_to_isrc[NR_EVENT_CHANNELS];
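
/*
 * A sketch of the invariant above as it could be checked, whenever
 * xen_intr_isrc_lock is not held, under INVARIANTS; this check is
 * illustrative only and not present in the driver:
 *
 *	for (u_int i = 0; i < NR_EVENT_CHANNELS; i++)
 *		KASSERT(xen_intr_port_to_isrc[i] == NULL ||
 *		    xen_intr_port_to_isrc[i]->xi_port == i,
 *		    ("stale mapping for port %u", i));
 */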
116 
117 /*------------------------- Private Functions --------------------------------*/
118 
119 /**
120  * Retrieve a handle for a Xen interrupt source.
121  *
122  * \param isrc  A valid Xen interrupt source structure.
123  *
124  * \returns  A handle suitable for use with xen_intr_isrc_from_handle()
125  *           to retrieve the original Xen interrupt source structure.
126  */
127 
128 static inline xen_intr_handle_t
129 xen_intr_handle_from_isrc(struct xenisrc *isrc)
130 {
131 	return (isrc);
132 }
133 
134 /**
135  * Lookup a Xen interrupt source object given an interrupt binding handle.
136  *
137  * \param handle  A handle initialized by a previous call to
138  *                xen_intr_bind_isrc().
139  *
140  * \returns  A pointer to the Xen interrupt source object associated
141  *           with the given interrupt handle.  NULL if no association
142  *           currently exists.
143  */
144 static inline struct xenisrc *
145 xen_intr_isrc_from_handle(xen_intr_handle_t handle)
146 {
147 	return ((struct xenisrc *)handle);
148 }
149 
150 /**
151  * Disable signal delivery for an event channel port on the
152  * specified CPU.
153  *
154  * \param port  The event channel port to mask.
155  *
156  * This API is used to manage the port<=>CPU binding of event
157  * channel handlers.
158  *
159  * \note  This operation does not preclude reception of an event
160  *        for this event channel on another CPU.  To mask the
161  *        event channel globally, use evtchn_mask_port().
162  */
163 static inline void
164 evtchn_cpu_mask_port(u_int cpu, evtchn_port_t port)
165 {
166 	struct xen_intr_pcpu_data *pcpu;
167 
168 	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
169 	xen_clear_bit(port, pcpu->evtchn_enabled);
170 }
171 
172 /**
173  * Enable signal delivery for an event channel port on the
174  * specified CPU.
175  *
176  * \param port  The event channel port to unmask.
177  *
178  * This API is used to manage the port<=>CPU binding of event
179  * channel handlers.
180  *
181  * \note  This operation does not guarantee that event delivery
182  *        is enabled for this event channel port.  The port must
183  *        also be globally enabled.  See evtchn_unmask_port().
184  */
185 static inline void
186 evtchn_cpu_unmask_port(u_int cpu, evtchn_port_t port)
187 {
188 	struct xen_intr_pcpu_data *pcpu;
189 
190 	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
191 	xen_set_bit(port, pcpu->evtchn_enabled);
192 }
193 
194 /**
195  * Attempt to free an active Xen interrupt source object.
196  *
197  * \param isrc  The interrupt source object to release.
198  *
199  * \returns  0.  Releasing an isrc currently always succeeds.
200  */
201 static int
202 xen_intr_release_isrc(struct xenisrc *isrc)
203 {
204 
205 	mtx_lock(&xen_intr_isrc_lock);
206 	if (is_valid_evtchn(isrc->xi_port)) {
207 		evtchn_mask_port(isrc->xi_port);
208 		evtchn_clear_port(isrc->xi_port);
209 
210 		/* Rebind port to CPU 0. */
211 		evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
212 		evtchn_cpu_unmask_port(0, isrc->xi_port);
213 
214 		if (isrc->xi_close != 0) {
215 			struct evtchn_close close = { .port = isrc->xi_port };
216 
217 			if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
218 				panic("EVTCHNOP_close failed");
219 		}
220 
221 		xen_intr_port_to_isrc[isrc->xi_port] = NULL;
222 	}
223 	/* not reachable from xen_intr_port_to_isrc[], unlock */
224 	mtx_unlock(&xen_intr_isrc_lock);
225 
226 	xen_arch_intr_release(isrc);
227 	return (0);
228 }
229 
230 /**
231  * Associate an interrupt handler with an already allocated local Xen
232  * event channel port.
233  *
234  * \param isrcp       The returned Xen interrupt object associated with
235  *                    the specified local port.
236  * \param local_port  The event channel to bind.
237  * \param type        The event channel type of local_port.
238  * \param intr_owner  The device making this bind request.
239  * \param filter      An interrupt filter handler.  Specify NULL
240  *                    to always dispatch to the ithread handler.
241  * \param handler     An interrupt ithread handler.  Optional (can
242  *                    specify NULL) if all necessary event actions
243  *                    are performed by filter.
244  * \param arg         Argument to present to both filter and handler.
245  * \param flags        Interrupt handler flags.  See sys/bus.h.
246  * \param port_handlep Pointer to an opaque handle used to manage this
247  *                     registration.
248  *
249  * \returns  0 on success, otherwise an errno.
250  */
251 static int
252 xen_intr_bind_isrc(struct xenisrc **isrcp, evtchn_port_t local_port,
253     enum evtchn_type type, const char *intr_owner, driver_filter_t filter,
254     driver_intr_t handler, void *arg, enum intr_type flags,
255     xen_intr_handle_t *const port_handlep)
256 {
257 	struct xenisrc *isrc;
258 	int error;
259 
260 	*isrcp = NULL;
261 	if (port_handlep == NULL) {
262 		printf("%s: %s: Bad event handle\n", intr_owner, __func__);
263 		return (EINVAL);
264 	}
265 	*port_handlep = NULL;
266 
267 	isrc = xen_arch_intr_alloc();
268 	if (isrc == NULL)
269 		return (ENOSPC);
270 
271 	isrc->xi_cookie = NULL;
272 	isrc->xi_type = type;
273 	isrc->xi_port = local_port;
274 	isrc->xi_close = false;
275 	isrc->xi_cpu = 0;
276 	refcount_init(&isrc->xi_refcount, 1);
277 	mtx_lock(&xen_intr_isrc_lock);
278 	xen_intr_port_to_isrc[isrc->xi_port] = isrc;
279 	mtx_unlock(&xen_intr_isrc_lock);
280 
281 #ifdef SMP
282 	if (type == EVTCHN_TYPE_PORT) {
283 		/*
284 		 * By default all interrupts are assigned to vCPU#0
285 		 * unless specified otherwise, so shuffle them to balance
286 		 * the interrupt load.
287 		 */
288 		xen_intr_assign_cpu(isrc, xen_arch_intr_next_cpu(isrc));
289 	}
290 #endif
291 
292 	/*
293 	 * If a filter or handler function is provided, add it to the event.
294 	 * Otherwise the event channel is left masked and without a handler;
295 	 * the caller is in charge of setting that up.
296 	 */
297 	if (filter != NULL || handler != NULL) {
298 		error = xen_intr_add_handler(intr_owner, filter, handler, arg,
299 		    flags, xen_intr_handle_from_isrc(isrc));
300 		if (error != 0) {
301 			xen_intr_release_isrc(isrc);
302 			return (error);
303 		}
304 	}
305 
306 	*isrcp = isrc;
307 	/* Assign the opaque handle. */
308 	*port_handlep = xen_intr_handle_from_isrc(isrc);
309 	return (0);
310 }
311 
312 /**
313  * Determine the event channel ports at the given section of the
314  * event port bitmap which have pending events for the given cpu.
315  *
316  * \param pcpu  The Xen interrupt pcpu data for the cpu being queried.
317  * \param idx   The index of the section of the event channel bitmap to
318  *              inspect.  The Xen shared info area is consulted
319  *              internally rather than passed as a parameter.
320  *
321  * \returns  A u_long with bits set for every event channel with pending
322  *           events.
323  */
324 static inline u_long
325 xen_intr_active_ports(const struct xen_intr_pcpu_data *const pcpu,
326     const u_int idx)
327 {
328 	volatile const shared_info_t *const sh = HYPERVISOR_shared_info;
329 
330 	CTASSERT(sizeof(sh->evtchn_mask[0]) == sizeof(sh->evtchn_pending[0]));
331 	CTASSERT(sizeof(sh->evtchn_mask[0]) == sizeof(pcpu->evtchn_enabled[0]));
332 	CTASSERT(sizeof(sh->evtchn_mask) == sizeof(sh->evtchn_pending));
333 	CTASSERT(sizeof(sh->evtchn_mask) == sizeof(pcpu->evtchn_enabled));
334 	return (sh->evtchn_pending[idx]
335 	      & ~sh->evtchn_mask[idx]
336 	      & pcpu->evtchn_enabled[idx]);
337 }
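
/*
 * Worked example for xen_intr_active_ports() (illustrative values): with
 * sh->evtchn_pending[idx] == 0b1011, sh->evtchn_mask[idx] == 0b0010 and
 * pcpu->evtchn_enabled[idx] == 0b1110, the result is
 *
 *	0b1011 & ~0b0010 & 0b1110 == 0b1000
 *
 * i.e. only ports that are simultaneously pending, globally unmasked and
 * routed to this CPU are reported.
 */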
338 
339 /**
340  * Interrupt handler for processing all Xen event channel events.
341  *
342  * \param unused  Unused; the trap frame is taken from curthread->td_intr_frame.
343  */
344 int
345 xen_intr_handle_upcall(void *unused __unused)
346 {
347 	struct trapframe *trap_frame = curthread->td_intr_frame;
348 	u_int l1i, l2i, port, cpu __diagused;
349 	u_long masked_l1, masked_l2;
350 	struct xenisrc *isrc;
351 	vcpu_info_t *v;
352 	struct xen_intr_pcpu_data *pc;
353 	u_long l1, l2;
354 
355 	/* We must remain on the same vCPU during this function */
356 	CRITICAL_ASSERT(curthread);
357 
358 	cpu = PCPU_GET(cpuid);
359 	pc  = DPCPU_PTR(xen_intr_pcpu);
360 	v   = DPCPU_GET(vcpu_info);
361 
362 	if (!xen_has_percpu_evtchn()) {
363 		KASSERT((cpu == 0), ("Fired PCI event callback on wrong CPU"));
364 	}
365 
366 	v->evtchn_upcall_pending = 0;
367 /* No need for a barrier on x86 -- XCHG is a barrier on x86. */
368 #if !defined(__amd64__) && !defined(__i386__)
369 	/* Clear master flag /before/ clearing selector flag. */
370 	wmb();
371 #endif
372 	l1 = atomic_readandclear_xen_ulong(&v->evtchn_pending_sel);
373 
374 	l1i = pc->last_processed_l1i;
375 	l2i = pc->last_processed_l2i;
376 
377 	while (l1 != 0) {
378 		l1i = (l1i + 1) % LONG_BIT;
379 		masked_l1 = l1 & ((~0UL) << l1i);
380 
381 		if (masked_l1 == 0) {
382 			/*
383 			 * if we masked out all events, wrap around
384 			 * to the beginning.
385 			 */
386 			l1i = LONG_BIT - 1;
387 			l2i = LONG_BIT - 1;
388 			continue;
389 		}
390 		l1i = ffsl(masked_l1) - 1;
391 
392 		do {
393 			l2 = xen_intr_active_ports(pc, l1i);
394 
395 			l2i = (l2i + 1) % LONG_BIT;
396 			masked_l2 = l2 & ((~0UL) << l2i);
397 
398 			if (masked_l2 == 0) {
399 				/* if we masked out all events, move on */
400 				l2i = LONG_BIT - 1;
401 				break;
402 			}
403 			l2i = ffsl(masked_l2) - 1;
404 
405 			/* process port */
406 			port = (l1i * LONG_BIT) + l2i;
407 			evtchn_clear_port(port);
408 
409 			isrc = xen_intr_port_to_isrc[port];
410 			if (__predict_false(isrc == NULL))
411 				continue;
412 
413 			/* Make sure we are firing on the right vCPU */
414 			KASSERT((isrc->xi_cpu == PCPU_GET(cpuid)),
415 				("Received unexpected event on vCPU#%u, event bound to vCPU#%u",
416 				PCPU_GET(cpuid), isrc->xi_cpu));
417 
418 			xen_arch_intr_execute_handlers(isrc, trap_frame);
419 
420 			/*
421 			 * If this is the final port processed,
422 			 * we'll pick up here+1 next time.
423 			 */
424 			pc->last_processed_l1i = l1i;
425 			pc->last_processed_l2i = l2i;
426 
427 		} while (l2i != LONG_BIT - 1);
428 
429 		l2 = xen_intr_active_ports(pc, l1i);
430 		if (l2 == 0) {
431 			/*
432 			 * We handled all ports, so we can clear the
433 			 * selector bit.
434 			 */
435 			l1 &= ~(1UL << l1i);
436 		}
437 	}
438 
439 	return (FILTER_HANDLED);
440 }
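
/*
 * Scan-resume sketch (illustrative): the indices saved above make the
 * next upcall start one bit past the last serviced port, so a busy
 * low-numbered port cannot starve higher-numbered ones:
 *
 *	l1i = (pc->last_processed_l1i + 1) % LONG_BIT;
 *	masked_l1 = l1 & (~0UL << l1i);	// drop bits below the resume point
 */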
441 
442 static int
443 xen_intr_init(void *dummy __unused)
444 {
445 	shared_info_t *s = HYPERVISOR_shared_info;
446 	struct xen_intr_pcpu_data *pcpu;
447 	int i;
448 
449 	if (!xen_domain())
450 		return (0);
451 
452 	_Static_assert(is_valid_evtchn(0),
453 	    "is_valid_evtchn(0) fails (unused by Xen, but valid by interface)");
454 	_Static_assert(is_valid_evtchn(NR_EVENT_CHANNELS - 1),
455 	    "is_valid_evtchn(max) fails (is a valid channel)");
456 	_Static_assert(!is_valid_evtchn(NR_EVENT_CHANNELS),
457 	    "is_valid_evtchn(>max) fails (NOT a valid channel)");
458 	_Static_assert(!is_valid_evtchn(~(evtchn_port_t)0),
459 	    "is_valid_evtchn(maxint) fails (overflow?)");
460 	_Static_assert(!is_valid_evtchn(INVALID_EVTCHN),
461 	    "is_valid_evtchn(INVALID_EVTCHN) fails (must be invalid!)");
462 	_Static_assert(!is_valid_evtchn(-1),
463 	    "is_valid_evtchn(-1) fails (negative are invalid)");
464 
465 	mtx_init(&xen_intr_isrc_lock, "xen-irq-lock", NULL, MTX_DEF);
466 
467 	/*
468 	 * Set the per-cpu mask of CPU#0 to enable all, since by default all
469 	 * event channels are bound to CPU#0.
470 	 */
471 	CPU_FOREACH(i) {
472 		pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
473 		memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
474 		    sizeof(pcpu->evtchn_enabled));
475 	}
476 
477 	for (i = 0; i < nitems(s->evtchn_mask); i++)
478 		atomic_store_rel_xen_ulong(&s->evtchn_mask[i], ~0);
479 
480 	xen_arch_intr_init();
481 
482 	if (bootverbose)
483 		printf("Xen interrupt system initialized\n");
484 
485 	return (0);
486 }
487 SYSINIT(xen_intr_init, SI_SUB_INTR, SI_ORDER_SECOND, xen_intr_init, NULL);
488 
489 /*--------------------------- Common PIC Functions ---------------------------*/
490 
491 static void
492 xen_rebind_ipi(struct xenisrc *isrc)
493 {
494 #ifdef SMP
495 	u_int cpu = isrc->xi_cpu;
496 	u_int vcpu_id = XEN_CPUID_TO_VCPUID(cpu);
497 	int error;
498 	struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };
499 
500 	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
501 	                                    &bind_ipi);
502 	if (error != 0)
503 		panic("unable to rebind xen IPI: %d", error);
504 
505 	isrc->xi_port = bind_ipi.port;
506 #else
507 	panic("Resume IPI event channel on UP");
508 #endif
509 }
510 
511 static void
512 xen_rebind_virq(struct xenisrc *isrc)
513 {
514 	u_int cpu = isrc->xi_cpu;
515 	u_int vcpu_id = XEN_CPUID_TO_VCPUID(cpu);
516 	int error;
517 	struct evtchn_bind_virq bind_virq = { .virq = isrc->xi_virq,
518 	                                      .vcpu = vcpu_id };
519 
520 	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
521 	                                    &bind_virq);
522 	if (error != 0)
523 		panic("unable to rebind xen VIRQ#%u: %d", isrc->xi_virq, error);
524 
525 	isrc->xi_port = bind_virq.port;
526 }
527 
528 static struct xenisrc *
529 xen_intr_rebind_isrc(struct xenisrc *isrc)
530 {
531 #ifdef SMP
532 	u_int cpu = isrc->xi_cpu;
533 	int error;
534 #endif
535 	struct xenisrc *prev;
536 
537 	switch (isrc->xi_type) {
538 	case EVTCHN_TYPE_IPI:
539 		xen_rebind_ipi(isrc);
540 		break;
541 	case EVTCHN_TYPE_VIRQ:
542 		xen_rebind_virq(isrc);
543 		break;
544 	default:
545 		return (NULL);
546 	}
547 
548 	prev = xen_intr_port_to_isrc[isrc->xi_port];
549 	xen_intr_port_to_isrc[isrc->xi_port] = isrc;
550 
551 #ifdef SMP
552 	isrc->xi_cpu = 0;
553 	error = xen_intr_assign_cpu(isrc, cpu);
554 	if (error)
555 		panic("%s(): unable to rebind Xen channel %u to vCPU%u: %d",
556 		    __func__, isrc->xi_port, cpu, error);
557 #endif
558 
559 	evtchn_unmask_port(isrc->xi_port);
560 
561 	return (prev);
562 }
563 
564 /**
565  * Return this PIC to service after being suspended.
566  */
567 void
568 xen_intr_resume(void)
569 {
570 	shared_info_t *s = HYPERVISOR_shared_info;
571 	u_int isrc_idx;
572 	int i;
573 
574 	/* Reset the per-CPU masks */
575 	CPU_FOREACH(i) {
576 		struct xen_intr_pcpu_data *pcpu;
577 
578 		pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
579 		memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
580 		    sizeof(pcpu->evtchn_enabled));
581 	}
582 
583 	/* Mask all event channels. */
584 	for (i = 0; i < nitems(s->evtchn_mask); i++)
585 		atomic_store_rel_xen_ulong(&s->evtchn_mask[i], ~0);
586 
587 	/* Clear existing port mappings */
588 	for (isrc_idx = 0; isrc_idx < NR_EVENT_CHANNELS; ++isrc_idx)
589 		if (xen_intr_port_to_isrc[isrc_idx] != NULL)
590 			xen_intr_port_to_isrc[isrc_idx]->xi_port =
591 			    INVALID_EVTCHN;
592 
593 	/* Remap in-use isrcs, using xen_intr_port_to_isrc as the list */
594 	for (isrc_idx = 0; isrc_idx < NR_EVENT_CHANNELS; ++isrc_idx) {
595 		struct xenisrc *cur = xen_intr_port_to_isrc[isrc_idx];
596 
597 		/* empty or entry already taken care of */
598 		if (cur == NULL || cur->xi_port == isrc_idx)
599 			continue;
600 
601 		xen_intr_port_to_isrc[isrc_idx] = NULL;
602 
603 		do {
604 			KASSERT(!is_valid_evtchn(cur->xi_port),
605 			    ("%s(): Multiple channels on single intr?",
606 			    __func__));
607 
608 			cur = xen_intr_rebind_isrc(cur);
609 		} while (cur != NULL);
610 	}
611 }
612 
613 /**
614  * Disable a Xen interrupt source.
615  *
616  * \param isrc  The interrupt source to disable.
617  */
618 void
619 xen_intr_disable_intr(struct xenisrc *isrc)
620 {
621 
622 	evtchn_mask_port(isrc->xi_port);
623 }
624 
625 /**
626  * Configure CPU affinity for interrupt source event delivery.
627  *
628  * \param isrc     The interrupt source to configure.
629  * \param to_cpu   The id of the CPU for handling future events.
630  *
631  * \returns  0 if successful, otherwise an errno.
632  */
633 int
634 xen_intr_assign_cpu(struct xenisrc *isrc, u_int to_cpu)
635 {
636 #ifdef SMP
637 	struct evtchn_bind_vcpu bind_vcpu;
638 	u_int vcpu_id = XEN_CPUID_TO_VCPUID(to_cpu);
639 	int error, masked;
640 
641 	if (!xen_has_percpu_evtchn())
642 		return (EOPNOTSUPP);
643 
644 	mtx_lock(&xen_intr_isrc_lock);
645 	if (!is_valid_evtchn(isrc->xi_port)) {
646 		mtx_unlock(&xen_intr_isrc_lock);
647 		return (EINVAL);
648 	}
649 
650 	/*
651 	 * Mask the event channel while binding it to prevent interrupt
652 	 * delivery with an inconsistent state in isrc->xi_cpu.
653 	 */
654 	masked = evtchn_test_and_set_mask(isrc->xi_port);
655 	if ((isrc->xi_type == EVTCHN_TYPE_VIRQ) ||
656 	    (isrc->xi_type == EVTCHN_TYPE_IPI)) {
657 		/*
658 		 * Virtual IRQs and IPIs are associated with a cpu by
659 		 * the hypervisor at bind time, so
660 		 * all we need to do is update the per-CPU masks.
661 		 */
662 		evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
663 		isrc->xi_cpu = to_cpu;
664 		evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
665 		goto out;
666 	}
667 
668 	bind_vcpu.port = isrc->xi_port;
669 	bind_vcpu.vcpu = vcpu_id;
670 
671 	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu);
672 	if (isrc->xi_cpu != to_cpu) {
673 		if (error == 0) {
674 			/* Commit to new binding by removing the old one. */
675 			evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
676 			isrc->xi_cpu = to_cpu;
677 			evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
678 		}
679 	}
680 
681 out:
682 	if (masked == 0)
683 		evtchn_unmask_port(isrc->xi_port);
684 	mtx_unlock(&xen_intr_isrc_lock);
685 	return (0);
686 #else
687 	return (EOPNOTSUPP);
688 #endif
689 }
690 
691 /*------------------- Virtual Interrupt Source PIC Functions -----------------*/
692 /*
693  * Mask a level triggered interrupt source.
694  *
695  * \param isrc  The interrupt source to mask (if necessary).
696  */
697 void
698 xen_intr_disable_source(struct xenisrc *isrc)
699 {
700 
701 	/*
702 	 * NB: checking if the event channel is already masked is
703 	 * needed because the event channel user-space device
704 	 * masks event channels on its filter as part of its
705 	 * normal operation, and those shouldn't be automatically
706 	 * unmasked by the generic interrupt code. The event channel
707 	 * device will unmask them when needed.
708 	 */
709 	isrc->xi_masked = !!evtchn_test_and_set_mask(isrc->xi_port);
710 }
711 
712 /*
713  * Unmask a level triggered interrupt source.
714  *
715  * \param isrc  The interrupt source to unmask (if necessary).
716  */
717 void
718 xen_intr_enable_source(struct xenisrc *isrc)
719 {
720 
721 	if (isrc->xi_masked == 0)
722 		evtchn_unmask_port(isrc->xi_port);
723 }
724 
725 /*
726  * Enable and unmask the interrupt source.
727  *
728  * \param isrc  The interrupt source to enable.
729  */
730 void
731 xen_intr_enable_intr(struct xenisrc *isrc)
732 {
733 
734 	evtchn_unmask_port(isrc->xi_port);
735 }
736 
737 /*--------------------------- Public Functions -------------------------------*/
738 /*------- API comments for these methods can be found in xen/xen_intr.h ------*/
739 int
740 xen_intr_bind_local_port(device_t dev, evtchn_port_t local_port,
741     driver_filter_t filter, driver_intr_t handler, void *arg,
742     enum intr_type flags, xen_intr_handle_t *port_handlep)
743 {
744 	struct xenisrc *isrc;
745 	int error;
746 
747 	error = xen_intr_bind_isrc(&isrc, local_port, EVTCHN_TYPE_PORT,
748 	    device_get_nameunit(dev), filter, handler, arg, flags,
749 	    port_handlep);
750 	if (error != 0)
751 		return (error);
752 
753 	/*
754 	 * The Event Channel API didn't open this port, so it is not
755 	 * responsible for closing it automatically on unbind.
756 	 */
757 	isrc->xi_close = 0;
758 	return (0);
759 }
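
/*
 * Usage sketch for xen_intr_bind_local_port() (hypothetical driver code;
 * the names example_attach, example_filter and sc->handle are made up):
 *
 *	static int
 *	example_attach(device_t dev, struct example_softc *sc,
 *	    evtchn_port_t port)
 *	{
 *		// The port was opened elsewhere, so unbinding the handle
 *		// later will not close it.
 *		return (xen_intr_bind_local_port(dev, port, example_filter,
 *		    NULL, sc, INTR_TYPE_MISC | INTR_MPSAFE, &sc->handle));
 *	}
 */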
760 
761 int
762 xen_intr_alloc_and_bind_local_port(device_t dev, u_int remote_domain,
763     driver_filter_t filter, driver_intr_t handler, void *arg,
764     enum intr_type flags, xen_intr_handle_t *port_handlep)
765 {
766 	struct xenisrc *isrc;
767 	struct evtchn_alloc_unbound alloc_unbound;
768 	int error;
769 
770 	alloc_unbound.dom        = DOMID_SELF;
771 	alloc_unbound.remote_dom = remote_domain;
772 	error = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
773 		    &alloc_unbound);
774 	if (error != 0) {
775 		/*
776 		 * XXX Trap Hypercall error code Linuxisms in
777 		 *     the HYPERCALL layer.
778 		 */
779 		return (-error);
780 	}
781 
782 	error = xen_intr_bind_isrc(&isrc, alloc_unbound.port, EVTCHN_TYPE_PORT,
783 	    device_get_nameunit(dev), filter, handler, arg, flags,
784 	    port_handlep);
785 	if (error != 0) {
786 		evtchn_close_t close = { .port = alloc_unbound.port };
787 		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
788 			panic("EVTCHNOP_close failed");
789 		return (error);
790 	}
791 
792 	isrc->xi_close = 1;
793 	return (0);
794 }
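
/*
 * Usage sketch (hypothetical): allocate an unbound port for a backend in
 * domain `otherend_id` and advertise it through xenstore so the peer can
 * bind it.  Assumes the xs_printf() helper from xen/xenstore/xenstorevar.h
 * and a made-up node path:
 *
 *	error = xen_intr_alloc_and_bind_local_port(dev, otherend_id,
 *	    example_filter, NULL, sc, INTR_TYPE_MISC | INTR_MPSAFE,
 *	    &sc->handle);
 *	if (error == 0)
 *		error = xs_printf(XST_NIL, path, "event-channel", "%u",
 *		    xen_intr_port(sc->handle));
 */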
795 
796 int
797 xen_intr_bind_remote_port(device_t dev, u_int remote_domain,
798     u_int remote_port, driver_filter_t filter, driver_intr_t handler,
799     void *arg, enum intr_type flags, xen_intr_handle_t *port_handlep)
800 {
801 	struct xenisrc *isrc;
802 	struct evtchn_bind_interdomain bind_interdomain;
803 	int error;
804 
805 	bind_interdomain.remote_dom  = remote_domain;
806 	bind_interdomain.remote_port = remote_port;
807 	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
808 					    &bind_interdomain);
809 	if (error != 0) {
810 		/*
811 		 * XXX Trap Hypercall error code Linuxisms in
812 		 *     the HYPERCALL layer.
813 		 */
814 		return (-error);
815 	}
816 
817 	error = xen_intr_bind_isrc(&isrc, bind_interdomain.local_port,
818 	    EVTCHN_TYPE_PORT, device_get_nameunit(dev), filter, handler, arg,
819 	    flags, port_handlep);
820 	if (error) {
821 		evtchn_close_t close = { .port = bind_interdomain.local_port };
822 		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
823 			panic("EVTCHNOP_close failed");
824 		return (error);
825 	}
826 
827 	/*
828 	 * The Event Channel API opened this port, so it is
829 	 * responsible for closing it automatically on unbind.
830 	 */
831 	isrc->xi_close = 1;
832 	return (0);
833 }
834 
835 int
836 xen_intr_bind_virq(device_t dev, u_int virq, u_int cpu,
837     driver_filter_t filter, driver_intr_t handler, void *arg,
838     enum intr_type flags, xen_intr_handle_t *port_handlep)
839 {
840 	u_int vcpu_id = XEN_CPUID_TO_VCPUID(cpu);
841 	struct xenisrc *isrc;
842 	struct evtchn_bind_virq bind_virq = { .virq = virq, .vcpu = vcpu_id };
843 	int error;
844 
845 	isrc = NULL;
846 	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq);
847 	if (error != 0) {
848 		/*
849 		 * XXX Trap Hypercall error code Linuxisms in
850 		 *     the HYPERCALL layer.
851 		 */
852 		return (-error);
853 	}
854 
855 	error = xen_intr_bind_isrc(&isrc, bind_virq.port, EVTCHN_TYPE_VIRQ,
856 	    device_get_nameunit(dev), filter, handler, arg, flags,
857 	    port_handlep);
858 
859 #ifdef SMP
860 	if (error == 0)
861 		error = xen_arch_intr_event_bind(isrc, cpu);
862 #endif
863 
864 	if (error != 0) {
865 		evtchn_close_t close = { .port = bind_virq.port };
866 
867 		xen_intr_unbind(port_handlep);
868 		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
869 			panic("EVTCHNOP_close failed");
870 		return (error);
871 	}
872 
873 #ifdef SMP
874 	if (isrc->xi_cpu != cpu) {
875 		/*
876 		 * Too early in the boot process for the generic interrupt
877 		 * code to perform the binding.  Update our event channel
878 		 * masks manually so events can't fire on the wrong cpu
879 		 * during AP startup.
880 		 */
881 		xen_intr_assign_cpu(isrc, cpu);
882 	}
883 #endif
884 
885 	/*
886 	 * The Event Channel API opened this port, so it is
887 	 * responsible for closing it automatically on unbind.
888 	 */
889 	isrc->xi_close = 1;
890 	isrc->xi_virq = virq;
891 
892 	return (0);
893 }
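
/*
 * Usage sketch (hypothetical): bind VIRQ_DEBUG on CPU 0, servicing it
 * entirely from a filter routine:
 *
 *	error = xen_intr_bind_virq(dev, VIRQ_DEBUG, 0, example_filter,
 *	    NULL, sc, INTR_TYPE_MISC | INTR_MPSAFE, &sc->handle);
 */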
894 
895 int
896 xen_intr_alloc_and_bind_ipi(u_int cpu, driver_filter_t filter,
897     enum intr_type flags, xen_intr_handle_t *port_handlep)
898 {
899 #ifdef SMP
900 	u_int vcpu_id = XEN_CPUID_TO_VCPUID(cpu);
901 	struct xenisrc *isrc;
902 	struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };
903 	/* Same size as the one used by intr_handler->ih_name. */
904 	char name[MAXCOMLEN + 1];
905 	int error;
906 
907 	isrc = NULL;
908 	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi);
909 	if (error != 0) {
910 		/*
911 		 * XXX Trap Hypercall error code Linuxisms in
912 		 *     the HYPERCALL layer.
913 		 */
914 		return (-error);
915 	}
916 
917 	snprintf(name, sizeof(name), "cpu%u", cpu);
918 
919 	error = xen_intr_bind_isrc(&isrc, bind_ipi.port, EVTCHN_TYPE_IPI,
920 	    name, filter, NULL, NULL, flags, port_handlep);
921 	if (error != 0) {
922 		evtchn_close_t close = { .port = bind_ipi.port };
923 
924 		xen_intr_unbind(port_handlep);
925 		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
926 			panic("EVTCHNOP_close failed");
927 		return (error);
928 	}
929 
930 	if (isrc->xi_cpu != cpu) {
931 		/*
932 		 * Too early in the boot process for the generic interrupt
933 		 * code to perform the binding.  Update our event channel
934 		 * masks manually so events can't fire on the wrong cpu
935 		 * during AP startup.
936 		 */
937 		xen_intr_assign_cpu(isrc, cpu);
938 	}
939 
940 	/*
941 	 * The Event Channel API opened this port, so it is
942 	 * responsible for closing it automatically on unbind.
943 	 */
944 	isrc->xi_close = 1;
945 	return (0);
946 #else
947 	return (EOPNOTSUPP);
948 #endif
949 }
950 
951 int
952 xen_intr_describe(xen_intr_handle_t port_handle, const char *fmt, ...)
953 {
954 	char descr[MAXCOMLEN + 1];
955 	struct xenisrc *isrc;
956 	va_list ap;
957 
958 	isrc = xen_intr_isrc_from_handle(port_handle);
959 	if (isrc == NULL)
960 		return (EINVAL);
961 
962 	va_start(ap, fmt);
963 	vsnprintf(descr, sizeof(descr), fmt, ap);
964 	va_end(ap);
965 	return (xen_arch_intr_describe(isrc, isrc->xi_cookie, descr));
966 }
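
/*
 * Usage sketch (hypothetical): give the bound channel a human-readable
 * label, e.g. for vmstat -i output:
 *
 *	xen_intr_describe(sc->handle, "%s", device_get_nameunit(dev));
 */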
967 
968 void
969 xen_intr_unbind(xen_intr_handle_t *port_handlep)
970 {
971 	struct xenisrc *isrc;
972 
973 	KASSERT(port_handlep != NULL,
974 	    ("NULL xen_intr_handle_t passed to %s", __func__));
975 
976 	isrc = xen_intr_isrc_from_handle(*port_handlep);
977 	*port_handlep = NULL;
978 	if (isrc == NULL)
979 		return;
980 
981 	mtx_lock(&xen_intr_isrc_lock);
982 	if (refcount_release(&isrc->xi_refcount) == 0) {
983 		mtx_unlock(&xen_intr_isrc_lock);
984 		return;
985 	}
986 	mtx_unlock(&xen_intr_isrc_lock);
987 
988 	if (isrc->xi_cookie != NULL)
989 		xen_arch_intr_remove_handler(isrc, isrc->xi_cookie);
990 	xen_intr_release_isrc(isrc);
991 }
992 
993 void
994 xen_intr_signal(xen_intr_handle_t handle)
995 {
996 	struct xenisrc *isrc;
997 
998 	isrc = xen_intr_isrc_from_handle(handle);
999 	if (isrc != NULL) {
1000 		KASSERT(isrc->xi_type == EVTCHN_TYPE_PORT ||
1001 			isrc->xi_type == EVTCHN_TYPE_IPI,
1002 			("xen_intr_signal on something other than a local port"));
1003 		struct evtchn_send send = { .port = isrc->xi_port };
1004 		(void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
1005 	}
1006 }
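
/*
 * Usage sketch (hypothetical): after queueing work visible to the other
 * end of the channel, notify it:
 *
 *	if (notify)
 *		xen_intr_signal(sc->handle);
 */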
1007 
1008 evtchn_port_t
1009 xen_intr_port(xen_intr_handle_t handle)
1010 {
1011 	struct xenisrc *isrc;
1012 
1013 	isrc = xen_intr_isrc_from_handle(handle);
1014 	if (isrc == NULL)
1015 		return (0);
1016 
1017 	return (isrc->xi_port);
1018 }
1019 
1020 int
1021 xen_intr_add_handler(const char *name, driver_filter_t filter,
1022     driver_intr_t handler, void *arg, enum intr_type flags,
1023     xen_intr_handle_t handle)
1024 {
1025 	struct xenisrc *isrc;
1026 	int error;
1027 
1028 	isrc = xen_intr_isrc_from_handle(handle);
1029 	if (isrc == NULL || isrc->xi_cookie != NULL)
1030 		return (EINVAL);
1031 
1032 	error = xen_arch_intr_add_handler(name, filter, handler, arg,
1033 	    flags | INTR_EXCL, isrc, &isrc->xi_cookie);
1034 	if (error != 0)
1035 		printf("%s: %s: add handler failed: %d\n", name, __func__,
1036 		    error);
1037 
1038 	return (error);
1039 }
1040 
1041 int
1042 xen_intr_get_evtchn_from_port(evtchn_port_t port, xen_intr_handle_t *handlep)
1043 {
1044 
1045 	if (!is_valid_evtchn(port))
1046 		return (EINVAL);
1047 
1048 	if (handlep == NULL)
1049 		return (EINVAL);
1051 
1052 	mtx_lock(&xen_intr_isrc_lock);
1053 	if (xen_intr_port_to_isrc[port] == NULL) {
1054 		mtx_unlock(&xen_intr_isrc_lock);
1055 		return (EINVAL);
1056 	}
1057 	refcount_acquire(&xen_intr_port_to_isrc[port]->xi_refcount);
1058 	mtx_unlock(&xen_intr_isrc_lock);
1059 
1060 	/* Assign the opaque handle. */
1061 	*handlep = xen_intr_handle_from_isrc(xen_intr_port_to_isrc[port]);
1062 
1063 	return (0);
1064 }
1065 
1066 #ifdef DDB
1067 static const char *
1068 xen_intr_print_type(enum evtchn_type type)
1069 {
1070 	static const char *evtchn_type_to_string[EVTCHN_TYPE_COUNT] = {
1071 		[EVTCHN_TYPE_UNBOUND]	= "UNBOUND",
1072 		[EVTCHN_TYPE_VIRQ]	= "VIRQ",
1073 		[EVTCHN_TYPE_IPI]	= "IPI",
1074 		[EVTCHN_TYPE_PORT]	= "PORT",
1075 	};
1076 
1077 	if (type >= EVTCHN_TYPE_COUNT)
1078 		return ("UNKNOWN");
1079 
1080 	return (evtchn_type_to_string[type]);
1081 }
1082 
1083 static void
1084 xen_intr_dump_port(struct xenisrc *isrc)
1085 {
1086 	struct xen_intr_pcpu_data *pcpu;
1087 	shared_info_t *s = HYPERVISOR_shared_info;
1088 	u_int i;
1089 
1090 	db_printf("Port %u Type: %s\n",
1091 	    isrc->xi_port, xen_intr_print_type(isrc->xi_type));
1092 	if (isrc->xi_type == EVTCHN_TYPE_VIRQ)
1093 		db_printf("\tVirq: %u\n", isrc->xi_virq);
1094 
1095 	db_printf("\tMasked: %d Pending: %d\n",
1096 	    !!xen_test_bit(isrc->xi_port, &s->evtchn_mask[0]),
1097 	    !!xen_test_bit(isrc->xi_port, &s->evtchn_pending[0]));
1098 
1099 	db_printf("\tPer-CPU Masks: ");
1100 	CPU_FOREACH(i) {
1101 		pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
1102 		db_printf("cpu#%u: %d ", i,
1103 		    !!xen_test_bit(isrc->xi_port, pcpu->evtchn_enabled));
1104 	}
1105 	db_printf("\n");
1106 }
1107 
1108 DB_SHOW_COMMAND(xen_evtchn, db_show_xen_evtchn)
1109 {
1110 	u_int i;
1111 
1112 	if (!xen_domain()) {
1113 		db_printf("Only available on Xen guests\n");
1114 		return;
1115 	}
1116 
1117 	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
1118 		struct xenisrc *isrc;
1119 
1120 		isrc = xen_intr_port_to_isrc[i];
1121 		if (isrc == NULL)
1122 			continue;
1123 
1124 		xen_intr_dump_port(isrc);
1125 	}
1126 }
1127 #endif /* DDB */
1128