/******************************************************************************
 * xen_intr.c
 *
 * Xen event and interrupt services for x86 HVM guests.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, Intel Corporation <xiaofeng.ling@intel.com>
 * Copyright (c) 2012, Spectra Logic Corporation
 * Copyright © 2021-2023, Elliott Mitchell
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/interrupt.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/refcount.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/smp.h>
#include <machine/stdarg.h>

#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/evtchn/evtchnvar.h>

#include <dev/xen/xenpci/xenpcivar.h>
#include <dev/pci/pcivar.h>
#include <machine/xen/arch-intr.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/**
 * Per-cpu event channel processing state.
 */
struct xen_intr_pcpu_data {
	/**
	 * The last event channel bitmap section (level one bit) processed.
	 * This is used to ensure we scan all ports before
	 * servicing an already serviced port again.
	 */
	u_int	last_processed_l1i;

	/**
	 * The last event channel processed within the event channel
	 * bitmap being scanned.
	 */
	u_int	last_processed_l2i;

	/**
	 * A bitmap of ports that can be serviced from this CPU.
	 * A set bit means interrupt handling is enabled.
	 */
	u_long	evtchn_enabled[sizeof(u_long) * 8];
};

/*
 * Start the scan at port 0 by initializing the last scanned
 * location to the highest numbered event channel port.
 */
DPCPU_DEFINE_STATIC(struct xen_intr_pcpu_data, xen_intr_pcpu) = {
	.last_processed_l1i = LONG_BIT - 1,
	.last_processed_l2i = LONG_BIT - 1
};
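
/*
 * Illustrative note (not from the original sources): event channel ports
 * form a two-level bitmap.  Each bit of a vCPU's evtchn_pending_sel word
 * selects one u_long "section" of shared_info->evtchn_pending[], so a
 * port number decomposes as:
 *
 *	port = l1i * LONG_BIT + l2i
 *
 * For example, with LONG_BIT == 64, port 130 lives in section l1i = 2 at
 * bit l2i = 2.  A port is serviced here only when it is pending, not
 * globally masked, and enabled in this CPU's evtchn_enabled[] bitmap (see
 * xen_intr_active_ports() below).
 */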

DPCPU_DECLARE(struct vcpu_info *, vcpu_info);

#define	INVALID_EVTCHN		(~(evtchn_port_t)0) /* Invalid event channel */
#define	is_valid_evtchn(x)	((uintmax_t)(x) < NR_EVENT_CHANNELS)

/*
 * Lock for interrupt core data.
 *
 * Modifying xen_intr_port_to_isrc[], or isrc->xi_port (implies the former)
 * requires this lock be held.  Any time this lock is not held, the condition
 * `!xen_intr_port_to_isrc[i] || (xen_intr_port_to_isrc[i]->xi_port == i)`
 * MUST be true for all values of i which are valid indices of the array.
 *
 * Acquire/release operations for isrc->xi_refcount require this lock be held.
 */
static struct mtx	 xen_intr_isrc_lock;
static struct xenisrc	*xen_intr_port_to_isrc[NR_EVENT_CHANNELS];

/*------------------------- Private Functions --------------------------------*/

/**
 * Retrieve a handle for a Xen interrupt source.
 *
 * \param isrc  A valid Xen interrupt source structure.
 *
 * \returns  A handle suitable for use with xen_intr_isrc_from_handle()
 *           to retrieve the original Xen interrupt source structure.
 */
static inline xen_intr_handle_t
xen_intr_handle_from_isrc(struct xenisrc *isrc)
{
	return (isrc);
}

/**
 * Look up a Xen interrupt source object given an interrupt binding handle.
 *
 * \param handle  A handle initialized by a previous call to
 *                xen_intr_bind_isrc().
 *
 * \returns  A pointer to the Xen interrupt source object associated
 *           with the given interrupt handle.  NULL if no association
 *           currently exists.
 */
static inline struct xenisrc *
xen_intr_isrc_from_handle(xen_intr_handle_t handle)
{
	return ((struct xenisrc *)handle);
}

/**
 * Disable signal delivery for an event channel port on the
 * specified CPU.
 *
 * \param cpu   The CPU on which to mask delivery.
 * \param port  The event channel port to mask.
 *
 * This API is used to manage the port<=>CPU binding of event
 * channel handlers.
 *
 * \note  This operation does not preclude reception of an event
 *        for this event channel on another CPU.  To mask the
 *        event channel globally, use evtchn_mask().
 */
static inline void
evtchn_cpu_mask_port(u_int cpu, evtchn_port_t port)
{
	struct xen_intr_pcpu_data *pcpu;

	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
	xen_clear_bit(port, pcpu->evtchn_enabled);
}

/**
 * Enable signal delivery for an event channel port on the
 * specified CPU.
 *
 * \param cpu   The CPU on which to unmask delivery.
 * \param port  The event channel port to unmask.
 *
 * This API is used to manage the port<=>CPU binding of event
 * channel handlers.
 *
 * \note  This operation does not guarantee that event delivery
 *        is enabled for this event channel port.  The port must
 *        also be globally enabled.  See evtchn_unmask().
 */
static inline void
evtchn_cpu_unmask_port(u_int cpu, evtchn_port_t port)
{
	struct xen_intr_pcpu_data *pcpu;

	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
	xen_set_bit(port, pcpu->evtchn_enabled);
}

/**
 * Free an active Xen interrupt source object.
 *
 * \param isrc  The interrupt source object to release.
 *
 * \returns  0.  The interrupt source is always released.
 */
static int
xen_intr_release_isrc(struct xenisrc *isrc)
{

	mtx_lock(&xen_intr_isrc_lock);
	if (is_valid_evtchn(isrc->xi_port)) {
		evtchn_mask_port(isrc->xi_port);
		evtchn_clear_port(isrc->xi_port);

		/* Rebind port to CPU 0. */
		evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
		evtchn_cpu_unmask_port(0, isrc->xi_port);

		if (isrc->xi_close != 0) {
			struct evtchn_close close = { .port = isrc->xi_port };

			if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
				panic("EVTCHNOP_close failed");
		}

		xen_intr_port_to_isrc[isrc->xi_port] = NULL;
	}
	/* not reachable from xen_intr_port_to_isrc[], unlock */
	mtx_unlock(&xen_intr_isrc_lock);

	xen_arch_intr_release(isrc);
	return (0);
}

/**
 * Associate an interrupt handler with an already allocated local Xen
 * event channel port.
 *
 * \param isrcp         The returned Xen interrupt object associated with
 *                      the specified local port.
 * \param local_port    The event channel to bind.
 * \param type          The event channel type of local_port.
 * \param intr_owner    The device making this bind request.
 * \param filter        An interrupt filter handler.  Specify NULL
 *                      to always dispatch to the ithread handler.
 * \param handler       An interrupt ithread handler.  Optional (can
 *                      specify NULL) if all necessary event actions
 *                      are performed by filter.
 * \param arg           Argument to present to both filter and handler.
 * \param flags         Interrupt handler flags.  See sys/bus.h.
 * \param port_handlep  Pointer to an opaque handle used to manage this
 *                      registration.
 *
 * \returns  0 on success, otherwise an errno.
 */
static int
xen_intr_bind_isrc(struct xenisrc **isrcp, evtchn_port_t local_port,
    enum evtchn_type type, const char *intr_owner, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags,
    xen_intr_handle_t *const port_handlep)
{
	struct xenisrc *isrc;
	int error;

	*isrcp = NULL;
	if (port_handlep == NULL) {
		printf("%s: %s: Bad event handle\n", intr_owner, __func__);
		return (EINVAL);
	}
	*port_handlep = NULL;

	isrc = xen_arch_intr_alloc();
	if (isrc == NULL)
		return (ENOSPC);

	isrc->xi_cookie = NULL;
	isrc->xi_type = type;
	isrc->xi_port = local_port;
	isrc->xi_close = false;
	isrc->xi_cpu = 0;
	refcount_init(&isrc->xi_refcount, 1);
	mtx_lock(&xen_intr_isrc_lock);
	xen_intr_port_to_isrc[isrc->xi_port] = isrc;
	mtx_unlock(&xen_intr_isrc_lock);

#ifdef SMP
	if (type == EVTCHN_TYPE_PORT) {
		/*
		 * By default all interrupts are assigned to vCPU#0
		 * unless specified otherwise, so shuffle them to balance
		 * the interrupt load.
		 */
		xen_intr_assign_cpu(isrc, xen_arch_intr_next_cpu(isrc));
	}
#endif

	/*
	 * If a filter or handler function is provided, add it to the event.
	 * Otherwise the event channel is left masked and without a handler;
	 * the caller is in charge of setting that up.
	 */
	if (filter != NULL || handler != NULL) {
		error = xen_intr_add_handler(intr_owner, filter, handler, arg,
		    flags, xen_intr_handle_from_isrc(isrc));
		if (error != 0) {
			xen_intr_release_isrc(isrc);
			return (error);
		}
	}

	*isrcp = isrc;
	/* Assign the opaque handle. */
	*port_handlep = xen_intr_handle_from_isrc(isrc);
	return (0);
}

/**
 * Determine the event channel ports at the given section of the
 * event port bitmap which have pending events for the given cpu.
 *
 * \param pcpu  The Xen interrupt pcpu data for the cpu being queried.
 * \param idx   The index of the section of the event channel bitmap to
 *              inspect.
 *
 * \returns  A u_long with bits set for every event channel with pending
 *           events.
 */
static inline u_long
xen_intr_active_ports(const struct xen_intr_pcpu_data *const pcpu,
    const u_int idx)
{
	volatile const shared_info_t *const sh = HYPERVISOR_shared_info;

	CTASSERT(sizeof(sh->evtchn_mask[0]) == sizeof(sh->evtchn_pending[0]));
	CTASSERT(sizeof(sh->evtchn_mask[0]) == sizeof(pcpu->evtchn_enabled[0]));
	CTASSERT(sizeof(sh->evtchn_mask) == sizeof(sh->evtchn_pending));
	CTASSERT(sizeof(sh->evtchn_mask) == sizeof(pcpu->evtchn_enabled));
	return (sh->evtchn_pending[idx]
	      & ~sh->evtchn_mask[idx]
	      & pcpu->evtchn_enabled[idx]);
}

/**
 * Interrupt handler for processing all Xen event channel events.
 *
 * The trap frame for the current interrupt is taken from
 * curthread->td_intr_frame.
 */
int
xen_intr_handle_upcall(void *unused __unused)
{
	struct trapframe *trap_frame = curthread->td_intr_frame;
	u_int l1i, l2i, port, cpu __diagused;
	u_long masked_l1, masked_l2;
	struct xenisrc *isrc;
	vcpu_info_t *v;
	struct xen_intr_pcpu_data *pc;
	u_long l1, l2;

	/* We must remain on the same vCPU during this function. */
	CRITICAL_ASSERT(curthread);

	cpu = PCPU_GET(cpuid);
	pc  = DPCPU_PTR(xen_intr_pcpu);
	v   = DPCPU_GET(vcpu_info);

	if (!xen_has_percpu_evtchn()) {
		KASSERT((cpu == 0), ("Fired PCI event callback on wrong CPU"));
	}

	v->evtchn_upcall_pending = 0;
/* No need for a barrier on x86 -- XCHG is a barrier on x86. */
#if !defined(__amd64__) && !defined(__i386__)
	/* Clear master flag /before/ clearing selector flag. */
	wmb();
#endif
	l1 = atomic_readandclear_long(&v->evtchn_pending_sel);

	l1i = pc->last_processed_l1i;
	l2i = pc->last_processed_l2i;

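	/*
	 * Illustrative note (not from the original sources): the scan below
	 * resumes one bit past where the previous upcall stopped, at both
	 * levels, so a high-frequency port cannot starve its neighbors.
	 * E.g. if the last serviced port was 130 (l1i = 2, l2i = 2 with
	 * LONG_BIT == 64), the next scan considers bit 3 of section 2 first
	 * and only wraps back to port 130 after every other pending port
	 * has had a chance to be serviced.
	 */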
	while (l1 != 0) {
		l1i = (l1i + 1) % LONG_BIT;
		masked_l1 = l1 & ((~0UL) << l1i);

		if (masked_l1 == 0) {
			/*
			 * If we masked out all events, wrap around
			 * to the beginning.
			 */
			l1i = LONG_BIT - 1;
			l2i = LONG_BIT - 1;
			continue;
		}
		l1i = ffsl(masked_l1) - 1;

		do {
			l2 = xen_intr_active_ports(pc, l1i);

			l2i = (l2i + 1) % LONG_BIT;
			masked_l2 = l2 & ((~0UL) << l2i);

			if (masked_l2 == 0) {
				/* If we masked out all events, move on. */
				l2i = LONG_BIT - 1;
				break;
			}
			l2i = ffsl(masked_l2) - 1;

			/* Process the port. */
			port = (l1i * LONG_BIT) + l2i;
			evtchn_clear_port(port);

			isrc = xen_intr_port_to_isrc[port];
			if (__predict_false(isrc == NULL))
				continue;

			/* Make sure we are firing on the right vCPU. */
			KASSERT((isrc->xi_cpu == PCPU_GET(cpuid)),
				("Received unexpected event on vCPU#%u, event bound to vCPU#%u",
				PCPU_GET(cpuid), isrc->xi_cpu));

			xen_arch_intr_execute_handlers(isrc, trap_frame);

			/*
			 * If this is the final port processed,
			 * we'll pick up here+1 next time.
			 */
			pc->last_processed_l1i = l1i;
			pc->last_processed_l2i = l2i;

		} while (l2i != LONG_BIT - 1);

		l2 = xen_intr_active_ports(pc, l1i);
		if (l2 == 0) {
			/*
			 * We handled all ports, so we can clear the
			 * selector bit.
			 */
			l1 &= ~(1UL << l1i);
		}
	}

	return (FILTER_HANDLED);
}

static int
xen_intr_init(void *dummy __unused)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	struct xen_intr_pcpu_data *pcpu;
	int i;

	if (!xen_domain())
		return (0);

	_Static_assert(is_valid_evtchn(0),
	    "is_valid_evtchn(0) fails (unused by Xen, but valid by interface)");
	_Static_assert(is_valid_evtchn(NR_EVENT_CHANNELS - 1),
	    "is_valid_evtchn(max) fails (is a valid channel)");
	_Static_assert(!is_valid_evtchn(NR_EVENT_CHANNELS),
	    "is_valid_evtchn(>max) fails (NOT a valid channel)");
	_Static_assert(!is_valid_evtchn(~(evtchn_port_t)0),
	    "is_valid_evtchn(maxint) fails (overflow?)");
	_Static_assert(!is_valid_evtchn(INVALID_EVTCHN),
	    "is_valid_evtchn(INVALID_EVTCHN) fails (must be invalid!)");
	_Static_assert(!is_valid_evtchn(-1),
	    "is_valid_evtchn(-1) fails (negative values are invalid)");

	mtx_init(&xen_intr_isrc_lock, "xen-irq-lock", NULL, MTX_DEF);

	/*
	 * Set the per-cpu mask of CPU#0 to enable all, since by default all
	 * event channels are bound to CPU#0.
	 */
	CPU_FOREACH(i) {
		pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
		memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
		    sizeof(pcpu->evtchn_enabled));
	}

	for (i = 0; i < nitems(s->evtchn_mask); i++)
		atomic_store_rel_long(&s->evtchn_mask[i], ~0);

	xen_arch_intr_init();

	if (bootverbose)
		printf("Xen interrupt system initialized\n");

	return (0);
}
SYSINIT(xen_intr_init, SI_SUB_INTR, SI_ORDER_SECOND, xen_intr_init, NULL);

/*--------------------------- Common PIC Functions ---------------------------*/

static void
xen_rebind_ipi(struct xenisrc *isrc)
{
#ifdef SMP
	u_int cpu = isrc->xi_cpu;
	u_int vcpu_id = XEN_CPUID_TO_VCPUID(cpu);
	int error;
	struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };

	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
	                                    &bind_ipi);
	if (error != 0)
		panic("unable to rebind xen IPI: %d", error);

	isrc->xi_port = bind_ipi.port;
#else
	panic("Resume IPI event channel on UP");
#endif
}

static void
xen_rebind_virq(struct xenisrc *isrc)
{
	u_int cpu = isrc->xi_cpu;
	u_int vcpu_id = XEN_CPUID_TO_VCPUID(cpu);
	int error;
	struct evtchn_bind_virq bind_virq = { .virq = isrc->xi_virq,
	                                      .vcpu = vcpu_id };

	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
	                                    &bind_virq);
	if (error != 0)
		panic("unable to rebind xen VIRQ#%u: %d", isrc->xi_virq, error);

	isrc->xi_port = bind_virq.port;
}

static struct xenisrc *
xen_intr_rebind_isrc(struct xenisrc *isrc)
{
#ifdef SMP
	u_int cpu = isrc->xi_cpu;
	int error;
#endif
	struct xenisrc *prev;

	switch (isrc->xi_type) {
	case EVTCHN_TYPE_IPI:
		xen_rebind_ipi(isrc);
		break;
	case EVTCHN_TYPE_VIRQ:
		xen_rebind_virq(isrc);
		break;
	default:
		return (NULL);
	}

	prev = xen_intr_port_to_isrc[isrc->xi_port];
	xen_intr_port_to_isrc[isrc->xi_port] = isrc;

#ifdef SMP
	isrc->xi_cpu = 0;
	error = xen_intr_assign_cpu(isrc, cpu);
	if (error)
		panic("%s(): unable to rebind Xen channel %u to vCPU%u: %d",
		    __func__, isrc->xi_port, cpu, error);
#endif

	evtchn_unmask_port(isrc->xi_port);

	return (prev);
}

/**
 * Return this PIC to service after being suspended.
 */
void
xen_intr_resume(void)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	u_int isrc_idx;
	int i;

	/* Reset the per-CPU masks. */
	CPU_FOREACH(i) {
		struct xen_intr_pcpu_data *pcpu;

		pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
		memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
		    sizeof(pcpu->evtchn_enabled));
	}

	/* Mask all event channels. */
	for (i = 0; i < nitems(s->evtchn_mask); i++)
		atomic_store_rel_long(&s->evtchn_mask[i], ~0);

	/* Clear existing port mappings. */
	for (isrc_idx = 0; isrc_idx < NR_EVENT_CHANNELS; ++isrc_idx)
		if (xen_intr_port_to_isrc[isrc_idx] != NULL)
			xen_intr_port_to_isrc[isrc_idx]->xi_port =
			    INVALID_EVTCHN;

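	/*
	 * Illustrative note (not from the original sources): after resume
	 * the hypervisor may hand out different port numbers, so rebinding
	 * an isrc can land it on a port whose pre-suspend owner has not
	 * been visited yet.  xen_intr_rebind_isrc() returns that displaced
	 * owner, and the do/while below keeps rebinding along the chain
	 * until it ends, which is why one array slot can seed several
	 * rebinds.
	 */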
	/* Remap in-use isrcs, using xen_intr_port_to_isrc as the listing. */
	for (isrc_idx = 0; isrc_idx < NR_EVENT_CHANNELS; ++isrc_idx) {
		struct xenisrc *cur = xen_intr_port_to_isrc[isrc_idx];

		/* Empty, or the entry was already taken care of. */
		if (cur == NULL || cur->xi_port == isrc_idx)
			continue;

		xen_intr_port_to_isrc[isrc_idx] = NULL;

		do {
			KASSERT(!is_valid_evtchn(cur->xi_port),
			    ("%s(): Multiple channels on single intr?",
			    __func__));

			cur = xen_intr_rebind_isrc(cur);
		} while (cur != NULL);
	}
}

/**
 * Disable a Xen interrupt source.
 *
 * \param isrc  The interrupt source to disable.
 */
void
xen_intr_disable_intr(struct xenisrc *isrc)
{

	evtchn_mask_port(isrc->xi_port);
}

/**
 * Configure CPU affinity for interrupt source event delivery.
 *
 * \param isrc     The interrupt source to configure.
 * \param to_cpu   The id of the CPU for handling future events.
 *
 * \returns  0 if successful, otherwise an errno.
 */
int
xen_intr_assign_cpu(struct xenisrc *isrc, u_int to_cpu)
{
#ifdef SMP
	struct evtchn_bind_vcpu bind_vcpu;
	u_int vcpu_id = XEN_CPUID_TO_VCPUID(to_cpu);
	int error, masked;

	if (!xen_has_percpu_evtchn())
		return (EOPNOTSUPP);

	mtx_lock(&xen_intr_isrc_lock);
	if (!is_valid_evtchn(isrc->xi_port)) {
		mtx_unlock(&xen_intr_isrc_lock);
		return (EINVAL);
	}

	/*
	 * Mask the event channel while binding it to prevent interrupt
	 * delivery with an inconsistent state in isrc->xi_cpu.
	 */
	masked = evtchn_test_and_set_mask(isrc->xi_port);
	if ((isrc->xi_type == EVTCHN_TYPE_VIRQ) ||
	    (isrc->xi_type == EVTCHN_TYPE_IPI)) {
		/*
		 * Virtual IRQs are associated with a cpu by
		 * the Hypervisor at evtchn_bind_virq time, so
		 * all we need to do is update the per-CPU masks.
		 */
		evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
		isrc->xi_cpu = to_cpu;
		evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
		goto out;
	}

	bind_vcpu.port = isrc->xi_port;
	bind_vcpu.vcpu = vcpu_id;

	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu);
	if (error == 0 && isrc->xi_cpu != to_cpu) {
		/* Commit to the new binding by removing the old one. */
		evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
		isrc->xi_cpu = to_cpu;
		evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
	}

out:
	if (masked == 0)
		evtchn_unmask_port(isrc->xi_port);
	mtx_unlock(&xen_intr_isrc_lock);
	return (0);
#else
	return (EOPNOTSUPP);
#endif
}

/*------------------- Virtual Interrupt Source PIC Functions -----------------*/
/*
 * Mask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to mask (if necessary).
 */
void
xen_intr_disable_source(struct xenisrc *isrc)
{

	/*
	 * NB: checking if the event channel is already masked is
	 * needed because the event channel user-space device
	 * masks event channels on its filter as part of its
	 * normal operation, and those shouldn't be automatically
	 * unmasked by the generic interrupt code.  The event channel
	 * device will unmask them when needed.
	 */
	isrc->xi_masked = !!evtchn_test_and_set_mask(isrc->xi_port);
}

/*
 * Unmask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to unmask (if necessary).
 */
void
xen_intr_enable_source(struct xenisrc *isrc)
{

	if (isrc->xi_masked == 0)
		evtchn_unmask_port(isrc->xi_port);
}

/*
 * Enable and unmask the interrupt source.
 *
 * \param isrc  The interrupt source to enable.
 */
void
xen_intr_enable_intr(struct xenisrc *isrc)
{

	evtchn_unmask_port(isrc->xi_port);
}

/*--------------------------- Public Functions -------------------------------*/
/*------- API comments for these methods can be found in xen/xenintr.h -------*/
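
/*
 * Illustrative sketch (not part of the original file): a typical driver
 * lifecycle for a local event channel port.  "dev", "port", "my_filter"
 * and "sc" are hypothetical names.
 *
 *	xen_intr_handle_t handle;
 *	int error;
 *
 *	error = xen_intr_bind_local_port(dev, port, my_filter, NULL, sc,
 *	    INTR_TYPE_MISC | INTR_MPSAFE, &handle);
 *	if (error != 0)
 *		return (error);
 *	xen_intr_describe(handle, "dev%u", 0);
 *	xen_intr_signal(handle);	(kick the remote end of the channel)
 *	...
 *	xen_intr_unbind(&handle);	(also clears the handle pointer)
 */
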
int
xen_intr_bind_local_port(device_t dev, evtchn_port_t local_port,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	int error;

	error = xen_intr_bind_isrc(&isrc, local_port, EVTCHN_TYPE_PORT,
	    device_get_nameunit(dev), filter, handler, arg, flags,
	    port_handlep);
	if (error != 0)
		return (error);

	/*
	 * The Event Channel API didn't open this port, so it is not
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 0;
	return (0);
}

int
xen_intr_alloc_and_bind_local_port(device_t dev, u_int remote_domain,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	struct evtchn_alloc_unbound alloc_unbound;
	int error;

	alloc_unbound.dom        = DOMID_SELF;
	alloc_unbound.remote_dom = remote_domain;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
		    &alloc_unbound);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 *     the HYPERCALL layer.
		 */
		return (-error);
	}

	error = xen_intr_bind_isrc(&isrc, alloc_unbound.port, EVTCHN_TYPE_PORT,
	    device_get_nameunit(dev), filter, handler, arg, flags,
	    port_handlep);
	if (error != 0) {
		evtchn_close_t close = { .port = alloc_unbound.port };

		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

	isrc->xi_close = 1;
	return (0);
}

int
xen_intr_bind_remote_port(device_t dev, u_int remote_domain,
    u_int remote_port, driver_filter_t filter, driver_intr_t handler,
    void *arg, enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	struct evtchn_bind_interdomain bind_interdomain;
	int error;

	bind_interdomain.remote_dom  = remote_domain;
	bind_interdomain.remote_port = remote_port;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					    &bind_interdomain);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 *     the HYPERCALL layer.
		 */
		return (-error);
	}

	error = xen_intr_bind_isrc(&isrc, bind_interdomain.local_port,
	    EVTCHN_TYPE_PORT, device_get_nameunit(dev), filter, handler, arg,
	    flags, port_handlep);
	if (error != 0) {
		evtchn_close_t close = { .port = bind_interdomain.local_port };

		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

	/*
	 * The Event Channel API opened this port, so it is
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 1;
	return (0);
}

int
xen_intr_bind_virq(device_t dev, u_int virq, u_int cpu,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	u_int vcpu_id = XEN_CPUID_TO_VCPUID(cpu);
	struct xenisrc *isrc;
	struct evtchn_bind_virq bind_virq = { .virq = virq, .vcpu = vcpu_id };
	int error;

	isrc = NULL;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 *     the HYPERCALL layer.
		 */
		return (-error);
	}

	error = xen_intr_bind_isrc(&isrc, bind_virq.port, EVTCHN_TYPE_VIRQ,
	    device_get_nameunit(dev), filter, handler, arg, flags,
	    port_handlep);

#ifdef SMP
	if (error == 0)
		error = xen_arch_intr_event_bind(isrc, cpu);
#endif

	if (error != 0) {
		evtchn_close_t close = { .port = bind_virq.port };

		xen_intr_unbind(port_handlep);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

#ifdef SMP
	if (isrc->xi_cpu != cpu) {
		/*
		 * Too early in the boot process for the generic interrupt
		 * code to perform the binding.  Update our event channel
		 * masks manually so events can't fire on the wrong cpu
		 * during AP startup.
		 */
		xen_intr_assign_cpu(isrc, cpu);
	}
#endif

	/*
	 * The Event Channel API opened this port, so it is
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 1;
	isrc->xi_virq = virq;

	return (0);
}

int
xen_intr_alloc_and_bind_ipi(u_int cpu, driver_filter_t filter,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
#ifdef SMP
	u_int vcpu_id = XEN_CPUID_TO_VCPUID(cpu);
	struct xenisrc *isrc;
	struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };
	/* Same size as the one used by intr_handler->ih_name. */
	char name[MAXCOMLEN + 1];
	int error;

	isrc = NULL;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 *     the HYPERCALL layer.
		 */
		return (-error);
	}

	snprintf(name, sizeof(name), "cpu%u", cpu);

	error = xen_intr_bind_isrc(&isrc, bind_ipi.port, EVTCHN_TYPE_IPI,
	    name, filter, NULL, NULL, flags, port_handlep);
	if (error != 0) {
		evtchn_close_t close = { .port = bind_ipi.port };

		xen_intr_unbind(port_handlep);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

	if (isrc->xi_cpu != cpu) {
		/*
		 * Too early in the boot process for the generic interrupt
		 * code to perform the binding.  Update our event channel
		 * masks manually so events can't fire on the wrong cpu
		 * during AP startup.
		 */
		xen_intr_assign_cpu(isrc, cpu);
	}

	/*
	 * The Event Channel API opened this port, so it is
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 1;
	return (0);
#else
	return (EOPNOTSUPP);
#endif
}

int
xen_intr_describe(xen_intr_handle_t port_handle, const char *fmt, ...)
{
	char descr[MAXCOMLEN + 1];
	struct xenisrc *isrc;
	va_list ap;

	isrc = xen_intr_isrc_from_handle(port_handle);
	if (isrc == NULL)
		return (EINVAL);

	va_start(ap, fmt);
	vsnprintf(descr, sizeof(descr), fmt, ap);
	va_end(ap);
	return (xen_arch_intr_describe(isrc, isrc->xi_cookie, descr));
}

void
xen_intr_unbind(xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;

	KASSERT(port_handlep != NULL,
	    ("NULL xen_intr_handle_t passed to %s", __func__));

	isrc = xen_intr_isrc_from_handle(*port_handlep);
	*port_handlep = NULL;
	if (isrc == NULL)
		return;

	mtx_lock(&xen_intr_isrc_lock);
	if (refcount_release(&isrc->xi_refcount) == 0) {
		mtx_unlock(&xen_intr_isrc_lock);
		return;
	}
	mtx_unlock(&xen_intr_isrc_lock);

	if (isrc->xi_cookie != NULL)
		xen_arch_intr_remove_handler(isrc, isrc->xi_cookie);
	xen_intr_release_isrc(isrc);
}

void
xen_intr_signal(xen_intr_handle_t handle)
{
	struct xenisrc *isrc;

	isrc = xen_intr_isrc_from_handle(handle);
	if (isrc != NULL) {
		KASSERT(isrc->xi_type == EVTCHN_TYPE_PORT ||
			isrc->xi_type == EVTCHN_TYPE_IPI,
			("evtchn_signal on something other than a local port"));
		struct evtchn_send send = { .port = isrc->xi_port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
	}
}

evtchn_port_t
xen_intr_port(xen_intr_handle_t handle)
{
	struct xenisrc *isrc;

	isrc = xen_intr_isrc_from_handle(handle);
	if (isrc == NULL)
		return (0);

	return (isrc->xi_port);
}

int
xen_intr_add_handler(const char *name, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags,
    xen_intr_handle_t handle)
{
	struct xenisrc *isrc;
	int error;

	isrc = xen_intr_isrc_from_handle(handle);
	if (isrc == NULL || isrc->xi_cookie != NULL)
		return (EINVAL);

	error = xen_arch_intr_add_handler(name, filter, handler, arg,
	    flags | INTR_EXCL, isrc, &isrc->xi_cookie);
	if (error != 0)
		printf("%s: %s: add handler failed: %d\n", name, __func__,
		    error);

	return (error);
}

int
xen_intr_get_evtchn_from_port(evtchn_port_t port, xen_intr_handle_t *handlep)
{

	if (!is_valid_evtchn(port))
		return (EINVAL);

	if (handlep == NULL)
		return (EINVAL);

	mtx_lock(&xen_intr_isrc_lock);
	if (xen_intr_port_to_isrc[port] == NULL) {
		mtx_unlock(&xen_intr_isrc_lock);
		return (EINVAL);
	}
	refcount_acquire(&xen_intr_port_to_isrc[port]->xi_refcount);
	mtx_unlock(&xen_intr_isrc_lock);

	/* Assign the opaque handle. */
	*handlep = xen_intr_handle_from_isrc(xen_intr_port_to_isrc[port]);

	return (0);
}
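
/*
 * Illustrative note (not from the original sources): a successful call to
 * xen_intr_get_evtchn_from_port() takes an extra reference on the
 * underlying interrupt source (refcount_acquire above), so each such call
 * must eventually be paired with xen_intr_unbind() to drop it, e.g.:
 *
 *	xen_intr_handle_t h;
 *
 *	if (xen_intr_get_evtchn_from_port(port, &h) == 0) {
 *		... use xen_intr_port(h), xen_intr_signal(h), etc. ...
 *		xen_intr_unbind(&h);
 *	}
 */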

#ifdef DDB
static const char *
xen_intr_print_type(enum evtchn_type type)
{
	static const char *evtchn_type_to_string[EVTCHN_TYPE_COUNT] = {
		[EVTCHN_TYPE_UNBOUND]	= "UNBOUND",
		[EVTCHN_TYPE_VIRQ]	= "VIRQ",
		[EVTCHN_TYPE_IPI]	= "IPI",
		[EVTCHN_TYPE_PORT]	= "PORT",
	};

	if (type >= EVTCHN_TYPE_COUNT)
		return ("UNKNOWN");

	return (evtchn_type_to_string[type]);
}

static void
xen_intr_dump_port(struct xenisrc *isrc)
{
	struct xen_intr_pcpu_data *pcpu;
	shared_info_t *s = HYPERVISOR_shared_info;
	u_int i;

	db_printf("Port %u Type: %s\n",
	    isrc->xi_port, xen_intr_print_type(isrc->xi_type));
	if (isrc->xi_type == EVTCHN_TYPE_VIRQ)
		db_printf("\tVirq: %u\n", isrc->xi_virq);

	db_printf("\tMasked: %d Pending: %d\n",
	    !!xen_test_bit(isrc->xi_port, &s->evtchn_mask[0]),
	    !!xen_test_bit(isrc->xi_port, &s->evtchn_pending[0]));

	db_printf("\tPer-CPU Masks: ");
	CPU_FOREACH(i) {
		pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
		db_printf("cpu#%u: %d ", i,
		    !!xen_test_bit(isrc->xi_port, pcpu->evtchn_enabled));
	}
	db_printf("\n");
}

DB_SHOW_COMMAND(xen_evtchn, db_show_xen_evtchn)
{
	u_int i;

	if (!xen_domain()) {
		db_printf("Only available on Xen guests\n");
		return;
	}

	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		struct xenisrc *isrc;

		isrc = xen_intr_port_to_isrc[i];
		if (isrc == NULL)
			continue;

		xen_intr_dump_port(isrc);
	}
}
#endif /* DDB */