/******************************************************************************
 * xen_intr.c
 *
 * Xen event and interrupt services for x86 HVM guests.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, Intel Corporation <xiaofeng.ling@intel.com>
 * Copyright (c) 2012, Spectra Logic Corporation
 * Copyright © 2021-2023, Elliott Mitchell
 *
 * This file may be distributed separately from the Linux kernel, or
 * incorporated into other software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/interrupt.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/refcount.h>
#include <sys/stdarg.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/smp.h>

#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
#include <xen/evtchn/evtchnvar.h>

#include <machine/xen/arch-intr.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/**
 * Per-cpu event channel processing state.
 */
struct xen_intr_pcpu_data {
	/**
	 * The last event channel bitmap section (level one bit) processed.
	 * This is used to ensure we scan all ports before
	 * servicing an already serviced port again.
	 */
	u_int last_processed_l1i;

	/**
	 * The last event channel processed within the event channel
	 * bitmap being scanned.
	 */
	u_int last_processed_l2i;

	/**
	 * A bitmap of ports that can be serviced from this CPU.
	 * A set bit means interrupt handling is enabled.
	 */
	xen_ulong_t evtchn_enabled[sizeof(xen_ulong_t) * 8];
};

/*
 * Start the scan at port 0 by initializing the last scanned
 * location as the highest numbered event channel port.
 */
DPCPU_DEFINE_STATIC(struct xen_intr_pcpu_data, xen_intr_pcpu) = {
	.last_processed_l1i = LONG_BIT - 1,
	.last_processed_l2i = LONG_BIT - 1
};

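/* Per-CPU pointer to the Xen vcpu_info structure for this vCPU. */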
DPCPU_DECLARE(struct vcpu_info *, vcpu_info);

#define	INVALID_EVTCHN		(~(evtchn_port_t)0) /* Invalid event channel */
#define	is_valid_evtchn(x)	((uintmax_t)(x) < NR_EVENT_CHANNELS)

/*
 * Lock for interrupt core data.
 *
 * Modifying xen_intr_port_to_isrc[], or isrc->xi_port (implies the former)
 * requires this lock be held. Any time this lock is not held, the condition
 * `!xen_intr_port_to_isrc[i] || (xen_intr_port_to_isrc[i]->xi_port == i)`
 * MUST be true for all values of i which are valid indices of the array.
 *
 * Acquire/release operations for isrc->xi_refcount require this lock be held.
 */
static struct mtx	 xen_intr_isrc_lock;
static struct xenisrc	*xen_intr_port_to_isrc[NR_EVENT_CHANNELS];

/*------------------------- Private Functions --------------------------------*/

/**
 * Retrieve a handle for a Xen interrupt source.
 *
 * \param isrc  A valid Xen interrupt source structure.
 *
 * \returns  A handle suitable for use with xen_intr_isrc_from_handle()
 *           to retrieve the original Xen interrupt source structure.
 */

static inline xen_intr_handle_t
xen_intr_handle_from_isrc(struct xenisrc *isrc)
{
	return (isrc);
}

/**
 * Lookup a Xen interrupt source object given an interrupt binding handle.
 *
 * \param handle  A handle initialized by a previous call to
 *                xen_intr_bind_isrc().
 *
 * \returns  A pointer to the Xen interrupt source object associated
 *           with the given interrupt handle. NULL if no association
 *           currently exists.
 */
static inline struct xenisrc *
xen_intr_isrc_from_handle(xen_intr_handle_t handle)
{
	return ((struct xenisrc *)handle);
}

/**
 * Disable signal delivery for an event channel port on the
 * specified CPU.
 *
 * \param port  The event channel port to mask.
 *
 * This API is used to manage the port<=>CPU binding of event
 * channel handlers.
 *
 * \note  This operation does not preclude reception of an event
 *        for this event channel on another CPU. To mask the
 *        event channel globally, use evtchn_mask().
 */
static inline void
evtchn_cpu_mask_port(u_int cpu, evtchn_port_t port)
{
	struct xen_intr_pcpu_data *pcpu;

	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
	KASSERT(is_valid_evtchn(port), ("Invalid event channel port"));
	xen_clear_bit(port, pcpu->evtchn_enabled);
}

/**
 * Enable signal delivery for an event channel port on the
 * specified CPU.
 *
 * \param port  The event channel port to unmask.
 *
 * This API is used to manage the port<=>CPU binding of event
 * channel handlers.
 *
 * \note  This operation does not guarantee that event delivery
 *        is enabled for this event channel port. The port must
 *        also be globally enabled. See evtchn_unmask().
 */
static inline void
evtchn_cpu_unmask_port(u_int cpu, evtchn_port_t port)
{
	struct xen_intr_pcpu_data *pcpu;

	pcpu = DPCPU_ID_PTR(cpu, xen_intr_pcpu);
	KASSERT(is_valid_evtchn(port), ("Invalid event channel port"));
	xen_set_bit(port, pcpu->evtchn_enabled);
}

/**
 * Attempt to free an active Xen interrupt source object.
 *
 * \param isrc  The interrupt source object to release.
 *
 * \returns  EBUSY if the source is still in use, otherwise 0.
 */
static int
xen_intr_release_isrc(struct xenisrc *isrc)
{

	mtx_lock(&xen_intr_isrc_lock);
	if (is_valid_evtchn(isrc->xi_port)) {
		evtchn_mask_port(isrc->xi_port);
		evtchn_clear_port(isrc->xi_port);

		/* Rebind port to CPU 0. */
		evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
		evtchn_cpu_unmask_port(0, isrc->xi_port);

		if (isrc->xi_close != 0) {
			struct evtchn_close close = { .port = isrc->xi_port };

			if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
				panic("EVTCHNOP_close failed");
		}

		xen_intr_port_to_isrc[isrc->xi_port] = NULL;
	}
	/* not reachable from xen_intr_port_to_isrc[], unlock */
	mtx_unlock(&xen_intr_isrc_lock);

	xen_arch_intr_release(isrc);
	return (0);
}

/**
 * Associate an interrupt handler with an already allocated local Xen
 * event channel port.
 *
 * \param isrcp         The returned Xen interrupt object associated with
 *                      the specified local port.
 * \param local_port    The event channel to bind.
 * \param type          The event channel type of local_port.
 * \param intr_owner    The device making this bind request.
 * \param filter        An interrupt filter handler. Specify NULL
 *                      to always dispatch to the ithread handler.
 * \param handler       An interrupt ithread handler. Optional (can
 *                      specify NULL) if all necessary event actions
 *                      are performed by filter.
 * \param arg           Argument to present to both filter and handler.
 * \param flags         Interrupt handler flags. See sys/bus.h.
 * \param port_handlep  Pointer to an opaque handle used to manage this
 *                      registration.
 *
 * \returns  0 on success, otherwise an errno.
 */
static int
xen_intr_bind_isrc(struct xenisrc **isrcp, evtchn_port_t local_port,
    enum evtchn_type type, const char *intr_owner, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags,
    xen_intr_handle_t *const port_handlep)
{
	struct xenisrc *isrc;
	int error;

	*isrcp = NULL;
	if (port_handlep == NULL) {
		printf("%s: %s: Bad event handle\n", intr_owner, __func__);
		return (EINVAL);
	}
	*port_handlep = NULL;

	isrc = xen_arch_intr_alloc();
	if (isrc == NULL)
		return (ENOSPC);

	isrc->xi_cookie = NULL;
	isrc->xi_type = type;
	isrc->xi_port = local_port;
	isrc->xi_close = false;
	isrc->xi_cpu = 0;
	refcount_init(&isrc->xi_refcount, 1);
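	/* Publish the new source in the port lookup table. */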
	mtx_lock(&xen_intr_isrc_lock);
	xen_intr_port_to_isrc[isrc->xi_port] = isrc;
	mtx_unlock(&xen_intr_isrc_lock);

#ifdef SMP
	if (type == EVTCHN_TYPE_PORT) {
		/*
		 * By default all interrupts are assigned to vCPU#0
		 * unless specified otherwise, so shuffle them to balance
		 * the interrupt load.
		 */
		xen_intr_assign_cpu(isrc, xen_arch_intr_next_cpu(isrc));
	}
#endif

	/*
	 * If a filter or handler function is provided, add it to the event.
	 * Otherwise the event channel is left masked and without a handler,
	 * the caller is in charge of setting that up.
	 */
	if (filter != NULL || handler != NULL) {
		error = xen_intr_add_handler(intr_owner, filter, handler, arg,
		    flags, xen_intr_handle_from_isrc(isrc));
		if (error != 0) {
			xen_intr_release_isrc(isrc);
			return (error);
		}
	}

	*isrcp = isrc;
	/* Assign the opaque handler */
	*port_handlep = xen_intr_handle_from_isrc(isrc);
	return (0);
}

/**
 * Determine the event channel ports at the given section of the
 * event port bitmap which have pending events for the given cpu.
 *
 * \param pcpu  The Xen interrupt pcpu data for the cpu being queried.
 * \param idx   The index of the section of the event channel bitmap to
 *              inspect.
 *
 * \returns  A u_long with bits set for every event channel with pending
 *           events.
 */
static inline u_long
xen_intr_active_ports(const struct xen_intr_pcpu_data *const pcpu,
    const u_int idx)
{
	volatile const shared_info_t *const sh = HYPERVISOR_shared_info;

	CTASSERT(sizeof(sh->evtchn_mask[0]) == sizeof(sh->evtchn_pending[0]));
	CTASSERT(sizeof(sh->evtchn_mask[0]) == sizeof(pcpu->evtchn_enabled[0]));
	CTASSERT(sizeof(sh->evtchn_mask) == sizeof(sh->evtchn_pending));
	CTASSERT(sizeof(sh->evtchn_mask) == sizeof(pcpu->evtchn_enabled));
	return (sh->evtchn_pending[idx]
	    & ~sh->evtchn_mask[idx]
	    & pcpu->evtchn_enabled[idx]);
}

/**
 * Interrupt handler for processing all Xen event channel events.
 *
 * \param unused
 */
int
xen_intr_handle_upcall(void *unused __unused)
{
	struct trapframe *trap_frame = curthread->td_intr_frame;
	u_int l1i, l2i, port, cpu __diagused;
	u_long masked_l1, masked_l2;
	struct xenisrc *isrc;
	vcpu_info_t *v;
	struct xen_intr_pcpu_data *pc;
	u_long l1, l2;

	/*
	 * The upcall handler is an interrupt handler itself (that calls other
	 * interrupt handlers), hence the caller has the responsibility to
	 * increase td_intr_nesting_level ahead of dispatching the upcall
	 * handler.
	 */
	KASSERT(curthread->td_intr_nesting_level > 0,
	    ("Unexpected thread context"));

	/* We must remain on the same vCPU during this function */
	CRITICAL_ASSERT(curthread);

	cpu = PCPU_GET(cpuid);
	pc = DPCPU_PTR(xen_intr_pcpu);
	v = DPCPU_GET(vcpu_info);

	if (!xen_has_percpu_evtchn()) {
		KASSERT((cpu == 0), ("Fired PCI event callback on wrong CPU"));
	}

	v->evtchn_upcall_pending = 0;
	/* No need for a barrier on x86 -- XCHG is a barrier on x86. */
#if !defined(__amd64__) && !defined(__i386__)
	/* Clear master flag /before/ clearing selector flag. */
	wmb();
#endif
	l1 = atomic_readandclear_xen_ulong(&v->evtchn_pending_sel);

	l1i = pc->last_processed_l1i;
	l2i = pc->last_processed_l2i;

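	/*
	 * Scan the two-level pending bitmap, starting just after the last
	 * position processed so that every port is visited before any port
	 * is serviced a second time.
	 */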
	while (l1 != 0) {
		l1i = (l1i + 1) % LONG_BIT;
		masked_l1 = l1 & ((~0UL) << l1i);

		if (masked_l1 == 0) {
			/*
			 * if we masked out all events, wrap around
			 * to the beginning.
			 */
			l1i = LONG_BIT - 1;
			l2i = LONG_BIT - 1;
			continue;
		}
		l1i = ffsl(masked_l1) - 1;

		do {
			l2 = xen_intr_active_ports(pc, l1i);

			l2i = (l2i + 1) % LONG_BIT;
			masked_l2 = l2 & ((~0UL) << l2i);

			if (masked_l2 == 0) {
				/* if we masked out all events, move on */
				l2i = LONG_BIT - 1;
				break;
			}
			l2i = ffsl(masked_l2) - 1;

			/* process port */
			port = (l1i * LONG_BIT) + l2i;
			evtchn_clear_port(port);

			isrc = xen_intr_port_to_isrc[port];
			if (__predict_false(isrc == NULL))
				continue;

			/* Make sure we are firing on the right vCPU */
			KASSERT((isrc->xi_cpu == PCPU_GET(cpuid)),
			    ("Received unexpected event on vCPU#%u, event bound to vCPU#%u",
			    PCPU_GET(cpuid), isrc->xi_cpu));

			/*
			 * Reduce interrupt nesting level ahead of calling the
			 * per-arch interrupt dispatch helper. This is
			 * required because the per-arch dispatcher will also
			 * increase td_intr_nesting_level, and then handlers
			 * would wrongly see td_intr_nesting_level = 2 when
			 * there's no nesting at all.
			 */
			curthread->td_intr_nesting_level--;
			xen_arch_intr_execute_handlers(isrc, trap_frame);
			curthread->td_intr_nesting_level++;

			/*
			 * If this is the final port processed,
			 * we'll pick up here+1 next time.
			 */
			pc->last_processed_l1i = l1i;
			pc->last_processed_l2i = l2i;

		} while (l2i != LONG_BIT - 1);

		l2 = xen_intr_active_ports(pc, l1i);
		if (l2 == 0) {
			/*
			 * We handled all ports, so we can clear the
			 * selector bit.
			 */
			l1 &= ~(1UL << l1i);
		}
	}

	return (FILTER_HANDLED);
}

static void
xen_intr_init(void *dummy __unused)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	struct xen_intr_pcpu_data *pcpu;
	int i;

	if (!xen_domain())
		return;

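	/* Sanity-check is_valid_evtchn() at and around its boundaries. */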
	_Static_assert(is_valid_evtchn(0),
	    "is_valid_evtchn(0) fails (unused by Xen, but valid by interface)");
	_Static_assert(is_valid_evtchn(NR_EVENT_CHANNELS - 1),
	    "is_valid_evtchn(max) fails (is a valid channel)");
	_Static_assert(!is_valid_evtchn(NR_EVENT_CHANNELS),
	    "is_valid_evtchn(>max) fails (NOT a valid channel)");
	_Static_assert(!is_valid_evtchn(~(evtchn_port_t)0),
	    "is_valid_evtchn(maxint) fails (overflow?)");
	_Static_assert(!is_valid_evtchn(INVALID_EVTCHN),
	    "is_valid_evtchn(INVALID_EVTCHN) fails (must be invalid!)");
	_Static_assert(!is_valid_evtchn(-1),
	    "is_valid_evtchn(-1) fails (negative are invalid)");

	mtx_init(&xen_intr_isrc_lock, "xen-irq-lock", NULL, MTX_DEF);

	/*
	 * Set the per-cpu mask of CPU#0 to enable all, since by default all
	 * event channels are bound to CPU#0.
	 */
	CPU_FOREACH(i) {
		pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
		memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
		    sizeof(pcpu->evtchn_enabled));
	}

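	/* Start with all event channels globally masked. */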
	for (i = 0; i < nitems(s->evtchn_mask); i++)
		atomic_store_rel_xen_ulong(&s->evtchn_mask[i], ~0);

	xen_arch_intr_init();

	if (bootverbose)
		printf("Xen interrupt system initialized\n");
}
SYSINIT(xen_intr_init, SI_SUB_INTR, SI_ORDER_SECOND, xen_intr_init, NULL);

/*--------------------------- Common PIC Functions ---------------------------*/

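/**
 * Re-establish the hypervisor binding for an IPI event channel after
 * suspend/resume, recording the newly allocated port in the isrc.
 */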
static void
xen_rebind_ipi(struct xenisrc *isrc)
{
#ifdef SMP
	u_int cpu = isrc->xi_cpu;
	u_int vcpu_id = XEN_CPUID_TO_VCPUID(cpu);
	int error;
	struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };

	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
	    &bind_ipi);
	if (error != 0)
		panic("unable to rebind xen IPI: %d", error);

	isrc->xi_port = bind_ipi.port;
#else
	panic("Resume IPI event channel on UP");
#endif
}

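/**
 * Re-establish the hypervisor binding for a VIRQ event channel after
 * suspend/resume, recording the newly allocated port in the isrc.
 */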
static void
xen_rebind_virq(struct xenisrc *isrc)
{
	u_int cpu = isrc->xi_cpu;
	u_int vcpu_id = XEN_CPUID_TO_VCPUID(cpu);
	int error;
	struct evtchn_bind_virq bind_virq = { .virq = isrc->xi_virq,
	    .vcpu = vcpu_id };

	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
	    &bind_virq);
	if (error != 0)
		panic("unable to rebind xen VIRQ#%u: %d", isrc->xi_virq, error);

	isrc->xi_port = bind_virq.port;
}

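/**
 * Rebind an interrupt source to a freshly allocated event channel port
 * after resume.
 *
 * \returns  The isrc previously recorded for the new port (if any), so
 *           the caller can continue remapping any source it displaced.
 */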
static struct xenisrc *
xen_intr_rebind_isrc(struct xenisrc *isrc)
{
#ifdef SMP
	u_int cpu = isrc->xi_cpu;
	int error;
#endif
	struct xenisrc *prev;

	switch (isrc->xi_type) {
	case EVTCHN_TYPE_IPI:
		xen_rebind_ipi(isrc);
		break;
	case EVTCHN_TYPE_VIRQ:
		xen_rebind_virq(isrc);
		break;
	default:
		return (NULL);
	}

	prev = xen_intr_port_to_isrc[isrc->xi_port];
	xen_intr_port_to_isrc[isrc->xi_port] = isrc;

#ifdef SMP
	isrc->xi_cpu = 0;
	error = xen_intr_assign_cpu(isrc, cpu);
	if (error)
		panic("%s(): unable to rebind Xen channel %u to vCPU%u: %d",
		    __func__, isrc->xi_port, cpu, error);
#endif

	evtchn_unmask_port(isrc->xi_port);

	return (prev);
}

/**
 * Return this PIC to service after being suspended.
 */
void
xen_intr_resume(void)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	u_int isrc_idx;
	int i;

	/* Reset the per-CPU masks */
	CPU_FOREACH(i) {
		struct xen_intr_pcpu_data *pcpu;

		pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
		memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
		    sizeof(pcpu->evtchn_enabled));
	}

	/* Mask all event channels. */
	for (i = 0; i < nitems(s->evtchn_mask); i++)
		atomic_store_rel_xen_ulong(&s->evtchn_mask[i], ~0);

	/* Clear existing port mappings */
	for (isrc_idx = 0; isrc_idx < NR_EVENT_CHANNELS; ++isrc_idx)
		if (xen_intr_port_to_isrc[isrc_idx] != NULL)
			xen_intr_port_to_isrc[isrc_idx]->xi_port =
			    INVALID_EVTCHN;

	/* Remap in-use isrcs, using xen_intr_port_to_isrc as listing */
	for (isrc_idx = 0; isrc_idx < NR_EVENT_CHANNELS; ++isrc_idx) {
		struct xenisrc *cur = xen_intr_port_to_isrc[isrc_idx];

		/* empty or entry already taken care of */
		if (cur == NULL || cur->xi_port == isrc_idx)
			continue;

		xen_intr_port_to_isrc[isrc_idx] = NULL;

		do {
			KASSERT(!is_valid_evtchn(cur->xi_port),
			    ("%s(): Multiple channels on single intr?",
			    __func__));

			cur = xen_intr_rebind_isrc(cur);
		} while (cur != NULL);
	}
}

/**
 * Disable a Xen interrupt source.
 *
 * \param isrc  The interrupt source to disable.
 */
void
xen_intr_disable_intr(struct xenisrc *isrc)
{

	if (__predict_true(is_valid_evtchn(isrc->xi_port)))
		evtchn_mask_port(isrc->xi_port);
}

/**
 * Configure CPU affinity for interrupt source event delivery.
 *
 * \param isrc    The interrupt source to configure.
 * \param to_cpu  The id of the CPU for handling future events.
 *
 * \returns  0 if successful, otherwise an errno.
 */
int
xen_intr_assign_cpu(struct xenisrc *isrc, u_int to_cpu)
{
#ifdef SMP
	struct evtchn_bind_vcpu bind_vcpu;
	u_int vcpu_id = XEN_CPUID_TO_VCPUID(to_cpu);
	int error, masked;

	if (!xen_has_percpu_evtchn())
		return (EOPNOTSUPP);

	mtx_lock(&xen_intr_isrc_lock);
	if (!is_valid_evtchn(isrc->xi_port)) {
		mtx_unlock(&xen_intr_isrc_lock);
		return (EINVAL);
	}

	/*
	 * Mask the event channel while binding it to prevent interrupt
	 * delivery with an inconsistent state in isrc->xi_cpu.
	 */
	masked = evtchn_test_and_set_mask(isrc->xi_port);
	if ((isrc->xi_type == EVTCHN_TYPE_VIRQ) ||
	    (isrc->xi_type == EVTCHN_TYPE_IPI)) {
		/*
		 * Virtual IRQs are associated with a cpu by
		 * the Hypervisor at evtchn_bind_virq time, so
		 * all we need to do is update the per-CPU masks.
		 */
		evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
		isrc->xi_cpu = to_cpu;
		evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
		goto out;
	}

	bind_vcpu.port = isrc->xi_port;
	bind_vcpu.vcpu = vcpu_id;

	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu);
	if (isrc->xi_cpu != to_cpu) {
		if (error == 0) {
			/* Commit to new binding by removing the old one. */
			evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
			isrc->xi_cpu = to_cpu;
			evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
		}
	}

out:
	if (masked == 0)
		evtchn_unmask_port(isrc->xi_port);
	mtx_unlock(&xen_intr_isrc_lock);
	return (0);
#else
	return (EOPNOTSUPP);
#endif
}

/*------------------- Virtual Interrupt Source PIC Functions -----------------*/
/*
 * Mask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to mask (if necessary).
 */
void
xen_intr_disable_source(struct xenisrc *isrc)
{

	/*
	 * NB: checking if the event channel is already masked is
	 * needed because the event channel user-space device
	 * masks event channels on its filter as part of its
	 * normal operation, and those shouldn't be automatically
	 * unmasked by the generic interrupt code. The event channel
	 * device will unmask them when needed.
	 */
	if (__predict_true(is_valid_evtchn(isrc->xi_port)))
		isrc->xi_masked = !!evtchn_test_and_set_mask(isrc->xi_port);
}

/*
 * Unmask a level triggered interrupt source.
 *
 * \param isrc  The interrupt source to unmask (if necessary).
 */
void
xen_intr_enable_source(struct xenisrc *isrc)
{

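	/*
	 * Only unmask if xen_intr_disable_source() did not find the port
	 * already masked; see the note there regarding the event channel
	 * user-space device.
	 */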
	if (isrc->xi_masked == 0)
		evtchn_unmask_port(isrc->xi_port);
}

/*
 * Enable and unmask the interrupt source.
 *
 * \param isrc  The interrupt source to enable.
 */
void
xen_intr_enable_intr(struct xenisrc *isrc)
{

	evtchn_unmask_port(isrc->xi_port);
}

/*--------------------------- Public Functions -------------------------------*/
/*------- API comments for these methods can be found in xen/xenintr.h -------*/
int
xen_intr_bind_local_port(device_t dev, evtchn_port_t local_port,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	int error;

	error = xen_intr_bind_isrc(&isrc, local_port, EVTCHN_TYPE_PORT,
	    device_get_nameunit(dev), filter, handler, arg, flags,
	    port_handlep);
	if (error != 0)
		return (error);

	/*
	 * The Event Channel API didn't open this port, so it is not
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 0;
	return (0);
}

int
xen_intr_alloc_and_bind_local_port(device_t dev, u_int remote_domain,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	struct evtchn_alloc_unbound alloc_unbound;
	int error;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = remote_domain;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
	    &alloc_unbound);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 * the HYPERCALL layer.
		 */
		return (-error);
	}

	error = xen_intr_bind_isrc(&isrc, alloc_unbound.port, EVTCHN_TYPE_PORT,
	    device_get_nameunit(dev), filter, handler, arg, flags,
	    port_handlep);
	if (error != 0) {
		evtchn_close_t close = { .port = alloc_unbound.port };
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

	isrc->xi_close = 1;
	return (0);
}

int
xen_intr_bind_remote_port(device_t dev, u_int remote_domain,
    u_int remote_port, driver_filter_t filter, driver_intr_t handler,
    void *arg, enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;
	struct evtchn_bind_interdomain bind_interdomain;
	int error;

	bind_interdomain.remote_dom = remote_domain;
	bind_interdomain.remote_port = remote_port;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
	    &bind_interdomain);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 * the HYPERCALL layer.
		 */
		return (-error);
	}

	error = xen_intr_bind_isrc(&isrc, bind_interdomain.local_port,
	    EVTCHN_TYPE_PORT, device_get_nameunit(dev), filter, handler, arg,
	    flags, port_handlep);
	if (error) {
		evtchn_close_t close = { .port = bind_interdomain.local_port };
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

	/*
	 * The Event Channel API opened this port, so it is
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 1;
	return (0);
}

int
xen_intr_bind_virq(device_t dev, u_int virq, u_int cpu,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
	u_int vcpu_id = XEN_CPUID_TO_VCPUID(cpu);
	struct xenisrc *isrc;
	struct evtchn_bind_virq bind_virq = { .virq = virq, .vcpu = vcpu_id };
	int error;

	isrc = NULL;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 * the HYPERCALL layer.
		 */
		return (-error);
	}

	error = xen_intr_bind_isrc(&isrc, bind_virq.port, EVTCHN_TYPE_VIRQ,
	    device_get_nameunit(dev), filter, handler, arg, flags,
	    port_handlep);

#ifdef SMP
	if (error == 0)
		error = xen_arch_intr_event_bind(isrc, cpu);
#endif

	if (error != 0) {
		evtchn_close_t close = { .port = bind_virq.port };

		xen_intr_unbind(port_handlep);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

#ifdef SMP
	if (isrc->xi_cpu != cpu) {
		/*
		 * Too early in the boot process for the generic interrupt
		 * code to perform the binding. Update our event channel
		 * masks manually so events can't fire on the wrong cpu
		 * during AP startup.
		 */
		xen_intr_assign_cpu(isrc, cpu);
	}
#endif

	/*
	 * The Event Channel API opened this port, so it is
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 1;
	isrc->xi_virq = virq;

	return (0);
}

int
xen_intr_alloc_and_bind_ipi(u_int cpu, driver_filter_t filter,
    enum intr_type flags, xen_intr_handle_t *port_handlep)
{
#ifdef SMP
	u_int vcpu_id = XEN_CPUID_TO_VCPUID(cpu);
	struct xenisrc *isrc;
	struct evtchn_bind_ipi bind_ipi = { .vcpu = vcpu_id };
	/* Same size as the one used by intr_handler->ih_name. */
	char name[MAXCOMLEN + 1];
	int error;

	isrc = NULL;
	error = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi);
	if (error != 0) {
		/*
		 * XXX Trap Hypercall error code Linuxisms in
		 * the HYPERCALL layer.
		 */
		return (-error);
	}

	snprintf(name, sizeof(name), "cpu%u", cpu);

	error = xen_intr_bind_isrc(&isrc, bind_ipi.port, EVTCHN_TYPE_IPI,
	    name, filter, NULL, NULL, flags, port_handlep);
	if (error != 0) {
		evtchn_close_t close = { .port = bind_ipi.port };

		xen_intr_unbind(port_handlep);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			panic("EVTCHNOP_close failed");
		return (error);
	}

	if (isrc->xi_cpu != cpu) {
		/*
		 * Too early in the boot process for the generic interrupt
		 * code to perform the binding. Update our event channel
		 * masks manually so events can't fire on the wrong cpu
		 * during AP startup.
		 */
		xen_intr_assign_cpu(isrc, cpu);
	}

	/*
	 * The Event Channel API opened this port, so it is
	 * responsible for closing it automatically on unbind.
	 */
	isrc->xi_close = 1;
	return (0);
#else
	return (EOPNOTSUPP);
#endif
}

int
xen_intr_describe(xen_intr_handle_t port_handle, const char *fmt, ...)
{
	char descr[MAXCOMLEN + 1];
	struct xenisrc *isrc;
	va_list ap;

	isrc = xen_intr_isrc_from_handle(port_handle);
	if (isrc == NULL)
		return (EINVAL);

	va_start(ap, fmt);
	vsnprintf(descr, sizeof(descr), fmt, ap);
	va_end(ap);
	return (xen_arch_intr_describe(isrc, isrc->xi_cookie, descr));
}

void
xen_intr_unbind(xen_intr_handle_t *port_handlep)
{
	struct xenisrc *isrc;

	KASSERT(port_handlep != NULL,
	    ("NULL xen_intr_handle_t passed to %s", __func__));

	isrc = xen_intr_isrc_from_handle(*port_handlep);
	*port_handlep = NULL;
	if (isrc == NULL)
		return;

	mtx_lock(&xen_intr_isrc_lock);
	if (refcount_release(&isrc->xi_refcount) == 0) {
		mtx_unlock(&xen_intr_isrc_lock);
		return;
	}
	mtx_unlock(&xen_intr_isrc_lock);

	if (isrc->xi_cookie != NULL)
		xen_arch_intr_remove_handler(isrc, isrc->xi_cookie);
	xen_intr_release_isrc(isrc);
}

void
xen_intr_signal(xen_intr_handle_t handle)
{
	struct xenisrc *isrc;

	isrc = xen_intr_isrc_from_handle(handle);
	if (isrc != NULL) {
		KASSERT(isrc->xi_type == EVTCHN_TYPE_PORT ||
		    isrc->xi_type == EVTCHN_TYPE_IPI,
		    ("evtchn_signal on something other than a local port"));
		struct evtchn_send send = { .port = isrc->xi_port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_send, &send);
	}
}

evtchn_port_t
xen_intr_port(xen_intr_handle_t handle)
{
	struct xenisrc *isrc;

	isrc = xen_intr_isrc_from_handle(handle);
	if (isrc == NULL)
		return (0);

	return (isrc->xi_port);
}

int
xen_intr_add_handler(const char *name, driver_filter_t filter,
    driver_intr_t handler, void *arg, enum intr_type flags,
    xen_intr_handle_t handle)
{
	struct xenisrc *isrc;
	int error;

	isrc = xen_intr_isrc_from_handle(handle);
	if (isrc == NULL || isrc->xi_cookie != NULL)
		return (EINVAL);

	error = xen_arch_intr_add_handler(name, filter, handler, arg,
	    flags | INTR_EXCL, isrc, &isrc->xi_cookie);
	if (error != 0)
		printf("%s: %s: add handler failed: %d\n", name, __func__,
		    error);

	return (error);
}

int
xen_intr_get_evtchn_from_port(evtchn_port_t port, xen_intr_handle_t *handlep)
{

	if (!is_valid_evtchn(port))
		return (EINVAL);

	if (handlep == NULL) {
		return (EINVAL);
	}

	mtx_lock(&xen_intr_isrc_lock);
	if (xen_intr_port_to_isrc[port] == NULL) {
		mtx_unlock(&xen_intr_isrc_lock);
		return (EINVAL);
	}
	refcount_acquire(&xen_intr_port_to_isrc[port]->xi_refcount);
	mtx_unlock(&xen_intr_isrc_lock);

	/* Assign the opaque handler */
	*handlep = xen_intr_handle_from_isrc(xen_intr_port_to_isrc[port]);

	return (0);
}

#ifdef DDB
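/**
 * Translate an event channel type into a human-readable string for DDB.
 */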
static const char *
xen_intr_print_type(enum evtchn_type type)
{
	static const char *evtchn_type_to_string[EVTCHN_TYPE_COUNT] = {
		[EVTCHN_TYPE_UNBOUND] = "UNBOUND",
		[EVTCHN_TYPE_VIRQ] = "VIRQ",
		[EVTCHN_TYPE_IPI] = "IPI",
		[EVTCHN_TYPE_PORT] = "PORT",
	};

	if (type >= EVTCHN_TYPE_COUNT)
		return ("UNKNOWN");

	return (evtchn_type_to_string[type]);
}

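/**
 * Print the state of a single event channel port: its type, global mask
 * and pending bits, and the per-CPU enable masks.
 */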
static void
xen_intr_dump_port(struct xenisrc *isrc)
{
	struct xen_intr_pcpu_data *pcpu;
	shared_info_t *s = HYPERVISOR_shared_info;
	u_int i;

	db_printf("Port %d Type: %s\n",
	    isrc->xi_port, xen_intr_print_type(isrc->xi_type));
	if (isrc->xi_type == EVTCHN_TYPE_VIRQ)
		db_printf("\tVirq: %u\n", isrc->xi_virq);

	db_printf("\tMasked: %d Pending: %d\n",
	    !!xen_test_bit(isrc->xi_port, &s->evtchn_mask[0]),
	    !!xen_test_bit(isrc->xi_port, &s->evtchn_pending[0]));

	db_printf("\tPer-CPU Masks: ");
	CPU_FOREACH(i) {
		pcpu = DPCPU_ID_PTR(i, xen_intr_pcpu);
		db_printf("cpu#%u: %d ", i,
		    !!xen_test_bit(isrc->xi_port, pcpu->evtchn_enabled));
	}
	db_printf("\n");
}

DB_SHOW_COMMAND(xen_evtchn, db_show_xen_evtchn)
{
	u_int i;

	if (!xen_domain()) {
		db_printf("Only available on Xen guests\n");
		return;
	}

	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		struct xenisrc *isrc;

		isrc = xen_intr_port_to_isrc[i];
		if (isrc == NULL)
			continue;

		xen_intr_dump_port(isrc);
	}
}
#endif /* DDB */