 * Copyright (c) 2002-2005, K A Fraser
 * ...
 * Copyright © 2021-2023, Elliott Mitchell
/* ... */

#include <vm/vm.h>
#include <vm/pmap.h>
/* ... */
#include <xen/xen-os.h>
/* ... */
#include <machine/xen/arch-intr.h>

/* ... */
/* Per-cpu event channel processing state. */
/* ... */
    .last_processed_l1i = LONG_BIT - 1,
    .last_processed_l2i = LONG_BIT - 1
};
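/*
 * Editor's note: seeding both indexes with LONG_BIT - 1 is deliberate.
 * The upcall scan loop advances each index modulo LONG_BIT before using
 * it (in lines elided here), so the very first scan starts at bit 0 and
 * subsequent scans resume just past the last serviced port, giving
 * every port a round-robin turn.
 */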
/*
 * Modifying xen_intr_port_to_isrc[], or isrc->xi_port (implies the former)
 * requires this lock be held.  Any time this lock is not held, the condition
 * `!xen_intr_port_to_isrc[i] || (xen_intr_port_to_isrc[i]->xi_port == i)`
 * MUST be true for all values of i which are valid indexes into the array.
 *
 * Acquire/release operations for isrc->xi_refcount require this lock be held.
 */
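/*
 * Editor's sketch (hypothetical debug aid, not part of this file): the
 * invariant above could be spot-checked under the lock like so:
 *
 *    u_int i;
 *
 *    mtx_assert(&xen_intr_isrc_lock, MA_OWNED);
 *    for (i = 0; i < NR_EVENT_CHANNELS; i++)
 *        KASSERT(xen_intr_port_to_isrc[i] == NULL ||
 *            xen_intr_port_to_isrc[i]->xi_port == i,
 *            ("xen_intr_port_to_isrc[%u] out of sync", i));
 */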
/*------------------------- Private Functions --------------------------------*/
    xen_clear_bit(port, pcpu->evtchn_enabled);
/* ... */
    xen_set_bit(port, pcpu->evtchn_enabled);
/* ... */
/* Attempt to free an active Xen interrupt source object. */
/* ... */
    if (is_valid_evtchn(isrc->xi_port)) {
        evtchn_mask_port(isrc->xi_port);
        evtchn_clear_port(isrc->xi_port);
        /* ... */
        evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
        evtchn_cpu_unmask_port(0, isrc->xi_port);
        /* ... */
        if (isrc->xi_close != 0) {
            struct evtchn_close close = { .port = isrc->xi_port };
            /* ... */
        }
        /* ... */
        xen_intr_port_to_isrc[isrc->xi_port] = NULL;
    }
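/*
 * Editor's note: the elided lines hand `close` to
 * HYPERVISOR_event_channel_op(EVTCHNOP_close, ...); only ports this
 * driver opened itself (xi_close != 0) are closed on release, matching
 * the xi_close ownership convention used throughout this file.
 */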
/* ... */

    isrc->xi_cookie = NULL;
    isrc->xi_type = type;
    isrc->xi_port = local_port;
    isrc->xi_close = false;
    isrc->xi_cpu = 0;
    refcount_init(&isrc->xi_refcount, 1);
    /* ... */
    xen_intr_port_to_isrc[isrc->xi_port] = isrc;
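/*
 * Editor's note: the assignment above modifies xen_intr_port_to_isrc[]
 * and therefore runs with xen_intr_isrc_lock held (the acquisition is
 * in lines elided here), per the locking rules stated earlier.
 */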
/* ... */

    CTASSERT(sizeof(sh->evtchn_mask[0]) == sizeof(sh->evtchn_pending[0]));
    CTASSERT(sizeof(sh->evtchn_mask[0]) == sizeof(pcpu->evtchn_enabled[0]));
    CTASSERT(sizeof(sh->evtchn_mask) == sizeof(sh->evtchn_pending));
    CTASSERT(sizeof(sh->evtchn_mask) == sizeof(pcpu->evtchn_enabled));
    return (sh->evtchn_pending[idx]
        & ~sh->evtchn_mask[idx]
        & pcpu->evtchn_enabled[idx]);
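/*
 * Editor's worked example (values invented): with pending = 0b0110,
 * mask = 0b0100 and enabled = 0b1111, the function returns 0b0010:
 * port 1 is pending, globally unmasked and routed to this CPU, while
 * port 2 is discarded by the global mask.
 */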
/* ... */

    struct trapframe *trap_frame = curthread->td_intr_frame;
    /* ... */
    KASSERT(curthread->td_intr_nesting_level > 0,
        ("Unexpected thread context"));
    /* ... */
    v->evtchn_upcall_pending = 0;
    /* No need for a barrier on x86 -- XCHG is a barrier on x86. */
    /* ... */
    l1 = atomic_readandclear_xen_ulong(&v->evtchn_pending_sel);

    l1i = pc->last_processed_l1i;
    l2i = pc->last_processed_l2i;
    /* ... */
            l1i = LONG_BIT - 1;
            l2i = LONG_BIT - 1;
            /* ... */
        l1i = ffsl(masked_l1) - 1;
        /* ... */
            l2i = LONG_BIT - 1;
            /* ... */
            l2i = ffsl(masked_l2) - 1;
            /* ... */
            KASSERT((isrc->xi_cpu == PCPU_GET(cpuid)),
                ("Received unexpected event on vCPU#%u, event bound to vCPU#%u",
                PCPU_GET(cpuid), isrc->xi_cpu));

            /*
             * Reduce interrupt nesting level ahead of calling the
             * per-arch interrupt dispatch helper. This is
             * required because the per-arch dispatcher will also
             * increase interrupt nesting level when dispatching an
             * interrupt.
             */
            curthread->td_intr_nesting_level--;
            /* ... (per-arch dispatch of the handlers) ... */
            curthread->td_intr_nesting_level++;
            /* ... */
            pc->last_processed_l1i = l1i;
            pc->last_processed_l2i = l2i;

        } while (l2i != LONG_BIT - 1);
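/*
 * Editor's sketch (simplified, not part of this file): the core of the
 * two-level scan above, minus the fairness rotation and the masking.
 * The caller must guarantee that l1 and the selected level-two word
 * are both non-zero.
 */
static u_int
first_pending_port_sketch(u_long l1, const u_long *l2_words)
{
    u_int l1i, l2i;

    l1i = ffsl(l1) - 1;                /* first word with pending events */
    l2i = ffsl(l2_words[l1i]) - 1;     /* first pending bit in that word */

    /* A port number is just (word index, bit index) recomposed. */
    return (l1i * LONG_BIT + l2i);
}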
/* ... */

    _Static_assert(is_valid_evtchn(NR_EVENT_CHANNELS - 1),
        "is_valid_evtchn(NR_EVENT_CHANNELS - 1) fails");
    /* ... */
    _Static_assert(!is_valid_evtchn(-1),
        "is_valid_evtchn(-1) fails (negative are invalid)");
/* ... */

    mtx_init(&xen_intr_isrc_lock, "xen-irq-lock", NULL, MTX_DEF);
    /*
     * Set the per-cpu mask of CPU#0 to enable all, since by default all
     * event channels are bound to CPU#0.
     */
    /* ... */
        memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
            sizeof(pcpu->evtchn_enabled));
    /* ... */
    for (i = 0; i < nitems(s->evtchn_mask); i++)
        atomic_store_rel_xen_ulong(&s->evtchn_mask[i], ~0);
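/*
 * Editor's note: memset() writes its second argument as a byte, so the
 * ~0 fill gives CPU#0 an all-ones evtchn_enabled bitmap (every port
 * routed there) while every other CPU starts with all ports disabled;
 * the global evtchn_mask is then set to all ones so nothing fires
 * until a channel is explicitly unmasked.
 */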
/*--------------------------- Common PIC Functions ---------------------------*/
/* ... */

    u_int cpu = isrc->xi_cpu;
    /* ... */
    isrc->xi_port = bind_ipi.port;

/* ... */
    u_int cpu = isrc->xi_cpu;
    /* ... */
    struct evtchn_bind_virq bind_virq = { .virq = isrc->xi_virq,
        /* ... */ };
    /* ... */
        panic("unable to rebind xen VIRQ#%u: %d", isrc->xi_virq, error);

    isrc->xi_port = bind_virq.port;
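/*
 * Editor's sketch (not part of this file): binding a fresh VIRQ uses
 * the same hypercall shape; on success the hypervisor returns the
 * allocated port in the struct's OUT field. VIRQ_TIMER and vCPU 0 are
 * example values, and new_port is a hypothetical local:
 *
 *    struct evtchn_bind_virq bind = { .virq = VIRQ_TIMER, .vcpu = 0 };
 *
 *    if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind) == 0)
 *        new_port = bind.port;
 */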
/* ... */

    u_int cpu = isrc->xi_cpu;
    /* ... */
    switch (isrc->xi_type) {
    /* ... */
    }

    prev = xen_intr_port_to_isrc[isrc->xi_port];
    xen_intr_port_to_isrc[isrc->xi_port] = isrc;
    /* ... */
    isrc->xi_cpu = 0;
    error = xen_intr_assign_cpu(isrc, cpu);
    if (error != 0)
        panic("%s(): unable to rebind Xen channel %u to vCPU%u: %d",
            __func__, isrc->xi_port, cpu, error);
    /* ... */
    evtchn_unmask_port(isrc->xi_port);
/* ... */

    /* Reset the per-CPU masks */
    /* ... */
        memset(pcpu->evtchn_enabled, i == 0 ? ~0 : 0,
            sizeof(pcpu->evtchn_enabled));
    /* ... */
    /* Mask all event channels. */
    for (i = 0; i < nitems(s->evtchn_mask); i++)
        atomic_store_rel_xen_ulong(&s->evtchn_mask[i], ~0);
    /* ... */
            xen_intr_port_to_isrc[isrc_idx]->xi_port =
                INVALID_EVTCHN;

    /* Remap in-use isrcs, using xen_intr_port_to_isrc as listing */
    /* ... */
        if (cur == NULL || cur->xi_port == isrc_idx)
            continue;
        /* ... */
            KASSERT(!is_valid_evtchn(cur->xi_port),
                ("%s(): Multiple channels on single intr?",
                __func__));
/* ... */

    if (__predict_true(is_valid_evtchn(isrc->xi_port)))
        evtchn_mask_port(isrc->xi_port);

/* ... */
    if (!is_valid_evtchn(isrc->xi_port)) {
        /* ... */
    }

    /*
     * Mask the event channel while binding it to prevent interrupt
     * delivery with an inconsistent state in isrc->xi_cpu.
     */
    masked = evtchn_test_and_set_mask(isrc->xi_port);
    if ((isrc->xi_type == EVTCHN_TYPE_VIRQ) ||
        (isrc->xi_type == EVTCHN_TYPE_IPI)) {
        /*
         * Virtual IRQs are associated with a cpu by the hypervisor at
         * bind time, so all we need to do is update the per-CPU masks.
         */
        evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
        isrc->xi_cpu = to_cpu;
        evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
        /* ... */
    }

    bind_vcpu.port = isrc->xi_port;
    /* ... */
    if (isrc->xi_cpu != to_cpu) {
        /* ... */
            evtchn_cpu_mask_port(isrc->xi_cpu, isrc->xi_port);
            isrc->xi_cpu = to_cpu;
            evtchn_cpu_unmask_port(isrc->xi_cpu, isrc->xi_port);
        /* ... */
    }
    /* ... */
    if (masked == 0)
        evtchn_unmask_port(isrc->xi_port);
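/*
 * Editor's note: evtchn_test_and_set_mask() returns the previous mask
 * state, so a channel that was already masked before the migration
 * stays masked; the final unmask only runs when this function itself
 * masked the port (masked == 0).
 */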
/*------------------- Virtual Interrupt Source PIC Functions -----------------*/
    /*
     * NB: checking if the event channel is already masked is
     * needed because the event channel user-space device
     * masks event channels on its filter as part of its
     * normal operation, and those must not be unmasked
     * automatically by the generic interrupt code.
     */
    if (__predict_true(is_valid_evtchn(isrc->xi_port)))
        isrc->xi_masked = !!evtchn_test_and_set_mask(isrc->xi_port);

/* ... */

    if (isrc->xi_masked == 0)
        evtchn_unmask_port(isrc->xi_port);

/* ... */

    evtchn_unmask_port(isrc->xi_port);
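/*
 * Editor's note: the disable/enable pair above cooperates through
 * xi_masked. The disable path snapshots the previous mask state, and
 * the enable path unmasks only if the port was not already masked by
 * someone else (e.g. the user-space event channel device), so
 * third-party masks survive a dispatch cycle.
 */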
/*--------------------------- Public Functions -------------------------------*/
/*------- API comments for these methods can be found in xen/xenintr.h -------*/
/* ... */

    isrc->xi_close = 0;
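/*
 * Editor's note: xi_close records port ownership. A pre-existing local
 * port is bound with xi_close = 0 (the caller opened it and must close
 * it), while the bind paths below set xi_close = 1 so the release path
 * shown earlier issues EVTCHNOP_close automatically.
 */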
/* ... */
        return (-error);
/* ... */
    isrc->xi_close = 1;
/* ... */
        return (-error);
/* ... */
    isrc->xi_close = 1;
/* ... */
        return (-error);
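/*
 * Editor's note: event channel hypercalls report failure as a negative
 * errno value, hence the `return (-error)` pattern in these bind
 * paths, which converts the hypervisor's result into the positive
 * errno FreeBSD callers expect.
 */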
/* ... */
    if (isrc->xi_cpu != cpu) {
        /* ... */
    }
    /* ... */
    isrc->xi_close = 1;
    isrc->xi_virq = virq;
    /* ... */
    /* Same size as the one used by intr_handler->ih_name. */
    /* ... */
        return (-error);
/* ... */
    if (isrc->xi_cpu != cpu) {
        /* ... */
    }
    /* ... */
    isrc->xi_close = 1;
    /* ... */
    return (xen_arch_intr_describe(isrc, isrc->xi_cookie, descr));

/* ... */
    if (refcount_release(&isrc->xi_refcount) == 0) {
        /* ... */
    }
    /* ... */
    if (isrc->xi_cookie != NULL)
        xen_arch_intr_remove_handler(isrc, isrc->xi_cookie);
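/*
 * Editor's note: refcount_release(9) returns non-zero only when it
 * drops the final reference, so the branch above returns early while
 * other users still hold the interrupt source; only the last unbind
 * removes the handler and releases the isrc.
 */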
/* ... */

    KASSERT(isrc->xi_type == EVTCHN_TYPE_PORT ||
        isrc->xi_type == EVTCHN_TYPE_IPI,
        ("evtchn_signal on something other than a local port"));
    struct evtchn_send send = { .port = isrc->xi_port };
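/*
 * Editor's note: the send struct is consumed by
 * HYPERVISOR_event_channel_op(EVTCHNOP_send, &send), which merely sets
 * the remote end's pending bit; event channels carry no payload.
 */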
/* ... */

    return (isrc->xi_port);

/* ... */
    if (isrc == NULL || isrc->xi_cookie != NULL)
        return (EINVAL);

    error = xen_arch_intr_add_handler(name, filter, handler, arg,
        flags | INTR_EXCL, isrc, &isrc->xi_cookie);
/* ... */

    refcount_acquire(&xen_intr_port_to_isrc[port]->xi_refcount);

/* ... */
    db_printf("Port %u Type: %s\n",
        isrc->xi_port, xen_intr_print_type(isrc->xi_type));
    if (isrc->xi_type == EVTCHN_TYPE_VIRQ)
        db_printf("\tVirq: %u\n", isrc->xi_virq);
    /* ... */
    db_printf("\tMasked: %d Pending: %d\n",
        !!xen_test_bit(isrc->xi_port, &s->evtchn_mask[0]),
        !!xen_test_bit(isrc->xi_port, &s->evtchn_pending[0]));

    db_printf("\tPer-CPU Masks: ");
    /* ... */
            !!xen_test_bit(isrc->xi_port, pcpu->evtchn_enabled));