/*-
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
	struct intr_event *it_event;
	struct thread *it_thread;	/* Kernel thread. */
	int	it_flags;		/* (j) IT_* flags. */
	int	it_need;		/* Needs service. */
};

/* Interrupt thread flags kept in it_flags */
#define	IT_DEAD		0x000001	/* Thread is waiting to exit. */

struct intr_entropy {
	struct thread *td;
	uintptr_t event;
};

struct intr_event *clk_intr_event;
struct intr_event *tty_intr_event;
void	*vm_ih;
struct proc *intrproc;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static int intr_storm_threshold = 1000;
TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
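
/*
 * A threshold of zero disables storm protection entirely (see the check in
 * ithread_execute_handlers() below).  The value can be preset from the
 * loader via the hw.intr_storm_threshold tunable or changed at runtime
 * with sysctl(8).
 */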

static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);
static struct mtx event_lock;
MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);

static void	intr_event_update(struct intr_event *ie);
#ifdef INTR_FILTER
static int	intr_event_schedule_thread(struct intr_event *ie,
		    struct intr_thread *ithd);
static int	intr_filter_loop(struct intr_event *ie,
		    struct trapframe *frame, struct intr_thread **ithd);
static struct intr_thread *ithread_create(const char *name,
		    struct intr_handler *ih);
#else
static int	intr_event_schedule_thread(struct intr_event *ie);
static struct intr_thread *ithread_create(const char *name);
#endif
static void	ithread_destroy(struct intr_thread *ithread);
static void	ithread_execute_handlers(struct proc *p,
		    struct intr_event *ie);
#ifdef INTR_FILTER
static void	priv_ithread_execute_handler(struct proc *p,
		    struct intr_handler *ih);
#endif
static void	ithread_loop(void *);
static void	ithread_update(struct intr_thread *ithd);
static void	start_softintr(void *);

/* Map an interrupt type to an ithread priority. */
u_char
intr_priority(enum intr_type flags)
{
	u_char pri;

	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
	switch (flags) {
	case INTR_TYPE_TTY:
		pri = PI_TTYLOW;
		break;
	case INTR_TYPE_BIO:
		/*
		 * XXX We need to refine this.  BSD/OS distinguishes
		 * between tape and disk priorities.
		 */
		pri = PI_DISK;
		break;
	case INTR_TYPE_NET:
		pri = PI_NET;
		break;
	case INTR_TYPE_CAM:
		pri = PI_DISK;		/* XXX or PI_CAM? */
		break;
	case INTR_TYPE_AV:		/* Audio/video */
		pri = PI_AV;
		break;
	case INTR_TYPE_CLK:
		pri = PI_REALTIME;
		break;
	case INTR_TYPE_MISC:
		pri = PI_DULL;		/* don't care */
		break;
	default:
		/* We didn't specify an interrupt level. */
		panic("intr_priority: no interrupt type in flags");
	}

	return pri;
}

/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
	struct intr_event *ie;
	struct thread *td;
	u_char pri;

	ie = ithd->it_event;
	td = ithd->it_thread;

	/* Determine the overall priority of this event. */
	if (TAILQ_EMPTY(&ie->ie_handlers))
		pri = PRI_MAX_ITHD;
	else
		pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;

	/* Update name and priority. */
	strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
	thread_lock(td);
	sched_prio(td, pri);
	thread_unlock(td);
}

/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
	struct intr_handler *ih;
	char *last;
	int missed, space;

	/* Start off with no entropy and just the name of the event. */
	mtx_assert(&ie->ie_lock, MA_OWNED);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	ie->ie_flags &= ~IE_ENTROPY;
	missed = 0;
	space = 1;

	/* Run through all the handlers updating values. */
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
		    sizeof(ie->ie_fullname)) {
			strcat(ie->ie_fullname, " ");
			strcat(ie->ie_fullname, ih->ih_name);
			space = 0;
		} else
			missed++;
		if (ih->ih_flags & IH_ENTROPY)
			ie->ie_flags |= IE_ENTROPY;
	}

	/*
	 * If the handler names were too long, add +'s to indicate missing
	 * names.  If we run out of room and still have +'s to add, change
	 * the last character from a + to a *.
	 */
	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
	while (missed-- > 0) {
		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
			if (*last == '+') {
				*last = '*';
				break;
			} else
				*last = '+';
		} else if (space) {
			strcat(ie->ie_fullname, " +");
			space = 0;
		} else
			strcat(ie->ie_fullname, "+");
	}

	/*
	 * If this event has an ithread, update its priority and
	 * name.
	 */
	if (ie->ie_thread != NULL)
		ithread_update(ie->ie_thread);
	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}

int
intr_event_create(struct intr_event **event, void *source, int flags, int irq,
    void (*pre_ithread)(void *), void (*post_ithread)(void *),
    void (*post_filter)(void *), int (*assign_cpu)(void *, u_char),
    const char *fmt, ...)
{
	struct intr_event *ie;
	va_list ap;

	/* The only valid flag during creation is IE_SOFT. */
	if ((flags & ~IE_SOFT) != 0)
		return (EINVAL);
	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
	ie->ie_source = source;
	ie->ie_pre_ithread = pre_ithread;
	ie->ie_post_ithread = post_ithread;
	ie->ie_post_filter = post_filter;
	ie->ie_assign_cpu = assign_cpu;
	ie->ie_flags = flags;
	ie->ie_irq = irq;
	ie->ie_cpu = NOCPU;
	TAILQ_INIT(&ie->ie_handlers);
	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

	va_start(ap, fmt);
	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
	va_end(ap);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	mtx_lock(&event_lock);
	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
	mtx_unlock(&event_lock);
	if (event != NULL)
		*event = ie;
	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
	return (0);
}
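
/*
 * Illustrative sketch (not part of this file's logic): machine-dependent
 * interrupt code typically creates one event per IRQ and wires up its PIC
 * callbacks roughly like this; the "xpic" names are hypothetical:
 *
 *	struct intr_event *ie;
 *	int error;
 *
 *	error = intr_event_create(&ie, xpic_sc, 0, irq,
 *	    xpic_pre_ithread, xpic_post_ithread, xpic_post_filter,
 *	    xpic_assign_cpu, "irq%d:", irq);
 *
 * The pre_ithread/post_ithread/post_filter callbacks are the hooks the PIC
 * driver supplies for masking, unmasking and acknowledging the source
 * around handler execution.
 */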

/*
 * Bind an interrupt event to the specified CPU.  Note that not all
 * platforms support binding an interrupt to a CPU.  For those
 * platforms this request will fail.  For supported platforms, any
 * associated ithreads as well as the primary interrupt context will
 * be bound to the specified CPU.  Using a cpu id of NOCPU unbinds
 * the interrupt event.
 */
int
intr_event_bind(struct intr_event *ie, u_char cpu)
{
	cpuset_t mask;
	lwpid_t id;
	int error;

	/* Need a CPU to bind to. */
	if (cpu != NOCPU && CPU_ABSENT(cpu))
		return (EINVAL);

	if (ie->ie_assign_cpu == NULL)
		return (EOPNOTSUPP);

	error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
	if (error)
		return (error);

	/*
	 * If we have any ithreads try to set their mask first to verify
	 * permissions, etc.
	 */
	mtx_lock(&ie->ie_lock);
	if (ie->ie_thread != NULL) {
		CPU_ZERO(&mask);
		if (cpu == NOCPU)
			CPU_COPY(cpuset_root, &mask);
		else
			CPU_SET(cpu, &mask);
		id = ie->ie_thread->it_thread->td_tid;
		mtx_unlock(&ie->ie_lock);
		error = cpuset_setthread(id, &mask);
		if (error)
			return (error);
	} else
		mtx_unlock(&ie->ie_lock);
	error = ie->ie_assign_cpu(ie->ie_source, cpu);
	if (error) {
		mtx_lock(&ie->ie_lock);
		if (ie->ie_thread != NULL) {
			CPU_ZERO(&mask);
			if (ie->ie_cpu == NOCPU)
				CPU_COPY(cpuset_root, &mask);
			else
				CPU_SET(cpu, &mask);
			id = ie->ie_thread->it_thread->td_tid;
			mtx_unlock(&ie->ie_lock);
			(void)cpuset_setthread(id, &mask);
		} else
			mtx_unlock(&ie->ie_lock);
		return (error);
	}

	mtx_lock(&ie->ie_lock);
	ie->ie_cpu = cpu;
	mtx_unlock(&ie->ie_lock);

	return (error);
}

static struct intr_event *
intr_lookup(int irq)
{
	struct intr_event *ie;

	mtx_lock(&event_lock);
	TAILQ_FOREACH(ie, &event_list, ie_list)
		if (ie->ie_irq == irq &&
		    (ie->ie_flags & IE_SOFT) == 0 &&
		    TAILQ_FIRST(&ie->ie_handlers) != NULL)
			break;
	mtx_unlock(&event_lock);
	return (ie);
}

int
intr_setaffinity(int irq, void *m)
{
	struct intr_event *ie;
	cpuset_t *mask;
	u_char cpu;
	int n;

	mask = m;
	cpu = NOCPU;
	/*
	 * If we're setting all cpus we can unbind.  Otherwise make sure
	 * only one cpu is in the set.
	 */
	if (CPU_CMP(cpuset_root, mask)) {
		for (n = 0; n < CPU_SETSIZE; n++) {
			if (!CPU_ISSET(n, mask))
				continue;
			if (cpu != NOCPU)
				return (EINVAL);
			cpu = (u_char)n;
		}
	}
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);
	return (intr_event_bind(ie, cpu));
}

int
intr_getaffinity(int irq, void *m)
{
	struct intr_event *ie;
	cpuset_t *mask;

	mask = m;
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);
	CPU_ZERO(mask);
	mtx_lock(&ie->ie_lock);
	if (ie->ie_cpu == NOCPU)
		CPU_COPY(cpuset_root, mask);
	else
		CPU_SET(ie->ie_cpu, mask);
	mtx_unlock(&ie->ie_lock);
	return (0);
}
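
/*
 * Illustrative sketch (not part of this file's logic): callers of
 * intr_setaffinity() pass either a copy of cpuset_root (which unbinds the
 * interrupt) or a mask with exactly one CPU set, e.g. to bind "irq" to
 * CPU 2:
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(2, &mask);
 *	error = intr_setaffinity(irq, &mask);
 */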

int
intr_event_destroy(struct intr_event *ie)
{

	mtx_lock(&event_lock);
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		mtx_unlock(&ie->ie_lock);
		mtx_unlock(&event_lock);
		return (EBUSY);
	}
	TAILQ_REMOVE(&event_list, ie, ie_list);
#ifndef notyet
	if (ie->ie_thread != NULL) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	mtx_unlock(&event_lock);
	mtx_destroy(&ie->ie_lock);
	free(ie, M_ITHREAD);
	return (0);
}

#ifndef INTR_FILTER
static struct intr_thread *
ithread_create(const char *name)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
	    &td, RFSTOPPED | RFHIGHPID,
	    0, "intr", "%s", name);
	if (error)
		panic("kproc_create() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}
#else
static struct intr_thread *
ithread_create(const char *name, struct intr_handler *ih)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ih, &intrproc,
	    &td, RFSTOPPED | RFHIGHPID,
	    0, "intr", "%s", name);
	if (error)
		panic("kproc_create() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}
#endif

static void
ithread_destroy(struct intr_thread *ithread)
{
	struct thread *td;

	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
	td = ithread->it_thread;
	thread_lock(td);
	ithread->it_flags |= IT_DEAD;
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	}
	thread_unlock(td);
}

#ifndef INTR_FILTER
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	ih->ih_name = name;
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Add the new handler to the event in priority order. */
	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	intr_event_update(ie);

	/* Create a thread if we need one. */
	while (ie->ie_thread == NULL && handler != NULL) {
		if (ie->ie_flags & IE_ADDING_THREAD)
			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
		else {
			ie->ie_flags |= IE_ADDING_THREAD;
			mtx_unlock(&ie->ie_lock);
			it = ithread_create("intr: newborn");
			mtx_lock(&ie->ie_lock);
			ie->ie_flags &= ~IE_ADDING_THREAD;
			ie->ie_thread = it;
			it->it_event = ie;
			ithread_update(it);
			wakeup(ie);
		}
	}
	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}
#else
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	ih->ih_name = name;
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Add the new handler to the event in priority order. */
	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	intr_event_update(ie);

	/* For filtered handlers, create a private ithread to run on. */
	if (filter != NULL && handler != NULL) {
		mtx_unlock(&ie->ie_lock);
		it = ithread_create("intr: newborn", ih);
		mtx_lock(&ie->ie_lock);
		it->it_event = ie;
		ih->ih_thread = it;
		ithread_update(it); // XXX - do we really need this?!?!?
	} else { /* Create the global per-event thread if we need one. */
		while (ie->ie_thread == NULL && handler != NULL) {
			if (ie->ie_flags & IE_ADDING_THREAD)
				msleep(ie, &ie->ie_lock, 0, "ithread", 0);
			else {
				ie->ie_flags |= IE_ADDING_THREAD;
				mtx_unlock(&ie->ie_lock);
				it = ithread_create("intr: newborn", ih);
				mtx_lock(&ie->ie_lock);
				ie->ie_flags &= ~IE_ADDING_THREAD;
				ie->ie_thread = it;
				it->it_event = ie;
				ithread_update(it);
				wakeup(ie);
			}
		}
	}
	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}
#endif
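
/*
 * Illustrative sketch (not part of this file's logic): most drivers reach
 * intr_event_add_handler() indirectly through bus_setup_intr(9), but the
 * call made on their behalf looks roughly like this; the "foo" names are
 * hypothetical:
 *
 *	error = intr_event_add_handler(ie, device_get_nameunit(dev),
 *	    foo_filter, foo_intr, sc, intr_priority(INTR_TYPE_NET),
 *	    INTR_TYPE_NET | INTR_MPSAFE, &cookie);
 *
 * Passing a NULL filter yields a purely threaded handler; passing a NULL
 * handler yields a filter-only (low latency) handler.
 */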

/*
 * Return the ie_source field from the intr_event an intr_handler is
 * associated with.
 */
void *
intr_handler_source(void *cookie)
{
	struct intr_handler *ih;
	struct intr_event *ie;

	ih = (struct intr_handler *)cookie;
	if (ih == NULL)
		return (NULL);
	ie = ih->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    ih->ih_name));
	return (ie->ie_source);
}

#ifndef INTR_FILTER
int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
#ifdef INVARIANTS
	struct intr_handler *ih;
#endif
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ie->ie_lock);
	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
	    ih->ih_name, ie->ie_name);
ok:
#endif
	/*
	 * If there is no ithread, then just remove the handler and return.
	 * XXX: Note that an INTR_FAST handler might be running on another
	 * CPU!
	 */
	if (ie->ie_thread == NULL) {
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 *
	 * During a cold boot while cold is set, msleep() does not sleep,
	 * so we have to remove the handler here rather than letting the
	 * thread do it.
	 */
	thread_lock(ie->ie_thread->it_thread);
	if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 */
		ie->ie_thread->it_need = 1;
	} else
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
	thread_unlock(ie->ie_thread->it_thread);
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	intr_event_update(ie);
#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(8).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (!(ih->ih_flags & IH_FAST)) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}

static int
intr_event_schedule_thread(struct intr_event *ie)
{
	struct intr_entropy entropy;
	struct intr_thread *it;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
	    ie->ie_thread == NULL)
		return (EINVAL);

	ctd = curthread;
	it = ie->ie_thread;
	td = it->it_thread;
	p = td->td_proc;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
		    p->p_pid, td->td_name);
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest(&entropy, sizeof(entropy), 2, 0,
		    RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 */
	it->it_need = 1;
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
	}
	thread_unlock(td);

	return (0);
}
#else
int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
	struct intr_thread *it;
#ifdef INVARIANTS
	struct intr_handler *ih;
#endif
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ie->ie_lock);
	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
	    ih->ih_name, ie->ie_name);
ok:
#endif
	/*
	 * If there are no ithreads (per event and per handler), then
	 * just remove the handler and return.
	 * XXX: Note that an INTR_FAST handler might be running on another CPU!
	 */
	if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/* Private or global ithread? */
	it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;
	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 *
	 * During a cold boot while cold is set, msleep() does not sleep,
	 * so we have to remove the handler here rather than letting the
	 * thread do it.
	 */
	thread_lock(it->it_thread);
	if (!TD_AWAITING_INTR(it->it_thread) && !cold) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 */
		it->it_need = 1;
	} else
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
	thread_unlock(it->it_thread);
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	/*
	 * At this point, the handler has been disconnected from the event,
	 * so we can kill the private ithread if any.
	 */
	if (handler->ih_thread) {
		ithread_destroy(handler->ih_thread);
		handler->ih_thread = NULL;
	}
	intr_event_update(ie);
#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(8).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (handler != NULL) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}

static int
intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
{
	struct intr_entropy entropy;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
		return (EINVAL);

	ctd = curthread;
	td = it->it_thread;
	p = td->td_proc;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
		    p->p_pid, td->td_name);
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest(&entropy, sizeof(entropy), 2, 0,
		    RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 */
	it->it_need = 1;
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
	}
	thread_unlock(td);

	return (0);
}
#endif

/*
 * Allow interrupt event binding for software interrupt handlers -- a no-op,
 * since interrupts are generated in software rather than being directed by
 * a PIC.
 */
static int
swi_assign_cpu(void *arg, u_char cpu)
{

	return (0);
}

/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
    void *arg, int pri, enum intr_type flags, void **cookiep)
{
	struct intr_event *ie;
	int error;

	if (flags & INTR_ENTROPY)
		return (EINVAL);

	ie = (eventp != NULL) ? *eventp : NULL;

	if (ie != NULL) {
		if (!(ie->ie_flags & IE_SOFT))
			return (EINVAL);
	} else {
		error = intr_event_create(&ie, NULL, IE_SOFT, 0,
		    NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
		if (error)
			return (error);
		if (eventp != NULL)
			*eventp = ie;
	}
	error = intr_event_add_handler(ie, name, NULL, handler, arg,
	    (pri * RQ_PPQ) + PI_SOFT, flags, cookiep);
	if (error)
		return (error);
	if (pri == SWI_CLOCK) {
		struct proc *p;
		p = ie->ie_thread->it_thread->td_proc;
		PROC_LOCK(p);
		p->p_flag |= P_NOLOAD;
		PROC_UNLOCK(p);
	}
	return (0);
}

/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
	struct intr_handler *ih = (struct intr_handler *)cookie;
	struct intr_event *ie = ih->ih_event;
	int error;

	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
	    ih->ih_need);

	/*
	 * Set ih_need for this handler so that if the ithread is already
	 * running it will execute this handler on the next pass.  Otherwise,
	 * it will execute it the next time it runs.
	 */
	atomic_store_rel_int(&ih->ih_need, 1);

	if (!(flags & SWI_DELAY)) {
		PCPU_INC(cnt.v_soft);
#ifdef INTR_FILTER
		error = intr_event_schedule_thread(ie, ie->ie_thread);
#else
		error = intr_event_schedule_thread(ie);
#endif
		KASSERT(error == 0, ("stray software interrupt"));
	}
}

/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

	return (intr_event_remove_handler(cookie));
}
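
/*
 * Illustrative sketch (not part of this file's logic): a typical consumer
 * registers a software interrupt handler once and then schedules it from
 * elsewhere in the kernel; the "foo" names are hypothetical:
 *
 *	static void *foo_swi_cookie;
 *
 *	error = swi_add(NULL, "foo", foo_swi_handler, NULL, SWI_TQ,
 *	    0, &foo_swi_cookie);
 *	...
 *	swi_sched(foo_swi_cookie, 0);	(or SWI_DELAY to defer the wakeup)
 *	...
 *	swi_remove(foo_swi_cookie);
 */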
10441931cf94SJohn Baldwin */ 10451931cf94SJohn Baldwin void 1046b4151f71SJohn Baldwin swi_sched(void *cookie, int flags) 10471931cf94SJohn Baldwin { 1048e0f66ef8SJohn Baldwin struct intr_handler *ih = (struct intr_handler *)cookie; 1049e0f66ef8SJohn Baldwin struct intr_event *ie = ih->ih_event; 10503e5da754SJohn Baldwin int error; 10518088699fSJohn Baldwin 1052e0f66ef8SJohn Baldwin CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name, 1053e0f66ef8SJohn Baldwin ih->ih_need); 10541931cf94SJohn Baldwin 10551931cf94SJohn Baldwin /* 10563e5da754SJohn Baldwin * Set ih_need for this handler so that if the ithread is already 10573e5da754SJohn Baldwin * running it will execute this handler on the next pass. Otherwise, 10583e5da754SJohn Baldwin * it will execute it the next time it runs. 10591931cf94SJohn Baldwin */ 1060b4151f71SJohn Baldwin atomic_store_rel_int(&ih->ih_need, 1); 10611ca2c018SBruce Evans 1062b4151f71SJohn Baldwin if (!(flags & SWI_DELAY)) { 106367596082SAttilio Rao PCPU_INC(cnt.v_soft); 1064bafe5a31SPaolo Pisati #ifdef INTR_FILTER 1065bafe5a31SPaolo Pisati error = intr_event_schedule_thread(ie, ie->ie_thread); 1066bafe5a31SPaolo Pisati #else 1067e0f66ef8SJohn Baldwin error = intr_event_schedule_thread(ie); 1068bafe5a31SPaolo Pisati #endif 10693e5da754SJohn Baldwin KASSERT(error == 0, ("stray software interrupt")); 10708088699fSJohn Baldwin } 10718088699fSJohn Baldwin } 10728088699fSJohn Baldwin 1073fe486a37SJohn Baldwin /* 1074fe486a37SJohn Baldwin * Remove a software interrupt handler. Currently this code does not 1075fe486a37SJohn Baldwin * remove the associated interrupt event if it becomes empty. Calling code 1076fe486a37SJohn Baldwin * may do so manually via intr_event_destroy(), but that's not really 1077fe486a37SJohn Baldwin * an optimal interface. 1078fe486a37SJohn Baldwin */ 1079fe486a37SJohn Baldwin int 1080fe486a37SJohn Baldwin swi_remove(void *cookie) 1081fe486a37SJohn Baldwin { 1082fe486a37SJohn Baldwin 1083fe486a37SJohn Baldwin return (intr_event_remove_handler(cookie)); 1084fe486a37SJohn Baldwin } 1085fe486a37SJohn Baldwin 1086bafe5a31SPaolo Pisati #ifdef INTR_FILTER 1087bafe5a31SPaolo Pisati static void 1088bafe5a31SPaolo Pisati priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih) 1089bafe5a31SPaolo Pisati { 1090bafe5a31SPaolo Pisati struct intr_event *ie; 1091bafe5a31SPaolo Pisati 1092bafe5a31SPaolo Pisati ie = ih->ih_event; 1093bafe5a31SPaolo Pisati /* 1094bafe5a31SPaolo Pisati * If this handler is marked for death, remove it from 1095bafe5a31SPaolo Pisati * the list of handlers and wake up the sleeper. 1096bafe5a31SPaolo Pisati */ 1097bafe5a31SPaolo Pisati if (ih->ih_flags & IH_DEAD) { 1098bafe5a31SPaolo Pisati mtx_lock(&ie->ie_lock); 1099bafe5a31SPaolo Pisati TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next); 1100bafe5a31SPaolo Pisati ih->ih_flags &= ~IH_DEAD; 1101bafe5a31SPaolo Pisati wakeup(ih); 1102bafe5a31SPaolo Pisati mtx_unlock(&ie->ie_lock); 1103bafe5a31SPaolo Pisati return; 1104bafe5a31SPaolo Pisati } 1105bafe5a31SPaolo Pisati 1106bafe5a31SPaolo Pisati /* Execute this handler. 
*/ 1107bafe5a31SPaolo Pisati CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x", 1108bafe5a31SPaolo Pisati __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument, 1109bafe5a31SPaolo Pisati ih->ih_name, ih->ih_flags); 1110bafe5a31SPaolo Pisati 1111bafe5a31SPaolo Pisati if (!(ih->ih_flags & IH_MPSAFE)) 1112bafe5a31SPaolo Pisati mtx_lock(&Giant); 1113bafe5a31SPaolo Pisati ih->ih_handler(ih->ih_argument); 1114bafe5a31SPaolo Pisati if (!(ih->ih_flags & IH_MPSAFE)) 1115bafe5a31SPaolo Pisati mtx_unlock(&Giant); 1116bafe5a31SPaolo Pisati } 1117bafe5a31SPaolo Pisati #endif 1118bafe5a31SPaolo Pisati 111937e9511fSJohn Baldwin /* 112037e9511fSJohn Baldwin * This is a public function for use by drivers that mux interrupt 112137e9511fSJohn Baldwin * handlers for child devices from their interrupt handler. 112237e9511fSJohn Baldwin */ 112337e9511fSJohn Baldwin void 112437e9511fSJohn Baldwin intr_event_execute_handlers(struct proc *p, struct intr_event *ie) 1125e0f66ef8SJohn Baldwin { 1126e0f66ef8SJohn Baldwin struct intr_handler *ih, *ihn; 1127e0f66ef8SJohn Baldwin 1128e0f66ef8SJohn Baldwin TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) { 1129e0f66ef8SJohn Baldwin /* 1130e0f66ef8SJohn Baldwin * If this handler is marked for death, remove it from 1131e0f66ef8SJohn Baldwin * the list of handlers and wake up the sleeper. 1132e0f66ef8SJohn Baldwin */ 1133e0f66ef8SJohn Baldwin if (ih->ih_flags & IH_DEAD) { 1134e0f66ef8SJohn Baldwin mtx_lock(&ie->ie_lock); 1135e0f66ef8SJohn Baldwin TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next); 1136e0f66ef8SJohn Baldwin ih->ih_flags &= ~IH_DEAD; 1137e0f66ef8SJohn Baldwin wakeup(ih); 1138e0f66ef8SJohn Baldwin mtx_unlock(&ie->ie_lock); 1139e0f66ef8SJohn Baldwin continue; 1140e0f66ef8SJohn Baldwin } 1141e0f66ef8SJohn Baldwin 1142f2d619c8SPaolo Pisati /* Skip filter only handlers */ 1143f2d619c8SPaolo Pisati if (ih->ih_handler == NULL) 1144f2d619c8SPaolo Pisati continue; 1145f2d619c8SPaolo Pisati 1146e0f66ef8SJohn Baldwin /* 1147e0f66ef8SJohn Baldwin * For software interrupt threads, we only execute 1148e0f66ef8SJohn Baldwin * handlers that have their need flag set. Hardware 1149e0f66ef8SJohn Baldwin * interrupt threads always invoke all of their handlers. 1150e0f66ef8SJohn Baldwin */ 1151e0f66ef8SJohn Baldwin if (ie->ie_flags & IE_SOFT) { 1152e0f66ef8SJohn Baldwin if (!ih->ih_need) 1153e0f66ef8SJohn Baldwin continue; 1154e0f66ef8SJohn Baldwin else 1155e0f66ef8SJohn Baldwin atomic_store_rel_int(&ih->ih_need, 0); 1156e0f66ef8SJohn Baldwin } 1157e0f66ef8SJohn Baldwin 1158e0f66ef8SJohn Baldwin /* Execute this handler. */ 1159e0f66ef8SJohn Baldwin CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x", 1160bafe5a31SPaolo Pisati __func__, p->p_pid, (void *)ih->ih_handler, 1161bafe5a31SPaolo Pisati ih->ih_argument, ih->ih_name, ih->ih_flags); 1162e0f66ef8SJohn Baldwin 1163e0f66ef8SJohn Baldwin if (!(ih->ih_flags & IH_MPSAFE)) 1164e0f66ef8SJohn Baldwin mtx_lock(&Giant); 1165e0f66ef8SJohn Baldwin ih->ih_handler(ih->ih_argument); 1166e0f66ef8SJohn Baldwin if (!(ih->ih_flags & IH_MPSAFE)) 1167e0f66ef8SJohn Baldwin mtx_unlock(&Giant); 1168e0f66ef8SJohn Baldwin } 116937e9511fSJohn Baldwin } 117037e9511fSJohn Baldwin 117137e9511fSJohn Baldwin static void 117237e9511fSJohn Baldwin ithread_execute_handlers(struct proc *p, struct intr_event *ie) 117337e9511fSJohn Baldwin { 117437e9511fSJohn Baldwin 117537e9511fSJohn Baldwin /* Interrupt handlers should not sleep. 
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_NO_SLEEPING();
	intr_event_execute_handlers(p, ie);
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_SLEEPING_OK();

	/*
	 * Interrupt storm handling:
	 *
	 * If this interrupt source is currently storming, then throttle
	 * it to only fire the handler once per clock tick.
	 *
	 * If this interrupt source is not currently storming, but the
	 * number of back to back interrupts exceeds the storm threshold,
	 * then enter storming mode.
	 */
	if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
	    !(ie->ie_flags & IE_SOFT)) {
		/* Report the message only once every second. */
		if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
			printf(
	"interrupt storm detected on \"%s\"; throttling interrupt source\n",
			    ie->ie_name);
		}
		pause("istorm", 1);
	} else
		ie->ie_count++;

	/*
	 * Now that all the handlers have had a chance to run, reenable
	 * the interrupt source.
	 */
	if (ie->ie_post_ithread != NULL)
		ie->ie_post_ithread(ie->ie_source);
}

#ifndef INTR_FILTER
/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	ithd = (struct intr_thread *)arg;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 */
		while (ithd->it_need) {
			/*
			 * This might need a full read and write barrier
			 * to make sure that this write posts before any
			 * of the memory or device accesses in the
			 * handlers.
			 */
			atomic_store_rel_int(&ithd->it_need, 0);
			ithread_execute_handlers(p, ie);
		}
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL | SWT_IWAIT, NULL);
		}
		thread_unlock(td);
	}
}

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:			the event connected to this interrupt.
 * o frame:			some archs (e.g. i386) pass a frame to some
 *				handlers as their main argument.
 * Return value:
 * o 0:				everything ok.
 * o EINVAL:			stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_handler *ih;
	struct thread *td;
	int error, ret, thread;

	td = curthread;

	/* An interrupt with no event or handlers is a stray interrupt. */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	/*
	 * Execute fast interrupt handlers directly.
	 * To support clock handlers, if a handler registers
	 * with a NULL argument, then we pass it a pointer to
	 * a trapframe as its argument.
	 */
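	/*
	 * The filters below run inside a critical section, so they may not
	 * sleep and may only use spin locks; anything more involved must be
	 * deferred to the ithread.
	 */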
	td->td_intr_nesting_level++;
	thread = 0;
	ret = 0;
	critical_enter();
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih->ih_filter == NULL) {
			thread = 1;
			continue;
		}
		CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
		    ih->ih_filter, ih->ih_argument == NULL ? frame :
		    ih->ih_argument, ih->ih_name);
		if (ih->ih_argument == NULL)
			ret = ih->ih_filter(frame);
		else
			ret = ih->ih_filter(ih->ih_argument);
		/*
		 * Wrapper handler special handling:
		 *
		 * in some particular cases (like pccard and pccbb),
		 * the _real_ device handler is wrapped in a couple of
		 * functions - a filter wrapper and an ithread wrapper.
		 * In this case (and just in this case), the filter wrapper
		 * could ask the system to schedule the ithread and mask
		 * the interrupt source if the wrapped handler is composed
		 * of just an ithread handler.
		 *
		 * TODO: write a generic wrapper to avoid people rolling
		 * their own
		 */
		if (!thread) {
			if (ret == FILTER_SCHEDULE_THREAD)
				thread = 1;
		}
	}

	if (thread) {
		if (ie->ie_pre_ithread != NULL)
			ie->ie_pre_ithread(ie->ie_source);
	} else {
		if (ie->ie_post_filter != NULL)
			ie->ie_post_filter(ie->ie_source);
	}

	/* Schedule the ithread if needed. */
	if (thread) {
		error = intr_event_schedule_thread(ie);
#ifndef XEN
		KASSERT(error == 0, ("bad stray interrupt"));
#else
		if (error != 0)
			log(LOG_WARNING, "bad stray interrupt");
#endif
	}
	critical_exit();
	td->td_intr_nesting_level--;
	return (0);
}
#else
/*
 * This is the main code for interrupt threads.
 */
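/*
 * In the INTR_FILTER case the loop is passed the struct intr_handler rather
 * than the struct intr_thread: a handler may own a private ithread
 * (ih_thread != NULL), in which case only that handler is run; otherwise
 * all of the event's handlers are run as usual.
 */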
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_handler *ih;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	int priv;

	td = curthread;
	p = td->td_proc;
	ih = (struct intr_handler *)arg;
	priv = (ih->ih_thread != NULL) ? 1 : 0;
	ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 */
		while (ithd->it_need) {
			/*
			 * This might need a full read and write barrier
			 * to make sure that this write posts before any
			 * of the memory or device accesses in the
			 * handlers.
			 */
			atomic_store_rel_int(&ithd->it_need, 0);
			if (priv)
				priv_ithread_execute_handler(p, ih);
			else
				ithread_execute_handlers(p, ie);
		}
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
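		/*
		 * The thread lock is the same lock taken when this ithread
		 * is scheduled, so the final it_need check and the switch
		 * to IWAIT are atomic with respect to a new interrupt
		 * scheduling this thread.
		 */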
		thread_lock(td);
		if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL | SWT_IWAIT, NULL);
		}
		thread_unlock(td);
	}
}

/*
 * Main loop for interrupt filter.
 *
 * Some architectures (i386, amd64 and arm) require the optional frame
 * parameter, and use it as the main argument for fast handler execution
 * when ih_argument == NULL.
 *
 * Return value:
 * o FILTER_STRAY:		No filter recognized the event, and no
 *				filter-less handler is registered on this
 *				line.
 * o FILTER_HANDLED:		A filter claimed the event and served it.
 * o FILTER_SCHEDULE_THREAD:	No filter claimed the event, but there's at
 *				least one filter-less handler on this line.
 * o FILTER_HANDLED |
 *   FILTER_SCHEDULE_THREAD:	A filter claimed the event, and asked for
 *				scheduling the per-handler ithread.
 *
 * In case an ithread has to be scheduled, in *ithd there will be a
 * pointer to a struct intr_thread containing the thread to be
 * scheduled.
 */

static int
intr_filter_loop(struct intr_event *ie, struct trapframe *frame,
    struct intr_thread **ithd)
{
	struct intr_handler *ih;
	void *arg;
	int ret, thread_only;

	ret = 0;
	thread_only = 0;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		/*
		 * Execute fast interrupt handlers directly.
		 * To support clock handlers, if a handler registers
		 * with a NULL argument, then we pass it a pointer to
		 * a trapframe as its argument.
		 */
		arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument);

		CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__,
		    ih->ih_filter, ih->ih_handler, arg, ih->ih_name);

		if (ih->ih_filter != NULL)
			ret = ih->ih_filter(arg);
		else {
			thread_only = 1;
			continue;
		}

		if (ret & FILTER_STRAY)
			continue;
		else {
			*ithd = ih->ih_thread;
			return (ret);
		}
	}

	/*
	 * No filters handled the interrupt and we have at least
	 * one handler without a filter.  In this case, we schedule
	 * all of the filter-less handlers to run in the ithread.
	 */
	if (thread_only) {
		*ithd = ie->ie_thread;
		return (FILTER_SCHEDULE_THREAD);
	}
	return (FILTER_STRAY);
}

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:			the event connected to this interrupt.
 * o frame:			some archs (e.g. i386) pass a frame to some
 *				handlers as their main argument.
 * Return value:
 * o 0:				everything ok.
 * o EINVAL:			stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_thread *ithd;
	struct thread *td;
	int thread;

	ithd = NULL;
	td = curthread;

	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	td->td_intr_nesting_level++;
	thread = 0;
	critical_enter();
	thread = intr_filter_loop(ie, frame, &ithd);
	if (thread & FILTER_HANDLED) {
		if (ie->ie_post_filter != NULL)
			ie->ie_post_filter(ie->ie_source);
	} else {
		if (ie->ie_pre_ithread != NULL)
			ie->ie_pre_ithread(ie->ie_source);
	}
	critical_exit();

	/* Interrupt storm logic */
	if (thread & FILTER_STRAY) {
		ie->ie_count++;
		if (ie->ie_count < intr_storm_threshold)
			printf("Interrupt stray detection not present\n");
	}

	/* Schedule an ithread if needed. */
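	/*
	 * intr_filter_loop() only returns FILTER_SCHEDULE_THREAD after
	 * setting *ithd to a valid thread, so a scheduling failure here
	 * indicates a bug rather than a stray interrupt; hence the panic
	 * below.
	 */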
	if (thread & FILTER_SCHEDULE_THREAD) {
		if (intr_event_schedule_thread(ie, ithd) != 0)
			panic("%s: impossible stray interrupt", __func__);
	}
	td->td_intr_nesting_level--;
	return (0);
}
#endif

#ifdef DDB
/*
 * Dump details about an interrupt handler
 */
static void
db_dump_intrhand(struct intr_handler *ih)
{
	int comma;

	db_printf("\t%-10s ", ih->ih_name);
	switch (ih->ih_pri) {
	case PI_REALTIME:
		db_printf("CLK ");
		break;
	case PI_AV:
		db_printf("AV  ");
		break;
	case PI_TTYHIGH:
	case PI_TTYLOW:
		db_printf("TTY ");
		break;
	case PI_TAPE:
		db_printf("TAPE");
		break;
	case PI_NET:
		db_printf("NET ");
		break;
	case PI_DISK:
	case PI_DISKLOW:
		db_printf("DISK");
		break;
	case PI_DULL:
		db_printf("DULL");
		break;
	default:
		if (ih->ih_pri >= PI_SOFT)
			db_printf("SWI ");
		else
			db_printf("%4u", ih->ih_pri);
		break;
	}
	db_printf(" ");
	db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
	db_printf("(%p)", ih->ih_argument);
	if (ih->ih_need ||
	    (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
	    IH_MPSAFE)) != 0) {
		db_printf(" {");
		comma = 0;
		if (ih->ih_flags & IH_EXCLUSIVE) {
			if (comma)
				db_printf(", ");
			db_printf("EXCL");
			comma = 1;
		}
		if (ih->ih_flags & IH_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ih->ih_flags & IH_DEAD) {
			if (comma)
				db_printf(", ");
			db_printf("DEAD");
			comma = 1;
		}
		if (ih->ih_flags & IH_MPSAFE) {
			if (comma)
				db_printf(", ");
			db_printf("MPSAFE");
			comma = 1;
		}
		if (ih->ih_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");
}

/*
 * Dump details about an event.
 */
void
db_dump_intr_event(struct intr_event *ie, int handlers)
{
	struct intr_handler *ih;
	struct intr_thread *it;
	int comma;

	db_printf("%s ", ie->ie_fullname);
	it = ie->ie_thread;
	if (it != NULL)
		db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
	else
		db_printf("(no thread)");
	if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
	    (it != NULL && it->it_need)) {
		db_printf(" {");
		comma = 0;
		if (ie->ie_flags & IE_SOFT) {
			db_printf("SOFT");
			comma = 1;
		}
		if (ie->ie_flags & IE_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ie->ie_flags & IE_ADDING_THREAD) {
			if (comma)
				db_printf(", ");
			db_printf("ADDING_THREAD");
			comma = 1;
		}
		if (it != NULL && it->it_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");

	if (handlers)
		TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
			db_dump_intrhand(ih);
}

/*
 * Dump data about interrupt handlers
 */
DB_SHOW_COMMAND(intr, db_show_intr)
{
	struct intr_event *ie;
	int all, verbose;

	verbose = index(modif, 'v') != NULL;
	all = index(modif, 'a') != NULL;
	TAILQ_FOREACH(ie, &event_list, ie_list) {
		if (!all && TAILQ_EMPTY(&ie->ie_handlers))
			continue;
		db_dump_intr_event(ie, verbose);
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */

/*
 * Start standard software interrupt threads
 */
static void
start_softintr(void *dummy)
{

	if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
		panic("died while creating vm swi ithread");
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
    NULL);

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however is
 * machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames,
	    req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrcnt,
	    (char *)eintrcnt - (char *)intrcnt, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");

#ifdef DDB
/*
 * DDB command to dump the interrupt statistics.
 */
DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
{
	u_long *i;
	char *cp;

	cp = intrnames;
	for (i = intrcnt; i != eintrcnt && !db_pager_quit; i++) {
		if (*cp == '\0')
			break;
		if (*i != 0)
			db_printf("%s\t%lu\n", cp, *i);
		cp += strlen(cp) + 1;
	}
}
#endif
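
/*
 * Usage sketch (illustrative only, not compiled): how a hypothetical driver
 * could use the software interrupt interfaces above to defer work from a
 * primary interrupt.  The foo_* names are invented for this example; only
 * swi_add(), swi_sched() and swi_remove() are real interfaces.
 *
 *	static void *foo_swi_cookie;
 *
 *	static void
 *	foo_swi_handler(void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		// Deferred work runs here in a software interrupt thread.
 *		foo_process_completions(sc);
 *	}
 *
 *	static int
 *	foo_attach(struct foo_softc *sc)
 *	{
 *
 *		return (swi_add(NULL, "foo", foo_swi_handler, sc, SWI_TQ,
 *		    INTR_MPSAFE, &foo_swi_cookie));
 *	}
 *
 *	// From the primary interrupt filter or handler, request that the
 *	// software interrupt thread run:
 *	//	swi_sched(foo_swi_cookie, 0);
 *
 *	static int
 *	foo_detach(struct foo_softc *sc)
 *	{
 *
 *		return (swi_remove(foo_swi_cookie));
 *	}
 */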