/*-
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
	struct intr_event *it_event;
	struct thread *it_thread;	/* Kernel thread. */
	int	it_flags;		/* (j) IT_* flags. */
	int	it_need;		/* Needs service. */
};

/* Interrupt thread flags kept in it_flags */
#define	IT_DEAD		0x000001	/* Thread is waiting to exit. */
#define	IT_WAIT		0x000002	/* Thread is waiting for completion. */

struct intr_entropy {
	struct thread *td;
	uintptr_t event;
};

struct intr_event *clk_intr_event;
struct intr_event *tty_intr_event;
void	*vm_ih;
struct proc *intrproc;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static int intr_storm_threshold = 1000;
TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);
static struct mtx event_lock;
MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);

static void	intr_event_update(struct intr_event *ie);
#ifdef INTR_FILTER
static int	intr_event_schedule_thread(struct intr_event *ie,
		    struct intr_thread *ithd);
static int	intr_filter_loop(struct intr_event *ie,
		    struct trapframe *frame, struct intr_thread **ithd);
static struct intr_thread *ithread_create(const char *name,
		    struct intr_handler *ih);
#else
static int	intr_event_schedule_thread(struct intr_event *ie);
static struct intr_thread *ithread_create(const char *name);
#endif
static void	ithread_destroy(struct intr_thread *ithread);
static void	ithread_execute_handlers(struct proc *p,
		    struct intr_event *ie);
#ifdef INTR_FILTER
static void	priv_ithread_execute_handler(struct proc *p,
		    struct intr_handler *ih);
#endif
static void	ithread_loop(void *);
static void	ithread_update(struct intr_thread *ithd);
static void	start_softintr(void *);

/* Map an interrupt type to an ithread priority. */
u_char
intr_priority(enum intr_type flags)
{
	u_char pri;

	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
	switch (flags) {
	case INTR_TYPE_TTY:
		pri = PI_TTY;
		break;
	case INTR_TYPE_BIO:
		pri = PI_DISK;
		break;
	case INTR_TYPE_NET:
		pri = PI_NET;
		break;
	case INTR_TYPE_CAM:
		pri = PI_DISK;
		break;
	case INTR_TYPE_AV:
		pri = PI_AV;
		break;
	case INTR_TYPE_CLK:
		pri = PI_REALTIME;
		break;
	case INTR_TYPE_MISC:
		pri = PI_DULL;   /* don't care */
		break;
	default:
		/* We didn't specify an interrupt level. */
		panic("intr_priority: no interrupt type in flags");
	}

	return pri;
}

/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
	struct intr_event *ie;
	struct thread *td;
	u_char pri;

	ie = ithd->it_event;
	td = ithd->it_thread;

	/* Determine the overall priority of this event. */
	if (TAILQ_EMPTY(&ie->ie_handlers))
		pri = PRI_MAX_ITHD;
	else
		pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;

	/* Update name and priority. */
	strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
	thread_lock(td);
	sched_prio(td, pri);
	thread_unlock(td);
}

/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
	struct intr_handler *ih;
	char *last;
	int missed, space;

	/* Start off with no entropy and just the name of the event. */
	mtx_assert(&ie->ie_lock, MA_OWNED);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	ie->ie_flags &= ~IE_ENTROPY;
	missed = 0;
	space = 1;

	/* Run through all the handlers updating values. */
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
		    sizeof(ie->ie_fullname)) {
			strcat(ie->ie_fullname, " ");
			strcat(ie->ie_fullname, ih->ih_name);
			space = 0;
		} else
			missed++;
		if (ih->ih_flags & IH_ENTROPY)
			ie->ie_flags |= IE_ENTROPY;
	}

	/*
	 * If the handler names were too long, add +'s to indicate missing
	 * names.  If we run out of room and still have +'s to add, change
	 * the last character from a + to a *.
	 */
	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
	while (missed-- > 0) {
		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
			if (*last == '+') {
				*last = '*';
				break;
			} else
				*last = '+';
		} else if (space) {
			strcat(ie->ie_fullname, " +");
			space = 0;
		} else
			strcat(ie->ie_fullname, "+");
	}

	/*
	 * If this event has an ithread, update its priority and
	 * name.
	 */
	if (ie->ie_thread != NULL)
		ithread_update(ie->ie_thread);
	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}

int
intr_event_create(struct intr_event **event, void *source, int flags, int irq,
    void (*pre_ithread)(void *), void (*post_ithread)(void *),
    void (*post_filter)(void *), int (*assign_cpu)(void *, u_char),
    const char *fmt, ...)
{
	struct intr_event *ie;
	va_list ap;

	/* The only valid flag during creation is IE_SOFT. */
	if ((flags & ~IE_SOFT) != 0)
		return (EINVAL);
	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
	ie->ie_source = source;
	ie->ie_pre_ithread = pre_ithread;
	ie->ie_post_ithread = post_ithread;
	ie->ie_post_filter = post_filter;
	ie->ie_assign_cpu = assign_cpu;
	ie->ie_flags = flags;
	ie->ie_irq = irq;
	ie->ie_cpu = NOCPU;
	TAILQ_INIT(&ie->ie_handlers);
	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

	va_start(ap, fmt);
	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
	va_end(ap);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	mtx_lock(&event_lock);
	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
	mtx_unlock(&event_lock);
	if (event != NULL)
		*event = ie;
	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
	return (0);
}

/*
 * Bind an interrupt event to the specified CPU.  Note that not all
 * platforms support binding an interrupt to a CPU.  For those
 * platforms this request will fail.  For supported platforms, any
 * associated ithreads as well as the primary interrupt context will
 * be bound to the specified CPU.  Using a cpu id of NOCPU unbinds
 * the interrupt event.
 */
int
intr_event_bind(struct intr_event *ie, u_char cpu)
{
	cpuset_t mask;
	lwpid_t id;
	int error;

	/* Need a CPU to bind to. */
	if (cpu != NOCPU && CPU_ABSENT(cpu))
		return (EINVAL);

	if (ie->ie_assign_cpu == NULL)
		return (EOPNOTSUPP);

	error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
	if (error)
		return (error);

	/*
	 * If we have any ithreads try to set their mask first to verify
	 * permissions, etc.
	 */
	mtx_lock(&ie->ie_lock);
	if (ie->ie_thread != NULL) {
		CPU_ZERO(&mask);
		if (cpu == NOCPU)
			CPU_COPY(cpuset_root, &mask);
		else
			CPU_SET(cpu, &mask);
		id = ie->ie_thread->it_thread->td_tid;
		mtx_unlock(&ie->ie_lock);
		error = cpuset_setthread(id, &mask);
		if (error)
			return (error);
	} else
		mtx_unlock(&ie->ie_lock);
	error = ie->ie_assign_cpu(ie->ie_source, cpu);
	if (error) {
		mtx_lock(&ie->ie_lock);
		if (ie->ie_thread != NULL) {
			CPU_ZERO(&mask);
			if (ie->ie_cpu == NOCPU)
				CPU_COPY(cpuset_root, &mask);
			else
				CPU_SET(cpu, &mask);
			id = ie->ie_thread->it_thread->td_tid;
			mtx_unlock(&ie->ie_lock);
			(void)cpuset_setthread(id, &mask);
		} else
			mtx_unlock(&ie->ie_lock);
		return (error);
	}

	mtx_lock(&ie->ie_lock);
	ie->ie_cpu = cpu;
	mtx_unlock(&ie->ie_lock);

	return (error);
}

static struct intr_event *
intr_lookup(int irq)
{
	struct intr_event *ie;

	mtx_lock(&event_lock);
	TAILQ_FOREACH(ie, &event_list, ie_list)
		if (ie->ie_irq == irq &&
		    (ie->ie_flags & IE_SOFT) == 0 &&
		    TAILQ_FIRST(&ie->ie_handlers) != NULL)
			break;
	mtx_unlock(&event_lock);
	return (ie);
}

int
intr_setaffinity(int irq, void *m)
{
	struct intr_event *ie;
	cpuset_t *mask;
	u_char cpu;
	int n;

	mask = m;
	cpu = NOCPU;
	/*
	 * If we're setting all cpus we can unbind.  Otherwise make sure
	 * only one cpu is in the set.
	 */
	if (CPU_CMP(cpuset_root, mask)) {
		for (n = 0; n < CPU_SETSIZE; n++) {
			if (!CPU_ISSET(n, mask))
				continue;
			if (cpu != NOCPU)
				return (EINVAL);
			cpu = (u_char)n;
		}
	}
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);
	return (intr_event_bind(ie, cpu));
}

int
intr_getaffinity(int irq, void *m)
{
	struct intr_event *ie;
	cpuset_t *mask;

	mask = m;
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);
	CPU_ZERO(mask);
	mtx_lock(&ie->ie_lock);
	if (ie->ie_cpu == NOCPU)
		CPU_COPY(cpuset_root, mask);
	else
		CPU_SET(ie->ie_cpu, mask);
	mtx_unlock(&ie->ie_lock);
	return (0);
}

int
intr_event_destroy(struct intr_event *ie)
{

	mtx_lock(&event_lock);
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		mtx_unlock(&ie->ie_lock);
		mtx_unlock(&event_lock);
		return (EBUSY);
	}
	TAILQ_REMOVE(&event_list, ie, ie_list);
#ifndef notyet
	if (ie->ie_thread != NULL) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	mtx_unlock(&event_lock);
	mtx_destroy(&ie->ie_lock);
	free(ie, M_ITHREAD);
	return (0);
}

#ifndef INTR_FILTER
static struct intr_thread *
ithread_create(const char *name)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
	    &td, RFSTOPPED | RFHIGHPID,
	    0, "intr", "%s", name);
	if (error)
		panic("kproc_create() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}
#else
static struct intr_thread *
ithread_create(const char *name, struct intr_handler *ih)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ih, &intrproc,
	    &td, RFSTOPPED | RFHIGHPID,
	    0, "intr", "%s", name);
	if (error)
		panic("kproc_create() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}
#endif

static void
ithread_destroy(struct intr_thread *ithread)
{
	struct thread *td;

	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
	td = ithread->it_thread;
	thread_lock(td);
	ithread->it_flags |= IT_DEAD;
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	}
	thread_unlock(td);
}

#ifndef INTR_FILTER
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Add the new handler to the event in priority order. */
	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	intr_event_update(ie);

	/* Create a thread if we need one. */
	while (ie->ie_thread == NULL && handler != NULL) {
		if (ie->ie_flags & IE_ADDING_THREAD)
			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
		else {
			ie->ie_flags |= IE_ADDING_THREAD;
			mtx_unlock(&ie->ie_lock);
			it = ithread_create("intr: newborn");
			mtx_lock(&ie->ie_lock);
			ie->ie_flags &= ~IE_ADDING_THREAD;
			ie->ie_thread = it;
			it->it_event = ie;
			ithread_update(it);
			wakeup(ie);
		}
	}
	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}
#else
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Add the new handler to the event in priority order. */
	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	intr_event_update(ie);

	/* For filtered handlers, create a private ithread to run on. */
	if (filter != NULL && handler != NULL) {
		mtx_unlock(&ie->ie_lock);
		it = ithread_create("intr: newborn", ih);
		mtx_lock(&ie->ie_lock);
		it->it_event = ie;
		ih->ih_thread = it;
		ithread_update(it); // XXX - do we really need this?!?!?
	} else { /* Create the global per-event thread if we need one. */
		while (ie->ie_thread == NULL && handler != NULL) {
			if (ie->ie_flags & IE_ADDING_THREAD)
				msleep(ie, &ie->ie_lock, 0, "ithread", 0);
			else {
				ie->ie_flags |= IE_ADDING_THREAD;
				mtx_unlock(&ie->ie_lock);
				it = ithread_create("intr: newborn", ih);
				mtx_lock(&ie->ie_lock);
				ie->ie_flags &= ~IE_ADDING_THREAD;
				ie->ie_thread = it;
				it->it_event = ie;
				ithread_update(it);
				wakeup(ie);
			}
		}
	}
	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}
#endif

/*
 * Append a description preceded by a ':' to the name of the specified
 * interrupt handler.
 */
int
intr_event_describe_handler(struct intr_event *ie, void *cookie,
    const char *descr)
{
	struct intr_handler *ih;
	size_t space;
	char *start;

	mtx_lock(&ie->ie_lock);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih == cookie)
			break;
	}
	if (ih == NULL) {
		mtx_unlock(&ie->ie_lock);
		panic("handler %p not found in interrupt event %p", cookie, ie);
	}
#endif
	ih = cookie;

	/*
	 * Look for an existing description by checking for an
	 * existing ":".  This assumes device names do not include
	 * colons.  If one is found, prepare to insert the new
	 * description at that point.  If one is not found, find the
	 * end of the name to use as the insertion point.
	 */
	start = index(ih->ih_name, ':');
	if (start == NULL)
		start = index(ih->ih_name, 0);

	/*
	 * See if there is enough remaining room in the string for the
	 * description + ":".  The "- 1" leaves room for the trailing
	 * '\0'.  The "+ 1" accounts for the colon.
	 */
	space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1;
	if (strlen(descr) + 1 > space) {
		mtx_unlock(&ie->ie_lock);
		return (ENOSPC);
	}

	/* Append a colon followed by the description. */
	*start = ':';
	strcpy(start + 1, descr);
	intr_event_update(ie);
	mtx_unlock(&ie->ie_lock);
	return (0);
}

/*
 * Return the ie_source field from the intr_event an intr_handler is
 * associated with.
 */
void *
intr_handler_source(void *cookie)
{
	struct intr_handler *ih;
	struct intr_event *ie;

	ih = (struct intr_handler *)cookie;
	if (ih == NULL)
		return (NULL);
	ie = ih->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    ih->ih_name));
	return (ie->ie_source);
}
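
/*
 * Illustrative sketch (not taken from this file): the cookie returned via
 * intr_event_add_handler()'s cookiep argument is what the two helpers above
 * operate on.  A hypothetical interrupt controller driver might do:
 *
 *	void *cookie;
 *	int error;
 *
 *	error = intr_event_add_handler(ie, device_get_nameunit(child),
 *	    child_filter, child_handler, child_arg, intr_priority(flags),
 *	    flags, &cookie);
 *	if (error == 0) {
 *		(void)intr_event_describe_handler(ie, cookie, "rx");
 *		source = intr_handler_source(cookie);
 *	}
 *
 * child_filter, child_handler, child_arg and source are placeholder names
 * used only for this example; they are not defined anywhere in the kernel.
 */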

/*
 * Sleep until an ithread finishes executing an interrupt handler.
 *
 * XXX Doesn't currently handle interrupt filters or fast interrupt
 * handlers.  This is intended for compatibility with linux drivers
 * only.  Do not use in BSD code.
 */
void
_intr_drain(int irq)
{
	struct intr_event *ie;
	struct intr_thread *ithd;
	struct thread *td;

	ie = intr_lookup(irq);
	if (ie == NULL)
		return;
	if (ie->ie_thread == NULL)
		return;
	ithd = ie->ie_thread;
	td = ithd->it_thread;
	/*
	 * We set the flag and wait for it to be cleared to avoid
	 * long delays with potentially busy interrupt handlers
	 * were we to only sample TD_AWAITING_INTR() every tick.
	 */
	thread_lock(td);
	if (!TD_AWAITING_INTR(td)) {
		ithd->it_flags |= IT_WAIT;
		while (ithd->it_flags & IT_WAIT) {
			thread_unlock(td);
			pause("idrain", 1);
			thread_lock(td);
		}
	}
	thread_unlock(td);
	return;
}


#ifndef INTR_FILTER
int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
#ifdef INVARIANTS
	struct intr_handler *ih;
#endif
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ie->ie_lock);
	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
	    ih->ih_name, ie->ie_name);
ok:
#endif
	/*
	 * If there is no ithread, then just remove the handler and return.
	 * XXX: Note that an INTR_FAST handler might be running on another
	 * CPU!
	 */
	if (ie->ie_thread == NULL) {
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 *
	 * During a cold boot while cold is set, msleep() does not sleep,
	 * so we have to remove the handler here rather than letting the
	 * thread do it.
	 */
	thread_lock(ie->ie_thread->it_thread);
	if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 */
		ie->ie_thread->it_need = 1;
	} else
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
	thread_unlock(ie->ie_thread->it_thread);
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	intr_event_update(ie);
#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(8).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (!(ih->ih_flags & IH_FAST)) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}

static int
intr_event_schedule_thread(struct intr_event *ie)
{
	struct intr_entropy entropy;
	struct intr_thread *it;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
	    ie->ie_thread == NULL)
		return (EINVAL);

	ctd = curthread;
	it = ie->ie_thread;
	td = it->it_thread;
	p = td->td_proc;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
		    p->p_pid, td->td_name);
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest(&entropy, sizeof(entropy), 2, 0,
		    RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 */
	it->it_need = 1;
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
	}
	thread_unlock(td);

	return (0);
}
#else
int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
	struct intr_thread *it;
#ifdef INVARIANTS
	struct intr_handler *ih;
#endif
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ie->ie_lock);
	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
	    ih->ih_name, ie->ie_name);
ok:
#endif
	/*
	 * If there are no ithreads (per event and per handler), then
	 * just remove the handler and return.
	 * XXX: Note that an INTR_FAST handler might be running on another CPU!
	 */
	if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/* Private or global ithread? */
	it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;
	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 *
	 * During a cold boot while cold is set, msleep() does not sleep,
	 * so we have to remove the handler here rather than letting the
	 * thread do it.
	 */
	thread_lock(it->it_thread);
	if (!TD_AWAITING_INTR(it->it_thread) && !cold) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 */
		it->it_need = 1;
	} else
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
	thread_unlock(it->it_thread);
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	/*
	 * At this point, the handler has been disconnected from the event,
	 * so we can kill the private ithread if any.
	 */
	if (handler->ih_thread) {
		ithread_destroy(handler->ih_thread);
		handler->ih_thread = NULL;
	}
	intr_event_update(ie);
#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(8).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (handler != NULL) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}

static int
intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
{
	struct intr_entropy entropy;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
		return (EINVAL);

	ctd = curthread;
	td = it->it_thread;
	p = td->td_proc;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
		    p->p_pid, td->td_name);
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest(&entropy, sizeof(entropy), 2, 0,
		    RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 */
	it->it_need = 1;
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
	}
	thread_unlock(td);

	return (0);
}
#endif

/*
 * Allow interrupt event binding for software interrupt handlers -- a no-op,
 * since interrupts are generated in software rather than being directed by
 * a PIC.
 */
static int
swi_assign_cpu(void *arg, u_char cpu)
{

	return (0);
}

/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
    void *arg, int pri, enum intr_type flags, void **cookiep)
{
	struct thread *td;
	struct intr_event *ie;
	int error;

	if (flags & INTR_ENTROPY)
		return (EINVAL);

	ie = (eventp != NULL) ? *eventp : NULL;

	if (ie != NULL) {
		if (!(ie->ie_flags & IE_SOFT))
			return (EINVAL);
	} else {
		error = intr_event_create(&ie, NULL, IE_SOFT, 0,
		    NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
		if (error)
			return (error);
		if (eventp != NULL)
			*eventp = ie;
	}
	error = intr_event_add_handler(ie, name, NULL, handler, arg,
	    PI_SWI(pri), flags, cookiep);
	if (error)
		return (error);
	if (pri == SWI_CLOCK) {
		td = ie->ie_thread->it_thread;
		thread_lock(td);
		td->td_flags |= TDF_NOLOAD;
		thread_unlock(td);
	}
	return (0);
}
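
/*
 * Illustrative sketch (placeholder names, not code from this file): a
 * subsystem typically registers a handler once with swi_add() and later
 * triggers it with swi_sched(), which is defined below.  For example:
 *
 *	static void *mysoft_cookie;
 *
 *	error = swi_add(NULL, "mysoft", mysoft_handler, NULL, SWI_TQ,
 *	    INTR_MPSAFE, &mysoft_cookie);
 *	...
 *	swi_sched(mysoft_cookie, 0);
 *
 * "mysoft" and mysoft_handler are hypothetical, and SWI_TQ merely stands
 * in for one of the standard SWI_* priorities.
 */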

/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
	struct intr_handler *ih = (struct intr_handler *)cookie;
	struct intr_event *ie = ih->ih_event;
	int error;

	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
	    ih->ih_need);

	/*
	 * Set ih_need for this handler so that if the ithread is already
	 * running it will execute this handler on the next pass.  Otherwise,
	 * it will execute it the next time it runs.
	 */
	atomic_store_rel_int(&ih->ih_need, 1);

	if (!(flags & SWI_DELAY)) {
		PCPU_INC(cnt.v_soft);
#ifdef INTR_FILTER
		error = intr_event_schedule_thread(ie, ie->ie_thread);
#else
		error = intr_event_schedule_thread(ie);
#endif
		KASSERT(error == 0, ("stray software interrupt"));
	}
}

/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

	return (intr_event_remove_handler(cookie));
}

#ifdef INTR_FILTER
static void
priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih)
{
	struct intr_event *ie;

	ie = ih->ih_event;
	/*
	 * If this handler is marked for death, remove it from
	 * the list of handlers and wake up the sleeper.
	 */
	if (ih->ih_flags & IH_DEAD) {
		mtx_lock(&ie->ie_lock);
		TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
		ih->ih_flags &= ~IH_DEAD;
		wakeup(ih);
		mtx_unlock(&ie->ie_lock);
		return;
	}

	/* Execute this handler. */
*/ 1199bafe5a31SPaolo Pisati CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x", 1200bafe5a31SPaolo Pisati __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument, 1201bafe5a31SPaolo Pisati ih->ih_name, ih->ih_flags); 1202bafe5a31SPaolo Pisati 1203bafe5a31SPaolo Pisati if (!(ih->ih_flags & IH_MPSAFE)) 1204bafe5a31SPaolo Pisati mtx_lock(&Giant); 1205bafe5a31SPaolo Pisati ih->ih_handler(ih->ih_argument); 1206bafe5a31SPaolo Pisati if (!(ih->ih_flags & IH_MPSAFE)) 1207bafe5a31SPaolo Pisati mtx_unlock(&Giant); 1208bafe5a31SPaolo Pisati } 1209bafe5a31SPaolo Pisati #endif 1210bafe5a31SPaolo Pisati 121137e9511fSJohn Baldwin /* 121237e9511fSJohn Baldwin * This is a public function for use by drivers that mux interrupt 121337e9511fSJohn Baldwin * handlers for child devices from their interrupt handler. 121437e9511fSJohn Baldwin */ 121537e9511fSJohn Baldwin void 121637e9511fSJohn Baldwin intr_event_execute_handlers(struct proc *p, struct intr_event *ie) 1217e0f66ef8SJohn Baldwin { 1218e0f66ef8SJohn Baldwin struct intr_handler *ih, *ihn; 1219e0f66ef8SJohn Baldwin 1220e0f66ef8SJohn Baldwin TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) { 1221e0f66ef8SJohn Baldwin /* 1222e0f66ef8SJohn Baldwin * If this handler is marked for death, remove it from 1223e0f66ef8SJohn Baldwin * the list of handlers and wake up the sleeper. 1224e0f66ef8SJohn Baldwin */ 1225e0f66ef8SJohn Baldwin if (ih->ih_flags & IH_DEAD) { 1226e0f66ef8SJohn Baldwin mtx_lock(&ie->ie_lock); 1227e0f66ef8SJohn Baldwin TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next); 1228e0f66ef8SJohn Baldwin ih->ih_flags &= ~IH_DEAD; 1229e0f66ef8SJohn Baldwin wakeup(ih); 1230e0f66ef8SJohn Baldwin mtx_unlock(&ie->ie_lock); 1231e0f66ef8SJohn Baldwin continue; 1232e0f66ef8SJohn Baldwin } 1233e0f66ef8SJohn Baldwin 1234f2d619c8SPaolo Pisati /* Skip filter only handlers */ 1235f2d619c8SPaolo Pisati if (ih->ih_handler == NULL) 1236f2d619c8SPaolo Pisati continue; 1237f2d619c8SPaolo Pisati 1238e0f66ef8SJohn Baldwin /* 1239e0f66ef8SJohn Baldwin * For software interrupt threads, we only execute 1240e0f66ef8SJohn Baldwin * handlers that have their need flag set. Hardware 1241e0f66ef8SJohn Baldwin * interrupt threads always invoke all of their handlers. 1242e0f66ef8SJohn Baldwin */ 1243e0f66ef8SJohn Baldwin if (ie->ie_flags & IE_SOFT) { 1244e0f66ef8SJohn Baldwin if (!ih->ih_need) 1245e0f66ef8SJohn Baldwin continue; 1246e0f66ef8SJohn Baldwin else 1247e0f66ef8SJohn Baldwin atomic_store_rel_int(&ih->ih_need, 0); 1248e0f66ef8SJohn Baldwin } 1249e0f66ef8SJohn Baldwin 1250e0f66ef8SJohn Baldwin /* Execute this handler. */ 1251e0f66ef8SJohn Baldwin CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x", 1252bafe5a31SPaolo Pisati __func__, p->p_pid, (void *)ih->ih_handler, 1253bafe5a31SPaolo Pisati ih->ih_argument, ih->ih_name, ih->ih_flags); 1254e0f66ef8SJohn Baldwin 1255e0f66ef8SJohn Baldwin if (!(ih->ih_flags & IH_MPSAFE)) 1256e0f66ef8SJohn Baldwin mtx_lock(&Giant); 1257e0f66ef8SJohn Baldwin ih->ih_handler(ih->ih_argument); 1258e0f66ef8SJohn Baldwin if (!(ih->ih_flags & IH_MPSAFE)) 1259e0f66ef8SJohn Baldwin mtx_unlock(&Giant); 1260e0f66ef8SJohn Baldwin } 126137e9511fSJohn Baldwin } 126237e9511fSJohn Baldwin 126337e9511fSJohn Baldwin static void 126437e9511fSJohn Baldwin ithread_execute_handlers(struct proc *p, struct intr_event *ie) 126537e9511fSJohn Baldwin { 126637e9511fSJohn Baldwin 126737e9511fSJohn Baldwin /* Interrupt handlers should not sleep. 
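 * For hardware interrupt threads the THREAD_NO_SLEEPING()/
 * THREAD_SLEEPING_OK() pair below marks the region in which sleeping
 * is forbidden; software interrupt threads (IE_SOFT) skip the check.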
*/ 126837e9511fSJohn Baldwin if (!(ie->ie_flags & IE_SOFT)) 126937e9511fSJohn Baldwin THREAD_NO_SLEEPING(); 127037e9511fSJohn Baldwin intr_event_execute_handlers(p, ie); 1271e0f66ef8SJohn Baldwin if (!(ie->ie_flags & IE_SOFT)) 1272e0f66ef8SJohn Baldwin THREAD_SLEEPING_OK(); 1273e0f66ef8SJohn Baldwin 1274e0f66ef8SJohn Baldwin /* 1275e0f66ef8SJohn Baldwin * Interrupt storm handling: 1276e0f66ef8SJohn Baldwin * 1277e0f66ef8SJohn Baldwin * If this interrupt source is currently storming, then throttle 1278e0f66ef8SJohn Baldwin * it to only fire the handler once per clock tick. 1279e0f66ef8SJohn Baldwin * 1280e0f66ef8SJohn Baldwin * If this interrupt source is not currently storming, but the 1281e0f66ef8SJohn Baldwin * number of back to back interrupts exceeds the storm threshold, 1282e0f66ef8SJohn Baldwin * then enter storming mode. 1283e0f66ef8SJohn Baldwin */ 1284e41bcf3cSJohn Baldwin if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold && 1285e41bcf3cSJohn Baldwin !(ie->ie_flags & IE_SOFT)) { 12860ae62c18SNate Lawson /* Report the message only once every second. */ 12870ae62c18SNate Lawson if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) { 1288e0f66ef8SJohn Baldwin printf( 12890ae62c18SNate Lawson "interrupt storm detected on \"%s\"; throttling interrupt source\n", 1290e0f66ef8SJohn Baldwin ie->ie_name); 1291e0f66ef8SJohn Baldwin } 1292e41bcf3cSJohn Baldwin pause("istorm", 1); 1293e0f66ef8SJohn Baldwin } else 1294e0f66ef8SJohn Baldwin ie->ie_count++; 1295e0f66ef8SJohn Baldwin 1296e0f66ef8SJohn Baldwin /* 1297e0f66ef8SJohn Baldwin * Now that all the handlers have had a chance to run, reenable 1298e0f66ef8SJohn Baldwin * the interrupt source. 1299e0f66ef8SJohn Baldwin */ 13001ee1b687SJohn Baldwin if (ie->ie_post_ithread != NULL) 13011ee1b687SJohn Baldwin ie->ie_post_ithread(ie->ie_source); 1302e0f66ef8SJohn Baldwin } 1303e0f66ef8SJohn Baldwin 1304bafe5a31SPaolo Pisati #ifndef INTR_FILTER 13058088699fSJohn Baldwin /* 1306b4151f71SJohn Baldwin * This is the main code for interrupt threads. 13078088699fSJohn Baldwin */ 130837c84183SPoul-Henning Kamp static void 1309b4151f71SJohn Baldwin ithread_loop(void *arg) 13108088699fSJohn Baldwin { 1311e0f66ef8SJohn Baldwin struct intr_thread *ithd; 1312e0f66ef8SJohn Baldwin struct intr_event *ie; 1313b40ce416SJulian Elischer struct thread *td; 1314b4151f71SJohn Baldwin struct proc *p; 1315e4cd31ddSJeff Roberson int wake; 13168088699fSJohn Baldwin 1317b40ce416SJulian Elischer td = curthread; 1318b40ce416SJulian Elischer p = td->td_proc; 1319e0f66ef8SJohn Baldwin ithd = (struct intr_thread *)arg; 1320e0f66ef8SJohn Baldwin KASSERT(ithd->it_thread == td, 132191f91617SDavid E. O'Brien ("%s: ithread and proc linkage out of sync", __func__)); 1322e0f66ef8SJohn Baldwin ie = ithd->it_event; 1323e0f66ef8SJohn Baldwin ie->ie_count = 0; 1324e4cd31ddSJeff Roberson wake = 0; 13258088699fSJohn Baldwin 13268088699fSJohn Baldwin /* 13278088699fSJohn Baldwin * As long as we have interrupts outstanding, go through the 13288088699fSJohn Baldwin * list of handlers, giving each one a go at it. 13298088699fSJohn Baldwin */ 13308088699fSJohn Baldwin for (;;) { 1331b4151f71SJohn Baldwin /* 1332b4151f71SJohn Baldwin * If we are an orphaned thread, then just die. 
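 * IT_DEAD is set when the interrupt event owning this thread is
 * being destroyed, so free our intr_thread and exit.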
1333b4151f71SJohn Baldwin */ 1334b4151f71SJohn Baldwin if (ithd->it_flags & IT_DEAD) { 1335e0f66ef8SJohn Baldwin CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__, 13367ab24ea3SJulian Elischer p->p_pid, td->td_name); 1337b4151f71SJohn Baldwin free(ithd, M_ITHREAD); 1338ca9a0ddfSJulian Elischer kthread_exit(); 1339b4151f71SJohn Baldwin } 1340b4151f71SJohn Baldwin 1341e0f66ef8SJohn Baldwin /* 1342e0f66ef8SJohn Baldwin * Service interrupts. If another interrupt arrives while 1343e0f66ef8SJohn Baldwin * we are running, it will set it_need to note that we 1344e0f66ef8SJohn Baldwin * should make another pass. 1345e0f66ef8SJohn Baldwin */ 1346b4151f71SJohn Baldwin while (ithd->it_need) { 13478088699fSJohn Baldwin /* 1348e0f66ef8SJohn Baldwin * This might need a full read and write barrier 1349e0f66ef8SJohn Baldwin * to make sure that this write posts before any 1350e0f66ef8SJohn Baldwin * of the memory or device accesses in the 1351e0f66ef8SJohn Baldwin * handlers. 13528088699fSJohn Baldwin */ 1353b4151f71SJohn Baldwin atomic_store_rel_int(&ithd->it_need, 0); 1354e0f66ef8SJohn Baldwin ithread_execute_handlers(p, ie); 13558088699fSJohn Baldwin } 13567870c3c6SJohn Baldwin WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread"); 13577870c3c6SJohn Baldwin mtx_assert(&Giant, MA_NOTOWNED); 13588088699fSJohn Baldwin 13598088699fSJohn Baldwin /* 13608088699fSJohn Baldwin * Processed all our interrupts. Now get the sched 13618088699fSJohn Baldwin * lock. This may take a while and it_need may get 13628088699fSJohn Baldwin * set again, so we have to check it again. 13638088699fSJohn Baldwin */ 1364982d11f8SJeff Roberson thread_lock(td); 1365e4cd31ddSJeff Roberson if (!ithd->it_need && !(ithd->it_flags & (IT_DEAD | IT_WAIT))) { 13667870c3c6SJohn Baldwin TD_SET_IWAIT(td); 1367e0f66ef8SJohn Baldwin ie->ie_count = 0; 13688df78c41SJeff Roberson mi_switch(SW_VOL | SWT_IWAIT, NULL); 13698088699fSJohn Baldwin } 1370e4cd31ddSJeff Roberson if (ithd->it_flags & IT_WAIT) { 1371e4cd31ddSJeff Roberson wake = 1; 1372e4cd31ddSJeff Roberson ithd->it_flags &= ~IT_WAIT; 1373e4cd31ddSJeff Roberson } 1374982d11f8SJeff Roberson thread_unlock(td); 1375e4cd31ddSJeff Roberson if (wake) { 1376e4cd31ddSJeff Roberson wakeup(ithd); 1377e4cd31ddSJeff Roberson wake = 0; 1378e4cd31ddSJeff Roberson } 13798088699fSJohn Baldwin } 13801931cf94SJohn Baldwin } 13811ee1b687SJohn Baldwin 13821ee1b687SJohn Baldwin /* 13831ee1b687SJohn Baldwin * Main interrupt handling body. 13841ee1b687SJohn Baldwin * 13851ee1b687SJohn Baldwin * Input: 13861ee1b687SJohn Baldwin * o ie: the event connected to this interrupt. 13871ee1b687SJohn Baldwin * o frame: some archs (i.e. i386) pass a frame to some. 13881ee1b687SJohn Baldwin * handlers as their main argument. 13891ee1b687SJohn Baldwin * Return value: 13901ee1b687SJohn Baldwin * o 0: everything ok. 13911ee1b687SJohn Baldwin * o EINVAL: stray interrupt. 13921ee1b687SJohn Baldwin */ 13931ee1b687SJohn Baldwin int 13941ee1b687SJohn Baldwin intr_event_handle(struct intr_event *ie, struct trapframe *frame) 13951ee1b687SJohn Baldwin { 13961ee1b687SJohn Baldwin struct intr_handler *ih; 13971f255bd3SAlexander Motin struct trapframe *oldframe; 13981ee1b687SJohn Baldwin struct thread *td; 13991ee1b687SJohn Baldwin int error, ret, thread; 14001ee1b687SJohn Baldwin 14011ee1b687SJohn Baldwin td = curthread; 14021ee1b687SJohn Baldwin 14031ee1b687SJohn Baldwin /* An interrupt with no event or handlers is a stray interrupt. 
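 * Returning EINVAL lets the machine dependent caller account for
 * it as a stray.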
*/ 14041ee1b687SJohn Baldwin if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers)) 14051ee1b687SJohn Baldwin return (EINVAL); 14061ee1b687SJohn Baldwin 14071ee1b687SJohn Baldwin /* 14081ee1b687SJohn Baldwin * Execute fast interrupt handlers directly. 14091ee1b687SJohn Baldwin * To support clock handlers, if a handler registers 14101ee1b687SJohn Baldwin * with a NULL argument, then we pass it a pointer to 14111ee1b687SJohn Baldwin * a trapframe as its argument. 14121ee1b687SJohn Baldwin */ 14131ee1b687SJohn Baldwin td->td_intr_nesting_level++; 14141ee1b687SJohn Baldwin thread = 0; 14151ee1b687SJohn Baldwin ret = 0; 14161ee1b687SJohn Baldwin critical_enter(); 14171f255bd3SAlexander Motin oldframe = td->td_intr_frame; 14181f255bd3SAlexander Motin td->td_intr_frame = frame; 14191ee1b687SJohn Baldwin TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 14201ee1b687SJohn Baldwin if (ih->ih_filter == NULL) { 14211ee1b687SJohn Baldwin thread = 1; 14221ee1b687SJohn Baldwin continue; 14231ee1b687SJohn Baldwin } 14241ee1b687SJohn Baldwin CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__, 14251ee1b687SJohn Baldwin ih->ih_filter, ih->ih_argument == NULL ? frame : 14261ee1b687SJohn Baldwin ih->ih_argument, ih->ih_name); 14271ee1b687SJohn Baldwin if (ih->ih_argument == NULL) 14281ee1b687SJohn Baldwin ret = ih->ih_filter(frame); 14291ee1b687SJohn Baldwin else 14301ee1b687SJohn Baldwin ret = ih->ih_filter(ih->ih_argument); 143189fc20ccSAndriy Gapon KASSERT(ret == FILTER_STRAY || 143289fc20ccSAndriy Gapon ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 && 143389fc20ccSAndriy Gapon (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0), 143489fc20ccSAndriy Gapon ("%s: incorrect return value %#x from %s", __func__, ret, 143589fc20ccSAndriy Gapon ih->ih_name)); 143689fc20ccSAndriy Gapon 14371ee1b687SJohn Baldwin /* 14381ee1b687SJohn Baldwin * Wrapper handler special handling: 14391ee1b687SJohn Baldwin * 14401ee1b687SJohn Baldwin * in some particular cases (like pccard and pccbb), 14411ee1b687SJohn Baldwin * the _real_ device handler is wrapped in a couple of 14421ee1b687SJohn Baldwin * functions - a filter wrapper and an ithread wrapper. 14431ee1b687SJohn Baldwin * In this case (and just in this case), the filter wrapper 14441ee1b687SJohn Baldwin * could ask the system to schedule the ithread and mask 14451ee1b687SJohn Baldwin * the interrupt source if the wrapped handler is composed 14461ee1b687SJohn Baldwin * of just an ithread handler. 14471ee1b687SJohn Baldwin * 14481ee1b687SJohn Baldwin * TODO: write a generic wrapper to avoid people rolling 14491ee1b687SJohn Baldwin * their own 14501ee1b687SJohn Baldwin */ 14511ee1b687SJohn Baldwin if (!thread) { 14521ee1b687SJohn Baldwin if (ret == FILTER_SCHEDULE_THREAD) 14531ee1b687SJohn Baldwin thread = 1; 14541ee1b687SJohn Baldwin } 14551ee1b687SJohn Baldwin } 14561f255bd3SAlexander Motin td->td_intr_frame = oldframe; 14571ee1b687SJohn Baldwin 14581ee1b687SJohn Baldwin if (thread) { 14591ee1b687SJohn Baldwin if (ie->ie_pre_ithread != NULL) 14601ee1b687SJohn Baldwin ie->ie_pre_ithread(ie->ie_source); 14611ee1b687SJohn Baldwin } else { 14621ee1b687SJohn Baldwin if (ie->ie_post_filter != NULL) 14631ee1b687SJohn Baldwin ie->ie_post_filter(ie->ie_source); 14641ee1b687SJohn Baldwin } 14651ee1b687SJohn Baldwin 14661ee1b687SJohn Baldwin /* Schedule the ithread if needed. 
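 * A scheduling failure also indicates a stray interrupt; it is fatal
 * under INVARIANTS, except on XEN where it is only logged.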
*/ 14671ee1b687SJohn Baldwin if (thread) { 14681ee1b687SJohn Baldwin error = intr_event_schedule_thread(ie); 14696205924aSKip Macy #ifndef XEN 14701ee1b687SJohn Baldwin KASSERT(error == 0, ("bad stray interrupt")); 14716205924aSKip Macy #else 14726205924aSKip Macy if (error != 0) 14736205924aSKip Macy log(LOG_WARNING, "bad stray interrupt"); 14746205924aSKip Macy #endif 14751ee1b687SJohn Baldwin } 14761ee1b687SJohn Baldwin critical_exit(); 14771ee1b687SJohn Baldwin td->td_intr_nesting_level--; 14781ee1b687SJohn Baldwin return (0); 14791ee1b687SJohn Baldwin } 1480bafe5a31SPaolo Pisati #else 1481bafe5a31SPaolo Pisati /* 1482bafe5a31SPaolo Pisati * This is the main code for interrupt threads. 1483bafe5a31SPaolo Pisati */ 1484bafe5a31SPaolo Pisati static void 1485bafe5a31SPaolo Pisati ithread_loop(void *arg) 1486bafe5a31SPaolo Pisati { 1487bafe5a31SPaolo Pisati struct intr_thread *ithd; 1488bafe5a31SPaolo Pisati struct intr_handler *ih; 1489bafe5a31SPaolo Pisati struct intr_event *ie; 1490bafe5a31SPaolo Pisati struct thread *td; 1491bafe5a31SPaolo Pisati struct proc *p; 1492bafe5a31SPaolo Pisati int priv; 1493e4cd31ddSJeff Roberson int wake; 1494bafe5a31SPaolo Pisati 1495bafe5a31SPaolo Pisati td = curthread; 1496bafe5a31SPaolo Pisati p = td->td_proc; 1497bafe5a31SPaolo Pisati ih = (struct intr_handler *)arg; 1498bafe5a31SPaolo Pisati priv = (ih->ih_thread != NULL) ? 1 : 0; 1499bafe5a31SPaolo Pisati ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread; 1500bafe5a31SPaolo Pisati KASSERT(ithd->it_thread == td, 1501bafe5a31SPaolo Pisati ("%s: ithread and proc linkage out of sync", __func__)); 1502bafe5a31SPaolo Pisati ie = ithd->it_event; 1503bafe5a31SPaolo Pisati ie->ie_count = 0; 1504e4cd31ddSJeff Roberson wake = 0; 1505bafe5a31SPaolo Pisati 1506bafe5a31SPaolo Pisati /* 1507bafe5a31SPaolo Pisati * As long as we have interrupts outstanding, go through the 1508bafe5a31SPaolo Pisati * list of handlers, giving each one a go at it. 1509bafe5a31SPaolo Pisati */ 1510bafe5a31SPaolo Pisati for (;;) { 1511bafe5a31SPaolo Pisati /* 1512bafe5a31SPaolo Pisati * If we are an orphaned thread, then just die. 1513bafe5a31SPaolo Pisati */ 1514bafe5a31SPaolo Pisati if (ithd->it_flags & IT_DEAD) { 1515bafe5a31SPaolo Pisati CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__, 15167ab24ea3SJulian Elischer p->p_pid, td->td_name); 1517bafe5a31SPaolo Pisati free(ithd, M_ITHREAD); 1518ca9a0ddfSJulian Elischer kthread_exit(); 1519bafe5a31SPaolo Pisati } 1520bafe5a31SPaolo Pisati 1521bafe5a31SPaolo Pisati /* 1522bafe5a31SPaolo Pisati * Service interrupts. If another interrupt arrives while 1523bafe5a31SPaolo Pisati * we are running, it will set it_need to note that we 1524bafe5a31SPaolo Pisati * should make another pass. 1525bafe5a31SPaolo Pisati */ 1526bafe5a31SPaolo Pisati while (ithd->it_need) { 1527bafe5a31SPaolo Pisati /* 1528bafe5a31SPaolo Pisati * This might need a full read and write barrier 1529bafe5a31SPaolo Pisati * to make sure that this write posts before any 1530bafe5a31SPaolo Pisati * of the memory or device accesses in the 1531bafe5a31SPaolo Pisati * handlers. 
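 * Note that atomic_store_rel_int() below provides release semantics
 * for the store only.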
1532bafe5a31SPaolo Pisati */ 1533bafe5a31SPaolo Pisati atomic_store_rel_int(&ithd->it_need, 0); 1534bafe5a31SPaolo Pisati if (priv) 1535bafe5a31SPaolo Pisati priv_ithread_execute_handler(p, ih); 1536bafe5a31SPaolo Pisati else 1537bafe5a31SPaolo Pisati ithread_execute_handlers(p, ie); 1538bafe5a31SPaolo Pisati } 1539bafe5a31SPaolo Pisati WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread"); 1540bafe5a31SPaolo Pisati mtx_assert(&Giant, MA_NOTOWNED); 1541bafe5a31SPaolo Pisati 1542bafe5a31SPaolo Pisati /* 1543bafe5a31SPaolo Pisati * Processed all our interrupts. Now get the sched 1544bafe5a31SPaolo Pisati * lock. This may take a while and it_need may get 1545bafe5a31SPaolo Pisati * set again, so we have to check it again. 1546bafe5a31SPaolo Pisati */ 1547982d11f8SJeff Roberson thread_lock(td); 1548e4cd31ddSJeff Roberson if (!ithd->it_need && !(ithd->it_flags & (IT_DEAD | IT_WAIT))) { 1549bafe5a31SPaolo Pisati TD_SET_IWAIT(td); 1550bafe5a31SPaolo Pisati ie->ie_count = 0; 15518df78c41SJeff Roberson mi_switch(SW_VOL | SWT_IWAIT, NULL); 1552bafe5a31SPaolo Pisati } 1553e4cd31ddSJeff Roberson if (ithd->it_flags & IT_WAIT) { 1554e4cd31ddSJeff Roberson wake = 1; 1555e4cd31ddSJeff Roberson ithd->it_flags &= ~IT_WAIT; 1556e4cd31ddSJeff Roberson } 1557982d11f8SJeff Roberson thread_unlock(td); 1558e4cd31ddSJeff Roberson if (wake) { 1559e4cd31ddSJeff Roberson wakeup(ithd); 1560e4cd31ddSJeff Roberson wake = 0; 1561e4cd31ddSJeff Roberson } 1562bafe5a31SPaolo Pisati } 1563bafe5a31SPaolo Pisati } 1564bafe5a31SPaolo Pisati 1565bafe5a31SPaolo Pisati /* 1566bafe5a31SPaolo Pisati * Main loop for interrupt filter. 1567bafe5a31SPaolo Pisati * 1568bafe5a31SPaolo Pisati * Some architectures (i386, amd64 and arm) require the optional frame 1569bafe5a31SPaolo Pisati * parameter, and use it as the main argument for fast handler execution 1570bafe5a31SPaolo Pisati * when ih_argument == NULL. 1571bafe5a31SPaolo Pisati * 1572bafe5a31SPaolo Pisati * Return value: 1573bafe5a31SPaolo Pisati * o FILTER_STRAY: No filter recognized the event, and no 1574bafe5a31SPaolo Pisati * filter-less handler is registered on this 1575bafe5a31SPaolo Pisati * line. 1576bafe5a31SPaolo Pisati * o FILTER_HANDLED: A filter claimed the event and served it. 1577bafe5a31SPaolo Pisati * o FILTER_SCHEDULE_THREAD: No filter claimed the event, but there's at 1578bafe5a31SPaolo Pisati * least one filter-less handler on this line. 1579bafe5a31SPaolo Pisati * o FILTER_HANDLED | 1580bafe5a31SPaolo Pisati * FILTER_SCHEDULE_THREAD: A filter claimed the event, and asked for 1581bafe5a31SPaolo Pisati * scheduling the per-handler ithread. 1582bafe5a31SPaolo Pisati * 1583bafe5a31SPaolo Pisati * In case an ithread has to be scheduled, in *ithd there will be a 1584bafe5a31SPaolo Pisati * pointer to a struct intr_thread containing the thread to be 1585bafe5a31SPaolo Pisati * scheduled. 1586bafe5a31SPaolo Pisati */ 1587bafe5a31SPaolo Pisati 15881ee1b687SJohn Baldwin static int 1589bafe5a31SPaolo Pisati intr_filter_loop(struct intr_event *ie, struct trapframe *frame, 1590bafe5a31SPaolo Pisati struct intr_thread **ithd) 1591bafe5a31SPaolo Pisati { 1592bafe5a31SPaolo Pisati struct intr_handler *ih; 1593bafe5a31SPaolo Pisati void *arg; 1594bafe5a31SPaolo Pisati int ret, thread_only; 1595bafe5a31SPaolo Pisati 1596bafe5a31SPaolo Pisati ret = 0; 1597bafe5a31SPaolo Pisati thread_only = 0; 1598bafe5a31SPaolo Pisati TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 1599bafe5a31SPaolo Pisati /* 1600bafe5a31SPaolo Pisati * Execute fast interrupt handlers directly. 
1601bafe5a31SPaolo Pisati * To support clock handlers, if a handler registers 1602bafe5a31SPaolo Pisati * with a NULL argument, then we pass it a pointer to 1603bafe5a31SPaolo Pisati * a trapframe as its argument. 1604bafe5a31SPaolo Pisati */ 1605bafe5a31SPaolo Pisati arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument); 1606bafe5a31SPaolo Pisati 1607bafe5a31SPaolo Pisati CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__, 1608bafe5a31SPaolo Pisati ih->ih_filter, ih->ih_handler, arg, ih->ih_name); 1609bafe5a31SPaolo Pisati 1610bafe5a31SPaolo Pisati if (ih->ih_filter != NULL) 1611bafe5a31SPaolo Pisati ret = ih->ih_filter(arg); 1612bafe5a31SPaolo Pisati else { 1613bafe5a31SPaolo Pisati thread_only = 1; 1614bafe5a31SPaolo Pisati continue; 1615bafe5a31SPaolo Pisati } 161689fc20ccSAndriy Gapon KASSERT(ret == FILTER_STRAY || 161789fc20ccSAndriy Gapon ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 && 161889fc20ccSAndriy Gapon (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0), 161989fc20ccSAndriy Gapon ("%s: incorrect return value %#x from %s", __func__, ret, 162089fc20ccSAndriy Gapon ih->ih_name)); 1621bafe5a31SPaolo Pisati if (ret & FILTER_STRAY) 1622bafe5a31SPaolo Pisati continue; 1623bafe5a31SPaolo Pisati else { 1624bafe5a31SPaolo Pisati *ithd = ih->ih_thread; 1625bafe5a31SPaolo Pisati return (ret); 1626bafe5a31SPaolo Pisati } 1627bafe5a31SPaolo Pisati } 1628bafe5a31SPaolo Pisati 1629bafe5a31SPaolo Pisati /* 1630bafe5a31SPaolo Pisati * No filters handled the interrupt and we have at least 1631bafe5a31SPaolo Pisati * one handler without a filter. In this case, we schedule 1632bafe5a31SPaolo Pisati * all of the filter-less handlers to run in the ithread. 1633bafe5a31SPaolo Pisati */ 1634bafe5a31SPaolo Pisati if (thread_only) { 1635bafe5a31SPaolo Pisati *ithd = ie->ie_thread; 1636bafe5a31SPaolo Pisati return (FILTER_SCHEDULE_THREAD); 1637bafe5a31SPaolo Pisati } 1638bafe5a31SPaolo Pisati return (FILTER_STRAY); 1639bafe5a31SPaolo Pisati } 1640bafe5a31SPaolo Pisati 1641bafe5a31SPaolo Pisati /* 1642bafe5a31SPaolo Pisati * Main interrupt handling body. 1643bafe5a31SPaolo Pisati * 1644bafe5a31SPaolo Pisati * Input: 1645bafe5a31SPaolo Pisati * o ie: the event connected to this interrupt. 1646bafe5a31SPaolo Pisati * o frame: some archs (i.e. i386) pass a frame to some. 1647bafe5a31SPaolo Pisati * handlers as their main argument. 1648bafe5a31SPaolo Pisati * Return value: 1649bafe5a31SPaolo Pisati * o 0: everything ok. 1650bafe5a31SPaolo Pisati * o EINVAL: stray interrupt. 
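 *
 * This INTR_FILTER variant delegates the walk over the handler list
 * to intr_filter_loop() above and then acts on the FILTER_* result.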
1651bafe5a31SPaolo Pisati */ 1652bafe5a31SPaolo Pisati int 1653bafe5a31SPaolo Pisati intr_event_handle(struct intr_event *ie, struct trapframe *frame) 1654bafe5a31SPaolo Pisati { 1655bafe5a31SPaolo Pisati struct intr_thread *ithd; 16561f255bd3SAlexander Motin struct trapframe *oldframe; 1657bafe5a31SPaolo Pisati struct thread *td; 1658bafe5a31SPaolo Pisati int thread; 1659bafe5a31SPaolo Pisati 1660bafe5a31SPaolo Pisati ithd = NULL; 1661bafe5a31SPaolo Pisati td = curthread; 1662bafe5a31SPaolo Pisati 1663bafe5a31SPaolo Pisati if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers)) 1664bafe5a31SPaolo Pisati return (EINVAL); 1665bafe5a31SPaolo Pisati 1666bafe5a31SPaolo Pisati td->td_intr_nesting_level++; 1667bafe5a31SPaolo Pisati thread = 0; 1668bafe5a31SPaolo Pisati critical_enter(); 16691f255bd3SAlexander Motin oldframe = td->td_intr_frame; 16701f255bd3SAlexander Motin td->td_intr_frame = frame; 1671bafe5a31SPaolo Pisati thread = intr_filter_loop(ie, frame, &ithd); 1672bafe5a31SPaolo Pisati if (thread & FILTER_HANDLED) { 16731ee1b687SJohn Baldwin if (ie->ie_post_filter != NULL) 16741ee1b687SJohn Baldwin ie->ie_post_filter(ie->ie_source); 1675bafe5a31SPaolo Pisati } else { 16761ee1b687SJohn Baldwin if (ie->ie_pre_ithread != NULL) 16771ee1b687SJohn Baldwin ie->ie_pre_ithread(ie->ie_source); 1678bafe5a31SPaolo Pisati } 16791f255bd3SAlexander Motin td->td_intr_frame = oldframe; 1680bafe5a31SPaolo Pisati critical_exit(); 1681bafe5a31SPaolo Pisati 1682bafe5a31SPaolo Pisati /* Interrupt storm logic */ 1683bafe5a31SPaolo Pisati if (thread & FILTER_STRAY) { 1684bafe5a31SPaolo Pisati ie->ie_count++; 1685bafe5a31SPaolo Pisati if (ie->ie_count < intr_storm_threshold) 1686bafe5a31SPaolo Pisati printf("Interrupt stray detection not present\n"); 1687bafe5a31SPaolo Pisati } 1688bafe5a31SPaolo Pisati 1689bafe5a31SPaolo Pisati /* Schedule an ithread if needed. 
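 * Unlike the !INTR_FILTER path, a scheduling failure here is treated
 * as an unrecoverable stray interrupt and panics.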
*/ 1690bafe5a31SPaolo Pisati if (thread & FILTER_SCHEDULE_THREAD) { 1691bafe5a31SPaolo Pisati if (intr_event_schedule_thread(ie, ithd) != 0) 1692bafe5a31SPaolo Pisati panic("%s: impossible stray interrupt", __func__); 1693bafe5a31SPaolo Pisati } 1694bafe5a31SPaolo Pisati td->td_intr_nesting_level--; 1695bafe5a31SPaolo Pisati return (0); 1696bafe5a31SPaolo Pisati } 1697bafe5a31SPaolo Pisati #endif 16981931cf94SJohn Baldwin 16998b201c42SJohn Baldwin #ifdef DDB 17008b201c42SJohn Baldwin /* 17018b201c42SJohn Baldwin * Dump details about an interrupt handler 17028b201c42SJohn Baldwin */ 17038b201c42SJohn Baldwin static void 1704e0f66ef8SJohn Baldwin db_dump_intrhand(struct intr_handler *ih) 17058b201c42SJohn Baldwin { 17068b201c42SJohn Baldwin int comma; 17078b201c42SJohn Baldwin 17088b201c42SJohn Baldwin db_printf("\t%-10s ", ih->ih_name); 17098b201c42SJohn Baldwin switch (ih->ih_pri) { 17108b201c42SJohn Baldwin case PI_REALTIME: 17118b201c42SJohn Baldwin db_printf("CLK "); 17128b201c42SJohn Baldwin break; 17138b201c42SJohn Baldwin case PI_AV: 17148b201c42SJohn Baldwin db_printf("AV "); 17158b201c42SJohn Baldwin break; 1716d3305205SJohn Baldwin case PI_TTY: 17178b201c42SJohn Baldwin db_printf("TTY "); 17188b201c42SJohn Baldwin break; 17198b201c42SJohn Baldwin case PI_NET: 17208b201c42SJohn Baldwin db_printf("NET "); 17218b201c42SJohn Baldwin break; 17228b201c42SJohn Baldwin case PI_DISK: 17238b201c42SJohn Baldwin db_printf("DISK"); 17248b201c42SJohn Baldwin break; 17258b201c42SJohn Baldwin case PI_DULL: 17268b201c42SJohn Baldwin db_printf("DULL"); 17278b201c42SJohn Baldwin break; 17288b201c42SJohn Baldwin default: 17298b201c42SJohn Baldwin if (ih->ih_pri >= PI_SOFT) 17308b201c42SJohn Baldwin db_printf("SWI "); 17318b201c42SJohn Baldwin else 17328b201c42SJohn Baldwin db_printf("%4u", ih->ih_pri); 17338b201c42SJohn Baldwin break; 17348b201c42SJohn Baldwin } 17358b201c42SJohn Baldwin db_printf(" "); 17368b201c42SJohn Baldwin db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC); 17378b201c42SJohn Baldwin db_printf("(%p)", ih->ih_argument); 17388b201c42SJohn Baldwin if (ih->ih_need || 1739ef544f63SPaolo Pisati (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD | 17408b201c42SJohn Baldwin IH_MPSAFE)) != 0) { 17418b201c42SJohn Baldwin db_printf(" {"); 17428b201c42SJohn Baldwin comma = 0; 17438b201c42SJohn Baldwin if (ih->ih_flags & IH_EXCLUSIVE) { 17448b201c42SJohn Baldwin if (comma) 17458b201c42SJohn Baldwin db_printf(", "); 17468b201c42SJohn Baldwin db_printf("EXCL"); 17478b201c42SJohn Baldwin comma = 1; 17488b201c42SJohn Baldwin } 17498b201c42SJohn Baldwin if (ih->ih_flags & IH_ENTROPY) { 17508b201c42SJohn Baldwin if (comma) 17518b201c42SJohn Baldwin db_printf(", "); 17528b201c42SJohn Baldwin db_printf("ENTROPY"); 17538b201c42SJohn Baldwin comma = 1; 17548b201c42SJohn Baldwin } 17558b201c42SJohn Baldwin if (ih->ih_flags & IH_DEAD) { 17568b201c42SJohn Baldwin if (comma) 17578b201c42SJohn Baldwin db_printf(", "); 17588b201c42SJohn Baldwin db_printf("DEAD"); 17598b201c42SJohn Baldwin comma = 1; 17608b201c42SJohn Baldwin } 17618b201c42SJohn Baldwin if (ih->ih_flags & IH_MPSAFE) { 17628b201c42SJohn Baldwin if (comma) 17638b201c42SJohn Baldwin db_printf(", "); 17648b201c42SJohn Baldwin db_printf("MPSAFE"); 17658b201c42SJohn Baldwin comma = 1; 17668b201c42SJohn Baldwin } 17678b201c42SJohn Baldwin if (ih->ih_need) { 17688b201c42SJohn Baldwin if (comma) 17698b201c42SJohn Baldwin db_printf(", "); 17708b201c42SJohn Baldwin db_printf("NEED"); 17718b201c42SJohn Baldwin } 17728b201c42SJohn Baldwin 
db_printf("}"); 17738b201c42SJohn Baldwin } 17748b201c42SJohn Baldwin db_printf("\n"); 17758b201c42SJohn Baldwin } 17768b201c42SJohn Baldwin 17778b201c42SJohn Baldwin /* 1778e0f66ef8SJohn Baldwin * Dump details about a event. 17798b201c42SJohn Baldwin */ 17808b201c42SJohn Baldwin void 1781e0f66ef8SJohn Baldwin db_dump_intr_event(struct intr_event *ie, int handlers) 17828b201c42SJohn Baldwin { 1783e0f66ef8SJohn Baldwin struct intr_handler *ih; 1784e0f66ef8SJohn Baldwin struct intr_thread *it; 17858b201c42SJohn Baldwin int comma; 17868b201c42SJohn Baldwin 1787e0f66ef8SJohn Baldwin db_printf("%s ", ie->ie_fullname); 1788e0f66ef8SJohn Baldwin it = ie->ie_thread; 1789e0f66ef8SJohn Baldwin if (it != NULL) 1790e0f66ef8SJohn Baldwin db_printf("(pid %d)", it->it_thread->td_proc->p_pid); 1791e0f66ef8SJohn Baldwin else 1792e0f66ef8SJohn Baldwin db_printf("(no thread)"); 1793e0f66ef8SJohn Baldwin if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 || 1794e0f66ef8SJohn Baldwin (it != NULL && it->it_need)) { 17958b201c42SJohn Baldwin db_printf(" {"); 17968b201c42SJohn Baldwin comma = 0; 1797e0f66ef8SJohn Baldwin if (ie->ie_flags & IE_SOFT) { 17988b201c42SJohn Baldwin db_printf("SOFT"); 17998b201c42SJohn Baldwin comma = 1; 18008b201c42SJohn Baldwin } 1801e0f66ef8SJohn Baldwin if (ie->ie_flags & IE_ENTROPY) { 18028b201c42SJohn Baldwin if (comma) 18038b201c42SJohn Baldwin db_printf(", "); 18048b201c42SJohn Baldwin db_printf("ENTROPY"); 18058b201c42SJohn Baldwin comma = 1; 18068b201c42SJohn Baldwin } 1807e0f66ef8SJohn Baldwin if (ie->ie_flags & IE_ADDING_THREAD) { 18088b201c42SJohn Baldwin if (comma) 18098b201c42SJohn Baldwin db_printf(", "); 1810e0f66ef8SJohn Baldwin db_printf("ADDING_THREAD"); 18118b201c42SJohn Baldwin comma = 1; 18128b201c42SJohn Baldwin } 1813e0f66ef8SJohn Baldwin if (it != NULL && it->it_need) { 18148b201c42SJohn Baldwin if (comma) 18158b201c42SJohn Baldwin db_printf(", "); 18168b201c42SJohn Baldwin db_printf("NEED"); 18178b201c42SJohn Baldwin } 18188b201c42SJohn Baldwin db_printf("}"); 18198b201c42SJohn Baldwin } 18208b201c42SJohn Baldwin db_printf("\n"); 18218b201c42SJohn Baldwin 18228b201c42SJohn Baldwin if (handlers) 1823e0f66ef8SJohn Baldwin TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) 18248b201c42SJohn Baldwin db_dump_intrhand(ih); 18258b201c42SJohn Baldwin } 1826e0f66ef8SJohn Baldwin 1827e0f66ef8SJohn Baldwin /* 1828e0f66ef8SJohn Baldwin * Dump data about interrupt handlers 1829e0f66ef8SJohn Baldwin */ 1830e0f66ef8SJohn Baldwin DB_SHOW_COMMAND(intr, db_show_intr) 1831e0f66ef8SJohn Baldwin { 1832e0f66ef8SJohn Baldwin struct intr_event *ie; 183319e9205aSJohn Baldwin int all, verbose; 1834e0f66ef8SJohn Baldwin 1835e0f66ef8SJohn Baldwin verbose = index(modif, 'v') != NULL; 1836e0f66ef8SJohn Baldwin all = index(modif, 'a') != NULL; 1837e0f66ef8SJohn Baldwin TAILQ_FOREACH(ie, &event_list, ie_list) { 1838e0f66ef8SJohn Baldwin if (!all && TAILQ_EMPTY(&ie->ie_handlers)) 1839e0f66ef8SJohn Baldwin continue; 1840e0f66ef8SJohn Baldwin db_dump_intr_event(ie, verbose); 184119e9205aSJohn Baldwin if (db_pager_quit) 184219e9205aSJohn Baldwin break; 1843e0f66ef8SJohn Baldwin } 1844e0f66ef8SJohn Baldwin } 18458b201c42SJohn Baldwin #endif /* DDB */ 18468b201c42SJohn Baldwin 1847b4151f71SJohn Baldwin /* 18488088699fSJohn Baldwin * Start standard software interrupt threads 18491931cf94SJohn Baldwin */ 18501931cf94SJohn Baldwin static void 1851b4151f71SJohn Baldwin start_softintr(void *dummy) 18521931cf94SJohn Baldwin { 1853b4151f71SJohn Baldwin 18548d809d50SJeff Roberson if 
(swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih)) 18558d809d50SJeff Roberson panic("died while creating vm swi ithread"); 18561931cf94SJohn Baldwin } 1857237fdd78SRobert Watson SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, 1858237fdd78SRobert Watson NULL); 18591931cf94SJohn Baldwin 1860d279178dSThomas Moestl /* 1861d279178dSThomas Moestl * Sysctls used by systat and others: hw.intrnames and hw.intrcnt. 1862d279178dSThomas Moestl * The data for this machine dependent, and the declarations are in machine 1863d279178dSThomas Moestl * dependent code. The layout of intrnames and intrcnt however is machine 1864d279178dSThomas Moestl * independent. 1865d279178dSThomas Moestl * 1866d279178dSThomas Moestl * We do not know the length of intrcnt and intrnames at compile time, so 1867d279178dSThomas Moestl * calculate things at run time. 1868d279178dSThomas Moestl */ 1869d279178dSThomas Moestl static int 1870d279178dSThomas Moestl sysctl_intrnames(SYSCTL_HANDLER_ARGS) 1871d279178dSThomas Moestl { 1872*521ea19dSAttilio Rao return (sysctl_handle_opaque(oidp, intrnames, sintrnames, req)); 1873d279178dSThomas Moestl } 1874d279178dSThomas Moestl 1875d279178dSThomas Moestl SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD, 1876d279178dSThomas Moestl NULL, 0, sysctl_intrnames, "", "Interrupt Names"); 1877d279178dSThomas Moestl 1878d279178dSThomas Moestl static int 1879d279178dSThomas Moestl sysctl_intrcnt(SYSCTL_HANDLER_ARGS) 1880d279178dSThomas Moestl { 1881*521ea19dSAttilio Rao return (sysctl_handle_opaque(oidp, intrcnt, sintrcnt, req)); 1882d279178dSThomas Moestl } 1883d279178dSThomas Moestl 1884d279178dSThomas Moestl SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD, 1885d279178dSThomas Moestl NULL, 0, sysctl_intrcnt, "", "Interrupt Counts"); 18868b201c42SJohn Baldwin 18878b201c42SJohn Baldwin #ifdef DDB 18888b201c42SJohn Baldwin /* 18898b201c42SJohn Baldwin * DDB command to dump the interrupt statistics. 18908b201c42SJohn Baldwin */ 18918b201c42SJohn Baldwin DB_SHOW_COMMAND(intrcnt, db_show_intrcnt) 18928b201c42SJohn Baldwin { 18938b201c42SJohn Baldwin u_long *i; 18948b201c42SJohn Baldwin char *cp; 1895*521ea19dSAttilio Rao u_int j; 18968b201c42SJohn Baldwin 18978b201c42SJohn Baldwin cp = intrnames; 1898*521ea19dSAttilio Rao j = 0; 1899*521ea19dSAttilio Rao for (i = intrcnt; j < (sintrcnt / sizeof(u_long)) && !db_pager_quit; 1900*521ea19dSAttilio Rao i++, j++) { 19018b201c42SJohn Baldwin if (*cp == '\0') 19028b201c42SJohn Baldwin break; 19038b201c42SJohn Baldwin if (*i != 0) 19048b201c42SJohn Baldwin db_printf("%s\t%lu\n", cp, *i); 19058b201c42SJohn Baldwin cp += strlen(cp) + 1; 19068b201c42SJohn Baldwin } 19078b201c42SJohn Baldwin } 19088b201c42SJohn Baldwin #endif 1909
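/*
 * The hw.intrnames and hw.intrcnt sysctls exported above are consumed by
 * tools such as vmstat(8) and systat(1).  A hypothetical userland sketch
 * (illustrative only, not part of the kernel) would size and fetch the
 * counter array with sysctlbyname(3):
 *
 *	size_t len;
 *	u_long *counts;
 *
 *	sysctlbyname("hw.intrcnt", NULL, &len, NULL, 0);
 *	counts = malloc(len);
 *	sysctlbyname("hw.intrcnt", counts, &len, NULL, 0);
 */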