/*-
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
        struct intr_event *it_event;
        struct thread *it_thread;       /* Kernel thread. */
        int     it_flags;               /* (j) IT_* flags. */
        int     it_need;                /* Needs service. */
};

/* Interrupt thread flags kept in it_flags */
#define IT_DEAD         0x000001        /* Thread is waiting to exit. */
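/*
 * Sample mixed into the entropy pool via random_harvest() when an event
 * marked IE_ENTROPY fires; see intr_event_schedule_thread() below.
 */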
struct intr_entropy {
        struct thread *td;
        uintptr_t event;
};

struct intr_event *clk_intr_event;
struct intr_event *tty_intr_event;
void    *softclock_ih;
void    *vm_ih;
struct proc *intrproc;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static int intr_storm_threshold = 1000;
TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);

static void     intr_event_update(struct intr_event *ie);
#ifdef INTR_FILTER
static struct intr_thread *ithread_create(const char *name,
    struct intr_handler *ih);
#else
static struct intr_thread *ithread_create(const char *name);
#endif
static void     ithread_destroy(struct intr_thread *ithread);
static void     ithread_execute_handlers(struct proc *p,
    struct intr_event *ie);
#ifdef INTR_FILTER
static void     priv_ithread_execute_handler(struct proc *p,
    struct intr_handler *ih);
#endif
static void     ithread_loop(void *);
static void     ithread_update(struct intr_thread *ithd);
static void     start_softintr(void *);

/* Map an interrupt type to an ithread priority. */
u_char
intr_priority(enum intr_type flags)
{
        u_char pri;

        flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
            INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
        switch (flags) {
        case INTR_TYPE_TTY:
                pri = PI_TTYLOW;
                break;
        case INTR_TYPE_BIO:
                /*
                 * XXX We need to refine this.  BSD/OS distinguishes
                 * between tape and disk priorities.
                 */
                pri = PI_DISK;
                break;
        case INTR_TYPE_NET:
                pri = PI_NET;
                break;
        case INTR_TYPE_CAM:
                pri = PI_DISK;          /* XXX or PI_CAM? */
                break;
        case INTR_TYPE_AV:              /* Audio/video */
                pri = PI_AV;
                break;
        case INTR_TYPE_CLK:
                pri = PI_REALTIME;
                break;
        case INTR_TYPE_MISC:
                pri = PI_DULL;          /* don't care */
                break;
        default:
                /* We didn't specify an interrupt level. */
                panic("intr_priority: no interrupt type in flags");
        }

        return pri;
}
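/*
 * For example, a network driver that registers its handler with
 * INTR_TYPE_NET (typically via bus_setup_intr(9)) gets an ithread running
 * at PI_NET, while INTR_TYPE_CLK handlers run at PI_REALTIME.
 */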
/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
        struct intr_event *ie;
        struct thread *td;
        u_char pri;

        ie = ithd->it_event;
        td = ithd->it_thread;

        /* Determine the overall priority of this event. */
        if (TAILQ_EMPTY(&ie->ie_handlers))
                pri = PRI_MAX_ITHD;
        else
                pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;

        /* Update name and priority. */
        strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
        thread_lock(td);
        sched_prio(td, pri);
        thread_unlock(td);
}

/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
        struct intr_handler *ih;
        char *last;
        int missed, space;

        /* Start off with no entropy and just the name of the event. */
        mtx_assert(&ie->ie_lock, MA_OWNED);
        strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
        ie->ie_flags &= ~IE_ENTROPY;
        missed = 0;
        space = 1;

        /* Run through all the handlers updating values. */
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
                if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
                    sizeof(ie->ie_fullname)) {
                        strcat(ie->ie_fullname, " ");
                        strcat(ie->ie_fullname, ih->ih_name);
                        space = 0;
                } else
                        missed++;
                if (ih->ih_flags & IH_ENTROPY)
                        ie->ie_flags |= IE_ENTROPY;
        }

        /*
         * If the handler names were too long, add +'s to indicate missing
         * names.  If we run out of room and still have +'s to add, change
         * the last character from a + to a *.
         */
        last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
        while (missed-- > 0) {
                if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
                        if (*last == '+') {
                                *last = '*';
                                break;
                        } else
                                *last = '+';
                } else if (space) {
                        strcat(ie->ie_fullname, " +");
                        space = 0;
                } else
                        strcat(ie->ie_fullname, "+");
        }

        /*
         * If this event has an ithread, update its priority and
         * name.
         */
        if (ie->ie_thread != NULL)
                ithread_update(ie->ie_thread);
        CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}
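/*
 * For example, an event named "irq14:" with handlers "ata0" and "ata1"
 * attached ends up with the fullname "irq14: ata0 ata1", which is also
 * what shows up as the ithread's name in tools like top(1).
 */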
#ifndef INTR_FILTER
int
intr_event_create(struct intr_event **event, void *source, int flags,
    void (*enable)(void *), int (*assign_cpu)(void *, u_char), const char *fmt,
    ...)
{
        struct intr_event *ie;
        va_list ap;

        /* The only valid flag during creation is IE_SOFT. */
        if ((flags & ~IE_SOFT) != 0)
                return (EINVAL);
        ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
        ie->ie_source = source;
        ie->ie_enable = enable;
        ie->ie_assign_cpu = assign_cpu;
        ie->ie_flags = flags;
        ie->ie_cpu = NOCPU;
        TAILQ_INIT(&ie->ie_handlers);
        mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

        va_start(ap, fmt);
        vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
        va_end(ap);
        strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
        mtx_pool_lock(mtxpool_sleep, &event_list);
        TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
        mtx_pool_unlock(mtxpool_sleep, &event_list);
        if (event != NULL)
                *event = ie;
        CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
        return (0);
}
#else
int
intr_event_create(struct intr_event **event, void *source, int flags,
    void (*enable)(void *), void (*eoi)(void *), void (*disab)(void *),
    int (*assign_cpu)(void *, u_char), const char *fmt, ...)
{
        struct intr_event *ie;
        va_list ap;

        /* The only valid flag during creation is IE_SOFT. */
        if ((flags & ~IE_SOFT) != 0)
                return (EINVAL);
        ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
        ie->ie_source = source;
        ie->ie_enable = enable;
        ie->ie_assign_cpu = assign_cpu;
        ie->ie_eoi = eoi;
        ie->ie_disab = disab;
        ie->ie_flags = flags;
        ie->ie_cpu = NOCPU;
        TAILQ_INIT(&ie->ie_handlers);
        mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

        va_start(ap, fmt);
        vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
        va_end(ap);
        strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
        mtx_pool_lock(mtxpool_sleep, &event_list);
        TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
        mtx_pool_unlock(mtxpool_sleep, &event_list);
        if (event != NULL)
                *event = ie;
        CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
        return (0);
}
#endif
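/*
 * A minimal sketch of how machine-dependent interrupt code might create
 * an event for a hardware interrupt source (the names "isrc",
 * "my_pic_enable_source" and "my_pic_assign_cpu" are hypothetical, not
 * part of this file), using the non-INTR_FILTER signature above:
 *
 *      error = intr_event_create(&isrc->is_event, isrc, 0,
 *          my_pic_enable_source, my_pic_assign_cpu, "irq%d:", irq);
 *
 * The enable and assign_cpu callbacks are later invoked with ie_source
 * (here "isrc") as their first argument.
 */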
/*
 * Bind an interrupt event to the specified CPU.  Note that not all
 * platforms support binding an interrupt to a CPU.  For those
 * platforms this request will fail.  For supported platforms, any
 * associated ithreads as well as the primary interrupt context will
 * be bound to the specified CPU.  Using a cpu id of NOCPU unbinds
 * the interrupt event.
 */
int
intr_event_bind(struct intr_event *ie, u_char cpu)
{
        struct thread *td;
        int error;

        /* Need a CPU to bind to. */
        if (cpu != NOCPU && CPU_ABSENT(cpu))
                return (EINVAL);

        if (ie->ie_assign_cpu == NULL)
                return (EOPNOTSUPP);

        /* Don't allow a bind request if the interrupt is already bound. */
        mtx_lock(&ie->ie_lock);
        if (ie->ie_cpu != NOCPU && cpu != NOCPU) {
                mtx_unlock(&ie->ie_lock);
                return (EBUSY);
        }
        mtx_unlock(&ie->ie_lock);

        error = ie->ie_assign_cpu(ie->ie_source, cpu);
        if (error)
                return (error);
        mtx_lock(&ie->ie_lock);
        if (ie->ie_thread != NULL)
                td = ie->ie_thread->it_thread;
        else
                td = NULL;
        if (td != NULL)
                thread_lock(td);
        ie->ie_cpu = cpu;
        if (td != NULL)
                thread_unlock(td);
        mtx_unlock(&ie->ie_lock);
        return (0);
}

int
intr_event_destroy(struct intr_event *ie)
{

        mtx_lock(&ie->ie_lock);
        if (!TAILQ_EMPTY(&ie->ie_handlers)) {
                mtx_unlock(&ie->ie_lock);
                return (EBUSY);
        }
        mtx_pool_lock(mtxpool_sleep, &event_list);
        TAILQ_REMOVE(&event_list, ie, ie_list);
        mtx_pool_unlock(mtxpool_sleep, &event_list);
#ifndef notyet
        if (ie->ie_thread != NULL) {
                ithread_destroy(ie->ie_thread);
                ie->ie_thread = NULL;
        }
#endif
        mtx_unlock(&ie->ie_lock);
        mtx_destroy(&ie->ie_lock);
        free(ie, M_ITHREAD);
        return (0);
}
#ifndef INTR_FILTER
static struct intr_thread *
ithread_create(const char *name)
{
        struct intr_thread *ithd;
        struct thread *td;
        int error;

        ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

        error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
            &td, RFSTOPPED | RFHIGHPID,
            0, "intr", "%s", name);
        if (error)
                panic("kproc_create() failed with %d", error);
        thread_lock(td);
        sched_class(td, PRI_ITHD);
        TD_SET_IWAIT(td);
        thread_unlock(td);
        td->td_pflags |= TDP_ITHREAD;
        ithd->it_thread = td;
        CTR2(KTR_INTR, "%s: created %s", __func__, name);
        return (ithd);
}
#else
static struct intr_thread *
ithread_create(const char *name, struct intr_handler *ih)
{
        struct intr_thread *ithd;
        struct thread *td;
        int error;

        ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

        error = kproc_kthread_add(ithread_loop, ih, &intrproc,
            &td, RFSTOPPED | RFHIGHPID,
            0, "intr", "%s", name);
        if (error)
                panic("kproc_create() failed with %d", error);
        thread_lock(td);
        sched_class(td, PRI_ITHD);
        TD_SET_IWAIT(td);
        thread_unlock(td);
        td->td_pflags |= TDP_ITHREAD;
        ithd->it_thread = td;
        CTR2(KTR_INTR, "%s: created %s", __func__, name);
        return (ithd);
}
#endif

static void
ithread_destroy(struct intr_thread *ithread)
{
        struct thread *td;

        CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
        td = ithread->it_thread;
        thread_lock(td);
        ithread->it_flags |= IT_DEAD;
        if (TD_AWAITING_INTR(td)) {
                TD_CLR_IWAIT(td);
                sched_add(td, SRQ_INTR);
        }
        thread_unlock(td);
}

#ifndef INTR_FILTER
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
        struct intr_handler *ih, *temp_ih;
        struct intr_thread *it;

        if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
                return (EINVAL);

        /* Allocate and populate an interrupt handler structure. */
        ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
        ih->ih_filter = filter;
        ih->ih_handler = handler;
        ih->ih_argument = arg;
        ih->ih_name = name;
        ih->ih_event = ie;
        ih->ih_pri = pri;
        if (flags & INTR_EXCL)
                ih->ih_flags = IH_EXCLUSIVE;
        if (flags & INTR_MPSAFE)
                ih->ih_flags |= IH_MPSAFE;
        if (flags & INTR_ENTROPY)
                ih->ih_flags |= IH_ENTROPY;

        /* We can only have one exclusive handler in an event. */
        mtx_lock(&ie->ie_lock);
        if (!TAILQ_EMPTY(&ie->ie_handlers)) {
                if ((flags & INTR_EXCL) ||
                    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
                        mtx_unlock(&ie->ie_lock);
                        free(ih, M_ITHREAD);
                        return (EINVAL);
                }
        }

        /* Add the new handler to the event in priority order. */
        TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
                if (temp_ih->ih_pri > ih->ih_pri)
                        break;
        }
        if (temp_ih == NULL)
                TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
        else
                TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
        intr_event_update(ie);

        /* Create a thread if we need one. */
        while (ie->ie_thread == NULL && handler != NULL) {
                if (ie->ie_flags & IE_ADDING_THREAD)
                        msleep(ie, &ie->ie_lock, 0, "ithread", 0);
                else {
                        ie->ie_flags |= IE_ADDING_THREAD;
                        mtx_unlock(&ie->ie_lock);
                        it = ithread_create("intr: newborn");
                        mtx_lock(&ie->ie_lock);
                        ie->ie_flags &= ~IE_ADDING_THREAD;
                        ie->ie_thread = it;
                        it->it_event = ie;
                        ithread_update(it);
                        wakeup(ie);
                }
        }
        CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
            ie->ie_name);
        mtx_unlock(&ie->ie_lock);

        if (cookiep != NULL)
                *cookiep = ih;
        return (0);
}
#else
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
        struct intr_handler *ih, *temp_ih;
        struct intr_thread *it;

        if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
                return (EINVAL);

        /* Allocate and populate an interrupt handler structure. */
        ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
        ih->ih_filter = filter;
        ih->ih_handler = handler;
        ih->ih_argument = arg;
        ih->ih_name = name;
        ih->ih_event = ie;
        ih->ih_pri = pri;
        if (flags & INTR_EXCL)
                ih->ih_flags = IH_EXCLUSIVE;
        if (flags & INTR_MPSAFE)
                ih->ih_flags |= IH_MPSAFE;
        if (flags & INTR_ENTROPY)
                ih->ih_flags |= IH_ENTROPY;

        /* We can only have one exclusive handler in an event. */
        mtx_lock(&ie->ie_lock);
        if (!TAILQ_EMPTY(&ie->ie_handlers)) {
                if ((flags & INTR_EXCL) ||
                    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
                        mtx_unlock(&ie->ie_lock);
                        free(ih, M_ITHREAD);
                        return (EINVAL);
                }
        }

        /* Add the new handler to the event in priority order. */
        TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
                if (temp_ih->ih_pri > ih->ih_pri)
                        break;
        }
        if (temp_ih == NULL)
                TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
        else
                TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
        intr_event_update(ie);

        /* For filtered handlers, create a private ithread to run on. */
        if (filter != NULL && handler != NULL) {
                mtx_unlock(&ie->ie_lock);
                it = ithread_create("intr: newborn", ih);
                mtx_lock(&ie->ie_lock);
                it->it_event = ie;
                ih->ih_thread = it;
                ithread_update(it); // XXX - do we really need this?!?!?
        } else { /* Create the global per-event thread if we need one. */
                while (ie->ie_thread == NULL && handler != NULL) {
                        if (ie->ie_flags & IE_ADDING_THREAD)
                                msleep(ie, &ie->ie_lock, 0, "ithread", 0);
                        else {
                                ie->ie_flags |= IE_ADDING_THREAD;
                                mtx_unlock(&ie->ie_lock);
                                it = ithread_create("intr: newborn", ih);
                                mtx_lock(&ie->ie_lock);
                                ie->ie_flags &= ~IE_ADDING_THREAD;
                                ie->ie_thread = it;
                                it->it_event = ie;
                                ithread_update(it);
                                wakeup(ie);
                        }
                }
        }
        CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
            ie->ie_name);
        mtx_unlock(&ie->ie_lock);

        if (cookiep != NULL)
                *cookiep = ih;
        return (0);
}
#endif

/*
 * Return the ie_source field from the intr_event an intr_handler is
 * associated with.
 */
void *
intr_handler_source(void *cookie)
{
        struct intr_handler *ih;
        struct intr_event *ie;

        ih = (struct intr_handler *)cookie;
        if (ih == NULL)
                return (NULL);
        ie = ih->ih_event;
        KASSERT(ie != NULL,
            ("interrupt handler \"%s\" has a NULL interrupt event",
            ih->ih_name));
        return (ie->ie_source);
}

#ifndef INTR_FILTER
int
intr_event_remove_handler(void *cookie)
{
        struct intr_handler *handler = (struct intr_handler *)cookie;
        struct intr_event *ie;
#ifdef INVARIANTS
        struct intr_handler *ih;
#endif
#ifdef notyet
        int dead;
#endif

        if (handler == NULL)
                return (EINVAL);
        ie = handler->ih_event;
        KASSERT(ie != NULL,
            ("interrupt handler \"%s\" has a NULL interrupt event",
            handler->ih_name));
        mtx_lock(&ie->ie_lock);
        CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
            ie->ie_name);
#ifdef INVARIANTS
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
                if (ih == handler)
                        goto ok;
        mtx_unlock(&ie->ie_lock);
        panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
            ih->ih_name, ie->ie_name);
ok:
#endif
        /*
         * If there is no ithread, then just remove the handler and return.
         * XXX: Note that an INTR_FAST handler might be running on another
         * CPU!
         */
        if (ie->ie_thread == NULL) {
                TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
                mtx_unlock(&ie->ie_lock);
                free(handler, M_ITHREAD);
                return (0);
        }

        /*
         * If the interrupt thread is already running, then just mark this
         * handler as being dead and let the ithread do the actual removal.
         *
         * During a cold boot while cold is set, msleep() does not sleep,
         * so we have to remove the handler here rather than letting the
         * thread do it.
         */
        thread_lock(ie->ie_thread->it_thread);
        if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
                handler->ih_flags |= IH_DEAD;

                /*
                 * Ensure that the thread will process the handler list
                 * again and remove this handler if it has already passed
                 * it on the list.
                 */
                ie->ie_thread->it_need = 1;
        } else
                TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
        thread_unlock(ie->ie_thread->it_thread);
        while (handler->ih_flags & IH_DEAD)
                msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
        intr_event_update(ie);
#ifdef notyet
        /*
         * XXX: This could be bad in the case of ppbus(8).  Also, I think
         * this could lead to races of stale data when servicing an
         * interrupt.
         */
        dead = 1;
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
                if (!(ih->ih_flags & IH_FAST)) {
                        dead = 0;
                        break;
                }
        }
        if (dead) {
                ithread_destroy(ie->ie_thread);
                ie->ie_thread = NULL;
        }
#endif
        mtx_unlock(&ie->ie_lock);
        free(handler, M_ITHREAD);
        return (0);
}
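/*
 * intr_event_schedule_thread() is what the machine-dependent interrupt
 * dispatch code calls once it decides a threaded handler has to run: it
 * sets it_need and, if the ithread is idle in TD_AWAITING_INTR, puts it
 * on the run queue.
 */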
int
intr_event_schedule_thread(struct intr_event *ie)
{
        struct intr_entropy entropy;
        struct intr_thread *it;
        struct thread *td;
        struct thread *ctd;
        struct proc *p;

        /*
         * If no ithread or no handlers, then we have a stray interrupt.
         */
        if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
            ie->ie_thread == NULL)
                return (EINVAL);

        ctd = curthread;
        it = ie->ie_thread;
        td = it->it_thread;
        p = td->td_proc;

        /*
         * If any of the handlers for this ithread claim to be good
         * sources of entropy, then gather some.
         */
        if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
                CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
                    p->p_pid, td->td_name);
                entropy.event = (uintptr_t)ie;
                entropy.td = ctd;
                random_harvest(&entropy, sizeof(entropy), 2, 0,
                    RANDOM_INTERRUPT);
        }

        KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

        /*
         * Set it_need to tell the thread to keep running if it is already
         * running.  Then, lock the thread and see if we actually need to
         * put it on the runqueue.
         */
        it->it_need = 1;
        thread_lock(td);
        if (TD_AWAITING_INTR(td)) {
                CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
                    td->td_name);
                TD_CLR_IWAIT(td);
                sched_add(td, SRQ_INTR);
        } else {
                CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
                    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
        }
        thread_unlock(td);

        return (0);
}
#else
int
intr_event_remove_handler(void *cookie)
{
        struct intr_handler *handler = (struct intr_handler *)cookie;
        struct intr_event *ie;
        struct intr_thread *it;
#ifdef INVARIANTS
        struct intr_handler *ih;
#endif
#ifdef notyet
        int dead;
#endif

        if (handler == NULL)
                return (EINVAL);
        ie = handler->ih_event;
        KASSERT(ie != NULL,
            ("interrupt handler \"%s\" has a NULL interrupt event",
            handler->ih_name));
        mtx_lock(&ie->ie_lock);
        CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
            ie->ie_name);
#ifdef INVARIANTS
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
                if (ih == handler)
                        goto ok;
        mtx_unlock(&ie->ie_lock);
        panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
            ih->ih_name, ie->ie_name);
ok:
#endif
        /*
         * If there are no ithreads (per event and per handler), then
         * just remove the handler and return.
         * XXX: Note that an INTR_FAST handler might be running on another CPU!
         */
        if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
                TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
                mtx_unlock(&ie->ie_lock);
                free(handler, M_ITHREAD);
                return (0);
        }

        /* Private or global ithread? */
        it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;
        /*
         * If the interrupt thread is already running, then just mark this
         * handler as being dead and let the ithread do the actual removal.
         *
         * During a cold boot while cold is set, msleep() does not sleep,
         * so we have to remove the handler here rather than letting the
         * thread do it.
         */
        thread_lock(it->it_thread);
        if (!TD_AWAITING_INTR(it->it_thread) && !cold) {
                handler->ih_flags |= IH_DEAD;

                /*
                 * Ensure that the thread will process the handler list
                 * again and remove this handler if it has already passed
                 * it on the list.
                 */
                it->it_need = 1;
        } else
                TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
        thread_unlock(it->it_thread);
        while (handler->ih_flags & IH_DEAD)
                msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
        /*
         * At this point, the handler has been disconnected from the event,
         * so we can kill the private ithread if any.
         */
        if (handler->ih_thread) {
                ithread_destroy(handler->ih_thread);
                handler->ih_thread = NULL;
        }
        intr_event_update(ie);
#ifdef notyet
        /*
         * XXX: This could be bad in the case of ppbus(8).  Also, I think
         * this could lead to races of stale data when servicing an
         * interrupt.
         */
        dead = 1;
        TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
                if (handler != NULL) {
                        dead = 0;
                        break;
                }
        }
        if (dead) {
                ithread_destroy(ie->ie_thread);
                ie->ie_thread = NULL;
        }
#endif
        mtx_unlock(&ie->ie_lock);
        free(handler, M_ITHREAD);
        return (0);
}

int
intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
{
        struct intr_entropy entropy;
        struct thread *td;
        struct thread *ctd;
        struct proc *p;

        /*
         * If no ithread or no handlers, then we have a stray interrupt.
         */
        if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
                return (EINVAL);

        ctd = curthread;
        td = it->it_thread;
        p = td->td_proc;

        /*
         * If any of the handlers for this ithread claim to be good
         * sources of entropy, then gather some.
         */
        if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
                CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
                    p->p_pid, td->td_name);
                entropy.event = (uintptr_t)ie;
                entropy.td = ctd;
                random_harvest(&entropy, sizeof(entropy), 2, 0,
                    RANDOM_INTERRUPT);
        }

        KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

        /*
         * Set it_need to tell the thread to keep running if it is already
         * running.  Then, lock the thread and see if we actually need to
         * put it on the runqueue.
         */
        it->it_need = 1;
        thread_lock(td);
        if (TD_AWAITING_INTR(td)) {
                CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
                    td->td_name);
                TD_CLR_IWAIT(td);
                sched_add(td, SRQ_INTR);
        } else {
                CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
                    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
        }
        thread_unlock(td);

        return (0);
}
#endif
/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
    void *arg, int pri, enum intr_type flags, void **cookiep)
{
        struct intr_event *ie;
        int error;

        if (flags & INTR_ENTROPY)
                return (EINVAL);

        ie = (eventp != NULL) ? *eventp : NULL;

        if (ie != NULL) {
                if (!(ie->ie_flags & IE_SOFT))
                        return (EINVAL);
        } else {
#ifdef INTR_FILTER
                error = intr_event_create(&ie, NULL, IE_SOFT,
                    NULL, NULL, NULL, NULL, "swi%d:", pri);
#else
                error = intr_event_create(&ie, NULL, IE_SOFT,
                    NULL, NULL, "swi%d:", pri);
#endif
                if (error)
                        return (error);
                if (eventp != NULL)
                        *eventp = ie;
        }
        return (intr_event_add_handler(ie, name, NULL, handler, arg,
            (pri * RQ_PPQ) + PI_SOFT, flags, cookiep));
}

/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
        struct intr_handler *ih = (struct intr_handler *)cookie;
        struct intr_event *ie = ih->ih_event;
        int error;

        CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
            ih->ih_need);

        /*
         * Set ih_need for this handler so that if the ithread is already
         * running it will execute this handler on the next pass.  Otherwise,
         * it will execute it the next time it runs.
         */
        atomic_store_rel_int(&ih->ih_need, 1);

        if (!(flags & SWI_DELAY)) {
                PCPU_INC(cnt.v_soft);
#ifdef INTR_FILTER
                error = intr_event_schedule_thread(ie, ie->ie_thread);
#else
                error = intr_event_schedule_thread(ie);
#endif
                KASSERT(error == 0, ("stray software interrupt"));
        }
}
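/*
 * Typical driver usage of the two functions above (a sketch only; the
 * names "foo_swi", "sc" and the SWI_TTY priority are illustrative and
 * not part of this file):
 *
 *      swi_add(NULL, "foo", foo_swi, sc, SWI_TTY, 0, &sc->swi_cookie);
 *      ...
 *      swi_sched(sc->swi_cookie, 0);
 */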
/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

        return (intr_event_remove_handler(cookie));
}

#ifdef INTR_FILTER
static void
priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih)
{
        struct intr_event *ie;

        ie = ih->ih_event;
        /*
         * If this handler is marked for death, remove it from
         * the list of handlers and wake up the sleeper.
         */
        if (ih->ih_flags & IH_DEAD) {
                mtx_lock(&ie->ie_lock);
                TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
                ih->ih_flags &= ~IH_DEAD;
                wakeup(ih);
                mtx_unlock(&ie->ie_lock);
                return;
        }

        /* Execute this handler. */
        CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
             __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
             ih->ih_name, ih->ih_flags);

        if (!(ih->ih_flags & IH_MPSAFE))
                mtx_lock(&Giant);
        ih->ih_handler(ih->ih_argument);
        if (!(ih->ih_flags & IH_MPSAFE))
                mtx_unlock(&Giant);
}
#endif

static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{
        struct intr_handler *ih, *ihn;

        /* Interrupt handlers should not sleep. */
        if (!(ie->ie_flags & IE_SOFT))
                THREAD_NO_SLEEPING();
        TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {

                /*
                 * If this handler is marked for death, remove it from
                 * the list of handlers and wake up the sleeper.
                 */
                if (ih->ih_flags & IH_DEAD) {
                        mtx_lock(&ie->ie_lock);
                        TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
                        ih->ih_flags &= ~IH_DEAD;
                        wakeup(ih);
                        mtx_unlock(&ie->ie_lock);
                        continue;
                }

                /* Skip filter-only handlers. */
                if (ih->ih_handler == NULL)
                        continue;

                /*
                 * For software interrupt threads, we only execute
                 * handlers that have their need flag set.  Hardware
                 * interrupt threads always invoke all of their handlers.
                 */
                if (ie->ie_flags & IE_SOFT) {
                        if (!ih->ih_need)
                                continue;
                        else
                                atomic_store_rel_int(&ih->ih_need, 0);
                }

                /* Execute this handler. */
                CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
                    __func__, p->p_pid, (void *)ih->ih_handler,
                    ih->ih_argument, ih->ih_name, ih->ih_flags);

                if (!(ih->ih_flags & IH_MPSAFE))
                        mtx_lock(&Giant);
                ih->ih_handler(ih->ih_argument);
                if (!(ih->ih_flags & IH_MPSAFE))
                        mtx_unlock(&Giant);
        }
        if (!(ie->ie_flags & IE_SOFT))
                THREAD_SLEEPING_OK();

        /*
         * Interrupt storm handling:
         *
         * If this interrupt source is currently storming, then throttle
         * it to only fire the handler once per clock tick.
         *
         * If this interrupt source is not currently storming, but the
         * number of back to back interrupts exceeds the storm threshold,
         * then enter storming mode.
         */
        if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
            !(ie->ie_flags & IE_SOFT)) {
                /* Report the message only once every second. */
                if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
                        printf(
        "interrupt storm detected on \"%s\"; throttling interrupt source\n",
                            ie->ie_name);
                }
                pause("istorm", 1);
        } else
                ie->ie_count++;

        /*
         * Now that all the handlers have had a chance to run, reenable
         * the interrupt source.
         */
        if (ie->ie_enable != NULL)
                ie->ie_enable(ie->ie_source);
}
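/*
 * Note that ie_count is cleared by ithread_loop() each time the ithread
 * goes back to sleep, so the storm threshold above only counts
 * back-to-back interrupts with no idle period in between.
 */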
O'Brien ("%s: ithread and proc linkage out of sync", __func__)); 1140e0f66ef8SJohn Baldwin ie = ithd->it_event; 1141e0f66ef8SJohn Baldwin ie->ie_count = 0; 1142eaf86d16SJohn Baldwin cpu = NOCPU; 11438088699fSJohn Baldwin 11448088699fSJohn Baldwin /* 11458088699fSJohn Baldwin * As long as we have interrupts outstanding, go through the 11468088699fSJohn Baldwin * list of handlers, giving each one a go at it. 11478088699fSJohn Baldwin */ 11488088699fSJohn Baldwin for (;;) { 1149b4151f71SJohn Baldwin /* 1150b4151f71SJohn Baldwin * If we are an orphaned thread, then just die. 1151b4151f71SJohn Baldwin */ 1152b4151f71SJohn Baldwin if (ithd->it_flags & IT_DEAD) { 1153e0f66ef8SJohn Baldwin CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__, 11547ab24ea3SJulian Elischer p->p_pid, td->td_name); 1155b4151f71SJohn Baldwin free(ithd, M_ITHREAD); 1156ca9a0ddfSJulian Elischer kthread_exit(); 1157b4151f71SJohn Baldwin } 1158b4151f71SJohn Baldwin 1159e0f66ef8SJohn Baldwin /* 1160e0f66ef8SJohn Baldwin * Service interrupts. If another interrupt arrives while 1161e0f66ef8SJohn Baldwin * we are running, it will set it_need to note that we 1162e0f66ef8SJohn Baldwin * should make another pass. 1163e0f66ef8SJohn Baldwin */ 1164b4151f71SJohn Baldwin while (ithd->it_need) { 11658088699fSJohn Baldwin /* 1166e0f66ef8SJohn Baldwin * This might need a full read and write barrier 1167e0f66ef8SJohn Baldwin * to make sure that this write posts before any 1168e0f66ef8SJohn Baldwin * of the memory or device accesses in the 1169e0f66ef8SJohn Baldwin * handlers. 11708088699fSJohn Baldwin */ 1171b4151f71SJohn Baldwin atomic_store_rel_int(&ithd->it_need, 0); 1172e0f66ef8SJohn Baldwin ithread_execute_handlers(p, ie); 11738088699fSJohn Baldwin } 11747870c3c6SJohn Baldwin WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread"); 11757870c3c6SJohn Baldwin mtx_assert(&Giant, MA_NOTOWNED); 11768088699fSJohn Baldwin 11778088699fSJohn Baldwin /* 11788088699fSJohn Baldwin * Processed all our interrupts. Now get the sched 11798088699fSJohn Baldwin * lock. This may take a while and it_need may get 11808088699fSJohn Baldwin * set again, so we have to check it again. 11818088699fSJohn Baldwin */ 1182982d11f8SJeff Roberson thread_lock(td); 1183e0f66ef8SJohn Baldwin if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) { 11847870c3c6SJohn Baldwin TD_SET_IWAIT(td); 1185e0f66ef8SJohn Baldwin ie->ie_count = 0; 1186bf0acc27SJohn Baldwin mi_switch(SW_VOL, NULL); 11878088699fSJohn Baldwin } 1188eaf86d16SJohn Baldwin 1189eaf86d16SJohn Baldwin #ifdef SMP 1190eaf86d16SJohn Baldwin /* 1191eaf86d16SJohn Baldwin * Ensure we are bound to the correct CPU. We can't 1192eaf86d16SJohn Baldwin * move ithreads until SMP is running however, so just 1193eaf86d16SJohn Baldwin * leave interrupts on the boor CPU during boot. 1194eaf86d16SJohn Baldwin */ 1195eaf86d16SJohn Baldwin if (ie->ie_cpu != cpu && smp_started) { 1196eaf86d16SJohn Baldwin cpu = ie->ie_cpu; 1197eaf86d16SJohn Baldwin if (cpu == NOCPU) 1198eaf86d16SJohn Baldwin sched_unbind(td); 1199eaf86d16SJohn Baldwin else 1200eaf86d16SJohn Baldwin sched_bind(td, cpu); 1201eaf86d16SJohn Baldwin } 1202eaf86d16SJohn Baldwin #endif 1203982d11f8SJeff Roberson thread_unlock(td); 12048088699fSJohn Baldwin } 12051931cf94SJohn Baldwin } 1206bafe5a31SPaolo Pisati #else 1207bafe5a31SPaolo Pisati /* 1208bafe5a31SPaolo Pisati * This is the main code for interrupt threads. 
1209bafe5a31SPaolo Pisati */ 1210bafe5a31SPaolo Pisati static void 1211bafe5a31SPaolo Pisati ithread_loop(void *arg) 1212bafe5a31SPaolo Pisati { 1213bafe5a31SPaolo Pisati struct intr_thread *ithd; 1214bafe5a31SPaolo Pisati struct intr_handler *ih; 1215bafe5a31SPaolo Pisati struct intr_event *ie; 1216bafe5a31SPaolo Pisati struct thread *td; 1217bafe5a31SPaolo Pisati struct proc *p; 1218bafe5a31SPaolo Pisati int priv; 1219eaf86d16SJohn Baldwin u_char cpu; 1220bafe5a31SPaolo Pisati 1221bafe5a31SPaolo Pisati td = curthread; 1222bafe5a31SPaolo Pisati p = td->td_proc; 1223bafe5a31SPaolo Pisati ih = (struct intr_handler *)arg; 1224bafe5a31SPaolo Pisati priv = (ih->ih_thread != NULL) ? 1 : 0; 1225bafe5a31SPaolo Pisati ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread; 1226bafe5a31SPaolo Pisati KASSERT(ithd->it_thread == td, 1227bafe5a31SPaolo Pisati ("%s: ithread and proc linkage out of sync", __func__)); 1228bafe5a31SPaolo Pisati ie = ithd->it_event; 1229bafe5a31SPaolo Pisati ie->ie_count = 0; 1230eaf86d16SJohn Baldwin cpu = NOCPU; 1231bafe5a31SPaolo Pisati 1232bafe5a31SPaolo Pisati /* 1233bafe5a31SPaolo Pisati * As long as we have interrupts outstanding, go through the 1234bafe5a31SPaolo Pisati * list of handlers, giving each one a go at it. 1235bafe5a31SPaolo Pisati */ 1236bafe5a31SPaolo Pisati for (;;) { 1237bafe5a31SPaolo Pisati /* 1238bafe5a31SPaolo Pisati * If we are an orphaned thread, then just die. 1239bafe5a31SPaolo Pisati */ 1240bafe5a31SPaolo Pisati if (ithd->it_flags & IT_DEAD) { 1241bafe5a31SPaolo Pisati CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__, 12427ab24ea3SJulian Elischer p->p_pid, td->td_name); 1243bafe5a31SPaolo Pisati free(ithd, M_ITHREAD); 1244ca9a0ddfSJulian Elischer kthread_exit(); 1245bafe5a31SPaolo Pisati } 1246bafe5a31SPaolo Pisati 1247bafe5a31SPaolo Pisati /* 1248bafe5a31SPaolo Pisati * Service interrupts. If another interrupt arrives while 1249bafe5a31SPaolo Pisati * we are running, it will set it_need to note that we 1250bafe5a31SPaolo Pisati * should make another pass. 1251bafe5a31SPaolo Pisati */ 1252bafe5a31SPaolo Pisati while (ithd->it_need) { 1253bafe5a31SPaolo Pisati /* 1254bafe5a31SPaolo Pisati * This might need a full read and write barrier 1255bafe5a31SPaolo Pisati * to make sure that this write posts before any 1256bafe5a31SPaolo Pisati * of the memory or device accesses in the 1257bafe5a31SPaolo Pisati * handlers. 1258bafe5a31SPaolo Pisati */ 1259bafe5a31SPaolo Pisati atomic_store_rel_int(&ithd->it_need, 0); 1260bafe5a31SPaolo Pisati if (priv) 1261bafe5a31SPaolo Pisati priv_ithread_execute_handler(p, ih); 1262bafe5a31SPaolo Pisati else 1263bafe5a31SPaolo Pisati ithread_execute_handlers(p, ie); 1264bafe5a31SPaolo Pisati } 1265bafe5a31SPaolo Pisati WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread"); 1266bafe5a31SPaolo Pisati mtx_assert(&Giant, MA_NOTOWNED); 1267bafe5a31SPaolo Pisati 1268bafe5a31SPaolo Pisati /* 1269bafe5a31SPaolo Pisati * Processed all our interrupts. Now get the sched 1270bafe5a31SPaolo Pisati * lock. This may take a while and it_need may get 1271bafe5a31SPaolo Pisati * set again, so we have to check it again. 
1272bafe5a31SPaolo Pisati */ 1273982d11f8SJeff Roberson thread_lock(td); 1274bafe5a31SPaolo Pisati if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) { 1275bafe5a31SPaolo Pisati TD_SET_IWAIT(td); 1276bafe5a31SPaolo Pisati ie->ie_count = 0; 1277bafe5a31SPaolo Pisati mi_switch(SW_VOL, NULL); 1278bafe5a31SPaolo Pisati } 1279eaf86d16SJohn Baldwin 1280eaf86d16SJohn Baldwin #ifdef SMP 1281eaf86d16SJohn Baldwin /* 1282eaf86d16SJohn Baldwin * Ensure we are bound to the correct CPU. We can't 1283eaf86d16SJohn Baldwin * move ithreads until SMP is running however, so just 1284eaf86d16SJohn Baldwin * leave interrupts on the boot CPU during boot. 1285eaf86d16SJohn Baldwin */ 1286eaf86d16SJohn Baldwin if (!priv && ie->ie_cpu != cpu && smp_started) { 1287eaf86d16SJohn Baldwin cpu = ie->ie_cpu; 1288eaf86d16SJohn Baldwin if (cpu == NOCPU) 1289eaf86d16SJohn Baldwin sched_unbind(td); 1290eaf86d16SJohn Baldwin else 1291eaf86d16SJohn Baldwin sched_bind(td, cpu); 1292eaf86d16SJohn Baldwin } 1293eaf86d16SJohn Baldwin #endif 1294982d11f8SJeff Roberson thread_unlock(td); 1295bafe5a31SPaolo Pisati } 1296bafe5a31SPaolo Pisati } 1297bafe5a31SPaolo Pisati 1298bafe5a31SPaolo Pisati /* 1299bafe5a31SPaolo Pisati * Main loop for interrupt filter. 1300bafe5a31SPaolo Pisati * 1301bafe5a31SPaolo Pisati * Some architectures (i386, amd64 and arm) require the optional frame 1302bafe5a31SPaolo Pisati * parameter, and use it as the main argument for fast handler execution 1303bafe5a31SPaolo Pisati * when ih_argument == NULL. 1304bafe5a31SPaolo Pisati * 1305bafe5a31SPaolo Pisati * Return value: 1306bafe5a31SPaolo Pisati * o FILTER_STRAY: No filter recognized the event, and no 1307bafe5a31SPaolo Pisati * filter-less handler is registered on this 1308bafe5a31SPaolo Pisati * line. 1309bafe5a31SPaolo Pisati * o FILTER_HANDLED: A filter claimed the event and served it. 1310bafe5a31SPaolo Pisati * o FILTER_SCHEDULE_THREAD: No filter claimed the event, but there's at 1311bafe5a31SPaolo Pisati * least one filter-less handler on this line. 1312bafe5a31SPaolo Pisati * o FILTER_HANDLED | 1313bafe5a31SPaolo Pisati * FILTER_SCHEDULE_THREAD: A filter claimed the event, and asked for 1314bafe5a31SPaolo Pisati * scheduling the per-handler ithread. 1315bafe5a31SPaolo Pisati * 1316bafe5a31SPaolo Pisati * In case an ithread has to be scheduled, in *ithd there will be a 1317bafe5a31SPaolo Pisati * pointer to a struct intr_thread containing the thread to be 1318bafe5a31SPaolo Pisati * scheduled. 1319bafe5a31SPaolo Pisati */ 1320bafe5a31SPaolo Pisati 1321bafe5a31SPaolo Pisati int 1322bafe5a31SPaolo Pisati intr_filter_loop(struct intr_event *ie, struct trapframe *frame, 1323bafe5a31SPaolo Pisati struct intr_thread **ithd) 1324bafe5a31SPaolo Pisati { 1325bafe5a31SPaolo Pisati struct intr_handler *ih; 1326bafe5a31SPaolo Pisati void *arg; 1327bafe5a31SPaolo Pisati int ret, thread_only; 1328bafe5a31SPaolo Pisati 1329bafe5a31SPaolo Pisati ret = 0; 1330bafe5a31SPaolo Pisati thread_only = 0; 1331bafe5a31SPaolo Pisati TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 1332bafe5a31SPaolo Pisati /* 1333bafe5a31SPaolo Pisati * Execute fast interrupt handlers directly. 1334bafe5a31SPaolo Pisati * To support clock handlers, if a handler registers 1335bafe5a31SPaolo Pisati * with a NULL argument, then we pass it a pointer to 1336bafe5a31SPaolo Pisati * a trapframe as its argument. 1337bafe5a31SPaolo Pisati */ 1338bafe5a31SPaolo Pisati arg = ((ih->ih_argument == NULL) ?
frame : ih->ih_argument); 1339bafe5a31SPaolo Pisati 1340bafe5a31SPaolo Pisati CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__, 1341bafe5a31SPaolo Pisati ih->ih_filter, ih->ih_handler, arg, ih->ih_name); 1342bafe5a31SPaolo Pisati 1343bafe5a31SPaolo Pisati if (ih->ih_filter != NULL) 1344bafe5a31SPaolo Pisati ret = ih->ih_filter(arg); 1345bafe5a31SPaolo Pisati else { 1346bafe5a31SPaolo Pisati thread_only = 1; 1347bafe5a31SPaolo Pisati continue; 1348bafe5a31SPaolo Pisati } 1349bafe5a31SPaolo Pisati 1350bafe5a31SPaolo Pisati if (ret & FILTER_STRAY) 1351bafe5a31SPaolo Pisati continue; 1352bafe5a31SPaolo Pisati else { 1353bafe5a31SPaolo Pisati *ithd = ih->ih_thread; 1354bafe5a31SPaolo Pisati return (ret); 1355bafe5a31SPaolo Pisati } 1356bafe5a31SPaolo Pisati } 1357bafe5a31SPaolo Pisati 1358bafe5a31SPaolo Pisati /* 1359bafe5a31SPaolo Pisati * No filters handled the interrupt and we have at least 1360bafe5a31SPaolo Pisati * one handler without a filter. In this case, we schedule 1361bafe5a31SPaolo Pisati * all of the filter-less handlers to run in the ithread. 1362bafe5a31SPaolo Pisati */ 1363bafe5a31SPaolo Pisati if (thread_only) { 1364bafe5a31SPaolo Pisati *ithd = ie->ie_thread; 1365bafe5a31SPaolo Pisati return (FILTER_SCHEDULE_THREAD); 1366bafe5a31SPaolo Pisati } 1367bafe5a31SPaolo Pisati return (FILTER_STRAY); 1368bafe5a31SPaolo Pisati } 1369bafe5a31SPaolo Pisati 1370bafe5a31SPaolo Pisati /* 1371bafe5a31SPaolo Pisati * Main interrupt handling body. 1372bafe5a31SPaolo Pisati * 1373bafe5a31SPaolo Pisati * Input: 1374bafe5a31SPaolo Pisati * o ie: the event connected to this interrupt. 1375bafe5a31SPaolo Pisati * o frame: some archs (e.g. i386) pass a frame to some 1376bafe5a31SPaolo Pisati * handlers as their main argument. 1377bafe5a31SPaolo Pisati * Return value: 1378bafe5a31SPaolo Pisati * o 0: everything ok. 1379bafe5a31SPaolo Pisati * o EINVAL: stray interrupt. 1380bafe5a31SPaolo Pisati */ 1381bafe5a31SPaolo Pisati int 1382bafe5a31SPaolo Pisati intr_event_handle(struct intr_event *ie, struct trapframe *frame) 1383bafe5a31SPaolo Pisati { 1384bafe5a31SPaolo Pisati struct intr_thread *ithd; 1385bafe5a31SPaolo Pisati struct thread *td; 1386bafe5a31SPaolo Pisati int thread; 1387bafe5a31SPaolo Pisati 1388bafe5a31SPaolo Pisati ithd = NULL; 1389bafe5a31SPaolo Pisati td = curthread; 1390bafe5a31SPaolo Pisati 1391bafe5a31SPaolo Pisati if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers)) 1392bafe5a31SPaolo Pisati return (EINVAL); 1393bafe5a31SPaolo Pisati 1394bafe5a31SPaolo Pisati td->td_intr_nesting_level++; 1395bafe5a31SPaolo Pisati thread = 0; 1396bafe5a31SPaolo Pisati critical_enter(); 1397bafe5a31SPaolo Pisati thread = intr_filter_loop(ie, frame, &ithd); 1398bafe5a31SPaolo Pisati 1399bafe5a31SPaolo Pisati /* 1400bafe5a31SPaolo Pisati * If the interrupt was fully served, send it an EOI but leave 1401bafe5a31SPaolo Pisati * it unmasked. Otherwise, mask the source as well as sending 1402bafe5a31SPaolo Pisati * it an EOI.
1403bafe5a31SPaolo Pisati */ 1404bafe5a31SPaolo Pisati if (thread & FILTER_HANDLED) { 1405bafe5a31SPaolo Pisati if (ie->ie_eoi != NULL) 1406bafe5a31SPaolo Pisati ie->ie_eoi(ie->ie_source); 1407bafe5a31SPaolo Pisati } else { 1408bafe5a31SPaolo Pisati if (ie->ie_disab != NULL) 1409bafe5a31SPaolo Pisati ie->ie_disab(ie->ie_source); 1410bafe5a31SPaolo Pisati } 1411bafe5a31SPaolo Pisati critical_exit(); 1412bafe5a31SPaolo Pisati 1413bafe5a31SPaolo Pisati /* Interrupt storm logic */ 1414bafe5a31SPaolo Pisati if (thread & FILTER_STRAY) { 1415bafe5a31SPaolo Pisati ie->ie_count++; 1416bafe5a31SPaolo Pisati if (ie->ie_count < intr_storm_threshold) 1417bafe5a31SPaolo Pisati printf("Interrupt stray detection not present\n"); 1418bafe5a31SPaolo Pisati } 1419bafe5a31SPaolo Pisati 1420bafe5a31SPaolo Pisati /* Schedule an ithread if needed. */ 1421bafe5a31SPaolo Pisati if (thread & FILTER_SCHEDULE_THREAD) { 1422bafe5a31SPaolo Pisati if (intr_event_schedule_thread(ie, ithd) != 0) 1423bafe5a31SPaolo Pisati panic("%s: impossible stray interrupt", __func__); 1424bafe5a31SPaolo Pisati } 1425bafe5a31SPaolo Pisati td->td_intr_nesting_level--; 1426bafe5a31SPaolo Pisati return (0); 1427bafe5a31SPaolo Pisati } 1428bafe5a31SPaolo Pisati #endif 14291931cf94SJohn Baldwin 14308b201c42SJohn Baldwin #ifdef DDB 14318b201c42SJohn Baldwin /* 14328b201c42SJohn Baldwin * Dump details about an interrupt handler 14338b201c42SJohn Baldwin */ 14348b201c42SJohn Baldwin static void 1435e0f66ef8SJohn Baldwin db_dump_intrhand(struct intr_handler *ih) 14368b201c42SJohn Baldwin { 14378b201c42SJohn Baldwin int comma; 14388b201c42SJohn Baldwin 14398b201c42SJohn Baldwin db_printf("\t%-10s ", ih->ih_name); 14408b201c42SJohn Baldwin switch (ih->ih_pri) { 14418b201c42SJohn Baldwin case PI_REALTIME: 14428b201c42SJohn Baldwin db_printf("CLK "); 14438b201c42SJohn Baldwin break; 14448b201c42SJohn Baldwin case PI_AV: 14458b201c42SJohn Baldwin db_printf("AV "); 14468b201c42SJohn Baldwin break; 14478b201c42SJohn Baldwin case PI_TTYHIGH: 14488b201c42SJohn Baldwin case PI_TTYLOW: 14498b201c42SJohn Baldwin db_printf("TTY "); 14508b201c42SJohn Baldwin break; 14518b201c42SJohn Baldwin case PI_TAPE: 14528b201c42SJohn Baldwin db_printf("TAPE"); 14538b201c42SJohn Baldwin break; 14548b201c42SJohn Baldwin case PI_NET: 14558b201c42SJohn Baldwin db_printf("NET "); 14568b201c42SJohn Baldwin break; 14578b201c42SJohn Baldwin case PI_DISK: 14588b201c42SJohn Baldwin case PI_DISKLOW: 14598b201c42SJohn Baldwin db_printf("DISK"); 14608b201c42SJohn Baldwin break; 14618b201c42SJohn Baldwin case PI_DULL: 14628b201c42SJohn Baldwin db_printf("DULL"); 14638b201c42SJohn Baldwin break; 14648b201c42SJohn Baldwin default: 14658b201c42SJohn Baldwin if (ih->ih_pri >= PI_SOFT) 14668b201c42SJohn Baldwin db_printf("SWI "); 14678b201c42SJohn Baldwin else 14688b201c42SJohn Baldwin db_printf("%4u", ih->ih_pri); 14698b201c42SJohn Baldwin break; 14708b201c42SJohn Baldwin } 14718b201c42SJohn Baldwin db_printf(" "); 14728b201c42SJohn Baldwin db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC); 14738b201c42SJohn Baldwin db_printf("(%p)", ih->ih_argument); 14748b201c42SJohn Baldwin if (ih->ih_need || 1475ef544f63SPaolo Pisati (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD | 14768b201c42SJohn Baldwin IH_MPSAFE)) != 0) { 14778b201c42SJohn Baldwin db_printf(" {"); 14788b201c42SJohn Baldwin comma = 0; 14798b201c42SJohn Baldwin if (ih->ih_flags & IH_EXCLUSIVE) { 14808b201c42SJohn Baldwin if (comma) 14818b201c42SJohn Baldwin db_printf(", "); 14828b201c42SJohn Baldwin db_printf("EXCL"); 
14838b201c42SJohn Baldwin comma = 1; 14848b201c42SJohn Baldwin } 14858b201c42SJohn Baldwin if (ih->ih_flags & IH_ENTROPY) { 14868b201c42SJohn Baldwin if (comma) 14878b201c42SJohn Baldwin db_printf(", "); 14888b201c42SJohn Baldwin db_printf("ENTROPY"); 14898b201c42SJohn Baldwin comma = 1; 14908b201c42SJohn Baldwin } 14918b201c42SJohn Baldwin if (ih->ih_flags & IH_DEAD) { 14928b201c42SJohn Baldwin if (comma) 14938b201c42SJohn Baldwin db_printf(", "); 14948b201c42SJohn Baldwin db_printf("DEAD"); 14958b201c42SJohn Baldwin comma = 1; 14968b201c42SJohn Baldwin } 14978b201c42SJohn Baldwin if (ih->ih_flags & IH_MPSAFE) { 14988b201c42SJohn Baldwin if (comma) 14998b201c42SJohn Baldwin db_printf(", "); 15008b201c42SJohn Baldwin db_printf("MPSAFE"); 15018b201c42SJohn Baldwin comma = 1; 15028b201c42SJohn Baldwin } 15038b201c42SJohn Baldwin if (ih->ih_need) { 15048b201c42SJohn Baldwin if (comma) 15058b201c42SJohn Baldwin db_printf(", "); 15068b201c42SJohn Baldwin db_printf("NEED"); 15078b201c42SJohn Baldwin } 15088b201c42SJohn Baldwin db_printf("}"); 15098b201c42SJohn Baldwin } 15108b201c42SJohn Baldwin db_printf("\n"); 15118b201c42SJohn Baldwin } 15128b201c42SJohn Baldwin 15138b201c42SJohn Baldwin /* 1514e0f66ef8SJohn Baldwin * Dump details about an event. 15158b201c42SJohn Baldwin */ 15168b201c42SJohn Baldwin void 1517e0f66ef8SJohn Baldwin db_dump_intr_event(struct intr_event *ie, int handlers) 15188b201c42SJohn Baldwin { 1519e0f66ef8SJohn Baldwin struct intr_handler *ih; 1520e0f66ef8SJohn Baldwin struct intr_thread *it; 15218b201c42SJohn Baldwin int comma; 15228b201c42SJohn Baldwin 1523e0f66ef8SJohn Baldwin db_printf("%s ", ie->ie_fullname); 1524e0f66ef8SJohn Baldwin it = ie->ie_thread; 1525e0f66ef8SJohn Baldwin if (it != NULL) 1526e0f66ef8SJohn Baldwin db_printf("(pid %d)", it->it_thread->td_proc->p_pid); 1527e0f66ef8SJohn Baldwin else 1528e0f66ef8SJohn Baldwin db_printf("(no thread)"); 1529eaf86d16SJohn Baldwin if (ie->ie_cpu != NOCPU) 1530eaf86d16SJohn Baldwin db_printf(" (CPU %d)", ie->ie_cpu); 1531e0f66ef8SJohn Baldwin if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 || 1532e0f66ef8SJohn Baldwin (it != NULL && it->it_need)) { 15338b201c42SJohn Baldwin db_printf(" {"); 15348b201c42SJohn Baldwin comma = 0; 1535e0f66ef8SJohn Baldwin if (ie->ie_flags & IE_SOFT) { 15368b201c42SJohn Baldwin db_printf("SOFT"); 15378b201c42SJohn Baldwin comma = 1; 15388b201c42SJohn Baldwin } 1539e0f66ef8SJohn Baldwin if (ie->ie_flags & IE_ENTROPY) { 15408b201c42SJohn Baldwin if (comma) 15418b201c42SJohn Baldwin db_printf(", "); 15428b201c42SJohn Baldwin db_printf("ENTROPY"); 15438b201c42SJohn Baldwin comma = 1; 15448b201c42SJohn Baldwin } 1545e0f66ef8SJohn Baldwin if (ie->ie_flags & IE_ADDING_THREAD) { 15468b201c42SJohn Baldwin if (comma) 15478b201c42SJohn Baldwin db_printf(", "); 1548e0f66ef8SJohn Baldwin db_printf("ADDING_THREAD"); 15498b201c42SJohn Baldwin comma = 1; 15508b201c42SJohn Baldwin } 1551e0f66ef8SJohn Baldwin if (it != NULL && it->it_need) { 15528b201c42SJohn Baldwin if (comma) 15538b201c42SJohn Baldwin db_printf(", "); 15548b201c42SJohn Baldwin db_printf("NEED"); 15558b201c42SJohn Baldwin } 15568b201c42SJohn Baldwin db_printf("}"); 15578b201c42SJohn Baldwin } 15588b201c42SJohn Baldwin db_printf("\n"); 15598b201c42SJohn Baldwin 15608b201c42SJohn Baldwin if (handlers) 1561e0f66ef8SJohn Baldwin TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) 15628b201c42SJohn Baldwin db_dump_intrhand(ih); 15638b201c42SJohn Baldwin } 1564e0f66ef8SJohn Baldwin 1565e0f66ef8SJohn Baldwin /* 1566e0f66ef8SJohn
Baldwin * Dump data about interrupt handlers 1567e0f66ef8SJohn Baldwin */ 1568e0f66ef8SJohn Baldwin DB_SHOW_COMMAND(intr, db_show_intr) 1569e0f66ef8SJohn Baldwin { 1570e0f66ef8SJohn Baldwin struct intr_event *ie; 157119e9205aSJohn Baldwin int all, verbose; 1572e0f66ef8SJohn Baldwin 1573e0f66ef8SJohn Baldwin verbose = index(modif, 'v') != NULL; 1574e0f66ef8SJohn Baldwin all = index(modif, 'a') != NULL; 1575e0f66ef8SJohn Baldwin TAILQ_FOREACH(ie, &event_list, ie_list) { 1576e0f66ef8SJohn Baldwin if (!all && TAILQ_EMPTY(&ie->ie_handlers)) 1577e0f66ef8SJohn Baldwin continue; 1578e0f66ef8SJohn Baldwin db_dump_intr_event(ie, verbose); 157919e9205aSJohn Baldwin if (db_pager_quit) 158019e9205aSJohn Baldwin break; 1581e0f66ef8SJohn Baldwin } 1582e0f66ef8SJohn Baldwin } 15838b201c42SJohn Baldwin #endif /* DDB */ 15848b201c42SJohn Baldwin 1585b4151f71SJohn Baldwin /* 15868088699fSJohn Baldwin * Start standard software interrupt threads 15871931cf94SJohn Baldwin */ 15881931cf94SJohn Baldwin static void 1589b4151f71SJohn Baldwin start_softintr(void *dummy) 15901931cf94SJohn Baldwin { 15918804bf6bSJohn Baldwin struct proc *p; 1592b4151f71SJohn Baldwin 1593e0f66ef8SJohn Baldwin if (swi_add(&clk_intr_event, "clock", softclock, NULL, SWI_CLOCK, 1594b4151f71SJohn Baldwin INTR_MPSAFE, &softclock_ih) || 159579501b66SScott Long swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih)) 1596b4151f71SJohn Baldwin panic("died while creating standard software ithreads"); 15973e5da754SJohn Baldwin 1598e0f66ef8SJohn Baldwin p = clk_intr_event->ie_thread->it_thread->td_proc; 15998804bf6bSJohn Baldwin PROC_LOCK(p); 16008804bf6bSJohn Baldwin p->p_flag |= P_NOLOAD; 16018804bf6bSJohn Baldwin PROC_UNLOCK(p); 16021931cf94SJohn Baldwin } 1603237fdd78SRobert Watson SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, 1604237fdd78SRobert Watson NULL); 16051931cf94SJohn Baldwin 1606d279178dSThomas Moestl /* 1607d279178dSThomas Moestl * Sysctls used by systat and others: hw.intrnames and hw.intrcnt. 1608d279178dSThomas Moestl * The data for this machine dependent, and the declarations are in machine 1609d279178dSThomas Moestl * dependent code. The layout of intrnames and intrcnt however is machine 1610d279178dSThomas Moestl * independent. 1611d279178dSThomas Moestl * 1612d279178dSThomas Moestl * We do not know the length of intrcnt and intrnames at compile time, so 1613d279178dSThomas Moestl * calculate things at run time. 
1614d279178dSThomas Moestl */ 1615d279178dSThomas Moestl static int 1616d279178dSThomas Moestl sysctl_intrnames(SYSCTL_HANDLER_ARGS) 1617d279178dSThomas Moestl { 1618d279178dSThomas Moestl return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames, 1619d279178dSThomas Moestl req)); 1620d279178dSThomas Moestl } 1621d279178dSThomas Moestl 1622d279178dSThomas Moestl SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD, 1623d279178dSThomas Moestl NULL, 0, sysctl_intrnames, "", "Interrupt Names"); 1624d279178dSThomas Moestl 1625d279178dSThomas Moestl static int 1626d279178dSThomas Moestl sysctl_intrcnt(SYSCTL_HANDLER_ARGS) 1627d279178dSThomas Moestl { 1628d279178dSThomas Moestl return (sysctl_handle_opaque(oidp, intrcnt, 1629d279178dSThomas Moestl (char *)eintrcnt - (char *)intrcnt, req)); 1630d279178dSThomas Moestl } 1631d279178dSThomas Moestl 1632d279178dSThomas Moestl SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD, 1633d279178dSThomas Moestl NULL, 0, sysctl_intrcnt, "", "Interrupt Counts"); 16348b201c42SJohn Baldwin 16358b201c42SJohn Baldwin #ifdef DDB 16368b201c42SJohn Baldwin /* 16378b201c42SJohn Baldwin * DDB command to dump the interrupt statistics. 16388b201c42SJohn Baldwin */ 16398b201c42SJohn Baldwin DB_SHOW_COMMAND(intrcnt, db_show_intrcnt) 16408b201c42SJohn Baldwin { 16418b201c42SJohn Baldwin u_long *i; 16428b201c42SJohn Baldwin char *cp; 16438b201c42SJohn Baldwin 16448b201c42SJohn Baldwin cp = intrnames; 164519e9205aSJohn Baldwin for (i = intrcnt; i != eintrcnt && !db_pager_quit; i++) { 16468b201c42SJohn Baldwin if (*cp == '\0') 16478b201c42SJohn Baldwin break; 16488b201c42SJohn Baldwin if (*i != 0) 16498b201c42SJohn Baldwin db_printf("%s\t%lu\n", cp, *i); 16508b201c42SJohn Baldwin cp += strlen(cp) + 1; 16518b201c42SJohn Baldwin } 16528b201c42SJohn Baldwin } 16538b201c42SJohn Baldwin #endif 1654
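/*
 * The FILTER_* return-value contract that intr_filter_loop() and
 * intr_event_handle() implement above is what a driver's filter routine
 * reports back through bus_setup_intr(9).  What follows is a minimal,
 * illustrative sketch of a hypothetical driver ("foo", with made-up
 * FOO_INTR_PENDING()/FOO_ACK_INTR() register helpers and a foo_softc)
 * registering a filter together with a threaded handler; it is fenced out
 * with #if 0 and is not part of this file.
 */
#if 0
struct foo_softc {
	device_t	 sc_dev;
	struct resource	*sc_irq;	/* from bus_alloc_resource_any() */
	void		*sc_ih;		/* interrupt cookie */
	struct mtx	 sc_mtx;
};

/*
 * Filter: runs in primary interrupt context and must not sleep.  Returning
 * FILTER_HANDLED | FILTER_SCHEDULE_THREAD both claims the event and asks
 * for foo_intr() to be run from an interrupt thread.
 */
static int
foo_filter(void *arg)
{
	struct foo_softc *sc = arg;

	if (!FOO_INTR_PENDING(sc))	/* hypothetical "is it ours?" check */
		return (FILTER_STRAY);
	FOO_ACK_INTR(sc);		/* hypothetical: quiesce the hardware */
	return (FILTER_HANDLED | FILTER_SCHEDULE_THREAD);
}

/* Threaded handler: runs from an ithread, so it may block on mutexes. */
static void
foo_intr(void *arg)
{
	struct foo_softc *sc = arg;

	mtx_lock(&sc->sc_mtx);
	/* ... drain the device's completion queue ... */
	mtx_unlock(&sc->sc_mtx);
}

static int
foo_attach_intr(struct foo_softc *sc)
{

	return (bus_setup_intr(sc->sc_dev, sc->sc_irq,
	    INTR_TYPE_MISC | INTR_MPSAFE, foo_filter, foo_intr, sc,
	    &sc->sc_ih));
}
#endif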
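/*
 * start_softintr() above registers the standard "clock" and "vm" software
 * interrupt handlers with swi_add().  A subsystem can create its own
 * software interrupt thread the same way and later kick it with
 * swi_sched().  The sketch below is illustrative only; "bar", bar_swi(),
 * bar_init(), bar_kick() and bar_ih are hypothetical names, and the block
 * is fenced out with #if 0.
 */
#if 0
static void *bar_ih;			/* cookie returned by swi_add() */

/* Deferred work runs here, in a software interrupt thread. */
static void
bar_swi(void *arg __unused)
{

	/* ... process whatever was queued for this soft interrupt ... */
}

static void
bar_init(void *dummy __unused)
{

	if (swi_add(NULL, "bar", bar_swi, NULL, SWI_TQ, INTR_MPSAFE, &bar_ih))
		panic("died while creating bar swi thread");
}
SYSINIT(bar_swi_setup, SI_SUB_SOFTINTR, SI_ORDER_ANY, bar_init, NULL);

/* Callers (e.g. a primary interrupt handler) request a pass of bar_swi(). */
static void
bar_kick(void)
{

	swi_sched(bar_ih, 0);
}
#endif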
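/*
 * The hw.intrnames and hw.intrcnt sysctls exported above hand out a packed
 * list of NUL-terminated names and a parallel array of u_long counters,
 * with sizes that are only discoverable at run time.  The block below is a
 * minimal userland sketch of a consumer in the style of vmstat(8) -i; it is
 * illustrative only and fenced out of this kernel file with #if 0.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>

#include <err.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	u_long *counts;
	char *names, *name;
	size_t cntlen, namelen, i, ncnt;

	/* Probe the sizes first, then fetch the data. */
	if (sysctlbyname("hw.intrcnt", NULL, &cntlen, NULL, 0) == -1 ||
	    sysctlbyname("hw.intrnames", NULL, &namelen, NULL, 0) == -1)
		err(1, "sysctlbyname");
	if ((counts = malloc(cntlen)) == NULL ||
	    (names = malloc(namelen)) == NULL)
		err(1, "malloc");
	if (sysctlbyname("hw.intrcnt", counts, &cntlen, NULL, 0) == -1 ||
	    sysctlbyname("hw.intrnames", names, &namelen, NULL, 0) == -1)
		err(1, "sysctlbyname");

	/*
	 * Walk the two arrays in parallel: one u_long per counter, one
	 * NUL-terminated string per name.
	 */
	ncnt = cntlen / sizeof(u_long);
	name = names;
	for (i = 0; i < ncnt && name < names + namelen; i++) {
		if (*name != '\0' && counts[i] != 0)
			printf("%-20s %lu\n", name, counts[i]);
		name += strlen(name) + 1;
	}
	free(counts);
	free(names);
	return (0);
}
#endif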