19454b2d8SWarner Losh /*- 2425f9fdaSStefan Eßer * Copyright (c) 1997, Stefan Esser <se@freebsd.org> 3425f9fdaSStefan Eßer * All rights reserved. 4425f9fdaSStefan Eßer * 5425f9fdaSStefan Eßer * Redistribution and use in source and binary forms, with or without 6425f9fdaSStefan Eßer * modification, are permitted provided that the following conditions 7425f9fdaSStefan Eßer * are met: 8425f9fdaSStefan Eßer * 1. Redistributions of source code must retain the above copyright 9425f9fdaSStefan Eßer * notice unmodified, this list of conditions, and the following 10425f9fdaSStefan Eßer * disclaimer. 11425f9fdaSStefan Eßer * 2. Redistributions in binary form must reproduce the above copyright 12425f9fdaSStefan Eßer * notice, this list of conditions and the following disclaimer in the 13425f9fdaSStefan Eßer * documentation and/or other materials provided with the distribution. 14425f9fdaSStefan Eßer * 15425f9fdaSStefan Eßer * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16425f9fdaSStefan Eßer * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17425f9fdaSStefan Eßer * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18425f9fdaSStefan Eßer * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19425f9fdaSStefan Eßer * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20425f9fdaSStefan Eßer * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21425f9fdaSStefan Eßer * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22425f9fdaSStefan Eßer * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23425f9fdaSStefan Eßer * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24425f9fdaSStefan Eßer * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25425f9fdaSStefan Eßer */ 26425f9fdaSStefan Eßer 27677b542eSDavid E. O'Brien #include <sys/cdefs.h> 28677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$"); 293900ddb2SDoug Rabson 308b201c42SJohn Baldwin #include "opt_ddb.h" 318b201c42SJohn Baldwin 321c5bb3eaSPeter Wemm #include <sys/param.h> 339a94c9c5SJohn Baldwin #include <sys/bus.h> 34c11110eaSAlfred Perlstein #include <sys/conf.h> 359a94c9c5SJohn Baldwin #include <sys/rtprio.h> 36425f9fdaSStefan Eßer #include <sys/systm.h> 3768352337SDoug Rabson #include <sys/interrupt.h> 381931cf94SJohn Baldwin #include <sys/kernel.h> 391931cf94SJohn Baldwin #include <sys/kthread.h> 401931cf94SJohn Baldwin #include <sys/ktr.h> 4105b2c96fSBruce Evans #include <sys/limits.h> 42f34fa851SJohn Baldwin #include <sys/lock.h> 431931cf94SJohn Baldwin #include <sys/malloc.h> 4435e0e5b3SJohn Baldwin #include <sys/mutex.h> 451931cf94SJohn Baldwin #include <sys/proc.h> 463e5da754SJohn Baldwin #include <sys/random.h> 47b4151f71SJohn Baldwin #include <sys/resourcevar.h> 4863710c4dSJohn Baldwin #include <sys/sched.h> 49eaf86d16SJohn Baldwin #include <sys/smp.h> 50d279178dSThomas Moestl #include <sys/sysctl.h> 511931cf94SJohn Baldwin #include <sys/unistd.h> 521931cf94SJohn Baldwin #include <sys/vmmeter.h> 531931cf94SJohn Baldwin #include <machine/atomic.h> 541931cf94SJohn Baldwin #include <machine/cpu.h> 558088699fSJohn Baldwin #include <machine/md_var.h> 56b4151f71SJohn Baldwin #include <machine/stdarg.h> 578b201c42SJohn Baldwin #ifdef DDB 588b201c42SJohn Baldwin #include <ddb/ddb.h> 598b201c42SJohn Baldwin #include <ddb/db_sym.h> 608b201c42SJohn Baldwin #endif 61425f9fdaSStefan Eßer 62e0f66ef8SJohn Baldwin /* 63e0f66ef8SJohn Baldwin * Describe an interrupt thread. There is one of these per interrupt event. 64e0f66ef8SJohn Baldwin */ 65e0f66ef8SJohn Baldwin struct intr_thread { 66e0f66ef8SJohn Baldwin struct intr_event *it_event; 67e0f66ef8SJohn Baldwin struct thread *it_thread; /* Kernel thread. */ 68e0f66ef8SJohn Baldwin int it_flags; /* (j) IT_* flags. */ 69e0f66ef8SJohn Baldwin int it_need; /* Needs service. */ 703e5da754SJohn Baldwin }; 713e5da754SJohn Baldwin 72e0f66ef8SJohn Baldwin /* Interrupt thread flags kept in it_flags */ 73e0f66ef8SJohn Baldwin #define IT_DEAD 0x000001 /* Thread is waiting to exit. */ 74e0f66ef8SJohn Baldwin 75e0f66ef8SJohn Baldwin struct intr_entropy { 76e0f66ef8SJohn Baldwin struct thread *td; 77e0f66ef8SJohn Baldwin uintptr_t event; 78e0f66ef8SJohn Baldwin }; 79e0f66ef8SJohn Baldwin 80e0f66ef8SJohn Baldwin struct intr_event *clk_intr_event; 81e0f66ef8SJohn Baldwin struct intr_event *tty_intr_event; 827b1fe905SBruce Evans void *softclock_ih; 837b1fe905SBruce Evans void *vm_ih; 847ab24ea3SJulian Elischer struct proc *intrproc; 851931cf94SJohn Baldwin 86b4151f71SJohn Baldwin static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads"); 87b4151f71SJohn Baldwin 880ae62c18SNate Lawson static int intr_storm_threshold = 1000; 897870c3c6SJohn Baldwin TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold); 907870c3c6SJohn Baldwin SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW, 917870c3c6SJohn Baldwin &intr_storm_threshold, 0, 927b1fe905SBruce Evans "Number of consecutive interrupts before storm protection is enabled"); 93e0f66ef8SJohn Baldwin static TAILQ_HEAD(, intr_event) event_list = 94e0f66ef8SJohn Baldwin TAILQ_HEAD_INITIALIZER(event_list); 957b1fe905SBruce Evans 96e0f66ef8SJohn Baldwin static void intr_event_update(struct intr_event *ie); 97bafe5a31SPaolo Pisati #ifdef INTR_FILTER 98bafe5a31SPaolo Pisati static struct intr_thread *ithread_create(const char *name, 99bafe5a31SPaolo Pisati struct intr_handler *ih); 100bafe5a31SPaolo Pisati #else 101e0f66ef8SJohn Baldwin static struct intr_thread *ithread_create(const char *name); 102bafe5a31SPaolo Pisati #endif 103e0f66ef8SJohn Baldwin static void ithread_destroy(struct intr_thread *ithread); 104bafe5a31SPaolo Pisati static void ithread_execute_handlers(struct proc *p, 105bafe5a31SPaolo Pisati struct intr_event *ie); 106bafe5a31SPaolo Pisati #ifdef INTR_FILTER 107bafe5a31SPaolo Pisati static void priv_ithread_execute_handler(struct proc *p, 108bafe5a31SPaolo Pisati struct intr_handler *ih); 109bafe5a31SPaolo Pisati #endif 1107b1fe905SBruce Evans static void ithread_loop(void *); 111e0f66ef8SJohn Baldwin static void ithread_update(struct intr_thread *ithd); 1127b1fe905SBruce Evans static void start_softintr(void *); 1137870c3c6SJohn Baldwin 114bc17acb2SJohn Baldwin /* Map an interrupt type to an ithread priority. */ 115b4151f71SJohn Baldwin u_char 116e0f66ef8SJohn Baldwin intr_priority(enum intr_type flags) 1179a94c9c5SJohn Baldwin { 118b4151f71SJohn Baldwin u_char pri; 1199a94c9c5SJohn Baldwin 120b4151f71SJohn Baldwin flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET | 1215a280d9cSPeter Wemm INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV); 1229a94c9c5SJohn Baldwin switch (flags) { 123b4151f71SJohn Baldwin case INTR_TYPE_TTY: 1249a94c9c5SJohn Baldwin pri = PI_TTYLOW; 1259a94c9c5SJohn Baldwin break; 1269a94c9c5SJohn Baldwin case INTR_TYPE_BIO: 1279a94c9c5SJohn Baldwin /* 1289a94c9c5SJohn Baldwin * XXX We need to refine this. BSD/OS distinguishes 1299a94c9c5SJohn Baldwin * between tape and disk priorities. 1309a94c9c5SJohn Baldwin */ 1319a94c9c5SJohn Baldwin pri = PI_DISK; 1329a94c9c5SJohn Baldwin break; 1339a94c9c5SJohn Baldwin case INTR_TYPE_NET: 1349a94c9c5SJohn Baldwin pri = PI_NET; 1359a94c9c5SJohn Baldwin break; 1369a94c9c5SJohn Baldwin case INTR_TYPE_CAM: 1379a94c9c5SJohn Baldwin pri = PI_DISK; /* XXX or PI_CAM? */ 1389a94c9c5SJohn Baldwin break; 1395a280d9cSPeter Wemm case INTR_TYPE_AV: /* Audio/video */ 1405a280d9cSPeter Wemm pri = PI_AV; 1415a280d9cSPeter Wemm break; 142b4151f71SJohn Baldwin case INTR_TYPE_CLK: 143b4151f71SJohn Baldwin pri = PI_REALTIME; 144b4151f71SJohn Baldwin break; 1459a94c9c5SJohn Baldwin case INTR_TYPE_MISC: 1469a94c9c5SJohn Baldwin pri = PI_DULL; /* don't care */ 1479a94c9c5SJohn Baldwin break; 1489a94c9c5SJohn Baldwin default: 149b4151f71SJohn Baldwin /* We didn't specify an interrupt level. */ 150e0f66ef8SJohn Baldwin panic("intr_priority: no interrupt type in flags"); 1519a94c9c5SJohn Baldwin } 1529a94c9c5SJohn Baldwin 1539a94c9c5SJohn Baldwin return pri; 1549a94c9c5SJohn Baldwin } 1559a94c9c5SJohn Baldwin 156b4151f71SJohn Baldwin /* 157e0f66ef8SJohn Baldwin * Update an ithread based on the associated intr_event. 158b4151f71SJohn Baldwin */ 159b4151f71SJohn Baldwin static void 160e0f66ef8SJohn Baldwin ithread_update(struct intr_thread *ithd) 161b4151f71SJohn Baldwin { 162e0f66ef8SJohn Baldwin struct intr_event *ie; 163b40ce416SJulian Elischer struct thread *td; 164e0f66ef8SJohn Baldwin u_char pri; 1658088699fSJohn Baldwin 166e0f66ef8SJohn Baldwin ie = ithd->it_event; 167e0f66ef8SJohn Baldwin td = ithd->it_thread; 168b4151f71SJohn Baldwin 169e0f66ef8SJohn Baldwin /* Determine the overall priority of this event. */ 170e0f66ef8SJohn Baldwin if (TAILQ_EMPTY(&ie->ie_handlers)) 171e0f66ef8SJohn Baldwin pri = PRI_MAX_ITHD; 172e0f66ef8SJohn Baldwin else 173e0f66ef8SJohn Baldwin pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri; 174e80fb434SRobert Drehmel 175e0f66ef8SJohn Baldwin /* Update name and priority. */ 1767ab24ea3SJulian Elischer strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name)); 177982d11f8SJeff Roberson thread_lock(td); 178e0f66ef8SJohn Baldwin sched_prio(td, pri); 179982d11f8SJeff Roberson thread_unlock(td); 180b4151f71SJohn Baldwin } 181e0f66ef8SJohn Baldwin 182e0f66ef8SJohn Baldwin /* 183e0f66ef8SJohn Baldwin * Regenerate the full name of an interrupt event and update its priority. 184e0f66ef8SJohn Baldwin */ 185e0f66ef8SJohn Baldwin static void 186e0f66ef8SJohn Baldwin intr_event_update(struct intr_event *ie) 187e0f66ef8SJohn Baldwin { 188e0f66ef8SJohn Baldwin struct intr_handler *ih; 189e0f66ef8SJohn Baldwin char *last; 190e0f66ef8SJohn Baldwin int missed, space; 191e0f66ef8SJohn Baldwin 192e0f66ef8SJohn Baldwin /* Start off with no entropy and just the name of the event. */ 193e0f66ef8SJohn Baldwin mtx_assert(&ie->ie_lock, MA_OWNED); 194e0f66ef8SJohn Baldwin strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); 195e0f66ef8SJohn Baldwin ie->ie_flags &= ~IE_ENTROPY; 1960811d60aSJohn Baldwin missed = 0; 197e0f66ef8SJohn Baldwin space = 1; 198e0f66ef8SJohn Baldwin 199e0f66ef8SJohn Baldwin /* Run through all the handlers updating values. */ 200e0f66ef8SJohn Baldwin TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 201e0f66ef8SJohn Baldwin if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 < 202e0f66ef8SJohn Baldwin sizeof(ie->ie_fullname)) { 203e0f66ef8SJohn Baldwin strcat(ie->ie_fullname, " "); 204e0f66ef8SJohn Baldwin strcat(ie->ie_fullname, ih->ih_name); 205e0f66ef8SJohn Baldwin space = 0; 2060811d60aSJohn Baldwin } else 2070811d60aSJohn Baldwin missed++; 2080811d60aSJohn Baldwin if (ih->ih_flags & IH_ENTROPY) 209e0f66ef8SJohn Baldwin ie->ie_flags |= IE_ENTROPY; 2100811d60aSJohn Baldwin } 211e0f66ef8SJohn Baldwin 212e0f66ef8SJohn Baldwin /* 213e0f66ef8SJohn Baldwin * If the handler names were too long, add +'s to indicate missing 214e0f66ef8SJohn Baldwin * names. If we run out of room and still have +'s to add, change 215e0f66ef8SJohn Baldwin * the last character from a + to a *. 216e0f66ef8SJohn Baldwin */ 217e0f66ef8SJohn Baldwin last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2]; 2180811d60aSJohn Baldwin while (missed-- > 0) { 219e0f66ef8SJohn Baldwin if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) { 220e0f66ef8SJohn Baldwin if (*last == '+') { 221e0f66ef8SJohn Baldwin *last = '*'; 222e0f66ef8SJohn Baldwin break; 223b4151f71SJohn Baldwin } else 224e0f66ef8SJohn Baldwin *last = '+'; 225e0f66ef8SJohn Baldwin } else if (space) { 226e0f66ef8SJohn Baldwin strcat(ie->ie_fullname, " +"); 227e0f66ef8SJohn Baldwin space = 0; 228e0f66ef8SJohn Baldwin } else 229e0f66ef8SJohn Baldwin strcat(ie->ie_fullname, "+"); 230b4151f71SJohn Baldwin } 231e0f66ef8SJohn Baldwin 232e0f66ef8SJohn Baldwin /* 233e0f66ef8SJohn Baldwin * If this event has an ithread, update it's priority and 234e0f66ef8SJohn Baldwin * name. 235e0f66ef8SJohn Baldwin */ 236e0f66ef8SJohn Baldwin if (ie->ie_thread != NULL) 237e0f66ef8SJohn Baldwin ithread_update(ie->ie_thread); 238e0f66ef8SJohn Baldwin CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname); 239b4151f71SJohn Baldwin } 240b4151f71SJohn Baldwin 241b4151f71SJohn Baldwin int 242e0f66ef8SJohn Baldwin intr_event_create(struct intr_event **event, void *source,int flags, 2436d2d1c04SJohn Baldwin void (*disable)(void *), void (*enable)(void *), void (*eoi)(void *), 244eaf86d16SJohn Baldwin int (*assign_cpu)(void *, u_char), const char *fmt, ...) 245bafe5a31SPaolo Pisati { 246bafe5a31SPaolo Pisati struct intr_event *ie; 247bafe5a31SPaolo Pisati va_list ap; 248bafe5a31SPaolo Pisati 249bafe5a31SPaolo Pisati /* The only valid flag during creation is IE_SOFT. */ 250bafe5a31SPaolo Pisati if ((flags & ~IE_SOFT) != 0) 251bafe5a31SPaolo Pisati return (EINVAL); 252bafe5a31SPaolo Pisati ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO); 253bafe5a31SPaolo Pisati ie->ie_source = source; 2546d2d1c04SJohn Baldwin ie->ie_disable = disable; 255bafe5a31SPaolo Pisati ie->ie_enable = enable; 256bafe5a31SPaolo Pisati ie->ie_eoi = eoi; 2576d2d1c04SJohn Baldwin ie->ie_assign_cpu = assign_cpu; 258bafe5a31SPaolo Pisati ie->ie_flags = flags; 259eaf86d16SJohn Baldwin ie->ie_cpu = NOCPU; 260bafe5a31SPaolo Pisati TAILQ_INIT(&ie->ie_handlers); 261bafe5a31SPaolo Pisati mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF); 262bafe5a31SPaolo Pisati 263bafe5a31SPaolo Pisati va_start(ap, fmt); 264bafe5a31SPaolo Pisati vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap); 265bafe5a31SPaolo Pisati va_end(ap); 266bafe5a31SPaolo Pisati strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); 267bafe5a31SPaolo Pisati mtx_pool_lock(mtxpool_sleep, &event_list); 268bafe5a31SPaolo Pisati TAILQ_INSERT_TAIL(&event_list, ie, ie_list); 269bafe5a31SPaolo Pisati mtx_pool_unlock(mtxpool_sleep, &event_list); 270bafe5a31SPaolo Pisati if (event != NULL) 271bafe5a31SPaolo Pisati *event = ie; 272bafe5a31SPaolo Pisati CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name); 273bafe5a31SPaolo Pisati return (0); 274bafe5a31SPaolo Pisati } 275b4151f71SJohn Baldwin 276eaf86d16SJohn Baldwin /* 277eaf86d16SJohn Baldwin * Bind an interrupt event to the specified CPU. Note that not all 278eaf86d16SJohn Baldwin * platforms support binding an interrupt to a CPU. For those 279eaf86d16SJohn Baldwin * platforms this request will fail. For supported platforms, any 280eaf86d16SJohn Baldwin * associated ithreads as well as the primary interrupt context will 281eaf86d16SJohn Baldwin * be bound to the specificed CPU. Using a cpu id of NOCPU unbinds 282eaf86d16SJohn Baldwin * the interrupt event. 283eaf86d16SJohn Baldwin */ 284eaf86d16SJohn Baldwin int 285eaf86d16SJohn Baldwin intr_event_bind(struct intr_event *ie, u_char cpu) 286eaf86d16SJohn Baldwin { 287eaf86d16SJohn Baldwin struct thread *td; 288eaf86d16SJohn Baldwin int error; 289eaf86d16SJohn Baldwin 290eaf86d16SJohn Baldwin /* Need a CPU to bind to. */ 291eaf86d16SJohn Baldwin if (cpu != NOCPU && CPU_ABSENT(cpu)) 292eaf86d16SJohn Baldwin return (EINVAL); 293eaf86d16SJohn Baldwin 294eaf86d16SJohn Baldwin if (ie->ie_assign_cpu == NULL) 295eaf86d16SJohn Baldwin return (EOPNOTSUPP); 296eaf86d16SJohn Baldwin 297eaf86d16SJohn Baldwin /* Don't allow a bind request if the interrupt is already bound. */ 298eaf86d16SJohn Baldwin mtx_lock(&ie->ie_lock); 299eaf86d16SJohn Baldwin if (ie->ie_cpu != NOCPU && cpu != NOCPU) { 300eaf86d16SJohn Baldwin mtx_unlock(&ie->ie_lock); 301eaf86d16SJohn Baldwin return (EBUSY); 302eaf86d16SJohn Baldwin } 303eaf86d16SJohn Baldwin mtx_unlock(&ie->ie_lock); 304eaf86d16SJohn Baldwin 305eaf86d16SJohn Baldwin error = ie->ie_assign_cpu(ie->ie_source, cpu); 306eaf86d16SJohn Baldwin if (error) 307eaf86d16SJohn Baldwin return (error); 308eaf86d16SJohn Baldwin mtx_lock(&ie->ie_lock); 309eaf86d16SJohn Baldwin if (ie->ie_thread != NULL) 310eaf86d16SJohn Baldwin td = ie->ie_thread->it_thread; 311eaf86d16SJohn Baldwin else 312eaf86d16SJohn Baldwin td = NULL; 313eaf86d16SJohn Baldwin if (td != NULL) 314eaf86d16SJohn Baldwin thread_lock(td); 315eaf86d16SJohn Baldwin ie->ie_cpu = cpu; 316eaf86d16SJohn Baldwin if (td != NULL) 317eaf86d16SJohn Baldwin thread_unlock(td); 318eaf86d16SJohn Baldwin mtx_unlock(&ie->ie_lock); 319eaf86d16SJohn Baldwin return (0); 320eaf86d16SJohn Baldwin } 321eaf86d16SJohn Baldwin 322b4151f71SJohn Baldwin int 323e0f66ef8SJohn Baldwin intr_event_destroy(struct intr_event *ie) 324b4151f71SJohn Baldwin { 325b4151f71SJohn Baldwin 326e0f66ef8SJohn Baldwin mtx_lock(&ie->ie_lock); 327e0f66ef8SJohn Baldwin if (!TAILQ_EMPTY(&ie->ie_handlers)) { 328e0f66ef8SJohn Baldwin mtx_unlock(&ie->ie_lock); 329e0f66ef8SJohn Baldwin return (EBUSY); 3304d29cb2dSJohn Baldwin } 331e0f66ef8SJohn Baldwin mtx_pool_lock(mtxpool_sleep, &event_list); 332e0f66ef8SJohn Baldwin TAILQ_REMOVE(&event_list, ie, ie_list); 333e0f66ef8SJohn Baldwin mtx_pool_unlock(mtxpool_sleep, &event_list); 3349477358dSJohn Baldwin #ifndef notyet 3359477358dSJohn Baldwin if (ie->ie_thread != NULL) { 3369477358dSJohn Baldwin ithread_destroy(ie->ie_thread); 3379477358dSJohn Baldwin ie->ie_thread = NULL; 3389477358dSJohn Baldwin } 3399477358dSJohn Baldwin #endif 340e0f66ef8SJohn Baldwin mtx_unlock(&ie->ie_lock); 341e0f66ef8SJohn Baldwin mtx_destroy(&ie->ie_lock); 342e0f66ef8SJohn Baldwin free(ie, M_ITHREAD); 343e0f66ef8SJohn Baldwin return (0); 344e0f66ef8SJohn Baldwin } 345e0f66ef8SJohn Baldwin 346bafe5a31SPaolo Pisati #ifndef INTR_FILTER 347e0f66ef8SJohn Baldwin static struct intr_thread * 348e0f66ef8SJohn Baldwin ithread_create(const char *name) 349e0f66ef8SJohn Baldwin { 350e0f66ef8SJohn Baldwin struct intr_thread *ithd; 351e0f66ef8SJohn Baldwin struct thread *td; 352e0f66ef8SJohn Baldwin int error; 353e0f66ef8SJohn Baldwin 354e0f66ef8SJohn Baldwin ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO); 355e0f66ef8SJohn Baldwin 3567ab24ea3SJulian Elischer error = kproc_kthread_add(ithread_loop, ithd, &intrproc, 3577ab24ea3SJulian Elischer &td, RFSTOPPED | RFHIGHPID, 3589ef95d01SJulian Elischer 0, "intr", "%s", name); 359e0f66ef8SJohn Baldwin if (error) 3603745c395SJulian Elischer panic("kproc_create() failed with %d", error); 361982d11f8SJeff Roberson thread_lock(td); 362ad1e7d28SJulian Elischer sched_class(td, PRI_ITHD); 363e0f66ef8SJohn Baldwin TD_SET_IWAIT(td); 364982d11f8SJeff Roberson thread_unlock(td); 365e0f66ef8SJohn Baldwin td->td_pflags |= TDP_ITHREAD; 366e0f66ef8SJohn Baldwin ithd->it_thread = td; 367e0f66ef8SJohn Baldwin CTR2(KTR_INTR, "%s: created %s", __func__, name); 368e0f66ef8SJohn Baldwin return (ithd); 369e0f66ef8SJohn Baldwin } 370bafe5a31SPaolo Pisati #else 371bafe5a31SPaolo Pisati static struct intr_thread * 372bafe5a31SPaolo Pisati ithread_create(const char *name, struct intr_handler *ih) 373bafe5a31SPaolo Pisati { 374bafe5a31SPaolo Pisati struct intr_thread *ithd; 375bafe5a31SPaolo Pisati struct thread *td; 376bafe5a31SPaolo Pisati int error; 377bafe5a31SPaolo Pisati 378bafe5a31SPaolo Pisati ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO); 379bafe5a31SPaolo Pisati 380539976ffSJulian Elischer error = kproc_kthread_add(ithread_loop, ih, &intrproc, 3817ab24ea3SJulian Elischer &td, RFSTOPPED | RFHIGHPID, 3829ef95d01SJulian Elischer 0, "intr", "%s", name); 383bafe5a31SPaolo Pisati if (error) 3843745c395SJulian Elischer panic("kproc_create() failed with %d", error); 385982d11f8SJeff Roberson thread_lock(td); 386bafe5a31SPaolo Pisati sched_class(td, PRI_ITHD); 387bafe5a31SPaolo Pisati TD_SET_IWAIT(td); 388982d11f8SJeff Roberson thread_unlock(td); 389bafe5a31SPaolo Pisati td->td_pflags |= TDP_ITHREAD; 390bafe5a31SPaolo Pisati ithd->it_thread = td; 391bafe5a31SPaolo Pisati CTR2(KTR_INTR, "%s: created %s", __func__, name); 392bafe5a31SPaolo Pisati return (ithd); 393bafe5a31SPaolo Pisati } 394bafe5a31SPaolo Pisati #endif 395e0f66ef8SJohn Baldwin 396e0f66ef8SJohn Baldwin static void 397e0f66ef8SJohn Baldwin ithread_destroy(struct intr_thread *ithread) 398e0f66ef8SJohn Baldwin { 399e0f66ef8SJohn Baldwin struct thread *td; 400e0f66ef8SJohn Baldwin 401bb141be1SScott Long CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name); 402e0f66ef8SJohn Baldwin td = ithread->it_thread; 403982d11f8SJeff Roberson thread_lock(td); 404e0f66ef8SJohn Baldwin ithread->it_flags |= IT_DEAD; 40571fad9fdSJulian Elischer if (TD_AWAITING_INTR(td)) { 40671fad9fdSJulian Elischer TD_CLR_IWAIT(td); 407f0393f06SJeff Roberson sched_add(td, SRQ_INTR); 408b4151f71SJohn Baldwin } 409982d11f8SJeff Roberson thread_unlock(td); 410b4151f71SJohn Baldwin } 411b4151f71SJohn Baldwin 412bafe5a31SPaolo Pisati #ifndef INTR_FILTER 413b4151f71SJohn Baldwin int 414e0f66ef8SJohn Baldwin intr_event_add_handler(struct intr_event *ie, const char *name, 415ef544f63SPaolo Pisati driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri, 416ef544f63SPaolo Pisati enum intr_type flags, void **cookiep) 417b4151f71SJohn Baldwin { 418e0f66ef8SJohn Baldwin struct intr_handler *ih, *temp_ih; 419e0f66ef8SJohn Baldwin struct intr_thread *it; 420b4151f71SJohn Baldwin 421ef544f63SPaolo Pisati if (ie == NULL || name == NULL || (handler == NULL && filter == NULL)) 422b4151f71SJohn Baldwin return (EINVAL); 423b4151f71SJohn Baldwin 424e0f66ef8SJohn Baldwin /* Allocate and populate an interrupt handler structure. */ 425e0f66ef8SJohn Baldwin ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO); 426ef544f63SPaolo Pisati ih->ih_filter = filter; 427b4151f71SJohn Baldwin ih->ih_handler = handler; 428b4151f71SJohn Baldwin ih->ih_argument = arg; 429b4151f71SJohn Baldwin ih->ih_name = name; 430e0f66ef8SJohn Baldwin ih->ih_event = ie; 431b4151f71SJohn Baldwin ih->ih_pri = pri; 432ef544f63SPaolo Pisati if (flags & INTR_EXCL) 433b4151f71SJohn Baldwin ih->ih_flags = IH_EXCLUSIVE; 434b4151f71SJohn Baldwin if (flags & INTR_MPSAFE) 435b4151f71SJohn Baldwin ih->ih_flags |= IH_MPSAFE; 436b4151f71SJohn Baldwin if (flags & INTR_ENTROPY) 437b4151f71SJohn Baldwin ih->ih_flags |= IH_ENTROPY; 438b4151f71SJohn Baldwin 439e0f66ef8SJohn Baldwin /* We can only have one exclusive handler in a event. */ 440e0f66ef8SJohn Baldwin mtx_lock(&ie->ie_lock); 441e0f66ef8SJohn Baldwin if (!TAILQ_EMPTY(&ie->ie_handlers)) { 442e0f66ef8SJohn Baldwin if ((flags & INTR_EXCL) || 443e0f66ef8SJohn Baldwin (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) { 444e0f66ef8SJohn Baldwin mtx_unlock(&ie->ie_lock); 445b4151f71SJohn Baldwin free(ih, M_ITHREAD); 446b4151f71SJohn Baldwin return (EINVAL); 447b4151f71SJohn Baldwin } 448e0f66ef8SJohn Baldwin } 449e0f66ef8SJohn Baldwin 450e0f66ef8SJohn Baldwin /* Add the new handler to the event in priority order. */ 451e0f66ef8SJohn Baldwin TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) { 452e0f66ef8SJohn Baldwin if (temp_ih->ih_pri > ih->ih_pri) 453e0f66ef8SJohn Baldwin break; 454e0f66ef8SJohn Baldwin } 455e0f66ef8SJohn Baldwin if (temp_ih == NULL) 456e0f66ef8SJohn Baldwin TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next); 457e0f66ef8SJohn Baldwin else 458e0f66ef8SJohn Baldwin TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next); 459e0f66ef8SJohn Baldwin intr_event_update(ie); 460e0f66ef8SJohn Baldwin 461e0f66ef8SJohn Baldwin /* Create a thread if we need one. */ 462ef544f63SPaolo Pisati while (ie->ie_thread == NULL && handler != NULL) { 463e0f66ef8SJohn Baldwin if (ie->ie_flags & IE_ADDING_THREAD) 4640f180a7cSJohn Baldwin msleep(ie, &ie->ie_lock, 0, "ithread", 0); 465e0f66ef8SJohn Baldwin else { 466e0f66ef8SJohn Baldwin ie->ie_flags |= IE_ADDING_THREAD; 467e0f66ef8SJohn Baldwin mtx_unlock(&ie->ie_lock); 468e0f66ef8SJohn Baldwin it = ithread_create("intr: newborn"); 469e0f66ef8SJohn Baldwin mtx_lock(&ie->ie_lock); 470e0f66ef8SJohn Baldwin ie->ie_flags &= ~IE_ADDING_THREAD; 471e0f66ef8SJohn Baldwin ie->ie_thread = it; 472e0f66ef8SJohn Baldwin it->it_event = ie; 473e0f66ef8SJohn Baldwin ithread_update(it); 474e0f66ef8SJohn Baldwin wakeup(ie); 475e0f66ef8SJohn Baldwin } 476e0f66ef8SJohn Baldwin } 477e0f66ef8SJohn Baldwin CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name, 478e0f66ef8SJohn Baldwin ie->ie_name); 479e0f66ef8SJohn Baldwin mtx_unlock(&ie->ie_lock); 480e0f66ef8SJohn Baldwin 481e0f66ef8SJohn Baldwin if (cookiep != NULL) 482e0f66ef8SJohn Baldwin *cookiep = ih; 483e0f66ef8SJohn Baldwin return (0); 484e0f66ef8SJohn Baldwin } 485bafe5a31SPaolo Pisati #else 486bafe5a31SPaolo Pisati int 487bafe5a31SPaolo Pisati intr_event_add_handler(struct intr_event *ie, const char *name, 488bafe5a31SPaolo Pisati driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri, 489bafe5a31SPaolo Pisati enum intr_type flags, void **cookiep) 490bafe5a31SPaolo Pisati { 491bafe5a31SPaolo Pisati struct intr_handler *ih, *temp_ih; 492bafe5a31SPaolo Pisati struct intr_thread *it; 493bafe5a31SPaolo Pisati 494bafe5a31SPaolo Pisati if (ie == NULL || name == NULL || (handler == NULL && filter == NULL)) 495bafe5a31SPaolo Pisati return (EINVAL); 496bafe5a31SPaolo Pisati 497bafe5a31SPaolo Pisati /* Allocate and populate an interrupt handler structure. */ 498bafe5a31SPaolo Pisati ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO); 499bafe5a31SPaolo Pisati ih->ih_filter = filter; 500bafe5a31SPaolo Pisati ih->ih_handler = handler; 501bafe5a31SPaolo Pisati ih->ih_argument = arg; 502bafe5a31SPaolo Pisati ih->ih_name = name; 503bafe5a31SPaolo Pisati ih->ih_event = ie; 504bafe5a31SPaolo Pisati ih->ih_pri = pri; 505bafe5a31SPaolo Pisati if (flags & INTR_EXCL) 506bafe5a31SPaolo Pisati ih->ih_flags = IH_EXCLUSIVE; 507bafe5a31SPaolo Pisati if (flags & INTR_MPSAFE) 508bafe5a31SPaolo Pisati ih->ih_flags |= IH_MPSAFE; 509bafe5a31SPaolo Pisati if (flags & INTR_ENTROPY) 510bafe5a31SPaolo Pisati ih->ih_flags |= IH_ENTROPY; 511bafe5a31SPaolo Pisati 512bafe5a31SPaolo Pisati /* We can only have one exclusive handler in a event. */ 513bafe5a31SPaolo Pisati mtx_lock(&ie->ie_lock); 514bafe5a31SPaolo Pisati if (!TAILQ_EMPTY(&ie->ie_handlers)) { 515bafe5a31SPaolo Pisati if ((flags & INTR_EXCL) || 516bafe5a31SPaolo Pisati (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) { 517bafe5a31SPaolo Pisati mtx_unlock(&ie->ie_lock); 518bafe5a31SPaolo Pisati free(ih, M_ITHREAD); 519bafe5a31SPaolo Pisati return (EINVAL); 520bafe5a31SPaolo Pisati } 521bafe5a31SPaolo Pisati } 522bafe5a31SPaolo Pisati 523bafe5a31SPaolo Pisati /* Add the new handler to the event in priority order. */ 524bafe5a31SPaolo Pisati TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) { 525bafe5a31SPaolo Pisati if (temp_ih->ih_pri > ih->ih_pri) 526bafe5a31SPaolo Pisati break; 527bafe5a31SPaolo Pisati } 528bafe5a31SPaolo Pisati if (temp_ih == NULL) 529bafe5a31SPaolo Pisati TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next); 530bafe5a31SPaolo Pisati else 531bafe5a31SPaolo Pisati TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next); 532bafe5a31SPaolo Pisati intr_event_update(ie); 533bafe5a31SPaolo Pisati 534bafe5a31SPaolo Pisati /* For filtered handlers, create a private ithread to run on. */ 535bafe5a31SPaolo Pisati if (filter != NULL && handler != NULL) { 536bafe5a31SPaolo Pisati mtx_unlock(&ie->ie_lock); 537bafe5a31SPaolo Pisati it = ithread_create("intr: newborn", ih); 538bafe5a31SPaolo Pisati mtx_lock(&ie->ie_lock); 539bafe5a31SPaolo Pisati it->it_event = ie; 540bafe5a31SPaolo Pisati ih->ih_thread = it; 541bafe5a31SPaolo Pisati ithread_update(it); // XXX - do we really need this?!?!? 542bafe5a31SPaolo Pisati } else { /* Create the global per-event thread if we need one. */ 543bafe5a31SPaolo Pisati while (ie->ie_thread == NULL && handler != NULL) { 544bafe5a31SPaolo Pisati if (ie->ie_flags & IE_ADDING_THREAD) 545bafe5a31SPaolo Pisati msleep(ie, &ie->ie_lock, 0, "ithread", 0); 546bafe5a31SPaolo Pisati else { 547bafe5a31SPaolo Pisati ie->ie_flags |= IE_ADDING_THREAD; 548bafe5a31SPaolo Pisati mtx_unlock(&ie->ie_lock); 549bafe5a31SPaolo Pisati it = ithread_create("intr: newborn", ih); 550bafe5a31SPaolo Pisati mtx_lock(&ie->ie_lock); 551bafe5a31SPaolo Pisati ie->ie_flags &= ~IE_ADDING_THREAD; 552bafe5a31SPaolo Pisati ie->ie_thread = it; 553bafe5a31SPaolo Pisati it->it_event = ie; 554bafe5a31SPaolo Pisati ithread_update(it); 555bafe5a31SPaolo Pisati wakeup(ie); 556bafe5a31SPaolo Pisati } 557bafe5a31SPaolo Pisati } 558bafe5a31SPaolo Pisati } 559bafe5a31SPaolo Pisati CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name, 560bafe5a31SPaolo Pisati ie->ie_name); 561bafe5a31SPaolo Pisati mtx_unlock(&ie->ie_lock); 562bafe5a31SPaolo Pisati 563bafe5a31SPaolo Pisati if (cookiep != NULL) 564bafe5a31SPaolo Pisati *cookiep = ih; 565bafe5a31SPaolo Pisati return (0); 566bafe5a31SPaolo Pisati } 567bafe5a31SPaolo Pisati #endif 568b4151f71SJohn Baldwin 569c3045318SJohn Baldwin /* 570c3045318SJohn Baldwin * Return the ie_source field from the intr_event an intr_handler is 571c3045318SJohn Baldwin * associated with. 572c3045318SJohn Baldwin */ 573c3045318SJohn Baldwin void * 574c3045318SJohn Baldwin intr_handler_source(void *cookie) 575c3045318SJohn Baldwin { 576c3045318SJohn Baldwin struct intr_handler *ih; 577c3045318SJohn Baldwin struct intr_event *ie; 578c3045318SJohn Baldwin 579c3045318SJohn Baldwin ih = (struct intr_handler *)cookie; 580c3045318SJohn Baldwin if (ih == NULL) 581c3045318SJohn Baldwin return (NULL); 582c3045318SJohn Baldwin ie = ih->ih_event; 583c3045318SJohn Baldwin KASSERT(ie != NULL, 584c3045318SJohn Baldwin ("interrupt handler \"%s\" has a NULL interrupt event", 585c3045318SJohn Baldwin ih->ih_name)); 586c3045318SJohn Baldwin return (ie->ie_source); 587c3045318SJohn Baldwin } 588c3045318SJohn Baldwin 589bafe5a31SPaolo Pisati #ifndef INTR_FILTER 590b4151f71SJohn Baldwin int 591e0f66ef8SJohn Baldwin intr_event_remove_handler(void *cookie) 592b4151f71SJohn Baldwin { 593e0f66ef8SJohn Baldwin struct intr_handler *handler = (struct intr_handler *)cookie; 594e0f66ef8SJohn Baldwin struct intr_event *ie; 595b4151f71SJohn Baldwin #ifdef INVARIANTS 596e0f66ef8SJohn Baldwin struct intr_handler *ih; 597e0f66ef8SJohn Baldwin #endif 598e0f66ef8SJohn Baldwin #ifdef notyet 599e0f66ef8SJohn Baldwin int dead; 600b4151f71SJohn Baldwin #endif 601b4151f71SJohn Baldwin 6023e5da754SJohn Baldwin if (handler == NULL) 603b4151f71SJohn Baldwin return (EINVAL); 604e0f66ef8SJohn Baldwin ie = handler->ih_event; 605e0f66ef8SJohn Baldwin KASSERT(ie != NULL, 606e0f66ef8SJohn Baldwin ("interrupt handler \"%s\" has a NULL interrupt event", 6073e5da754SJohn Baldwin handler->ih_name)); 608e0f66ef8SJohn Baldwin mtx_lock(&ie->ie_lock); 60991f91617SDavid E. O'Brien CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name, 610e0f66ef8SJohn Baldwin ie->ie_name); 611b4151f71SJohn Baldwin #ifdef INVARIANTS 612e0f66ef8SJohn Baldwin TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) 6133e5da754SJohn Baldwin if (ih == handler) 6143e5da754SJohn Baldwin goto ok; 615e0f66ef8SJohn Baldwin mtx_unlock(&ie->ie_lock); 616e0f66ef8SJohn Baldwin panic("interrupt handler \"%s\" not found in interrupt event \"%s\"", 617e0f66ef8SJohn Baldwin ih->ih_name, ie->ie_name); 6183e5da754SJohn Baldwin ok: 619b4151f71SJohn Baldwin #endif 620de271f01SJohn Baldwin /* 621e0f66ef8SJohn Baldwin * If there is no ithread, then just remove the handler and return. 622e0f66ef8SJohn Baldwin * XXX: Note that an INTR_FAST handler might be running on another 623e0f66ef8SJohn Baldwin * CPU! 624e0f66ef8SJohn Baldwin */ 625e0f66ef8SJohn Baldwin if (ie->ie_thread == NULL) { 626e0f66ef8SJohn Baldwin TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 627e0f66ef8SJohn Baldwin mtx_unlock(&ie->ie_lock); 628e0f66ef8SJohn Baldwin free(handler, M_ITHREAD); 629e0f66ef8SJohn Baldwin return (0); 630e0f66ef8SJohn Baldwin } 631e0f66ef8SJohn Baldwin 632e0f66ef8SJohn Baldwin /* 633de271f01SJohn Baldwin * If the interrupt thread is already running, then just mark this 634de271f01SJohn Baldwin * handler as being dead and let the ithread do the actual removal. 635288e351bSDon Lewis * 636288e351bSDon Lewis * During a cold boot while cold is set, msleep() does not sleep, 637288e351bSDon Lewis * so we have to remove the handler here rather than letting the 638288e351bSDon Lewis * thread do it. 639de271f01SJohn Baldwin */ 640982d11f8SJeff Roberson thread_lock(ie->ie_thread->it_thread); 641e0f66ef8SJohn Baldwin if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) { 642de271f01SJohn Baldwin handler->ih_flags |= IH_DEAD; 643de271f01SJohn Baldwin 644de271f01SJohn Baldwin /* 645de271f01SJohn Baldwin * Ensure that the thread will process the handler list 646de271f01SJohn Baldwin * again and remove this handler if it has already passed 647de271f01SJohn Baldwin * it on the list. 648de271f01SJohn Baldwin */ 649e0f66ef8SJohn Baldwin ie->ie_thread->it_need = 1; 6504d29cb2dSJohn Baldwin } else 651e0f66ef8SJohn Baldwin TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 652982d11f8SJeff Roberson thread_unlock(ie->ie_thread->it_thread); 653e0f66ef8SJohn Baldwin while (handler->ih_flags & IH_DEAD) 6540f180a7cSJohn Baldwin msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0); 655e0f66ef8SJohn Baldwin intr_event_update(ie); 656e0f66ef8SJohn Baldwin #ifdef notyet 657e0f66ef8SJohn Baldwin /* 658e0f66ef8SJohn Baldwin * XXX: This could be bad in the case of ppbus(8). Also, I think 659e0f66ef8SJohn Baldwin * this could lead to races of stale data when servicing an 660e0f66ef8SJohn Baldwin * interrupt. 661e0f66ef8SJohn Baldwin */ 662e0f66ef8SJohn Baldwin dead = 1; 663e0f66ef8SJohn Baldwin TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 664e0f66ef8SJohn Baldwin if (!(ih->ih_flags & IH_FAST)) { 665e0f66ef8SJohn Baldwin dead = 0; 666e0f66ef8SJohn Baldwin break; 667e0f66ef8SJohn Baldwin } 668e0f66ef8SJohn Baldwin } 669e0f66ef8SJohn Baldwin if (dead) { 670e0f66ef8SJohn Baldwin ithread_destroy(ie->ie_thread); 671e0f66ef8SJohn Baldwin ie->ie_thread = NULL; 672e0f66ef8SJohn Baldwin } 673e0f66ef8SJohn Baldwin #endif 674e0f66ef8SJohn Baldwin mtx_unlock(&ie->ie_lock); 675b4151f71SJohn Baldwin free(handler, M_ITHREAD); 676b4151f71SJohn Baldwin return (0); 677b4151f71SJohn Baldwin } 678b4151f71SJohn Baldwin 679b4151f71SJohn Baldwin int 680e0f66ef8SJohn Baldwin intr_event_schedule_thread(struct intr_event *ie) 6813e5da754SJohn Baldwin { 682e0f66ef8SJohn Baldwin struct intr_entropy entropy; 683e0f66ef8SJohn Baldwin struct intr_thread *it; 684b40ce416SJulian Elischer struct thread *td; 68504774f23SJulian Elischer struct thread *ctd; 6863e5da754SJohn Baldwin struct proc *p; 6873e5da754SJohn Baldwin 6883e5da754SJohn Baldwin /* 6893e5da754SJohn Baldwin * If no ithread or no handlers, then we have a stray interrupt. 6903e5da754SJohn Baldwin */ 691e0f66ef8SJohn Baldwin if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || 692e0f66ef8SJohn Baldwin ie->ie_thread == NULL) 6933e5da754SJohn Baldwin return (EINVAL); 6943e5da754SJohn Baldwin 69504774f23SJulian Elischer ctd = curthread; 696e0f66ef8SJohn Baldwin it = ie->ie_thread; 697e0f66ef8SJohn Baldwin td = it->it_thread; 6986f40c417SRobert Watson p = td->td_proc; 699e0f66ef8SJohn Baldwin 7003e5da754SJohn Baldwin /* 7013e5da754SJohn Baldwin * If any of the handlers for this ithread claim to be good 7023e5da754SJohn Baldwin * sources of entropy, then gather some. 7033e5da754SJohn Baldwin */ 704e0f66ef8SJohn Baldwin if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) { 7056f40c417SRobert Watson CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__, 7067ab24ea3SJulian Elischer p->p_pid, td->td_name); 707e0f66ef8SJohn Baldwin entropy.event = (uintptr_t)ie; 708e0f66ef8SJohn Baldwin entropy.td = ctd; 7093e5da754SJohn Baldwin random_harvest(&entropy, sizeof(entropy), 2, 0, 7103e5da754SJohn Baldwin RANDOM_INTERRUPT); 7113e5da754SJohn Baldwin } 7123e5da754SJohn Baldwin 713e0f66ef8SJohn Baldwin KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name)); 7143e5da754SJohn Baldwin 7153e5da754SJohn Baldwin /* 7163e5da754SJohn Baldwin * Set it_need to tell the thread to keep running if it is already 717982d11f8SJeff Roberson * running. Then, lock the thread and see if we actually need to 718982d11f8SJeff Roberson * put it on the runqueue. 7193e5da754SJohn Baldwin */ 720e0f66ef8SJohn Baldwin it->it_need = 1; 721982d11f8SJeff Roberson thread_lock(td); 72271fad9fdSJulian Elischer if (TD_AWAITING_INTR(td)) { 723e0f66ef8SJohn Baldwin CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid, 7247ab24ea3SJulian Elischer td->td_name); 72571fad9fdSJulian Elischer TD_CLR_IWAIT(td); 726f0393f06SJeff Roberson sched_add(td, SRQ_INTR); 7273e5da754SJohn Baldwin } else { 728e0f66ef8SJohn Baldwin CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d", 7297ab24ea3SJulian Elischer __func__, p->p_pid, td->td_name, it->it_need, td->td_state); 7303e5da754SJohn Baldwin } 731982d11f8SJeff Roberson thread_unlock(td); 7323e5da754SJohn Baldwin 7333e5da754SJohn Baldwin return (0); 7343e5da754SJohn Baldwin } 735bafe5a31SPaolo Pisati #else 736bafe5a31SPaolo Pisati int 737bafe5a31SPaolo Pisati intr_event_remove_handler(void *cookie) 738bafe5a31SPaolo Pisati { 739bafe5a31SPaolo Pisati struct intr_handler *handler = (struct intr_handler *)cookie; 740bafe5a31SPaolo Pisati struct intr_event *ie; 741bafe5a31SPaolo Pisati struct intr_thread *it; 742bafe5a31SPaolo Pisati #ifdef INVARIANTS 743bafe5a31SPaolo Pisati struct intr_handler *ih; 744bafe5a31SPaolo Pisati #endif 745bafe5a31SPaolo Pisati #ifdef notyet 746bafe5a31SPaolo Pisati int dead; 747bafe5a31SPaolo Pisati #endif 748bafe5a31SPaolo Pisati 749bafe5a31SPaolo Pisati if (handler == NULL) 750bafe5a31SPaolo Pisati return (EINVAL); 751bafe5a31SPaolo Pisati ie = handler->ih_event; 752bafe5a31SPaolo Pisati KASSERT(ie != NULL, 753bafe5a31SPaolo Pisati ("interrupt handler \"%s\" has a NULL interrupt event", 754bafe5a31SPaolo Pisati handler->ih_name)); 755bafe5a31SPaolo Pisati mtx_lock(&ie->ie_lock); 756bafe5a31SPaolo Pisati CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name, 757bafe5a31SPaolo Pisati ie->ie_name); 758bafe5a31SPaolo Pisati #ifdef INVARIANTS 759bafe5a31SPaolo Pisati TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) 760bafe5a31SPaolo Pisati if (ih == handler) 761bafe5a31SPaolo Pisati goto ok; 762bafe5a31SPaolo Pisati mtx_unlock(&ie->ie_lock); 763bafe5a31SPaolo Pisati panic("interrupt handler \"%s\" not found in interrupt event \"%s\"", 764bafe5a31SPaolo Pisati ih->ih_name, ie->ie_name); 765bafe5a31SPaolo Pisati ok: 766bafe5a31SPaolo Pisati #endif 767bafe5a31SPaolo Pisati /* 768bafe5a31SPaolo Pisati * If there are no ithreads (per event and per handler), then 769bafe5a31SPaolo Pisati * just remove the handler and return. 770bafe5a31SPaolo Pisati * XXX: Note that an INTR_FAST handler might be running on another CPU! 771bafe5a31SPaolo Pisati */ 772bafe5a31SPaolo Pisati if (ie->ie_thread == NULL && handler->ih_thread == NULL) { 773bafe5a31SPaolo Pisati TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 774bafe5a31SPaolo Pisati mtx_unlock(&ie->ie_lock); 775bafe5a31SPaolo Pisati free(handler, M_ITHREAD); 776bafe5a31SPaolo Pisati return (0); 777bafe5a31SPaolo Pisati } 778bafe5a31SPaolo Pisati 779bafe5a31SPaolo Pisati /* Private or global ithread? */ 780bafe5a31SPaolo Pisati it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread; 781bafe5a31SPaolo Pisati /* 782bafe5a31SPaolo Pisati * If the interrupt thread is already running, then just mark this 783bafe5a31SPaolo Pisati * handler as being dead and let the ithread do the actual removal. 784bafe5a31SPaolo Pisati * 785bafe5a31SPaolo Pisati * During a cold boot while cold is set, msleep() does not sleep, 786bafe5a31SPaolo Pisati * so we have to remove the handler here rather than letting the 787bafe5a31SPaolo Pisati * thread do it. 788bafe5a31SPaolo Pisati */ 789982d11f8SJeff Roberson thread_lock(it->it_thread); 790bafe5a31SPaolo Pisati if (!TD_AWAITING_INTR(it->it_thread) && !cold) { 791bafe5a31SPaolo Pisati handler->ih_flags |= IH_DEAD; 792bafe5a31SPaolo Pisati 793bafe5a31SPaolo Pisati /* 794bafe5a31SPaolo Pisati * Ensure that the thread will process the handler list 795bafe5a31SPaolo Pisati * again and remove this handler if it has already passed 796bafe5a31SPaolo Pisati * it on the list. 797bafe5a31SPaolo Pisati */ 798bafe5a31SPaolo Pisati it->it_need = 1; 799bafe5a31SPaolo Pisati } else 800bafe5a31SPaolo Pisati TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 801982d11f8SJeff Roberson thread_unlock(it->it_thread); 802bafe5a31SPaolo Pisati while (handler->ih_flags & IH_DEAD) 803bafe5a31SPaolo Pisati msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0); 804bafe5a31SPaolo Pisati /* 805bafe5a31SPaolo Pisati * At this point, the handler has been disconnected from the event, 806bafe5a31SPaolo Pisati * so we can kill the private ithread if any. 807bafe5a31SPaolo Pisati */ 808bafe5a31SPaolo Pisati if (handler->ih_thread) { 809bafe5a31SPaolo Pisati ithread_destroy(handler->ih_thread); 810bafe5a31SPaolo Pisati handler->ih_thread = NULL; 811bafe5a31SPaolo Pisati } 812bafe5a31SPaolo Pisati intr_event_update(ie); 813bafe5a31SPaolo Pisati #ifdef notyet 814bafe5a31SPaolo Pisati /* 815bafe5a31SPaolo Pisati * XXX: This could be bad in the case of ppbus(8). Also, I think 816bafe5a31SPaolo Pisati * this could lead to races of stale data when servicing an 817bafe5a31SPaolo Pisati * interrupt. 818bafe5a31SPaolo Pisati */ 819bafe5a31SPaolo Pisati dead = 1; 820bafe5a31SPaolo Pisati TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 821bafe5a31SPaolo Pisati if (handler != NULL) { 822bafe5a31SPaolo Pisati dead = 0; 823bafe5a31SPaolo Pisati break; 824bafe5a31SPaolo Pisati } 825bafe5a31SPaolo Pisati } 826bafe5a31SPaolo Pisati if (dead) { 827bafe5a31SPaolo Pisati ithread_destroy(ie->ie_thread); 828bafe5a31SPaolo Pisati ie->ie_thread = NULL; 829bafe5a31SPaolo Pisati } 830bafe5a31SPaolo Pisati #endif 831bafe5a31SPaolo Pisati mtx_unlock(&ie->ie_lock); 832bafe5a31SPaolo Pisati free(handler, M_ITHREAD); 833bafe5a31SPaolo Pisati return (0); 834bafe5a31SPaolo Pisati } 835bafe5a31SPaolo Pisati 836bafe5a31SPaolo Pisati int 837bafe5a31SPaolo Pisati intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it) 838bafe5a31SPaolo Pisati { 839bafe5a31SPaolo Pisati struct intr_entropy entropy; 840bafe5a31SPaolo Pisati struct thread *td; 841bafe5a31SPaolo Pisati struct thread *ctd; 842bafe5a31SPaolo Pisati struct proc *p; 843bafe5a31SPaolo Pisati 844bafe5a31SPaolo Pisati /* 845bafe5a31SPaolo Pisati * If no ithread or no handlers, then we have a stray interrupt. 846bafe5a31SPaolo Pisati */ 847bafe5a31SPaolo Pisati if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL) 848bafe5a31SPaolo Pisati return (EINVAL); 849bafe5a31SPaolo Pisati 850bafe5a31SPaolo Pisati ctd = curthread; 851bafe5a31SPaolo Pisati td = it->it_thread; 852bafe5a31SPaolo Pisati p = td->td_proc; 853bafe5a31SPaolo Pisati 854bafe5a31SPaolo Pisati /* 855bafe5a31SPaolo Pisati * If any of the handlers for this ithread claim to be good 856bafe5a31SPaolo Pisati * sources of entropy, then gather some. 857bafe5a31SPaolo Pisati */ 858bafe5a31SPaolo Pisati if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) { 859bafe5a31SPaolo Pisati CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__, 8607ab24ea3SJulian Elischer p->p_pid, td->td_name); 861bafe5a31SPaolo Pisati entropy.event = (uintptr_t)ie; 862bafe5a31SPaolo Pisati entropy.td = ctd; 863bafe5a31SPaolo Pisati random_harvest(&entropy, sizeof(entropy), 2, 0, 864bafe5a31SPaolo Pisati RANDOM_INTERRUPT); 865bafe5a31SPaolo Pisati } 866bafe5a31SPaolo Pisati 867bafe5a31SPaolo Pisati KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name)); 868bafe5a31SPaolo Pisati 869bafe5a31SPaolo Pisati /* 870bafe5a31SPaolo Pisati * Set it_need to tell the thread to keep running if it is already 871982d11f8SJeff Roberson * running. Then, lock the thread and see if we actually need to 872982d11f8SJeff Roberson * put it on the runqueue. 873bafe5a31SPaolo Pisati */ 874bafe5a31SPaolo Pisati it->it_need = 1; 875982d11f8SJeff Roberson thread_lock(td); 876bafe5a31SPaolo Pisati if (TD_AWAITING_INTR(td)) { 877bafe5a31SPaolo Pisati CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid, 8783c1ffc32SJulian Elischer td->td_name); 879bafe5a31SPaolo Pisati TD_CLR_IWAIT(td); 880bafe5a31SPaolo Pisati sched_add(td, SRQ_INTR); 881bafe5a31SPaolo Pisati } else { 882bafe5a31SPaolo Pisati CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d", 8837ab24ea3SJulian Elischer __func__, p->p_pid, td->td_name, it->it_need, td->td_state); 884bafe5a31SPaolo Pisati } 885982d11f8SJeff Roberson thread_unlock(td); 886bafe5a31SPaolo Pisati 887bafe5a31SPaolo Pisati return (0); 888bafe5a31SPaolo Pisati } 889bafe5a31SPaolo Pisati #endif 8903e5da754SJohn Baldwin 891fe486a37SJohn Baldwin /* 892fe486a37SJohn Baldwin * Add a software interrupt handler to a specified event. If a given event 893fe486a37SJohn Baldwin * is not specified, then a new event is created. 894fe486a37SJohn Baldwin */ 8953e5da754SJohn Baldwin int 896e0f66ef8SJohn Baldwin swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler, 897b4151f71SJohn Baldwin void *arg, int pri, enum intr_type flags, void **cookiep) 8988088699fSJohn Baldwin { 899e0f66ef8SJohn Baldwin struct intr_event *ie; 900b4151f71SJohn Baldwin int error; 9018088699fSJohn Baldwin 902bafe5a31SPaolo Pisati if (flags & INTR_ENTROPY) 9033e5da754SJohn Baldwin return (EINVAL); 9043e5da754SJohn Baldwin 905e0f66ef8SJohn Baldwin ie = (eventp != NULL) ? *eventp : NULL; 9068088699fSJohn Baldwin 907e0f66ef8SJohn Baldwin if (ie != NULL) { 908e0f66ef8SJohn Baldwin if (!(ie->ie_flags & IE_SOFT)) 9093e5da754SJohn Baldwin return (EINVAL); 9103e5da754SJohn Baldwin } else { 911bafe5a31SPaolo Pisati error = intr_event_create(&ie, NULL, IE_SOFT, 912eaf86d16SJohn Baldwin NULL, NULL, NULL, NULL, "swi%d:", pri); 9138088699fSJohn Baldwin if (error) 914b4151f71SJohn Baldwin return (error); 915e0f66ef8SJohn Baldwin if (eventp != NULL) 916e0f66ef8SJohn Baldwin *eventp = ie; 9178088699fSJohn Baldwin } 918ef544f63SPaolo Pisati return (intr_event_add_handler(ie, name, NULL, handler, arg, 919d5a08a60SJake Burkholder (pri * RQ_PPQ) + PI_SOFT, flags, cookiep)); 9208088699fSJohn Baldwin } 9218088699fSJohn Baldwin 9221931cf94SJohn Baldwin /* 923e0f66ef8SJohn Baldwin * Schedule a software interrupt thread. 9241931cf94SJohn Baldwin */ 9251931cf94SJohn Baldwin void 926b4151f71SJohn Baldwin swi_sched(void *cookie, int flags) 9271931cf94SJohn Baldwin { 928e0f66ef8SJohn Baldwin struct intr_handler *ih = (struct intr_handler *)cookie; 929e0f66ef8SJohn Baldwin struct intr_event *ie = ih->ih_event; 9303e5da754SJohn Baldwin int error; 9318088699fSJohn Baldwin 932e0f66ef8SJohn Baldwin CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name, 933e0f66ef8SJohn Baldwin ih->ih_need); 9341931cf94SJohn Baldwin 9351931cf94SJohn Baldwin /* 9363e5da754SJohn Baldwin * Set ih_need for this handler so that if the ithread is already 9373e5da754SJohn Baldwin * running it will execute this handler on the next pass. Otherwise, 9383e5da754SJohn Baldwin * it will execute it the next time it runs. 9391931cf94SJohn Baldwin */ 940b4151f71SJohn Baldwin atomic_store_rel_int(&ih->ih_need, 1); 9411ca2c018SBruce Evans 942b4151f71SJohn Baldwin if (!(flags & SWI_DELAY)) { 94367596082SAttilio Rao PCPU_INC(cnt.v_soft); 944bafe5a31SPaolo Pisati #ifdef INTR_FILTER 945bafe5a31SPaolo Pisati error = intr_event_schedule_thread(ie, ie->ie_thread); 946bafe5a31SPaolo Pisati #else 947e0f66ef8SJohn Baldwin error = intr_event_schedule_thread(ie); 948bafe5a31SPaolo Pisati #endif 9493e5da754SJohn Baldwin KASSERT(error == 0, ("stray software interrupt")); 9508088699fSJohn Baldwin } 9518088699fSJohn Baldwin } 9528088699fSJohn Baldwin 953fe486a37SJohn Baldwin /* 954fe486a37SJohn Baldwin * Remove a software interrupt handler. Currently this code does not 955fe486a37SJohn Baldwin * remove the associated interrupt event if it becomes empty. Calling code 956fe486a37SJohn Baldwin * may do so manually via intr_event_destroy(), but that's not really 957fe486a37SJohn Baldwin * an optimal interface. 958fe486a37SJohn Baldwin */ 959fe486a37SJohn Baldwin int 960fe486a37SJohn Baldwin swi_remove(void *cookie) 961fe486a37SJohn Baldwin { 962fe486a37SJohn Baldwin 963fe486a37SJohn Baldwin return (intr_event_remove_handler(cookie)); 964fe486a37SJohn Baldwin } 965fe486a37SJohn Baldwin 966bafe5a31SPaolo Pisati #ifdef INTR_FILTER 967bafe5a31SPaolo Pisati static void 968bafe5a31SPaolo Pisati priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih) 969bafe5a31SPaolo Pisati { 970bafe5a31SPaolo Pisati struct intr_event *ie; 971bafe5a31SPaolo Pisati 972bafe5a31SPaolo Pisati ie = ih->ih_event; 973bafe5a31SPaolo Pisati /* 974bafe5a31SPaolo Pisati * If this handler is marked for death, remove it from 975bafe5a31SPaolo Pisati * the list of handlers and wake up the sleeper. 976bafe5a31SPaolo Pisati */ 977bafe5a31SPaolo Pisati if (ih->ih_flags & IH_DEAD) { 978bafe5a31SPaolo Pisati mtx_lock(&ie->ie_lock); 979bafe5a31SPaolo Pisati TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next); 980bafe5a31SPaolo Pisati ih->ih_flags &= ~IH_DEAD; 981bafe5a31SPaolo Pisati wakeup(ih); 982bafe5a31SPaolo Pisati mtx_unlock(&ie->ie_lock); 983bafe5a31SPaolo Pisati return; 984bafe5a31SPaolo Pisati } 985bafe5a31SPaolo Pisati 986bafe5a31SPaolo Pisati /* Execute this handler. */ 987bafe5a31SPaolo Pisati CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x", 988bafe5a31SPaolo Pisati __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument, 989bafe5a31SPaolo Pisati ih->ih_name, ih->ih_flags); 990bafe5a31SPaolo Pisati 991bafe5a31SPaolo Pisati if (!(ih->ih_flags & IH_MPSAFE)) 992bafe5a31SPaolo Pisati mtx_lock(&Giant); 993bafe5a31SPaolo Pisati ih->ih_handler(ih->ih_argument); 994bafe5a31SPaolo Pisati if (!(ih->ih_flags & IH_MPSAFE)) 995bafe5a31SPaolo Pisati mtx_unlock(&Giant); 996bafe5a31SPaolo Pisati } 997bafe5a31SPaolo Pisati #endif 998bafe5a31SPaolo Pisati 999e0f66ef8SJohn Baldwin static void 1000e0f66ef8SJohn Baldwin ithread_execute_handlers(struct proc *p, struct intr_event *ie) 1001e0f66ef8SJohn Baldwin { 1002e0f66ef8SJohn Baldwin struct intr_handler *ih, *ihn; 1003e0f66ef8SJohn Baldwin 1004e0f66ef8SJohn Baldwin /* Interrupt handlers should not sleep. */ 1005e0f66ef8SJohn Baldwin if (!(ie->ie_flags & IE_SOFT)) 1006e0f66ef8SJohn Baldwin THREAD_NO_SLEEPING(); 1007e0f66ef8SJohn Baldwin TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) { 1008e0f66ef8SJohn Baldwin 1009e0f66ef8SJohn Baldwin /* 1010e0f66ef8SJohn Baldwin * If this handler is marked for death, remove it from 1011e0f66ef8SJohn Baldwin * the list of handlers and wake up the sleeper. 1012e0f66ef8SJohn Baldwin */ 1013e0f66ef8SJohn Baldwin if (ih->ih_flags & IH_DEAD) { 1014e0f66ef8SJohn Baldwin mtx_lock(&ie->ie_lock); 1015e0f66ef8SJohn Baldwin TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next); 1016e0f66ef8SJohn Baldwin ih->ih_flags &= ~IH_DEAD; 1017e0f66ef8SJohn Baldwin wakeup(ih); 1018e0f66ef8SJohn Baldwin mtx_unlock(&ie->ie_lock); 1019e0f66ef8SJohn Baldwin continue; 1020e0f66ef8SJohn Baldwin } 1021e0f66ef8SJohn Baldwin 1022f2d619c8SPaolo Pisati /* Skip filter only handlers */ 1023f2d619c8SPaolo Pisati if (ih->ih_handler == NULL) 1024f2d619c8SPaolo Pisati continue; 1025f2d619c8SPaolo Pisati 1026e0f66ef8SJohn Baldwin /* 1027e0f66ef8SJohn Baldwin * For software interrupt threads, we only execute 1028e0f66ef8SJohn Baldwin * handlers that have their need flag set. Hardware 1029e0f66ef8SJohn Baldwin * interrupt threads always invoke all of their handlers. 1030e0f66ef8SJohn Baldwin */ 1031e0f66ef8SJohn Baldwin if (ie->ie_flags & IE_SOFT) { 1032e0f66ef8SJohn Baldwin if (!ih->ih_need) 1033e0f66ef8SJohn Baldwin continue; 1034e0f66ef8SJohn Baldwin else 1035e0f66ef8SJohn Baldwin atomic_store_rel_int(&ih->ih_need, 0); 1036e0f66ef8SJohn Baldwin } 1037e0f66ef8SJohn Baldwin 1038e0f66ef8SJohn Baldwin /* Execute this handler. */ 1039e0f66ef8SJohn Baldwin CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x", 1040bafe5a31SPaolo Pisati __func__, p->p_pid, (void *)ih->ih_handler, 1041bafe5a31SPaolo Pisati ih->ih_argument, ih->ih_name, ih->ih_flags); 1042e0f66ef8SJohn Baldwin 1043e0f66ef8SJohn Baldwin if (!(ih->ih_flags & IH_MPSAFE)) 1044e0f66ef8SJohn Baldwin mtx_lock(&Giant); 1045e0f66ef8SJohn Baldwin ih->ih_handler(ih->ih_argument); 1046e0f66ef8SJohn Baldwin if (!(ih->ih_flags & IH_MPSAFE)) 1047e0f66ef8SJohn Baldwin mtx_unlock(&Giant); 1048e0f66ef8SJohn Baldwin } 1049e0f66ef8SJohn Baldwin if (!(ie->ie_flags & IE_SOFT)) 1050e0f66ef8SJohn Baldwin THREAD_SLEEPING_OK(); 1051e0f66ef8SJohn Baldwin 1052e0f66ef8SJohn Baldwin /* 1053e0f66ef8SJohn Baldwin * Interrupt storm handling: 1054e0f66ef8SJohn Baldwin * 1055e0f66ef8SJohn Baldwin * If this interrupt source is currently storming, then throttle 1056e0f66ef8SJohn Baldwin * it to only fire the handler once per clock tick. 1057e0f66ef8SJohn Baldwin * 1058e0f66ef8SJohn Baldwin * If this interrupt source is not currently storming, but the 1059e0f66ef8SJohn Baldwin * number of back to back interrupts exceeds the storm threshold, 1060e0f66ef8SJohn Baldwin * then enter storming mode. 1061e0f66ef8SJohn Baldwin */ 1062e41bcf3cSJohn Baldwin if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold && 1063e41bcf3cSJohn Baldwin !(ie->ie_flags & IE_SOFT)) { 10640ae62c18SNate Lawson /* Report the message only once every second. */ 10650ae62c18SNate Lawson if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) { 1066e0f66ef8SJohn Baldwin printf( 10670ae62c18SNate Lawson "interrupt storm detected on \"%s\"; throttling interrupt source\n", 1068e0f66ef8SJohn Baldwin ie->ie_name); 1069e0f66ef8SJohn Baldwin } 1070e41bcf3cSJohn Baldwin pause("istorm", 1); 1071e0f66ef8SJohn Baldwin } else 1072e0f66ef8SJohn Baldwin ie->ie_count++; 1073e0f66ef8SJohn Baldwin 1074e0f66ef8SJohn Baldwin /* 1075e0f66ef8SJohn Baldwin * Now that all the handlers have had a chance to run, reenable 1076e0f66ef8SJohn Baldwin * the interrupt source. 1077e0f66ef8SJohn Baldwin */ 1078e0f66ef8SJohn Baldwin if (ie->ie_enable != NULL) 1079e0f66ef8SJohn Baldwin ie->ie_enable(ie->ie_source); 1080e0f66ef8SJohn Baldwin } 1081e0f66ef8SJohn Baldwin 1082bafe5a31SPaolo Pisati #ifndef INTR_FILTER 10838088699fSJohn Baldwin /* 1084b4151f71SJohn Baldwin * This is the main code for interrupt threads. 10858088699fSJohn Baldwin */ 108637c84183SPoul-Henning Kamp static void 1087b4151f71SJohn Baldwin ithread_loop(void *arg) 10888088699fSJohn Baldwin { 1089e0f66ef8SJohn Baldwin struct intr_thread *ithd; 1090e0f66ef8SJohn Baldwin struct intr_event *ie; 1091b40ce416SJulian Elischer struct thread *td; 1092b4151f71SJohn Baldwin struct proc *p; 1093eaf86d16SJohn Baldwin u_char cpu; 10948088699fSJohn Baldwin 1095b40ce416SJulian Elischer td = curthread; 1096b40ce416SJulian Elischer p = td->td_proc; 1097e0f66ef8SJohn Baldwin ithd = (struct intr_thread *)arg; 1098e0f66ef8SJohn Baldwin KASSERT(ithd->it_thread == td, 109991f91617SDavid E. O'Brien ("%s: ithread and proc linkage out of sync", __func__)); 1100e0f66ef8SJohn Baldwin ie = ithd->it_event; 1101e0f66ef8SJohn Baldwin ie->ie_count = 0; 1102eaf86d16SJohn Baldwin cpu = NOCPU; 11038088699fSJohn Baldwin 11048088699fSJohn Baldwin /* 11058088699fSJohn Baldwin * As long as we have interrupts outstanding, go through the 11068088699fSJohn Baldwin * list of handlers, giving each one a go at it. 11078088699fSJohn Baldwin */ 11088088699fSJohn Baldwin for (;;) { 1109b4151f71SJohn Baldwin /* 1110b4151f71SJohn Baldwin * If we are an orphaned thread, then just die. 1111b4151f71SJohn Baldwin */ 1112b4151f71SJohn Baldwin if (ithd->it_flags & IT_DEAD) { 1113e0f66ef8SJohn Baldwin CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__, 11147ab24ea3SJulian Elischer p->p_pid, td->td_name); 1115b4151f71SJohn Baldwin free(ithd, M_ITHREAD); 1116ca9a0ddfSJulian Elischer kthread_exit(); 1117b4151f71SJohn Baldwin } 1118b4151f71SJohn Baldwin 1119e0f66ef8SJohn Baldwin /* 1120e0f66ef8SJohn Baldwin * Service interrupts. If another interrupt arrives while 1121e0f66ef8SJohn Baldwin * we are running, it will set it_need to note that we 1122e0f66ef8SJohn Baldwin * should make another pass. 1123e0f66ef8SJohn Baldwin */ 1124b4151f71SJohn Baldwin while (ithd->it_need) { 11258088699fSJohn Baldwin /* 1126e0f66ef8SJohn Baldwin * This might need a full read and write barrier 1127e0f66ef8SJohn Baldwin * to make sure that this write posts before any 1128e0f66ef8SJohn Baldwin * of the memory or device accesses in the 1129e0f66ef8SJohn Baldwin * handlers. 11308088699fSJohn Baldwin */ 1131b4151f71SJohn Baldwin atomic_store_rel_int(&ithd->it_need, 0); 1132e0f66ef8SJohn Baldwin ithread_execute_handlers(p, ie); 11338088699fSJohn Baldwin } 11347870c3c6SJohn Baldwin WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread"); 11357870c3c6SJohn Baldwin mtx_assert(&Giant, MA_NOTOWNED); 11368088699fSJohn Baldwin 11378088699fSJohn Baldwin /* 11388088699fSJohn Baldwin * Processed all our interrupts. Now get the sched 11398088699fSJohn Baldwin * lock. This may take a while and it_need may get 11408088699fSJohn Baldwin * set again, so we have to check it again. 11418088699fSJohn Baldwin */ 1142982d11f8SJeff Roberson thread_lock(td); 1143e0f66ef8SJohn Baldwin if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) { 11447870c3c6SJohn Baldwin TD_SET_IWAIT(td); 1145e0f66ef8SJohn Baldwin ie->ie_count = 0; 1146bf0acc27SJohn Baldwin mi_switch(SW_VOL, NULL); 11478088699fSJohn Baldwin } 1148eaf86d16SJohn Baldwin 1149eaf86d16SJohn Baldwin #ifdef SMP 1150eaf86d16SJohn Baldwin /* 1151eaf86d16SJohn Baldwin * Ensure we are bound to the correct CPU. We can't 1152eaf86d16SJohn Baldwin * move ithreads until SMP is running however, so just 1153eaf86d16SJohn Baldwin * leave interrupts on the boor CPU during boot. 1154eaf86d16SJohn Baldwin */ 1155eaf86d16SJohn Baldwin if (ie->ie_cpu != cpu && smp_started) { 1156eaf86d16SJohn Baldwin cpu = ie->ie_cpu; 1157eaf86d16SJohn Baldwin if (cpu == NOCPU) 1158eaf86d16SJohn Baldwin sched_unbind(td); 1159eaf86d16SJohn Baldwin else 1160eaf86d16SJohn Baldwin sched_bind(td, cpu); 1161eaf86d16SJohn Baldwin } 1162eaf86d16SJohn Baldwin #endif 1163982d11f8SJeff Roberson thread_unlock(td); 11648088699fSJohn Baldwin } 11651931cf94SJohn Baldwin } 1166bafe5a31SPaolo Pisati #else 1167bafe5a31SPaolo Pisati /* 1168bafe5a31SPaolo Pisati * This is the main code for interrupt threads. 1169bafe5a31SPaolo Pisati */ 1170bafe5a31SPaolo Pisati static void 1171bafe5a31SPaolo Pisati ithread_loop(void *arg) 1172bafe5a31SPaolo Pisati { 1173bafe5a31SPaolo Pisati struct intr_thread *ithd; 1174bafe5a31SPaolo Pisati struct intr_handler *ih; 1175bafe5a31SPaolo Pisati struct intr_event *ie; 1176bafe5a31SPaolo Pisati struct thread *td; 1177bafe5a31SPaolo Pisati struct proc *p; 1178bafe5a31SPaolo Pisati int priv; 1179eaf86d16SJohn Baldwin u_char cpu; 1180bafe5a31SPaolo Pisati 1181bafe5a31SPaolo Pisati td = curthread; 1182bafe5a31SPaolo Pisati p = td->td_proc; 1183bafe5a31SPaolo Pisati ih = (struct intr_handler *)arg; 1184bafe5a31SPaolo Pisati priv = (ih->ih_thread != NULL) ? 1 : 0; 1185bafe5a31SPaolo Pisati ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread; 1186bafe5a31SPaolo Pisati KASSERT(ithd->it_thread == td, 1187bafe5a31SPaolo Pisati ("%s: ithread and proc linkage out of sync", __func__)); 1188bafe5a31SPaolo Pisati ie = ithd->it_event; 1189bafe5a31SPaolo Pisati ie->ie_count = 0; 1190eaf86d16SJohn Baldwin cpu = NOCPU; 1191bafe5a31SPaolo Pisati 1192bafe5a31SPaolo Pisati /* 1193bafe5a31SPaolo Pisati * As long as we have interrupts outstanding, go through the 1194bafe5a31SPaolo Pisati * list of handlers, giving each one a go at it. 1195bafe5a31SPaolo Pisati */ 1196bafe5a31SPaolo Pisati for (;;) { 1197bafe5a31SPaolo Pisati /* 1198bafe5a31SPaolo Pisati * If we are an orphaned thread, then just die. 1199bafe5a31SPaolo Pisati */ 1200bafe5a31SPaolo Pisati if (ithd->it_flags & IT_DEAD) { 1201bafe5a31SPaolo Pisati CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__, 12027ab24ea3SJulian Elischer p->p_pid, td->td_name); 1203bafe5a31SPaolo Pisati free(ithd, M_ITHREAD); 1204ca9a0ddfSJulian Elischer kthread_exit(); 1205bafe5a31SPaolo Pisati } 1206bafe5a31SPaolo Pisati 1207bafe5a31SPaolo Pisati /* 1208bafe5a31SPaolo Pisati * Service interrupts. If another interrupt arrives while 1209bafe5a31SPaolo Pisati * we are running, it will set it_need to note that we 1210bafe5a31SPaolo Pisati * should make another pass. 1211bafe5a31SPaolo Pisati */ 1212bafe5a31SPaolo Pisati while (ithd->it_need) { 1213bafe5a31SPaolo Pisati /* 1214bafe5a31SPaolo Pisati * This might need a full read and write barrier 1215bafe5a31SPaolo Pisati * to make sure that this write posts before any 1216bafe5a31SPaolo Pisati * of the memory or device accesses in the 1217bafe5a31SPaolo Pisati * handlers. 1218bafe5a31SPaolo Pisati */ 1219bafe5a31SPaolo Pisati atomic_store_rel_int(&ithd->it_need, 0); 1220bafe5a31SPaolo Pisati if (priv) 1221bafe5a31SPaolo Pisati priv_ithread_execute_handler(p, ih); 1222bafe5a31SPaolo Pisati else 1223bafe5a31SPaolo Pisati ithread_execute_handlers(p, ie); 1224bafe5a31SPaolo Pisati } 1225bafe5a31SPaolo Pisati WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread"); 1226bafe5a31SPaolo Pisati mtx_assert(&Giant, MA_NOTOWNED); 1227bafe5a31SPaolo Pisati 1228bafe5a31SPaolo Pisati /* 1229bafe5a31SPaolo Pisati * Processed all our interrupts. Now get the sched 1230bafe5a31SPaolo Pisati * lock. This may take a while and it_need may get 1231bafe5a31SPaolo Pisati * set again, so we have to check it again. 1232bafe5a31SPaolo Pisati */ 1233982d11f8SJeff Roberson thread_lock(td); 1234bafe5a31SPaolo Pisati if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) { 1235bafe5a31SPaolo Pisati TD_SET_IWAIT(td); 1236bafe5a31SPaolo Pisati ie->ie_count = 0; 1237bafe5a31SPaolo Pisati mi_switch(SW_VOL, NULL); 1238bafe5a31SPaolo Pisati } 1239eaf86d16SJohn Baldwin 1240eaf86d16SJohn Baldwin #ifdef SMP 1241eaf86d16SJohn Baldwin /* 1242eaf86d16SJohn Baldwin * Ensure we are bound to the correct CPU. We can't 1243eaf86d16SJohn Baldwin * move ithreads until SMP is running however, so just 1244eaf86d16SJohn Baldwin * leave interrupts on the boor CPU during boot. 1245eaf86d16SJohn Baldwin */ 1246eaf86d16SJohn Baldwin if (!priv && ie->ie_cpu != cpu && smp_started) { 1247eaf86d16SJohn Baldwin cpu = ie->ie_cpu; 1248eaf86d16SJohn Baldwin if (cpu == NOCPU) 1249eaf86d16SJohn Baldwin sched_unbind(td); 1250eaf86d16SJohn Baldwin else 1251eaf86d16SJohn Baldwin sched_bind(td, cpu); 1252eaf86d16SJohn Baldwin } 1253eaf86d16SJohn Baldwin #endif 1254982d11f8SJeff Roberson thread_unlock(td); 1255bafe5a31SPaolo Pisati } 1256bafe5a31SPaolo Pisati } 1257bafe5a31SPaolo Pisati 1258bafe5a31SPaolo Pisati /* 1259bafe5a31SPaolo Pisati * Main loop for interrupt filter. 1260bafe5a31SPaolo Pisati * 1261bafe5a31SPaolo Pisati * Some architectures (i386, amd64 and arm) require the optional frame 1262bafe5a31SPaolo Pisati * parameter, and use it as the main argument for fast handler execution 1263bafe5a31SPaolo Pisati * when ih_argument == NULL. 1264bafe5a31SPaolo Pisati * 1265bafe5a31SPaolo Pisati * Return value: 1266bafe5a31SPaolo Pisati * o FILTER_STRAY: No filter recognized the event, and no 1267bafe5a31SPaolo Pisati * filter-less handler is registered on this 1268bafe5a31SPaolo Pisati * line. 1269bafe5a31SPaolo Pisati * o FILTER_HANDLED: A filter claimed the event and served it. 1270bafe5a31SPaolo Pisati * o FILTER_SCHEDULE_THREAD: No filter claimed the event, but there's at 1271bafe5a31SPaolo Pisati * least one filter-less handler on this line. 1272bafe5a31SPaolo Pisati * o FILTER_HANDLED | 1273bafe5a31SPaolo Pisati * FILTER_SCHEDULE_THREAD: A filter claimed the event, and asked for 1274bafe5a31SPaolo Pisati * scheduling the per-handler ithread. 1275bafe5a31SPaolo Pisati * 1276bafe5a31SPaolo Pisati * In case an ithread has to be scheduled, in *ithd there will be a 1277bafe5a31SPaolo Pisati * pointer to a struct intr_thread containing the thread to be 1278bafe5a31SPaolo Pisati * scheduled. 1279bafe5a31SPaolo Pisati */ 1280bafe5a31SPaolo Pisati 1281bafe5a31SPaolo Pisati int 1282bafe5a31SPaolo Pisati intr_filter_loop(struct intr_event *ie, struct trapframe *frame, 1283bafe5a31SPaolo Pisati struct intr_thread **ithd) 1284bafe5a31SPaolo Pisati { 1285bafe5a31SPaolo Pisati struct intr_handler *ih; 1286bafe5a31SPaolo Pisati void *arg; 1287bafe5a31SPaolo Pisati int ret, thread_only; 1288bafe5a31SPaolo Pisati 1289bafe5a31SPaolo Pisati ret = 0; 1290bafe5a31SPaolo Pisati thread_only = 0; 1291bafe5a31SPaolo Pisati TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 1292bafe5a31SPaolo Pisati /* 1293bafe5a31SPaolo Pisati * Execute fast interrupt handlers directly. 1294bafe5a31SPaolo Pisati * To support clock handlers, if a handler registers 1295bafe5a31SPaolo Pisati * with a NULL argument, then we pass it a pointer to 1296bafe5a31SPaolo Pisati * a trapframe as its argument. 1297bafe5a31SPaolo Pisati */ 1298bafe5a31SPaolo Pisati arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument); 1299bafe5a31SPaolo Pisati 1300bafe5a31SPaolo Pisati CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__, 1301bafe5a31SPaolo Pisati ih->ih_filter, ih->ih_handler, arg, ih->ih_name); 1302bafe5a31SPaolo Pisati 1303bafe5a31SPaolo Pisati if (ih->ih_filter != NULL) 1304bafe5a31SPaolo Pisati ret = ih->ih_filter(arg); 1305bafe5a31SPaolo Pisati else { 1306bafe5a31SPaolo Pisati thread_only = 1; 1307bafe5a31SPaolo Pisati continue; 1308bafe5a31SPaolo Pisati } 1309bafe5a31SPaolo Pisati 1310bafe5a31SPaolo Pisati if (ret & FILTER_STRAY) 1311bafe5a31SPaolo Pisati continue; 1312bafe5a31SPaolo Pisati else { 1313bafe5a31SPaolo Pisati *ithd = ih->ih_thread; 1314bafe5a31SPaolo Pisati return (ret); 1315bafe5a31SPaolo Pisati } 1316bafe5a31SPaolo Pisati } 1317bafe5a31SPaolo Pisati 1318bafe5a31SPaolo Pisati /* 1319bafe5a31SPaolo Pisati * No filters handled the interrupt and we have at least 1320bafe5a31SPaolo Pisati * one handler without a filter. In this case, we schedule 1321bafe5a31SPaolo Pisati * all of the filter-less handlers to run in the ithread. 1322bafe5a31SPaolo Pisati */ 1323bafe5a31SPaolo Pisati if (thread_only) { 1324bafe5a31SPaolo Pisati *ithd = ie->ie_thread; 1325bafe5a31SPaolo Pisati return (FILTER_SCHEDULE_THREAD); 1326bafe5a31SPaolo Pisati } 1327bafe5a31SPaolo Pisati return (FILTER_STRAY); 1328bafe5a31SPaolo Pisati } 1329bafe5a31SPaolo Pisati 1330bafe5a31SPaolo Pisati /* 1331bafe5a31SPaolo Pisati * Main interrupt handling body. 1332bafe5a31SPaolo Pisati * 1333bafe5a31SPaolo Pisati * Input: 1334bafe5a31SPaolo Pisati * o ie: the event connected to this interrupt. 1335bafe5a31SPaolo Pisati * o frame: some archs (i.e. i386) pass a frame to some. 1336bafe5a31SPaolo Pisati * handlers as their main argument. 1337bafe5a31SPaolo Pisati * Return value: 1338bafe5a31SPaolo Pisati * o 0: everything ok. 1339bafe5a31SPaolo Pisati * o EINVAL: stray interrupt. 1340bafe5a31SPaolo Pisati */ 1341bafe5a31SPaolo Pisati int 1342bafe5a31SPaolo Pisati intr_event_handle(struct intr_event *ie, struct trapframe *frame) 1343bafe5a31SPaolo Pisati { 1344bafe5a31SPaolo Pisati struct intr_thread *ithd; 1345bafe5a31SPaolo Pisati struct thread *td; 1346bafe5a31SPaolo Pisati int thread; 1347bafe5a31SPaolo Pisati 1348bafe5a31SPaolo Pisati ithd = NULL; 1349bafe5a31SPaolo Pisati td = curthread; 1350bafe5a31SPaolo Pisati 1351bafe5a31SPaolo Pisati if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers)) 1352bafe5a31SPaolo Pisati return (EINVAL); 1353bafe5a31SPaolo Pisati 1354bafe5a31SPaolo Pisati td->td_intr_nesting_level++; 1355bafe5a31SPaolo Pisati thread = 0; 1356bafe5a31SPaolo Pisati critical_enter(); 1357bafe5a31SPaolo Pisati thread = intr_filter_loop(ie, frame, &ithd); 1358bafe5a31SPaolo Pisati 1359bafe5a31SPaolo Pisati /* 1360bafe5a31SPaolo Pisati * If the interrupt was fully served, send it an EOI but leave 1361bafe5a31SPaolo Pisati * it unmasked. Otherwise, mask the source as well as sending 1362bafe5a31SPaolo Pisati * it an EOI. 1363bafe5a31SPaolo Pisati */ 1364bafe5a31SPaolo Pisati if (thread & FILTER_HANDLED) { 1365bafe5a31SPaolo Pisati if (ie->ie_eoi != NULL) 1366bafe5a31SPaolo Pisati ie->ie_eoi(ie->ie_source); 1367bafe5a31SPaolo Pisati } else { 13686d2d1c04SJohn Baldwin if (ie->ie_disable != NULL) 13696d2d1c04SJohn Baldwin ie->ie_disable(ie->ie_source); 1370bafe5a31SPaolo Pisati } 1371bafe5a31SPaolo Pisati critical_exit(); 1372bafe5a31SPaolo Pisati 1373bafe5a31SPaolo Pisati /* Interrupt storm logic */ 1374bafe5a31SPaolo Pisati if (thread & FILTER_STRAY) { 1375bafe5a31SPaolo Pisati ie->ie_count++; 1376bafe5a31SPaolo Pisati if (ie->ie_count < intr_storm_threshold) 1377bafe5a31SPaolo Pisati printf("Interrupt stray detection not present\n"); 1378bafe5a31SPaolo Pisati } 1379bafe5a31SPaolo Pisati 1380bafe5a31SPaolo Pisati /* Schedule an ithread if needed. */ 1381bafe5a31SPaolo Pisati if (thread & FILTER_SCHEDULE_THREAD) { 1382bafe5a31SPaolo Pisati if (intr_event_schedule_thread(ie, ithd) != 0) 1383bafe5a31SPaolo Pisati panic("%s: impossible stray interrupt", __func__); 1384bafe5a31SPaolo Pisati } 1385bafe5a31SPaolo Pisati td->td_intr_nesting_level--; 1386bafe5a31SPaolo Pisati return (0); 1387bafe5a31SPaolo Pisati } 1388bafe5a31SPaolo Pisati #endif 13891931cf94SJohn Baldwin 13908b201c42SJohn Baldwin #ifdef DDB 13918b201c42SJohn Baldwin /* 13928b201c42SJohn Baldwin * Dump details about an interrupt handler 13938b201c42SJohn Baldwin */ 13948b201c42SJohn Baldwin static void 1395e0f66ef8SJohn Baldwin db_dump_intrhand(struct intr_handler *ih) 13968b201c42SJohn Baldwin { 13978b201c42SJohn Baldwin int comma; 13988b201c42SJohn Baldwin 13998b201c42SJohn Baldwin db_printf("\t%-10s ", ih->ih_name); 14008b201c42SJohn Baldwin switch (ih->ih_pri) { 14018b201c42SJohn Baldwin case PI_REALTIME: 14028b201c42SJohn Baldwin db_printf("CLK "); 14038b201c42SJohn Baldwin break; 14048b201c42SJohn Baldwin case PI_AV: 14058b201c42SJohn Baldwin db_printf("AV "); 14068b201c42SJohn Baldwin break; 14078b201c42SJohn Baldwin case PI_TTYHIGH: 14088b201c42SJohn Baldwin case PI_TTYLOW: 14098b201c42SJohn Baldwin db_printf("TTY "); 14108b201c42SJohn Baldwin break; 14118b201c42SJohn Baldwin case PI_TAPE: 14128b201c42SJohn Baldwin db_printf("TAPE"); 14138b201c42SJohn Baldwin break; 14148b201c42SJohn Baldwin case PI_NET: 14158b201c42SJohn Baldwin db_printf("NET "); 14168b201c42SJohn Baldwin break; 14178b201c42SJohn Baldwin case PI_DISK: 14188b201c42SJohn Baldwin case PI_DISKLOW: 14198b201c42SJohn Baldwin db_printf("DISK"); 14208b201c42SJohn Baldwin break; 14218b201c42SJohn Baldwin case PI_DULL: 14228b201c42SJohn Baldwin db_printf("DULL"); 14238b201c42SJohn Baldwin break; 14248b201c42SJohn Baldwin default: 14258b201c42SJohn Baldwin if (ih->ih_pri >= PI_SOFT) 14268b201c42SJohn Baldwin db_printf("SWI "); 14278b201c42SJohn Baldwin else 14288b201c42SJohn Baldwin db_printf("%4u", ih->ih_pri); 14298b201c42SJohn Baldwin break; 14308b201c42SJohn Baldwin } 14318b201c42SJohn Baldwin db_printf(" "); 14328b201c42SJohn Baldwin db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC); 14338b201c42SJohn Baldwin db_printf("(%p)", ih->ih_argument); 14348b201c42SJohn Baldwin if (ih->ih_need || 1435ef544f63SPaolo Pisati (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD | 14368b201c42SJohn Baldwin IH_MPSAFE)) != 0) { 14378b201c42SJohn Baldwin db_printf(" {"); 14388b201c42SJohn Baldwin comma = 0; 14398b201c42SJohn Baldwin if (ih->ih_flags & IH_EXCLUSIVE) { 14408b201c42SJohn Baldwin if (comma) 14418b201c42SJohn Baldwin db_printf(", "); 14428b201c42SJohn Baldwin db_printf("EXCL"); 14438b201c42SJohn Baldwin comma = 1; 14448b201c42SJohn Baldwin } 14458b201c42SJohn Baldwin if (ih->ih_flags & IH_ENTROPY) { 14468b201c42SJohn Baldwin if (comma) 14478b201c42SJohn Baldwin db_printf(", "); 14488b201c42SJohn Baldwin db_printf("ENTROPY"); 14498b201c42SJohn Baldwin comma = 1; 14508b201c42SJohn Baldwin } 14518b201c42SJohn Baldwin if (ih->ih_flags & IH_DEAD) { 14528b201c42SJohn Baldwin if (comma) 14538b201c42SJohn Baldwin db_printf(", "); 14548b201c42SJohn Baldwin db_printf("DEAD"); 14558b201c42SJohn Baldwin comma = 1; 14568b201c42SJohn Baldwin } 14578b201c42SJohn Baldwin if (ih->ih_flags & IH_MPSAFE) { 14588b201c42SJohn Baldwin if (comma) 14598b201c42SJohn Baldwin db_printf(", "); 14608b201c42SJohn Baldwin db_printf("MPSAFE"); 14618b201c42SJohn Baldwin comma = 1; 14628b201c42SJohn Baldwin } 14638b201c42SJohn Baldwin if (ih->ih_need) { 14648b201c42SJohn Baldwin if (comma) 14658b201c42SJohn Baldwin db_printf(", "); 14668b201c42SJohn Baldwin db_printf("NEED"); 14678b201c42SJohn Baldwin } 14688b201c42SJohn Baldwin db_printf("}"); 14698b201c42SJohn Baldwin } 14708b201c42SJohn Baldwin db_printf("\n"); 14718b201c42SJohn Baldwin } 14728b201c42SJohn Baldwin 14738b201c42SJohn Baldwin /* 1474e0f66ef8SJohn Baldwin * Dump details about a event. 14758b201c42SJohn Baldwin */ 14768b201c42SJohn Baldwin void 1477e0f66ef8SJohn Baldwin db_dump_intr_event(struct intr_event *ie, int handlers) 14788b201c42SJohn Baldwin { 1479e0f66ef8SJohn Baldwin struct intr_handler *ih; 1480e0f66ef8SJohn Baldwin struct intr_thread *it; 14818b201c42SJohn Baldwin int comma; 14828b201c42SJohn Baldwin 1483e0f66ef8SJohn Baldwin db_printf("%s ", ie->ie_fullname); 1484e0f66ef8SJohn Baldwin it = ie->ie_thread; 1485e0f66ef8SJohn Baldwin if (it != NULL) 1486e0f66ef8SJohn Baldwin db_printf("(pid %d)", it->it_thread->td_proc->p_pid); 1487e0f66ef8SJohn Baldwin else 1488e0f66ef8SJohn Baldwin db_printf("(no thread)"); 1489eaf86d16SJohn Baldwin if (ie->ie_cpu != NOCPU) 1490eaf86d16SJohn Baldwin db_printf(" (CPU %d)", ie->ie_cpu); 1491e0f66ef8SJohn Baldwin if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 || 1492e0f66ef8SJohn Baldwin (it != NULL && it->it_need)) { 14938b201c42SJohn Baldwin db_printf(" {"); 14948b201c42SJohn Baldwin comma = 0; 1495e0f66ef8SJohn Baldwin if (ie->ie_flags & IE_SOFT) { 14968b201c42SJohn Baldwin db_printf("SOFT"); 14978b201c42SJohn Baldwin comma = 1; 14988b201c42SJohn Baldwin } 1499e0f66ef8SJohn Baldwin if (ie->ie_flags & IE_ENTROPY) { 15008b201c42SJohn Baldwin if (comma) 15018b201c42SJohn Baldwin db_printf(", "); 15028b201c42SJohn Baldwin db_printf("ENTROPY"); 15038b201c42SJohn Baldwin comma = 1; 15048b201c42SJohn Baldwin } 1505e0f66ef8SJohn Baldwin if (ie->ie_flags & IE_ADDING_THREAD) { 15068b201c42SJohn Baldwin if (comma) 15078b201c42SJohn Baldwin db_printf(", "); 1508e0f66ef8SJohn Baldwin db_printf("ADDING_THREAD"); 15098b201c42SJohn Baldwin comma = 1; 15108b201c42SJohn Baldwin } 1511e0f66ef8SJohn Baldwin if (it != NULL && it->it_need) { 15128b201c42SJohn Baldwin if (comma) 15138b201c42SJohn Baldwin db_printf(", "); 15148b201c42SJohn Baldwin db_printf("NEED"); 15158b201c42SJohn Baldwin } 15168b201c42SJohn Baldwin db_printf("}"); 15178b201c42SJohn Baldwin } 15188b201c42SJohn Baldwin db_printf("\n"); 15198b201c42SJohn Baldwin 15208b201c42SJohn Baldwin if (handlers) 1521e0f66ef8SJohn Baldwin TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) 15228b201c42SJohn Baldwin db_dump_intrhand(ih); 15238b201c42SJohn Baldwin } 1524e0f66ef8SJohn Baldwin 1525e0f66ef8SJohn Baldwin /* 1526e0f66ef8SJohn Baldwin * Dump data about interrupt handlers 1527e0f66ef8SJohn Baldwin */ 1528e0f66ef8SJohn Baldwin DB_SHOW_COMMAND(intr, db_show_intr) 1529e0f66ef8SJohn Baldwin { 1530e0f66ef8SJohn Baldwin struct intr_event *ie; 153119e9205aSJohn Baldwin int all, verbose; 1532e0f66ef8SJohn Baldwin 1533e0f66ef8SJohn Baldwin verbose = index(modif, 'v') != NULL; 1534e0f66ef8SJohn Baldwin all = index(modif, 'a') != NULL; 1535e0f66ef8SJohn Baldwin TAILQ_FOREACH(ie, &event_list, ie_list) { 1536e0f66ef8SJohn Baldwin if (!all && TAILQ_EMPTY(&ie->ie_handlers)) 1537e0f66ef8SJohn Baldwin continue; 1538e0f66ef8SJohn Baldwin db_dump_intr_event(ie, verbose); 153919e9205aSJohn Baldwin if (db_pager_quit) 154019e9205aSJohn Baldwin break; 1541e0f66ef8SJohn Baldwin } 1542e0f66ef8SJohn Baldwin } 15438b201c42SJohn Baldwin #endif /* DDB */ 15448b201c42SJohn Baldwin 1545b4151f71SJohn Baldwin /* 15468088699fSJohn Baldwin * Start standard software interrupt threads 15471931cf94SJohn Baldwin */ 15481931cf94SJohn Baldwin static void 1549b4151f71SJohn Baldwin start_softintr(void *dummy) 15501931cf94SJohn Baldwin { 15518804bf6bSJohn Baldwin struct proc *p; 1552b4151f71SJohn Baldwin 1553e0f66ef8SJohn Baldwin if (swi_add(&clk_intr_event, "clock", softclock, NULL, SWI_CLOCK, 1554b4151f71SJohn Baldwin INTR_MPSAFE, &softclock_ih) || 155579501b66SScott Long swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih)) 1556b4151f71SJohn Baldwin panic("died while creating standard software ithreads"); 15573e5da754SJohn Baldwin 1558e0f66ef8SJohn Baldwin p = clk_intr_event->ie_thread->it_thread->td_proc; 15598804bf6bSJohn Baldwin PROC_LOCK(p); 15608804bf6bSJohn Baldwin p->p_flag |= P_NOLOAD; 15618804bf6bSJohn Baldwin PROC_UNLOCK(p); 15621931cf94SJohn Baldwin } 1563237fdd78SRobert Watson SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, 1564237fdd78SRobert Watson NULL); 15651931cf94SJohn Baldwin 1566d279178dSThomas Moestl /* 1567d279178dSThomas Moestl * Sysctls used by systat and others: hw.intrnames and hw.intrcnt. 1568d279178dSThomas Moestl * The data for this machine dependent, and the declarations are in machine 1569d279178dSThomas Moestl * dependent code. The layout of intrnames and intrcnt however is machine 1570d279178dSThomas Moestl * independent. 1571d279178dSThomas Moestl * 1572d279178dSThomas Moestl * We do not know the length of intrcnt and intrnames at compile time, so 1573d279178dSThomas Moestl * calculate things at run time. 1574d279178dSThomas Moestl */ 1575d279178dSThomas Moestl static int 1576d279178dSThomas Moestl sysctl_intrnames(SYSCTL_HANDLER_ARGS) 1577d279178dSThomas Moestl { 1578d279178dSThomas Moestl return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames, 1579d279178dSThomas Moestl req)); 1580d279178dSThomas Moestl } 1581d279178dSThomas Moestl 1582d279178dSThomas Moestl SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD, 1583d279178dSThomas Moestl NULL, 0, sysctl_intrnames, "", "Interrupt Names"); 1584d279178dSThomas Moestl 1585d279178dSThomas Moestl static int 1586d279178dSThomas Moestl sysctl_intrcnt(SYSCTL_HANDLER_ARGS) 1587d279178dSThomas Moestl { 1588d279178dSThomas Moestl return (sysctl_handle_opaque(oidp, intrcnt, 1589d279178dSThomas Moestl (char *)eintrcnt - (char *)intrcnt, req)); 1590d279178dSThomas Moestl } 1591d279178dSThomas Moestl 1592d279178dSThomas Moestl SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD, 1593d279178dSThomas Moestl NULL, 0, sysctl_intrcnt, "", "Interrupt Counts"); 15948b201c42SJohn Baldwin 15958b201c42SJohn Baldwin #ifdef DDB 15968b201c42SJohn Baldwin /* 15978b201c42SJohn Baldwin * DDB command to dump the interrupt statistics. 15988b201c42SJohn Baldwin */ 15998b201c42SJohn Baldwin DB_SHOW_COMMAND(intrcnt, db_show_intrcnt) 16008b201c42SJohn Baldwin { 16018b201c42SJohn Baldwin u_long *i; 16028b201c42SJohn Baldwin char *cp; 16038b201c42SJohn Baldwin 16048b201c42SJohn Baldwin cp = intrnames; 160519e9205aSJohn Baldwin for (i = intrcnt; i != eintrcnt && !db_pager_quit; i++) { 16068b201c42SJohn Baldwin if (*cp == '\0') 16078b201c42SJohn Baldwin break; 16088b201c42SJohn Baldwin if (*i != 0) 16098b201c42SJohn Baldwin db_printf("%s\t%lu\n", cp, *i); 16108b201c42SJohn Baldwin cp += strlen(cp) + 1; 16118b201c42SJohn Baldwin } 16128b201c42SJohn Baldwin } 16138b201c42SJohn Baldwin #endif 1614