/*-
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
	struct intr_event *it_event;
	struct thread *it_thread;	/* Kernel thread. */
	int	it_flags;		/* (j) IT_* flags. */
	int	it_need;		/* Needs service. */
};

/* Interrupt thread flags kept in it_flags */
#define	IT_DEAD		0x000001	/* Thread is waiting to exit. */
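
/*
 * An intr_event describes a single interrupt source and keeps a
 * priority-sorted list of the intr_handlers attached to it.  Handlers
 * that need thread context run from an intr_thread: without INTR_FILTER
 * there is at most one such thread per event, while with INTR_FILTER a
 * handler that registers both a filter and a handler is given its own
 * private thread.
 */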

struct intr_entropy {
	struct thread *td;
	uintptr_t event;
};

struct intr_event *clk_intr_event;
struct intr_event *tty_intr_event;
void	*vm_ih;
struct proc *intrproc;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static int intr_storm_threshold = 1000;
TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);

static void	intr_event_update(struct intr_event *ie);
#ifdef INTR_FILTER
static struct intr_thread *ithread_create(const char *name,
    struct intr_handler *ih);
#else
static struct intr_thread *ithread_create(const char *name);
#endif
static void	ithread_destroy(struct intr_thread *ithread);
static void	ithread_execute_handlers(struct proc *p,
    struct intr_event *ie);
#ifdef INTR_FILTER
static void	priv_ithread_execute_handler(struct proc *p,
    struct intr_handler *ih);
#endif
static void	ithread_loop(void *);
static void	ithread_update(struct intr_thread *ithd);
static void	start_softintr(void *);

/* Map an interrupt type to an ithread priority. */
u_char
intr_priority(enum intr_type flags)
{
	u_char pri;

	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
	switch (flags) {
	case INTR_TYPE_TTY:
		pri = PI_TTYLOW;
		break;
	case INTR_TYPE_BIO:
		/*
		 * XXX We need to refine this.  BSD/OS distinguishes
		 * between tape and disk priorities.
		 */
		pri = PI_DISK;
		break;
	case INTR_TYPE_NET:
		pri = PI_NET;
		break;
	case INTR_TYPE_CAM:
		pri = PI_DISK;		/* XXX or PI_CAM? */
		break;
	case INTR_TYPE_AV:		/* Audio/video */
		pri = PI_AV;
		break;
	case INTR_TYPE_CLK:
		pri = PI_REALTIME;
		break;
	case INTR_TYPE_MISC:
		pri = PI_DULL;		/* don't care */
		break;
	default:
		/* We didn't specify an interrupt level. */
		panic("intr_priority: no interrupt type in flags");
	}

	return pri;
}

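/*
 * Illustrative example: a network driver that registers its handler with
 * INTR_TYPE_NET (possibly or'ed with non-type flags such as INTR_MPSAFE,
 * which the mask above strips) ends up with an ithread priority of PI_NET:
 *
 *	pri = intr_priority(INTR_TYPE_NET | INTR_MPSAFE);   (yields PI_NET)
 */
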
/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
	struct intr_event *ie;
	struct thread *td;
	u_char pri;

	ie = ithd->it_event;
	td = ithd->it_thread;

	/* Determine the overall priority of this event. */
	if (TAILQ_EMPTY(&ie->ie_handlers))
		pri = PRI_MAX_ITHD;
	else
		pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;

	/* Update name and priority. */
	strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
	thread_lock(td);
	sched_prio(td, pri);
	thread_unlock(td);
}

/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
	struct intr_handler *ih;
	char *last;
	int missed, space;

	/* Start off with no entropy and just the name of the event. */
	mtx_assert(&ie->ie_lock, MA_OWNED);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	ie->ie_flags &= ~IE_ENTROPY;
	missed = 0;
	space = 1;

	/* Run through all the handlers updating values. */
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
		    sizeof(ie->ie_fullname)) {
			strcat(ie->ie_fullname, " ");
			strcat(ie->ie_fullname, ih->ih_name);
			space = 0;
		} else
			missed++;
		if (ih->ih_flags & IH_ENTROPY)
			ie->ie_flags |= IE_ENTROPY;
	}

	/*
	 * If the handler names were too long, add +'s to indicate missing
	 * names.  If we run out of room and still have +'s to add, change
	 * the last character from a + to a *.
	 */
	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
	while (missed-- > 0) {
		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
			if (*last == '+') {
				*last = '*';
				break;
			} else
				*last = '+';
		} else if (space) {
			strcat(ie->ie_fullname, " +");
			space = 0;
		} else
			strcat(ie->ie_fullname, "+");
	}

	/*
	 * If this event has an ithread, update its priority and
	 * name.
	 */
	if (ie->ie_thread != NULL)
		ithread_update(ie->ie_thread);
	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}
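
/*
 * The resulting ie_fullname is what shows up as the ithread name in
 * ps(1)/top(1) output, e.g. "irq14: ata0" or "swi4: clock" (the strings
 * here are illustrative; the exact names depend on the attached
 * handlers).  When the buffer overflows, trailing handlers show up as
 * "+" and finally "*", per the loop above.
 */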

int
intr_event_create(struct intr_event **event, void *source, int flags,
    void (*disable)(void *), void (*enable)(void *), void (*eoi)(void *),
    int (*assign_cpu)(void *, u_char), const char *fmt, ...)
{
	struct intr_event *ie;
	va_list ap;

	/* The only valid flag during creation is IE_SOFT. */
	if ((flags & ~IE_SOFT) != 0)
		return (EINVAL);
	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
	ie->ie_source = source;
	ie->ie_disable = disable;
	ie->ie_enable = enable;
	ie->ie_eoi = eoi;
	ie->ie_assign_cpu = assign_cpu;
	ie->ie_flags = flags;
	ie->ie_cpu = NOCPU;
	TAILQ_INIT(&ie->ie_handlers);
	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

	va_start(ap, fmt);
	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
	va_end(ap);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	mtx_pool_lock(mtxpool_sleep, &event_list);
	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
	mtx_pool_unlock(mtxpool_sleep, &event_list);
	if (event != NULL)
		*event = ie;
	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
	return (0);
}
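
/*
 * Illustrative sketch (not taken from this file): machine-dependent
 * interrupt code typically creates one event per interrupt source and
 * uses the printf-style name to seed ie_fullname, passing its own
 * mask/unmask/EOI callbacks.  The "my_*" callbacks and "isrc" below are
 * hypothetical MD hooks:
 *
 *	error = intr_event_create(&isrc->is_event, isrc, 0,
 *	    my_mask_intr, my_unmask_intr, my_eoi_intr, my_assign_cpu,
 *	    "irq%d:", irq);
 */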

/*
 * Bind an interrupt event to the specified CPU.  Note that not all
 * platforms support binding an interrupt to a CPU.  For those
 * platforms this request will fail.  For supported platforms, any
 * associated ithreads as well as the primary interrupt context will
 * be bound to the specified CPU.  Using a cpu id of NOCPU unbinds
 * the interrupt event.
 */
int
intr_event_bind(struct intr_event *ie, u_char cpu)
{
	struct thread *td;
	int error;

	/* Need a CPU to bind to. */
	if (cpu != NOCPU && CPU_ABSENT(cpu))
		return (EINVAL);

	if (ie->ie_assign_cpu == NULL)
		return (EOPNOTSUPP);

	/* Don't allow a bind request if the interrupt is already bound. */
	mtx_lock(&ie->ie_lock);
	if (ie->ie_cpu != NOCPU && cpu != NOCPU) {
		mtx_unlock(&ie->ie_lock);
		return (EBUSY);
	}
	mtx_unlock(&ie->ie_lock);

	error = ie->ie_assign_cpu(ie->ie_source, cpu);
	if (error)
		return (error);
	mtx_lock(&ie->ie_lock);
	if (ie->ie_thread != NULL)
		td = ie->ie_thread->it_thread;
	else
		td = NULL;
	if (td != NULL)
		thread_lock(td);
	ie->ie_cpu = cpu;
	if (td != NULL)
		thread_unlock(td);
	mtx_unlock(&ie->ie_lock);
	return (0);
}
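
/*
 * Illustrative sketch: code that wants to steer an interrupt (for
 * example an SMP load-balancing policy; the caller shown here is
 * hypothetical) binds and later unbinds an event with:
 *
 *	error = intr_event_bind(ie, 2);		(bind to CPU 2)
 *	...
 *	error = intr_event_bind(ie, NOCPU);	(remove the binding)
 *
 * EOPNOTSUPP is returned on platforms whose interrupt code does not
 * install an ie_assign_cpu method.
 */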

int
intr_event_destroy(struct intr_event *ie)
{

	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		mtx_unlock(&ie->ie_lock);
		return (EBUSY);
	}
	mtx_pool_lock(mtxpool_sleep, &event_list);
	TAILQ_REMOVE(&event_list, ie, ie_list);
	mtx_pool_unlock(mtxpool_sleep, &event_list);
#ifndef notyet
	if (ie->ie_thread != NULL) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	mtx_destroy(&ie->ie_lock);
	free(ie, M_ITHREAD);
	return (0);
}

#ifndef INTR_FILTER
static struct intr_thread *
ithread_create(const char *name)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
	    &td, RFSTOPPED | RFHIGHPID,
	    0, "intr", "%s", name);
	if (error)
		panic("kproc_create() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}
#else
static struct intr_thread *
ithread_create(const char *name, struct intr_handler *ih)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ih, &intrproc,
	    &td, RFSTOPPED | RFHIGHPID,
	    0, "intr", "%s", name);
	if (error)
		panic("kproc_create() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}
#endif

static void
ithread_destroy(struct intr_thread *ithread)
{
	struct thread *td;

	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
	td = ithread->it_thread;
	thread_lock(td);
	ithread->it_flags |= IT_DEAD;
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	}
	thread_unlock(td);
}

#ifndef INTR_FILTER
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	ih->ih_name = name;
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Add the new handler to the event in priority order. */
	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	intr_event_update(ie);

	/* Create a thread if we need one. */
	while (ie->ie_thread == NULL && handler != NULL) {
		if (ie->ie_flags & IE_ADDING_THREAD)
			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
		else {
			ie->ie_flags |= IE_ADDING_THREAD;
			mtx_unlock(&ie->ie_lock);
			it = ithread_create("intr: newborn");
			mtx_lock(&ie->ie_lock);
			ie->ie_flags &= ~IE_ADDING_THREAD;
			ie->ie_thread = it;
			it->it_event = ie;
			ithread_update(it);
			wakeup(ie);
		}
	}
	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}
#else
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	ih->ih_name = name;
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Add the new handler to the event in priority order. */
	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	intr_event_update(ie);

	/* For filtered handlers, create a private ithread to run on. */
	if (filter != NULL && handler != NULL) {
		mtx_unlock(&ie->ie_lock);
		it = ithread_create("intr: newborn", ih);
		mtx_lock(&ie->ie_lock);
		it->it_event = ie;
		ih->ih_thread = it;
		ithread_update(it); // XXX - do we really need this?!?!?
	} else { /* Create the global per-event thread if we need one. */
		while (ie->ie_thread == NULL && handler != NULL) {
			if (ie->ie_flags & IE_ADDING_THREAD)
				msleep(ie, &ie->ie_lock, 0, "ithread", 0);
			else {
				ie->ie_flags |= IE_ADDING_THREAD;
				mtx_unlock(&ie->ie_lock);
				it = ithread_create("intr: newborn", ih);
				mtx_lock(&ie->ie_lock);
				ie->ie_flags &= ~IE_ADDING_THREAD;
				ie->ie_thread = it;
				it->it_event = ie;
				ithread_update(it);
				wakeup(ie);
			}
		}
	}
	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}
#endif
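
/*
 * Illustrative sketch: device drivers normally reach
 * intr_event_add_handler() through bus_setup_intr(9) rather than calling
 * it directly.  The "foo" driver below is hypothetical; it registers an
 * optional filter plus a threaded handler, so with INTR_FILTER the
 * handler runs on its own private ithread and otherwise on the event's
 * shared one:
 *
 *	error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE,
 *	    foo_filter, foo_intr, sc, &sc->intr_cookie);
 */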

/*
 * Return the ie_source field from the intr_event an intr_handler is
 * associated with.
 */
void *
intr_handler_source(void *cookie)
{
	struct intr_handler *ih;
	struct intr_event *ie;

	ih = (struct intr_handler *)cookie;
	if (ih == NULL)
		return (NULL);
	ie = ih->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    ih->ih_name));
	return (ie->ie_source);
}

#ifndef INTR_FILTER
int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
#ifdef INVARIANTS
	struct intr_handler *ih;
#endif
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ie->ie_lock);
	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
	    ih->ih_name, ie->ie_name);
ok:
#endif
	/*
	 * If there is no ithread, then just remove the handler and return.
	 * XXX: Note that an INTR_FAST handler might be running on another
	 * CPU!
	 */
	if (ie->ie_thread == NULL) {
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 *
	 * During a cold boot while cold is set, msleep() does not sleep,
	 * so we have to remove the handler here rather than letting the
	 * thread do it.
	 */
	thread_lock(ie->ie_thread->it_thread);
	if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 */
		ie->ie_thread->it_need = 1;
	} else
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
	thread_unlock(ie->ie_thread->it_thread);
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	intr_event_update(ie);
#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(8).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (!(ih->ih_flags & IH_FAST)) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}
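
/*
 * Illustrative sketch: drivers normally reach intr_event_remove_handler()
 * through bus_teardown_intr(9), passing back the cookie obtained from
 * bus_setup_intr(9) (the "sc" fields are hypothetical):
 *
 *	bus_teardown_intr(dev, sc->irq_res, sc->intr_cookie);
 *
 * If the ithread is currently walking the handler list, the handler is
 * only marked IH_DEAD here and the caller sleeps until the thread has
 * dropped it.
 */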

int
intr_event_schedule_thread(struct intr_event *ie)
{
	struct intr_entropy entropy;
	struct intr_thread *it;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
	    ie->ie_thread == NULL)
		return (EINVAL);

	ctd = curthread;
	it = ie->ie_thread;
	td = it->it_thread;
	p = td->td_proc;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
		    p->p_pid, td->td_name);
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest(&entropy, sizeof(entropy), 2, 0,
		    RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 */
	it->it_need = 1;
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
	}
	thread_unlock(td);

	return (0);
}
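
/*
 * Callers use the return value to detect stray interrupts: swi_sched()
 * below asserts on a non-zero return, and machine-dependent dispatch
 * code would typically log it.  A minimal sketch of a hypothetical
 * caller:
 *
 *	if (intr_event_schedule_thread(ie) != 0)
 *		log(LOG_ERR, "stray interrupt on %s\n", ie->ie_name);
 */
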
#else
int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
	struct intr_thread *it;
#ifdef INVARIANTS
	struct intr_handler *ih;
#endif
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ie->ie_lock);
	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
	    ih->ih_name, ie->ie_name);
ok:
#endif
	/*
	 * If there are no ithreads (per event and per handler), then
	 * just remove the handler and return.
	 * XXX: Note that an INTR_FAST handler might be running on another CPU!
	 */
	if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/* Private or global ithread? */
	it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;
	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 *
	 * During a cold boot while cold is set, msleep() does not sleep,
	 * so we have to remove the handler here rather than letting the
	 * thread do it.
	 */
	thread_lock(it->it_thread);
	if (!TD_AWAITING_INTR(it->it_thread) && !cold) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 */
		it->it_need = 1;
	} else
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
	thread_unlock(it->it_thread);
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	/*
	 * At this point, the handler has been disconnected from the event,
	 * so we can kill the private ithread if any.
	 */
	if (handler->ih_thread) {
		ithread_destroy(handler->ih_thread);
		handler->ih_thread = NULL;
	}
	intr_event_update(ie);
#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(8).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (handler != NULL) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}

int
intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
{
	struct intr_entropy entropy;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
		return (EINVAL);

	ctd = curthread;
	td = it->it_thread;
	p = td->td_proc;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
		    p->p_pid, td->td_name);
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest(&entropy, sizeof(entropy), 2, 0,
		    RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 */
	it->it_need = 1;
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
	}
	thread_unlock(td);

	return (0);
}
#endif

/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
    void *arg, int pri, enum intr_type flags, void **cookiep)
{
	struct intr_event *ie;
	int error;

	if (flags & INTR_ENTROPY)
		return (EINVAL);

	ie = (eventp != NULL) ? *eventp : NULL;

	if (ie != NULL) {
		if (!(ie->ie_flags & IE_SOFT))
			return (EINVAL);
	} else {
		error = intr_event_create(&ie, NULL, IE_SOFT,
		    NULL, NULL, NULL, NULL, "swi%d:", pri);
		if (error)
			return (error);
		if (eventp != NULL)
			*eventp = ie;
	}
	error = intr_event_add_handler(ie, name, NULL, handler, arg,
	    (pri * RQ_PPQ) + PI_SOFT, flags, cookiep);
	if (error)
		return (error);
	if (pri == SWI_CLOCK) {
		struct proc *p;
		p = ie->ie_thread->it_thread->td_proc;
		PROC_LOCK(p);
		p->p_flag |= P_NOLOAD;
		PROC_UNLOCK(p);
	}
	return (0);
}

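/*
 * Illustrative sketch: subsystems register soft interrupt handlers and
 * keep the returned cookie for swi_sched().  The handler name and cookie
 * variable below are hypothetical examples:
 *
 *	static void *frob_ih;
 *
 *	swi_add(NULL, "frob", frob_softintr, NULL, SWI_VM, INTR_MPSAFE,
 *	    &frob_ih);
 */
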
/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
	struct intr_handler *ih = (struct intr_handler *)cookie;
	struct intr_event *ie = ih->ih_event;
	int error;

	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
	    ih->ih_need);

	/*
	 * Set ih_need for this handler so that if the ithread is already
	 * running it will execute this handler on the next pass.  Otherwise,
	 * it will execute it the next time it runs.
	 */
	atomic_store_rel_int(&ih->ih_need, 1);

	if (!(flags & SWI_DELAY)) {
		PCPU_INC(cnt.v_soft);
#ifdef INTR_FILTER
		error = intr_event_schedule_thread(ie, ie->ie_thread);
#else
		error = intr_event_schedule_thread(ie);
#endif
		KASSERT(error == 0, ("stray software interrupt"));
	}
}

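/*
 * Illustrative sketch: once registered, the handler is kicked from any
 * context that wants the soft interrupt to run (the cookie name matches
 * the hypothetical swi_add() example above):
 *
 *	swi_sched(frob_ih, 0);
 *
 * Passing SWI_DELAY only marks the handler as needing service without
 * scheduling the ithread; something else must wake it up later.
 */
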
/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

	return (intr_event_remove_handler(cookie));
}

#ifdef INTR_FILTER
static void
priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih)
{
	struct intr_event *ie;

	ie = ih->ih_event;
	/*
	 * If this handler is marked for death, remove it from
	 * the list of handlers and wake up the sleeper.
	 */
	if (ih->ih_flags & IH_DEAD) {
		mtx_lock(&ie->ie_lock);
		TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
		ih->ih_flags &= ~IH_DEAD;
		wakeup(ih);
		mtx_unlock(&ie->ie_lock);
		return;
	}

	/* Execute this handler. */
	CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
	    __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
	    ih->ih_name, ih->ih_flags);

	if (!(ih->ih_flags & IH_MPSAFE))
		mtx_lock(&Giant);
	ih->ih_handler(ih->ih_argument);
	if (!(ih->ih_flags & IH_MPSAFE))
		mtx_unlock(&Giant);
}
#endif

static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{
	struct intr_handler *ih, *ihn;

	/* Interrupt handlers should not sleep. */
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_NO_SLEEPING();
	TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {

		/*
		 * If this handler is marked for death, remove it from
		 * the list of handlers and wake up the sleeper.
		 */
		if (ih->ih_flags & IH_DEAD) {
			mtx_lock(&ie->ie_lock);
			TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
			ih->ih_flags &= ~IH_DEAD;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
			continue;
		}

		/* Skip filter only handlers */
		if (ih->ih_handler == NULL)
			continue;

		/*
		 * For software interrupt threads, we only execute
		 * handlers that have their need flag set.  Hardware
		 * interrupt threads always invoke all of their handlers.
		 */
		if (ie->ie_flags & IE_SOFT) {
			if (!ih->ih_need)
				continue;
			else
				atomic_store_rel_int(&ih->ih_need, 0);
		}

		/* Execute this handler. */
		CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
		    __func__, p->p_pid, (void *)ih->ih_handler,
		    ih->ih_argument, ih->ih_name, ih->ih_flags);

		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_lock(&Giant);
		ih->ih_handler(ih->ih_argument);
		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_unlock(&Giant);
	}
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_SLEEPING_OK();

	/*
	 * Interrupt storm handling:
	 *
	 * If this interrupt source is currently storming, then throttle
	 * it to only fire the handler once per clock tick.
	 *
	 * If this interrupt source is not currently storming, but the
	 * number of back-to-back interrupts exceeds the storm threshold,
	 * then enter storming mode.
	 */
	if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
	    !(ie->ie_flags & IE_SOFT)) {
		/* Report the message only once every second. */
		if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
			printf(
	"interrupt storm detected on \"%s\"; throttling interrupt source\n",
			    ie->ie_name);
		}
		pause("istorm", 1);
	} else
		ie->ie_count++;

	/*
	 * Now that all the handlers have had a chance to run, reenable
	 * the interrupt source.
	 */
	if (ie->ie_enable != NULL)
		ie->ie_enable(ie->ie_source);
}

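/*
 * Illustrative note: the storm threshold used above can be tuned at
 * runtime with the hw.intr_storm_threshold sysctl (or preset as a loader
 * tunable), e.g.:
 *
 *	sysctl hw.intr_storm_threshold=2000
 *
 * Setting it to 0 disables storm detection entirely, as follows from the
 * "intr_storm_threshold != 0" test above.
 */
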
#ifndef INTR_FILTER
/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	u_char cpu;

	td = curthread;
	p = td->td_proc;
	ithd = (struct intr_thread *)arg;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;
	cpu = NOCPU;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 */
		while (ithd->it_need) {
			/*
			 * This might need a full read and write barrier
			 * to make sure that this write posts before any
			 * of the memory or device accesses in the
			 * handlers.
			 */
			atomic_store_rel_int(&ithd->it_need, 0);
			ithread_execute_handlers(p, ie);
		}
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL, NULL);
		}

#ifdef SMP
		/*
		 * Ensure we are bound to the correct CPU.  We can't
		 * move ithreads until SMP is running however, so just
		 * leave interrupts on the boot CPU during boot.
		 */
		if (ie->ie_cpu != cpu && smp_started) {
			cpu = ie->ie_cpu;
			if (cpu == NOCPU)
				sched_unbind(td);
			else
				sched_bind(td, cpu);
		}
#endif
		thread_unlock(td);
	}
}
#else
/*
 * This is the main code for interrupt threads.
1180bafe5a31SPaolo Pisati */ 1181bafe5a31SPaolo Pisati static void 1182bafe5a31SPaolo Pisati ithread_loop(void *arg) 1183bafe5a31SPaolo Pisati { 1184bafe5a31SPaolo Pisati struct intr_thread *ithd; 1185bafe5a31SPaolo Pisati struct intr_handler *ih; 1186bafe5a31SPaolo Pisati struct intr_event *ie; 1187bafe5a31SPaolo Pisati struct thread *td; 1188bafe5a31SPaolo Pisati struct proc *p; 1189bafe5a31SPaolo Pisati int priv; 1190eaf86d16SJohn Baldwin u_char cpu; 1191bafe5a31SPaolo Pisati 1192bafe5a31SPaolo Pisati td = curthread; 1193bafe5a31SPaolo Pisati p = td->td_proc; 1194bafe5a31SPaolo Pisati ih = (struct intr_handler *)arg; 1195bafe5a31SPaolo Pisati priv = (ih->ih_thread != NULL) ? 1 : 0; 1196bafe5a31SPaolo Pisati ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread; 1197bafe5a31SPaolo Pisati KASSERT(ithd->it_thread == td, 1198bafe5a31SPaolo Pisati ("%s: ithread and proc linkage out of sync", __func__)); 1199bafe5a31SPaolo Pisati ie = ithd->it_event; 1200bafe5a31SPaolo Pisati ie->ie_count = 0; 1201eaf86d16SJohn Baldwin cpu = NOCPU; 1202bafe5a31SPaolo Pisati 1203bafe5a31SPaolo Pisati /* 1204bafe5a31SPaolo Pisati * As long as we have interrupts outstanding, go through the 1205bafe5a31SPaolo Pisati * list of handlers, giving each one a go at it. 1206bafe5a31SPaolo Pisati */ 1207bafe5a31SPaolo Pisati for (;;) { 1208bafe5a31SPaolo Pisati /* 1209bafe5a31SPaolo Pisati * If we are an orphaned thread, then just die. 1210bafe5a31SPaolo Pisati */ 1211bafe5a31SPaolo Pisati if (ithd->it_flags & IT_DEAD) { 1212bafe5a31SPaolo Pisati CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__, 12137ab24ea3SJulian Elischer p->p_pid, td->td_name); 1214bafe5a31SPaolo Pisati free(ithd, M_ITHREAD); 1215ca9a0ddfSJulian Elischer kthread_exit(); 1216bafe5a31SPaolo Pisati } 1217bafe5a31SPaolo Pisati 1218bafe5a31SPaolo Pisati /* 1219bafe5a31SPaolo Pisati * Service interrupts. If another interrupt arrives while 1220bafe5a31SPaolo Pisati * we are running, it will set it_need to note that we 1221bafe5a31SPaolo Pisati * should make another pass. 1222bafe5a31SPaolo Pisati */ 1223bafe5a31SPaolo Pisati while (ithd->it_need) { 1224bafe5a31SPaolo Pisati /* 1225bafe5a31SPaolo Pisati * This might need a full read and write barrier 1226bafe5a31SPaolo Pisati * to make sure that this write posts before any 1227bafe5a31SPaolo Pisati * of the memory or device accesses in the 1228bafe5a31SPaolo Pisati * handlers. 1229bafe5a31SPaolo Pisati */ 1230bafe5a31SPaolo Pisati atomic_store_rel_int(&ithd->it_need, 0); 1231bafe5a31SPaolo Pisati if (priv) 1232bafe5a31SPaolo Pisati priv_ithread_execute_handler(p, ih); 1233bafe5a31SPaolo Pisati else 1234bafe5a31SPaolo Pisati ithread_execute_handlers(p, ie); 1235bafe5a31SPaolo Pisati } 1236bafe5a31SPaolo Pisati WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread"); 1237bafe5a31SPaolo Pisati mtx_assert(&Giant, MA_NOTOWNED); 1238bafe5a31SPaolo Pisati 1239bafe5a31SPaolo Pisati /* 1240bafe5a31SPaolo Pisati * Processed all our interrupts. Now get the sched 1241bafe5a31SPaolo Pisati * lock. This may take a while and it_need may get 1242bafe5a31SPaolo Pisati * set again, so we have to check it again. 
1243bafe5a31SPaolo Pisati */ 1244982d11f8SJeff Roberson thread_lock(td); 1245bafe5a31SPaolo Pisati if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) { 1246bafe5a31SPaolo Pisati TD_SET_IWAIT(td); 1247bafe5a31SPaolo Pisati ie->ie_count = 0; 1248bafe5a31SPaolo Pisati mi_switch(SW_VOL, NULL); 1249bafe5a31SPaolo Pisati } 1250eaf86d16SJohn Baldwin 1251eaf86d16SJohn Baldwin #ifdef SMP 1252eaf86d16SJohn Baldwin /* 1253eaf86d16SJohn Baldwin * Ensure we are bound to the correct CPU. We can't 1254eaf86d16SJohn Baldwin * move ithreads until SMP is running however, so just 1255eaf86d16SJohn Baldwin * leave interrupts on the boot CPU during boot. 1256eaf86d16SJohn Baldwin */ 1257eaf86d16SJohn Baldwin if (!priv && ie->ie_cpu != cpu && smp_started) { 1258eaf86d16SJohn Baldwin cpu = ie->ie_cpu; 1259eaf86d16SJohn Baldwin if (cpu == NOCPU) 1260eaf86d16SJohn Baldwin sched_unbind(td); 1261eaf86d16SJohn Baldwin else 1262eaf86d16SJohn Baldwin sched_bind(td, cpu); 1263eaf86d16SJohn Baldwin } 1264eaf86d16SJohn Baldwin #endif 1265982d11f8SJeff Roberson thread_unlock(td); 1266bafe5a31SPaolo Pisati } 1267bafe5a31SPaolo Pisati } 1268bafe5a31SPaolo Pisati 1269bafe5a31SPaolo Pisati /* 1270bafe5a31SPaolo Pisati * Main loop for interrupt filter. 1271bafe5a31SPaolo Pisati * 1272bafe5a31SPaolo Pisati * Some architectures (i386, amd64 and arm) require the optional frame 1273bafe5a31SPaolo Pisati * parameter, and use it as the main argument for fast handler execution 1274bafe5a31SPaolo Pisati * when ih_argument == NULL. 1275bafe5a31SPaolo Pisati * 1276bafe5a31SPaolo Pisati * Return value: 1277bafe5a31SPaolo Pisati * o FILTER_STRAY: No filter recognized the event, and no 1278bafe5a31SPaolo Pisati * filter-less handler is registered on this 1279bafe5a31SPaolo Pisati * line. 1280bafe5a31SPaolo Pisati * o FILTER_HANDLED: A filter claimed the event and served it. 1281bafe5a31SPaolo Pisati * o FILTER_SCHEDULE_THREAD: No filter claimed the event, but there's at 1282bafe5a31SPaolo Pisati * least one filter-less handler on this line. 1283bafe5a31SPaolo Pisati * o FILTER_HANDLED | 1284bafe5a31SPaolo Pisati * FILTER_SCHEDULE_THREAD: A filter claimed the event, and asked for 1285bafe5a31SPaolo Pisati * scheduling the per-handler ithread. 1286bafe5a31SPaolo Pisati * 1287bafe5a31SPaolo Pisati * In case an ithread has to be scheduled, in *ithd there will be a 1288bafe5a31SPaolo Pisati * pointer to a struct intr_thread containing the thread to be 1289bafe5a31SPaolo Pisati * scheduled. (An illustrative driver-side sketch of this protocol appears at the end of this file.) 1290bafe5a31SPaolo Pisati */ 1291bafe5a31SPaolo Pisati 1292bafe5a31SPaolo Pisati int 1293bafe5a31SPaolo Pisati intr_filter_loop(struct intr_event *ie, struct trapframe *frame, 1294bafe5a31SPaolo Pisati struct intr_thread **ithd) 1295bafe5a31SPaolo Pisati { 1296bafe5a31SPaolo Pisati struct intr_handler *ih; 1297bafe5a31SPaolo Pisati void *arg; 1298bafe5a31SPaolo Pisati int ret, thread_only; 1299bafe5a31SPaolo Pisati 1300bafe5a31SPaolo Pisati ret = 0; 1301bafe5a31SPaolo Pisati thread_only = 0; 1302bafe5a31SPaolo Pisati TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 1303bafe5a31SPaolo Pisati /* 1304bafe5a31SPaolo Pisati * Execute fast interrupt handlers directly. 1305bafe5a31SPaolo Pisati * To support clock handlers, if a handler registers 1306bafe5a31SPaolo Pisati * with a NULL argument, then we pass it a pointer to 1307bafe5a31SPaolo Pisati * a trapframe as its argument. 1308bafe5a31SPaolo Pisati */ 1309bafe5a31SPaolo Pisati arg = ((ih->ih_argument == NULL) ?
frame : ih->ih_argument); 1310bafe5a31SPaolo Pisati 1311bafe5a31SPaolo Pisati CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__, 1312bafe5a31SPaolo Pisati ih->ih_filter, ih->ih_handler, arg, ih->ih_name); 1313bafe5a31SPaolo Pisati 1314bafe5a31SPaolo Pisati if (ih->ih_filter != NULL) 1315bafe5a31SPaolo Pisati ret = ih->ih_filter(arg); 1316bafe5a31SPaolo Pisati else { 1317bafe5a31SPaolo Pisati thread_only = 1; 1318bafe5a31SPaolo Pisati continue; 1319bafe5a31SPaolo Pisati } 1320bafe5a31SPaolo Pisati 1321bafe5a31SPaolo Pisati if (ret & FILTER_STRAY) 1322bafe5a31SPaolo Pisati continue; 1323bafe5a31SPaolo Pisati else { 1324bafe5a31SPaolo Pisati *ithd = ih->ih_thread; 1325bafe5a31SPaolo Pisati return (ret); 1326bafe5a31SPaolo Pisati } 1327bafe5a31SPaolo Pisati } 1328bafe5a31SPaolo Pisati 1329bafe5a31SPaolo Pisati /* 1330bafe5a31SPaolo Pisati * No filters handled the interrupt and we have at least 1331bafe5a31SPaolo Pisati * one handler without a filter. In this case, we schedule 1332bafe5a31SPaolo Pisati * all of the filter-less handlers to run in the ithread. 1333bafe5a31SPaolo Pisati */ 1334bafe5a31SPaolo Pisati if (thread_only) { 1335bafe5a31SPaolo Pisati *ithd = ie->ie_thread; 1336bafe5a31SPaolo Pisati return (FILTER_SCHEDULE_THREAD); 1337bafe5a31SPaolo Pisati } 1338bafe5a31SPaolo Pisati return (FILTER_STRAY); 1339bafe5a31SPaolo Pisati } 1340bafe5a31SPaolo Pisati 1341bafe5a31SPaolo Pisati /* 1342bafe5a31SPaolo Pisati * Main interrupt handling body. 1343bafe5a31SPaolo Pisati * 1344bafe5a31SPaolo Pisati * Input: 1345bafe5a31SPaolo Pisati * o ie: the event connected to this interrupt. 1346bafe5a31SPaolo Pisati * o frame: some archs (e.g. i386) pass a frame to some 1347bafe5a31SPaolo Pisati * handlers as their main argument. 1348bafe5a31SPaolo Pisati * Return value: 1349bafe5a31SPaolo Pisati * o 0: everything ok. 1350bafe5a31SPaolo Pisati * o EINVAL: stray interrupt. 1351bafe5a31SPaolo Pisati */ 1352bafe5a31SPaolo Pisati int 1353bafe5a31SPaolo Pisati intr_event_handle(struct intr_event *ie, struct trapframe *frame) 1354bafe5a31SPaolo Pisati { 1355bafe5a31SPaolo Pisati struct intr_thread *ithd; 1356bafe5a31SPaolo Pisati struct thread *td; 1357bafe5a31SPaolo Pisati int thread; 1358bafe5a31SPaolo Pisati 1359bafe5a31SPaolo Pisati ithd = NULL; 1360bafe5a31SPaolo Pisati td = curthread; 1361bafe5a31SPaolo Pisati 1362bafe5a31SPaolo Pisati if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers)) 1363bafe5a31SPaolo Pisati return (EINVAL); 1364bafe5a31SPaolo Pisati 1365bafe5a31SPaolo Pisati td->td_intr_nesting_level++; 1366bafe5a31SPaolo Pisati thread = 0; 1367bafe5a31SPaolo Pisati critical_enter(); 1368bafe5a31SPaolo Pisati thread = intr_filter_loop(ie, frame, &ithd); 1369bafe5a31SPaolo Pisati 1370bafe5a31SPaolo Pisati /* 1371bafe5a31SPaolo Pisati * If the interrupt was fully served, send it an EOI but leave 1372bafe5a31SPaolo Pisati * it unmasked. Otherwise, mask the source as well as sending 1373bafe5a31SPaolo Pisati * it an EOI.
1374bafe5a31SPaolo Pisati */ 1375bafe5a31SPaolo Pisati if (thread & FILTER_HANDLED) { 1376bafe5a31SPaolo Pisati if (ie->ie_eoi != NULL) 1377bafe5a31SPaolo Pisati ie->ie_eoi(ie->ie_source); 1378bafe5a31SPaolo Pisati } else { 13796d2d1c04SJohn Baldwin if (ie->ie_disable != NULL) 13806d2d1c04SJohn Baldwin ie->ie_disable(ie->ie_source); 1381bafe5a31SPaolo Pisati } 1382bafe5a31SPaolo Pisati critical_exit(); 1383bafe5a31SPaolo Pisati 1384bafe5a31SPaolo Pisati /* Interrupt storm logic */ 1385bafe5a31SPaolo Pisati if (thread & FILTER_STRAY) { 1386bafe5a31SPaolo Pisati ie->ie_count++; 1387bafe5a31SPaolo Pisati if (ie->ie_count < intr_storm_threshold) 1388bafe5a31SPaolo Pisati printf("Interrupt stray detection not present\n"); 1389bafe5a31SPaolo Pisati } 1390bafe5a31SPaolo Pisati 1391bafe5a31SPaolo Pisati /* Schedule an ithread if needed. */ 1392bafe5a31SPaolo Pisati if (thread & FILTER_SCHEDULE_THREAD) { 1393bafe5a31SPaolo Pisati if (intr_event_schedule_thread(ie, ithd) != 0) 1394bafe5a31SPaolo Pisati panic("%s: impossible stray interrupt", __func__); 1395bafe5a31SPaolo Pisati } 1396bafe5a31SPaolo Pisati td->td_intr_nesting_level--; 1397bafe5a31SPaolo Pisati return (0); 1398bafe5a31SPaolo Pisati } 1399bafe5a31SPaolo Pisati #endif 14001931cf94SJohn Baldwin 14018b201c42SJohn Baldwin #ifdef DDB 14028b201c42SJohn Baldwin /* 14038b201c42SJohn Baldwin * Dump details about an interrupt handler 14048b201c42SJohn Baldwin */ 14058b201c42SJohn Baldwin static void 1406e0f66ef8SJohn Baldwin db_dump_intrhand(struct intr_handler *ih) 14078b201c42SJohn Baldwin { 14088b201c42SJohn Baldwin int comma; 14098b201c42SJohn Baldwin 14108b201c42SJohn Baldwin db_printf("\t%-10s ", ih->ih_name); 14118b201c42SJohn Baldwin switch (ih->ih_pri) { 14128b201c42SJohn Baldwin case PI_REALTIME: 14138b201c42SJohn Baldwin db_printf("CLK "); 14148b201c42SJohn Baldwin break; 14158b201c42SJohn Baldwin case PI_AV: 14168b201c42SJohn Baldwin db_printf("AV "); 14178b201c42SJohn Baldwin break; 14188b201c42SJohn Baldwin case PI_TTYHIGH: 14198b201c42SJohn Baldwin case PI_TTYLOW: 14208b201c42SJohn Baldwin db_printf("TTY "); 14218b201c42SJohn Baldwin break; 14228b201c42SJohn Baldwin case PI_TAPE: 14238b201c42SJohn Baldwin db_printf("TAPE"); 14248b201c42SJohn Baldwin break; 14258b201c42SJohn Baldwin case PI_NET: 14268b201c42SJohn Baldwin db_printf("NET "); 14278b201c42SJohn Baldwin break; 14288b201c42SJohn Baldwin case PI_DISK: 14298b201c42SJohn Baldwin case PI_DISKLOW: 14308b201c42SJohn Baldwin db_printf("DISK"); 14318b201c42SJohn Baldwin break; 14328b201c42SJohn Baldwin case PI_DULL: 14338b201c42SJohn Baldwin db_printf("DULL"); 14348b201c42SJohn Baldwin break; 14358b201c42SJohn Baldwin default: 14368b201c42SJohn Baldwin if (ih->ih_pri >= PI_SOFT) 14378b201c42SJohn Baldwin db_printf("SWI "); 14388b201c42SJohn Baldwin else 14398b201c42SJohn Baldwin db_printf("%4u", ih->ih_pri); 14408b201c42SJohn Baldwin break; 14418b201c42SJohn Baldwin } 14428b201c42SJohn Baldwin db_printf(" "); 14438b201c42SJohn Baldwin db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC); 14448b201c42SJohn Baldwin db_printf("(%p)", ih->ih_argument); 14458b201c42SJohn Baldwin if (ih->ih_need || 1446ef544f63SPaolo Pisati (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD | 14478b201c42SJohn Baldwin IH_MPSAFE)) != 0) { 14488b201c42SJohn Baldwin db_printf(" {"); 14498b201c42SJohn Baldwin comma = 0; 14508b201c42SJohn Baldwin if (ih->ih_flags & IH_EXCLUSIVE) { 14518b201c42SJohn Baldwin if (comma) 14528b201c42SJohn Baldwin db_printf(", "); 14538b201c42SJohn Baldwin db_printf("EXCL"); 
14548b201c42SJohn Baldwin comma = 1; 14558b201c42SJohn Baldwin } 14568b201c42SJohn Baldwin if (ih->ih_flags & IH_ENTROPY) { 14578b201c42SJohn Baldwin if (comma) 14588b201c42SJohn Baldwin db_printf(", "); 14598b201c42SJohn Baldwin db_printf("ENTROPY"); 14608b201c42SJohn Baldwin comma = 1; 14618b201c42SJohn Baldwin } 14628b201c42SJohn Baldwin if (ih->ih_flags & IH_DEAD) { 14638b201c42SJohn Baldwin if (comma) 14648b201c42SJohn Baldwin db_printf(", "); 14658b201c42SJohn Baldwin db_printf("DEAD"); 14668b201c42SJohn Baldwin comma = 1; 14678b201c42SJohn Baldwin } 14688b201c42SJohn Baldwin if (ih->ih_flags & IH_MPSAFE) { 14698b201c42SJohn Baldwin if (comma) 14708b201c42SJohn Baldwin db_printf(", "); 14718b201c42SJohn Baldwin db_printf("MPSAFE"); 14728b201c42SJohn Baldwin comma = 1; 14738b201c42SJohn Baldwin } 14748b201c42SJohn Baldwin if (ih->ih_need) { 14758b201c42SJohn Baldwin if (comma) 14768b201c42SJohn Baldwin db_printf(", "); 14778b201c42SJohn Baldwin db_printf("NEED"); 14788b201c42SJohn Baldwin } 14798b201c42SJohn Baldwin db_printf("}"); 14808b201c42SJohn Baldwin } 14818b201c42SJohn Baldwin db_printf("\n"); 14828b201c42SJohn Baldwin } 14838b201c42SJohn Baldwin 14848b201c42SJohn Baldwin /* 1485e0f66ef8SJohn Baldwin * Dump details about an event. 14868b201c42SJohn Baldwin */ 14878b201c42SJohn Baldwin void 1488e0f66ef8SJohn Baldwin db_dump_intr_event(struct intr_event *ie, int handlers) 14898b201c42SJohn Baldwin { 1490e0f66ef8SJohn Baldwin struct intr_handler *ih; 1491e0f66ef8SJohn Baldwin struct intr_thread *it; 14928b201c42SJohn Baldwin int comma; 14938b201c42SJohn Baldwin 1494e0f66ef8SJohn Baldwin db_printf("%s ", ie->ie_fullname); 1495e0f66ef8SJohn Baldwin it = ie->ie_thread; 1496e0f66ef8SJohn Baldwin if (it != NULL) 1497e0f66ef8SJohn Baldwin db_printf("(pid %d)", it->it_thread->td_proc->p_pid); 1498e0f66ef8SJohn Baldwin else 1499e0f66ef8SJohn Baldwin db_printf("(no thread)"); 1500eaf86d16SJohn Baldwin if (ie->ie_cpu != NOCPU) 1501eaf86d16SJohn Baldwin db_printf(" (CPU %d)", ie->ie_cpu); 1502e0f66ef8SJohn Baldwin if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 || 1503e0f66ef8SJohn Baldwin (it != NULL && it->it_need)) { 15048b201c42SJohn Baldwin db_printf(" {"); 15058b201c42SJohn Baldwin comma = 0; 1506e0f66ef8SJohn Baldwin if (ie->ie_flags & IE_SOFT) { 15078b201c42SJohn Baldwin db_printf("SOFT"); 15088b201c42SJohn Baldwin comma = 1; 15098b201c42SJohn Baldwin } 1510e0f66ef8SJohn Baldwin if (ie->ie_flags & IE_ENTROPY) { 15118b201c42SJohn Baldwin if (comma) 15128b201c42SJohn Baldwin db_printf(", "); 15138b201c42SJohn Baldwin db_printf("ENTROPY"); 15148b201c42SJohn Baldwin comma = 1; 15158b201c42SJohn Baldwin } 1516e0f66ef8SJohn Baldwin if (ie->ie_flags & IE_ADDING_THREAD) { 15178b201c42SJohn Baldwin if (comma) 15188b201c42SJohn Baldwin db_printf(", "); 1519e0f66ef8SJohn Baldwin db_printf("ADDING_THREAD"); 15208b201c42SJohn Baldwin comma = 1; 15218b201c42SJohn Baldwin } 1522e0f66ef8SJohn Baldwin if (it != NULL && it->it_need) { 15238b201c42SJohn Baldwin if (comma) 15248b201c42SJohn Baldwin db_printf(", "); 15258b201c42SJohn Baldwin db_printf("NEED"); 15268b201c42SJohn Baldwin } 15278b201c42SJohn Baldwin db_printf("}"); 15288b201c42SJohn Baldwin } 15298b201c42SJohn Baldwin db_printf("\n"); 15308b201c42SJohn Baldwin 15318b201c42SJohn Baldwin if (handlers) 1532e0f66ef8SJohn Baldwin TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) 15338b201c42SJohn Baldwin db_dump_intrhand(ih); 15348b201c42SJohn Baldwin } 1535e0f66ef8SJohn Baldwin 1536e0f66ef8SJohn Baldwin /* 1537e0f66ef8SJohn
Baldwin * Dump data about interrupt handlers 1538e0f66ef8SJohn Baldwin */ 1539e0f66ef8SJohn Baldwin DB_SHOW_COMMAND(intr, db_show_intr) 1540e0f66ef8SJohn Baldwin { 1541e0f66ef8SJohn Baldwin struct intr_event *ie; 154219e9205aSJohn Baldwin int all, verbose; 1543e0f66ef8SJohn Baldwin 1544e0f66ef8SJohn Baldwin verbose = index(modif, 'v') != NULL; 1545e0f66ef8SJohn Baldwin all = index(modif, 'a') != NULL; 1546e0f66ef8SJohn Baldwin TAILQ_FOREACH(ie, &event_list, ie_list) { 1547e0f66ef8SJohn Baldwin if (!all && TAILQ_EMPTY(&ie->ie_handlers)) 1548e0f66ef8SJohn Baldwin continue; 1549e0f66ef8SJohn Baldwin db_dump_intr_event(ie, verbose); 155019e9205aSJohn Baldwin if (db_pager_quit) 155119e9205aSJohn Baldwin break; 1552e0f66ef8SJohn Baldwin } 1553e0f66ef8SJohn Baldwin } 15548b201c42SJohn Baldwin #endif /* DDB */ 15558b201c42SJohn Baldwin 1556b4151f71SJohn Baldwin /* 15578088699fSJohn Baldwin * Start standard software interrupt threads 15581931cf94SJohn Baldwin */ 15591931cf94SJohn Baldwin static void 1560b4151f71SJohn Baldwin start_softintr(void *dummy) 15611931cf94SJohn Baldwin { 1562b4151f71SJohn Baldwin 15638d809d50SJeff Roberson if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih)) 15648d809d50SJeff Roberson panic("died while creating vm swi ithread"); 15651931cf94SJohn Baldwin } 1566237fdd78SRobert Watson SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, 1567237fdd78SRobert Watson NULL); 15681931cf94SJohn Baldwin 1569d279178dSThomas Moestl /* 1570d279178dSThomas Moestl * Sysctls used by systat and others: hw.intrnames and hw.intrcnt. 1571d279178dSThomas Moestl * The data for this is machine dependent, and the declarations are in machine 1572d279178dSThomas Moestl * dependent code. The layout of intrnames and intrcnt however is machine 1573d279178dSThomas Moestl * independent. 1574d279178dSThomas Moestl * 1575d279178dSThomas Moestl * We do not know the length of intrcnt and intrnames at compile time, so 1576d279178dSThomas Moestl * calculate things at run time. 1577d279178dSThomas Moestl */ 1578d279178dSThomas Moestl static int 1579d279178dSThomas Moestl sysctl_intrnames(SYSCTL_HANDLER_ARGS) 1580d279178dSThomas Moestl { 1581d279178dSThomas Moestl return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames, 1582d279178dSThomas Moestl req)); 1583d279178dSThomas Moestl } 1584d279178dSThomas Moestl 1585d279178dSThomas Moestl SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD, 1586d279178dSThomas Moestl NULL, 0, sysctl_intrnames, "", "Interrupt Names"); 1587d279178dSThomas Moestl 1588d279178dSThomas Moestl static int 1589d279178dSThomas Moestl sysctl_intrcnt(SYSCTL_HANDLER_ARGS) 1590d279178dSThomas Moestl { 1591d279178dSThomas Moestl return (sysctl_handle_opaque(oidp, intrcnt, 1592d279178dSThomas Moestl (char *)eintrcnt - (char *)intrcnt, req)); 1593d279178dSThomas Moestl } 1594d279178dSThomas Moestl 1595d279178dSThomas Moestl SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD, 1596d279178dSThomas Moestl NULL, 0, sysctl_intrcnt, "", "Interrupt Counts"); 15978b201c42SJohn Baldwin 15988b201c42SJohn Baldwin #ifdef DDB 15998b201c42SJohn Baldwin /* 16008b201c42SJohn Baldwin * DDB command to dump the interrupt statistics.
16018b201c42SJohn Baldwin */ 16028b201c42SJohn Baldwin DB_SHOW_COMMAND(intrcnt, db_show_intrcnt) 16038b201c42SJohn Baldwin { 16048b201c42SJohn Baldwin u_long *i; 16058b201c42SJohn Baldwin char *cp; 16068b201c42SJohn Baldwin 16078b201c42SJohn Baldwin cp = intrnames; 160819e9205aSJohn Baldwin for (i = intrcnt; i != eintrcnt && !db_pager_quit; i++) { 16098b201c42SJohn Baldwin if (*cp == '\0') 16108b201c42SJohn Baldwin break; 16118b201c42SJohn Baldwin if (*i != 0) 16128b201c42SJohn Baldwin db_printf("%s\t%lu\n", cp, *i); 16138b201c42SJohn Baldwin cp += strlen(cp) + 1; 16148b201c42SJohn Baldwin } 16158b201c42SJohn Baldwin } 16168b201c42SJohn Baldwin #endif 1617
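/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * the comment above intr_filter_loop() documents the FILTER_* return
 * protocol.  The sketch below shows how a driver-side filter and ithread
 * handler pair might use that protocol when registered through
 * bus_setup_intr(9).  The "foo" device, its softc layout and the FOO_*
 * register macros are hypothetical; FILTER_STRAY, FILTER_HANDLED,
 * FILTER_SCHEDULE_THREAD, INTR_TYPE_MISC, INTR_MPSAFE and bus_setup_intr()
 * are the real kernel interfaces.
 */
#if 0
static int
foo_filter(void *arg)
{
	struct foo_softc *sc = arg;

	/* Not our device: report the event as stray. */
	if ((FOO_READ(sc, FOO_ISR) & FOO_ISR_PENDING) == 0)
		return (FILTER_STRAY);

	/* Acknowledge in the filter and defer the real work to the ithread. */
	FOO_WRITE(sc, FOO_ISR, FOO_ISR_PENDING);
	return (FILTER_HANDLED | FILTER_SCHEDULE_THREAD);
}

static void
foo_intr(void *arg)
{
	struct foo_softc *sc = arg;

	/* Runs in the per-handler ithread scheduled by intr_event_handle(). */
	foo_process_events(sc);
}

static int
foo_setup_interrupt(struct foo_softc *sc)
{

	/* Register the filter and the threaded handler on the same line. */
	return (bus_setup_intr(sc->sc_dev, sc->sc_irq_res,
	    INTR_TYPE_MISC | INTR_MPSAFE, foo_filter, foo_intr, sc,
	    &sc->sc_intrhand));
}
#endif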
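/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * start_softintr() above creates the standard "vm" software interrupt
 * thread with swi_add().  The same pattern can be used by other
 * subsystems; the "example" names below are invented, while swi_add(),
 * swi_sched(), SWI_TQ, INTR_MPSAFE and SI_SUB_SOFTINTR are the real
 * kernel interfaces.
 */
#if 0
static void *example_swi_cookie;

static void
example_swi_handler(void *arg)
{

	/* Runs in a software-interrupt thread once swi_sched() is called. */
}

static void
example_swi_init(void *dummy)
{

	if (swi_add(NULL, "example", example_swi_handler, NULL, SWI_TQ,
	    INTR_MPSAFE, &example_swi_cookie))
		panic("died while creating example swi ithread");
}
SYSINIT(example_swi, SI_SUB_SOFTINTR, SI_ORDER_ANY, example_swi_init, NULL);

static void
example_kick(void)
{

	/* Mark the handler as needing service and wake its swi thread. */
	swi_sched(example_swi_cookie, 0);
}
#endif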