19454b2d8SWarner Losh /*- 28a36da99SPedro F. Giffuni * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 38a36da99SPedro F. Giffuni * 4425f9fdaSStefan Eßer * Copyright (c) 1997, Stefan Esser <se@freebsd.org> 5425f9fdaSStefan Eßer * All rights reserved. 6425f9fdaSStefan Eßer * 7425f9fdaSStefan Eßer * Redistribution and use in source and binary forms, with or without 8425f9fdaSStefan Eßer * modification, are permitted provided that the following conditions 9425f9fdaSStefan Eßer * are met: 10425f9fdaSStefan Eßer * 1. Redistributions of source code must retain the above copyright 11425f9fdaSStefan Eßer * notice unmodified, this list of conditions, and the following 12425f9fdaSStefan Eßer * disclaimer. 13425f9fdaSStefan Eßer * 2. Redistributions in binary form must reproduce the above copyright 14425f9fdaSStefan Eßer * notice, this list of conditions and the following disclaimer in the 15425f9fdaSStefan Eßer * documentation and/or other materials provided with the distribution. 16425f9fdaSStefan Eßer * 17425f9fdaSStefan Eßer * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18425f9fdaSStefan Eßer * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19425f9fdaSStefan Eßer * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20425f9fdaSStefan Eßer * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21425f9fdaSStefan Eßer * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22425f9fdaSStefan Eßer * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23425f9fdaSStefan Eßer * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24425f9fdaSStefan Eßer * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25425f9fdaSStefan Eßer * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26425f9fdaSStefan Eßer * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27425f9fdaSStefan Eßer */ 28425f9fdaSStefan Eßer 29677b542eSDavid E. O'Brien #include <sys/cdefs.h> 30677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$"); 313900ddb2SDoug Rabson 328b201c42SJohn Baldwin #include "opt_ddb.h" 33b7627840SKonstantin Belousov #include "opt_kstack_usage_prof.h" 348b201c42SJohn Baldwin 351c5bb3eaSPeter Wemm #include <sys/param.h> 369a94c9c5SJohn Baldwin #include <sys/bus.h> 37c11110eaSAlfred Perlstein #include <sys/conf.h> 389b33b154SJeff Roberson #include <sys/cpuset.h> 399a94c9c5SJohn Baldwin #include <sys/rtprio.h> 40425f9fdaSStefan Eßer #include <sys/systm.h> 4168352337SDoug Rabson #include <sys/interrupt.h> 421931cf94SJohn Baldwin #include <sys/kernel.h> 431931cf94SJohn Baldwin #include <sys/kthread.h> 441931cf94SJohn Baldwin #include <sys/ktr.h> 4505b2c96fSBruce Evans #include <sys/limits.h> 46f34fa851SJohn Baldwin #include <sys/lock.h> 471931cf94SJohn Baldwin #include <sys/malloc.h> 4835e0e5b3SJohn Baldwin #include <sys/mutex.h> 49cebc7fb1SJohn Baldwin #include <sys/priv.h> 501931cf94SJohn Baldwin #include <sys/proc.h> 51511d1afbSGleb Smirnoff #include <sys/epoch.h> 523e5da754SJohn Baldwin #include <sys/random.h> 53b4151f71SJohn Baldwin #include <sys/resourcevar.h> 5463710c4dSJohn Baldwin #include <sys/sched.h> 55eaf86d16SJohn Baldwin #include <sys/smp.h> 56d279178dSThomas Moestl #include <sys/sysctl.h> 576205924aSKip Macy #include <sys/syslog.h> 581931cf94SJohn Baldwin #include <sys/unistd.h> 591931cf94SJohn Baldwin #include <sys/vmmeter.h> 601931cf94SJohn Baldwin #include <machine/atomic.h> 611931cf94SJohn Baldwin #include <machine/cpu.h> 628088699fSJohn Baldwin #include <machine/md_var.h> 63aba10e13SAlexander Motin #include <machine/smp.h> 64b4151f71SJohn Baldwin #include <machine/stdarg.h> 658b201c42SJohn Baldwin #ifdef DDB 668b201c42SJohn Baldwin #include <ddb/ddb.h> 678b201c42SJohn Baldwin #include <ddb/db_sym.h> 688b201c42SJohn Baldwin #endif 69425f9fdaSStefan Eßer 70e0f66ef8SJohn Baldwin /* 71e0f66ef8SJohn Baldwin * Describe an interrupt thread. There is one of these per interrupt event. 72e0f66ef8SJohn Baldwin */ 73e0f66ef8SJohn Baldwin struct intr_thread { 74e0f66ef8SJohn Baldwin struct intr_event *it_event; 75e0f66ef8SJohn Baldwin struct thread *it_thread; /* Kernel thread. */ 76e0f66ef8SJohn Baldwin int it_flags; /* (j) IT_* flags. */ 77e0f66ef8SJohn Baldwin int it_need; /* Needs service. */ 783e5da754SJohn Baldwin }; 793e5da754SJohn Baldwin 80e0f66ef8SJohn Baldwin /* Interrupt thread flags kept in it_flags */ 81e0f66ef8SJohn Baldwin #define IT_DEAD 0x000001 /* Thread is waiting to exit. */ 82e4cd31ddSJeff Roberson #define IT_WAIT 0x000002 /* Thread is waiting for completion. */ 83e0f66ef8SJohn Baldwin 84e0f66ef8SJohn Baldwin struct intr_entropy { 85e0f66ef8SJohn Baldwin struct thread *td; 86e0f66ef8SJohn Baldwin uintptr_t event; 87e0f66ef8SJohn Baldwin }; 88e0f66ef8SJohn Baldwin 89aba10e13SAlexander Motin struct intr_event *clk_intr_event; 90e0f66ef8SJohn Baldwin struct intr_event *tty_intr_event; 917b1fe905SBruce Evans void *vm_ih; 927ab24ea3SJulian Elischer struct proc *intrproc; 931931cf94SJohn Baldwin 94b4151f71SJohn Baldwin static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads"); 95b4151f71SJohn Baldwin 965d0e8299SConrad Meyer static int intr_storm_threshold = 0; 97af3b2549SHans Petter Selasky SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RWTUN, 987870c3c6SJohn Baldwin &intr_storm_threshold, 0, 997b1fe905SBruce Evans "Number of consecutive interrupts before storm protection is enabled"); 100511d1afbSGleb Smirnoff static int intr_epoch_batch = 1000; 101511d1afbSGleb Smirnoff SYSCTL_INT(_hw, OID_AUTO, intr_epoch_batch, CTLFLAG_RWTUN, &intr_epoch_batch, 102511d1afbSGleb Smirnoff 0, "Maximum interrupt handler executions without re-entering epoch(9)"); 103e0f66ef8SJohn Baldwin static TAILQ_HEAD(, intr_event) event_list = 104e0f66ef8SJohn Baldwin TAILQ_HEAD_INITIALIZER(event_list); 1059b33b154SJeff Roberson static struct mtx event_lock; 1069b33b154SJeff Roberson MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF); 1077b1fe905SBruce Evans 108e0f66ef8SJohn Baldwin static void intr_event_update(struct intr_event *ie); 1091ee1b687SJohn Baldwin static int intr_event_schedule_thread(struct intr_event *ie); 110e0f66ef8SJohn Baldwin static struct intr_thread *ithread_create(const char *name); 111e0f66ef8SJohn Baldwin static void ithread_destroy(struct intr_thread *ithread); 112bafe5a31SPaolo Pisati static void ithread_execute_handlers(struct proc *p, 113bafe5a31SPaolo Pisati struct intr_event *ie); 1147b1fe905SBruce Evans static void ithread_loop(void *); 115e0f66ef8SJohn Baldwin static void ithread_update(struct intr_thread *ithd); 1167b1fe905SBruce Evans static void start_softintr(void *); 1177870c3c6SJohn Baldwin 118bc17acb2SJohn Baldwin /* Map an interrupt type to an ithread priority. */ 119b4151f71SJohn Baldwin u_char 120e0f66ef8SJohn Baldwin intr_priority(enum intr_type flags) 1219a94c9c5SJohn Baldwin { 122b4151f71SJohn Baldwin u_char pri; 1239a94c9c5SJohn Baldwin 124b4151f71SJohn Baldwin flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET | 1255a280d9cSPeter Wemm INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV); 1269a94c9c5SJohn Baldwin switch (flags) { 127b4151f71SJohn Baldwin case INTR_TYPE_TTY: 128d3305205SJohn Baldwin pri = PI_TTY; 1299a94c9c5SJohn Baldwin break; 1309a94c9c5SJohn Baldwin case INTR_TYPE_BIO: 1319a94c9c5SJohn Baldwin pri = PI_DISK; 1329a94c9c5SJohn Baldwin break; 1339a94c9c5SJohn Baldwin case INTR_TYPE_NET: 1349a94c9c5SJohn Baldwin pri = PI_NET; 1359a94c9c5SJohn Baldwin break; 1369a94c9c5SJohn Baldwin case INTR_TYPE_CAM: 137d3305205SJohn Baldwin pri = PI_DISK; 1389a94c9c5SJohn Baldwin break; 139d3305205SJohn Baldwin case INTR_TYPE_AV: 1405a280d9cSPeter Wemm pri = PI_AV; 1415a280d9cSPeter Wemm break; 142b4151f71SJohn Baldwin case INTR_TYPE_CLK: 143b4151f71SJohn Baldwin pri = PI_REALTIME; 144b4151f71SJohn Baldwin break; 1459a94c9c5SJohn Baldwin case INTR_TYPE_MISC: 1469a94c9c5SJohn Baldwin pri = PI_DULL; /* don't care */ 1479a94c9c5SJohn Baldwin break; 1489a94c9c5SJohn Baldwin default: 149b4151f71SJohn Baldwin /* We didn't specify an interrupt level. */ 150e0f66ef8SJohn Baldwin panic("intr_priority: no interrupt type in flags"); 1519a94c9c5SJohn Baldwin } 1529a94c9c5SJohn Baldwin 1539a94c9c5SJohn Baldwin return pri; 1549a94c9c5SJohn Baldwin } 1559a94c9c5SJohn Baldwin 156b4151f71SJohn Baldwin /* 157e0f66ef8SJohn Baldwin * Update an ithread based on the associated intr_event. 158b4151f71SJohn Baldwin */ 159b4151f71SJohn Baldwin static void 160e0f66ef8SJohn Baldwin ithread_update(struct intr_thread *ithd) 161b4151f71SJohn Baldwin { 162e0f66ef8SJohn Baldwin struct intr_event *ie; 163b40ce416SJulian Elischer struct thread *td; 164e0f66ef8SJohn Baldwin u_char pri; 1658088699fSJohn Baldwin 166e0f66ef8SJohn Baldwin ie = ithd->it_event; 167e0f66ef8SJohn Baldwin td = ithd->it_thread; 168111b043cSAndriy Gapon mtx_assert(&ie->ie_lock, MA_OWNED); 169b4151f71SJohn Baldwin 170e0f66ef8SJohn Baldwin /* Determine the overall priority of this event. */ 171111b043cSAndriy Gapon if (CK_SLIST_EMPTY(&ie->ie_handlers)) 172e0f66ef8SJohn Baldwin pri = PRI_MAX_ITHD; 173e0f66ef8SJohn Baldwin else 174111b043cSAndriy Gapon pri = CK_SLIST_FIRST(&ie->ie_handlers)->ih_pri; 175e80fb434SRobert Drehmel 176e0f66ef8SJohn Baldwin /* Update name and priority. */ 1777ab24ea3SJulian Elischer strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name)); 17844ad5475SJohn Baldwin #ifdef KTR 17944ad5475SJohn Baldwin sched_clear_tdname(td); 18044ad5475SJohn Baldwin #endif 181982d11f8SJeff Roberson thread_lock(td); 182e0f66ef8SJohn Baldwin sched_prio(td, pri); 183982d11f8SJeff Roberson thread_unlock(td); 184b4151f71SJohn Baldwin } 185e0f66ef8SJohn Baldwin 186e0f66ef8SJohn Baldwin /* 187e0f66ef8SJohn Baldwin * Regenerate the full name of an interrupt event and update its priority. 188e0f66ef8SJohn Baldwin */ 189e0f66ef8SJohn Baldwin static void 190e0f66ef8SJohn Baldwin intr_event_update(struct intr_event *ie) 191e0f66ef8SJohn Baldwin { 192e0f66ef8SJohn Baldwin struct intr_handler *ih; 193e0f66ef8SJohn Baldwin char *last; 194f912e8f2SHans Petter Selasky int missed, space, flags; 195e0f66ef8SJohn Baldwin 196e0f66ef8SJohn Baldwin /* Start off with no entropy and just the name of the event. */ 197e0f66ef8SJohn Baldwin mtx_assert(&ie->ie_lock, MA_OWNED); 198e0f66ef8SJohn Baldwin strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); 199f912e8f2SHans Petter Selasky flags = 0; 2000811d60aSJohn Baldwin missed = 0; 201e0f66ef8SJohn Baldwin space = 1; 202e0f66ef8SJohn Baldwin 203e0f66ef8SJohn Baldwin /* Run through all the handlers updating values. */ 204111b043cSAndriy Gapon CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) { 2056d51c0feSIan Lepore if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 < 206e0f66ef8SJohn Baldwin sizeof(ie->ie_fullname)) { 207e0f66ef8SJohn Baldwin strcat(ie->ie_fullname, " "); 208e0f66ef8SJohn Baldwin strcat(ie->ie_fullname, ih->ih_name); 209e0f66ef8SJohn Baldwin space = 0; 2100811d60aSJohn Baldwin } else 2110811d60aSJohn Baldwin missed++; 212f912e8f2SHans Petter Selasky flags |= ih->ih_flags; 2130811d60aSJohn Baldwin } 214f912e8f2SHans Petter Selasky ie->ie_hflags = flags; 215e0f66ef8SJohn Baldwin 216e0f66ef8SJohn Baldwin /* 21767da50a0SIan Lepore * If there is only one handler and its name is too long, just copy in 21867da50a0SIan Lepore * as much of the end of the name (includes the unit number) as will 21967da50a0SIan Lepore * fit. Otherwise, we have multiple handlers and not all of the names 22067da50a0SIan Lepore * will fit. Add +'s to indicate missing names. If we run out of room 22167da50a0SIan Lepore * and still have +'s to add, change the last character from a + to a *. 222e0f66ef8SJohn Baldwin */ 22367da50a0SIan Lepore if (missed == 1 && space == 1) { 22467da50a0SIan Lepore ih = CK_SLIST_FIRST(&ie->ie_handlers); 22567da50a0SIan Lepore missed = strlen(ie->ie_fullname) + strlen(ih->ih_name) + 2 - 22667da50a0SIan Lepore sizeof(ie->ie_fullname); 22767da50a0SIan Lepore strcat(ie->ie_fullname, (missed == 0) ? " " : "-"); 22867da50a0SIan Lepore strcat(ie->ie_fullname, &ih->ih_name[missed]); 22967da50a0SIan Lepore missed = 0; 23067da50a0SIan Lepore } 231e0f66ef8SJohn Baldwin last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2]; 2320811d60aSJohn Baldwin while (missed-- > 0) { 233e0f66ef8SJohn Baldwin if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) { 234e0f66ef8SJohn Baldwin if (*last == '+') { 235e0f66ef8SJohn Baldwin *last = '*'; 236e0f66ef8SJohn Baldwin break; 237b4151f71SJohn Baldwin } else 238e0f66ef8SJohn Baldwin *last = '+'; 239e0f66ef8SJohn Baldwin } else if (space) { 240e0f66ef8SJohn Baldwin strcat(ie->ie_fullname, " +"); 241e0f66ef8SJohn Baldwin space = 0; 242e0f66ef8SJohn Baldwin } else 243e0f66ef8SJohn Baldwin strcat(ie->ie_fullname, "+"); 244b4151f71SJohn Baldwin } 245e0f66ef8SJohn Baldwin 246e0f66ef8SJohn Baldwin /* 247e0f66ef8SJohn Baldwin * If this event has an ithread, update it's priority and 248e0f66ef8SJohn Baldwin * name. 249e0f66ef8SJohn Baldwin */ 250e0f66ef8SJohn Baldwin if (ie->ie_thread != NULL) 251e0f66ef8SJohn Baldwin ithread_update(ie->ie_thread); 252e0f66ef8SJohn Baldwin CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname); 253b4151f71SJohn Baldwin } 254b4151f71SJohn Baldwin 255b4151f71SJohn Baldwin int 2569b33b154SJeff Roberson intr_event_create(struct intr_event **event, void *source, int flags, int irq, 2571ee1b687SJohn Baldwin void (*pre_ithread)(void *), void (*post_ithread)(void *), 258066da805SAdrian Chadd void (*post_filter)(void *), int (*assign_cpu)(void *, int), 2591ee1b687SJohn Baldwin const char *fmt, ...) 260bafe5a31SPaolo Pisati { 261bafe5a31SPaolo Pisati struct intr_event *ie; 262bafe5a31SPaolo Pisati va_list ap; 263bafe5a31SPaolo Pisati 264bafe5a31SPaolo Pisati /* The only valid flag during creation is IE_SOFT. */ 265bafe5a31SPaolo Pisati if ((flags & ~IE_SOFT) != 0) 266bafe5a31SPaolo Pisati return (EINVAL); 267bafe5a31SPaolo Pisati ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO); 268bafe5a31SPaolo Pisati ie->ie_source = source; 2691ee1b687SJohn Baldwin ie->ie_pre_ithread = pre_ithread; 2701ee1b687SJohn Baldwin ie->ie_post_ithread = post_ithread; 2711ee1b687SJohn Baldwin ie->ie_post_filter = post_filter; 2726d2d1c04SJohn Baldwin ie->ie_assign_cpu = assign_cpu; 273bafe5a31SPaolo Pisati ie->ie_flags = flags; 2749b33b154SJeff Roberson ie->ie_irq = irq; 275eaf86d16SJohn Baldwin ie->ie_cpu = NOCPU; 276111b043cSAndriy Gapon CK_SLIST_INIT(&ie->ie_handlers); 277bafe5a31SPaolo Pisati mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF); 278bafe5a31SPaolo Pisati 279bafe5a31SPaolo Pisati va_start(ap, fmt); 280bafe5a31SPaolo Pisati vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap); 281bafe5a31SPaolo Pisati va_end(ap); 282bafe5a31SPaolo Pisati strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); 2839b33b154SJeff Roberson mtx_lock(&event_lock); 284bafe5a31SPaolo Pisati TAILQ_INSERT_TAIL(&event_list, ie, ie_list); 2859b33b154SJeff Roberson mtx_unlock(&event_lock); 286bafe5a31SPaolo Pisati if (event != NULL) 287bafe5a31SPaolo Pisati *event = ie; 288bafe5a31SPaolo Pisati CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name); 289bafe5a31SPaolo Pisati return (0); 290bafe5a31SPaolo Pisati } 291b4151f71SJohn Baldwin 292eaf86d16SJohn Baldwin /* 293eaf86d16SJohn Baldwin * Bind an interrupt event to the specified CPU. Note that not all 294eaf86d16SJohn Baldwin * platforms support binding an interrupt to a CPU. For those 29529dfb631SConrad Meyer * platforms this request will fail. Using a cpu id of NOCPU unbinds 296eaf86d16SJohn Baldwin * the interrupt event. 297eaf86d16SJohn Baldwin */ 29829dfb631SConrad Meyer static int 29929dfb631SConrad Meyer _intr_event_bind(struct intr_event *ie, int cpu, bool bindirq, bool bindithread) 300eaf86d16SJohn Baldwin { 3019b33b154SJeff Roberson lwpid_t id; 302eaf86d16SJohn Baldwin int error; 303eaf86d16SJohn Baldwin 304eaf86d16SJohn Baldwin /* Need a CPU to bind to. */ 305eaf86d16SJohn Baldwin if (cpu != NOCPU && CPU_ABSENT(cpu)) 306eaf86d16SJohn Baldwin return (EINVAL); 307eaf86d16SJohn Baldwin 308eaf86d16SJohn Baldwin if (ie->ie_assign_cpu == NULL) 309eaf86d16SJohn Baldwin return (EOPNOTSUPP); 310cebc7fb1SJohn Baldwin 311cebc7fb1SJohn Baldwin error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR); 312cebc7fb1SJohn Baldwin if (error) 313cebc7fb1SJohn Baldwin return (error); 314cebc7fb1SJohn Baldwin 3159b33b154SJeff Roberson /* 316cebc7fb1SJohn Baldwin * If we have any ithreads try to set their mask first to verify 317cebc7fb1SJohn Baldwin * permissions, etc. 3189b33b154SJeff Roberson */ 31929dfb631SConrad Meyer if (bindithread) { 320eaf86d16SJohn Baldwin mtx_lock(&ie->ie_lock); 3219b33b154SJeff Roberson if (ie->ie_thread != NULL) { 3229b33b154SJeff Roberson id = ie->ie_thread->it_thread->td_tid; 323eaf86d16SJohn Baldwin mtx_unlock(&ie->ie_lock); 32481198539SAlexander V. Chernikov error = cpuset_setithread(id, cpu); 3259b33b154SJeff Roberson if (error) 3269b33b154SJeff Roberson return (error); 3279b33b154SJeff Roberson } else 328eaf86d16SJohn Baldwin mtx_unlock(&ie->ie_lock); 32929dfb631SConrad Meyer } 33029dfb631SConrad Meyer if (bindirq) 331eaf86d16SJohn Baldwin error = ie->ie_assign_cpu(ie->ie_source, cpu); 332cebc7fb1SJohn Baldwin if (error) { 33329dfb631SConrad Meyer if (bindithread) { 334cebc7fb1SJohn Baldwin mtx_lock(&ie->ie_lock); 335cebc7fb1SJohn Baldwin if (ie->ie_thread != NULL) { 33681198539SAlexander V. Chernikov cpu = ie->ie_cpu; 337cebc7fb1SJohn Baldwin id = ie->ie_thread->it_thread->td_tid; 338cebc7fb1SJohn Baldwin mtx_unlock(&ie->ie_lock); 33981198539SAlexander V. Chernikov (void)cpuset_setithread(id, cpu); 340cebc7fb1SJohn Baldwin } else 341cebc7fb1SJohn Baldwin mtx_unlock(&ie->ie_lock); 34229dfb631SConrad Meyer } 343eaf86d16SJohn Baldwin return (error); 344cebc7fb1SJohn Baldwin } 345cebc7fb1SJohn Baldwin 34629dfb631SConrad Meyer if (bindirq) { 347eaf86d16SJohn Baldwin mtx_lock(&ie->ie_lock); 348eaf86d16SJohn Baldwin ie->ie_cpu = cpu; 3499b33b154SJeff Roberson mtx_unlock(&ie->ie_lock); 35029dfb631SConrad Meyer } 3519b33b154SJeff Roberson 3529b33b154SJeff Roberson return (error); 3539b33b154SJeff Roberson } 3549b33b154SJeff Roberson 35529dfb631SConrad Meyer /* 35629dfb631SConrad Meyer * Bind an interrupt event to the specified CPU. For supported platforms, any 35729dfb631SConrad Meyer * associated ithreads as well as the primary interrupt context will be bound 35829dfb631SConrad Meyer * to the specificed CPU. 35929dfb631SConrad Meyer */ 36029dfb631SConrad Meyer int 36129dfb631SConrad Meyer intr_event_bind(struct intr_event *ie, int cpu) 36229dfb631SConrad Meyer { 36329dfb631SConrad Meyer 36429dfb631SConrad Meyer return (_intr_event_bind(ie, cpu, true, true)); 36529dfb631SConrad Meyer } 36629dfb631SConrad Meyer 36729dfb631SConrad Meyer /* 36829dfb631SConrad Meyer * Bind an interrupt event to the specified CPU, but do not bind associated 36929dfb631SConrad Meyer * ithreads. 37029dfb631SConrad Meyer */ 37129dfb631SConrad Meyer int 37229dfb631SConrad Meyer intr_event_bind_irqonly(struct intr_event *ie, int cpu) 37329dfb631SConrad Meyer { 37429dfb631SConrad Meyer 37529dfb631SConrad Meyer return (_intr_event_bind(ie, cpu, true, false)); 37629dfb631SConrad Meyer } 37729dfb631SConrad Meyer 37829dfb631SConrad Meyer /* 37929dfb631SConrad Meyer * Bind an interrupt event's ithread to the specified CPU. 38029dfb631SConrad Meyer */ 38129dfb631SConrad Meyer int 38229dfb631SConrad Meyer intr_event_bind_ithread(struct intr_event *ie, int cpu) 38329dfb631SConrad Meyer { 38429dfb631SConrad Meyer 38529dfb631SConrad Meyer return (_intr_event_bind(ie, cpu, false, true)); 38629dfb631SConrad Meyer } 38729dfb631SConrad Meyer 3884e255d74SAndrew Gallatin /* 3894e255d74SAndrew Gallatin * Bind an interrupt event's ithread to the specified cpuset. 3904e255d74SAndrew Gallatin */ 3914e255d74SAndrew Gallatin int 3924e255d74SAndrew Gallatin intr_event_bind_ithread_cpuset(struct intr_event *ie, cpuset_t *cs) 3934e255d74SAndrew Gallatin { 3944e255d74SAndrew Gallatin lwpid_t id; 3954e255d74SAndrew Gallatin 3964e255d74SAndrew Gallatin mtx_lock(&ie->ie_lock); 3974e255d74SAndrew Gallatin if (ie->ie_thread != NULL) { 3984e255d74SAndrew Gallatin id = ie->ie_thread->it_thread->td_tid; 3994e255d74SAndrew Gallatin mtx_unlock(&ie->ie_lock); 4004e255d74SAndrew Gallatin return (cpuset_setthread(id, cs)); 4014e255d74SAndrew Gallatin } else { 4024e255d74SAndrew Gallatin mtx_unlock(&ie->ie_lock); 4034e255d74SAndrew Gallatin } 4044e255d74SAndrew Gallatin return (ENODEV); 4054e255d74SAndrew Gallatin } 4064e255d74SAndrew Gallatin 4079b33b154SJeff Roberson static struct intr_event * 4089b33b154SJeff Roberson intr_lookup(int irq) 4099b33b154SJeff Roberson { 4109b33b154SJeff Roberson struct intr_event *ie; 4119b33b154SJeff Roberson 4129b33b154SJeff Roberson mtx_lock(&event_lock); 4139b33b154SJeff Roberson TAILQ_FOREACH(ie, &event_list, ie_list) 4149b33b154SJeff Roberson if (ie->ie_irq == irq && 4159b33b154SJeff Roberson (ie->ie_flags & IE_SOFT) == 0 && 416111b043cSAndriy Gapon CK_SLIST_FIRST(&ie->ie_handlers) != NULL) 4179b33b154SJeff Roberson break; 4189b33b154SJeff Roberson mtx_unlock(&event_lock); 4199b33b154SJeff Roberson return (ie); 4209b33b154SJeff Roberson } 4219b33b154SJeff Roberson 4229b33b154SJeff Roberson int 42329dfb631SConrad Meyer intr_setaffinity(int irq, int mode, void *m) 4249b33b154SJeff Roberson { 4259b33b154SJeff Roberson struct intr_event *ie; 4269b33b154SJeff Roberson cpuset_t *mask; 4273fe93b94SAdrian Chadd int cpu, n; 4289b33b154SJeff Roberson 4299b33b154SJeff Roberson mask = m; 4309b33b154SJeff Roberson cpu = NOCPU; 4319b33b154SJeff Roberson /* 4329b33b154SJeff Roberson * If we're setting all cpus we can unbind. Otherwise make sure 4339b33b154SJeff Roberson * only one cpu is in the set. 4349b33b154SJeff Roberson */ 4359b33b154SJeff Roberson if (CPU_CMP(cpuset_root, mask)) { 4369b33b154SJeff Roberson for (n = 0; n < CPU_SETSIZE; n++) { 4379b33b154SJeff Roberson if (!CPU_ISSET(n, mask)) 4389b33b154SJeff Roberson continue; 4399b33b154SJeff Roberson if (cpu != NOCPU) 4409b33b154SJeff Roberson return (EINVAL); 4413fe93b94SAdrian Chadd cpu = n; 4429b33b154SJeff Roberson } 4439b33b154SJeff Roberson } 4449b33b154SJeff Roberson ie = intr_lookup(irq); 4459b33b154SJeff Roberson if (ie == NULL) 4469b33b154SJeff Roberson return (ESRCH); 44729dfb631SConrad Meyer switch (mode) { 44829dfb631SConrad Meyer case CPU_WHICH_IRQ: 4499bd55acfSJohn Baldwin return (intr_event_bind(ie, cpu)); 45029dfb631SConrad Meyer case CPU_WHICH_INTRHANDLER: 45129dfb631SConrad Meyer return (intr_event_bind_irqonly(ie, cpu)); 45229dfb631SConrad Meyer case CPU_WHICH_ITHREAD: 45329dfb631SConrad Meyer return (intr_event_bind_ithread(ie, cpu)); 45429dfb631SConrad Meyer default: 45529dfb631SConrad Meyer return (EINVAL); 45629dfb631SConrad Meyer } 4579b33b154SJeff Roberson } 4589b33b154SJeff Roberson 4599b33b154SJeff Roberson int 46029dfb631SConrad Meyer intr_getaffinity(int irq, int mode, void *m) 4619b33b154SJeff Roberson { 4629b33b154SJeff Roberson struct intr_event *ie; 46329dfb631SConrad Meyer struct thread *td; 46429dfb631SConrad Meyer struct proc *p; 4659b33b154SJeff Roberson cpuset_t *mask; 46629dfb631SConrad Meyer lwpid_t id; 46729dfb631SConrad Meyer int error; 4689b33b154SJeff Roberson 4699b33b154SJeff Roberson mask = m; 4709b33b154SJeff Roberson ie = intr_lookup(irq); 4719b33b154SJeff Roberson if (ie == NULL) 4729b33b154SJeff Roberson return (ESRCH); 47329dfb631SConrad Meyer 47429dfb631SConrad Meyer error = 0; 4759b33b154SJeff Roberson CPU_ZERO(mask); 47629dfb631SConrad Meyer switch (mode) { 47729dfb631SConrad Meyer case CPU_WHICH_IRQ: 47829dfb631SConrad Meyer case CPU_WHICH_INTRHANDLER: 4799b33b154SJeff Roberson mtx_lock(&ie->ie_lock); 4809b33b154SJeff Roberson if (ie->ie_cpu == NOCPU) 4819b33b154SJeff Roberson CPU_COPY(cpuset_root, mask); 4829b33b154SJeff Roberson else 4839b33b154SJeff Roberson CPU_SET(ie->ie_cpu, mask); 484eaf86d16SJohn Baldwin mtx_unlock(&ie->ie_lock); 48529dfb631SConrad Meyer break; 48629dfb631SConrad Meyer case CPU_WHICH_ITHREAD: 48729dfb631SConrad Meyer mtx_lock(&ie->ie_lock); 48829dfb631SConrad Meyer if (ie->ie_thread == NULL) { 48929dfb631SConrad Meyer mtx_unlock(&ie->ie_lock); 49029dfb631SConrad Meyer CPU_COPY(cpuset_root, mask); 49129dfb631SConrad Meyer } else { 49229dfb631SConrad Meyer id = ie->ie_thread->it_thread->td_tid; 49329dfb631SConrad Meyer mtx_unlock(&ie->ie_lock); 49429dfb631SConrad Meyer error = cpuset_which(CPU_WHICH_TID, id, &p, &td, NULL); 49529dfb631SConrad Meyer if (error != 0) 49629dfb631SConrad Meyer return (error); 49729dfb631SConrad Meyer CPU_COPY(&td->td_cpuset->cs_mask, mask); 49829dfb631SConrad Meyer PROC_UNLOCK(p); 49929dfb631SConrad Meyer } 50029dfb631SConrad Meyer default: 50129dfb631SConrad Meyer return (EINVAL); 50229dfb631SConrad Meyer } 503eaf86d16SJohn Baldwin return (0); 504eaf86d16SJohn Baldwin } 505eaf86d16SJohn Baldwin 506b4151f71SJohn Baldwin int 507e0f66ef8SJohn Baldwin intr_event_destroy(struct intr_event *ie) 508b4151f71SJohn Baldwin { 509b4151f71SJohn Baldwin 5109b33b154SJeff Roberson mtx_lock(&event_lock); 511e0f66ef8SJohn Baldwin mtx_lock(&ie->ie_lock); 512111b043cSAndriy Gapon if (!CK_SLIST_EMPTY(&ie->ie_handlers)) { 513e0f66ef8SJohn Baldwin mtx_unlock(&ie->ie_lock); 5149b33b154SJeff Roberson mtx_unlock(&event_lock); 515e0f66ef8SJohn Baldwin return (EBUSY); 5164d29cb2dSJohn Baldwin } 517e0f66ef8SJohn Baldwin TAILQ_REMOVE(&event_list, ie, ie_list); 5189477358dSJohn Baldwin #ifndef notyet 5199477358dSJohn Baldwin if (ie->ie_thread != NULL) { 5209477358dSJohn Baldwin ithread_destroy(ie->ie_thread); 5219477358dSJohn Baldwin ie->ie_thread = NULL; 5229477358dSJohn Baldwin } 5239477358dSJohn Baldwin #endif 524e0f66ef8SJohn Baldwin mtx_unlock(&ie->ie_lock); 5259b33b154SJeff Roberson mtx_unlock(&event_lock); 526e0f66ef8SJohn Baldwin mtx_destroy(&ie->ie_lock); 527e0f66ef8SJohn Baldwin free(ie, M_ITHREAD); 528e0f66ef8SJohn Baldwin return (0); 529e0f66ef8SJohn Baldwin } 530e0f66ef8SJohn Baldwin 531e0f66ef8SJohn Baldwin static struct intr_thread * 532e0f66ef8SJohn Baldwin ithread_create(const char *name) 533e0f66ef8SJohn Baldwin { 534e0f66ef8SJohn Baldwin struct intr_thread *ithd; 535e0f66ef8SJohn Baldwin struct thread *td; 536e0f66ef8SJohn Baldwin int error; 537e0f66ef8SJohn Baldwin 538e0f66ef8SJohn Baldwin ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO); 539e0f66ef8SJohn Baldwin 5407ab24ea3SJulian Elischer error = kproc_kthread_add(ithread_loop, ithd, &intrproc, 5417ab24ea3SJulian Elischer &td, RFSTOPPED | RFHIGHPID, 5429ef95d01SJulian Elischer 0, "intr", "%s", name); 543e0f66ef8SJohn Baldwin if (error) 5443745c395SJulian Elischer panic("kproc_create() failed with %d", error); 545982d11f8SJeff Roberson thread_lock(td); 546ad1e7d28SJulian Elischer sched_class(td, PRI_ITHD); 547e0f66ef8SJohn Baldwin TD_SET_IWAIT(td); 548982d11f8SJeff Roberson thread_unlock(td); 549e0f66ef8SJohn Baldwin td->td_pflags |= TDP_ITHREAD; 550e0f66ef8SJohn Baldwin ithd->it_thread = td; 551e0f66ef8SJohn Baldwin CTR2(KTR_INTR, "%s: created %s", __func__, name); 552e0f66ef8SJohn Baldwin return (ithd); 553e0f66ef8SJohn Baldwin } 554e0f66ef8SJohn Baldwin 555e0f66ef8SJohn Baldwin static void 556e0f66ef8SJohn Baldwin ithread_destroy(struct intr_thread *ithread) 557e0f66ef8SJohn Baldwin { 558e0f66ef8SJohn Baldwin struct thread *td; 559e0f66ef8SJohn Baldwin 560bb141be1SScott Long CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name); 561e0f66ef8SJohn Baldwin td = ithread->it_thread; 562982d11f8SJeff Roberson thread_lock(td); 563e0f66ef8SJohn Baldwin ithread->it_flags |= IT_DEAD; 56471fad9fdSJulian Elischer if (TD_AWAITING_INTR(td)) { 56571fad9fdSJulian Elischer TD_CLR_IWAIT(td); 566f0393f06SJeff Roberson sched_add(td, SRQ_INTR); 56761a74c5cSJeff Roberson } else 568982d11f8SJeff Roberson thread_unlock(td); 569b4151f71SJohn Baldwin } 570b4151f71SJohn Baldwin 571b4151f71SJohn Baldwin int 572e0f66ef8SJohn Baldwin intr_event_add_handler(struct intr_event *ie, const char *name, 573ef544f63SPaolo Pisati driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri, 574ef544f63SPaolo Pisati enum intr_type flags, void **cookiep) 575b4151f71SJohn Baldwin { 576e0f66ef8SJohn Baldwin struct intr_handler *ih, *temp_ih; 577111b043cSAndriy Gapon struct intr_handler **prevptr; 578e0f66ef8SJohn Baldwin struct intr_thread *it; 579b4151f71SJohn Baldwin 580ef544f63SPaolo Pisati if (ie == NULL || name == NULL || (handler == NULL && filter == NULL)) 581b4151f71SJohn Baldwin return (EINVAL); 582b4151f71SJohn Baldwin 583e0f66ef8SJohn Baldwin /* Allocate and populate an interrupt handler structure. */ 584e0f66ef8SJohn Baldwin ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO); 585ef544f63SPaolo Pisati ih->ih_filter = filter; 586b4151f71SJohn Baldwin ih->ih_handler = handler; 587b4151f71SJohn Baldwin ih->ih_argument = arg; 58837b8ef16SJohn Baldwin strlcpy(ih->ih_name, name, sizeof(ih->ih_name)); 589e0f66ef8SJohn Baldwin ih->ih_event = ie; 590b4151f71SJohn Baldwin ih->ih_pri = pri; 591ef544f63SPaolo Pisati if (flags & INTR_EXCL) 592b4151f71SJohn Baldwin ih->ih_flags = IH_EXCLUSIVE; 593b4151f71SJohn Baldwin if (flags & INTR_MPSAFE) 594b4151f71SJohn Baldwin ih->ih_flags |= IH_MPSAFE; 595b4151f71SJohn Baldwin if (flags & INTR_ENTROPY) 596b4151f71SJohn Baldwin ih->ih_flags |= IH_ENTROPY; 597511d1afbSGleb Smirnoff if (flags & INTR_TYPE_NET) 598511d1afbSGleb Smirnoff ih->ih_flags |= IH_NET; 599b4151f71SJohn Baldwin 600e0f66ef8SJohn Baldwin /* We can only have one exclusive handler in a event. */ 601e0f66ef8SJohn Baldwin mtx_lock(&ie->ie_lock); 602111b043cSAndriy Gapon if (!CK_SLIST_EMPTY(&ie->ie_handlers)) { 603e0f66ef8SJohn Baldwin if ((flags & INTR_EXCL) || 604111b043cSAndriy Gapon (CK_SLIST_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) { 605e0f66ef8SJohn Baldwin mtx_unlock(&ie->ie_lock); 606b4151f71SJohn Baldwin free(ih, M_ITHREAD); 607b4151f71SJohn Baldwin return (EINVAL); 608b4151f71SJohn Baldwin } 609e0f66ef8SJohn Baldwin } 610e0f66ef8SJohn Baldwin 611e0f66ef8SJohn Baldwin /* Create a thread if we need one. */ 612ef544f63SPaolo Pisati while (ie->ie_thread == NULL && handler != NULL) { 613e0f66ef8SJohn Baldwin if (ie->ie_flags & IE_ADDING_THREAD) 6140f180a7cSJohn Baldwin msleep(ie, &ie->ie_lock, 0, "ithread", 0); 615e0f66ef8SJohn Baldwin else { 616e0f66ef8SJohn Baldwin ie->ie_flags |= IE_ADDING_THREAD; 617e0f66ef8SJohn Baldwin mtx_unlock(&ie->ie_lock); 618e0f66ef8SJohn Baldwin it = ithread_create("intr: newborn"); 619e0f66ef8SJohn Baldwin mtx_lock(&ie->ie_lock); 620e0f66ef8SJohn Baldwin ie->ie_flags &= ~IE_ADDING_THREAD; 621e0f66ef8SJohn Baldwin ie->ie_thread = it; 622e0f66ef8SJohn Baldwin it->it_event = ie; 623e0f66ef8SJohn Baldwin ithread_update(it); 624e0f66ef8SJohn Baldwin wakeup(ie); 625e0f66ef8SJohn Baldwin } 626e0f66ef8SJohn Baldwin } 627c9516c94SAlexander Kabaev 628c9516c94SAlexander Kabaev /* Add the new handler to the event in priority order. */ 629111b043cSAndriy Gapon CK_SLIST_FOREACH_PREVPTR(temp_ih, prevptr, &ie->ie_handlers, ih_next) { 630c9516c94SAlexander Kabaev if (temp_ih->ih_pri > ih->ih_pri) 631c9516c94SAlexander Kabaev break; 632c9516c94SAlexander Kabaev } 633111b043cSAndriy Gapon CK_SLIST_INSERT_PREVPTR(prevptr, temp_ih, ih, ih_next); 634111b043cSAndriy Gapon 635c9516c94SAlexander Kabaev intr_event_update(ie); 636c9516c94SAlexander Kabaev 637e0f66ef8SJohn Baldwin CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name, 638e0f66ef8SJohn Baldwin ie->ie_name); 639e0f66ef8SJohn Baldwin mtx_unlock(&ie->ie_lock); 640e0f66ef8SJohn Baldwin 641e0f66ef8SJohn Baldwin if (cookiep != NULL) 642e0f66ef8SJohn Baldwin *cookiep = ih; 643e0f66ef8SJohn Baldwin return (0); 644e0f66ef8SJohn Baldwin } 645b4151f71SJohn Baldwin 646c3045318SJohn Baldwin /* 64737b8ef16SJohn Baldwin * Append a description preceded by a ':' to the name of the specified 64837b8ef16SJohn Baldwin * interrupt handler. 64937b8ef16SJohn Baldwin */ 65037b8ef16SJohn Baldwin int 65137b8ef16SJohn Baldwin intr_event_describe_handler(struct intr_event *ie, void *cookie, 65237b8ef16SJohn Baldwin const char *descr) 65337b8ef16SJohn Baldwin { 65437b8ef16SJohn Baldwin struct intr_handler *ih; 65537b8ef16SJohn Baldwin size_t space; 65637b8ef16SJohn Baldwin char *start; 65737b8ef16SJohn Baldwin 65837b8ef16SJohn Baldwin mtx_lock(&ie->ie_lock); 65937b8ef16SJohn Baldwin #ifdef INVARIANTS 660111b043cSAndriy Gapon CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) { 66137b8ef16SJohn Baldwin if (ih == cookie) 66237b8ef16SJohn Baldwin break; 66337b8ef16SJohn Baldwin } 66437b8ef16SJohn Baldwin if (ih == NULL) { 66537b8ef16SJohn Baldwin mtx_unlock(&ie->ie_lock); 666d0c9a291SJohn Baldwin panic("handler %p not found in interrupt event %p", cookie, ie); 66737b8ef16SJohn Baldwin } 66837b8ef16SJohn Baldwin #endif 66937b8ef16SJohn Baldwin ih = cookie; 67037b8ef16SJohn Baldwin 67137b8ef16SJohn Baldwin /* 67237b8ef16SJohn Baldwin * Look for an existing description by checking for an 67337b8ef16SJohn Baldwin * existing ":". This assumes device names do not include 67437b8ef16SJohn Baldwin * colons. If one is found, prepare to insert the new 67537b8ef16SJohn Baldwin * description at that point. If one is not found, find the 67637b8ef16SJohn Baldwin * end of the name to use as the insertion point. 67737b8ef16SJohn Baldwin */ 678dc15eac0SEd Schouten start = strchr(ih->ih_name, ':'); 67937b8ef16SJohn Baldwin if (start == NULL) 680dc15eac0SEd Schouten start = strchr(ih->ih_name, 0); 68137b8ef16SJohn Baldwin 68237b8ef16SJohn Baldwin /* 68337b8ef16SJohn Baldwin * See if there is enough remaining room in the string for the 68437b8ef16SJohn Baldwin * description + ":". The "- 1" leaves room for the trailing 68537b8ef16SJohn Baldwin * '\0'. The "+ 1" accounts for the colon. 68637b8ef16SJohn Baldwin */ 68737b8ef16SJohn Baldwin space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1; 68837b8ef16SJohn Baldwin if (strlen(descr) + 1 > space) { 68937b8ef16SJohn Baldwin mtx_unlock(&ie->ie_lock); 69037b8ef16SJohn Baldwin return (ENOSPC); 69137b8ef16SJohn Baldwin } 69237b8ef16SJohn Baldwin 69337b8ef16SJohn Baldwin /* Append a colon followed by the description. */ 69437b8ef16SJohn Baldwin *start = ':'; 69537b8ef16SJohn Baldwin strcpy(start + 1, descr); 69637b8ef16SJohn Baldwin intr_event_update(ie); 69737b8ef16SJohn Baldwin mtx_unlock(&ie->ie_lock); 69837b8ef16SJohn Baldwin return (0); 69937b8ef16SJohn Baldwin } 70037b8ef16SJohn Baldwin 70137b8ef16SJohn Baldwin /* 702c3045318SJohn Baldwin * Return the ie_source field from the intr_event an intr_handler is 703c3045318SJohn Baldwin * associated with. 704c3045318SJohn Baldwin */ 705c3045318SJohn Baldwin void * 706c3045318SJohn Baldwin intr_handler_source(void *cookie) 707c3045318SJohn Baldwin { 708c3045318SJohn Baldwin struct intr_handler *ih; 709c3045318SJohn Baldwin struct intr_event *ie; 710c3045318SJohn Baldwin 711c3045318SJohn Baldwin ih = (struct intr_handler *)cookie; 712c3045318SJohn Baldwin if (ih == NULL) 713c3045318SJohn Baldwin return (NULL); 714c3045318SJohn Baldwin ie = ih->ih_event; 715c3045318SJohn Baldwin KASSERT(ie != NULL, 716c3045318SJohn Baldwin ("interrupt handler \"%s\" has a NULL interrupt event", 717c3045318SJohn Baldwin ih->ih_name)); 718c3045318SJohn Baldwin return (ie->ie_source); 719c3045318SJohn Baldwin } 720c3045318SJohn Baldwin 721e4cd31ddSJeff Roberson /* 722e0fa977eSAndriy Gapon * If intr_event_handle() is running in the ISR context at the time of the call, 723e0fa977eSAndriy Gapon * then wait for it to complete. 724e0fa977eSAndriy Gapon */ 725e0fa977eSAndriy Gapon static void 726e0fa977eSAndriy Gapon intr_event_barrier(struct intr_event *ie) 727e0fa977eSAndriy Gapon { 728e0fa977eSAndriy Gapon int phase; 729e0fa977eSAndriy Gapon 730e0fa977eSAndriy Gapon mtx_assert(&ie->ie_lock, MA_OWNED); 731e0fa977eSAndriy Gapon phase = ie->ie_phase; 732e0fa977eSAndriy Gapon 733e0fa977eSAndriy Gapon /* 734e0fa977eSAndriy Gapon * Switch phase to direct future interrupts to the other active counter. 735e0fa977eSAndriy Gapon * Make sure that any preceding stores are visible before the switch. 736e0fa977eSAndriy Gapon */ 737e0fa977eSAndriy Gapon KASSERT(ie->ie_active[!phase] == 0, ("idle phase has activity")); 738e0fa977eSAndriy Gapon atomic_store_rel_int(&ie->ie_phase, !phase); 739e0fa977eSAndriy Gapon 740e0fa977eSAndriy Gapon /* 741e0fa977eSAndriy Gapon * This code cooperates with wait-free iteration of ie_handlers 742e0fa977eSAndriy Gapon * in intr_event_handle. 743e0fa977eSAndriy Gapon * Make sure that the removal and the phase update are not reordered 744e0fa977eSAndriy Gapon * with the active count check. 745e0fa977eSAndriy Gapon * Note that no combination of acquire and release fences can provide 746e0fa977eSAndriy Gapon * that guarantee as Store->Load sequences can always be reordered. 747e0fa977eSAndriy Gapon */ 748e0fa977eSAndriy Gapon atomic_thread_fence_seq_cst(); 749e0fa977eSAndriy Gapon 750e0fa977eSAndriy Gapon /* 751e0fa977eSAndriy Gapon * Now wait on the inactive phase. 752e0fa977eSAndriy Gapon * The acquire fence is needed so that that all post-barrier accesses 753e0fa977eSAndriy Gapon * are after the check. 754e0fa977eSAndriy Gapon */ 755e0fa977eSAndriy Gapon while (ie->ie_active[phase] > 0) 756e0fa977eSAndriy Gapon cpu_spinwait(); 757e0fa977eSAndriy Gapon atomic_thread_fence_acq(); 758e0fa977eSAndriy Gapon } 759e0fa977eSAndriy Gapon 76082a5a275SAndriy Gapon static void 76182a5a275SAndriy Gapon intr_handler_barrier(struct intr_handler *handler) 76282a5a275SAndriy Gapon { 76382a5a275SAndriy Gapon struct intr_event *ie; 76482a5a275SAndriy Gapon 76582a5a275SAndriy Gapon ie = handler->ih_event; 76682a5a275SAndriy Gapon mtx_assert(&ie->ie_lock, MA_OWNED); 76782a5a275SAndriy Gapon KASSERT((handler->ih_flags & IH_DEAD) == 0, 76882a5a275SAndriy Gapon ("update for a removed handler")); 76982a5a275SAndriy Gapon 77082a5a275SAndriy Gapon if (ie->ie_thread == NULL) { 77182a5a275SAndriy Gapon intr_event_barrier(ie); 77282a5a275SAndriy Gapon return; 77382a5a275SAndriy Gapon } 77482a5a275SAndriy Gapon if ((handler->ih_flags & IH_CHANGED) == 0) { 77582a5a275SAndriy Gapon handler->ih_flags |= IH_CHANGED; 77682a5a275SAndriy Gapon intr_event_schedule_thread(ie); 77782a5a275SAndriy Gapon } 77882a5a275SAndriy Gapon while ((handler->ih_flags & IH_CHANGED) != 0) 77982a5a275SAndriy Gapon msleep(handler, &ie->ie_lock, 0, "ih_barr", 0); 78082a5a275SAndriy Gapon } 78182a5a275SAndriy Gapon 782e0fa977eSAndriy Gapon /* 783e4cd31ddSJeff Roberson * Sleep until an ithread finishes executing an interrupt handler. 784e4cd31ddSJeff Roberson * 785e4cd31ddSJeff Roberson * XXX Doesn't currently handle interrupt filters or fast interrupt 7866eb60f5bSHans Petter Selasky * handlers. This is intended for LinuxKPI drivers only. 7876eb60f5bSHans Petter Selasky * Do not use in BSD code. 788e4cd31ddSJeff Roberson */ 789e4cd31ddSJeff Roberson void 790e4cd31ddSJeff Roberson _intr_drain(int irq) 791e4cd31ddSJeff Roberson { 792e4cd31ddSJeff Roberson struct intr_event *ie; 793e4cd31ddSJeff Roberson struct intr_thread *ithd; 794e4cd31ddSJeff Roberson struct thread *td; 795e4cd31ddSJeff Roberson 796e4cd31ddSJeff Roberson ie = intr_lookup(irq); 797e4cd31ddSJeff Roberson if (ie == NULL) 798e4cd31ddSJeff Roberson return; 799e4cd31ddSJeff Roberson if (ie->ie_thread == NULL) 800e4cd31ddSJeff Roberson return; 801e4cd31ddSJeff Roberson ithd = ie->ie_thread; 802e4cd31ddSJeff Roberson td = ithd->it_thread; 8035bd186a6SJeff Roberson /* 8045bd186a6SJeff Roberson * We set the flag and wait for it to be cleared to avoid 8055bd186a6SJeff Roberson * long delays with potentially busy interrupt handlers 8065bd186a6SJeff Roberson * were we to only sample TD_AWAITING_INTR() every tick. 8075bd186a6SJeff Roberson */ 808e4cd31ddSJeff Roberson thread_lock(td); 809e4cd31ddSJeff Roberson if (!TD_AWAITING_INTR(td)) { 810e4cd31ddSJeff Roberson ithd->it_flags |= IT_WAIT; 8115bd186a6SJeff Roberson while (ithd->it_flags & IT_WAIT) { 8125bd186a6SJeff Roberson thread_unlock(td); 8135bd186a6SJeff Roberson pause("idrain", 1); 8145bd186a6SJeff Roberson thread_lock(td); 815e4cd31ddSJeff Roberson } 8165bd186a6SJeff Roberson } 8175bd186a6SJeff Roberson thread_unlock(td); 818e4cd31ddSJeff Roberson return; 819e4cd31ddSJeff Roberson } 820e4cd31ddSJeff Roberson 821b4151f71SJohn Baldwin int 822e0f66ef8SJohn Baldwin intr_event_remove_handler(void *cookie) 823b4151f71SJohn Baldwin { 824e0f66ef8SJohn Baldwin struct intr_handler *handler = (struct intr_handler *)cookie; 825e0f66ef8SJohn Baldwin struct intr_event *ie; 826e0f66ef8SJohn Baldwin struct intr_handler *ih; 827111b043cSAndriy Gapon struct intr_handler **prevptr; 828e0f66ef8SJohn Baldwin #ifdef notyet 829e0f66ef8SJohn Baldwin int dead; 830b4151f71SJohn Baldwin #endif 831b4151f71SJohn Baldwin 8323e5da754SJohn Baldwin if (handler == NULL) 833b4151f71SJohn Baldwin return (EINVAL); 834e0f66ef8SJohn Baldwin ie = handler->ih_event; 835e0f66ef8SJohn Baldwin KASSERT(ie != NULL, 836e0f66ef8SJohn Baldwin ("interrupt handler \"%s\" has a NULL interrupt event", 8373e5da754SJohn Baldwin handler->ih_name)); 838111b043cSAndriy Gapon 839e0f66ef8SJohn Baldwin mtx_lock(&ie->ie_lock); 84091f91617SDavid E. O'Brien CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name, 841e0f66ef8SJohn Baldwin ie->ie_name); 842111b043cSAndriy Gapon CK_SLIST_FOREACH_PREVPTR(ih, prevptr, &ie->ie_handlers, ih_next) { 8433e5da754SJohn Baldwin if (ih == handler) 844111b043cSAndriy Gapon break; 845111b043cSAndriy Gapon } 846111b043cSAndriy Gapon if (ih == NULL) { 847111b043cSAndriy Gapon panic("interrupt handler \"%s\" not found in " 848111b043cSAndriy Gapon "interrupt event \"%s\"", handler->ih_name, ie->ie_name); 849111b043cSAndriy Gapon } 850111b043cSAndriy Gapon 851de271f01SJohn Baldwin /* 852e0fa977eSAndriy Gapon * If there is no ithread, then directly remove the handler. Note that 853e0fa977eSAndriy Gapon * intr_event_handle() iterates ie_handlers in a lock-less fashion, so 854e0fa977eSAndriy Gapon * care needs to be taken to keep ie_handlers consistent and to free 855e0fa977eSAndriy Gapon * the removed handler only when ie_handlers is quiescent. 856e0f66ef8SJohn Baldwin */ 857e0f66ef8SJohn Baldwin if (ie->ie_thread == NULL) { 858111b043cSAndriy Gapon CK_SLIST_REMOVE_PREVPTR(prevptr, ih, ih_next); 859e0fa977eSAndriy Gapon intr_event_barrier(ie); 860e0fa977eSAndriy Gapon intr_event_update(ie); 861e0f66ef8SJohn Baldwin mtx_unlock(&ie->ie_lock); 862e0f66ef8SJohn Baldwin free(handler, M_ITHREAD); 863e0f66ef8SJohn Baldwin return (0); 864e0f66ef8SJohn Baldwin } 865e0f66ef8SJohn Baldwin 866e0f66ef8SJohn Baldwin /* 867e0fa977eSAndriy Gapon * Let the interrupt thread do the job. 868e0fa977eSAndriy Gapon * The interrupt source is disabled when the interrupt thread is 869e0fa977eSAndriy Gapon * running, so it does not have to worry about interaction with 870e0fa977eSAndriy Gapon * intr_event_handle(). 871de271f01SJohn Baldwin */ 872e0fa977eSAndriy Gapon KASSERT((handler->ih_flags & IH_DEAD) == 0, 873e0fa977eSAndriy Gapon ("duplicate handle remove")); 874de271f01SJohn Baldwin handler->ih_flags |= IH_DEAD; 875e0fa977eSAndriy Gapon intr_event_schedule_thread(ie); 876e0f66ef8SJohn Baldwin while (handler->ih_flags & IH_DEAD) 8770f180a7cSJohn Baldwin msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0); 878e0f66ef8SJohn Baldwin intr_event_update(ie); 879111b043cSAndriy Gapon 880e0f66ef8SJohn Baldwin #ifdef notyet 881e0f66ef8SJohn Baldwin /* 882e0f66ef8SJohn Baldwin * XXX: This could be bad in the case of ppbus(8). Also, I think 883e0f66ef8SJohn Baldwin * this could lead to races of stale data when servicing an 884e0f66ef8SJohn Baldwin * interrupt. 885e0f66ef8SJohn Baldwin */ 886e0f66ef8SJohn Baldwin dead = 1; 887111b043cSAndriy Gapon CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) { 888111b043cSAndriy Gapon if (ih->ih_handler != NULL) { 889e0f66ef8SJohn Baldwin dead = 0; 890e0f66ef8SJohn Baldwin break; 891e0f66ef8SJohn Baldwin } 892e0f66ef8SJohn Baldwin } 893e0f66ef8SJohn Baldwin if (dead) { 894e0f66ef8SJohn Baldwin ithread_destroy(ie->ie_thread); 895e0f66ef8SJohn Baldwin ie->ie_thread = NULL; 896e0f66ef8SJohn Baldwin } 897e0f66ef8SJohn Baldwin #endif 898e0f66ef8SJohn Baldwin mtx_unlock(&ie->ie_lock); 899b4151f71SJohn Baldwin free(handler, M_ITHREAD); 900b4151f71SJohn Baldwin return (0); 901b4151f71SJohn Baldwin } 902b4151f71SJohn Baldwin 90382a5a275SAndriy Gapon int 90482a5a275SAndriy Gapon intr_event_suspend_handler(void *cookie) 90582a5a275SAndriy Gapon { 90682a5a275SAndriy Gapon struct intr_handler *handler = (struct intr_handler *)cookie; 90782a5a275SAndriy Gapon struct intr_event *ie; 90882a5a275SAndriy Gapon 90982a5a275SAndriy Gapon if (handler == NULL) 91082a5a275SAndriy Gapon return (EINVAL); 91182a5a275SAndriy Gapon ie = handler->ih_event; 91282a5a275SAndriy Gapon KASSERT(ie != NULL, 91382a5a275SAndriy Gapon ("interrupt handler \"%s\" has a NULL interrupt event", 91482a5a275SAndriy Gapon handler->ih_name)); 91582a5a275SAndriy Gapon mtx_lock(&ie->ie_lock); 91682a5a275SAndriy Gapon handler->ih_flags |= IH_SUSP; 91782a5a275SAndriy Gapon intr_handler_barrier(handler); 91882a5a275SAndriy Gapon mtx_unlock(&ie->ie_lock); 91982a5a275SAndriy Gapon return (0); 92082a5a275SAndriy Gapon } 92182a5a275SAndriy Gapon 92282a5a275SAndriy Gapon int 92382a5a275SAndriy Gapon intr_event_resume_handler(void *cookie) 92482a5a275SAndriy Gapon { 92582a5a275SAndriy Gapon struct intr_handler *handler = (struct intr_handler *)cookie; 92682a5a275SAndriy Gapon struct intr_event *ie; 92782a5a275SAndriy Gapon 92882a5a275SAndriy Gapon if (handler == NULL) 92982a5a275SAndriy Gapon return (EINVAL); 93082a5a275SAndriy Gapon ie = handler->ih_event; 93182a5a275SAndriy Gapon KASSERT(ie != NULL, 93282a5a275SAndriy Gapon ("interrupt handler \"%s\" has a NULL interrupt event", 93382a5a275SAndriy Gapon handler->ih_name)); 93482a5a275SAndriy Gapon 93582a5a275SAndriy Gapon /* 93682a5a275SAndriy Gapon * intr_handler_barrier() acts not only as a barrier, 93782a5a275SAndriy Gapon * it also allows to check for any pending interrupts. 93882a5a275SAndriy Gapon */ 93982a5a275SAndriy Gapon mtx_lock(&ie->ie_lock); 94082a5a275SAndriy Gapon handler->ih_flags &= ~IH_SUSP; 94182a5a275SAndriy Gapon intr_handler_barrier(handler); 94282a5a275SAndriy Gapon mtx_unlock(&ie->ie_lock); 94382a5a275SAndriy Gapon return (0); 94482a5a275SAndriy Gapon } 94582a5a275SAndriy Gapon 9461ee1b687SJohn Baldwin static int 947e0f66ef8SJohn Baldwin intr_event_schedule_thread(struct intr_event *ie) 9483e5da754SJohn Baldwin { 949e0f66ef8SJohn Baldwin struct intr_entropy entropy; 950e0f66ef8SJohn Baldwin struct intr_thread *it; 951b40ce416SJulian Elischer struct thread *td; 95204774f23SJulian Elischer struct thread *ctd; 9533e5da754SJohn Baldwin 9543e5da754SJohn Baldwin /* 9553e5da754SJohn Baldwin * If no ithread or no handlers, then we have a stray interrupt. 9563e5da754SJohn Baldwin */ 957111b043cSAndriy Gapon if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers) || 958e0f66ef8SJohn Baldwin ie->ie_thread == NULL) 9593e5da754SJohn Baldwin return (EINVAL); 9603e5da754SJohn Baldwin 96104774f23SJulian Elischer ctd = curthread; 962e0f66ef8SJohn Baldwin it = ie->ie_thread; 963e0f66ef8SJohn Baldwin td = it->it_thread; 964e0f66ef8SJohn Baldwin 9653e5da754SJohn Baldwin /* 9663e5da754SJohn Baldwin * If any of the handlers for this ithread claim to be good 9673e5da754SJohn Baldwin * sources of entropy, then gather some. 9683e5da754SJohn Baldwin */ 969c4eb6630SGleb Smirnoff if (ie->ie_hflags & IH_ENTROPY) { 970e0f66ef8SJohn Baldwin entropy.event = (uintptr_t)ie; 971e0f66ef8SJohn Baldwin entropy.td = ctd; 97219fa89e9SMark Murray random_harvest_queue(&entropy, sizeof(entropy), RANDOM_INTERRUPT); 9733e5da754SJohn Baldwin } 9743e5da754SJohn Baldwin 975ba3f7276SMatt Macy KASSERT(td->td_proc != NULL, ("ithread %s has no process", ie->ie_name)); 9763e5da754SJohn Baldwin 9773e5da754SJohn Baldwin /* 9783e5da754SJohn Baldwin * Set it_need to tell the thread to keep running if it is already 979982d11f8SJeff Roberson * running. Then, lock the thread and see if we actually need to 980982d11f8SJeff Roberson * put it on the runqueue. 981283dfee9SKonstantin Belousov * 982283dfee9SKonstantin Belousov * Use store_rel to arrange that the store to ih_need in 983283dfee9SKonstantin Belousov * swi_sched() is before the store to it_need and prepare for 984283dfee9SKonstantin Belousov * transfer of this order to loads in the ithread. 9853e5da754SJohn Baldwin */ 9863eebd44dSAlfred Perlstein atomic_store_rel_int(&it->it_need, 1); 987982d11f8SJeff Roberson thread_lock(td); 98871fad9fdSJulian Elischer if (TD_AWAITING_INTR(td)) { 989fc2e87beSMatt Macy CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, td->td_proc->p_pid, 9907ab24ea3SJulian Elischer td->td_name); 99171fad9fdSJulian Elischer TD_CLR_IWAIT(td); 992f0393f06SJeff Roberson sched_add(td, SRQ_INTR); 9933e5da754SJohn Baldwin } else { 994e0f66ef8SJohn Baldwin CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d", 995fa2528acSAlex Richardson __func__, td->td_proc->p_pid, td->td_name, it->it_need, TD_GET_STATE(td)); 996982d11f8SJeff Roberson thread_unlock(td); 99761a74c5cSJeff Roberson } 9983e5da754SJohn Baldwin 9993e5da754SJohn Baldwin return (0); 10003e5da754SJohn Baldwin } 10013e5da754SJohn Baldwin 1002fe486a37SJohn Baldwin /* 1003e84bcd84SRobert Watson * Allow interrupt event binding for software interrupt handlers -- a no-op, 1004e84bcd84SRobert Watson * since interrupts are generated in software rather than being directed by 1005e84bcd84SRobert Watson * a PIC. 1006e84bcd84SRobert Watson */ 1007e84bcd84SRobert Watson static int 1008066da805SAdrian Chadd swi_assign_cpu(void *arg, int cpu) 1009e84bcd84SRobert Watson { 1010e84bcd84SRobert Watson 1011e84bcd84SRobert Watson return (0); 1012e84bcd84SRobert Watson } 1013e84bcd84SRobert Watson 1014e84bcd84SRobert Watson /* 1015fe486a37SJohn Baldwin * Add a software interrupt handler to a specified event. If a given event 1016fe486a37SJohn Baldwin * is not specified, then a new event is created. 1017fe486a37SJohn Baldwin */ 10183e5da754SJohn Baldwin int 1019e0f66ef8SJohn Baldwin swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler, 1020b4151f71SJohn Baldwin void *arg, int pri, enum intr_type flags, void **cookiep) 10218088699fSJohn Baldwin { 1022e0f66ef8SJohn Baldwin struct intr_event *ie; 1023aba10e13SAlexander Motin int error = 0; 10248088699fSJohn Baldwin 1025bafe5a31SPaolo Pisati if (flags & INTR_ENTROPY) 10263e5da754SJohn Baldwin return (EINVAL); 10273e5da754SJohn Baldwin 1028e0f66ef8SJohn Baldwin ie = (eventp != NULL) ? *eventp : NULL; 10298088699fSJohn Baldwin 1030e0f66ef8SJohn Baldwin if (ie != NULL) { 1031e0f66ef8SJohn Baldwin if (!(ie->ie_flags & IE_SOFT)) 10323e5da754SJohn Baldwin return (EINVAL); 10333e5da754SJohn Baldwin } else { 10349b33b154SJeff Roberson error = intr_event_create(&ie, NULL, IE_SOFT, 0, 1035e84bcd84SRobert Watson NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri); 10368088699fSJohn Baldwin if (error) 1037b4151f71SJohn Baldwin return (error); 1038e0f66ef8SJohn Baldwin if (eventp != NULL) 1039e0f66ef8SJohn Baldwin *eventp = ie; 10408088699fSJohn Baldwin } 1041aba10e13SAlexander Motin if (handler != NULL) { 10428d809d50SJeff Roberson error = intr_event_add_handler(ie, name, NULL, handler, arg, 1043d3305205SJohn Baldwin PI_SWI(pri), flags, cookiep); 1044aba10e13SAlexander Motin } 10458d809d50SJeff Roberson return (error); 10468088699fSJohn Baldwin } 10478088699fSJohn Baldwin 10481931cf94SJohn Baldwin /* 1049e0f66ef8SJohn Baldwin * Schedule a software interrupt thread. 10501931cf94SJohn Baldwin */ 10511931cf94SJohn Baldwin void 1052b4151f71SJohn Baldwin swi_sched(void *cookie, int flags) 10531931cf94SJohn Baldwin { 1054e0f66ef8SJohn Baldwin struct intr_handler *ih = (struct intr_handler *)cookie; 1055e0f66ef8SJohn Baldwin struct intr_event *ie = ih->ih_event; 1056d95dca1dSJohn Baldwin struct intr_entropy entropy; 1057ba3f7276SMatt Macy int error __unused; 10588088699fSJohn Baldwin 1059e0f66ef8SJohn Baldwin CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name, 1060e0f66ef8SJohn Baldwin ih->ih_need); 10611931cf94SJohn Baldwin 1062aba10e13SAlexander Motin if ((flags & SWI_FROMNMI) == 0) { 1063d95dca1dSJohn Baldwin entropy.event = (uintptr_t)ih; 1064d95dca1dSJohn Baldwin entropy.td = curthread; 106519fa89e9SMark Murray random_harvest_queue(&entropy, sizeof(entropy), RANDOM_SWI); 1066aba10e13SAlexander Motin } 1067d95dca1dSJohn Baldwin 10681931cf94SJohn Baldwin /* 10693e5da754SJohn Baldwin * Set ih_need for this handler so that if the ithread is already 10703e5da754SJohn Baldwin * running it will execute this handler on the next pass. Otherwise, 10713e5da754SJohn Baldwin * it will execute it the next time it runs. 10721931cf94SJohn Baldwin */ 1073283dfee9SKonstantin Belousov ih->ih_need = 1; 10741ca2c018SBruce Evans 1075aba10e13SAlexander Motin if (flags & SWI_DELAY) 1076aba10e13SAlexander Motin return; 1077aba10e13SAlexander Motin 1078aba10e13SAlexander Motin if (flags & SWI_FROMNMI) { 1079aba10e13SAlexander Motin #if defined(SMP) && (defined(__i386__) || defined(__amd64__)) 1080aba10e13SAlexander Motin KASSERT(ie == clk_intr_event, 1081aba10e13SAlexander Motin ("SWI_FROMNMI used not with clk_intr_event")); 1082aba10e13SAlexander Motin ipi_self_from_nmi(IPI_SWI); 1083aba10e13SAlexander Motin #endif 1084aba10e13SAlexander Motin } else { 108583c9dea1SGleb Smirnoff VM_CNT_INC(v_soft); 1086e0f66ef8SJohn Baldwin error = intr_event_schedule_thread(ie); 10873e5da754SJohn Baldwin KASSERT(error == 0, ("stray software interrupt")); 10888088699fSJohn Baldwin } 10898088699fSJohn Baldwin } 10908088699fSJohn Baldwin 1091fe486a37SJohn Baldwin /* 1092fe486a37SJohn Baldwin * Remove a software interrupt handler. Currently this code does not 1093fe486a37SJohn Baldwin * remove the associated interrupt event if it becomes empty. Calling code 1094fe486a37SJohn Baldwin * may do so manually via intr_event_destroy(), but that's not really 1095fe486a37SJohn Baldwin * an optimal interface. 1096fe486a37SJohn Baldwin */ 1097fe486a37SJohn Baldwin int 1098fe486a37SJohn Baldwin swi_remove(void *cookie) 1099fe486a37SJohn Baldwin { 1100fe486a37SJohn Baldwin 1101fe486a37SJohn Baldwin return (intr_event_remove_handler(cookie)); 1102fe486a37SJohn Baldwin } 1103fe486a37SJohn Baldwin 1104111b043cSAndriy Gapon static void 110537e9511fSJohn Baldwin intr_event_execute_handlers(struct proc *p, struct intr_event *ie) 1106e0f66ef8SJohn Baldwin { 1107111b043cSAndriy Gapon struct intr_handler *ih, *ihn, *ihp; 1108e0f66ef8SJohn Baldwin 1109111b043cSAndriy Gapon ihp = NULL; 1110111b043cSAndriy Gapon CK_SLIST_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) { 1111e0f66ef8SJohn Baldwin /* 1112e0f66ef8SJohn Baldwin * If this handler is marked for death, remove it from 1113e0f66ef8SJohn Baldwin * the list of handlers and wake up the sleeper. 1114e0f66ef8SJohn Baldwin */ 1115e0f66ef8SJohn Baldwin if (ih->ih_flags & IH_DEAD) { 1116e0f66ef8SJohn Baldwin mtx_lock(&ie->ie_lock); 1117111b043cSAndriy Gapon if (ihp == NULL) 1118111b043cSAndriy Gapon CK_SLIST_REMOVE_HEAD(&ie->ie_handlers, ih_next); 1119111b043cSAndriy Gapon else 1120111b043cSAndriy Gapon CK_SLIST_REMOVE_AFTER(ihp, ih_next); 1121e0f66ef8SJohn Baldwin ih->ih_flags &= ~IH_DEAD; 1122e0f66ef8SJohn Baldwin wakeup(ih); 1123e0f66ef8SJohn Baldwin mtx_unlock(&ie->ie_lock); 1124e0f66ef8SJohn Baldwin continue; 1125e0f66ef8SJohn Baldwin } 1126e0f66ef8SJohn Baldwin 1127111b043cSAndriy Gapon /* 1128111b043cSAndriy Gapon * Now that we know that the current element won't be removed 1129111b043cSAndriy Gapon * update the previous element. 1130111b043cSAndriy Gapon */ 1131111b043cSAndriy Gapon ihp = ih; 1132111b043cSAndriy Gapon 113382a5a275SAndriy Gapon if ((ih->ih_flags & IH_CHANGED) != 0) { 113482a5a275SAndriy Gapon mtx_lock(&ie->ie_lock); 113582a5a275SAndriy Gapon ih->ih_flags &= ~IH_CHANGED; 113682a5a275SAndriy Gapon wakeup(ih); 113782a5a275SAndriy Gapon mtx_unlock(&ie->ie_lock); 113882a5a275SAndriy Gapon } 113982a5a275SAndriy Gapon 1140f2d619c8SPaolo Pisati /* Skip filter only handlers */ 1141f2d619c8SPaolo Pisati if (ih->ih_handler == NULL) 1142f2d619c8SPaolo Pisati continue; 1143f2d619c8SPaolo Pisati 114482a5a275SAndriy Gapon /* Skip suspended handlers */ 114582a5a275SAndriy Gapon if ((ih->ih_flags & IH_SUSP) != 0) 114682a5a275SAndriy Gapon continue; 114782a5a275SAndriy Gapon 1148e0f66ef8SJohn Baldwin /* 1149e0f66ef8SJohn Baldwin * For software interrupt threads, we only execute 1150e0f66ef8SJohn Baldwin * handlers that have their need flag set. Hardware 1151e0f66ef8SJohn Baldwin * interrupt threads always invoke all of their handlers. 11521b79b949SKirk McKusick * 11531b79b949SKirk McKusick * ih_need can only be 0 or 1. Failed cmpset below 11541b79b949SKirk McKusick * means that there is no request to execute handlers, 11551b79b949SKirk McKusick * so a retry of the cmpset is not needed. 1156e0f66ef8SJohn Baldwin */ 11571b79b949SKirk McKusick if ((ie->ie_flags & IE_SOFT) != 0 && 11581b79b949SKirk McKusick atomic_cmpset_int(&ih->ih_need, 1, 0) == 0) 1159e0f66ef8SJohn Baldwin continue; 1160e0f66ef8SJohn Baldwin 1161e0f66ef8SJohn Baldwin /* Execute this handler. */ 1162e0f66ef8SJohn Baldwin CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x", 1163bafe5a31SPaolo Pisati __func__, p->p_pid, (void *)ih->ih_handler, 1164bafe5a31SPaolo Pisati ih->ih_argument, ih->ih_name, ih->ih_flags); 1165e0f66ef8SJohn Baldwin 1166e0f66ef8SJohn Baldwin if (!(ih->ih_flags & IH_MPSAFE)) 1167e0f66ef8SJohn Baldwin mtx_lock(&Giant); 1168e0f66ef8SJohn Baldwin ih->ih_handler(ih->ih_argument); 1169e0f66ef8SJohn Baldwin if (!(ih->ih_flags & IH_MPSAFE)) 1170e0f66ef8SJohn Baldwin mtx_unlock(&Giant); 1171e0f66ef8SJohn Baldwin } 117237e9511fSJohn Baldwin } 117337e9511fSJohn Baldwin 117437e9511fSJohn Baldwin static void 117537e9511fSJohn Baldwin ithread_execute_handlers(struct proc *p, struct intr_event *ie) 117637e9511fSJohn Baldwin { 117737e9511fSJohn Baldwin 117837e9511fSJohn Baldwin /* Interrupt handlers should not sleep. */ 117937e9511fSJohn Baldwin if (!(ie->ie_flags & IE_SOFT)) 118037e9511fSJohn Baldwin THREAD_NO_SLEEPING(); 118137e9511fSJohn Baldwin intr_event_execute_handlers(p, ie); 1182e0f66ef8SJohn Baldwin if (!(ie->ie_flags & IE_SOFT)) 1183e0f66ef8SJohn Baldwin THREAD_SLEEPING_OK(); 1184e0f66ef8SJohn Baldwin 1185e0f66ef8SJohn Baldwin /* 1186e0f66ef8SJohn Baldwin * Interrupt storm handling: 1187e0f66ef8SJohn Baldwin * 1188e0f66ef8SJohn Baldwin * If this interrupt source is currently storming, then throttle 1189e0f66ef8SJohn Baldwin * it to only fire the handler once per clock tick. 1190e0f66ef8SJohn Baldwin * 1191e0f66ef8SJohn Baldwin * If this interrupt source is not currently storming, but the 1192e0f66ef8SJohn Baldwin * number of back to back interrupts exceeds the storm threshold, 1193e0f66ef8SJohn Baldwin * then enter storming mode. 1194e0f66ef8SJohn Baldwin */ 1195e41bcf3cSJohn Baldwin if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold && 1196e41bcf3cSJohn Baldwin !(ie->ie_flags & IE_SOFT)) { 11970ae62c18SNate Lawson /* Report the message only once every second. */ 11980ae62c18SNate Lawson if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) { 1199e0f66ef8SJohn Baldwin printf( 12000ae62c18SNate Lawson "interrupt storm detected on \"%s\"; throttling interrupt source\n", 1201e0f66ef8SJohn Baldwin ie->ie_name); 1202e0f66ef8SJohn Baldwin } 1203e41bcf3cSJohn Baldwin pause("istorm", 1); 1204e0f66ef8SJohn Baldwin } else 1205e0f66ef8SJohn Baldwin ie->ie_count++; 1206e0f66ef8SJohn Baldwin 1207e0f66ef8SJohn Baldwin /* 1208e0f66ef8SJohn Baldwin * Now that all the handlers have had a chance to run, reenable 1209e0f66ef8SJohn Baldwin * the interrupt source. 1210e0f66ef8SJohn Baldwin */ 12111ee1b687SJohn Baldwin if (ie->ie_post_ithread != NULL) 12121ee1b687SJohn Baldwin ie->ie_post_ithread(ie->ie_source); 1213e0f66ef8SJohn Baldwin } 1214e0f66ef8SJohn Baldwin 12158088699fSJohn Baldwin /* 1216b4151f71SJohn Baldwin * This is the main code for interrupt threads. 12178088699fSJohn Baldwin */ 121837c84183SPoul-Henning Kamp static void 1219b4151f71SJohn Baldwin ithread_loop(void *arg) 12208088699fSJohn Baldwin { 1221511d1afbSGleb Smirnoff struct epoch_tracker et; 1222e0f66ef8SJohn Baldwin struct intr_thread *ithd; 1223e0f66ef8SJohn Baldwin struct intr_event *ie; 1224b40ce416SJulian Elischer struct thread *td; 1225b4151f71SJohn Baldwin struct proc *p; 1226511d1afbSGleb Smirnoff int wake, epoch_count; 1227f912e8f2SHans Petter Selasky bool needs_epoch; 12288088699fSJohn Baldwin 1229b40ce416SJulian Elischer td = curthread; 1230b40ce416SJulian Elischer p = td->td_proc; 1231e0f66ef8SJohn Baldwin ithd = (struct intr_thread *)arg; 1232e0f66ef8SJohn Baldwin KASSERT(ithd->it_thread == td, 123391f91617SDavid E. O'Brien ("%s: ithread and proc linkage out of sync", __func__)); 1234e0f66ef8SJohn Baldwin ie = ithd->it_event; 1235e0f66ef8SJohn Baldwin ie->ie_count = 0; 1236e4cd31ddSJeff Roberson wake = 0; 12378088699fSJohn Baldwin 12388088699fSJohn Baldwin /* 12398088699fSJohn Baldwin * As long as we have interrupts outstanding, go through the 12408088699fSJohn Baldwin * list of handlers, giving each one a go at it. 12418088699fSJohn Baldwin */ 12428088699fSJohn Baldwin for (;;) { 1243b4151f71SJohn Baldwin /* 1244b4151f71SJohn Baldwin * If we are an orphaned thread, then just die. 1245b4151f71SJohn Baldwin */ 1246b4151f71SJohn Baldwin if (ithd->it_flags & IT_DEAD) { 1247e0f66ef8SJohn Baldwin CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__, 12487ab24ea3SJulian Elischer p->p_pid, td->td_name); 1249b4151f71SJohn Baldwin free(ithd, M_ITHREAD); 1250ca9a0ddfSJulian Elischer kthread_exit(); 1251b4151f71SJohn Baldwin } 1252b4151f71SJohn Baldwin 1253e0f66ef8SJohn Baldwin /* 1254e0f66ef8SJohn Baldwin * Service interrupts. If another interrupt arrives while 1255e0f66ef8SJohn Baldwin * we are running, it will set it_need to note that we 1256e0f66ef8SJohn Baldwin * should make another pass. 1257283dfee9SKonstantin Belousov * 1258283dfee9SKonstantin Belousov * The load_acq part of the following cmpset ensures 1259283dfee9SKonstantin Belousov * that the load of ih_need in ithread_execute_handlers() 1260283dfee9SKonstantin Belousov * is ordered after the load of it_need here. 1261e0f66ef8SJohn Baldwin */ 1262f912e8f2SHans Petter Selasky needs_epoch = 1263f912e8f2SHans Petter Selasky (atomic_load_int(&ie->ie_hflags) & IH_NET) != 0; 1264f912e8f2SHans Petter Selasky if (needs_epoch) { 1265511d1afbSGleb Smirnoff epoch_count = 0; 1266511d1afbSGleb Smirnoff NET_EPOCH_ENTER(et); 1267511d1afbSGleb Smirnoff } 1268511d1afbSGleb Smirnoff while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0) { 1269e0f66ef8SJohn Baldwin ithread_execute_handlers(p, ie); 1270f912e8f2SHans Petter Selasky if (needs_epoch && 1271511d1afbSGleb Smirnoff ++epoch_count >= intr_epoch_batch) { 1272511d1afbSGleb Smirnoff NET_EPOCH_EXIT(et); 1273511d1afbSGleb Smirnoff epoch_count = 0; 1274511d1afbSGleb Smirnoff NET_EPOCH_ENTER(et); 1275511d1afbSGleb Smirnoff } 1276511d1afbSGleb Smirnoff } 1277f912e8f2SHans Petter Selasky if (needs_epoch) 1278511d1afbSGleb Smirnoff NET_EPOCH_EXIT(et); 12797870c3c6SJohn Baldwin WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread"); 12807870c3c6SJohn Baldwin mtx_assert(&Giant, MA_NOTOWNED); 12818088699fSJohn Baldwin 12828088699fSJohn Baldwin /* 12838088699fSJohn Baldwin * Processed all our interrupts. Now get the sched 12848088699fSJohn Baldwin * lock. This may take a while and it_need may get 12858088699fSJohn Baldwin * set again, so we have to check it again. 12868088699fSJohn Baldwin */ 1287982d11f8SJeff Roberson thread_lock(td); 128803bbcb2fSKonstantin Belousov if (atomic_load_acq_int(&ithd->it_need) == 0 && 128903bbcb2fSKonstantin Belousov (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) { 12907870c3c6SJohn Baldwin TD_SET_IWAIT(td); 1291e0f66ef8SJohn Baldwin ie->ie_count = 0; 1292686bcb5cSJeff Roberson mi_switch(SW_VOL | SWT_IWAIT); 1293686bcb5cSJeff Roberson } else { 1294e4cd31ddSJeff Roberson if (ithd->it_flags & IT_WAIT) { 1295e4cd31ddSJeff Roberson wake = 1; 1296e4cd31ddSJeff Roberson ithd->it_flags &= ~IT_WAIT; 1297e4cd31ddSJeff Roberson } 1298982d11f8SJeff Roberson thread_unlock(td); 1299686bcb5cSJeff Roberson } 1300e4cd31ddSJeff Roberson if (wake) { 1301e4cd31ddSJeff Roberson wakeup(ithd); 1302e4cd31ddSJeff Roberson wake = 0; 1303e4cd31ddSJeff Roberson } 13048088699fSJohn Baldwin } 13051931cf94SJohn Baldwin } 13061ee1b687SJohn Baldwin 13071ee1b687SJohn Baldwin /* 13081ee1b687SJohn Baldwin * Main interrupt handling body. 13091ee1b687SJohn Baldwin * 13101ee1b687SJohn Baldwin * Input: 13111ee1b687SJohn Baldwin * o ie: the event connected to this interrupt. 13121ee1b687SJohn Baldwin * o frame: some archs (i.e. i386) pass a frame to some. 13131ee1b687SJohn Baldwin * handlers as their main argument. 13141ee1b687SJohn Baldwin * Return value: 13151ee1b687SJohn Baldwin * o 0: everything ok. 13161ee1b687SJohn Baldwin * o EINVAL: stray interrupt. 13171ee1b687SJohn Baldwin */ 13181ee1b687SJohn Baldwin int 13191ee1b687SJohn Baldwin intr_event_handle(struct intr_event *ie, struct trapframe *frame) 13201ee1b687SJohn Baldwin { 13211ee1b687SJohn Baldwin struct intr_handler *ih; 13221f255bd3SAlexander Motin struct trapframe *oldframe; 13231ee1b687SJohn Baldwin struct thread *td; 1324e0fa977eSAndriy Gapon int phase; 132582a5a275SAndriy Gapon int ret; 132682a5a275SAndriy Gapon bool filter, thread; 13271ee1b687SJohn Baldwin 13281ee1b687SJohn Baldwin td = curthread; 13291ee1b687SJohn Baldwin 1330b7627840SKonstantin Belousov #ifdef KSTACK_USAGE_PROF 1331b7627840SKonstantin Belousov intr_prof_stack_use(td, frame); 1332b7627840SKonstantin Belousov #endif 1333b7627840SKonstantin Belousov 13341ee1b687SJohn Baldwin /* An interrupt with no event or handlers is a stray interrupt. */ 1335111b043cSAndriy Gapon if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers)) 13361ee1b687SJohn Baldwin return (EINVAL); 13371ee1b687SJohn Baldwin 13381ee1b687SJohn Baldwin /* 13391ee1b687SJohn Baldwin * Execute fast interrupt handlers directly. 13401ee1b687SJohn Baldwin * To support clock handlers, if a handler registers 13411ee1b687SJohn Baldwin * with a NULL argument, then we pass it a pointer to 13421ee1b687SJohn Baldwin * a trapframe as its argument. 13431ee1b687SJohn Baldwin */ 13441ee1b687SJohn Baldwin td->td_intr_nesting_level++; 134582a5a275SAndriy Gapon filter = false; 134682a5a275SAndriy Gapon thread = false; 13471ee1b687SJohn Baldwin ret = 0; 13481ee1b687SJohn Baldwin critical_enter(); 13491f255bd3SAlexander Motin oldframe = td->td_intr_frame; 13501f255bd3SAlexander Motin td->td_intr_frame = frame; 1351111b043cSAndriy Gapon 1352e0fa977eSAndriy Gapon phase = ie->ie_phase; 1353e0fa977eSAndriy Gapon atomic_add_int(&ie->ie_active[phase], 1); 1354e0fa977eSAndriy Gapon 1355e0fa977eSAndriy Gapon /* 1356e0fa977eSAndriy Gapon * This fence is required to ensure that no later loads are 1357e0fa977eSAndriy Gapon * re-ordered before the ie_active store. 1358e0fa977eSAndriy Gapon */ 1359e0fa977eSAndriy Gapon atomic_thread_fence_seq_cst(); 1360e0fa977eSAndriy Gapon 1361111b043cSAndriy Gapon CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) { 136282a5a275SAndriy Gapon if ((ih->ih_flags & IH_SUSP) != 0) 136382a5a275SAndriy Gapon continue; 1364aba10e13SAlexander Motin if ((ie->ie_flags & IE_SOFT) != 0 && ih->ih_need == 0) 1365aba10e13SAlexander Motin continue; 13661ee1b687SJohn Baldwin if (ih->ih_filter == NULL) { 136782a5a275SAndriy Gapon thread = true; 13681ee1b687SJohn Baldwin continue; 13691ee1b687SJohn Baldwin } 13701ee1b687SJohn Baldwin CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__, 13711ee1b687SJohn Baldwin ih->ih_filter, ih->ih_argument == NULL ? frame : 13721ee1b687SJohn Baldwin ih->ih_argument, ih->ih_name); 13731ee1b687SJohn Baldwin if (ih->ih_argument == NULL) 13741ee1b687SJohn Baldwin ret = ih->ih_filter(frame); 13751ee1b687SJohn Baldwin else 13761ee1b687SJohn Baldwin ret = ih->ih_filter(ih->ih_argument); 137789fc20ccSAndriy Gapon KASSERT(ret == FILTER_STRAY || 137889fc20ccSAndriy Gapon ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 && 137989fc20ccSAndriy Gapon (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0), 138089fc20ccSAndriy Gapon ("%s: incorrect return value %#x from %s", __func__, ret, 138189fc20ccSAndriy Gapon ih->ih_name)); 138282a5a275SAndriy Gapon filter = filter || ret == FILTER_HANDLED; 138389fc20ccSAndriy Gapon 13841ee1b687SJohn Baldwin /* 13851ee1b687SJohn Baldwin * Wrapper handler special handling: 13861ee1b687SJohn Baldwin * 13871ee1b687SJohn Baldwin * in some particular cases (like pccard and pccbb), 13881ee1b687SJohn Baldwin * the _real_ device handler is wrapped in a couple of 13891ee1b687SJohn Baldwin * functions - a filter wrapper and an ithread wrapper. 13901ee1b687SJohn Baldwin * In this case (and just in this case), the filter wrapper 13911ee1b687SJohn Baldwin * could ask the system to schedule the ithread and mask 13921ee1b687SJohn Baldwin * the interrupt source if the wrapped handler is composed 13931ee1b687SJohn Baldwin * of just an ithread handler. 13941ee1b687SJohn Baldwin * 13951ee1b687SJohn Baldwin * TODO: write a generic wrapper to avoid people rolling 139682a5a275SAndriy Gapon * their own. 13971ee1b687SJohn Baldwin */ 13981ee1b687SJohn Baldwin if (!thread) { 13991ee1b687SJohn Baldwin if (ret == FILTER_SCHEDULE_THREAD) 140082a5a275SAndriy Gapon thread = true; 14011ee1b687SJohn Baldwin } 14021ee1b687SJohn Baldwin } 1403e0fa977eSAndriy Gapon atomic_add_rel_int(&ie->ie_active[phase], -1); 1404e0fa977eSAndriy Gapon 14051f255bd3SAlexander Motin td->td_intr_frame = oldframe; 14061ee1b687SJohn Baldwin 14071ee1b687SJohn Baldwin if (thread) { 14081ee1b687SJohn Baldwin if (ie->ie_pre_ithread != NULL) 14091ee1b687SJohn Baldwin ie->ie_pre_ithread(ie->ie_source); 14101ee1b687SJohn Baldwin } else { 14111ee1b687SJohn Baldwin if (ie->ie_post_filter != NULL) 14121ee1b687SJohn Baldwin ie->ie_post_filter(ie->ie_source); 14131ee1b687SJohn Baldwin } 14141ee1b687SJohn Baldwin 14151ee1b687SJohn Baldwin /* Schedule the ithread if needed. */ 14161ee1b687SJohn Baldwin if (thread) { 1417ba3f7276SMatt Macy int error __unused; 1418ba3f7276SMatt Macy 14191ee1b687SJohn Baldwin error = intr_event_schedule_thread(ie); 14201ee1b687SJohn Baldwin KASSERT(error == 0, ("bad stray interrupt")); 14211ee1b687SJohn Baldwin } 14221ee1b687SJohn Baldwin critical_exit(); 14231ee1b687SJohn Baldwin td->td_intr_nesting_level--; 142482a5a275SAndriy Gapon #ifdef notyet 142582a5a275SAndriy Gapon /* The interrupt is not aknowledged by any filter and has no ithread. */ 142682a5a275SAndriy Gapon if (!thread && !filter) 142782a5a275SAndriy Gapon return (EINVAL); 142882a5a275SAndriy Gapon #endif 14291ee1b687SJohn Baldwin return (0); 14301ee1b687SJohn Baldwin } 14311931cf94SJohn Baldwin 14328b201c42SJohn Baldwin #ifdef DDB 14338b201c42SJohn Baldwin /* 14348b201c42SJohn Baldwin * Dump details about an interrupt handler 14358b201c42SJohn Baldwin */ 14368b201c42SJohn Baldwin static void 1437e0f66ef8SJohn Baldwin db_dump_intrhand(struct intr_handler *ih) 14388b201c42SJohn Baldwin { 14398b201c42SJohn Baldwin int comma; 14408b201c42SJohn Baldwin 14418b201c42SJohn Baldwin db_printf("\t%-10s ", ih->ih_name); 14428b201c42SJohn Baldwin switch (ih->ih_pri) { 14438b201c42SJohn Baldwin case PI_REALTIME: 14448b201c42SJohn Baldwin db_printf("CLK "); 14458b201c42SJohn Baldwin break; 14468b201c42SJohn Baldwin case PI_AV: 14478b201c42SJohn Baldwin db_printf("AV "); 14488b201c42SJohn Baldwin break; 1449d3305205SJohn Baldwin case PI_TTY: 14508b201c42SJohn Baldwin db_printf("TTY "); 14518b201c42SJohn Baldwin break; 14528b201c42SJohn Baldwin case PI_NET: 14538b201c42SJohn Baldwin db_printf("NET "); 14548b201c42SJohn Baldwin break; 14558b201c42SJohn Baldwin case PI_DISK: 14568b201c42SJohn Baldwin db_printf("DISK"); 14578b201c42SJohn Baldwin break; 14588b201c42SJohn Baldwin case PI_DULL: 14598b201c42SJohn Baldwin db_printf("DULL"); 14608b201c42SJohn Baldwin break; 14618b201c42SJohn Baldwin default: 14628b201c42SJohn Baldwin if (ih->ih_pri >= PI_SOFT) 14638b201c42SJohn Baldwin db_printf("SWI "); 14648b201c42SJohn Baldwin else 14658b201c42SJohn Baldwin db_printf("%4u", ih->ih_pri); 14668b201c42SJohn Baldwin break; 14678b201c42SJohn Baldwin } 14688b201c42SJohn Baldwin db_printf(" "); 1469b887a155SKonstantin Belousov if (ih->ih_filter != NULL) { 1470b887a155SKonstantin Belousov db_printf("[F]"); 1471b887a155SKonstantin Belousov db_printsym((uintptr_t)ih->ih_filter, DB_STGY_PROC); 1472b887a155SKonstantin Belousov } 1473b887a155SKonstantin Belousov if (ih->ih_handler != NULL) { 1474b887a155SKonstantin Belousov if (ih->ih_filter != NULL) 1475b887a155SKonstantin Belousov db_printf(","); 1476b887a155SKonstantin Belousov db_printf("[H]"); 14778b201c42SJohn Baldwin db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC); 1478b887a155SKonstantin Belousov } 14798b201c42SJohn Baldwin db_printf("(%p)", ih->ih_argument); 14808b201c42SJohn Baldwin if (ih->ih_need || 1481ef544f63SPaolo Pisati (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD | 14828b201c42SJohn Baldwin IH_MPSAFE)) != 0) { 14838b201c42SJohn Baldwin db_printf(" {"); 14848b201c42SJohn Baldwin comma = 0; 14858b201c42SJohn Baldwin if (ih->ih_flags & IH_EXCLUSIVE) { 14868b201c42SJohn Baldwin if (comma) 14878b201c42SJohn Baldwin db_printf(", "); 14888b201c42SJohn Baldwin db_printf("EXCL"); 14898b201c42SJohn Baldwin comma = 1; 14908b201c42SJohn Baldwin } 14918b201c42SJohn Baldwin if (ih->ih_flags & IH_ENTROPY) { 14928b201c42SJohn Baldwin if (comma) 14938b201c42SJohn Baldwin db_printf(", "); 14948b201c42SJohn Baldwin db_printf("ENTROPY"); 14958b201c42SJohn Baldwin comma = 1; 14968b201c42SJohn Baldwin } 14978b201c42SJohn Baldwin if (ih->ih_flags & IH_DEAD) { 14988b201c42SJohn Baldwin if (comma) 14998b201c42SJohn Baldwin db_printf(", "); 15008b201c42SJohn Baldwin db_printf("DEAD"); 15018b201c42SJohn Baldwin comma = 1; 15028b201c42SJohn Baldwin } 15038b201c42SJohn Baldwin if (ih->ih_flags & IH_MPSAFE) { 15048b201c42SJohn Baldwin if (comma) 15058b201c42SJohn Baldwin db_printf(", "); 15068b201c42SJohn Baldwin db_printf("MPSAFE"); 15078b201c42SJohn Baldwin comma = 1; 15088b201c42SJohn Baldwin } 15098b201c42SJohn Baldwin if (ih->ih_need) { 15108b201c42SJohn Baldwin if (comma) 15118b201c42SJohn Baldwin db_printf(", "); 15128b201c42SJohn Baldwin db_printf("NEED"); 15138b201c42SJohn Baldwin } 15148b201c42SJohn Baldwin db_printf("}"); 15158b201c42SJohn Baldwin } 15168b201c42SJohn Baldwin db_printf("\n"); 15178b201c42SJohn Baldwin } 15188b201c42SJohn Baldwin 15198b201c42SJohn Baldwin /* 1520e0f66ef8SJohn Baldwin * Dump details about a event. 15218b201c42SJohn Baldwin */ 15228b201c42SJohn Baldwin void 1523e0f66ef8SJohn Baldwin db_dump_intr_event(struct intr_event *ie, int handlers) 15248b201c42SJohn Baldwin { 1525e0f66ef8SJohn Baldwin struct intr_handler *ih; 1526e0f66ef8SJohn Baldwin struct intr_thread *it; 15278b201c42SJohn Baldwin int comma; 15288b201c42SJohn Baldwin 1529e0f66ef8SJohn Baldwin db_printf("%s ", ie->ie_fullname); 1530e0f66ef8SJohn Baldwin it = ie->ie_thread; 1531e0f66ef8SJohn Baldwin if (it != NULL) 1532e0f66ef8SJohn Baldwin db_printf("(pid %d)", it->it_thread->td_proc->p_pid); 1533e0f66ef8SJohn Baldwin else 1534e0f66ef8SJohn Baldwin db_printf("(no thread)"); 1535c4eb6630SGleb Smirnoff if ((ie->ie_flags & (IE_SOFT | IE_ADDING_THREAD)) != 0 || 1536e0f66ef8SJohn Baldwin (it != NULL && it->it_need)) { 15378b201c42SJohn Baldwin db_printf(" {"); 15388b201c42SJohn Baldwin comma = 0; 1539e0f66ef8SJohn Baldwin if (ie->ie_flags & IE_SOFT) { 15408b201c42SJohn Baldwin db_printf("SOFT"); 15418b201c42SJohn Baldwin comma = 1; 15428b201c42SJohn Baldwin } 1543e0f66ef8SJohn Baldwin if (ie->ie_flags & IE_ADDING_THREAD) { 15448b201c42SJohn Baldwin if (comma) 15458b201c42SJohn Baldwin db_printf(", "); 1546e0f66ef8SJohn Baldwin db_printf("ADDING_THREAD"); 15478b201c42SJohn Baldwin comma = 1; 15488b201c42SJohn Baldwin } 1549e0f66ef8SJohn Baldwin if (it != NULL && it->it_need) { 15508b201c42SJohn Baldwin if (comma) 15518b201c42SJohn Baldwin db_printf(", "); 15528b201c42SJohn Baldwin db_printf("NEED"); 15538b201c42SJohn Baldwin } 15548b201c42SJohn Baldwin db_printf("}"); 15558b201c42SJohn Baldwin } 15568b201c42SJohn Baldwin db_printf("\n"); 15578b201c42SJohn Baldwin 15588b201c42SJohn Baldwin if (handlers) 1559111b043cSAndriy Gapon CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) 15608b201c42SJohn Baldwin db_dump_intrhand(ih); 15618b201c42SJohn Baldwin } 1562e0f66ef8SJohn Baldwin 1563e0f66ef8SJohn Baldwin /* 1564e0f66ef8SJohn Baldwin * Dump data about interrupt handlers 1565e0f66ef8SJohn Baldwin */ 1566e0f66ef8SJohn Baldwin DB_SHOW_COMMAND(intr, db_show_intr) 1567e0f66ef8SJohn Baldwin { 1568e0f66ef8SJohn Baldwin struct intr_event *ie; 156919e9205aSJohn Baldwin int all, verbose; 1570e0f66ef8SJohn Baldwin 1571dc15eac0SEd Schouten verbose = strchr(modif, 'v') != NULL; 1572dc15eac0SEd Schouten all = strchr(modif, 'a') != NULL; 1573e0f66ef8SJohn Baldwin TAILQ_FOREACH(ie, &event_list, ie_list) { 1574111b043cSAndriy Gapon if (!all && CK_SLIST_EMPTY(&ie->ie_handlers)) 1575e0f66ef8SJohn Baldwin continue; 1576e0f66ef8SJohn Baldwin db_dump_intr_event(ie, verbose); 157719e9205aSJohn Baldwin if (db_pager_quit) 157819e9205aSJohn Baldwin break; 1579e0f66ef8SJohn Baldwin } 1580e0f66ef8SJohn Baldwin } 15818b201c42SJohn Baldwin #endif /* DDB */ 15828b201c42SJohn Baldwin 1583b4151f71SJohn Baldwin /* 15848088699fSJohn Baldwin * Start standard software interrupt threads 15851931cf94SJohn Baldwin */ 15861931cf94SJohn Baldwin static void 1587b4151f71SJohn Baldwin start_softintr(void *dummy) 15881931cf94SJohn Baldwin { 1589b4151f71SJohn Baldwin 1590aba10e13SAlexander Motin if (swi_add(&clk_intr_event, "clk", NULL, NULL, SWI_CLOCK, 1591aba10e13SAlexander Motin INTR_MPSAFE, NULL)) 1592aba10e13SAlexander Motin panic("died while creating clk swi ithread"); 15938d809d50SJeff Roberson if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih)) 15948d809d50SJeff Roberson panic("died while creating vm swi ithread"); 15951931cf94SJohn Baldwin } 1596237fdd78SRobert Watson SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, 1597237fdd78SRobert Watson NULL); 15981931cf94SJohn Baldwin 1599d279178dSThomas Moestl /* 1600d279178dSThomas Moestl * Sysctls used by systat and others: hw.intrnames and hw.intrcnt. 1601d279178dSThomas Moestl * The data for this machine dependent, and the declarations are in machine 1602d279178dSThomas Moestl * dependent code. The layout of intrnames and intrcnt however is machine 1603d279178dSThomas Moestl * independent. 1604d279178dSThomas Moestl * 1605d279178dSThomas Moestl * We do not know the length of intrcnt and intrnames at compile time, so 1606d279178dSThomas Moestl * calculate things at run time. 1607d279178dSThomas Moestl */ 1608d279178dSThomas Moestl static int 1609d279178dSThomas Moestl sysctl_intrnames(SYSCTL_HANDLER_ARGS) 1610d279178dSThomas Moestl { 1611521ea19dSAttilio Rao return (sysctl_handle_opaque(oidp, intrnames, sintrnames, req)); 1612d279178dSThomas Moestl } 1613d279178dSThomas Moestl 16147029da5cSPawel Biernacki SYSCTL_PROC(_hw, OID_AUTO, intrnames, 1615*67f508dbSAlexander Motin CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, 16167029da5cSPawel Biernacki sysctl_intrnames, "", 16177029da5cSPawel Biernacki "Interrupt Names"); 1618d279178dSThomas Moestl 1619d279178dSThomas Moestl static int 1620d279178dSThomas Moestl sysctl_intrcnt(SYSCTL_HANDLER_ARGS) 1621d279178dSThomas Moestl { 162285729c2cSJuli Mallett #ifdef SCTL_MASK32 162385729c2cSJuli Mallett uint32_t *intrcnt32; 162485729c2cSJuli Mallett unsigned i; 162585729c2cSJuli Mallett int error; 162685729c2cSJuli Mallett 162785729c2cSJuli Mallett if (req->flags & SCTL_MASK32) { 162885729c2cSJuli Mallett if (!req->oldptr) 162985729c2cSJuli Mallett return (sysctl_handle_opaque(oidp, NULL, sintrcnt / 2, req)); 163085729c2cSJuli Mallett intrcnt32 = malloc(sintrcnt / 2, M_TEMP, M_NOWAIT); 163185729c2cSJuli Mallett if (intrcnt32 == NULL) 163285729c2cSJuli Mallett return (ENOMEM); 163385729c2cSJuli Mallett for (i = 0; i < sintrcnt / sizeof (u_long); i++) 163485729c2cSJuli Mallett intrcnt32[i] = intrcnt[i]; 163585729c2cSJuli Mallett error = sysctl_handle_opaque(oidp, intrcnt32, sintrcnt / 2, req); 163685729c2cSJuli Mallett free(intrcnt32, M_TEMP); 163785729c2cSJuli Mallett return (error); 163885729c2cSJuli Mallett } 163985729c2cSJuli Mallett #endif 1640521ea19dSAttilio Rao return (sysctl_handle_opaque(oidp, intrcnt, sintrcnt, req)); 1641d279178dSThomas Moestl } 1642d279178dSThomas Moestl 16437029da5cSPawel Biernacki SYSCTL_PROC(_hw, OID_AUTO, intrcnt, 1644*67f508dbSAlexander Motin CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, 16457029da5cSPawel Biernacki sysctl_intrcnt, "", 16467029da5cSPawel Biernacki "Interrupt Counts"); 16478b201c42SJohn Baldwin 16488b201c42SJohn Baldwin #ifdef DDB 16498b201c42SJohn Baldwin /* 16508b201c42SJohn Baldwin * DDB command to dump the interrupt statistics. 16518b201c42SJohn Baldwin */ 16528b201c42SJohn Baldwin DB_SHOW_COMMAND(intrcnt, db_show_intrcnt) 16538b201c42SJohn Baldwin { 16548b201c42SJohn Baldwin u_long *i; 16558b201c42SJohn Baldwin char *cp; 1656521ea19dSAttilio Rao u_int j; 16578b201c42SJohn Baldwin 16588b201c42SJohn Baldwin cp = intrnames; 1659521ea19dSAttilio Rao j = 0; 1660521ea19dSAttilio Rao for (i = intrcnt; j < (sintrcnt / sizeof(u_long)) && !db_pager_quit; 1661521ea19dSAttilio Rao i++, j++) { 16628b201c42SJohn Baldwin if (*cp == '\0') 16638b201c42SJohn Baldwin break; 16648b201c42SJohn Baldwin if (*i != 0) 16658b201c42SJohn Baldwin db_printf("%s\t%lu\n", cp, *i); 16668b201c42SJohn Baldwin cp += strlen(cp) + 1; 16678b201c42SJohn Baldwin } 16688b201c42SJohn Baldwin } 16698b201c42SJohn Baldwin #endif 1670