/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kstack_usage_prof.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/epoch.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/smp.h>
#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
	struct intr_event *it_event;
	struct thread *it_thread;	/* Kernel thread. */
	int	it_flags;		/* (j) IT_* flags. */
	int	it_need;		/* Needs service. */
	int	it_waiting;		/* Waiting in the runq. */
};

/* Interrupt thread flags kept in it_flags */
#define	IT_DEAD		0x000001	/* Thread is waiting to exit. */
#define	IT_WAIT		0x000002	/* Thread is waiting for completion. */

struct intr_entropy {
	struct thread *td;
	uintptr_t event;
};

struct intr_event *clk_intr_event;
struct intr_event *tty_intr_event;
void	*vm_ih;
struct proc *intrproc;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static int intr_storm_threshold = 0;
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RWTUN,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
static int intr_epoch_batch = 1000;
SYSCTL_INT(_hw, OID_AUTO, intr_epoch_batch, CTLFLAG_RWTUN, &intr_epoch_batch,
    0, "Maximum interrupt handler executions without re-entering epoch(9)");
#ifdef HWPMC_HOOKS
static int intr_hwpmc_waiting_report_threshold = 1;
SYSCTL_INT(_hw, OID_AUTO, intr_hwpmc_waiting_report_threshold, CTLFLAG_RWTUN,
    &intr_hwpmc_waiting_report_threshold, 1,
    "Threshold for reporting number of events in a workq");
#endif
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);
static struct mtx event_lock;
MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);

static void	intr_event_update(struct intr_event *ie);
static int	intr_event_schedule_thread(struct intr_event *ie,
		    struct trapframe *frame);
static struct intr_thread *ithread_create(const char *name);
static void	ithread_destroy(struct intr_thread *ithread);
static void	ithread_execute_handlers(struct proc *p,
		    struct intr_event *ie);
static void	ithread_loop(void *);
static void	ithread_update(struct intr_thread *ithd);
static void	start_softintr(void *);

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DEFINE( , , intr, all);
PMC_SOFT_DEFINE( , , intr, ithread);
PMC_SOFT_DEFINE( , , intr, filter);
PMC_SOFT_DEFINE( , , intr, stray);
PMC_SOFT_DEFINE( , , intr, schedule);
PMC_SOFT_DEFINE( , , intr, waiting);
#endif
/* Map an interrupt type to an ithread priority. */
u_char
intr_priority(enum intr_type flags)
{
	u_char pri;

	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
	switch (flags) {
	case INTR_TYPE_TTY:
		pri = PI_TTY;
		break;
	case INTR_TYPE_BIO:
		pri = PI_DISK;
		break;
	case INTR_TYPE_NET:
		pri = PI_NET;
		break;
	case INTR_TYPE_CAM:
		pri = PI_DISK;
		break;
	case INTR_TYPE_AV:
		pri = PI_AV;
		break;
	case INTR_TYPE_CLK:
		pri = PI_REALTIME;
		break;
	case INTR_TYPE_MISC:
		pri = PI_DULL;	/* don't care */
		break;
	default:
		/* We didn't specify an interrupt level. */
		panic("intr_priority: no interrupt type in flags");
	}

	return pri;
}

/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
	struct intr_event *ie;
	struct thread *td;
	u_char pri;

	ie = ithd->it_event;
	td = ithd->it_thread;
	mtx_assert(&ie->ie_lock, MA_OWNED);

	/* Determine the overall priority of this event. */
	if (CK_SLIST_EMPTY(&ie->ie_handlers))
		pri = PRI_MAX_ITHD;
	else
		pri = CK_SLIST_FIRST(&ie->ie_handlers)->ih_pri;

	/* Update name and priority. */
	strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
#ifdef KTR
	sched_clear_tdname(td);
#endif
	thread_lock(td);
	sched_prio(td, pri);
	thread_unlock(td);
}

/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
	struct intr_handler *ih;
	char *last;
	int missed, space, flags;
	/* Start off with no entropy and just the name of the event. */
	mtx_assert(&ie->ie_lock, MA_OWNED);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	flags = 0;
	missed = 0;
	space = 1;

	/* Run through all the handlers updating values. */
	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
		    sizeof(ie->ie_fullname)) {
			strcat(ie->ie_fullname, " ");
			strcat(ie->ie_fullname, ih->ih_name);
			space = 0;
		} else
			missed++;
		flags |= ih->ih_flags;
	}
	ie->ie_hflags = flags;

	/*
	 * If there is only one handler and its name is too long, just copy in
	 * as much of the end of the name (includes the unit number) as will
	 * fit.  Otherwise, we have multiple handlers and not all of the names
	 * will fit.  Add +'s to indicate missing names.  If we run out of room
	 * and still have +'s to add, change the last character from a + to a *.
	 */
	if (missed == 1 && space == 1) {
		ih = CK_SLIST_FIRST(&ie->ie_handlers);
		missed = strlen(ie->ie_fullname) + strlen(ih->ih_name) + 2 -
		    sizeof(ie->ie_fullname);
		strcat(ie->ie_fullname, (missed == 0) ? " " : "-");
		strcat(ie->ie_fullname, &ih->ih_name[missed]);
		missed = 0;
	}
	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
	while (missed-- > 0) {
		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
			if (*last == '+') {
				*last = '*';
				break;
			} else
				*last = '+';
		} else if (space) {
			strcat(ie->ie_fullname, " +");
			space = 0;
		} else
			strcat(ie->ie_fullname, "+");
	}

	/*
	 * If this event has an ithread, update its priority and
	 * name.
	 */
	if (ie->ie_thread != NULL)
		ithread_update(ie->ie_thread);
	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}
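/*
 * Create and initialize a new interrupt event, formatting its name from
 * "fmt", and link it onto the global event_list.  The PIC callbacks and the
 * IRQ number are recorded for later use; the optional "event" pointer
 * receives the new event.
 */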
int
intr_event_create(struct intr_event **event, void *source, int flags, int irq,
    void (*pre_ithread)(void *), void (*post_ithread)(void *),
    void (*post_filter)(void *), int (*assign_cpu)(void *, int),
    const char *fmt, ...)
{
	struct intr_event *ie;
	va_list ap;

	/* The only valid flag during creation is IE_SOFT. */
	if ((flags & ~IE_SOFT) != 0)
		return (EINVAL);
	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
	ie->ie_source = source;
	ie->ie_pre_ithread = pre_ithread;
	ie->ie_post_ithread = post_ithread;
	ie->ie_post_filter = post_filter;
	ie->ie_assign_cpu = assign_cpu;
	ie->ie_flags = flags;
	ie->ie_irq = irq;
	ie->ie_cpu = NOCPU;
	CK_SLIST_INIT(&ie->ie_handlers);
	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

	va_start(ap, fmt);
	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
	va_end(ap);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	mtx_lock(&event_lock);
	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
	mtx_unlock(&event_lock);
	if (event != NULL)
		*event = ie;
	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
	return (0);
}

/*
 * Bind an interrupt event to the specified CPU.  Note that not all
 * platforms support binding an interrupt to a CPU.  For those
 * platforms this request will fail.  Using a cpu id of NOCPU unbinds
 * the interrupt event.
 */
static int
_intr_event_bind(struct intr_event *ie, int cpu, bool bindirq, bool bindithread)
{
	lwpid_t id;
	int error;

	/* Need a CPU to bind to. */
	if (cpu != NOCPU && CPU_ABSENT(cpu))
		return (EINVAL);

	if (ie->ie_assign_cpu == NULL)
		return (EOPNOTSUPP);

	error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
	if (error)
		return (error);

	/*
	 * If we have any ithreads try to set their mask first to verify
	 * permissions, etc.
	 */
	if (bindithread) {
		mtx_lock(&ie->ie_lock);
		if (ie->ie_thread != NULL) {
			id = ie->ie_thread->it_thread->td_tid;
			mtx_unlock(&ie->ie_lock);
			error = cpuset_setithread(id, cpu);
			if (error)
				return (error);
		} else
			mtx_unlock(&ie->ie_lock);
	}
	if (bindirq)
		error = ie->ie_assign_cpu(ie->ie_source, cpu);
	if (error) {
		if (bindithread) {
			mtx_lock(&ie->ie_lock);
			if (ie->ie_thread != NULL) {
				cpu = ie->ie_cpu;
				id = ie->ie_thread->it_thread->td_tid;
				mtx_unlock(&ie->ie_lock);
				(void)cpuset_setithread(id, cpu);
			} else
				mtx_unlock(&ie->ie_lock);
		}
		return (error);
	}

	if (bindirq) {
		mtx_lock(&ie->ie_lock);
		ie->ie_cpu = cpu;
		mtx_unlock(&ie->ie_lock);
	}

	return (error);
}

/*
 * Bind an interrupt event to the specified CPU.  For supported platforms, any
 * associated ithreads as well as the primary interrupt context will be bound
 * to the specified CPU.
 */
int
intr_event_bind(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, true, true));
}

/*
 * Bind an interrupt event to the specified CPU, but do not bind associated
 * ithreads.
 */
int
intr_event_bind_irqonly(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, true, false));
}

/*
 * Bind an interrupt event's ithread to the specified CPU.
 */
int
intr_event_bind_ithread(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, false, true));
}
/*
 * Bind an interrupt event's ithread to the specified cpuset.
 */
int
intr_event_bind_ithread_cpuset(struct intr_event *ie, cpuset_t *cs)
{
	lwpid_t id;

	mtx_lock(&ie->ie_lock);
	if (ie->ie_thread != NULL) {
		id = ie->ie_thread->it_thread->td_tid;
		mtx_unlock(&ie->ie_lock);
		return (cpuset_setthread(id, cs));
	} else {
		mtx_unlock(&ie->ie_lock);
	}
	return (ENODEV);
}
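/*
 * Look up the hardware interrupt event associated with an IRQ number.
 * Software interrupt events and events without any handlers are skipped.
 */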
static struct intr_event *
intr_lookup(int irq)
{
	struct intr_event *ie;

	mtx_lock(&event_lock);
	TAILQ_FOREACH(ie, &event_list, ie_list)
		if (ie->ie_irq == irq &&
		    (ie->ie_flags & IE_SOFT) == 0 &&
		    CK_SLIST_FIRST(&ie->ie_handlers) != NULL)
			break;
	mtx_unlock(&event_lock);
	return (ie);
}

int
intr_setaffinity(int irq, int mode, void *m)
{
	struct intr_event *ie;
	cpuset_t *mask;
	int cpu, n;

	mask = m;
	cpu = NOCPU;
	/*
	 * If we're setting all cpus we can unbind.  Otherwise make sure
	 * only one cpu is in the set.
	 */
	if (CPU_CMP(cpuset_root, mask)) {
		for (n = 0; n < CPU_SETSIZE; n++) {
			if (!CPU_ISSET(n, mask))
				continue;
			if (cpu != NOCPU)
				return (EINVAL);
			cpu = n;
		}
	}
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);
	switch (mode) {
	case CPU_WHICH_IRQ:
		return (intr_event_bind(ie, cpu));
	case CPU_WHICH_INTRHANDLER:
		return (intr_event_bind_irqonly(ie, cpu));
	case CPU_WHICH_ITHREAD:
		return (intr_event_bind_ithread(ie, cpu));
	default:
		return (EINVAL);
	}
}

int
intr_getaffinity(int irq, int mode, void *m)
{
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	cpuset_t *mask;
	lwpid_t id;
	int error;

	mask = m;
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);

	error = 0;
	CPU_ZERO(mask);
	switch (mode) {
	case CPU_WHICH_IRQ:
	case CPU_WHICH_INTRHANDLER:
		mtx_lock(&ie->ie_lock);
		if (ie->ie_cpu == NOCPU)
			CPU_COPY(cpuset_root, mask);
		else
			CPU_SET(ie->ie_cpu, mask);
		mtx_unlock(&ie->ie_lock);
		break;
	case CPU_WHICH_ITHREAD:
		mtx_lock(&ie->ie_lock);
		if (ie->ie_thread == NULL) {
			mtx_unlock(&ie->ie_lock);
			CPU_COPY(cpuset_root, mask);
		} else {
			id = ie->ie_thread->it_thread->td_tid;
			mtx_unlock(&ie->ie_lock);
			error = cpuset_which(CPU_WHICH_TID, id, &p, &td, NULL);
			if (error != 0)
				return (error);
			CPU_COPY(&td->td_cpuset->cs_mask, mask);
			PROC_UNLOCK(p);
		}
		break;
	default:
		return (EINVAL);
	}
	return (0);
}
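/*
 * Destroy an interrupt event that no longer has any handlers attached.
 * Returns EBUSY if handlers are still present.
 */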
int
intr_event_destroy(struct intr_event *ie)
{

	mtx_lock(&event_lock);
	mtx_lock(&ie->ie_lock);
	if (!CK_SLIST_EMPTY(&ie->ie_handlers)) {
		mtx_unlock(&ie->ie_lock);
		mtx_unlock(&event_lock);
		return (EBUSY);
	}
	TAILQ_REMOVE(&event_list, ie, ie_list);
#ifndef notyet
	if (ie->ie_thread != NULL) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	mtx_unlock(&event_lock);
	mtx_destroy(&ie->ie_lock);
	free(ie, M_ITHREAD);
	return (0);
}

static struct intr_thread *
ithread_create(const char *name)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
	    &td, RFSTOPPED | RFHIGHPID,
	    0, "intr", "%s", name);
	if (error)
		panic("kproc_create() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}

static void
ithread_destroy(struct intr_thread *ithread)
{
	struct thread *td;

	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
	td = ithread->it_thread;
	thread_lock(td);
	ithread->it_flags |= IT_DEAD;
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else
		thread_unlock(td);
}
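/*
 * Add a filter and/or threaded handler to an interrupt event.  An ithread is
 * created on first use if a threaded handler is supplied.  The opaque cookie
 * returned via "cookiep" identifies the handler in later calls.
 */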
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_handler **prevptr;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;
	if (flags & INTR_TYPE_NET)
		ih->ih_flags |= IH_NET;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!CK_SLIST_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (CK_SLIST_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Create a thread if we need one. */
	while (ie->ie_thread == NULL && handler != NULL) {
		if (ie->ie_flags & IE_ADDING_THREAD)
			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
		else {
			ie->ie_flags |= IE_ADDING_THREAD;
			mtx_unlock(&ie->ie_lock);
			it = ithread_create("intr: newborn");
			mtx_lock(&ie->ie_lock);
			ie->ie_flags &= ~IE_ADDING_THREAD;
			ie->ie_thread = it;
			it->it_event = ie;
			ithread_update(it);
			wakeup(ie);
		}
	}

	/* Add the new handler to the event in priority order. */
	CK_SLIST_FOREACH_PREVPTR(temp_ih, prevptr, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	CK_SLIST_INSERT_PREVPTR(prevptr, temp_ih, ih, ih_next);

	intr_event_update(ie);

	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}
/*
 * Append a description preceded by a ':' to the name of the specified
 * interrupt handler.
 */
int
intr_event_describe_handler(struct intr_event *ie, void *cookie,
    const char *descr)
{
	struct intr_handler *ih;
	size_t space;
	char *start;

	mtx_lock(&ie->ie_lock);
#ifdef INVARIANTS
	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih == cookie)
			break;
	}
	if (ih == NULL) {
		mtx_unlock(&ie->ie_lock);
		panic("handler %p not found in interrupt event %p", cookie, ie);
	}
#endif
	ih = cookie;

	/*
	 * Look for an existing description by checking for an
	 * existing ":".  This assumes device names do not include
	 * colons.  If one is found, prepare to insert the new
	 * description at that point.  If one is not found, find the
	 * end of the name to use as the insertion point.
	 */
	start = strchr(ih->ih_name, ':');
	if (start == NULL)
		start = strchr(ih->ih_name, 0);

	/*
	 * See if there is enough remaining room in the string for the
	 * description + ":".  The "- 1" leaves room for the trailing
	 * '\0'.  The "+ 1" accounts for the colon.
	 */
	space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1;
	if (strlen(descr) + 1 > space) {
		mtx_unlock(&ie->ie_lock);
		return (ENOSPC);
	}

	/* Append a colon followed by the description. */
	*start = ':';
	strcpy(start + 1, descr);
	intr_event_update(ie);
	mtx_unlock(&ie->ie_lock);
	return (0);
}
/*
 * Return the ie_source field from the intr_event an intr_handler is
 * associated with.
 */
void *
intr_handler_source(void *cookie)
{
	struct intr_handler *ih;
	struct intr_event *ie;

	ih = (struct intr_handler *)cookie;
	if (ih == NULL)
		return (NULL);
	ie = ih->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    ih->ih_name));
	return (ie->ie_source);
}

/*
 * If intr_event_handle() is running in the ISR context at the time of the
 * call, then wait for it to complete.
 */
static void
intr_event_barrier(struct intr_event *ie)
{
	int phase;

	mtx_assert(&ie->ie_lock, MA_OWNED);
	phase = ie->ie_phase;

	/*
	 * Switch phase to direct future interrupts to the other active
	 * counter.  Make sure that any preceding stores are visible before
	 * the switch.
	 */
	KASSERT(ie->ie_active[!phase] == 0, ("idle phase has activity"));
	atomic_store_rel_int(&ie->ie_phase, !phase);

	/*
	 * This code cooperates with wait-free iteration of ie_handlers
	 * in intr_event_handle.
	 * Make sure that the removal and the phase update are not reordered
	 * with the active count check.
	 * Note that no combination of acquire and release fences can provide
	 * that guarantee as Store->Load sequences can always be reordered.
	 */
	atomic_thread_fence_seq_cst();

	/*
	 * Now wait on the inactive phase.
	 * The acquire fence is needed so that all post-barrier accesses
	 * are after the check.
	 */
	while (ie->ie_active[phase] > 0)
		cpu_spinwait();
	atomic_thread_fence_acq();
}
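/*
 * Wait until any in-progress execution of the given handler is finished and
 * its flag changes are visible: via intr_event_barrier() when the event has
 * no ithread, otherwise by scheduling the ithread and sleeping until it
 * acknowledges the change by clearing IH_CHANGED.
 */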
static void
intr_handler_barrier(struct intr_handler *handler)
{
	struct intr_event *ie;

	ie = handler->ih_event;
	mtx_assert(&ie->ie_lock, MA_OWNED);
	KASSERT((handler->ih_flags & IH_DEAD) == 0,
	    ("update for a removed handler"));

	if (ie->ie_thread == NULL) {
		intr_event_barrier(ie);
		return;
	}
	if ((handler->ih_flags & IH_CHANGED) == 0) {
		handler->ih_flags |= IH_CHANGED;
		intr_event_schedule_thread(ie, NULL);
	}
	while ((handler->ih_flags & IH_CHANGED) != 0)
		msleep(handler, &ie->ie_lock, 0, "ih_barr", 0);
}

/*
 * Sleep until an ithread finishes executing an interrupt handler.
 *
 * XXX Doesn't currently handle interrupt filters or fast interrupt
 * handlers.  This is intended for LinuxKPI drivers only.
 * Do not use in BSD code.
 */
void
_intr_drain(int irq)
{
	struct intr_event *ie;
	struct intr_thread *ithd;
	struct thread *td;

	ie = intr_lookup(irq);
	if (ie == NULL)
		return;
	if (ie->ie_thread == NULL)
		return;
	ithd = ie->ie_thread;
	td = ithd->it_thread;
	/*
	 * We set the flag and wait for it to be cleared to avoid
	 * long delays with potentially busy interrupt handlers
	 * were we to only sample TD_AWAITING_INTR() every tick.
	 */
	thread_lock(td);
	if (!TD_AWAITING_INTR(td)) {
		ithd->it_flags |= IT_WAIT;
		while (ithd->it_flags & IT_WAIT) {
			thread_unlock(td);
			pause("idrain", 1);
			thread_lock(td);
		}
	}
	thread_unlock(td);
	return;
}
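/*
 * Remove an interrupt handler that was previously added with
 * intr_event_add_handler().  If the event has an ithread the removal is
 * deferred to it; otherwise the handler is unlinked directly once the event
 * is quiescent.
 */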
int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
	struct intr_handler *ih;
	struct intr_handler **prevptr;
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));

	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
	CK_SLIST_FOREACH_PREVPTR(ih, prevptr, &ie->ie_handlers, ih_next) {
		if (ih == handler)
			break;
	}
	if (ih == NULL) {
		panic("interrupt handler \"%s\" not found in "
		    "interrupt event \"%s\"", handler->ih_name, ie->ie_name);
	}

	/*
	 * If there is no ithread, then directly remove the handler.  Note that
	 * intr_event_handle() iterates ie_handlers in a lock-less fashion, so
	 * care needs to be taken to keep ie_handlers consistent and to free
	 * the removed handler only when ie_handlers is quiescent.
	 */
	if (ie->ie_thread == NULL) {
		CK_SLIST_REMOVE_PREVPTR(prevptr, ih, ih_next);
		intr_event_barrier(ie);
		intr_event_update(ie);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}
	/*
	 * Let the interrupt thread do the job.
	 * The interrupt source is disabled when the interrupt thread is
	 * running, so it does not have to worry about interaction with
	 * intr_event_handle().
	 */
	KASSERT((handler->ih_flags & IH_DEAD) == 0,
	    ("duplicate handle remove"));
	handler->ih_flags |= IH_DEAD;
	intr_event_schedule_thread(ie, NULL);
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	intr_event_update(ie);

#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(8).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih->ih_handler != NULL) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}

int
intr_event_suspend_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	handler->ih_flags |= IH_SUSP;
	intr_handler_barrier(handler);
	mtx_unlock(&ie->ie_lock);
	return (0);
}

int
intr_event_resume_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));

	/*
	 * intr_handler_barrier() acts not only as a barrier,
	 * it also allows checking for any pending interrupts.
	 */
	mtx_lock(&ie->ie_lock);
	handler->ih_flags &= ~IH_SUSP;
	intr_handler_barrier(handler);
	mtx_unlock(&ie->ie_lock);
	return (0);
}
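/*
 * Put the ithread for an interrupt event on its run queue, gathering entropy
 * first if any handler requested it.  Returns EINVAL for a stray interrupt
 * with no ithread or no handlers.
 */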
static int
intr_event_schedule_thread(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_entropy entropy;
	struct intr_thread *it;
	struct thread *td;
	struct thread *ctd;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers) ||
	    ie->ie_thread == NULL)
		return (EINVAL);

	ctd = curthread;
	it = ie->ie_thread;
	td = it->it_thread;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (ie->ie_hflags & IH_ENTROPY) {
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest_queue(&entropy, sizeof(entropy), RANDOM_INTERRUPT);
	}

	KASSERT(td->td_proc != NULL, ("ithread %s has no process", ie->ie_name));
	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 *
	 * Use store_rel to arrange that the store to ih_need in
	 * swi_sched() is before the store to it_need and prepare for
	 * transfer of this order to loads in the ithread.
	 */
	atomic_store_rel_int(&it->it_need, 1);
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
#ifdef HWPMC_HOOKS
		atomic_set_int(&it->it_waiting, 0);
		if (frame != NULL)
			PMC_SOFT_CALL_TF( , , intr, schedule, frame);
		else
			PMC_SOFT_CALL( , , intr, schedule);
#endif
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__,
		    td->td_proc->p_pid, td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
#ifdef HWPMC_HOOKS
		atomic_add_int(&it->it_waiting, 1);

		if (atomic_load_int(&it->it_waiting) >=
		    intr_hwpmc_waiting_report_threshold) {
			if (frame != NULL)
				PMC_SOFT_CALL_TF( , , intr, waiting, frame);
			else
				PMC_SOFT_CALL( , , intr, waiting);
		}
#endif
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, td->td_proc->p_pid, td->td_name, it->it_need,
		    TD_GET_STATE(td));
		thread_unlock(td);
	}

	return (0);
}

/*
 * Allow interrupt event binding for software interrupt handlers -- a no-op,
 * since interrupts are generated in software rather than being directed by
 * a PIC.
 */
static int
swi_assign_cpu(void *arg, int cpu)
{

	return (0);
}

/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
    void *arg, int pri, enum intr_type flags, void **cookiep)
{
	struct intr_event *ie;
	int error = 0;

	if (flags & INTR_ENTROPY)
		return (EINVAL);

	ie = (eventp != NULL) ? *eventp : NULL;

	if (ie != NULL) {
		if (!(ie->ie_flags & IE_SOFT))
			return (EINVAL);
	} else {
		error = intr_event_create(&ie, NULL, IE_SOFT, 0,
		    NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
		if (error)
			return (error);
		if (eventp != NULL)
			*eventp = ie;
	}
	if (handler != NULL) {
		error = intr_event_add_handler(ie, name, NULL, handler, arg,
		    PI_SWI(pri), flags, cookiep);
	}
	return (error);
}

/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
	struct intr_handler *ih = (struct intr_handler *)cookie;
	struct intr_event *ie = ih->ih_event;
	struct intr_entropy entropy;
	int error __unused;

	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
	    ih->ih_need);

	if ((flags & SWI_FROMNMI) == 0) {
		entropy.event = (uintptr_t)ih;
		entropy.td = curthread;
		random_harvest_queue(&entropy, sizeof(entropy), RANDOM_SWI);
	}

	/*
	 * Set ih_need for this handler so that if the ithread is already
	 * running it will execute this handler on the next pass.  Otherwise,
	 * it will execute it the next time it runs.
	 */
	ih->ih_need = 1;

	if (flags & SWI_DELAY)
		return;

	if (flags & SWI_FROMNMI) {
#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
		KASSERT(ie == clk_intr_event,
		    ("SWI_FROMNMI used not with clk_intr_event"));
		ipi_self_from_nmi(IPI_SWI);
#endif
	} else {
		VM_CNT_INC(v_soft);
		error = intr_event_schedule_thread(ie, NULL);
		KASSERT(error == 0, ("stray software interrupt"));
	}
}
/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

	return (intr_event_remove_handler(cookie));
}
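/*
 * Run the handlers attached to an interrupt event from its ithread,
 * completing any deferred removals and flag updates along the way.
 */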
*/ 1197e0f66ef8SJohn Baldwin CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x", 1198bafe5a31SPaolo Pisati __func__, p->p_pid, (void *)ih->ih_handler, 1199bafe5a31SPaolo Pisati ih->ih_argument, ih->ih_name, ih->ih_flags); 1200e0f66ef8SJohn Baldwin 1201e0f66ef8SJohn Baldwin if (!(ih->ih_flags & IH_MPSAFE)) 1202e0f66ef8SJohn Baldwin mtx_lock(&Giant); 1203e0f66ef8SJohn Baldwin ih->ih_handler(ih->ih_argument); 1204e0f66ef8SJohn Baldwin if (!(ih->ih_flags & IH_MPSAFE)) 1205e0f66ef8SJohn Baldwin mtx_unlock(&Giant); 1206e0f66ef8SJohn Baldwin } 120737e9511fSJohn Baldwin } 120837e9511fSJohn Baldwin 120937e9511fSJohn Baldwin static void 121037e9511fSJohn Baldwin ithread_execute_handlers(struct proc *p, struct intr_event *ie) 121137e9511fSJohn Baldwin { 121237e9511fSJohn Baldwin 121337e9511fSJohn Baldwin /* Interrupt handlers should not sleep. */ 121437e9511fSJohn Baldwin if (!(ie->ie_flags & IE_SOFT)) 121537e9511fSJohn Baldwin THREAD_NO_SLEEPING(); 121637e9511fSJohn Baldwin intr_event_execute_handlers(p, ie); 1217e0f66ef8SJohn Baldwin if (!(ie->ie_flags & IE_SOFT)) 1218e0f66ef8SJohn Baldwin THREAD_SLEEPING_OK(); 1219e0f66ef8SJohn Baldwin 1220e0f66ef8SJohn Baldwin /* 1221e0f66ef8SJohn Baldwin * Interrupt storm handling: 1222e0f66ef8SJohn Baldwin * 1223e0f66ef8SJohn Baldwin * If this interrupt source is currently storming, then throttle 1224e0f66ef8SJohn Baldwin * it to only fire the handler once per clock tick. 1225e0f66ef8SJohn Baldwin * 1226e0f66ef8SJohn Baldwin * If this interrupt source is not currently storming, but the 1227e0f66ef8SJohn Baldwin * number of back to back interrupts exceeds the storm threshold, 1228e0f66ef8SJohn Baldwin * then enter storming mode. 1229e0f66ef8SJohn Baldwin */ 1230e41bcf3cSJohn Baldwin if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold && 1231e41bcf3cSJohn Baldwin !(ie->ie_flags & IE_SOFT)) { 12320ae62c18SNate Lawson /* Report the message only once every second. */ 12330ae62c18SNate Lawson if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) { 1234e0f66ef8SJohn Baldwin printf( 12350ae62c18SNate Lawson "interrupt storm detected on \"%s\"; throttling interrupt source\n", 1236e0f66ef8SJohn Baldwin ie->ie_name); 1237e0f66ef8SJohn Baldwin } 1238e41bcf3cSJohn Baldwin pause("istorm", 1); 1239e0f66ef8SJohn Baldwin } else 1240e0f66ef8SJohn Baldwin ie->ie_count++; 1241e0f66ef8SJohn Baldwin 1242e0f66ef8SJohn Baldwin /* 1243e0f66ef8SJohn Baldwin * Now that all the handlers have had a chance to run, reenable 1244e0f66ef8SJohn Baldwin * the interrupt source. 1245e0f66ef8SJohn Baldwin */ 12461ee1b687SJohn Baldwin if (ie->ie_post_ithread != NULL) 12471ee1b687SJohn Baldwin ie->ie_post_ithread(ie->ie_source); 1248e0f66ef8SJohn Baldwin } 1249e0f66ef8SJohn Baldwin 12508088699fSJohn Baldwin /* 1251b4151f71SJohn Baldwin * This is the main code for interrupt threads. 
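 *
 * In outline (a simplified paraphrase of the loop below, not extra
 * behaviour):
 *
 *	for (;;) {
 *		if (it_flags & IT_DEAD)
 *			free the intr_thread and kthread_exit();
 *		while (atomic_cmpset_acq_int(&it_need, 1, 0))
 *			ithread_execute_handlers(p, ie);
 *		thread_lock(td);
 *		if it_need is still clear, TD_SET_IWAIT() and
 *		mi_switch() until the next interrupt wakes us up;
 *		otherwise thread_unlock(td) and go around again.
 *	}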
12528088699fSJohn Baldwin */ 125337c84183SPoul-Henning Kamp static void 1254b4151f71SJohn Baldwin ithread_loop(void *arg) 12558088699fSJohn Baldwin { 1256511d1afbSGleb Smirnoff struct epoch_tracker et; 1257e0f66ef8SJohn Baldwin struct intr_thread *ithd; 1258e0f66ef8SJohn Baldwin struct intr_event *ie; 1259b40ce416SJulian Elischer struct thread *td; 1260b4151f71SJohn Baldwin struct proc *p; 1261511d1afbSGleb Smirnoff int wake, epoch_count; 1262f912e8f2SHans Petter Selasky bool needs_epoch; 12638088699fSJohn Baldwin 1264b40ce416SJulian Elischer td = curthread; 1265b40ce416SJulian Elischer p = td->td_proc; 1266e0f66ef8SJohn Baldwin ithd = (struct intr_thread *)arg; 1267e0f66ef8SJohn Baldwin KASSERT(ithd->it_thread == td, 126891f91617SDavid E. O'Brien ("%s: ithread and proc linkage out of sync", __func__)); 1269e0f66ef8SJohn Baldwin ie = ithd->it_event; 1270e0f66ef8SJohn Baldwin ie->ie_count = 0; 1271e4cd31ddSJeff Roberson wake = 0; 12728088699fSJohn Baldwin 12738088699fSJohn Baldwin /* 12748088699fSJohn Baldwin * As long as we have interrupts outstanding, go through the 12758088699fSJohn Baldwin * list of handlers, giving each one a go at it. 12768088699fSJohn Baldwin */ 12778088699fSJohn Baldwin for (;;) { 1278b4151f71SJohn Baldwin /* 1279b4151f71SJohn Baldwin * If we are an orphaned thread, then just die. 1280b4151f71SJohn Baldwin */ 1281b4151f71SJohn Baldwin if (ithd->it_flags & IT_DEAD) { 1282e0f66ef8SJohn Baldwin CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__, 12837ab24ea3SJulian Elischer p->p_pid, td->td_name); 1284b4151f71SJohn Baldwin free(ithd, M_ITHREAD); 1285ca9a0ddfSJulian Elischer kthread_exit(); 1286b4151f71SJohn Baldwin } 1287b4151f71SJohn Baldwin 1288e0f66ef8SJohn Baldwin /* 1289e0f66ef8SJohn Baldwin * Service interrupts. If another interrupt arrives while 1290e0f66ef8SJohn Baldwin * we are running, it will set it_need to note that we 1291e0f66ef8SJohn Baldwin * should make another pass. 1292283dfee9SKonstantin Belousov * 1293283dfee9SKonstantin Belousov * The load_acq part of the following cmpset ensures 1294283dfee9SKonstantin Belousov * that the load of ih_need in ithread_execute_handlers() 1295283dfee9SKonstantin Belousov * is ordered after the load of it_need here. 1296e0f66ef8SJohn Baldwin */ 1297f912e8f2SHans Petter Selasky needs_epoch = 1298f912e8f2SHans Petter Selasky (atomic_load_int(&ie->ie_hflags) & IH_NET) != 0; 1299f912e8f2SHans Petter Selasky if (needs_epoch) { 1300511d1afbSGleb Smirnoff epoch_count = 0; 1301511d1afbSGleb Smirnoff NET_EPOCH_ENTER(et); 1302511d1afbSGleb Smirnoff } 1303511d1afbSGleb Smirnoff while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0) { 1304e0f66ef8SJohn Baldwin ithread_execute_handlers(p, ie); 1305f912e8f2SHans Petter Selasky if (needs_epoch && 1306511d1afbSGleb Smirnoff ++epoch_count >= intr_epoch_batch) { 1307511d1afbSGleb Smirnoff NET_EPOCH_EXIT(et); 1308511d1afbSGleb Smirnoff epoch_count = 0; 1309511d1afbSGleb Smirnoff NET_EPOCH_ENTER(et); 1310511d1afbSGleb Smirnoff } 1311511d1afbSGleb Smirnoff } 1312f912e8f2SHans Petter Selasky if (needs_epoch) 1313511d1afbSGleb Smirnoff NET_EPOCH_EXIT(et); 13147870c3c6SJohn Baldwin WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread"); 13157870c3c6SJohn Baldwin mtx_assert(&Giant, MA_NOTOWNED); 13168088699fSJohn Baldwin 13178088699fSJohn Baldwin /* 13188088699fSJohn Baldwin * Processed all our interrupts. Now get the sched 13198088699fSJohn Baldwin * lock. This may take a while and it_need may get 13208088699fSJohn Baldwin * set again, so we have to check it again. 
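 *
 * Roughly: the code that schedules this thread sets it_need first and
 * only wakes us while holding the same thread lock taken below, so a
 * request posted after our final check still produces a wakeup rather
 * than being lost.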
13218088699fSJohn Baldwin 		 */
1322982d11f8SJeff Roberson 		thread_lock(td);
132303bbcb2fSKonstantin Belousov 		if (atomic_load_acq_int(&ithd->it_need) == 0 &&
132403bbcb2fSKonstantin Belousov 		    (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) {
13257870c3c6SJohn Baldwin 			TD_SET_IWAIT(td);
1326e0f66ef8SJohn Baldwin 			ie->ie_count = 0;
1327686bcb5cSJeff Roberson 			mi_switch(SW_VOL | SWT_IWAIT);
1328686bcb5cSJeff Roberson 		} else {
1329e4cd31ddSJeff Roberson 			if (ithd->it_flags & IT_WAIT) {
1330e4cd31ddSJeff Roberson 				wake = 1;
1331e4cd31ddSJeff Roberson 				ithd->it_flags &= ~IT_WAIT;
1332e4cd31ddSJeff Roberson 			}
1333982d11f8SJeff Roberson 			thread_unlock(td);
1334686bcb5cSJeff Roberson 		}
1335e4cd31ddSJeff Roberson 		if (wake) {
1336e4cd31ddSJeff Roberson 			wakeup(ithd);
1337e4cd31ddSJeff Roberson 			wake = 0;
1338e4cd31ddSJeff Roberson 		}
13398088699fSJohn Baldwin 	}
13401931cf94SJohn Baldwin }
13411ee1b687SJohn Baldwin 
13421ee1b687SJohn Baldwin /*
13431ee1b687SJohn Baldwin  * Main interrupt handling body.
13441ee1b687SJohn Baldwin  *
13451ee1b687SJohn Baldwin  * Input:
13461ee1b687SJohn Baldwin  * o ie:       the event connected to this interrupt.
13471ee1b687SJohn Baldwin  * o frame:    some archs (e.g. i386) pass a frame to some
13481ee1b687SJohn Baldwin  *             handlers as their main argument.
13491ee1b687SJohn Baldwin  * Return value:
13501ee1b687SJohn Baldwin  * o 0:        everything ok.
13511ee1b687SJohn Baldwin  * o EINVAL:   stray interrupt.
13521ee1b687SJohn Baldwin  */
13531ee1b687SJohn Baldwin int
13541ee1b687SJohn Baldwin intr_event_handle(struct intr_event *ie, struct trapframe *frame)
13551ee1b687SJohn Baldwin {
13561ee1b687SJohn Baldwin 	struct intr_handler *ih;
13571f255bd3SAlexander Motin 	struct trapframe *oldframe;
13581ee1b687SJohn Baldwin 	struct thread *td;
1359e0fa977eSAndriy Gapon 	int phase;
136082a5a275SAndriy Gapon 	int ret;
136182a5a275SAndriy Gapon 	bool filter, thread;
13621ee1b687SJohn Baldwin 
13631ee1b687SJohn Baldwin 	td = curthread;
13641ee1b687SJohn Baldwin 
1365b7627840SKonstantin Belousov #ifdef KSTACK_USAGE_PROF
1366b7627840SKonstantin Belousov 	intr_prof_stack_use(td, frame);
1367b7627840SKonstantin Belousov #endif
1368b7627840SKonstantin Belousov 
13691ee1b687SJohn Baldwin 	/* An interrupt with no event or handlers is a stray interrupt. */
1370111b043cSAndriy Gapon 	if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers))
13711ee1b687SJohn Baldwin 		return (EINVAL);
13721ee1b687SJohn Baldwin 
13731ee1b687SJohn Baldwin 	/*
13741ee1b687SJohn Baldwin 	 * Execute fast interrupt handlers directly.
13751ee1b687SJohn Baldwin 	 * To support clock handlers, if a handler registers
13761ee1b687SJohn Baldwin 	 * with a NULL argument, then we pass it a pointer to
13771ee1b687SJohn Baldwin 	 * a trapframe as its argument.
13781ee1b687SJohn Baldwin 	 */
13791ee1b687SJohn Baldwin 	td->td_intr_nesting_level++;
138082a5a275SAndriy Gapon 	filter = false;
138182a5a275SAndriy Gapon 	thread = false;
13821ee1b687SJohn Baldwin 	ret = 0;
13831ee1b687SJohn Baldwin 	critical_enter();
13841f255bd3SAlexander Motin 	oldframe = td->td_intr_frame;
13851f255bd3SAlexander Motin 	td->td_intr_frame = frame;
1386111b043cSAndriy Gapon 
1387e0fa977eSAndriy Gapon 	phase = ie->ie_phase;
1388e0fa977eSAndriy Gapon 	atomic_add_int(&ie->ie_active[phase], 1);
1389e0fa977eSAndriy Gapon 
1390e0fa977eSAndriy Gapon 	/*
1391e0fa977eSAndriy Gapon 	 * This fence is required to ensure that no later loads are
1392e0fa977eSAndriy Gapon 	 * re-ordered before the ie_active store.
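	 *
	 * (It pairs with the handler-removal side, elsewhere in this file,
	 * which flips ie_phase and then waits for the old phase's ie_active
	 * count to drain to zero; together they guarantee that any handler
	 * we observe in the list below stays valid while we run it.)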
1393e0fa977eSAndriy Gapon */ 1394e0fa977eSAndriy Gapon atomic_thread_fence_seq_cst(); 1395e0fa977eSAndriy Gapon 1396111b043cSAndriy Gapon CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) { 139782a5a275SAndriy Gapon if ((ih->ih_flags & IH_SUSP) != 0) 139882a5a275SAndriy Gapon continue; 1399aba10e13SAlexander Motin if ((ie->ie_flags & IE_SOFT) != 0 && ih->ih_need == 0) 1400aba10e13SAlexander Motin continue; 14011ee1b687SJohn Baldwin if (ih->ih_filter == NULL) { 140282a5a275SAndriy Gapon thread = true; 14031ee1b687SJohn Baldwin continue; 14041ee1b687SJohn Baldwin } 14051ee1b687SJohn Baldwin CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__, 14061ee1b687SJohn Baldwin ih->ih_filter, ih->ih_argument == NULL ? frame : 14071ee1b687SJohn Baldwin ih->ih_argument, ih->ih_name); 14081ee1b687SJohn Baldwin if (ih->ih_argument == NULL) 14091ee1b687SJohn Baldwin ret = ih->ih_filter(frame); 14101ee1b687SJohn Baldwin else 14111ee1b687SJohn Baldwin ret = ih->ih_filter(ih->ih_argument); 1412*6fa041d7SWojciech Macek #ifdef HWPMC_HOOKS 1413*6fa041d7SWojciech Macek PMC_SOFT_CALL_TF( , , intr, all, frame); 1414*6fa041d7SWojciech Macek #endif 141589fc20ccSAndriy Gapon KASSERT(ret == FILTER_STRAY || 141689fc20ccSAndriy Gapon ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 && 141789fc20ccSAndriy Gapon (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0), 141889fc20ccSAndriy Gapon ("%s: incorrect return value %#x from %s", __func__, ret, 141989fc20ccSAndriy Gapon ih->ih_name)); 142082a5a275SAndriy Gapon filter = filter || ret == FILTER_HANDLED; 1421*6fa041d7SWojciech Macek #ifdef HWPMC_HOOKS 1422*6fa041d7SWojciech Macek if (ret & FILTER_SCHEDULE_THREAD) 1423*6fa041d7SWojciech Macek PMC_SOFT_CALL_TF( , , intr, ithread, frame); 1424*6fa041d7SWojciech Macek else if (ret & FILTER_HANDLED) 1425*6fa041d7SWojciech Macek PMC_SOFT_CALL_TF( , , intr, filter, frame); 1426*6fa041d7SWojciech Macek else if (ret == FILTER_STRAY) 1427*6fa041d7SWojciech Macek PMC_SOFT_CALL_TF( , , intr, stray, frame); 1428*6fa041d7SWojciech Macek #endif 142989fc20ccSAndriy Gapon 14301ee1b687SJohn Baldwin /* 14311ee1b687SJohn Baldwin * Wrapper handler special handling: 14321ee1b687SJohn Baldwin * 14331ee1b687SJohn Baldwin * in some particular cases (like pccard and pccbb), 14341ee1b687SJohn Baldwin * the _real_ device handler is wrapped in a couple of 14351ee1b687SJohn Baldwin * functions - a filter wrapper and an ithread wrapper. 14361ee1b687SJohn Baldwin * In this case (and just in this case), the filter wrapper 14371ee1b687SJohn Baldwin * could ask the system to schedule the ithread and mask 14381ee1b687SJohn Baldwin * the interrupt source if the wrapped handler is composed 14391ee1b687SJohn Baldwin * of just an ithread handler. 14401ee1b687SJohn Baldwin * 14411ee1b687SJohn Baldwin * TODO: write a generic wrapper to avoid people rolling 144282a5a275SAndriy Gapon * their own. 
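	 *
	 * As an illustration of the filter/ithread split described above,
	 * a typical driver filter looks roughly like this (hypothetical
	 * "foo" names, not code from this file):
	 *
	 *	static int
	 *	foo_filter(void *arg)
	 *	{
	 *		struct foo_softc *sc = arg;
	 *
	 *		if (!foo_intr_pending(sc))
	 *			return (FILTER_STRAY);
	 *		foo_mask_intr(sc);
	 *		return (FILTER_SCHEDULE_THREAD);
	 *	}
	 *
	 * with the real work done by the ithread handler registered
	 * alongside it via bus_setup_intr(), which unmasks the source
	 * when it is done.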
14431ee1b687SJohn Baldwin 		 */
14441ee1b687SJohn Baldwin 		if (!thread) {
14451ee1b687SJohn Baldwin 			if (ret == FILTER_SCHEDULE_THREAD)
144682a5a275SAndriy Gapon 				thread = true;
14471ee1b687SJohn Baldwin 		}
14481ee1b687SJohn Baldwin 	}
1449e0fa977eSAndriy Gapon 	atomic_add_rel_int(&ie->ie_active[phase], -1);
1450e0fa977eSAndriy Gapon 
14511f255bd3SAlexander Motin 	td->td_intr_frame = oldframe;
14521ee1b687SJohn Baldwin 
14531ee1b687SJohn Baldwin 	if (thread) {
14541ee1b687SJohn Baldwin 		if (ie->ie_pre_ithread != NULL)
14551ee1b687SJohn Baldwin 			ie->ie_pre_ithread(ie->ie_source);
14561ee1b687SJohn Baldwin 	} else {
14571ee1b687SJohn Baldwin 		if (ie->ie_post_filter != NULL)
14581ee1b687SJohn Baldwin 			ie->ie_post_filter(ie->ie_source);
14591ee1b687SJohn Baldwin 	}
14601ee1b687SJohn Baldwin 
14611ee1b687SJohn Baldwin 	/* Schedule the ithread if needed. */
14621ee1b687SJohn Baldwin 	if (thread) {
1463ba3f7276SMatt Macy 		int error __unused;
1464ba3f7276SMatt Macy 
1465*6fa041d7SWojciech Macek 		error = intr_event_schedule_thread(ie, frame);
14661ee1b687SJohn Baldwin 		KASSERT(error == 0, ("bad stray interrupt"));
14671ee1b687SJohn Baldwin 	}
14681ee1b687SJohn Baldwin 	critical_exit();
14691ee1b687SJohn Baldwin 	td->td_intr_nesting_level--;
147082a5a275SAndriy Gapon #ifdef notyet
147182a5a275SAndriy Gapon 	/* The interrupt is not acknowledged by any filter and has no ithread. */
147282a5a275SAndriy Gapon 	if (!thread && !filter)
147382a5a275SAndriy Gapon 		return (EINVAL);
147482a5a275SAndriy Gapon #endif
14751ee1b687SJohn Baldwin 	return (0);
14761ee1b687SJohn Baldwin }
14771931cf94SJohn Baldwin 
14788b201c42SJohn Baldwin #ifdef DDB
14798b201c42SJohn Baldwin /*
14808b201c42SJohn Baldwin  * Dump details about an interrupt handler
14818b201c42SJohn Baldwin  */
14828b201c42SJohn Baldwin static void
1483e0f66ef8SJohn Baldwin db_dump_intrhand(struct intr_handler *ih)
14848b201c42SJohn Baldwin {
14858b201c42SJohn Baldwin 	int comma;
14868b201c42SJohn Baldwin 
14878b201c42SJohn Baldwin 	db_printf("\t%-10s ", ih->ih_name);
14888b201c42SJohn Baldwin 	switch (ih->ih_pri) {
14898b201c42SJohn Baldwin 	case PI_REALTIME:
14908b201c42SJohn Baldwin 		db_printf("CLK ");
14918b201c42SJohn Baldwin 		break;
14928b201c42SJohn Baldwin 	case PI_AV:
14938b201c42SJohn Baldwin 		db_printf("AV ");
14948b201c42SJohn Baldwin 		break;
1495d3305205SJohn Baldwin 	case PI_TTY:
14968b201c42SJohn Baldwin 		db_printf("TTY ");
14978b201c42SJohn Baldwin 		break;
14988b201c42SJohn Baldwin 	case PI_NET:
14998b201c42SJohn Baldwin 		db_printf("NET ");
15008b201c42SJohn Baldwin 		break;
15018b201c42SJohn Baldwin 	case PI_DISK:
15028b201c42SJohn Baldwin 		db_printf("DISK");
15038b201c42SJohn Baldwin 		break;
15048b201c42SJohn Baldwin 	case PI_DULL:
15058b201c42SJohn Baldwin 		db_printf("DULL");
15068b201c42SJohn Baldwin 		break;
15078b201c42SJohn Baldwin 	default:
15088b201c42SJohn Baldwin 		if (ih->ih_pri >= PI_SOFT)
15098b201c42SJohn Baldwin 			db_printf("SWI ");
15108b201c42SJohn Baldwin 		else
15118b201c42SJohn Baldwin 			db_printf("%4u", ih->ih_pri);
15128b201c42SJohn Baldwin 		break;
15138b201c42SJohn Baldwin 	}
15148b201c42SJohn Baldwin 	db_printf(" ");
1515b887a155SKonstantin Belousov 	if (ih->ih_filter != NULL) {
1516b887a155SKonstantin Belousov 		db_printf("[F]");
1517b887a155SKonstantin Belousov 		db_printsym((uintptr_t)ih->ih_filter, DB_STGY_PROC);
1518b887a155SKonstantin Belousov 	}
1519b887a155SKonstantin Belousov 	if (ih->ih_handler != NULL) {
1520b887a155SKonstantin Belousov 		if (ih->ih_filter != NULL)
1521b887a155SKonstantin Belousov 			db_printf(",");
1522b887a155SKonstantin Belousov 		db_printf("[H]");
15238b201c42SJohn Baldwin 		db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
1524b887a155SKonstantin Belousov 	}
15258b201c42SJohn Baldwin 	db_printf("(%p)", ih->ih_argument);
15268b201c42SJohn Baldwin 	if (ih->ih_need ||
1527ef544f63SPaolo Pisati 	    (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
15288b201c42SJohn Baldwin 	    IH_MPSAFE)) != 0) {
15298b201c42SJohn Baldwin 		db_printf(" {");
15308b201c42SJohn Baldwin 		comma = 0;
15318b201c42SJohn Baldwin 		if (ih->ih_flags & IH_EXCLUSIVE) {
15328b201c42SJohn Baldwin 			if (comma)
15338b201c42SJohn Baldwin 				db_printf(", ");
15348b201c42SJohn Baldwin 			db_printf("EXCL");
15358b201c42SJohn Baldwin 			comma = 1;
15368b201c42SJohn Baldwin 		}
15378b201c42SJohn Baldwin 		if (ih->ih_flags & IH_ENTROPY) {
15388b201c42SJohn Baldwin 			if (comma)
15398b201c42SJohn Baldwin 				db_printf(", ");
15408b201c42SJohn Baldwin 			db_printf("ENTROPY");
15418b201c42SJohn Baldwin 			comma = 1;
15428b201c42SJohn Baldwin 		}
15438b201c42SJohn Baldwin 		if (ih->ih_flags & IH_DEAD) {
15448b201c42SJohn Baldwin 			if (comma)
15458b201c42SJohn Baldwin 				db_printf(", ");
15468b201c42SJohn Baldwin 			db_printf("DEAD");
15478b201c42SJohn Baldwin 			comma = 1;
15488b201c42SJohn Baldwin 		}
15498b201c42SJohn Baldwin 		if (ih->ih_flags & IH_MPSAFE) {
15508b201c42SJohn Baldwin 			if (comma)
15518b201c42SJohn Baldwin 				db_printf(", ");
15528b201c42SJohn Baldwin 			db_printf("MPSAFE");
15538b201c42SJohn Baldwin 			comma = 1;
15548b201c42SJohn Baldwin 		}
15558b201c42SJohn Baldwin 		if (ih->ih_need) {
15568b201c42SJohn Baldwin 			if (comma)
15578b201c42SJohn Baldwin 				db_printf(", ");
15588b201c42SJohn Baldwin 			db_printf("NEED");
15598b201c42SJohn Baldwin 		}
15608b201c42SJohn Baldwin 		db_printf("}");
15618b201c42SJohn Baldwin 	}
15628b201c42SJohn Baldwin 	db_printf("\n");
15638b201c42SJohn Baldwin }
15648b201c42SJohn Baldwin 
15658b201c42SJohn Baldwin /*
1566e0f66ef8SJohn Baldwin  * Dump details about an event.
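 *
 * (This is the per-event output of the ddb(4) "show intr" command
 * below; the "v" modifier additionally lists each handler via
 * db_dump_intrhand() and "a" includes events without handlers.)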
15678b201c42SJohn Baldwin */ 15688b201c42SJohn Baldwin void 1569e0f66ef8SJohn Baldwin db_dump_intr_event(struct intr_event *ie, int handlers) 15708b201c42SJohn Baldwin { 1571e0f66ef8SJohn Baldwin struct intr_handler *ih; 1572e0f66ef8SJohn Baldwin struct intr_thread *it; 15738b201c42SJohn Baldwin int comma; 15748b201c42SJohn Baldwin 1575e0f66ef8SJohn Baldwin db_printf("%s ", ie->ie_fullname); 1576e0f66ef8SJohn Baldwin it = ie->ie_thread; 1577e0f66ef8SJohn Baldwin if (it != NULL) 1578e0f66ef8SJohn Baldwin db_printf("(pid %d)", it->it_thread->td_proc->p_pid); 1579e0f66ef8SJohn Baldwin else 1580e0f66ef8SJohn Baldwin db_printf("(no thread)"); 1581c4eb6630SGleb Smirnoff if ((ie->ie_flags & (IE_SOFT | IE_ADDING_THREAD)) != 0 || 1582e0f66ef8SJohn Baldwin (it != NULL && it->it_need)) { 15838b201c42SJohn Baldwin db_printf(" {"); 15848b201c42SJohn Baldwin comma = 0; 1585e0f66ef8SJohn Baldwin if (ie->ie_flags & IE_SOFT) { 15868b201c42SJohn Baldwin db_printf("SOFT"); 15878b201c42SJohn Baldwin comma = 1; 15888b201c42SJohn Baldwin } 1589e0f66ef8SJohn Baldwin if (ie->ie_flags & IE_ADDING_THREAD) { 15908b201c42SJohn Baldwin if (comma) 15918b201c42SJohn Baldwin db_printf(", "); 1592e0f66ef8SJohn Baldwin db_printf("ADDING_THREAD"); 15938b201c42SJohn Baldwin comma = 1; 15948b201c42SJohn Baldwin } 1595e0f66ef8SJohn Baldwin if (it != NULL && it->it_need) { 15968b201c42SJohn Baldwin if (comma) 15978b201c42SJohn Baldwin db_printf(", "); 15988b201c42SJohn Baldwin db_printf("NEED"); 15998b201c42SJohn Baldwin } 16008b201c42SJohn Baldwin db_printf("}"); 16018b201c42SJohn Baldwin } 16028b201c42SJohn Baldwin db_printf("\n"); 16038b201c42SJohn Baldwin 16048b201c42SJohn Baldwin if (handlers) 1605111b043cSAndriy Gapon CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) 16068b201c42SJohn Baldwin db_dump_intrhand(ih); 16078b201c42SJohn Baldwin } 1608e0f66ef8SJohn Baldwin 1609e0f66ef8SJohn Baldwin /* 1610e0f66ef8SJohn Baldwin * Dump data about interrupt handlers 1611e0f66ef8SJohn Baldwin */ 1612e0f66ef8SJohn Baldwin DB_SHOW_COMMAND(intr, db_show_intr) 1613e0f66ef8SJohn Baldwin { 1614e0f66ef8SJohn Baldwin struct intr_event *ie; 161519e9205aSJohn Baldwin int all, verbose; 1616e0f66ef8SJohn Baldwin 1617dc15eac0SEd Schouten verbose = strchr(modif, 'v') != NULL; 1618dc15eac0SEd Schouten all = strchr(modif, 'a') != NULL; 1619e0f66ef8SJohn Baldwin TAILQ_FOREACH(ie, &event_list, ie_list) { 1620111b043cSAndriy Gapon if (!all && CK_SLIST_EMPTY(&ie->ie_handlers)) 1621e0f66ef8SJohn Baldwin continue; 1622e0f66ef8SJohn Baldwin db_dump_intr_event(ie, verbose); 162319e9205aSJohn Baldwin if (db_pager_quit) 162419e9205aSJohn Baldwin break; 1625e0f66ef8SJohn Baldwin } 1626e0f66ef8SJohn Baldwin } 16278b201c42SJohn Baldwin #endif /* DDB */ 16288b201c42SJohn Baldwin 1629b4151f71SJohn Baldwin /* 16308088699fSJohn Baldwin * Start standard software interrupt threads 16311931cf94SJohn Baldwin */ 16321931cf94SJohn Baldwin static void 1633b4151f71SJohn Baldwin start_softintr(void *dummy) 16341931cf94SJohn Baldwin { 1635b4151f71SJohn Baldwin 1636aba10e13SAlexander Motin if (swi_add(&clk_intr_event, "clk", NULL, NULL, SWI_CLOCK, 1637aba10e13SAlexander Motin INTR_MPSAFE, NULL)) 1638aba10e13SAlexander Motin panic("died while creating clk swi ithread"); 16398d809d50SJeff Roberson if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih)) 16408d809d50SJeff Roberson panic("died while creating vm swi ithread"); 16411931cf94SJohn Baldwin } 1642237fdd78SRobert Watson SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, 
1643237fdd78SRobert Watson     NULL);
16441931cf94SJohn Baldwin 
1645d279178dSThomas Moestl /*
1646d279178dSThomas Moestl  * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
1647d279178dSThomas Moestl  * The data for this is machine dependent, and the declarations are in
1648d279178dSThomas Moestl  * machine dependent code.  The layout of intrnames and intrcnt, however,
1649d279178dSThomas Moestl  * is machine independent.
1650d279178dSThomas Moestl  *
1651d279178dSThomas Moestl  * We do not know the length of intrcnt and intrnames at compile time, so
1652d279178dSThomas Moestl  * calculate things at run time.
1653d279178dSThomas Moestl  */
1654d279178dSThomas Moestl static int
1655d279178dSThomas Moestl sysctl_intrnames(SYSCTL_HANDLER_ARGS)
1656d279178dSThomas Moestl {
1657521ea19dSAttilio Rao 	return (sysctl_handle_opaque(oidp, intrnames, sintrnames, req));
1658d279178dSThomas Moestl }
1659d279178dSThomas Moestl 
16607029da5cSPawel Biernacki SYSCTL_PROC(_hw, OID_AUTO, intrnames,
166167f508dbSAlexander Motin     CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
16627029da5cSPawel Biernacki     sysctl_intrnames, "",
16637029da5cSPawel Biernacki     "Interrupt Names");
1664d279178dSThomas Moestl 
1665d279178dSThomas Moestl static int
1666d279178dSThomas Moestl sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
1667d279178dSThomas Moestl {
166885729c2cSJuli Mallett #ifdef SCTL_MASK32
166985729c2cSJuli Mallett 	uint32_t *intrcnt32;
167085729c2cSJuli Mallett 	unsigned i;
167185729c2cSJuli Mallett 	int error;
167285729c2cSJuli Mallett 
167385729c2cSJuli Mallett 	if (req->flags & SCTL_MASK32) {
167485729c2cSJuli Mallett 		if (!req->oldptr)
167585729c2cSJuli Mallett 			return (sysctl_handle_opaque(oidp, NULL, sintrcnt / 2, req));
167685729c2cSJuli Mallett 		intrcnt32 = malloc(sintrcnt / 2, M_TEMP, M_NOWAIT);
167785729c2cSJuli Mallett 		if (intrcnt32 == NULL)
167885729c2cSJuli Mallett 			return (ENOMEM);
167985729c2cSJuli Mallett 		for (i = 0; i < sintrcnt / sizeof (u_long); i++)
168085729c2cSJuli Mallett 			intrcnt32[i] = intrcnt[i];
168185729c2cSJuli Mallett 		error = sysctl_handle_opaque(oidp, intrcnt32, sintrcnt / 2, req);
168285729c2cSJuli Mallett 		free(intrcnt32, M_TEMP);
168385729c2cSJuli Mallett 		return (error);
168485729c2cSJuli Mallett 	}
168585729c2cSJuli Mallett #endif
1686521ea19dSAttilio Rao 	return (sysctl_handle_opaque(oidp, intrcnt, sintrcnt, req));
1687d279178dSThomas Moestl }
1688d279178dSThomas Moestl 
16897029da5cSPawel Biernacki SYSCTL_PROC(_hw, OID_AUTO, intrcnt,
169067f508dbSAlexander Motin     CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0,
16917029da5cSPawel Biernacki     sysctl_intrcnt, "",
16927029da5cSPawel Biernacki     "Interrupt Counts");
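
/*
 * For reference, userland consumers such as vmstat(8) and systat(1)
 * read these counters through the sysctls defined above.  A minimal
 * (hypothetical) userland sketch using sysctlbyname(3):
 *
 *	size_t len;
 *	u_long *counts;
 *
 *	if (sysctlbyname("hw.intrcnt", NULL, &len, NULL, 0) == 0 &&
 *	    (counts = malloc(len)) != NULL &&
 *	    sysctlbyname("hw.intrcnt", counts, &len, NULL, 0) == 0) {
 *		(len / sizeof(u_long) counters, named by the
 *		nul-separated strings returned by "hw.intrnames")
 *	}
 */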
16938b201c42SJohn Baldwin 
16948b201c42SJohn Baldwin #ifdef DDB
16958b201c42SJohn Baldwin /*
16968b201c42SJohn Baldwin  * DDB command to dump the interrupt statistics.
16978b201c42SJohn Baldwin  */
16988b201c42SJohn Baldwin DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
16998b201c42SJohn Baldwin {
17008b201c42SJohn Baldwin 	u_long *i;
17018b201c42SJohn Baldwin 	char *cp;
1702521ea19dSAttilio Rao 	u_int j;
17038b201c42SJohn Baldwin 
17048b201c42SJohn Baldwin 	cp = intrnames;
1705521ea19dSAttilio Rao 	j = 0;
1706521ea19dSAttilio Rao 	for (i = intrcnt; j < (sintrcnt / sizeof(u_long)) && !db_pager_quit;
1707521ea19dSAttilio Rao 	    i++, j++) {
17088b201c42SJohn Baldwin 		if (*cp == '\0')
17098b201c42SJohn Baldwin 			break;
17108b201c42SJohn Baldwin 		if (*i != 0)
17118b201c42SJohn Baldwin 			db_printf("%s\t%lu\n", cp, *i);
17128b201c42SJohn Baldwin 		cp += strlen(cp) + 1;
17138b201c42SJohn Baldwin 	}
17148b201c42SJohn Baldwin }
17158b201c42SJohn Baldwin #endif