19454b2d8SWarner Losh /*- 24d846d26SWarner Losh * SPDX-License-Identifier: BSD-2-Clause 38a36da99SPedro F. Giffuni * 4425f9fdaSStefan Eßer * Copyright (c) 1997, Stefan Esser <se@freebsd.org> 5425f9fdaSStefan Eßer * All rights reserved. 6425f9fdaSStefan Eßer * 7425f9fdaSStefan Eßer * Redistribution and use in source and binary forms, with or without 8425f9fdaSStefan Eßer * modification, are permitted provided that the following conditions 9425f9fdaSStefan Eßer * are met: 10425f9fdaSStefan Eßer * 1. Redistributions of source code must retain the above copyright 11425f9fdaSStefan Eßer * notice unmodified, this list of conditions, and the following 12425f9fdaSStefan Eßer * disclaimer. 13425f9fdaSStefan Eßer * 2. Redistributions in binary form must reproduce the above copyright 14425f9fdaSStefan Eßer * notice, this list of conditions and the following disclaimer in the 15425f9fdaSStefan Eßer * documentation and/or other materials provided with the distribution. 16425f9fdaSStefan Eßer * 17425f9fdaSStefan Eßer * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18425f9fdaSStefan Eßer * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19425f9fdaSStefan Eßer * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20425f9fdaSStefan Eßer * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21425f9fdaSStefan Eßer * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22425f9fdaSStefan Eßer * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23425f9fdaSStefan Eßer * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24425f9fdaSStefan Eßer * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25425f9fdaSStefan Eßer * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26425f9fdaSStefan Eßer * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
27425f9fdaSStefan Eßer */ 28425f9fdaSStefan Eßer 29677b542eSDavid E. O'Brien #include <sys/cdefs.h> 308b201c42SJohn Baldwin #include "opt_ddb.h" 316fa041d7SWojciech Macek #include "opt_hwpmc_hooks.h" 32b7627840SKonstantin Belousov #include "opt_kstack_usage_prof.h" 338b201c42SJohn Baldwin 341c5bb3eaSPeter Wemm #include <sys/param.h> 359a94c9c5SJohn Baldwin #include <sys/bus.h> 36c11110eaSAlfred Perlstein #include <sys/conf.h> 379b33b154SJeff Roberson #include <sys/cpuset.h> 389a94c9c5SJohn Baldwin #include <sys/rtprio.h> 39425f9fdaSStefan Eßer #include <sys/systm.h> 4068352337SDoug Rabson #include <sys/interrupt.h> 411931cf94SJohn Baldwin #include <sys/kernel.h> 421931cf94SJohn Baldwin #include <sys/kthread.h> 431931cf94SJohn Baldwin #include <sys/ktr.h> 4405b2c96fSBruce Evans #include <sys/limits.h> 45f34fa851SJohn Baldwin #include <sys/lock.h> 461931cf94SJohn Baldwin #include <sys/malloc.h> 4735e0e5b3SJohn Baldwin #include <sys/mutex.h> 48cebc7fb1SJohn Baldwin #include <sys/priv.h> 491931cf94SJohn Baldwin #include <sys/proc.h> 50511d1afbSGleb Smirnoff #include <sys/epoch.h> 513e5da754SJohn Baldwin #include <sys/random.h> 52b4151f71SJohn Baldwin #include <sys/resourcevar.h> 5363710c4dSJohn Baldwin #include <sys/sched.h> 54eaf86d16SJohn Baldwin #include <sys/smp.h> 55d279178dSThomas Moestl #include <sys/sysctl.h> 566205924aSKip Macy #include <sys/syslog.h> 571931cf94SJohn Baldwin #include <sys/unistd.h> 581931cf94SJohn Baldwin #include <sys/vmmeter.h> 591931cf94SJohn Baldwin #include <machine/atomic.h> 601931cf94SJohn Baldwin #include <machine/cpu.h> 618088699fSJohn Baldwin #include <machine/md_var.h> 62aba10e13SAlexander Motin #include <machine/smp.h> 63b4151f71SJohn Baldwin #include <machine/stdarg.h> 648b201c42SJohn Baldwin #ifdef DDB 658b201c42SJohn Baldwin #include <ddb/ddb.h> 668b201c42SJohn Baldwin #include <ddb/db_sym.h> 678b201c42SJohn Baldwin #endif 68425f9fdaSStefan Eßer 69e0f66ef8SJohn Baldwin /* 70e0f66ef8SJohn Baldwin * Describe an interrupt thread. 
There is one of these per interrupt event. 71e0f66ef8SJohn Baldwin */ 72e0f66ef8SJohn Baldwin struct intr_thread { 73e0f66ef8SJohn Baldwin struct intr_event *it_event; 74e0f66ef8SJohn Baldwin struct thread *it_thread; /* Kernel thread. */ 75e0f66ef8SJohn Baldwin int it_flags; /* (j) IT_* flags. */ 76e0f66ef8SJohn Baldwin int it_need; /* Needs service. */ 776fa041d7SWojciech Macek int it_waiting; /* Waiting in the runq. */ 783e5da754SJohn Baldwin }; 793e5da754SJohn Baldwin 80e0f66ef8SJohn Baldwin /* Interrupt thread flags kept in it_flags */ 81e0f66ef8SJohn Baldwin #define IT_DEAD 0x000001 /* Thread is waiting to exit. */ 82e4cd31ddSJeff Roberson #define IT_WAIT 0x000002 /* Thread is waiting for completion. */ 83e0f66ef8SJohn Baldwin 84e0f66ef8SJohn Baldwin struct intr_entropy { 85e0f66ef8SJohn Baldwin struct thread *td; 86e0f66ef8SJohn Baldwin uintptr_t event; 87e0f66ef8SJohn Baldwin }; 88e0f66ef8SJohn Baldwin 89aba10e13SAlexander Motin struct intr_event *clk_intr_event; 907ab24ea3SJulian Elischer struct proc *intrproc; 911931cf94SJohn Baldwin 92b4151f71SJohn Baldwin static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads"); 93b4151f71SJohn Baldwin 945d0e8299SConrad Meyer static int intr_storm_threshold = 0; 95af3b2549SHans Petter Selasky SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RWTUN, 967870c3c6SJohn Baldwin &intr_storm_threshold, 0, 977b1fe905SBruce Evans "Number of consecutive interrupts before storm protection is enabled"); 98511d1afbSGleb Smirnoff static int intr_epoch_batch = 1000; 99511d1afbSGleb Smirnoff SYSCTL_INT(_hw, OID_AUTO, intr_epoch_batch, CTLFLAG_RWTUN, &intr_epoch_batch, 100511d1afbSGleb Smirnoff 0, "Maximum interrupt handler executions without re-entering epoch(9)"); 1016fa041d7SWojciech Macek #ifdef HWPMC_HOOKS 1026fa041d7SWojciech Macek static int intr_hwpmc_waiting_report_threshold = 1; 1036fa041d7SWojciech Macek SYSCTL_INT(_hw, OID_AUTO, intr_hwpmc_waiting_report_threshold, CTLFLAG_RWTUN, 1046fa041d7SWojciech Macek 
&intr_hwpmc_waiting_report_threshold, 1, 1056fa041d7SWojciech Macek "Threshold for reporting number of events in a workq"); 1067bc13692SWojciech Macek #define PMC_HOOK_INSTALLED_ANY() __predict_false(pmc_hook != NULL) 1076fa041d7SWojciech Macek #endif 108e0f66ef8SJohn Baldwin static TAILQ_HEAD(, intr_event) event_list = 109e0f66ef8SJohn Baldwin TAILQ_HEAD_INITIALIZER(event_list); 1109b33b154SJeff Roberson static struct mtx event_lock; 1119b33b154SJeff Roberson MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF); 1127b1fe905SBruce Evans 113e0f66ef8SJohn Baldwin static void intr_event_update(struct intr_event *ie); 1146fa041d7SWojciech Macek static int intr_event_schedule_thread(struct intr_event *ie, struct trapframe *frame); 115e0f66ef8SJohn Baldwin static struct intr_thread *ithread_create(const char *name); 116e0f66ef8SJohn Baldwin static void ithread_destroy(struct intr_thread *ithread); 117bafe5a31SPaolo Pisati static void ithread_execute_handlers(struct proc *p, 118bafe5a31SPaolo Pisati struct intr_event *ie); 1197b1fe905SBruce Evans static void ithread_loop(void *); 120e0f66ef8SJohn Baldwin static void ithread_update(struct intr_thread *ithd); 1217b1fe905SBruce Evans static void start_softintr(void *); 1227870c3c6SJohn Baldwin 1236fa041d7SWojciech Macek #ifdef HWPMC_HOOKS 1246fa041d7SWojciech Macek #include <sys/pmckern.h> 1256fa041d7SWojciech Macek PMC_SOFT_DEFINE( , , intr, all); 1266fa041d7SWojciech Macek PMC_SOFT_DEFINE( , , intr, ithread); 1276fa041d7SWojciech Macek PMC_SOFT_DEFINE( , , intr, filter); 1286fa041d7SWojciech Macek PMC_SOFT_DEFINE( , , intr, stray); 1296fa041d7SWojciech Macek PMC_SOFT_DEFINE( , , intr, schedule); 1306fa041d7SWojciech Macek PMC_SOFT_DEFINE( , , intr, waiting); 1317bc13692SWojciech Macek 1327bc13692SWojciech Macek #define PMC_SOFT_CALL_INTR_HLPR(event, frame) \ 1337bc13692SWojciech Macek do { \ 1347bc13692SWojciech Macek if (frame != NULL) \ 1357bc13692SWojciech Macek PMC_SOFT_CALL_TF( , , intr, event, 
frame); \ 1367bc13692SWojciech Macek else \ 1377bc13692SWojciech Macek PMC_SOFT_CALL( , , intr, event); \ 1387bc13692SWojciech Macek } while (0) 1396fa041d7SWojciech Macek #endif 1406fa041d7SWojciech Macek 141bc17acb2SJohn Baldwin /* Map an interrupt type to an ithread priority. */ 142b4151f71SJohn Baldwin u_char 143e0f66ef8SJohn Baldwin intr_priority(enum intr_type flags) 1449a94c9c5SJohn Baldwin { 145b4151f71SJohn Baldwin u_char pri; 1469a94c9c5SJohn Baldwin 147b4151f71SJohn Baldwin flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET | 1485a280d9cSPeter Wemm INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV); 1499a94c9c5SJohn Baldwin switch (flags) { 150b4151f71SJohn Baldwin case INTR_TYPE_TTY: 151d3305205SJohn Baldwin pri = PI_TTY; 1529a94c9c5SJohn Baldwin break; 1539a94c9c5SJohn Baldwin case INTR_TYPE_BIO: 1549a94c9c5SJohn Baldwin pri = PI_DISK; 1559a94c9c5SJohn Baldwin break; 1569a94c9c5SJohn Baldwin case INTR_TYPE_NET: 1579a94c9c5SJohn Baldwin pri = PI_NET; 1589a94c9c5SJohn Baldwin break; 1599a94c9c5SJohn Baldwin case INTR_TYPE_CAM: 160d3305205SJohn Baldwin pri = PI_DISK; 1619a94c9c5SJohn Baldwin break; 162d3305205SJohn Baldwin case INTR_TYPE_AV: 1635a280d9cSPeter Wemm pri = PI_AV; 1645a280d9cSPeter Wemm break; 165b4151f71SJohn Baldwin case INTR_TYPE_CLK: 166b4151f71SJohn Baldwin pri = PI_REALTIME; 167b4151f71SJohn Baldwin break; 1689a94c9c5SJohn Baldwin case INTR_TYPE_MISC: 1699a94c9c5SJohn Baldwin pri = PI_DULL; /* don't care */ 1709a94c9c5SJohn Baldwin break; 1719a94c9c5SJohn Baldwin default: 172b4151f71SJohn Baldwin /* We didn't specify an interrupt level. */ 173e0f66ef8SJohn Baldwin panic("intr_priority: no interrupt type in flags"); 1749a94c9c5SJohn Baldwin } 1759a94c9c5SJohn Baldwin 1769a94c9c5SJohn Baldwin return pri; 1779a94c9c5SJohn Baldwin } 1789a94c9c5SJohn Baldwin 179b4151f71SJohn Baldwin /* 180e0f66ef8SJohn Baldwin * Update an ithread based on the associated intr_event. 
181b4151f71SJohn Baldwin */ 182b4151f71SJohn Baldwin static void 183e0f66ef8SJohn Baldwin ithread_update(struct intr_thread *ithd) 184b4151f71SJohn Baldwin { 185e0f66ef8SJohn Baldwin struct intr_event *ie; 186b40ce416SJulian Elischer struct thread *td; 187e0f66ef8SJohn Baldwin u_char pri; 1888088699fSJohn Baldwin 189e0f66ef8SJohn Baldwin ie = ithd->it_event; 190e0f66ef8SJohn Baldwin td = ithd->it_thread; 191111b043cSAndriy Gapon mtx_assert(&ie->ie_lock, MA_OWNED); 192b4151f71SJohn Baldwin 193e0f66ef8SJohn Baldwin /* Determine the overall priority of this event. */ 194111b043cSAndriy Gapon if (CK_SLIST_EMPTY(&ie->ie_handlers)) 195e0f66ef8SJohn Baldwin pri = PRI_MAX_ITHD; 196e0f66ef8SJohn Baldwin else 197111b043cSAndriy Gapon pri = CK_SLIST_FIRST(&ie->ie_handlers)->ih_pri; 198e80fb434SRobert Drehmel 199e0f66ef8SJohn Baldwin /* Update name and priority. */ 2007ab24ea3SJulian Elischer strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name)); 20144ad5475SJohn Baldwin #ifdef KTR 20244ad5475SJohn Baldwin sched_clear_tdname(td); 20344ad5475SJohn Baldwin #endif 204982d11f8SJeff Roberson thread_lock(td); 205fea89a28SJohn Baldwin sched_ithread_prio(td, pri); 206982d11f8SJeff Roberson thread_unlock(td); 207b4151f71SJohn Baldwin } 208e0f66ef8SJohn Baldwin 209e0f66ef8SJohn Baldwin /* 210e0f66ef8SJohn Baldwin * Regenerate the full name of an interrupt event and update its priority. 211e0f66ef8SJohn Baldwin */ 212e0f66ef8SJohn Baldwin static void 213e0f66ef8SJohn Baldwin intr_event_update(struct intr_event *ie) 214e0f66ef8SJohn Baldwin { 215e0f66ef8SJohn Baldwin struct intr_handler *ih; 216e0f66ef8SJohn Baldwin char *last; 217f912e8f2SHans Petter Selasky int missed, space, flags; 218e0f66ef8SJohn Baldwin 219e0f66ef8SJohn Baldwin /* Start off with no entropy and just the name of the event. 
*/ 220e0f66ef8SJohn Baldwin mtx_assert(&ie->ie_lock, MA_OWNED); 221e0f66ef8SJohn Baldwin strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); 222f912e8f2SHans Petter Selasky flags = 0; 2230811d60aSJohn Baldwin missed = 0; 224e0f66ef8SJohn Baldwin space = 1; 225e0f66ef8SJohn Baldwin 226e0f66ef8SJohn Baldwin /* Run through all the handlers updating values. */ 227111b043cSAndriy Gapon CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) { 2286d51c0feSIan Lepore if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 < 229e0f66ef8SJohn Baldwin sizeof(ie->ie_fullname)) { 230e0f66ef8SJohn Baldwin strcat(ie->ie_fullname, " "); 231e0f66ef8SJohn Baldwin strcat(ie->ie_fullname, ih->ih_name); 232e0f66ef8SJohn Baldwin space = 0; 2330811d60aSJohn Baldwin } else 2340811d60aSJohn Baldwin missed++; 235f912e8f2SHans Petter Selasky flags |= ih->ih_flags; 2360811d60aSJohn Baldwin } 237f912e8f2SHans Petter Selasky ie->ie_hflags = flags; 238e0f66ef8SJohn Baldwin 239e0f66ef8SJohn Baldwin /* 24067da50a0SIan Lepore * If there is only one handler and its name is too long, just copy in 24167da50a0SIan Lepore * as much of the end of the name (includes the unit number) as will 24267da50a0SIan Lepore * fit. Otherwise, we have multiple handlers and not all of the names 24367da50a0SIan Lepore * will fit. Add +'s to indicate missing names. If we run out of room 24467da50a0SIan Lepore * and still have +'s to add, change the last character from a + to a *. 245e0f66ef8SJohn Baldwin */ 24667da50a0SIan Lepore if (missed == 1 && space == 1) { 24767da50a0SIan Lepore ih = CK_SLIST_FIRST(&ie->ie_handlers); 24867da50a0SIan Lepore missed = strlen(ie->ie_fullname) + strlen(ih->ih_name) + 2 - 24967da50a0SIan Lepore sizeof(ie->ie_fullname); 25067da50a0SIan Lepore strcat(ie->ie_fullname, (missed == 0) ? 
" " : "-"); 25167da50a0SIan Lepore strcat(ie->ie_fullname, &ih->ih_name[missed]); 25267da50a0SIan Lepore missed = 0; 25367da50a0SIan Lepore } 254e0f66ef8SJohn Baldwin last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2]; 2550811d60aSJohn Baldwin while (missed-- > 0) { 256e0f66ef8SJohn Baldwin if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) { 257e0f66ef8SJohn Baldwin if (*last == '+') { 258e0f66ef8SJohn Baldwin *last = '*'; 259e0f66ef8SJohn Baldwin break; 260b4151f71SJohn Baldwin } else 261e0f66ef8SJohn Baldwin *last = '+'; 262e0f66ef8SJohn Baldwin } else if (space) { 263e0f66ef8SJohn Baldwin strcat(ie->ie_fullname, " +"); 264e0f66ef8SJohn Baldwin space = 0; 265e0f66ef8SJohn Baldwin } else 266e0f66ef8SJohn Baldwin strcat(ie->ie_fullname, "+"); 267b4151f71SJohn Baldwin } 268e0f66ef8SJohn Baldwin 269e0f66ef8SJohn Baldwin /* 270e0f66ef8SJohn Baldwin * If this event has an ithread, update it's priority and 271e0f66ef8SJohn Baldwin * name. 272e0f66ef8SJohn Baldwin */ 273e0f66ef8SJohn Baldwin if (ie->ie_thread != NULL) 274e0f66ef8SJohn Baldwin ithread_update(ie->ie_thread); 275e0f66ef8SJohn Baldwin CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname); 276b4151f71SJohn Baldwin } 277b4151f71SJohn Baldwin 278b4151f71SJohn Baldwin int 279cd04887bSElliott Mitchell intr_event_create(struct intr_event **event, void *source, int flags, u_int irq, 2801ee1b687SJohn Baldwin void (*pre_ithread)(void *), void (*post_ithread)(void *), 281066da805SAdrian Chadd void (*post_filter)(void *), int (*assign_cpu)(void *, int), 2821ee1b687SJohn Baldwin const char *fmt, ...) 283bafe5a31SPaolo Pisati { 284bafe5a31SPaolo Pisati struct intr_event *ie; 285bafe5a31SPaolo Pisati va_list ap; 286bafe5a31SPaolo Pisati 287bafe5a31SPaolo Pisati /* The only valid flag during creation is IE_SOFT. 
*/ 288bafe5a31SPaolo Pisati if ((flags & ~IE_SOFT) != 0) 289bafe5a31SPaolo Pisati return (EINVAL); 290bafe5a31SPaolo Pisati ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO); 291bafe5a31SPaolo Pisati ie->ie_source = source; 2921ee1b687SJohn Baldwin ie->ie_pre_ithread = pre_ithread; 2931ee1b687SJohn Baldwin ie->ie_post_ithread = post_ithread; 2941ee1b687SJohn Baldwin ie->ie_post_filter = post_filter; 2956d2d1c04SJohn Baldwin ie->ie_assign_cpu = assign_cpu; 296bafe5a31SPaolo Pisati ie->ie_flags = flags; 2979b33b154SJeff Roberson ie->ie_irq = irq; 298eaf86d16SJohn Baldwin ie->ie_cpu = NOCPU; 299111b043cSAndriy Gapon CK_SLIST_INIT(&ie->ie_handlers); 300bafe5a31SPaolo Pisati mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF); 301bafe5a31SPaolo Pisati 302bafe5a31SPaolo Pisati va_start(ap, fmt); 303bafe5a31SPaolo Pisati vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap); 304bafe5a31SPaolo Pisati va_end(ap); 305bafe5a31SPaolo Pisati strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); 3069b33b154SJeff Roberson mtx_lock(&event_lock); 307bafe5a31SPaolo Pisati TAILQ_INSERT_TAIL(&event_list, ie, ie_list); 3089b33b154SJeff Roberson mtx_unlock(&event_lock); 309bafe5a31SPaolo Pisati if (event != NULL) 310bafe5a31SPaolo Pisati *event = ie; 311bafe5a31SPaolo Pisati CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name); 312bafe5a31SPaolo Pisati return (0); 313bafe5a31SPaolo Pisati } 314b4151f71SJohn Baldwin 315eaf86d16SJohn Baldwin /* 316eaf86d16SJohn Baldwin * Bind an interrupt event to the specified CPU. Note that not all 317eaf86d16SJohn Baldwin * platforms support binding an interrupt to a CPU. For those 31829dfb631SConrad Meyer * platforms this request will fail. Using a cpu id of NOCPU unbinds 319eaf86d16SJohn Baldwin * the interrupt event. 
320eaf86d16SJohn Baldwin */ 32129dfb631SConrad Meyer static int 32229dfb631SConrad Meyer _intr_event_bind(struct intr_event *ie, int cpu, bool bindirq, bool bindithread) 323eaf86d16SJohn Baldwin { 3249b33b154SJeff Roberson lwpid_t id; 325eaf86d16SJohn Baldwin int error; 326eaf86d16SJohn Baldwin 327eaf86d16SJohn Baldwin /* Need a CPU to bind to. */ 328eaf86d16SJohn Baldwin if (cpu != NOCPU && CPU_ABSENT(cpu)) 329eaf86d16SJohn Baldwin return (EINVAL); 330eaf86d16SJohn Baldwin 331eaf86d16SJohn Baldwin if (ie->ie_assign_cpu == NULL) 332eaf86d16SJohn Baldwin return (EOPNOTSUPP); 333cebc7fb1SJohn Baldwin 334cebc7fb1SJohn Baldwin error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR); 335cebc7fb1SJohn Baldwin if (error) 336cebc7fb1SJohn Baldwin return (error); 337cebc7fb1SJohn Baldwin 3389b33b154SJeff Roberson /* 339cebc7fb1SJohn Baldwin * If we have any ithreads try to set their mask first to verify 340cebc7fb1SJohn Baldwin * permissions, etc. 3419b33b154SJeff Roberson */ 34229dfb631SConrad Meyer if (bindithread) { 343eaf86d16SJohn Baldwin mtx_lock(&ie->ie_lock); 3449b33b154SJeff Roberson if (ie->ie_thread != NULL) { 3459b33b154SJeff Roberson id = ie->ie_thread->it_thread->td_tid; 346eaf86d16SJohn Baldwin mtx_unlock(&ie->ie_lock); 34781198539SAlexander V. Chernikov error = cpuset_setithread(id, cpu); 3489b33b154SJeff Roberson if (error) 3499b33b154SJeff Roberson return (error); 3509b33b154SJeff Roberson } else 351eaf86d16SJohn Baldwin mtx_unlock(&ie->ie_lock); 35229dfb631SConrad Meyer } 35329dfb631SConrad Meyer if (bindirq) 354eaf86d16SJohn Baldwin error = ie->ie_assign_cpu(ie->ie_source, cpu); 355cebc7fb1SJohn Baldwin if (error) { 35629dfb631SConrad Meyer if (bindithread) { 357cebc7fb1SJohn Baldwin mtx_lock(&ie->ie_lock); 358cebc7fb1SJohn Baldwin if (ie->ie_thread != NULL) { 35981198539SAlexander V. Chernikov cpu = ie->ie_cpu; 360cebc7fb1SJohn Baldwin id = ie->ie_thread->it_thread->td_tid; 361cebc7fb1SJohn Baldwin mtx_unlock(&ie->ie_lock); 36281198539SAlexander V. 
Chernikov (void)cpuset_setithread(id, cpu); 363cebc7fb1SJohn Baldwin } else 364cebc7fb1SJohn Baldwin mtx_unlock(&ie->ie_lock); 36529dfb631SConrad Meyer } 366eaf86d16SJohn Baldwin return (error); 367cebc7fb1SJohn Baldwin } 368cebc7fb1SJohn Baldwin 36929dfb631SConrad Meyer if (bindirq) { 370eaf86d16SJohn Baldwin mtx_lock(&ie->ie_lock); 371eaf86d16SJohn Baldwin ie->ie_cpu = cpu; 3729b33b154SJeff Roberson mtx_unlock(&ie->ie_lock); 37329dfb631SConrad Meyer } 3749b33b154SJeff Roberson 3759b33b154SJeff Roberson return (error); 3769b33b154SJeff Roberson } 3779b33b154SJeff Roberson 37829dfb631SConrad Meyer /* 37929dfb631SConrad Meyer * Bind an interrupt event to the specified CPU. For supported platforms, any 38029dfb631SConrad Meyer * associated ithreads as well as the primary interrupt context will be bound 38129dfb631SConrad Meyer * to the specificed CPU. 38229dfb631SConrad Meyer */ 38329dfb631SConrad Meyer int 38429dfb631SConrad Meyer intr_event_bind(struct intr_event *ie, int cpu) 38529dfb631SConrad Meyer { 38629dfb631SConrad Meyer 38729dfb631SConrad Meyer return (_intr_event_bind(ie, cpu, true, true)); 38829dfb631SConrad Meyer } 38929dfb631SConrad Meyer 39029dfb631SConrad Meyer /* 39129dfb631SConrad Meyer * Bind an interrupt event to the specified CPU, but do not bind associated 39229dfb631SConrad Meyer * ithreads. 39329dfb631SConrad Meyer */ 39429dfb631SConrad Meyer int 39529dfb631SConrad Meyer intr_event_bind_irqonly(struct intr_event *ie, int cpu) 39629dfb631SConrad Meyer { 39729dfb631SConrad Meyer 39829dfb631SConrad Meyer return (_intr_event_bind(ie, cpu, true, false)); 39929dfb631SConrad Meyer } 40029dfb631SConrad Meyer 40129dfb631SConrad Meyer /* 40229dfb631SConrad Meyer * Bind an interrupt event's ithread to the specified CPU. 
40329dfb631SConrad Meyer */ 40429dfb631SConrad Meyer int 40529dfb631SConrad Meyer intr_event_bind_ithread(struct intr_event *ie, int cpu) 40629dfb631SConrad Meyer { 40729dfb631SConrad Meyer 40829dfb631SConrad Meyer return (_intr_event_bind(ie, cpu, false, true)); 40929dfb631SConrad Meyer } 41029dfb631SConrad Meyer 4114e255d74SAndrew Gallatin /* 4124e255d74SAndrew Gallatin * Bind an interrupt event's ithread to the specified cpuset. 4134e255d74SAndrew Gallatin */ 4144e255d74SAndrew Gallatin int 4154e255d74SAndrew Gallatin intr_event_bind_ithread_cpuset(struct intr_event *ie, cpuset_t *cs) 4164e255d74SAndrew Gallatin { 4174e255d74SAndrew Gallatin lwpid_t id; 4184e255d74SAndrew Gallatin 4194e255d74SAndrew Gallatin mtx_lock(&ie->ie_lock); 4204e255d74SAndrew Gallatin if (ie->ie_thread != NULL) { 4214e255d74SAndrew Gallatin id = ie->ie_thread->it_thread->td_tid; 4224e255d74SAndrew Gallatin mtx_unlock(&ie->ie_lock); 4234e255d74SAndrew Gallatin return (cpuset_setthread(id, cs)); 4244e255d74SAndrew Gallatin } else { 4254e255d74SAndrew Gallatin mtx_unlock(&ie->ie_lock); 4264e255d74SAndrew Gallatin } 4274e255d74SAndrew Gallatin return (ENODEV); 4284e255d74SAndrew Gallatin } 4294e255d74SAndrew Gallatin 4309b33b154SJeff Roberson static struct intr_event * 4319b33b154SJeff Roberson intr_lookup(int irq) 4329b33b154SJeff Roberson { 4339b33b154SJeff Roberson struct intr_event *ie; 4349b33b154SJeff Roberson 4359b33b154SJeff Roberson mtx_lock(&event_lock); 4369b33b154SJeff Roberson TAILQ_FOREACH(ie, &event_list, ie_list) 4379b33b154SJeff Roberson if (ie->ie_irq == irq && 4389b33b154SJeff Roberson (ie->ie_flags & IE_SOFT) == 0 && 439111b043cSAndriy Gapon CK_SLIST_FIRST(&ie->ie_handlers) != NULL) 4409b33b154SJeff Roberson break; 4419b33b154SJeff Roberson mtx_unlock(&event_lock); 4429b33b154SJeff Roberson return (ie); 4439b33b154SJeff Roberson } 4449b33b154SJeff Roberson 4459b33b154SJeff Roberson int 446a9e0f316SElliott Mitchell intr_setaffinity(int irq, int mode, const void *m) 
4479b33b154SJeff Roberson { 4489b33b154SJeff Roberson struct intr_event *ie; 449a9e0f316SElliott Mitchell const cpuset_t *mask; 4503fe93b94SAdrian Chadd int cpu, n; 4519b33b154SJeff Roberson 4529b33b154SJeff Roberson mask = m; 4539b33b154SJeff Roberson cpu = NOCPU; 4549b33b154SJeff Roberson /* 4559b33b154SJeff Roberson * If we're setting all cpus we can unbind. Otherwise make sure 4569b33b154SJeff Roberson * only one cpu is in the set. 4579b33b154SJeff Roberson */ 4589b33b154SJeff Roberson if (CPU_CMP(cpuset_root, mask)) { 4599b33b154SJeff Roberson for (n = 0; n < CPU_SETSIZE; n++) { 4609b33b154SJeff Roberson if (!CPU_ISSET(n, mask)) 4619b33b154SJeff Roberson continue; 4629b33b154SJeff Roberson if (cpu != NOCPU) 4639b33b154SJeff Roberson return (EINVAL); 4643fe93b94SAdrian Chadd cpu = n; 4659b33b154SJeff Roberson } 4669b33b154SJeff Roberson } 4679b33b154SJeff Roberson ie = intr_lookup(irq); 4689b33b154SJeff Roberson if (ie == NULL) 4699b33b154SJeff Roberson return (ESRCH); 47029dfb631SConrad Meyer switch (mode) { 47129dfb631SConrad Meyer case CPU_WHICH_IRQ: 4729bd55acfSJohn Baldwin return (intr_event_bind(ie, cpu)); 47329dfb631SConrad Meyer case CPU_WHICH_INTRHANDLER: 47429dfb631SConrad Meyer return (intr_event_bind_irqonly(ie, cpu)); 47529dfb631SConrad Meyer case CPU_WHICH_ITHREAD: 47629dfb631SConrad Meyer return (intr_event_bind_ithread(ie, cpu)); 47729dfb631SConrad Meyer default: 47829dfb631SConrad Meyer return (EINVAL); 47929dfb631SConrad Meyer } 4809b33b154SJeff Roberson } 4819b33b154SJeff Roberson 4829b33b154SJeff Roberson int 48329dfb631SConrad Meyer intr_getaffinity(int irq, int mode, void *m) 4849b33b154SJeff Roberson { 4859b33b154SJeff Roberson struct intr_event *ie; 48629dfb631SConrad Meyer struct thread *td; 48729dfb631SConrad Meyer struct proc *p; 4889b33b154SJeff Roberson cpuset_t *mask; 48929dfb631SConrad Meyer lwpid_t id; 49029dfb631SConrad Meyer int error; 4919b33b154SJeff Roberson 4929b33b154SJeff Roberson mask = m; 4939b33b154SJeff Roberson ie = 
intr_lookup(irq); 4949b33b154SJeff Roberson if (ie == NULL) 4959b33b154SJeff Roberson return (ESRCH); 49629dfb631SConrad Meyer 49729dfb631SConrad Meyer error = 0; 4989b33b154SJeff Roberson CPU_ZERO(mask); 49929dfb631SConrad Meyer switch (mode) { 50029dfb631SConrad Meyer case CPU_WHICH_IRQ: 50129dfb631SConrad Meyer case CPU_WHICH_INTRHANDLER: 5029b33b154SJeff Roberson mtx_lock(&ie->ie_lock); 5039b33b154SJeff Roberson if (ie->ie_cpu == NOCPU) 5049b33b154SJeff Roberson CPU_COPY(cpuset_root, mask); 5059b33b154SJeff Roberson else 5069b33b154SJeff Roberson CPU_SET(ie->ie_cpu, mask); 507eaf86d16SJohn Baldwin mtx_unlock(&ie->ie_lock); 50829dfb631SConrad Meyer break; 50929dfb631SConrad Meyer case CPU_WHICH_ITHREAD: 51029dfb631SConrad Meyer mtx_lock(&ie->ie_lock); 51129dfb631SConrad Meyer if (ie->ie_thread == NULL) { 51229dfb631SConrad Meyer mtx_unlock(&ie->ie_lock); 51329dfb631SConrad Meyer CPU_COPY(cpuset_root, mask); 51429dfb631SConrad Meyer } else { 51529dfb631SConrad Meyer id = ie->ie_thread->it_thread->td_tid; 51629dfb631SConrad Meyer mtx_unlock(&ie->ie_lock); 51729dfb631SConrad Meyer error = cpuset_which(CPU_WHICH_TID, id, &p, &td, NULL); 51829dfb631SConrad Meyer if (error != 0) 51929dfb631SConrad Meyer return (error); 52029dfb631SConrad Meyer CPU_COPY(&td->td_cpuset->cs_mask, mask); 52129dfb631SConrad Meyer PROC_UNLOCK(p); 52229dfb631SConrad Meyer } 52329dfb631SConrad Meyer default: 52429dfb631SConrad Meyer return (EINVAL); 52529dfb631SConrad Meyer } 526eaf86d16SJohn Baldwin return (0); 527eaf86d16SJohn Baldwin } 528eaf86d16SJohn Baldwin 529b4151f71SJohn Baldwin int 530e0f66ef8SJohn Baldwin intr_event_destroy(struct intr_event *ie) 531b4151f71SJohn Baldwin { 532b4151f71SJohn Baldwin 53339888ed7SMitchell Horne if (ie == NULL) 53439888ed7SMitchell Horne return (EINVAL); 53539888ed7SMitchell Horne 5369b33b154SJeff Roberson mtx_lock(&event_lock); 537e0f66ef8SJohn Baldwin mtx_lock(&ie->ie_lock); 538111b043cSAndriy Gapon if (!CK_SLIST_EMPTY(&ie->ie_handlers)) { 
539e0f66ef8SJohn Baldwin mtx_unlock(&ie->ie_lock); 5409b33b154SJeff Roberson mtx_unlock(&event_lock); 541e0f66ef8SJohn Baldwin return (EBUSY); 5424d29cb2dSJohn Baldwin } 543e0f66ef8SJohn Baldwin TAILQ_REMOVE(&event_list, ie, ie_list); 5449b33b154SJeff Roberson mtx_unlock(&event_lock); 5458381e9f4SMark Johnston if (ie->ie_thread != NULL) 5468381e9f4SMark Johnston ithread_destroy(ie->ie_thread); 5478381e9f4SMark Johnston mtx_unlock(&ie->ie_lock); 548e0f66ef8SJohn Baldwin mtx_destroy(&ie->ie_lock); 549e0f66ef8SJohn Baldwin free(ie, M_ITHREAD); 550e0f66ef8SJohn Baldwin return (0); 551e0f66ef8SJohn Baldwin } 552e0f66ef8SJohn Baldwin 553e0f66ef8SJohn Baldwin static struct intr_thread * 554e0f66ef8SJohn Baldwin ithread_create(const char *name) 555e0f66ef8SJohn Baldwin { 556e0f66ef8SJohn Baldwin struct intr_thread *ithd; 557e0f66ef8SJohn Baldwin struct thread *td; 558e0f66ef8SJohn Baldwin int error; 559e0f66ef8SJohn Baldwin 560e0f66ef8SJohn Baldwin ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO); 561e0f66ef8SJohn Baldwin 5627ab24ea3SJulian Elischer error = kproc_kthread_add(ithread_loop, ithd, &intrproc, 5637ab24ea3SJulian Elischer &td, RFSTOPPED | RFHIGHPID, 5649ef95d01SJulian Elischer 0, "intr", "%s", name); 565e0f66ef8SJohn Baldwin if (error) 5663745c395SJulian Elischer panic("kproc_create() failed with %d", error); 567982d11f8SJeff Roberson thread_lock(td); 568ad1e7d28SJulian Elischer sched_class(td, PRI_ITHD); 569e0f66ef8SJohn Baldwin TD_SET_IWAIT(td); 570982d11f8SJeff Roberson thread_unlock(td); 571e0f66ef8SJohn Baldwin td->td_pflags |= TDP_ITHREAD; 572e0f66ef8SJohn Baldwin ithd->it_thread = td; 573e0f66ef8SJohn Baldwin CTR2(KTR_INTR, "%s: created %s", __func__, name); 574e0f66ef8SJohn Baldwin return (ithd); 575e0f66ef8SJohn Baldwin } 576e0f66ef8SJohn Baldwin 577e0f66ef8SJohn Baldwin static void 578e0f66ef8SJohn Baldwin ithread_destroy(struct intr_thread *ithread) 579e0f66ef8SJohn Baldwin { 5808381e9f4SMark Johnston struct intr_event *ie; 
581e0f66ef8SJohn Baldwin 	struct thread *td;
582e0f66ef8SJohn Baldwin 
583e0f66ef8SJohn Baldwin 	td = ithread->it_thread;
5848381e9f4SMark Johnston 	ie = ithread->it_event;
5858381e9f4SMark Johnston 
5868381e9f4SMark Johnston 	mtx_assert(&ie->ie_lock, MA_OWNED);
5878381e9f4SMark Johnston 
5888381e9f4SMark Johnston 	CTR2(KTR_INTR, "%s: killing %s", __func__, ie->ie_name);
5898381e9f4SMark Johnston 
590982d11f8SJeff Roberson 	thread_lock(td);
591e0f66ef8SJohn Baldwin 	ithread->it_flags |= IT_DEAD;
59271fad9fdSJulian Elischer 	if (TD_AWAITING_INTR(td)) {
59371fad9fdSJulian Elischer 		TD_CLR_IWAIT(td);
594ed998d1cSJohn Baldwin 		sched_wakeup(td, SRQ_INTR);
59561a74c5cSJeff Roberson 	} else
596982d11f8SJeff Roberson 		thread_unlock(td);
5978381e9f4SMark Johnston 	while (ie->ie_thread != NULL)
5988381e9f4SMark Johnston 		msleep(ithread, &ie->ie_lock, 0, "ithd_dth", 0);
599b4151f71SJohn Baldwin }
600b4151f71SJohn Baldwin 
/*
 * Add a new handler to the specified interrupt event.  The handler is
 * allocated here, an ithread is created on demand when a threaded
 * (non-filter) handler is supplied, and the handler is linked into the
 * event's list in priority order.  Returns EINVAL for bad arguments or
 * for exclusivity/sleepable conflicts with already-installed handlers.
 */
601b4151f71SJohn Baldwin int
602e0f66ef8SJohn Baldwin intr_event_add_handler(struct intr_event *ie, const char *name,
603ef544f63SPaolo Pisati     driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
604ef544f63SPaolo Pisati     enum intr_type flags, void **cookiep)
605b4151f71SJohn Baldwin {
606e0f66ef8SJohn Baldwin 	struct intr_handler *ih, *temp_ih;
607111b043cSAndriy Gapon 	struct intr_handler **prevptr;
608e0f66ef8SJohn Baldwin 	struct intr_thread *it;
609b4151f71SJohn Baldwin 
610ef544f63SPaolo Pisati 	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
611b4151f71SJohn Baldwin 		return (EINVAL);
612b4151f71SJohn Baldwin 
613*0863dc10SAndrew Turner 	if ((flags & INTR_SLEEPABLE) != 0 && (flags & INTR_EXCL) == 0) {
614*0863dc10SAndrew Turner 		printf("%s: INTR_SLEEPABLE requires INTR_EXCL to be set\n",
615*0863dc10SAndrew Turner 		    __func__);
616*0863dc10SAndrew Turner 		return (EINVAL);
617*0863dc10SAndrew Turner 	}
618*0863dc10SAndrew Turner 
619e0f66ef8SJohn Baldwin 	/* Allocate and populate an interrupt handler structure. */
620e0f66ef8SJohn Baldwin 	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
621ef544f63SPaolo Pisati 	ih->ih_filter = filter;
622b4151f71SJohn Baldwin 	ih->ih_handler = handler;
623b4151f71SJohn Baldwin 	ih->ih_argument = arg;
62437b8ef16SJohn Baldwin 	strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
625e0f66ef8SJohn Baldwin 	ih->ih_event = ie;
626b4151f71SJohn Baldwin 	ih->ih_pri = pri;
627ef544f63SPaolo Pisati 	if (flags & INTR_EXCL)
628b4151f71SJohn Baldwin 		ih->ih_flags = IH_EXCLUSIVE;
629b4151f71SJohn Baldwin 	if (flags & INTR_MPSAFE)
630b4151f71SJohn Baldwin 		ih->ih_flags |= IH_MPSAFE;
631b4151f71SJohn Baldwin 	if (flags & INTR_ENTROPY)
632b4151f71SJohn Baldwin 		ih->ih_flags |= IH_ENTROPY;
633511d1afbSGleb Smirnoff 	if (flags & INTR_TYPE_NET)
634511d1afbSGleb Smirnoff 		ih->ih_flags |= IH_NET;
635b4151f71SJohn Baldwin 
636*0863dc10SAndrew Turner 	/* We can only have one exclusive or sleepable handler in a event. */
637e0f66ef8SJohn Baldwin 	mtx_lock(&ie->ie_lock);
638111b043cSAndriy Gapon 	if (!CK_SLIST_EMPTY(&ie->ie_handlers)) {
639*0863dc10SAndrew Turner 		if ((flags & (INTR_EXCL | INTR_SLEEPABLE)) ||
640111b043cSAndriy Gapon 		    (CK_SLIST_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
641e0f66ef8SJohn Baldwin 			mtx_unlock(&ie->ie_lock);
642b4151f71SJohn Baldwin 			free(ih, M_ITHREAD);
643b4151f71SJohn Baldwin 			return (EINVAL);
644b4151f71SJohn Baldwin 		}
645e0f66ef8SJohn Baldwin 	}
646*0863dc10SAndrew Turner 	if (flags & INTR_SLEEPABLE)
647*0863dc10SAndrew Turner 		ie->ie_flags |= IE_SLEEPABLE;
648e0f66ef8SJohn Baldwin 
649e0f66ef8SJohn Baldwin 	/* Create a thread if we need one. */
650ef544f63SPaolo Pisati 	while (ie->ie_thread == NULL && handler != NULL) {
651e0f66ef8SJohn Baldwin 		if (ie->ie_flags & IE_ADDING_THREAD)
6520f180a7cSJohn Baldwin 			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
653e0f66ef8SJohn Baldwin 		else {
654e0f66ef8SJohn Baldwin 			ie->ie_flags |= IE_ADDING_THREAD;
655e0f66ef8SJohn Baldwin 			mtx_unlock(&ie->ie_lock);
656e0f66ef8SJohn Baldwin 			it = ithread_create("intr: newborn");
657e0f66ef8SJohn Baldwin 			mtx_lock(&ie->ie_lock);
658e0f66ef8SJohn Baldwin 			ie->ie_flags &= ~IE_ADDING_THREAD;
659e0f66ef8SJohn Baldwin 			ie->ie_thread = it;
660e0f66ef8SJohn Baldwin 			it->it_event = ie;
661e0f66ef8SJohn Baldwin 			ithread_update(it);
662e0f66ef8SJohn Baldwin 			wakeup(ie);
663e0f66ef8SJohn Baldwin 		}
664e0f66ef8SJohn Baldwin 	}
665c9516c94SAlexander Kabaev 
666c9516c94SAlexander Kabaev 	/* Add the new handler to the event in priority order. */
667111b043cSAndriy Gapon 	CK_SLIST_FOREACH_PREVPTR(temp_ih, prevptr, &ie->ie_handlers, ih_next) {
668c9516c94SAlexander Kabaev 		if (temp_ih->ih_pri > ih->ih_pri)
669c9516c94SAlexander Kabaev 			break;
670c9516c94SAlexander Kabaev 	}
671111b043cSAndriy Gapon 	CK_SLIST_INSERT_PREVPTR(prevptr, temp_ih, ih, ih_next);
672111b043cSAndriy Gapon 
673c9516c94SAlexander Kabaev 	intr_event_update(ie);
674c9516c94SAlexander Kabaev 
675e0f66ef8SJohn Baldwin 	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
676e0f66ef8SJohn Baldwin 	    ie->ie_name);
677e0f66ef8SJohn Baldwin 	mtx_unlock(&ie->ie_lock);
678e0f66ef8SJohn Baldwin 
679e0f66ef8SJohn Baldwin 	if (cookiep != NULL)
680e0f66ef8SJohn Baldwin 		*cookiep = ih;
681e0f66ef8SJohn Baldwin 	return (0);
682e0f66ef8SJohn Baldwin }
683b4151f71SJohn Baldwin 
684c3045318SJohn Baldwin /*
68537b8ef16SJohn Baldwin  * Append a description preceded by a ':' to the name of the specified
68637b8ef16SJohn Baldwin  * interrupt handler.
68737b8ef16SJohn Baldwin  */
68837b8ef16SJohn Baldwin int
68937b8ef16SJohn Baldwin intr_event_describe_handler(struct intr_event *ie, void *cookie,
69037b8ef16SJohn Baldwin     const char *descr)
69137b8ef16SJohn Baldwin {
69237b8ef16SJohn Baldwin 	struct intr_handler *ih;
69337b8ef16SJohn Baldwin 	size_t space;
69437b8ef16SJohn Baldwin 	char *start;
69537b8ef16SJohn Baldwin 
69637b8ef16SJohn Baldwin 	mtx_lock(&ie->ie_lock);
69737b8ef16SJohn Baldwin #ifdef INVARIANTS
698111b043cSAndriy Gapon 	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
69937b8ef16SJohn Baldwin 		if (ih == cookie)
70037b8ef16SJohn Baldwin 			break;
70137b8ef16SJohn Baldwin 	}
70237b8ef16SJohn Baldwin 	if (ih == NULL) {
70337b8ef16SJohn Baldwin 		mtx_unlock(&ie->ie_lock);
704d0c9a291SJohn Baldwin 		panic("handler %p not found in interrupt event %p", cookie, ie);
70537b8ef16SJohn Baldwin 	}
70637b8ef16SJohn Baldwin #endif
70737b8ef16SJohn Baldwin 	ih = cookie;
70837b8ef16SJohn Baldwin 
70937b8ef16SJohn Baldwin 	/*
71037b8ef16SJohn Baldwin 	 * Look for an existing description by checking for an
71137b8ef16SJohn Baldwin 	 * existing ":".  This assumes device names do not include
71237b8ef16SJohn Baldwin 	 * colons.  If one is found, prepare to insert the new
71337b8ef16SJohn Baldwin 	 * description at that point.  If one is not found, find the
71437b8ef16SJohn Baldwin 	 * end of the name to use as the insertion point.
71537b8ef16SJohn Baldwin 	 */
716dc15eac0SEd Schouten 	start = strchr(ih->ih_name, ':');
71737b8ef16SJohn Baldwin 	if (start == NULL)
718dc15eac0SEd Schouten 		start = strchr(ih->ih_name, 0);
71937b8ef16SJohn Baldwin 
72037b8ef16SJohn Baldwin 	/*
72137b8ef16SJohn Baldwin 	 * See if there is enough remaining room in the string for the
72237b8ef16SJohn Baldwin 	 * description + ":".  The "- 1" leaves room for the trailing
72337b8ef16SJohn Baldwin 	 * '\0'.  The "+ 1" accounts for the colon.
72437b8ef16SJohn Baldwin 	 */
72537b8ef16SJohn Baldwin 	space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1;
72637b8ef16SJohn Baldwin 	if (strlen(descr) + 1 > space) {
72737b8ef16SJohn Baldwin 		mtx_unlock(&ie->ie_lock);
72837b8ef16SJohn Baldwin 		return (ENOSPC);
72937b8ef16SJohn Baldwin 	}
73037b8ef16SJohn Baldwin 
73137b8ef16SJohn Baldwin 	/* Append a colon followed by the description. */
73237b8ef16SJohn Baldwin 	*start = ':';
73337b8ef16SJohn Baldwin 	strcpy(start + 1, descr);
73437b8ef16SJohn Baldwin 	intr_event_update(ie);
73537b8ef16SJohn Baldwin 	mtx_unlock(&ie->ie_lock);
73637b8ef16SJohn Baldwin 	return (0);
73737b8ef16SJohn Baldwin }
73837b8ef16SJohn Baldwin 
73937b8ef16SJohn Baldwin /*
740c3045318SJohn Baldwin  * Return the ie_source field from the intr_event an intr_handler is
741c3045318SJohn Baldwin  * associated with.
742c3045318SJohn Baldwin  */
743c3045318SJohn Baldwin void *
744c3045318SJohn Baldwin intr_handler_source(void *cookie)
745c3045318SJohn Baldwin {
746c3045318SJohn Baldwin 	struct intr_handler *ih;
747c3045318SJohn Baldwin 	struct intr_event *ie;
748c3045318SJohn Baldwin 
749c3045318SJohn Baldwin 	ih = (struct intr_handler *)cookie;
750c3045318SJohn Baldwin 	if (ih == NULL)
751c3045318SJohn Baldwin 		return (NULL);
752c3045318SJohn Baldwin 	ie = ih->ih_event;
753c3045318SJohn Baldwin 	KASSERT(ie != NULL,
754c3045318SJohn Baldwin 	    ("interrupt handler \"%s\" has a NULL interrupt event",
755c3045318SJohn Baldwin 	    ih->ih_name));
756c3045318SJohn Baldwin 	return (ie->ie_source);
757c3045318SJohn Baldwin }
758c3045318SJohn Baldwin 
759e4cd31ddSJeff Roberson /*
760e0fa977eSAndriy Gapon  * If intr_event_handle() is running in the ISR context at the time of the call,
761e0fa977eSAndriy Gapon  * then wait for it to complete.
762e0fa977eSAndriy Gapon  */
763e0fa977eSAndriy Gapon static void
764e0fa977eSAndriy Gapon intr_event_barrier(struct intr_event *ie)
765e0fa977eSAndriy Gapon {
766e0fa977eSAndriy Gapon 	int phase;
767e0fa977eSAndriy Gapon 
768e0fa977eSAndriy Gapon 	mtx_assert(&ie->ie_lock, MA_OWNED);
769e0fa977eSAndriy Gapon 	phase = ie->ie_phase;
770e0fa977eSAndriy Gapon 
771e0fa977eSAndriy Gapon 	/*
772e0fa977eSAndriy Gapon 	 * Switch phase to direct future interrupts to the other active counter.
773e0fa977eSAndriy Gapon 	 * Make sure that any preceding stores are visible before the switch.
774e0fa977eSAndriy Gapon 	 */
775e0fa977eSAndriy Gapon 	KASSERT(ie->ie_active[!phase] == 0, ("idle phase has activity"));
776e0fa977eSAndriy Gapon 	atomic_store_rel_int(&ie->ie_phase, !phase);
777e0fa977eSAndriy Gapon 
778e0fa977eSAndriy Gapon 	/*
779e0fa977eSAndriy Gapon 	 * This code cooperates with wait-free iteration of ie_handlers
780e0fa977eSAndriy Gapon 	 * in intr_event_handle.
781e0fa977eSAndriy Gapon 	 * Make sure that the removal and the phase update are not reordered
782e0fa977eSAndriy Gapon 	 * with the active count check.
783e0fa977eSAndriy Gapon 	 * Note that no combination of acquire and release fences can provide
784e0fa977eSAndriy Gapon 	 * that guarantee as Store->Load sequences can always be reordered.
785e0fa977eSAndriy Gapon 	 */
786e0fa977eSAndriy Gapon 	atomic_thread_fence_seq_cst();
787e0fa977eSAndriy Gapon 
788e0fa977eSAndriy Gapon 	/*
789e0fa977eSAndriy Gapon 	 * Now wait on the inactive phase.
790d744e271SGordon Bergling 	 * The acquire fence is needed so that all post-barrier accesses
791e0fa977eSAndriy Gapon 	 * are after the check.
792e0fa977eSAndriy Gapon 	 */
793e0fa977eSAndriy Gapon 	while (ie->ie_active[phase] > 0)
794e0fa977eSAndriy Gapon 		cpu_spinwait();
795e0fa977eSAndriy Gapon 	atomic_thread_fence_acq();
796e0fa977eSAndriy Gapon }
797e0fa977eSAndriy Gapon 
/*
 * Per-handler barrier.  Without an ithread this degenerates to
 * intr_event_barrier(); with one, the IH_CHANGED flag is set and the
 * ithread is scheduled, and we sleep until the ithread acknowledges the
 * flag, guaranteeing it has observed the handler's updated state.
 */
79882a5a275SAndriy Gapon static void
79982a5a275SAndriy Gapon intr_handler_barrier(struct intr_handler *handler)
80082a5a275SAndriy Gapon {
80182a5a275SAndriy Gapon 	struct intr_event *ie;
80282a5a275SAndriy Gapon 
80382a5a275SAndriy Gapon 	ie = handler->ih_event;
80482a5a275SAndriy Gapon 	mtx_assert(&ie->ie_lock, MA_OWNED);
80582a5a275SAndriy Gapon 	KASSERT((handler->ih_flags & IH_DEAD) == 0,
80682a5a275SAndriy Gapon 	    ("update for a removed handler"));
80782a5a275SAndriy Gapon 
80882a5a275SAndriy Gapon 	if (ie->ie_thread == NULL) {
80982a5a275SAndriy Gapon 		intr_event_barrier(ie);
81082a5a275SAndriy Gapon 		return;
81182a5a275SAndriy Gapon 	}
81282a5a275SAndriy Gapon 	if ((handler->ih_flags & IH_CHANGED) == 0) {
81382a5a275SAndriy Gapon 		handler->ih_flags |= IH_CHANGED;
8146fa041d7SWojciech Macek 		intr_event_schedule_thread(ie, NULL);
81582a5a275SAndriy Gapon 	}
81682a5a275SAndriy Gapon 	while ((handler->ih_flags & IH_CHANGED) != 0)
81782a5a275SAndriy Gapon 		msleep(handler, &ie->ie_lock, 0, "ih_barr", 0);
81882a5a275SAndriy Gapon }
81982a5a275SAndriy Gapon 
820e0fa977eSAndriy Gapon /*
821e4cd31ddSJeff Roberson  * Sleep until an ithread finishes executing an interrupt handler.
822e4cd31ddSJeff Roberson  *
823e4cd31ddSJeff Roberson  * XXX Doesn't currently handle interrupt filters or fast interrupt
8246eb60f5bSHans Petter Selasky  * handlers. This is intended for LinuxKPI drivers only.
8256eb60f5bSHans Petter Selasky  * Do not use in BSD code.
826e4cd31ddSJeff Roberson  */
827e4cd31ddSJeff Roberson void
828e4cd31ddSJeff Roberson _intr_drain(int irq)
829e4cd31ddSJeff Roberson {
830e4cd31ddSJeff Roberson 	struct intr_event *ie;
831e4cd31ddSJeff Roberson 	struct intr_thread *ithd;
832e4cd31ddSJeff Roberson 	struct thread *td;
833e4cd31ddSJeff Roberson 
834e4cd31ddSJeff Roberson 	ie = intr_lookup(irq);
835e4cd31ddSJeff Roberson 	if (ie == NULL)
836e4cd31ddSJeff Roberson 		return;
837e4cd31ddSJeff Roberson 	if (ie->ie_thread == NULL)
838e4cd31ddSJeff Roberson 		return;
839e4cd31ddSJeff Roberson 	ithd = ie->ie_thread;
840e4cd31ddSJeff Roberson 	td = ithd->it_thread;
8415bd186a6SJeff Roberson 	/*
8425bd186a6SJeff Roberson 	 * We set the flag and wait for it to be cleared to avoid
8435bd186a6SJeff Roberson 	 * long delays with potentially busy interrupt handlers
8445bd186a6SJeff Roberson 	 * were we to only sample TD_AWAITING_INTR() every tick.
8455bd186a6SJeff Roberson 	 */
846e4cd31ddSJeff Roberson 	thread_lock(td);
847e4cd31ddSJeff Roberson 	if (!TD_AWAITING_INTR(td)) {
848e4cd31ddSJeff Roberson 		ithd->it_flags |= IT_WAIT;
8495bd186a6SJeff Roberson 		while (ithd->it_flags & IT_WAIT) {
8505bd186a6SJeff Roberson 			thread_unlock(td);
8515bd186a6SJeff Roberson 			pause("idrain", 1);
8525bd186a6SJeff Roberson 			thread_lock(td);
853e4cd31ddSJeff Roberson 		}
8545bd186a6SJeff Roberson 	}
8555bd186a6SJeff Roberson 	thread_unlock(td);
856e4cd31ddSJeff Roberson 	return;
857e4cd31ddSJeff Roberson }
858e4cd31ddSJeff Roberson 
/*
 * Remove an interrupt handler from its event.  If the event has no
 * ithread the handler is unlinked directly and intr_event_barrier()
 * waits out any concurrent lock-less iteration before the handler is
 * freed; otherwise the handler is marked IH_DEAD and we sleep until the
 * ithread reaps it.
 */
859b4151f71SJohn Baldwin int
860e0f66ef8SJohn Baldwin intr_event_remove_handler(void *cookie)
861b4151f71SJohn Baldwin {
862e0f66ef8SJohn Baldwin 	struct intr_handler *handler = (struct intr_handler *)cookie;
863e0f66ef8SJohn Baldwin 	struct intr_event *ie;
864e0f66ef8SJohn Baldwin 	struct intr_handler *ih;
865111b043cSAndriy Gapon 	struct intr_handler **prevptr;
866b4151f71SJohn Baldwin 
8673e5da754SJohn Baldwin 	if (handler == NULL)
868b4151f71SJohn Baldwin 		return (EINVAL);
869e0f66ef8SJohn Baldwin 	ie = handler->ih_event;
870e0f66ef8SJohn Baldwin 	KASSERT(ie != NULL,
871e0f66ef8SJohn Baldwin 	    ("interrupt handler \"%s\" has a NULL interrupt event",
8723e5da754SJohn Baldwin 	    handler->ih_name));
873111b043cSAndriy Gapon 
874e0f66ef8SJohn Baldwin 	mtx_lock(&ie->ie_lock);
87591f91617SDavid E. O'Brien 	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
876e0f66ef8SJohn Baldwin 	    ie->ie_name);
877111b043cSAndriy Gapon 	CK_SLIST_FOREACH_PREVPTR(ih, prevptr, &ie->ie_handlers, ih_next) {
8783e5da754SJohn Baldwin 		if (ih == handler)
879111b043cSAndriy Gapon 			break;
880111b043cSAndriy Gapon 	}
881111b043cSAndriy Gapon 	if (ih == NULL) {
882111b043cSAndriy Gapon 		panic("interrupt handler \"%s\" not found in "
883111b043cSAndriy Gapon 		    "interrupt event \"%s\"", handler->ih_name, ie->ie_name);
884111b043cSAndriy Gapon 	}
885111b043cSAndriy Gapon 
886e0f66ef8SJohn Baldwin 	if (ie->ie_thread == NULL) {
887177624f2SMark Johnston 		/*
888177624f2SMark Johnston 		 * If there is no ithread, then directly remove the handler.
889177624f2SMark Johnston 		 * Note that intr_event_handle() iterates ie_handlers in a
890177624f2SMark Johnston 		 * lock-less fashion, so care needs to be taken to keep
891177624f2SMark Johnston 		 * ie_handlers consistent and to free the removed handler only
892177624f2SMark Johnston 		 * when ie_handlers is quiescent.
893177624f2SMark Johnston 		 */
894111b043cSAndriy Gapon 		CK_SLIST_REMOVE_PREVPTR(prevptr, ih, ih_next);
895e0fa977eSAndriy Gapon 		intr_event_barrier(ie);
896177624f2SMark Johnston 	} else {
897e0f66ef8SJohn Baldwin 		/*
898177624f2SMark Johnston 		 * Let the interrupt thread do the job.  The interrupt source is
899177624f2SMark Johnston 		 * disabled when the interrupt thread is running, so it does not
900177624f2SMark Johnston 		 * have to worry about interaction with intr_event_handle().
901de271f01SJohn Baldwin 		 */
902e0fa977eSAndriy Gapon 		KASSERT((handler->ih_flags & IH_DEAD) == 0,
903e0fa977eSAndriy Gapon 		    ("duplicate handle remove"));
904de271f01SJohn Baldwin 		handler->ih_flags |= IH_DEAD;
9056fa041d7SWojciech Macek 		intr_event_schedule_thread(ie, NULL);
906e0f66ef8SJohn Baldwin 		while (handler->ih_flags & IH_DEAD)
9070f180a7cSJohn Baldwin 			msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
908177624f2SMark Johnston 	}
909e0f66ef8SJohn Baldwin 	intr_event_update(ie);
910e0f66ef8SJohn Baldwin 	mtx_unlock(&ie->ie_lock);
911b4151f71SJohn Baldwin 	free(handler, M_ITHREAD);
912b4151f71SJohn Baldwin 	return (0);
913b4151f71SJohn Baldwin }
914b4151f71SJohn Baldwin 
/*
 * Mark a handler as suspended (IH_SUSP) and wait, via
 * intr_handler_barrier(), until any in-flight execution has drained.
 */
91582a5a275SAndriy Gapon int
91682a5a275SAndriy Gapon intr_event_suspend_handler(void *cookie)
91782a5a275SAndriy Gapon {
91882a5a275SAndriy Gapon 	struct intr_handler *handler = (struct intr_handler *)cookie;
91982a5a275SAndriy Gapon 	struct intr_event *ie;
92082a5a275SAndriy Gapon 
92182a5a275SAndriy Gapon 	if (handler == NULL)
92282a5a275SAndriy Gapon 		return (EINVAL);
92382a5a275SAndriy Gapon 	ie = handler->ih_event;
92482a5a275SAndriy Gapon 	KASSERT(ie != NULL,
92582a5a275SAndriy Gapon 	    ("interrupt handler \"%s\" has a NULL interrupt event",
92682a5a275SAndriy Gapon 	    handler->ih_name));
92782a5a275SAndriy Gapon 	mtx_lock(&ie->ie_lock);
92882a5a275SAndriy Gapon 	handler->ih_flags |= IH_SUSP;
92982a5a275SAndriy Gapon 	intr_handler_barrier(handler);
93082a5a275SAndriy Gapon 	mtx_unlock(&ie->ie_lock);
93182a5a275SAndriy Gapon 	return (0);
93282a5a275SAndriy Gapon }
93382a5a275SAndriy Gapon 
/*
 * Clear a handler's suspended state (IH_SUSP); the barrier call also
 * flushes any interrupts that arrived while suspended.
 */
93482a5a275SAndriy Gapon int
93582a5a275SAndriy Gapon intr_event_resume_handler(void *cookie)
93682a5a275SAndriy Gapon {
93782a5a275SAndriy Gapon 	struct intr_handler *handler = (struct intr_handler *)cookie;
93882a5a275SAndriy Gapon 	struct intr_event *ie;
93982a5a275SAndriy Gapon 
94082a5a275SAndriy Gapon 	if (handler == NULL)
94182a5a275SAndriy Gapon 		return (EINVAL);
94282a5a275SAndriy Gapon 	ie = handler->ih_event;
94382a5a275SAndriy Gapon 	KASSERT(ie != NULL,
94482a5a275SAndriy Gapon 	    ("interrupt handler \"%s\" has a NULL interrupt event",
94582a5a275SAndriy Gapon 	    handler->ih_name));
94682a5a275SAndriy Gapon 
94782a5a275SAndriy Gapon 	/*
94882a5a275SAndriy Gapon 	 * intr_handler_barrier() acts not only as a barrier,
94982a5a275SAndriy Gapon 	 * it also allows to check for any pending interrupts.
95082a5a275SAndriy Gapon 	 */
95182a5a275SAndriy Gapon 	mtx_lock(&ie->ie_lock);
95282a5a275SAndriy Gapon 	handler->ih_flags &= ~IH_SUSP;
95382a5a275SAndriy Gapon 	intr_handler_barrier(handler);
95482a5a275SAndriy Gapon 	mtx_unlock(&ie->ie_lock);
95582a5a275SAndriy Gapon 	return (0);
95682a5a275SAndriy Gapon }
95782a5a275SAndriy Gapon 
/*
 * Mark an event's ithread as needing another pass (it_need) and wake it
 * if it is sleeping in IWAIT.  Returns EINVAL for a stray interrupt
 * (no event, no handlers, or no thread).
 */
9581ee1b687SJohn Baldwin static int
9596fa041d7SWojciech Macek intr_event_schedule_thread(struct intr_event *ie, struct trapframe *frame)
9603e5da754SJohn Baldwin {
961e0f66ef8SJohn Baldwin 	struct intr_entropy entropy;
962e0f66ef8SJohn Baldwin 	struct intr_thread *it;
963b40ce416SJulian Elischer 	struct thread *td;
96404774f23SJulian Elischer 	struct thread *ctd;
9653e5da754SJohn Baldwin 
9663e5da754SJohn Baldwin 	/*
9673e5da754SJohn Baldwin 	 * If no ithread or no handlers, then we have a stray interrupt.
9683e5da754SJohn Baldwin 	 */
969111b043cSAndriy Gapon 	if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers) ||
970e0f66ef8SJohn Baldwin 	    ie->ie_thread == NULL)
9713e5da754SJohn Baldwin 		return (EINVAL);
9723e5da754SJohn Baldwin 
97304774f23SJulian Elischer 	ctd = curthread;
974e0f66ef8SJohn Baldwin 	it = ie->ie_thread;
975e0f66ef8SJohn Baldwin 	td = it->it_thread;
976e0f66ef8SJohn Baldwin 
9773e5da754SJohn Baldwin 	/*
9783e5da754SJohn Baldwin 	 * If any of the handlers for this ithread claim to be good
9793e5da754SJohn Baldwin 	 * sources of entropy, then gather some.
9803e5da754SJohn Baldwin 	 */
981c4eb6630SGleb Smirnoff 	if (ie->ie_hflags & IH_ENTROPY) {
982e0f66ef8SJohn Baldwin 		entropy.event = (uintptr_t)ie;
983e0f66ef8SJohn Baldwin 		entropy.td = ctd;
98419fa89e9SMark Murray 		random_harvest_queue(&entropy, sizeof(entropy), RANDOM_INTERRUPT);
9853e5da754SJohn Baldwin 	}
9863e5da754SJohn Baldwin 
987ba3f7276SMatt Macy 	KASSERT(td->td_proc != NULL, ("ithread %s has no process", ie->ie_name));
9883e5da754SJohn Baldwin 
9893e5da754SJohn Baldwin 	/*
9903e5da754SJohn Baldwin 	 * Set it_need to tell the thread to keep running if it is already
991982d11f8SJeff Roberson 	 * running.  Then, lock the thread and see if we actually need to
992982d11f8SJeff Roberson 	 * put it on the runqueue.
993283dfee9SKonstantin Belousov 	 *
994283dfee9SKonstantin Belousov 	 * Use store_rel to arrange that the store to ih_need in
995283dfee9SKonstantin Belousov 	 * swi_sched() is before the store to it_need and prepare for
996283dfee9SKonstantin Belousov 	 * transfer of this order to loads in the ithread.
9973e5da754SJohn Baldwin 	 */
9983eebd44dSAlfred Perlstein 	atomic_store_rel_int(&it->it_need, 1);
999982d11f8SJeff Roberson 	thread_lock(td);
100071fad9fdSJulian Elischer 	if (TD_AWAITING_INTR(td)) {
10016fa041d7SWojciech Macek #ifdef HWPMC_HOOKS
10027bc13692SWojciech Macek 		it->it_waiting = 0;
10037bc13692SWojciech Macek 		if (PMC_HOOK_INSTALLED_ANY())
10047bc13692SWojciech Macek 			PMC_SOFT_CALL_INTR_HLPR(schedule, frame);
10056fa041d7SWojciech Macek #endif
1006fc2e87beSMatt Macy 		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, td->td_proc->p_pid,
10077ab24ea3SJulian Elischer 		    td->td_name);
100871fad9fdSJulian Elischer 		TD_CLR_IWAIT(td);
1009ed998d1cSJohn Baldwin 		sched_wakeup(td, SRQ_INTR);
10103e5da754SJohn Baldwin 	} else {
10116fa041d7SWojciech Macek #ifdef HWPMC_HOOKS
10127bc13692SWojciech Macek 		it->it_waiting++;
10137bc13692SWojciech Macek 		if (PMC_HOOK_INSTALLED_ANY() &&
10147bc13692SWojciech Macek 		    (it->it_waiting >= intr_hwpmc_waiting_report_threshold))
10157bc13692SWojciech Macek 			PMC_SOFT_CALL_INTR_HLPR(waiting, frame);
10166fa041d7SWojciech Macek #endif
1017e0f66ef8SJohn Baldwin 		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
1018fa2528acSAlex Richardson 		    __func__, td->td_proc->p_pid, td->td_name, it->it_need, TD_GET_STATE(td));
1019982d11f8SJeff Roberson 		thread_unlock(td);
102061a74c5cSJeff Roberson 	}
10213e5da754SJohn Baldwin 
10223e5da754SJohn Baldwin 	return (0);
10233e5da754SJohn Baldwin }
10243e5da754SJohn Baldwin 
1025fe486a37SJohn Baldwin /*
1026e84bcd84SRobert Watson  * Allow interrupt event binding for software interrupt handlers -- a no-op,
1027e84bcd84SRobert Watson  * since interrupts are generated in software rather than being directed by
1028e84bcd84SRobert Watson  * a PIC.
1029e84bcd84SRobert Watson  */
1030e84bcd84SRobert Watson static int
1031066da805SAdrian Chadd swi_assign_cpu(void *arg, int cpu)
1032e84bcd84SRobert Watson {
1033e84bcd84SRobert Watson 
1034e84bcd84SRobert Watson 	return (0);
1035e84bcd84SRobert Watson }
1036e84bcd84SRobert Watson 
1037e84bcd84SRobert Watson /*
1038fe486a37SJohn Baldwin  * Add a software interrupt handler to a specified event.  If a given event
1039fe486a37SJohn Baldwin  * is not specified, then a new event is created.
1040fe486a37SJohn Baldwin  */
10413e5da754SJohn Baldwin int
1042e0f66ef8SJohn Baldwin swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
1043b4151f71SJohn Baldwin     void *arg, int pri, enum intr_type flags, void **cookiep)
10448088699fSJohn Baldwin {
1045e0f66ef8SJohn Baldwin 	struct intr_event *ie;
1046aba10e13SAlexander Motin 	int error = 0;
10478088699fSJohn Baldwin 
1048bafe5a31SPaolo Pisati 	if (flags & INTR_ENTROPY)
10493e5da754SJohn Baldwin 		return (EINVAL);
10503e5da754SJohn Baldwin 
1051e0f66ef8SJohn Baldwin 	ie = (eventp != NULL) ? *eventp : NULL;
10528088699fSJohn Baldwin 
1053e0f66ef8SJohn Baldwin 	if (ie != NULL) {
1054e0f66ef8SJohn Baldwin 		if (!(ie->ie_flags & IE_SOFT))
10553e5da754SJohn Baldwin 			return (EINVAL);
10563e5da754SJohn Baldwin 	} else {
10579b33b154SJeff Roberson 		error = intr_event_create(&ie, NULL, IE_SOFT, 0,
1058e84bcd84SRobert Watson 		    NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
10598088699fSJohn Baldwin 		if (error)
1060b4151f71SJohn Baldwin 			return (error);
1061e0f66ef8SJohn Baldwin 		if (eventp != NULL)
1062e0f66ef8SJohn Baldwin 			*eventp = ie;
10638088699fSJohn Baldwin 	}
1064aba10e13SAlexander Motin 	if (handler != NULL) {
10658d809d50SJeff Roberson 		error = intr_event_add_handler(ie, name, NULL, handler, arg,
1066d3305205SJohn Baldwin 		    PI_SWI(pri), flags, cookiep);
1067aba10e13SAlexander Motin 	}
10688d809d50SJeff Roberson 	return (error);
10698088699fSJohn Baldwin }
10708088699fSJohn Baldwin 
10711931cf94SJohn Baldwin /*
1072e0f66ef8SJohn Baldwin  * Schedule a software interrupt thread.
10731931cf94SJohn Baldwin  */
10741931cf94SJohn Baldwin void
1075b4151f71SJohn Baldwin swi_sched(void *cookie, int flags)
10761931cf94SJohn Baldwin {
1077e0f66ef8SJohn Baldwin 	struct intr_handler *ih = (struct intr_handler *)cookie;
1078e0f66ef8SJohn Baldwin 	struct intr_event *ie = ih->ih_event;
1079d95dca1dSJohn Baldwin 	struct intr_entropy entropy;
1080ba3f7276SMatt Macy 	int error __unused;
10818088699fSJohn Baldwin 
1082e0f66ef8SJohn Baldwin 	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
1083e0f66ef8SJohn Baldwin 	    ih->ih_need);
10841931cf94SJohn Baldwin 
1085aba10e13SAlexander Motin 	if ((flags & SWI_FROMNMI) == 0) {
1086d95dca1dSJohn Baldwin 		entropy.event = (uintptr_t)ih;
1087d95dca1dSJohn Baldwin 		entropy.td = curthread;
108819fa89e9SMark Murray 		random_harvest_queue(&entropy, sizeof(entropy), RANDOM_SWI);
1089aba10e13SAlexander Motin 	}
1090d95dca1dSJohn Baldwin 
10911931cf94SJohn Baldwin 	/*
10923e5da754SJohn Baldwin 	 * Set ih_need for this handler so that if the ithread is already
10933e5da754SJohn Baldwin 	 * running it will execute this handler on the next pass.  Otherwise,
10943e5da754SJohn Baldwin 	 * it will execute it the next time it runs.
10951931cf94SJohn Baldwin 	 */
1096283dfee9SKonstantin Belousov 	ih->ih_need = 1;
10971ca2c018SBruce Evans 
1098aba10e13SAlexander Motin 	if (flags & SWI_DELAY)
1099aba10e13SAlexander Motin 		return;
1100aba10e13SAlexander Motin 
1101aba10e13SAlexander Motin 	if (flags & SWI_FROMNMI) {
1102aba10e13SAlexander Motin #if defined(SMP) && (defined(__i386__) || defined(__amd64__))
1103aba10e13SAlexander Motin 		KASSERT(ie == clk_intr_event,
1104aba10e13SAlexander Motin 		    ("SWI_FROMNMI used not with clk_intr_event"));
1105aba10e13SAlexander Motin 		ipi_self_from_nmi(IPI_SWI);
1106aba10e13SAlexander Motin #endif
1107aba10e13SAlexander Motin 	} else {
110883c9dea1SGleb Smirnoff 		VM_CNT_INC(v_soft);
11096fa041d7SWojciech Macek 		error = intr_event_schedule_thread(ie, NULL);
11103e5da754SJohn Baldwin 		KASSERT(error == 0, ("stray software interrupt"));
11118088699fSJohn Baldwin 	}
11128088699fSJohn Baldwin }
11138088699fSJohn Baldwin 
1114fe486a37SJohn Baldwin /*
1115fe486a37SJohn Baldwin  * Remove a software interrupt handler.  Currently this code does not
1116fe486a37SJohn Baldwin  * remove the associated interrupt event if it becomes empty.  Calling code
1117fe486a37SJohn Baldwin  * may do so manually via intr_event_destroy(), but that's not really
1118fe486a37SJohn Baldwin  * an optimal interface.
1119fe486a37SJohn Baldwin  */
1120fe486a37SJohn Baldwin int
1121fe486a37SJohn Baldwin swi_remove(void *cookie)
1122fe486a37SJohn Baldwin {
1123fe486a37SJohn Baldwin 
1124fe486a37SJohn Baldwin 	return (intr_event_remove_handler(cookie));
1125fe486a37SJohn Baldwin }
1126fe486a37SJohn Baldwin 
/*
 * Invoke each live handler on the event, reaping IH_DEAD handlers and
 * acknowledging IH_CHANGED barrier requests along the way.  The event
 * lock is taken only transiently for list maintenance; the traversal
 * itself is done with the lock-less-safe CK_SLIST iterator.
 */
1127111b043cSAndriy Gapon static void
112837e9511fSJohn Baldwin intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
1129e0f66ef8SJohn Baldwin {
1130111b043cSAndriy Gapon 	struct intr_handler *ih, *ihn, *ihp;
1131e0f66ef8SJohn Baldwin 
1132111b043cSAndriy Gapon 	ihp = NULL;
1133111b043cSAndriy Gapon 	CK_SLIST_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
1134e0f66ef8SJohn Baldwin 		/*
1135e0f66ef8SJohn Baldwin 		 * If this handler is marked for death, remove it from
1136e0f66ef8SJohn Baldwin 		 * the list of handlers and wake up the sleeper.
1137e0f66ef8SJohn Baldwin 		 */
1138e0f66ef8SJohn Baldwin 		if (ih->ih_flags & IH_DEAD) {
1139e0f66ef8SJohn Baldwin 			mtx_lock(&ie->ie_lock);
1140111b043cSAndriy Gapon 			if (ihp == NULL)
1141111b043cSAndriy Gapon 				CK_SLIST_REMOVE_HEAD(&ie->ie_handlers, ih_next);
1142111b043cSAndriy Gapon 			else
1143111b043cSAndriy Gapon 				CK_SLIST_REMOVE_AFTER(ihp, ih_next);
1144e0f66ef8SJohn Baldwin 			ih->ih_flags &= ~IH_DEAD;
1145e0f66ef8SJohn Baldwin 			wakeup(ih);
1146e0f66ef8SJohn Baldwin 			mtx_unlock(&ie->ie_lock);
1147e0f66ef8SJohn Baldwin 			continue;
1148e0f66ef8SJohn Baldwin 		}
1149e0f66ef8SJohn Baldwin 
1150111b043cSAndriy Gapon 		/*
1151111b043cSAndriy Gapon 		 * Now that we know that the current element won't be removed
1152111b043cSAndriy Gapon 		 * update the previous element.
1153111b043cSAndriy Gapon 		 */
1154111b043cSAndriy Gapon 		ihp = ih;
1155111b043cSAndriy Gapon 
115682a5a275SAndriy Gapon 		if ((ih->ih_flags & IH_CHANGED) != 0) {
115782a5a275SAndriy Gapon 			mtx_lock(&ie->ie_lock);
115882a5a275SAndriy Gapon 			ih->ih_flags &= ~IH_CHANGED;
115982a5a275SAndriy Gapon 			wakeup(ih);
116082a5a275SAndriy Gapon 			mtx_unlock(&ie->ie_lock);
116182a5a275SAndriy Gapon 		}
116282a5a275SAndriy Gapon 
1163f2d619c8SPaolo Pisati 		/* Skip filter only handlers */
1164f2d619c8SPaolo Pisati 		if (ih->ih_handler == NULL)
1165f2d619c8SPaolo Pisati 			continue;
1166f2d619c8SPaolo Pisati 
116782a5a275SAndriy Gapon 		/* Skip suspended handlers */
116882a5a275SAndriy Gapon 		if ((ih->ih_flags & IH_SUSP) != 0)
116982a5a275SAndriy Gapon 			continue;
117082a5a275SAndriy Gapon 
1171e0f66ef8SJohn Baldwin 		/*
1172e0f66ef8SJohn Baldwin 		 * For software interrupt threads, we only execute
1173e0f66ef8SJohn Baldwin 		 * handlers that have their need flag set.  Hardware
1174e0f66ef8SJohn Baldwin 		 * interrupt threads always invoke all of their handlers.
11751b79b949SKirk McKusick 		 *
11761b79b949SKirk McKusick 		 * ih_need can only be 0 or 1.  Failed cmpset below
11771b79b949SKirk McKusick 		 * means that there is no request to execute handlers,
11781b79b949SKirk McKusick 		 * so a retry of the cmpset is not needed.
1179e0f66ef8SJohn Baldwin 		 */
11801b79b949SKirk McKusick 		if ((ie->ie_flags & IE_SOFT) != 0 &&
11811b79b949SKirk McKusick 		    atomic_cmpset_int(&ih->ih_need, 1, 0) == 0)
1182e0f66ef8SJohn Baldwin 			continue;
1183e0f66ef8SJohn Baldwin 
1184e0f66ef8SJohn Baldwin 		/* Execute this handler. */
1185e0f66ef8SJohn Baldwin 		CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
1186bafe5a31SPaolo Pisati 		    __func__, p->p_pid, (void *)ih->ih_handler,
1187bafe5a31SPaolo Pisati 		    ih->ih_argument, ih->ih_name, ih->ih_flags);
1188e0f66ef8SJohn Baldwin 
1189e0f66ef8SJohn Baldwin 		if (!(ih->ih_flags & IH_MPSAFE))
1190e0f66ef8SJohn Baldwin 			mtx_lock(&Giant);
1191e0f66ef8SJohn Baldwin 		ih->ih_handler(ih->ih_argument);
1192e0f66ef8SJohn Baldwin 		if (!(ih->ih_flags & IH_MPSAFE))
1193e0f66ef8SJohn Baldwin 			mtx_unlock(&Giant);
1194e0f66ef8SJohn Baldwin 	}
119537e9511fSJohn Baldwin }
119637e9511fSJohn Baldwin 
/*
 * Run an ithread's handlers once, then apply interrupt storm throttling
 * and re-enable the interrupt source via ie_post_ithread.
 */
119737e9511fSJohn Baldwin static void
119837e9511fSJohn Baldwin ithread_execute_handlers(struct proc *p, struct intr_event *ie)
119937e9511fSJohn Baldwin {
120037e9511fSJohn Baldwin 
1201*0863dc10SAndrew Turner 	/* Only specifically marked sleepable interrupt handlers can sleep. */
1202*0863dc10SAndrew Turner 	if (!(ie->ie_flags & (IE_SOFT | IE_SLEEPABLE)))
120337e9511fSJohn Baldwin 		THREAD_NO_SLEEPING();
120437e9511fSJohn Baldwin 	intr_event_execute_handlers(p, ie);
1205*0863dc10SAndrew Turner 	if (!(ie->ie_flags & (IE_SOFT | IE_SLEEPABLE)))
1206e0f66ef8SJohn Baldwin 		THREAD_SLEEPING_OK();
1207e0f66ef8SJohn Baldwin 
1208e0f66ef8SJohn Baldwin 	/*
1209e0f66ef8SJohn Baldwin 	 * Interrupt storm handling:
1210e0f66ef8SJohn Baldwin 	 *
1211e0f66ef8SJohn Baldwin 	 * If this interrupt source is currently storming, then throttle
1212e0f66ef8SJohn Baldwin 	 * it to only fire the handler once  per clock tick.
1213e0f66ef8SJohn Baldwin 	 *
1214e0f66ef8SJohn Baldwin 	 * If this interrupt source is not currently storming, but the
1215e0f66ef8SJohn Baldwin 	 * number of back to back interrupts exceeds the storm threshold,
1216e0f66ef8SJohn Baldwin 	 * then enter storming mode.
1217e0f66ef8SJohn Baldwin 	 */
1218c122d7ffSMark Johnston 	if (__predict_false(intr_storm_threshold != 0 &&
1219c122d7ffSMark Johnston 	    ie->ie_count >= intr_storm_threshold &&
1220c122d7ffSMark Johnston 	    (ie->ie_flags & IE_SOFT) == 0)) {
12210ae62c18SNate Lawson 		/* Report the message only once every second. */
12220ae62c18SNate Lawson 		if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
1223e0f66ef8SJohn Baldwin 			printf(
12240ae62c18SNate Lawson 	"interrupt storm detected on \"%s\"; throttling interrupt source\n",
1225e0f66ef8SJohn Baldwin 			    ie->ie_name);
1226e0f66ef8SJohn Baldwin 		}
1227e41bcf3cSJohn Baldwin 		pause("istorm", 1);
1228e0f66ef8SJohn Baldwin 	} else
1229e0f66ef8SJohn Baldwin 		ie->ie_count++;
1230e0f66ef8SJohn Baldwin 
1231e0f66ef8SJohn Baldwin 	/*
1232e0f66ef8SJohn Baldwin 	 * Now that all the handlers have had a chance to run, reenable
1233e0f66ef8SJohn Baldwin 	 * the interrupt source.
1234e0f66ef8SJohn Baldwin 	 */
12351ee1b687SJohn Baldwin 	if (ie->ie_post_ithread != NULL)
12361ee1b687SJohn Baldwin 		ie->ie_post_ithread(ie->ie_source);
1237e0f66ef8SJohn Baldwin }
1238e0f66ef8SJohn Baldwin 
12398088699fSJohn Baldwin /*
1240b4151f71SJohn Baldwin  * This is the main code for interrupt threads.
12418088699fSJohn Baldwin  */
124237c84183SPoul-Henning Kamp static void
1243b4151f71SJohn Baldwin ithread_loop(void *arg)
12448088699fSJohn Baldwin {
1245511d1afbSGleb Smirnoff 	struct epoch_tracker et;
1246e0f66ef8SJohn Baldwin 	struct intr_thread *ithd;
1247e0f66ef8SJohn Baldwin 	struct intr_event *ie;
1248b40ce416SJulian Elischer 	struct thread *td;
1249b4151f71SJohn Baldwin 	struct proc *p;
12508381e9f4SMark Johnston 	int epoch_count;
1251f912e8f2SHans Petter Selasky 	bool needs_epoch;
12528088699fSJohn Baldwin 
1253b40ce416SJulian Elischer 	td = curthread;
1254b40ce416SJulian Elischer 	p = td->td_proc;
1255e0f66ef8SJohn Baldwin 	ithd = (struct intr_thread *)arg;
1256e0f66ef8SJohn Baldwin 	KASSERT(ithd->it_thread == td,
125791f91617SDavid E. O'Brien 	    ("%s: ithread and proc linkage out of sync", __func__));
1258e0f66ef8SJohn Baldwin 	ie = ithd->it_event;
1259e0f66ef8SJohn Baldwin 	ie->ie_count = 0;
12608088699fSJohn Baldwin 
12618088699fSJohn Baldwin 	/*
12628088699fSJohn Baldwin 	 * As long as we have interrupts outstanding, go through the
12638088699fSJohn Baldwin 	 * list of handlers, giving each one a go at it.
12648088699fSJohn Baldwin 	 */
12658088699fSJohn Baldwin 	for (;;) {
1266b4151f71SJohn Baldwin 		/*
1267b4151f71SJohn Baldwin 		 * If we are an orphaned thread, then just die.
1268b4151f71SJohn Baldwin 		 */
12698381e9f4SMark Johnston 		if (__predict_false((ithd->it_flags & IT_DEAD) != 0)) {
1270e0f66ef8SJohn Baldwin 			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
12717ab24ea3SJulian Elischer 			    p->p_pid, td->td_name);
12728381e9f4SMark Johnston 			mtx_lock(&ie->ie_lock);
12738381e9f4SMark Johnston 			ie->ie_thread = NULL;
12748381e9f4SMark Johnston 			wakeup(ithd);
12758381e9f4SMark Johnston 			mtx_unlock(&ie->ie_lock);
12768381e9f4SMark Johnston 
1277b4151f71SJohn Baldwin 			free(ithd, M_ITHREAD);
1278ca9a0ddfSJulian Elischer 			kthread_exit();
1279b4151f71SJohn Baldwin 		}
1280b4151f71SJohn Baldwin 
1281e0f66ef8SJohn Baldwin 		/*
1282e0f66ef8SJohn Baldwin 		 * Service interrupts.  If another interrupt arrives while
1283e0f66ef8SJohn Baldwin 		 * we are running, it will set it_need to note that we
1284e0f66ef8SJohn Baldwin 		 * should make another pass.
1285283dfee9SKonstantin Belousov 		 *
1286283dfee9SKonstantin Belousov 		 * The load_acq part of the following cmpset ensures
1287283dfee9SKonstantin Belousov 		 * that the load of ih_need in ithread_execute_handlers()
1288283dfee9SKonstantin Belousov 		 * is ordered after the load of it_need here.
1289e0f66ef8SJohn Baldwin 		 */
1290f912e8f2SHans Petter Selasky 		needs_epoch =
1291f912e8f2SHans Petter Selasky 		    (atomic_load_int(&ie->ie_hflags) & IH_NET) != 0;
1292f912e8f2SHans Petter Selasky 		if (needs_epoch) {
1293511d1afbSGleb Smirnoff 			epoch_count = 0;
1294511d1afbSGleb Smirnoff 			NET_EPOCH_ENTER(et);
1295511d1afbSGleb Smirnoff 		}
1296511d1afbSGleb Smirnoff 		while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0) {
1297e0f66ef8SJohn Baldwin 			ithread_execute_handlers(p, ie);
1298f912e8f2SHans Petter Selasky 			if (needs_epoch &&
1299511d1afbSGleb Smirnoff 			    ++epoch_count >= intr_epoch_batch) {
1300511d1afbSGleb Smirnoff 				NET_EPOCH_EXIT(et);
1301511d1afbSGleb Smirnoff 				epoch_count = 0;
1302511d1afbSGleb Smirnoff 				NET_EPOCH_ENTER(et);
1303511d1afbSGleb Smirnoff 			}
1304511d1afbSGleb Smirnoff 		}
1305f912e8f2SHans Petter Selasky 		if (needs_epoch)
1306511d1afbSGleb Smirnoff 			NET_EPOCH_EXIT(et);
13077870c3c6SJohn Baldwin 		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
13087870c3c6SJohn Baldwin 		mtx_assert(&Giant, MA_NOTOWNED);
13098088699fSJohn Baldwin 
13108088699fSJohn Baldwin 		/*
13118088699fSJohn Baldwin 		 * Processed all our interrupts.  Now get the sched
13128088699fSJohn Baldwin 		 * lock.  This may take a while and it_need may get
13138088699fSJohn Baldwin 		 * set again, so we have to check it again.
13148088699fSJohn Baldwin 		 */
1315982d11f8SJeff Roberson 		thread_lock(td);
131603bbcb2fSKonstantin Belousov 		if (atomic_load_acq_int(&ithd->it_need) == 0 &&
131703bbcb2fSKonstantin Belousov 		    (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) {
13187870c3c6SJohn Baldwin 			TD_SET_IWAIT(td);
1319e0f66ef8SJohn Baldwin 			ie->ie_count = 0;
1320686bcb5cSJeff Roberson 			mi_switch(SW_VOL | SWT_IWAIT);
13218381e9f4SMark Johnston 		} else if ((ithd->it_flags & IT_WAIT) != 0) {
1322e4cd31ddSJeff Roberson 			ithd->it_flags &= ~IT_WAIT;
1323982d11f8SJeff Roberson 			thread_unlock(td);
1324e4cd31ddSJeff Roberson 			wakeup(ithd);
13258381e9f4SMark Johnston 		} else
13268381e9f4SMark Johnston 			thread_unlock(td);
13278088699fSJohn Baldwin 	}
13281931cf94SJohn Baldwin }
13291ee1b687SJohn Baldwin 
13301ee1b687SJohn Baldwin /*
13311ee1b687SJohn Baldwin  * Main interrupt handling body.
13321ee1b687SJohn Baldwin  *
13331ee1b687SJohn Baldwin  * Input:
13341ee1b687SJohn Baldwin  * o ie:                        the event connected to this interrupt.
1335a2409f17SWarner Losh --------------------------------------------------------------------------------
1336a2409f17SWarner Losh  * o frame:                     the current trap frame. If the client interrupt
1337a2409f17SWarner Losh  *                              handler needs this frame, they should get it
1338a2409f17SWarner Losh  *                              via curthread->td_intr_frame.
133938c35248SElliott Mitchell  *
13401ee1b687SJohn Baldwin  * Return value:
13411ee1b687SJohn Baldwin  * o 0:                         everything ok.
13421ee1b687SJohn Baldwin  * o EINVAL:                    stray interrupt.
13431ee1b687SJohn Baldwin */ 13441ee1b687SJohn Baldwin int 13451ee1b687SJohn Baldwin intr_event_handle(struct intr_event *ie, struct trapframe *frame) 13461ee1b687SJohn Baldwin { 13471ee1b687SJohn Baldwin struct intr_handler *ih; 13481f255bd3SAlexander Motin struct trapframe *oldframe; 13491ee1b687SJohn Baldwin struct thread *td; 1350e0fa977eSAndriy Gapon int phase; 135182a5a275SAndriy Gapon int ret; 135282a5a275SAndriy Gapon bool filter, thread; 13531ee1b687SJohn Baldwin 13541ee1b687SJohn Baldwin td = curthread; 13551ee1b687SJohn Baldwin 1356b7627840SKonstantin Belousov #ifdef KSTACK_USAGE_PROF 1357b7627840SKonstantin Belousov intr_prof_stack_use(td, frame); 1358b7627840SKonstantin Belousov #endif 1359b7627840SKonstantin Belousov 13601ee1b687SJohn Baldwin /* An interrupt with no event or handlers is a stray interrupt. */ 1361111b043cSAndriy Gapon if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers)) 13621ee1b687SJohn Baldwin return (EINVAL); 13631ee1b687SJohn Baldwin 13641ee1b687SJohn Baldwin /* 13651ee1b687SJohn Baldwin * Execute fast interrupt handlers directly. 13661ee1b687SJohn Baldwin */ 13671ee1b687SJohn Baldwin td->td_intr_nesting_level++; 136882a5a275SAndriy Gapon filter = false; 136982a5a275SAndriy Gapon thread = false; 13701ee1b687SJohn Baldwin ret = 0; 13711ee1b687SJohn Baldwin critical_enter(); 13721f255bd3SAlexander Motin oldframe = td->td_intr_frame; 13731f255bd3SAlexander Motin td->td_intr_frame = frame; 1374111b043cSAndriy Gapon 1375e0fa977eSAndriy Gapon phase = ie->ie_phase; 1376e0fa977eSAndriy Gapon atomic_add_int(&ie->ie_active[phase], 1); 1377e0fa977eSAndriy Gapon 1378e0fa977eSAndriy Gapon /* 1379e0fa977eSAndriy Gapon * This fence is required to ensure that no later loads are 1380e0fa977eSAndriy Gapon * re-ordered before the ie_active store. 
1381e0fa977eSAndriy Gapon */ 1382e0fa977eSAndriy Gapon atomic_thread_fence_seq_cst(); 1383e0fa977eSAndriy Gapon 1384111b043cSAndriy Gapon CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) { 138582a5a275SAndriy Gapon if ((ih->ih_flags & IH_SUSP) != 0) 138682a5a275SAndriy Gapon continue; 1387aba10e13SAlexander Motin if ((ie->ie_flags & IE_SOFT) != 0 && ih->ih_need == 0) 1388aba10e13SAlexander Motin continue; 13891ee1b687SJohn Baldwin if (ih->ih_filter == NULL) { 139082a5a275SAndriy Gapon thread = true; 13911ee1b687SJohn Baldwin continue; 13921ee1b687SJohn Baldwin } 13931ee1b687SJohn Baldwin CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__, 139438c35248SElliott Mitchell ih->ih_filter, ih->ih_argument, ih->ih_name); 13951ee1b687SJohn Baldwin ret = ih->ih_filter(ih->ih_argument); 13966fa041d7SWojciech Macek #ifdef HWPMC_HOOKS 13976fa041d7SWojciech Macek PMC_SOFT_CALL_TF( , , intr, all, frame); 13986fa041d7SWojciech Macek #endif 139989fc20ccSAndriy Gapon KASSERT(ret == FILTER_STRAY || 140089fc20ccSAndriy Gapon ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 && 140189fc20ccSAndriy Gapon (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0), 140289fc20ccSAndriy Gapon ("%s: incorrect return value %#x from %s", __func__, ret, 140389fc20ccSAndriy Gapon ih->ih_name)); 140482a5a275SAndriy Gapon filter = filter || ret == FILTER_HANDLED; 14056fa041d7SWojciech Macek #ifdef HWPMC_HOOKS 14066fa041d7SWojciech Macek if (ret & FILTER_SCHEDULE_THREAD) 14076fa041d7SWojciech Macek PMC_SOFT_CALL_TF( , , intr, ithread, frame); 14086fa041d7SWojciech Macek else if (ret & FILTER_HANDLED) 14096fa041d7SWojciech Macek PMC_SOFT_CALL_TF( , , intr, filter, frame); 14106fa041d7SWojciech Macek else if (ret == FILTER_STRAY) 14116fa041d7SWojciech Macek PMC_SOFT_CALL_TF( , , intr, stray, frame); 14126fa041d7SWojciech Macek #endif 141389fc20ccSAndriy Gapon 14141ee1b687SJohn Baldwin /* 14151ee1b687SJohn Baldwin * Wrapper handler special handling: 14161ee1b687SJohn Baldwin * 14171ee1b687SJohn 
Baldwin * in some particular cases (like pccard and pccbb), 14181ee1b687SJohn Baldwin * the _real_ device handler is wrapped in a couple of 14191ee1b687SJohn Baldwin * functions - a filter wrapper and an ithread wrapper. 14201ee1b687SJohn Baldwin * In this case (and just in this case), the filter wrapper 14211ee1b687SJohn Baldwin * could ask the system to schedule the ithread and mask 14221ee1b687SJohn Baldwin * the interrupt source if the wrapped handler is composed 14231ee1b687SJohn Baldwin * of just an ithread handler. 14241ee1b687SJohn Baldwin * 14251ee1b687SJohn Baldwin * TODO: write a generic wrapper to avoid people rolling 142682a5a275SAndriy Gapon * their own. 14271ee1b687SJohn Baldwin */ 14281ee1b687SJohn Baldwin if (!thread) { 14291ee1b687SJohn Baldwin if (ret == FILTER_SCHEDULE_THREAD) 143082a5a275SAndriy Gapon thread = true; 14311ee1b687SJohn Baldwin } 14321ee1b687SJohn Baldwin } 1433e0fa977eSAndriy Gapon atomic_add_rel_int(&ie->ie_active[phase], -1); 1434e0fa977eSAndriy Gapon 14351f255bd3SAlexander Motin td->td_intr_frame = oldframe; 14361ee1b687SJohn Baldwin 14371ee1b687SJohn Baldwin if (thread) { 14381ee1b687SJohn Baldwin if (ie->ie_pre_ithread != NULL) 14391ee1b687SJohn Baldwin ie->ie_pre_ithread(ie->ie_source); 14401ee1b687SJohn Baldwin } else { 14411ee1b687SJohn Baldwin if (ie->ie_post_filter != NULL) 14421ee1b687SJohn Baldwin ie->ie_post_filter(ie->ie_source); 14431ee1b687SJohn Baldwin } 14441ee1b687SJohn Baldwin 14451ee1b687SJohn Baldwin /* Schedule the ithread if needed. 
*/ 14461ee1b687SJohn Baldwin if (thread) { 1447ba3f7276SMatt Macy int error __unused; 1448ba3f7276SMatt Macy 14496fa041d7SWojciech Macek error = intr_event_schedule_thread(ie, frame); 14501ee1b687SJohn Baldwin KASSERT(error == 0, ("bad stray interrupt")); 14511ee1b687SJohn Baldwin } 14521ee1b687SJohn Baldwin critical_exit(); 14531ee1b687SJohn Baldwin td->td_intr_nesting_level--; 145482a5a275SAndriy Gapon #ifdef notyet 145582a5a275SAndriy Gapon /* The interrupt is not aknowledged by any filter and has no ithread. */ 145682a5a275SAndriy Gapon if (!thread && !filter) 145782a5a275SAndriy Gapon return (EINVAL); 145882a5a275SAndriy Gapon #endif 14591ee1b687SJohn Baldwin return (0); 14601ee1b687SJohn Baldwin } 14611931cf94SJohn Baldwin 14628b201c42SJohn Baldwin #ifdef DDB 14638b201c42SJohn Baldwin /* 14648b201c42SJohn Baldwin * Dump details about an interrupt handler 14658b201c42SJohn Baldwin */ 14668b201c42SJohn Baldwin static void 1467e0f66ef8SJohn Baldwin db_dump_intrhand(struct intr_handler *ih) 14688b201c42SJohn Baldwin { 14698b201c42SJohn Baldwin int comma; 14708b201c42SJohn Baldwin 14718b201c42SJohn Baldwin db_printf("\t%-10s ", ih->ih_name); 14728b201c42SJohn Baldwin switch (ih->ih_pri) { 14738b201c42SJohn Baldwin case PI_REALTIME: 14748b201c42SJohn Baldwin db_printf("CLK "); 14758b201c42SJohn Baldwin break; 14762cf78708SJohn Baldwin case PI_INTR: 14772cf78708SJohn Baldwin db_printf("INTR"); 14788b201c42SJohn Baldwin break; 14798b201c42SJohn Baldwin default: 14808b201c42SJohn Baldwin if (ih->ih_pri >= PI_SOFT) 14818b201c42SJohn Baldwin db_printf("SWI "); 14828b201c42SJohn Baldwin else 14838b201c42SJohn Baldwin db_printf("%4u", ih->ih_pri); 14848b201c42SJohn Baldwin break; 14858b201c42SJohn Baldwin } 14868b201c42SJohn Baldwin db_printf(" "); 1487b887a155SKonstantin Belousov if (ih->ih_filter != NULL) { 1488b887a155SKonstantin Belousov db_printf("[F]"); 1489b887a155SKonstantin Belousov db_printsym((uintptr_t)ih->ih_filter, DB_STGY_PROC); 1490b887a155SKonstantin 
Belousov } 1491b887a155SKonstantin Belousov if (ih->ih_handler != NULL) { 1492b887a155SKonstantin Belousov if (ih->ih_filter != NULL) 1493b887a155SKonstantin Belousov db_printf(","); 1494b887a155SKonstantin Belousov db_printf("[H]"); 14958b201c42SJohn Baldwin db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC); 1496b887a155SKonstantin Belousov } 14978b201c42SJohn Baldwin db_printf("(%p)", ih->ih_argument); 14988b201c42SJohn Baldwin if (ih->ih_need || 1499ef544f63SPaolo Pisati (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD | 15008b201c42SJohn Baldwin IH_MPSAFE)) != 0) { 15018b201c42SJohn Baldwin db_printf(" {"); 15028b201c42SJohn Baldwin comma = 0; 15038b201c42SJohn Baldwin if (ih->ih_flags & IH_EXCLUSIVE) { 15048b201c42SJohn Baldwin if (comma) 15058b201c42SJohn Baldwin db_printf(", "); 15068b201c42SJohn Baldwin db_printf("EXCL"); 15078b201c42SJohn Baldwin comma = 1; 15088b201c42SJohn Baldwin } 15098b201c42SJohn Baldwin if (ih->ih_flags & IH_ENTROPY) { 15108b201c42SJohn Baldwin if (comma) 15118b201c42SJohn Baldwin db_printf(", "); 15128b201c42SJohn Baldwin db_printf("ENTROPY"); 15138b201c42SJohn Baldwin comma = 1; 15148b201c42SJohn Baldwin } 15158b201c42SJohn Baldwin if (ih->ih_flags & IH_DEAD) { 15168b201c42SJohn Baldwin if (comma) 15178b201c42SJohn Baldwin db_printf(", "); 15188b201c42SJohn Baldwin db_printf("DEAD"); 15198b201c42SJohn Baldwin comma = 1; 15208b201c42SJohn Baldwin } 15218b201c42SJohn Baldwin if (ih->ih_flags & IH_MPSAFE) { 15228b201c42SJohn Baldwin if (comma) 15238b201c42SJohn Baldwin db_printf(", "); 15248b201c42SJohn Baldwin db_printf("MPSAFE"); 15258b201c42SJohn Baldwin comma = 1; 15268b201c42SJohn Baldwin } 15278b201c42SJohn Baldwin if (ih->ih_need) { 15288b201c42SJohn Baldwin if (comma) 15298b201c42SJohn Baldwin db_printf(", "); 15308b201c42SJohn Baldwin db_printf("NEED"); 15318b201c42SJohn Baldwin } 15328b201c42SJohn Baldwin db_printf("}"); 15338b201c42SJohn Baldwin } 15348b201c42SJohn Baldwin db_printf("\n"); 15358b201c42SJohn Baldwin 
} 15368b201c42SJohn Baldwin 15378b201c42SJohn Baldwin /* 1538e0f66ef8SJohn Baldwin * Dump details about a event. 15398b201c42SJohn Baldwin */ 15408b201c42SJohn Baldwin void 1541e0f66ef8SJohn Baldwin db_dump_intr_event(struct intr_event *ie, int handlers) 15428b201c42SJohn Baldwin { 1543e0f66ef8SJohn Baldwin struct intr_handler *ih; 1544e0f66ef8SJohn Baldwin struct intr_thread *it; 15458b201c42SJohn Baldwin int comma; 15468b201c42SJohn Baldwin 1547e0f66ef8SJohn Baldwin db_printf("%s ", ie->ie_fullname); 1548e0f66ef8SJohn Baldwin it = ie->ie_thread; 1549e0f66ef8SJohn Baldwin if (it != NULL) 1550e0f66ef8SJohn Baldwin db_printf("(pid %d)", it->it_thread->td_proc->p_pid); 1551e0f66ef8SJohn Baldwin else 1552e0f66ef8SJohn Baldwin db_printf("(no thread)"); 1553c4eb6630SGleb Smirnoff if ((ie->ie_flags & (IE_SOFT | IE_ADDING_THREAD)) != 0 || 1554e0f66ef8SJohn Baldwin (it != NULL && it->it_need)) { 15558b201c42SJohn Baldwin db_printf(" {"); 15568b201c42SJohn Baldwin comma = 0; 1557e0f66ef8SJohn Baldwin if (ie->ie_flags & IE_SOFT) { 15588b201c42SJohn Baldwin db_printf("SOFT"); 15598b201c42SJohn Baldwin comma = 1; 15608b201c42SJohn Baldwin } 1561e0f66ef8SJohn Baldwin if (ie->ie_flags & IE_ADDING_THREAD) { 15628b201c42SJohn Baldwin if (comma) 15638b201c42SJohn Baldwin db_printf(", "); 1564e0f66ef8SJohn Baldwin db_printf("ADDING_THREAD"); 15658b201c42SJohn Baldwin comma = 1; 15668b201c42SJohn Baldwin } 1567e0f66ef8SJohn Baldwin if (it != NULL && it->it_need) { 15688b201c42SJohn Baldwin if (comma) 15698b201c42SJohn Baldwin db_printf(", "); 15708b201c42SJohn Baldwin db_printf("NEED"); 15718b201c42SJohn Baldwin } 15728b201c42SJohn Baldwin db_printf("}"); 15738b201c42SJohn Baldwin } 15748b201c42SJohn Baldwin db_printf("\n"); 15758b201c42SJohn Baldwin 15768b201c42SJohn Baldwin if (handlers) 1577111b043cSAndriy Gapon CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) 15788b201c42SJohn Baldwin db_dump_intrhand(ih); 15798b201c42SJohn Baldwin } 1580e0f66ef8SJohn Baldwin 1581e0f66ef8SJohn 
Baldwin /* 1582e0f66ef8SJohn Baldwin * Dump data about interrupt handlers 1583e0f66ef8SJohn Baldwin */ 1584c84c5e00SMitchell Horne DB_SHOW_COMMAND_FLAGS(intr, db_show_intr, DB_CMD_MEMSAFE) 1585e0f66ef8SJohn Baldwin { 1586e0f66ef8SJohn Baldwin struct intr_event *ie; 158719e9205aSJohn Baldwin int all, verbose; 1588e0f66ef8SJohn Baldwin 1589dc15eac0SEd Schouten verbose = strchr(modif, 'v') != NULL; 1590dc15eac0SEd Schouten all = strchr(modif, 'a') != NULL; 1591e0f66ef8SJohn Baldwin TAILQ_FOREACH(ie, &event_list, ie_list) { 1592111b043cSAndriy Gapon if (!all && CK_SLIST_EMPTY(&ie->ie_handlers)) 1593e0f66ef8SJohn Baldwin continue; 1594e0f66ef8SJohn Baldwin db_dump_intr_event(ie, verbose); 159519e9205aSJohn Baldwin if (db_pager_quit) 159619e9205aSJohn Baldwin break; 1597e0f66ef8SJohn Baldwin } 1598e0f66ef8SJohn Baldwin } 15998b201c42SJohn Baldwin #endif /* DDB */ 16008b201c42SJohn Baldwin 1601b4151f71SJohn Baldwin /* 16028088699fSJohn Baldwin * Start standard software interrupt threads 16031931cf94SJohn Baldwin */ 16041931cf94SJohn Baldwin static void 1605b4151f71SJohn Baldwin start_softintr(void *dummy) 16061931cf94SJohn Baldwin { 1607b4151f71SJohn Baldwin 1608aba10e13SAlexander Motin if (swi_add(&clk_intr_event, "clk", NULL, NULL, SWI_CLOCK, 1609aba10e13SAlexander Motin INTR_MPSAFE, NULL)) 1610aba10e13SAlexander Motin panic("died while creating clk swi ithread"); 16111931cf94SJohn Baldwin } 1612237fdd78SRobert Watson SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, 1613237fdd78SRobert Watson NULL); 16141931cf94SJohn Baldwin 1615d279178dSThomas Moestl /* 1616d279178dSThomas Moestl * Sysctls used by systat and others: hw.intrnames and hw.intrcnt. 1617d279178dSThomas Moestl * The data for this machine dependent, and the declarations are in machine 1618d279178dSThomas Moestl * dependent code. The layout of intrnames and intrcnt however is machine 1619d279178dSThomas Moestl * independent. 
1620d279178dSThomas Moestl * 1621d279178dSThomas Moestl * We do not know the length of intrcnt and intrnames at compile time, so 1622d279178dSThomas Moestl * calculate things at run time. 1623d279178dSThomas Moestl */ 1624d279178dSThomas Moestl static int 1625d279178dSThomas Moestl sysctl_intrnames(SYSCTL_HANDLER_ARGS) 1626d279178dSThomas Moestl { 1627521ea19dSAttilio Rao return (sysctl_handle_opaque(oidp, intrnames, sintrnames, req)); 1628d279178dSThomas Moestl } 1629d279178dSThomas Moestl 16307029da5cSPawel Biernacki SYSCTL_PROC(_hw, OID_AUTO, intrnames, 163167f508dbSAlexander Motin CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, 16327029da5cSPawel Biernacki sysctl_intrnames, "", 16337029da5cSPawel Biernacki "Interrupt Names"); 1634d279178dSThomas Moestl 1635d279178dSThomas Moestl static int 1636d279178dSThomas Moestl sysctl_intrcnt(SYSCTL_HANDLER_ARGS) 1637d279178dSThomas Moestl { 163885729c2cSJuli Mallett #ifdef SCTL_MASK32 163985729c2cSJuli Mallett uint32_t *intrcnt32; 164085729c2cSJuli Mallett unsigned i; 164185729c2cSJuli Mallett int error; 164285729c2cSJuli Mallett 164385729c2cSJuli Mallett if (req->flags & SCTL_MASK32) { 164485729c2cSJuli Mallett if (!req->oldptr) 164585729c2cSJuli Mallett return (sysctl_handle_opaque(oidp, NULL, sintrcnt / 2, req)); 164685729c2cSJuli Mallett intrcnt32 = malloc(sintrcnt / 2, M_TEMP, M_NOWAIT); 164785729c2cSJuli Mallett if (intrcnt32 == NULL) 164885729c2cSJuli Mallett return (ENOMEM); 164985729c2cSJuli Mallett for (i = 0; i < sintrcnt / sizeof (u_long); i++) 165085729c2cSJuli Mallett intrcnt32[i] = intrcnt[i]; 165185729c2cSJuli Mallett error = sysctl_handle_opaque(oidp, intrcnt32, sintrcnt / 2, req); 165285729c2cSJuli Mallett free(intrcnt32, M_TEMP); 165385729c2cSJuli Mallett return (error); 165485729c2cSJuli Mallett } 165585729c2cSJuli Mallett #endif 1656521ea19dSAttilio Rao return (sysctl_handle_opaque(oidp, intrcnt, sintrcnt, req)); 1657d279178dSThomas Moestl } 1658d279178dSThomas Moestl 16597029da5cSPawel 
Biernacki SYSCTL_PROC(_hw, OID_AUTO, intrcnt, 166067f508dbSAlexander Motin CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, 0, 16617029da5cSPawel Biernacki sysctl_intrcnt, "", 16627029da5cSPawel Biernacki "Interrupt Counts"); 16638b201c42SJohn Baldwin 16648b201c42SJohn Baldwin #ifdef DDB 16658b201c42SJohn Baldwin /* 16668b201c42SJohn Baldwin * DDB command to dump the interrupt statistics. 16678b201c42SJohn Baldwin */ 1668c84c5e00SMitchell Horne DB_SHOW_COMMAND_FLAGS(intrcnt, db_show_intrcnt, DB_CMD_MEMSAFE) 16698b201c42SJohn Baldwin { 16708b201c42SJohn Baldwin u_long *i; 16718b201c42SJohn Baldwin char *cp; 1672521ea19dSAttilio Rao u_int j; 16738b201c42SJohn Baldwin 16748b201c42SJohn Baldwin cp = intrnames; 1675521ea19dSAttilio Rao j = 0; 1676521ea19dSAttilio Rao for (i = intrcnt; j < (sintrcnt / sizeof(u_long)) && !db_pager_quit; 1677521ea19dSAttilio Rao i++, j++) { 16788b201c42SJohn Baldwin if (*cp == '\0') 16798b201c42SJohn Baldwin break; 16808b201c42SJohn Baldwin if (*i != 0) 16818b201c42SJohn Baldwin db_printf("%s\t%lu\n", cp, *i); 16828b201c42SJohn Baldwin cp += strlen(cp) + 1; 16838b201c42SJohn Baldwin } 16848b201c42SJohn Baldwin } 16858b201c42SJohn Baldwin #endif 1686