/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_kstack_usage_prof.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/epoch.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
	struct intr_event *it_event;
	struct thread *it_thread;	/* Kernel thread. */
	int	it_flags;		/* (j) IT_* flags. */
	int	it_need;		/* Needs service. */
};

/* Interrupt thread flags kept in it_flags */
#define	IT_DEAD		0x000001	/* Thread is waiting to exit. */
#define	IT_WAIT		0x000002	/* Thread is waiting for completion. */

struct intr_entropy {
	struct thread *td;
	uintptr_t event;
};

struct intr_event *tty_intr_event;
void	*vm_ih;
struct proc *intrproc;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static int intr_storm_threshold = 0;
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RWTUN,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
static int intr_epoch_batch = 1000;
SYSCTL_INT(_hw, OID_AUTO, intr_epoch_batch, CTLFLAG_RWTUN, &intr_epoch_batch,
    0, "Maximum interrupt handler executions without re-entering epoch(9)");
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);
static struct mtx event_lock;
MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);

static void	intr_event_update(struct intr_event *ie);
static int	intr_event_schedule_thread(struct intr_event *ie);
static struct intr_thread *ithread_create(const char *name);
static void	ithread_destroy(struct intr_thread *ithread);
static void	ithread_execute_handlers(struct proc *p,
		    struct intr_event *ie);
static void	ithread_loop(void *);
static void	ithread_update(struct intr_thread *ithd);
static void	start_softintr(void *);

/* Map an interrupt type to an ithread priority. */
u_char
intr_priority(enum intr_type flags)
{
	u_char pri;

	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
	switch (flags) {
	case INTR_TYPE_TTY:
		pri = PI_TTY;
		break;
	case INTR_TYPE_BIO:
		pri = PI_DISK;
		break;
	case INTR_TYPE_NET:
		pri = PI_NET;
		break;
	case INTR_TYPE_CAM:
		pri = PI_DISK;
		break;
	case INTR_TYPE_AV:
		pri = PI_AV;
		break;
	case INTR_TYPE_CLK:
		pri = PI_REALTIME;
		break;
	case INTR_TYPE_MISC:
		pri = PI_DULL;	/* don't care */
		break;
	default:
		/* We didn't specify an interrupt level. */
		panic("intr_priority: no interrupt type in flags");
	}

	return pri;
}

/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
	struct intr_event *ie;
	struct thread *td;
	u_char pri;

	ie = ithd->it_event;
	td = ithd->it_thread;
	mtx_assert(&ie->ie_lock, MA_OWNED);

	/* Determine the overall priority of this event. */
	if (CK_SLIST_EMPTY(&ie->ie_handlers))
		pri = PRI_MAX_ITHD;
	else
		pri = CK_SLIST_FIRST(&ie->ie_handlers)->ih_pri;

	/* Update name and priority. */
	strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
#ifdef KTR
	sched_clear_tdname(td);
#endif
	thread_lock(td);
	sched_prio(td, pri);
	thread_unlock(td);
}

/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
	struct intr_handler *ih;
	char *last;
	int missed, space;

	/* Start off with no entropy and just the name of the event. */
	mtx_assert(&ie->ie_lock, MA_OWNED);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	ie->ie_hflags = 0;
	missed = 0;
	space = 1;

	/* Run through all the handlers updating values. */
	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
		    sizeof(ie->ie_fullname)) {
			strcat(ie->ie_fullname, " ");
			strcat(ie->ie_fullname, ih->ih_name);
			space = 0;
		} else
			missed++;
		ie->ie_hflags |= ih->ih_flags;
	}

	/*
	 * If there is only one handler and its name is too long, just copy in
	 * as much of the end of the name (includes the unit number) as will
	 * fit.  Otherwise, we have multiple handlers and not all of the names
	 * will fit.  Add +'s to indicate missing names.  If we run out of room
	 * and still have +'s to add, change the last character from a + to a *.
	 */
	if (missed == 1 && space == 1) {
		ih = CK_SLIST_FIRST(&ie->ie_handlers);
		missed = strlen(ie->ie_fullname) + strlen(ih->ih_name) + 2 -
		    sizeof(ie->ie_fullname);
		strcat(ie->ie_fullname, (missed == 0) ? " " : "-");
		strcat(ie->ie_fullname, &ih->ih_name[missed]);
		missed = 0;
	}
	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
	while (missed-- > 0) {
		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
			if (*last == '+') {
				*last = '*';
				break;
			} else
				*last = '+';
		} else if (space) {
			strcat(ie->ie_fullname, " +");
			space = 0;
		} else
			strcat(ie->ie_fullname, "+");
	}

	/*
	 * If this event has an ithread, update its priority and
	 * name.
	 */
	if (ie->ie_thread != NULL)
		ithread_update(ie->ie_thread);
	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}

int
intr_event_create(struct intr_event **event, void *source, int flags, int irq,
    void (*pre_ithread)(void *), void (*post_ithread)(void *),
    void (*post_filter)(void *), int (*assign_cpu)(void *, int),
    const char *fmt, ...)
{
	struct intr_event *ie;
	va_list ap;

	/* The only valid flag during creation is IE_SOFT. */
	if ((flags & ~IE_SOFT) != 0)
		return (EINVAL);
	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
	ie->ie_source = source;
	ie->ie_pre_ithread = pre_ithread;
	ie->ie_post_ithread = post_ithread;
	ie->ie_post_filter = post_filter;
	ie->ie_assign_cpu = assign_cpu;
	ie->ie_flags = flags;
	ie->ie_irq = irq;
	ie->ie_cpu = NOCPU;
	CK_SLIST_INIT(&ie->ie_handlers);
	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

	va_start(ap, fmt);
	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
	va_end(ap);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	mtx_lock(&event_lock);
	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
	mtx_unlock(&event_lock);
	if (event != NULL)
		*event = ie;
	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
	return (0);
}

/*
 * Bind an interrupt event to the specified CPU.  Note that not all
 * platforms support binding an interrupt to a CPU.  For those
 * platforms this request will fail.  Using a cpu id of NOCPU unbinds
 * the interrupt event.
 */
static int
_intr_event_bind(struct intr_event *ie, int cpu, bool bindirq, bool bindithread)
{
	lwpid_t id;
	int error;

	/* Need a CPU to bind to. */
	if (cpu != NOCPU && CPU_ABSENT(cpu))
		return (EINVAL);

	if (ie->ie_assign_cpu == NULL)
		return (EOPNOTSUPP);

	error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
	if (error)
		return (error);

	/*
	 * If we have any ithreads try to set their mask first to verify
	 * permissions, etc.
	 */
	if (bindithread) {
		mtx_lock(&ie->ie_lock);
		if (ie->ie_thread != NULL) {
			id = ie->ie_thread->it_thread->td_tid;
			mtx_unlock(&ie->ie_lock);
			error = cpuset_setithread(id, cpu);
			if (error)
				return (error);
		} else
			mtx_unlock(&ie->ie_lock);
	}
	if (bindirq)
		error = ie->ie_assign_cpu(ie->ie_source, cpu);
	if (error) {
		if (bindithread) {
			mtx_lock(&ie->ie_lock);
			if (ie->ie_thread != NULL) {
				cpu = ie->ie_cpu;
				id = ie->ie_thread->it_thread->td_tid;
				mtx_unlock(&ie->ie_lock);
				(void)cpuset_setithread(id, cpu);
			} else
				mtx_unlock(&ie->ie_lock);
		}
		return (error);
	}

	if (bindirq) {
		mtx_lock(&ie->ie_lock);
		ie->ie_cpu = cpu;
		mtx_unlock(&ie->ie_lock);
	}

	return (error);
}

/*
 * Bind an interrupt event to the specified CPU.  For supported platforms, any
 * associated ithreads as well as the primary interrupt context will be bound
 * to the specified CPU.
 */
int
intr_event_bind(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, true, true));
}

/*
 * Bind an interrupt event to the specified CPU, but do not bind associated
 * ithreads.
 */
int
intr_event_bind_irqonly(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, true, false));
}

/*
 * Bind an interrupt event's ithread to the specified CPU.
 */
int
intr_event_bind_ithread(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, false, true));
}

/*
 * Bind an interrupt event's ithread to the specified cpuset.
 */
int
intr_event_bind_ithread_cpuset(struct intr_event *ie, cpuset_t *cs)
{
	lwpid_t id;

	mtx_lock(&ie->ie_lock);
	if (ie->ie_thread != NULL) {
		id = ie->ie_thread->it_thread->td_tid;
		mtx_unlock(&ie->ie_lock);
		return (cpuset_setthread(id, cs));
	} else {
		mtx_unlock(&ie->ie_lock);
	}
	return (ENODEV);
}

static struct intr_event *
intr_lookup(int irq)
{
	struct intr_event *ie;

	mtx_lock(&event_lock);
	TAILQ_FOREACH(ie, &event_list, ie_list)
		if (ie->ie_irq == irq &&
		    (ie->ie_flags & IE_SOFT) == 0 &&
		    CK_SLIST_FIRST(&ie->ie_handlers) != NULL)
			break;
	mtx_unlock(&event_lock);
	return (ie);
}

int
intr_setaffinity(int irq, int mode, void *m)
{
	struct intr_event *ie;
	cpuset_t *mask;
	int cpu, n;

	mask = m;
	cpu = NOCPU;
	/*
	 * If we're setting all cpus we can unbind.  Otherwise make sure
	 * only one cpu is in the set.
	 */
	if (CPU_CMP(cpuset_root, mask)) {
		for (n = 0; n < CPU_SETSIZE; n++) {
			if (!CPU_ISSET(n, mask))
				continue;
			if (cpu != NOCPU)
				return (EINVAL);
			cpu = n;
		}
	}
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);
	switch (mode) {
	case CPU_WHICH_IRQ:
		return (intr_event_bind(ie, cpu));
	case CPU_WHICH_INTRHANDLER:
		return (intr_event_bind_irqonly(ie, cpu));
	case CPU_WHICH_ITHREAD:
		return (intr_event_bind_ithread(ie, cpu));
	default:
		return (EINVAL);
	}
}

int
intr_getaffinity(int irq, int mode, void *m)
{
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	cpuset_t *mask;
	lwpid_t id;
	int error;

	mask = m;
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);

	error = 0;
	CPU_ZERO(mask);
	switch (mode) {
	case CPU_WHICH_IRQ:
	case CPU_WHICH_INTRHANDLER:
		mtx_lock(&ie->ie_lock);
		if (ie->ie_cpu == NOCPU)
			CPU_COPY(cpuset_root, mask);
		else
			CPU_SET(ie->ie_cpu, mask);
		mtx_unlock(&ie->ie_lock);
		break;
	case CPU_WHICH_ITHREAD:
		mtx_lock(&ie->ie_lock);
		if (ie->ie_thread == NULL) {
			mtx_unlock(&ie->ie_lock);
			CPU_COPY(cpuset_root, mask);
		} else {
			id = ie->ie_thread->it_thread->td_tid;
			mtx_unlock(&ie->ie_lock);
			error = cpuset_which(CPU_WHICH_TID, id, &p, &td, NULL);
			if (error != 0)
				return (error);
			CPU_COPY(&td->td_cpuset->cs_mask, mask);
			PROC_UNLOCK(p);
		}
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

int
intr_event_destroy(struct intr_event *ie)
{

	mtx_lock(&event_lock);
	mtx_lock(&ie->ie_lock);
	if (!CK_SLIST_EMPTY(&ie->ie_handlers)) {
		mtx_unlock(&ie->ie_lock);
		mtx_unlock(&event_lock);
		return (EBUSY);
	}
	TAILQ_REMOVE(&event_list, ie, ie_list);
#ifndef notyet
	if (ie->ie_thread != NULL) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	mtx_unlock(&event_lock);
	mtx_destroy(&ie->ie_lock);
	free(ie, M_ITHREAD);
	return (0);
}

static struct intr_thread *
ithread_create(const char *name)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
	    &td, RFSTOPPED | RFHIGHPID,
	    0, "intr", "%s", name);
	if (error)
		panic("kproc_create() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}

static void
ithread_destroy(struct intr_thread *ithread)
{
	struct thread *td;

	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
	td = ithread->it_thread;
	thread_lock(td);
	ithread->it_flags |= IT_DEAD;
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else
		thread_unlock(td);
}

int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_handler **prevptr;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;
	if (flags & INTR_TYPE_NET)
		ih->ih_flags |= IH_NET;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!CK_SLIST_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (CK_SLIST_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Create a thread if we need one. */
	while (ie->ie_thread == NULL && handler != NULL) {
		if (ie->ie_flags & IE_ADDING_THREAD)
			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
		else {
			ie->ie_flags |= IE_ADDING_THREAD;
			mtx_unlock(&ie->ie_lock);
			it = ithread_create("intr: newborn");
			mtx_lock(&ie->ie_lock);
			ie->ie_flags &= ~IE_ADDING_THREAD;
			ie->ie_thread = it;
			it->it_event = ie;
			ithread_update(it);
			wakeup(ie);
		}
	}

	/* Add the new handler to the event in priority order. */
	CK_SLIST_FOREACH_PREVPTR(temp_ih, prevptr, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	CK_SLIST_INSERT_PREVPTR(prevptr, temp_ih, ih, ih_next);

	intr_event_update(ie);

	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}

/*
 * Append a description preceded by a ':' to the name of the specified
 * interrupt handler.
 */
int
intr_event_describe_handler(struct intr_event *ie, void *cookie,
    const char *descr)
{
	struct intr_handler *ih;
	size_t space;
	char *start;

	mtx_lock(&ie->ie_lock);
#ifdef INVARIANTS
	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih == cookie)
			break;
	}
	if (ih == NULL) {
		mtx_unlock(&ie->ie_lock);
		panic("handler %p not found in interrupt event %p", cookie, ie);
	}
#endif
	ih = cookie;

	/*
	 * Look for an existing description by checking for an
	 * existing ":".  This assumes device names do not include
	 * colons.  If one is found, prepare to insert the new
	 * description at that point.  If one is not found, find the
	 * end of the name to use as the insertion point.
	 */
	start = strchr(ih->ih_name, ':');
	if (start == NULL)
		start = strchr(ih->ih_name, 0);

	/*
	 * See if there is enough remaining room in the string for the
	 * description + ":".  The "- 1" leaves room for the trailing
	 * '\0'.  The "+ 1" accounts for the colon.
	 */
	space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1;
	if (strlen(descr) + 1 > space) {
		mtx_unlock(&ie->ie_lock);
		return (ENOSPC);
	}

	/* Append a colon followed by the description. */
	*start = ':';
	strcpy(start + 1, descr);
	intr_event_update(ie);
	mtx_unlock(&ie->ie_lock);
	return (0);
}
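
/*
 * Illustrative note, not part of the original file: handler descriptions
 * normally reach intr_event_describe_handler() through bus_describe_intr(9),
 * e.g. a driver tagging one of its per-queue MSI-X handlers (the names below
 * are hypothetical):
 *
 *	bus_describe_intr(dev, res, cookie, "rxq%d", qid);
 *
 * which turns a handler name such as "em0" into "em0:rxq0" in interrupt
 * listings (e.g. vmstat -i).
 */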

/*
 * Return the ie_source field from the intr_event an intr_handler is
 * associated with.
 */
void *
intr_handler_source(void *cookie)
{
	struct intr_handler *ih;
	struct intr_event *ie;

	ih = (struct intr_handler *)cookie;
	if (ih == NULL)
		return (NULL);
	ie = ih->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    ih->ih_name));
	return (ie->ie_source);
}

/*
 * If intr_event_handle() is running in the ISR context at the time of the call,
 * then wait for it to complete.
 */
static void
intr_event_barrier(struct intr_event *ie)
{
	int phase;

	mtx_assert(&ie->ie_lock, MA_OWNED);
	phase = ie->ie_phase;

	/*
	 * Switch phase to direct future interrupts to the other active counter.
	 * Make sure that any preceding stores are visible before the switch.
	 */
	KASSERT(ie->ie_active[!phase] == 0, ("idle phase has activity"));
	atomic_store_rel_int(&ie->ie_phase, !phase);

	/*
	 * This code cooperates with wait-free iteration of ie_handlers
	 * in intr_event_handle.
	 * Make sure that the removal and the phase update are not reordered
	 * with the active count check.
	 * Note that no combination of acquire and release fences can provide
	 * that guarantee as Store->Load sequences can always be reordered.
	 */
	atomic_thread_fence_seq_cst();

	/*
	 * Now wait on the inactive phase.
	 * The acquire fence is needed so that all post-barrier accesses
	 * are after the check.
	 */
	while (ie->ie_active[phase] > 0)
		cpu_spinwait();
	atomic_thread_fence_acq();
}

static void
intr_handler_barrier(struct intr_handler *handler)
{
	struct intr_event *ie;

	ie = handler->ih_event;
	mtx_assert(&ie->ie_lock, MA_OWNED);
	KASSERT((handler->ih_flags & IH_DEAD) == 0,
	    ("update for a removed handler"));

	if (ie->ie_thread == NULL) {
		intr_event_barrier(ie);
		return;
	}
	if ((handler->ih_flags & IH_CHANGED) == 0) {
		handler->ih_flags |= IH_CHANGED;
		intr_event_schedule_thread(ie);
	}
	while ((handler->ih_flags & IH_CHANGED) != 0)
		msleep(handler, &ie->ie_lock, 0, "ih_barr", 0);
}

/*
 * Sleep until an ithread finishes executing an interrupt handler.
 *
 * XXX Doesn't currently handle interrupt filters or fast interrupt
 * handlers. This is intended for compatibility with linux drivers
 * only.  Do not use in BSD code.
 */
void
_intr_drain(int irq)
{
	struct intr_event *ie;
	struct intr_thread *ithd;
	struct thread *td;

	ie = intr_lookup(irq);
	if (ie == NULL)
		return;
	if (ie->ie_thread == NULL)
		return;
	ithd = ie->ie_thread;
	td = ithd->it_thread;
	/*
	 * We set the flag and wait for it to be cleared to avoid
	 * long delays with potentially busy interrupt handlers
	 * were we to only sample TD_AWAITING_INTR() every tick.
	 */
	thread_lock(td);
	if (!TD_AWAITING_INTR(td)) {
		ithd->it_flags |= IT_WAIT;
		while (ithd->it_flags & IT_WAIT) {
			thread_unlock(td);
			pause("idrain", 1);
			thread_lock(td);
		}
	}
	thread_unlock(td);
	return;
}

int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
	struct intr_handler *ih;
	struct intr_handler **prevptr;
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));

	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
	CK_SLIST_FOREACH_PREVPTR(ih, prevptr, &ie->ie_handlers, ih_next) {
		if (ih == handler)
			break;
	}
	if (ih == NULL) {
		panic("interrupt handler \"%s\" not found in "
		    "interrupt event \"%s\"", handler->ih_name, ie->ie_name);
	}

	/*
	 * If there is no ithread, then directly remove the handler.  Note that
	 * intr_event_handle() iterates ie_handlers in a lock-less fashion, so
	 * care needs to be taken to keep ie_handlers consistent and to free
	 * the removed handler only when ie_handlers is quiescent.
	 */
	if (ie->ie_thread == NULL) {
		CK_SLIST_REMOVE_PREVPTR(prevptr, ih, ih_next);
		intr_event_barrier(ie);
		intr_event_update(ie);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/*
	 * Let the interrupt thread do the job.
	 * The interrupt source is disabled when the interrupt thread is
	 * running, so it does not have to worry about interaction with
	 * intr_event_handle().
	 */
	KASSERT((handler->ih_flags & IH_DEAD) == 0,
	    ("duplicate handle remove"));
	handler->ih_flags |= IH_DEAD;
	intr_event_schedule_thread(ie);
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	intr_event_update(ie);

#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(4).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih->ih_handler != NULL) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}

int
intr_event_suspend_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	handler->ih_flags |= IH_SUSP;
	intr_handler_barrier(handler);
	mtx_unlock(&ie->ie_lock);
	return (0);
}

int
intr_event_resume_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));

	/*
	 * intr_handler_barrier() acts not only as a barrier,
	 * it also allows checking for any pending interrupts.
	 */
	mtx_lock(&ie->ie_lock);
	handler->ih_flags &= ~IH_SUSP;
	intr_handler_barrier(handler);
	mtx_unlock(&ie->ie_lock);
	return (0);
}

static int
intr_event_schedule_thread(struct intr_event *ie)
{
	struct intr_entropy entropy;
	struct intr_thread *it;
	struct thread *td;
	struct thread *ctd;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers) ||
	    ie->ie_thread == NULL)
		return (EINVAL);

	ctd = curthread;
	it = ie->ie_thread;
	td = it->it_thread;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (ie->ie_hflags & IH_ENTROPY) {
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest_queue(&entropy, sizeof(entropy), RANDOM_INTERRUPT);
	}

	KASSERT(td->td_proc != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 *
	 * Use store_rel to arrange that the store to ih_need in
	 * swi_sched() is before the store to it_need and prepare for
	 * transfer of this order to loads in the ithread.
	 */
	atomic_store_rel_int(&it->it_need, 1);
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__,
		    td->td_proc->p_pid, td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, td->td_proc->p_pid, td->td_name, it->it_need,
		    td->td_state);
		thread_unlock(td);
	}

	return (0);
}

/*
 * Allow interrupt event binding for software interrupt handlers -- a no-op,
 * since interrupts are generated in software rather than being directed by
 * a PIC.
 */
static int
swi_assign_cpu(void *arg, int cpu)
{

	return (0);
}

/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
    void *arg, int pri, enum intr_type flags, void **cookiep)
{
	struct intr_event *ie;
	int error;

	if (flags & INTR_ENTROPY)
		return (EINVAL);

	ie = (eventp != NULL) ? *eventp : NULL;

	if (ie != NULL) {
		if (!(ie->ie_flags & IE_SOFT))
			return (EINVAL);
	} else {
		error = intr_event_create(&ie, NULL, IE_SOFT, 0,
		    NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
		if (error)
			return (error);
		if (eventp != NULL)
			*eventp = ie;
	}
	error = intr_event_add_handler(ie, name, NULL, handler, arg,
	    PI_SWI(pri), flags, cookiep);
	return (error);
}
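
/*
 * Illustrative example, not part of the original file: a typical consumer
 * registers a software interrupt handler once and then schedules it as
 * needed.  The names "foo_ih" and "foo_intr" below are hypothetical:
 *
 *	static void *foo_ih;
 *
 *	static void
 *	foo_intr(void *arg)
 *	{
 *		... runs in the swi ithread; INTR_MPSAFE means no Giant ...
 *	}
 *
 *	swi_add(NULL, "foo", foo_intr, NULL, SWI_CLOCK, INTR_MPSAFE, &foo_ih);
 *	...
 *	swi_sched(foo_ih, 0);	(or SWI_DELAY to only mark the handler needed)
 */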

/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
	struct intr_handler *ih = (struct intr_handler *)cookie;
	struct intr_event *ie = ih->ih_event;
	struct intr_entropy entropy;
	int error __unused;

	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
	    ih->ih_need);

	entropy.event = (uintptr_t)ih;
	entropy.td = curthread;
	random_harvest_queue(&entropy, sizeof(entropy), RANDOM_SWI);

	/*
	 * Set ih_need for this handler so that if the ithread is already
	 * running it will execute this handler on the next pass.  Otherwise,
	 * it will execute it the next time it runs.
	 */
	ih->ih_need = 1;

	if (!(flags & SWI_DELAY)) {
		VM_CNT_INC(v_soft);
		error = intr_event_schedule_thread(ie);
		KASSERT(error == 0, ("stray software interrupt"));
	}
}

/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

	return (intr_event_remove_handler(cookie));
}

static void
intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
{
	struct intr_handler *ih, *ihn, *ihp;

	ihp = NULL;
	CK_SLIST_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
		/*
		 * If this handler is marked for death, remove it from
		 * the list of handlers and wake up the sleeper.
		 */
		if (ih->ih_flags & IH_DEAD) {
			mtx_lock(&ie->ie_lock);
			if (ihp == NULL)
				CK_SLIST_REMOVE_HEAD(&ie->ie_handlers, ih_next);
			else
				CK_SLIST_REMOVE_AFTER(ihp, ih_next);
			ih->ih_flags &= ~IH_DEAD;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
			continue;
		}

		/*
		 * Now that we know that the current element won't be removed,
		 * update the previous element.
		 */
		ihp = ih;

		if ((ih->ih_flags & IH_CHANGED) != 0) {
			mtx_lock(&ie->ie_lock);
			ih->ih_flags &= ~IH_CHANGED;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
		}

		/* Skip filter-only handlers. */
		if (ih->ih_handler == NULL)
			continue;

		/* Skip suspended handlers. */
		if ((ih->ih_flags & IH_SUSP) != 0)
			continue;

		/*
		 * For software interrupt threads, we only execute
		 * handlers that have their need flag set.  Hardware
		 * interrupt threads always invoke all of their handlers.
		 *
		 * ih_need can only be 0 or 1.  Failed cmpset below
		 * means that there is no request to execute handlers,
		 * so a retry of the cmpset is not needed.
		 */
		if ((ie->ie_flags & IE_SOFT) != 0 &&
		    atomic_cmpset_int(&ih->ih_need, 1, 0) == 0)
			continue;

		/* Execute this handler. */
		CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
		    __func__, p->p_pid, (void *)ih->ih_handler,
		    ih->ih_argument, ih->ih_name, ih->ih_flags);

		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_lock(&Giant);
		ih->ih_handler(ih->ih_argument);
		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_unlock(&Giant);
	}
}

static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{

	/* Interrupt handlers should not sleep. */
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_NO_SLEEPING();
	intr_event_execute_handlers(p, ie);
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_SLEEPING_OK();

	/*
	 * Interrupt storm handling:
	 *
	 * If this interrupt source is currently storming, then throttle
	 * it to only fire the handler once per clock tick.
	 *
	 * If this interrupt source is not currently storming, but the
	 * number of back to back interrupts exceeds the storm threshold,
	 * then enter storming mode.
	 */
	if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
	    !(ie->ie_flags & IE_SOFT)) {
		/* Report the message only once every second. */
		if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
			printf(
	"interrupt storm detected on \"%s\"; throttling interrupt source\n",
			    ie->ie_name);
		}
		pause("istorm", 1);
	} else
		ie->ie_count++;

	/*
	 * Now that all the handlers have had a chance to run, reenable
	 * the interrupt source.
	 */
	if (ie->ie_post_ithread != NULL)
		ie->ie_post_ithread(ie->ie_source);
}

/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct epoch_tracker et;
	struct intr_thread *ithd;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	int wake, epoch_count;

	td = curthread;
	p = td->td_proc;
	ithd = (struct intr_thread *)arg;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;
	wake = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 *
		 * The load_acq part of the following cmpset ensures
		 * that the load of ih_need in ithread_execute_handlers()
		 * is ordered after the load of it_need here.
		 */
		if (ie->ie_hflags & IH_NET) {
			epoch_count = 0;
			NET_EPOCH_ENTER(et);
		}
		while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0) {
			ithread_execute_handlers(p, ie);
			if ((ie->ie_hflags & IH_NET) &&
			    ++epoch_count >= intr_epoch_batch) {
				NET_EPOCH_EXIT(et);
				epoch_count = 0;
				NET_EPOCH_ENTER(et);
			}
		}
		if (ie->ie_hflags & IH_NET)
			NET_EPOCH_EXIT(et);
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (atomic_load_acq_int(&ithd->it_need) == 0 &&
		    (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL | SWT_IWAIT);
		} else {
			if (ithd->it_flags & IT_WAIT) {
				wake = 1;
				ithd->it_flags &= ~IT_WAIT;
			}
			thread_unlock(td);
		}
		if (wake) {
			wakeup(ithd);
			wake = 0;
		}
	}
}

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:	the event connected to this interrupt.
 * o frame:	some archs (i.e. i386) pass a frame to some
 *		handlers as their main argument.
 * Return value:
 * o 0:		everything ok.
 * o EINVAL:	stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_handler *ih;
	struct trapframe *oldframe;
	struct thread *td;
	int phase;
	int ret;
	bool filter, thread;

	td = curthread;

#ifdef KSTACK_USAGE_PROF
	intr_prof_stack_use(td, frame);
#endif

	/* An interrupt with no event or handlers is a stray interrupt. */
	if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	/*
	 * Execute fast interrupt handlers directly.
	 * To support clock handlers, if a handler registers
	 * with a NULL argument, then we pass it a pointer to
	 * a trapframe as its argument.
	 */
	td->td_intr_nesting_level++;
	filter = false;
	thread = false;
	ret = 0;
	critical_enter();
	oldframe = td->td_intr_frame;
	td->td_intr_frame = frame;

	phase = ie->ie_phase;
	atomic_add_int(&ie->ie_active[phase], 1);

	/*
	 * This fence is required to ensure that no later loads are
	 * re-ordered before the ie_active store.
	 */
	atomic_thread_fence_seq_cst();

	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if ((ih->ih_flags & IH_SUSP) != 0)
			continue;
		if (ih->ih_filter == NULL) {
			thread = true;
			continue;
		}
		CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
		    ih->ih_filter, ih->ih_argument == NULL ? frame :
		    ih->ih_argument, ih->ih_name);
		if (ih->ih_argument == NULL)
			ret = ih->ih_filter(frame);
		else
			ret = ih->ih_filter(ih->ih_argument);
		KASSERT(ret == FILTER_STRAY ||
		    ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
		    (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
		    ("%s: incorrect return value %#x from %s", __func__, ret,
		    ih->ih_name));
		filter = filter || ret == FILTER_HANDLED;

		/*
		 * Wrapper handler special handling:
		 *
		 * in some particular cases (like pccard and pccbb),
		 * the _real_ device handler is wrapped in a couple of
		 * functions - a filter wrapper and an ithread wrapper.
		 * In this case (and just in this case), the filter wrapper
		 * could ask the system to schedule the ithread and mask
		 * the interrupt source if the wrapped handler is composed
		 * of just an ithread handler.
		 *
		 * TODO: write a generic wrapper to avoid people rolling
		 * their own.
		 */
1376 */ 1377 if (!thread) { 1378 if (ret == FILTER_SCHEDULE_THREAD) 1379 thread = true; 1380 } 1381 } 1382 atomic_add_rel_int(&ie->ie_active[phase], -1); 1383 1384 td->td_intr_frame = oldframe; 1385 1386 if (thread) { 1387 if (ie->ie_pre_ithread != NULL) 1388 ie->ie_pre_ithread(ie->ie_source); 1389 } else { 1390 if (ie->ie_post_filter != NULL) 1391 ie->ie_post_filter(ie->ie_source); 1392 } 1393 1394 /* Schedule the ithread if needed. */ 1395 if (thread) { 1396 int error __unused; 1397 1398 error = intr_event_schedule_thread(ie); 1399 KASSERT(error == 0, ("bad stray interrupt")); 1400 } 1401 critical_exit(); 1402 td->td_intr_nesting_level--; 1403 #ifdef notyet 1404 /* The interrupt is not aknowledged by any filter and has no ithread. */ 1405 if (!thread && !filter) 1406 return (EINVAL); 1407 #endif 1408 return (0); 1409 } 1410 1411 #ifdef DDB 1412 /* 1413 * Dump details about an interrupt handler 1414 */ 1415 static void 1416 db_dump_intrhand(struct intr_handler *ih) 1417 { 1418 int comma; 1419 1420 db_printf("\t%-10s ", ih->ih_name); 1421 switch (ih->ih_pri) { 1422 case PI_REALTIME: 1423 db_printf("CLK "); 1424 break; 1425 case PI_AV: 1426 db_printf("AV "); 1427 break; 1428 case PI_TTY: 1429 db_printf("TTY "); 1430 break; 1431 case PI_NET: 1432 db_printf("NET "); 1433 break; 1434 case PI_DISK: 1435 db_printf("DISK"); 1436 break; 1437 case PI_DULL: 1438 db_printf("DULL"); 1439 break; 1440 default: 1441 if (ih->ih_pri >= PI_SOFT) 1442 db_printf("SWI "); 1443 else 1444 db_printf("%4u", ih->ih_pri); 1445 break; 1446 } 1447 db_printf(" "); 1448 if (ih->ih_filter != NULL) { 1449 db_printf("[F]"); 1450 db_printsym((uintptr_t)ih->ih_filter, DB_STGY_PROC); 1451 } 1452 if (ih->ih_handler != NULL) { 1453 if (ih->ih_filter != NULL) 1454 db_printf(","); 1455 db_printf("[H]"); 1456 db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC); 1457 } 1458 db_printf("(%p)", ih->ih_argument); 1459 if (ih->ih_need || 1460 (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD | 1461 IH_MPSAFE)) != 0) { 1462 db_printf(" {"); 1463 comma = 0; 1464 if (ih->ih_flags & IH_EXCLUSIVE) { 1465 if (comma) 1466 db_printf(", "); 1467 db_printf("EXCL"); 1468 comma = 1; 1469 } 1470 if (ih->ih_flags & IH_ENTROPY) { 1471 if (comma) 1472 db_printf(", "); 1473 db_printf("ENTROPY"); 1474 comma = 1; 1475 } 1476 if (ih->ih_flags & IH_DEAD) { 1477 if (comma) 1478 db_printf(", "); 1479 db_printf("DEAD"); 1480 comma = 1; 1481 } 1482 if (ih->ih_flags & IH_MPSAFE) { 1483 if (comma) 1484 db_printf(", "); 1485 db_printf("MPSAFE"); 1486 comma = 1; 1487 } 1488 if (ih->ih_need) { 1489 if (comma) 1490 db_printf(", "); 1491 db_printf("NEED"); 1492 } 1493 db_printf("}"); 1494 } 1495 db_printf("\n"); 1496 } 1497 1498 /* 1499 * Dump details about a event. 
void
db_dump_intr_event(struct intr_event *ie, int handlers)
{
	struct intr_handler *ih;
	struct intr_thread *it;
	int comma;

	db_printf("%s ", ie->ie_fullname);
	it = ie->ie_thread;
	if (it != NULL)
		db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
	else
		db_printf("(no thread)");
	if ((ie->ie_flags & (IE_SOFT | IE_ADDING_THREAD)) != 0 ||
	    (it != NULL && it->it_need)) {
		db_printf(" {");
		comma = 0;
		if (ie->ie_flags & IE_SOFT) {
			db_printf("SOFT");
			comma = 1;
		}
		if (ie->ie_flags & IE_ADDING_THREAD) {
			if (comma)
				db_printf(", ");
			db_printf("ADDING_THREAD");
			comma = 1;
		}
		if (it != NULL && it->it_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");

	if (handlers)
		CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next)
			db_dump_intrhand(ih);
}

/*
 * Dump data about interrupt handlers
 */
DB_SHOW_COMMAND(intr, db_show_intr)
{
	struct intr_event *ie;
	int all, verbose;

	verbose = strchr(modif, 'v') != NULL;
	all = strchr(modif, 'a') != NULL;
	TAILQ_FOREACH(ie, &event_list, ie_list) {
		if (!all && CK_SLIST_EMPTY(&ie->ie_handlers))
			continue;
		db_dump_intr_event(ie, verbose);
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */

/*
 * Start standard software interrupt threads
 */
static void
start_softintr(void *dummy)
{

	if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
		panic("died while creating vm swi ithread");
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
    NULL);

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however is
 * machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrnames, sintrnames, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
#ifdef SCTL_MASK32
	uint32_t *intrcnt32;
	unsigned i;
	int error;

	if (req->flags & SCTL_MASK32) {
		if (!req->oldptr)
			return (sysctl_handle_opaque(oidp, NULL, sintrcnt / 2, req));
		intrcnt32 = malloc(sintrcnt / 2, M_TEMP, M_NOWAIT);
		if (intrcnt32 == NULL)
			return (ENOMEM);
		for (i = 0; i < sintrcnt / sizeof (u_long); i++)
			intrcnt32[i] = intrcnt[i];
		error = sysctl_handle_opaque(oidp, intrcnt32, sintrcnt / 2, req);
		free(intrcnt32, M_TEMP);
		return (error);
	}
#endif
	return (sysctl_handle_opaque(oidp, intrcnt, sintrcnt, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");

#ifdef DDB
/*
 * DDB command to dump the interrupt statistics.
 */
DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
{
	u_long *i;
	char *cp;
	u_int j;

	cp = intrnames;
	j = 0;
	for (i = intrcnt; j < (sintrcnt / sizeof(u_long)) && !db_pager_quit;
	    i++, j++) {
		if (*cp == '\0')
			break;
		if (*i != 0)
			db_printf("%s\t%lu\n", cp, *i);
		cp += strlen(cp) + 1;
	}
}
#endif
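
/*
 * Illustrative note, not part of the original file: the hw.intrnames and
 * hw.intrcnt sysctls exported above feed userland tools such as vmstat(8),
 * e.g.:
 *
 *	vmstat -i
 *
 * intrnames is a packed sequence of '\0'-terminated strings and intrcnt is
 * a parallel array of u_long counters; both are laid out by machine
 * dependent code, so only their sizes (sintrnames/sintrcnt) are known here.
 */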