/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_kstack_usage_prof.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
	struct intr_event *it_event;
	struct thread *it_thread;	/* Kernel thread. */
	int	it_flags;		/* (j) IT_* flags. */
	int	it_need;		/* Needs service. */
};

/* Interrupt thread flags kept in it_flags */
#define	IT_DEAD		0x000001	/* Thread is waiting to exit. */
#define	IT_WAIT		0x000002	/* Thread is waiting for completion. */

struct intr_entropy {
	struct thread *td;
	uintptr_t event;
};

struct intr_event *clk_intr_event;
struct intr_event *tty_intr_event;
void	*vm_ih;
struct proc *intrproc;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static int intr_storm_threshold = 1000;
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RWTUN,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);
static struct mtx event_lock;
MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);

static void	intr_event_update(struct intr_event *ie);
static int	intr_event_schedule_thread(struct intr_event *ie);
static struct intr_thread *ithread_create(const char *name);
static void	ithread_destroy(struct intr_thread *ithread);
static void	ithread_execute_handlers(struct proc *p,
		    struct intr_event *ie);
static void	ithread_loop(void *);
static void	ithread_update(struct intr_thread *ithd);
static void	start_softintr(void *);

/* Map an interrupt type to an ithread priority. */
u_char
intr_priority(enum intr_type flags)
{
	u_char pri;

	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
	switch (flags) {
	case INTR_TYPE_TTY:
		pri = PI_TTY;
		break;
	case INTR_TYPE_BIO:
		pri = PI_DISK;
		break;
	case INTR_TYPE_NET:
		pri = PI_NET;
		break;
	case INTR_TYPE_CAM:
		pri = PI_DISK;
		break;
	case INTR_TYPE_AV:
		pri = PI_AV;
		break;
	case INTR_TYPE_CLK:
		pri = PI_REALTIME;
		break;
	case INTR_TYPE_MISC:
		pri = PI_DULL;		/* don't care */
		break;
	default:
		/* We didn't specify an interrupt level. */
		panic("intr_priority: no interrupt type in flags");
	}

	return pri;
}

/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
	struct intr_event *ie;
	struct thread *td;
	u_char pri;

	ie = ithd->it_event;
	td = ithd->it_thread;
	mtx_assert(&ie->ie_lock, MA_OWNED);

	/* Determine the overall priority of this event. */
	if (CK_SLIST_EMPTY(&ie->ie_handlers))
		pri = PRI_MAX_ITHD;
	else
		pri = CK_SLIST_FIRST(&ie->ie_handlers)->ih_pri;

	/* Update name and priority. */
	strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
#ifdef KTR
	sched_clear_tdname(td);
#endif
	thread_lock(td);
	sched_prio(td, pri);
	thread_unlock(td);
}

/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
	struct intr_handler *ih;
	char *last;
	int missed, space;

	/* Start off with no entropy and just the name of the event. */
	mtx_assert(&ie->ie_lock, MA_OWNED);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	ie->ie_flags &= ~IE_ENTROPY;
	missed = 0;
	space = 1;

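	/*
	 * Example (illustrative; the names are made up): for an event named
	 * "irq16:" with handlers "em0" and "ahci0", the rebuilt full name is
	 * "irq16: em0 ahci0".  Handler names that do not fit contribute a
	 * trailing '+', and the last '+' becomes '*' once even that no
	 * longer fits -- see the overflow handling below.
	 */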
	/* Run through all the handlers updating values. */
	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
		    sizeof(ie->ie_fullname)) {
			strcat(ie->ie_fullname, " ");
			strcat(ie->ie_fullname, ih->ih_name);
			space = 0;
		} else
			missed++;
		if (ih->ih_flags & IH_ENTROPY)
			ie->ie_flags |= IE_ENTROPY;
	}

	/*
	 * If the handler names were too long, add +'s to indicate missing
	 * names.  If we run out of room and still have +'s to add, change
	 * the last character from a + to a *.
	 */
	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
	while (missed-- > 0) {
		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
			if (*last == '+') {
				*last = '*';
				break;
			} else
				*last = '+';
		} else if (space) {
			strcat(ie->ie_fullname, " +");
			space = 0;
		} else
			strcat(ie->ie_fullname, "+");
	}

	/*
	 * If this event has an ithread, update its priority and
	 * name.
	 */
	if (ie->ie_thread != NULL)
		ithread_update(ie->ie_thread);
	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}

int
intr_event_create(struct intr_event **event, void *source, int flags, int irq,
    void (*pre_ithread)(void *), void (*post_ithread)(void *),
    void (*post_filter)(void *), int (*assign_cpu)(void *, int),
    const char *fmt, ...)
{
	struct intr_event *ie;
	va_list ap;

	/* The only valid flag during creation is IE_SOFT. */
	if ((flags & ~IE_SOFT) != 0)
		return (EINVAL);
	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
	ie->ie_source = source;
	ie->ie_pre_ithread = pre_ithread;
	ie->ie_post_ithread = post_ithread;
	ie->ie_post_filter = post_filter;
	ie->ie_assign_cpu = assign_cpu;
	ie->ie_flags = flags;
	ie->ie_irq = irq;
	ie->ie_cpu = NOCPU;
	CK_SLIST_INIT(&ie->ie_handlers);
	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

	va_start(ap, fmt);
	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
	va_end(ap);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	mtx_lock(&event_lock);
	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
	mtx_unlock(&event_lock);
	if (event != NULL)
		*event = ie;
	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
	return (0);
}

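/*
 * Example (sketch): machine-dependent interrupt code typically creates one
 * event per interrupt source, roughly as below; the callback names and the
 * "irq%d:" format are illustrative, not mandated by this interface:
 *
 *	error = intr_event_create(&ie, isrc, 0, irq,
 *	    my_pre_ithread, my_post_ithread, my_post_filter,
 *	    my_assign_cpu, "irq%d:", irq);
 */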
/*
 * Bind an interrupt event to the specified CPU.  Note that not all
 * platforms support binding an interrupt to a CPU.  For those
 * platforms this request will fail.  Using a cpu id of NOCPU unbinds
 * the interrupt event.
 */
static int
_intr_event_bind(struct intr_event *ie, int cpu, bool bindirq, bool bindithread)
{
	lwpid_t id;
	int error;

	/* Need a CPU to bind to. */
	if (cpu != NOCPU && CPU_ABSENT(cpu))
		return (EINVAL);

	if (ie->ie_assign_cpu == NULL)
		return (EOPNOTSUPP);

	error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
	if (error)
		return (error);

	/*
	 * If we have any ithreads try to set their mask first to verify
	 * permissions, etc.
	 */
	if (bindithread) {
		mtx_lock(&ie->ie_lock);
		if (ie->ie_thread != NULL) {
			id = ie->ie_thread->it_thread->td_tid;
			mtx_unlock(&ie->ie_lock);
			error = cpuset_setithread(id, cpu);
			if (error)
				return (error);
		} else
			mtx_unlock(&ie->ie_lock);
	}
	if (bindirq)
		error = ie->ie_assign_cpu(ie->ie_source, cpu);
	if (error) {
		if (bindithread) {
			mtx_lock(&ie->ie_lock);
			if (ie->ie_thread != NULL) {
				cpu = ie->ie_cpu;
				id = ie->ie_thread->it_thread->td_tid;
				mtx_unlock(&ie->ie_lock);
				(void)cpuset_setithread(id, cpu);
			} else
				mtx_unlock(&ie->ie_lock);
		}
		return (error);
	}

	if (bindirq) {
		mtx_lock(&ie->ie_lock);
		ie->ie_cpu = cpu;
		mtx_unlock(&ie->ie_lock);
	}

	return (error);
}

/*
 * Bind an interrupt event to the specified CPU.  For supported platforms, any
 * associated ithreads as well as the primary interrupt context will be bound
 * to the specified CPU.
 */
int
intr_event_bind(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, true, true));
}

/*
 * Bind an interrupt event to the specified CPU, but do not bind associated
 * ithreads.
 */
int
intr_event_bind_irqonly(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, true, false));
}

/*
 * Bind an interrupt event's ithread to the specified CPU.
 */
int
intr_event_bind_ithread(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, false, true));
}

static struct intr_event *
intr_lookup(int irq)
{
	struct intr_event *ie;

	mtx_lock(&event_lock);
	TAILQ_FOREACH(ie, &event_list, ie_list)
		if (ie->ie_irq == irq &&
		    (ie->ie_flags & IE_SOFT) == 0 &&
		    CK_SLIST_FIRST(&ie->ie_handlers) != NULL)
			break;
	mtx_unlock(&event_lock);
	return (ie);
}

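/*
 * Example (sketch): the cpuset(2)/cpuset(1) machinery binds an IRQ by
 * building a single-CPU mask and passing it to intr_setaffinity(); the IRQ
 * and CPU numbers below are illustrative:
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(2, &mask);
 *	error = intr_setaffinity(16, CPU_WHICH_IRQ, &mask);
 */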
int
intr_setaffinity(int irq, int mode, void *m)
{
	struct intr_event *ie;
	cpuset_t *mask;
	int cpu, n;

	mask = m;
	cpu = NOCPU;
	/*
	 * If we're setting all cpus we can unbind.  Otherwise make sure
	 * only one cpu is in the set.
	 */
	if (CPU_CMP(cpuset_root, mask)) {
		for (n = 0; n < CPU_SETSIZE; n++) {
			if (!CPU_ISSET(n, mask))
				continue;
			if (cpu != NOCPU)
				return (EINVAL);
			cpu = n;
		}
	}
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);
	switch (mode) {
	case CPU_WHICH_IRQ:
		return (intr_event_bind(ie, cpu));
	case CPU_WHICH_INTRHANDLER:
		return (intr_event_bind_irqonly(ie, cpu));
	case CPU_WHICH_ITHREAD:
		return (intr_event_bind_ithread(ie, cpu));
	default:
		return (EINVAL);
	}
}

int
intr_getaffinity(int irq, int mode, void *m)
{
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	cpuset_t *mask;
	lwpid_t id;
	int error;

	mask = m;
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);

	error = 0;
	CPU_ZERO(mask);
	switch (mode) {
	case CPU_WHICH_IRQ:
	case CPU_WHICH_INTRHANDLER:
		mtx_lock(&ie->ie_lock);
		if (ie->ie_cpu == NOCPU)
			CPU_COPY(cpuset_root, mask);
		else
			CPU_SET(ie->ie_cpu, mask);
		mtx_unlock(&ie->ie_lock);
		break;
	case CPU_WHICH_ITHREAD:
		mtx_lock(&ie->ie_lock);
		if (ie->ie_thread == NULL) {
			mtx_unlock(&ie->ie_lock);
			CPU_COPY(cpuset_root, mask);
		} else {
			id = ie->ie_thread->it_thread->td_tid;
			mtx_unlock(&ie->ie_lock);
			error = cpuset_which(CPU_WHICH_TID, id, &p, &td, NULL);
			if (error != 0)
				return (error);
			CPU_COPY(&td->td_cpuset->cs_mask, mask);
			PROC_UNLOCK(p);
		}
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

int
intr_event_destroy(struct intr_event *ie)
{

	mtx_lock(&event_lock);
	mtx_lock(&ie->ie_lock);
	if (!CK_SLIST_EMPTY(&ie->ie_handlers)) {
		mtx_unlock(&ie->ie_lock);
		mtx_unlock(&event_lock);
		return (EBUSY);
	}
	TAILQ_REMOVE(&event_list, ie, ie_list);
#ifndef notyet
	if (ie->ie_thread != NULL) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	mtx_unlock(&event_lock);
	mtx_destroy(&ie->ie_lock);
	free(ie, M_ITHREAD);
	return (0);
}

static struct intr_thread *
ithread_create(const char *name)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
	    &td, RFSTOPPED | RFHIGHPID,
	    0, "intr", "%s", name);
	if (error)
		panic("kproc_create() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}

static void
ithread_destroy(struct intr_thread *ithread)
{
	struct thread *td;

	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
	td = ithread->it_thread;
	thread_lock(td);
	ithread->it_flags |= IT_DEAD;
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	}
	thread_unlock(td);
}

int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_handler **prevptr;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!CK_SLIST_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (CK_SLIST_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Create a thread if we need one. */
	while (ie->ie_thread == NULL && handler != NULL) {
		if (ie->ie_flags & IE_ADDING_THREAD)
			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
		else {
			ie->ie_flags |= IE_ADDING_THREAD;
			mtx_unlock(&ie->ie_lock);
			it = ithread_create("intr: newborn");
			mtx_lock(&ie->ie_lock);
			ie->ie_flags &= ~IE_ADDING_THREAD;
			ie->ie_thread = it;
			it->it_event = ie;
			ithread_update(it);
			wakeup(ie);
		}
	}

	/* Add the new handler to the event in priority order. */
	CK_SLIST_FOREACH_PREVPTR(temp_ih, prevptr, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	CK_SLIST_INSERT_PREVPTR(prevptr, temp_ih, ih, ih_next);

	intr_event_update(ie);

	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}

/*
 * Append a description preceded by a ':' to the name of the specified
 * interrupt handler.
 */
int
intr_event_describe_handler(struct intr_event *ie, void *cookie,
    const char *descr)
{
	struct intr_handler *ih;
	size_t space;
	char *start;

	mtx_lock(&ie->ie_lock);
#ifdef INVARIANTS
	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih == cookie)
			break;
	}
	if (ih == NULL) {
		mtx_unlock(&ie->ie_lock);
		panic("handler %p not found in interrupt event %p", cookie, ie);
	}
#endif
	ih = cookie;

	/*
	 * Look for an existing description by checking for an
	 * existing ":".  This assumes device names do not include
	 * colons.  If one is found, prepare to insert the new
	 * description at that point.  If one is not found, find the
	 * end of the name to use as the insertion point.
	 */
	start = strchr(ih->ih_name, ':');
	if (start == NULL)
		start = strchr(ih->ih_name, 0);

	/*
	 * See if there is enough remaining room in the string for the
	 * description + ":".  The "- 1" leaves room for the trailing
	 * '\0'.  The "+ 1" accounts for the colon.
	 */
	space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1;
	if (strlen(descr) + 1 > space) {
		mtx_unlock(&ie->ie_lock);
		return (ENOSPC);
	}

	/* Append a colon followed by the description. */
	*start = ':';
	strcpy(start + 1, descr);
	intr_event_update(ie);
	mtx_unlock(&ie->ie_lock);
	return (0);
}

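/*
 * Example (sketch): a machine-dependent interrupt setup path registers a
 * driver's filter and/or ithread handler roughly as below; the device name,
 * callbacks and queue description are illustrative only:
 *
 *	error = intr_event_add_handler(ie, device_get_nameunit(dev),
 *	    my_filter, my_ithread_handler, sc,
 *	    intr_priority(INTR_TYPE_NET), INTR_TYPE_NET | INTR_MPSAFE,
 *	    &cookie);
 *	if (error == 0)
 *		error = intr_event_describe_handler(ie, cookie, "rxq0");
 */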
/*
 * Return the ie_source field from the intr_event an intr_handler is
 * associated with.
 */
void *
intr_handler_source(void *cookie)
{
	struct intr_handler *ih;
	struct intr_event *ie;

	ih = (struct intr_handler *)cookie;
	if (ih == NULL)
		return (NULL);
	ie = ih->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    ih->ih_name));
	return (ie->ie_source);
}

/*
 * If intr_event_handle() is running in the ISR context at the time of the call,
 * then wait for it to complete.
 */
static void
intr_event_barrier(struct intr_event *ie)
{
	int phase;

	mtx_assert(&ie->ie_lock, MA_OWNED);
	phase = ie->ie_phase;

	/*
	 * Switch phase to direct future interrupts to the other active counter.
	 * Make sure that any preceding stores are visible before the switch.
	 */
	KASSERT(ie->ie_active[!phase] == 0, ("idle phase has activity"));
	atomic_store_rel_int(&ie->ie_phase, !phase);

	/*
	 * This code cooperates with wait-free iteration of ie_handlers
	 * in intr_event_handle.
	 * Make sure that the removal and the phase update are not reordered
	 * with the active count check.
	 * Note that no combination of acquire and release fences can provide
	 * that guarantee as Store->Load sequences can always be reordered.
	 */
	atomic_thread_fence_seq_cst();

	/*
	 * Now wait on the inactive phase.
	 * The acquire fence is needed so that all post-barrier accesses
	 * are after the check.
	 */
	while (ie->ie_active[phase] > 0)
		cpu_spinwait();
	atomic_thread_fence_acq();
}

static void
intr_handler_barrier(struct intr_handler *handler)
{
	struct intr_event *ie;

	ie = handler->ih_event;
	mtx_assert(&ie->ie_lock, MA_OWNED);
	KASSERT((handler->ih_flags & IH_DEAD) == 0,
	    ("update for a removed handler"));

	if (ie->ie_thread == NULL) {
		intr_event_barrier(ie);
		return;
	}
	if ((handler->ih_flags & IH_CHANGED) == 0) {
		handler->ih_flags |= IH_CHANGED;
		intr_event_schedule_thread(ie);
	}
	while ((handler->ih_flags & IH_CHANGED) != 0)
		msleep(handler, &ie->ie_lock, 0, "ih_barr", 0);
}

/*
 * Sleep until an ithread finishes executing an interrupt handler.
 *
 * XXX Doesn't currently handle interrupt filters or fast interrupt
 * handlers.  This is intended for compatibility with linux drivers
 * only.  Do not use in BSD code.
 */
void
_intr_drain(int irq)
{
	struct intr_event *ie;
	struct intr_thread *ithd;
	struct thread *td;

	ie = intr_lookup(irq);
	if (ie == NULL)
		return;
	if (ie->ie_thread == NULL)
		return;
	ithd = ie->ie_thread;
	td = ithd->it_thread;
	/*
	 * We set the flag and wait for it to be cleared to avoid
	 * long delays with potentially busy interrupt handlers
	 * were we to only sample TD_AWAITING_INTR() every tick.
	 */
	thread_lock(td);
	if (!TD_AWAITING_INTR(td)) {
		ithd->it_flags |= IT_WAIT;
		while (ithd->it_flags & IT_WAIT) {
			thread_unlock(td);
			pause("idrain", 1);
			thread_lock(td);
		}
	}
	thread_unlock(td);
	return;
}

int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
	struct intr_handler *ih;
	struct intr_handler **prevptr;
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));

	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
	CK_SLIST_FOREACH_PREVPTR(ih, prevptr, &ie->ie_handlers, ih_next) {
		if (ih == handler)
			break;
	}
	if (ih == NULL) {
		panic("interrupt handler \"%s\" not found in "
		    "interrupt event \"%s\"", handler->ih_name, ie->ie_name);
	}

	/*
	 * If there is no ithread, then directly remove the handler.  Note that
	 * intr_event_handle() iterates ie_handlers in a lock-less fashion, so
	 * care needs to be taken to keep ie_handlers consistent and to free
	 * the removed handler only when ie_handlers is quiescent.
	 */
	if (ie->ie_thread == NULL) {
		CK_SLIST_REMOVE_PREVPTR(prevptr, ih, ih_next);
		intr_event_barrier(ie);
		intr_event_update(ie);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/*
	 * Let the interrupt thread do the job.
	 * The interrupt source is disabled when the interrupt thread is
	 * running, so it does not have to worry about interaction with
	 * intr_event_handle().
	 */
	KASSERT((handler->ih_flags & IH_DEAD) == 0,
	    ("duplicate handle remove"));
	handler->ih_flags |= IH_DEAD;
	intr_event_schedule_thread(ie);
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	intr_event_update(ie);

#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(8).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih->ih_handler != NULL) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}

int
intr_event_suspend_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	handler->ih_flags |= IH_SUSP;
	intr_handler_barrier(handler);
	mtx_unlock(&ie->ie_lock);
	return (0);
}

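/*
 * Example (sketch): a driver can bracket an operation that must not race
 * with its interrupt handler, such as reprogramming the device, with the
 * suspend/resume pair; "cookie" is the value returned through cookiep by
 * intr_event_add_handler():
 *
 *	intr_event_suspend_handler(cookie);
 *	... reconfigure the hardware ...
 *	intr_event_resume_handler(cookie);
 */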
int
intr_event_resume_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));

	/*
	 * intr_handler_barrier() acts not only as a barrier,
	 * it also allows us to check for any pending interrupts.
	 */
	mtx_lock(&ie->ie_lock);
	handler->ih_flags &= ~IH_SUSP;
	intr_handler_barrier(handler);
	mtx_unlock(&ie->ie_lock);
	return (0);
}

static int
intr_event_schedule_thread(struct intr_event *ie)
{
	struct intr_entropy entropy;
	struct intr_thread *it;
	struct thread *td;
	struct thread *ctd;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers) ||
	    ie->ie_thread == NULL)
		return (EINVAL);

	ctd = curthread;
	it = ie->ie_thread;
	td = it->it_thread;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (ie->ie_flags & IE_ENTROPY) {
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest_queue(&entropy, sizeof(entropy), RANDOM_INTERRUPT);
	}

	KASSERT(td->td_proc != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 *
	 * Use store_rel to arrange that the store to ih_need in
	 * swi_sched() is before the store to it_need and prepare for
	 * transfer of this order to loads in the ithread.
	 */
	atomic_store_rel_int(&it->it_need, 1);
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, td->td_proc->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, td->td_proc->p_pid, td->td_name, it->it_need, td->td_state);
	}
	thread_unlock(td);

	return (0);
}

/*
 * Allow interrupt event binding for software interrupt handlers -- a no-op,
 * since interrupts are generated in software rather than being directed by
 * a PIC.
 */
static int
swi_assign_cpu(void *arg, int cpu)
{

	return (0);
}

/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
    void *arg, int pri, enum intr_type flags, void **cookiep)
{
	struct intr_event *ie;
	int error;

	if (flags & INTR_ENTROPY)
		return (EINVAL);

	ie = (eventp != NULL) ? *eventp : NULL;

	if (ie != NULL) {
		if (!(ie->ie_flags & IE_SOFT))
			return (EINVAL);
	} else {
		error = intr_event_create(&ie, NULL, IE_SOFT, 0,
		    NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
		if (error)
			return (error);
		if (eventp != NULL)
			*eventp = ie;
	}
	error = intr_event_add_handler(ie, name, NULL, handler, arg,
	    PI_SWI(pri), flags, cookiep);
	return (error);
}

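/*
 * Example (sketch): registering and later scheduling a private soft
 * interrupt, in the same way start_softintr() below sets up the VM swi;
 * the handler name, softc argument and SWI_CLOCK priority are illustrative:
 *
 *	static void *my_ih;
 *
 *	swi_add(NULL, "mydev", my_swi_handler, sc, SWI_CLOCK, INTR_MPSAFE,
 *	    &my_ih);
 *	...
 *	swi_sched(my_ih, 0);
 */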
/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
	struct intr_handler *ih = (struct intr_handler *)cookie;
	struct intr_event *ie = ih->ih_event;
	struct intr_entropy entropy;
	int error __unused;

	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
	    ih->ih_need);

	entropy.event = (uintptr_t)ih;
	entropy.td = curthread;
	random_harvest_queue(&entropy, sizeof(entropy), RANDOM_SWI);

	/*
	 * Set ih_need for this handler so that if the ithread is already
	 * running it will execute this handler on the next pass.  Otherwise,
	 * it will execute it the next time it runs.
	 */
	ih->ih_need = 1;

	if (!(flags & SWI_DELAY)) {
		VM_CNT_INC(v_soft);
		error = intr_event_schedule_thread(ie);
		KASSERT(error == 0, ("stray software interrupt"));
	}
}

/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

	return (intr_event_remove_handler(cookie));
}

static void
intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
{
	struct intr_handler *ih, *ihn, *ihp;

	ihp = NULL;
	CK_SLIST_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
		/*
		 * If this handler is marked for death, remove it from
		 * the list of handlers and wake up the sleeper.
		 */
		if (ih->ih_flags & IH_DEAD) {
			mtx_lock(&ie->ie_lock);
			if (ihp == NULL)
				CK_SLIST_REMOVE_HEAD(&ie->ie_handlers, ih_next);
			else
				CK_SLIST_REMOVE_AFTER(ihp, ih_next);
			ih->ih_flags &= ~IH_DEAD;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
			continue;
		}

		/*
		 * Now that we know that the current element won't be removed
		 * update the previous element.
		 */
		ihp = ih;

		if ((ih->ih_flags & IH_CHANGED) != 0) {
			mtx_lock(&ie->ie_lock);
			ih->ih_flags &= ~IH_CHANGED;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
		}

		/* Skip filter only handlers */
		if (ih->ih_handler == NULL)
			continue;

		/* Skip suspended handlers */
		if ((ih->ih_flags & IH_SUSP) != 0)
			continue;

		/*
		 * For software interrupt threads, we only execute
		 * handlers that have their need flag set.  Hardware
		 * interrupt threads always invoke all of their handlers.
		 *
		 * ih_need can only be 0 or 1.  Failed cmpset below
		 * means that there is no request to execute handlers,
		 * so a retry of the cmpset is not needed.
		 */
		if ((ie->ie_flags & IE_SOFT) != 0 &&
		    atomic_cmpset_int(&ih->ih_need, 1, 0) == 0)
			continue;

		/* Execute this handler. */
		CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
		    __func__, p->p_pid, (void *)ih->ih_handler,
		    ih->ih_argument, ih->ih_name, ih->ih_flags);

		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_lock(&Giant);
		ih->ih_handler(ih->ih_argument);
		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_unlock(&Giant);
	}
}

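/*
 * Example (illustrative): the storm protection applied below is controlled
 * by the hw.intr_storm_threshold loader tunable and sysctl declared at the
 * top of this file; setting it to zero, e.g.
 *
 *	# sysctl hw.intr_storm_threshold=0
 *
 * disables the throttling entirely.
 */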
static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{

	/* Interrupt handlers should not sleep. */
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_NO_SLEEPING();
	intr_event_execute_handlers(p, ie);
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_SLEEPING_OK();

	/*
	 * Interrupt storm handling:
	 *
	 * If this interrupt source is currently storming, then throttle
	 * it to only fire the handler once per clock tick.
	 *
	 * If this interrupt source is not currently storming, but the
	 * number of back to back interrupts exceeds the storm threshold,
	 * then enter storming mode.
	 */
	if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
	    !(ie->ie_flags & IE_SOFT)) {
		/* Report the message only once every second. */
		if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
			printf(
	"interrupt storm detected on \"%s\"; throttling interrupt source\n",
			    ie->ie_name);
		}
		pause("istorm", 1);
	} else
		ie->ie_count++;

	/*
	 * Now that all the handlers have had a chance to run, reenable
	 * the interrupt source.
	 */
	if (ie->ie_post_ithread != NULL)
		ie->ie_post_ithread(ie->ie_source);
}

/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	int wake;

	td = curthread;
	p = td->td_proc;
	ithd = (struct intr_thread *)arg;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;
	wake = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 *
		 * The load_acq part of the following cmpset ensures
		 * that the load of ih_need in ithread_execute_handlers()
		 * is ordered after the load of it_need here.
		 */
		while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0)
			ithread_execute_handlers(p, ie);
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (atomic_load_acq_int(&ithd->it_need) == 0 &&
		    (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL | SWT_IWAIT, NULL);
		}
		if (ithd->it_flags & IT_WAIT) {
			wake = 1;
			ithd->it_flags &= ~IT_WAIT;
		}
		thread_unlock(td);
		if (wake) {
			wakeup(ithd);
			wake = 0;
		}
	}
}

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:	the event connected to this interrupt.
 * o frame:	some archs (e.g. i386) pass a frame to some
 *		handlers as their main argument.
 * Return value:
 * o 0:		everything ok.
 * o EINVAL:	stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_handler *ih;
	struct trapframe *oldframe;
	struct thread *td;
	int phase;
	int ret;
	bool filter, thread;

	td = curthread;

#ifdef KSTACK_USAGE_PROF
	intr_prof_stack_use(td, frame);
#endif

	/* An interrupt with no event or handlers is a stray interrupt. */
	if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	/*
	 * Execute fast interrupt handlers directly.
	 * To support clock handlers, if a handler registers
	 * with a NULL argument, then we pass it a pointer to
	 * a trapframe as its argument.
	 */
	td->td_intr_nesting_level++;
	filter = false;
	thread = false;
	ret = 0;
	critical_enter();
	oldframe = td->td_intr_frame;
	td->td_intr_frame = frame;

	phase = ie->ie_phase;
	atomic_add_int(&ie->ie_active[phase], 1);

	/*
	 * This fence is required to ensure that no later loads are
	 * re-ordered before the ie_active store.
	 */
	atomic_thread_fence_seq_cst();

	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if ((ih->ih_flags & IH_SUSP) != 0)
			continue;
		if (ih->ih_filter == NULL) {
			thread = true;
			continue;
		}
		CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
		    ih->ih_filter, ih->ih_argument == NULL ? frame :
		    ih->ih_argument, ih->ih_name);
		if (ih->ih_argument == NULL)
			ret = ih->ih_filter(frame);
		else
			ret = ih->ih_filter(ih->ih_argument);
		KASSERT(ret == FILTER_STRAY ||
		    ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
		    (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
		    ("%s: incorrect return value %#x from %s", __func__, ret,
		    ih->ih_name));
		filter = filter || ret == FILTER_HANDLED;

		/*
		 * Wrapper handler special handling:
		 *
		 * in some particular cases (like pccard and pccbb),
		 * the _real_ device handler is wrapped in a couple of
		 * functions - a filter wrapper and an ithread wrapper.
		 * In this case (and just in this case), the filter wrapper
		 * could ask the system to schedule the ithread and mask
		 * the interrupt source if the wrapped handler is composed
		 * of just an ithread handler.
		 *
		 * TODO: write a generic wrapper to avoid people rolling
		 * their own.
		 */
		if (!thread) {
			if (ret == FILTER_SCHEDULE_THREAD)
				thread = true;
		}
	}
	atomic_add_rel_int(&ie->ie_active[phase], -1);

	td->td_intr_frame = oldframe;

	if (thread) {
		if (ie->ie_pre_ithread != NULL)
			ie->ie_pre_ithread(ie->ie_source);
	} else {
		if (ie->ie_post_filter != NULL)
			ie->ie_post_filter(ie->ie_source);
	}

	/* Schedule the ithread if needed. */
	if (thread) {
		int error __unused;

		error = intr_event_schedule_thread(ie);
		KASSERT(error == 0, ("bad stray interrupt"));
	}
	critical_exit();
	td->td_intr_nesting_level--;
#ifdef notyet
	/* The interrupt is not acknowledged by any filter and has no ithread. */
	if (!thread && !filter)
		return (EINVAL);
#endif
	return (0);
}

#ifdef DDB
/*
 * Dump details about an interrupt handler
 */
static void
db_dump_intrhand(struct intr_handler *ih)
{
	int comma;

	db_printf("\t%-10s ", ih->ih_name);
	switch (ih->ih_pri) {
	case PI_REALTIME:
		db_printf("CLK ");
		break;
	case PI_AV:
		db_printf("AV  ");
		break;
	case PI_TTY:
		db_printf("TTY ");
		break;
	case PI_NET:
		db_printf("NET ");
		break;
	case PI_DISK:
		db_printf("DISK");
		break;
	case PI_DULL:
		db_printf("DULL");
		break;
	default:
		if (ih->ih_pri >= PI_SOFT)
			db_printf("SWI ");
		else
			db_printf("%4u", ih->ih_pri);
		break;
	}
	db_printf(" ");
	if (ih->ih_filter != NULL) {
		db_printf("[F]");
		db_printsym((uintptr_t)ih->ih_filter, DB_STGY_PROC);
	}
	if (ih->ih_handler != NULL) {
		if (ih->ih_filter != NULL)
			db_printf(",");
		db_printf("[H]");
		db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
	}
	db_printf("(%p)", ih->ih_argument);
	if (ih->ih_need ||
	    (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
	    IH_MPSAFE)) != 0) {
		db_printf(" {");
		comma = 0;
		if (ih->ih_flags & IH_EXCLUSIVE) {
			if (comma)
				db_printf(", ");
			db_printf("EXCL");
			comma = 1;
		}
		if (ih->ih_flags & IH_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ih->ih_flags & IH_DEAD) {
			if (comma)
				db_printf(", ");
			db_printf("DEAD");
			comma = 1;
		}
		if (ih->ih_flags & IH_MPSAFE) {
			if (comma)
				db_printf(", ");
			db_printf("MPSAFE");
			comma = 1;
		}
		if (ih->ih_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");
}

/*
 * Dump details about an event.
 */
void
db_dump_intr_event(struct intr_event *ie, int handlers)
{
	struct intr_handler *ih;
	struct intr_thread *it;
	int comma;

	db_printf("%s ", ie->ie_fullname);
	it = ie->ie_thread;
	if (it != NULL)
		db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
	else
		db_printf("(no thread)");
	if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
	    (it != NULL && it->it_need)) {
		db_printf(" {");
		comma = 0;
		if (ie->ie_flags & IE_SOFT) {
			db_printf("SOFT");
			comma = 1;
		}
		if (ie->ie_flags & IE_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ie->ie_flags & IE_ADDING_THREAD) {
			if (comma)
				db_printf(", ");
			db_printf("ADDING_THREAD");
			comma = 1;
		}
		if (it != NULL && it->it_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");

	if (handlers)
		CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next)
			db_dump_intrhand(ih);
}

/*
 * Dump data about interrupt handlers
 */
DB_SHOW_COMMAND(intr, db_show_intr)
{
	struct intr_event *ie;
	int all, verbose;

	verbose = strchr(modif, 'v') != NULL;
	all = strchr(modif, 'a') != NULL;
	TAILQ_FOREACH(ie, &event_list, ie_list) {
		if (!all && CK_SLIST_EMPTY(&ie->ie_handlers))
			continue;
		db_dump_intr_event(ie, verbose);
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */

/*
 * Start standard software interrupt threads
 */
static void
start_softintr(void *dummy)
{

	if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
		panic("died while creating vm swi ithread");
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
    NULL);

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however is
 * machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrnames, sintrnames, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
#ifdef SCTL_MASK32
	uint32_t *intrcnt32;
	unsigned i;
	int error;

	if (req->flags & SCTL_MASK32) {
		if (!req->oldptr)
			return (sysctl_handle_opaque(oidp, NULL, sintrcnt / 2, req));
		intrcnt32 = malloc(sintrcnt / 2, M_TEMP, M_NOWAIT);
		if (intrcnt32 == NULL)
			return (ENOMEM);
		for (i = 0; i < sintrcnt / sizeof (u_long); i++)
			intrcnt32[i] = intrcnt[i];
		error = sysctl_handle_opaque(oidp, intrcnt32, sintrcnt / 2, req);
		free(intrcnt32, M_TEMP);
		return (error);
	}
#endif
	return (sysctl_handle_opaque(oidp, intrcnt, sintrcnt, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");

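/*
 * Example (illustrative): userland consumers such as vmstat(8) and
 * systat(1) read the opaque hw.intrnames and hw.intrcnt arrays exported
 * above; "vmstat -i" prints one line per named counter.
 */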
#ifdef DDB
/*
 * DDB command to dump the interrupt statistics.
 */
DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
{
	u_long *i;
	char *cp;
	u_int j;

	cp = intrnames;
	j = 0;
	for (i = intrcnt; j < (sintrcnt / sizeof(u_long)) && !db_pager_quit;
	    i++, j++) {
		if (*cp == '\0')
			break;
		if (*i != 0)
			db_printf("%s\t%lu\n", cp, *i);
		cp += strlen(cp) + 1;
	}
}
#endif