/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_kstack_usage_prof.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
	struct intr_event *it_event;
	struct thread *it_thread;	/* Kernel thread. */
	int	it_flags;		/* (j) IT_* flags. */
	int	it_need;		/* Needs service. */
};

/* Interrupt thread flags kept in it_flags */
#define	IT_DEAD		0x000001	/* Thread is waiting to exit. */
#define	IT_WAIT		0x000002	/* Thread is waiting for completion. */

struct intr_entropy {
	struct thread *td;
	uintptr_t event;
};

struct intr_event *clk_intr_event;
struct intr_event *tty_intr_event;
void	*vm_ih;
struct proc *intrproc;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static int intr_storm_threshold = 1000;
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RWTUN,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);
static struct mtx event_lock;
MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);

static void	intr_event_update(struct intr_event *ie);
static int	intr_event_schedule_thread(struct intr_event *ie);
static struct intr_thread *ithread_create(const char *name);
static void	ithread_destroy(struct intr_thread *ithread);
static void	ithread_execute_handlers(struct proc *p,
		    struct intr_event *ie);
static void	ithread_loop(void *);
static void	ithread_update(struct intr_thread *ithd);
static void	start_softintr(void *);

/* Map an interrupt type to an ithread priority. */
u_char
intr_priority(enum intr_type flags)
{
	u_char pri;

	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
	switch (flags) {
	case INTR_TYPE_TTY:
		pri = PI_TTY;
		break;
	case INTR_TYPE_BIO:
		pri = PI_DISK;
		break;
	case INTR_TYPE_NET:
		pri = PI_NET;
		break;
	case INTR_TYPE_CAM:
		pri = PI_DISK;
		break;
	case INTR_TYPE_AV:
		pri = PI_AV;
		break;
	case INTR_TYPE_CLK:
		pri = PI_REALTIME;
		break;
	case INTR_TYPE_MISC:
		pri = PI_DULL;	/* don't care */
		break;
	default:
		/* We didn't specify an interrupt level. */
		panic("intr_priority: no interrupt type in flags");
	}

	return pri;
}

/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
	struct intr_event *ie;
	struct thread *td;
	u_char pri;

	ie = ithd->it_event;
	td = ithd->it_thread;
	mtx_assert(&ie->ie_lock, MA_OWNED);

	/* Determine the overall priority of this event. */
	if (CK_SLIST_EMPTY(&ie->ie_handlers))
		pri = PRI_MAX_ITHD;
	else
		pri = CK_SLIST_FIRST(&ie->ie_handlers)->ih_pri;

	/* Update name and priority. */
	strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
#ifdef KTR
	sched_clear_tdname(td);
#endif
	thread_lock(td);
	sched_prio(td, pri);
	thread_unlock(td);
}

/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
	struct intr_handler *ih;
	char *last;
	int missed, space;

	/* Start off with no entropy and just the name of the event. */
	mtx_assert(&ie->ie_lock, MA_OWNED);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	ie->ie_flags &= ~IE_ENTROPY;
	missed = 0;
	space = 1;

	/* Run through all the handlers updating values. */
	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
		    sizeof(ie->ie_fullname)) {
			strcat(ie->ie_fullname, " ");
			strcat(ie->ie_fullname, ih->ih_name);
			space = 0;
		} else
			missed++;
		if (ih->ih_flags & IH_ENTROPY)
			ie->ie_flags |= IE_ENTROPY;
	}

	/*
	 * If there is only one handler and its name is too long, just copy in
	 * as much of the end of the name (includes the unit number) as will
	 * fit.  Otherwise, we have multiple handlers and not all of the names
	 * will fit.  Add +'s to indicate missing names.  If we run out of room
	 * and still have +'s to add, change the last character from a + to a *.
	 */
	if (missed == 1 && space == 1) {
		ih = CK_SLIST_FIRST(&ie->ie_handlers);
		missed = strlen(ie->ie_fullname) + strlen(ih->ih_name) + 2 -
		    sizeof(ie->ie_fullname);
		strcat(ie->ie_fullname, (missed == 0) ? " " : "-");
		strcat(ie->ie_fullname, &ih->ih_name[missed]);
		missed = 0;
	}
	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
	while (missed-- > 0) {
		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
			if (*last == '+') {
				*last = '*';
				break;
			} else
				*last = '+';
		} else if (space) {
			strcat(ie->ie_fullname, " +");
			space = 0;
		} else
			strcat(ie->ie_fullname, "+");
	}

	/*
	 * If this event has an ithread, update its priority and
	 * name.
	 */
	if (ie->ie_thread != NULL)
		ithread_update(ie->ie_thread);
	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}

int
intr_event_create(struct intr_event **event, void *source, int flags, int irq,
    void (*pre_ithread)(void *), void (*post_ithread)(void *),
    void (*post_filter)(void *), int (*assign_cpu)(void *, int),
    const char *fmt, ...)
{
	struct intr_event *ie;
	va_list ap;

	/* The only valid flag during creation is IE_SOFT. */
	if ((flags & ~IE_SOFT) != 0)
		return (EINVAL);
	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
	ie->ie_source = source;
	ie->ie_pre_ithread = pre_ithread;
	ie->ie_post_ithread = post_ithread;
	ie->ie_post_filter = post_filter;
	ie->ie_assign_cpu = assign_cpu;
	ie->ie_flags = flags;
	ie->ie_irq = irq;
	ie->ie_cpu = NOCPU;
	CK_SLIST_INIT(&ie->ie_handlers);
	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

	va_start(ap, fmt);
	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
	va_end(ap);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	mtx_lock(&event_lock);
	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
	mtx_unlock(&event_lock);
	if (event != NULL)
		*event = ie;
	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
	return (0);
}

/*
 * Bind an interrupt event to the specified CPU.  Note that not all
 * platforms support binding an interrupt to a CPU.  For those
 * platforms this request will fail.  Using a cpu id of NOCPU unbinds
 * the interrupt event.
 */
static int
_intr_event_bind(struct intr_event *ie, int cpu, bool bindirq, bool bindithread)
{
	lwpid_t id;
	int error;

	/* Need a CPU to bind to. */
	if (cpu != NOCPU && CPU_ABSENT(cpu))
		return (EINVAL);

	if (ie->ie_assign_cpu == NULL)
		return (EOPNOTSUPP);

	error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
	if (error)
		return (error);

	/*
	 * If we have any ithreads try to set their mask first to verify
	 * permissions, etc.
	 */
	if (bindithread) {
		mtx_lock(&ie->ie_lock);
		if (ie->ie_thread != NULL) {
			id = ie->ie_thread->it_thread->td_tid;
			mtx_unlock(&ie->ie_lock);
			error = cpuset_setithread(id, cpu);
			if (error)
				return (error);
		} else
			mtx_unlock(&ie->ie_lock);
	}
	if (bindirq)
		error = ie->ie_assign_cpu(ie->ie_source, cpu);
	if (error) {
		if (bindithread) {
			mtx_lock(&ie->ie_lock);
			if (ie->ie_thread != NULL) {
				cpu = ie->ie_cpu;
				id = ie->ie_thread->it_thread->td_tid;
				mtx_unlock(&ie->ie_lock);
				(void)cpuset_setithread(id, cpu);
			} else
				mtx_unlock(&ie->ie_lock);
		}
		return (error);
	}

	if (bindirq) {
		mtx_lock(&ie->ie_lock);
		ie->ie_cpu = cpu;
		mtx_unlock(&ie->ie_lock);
	}

	return (error);
}

/*
 * Bind an interrupt event to the specified CPU.  For supported platforms, any
 * associated ithreads as well as the primary interrupt context will be bound
 * to the specified CPU.
 */
int
intr_event_bind(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, true, true));
}

/*
 * Bind an interrupt event to the specified CPU, but do not bind associated
 * ithreads.
 */
int
intr_event_bind_irqonly(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, true, false));
}

/*
 * Bind an interrupt event's ithread to the specified CPU.
 */
int
intr_event_bind_ithread(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, false, true));
}

static struct intr_event *
intr_lookup(int irq)
{
	struct intr_event *ie;

	mtx_lock(&event_lock);
	TAILQ_FOREACH(ie, &event_list, ie_list)
		if (ie->ie_irq == irq &&
		    (ie->ie_flags & IE_SOFT) == 0 &&
		    CK_SLIST_FIRST(&ie->ie_handlers) != NULL)
			break;
	mtx_unlock(&event_lock);
	return (ie);
}
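
/*
 * Illustrative note (not part of the original code): intr_setaffinity() and
 * intr_getaffinity() below are normally reached from the cpuset_setaffinity(2)
 * and cpuset_getaffinity(2) system calls.  From userland the usual front end
 * is cpuset(1); something along the lines of "cpuset -x 24 -l 2" would steer
 * IRQ 24 to CPU 2.  The exact flags are only an example; see cpuset(1).
 */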

int
intr_setaffinity(int irq, int mode, void *m)
{
	struct intr_event *ie;
	cpuset_t *mask;
	int cpu, n;

	mask = m;
	cpu = NOCPU;
	/*
	 * If we're setting all cpus we can unbind.  Otherwise make sure
	 * only one cpu is in the set.
	 */
	if (CPU_CMP(cpuset_root, mask)) {
		for (n = 0; n < CPU_SETSIZE; n++) {
			if (!CPU_ISSET(n, mask))
				continue;
			if (cpu != NOCPU)
				return (EINVAL);
			cpu = n;
		}
	}
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);
	switch (mode) {
	case CPU_WHICH_IRQ:
		return (intr_event_bind(ie, cpu));
	case CPU_WHICH_INTRHANDLER:
		return (intr_event_bind_irqonly(ie, cpu));
	case CPU_WHICH_ITHREAD:
		return (intr_event_bind_ithread(ie, cpu));
	default:
		return (EINVAL);
	}
}

int
intr_getaffinity(int irq, int mode, void *m)
{
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	cpuset_t *mask;
	lwpid_t id;
	int error;

	mask = m;
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);

	error = 0;
	CPU_ZERO(mask);
	switch (mode) {
	case CPU_WHICH_IRQ:
	case CPU_WHICH_INTRHANDLER:
		mtx_lock(&ie->ie_lock);
		if (ie->ie_cpu == NOCPU)
			CPU_COPY(cpuset_root, mask);
		else
			CPU_SET(ie->ie_cpu, mask);
		mtx_unlock(&ie->ie_lock);
		break;
	case CPU_WHICH_ITHREAD:
		mtx_lock(&ie->ie_lock);
		if (ie->ie_thread == NULL) {
			mtx_unlock(&ie->ie_lock);
			CPU_COPY(cpuset_root, mask);
		} else {
			id = ie->ie_thread->it_thread->td_tid;
			mtx_unlock(&ie->ie_lock);
			error = cpuset_which(CPU_WHICH_TID, id, &p, &td, NULL);
			if (error != 0)
				return (error);
			CPU_COPY(&td->td_cpuset->cs_mask, mask);
			PROC_UNLOCK(p);
		}
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

int
intr_event_destroy(struct intr_event *ie)
{

	mtx_lock(&event_lock);
	mtx_lock(&ie->ie_lock);
	if (!CK_SLIST_EMPTY(&ie->ie_handlers)) {
		mtx_unlock(&ie->ie_lock);
		mtx_unlock(&event_lock);
		return (EBUSY);
	}
	TAILQ_REMOVE(&event_list, ie, ie_list);
#ifndef notyet
	if (ie->ie_thread != NULL) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	mtx_unlock(&event_lock);
	mtx_destroy(&ie->ie_lock);
	free(ie, M_ITHREAD);
	return (0);
}

static struct intr_thread *
ithread_create(const char *name)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
	    &td, RFSTOPPED | RFHIGHPID,
	    0, "intr", "%s", name);
	if (error)
		panic("kproc_create() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}

static void
ithread_destroy(struct intr_thread *ithread)
{
	struct thread *td;

	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
	td = ithread->it_thread;
	thread_lock(td);
	ithread->it_flags |= IT_DEAD;
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	}
	thread_unlock(td);
}
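
/*
 * Illustrative sketch (not part of the original file): device drivers do not
 * usually call intr_event_add_handler() directly; they register handlers with
 * bus_setup_intr(9), and the bus/MD interrupt code ends up here.  A typical
 * (hypothetical) attach routine might do something like:
 *
 *	error = bus_setup_intr(dev, sc->sc_irq_res,
 *	    INTR_TYPE_MISC | INTR_MPSAFE, foo_filter, foo_ithread,
 *	    sc, &sc->sc_intr_cookie);
 *
 * where foo_filter, foo_ithread, and the softc fields are made-up names used
 * only for illustration.
 */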

int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_handler **prevptr;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!CK_SLIST_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (CK_SLIST_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Create a thread if we need one. */
	while (ie->ie_thread == NULL && handler != NULL) {
		if (ie->ie_flags & IE_ADDING_THREAD)
			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
		else {
			ie->ie_flags |= IE_ADDING_THREAD;
			mtx_unlock(&ie->ie_lock);
			it = ithread_create("intr: newborn");
			mtx_lock(&ie->ie_lock);
			ie->ie_flags &= ~IE_ADDING_THREAD;
			ie->ie_thread = it;
			it->it_event = ie;
			ithread_update(it);
			wakeup(ie);
		}
	}

	/* Add the new handler to the event in priority order. */
	CK_SLIST_FOREACH_PREVPTR(temp_ih, prevptr, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	CK_SLIST_INSERT_PREVPTR(prevptr, temp_ih, ih, ih_next);

	intr_event_update(ie);

	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}

/*
 * Append a description preceded by a ':' to the name of the specified
 * interrupt handler.
 */
int
intr_event_describe_handler(struct intr_event *ie, void *cookie,
    const char *descr)
{
	struct intr_handler *ih;
	size_t space;
	char *start;

	mtx_lock(&ie->ie_lock);
#ifdef INVARIANTS
	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih == cookie)
			break;
	}
	if (ih == NULL) {
		mtx_unlock(&ie->ie_lock);
		panic("handler %p not found in interrupt event %p", cookie, ie);
	}
#endif
	ih = cookie;

	/*
	 * Look for an existing description by checking for an
	 * existing ":".  This assumes device names do not include
	 * colons.  If one is found, prepare to insert the new
	 * description at that point.  If one is not found, find the
	 * end of the name to use as the insertion point.
	 */
	start = strchr(ih->ih_name, ':');
	if (start == NULL)
		start = strchr(ih->ih_name, 0);

	/*
	 * See if there is enough remaining room in the string for the
	 * description + ":".  The "- 1" leaves room for the trailing
	 * '\0'.  The "+ 1" accounts for the colon.
	 */
	space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1;
	if (strlen(descr) + 1 > space) {
		mtx_unlock(&ie->ie_lock);
		return (ENOSPC);
	}

	/* Append a colon followed by the description. */
	*start = ':';
	strcpy(start + 1, descr);
	intr_event_update(ie);
	mtx_unlock(&ie->ie_lock);
	return (0);
}

/*
 * Return the ie_source field from the intr_event an intr_handler is
 * associated with.
 */
void *
intr_handler_source(void *cookie)
{
	struct intr_handler *ih;
	struct intr_event *ie;

	ih = (struct intr_handler *)cookie;
	if (ih == NULL)
		return (NULL);
	ie = ih->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    ih->ih_name));
	return (ie->ie_source);
}

/*
 * If intr_event_handle() is running in the ISR context at the time of the call,
 * then wait for it to complete.
 */
static void
intr_event_barrier(struct intr_event *ie)
{
	int phase;

	mtx_assert(&ie->ie_lock, MA_OWNED);
	phase = ie->ie_phase;

	/*
	 * Switch phase to direct future interrupts to the other active counter.
	 * Make sure that any preceding stores are visible before the switch.
	 */
	KASSERT(ie->ie_active[!phase] == 0, ("idle phase has activity"));
	atomic_store_rel_int(&ie->ie_phase, !phase);

	/*
	 * This code cooperates with wait-free iteration of ie_handlers
	 * in intr_event_handle.
	 * Make sure that the removal and the phase update are not reordered
	 * with the active count check.
	 * Note that no combination of acquire and release fences can provide
	 * that guarantee as Store->Load sequences can always be reordered.
	 */
	atomic_thread_fence_seq_cst();

	/*
	 * Now wait on the inactive phase.
	 * The acquire fence is needed so that all post-barrier accesses
	 * are after the check.
	 */
	while (ie->ie_active[phase] > 0)
		cpu_spinwait();
	atomic_thread_fence_acq();
}

static void
intr_handler_barrier(struct intr_handler *handler)
{
	struct intr_event *ie;

	ie = handler->ih_event;
	mtx_assert(&ie->ie_lock, MA_OWNED);
	KASSERT((handler->ih_flags & IH_DEAD) == 0,
	    ("update for a removed handler"));

	if (ie->ie_thread == NULL) {
		intr_event_barrier(ie);
		return;
	}
	if ((handler->ih_flags & IH_CHANGED) == 0) {
		handler->ih_flags |= IH_CHANGED;
		intr_event_schedule_thread(ie);
	}
	while ((handler->ih_flags & IH_CHANGED) != 0)
		msleep(handler, &ie->ie_lock, 0, "ih_barr", 0);
}

/*
 * Sleep until an ithread finishes executing an interrupt handler.
 *
 * XXX Doesn't currently handle interrupt filters or fast interrupt
 * handlers.  This is intended for compatibility with linux drivers
 * only.  Do not use in BSD code.
 */
void
_intr_drain(int irq)
{
	struct intr_event *ie;
	struct intr_thread *ithd;
	struct thread *td;

	ie = intr_lookup(irq);
	if (ie == NULL)
		return;
	if (ie->ie_thread == NULL)
		return;
	ithd = ie->ie_thread;
	td = ithd->it_thread;
	/*
	 * We set the flag and wait for it to be cleared to avoid
	 * long delays with potentially busy interrupt handlers
	 * were we to only sample TD_AWAITING_INTR() every tick.
	 */
781 */ 782 thread_lock(td); 783 if (!TD_AWAITING_INTR(td)) { 784 ithd->it_flags |= IT_WAIT; 785 while (ithd->it_flags & IT_WAIT) { 786 thread_unlock(td); 787 pause("idrain", 1); 788 thread_lock(td); 789 } 790 } 791 thread_unlock(td); 792 return; 793 } 794 795 int 796 intr_event_remove_handler(void *cookie) 797 { 798 struct intr_handler *handler = (struct intr_handler *)cookie; 799 struct intr_event *ie; 800 struct intr_handler *ih; 801 struct intr_handler **prevptr; 802 #ifdef notyet 803 int dead; 804 #endif 805 806 if (handler == NULL) 807 return (EINVAL); 808 ie = handler->ih_event; 809 KASSERT(ie != NULL, 810 ("interrupt handler \"%s\" has a NULL interrupt event", 811 handler->ih_name)); 812 813 mtx_lock(&ie->ie_lock); 814 CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name, 815 ie->ie_name); 816 CK_SLIST_FOREACH_PREVPTR(ih, prevptr, &ie->ie_handlers, ih_next) { 817 if (ih == handler) 818 break; 819 } 820 if (ih == NULL) { 821 panic("interrupt handler \"%s\" not found in " 822 "interrupt event \"%s\"", handler->ih_name, ie->ie_name); 823 } 824 825 /* 826 * If there is no ithread, then directly remove the handler. Note that 827 * intr_event_handle() iterates ie_handlers in a lock-less fashion, so 828 * care needs to be taken to keep ie_handlers consistent and to free 829 * the removed handler only when ie_handlers is quiescent. 830 */ 831 if (ie->ie_thread == NULL) { 832 CK_SLIST_REMOVE_PREVPTR(prevptr, ih, ih_next); 833 intr_event_barrier(ie); 834 intr_event_update(ie); 835 mtx_unlock(&ie->ie_lock); 836 free(handler, M_ITHREAD); 837 return (0); 838 } 839 840 /* 841 * Let the interrupt thread do the job. 842 * The interrupt source is disabled when the interrupt thread is 843 * running, so it does not have to worry about interaction with 844 * intr_event_handle(). 845 */ 846 KASSERT((handler->ih_flags & IH_DEAD) == 0, 847 ("duplicate handle remove")); 848 handler->ih_flags |= IH_DEAD; 849 intr_event_schedule_thread(ie); 850 while (handler->ih_flags & IH_DEAD) 851 msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0); 852 intr_event_update(ie); 853 854 #ifdef notyet 855 /* 856 * XXX: This could be bad in the case of ppbus(8). Also, I think 857 * this could lead to races of stale data when servicing an 858 * interrupt. 859 */ 860 dead = 1; 861 CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) { 862 if (ih->ih_handler != NULL) { 863 dead = 0; 864 break; 865 } 866 } 867 if (dead) { 868 ithread_destroy(ie->ie_thread); 869 ie->ie_thread = NULL; 870 } 871 #endif 872 mtx_unlock(&ie->ie_lock); 873 free(handler, M_ITHREAD); 874 return (0); 875 } 876 877 int 878 intr_event_suspend_handler(void *cookie) 879 { 880 struct intr_handler *handler = (struct intr_handler *)cookie; 881 struct intr_event *ie; 882 883 if (handler == NULL) 884 return (EINVAL); 885 ie = handler->ih_event; 886 KASSERT(ie != NULL, 887 ("interrupt handler \"%s\" has a NULL interrupt event", 888 handler->ih_name)); 889 mtx_lock(&ie->ie_lock); 890 handler->ih_flags |= IH_SUSP; 891 intr_handler_barrier(handler); 892 mtx_unlock(&ie->ie_lock); 893 return (0); 894 } 895 896 int 897 intr_event_resume_handler(void *cookie) 898 { 899 struct intr_handler *handler = (struct intr_handler *)cookie; 900 struct intr_event *ie; 901 902 if (handler == NULL) 903 return (EINVAL); 904 ie = handler->ih_event; 905 KASSERT(ie != NULL, 906 ("interrupt handler \"%s\" has a NULL interrupt event", 907 handler->ih_name)); 908 909 /* 910 * intr_handler_barrier() acts not only as a barrier, 911 * it also allows to check for any pending interrupts. 
	mtx_lock(&ie->ie_lock);
	handler->ih_flags &= ~IH_SUSP;
	intr_handler_barrier(handler);
	mtx_unlock(&ie->ie_lock);
	return (0);
}

static int
intr_event_schedule_thread(struct intr_event *ie)
{
	struct intr_entropy entropy;
	struct intr_thread *it;
	struct thread *td;
	struct thread *ctd;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers) ||
	    ie->ie_thread == NULL)
		return (EINVAL);

	ctd = curthread;
	it = ie->ie_thread;
	td = it->it_thread;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (ie->ie_flags & IE_ENTROPY) {
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest_queue(&entropy, sizeof(entropy), RANDOM_INTERRUPT);
	}

	KASSERT(td->td_proc != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 *
	 * Use store_rel to arrange that the store to ih_need in
	 * swi_sched() is before the store to it_need and prepare for
	 * transfer of this order to loads in the ithread.
	 */
	atomic_store_rel_int(&it->it_need, 1);
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__,
		    td->td_proc->p_pid, td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, td->td_proc->p_pid, td->td_name, it->it_need,
		    td->td_state);
	}
	thread_unlock(td);

	return (0);
}

/*
 * Allow interrupt event binding for software interrupt handlers -- a no-op,
 * since interrupts are generated in software rather than being directed by
 * a PIC.
 */
static int
swi_assign_cpu(void *arg, int cpu)
{

	return (0);
}

/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
    void *arg, int pri, enum intr_type flags, void **cookiep)
{
	struct intr_event *ie;
	int error;

	if (flags & INTR_ENTROPY)
		return (EINVAL);

	ie = (eventp != NULL) ? *eventp : NULL;

	if (ie != NULL) {
		if (!(ie->ie_flags & IE_SOFT))
			return (EINVAL);
	} else {
		error = intr_event_create(&ie, NULL, IE_SOFT, 0,
		    NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
		if (error)
			return (error);
		if (eventp != NULL)
			*eventp = ie;
	}
	error = intr_event_add_handler(ie, name, NULL, handler, arg,
	    PI_SWI(pri), flags, cookiep);
	return (error);
}
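
/*
 * Illustrative sketch (not part of the original file): a subsystem that wants
 * a private software interrupt typically registers it once and then schedules
 * it from elsewhere, along the lines of:
 *
 *	static void *foo_swi_cookie;		// hypothetical cookie
 *
 *	static void
 *	foo_swi_handler(void *arg)
 *	{
 *		// deferred work runs here in the swi ithread context
 *	}
 *
 *	// at initialization time:
 *	swi_add(NULL, "foo", foo_swi_handler, NULL, SWI_VM, INTR_MPSAFE,
 *	    &foo_swi_cookie);
 *	// later, e.g. from another interrupt handler:
 *	swi_sched(foo_swi_cookie, 0);
 *
 * "foo" and the SWI_VM priority are placeholders chosen only for the example.
 */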

/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
	struct intr_handler *ih = (struct intr_handler *)cookie;
	struct intr_event *ie = ih->ih_event;
	struct intr_entropy entropy;
	int error __unused;

	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
	    ih->ih_need);

	entropy.event = (uintptr_t)ih;
	entropy.td = curthread;
	random_harvest_queue(&entropy, sizeof(entropy), RANDOM_SWI);

	/*
	 * Set ih_need for this handler so that if the ithread is already
	 * running it will execute this handler on the next pass.  Otherwise,
	 * it will execute it the next time it runs.
	 */
	ih->ih_need = 1;

	if (!(flags & SWI_DELAY)) {
		VM_CNT_INC(v_soft);
		error = intr_event_schedule_thread(ie);
		KASSERT(error == 0, ("stray software interrupt"));
	}
}

/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

	return (intr_event_remove_handler(cookie));
}

static void
intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
{
	struct intr_handler *ih, *ihn, *ihp;

	ihp = NULL;
	CK_SLIST_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
		/*
		 * If this handler is marked for death, remove it from
		 * the list of handlers and wake up the sleeper.
		 */
		if (ih->ih_flags & IH_DEAD) {
			mtx_lock(&ie->ie_lock);
			if (ihp == NULL)
				CK_SLIST_REMOVE_HEAD(&ie->ie_handlers, ih_next);
			else
				CK_SLIST_REMOVE_AFTER(ihp, ih_next);
			ih->ih_flags &= ~IH_DEAD;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
			continue;
		}

		/*
		 * Now that we know that the current element won't be removed
		 * update the previous element.
		 */
		ihp = ih;

		if ((ih->ih_flags & IH_CHANGED) != 0) {
			mtx_lock(&ie->ie_lock);
			ih->ih_flags &= ~IH_CHANGED;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
		}

		/* Skip filter only handlers */
		if (ih->ih_handler == NULL)
			continue;

		/* Skip suspended handlers */
		if ((ih->ih_flags & IH_SUSP) != 0)
			continue;

		/*
		 * For software interrupt threads, we only execute
		 * handlers that have their need flag set.  Hardware
		 * interrupt threads always invoke all of their handlers.
		 *
		 * ih_need can only be 0 or 1.  Failed cmpset below
		 * means that there is no request to execute handlers,
		 * so a retry of the cmpset is not needed.
		 */
		if ((ie->ie_flags & IE_SOFT) != 0 &&
		    atomic_cmpset_int(&ih->ih_need, 1, 0) == 0)
			continue;

		/* Execute this handler. */
		CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
		    __func__, p->p_pid, (void *)ih->ih_handler,
		    ih->ih_argument, ih->ih_name, ih->ih_flags);

		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_lock(&Giant);
		ih->ih_handler(ih->ih_argument);
		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_unlock(&Giant);
	}
}

static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{

	/* Interrupt handlers should not sleep. */
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_NO_SLEEPING();
	intr_event_execute_handlers(p, ie);
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_SLEEPING_OK();

	/*
	 * Interrupt storm handling:
	 *
	 * If this interrupt source is currently storming, then throttle
	 * it to only fire the handler once per clock tick.
	 *
	 * If this interrupt source is not currently storming, but the
	 * number of back to back interrupts exceeds the storm threshold,
	 * then enter storming mode.
	 */
	if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
	    !(ie->ie_flags & IE_SOFT)) {
		/* Report the message only once every second. */
		if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
			printf(
	"interrupt storm detected on \"%s\"; throttling interrupt source\n",
			    ie->ie_name);
		}
		pause("istorm", 1);
	} else
		ie->ie_count++;

	/*
	 * Now that all the handlers have had a chance to run, reenable
	 * the interrupt source.
	 */
	if (ie->ie_post_ithread != NULL)
		ie->ie_post_ithread(ie->ie_source);
}

/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	int wake;

	td = curthread;
	p = td->td_proc;
	ithd = (struct intr_thread *)arg;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;
	wake = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 *
		 * The load_acq part of the following cmpset ensures
		 * that the load of ih_need in ithread_execute_handlers()
		 * is ordered after the load of it_need here.
		 */
		while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0)
			ithread_execute_handlers(p, ie);
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (atomic_load_acq_int(&ithd->it_need) == 0 &&
		    (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL | SWT_IWAIT, NULL);
		}
		if (ithd->it_flags & IT_WAIT) {
			wake = 1;
			ithd->it_flags &= ~IT_WAIT;
		}
		thread_unlock(td);
		if (wake) {
			wakeup(ithd);
			wake = 0;
		}
	}
}

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:                      the event connected to this interrupt.
 * o frame:                   some archs (i.e. i386) pass a frame to some
 *                            handlers as their main argument.
 * Return value:
 * o 0:                       everything ok.
 * o EINVAL:                  stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_handler *ih;
	struct trapframe *oldframe;
	struct thread *td;
	int phase;
	int ret;
	bool filter, thread;

	td = curthread;

#ifdef KSTACK_USAGE_PROF
	intr_prof_stack_use(td, frame);
#endif

	/* An interrupt with no event or handlers is a stray interrupt. */
	if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	/*
	 * Execute fast interrupt handlers directly.
	 * To support clock handlers, if a handler registers
	 * with a NULL argument, then we pass it a pointer to
	 * a trapframe as its argument.
	 */
	td->td_intr_nesting_level++;
	filter = false;
	thread = false;
	ret = 0;
	critical_enter();
	oldframe = td->td_intr_frame;
	td->td_intr_frame = frame;

	phase = ie->ie_phase;
	atomic_add_int(&ie->ie_active[phase], 1);

	/*
	 * This fence is required to ensure that no later loads are
	 * re-ordered before the ie_active store.
	 */
	atomic_thread_fence_seq_cst();

	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if ((ih->ih_flags & IH_SUSP) != 0)
			continue;
		if (ih->ih_filter == NULL) {
			thread = true;
			continue;
		}
		CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
		    ih->ih_filter, ih->ih_argument == NULL ? frame :
		    ih->ih_argument, ih->ih_name);
		if (ih->ih_argument == NULL)
			ret = ih->ih_filter(frame);
		else
			ret = ih->ih_filter(ih->ih_argument);
		KASSERT(ret == FILTER_STRAY ||
		    ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
		    (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
		    ("%s: incorrect return value %#x from %s", __func__, ret,
		    ih->ih_name));
		filter = filter || ret == FILTER_HANDLED;

		/*
		 * Wrapper handler special handling:
		 *
		 * in some particular cases (like pccard and pccbb),
		 * the _real_ device handler is wrapped in a couple of
		 * functions - a filter wrapper and an ithread wrapper.
		 * In this case (and just in this case), the filter wrapper
		 * could ask the system to schedule the ithread and mask
		 * the interrupt source if the wrapped handler is composed
		 * of just an ithread handler.
		 *
		 * TODO: write a generic wrapper to avoid people rolling
		 * their own.
		 */
		if (!thread) {
			if (ret == FILTER_SCHEDULE_THREAD)
				thread = true;
		}
	}
	atomic_add_rel_int(&ie->ie_active[phase], -1);

	td->td_intr_frame = oldframe;

	if (thread) {
		if (ie->ie_pre_ithread != NULL)
			ie->ie_pre_ithread(ie->ie_source);
	} else {
		if (ie->ie_post_filter != NULL)
			ie->ie_post_filter(ie->ie_source);
	}

	/* Schedule the ithread if needed. */
	if (thread) {
		int error __unused;

		error = intr_event_schedule_thread(ie);
		KASSERT(error == 0, ("bad stray interrupt"));
	}
	critical_exit();
	td->td_intr_nesting_level--;
#ifdef notyet
	/* The interrupt is not acknowledged by any filter and has no ithread. */
	if (!thread && !filter)
		return (EINVAL);
#endif
	return (0);
}
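
/*
 * Illustrative sketch (not part of the original file): the filter/ithread
 * split handled above corresponds to driver handlers such as the following
 * hypothetical pair, registered via bus_setup_intr(9):
 *
 *	static int
 *	foo_filter(void *arg)
 *	{
 *		struct foo_softc *sc = arg;	// hypothetical softc
 *
 *		if (!foo_intr_pending(sc))	// placeholder status check
 *			return (FILTER_STRAY);
 *		return (FILTER_SCHEDULE_THREAD);
 *	}
 *
 *	static void
 *	foo_ithread(void *arg)
 *	{
 *		// heavier work runs here in the ithread, after
 *		// ie_pre_ithread has run and before ie_post_ithread.
 *	}
 *
 * A filter that fully services the interrupt would instead return
 * FILTER_HANDLED, and no ithread would be scheduled for it.
 */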

#ifdef DDB
/*
 * Dump details about an interrupt handler
 */
static void
db_dump_intrhand(struct intr_handler *ih)
{
	int comma;

	db_printf("\t%-10s ", ih->ih_name);
	switch (ih->ih_pri) {
	case PI_REALTIME:
		db_printf("CLK ");
		break;
	case PI_AV:
		db_printf("AV  ");
		break;
	case PI_TTY:
		db_printf("TTY ");
		break;
	case PI_NET:
		db_printf("NET ");
		break;
	case PI_DISK:
		db_printf("DISK");
		break;
	case PI_DULL:
		db_printf("DULL");
		break;
	default:
		if (ih->ih_pri >= PI_SOFT)
			db_printf("SWI ");
		else
			db_printf("%4u", ih->ih_pri);
		break;
	}
	db_printf(" ");
	if (ih->ih_filter != NULL) {
		db_printf("[F]");
		db_printsym((uintptr_t)ih->ih_filter, DB_STGY_PROC);
	}
	if (ih->ih_handler != NULL) {
		if (ih->ih_filter != NULL)
			db_printf(",");
		db_printf("[H]");
		db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
	}
	db_printf("(%p)", ih->ih_argument);
	if (ih->ih_need ||
	    (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
	    IH_MPSAFE)) != 0) {
		db_printf(" {");
		comma = 0;
		if (ih->ih_flags & IH_EXCLUSIVE) {
			if (comma)
				db_printf(", ");
			db_printf("EXCL");
			comma = 1;
		}
		if (ih->ih_flags & IH_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ih->ih_flags & IH_DEAD) {
			if (comma)
				db_printf(", ");
			db_printf("DEAD");
			comma = 1;
		}
		if (ih->ih_flags & IH_MPSAFE) {
			if (comma)
				db_printf(", ");
			db_printf("MPSAFE");
			comma = 1;
		}
		if (ih->ih_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");
}

/*
 * Dump details about an event.
 */
void
db_dump_intr_event(struct intr_event *ie, int handlers)
{
	struct intr_handler *ih;
	struct intr_thread *it;
	int comma;

	db_printf("%s ", ie->ie_fullname);
	it = ie->ie_thread;
	if (it != NULL)
		db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
	else
		db_printf("(no thread)");
	if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
	    (it != NULL && it->it_need)) {
		db_printf(" {");
		comma = 0;
		if (ie->ie_flags & IE_SOFT) {
			db_printf("SOFT");
			comma = 1;
		}
		if (ie->ie_flags & IE_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ie->ie_flags & IE_ADDING_THREAD) {
			if (comma)
				db_printf(", ");
			db_printf("ADDING_THREAD");
			comma = 1;
		}
		if (it != NULL && it->it_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");

	if (handlers)
		CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next)
			db_dump_intrhand(ih);
}

/*
 * Dump data about interrupt handlers
 */
DB_SHOW_COMMAND(intr, db_show_intr)
{
	struct intr_event *ie;
	int all, verbose;

	verbose = strchr(modif, 'v') != NULL;
	all = strchr(modif, 'a') != NULL;
	TAILQ_FOREACH(ie, &event_list, ie_list) {
		if (!all && CK_SLIST_EMPTY(&ie->ie_handlers))
			continue;
		db_dump_intr_event(ie, verbose);
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */

/*
 * Start standard software interrupt threads
 */
static void
start_softintr(void *dummy)
{

	if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
		panic("died while creating vm swi ithread");
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
    NULL);

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in machine
 * dependent code.  The layout of intrnames and intrcnt however is machine
 * independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrnames, sintrnames, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
#ifdef SCTL_MASK32
	uint32_t *intrcnt32;
	unsigned i;
	int error;

	if (req->flags & SCTL_MASK32) {
		if (!req->oldptr)
			return (sysctl_handle_opaque(oidp, NULL,
			    sintrcnt / 2, req));
		intrcnt32 = malloc(sintrcnt / 2, M_TEMP, M_NOWAIT);
		if (intrcnt32 == NULL)
			return (ENOMEM);
		for (i = 0; i < sintrcnt / sizeof (u_long); i++)
			intrcnt32[i] = intrcnt[i];
		error = sysctl_handle_opaque(oidp, intrcnt32, sintrcnt / 2, req);
		free(intrcnt32, M_TEMP);
		return (error);
	}
#endif
	return (sysctl_handle_opaque(oidp, intrcnt, sintrcnt, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");
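
/*
 * Illustrative sketch (not part of the original file): from userland the two
 * sysctls above can be read with sysctlbyname(3).  hw.intrnames is a packed
 * sequence of NUL-terminated names and hw.intrcnt a parallel array of u_long
 * counters, roughly:
 *
 *	size_t namesz = 0, cntsz = 0;
 *	sysctlbyname("hw.intrnames", NULL, &namesz, NULL, 0);
 *	sysctlbyname("hw.intrcnt", NULL, &cntsz, NULL, 0);
 *	char *names = malloc(namesz);
 *	u_long *cnt = malloc(cntsz);
 *	sysctlbyname("hw.intrnames", names, &namesz, NULL, 0);
 *	sysctlbyname("hw.intrcnt", cnt, &cntsz, NULL, 0);
 *	// walk names with strlen()+1 strides, indexing cnt in parallel,
 *	// much as the DDB command below does with the in-kernel arrays.
 *
 * Error handling is omitted; vmstat(8) and systat(1) are the real consumers.
 */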

#ifdef DDB
/*
 * DDB command to dump the interrupt statistics.
 */
DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
{
	u_long *i;
	char *cp;
	u_int j;

	cp = intrnames;
	j = 0;
	for (i = intrcnt; j < (sintrcnt / sizeof(u_long)) && !db_pager_quit;
	    i++, j++) {
		if (*cp == '\0')
			break;
		if (*i != 0)
			db_printf("%s\t%lu\n", cp, *i);
		cp += strlen(cp) + 1;
	}
}
#endif