/*-
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
	struct intr_event *it_event;
	struct thread *it_thread;	/* Kernel thread. */
	int	it_flags;		/* (j) IT_* flags. */
	int	it_need;		/* Needs service. */
};

/* Interrupt thread flags kept in it_flags */
#define	IT_DEAD		0x000001	/* Thread is waiting to exit. */
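
/*
 * Entropy record harvested on each interrupt: the current thread and the
 * identity of the interrupt event, fed to random_harvest() in
 * intr_event_schedule_thread() below.
 */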
struct intr_entropy {
	struct thread *td;
	uintptr_t event;
};

struct intr_event *clk_intr_event;
struct intr_event *tty_intr_event;
void	*softclock_ih;
void	*vm_ih;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static int intr_storm_threshold = 500;
TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);

static void	intr_event_update(struct intr_event *ie);
static struct intr_thread *ithread_create(const char *name);
static void	ithread_destroy(struct intr_thread *ithread);
static void	ithread_execute_handlers(struct proc *p, struct intr_event *ie);
static void	ithread_loop(void *);
static void	ithread_update(struct intr_thread *ithd);
static void	start_softintr(void *);

u_char
intr_priority(enum intr_type flags)
{
	u_char pri;

	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
	switch (flags) {
	case INTR_TYPE_TTY:
		pri = PI_TTYLOW;
		break;
	case INTR_TYPE_BIO:
		/*
		 * XXX We need to refine this.  BSD/OS distinguishes
		 * between tape and disk priorities.
		 */
		pri = PI_DISK;
		break;
	case INTR_TYPE_NET:
		pri = PI_NET;
		break;
	case INTR_TYPE_CAM:
		pri = PI_DISK;		/* XXX or PI_CAM? */
		break;
	case INTR_TYPE_AV:		/* Audio/video */
		pri = PI_AV;
		break;
	case INTR_TYPE_CLK:
		pri = PI_REALTIME;
		break;
	case INTR_TYPE_MISC:
		pri = PI_DULL;		/* don't care */
		break;
	default:
		/* We didn't specify an interrupt level. */
		panic("intr_priority: no interrupt type in flags");
	}

	return (pri);
}

/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
	struct intr_event *ie;
	struct thread *td;
	u_char pri;

	ie = ithd->it_event;
	td = ithd->it_thread;

	/* Determine the overall priority of this event. */
	if (TAILQ_EMPTY(&ie->ie_handlers))
		pri = PRI_MAX_ITHD;
	else
		pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;

	/* Update name and priority. */
	strlcpy(td->td_proc->p_comm, ie->ie_fullname,
	    sizeof(td->td_proc->p_comm));
	mtx_lock_spin(&sched_lock);
	sched_prio(td, pri);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
	struct intr_handler *ih;
	char *last;
	int missed, space;

	/* Start off with no entropy and just the name of the event. */
	mtx_assert(&ie->ie_lock, MA_OWNED);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	ie->ie_flags &= ~IE_ENTROPY;
	missed = 0;
	space = 1;

	/* Run through all the handlers updating values. */
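	/*
	 * Each pass appends the handler's name to ie_fullname (separated
	 * by a space) and re-derives IE_ENTROPY from the handlers that
	 * remain; names that no longer fit are counted in "missed".
	 */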
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
		    sizeof(ie->ie_fullname)) {
			strcat(ie->ie_fullname, " ");
			strcat(ie->ie_fullname, ih->ih_name);
			space = 0;
		} else
			missed++;
		if (ih->ih_flags & IH_ENTROPY)
			ie->ie_flags |= IE_ENTROPY;
	}

	/*
	 * If the handler names were too long, add +'s to indicate missing
	 * names.  If we run out of room and still have +'s to add, change
	 * the last character from a + to a *.
	 */
	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
	while (missed-- > 0) {
		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
			if (*last == '+') {
				*last = '*';
				break;
			} else
				*last = '+';
		} else if (space) {
			strcat(ie->ie_fullname, " +");
			space = 0;
		} else
			strcat(ie->ie_fullname, "+");
	}

	/*
	 * If this event has an ithread, update its priority and
	 * name.
	 */
	if (ie->ie_thread != NULL)
		ithread_update(ie->ie_thread);
	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}

int
intr_event_create(struct intr_event **event, void *source, int flags,
    void (*enable)(void *), const char *fmt, ...)
{
	struct intr_event *ie;
	va_list ap;

	/* The only valid flag during creation is IE_SOFT. */
	if ((flags & ~IE_SOFT) != 0)
		return (EINVAL);
	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
	ie->ie_source = source;
	ie->ie_enable = enable;
	ie->ie_flags = flags;
	TAILQ_INIT(&ie->ie_handlers);
	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

	va_start(ap, fmt);
	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
	va_end(ap);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	mtx_pool_lock(mtxpool_sleep, &event_list);
	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
	mtx_pool_unlock(mtxpool_sleep, &event_list);
	if (event != NULL)
		*event = ie;
	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
	return (0);
}

int
intr_event_destroy(struct intr_event *ie)
{

	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		mtx_unlock(&ie->ie_lock);
		return (EBUSY);
	}
	mtx_pool_lock(mtxpool_sleep, &event_list);
	TAILQ_REMOVE(&event_list, ie, ie_list);
	mtx_pool_unlock(mtxpool_sleep, &event_list);
#ifndef notyet
	if (ie->ie_thread != NULL) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	mtx_destroy(&ie->ie_lock);
	free(ie, M_ITHREAD);
	return (0);
}

static struct intr_thread *
ithread_create(const char *name)
{
	struct intr_thread *ithd;
	struct thread *td;
	struct proc *p;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kthread_create(ithread_loop, ithd, &p, RFSTOPPED | RFHIGHPID,
	    0, "%s", name);
	if (error)
		panic("kthread_create() failed with %d", error);
	td = FIRST_THREAD_IN_PROC(p);	/* XXXKSE */
	mtx_lock_spin(&sched_lock);
#ifdef KSE
	td->td_ksegrp->kg_pri_class = PRI_ITHD;
#else
	td->td_pri_class = PRI_ITHD;
#endif
	TD_SET_IWAIT(td);
	mtx_unlock_spin(&sched_lock);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}
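
/*
 * Ask an interrupt thread to terminate.  The thread is only marked
 * IT_DEAD here; it frees its own intr_thread structure and exits the
 * next time around ithread_loop().
 */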
static void
ithread_destroy(struct intr_thread *ithread)
{
	struct thread *td;

	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
	td = ithread->it_thread;
	mtx_lock_spin(&sched_lock);
	ithread->it_flags |= IT_DEAD;
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		setrunqueue(td, SRQ_INTR);
	}
	mtx_unlock_spin(&sched_lock);
}

int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_intr_t handler, void *arg, u_char pri, enum intr_type flags,
    void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || handler == NULL)
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	ih->ih_name = name;
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_FAST)
		ih->ih_flags = IH_FAST;
	else if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Add the new handler to the event in priority order. */
	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	intr_event_update(ie);

	/* Create a thread if we need one. */
	while (ie->ie_thread == NULL && !(flags & INTR_FAST)) {
		if (ie->ie_flags & IE_ADDING_THREAD)
			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
		else {
			ie->ie_flags |= IE_ADDING_THREAD;
			mtx_unlock(&ie->ie_lock);
			it = ithread_create("intr: newborn");
			mtx_lock(&ie->ie_lock);
			ie->ie_flags &= ~IE_ADDING_THREAD;
			ie->ie_thread = it;
			it->it_event = ie;
			ithread_update(it);
			wakeup(ie);
		}
	}
	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}

int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
#ifdef INVARIANTS
	struct intr_handler *ih;
#endif
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ie->ie_lock);
	/* ih is NULL when the loop falls through, so name the handler. */
	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
	    handler->ih_name, ie->ie_name);
ok:
#endif
	/*
	 * If there is no ithread, then just remove the handler and return.
	 * XXX: Note that an INTR_FAST handler might be running on another
	 * CPU!
	 */
	if (ie->ie_thread == NULL) {
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 *
	 * During a cold boot while cold is set, msleep() does not sleep,
	 * so we have to remove the handler here rather than letting the
	 * thread do it.
	 */
	mtx_lock_spin(&sched_lock);
	if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 */
		ie->ie_thread->it_need = 1;
	} else
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
	mtx_unlock_spin(&sched_lock);
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	intr_event_update(ie);
#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(4).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (!(ih->ih_flags & IH_FAST)) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}

int
intr_event_schedule_thread(struct intr_event *ie)
{
	struct intr_entropy entropy;
	struct intr_thread *it;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
	    ie->ie_thread == NULL)
		return (EINVAL);

	ctd = curthread;
	it = ie->ie_thread;
	td = it->it_thread;
	p = td->td_proc;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && (ie->ie_flags & IE_ENTROPY)) {
		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
		    p->p_pid, p->p_comm);
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest(&entropy, sizeof(entropy), 2, 0,
		    RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, grab sched_lock and see if we actually need to
	 * put this thread on the runqueue.
	 */
	it->it_need = 1;
	mtx_lock_spin(&sched_lock);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    p->p_comm);
		TD_CLR_IWAIT(td);
		setrunqueue(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, p->p_comm, it->it_need, td->td_state);
	}
	mtx_unlock_spin(&sched_lock);

	return (0);
}

/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 */
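/*
 * The handler priority passed to intr_event_add_handler() below is
 * (pri * RQ_PPQ) + PI_SOFT: each SWI_* level is scaled by RQ_PPQ and
 * offset from PI_SOFT so that different software interrupt levels land
 * on distinct run queues (see the XXXKSE note below).
 */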
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
    void *arg, int pri, enum intr_type flags, void **cookiep)
{
	struct intr_event *ie;
	int error;

	if (flags & (INTR_FAST | INTR_ENTROPY))
		return (EINVAL);

	ie = (eventp != NULL) ? *eventp : NULL;

	if (ie != NULL) {
		if (!(ie->ie_flags & IE_SOFT))
			return (EINVAL);
	} else {
		error = intr_event_create(&ie, NULL, IE_SOFT, NULL,
		    "swi%d:", pri);
		if (error)
			return (error);
		if (eventp != NULL)
			*eventp = ie;
	}
	return (intr_event_add_handler(ie, name, handler, arg,
	    (pri * RQ_PPQ) + PI_SOFT, flags, cookiep));
	/* XXXKSE: think of a better way to get separate queues */
}

/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
	struct intr_handler *ih = (struct intr_handler *)cookie;
	struct intr_event *ie = ih->ih_event;
	int error;

	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
	    ih->ih_need);

	/*
	 * Set ih_need for this handler so that if the ithread is already
	 * running it will execute this handler on the next pass.  Otherwise,
	 * it will execute it the next time it runs.
	 */
	atomic_store_rel_int(&ih->ih_need, 1);

	if (!(flags & SWI_DELAY)) {
		PCPU_LAZY_INC(cnt.v_soft);
		error = intr_event_schedule_thread(ie);
		KASSERT(error == 0, ("stray software interrupt"));
	}
}

/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

	return (intr_event_remove_handler(cookie));
}

static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{
	struct intr_handler *ih, *ihn;

	/* Interrupt handlers should not sleep. */
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_NO_SLEEPING();
	TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {

		/*
		 * If this handler is marked for death, remove it from
		 * the list of handlers and wake up the sleeper.
		 */
		if (ih->ih_flags & IH_DEAD) {
			mtx_lock(&ie->ie_lock);
			TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
			ih->ih_flags &= ~IH_DEAD;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
			continue;
		}

		/*
		 * For software interrupt threads, we only execute
		 * handlers that have their need flag set.  Hardware
		 * interrupt threads always invoke all of their handlers.
		 */
		if (ie->ie_flags & IE_SOFT) {
			if (!ih->ih_need)
				continue;
			else
				atomic_store_rel_int(&ih->ih_need, 0);
		}

		/* Fast handlers are handled in primary interrupt context. */
		if (ih->ih_flags & IH_FAST)
			continue;

		/* Execute this handler. */
		CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
		    __func__, p->p_pid, (void *)ih->ih_handler,
		    ih->ih_argument, ih->ih_name, ih->ih_flags);

		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_lock(&Giant);
		ih->ih_handler(ih->ih_argument);
		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_unlock(&Giant);
	}
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_SLEEPING_OK();

	/*
	 * Interrupt storm handling:
	 *
	 * If this interrupt source is currently storming, then throttle
	 * it to only fire the handler once per clock tick.
	 *
	 * If this interrupt source is not currently storming, but the
	 * number of back to back interrupts exceeds the storm threshold,
	 * then enter storming mode.
	 */
	if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold) {
		if (ie->ie_warned == 0) {
			printf(
	"Interrupt storm detected on \"%s\"; throttling interrupt source\n",
			    ie->ie_name);
			ie->ie_warned = 1;
		}
		tsleep(&ie->ie_count, 0, "istorm", 1);
	} else
		ie->ie_count++;

	/*
	 * Now that all the handlers have had a chance to run, reenable
	 * the interrupt source.
	 */
	if (ie->ie_enable != NULL)
		ie->ie_enable(ie->ie_source);
}

/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	ithd = (struct intr_thread *)arg;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, p->p_comm);
			free(ithd, M_ITHREAD);
			kthread_exit(0);
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 */
		while (ithd->it_need) {
			/*
			 * This might need a full read and write barrier
			 * to make sure that this write posts before any
			 * of the memory or device accesses in the
			 * handlers.
			 */
			atomic_store_rel_int(&ithd->it_need, 0);
			ithread_execute_handlers(p, ie);
		}
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
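		/*
		 * Both this re-check of it_need and the
		 * TD_AWAITING_INTR() test in intr_event_schedule_thread()
		 * are performed under sched_lock, so an it_need set after
		 * the while loop above cannot be lost while we go idle.
		 */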
		mtx_lock_spin(&sched_lock);
		if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL, NULL);
		}
		mtx_unlock_spin(&sched_lock);
	}
}

#ifdef DDB
/*
 * Dump details about an interrupt handler.
 */
static void
db_dump_intrhand(struct intr_handler *ih)
{
	int comma;

	db_printf("\t%-10s ", ih->ih_name);
	switch (ih->ih_pri) {
	case PI_REALTIME:
		db_printf("CLK ");
		break;
	case PI_AV:
		db_printf("AV  ");
		break;
	case PI_TTYHIGH:
	case PI_TTYLOW:
		db_printf("TTY ");
		break;
	case PI_TAPE:
		db_printf("TAPE");
		break;
	case PI_NET:
		db_printf("NET ");
		break;
	case PI_DISK:
	case PI_DISKLOW:
		db_printf("DISK");
		break;
	case PI_DULL:
		db_printf("DULL");
		break;
	default:
		if (ih->ih_pri >= PI_SOFT)
			db_printf("SWI ");
		else
			db_printf("%4u", ih->ih_pri);
		break;
	}
	db_printf(" ");
	db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
	db_printf("(%p)", ih->ih_argument);
	if (ih->ih_need ||
	    (ih->ih_flags & (IH_FAST | IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
	    IH_MPSAFE)) != 0) {
		db_printf(" {");
		comma = 0;
		if (ih->ih_flags & IH_FAST) {
			db_printf("FAST");
			comma = 1;
		}
		if (ih->ih_flags & IH_EXCLUSIVE) {
			if (comma)
				db_printf(", ");
			db_printf("EXCL");
			comma = 1;
		}
		if (ih->ih_flags & IH_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ih->ih_flags & IH_DEAD) {
			if (comma)
				db_printf(", ");
			db_printf("DEAD");
			comma = 1;
		}
		if (ih->ih_flags & IH_MPSAFE) {
			if (comma)
				db_printf(", ");
			db_printf("MPSAFE");
			comma = 1;
		}
		if (ih->ih_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");
}

/*
 * Dump details about an event.
 */
void
db_dump_intr_event(struct intr_event *ie, int handlers)
{
	struct intr_handler *ih;
	struct intr_thread *it;
	int comma;

	db_printf("%s ", ie->ie_fullname);
	it = ie->ie_thread;
	if (it != NULL)
		db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
	else
		db_printf("(no thread)");
	if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
	    (it != NULL && it->it_need)) {
		db_printf(" {");
		comma = 0;
		if (ie->ie_flags & IE_SOFT) {
			db_printf("SOFT");
			comma = 1;
		}
		if (ie->ie_flags & IE_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ie->ie_flags & IE_ADDING_THREAD) {
			if (comma)
				db_printf(", ");
			db_printf("ADDING_THREAD");
			comma = 1;
		}
		if (it != NULL && it->it_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");

	if (handlers)
		TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
			db_dump_intrhand(ih);
}

/*
 * Dump data about interrupt handlers.
 */
DB_SHOW_COMMAND(intr, db_show_intr)
{
	struct intr_event *ie;
	int all, verbose;

	verbose = index(modif, 'v') != NULL;
	all = index(modif, 'a') != NULL;
	TAILQ_FOREACH(ie, &event_list, ie_list) {
		if (!all && TAILQ_EMPTY(&ie->ie_handlers))
			continue;
		db_dump_intr_event(ie, verbose);
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */

/*
 * Start standard software interrupt threads.
 */
static void
start_softintr(void *dummy)
{
	struct proc *p;

	if (swi_add(&clk_intr_event, "clock", softclock, NULL, SWI_CLOCK,
		INTR_MPSAFE, &softclock_ih) ||
	    swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
		panic("died while creating standard software ithreads");

	p = clk_intr_event->ie_thread->it_thread->td_proc;
	PROC_LOCK(p);
	p->p_flag |= P_NOLOAD;
	PROC_UNLOCK(p);
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL)

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however
 * is machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * we calculate things at run time.
 */
static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames,
	    req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrcnt,
	    (char *)eintrcnt - (char *)intrcnt, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");

#ifdef DDB
/*
 * DDB command to dump the interrupt statistics.
 */
DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
{
	u_long *i;
	char *cp;

	cp = intrnames;
	for (i = intrcnt; i != eintrcnt && !db_pager_quit; i++) {
		if (*cp == '\0')
			break;
		if (*i != 0)
			db_printf("%s\t%lu\n", cp, *i);
		cp += strlen(cp) + 1;
	}
}
#endif /* DDB */