/*-
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
	struct intr_event *it_event;
	struct thread *it_thread;	/* Kernel thread. */
	int	it_flags;		/* (j) IT_* flags. */
	int	it_need;		/* Needs service. */
};

/* Interrupt thread flags kept in it_flags */
#define	IT_DEAD		0x000001	/* Thread is waiting to exit. */
struct intr_entropy {
	struct thread *td;
	uintptr_t event;
};

struct intr_event *clk_intr_event;
struct intr_event *tty_intr_event;
void	*softclock_ih;
void	*vm_ih;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static int intr_storm_threshold = 500;
TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);

static void	intr_event_update(struct intr_event *ie);
static struct intr_thread *ithread_create(const char *name);
static void	ithread_destroy(struct intr_thread *ithread);
static void	ithread_execute_handlers(struct proc *p, struct intr_event *ie);
static void	ithread_loop(void *);
static void	ithread_update(struct intr_thread *ithd);
static void	start_softintr(void *);

/* Map an interrupt type to an ithread priority. */
u_char
intr_priority(enum intr_type flags)
{
	u_char pri;

	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
	switch (flags) {
	case INTR_TYPE_TTY:
		pri = PI_TTYLOW;
		break;
	case INTR_TYPE_BIO:
		/*
		 * XXX We need to refine this.  BSD/OS distinguishes
		 * between tape and disk priorities.
		 */
		pri = PI_DISK;
		break;
	case INTR_TYPE_NET:
		pri = PI_NET;
		break;
	case INTR_TYPE_CAM:
		pri = PI_DISK;		/* XXX or PI_CAM? */
		break;
	case INTR_TYPE_AV:		/* Audio/video */
		pri = PI_AV;
		break;
	case INTR_TYPE_CLK:
		pri = PI_REALTIME;
		break;
	case INTR_TYPE_MISC:
		pri = PI_DULL;		/* don't care */
		break;
	default:
		/* We didn't specify an interrupt level. */
		panic("intr_priority: no interrupt type in flags");
	}

	return pri;
}
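/*
 * Usage sketch (editorial example, not part of the original file): callers
 * pass exactly one INTR_TYPE_* bit, e.g.
 *
 *	u_char pri = intr_priority(INTR_TYPE_NET);	(yields PI_NET)
 *
 * Passing zero type bits, or more than one, reaches the default case and
 * panics, since the switch above only matches single-type values.
 */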
/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
	struct intr_event *ie;
	struct thread *td;
	u_char pri;

	ie = ithd->it_event;
	td = ithd->it_thread;

	/* Determine the overall priority of this event. */
	if (TAILQ_EMPTY(&ie->ie_handlers))
		pri = PRI_MAX_ITHD;
	else
		pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;

	/* Update name and priority. */
	strlcpy(td->td_proc->p_comm, ie->ie_fullname,
	    sizeof(td->td_proc->p_comm));
	mtx_lock_spin(&sched_lock);
	sched_prio(td, pri);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
	struct intr_handler *ih;
	char *last;
	int missed, space;

	/* Start off with no entropy and just the name of the event. */
	mtx_assert(&ie->ie_lock, MA_OWNED);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	ie->ie_flags &= ~IE_ENTROPY;
	missed = 0;
	space = 1;

	/* Run through all the handlers updating values. */
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
		    sizeof(ie->ie_fullname)) {
			strcat(ie->ie_fullname, " ");
			strcat(ie->ie_fullname, ih->ih_name);
			space = 0;
		} else
			missed++;
		if (ih->ih_flags & IH_ENTROPY)
			ie->ie_flags |= IE_ENTROPY;
	}

	/*
	 * If the handler names were too long, add +'s to indicate missing
	 * names.  If we run out of room and still have +'s to add, change
	 * the last character from a + to a *.
	 */
	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
	while (missed-- > 0) {
		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
			if (*last == '+') {
				*last = '*';
				break;
			} else
				*last = '+';
		} else if (space) {
			strcat(ie->ie_fullname, " +");
			space = 0;
		} else
			strcat(ie->ie_fullname, "+");
	}

	/*
	 * If this event has an ithread, update its priority and
	 * name.
	 */
	if (ie->ie_thread != NULL)
		ithread_update(ie->ie_thread);
	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}

int
intr_event_create(struct intr_event **event, void *source, int flags,
    void (*enable)(void *), const char *fmt, ...)
{
	struct intr_event *ie;
	va_list ap;

	/* The only valid flag during creation is IE_SOFT. */
	if ((flags & ~IE_SOFT) != 0)
		return (EINVAL);
	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
	ie->ie_source = source;
	ie->ie_enable = enable;
	ie->ie_flags = flags;
	TAILQ_INIT(&ie->ie_handlers);
	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

	va_start(ap, fmt);
	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
	va_end(ap);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	mtx_pool_lock(mtxpool_sleep, &event_list);
	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
	mtx_pool_unlock(mtxpool_sleep, &event_list);
	if (event != NULL)
		*event = ie;
	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
	return (0);
}

int
intr_event_destroy(struct intr_event *ie)
{

	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		mtx_unlock(&ie->ie_lock);
		return (EBUSY);
	}
	mtx_pool_lock(mtxpool_sleep, &event_list);
	TAILQ_REMOVE(&event_list, ie, ie_list);
	mtx_pool_unlock(mtxpool_sleep, &event_list);
#ifndef notyet
	if (ie->ie_thread != NULL) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	mtx_destroy(&ie->ie_lock);
	free(ie, M_ITHREAD);
	return (0);
}
static struct intr_thread *
ithread_create(const char *name)
{
	struct intr_thread *ithd;
	struct thread *td;
	struct proc *p;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kthread_create(ithread_loop, ithd, &p, RFSTOPPED | RFHIGHPID,
	    0, "%s", name);
	if (error)
		panic("kthread_create() failed with %d", error);
	td = FIRST_THREAD_IN_PROC(p);	/* XXXKSE */
	mtx_lock_spin(&sched_lock);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	mtx_unlock_spin(&sched_lock);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}

static void
ithread_destroy(struct intr_thread *ithread)
{
	struct thread *td;

	CTR2(KTR_INTR, "%s: killing %s", __func__,
	    ithread->it_event->ie_name);
	td = ithread->it_thread;
	mtx_lock_spin(&sched_lock);
	ithread->it_flags |= IT_DEAD;
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		setrunqueue(td, SRQ_INTR);
	}
	mtx_unlock_spin(&sched_lock);
}

int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_intr_t handler, void *arg, u_char pri, enum intr_type flags,
    void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || handler == NULL)
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	ih->ih_name = name;
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_FAST)
		ih->ih_flags = IH_FAST;
	else if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Add the new handler to the event in priority order. */
	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	intr_event_update(ie);

	/* Create a thread if we need one. */
	while (ie->ie_thread == NULL && !(flags & INTR_FAST)) {
		if (ie->ie_flags & IE_ADDING_THREAD)
			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
		else {
			ie->ie_flags |= IE_ADDING_THREAD;
			mtx_unlock(&ie->ie_lock);
			it = ithread_create("intr: newborn");
			mtx_lock(&ie->ie_lock);
			ie->ie_flags &= ~IE_ADDING_THREAD;
			ie->ie_thread = it;
			it->it_event = ie;
			ithread_update(it);
			wakeup(ie);
		}
	}
	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}
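/*
 * Usage sketch (hypothetical driver code, not part of the original file):
 * machine dependent interrupt code typically creates one event per IRQ and
 * hangs driver handlers off of it.  Here foo_intr and sc are hypothetical:
 *
 *	struct intr_event *ie;
 *	void *cookie;
 *
 *	intr_event_create(&ie, source, 0, enable_fn, "irq%d", irq);
 *	intr_event_add_handler(ie, device_get_nameunit(dev), foo_intr, sc,
 *	    intr_priority(INTR_TYPE_NET), INTR_TYPE_NET | INTR_MPSAFE,
 *	    &cookie);
 *
 * The returned cookie identifies the handler for later calls such as
 * intr_handler_source() and intr_event_remove_handler().
 */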
402 */ 403 void * 404 intr_handler_source(void *cookie) 405 { 406 struct intr_handler *ih; 407 struct intr_event *ie; 408 409 ih = (struct intr_handler *)cookie; 410 if (ih == NULL) 411 return (NULL); 412 ie = ih->ih_event; 413 KASSERT(ie != NULL, 414 ("interrupt handler \"%s\" has a NULL interrupt event", 415 ih->ih_name)); 416 return (ie->ie_source); 417 } 418 419 int 420 intr_event_remove_handler(void *cookie) 421 { 422 struct intr_handler *handler = (struct intr_handler *)cookie; 423 struct intr_event *ie; 424 #ifdef INVARIANTS 425 struct intr_handler *ih; 426 #endif 427 #ifdef notyet 428 int dead; 429 #endif 430 431 if (handler == NULL) 432 return (EINVAL); 433 ie = handler->ih_event; 434 KASSERT(ie != NULL, 435 ("interrupt handler \"%s\" has a NULL interrupt event", 436 handler->ih_name)); 437 mtx_lock(&ie->ie_lock); 438 CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name, 439 ie->ie_name); 440 #ifdef INVARIANTS 441 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) 442 if (ih == handler) 443 goto ok; 444 mtx_unlock(&ie->ie_lock); 445 panic("interrupt handler \"%s\" not found in interrupt event \"%s\"", 446 ih->ih_name, ie->ie_name); 447 ok: 448 #endif 449 /* 450 * If there is no ithread, then just remove the handler and return. 451 * XXX: Note that an INTR_FAST handler might be running on another 452 * CPU! 453 */ 454 if (ie->ie_thread == NULL) { 455 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 456 mtx_unlock(&ie->ie_lock); 457 free(handler, M_ITHREAD); 458 return (0); 459 } 460 461 /* 462 * If the interrupt thread is already running, then just mark this 463 * handler as being dead and let the ithread do the actual removal. 464 * 465 * During a cold boot while cold is set, msleep() does not sleep, 466 * so we have to remove the handler here rather than letting the 467 * thread do it. 468 */ 469 mtx_lock_spin(&sched_lock); 470 if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) { 471 handler->ih_flags |= IH_DEAD; 472 473 /* 474 * Ensure that the thread will process the handler list 475 * again and remove this handler if it has already passed 476 * it on the list. 477 */ 478 ie->ie_thread->it_need = 1; 479 } else 480 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 481 mtx_unlock_spin(&sched_lock); 482 while (handler->ih_flags & IH_DEAD) 483 msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0); 484 intr_event_update(ie); 485 #ifdef notyet 486 /* 487 * XXX: This could be bad in the case of ppbus(8). Also, I think 488 * this could lead to races of stale data when servicing an 489 * interrupt. 490 */ 491 dead = 1; 492 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 493 if (!(ih->ih_flags & IH_FAST)) { 494 dead = 0; 495 break; 496 } 497 } 498 if (dead) { 499 ithread_destroy(ie->ie_thread); 500 ie->ie_thread = NULL; 501 } 502 #endif 503 mtx_unlock(&ie->ie_lock); 504 free(handler, M_ITHREAD); 505 return (0); 506 } 507 508 int 509 intr_event_schedule_thread(struct intr_event *ie) 510 { 511 struct intr_entropy entropy; 512 struct intr_thread *it; 513 struct thread *td; 514 struct thread *ctd; 515 struct proc *p; 516 517 /* 518 * If no ithread or no handlers, then we have a stray interrupt. 519 */ 520 if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || 521 ie->ie_thread == NULL) 522 return (EINVAL); 523 524 ctd = curthread; 525 it = ie->ie_thread; 526 td = it->it_thread; 527 p = td->td_proc; 528 529 /* 530 * If any of the handlers for this ithread claim to be good 531 * sources of entropy, then gather some. 
532 */ 533 if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) { 534 CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__, 535 p->p_pid, p->p_comm); 536 entropy.event = (uintptr_t)ie; 537 entropy.td = ctd; 538 random_harvest(&entropy, sizeof(entropy), 2, 0, 539 RANDOM_INTERRUPT); 540 } 541 542 KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name)); 543 544 /* 545 * Set it_need to tell the thread to keep running if it is already 546 * running. Then, grab sched_lock and see if we actually need to 547 * put this thread on the runqueue. 548 */ 549 it->it_need = 1; 550 mtx_lock_spin(&sched_lock); 551 if (TD_AWAITING_INTR(td)) { 552 CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid, 553 p->p_comm); 554 TD_CLR_IWAIT(td); 555 setrunqueue(td, SRQ_INTR); 556 } else { 557 CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d", 558 __func__, p->p_pid, p->p_comm, it->it_need, td->td_state); 559 } 560 mtx_unlock_spin(&sched_lock); 561 562 return (0); 563 } 564 565 /* 566 * Add a software interrupt handler to a specified event. If a given event 567 * is not specified, then a new event is created. 568 */ 569 int 570 swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler, 571 void *arg, int pri, enum intr_type flags, void **cookiep) 572 { 573 struct intr_event *ie; 574 int error; 575 576 if (flags & (INTR_FAST | INTR_ENTROPY)) 577 return (EINVAL); 578 579 ie = (eventp != NULL) ? *eventp : NULL; 580 581 if (ie != NULL) { 582 if (!(ie->ie_flags & IE_SOFT)) 583 return (EINVAL); 584 } else { 585 error = intr_event_create(&ie, NULL, IE_SOFT, NULL, 586 "swi%d:", pri); 587 if (error) 588 return (error); 589 if (eventp != NULL) 590 *eventp = ie; 591 } 592 return (intr_event_add_handler(ie, name, handler, arg, 593 (pri * RQ_PPQ) + PI_SOFT, flags, cookiep)); 594 /* XXKSE.. think of a better way to get separate queues */ 595 } 596 597 /* 598 * Schedule a software interrupt thread. 599 */ 600 void 601 swi_sched(void *cookie, int flags) 602 { 603 struct intr_handler *ih = (struct intr_handler *)cookie; 604 struct intr_event *ie = ih->ih_event; 605 int error; 606 607 CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name, 608 ih->ih_need); 609 610 /* 611 * Set ih_need for this handler so that if the ithread is already 612 * running it will execute this handler on the next pass. Otherwise, 613 * it will execute it the next time it runs. 614 */ 615 atomic_store_rel_int(&ih->ih_need, 1); 616 617 if (!(flags & SWI_DELAY)) { 618 PCPU_LAZY_INC(cnt.v_soft); 619 error = intr_event_schedule_thread(ie); 620 KASSERT(error == 0, ("stray software interrupt")); 621 } 622 } 623 624 /* 625 * Remove a software interrupt handler. Currently this code does not 626 * remove the associated interrupt event if it becomes empty. Calling code 627 * may do so manually via intr_event_destroy(), but that's not really 628 * an optimal interface. 629 */ 630 int 631 swi_remove(void *cookie) 632 { 633 634 return (intr_event_remove_handler(cookie)); 635 } 636 637 static void 638 ithread_execute_handlers(struct proc *p, struct intr_event *ie) 639 { 640 struct intr_handler *ih, *ihn; 641 642 /* Interrupt handlers should not sleep. */ 643 if (!(ie->ie_flags & IE_SOFT)) 644 THREAD_NO_SLEEPING(); 645 TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) { 646 647 /* 648 * If this handler is marked for death, remove it from 649 * the list of handlers and wake up the sleeper. 
650 */ 651 if (ih->ih_flags & IH_DEAD) { 652 mtx_lock(&ie->ie_lock); 653 TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next); 654 ih->ih_flags &= ~IH_DEAD; 655 wakeup(ih); 656 mtx_unlock(&ie->ie_lock); 657 continue; 658 } 659 660 /* 661 * For software interrupt threads, we only execute 662 * handlers that have their need flag set. Hardware 663 * interrupt threads always invoke all of their handlers. 664 */ 665 if (ie->ie_flags & IE_SOFT) { 666 if (!ih->ih_need) 667 continue; 668 else 669 atomic_store_rel_int(&ih->ih_need, 0); 670 } 671 672 /* Fast handlers are handled in primary interrupt context. */ 673 if (ih->ih_flags & IH_FAST) 674 continue; 675 676 /* Execute this handler. */ 677 CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x", 678 __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument, 679 ih->ih_name, ih->ih_flags); 680 681 if (!(ih->ih_flags & IH_MPSAFE)) 682 mtx_lock(&Giant); 683 ih->ih_handler(ih->ih_argument); 684 if (!(ih->ih_flags & IH_MPSAFE)) 685 mtx_unlock(&Giant); 686 } 687 if (!(ie->ie_flags & IE_SOFT)) 688 THREAD_SLEEPING_OK(); 689 690 /* 691 * Interrupt storm handling: 692 * 693 * If this interrupt source is currently storming, then throttle 694 * it to only fire the handler once per clock tick. 695 * 696 * If this interrupt source is not currently storming, but the 697 * number of back to back interrupts exceeds the storm threshold, 698 * then enter storming mode. 699 */ 700 if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold) { 701 if (ie->ie_warned == 0) { 702 printf( 703 "Interrupt storm detected on \"%s\"; throttling interrupt source\n", 704 ie->ie_name); 705 ie->ie_warned = 1; 706 } 707 tsleep(&ie->ie_count, 0, "istorm", 1); 708 } else 709 ie->ie_count++; 710 711 /* 712 * Now that all the handlers have had a chance to run, reenable 713 * the interrupt source. 714 */ 715 if (ie->ie_enable != NULL) 716 ie->ie_enable(ie->ie_source); 717 } 718 719 /* 720 * This is the main code for interrupt threads. 721 */ 722 static void 723 ithread_loop(void *arg) 724 { 725 struct intr_thread *ithd; 726 struct intr_event *ie; 727 struct thread *td; 728 struct proc *p; 729 730 td = curthread; 731 p = td->td_proc; 732 ithd = (struct intr_thread *)arg; 733 KASSERT(ithd->it_thread == td, 734 ("%s: ithread and proc linkage out of sync", __func__)); 735 ie = ithd->it_event; 736 ie->ie_count = 0; 737 738 /* 739 * As long as we have interrupts outstanding, go through the 740 * list of handlers, giving each one a go at it. 741 */ 742 for (;;) { 743 /* 744 * If we are an orphaned thread, then just die. 745 */ 746 if (ithd->it_flags & IT_DEAD) { 747 CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__, 748 p->p_pid, p->p_comm); 749 free(ithd, M_ITHREAD); 750 kthread_exit(0); 751 } 752 753 /* 754 * Service interrupts. If another interrupt arrives while 755 * we are running, it will set it_need to note that we 756 * should make another pass. 757 */ 758 while (ithd->it_need) { 759 /* 760 * This might need a full read and write barrier 761 * to make sure that this write posts before any 762 * of the memory or device accesses in the 763 * handlers. 764 */ 765 atomic_store_rel_int(&ithd->it_need, 0); 766 ithread_execute_handlers(p, ie); 767 } 768 WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread"); 769 mtx_assert(&Giant, MA_NOTOWNED); 770 771 /* 772 * Processed all our interrupts. Now get the sched 773 * lock. This may take a while and it_need may get 774 * set again, so we have to check it again. 
775 */ 776 mtx_lock_spin(&sched_lock); 777 if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) { 778 TD_SET_IWAIT(td); 779 ie->ie_count = 0; 780 mi_switch(SW_VOL, NULL); 781 } 782 mtx_unlock_spin(&sched_lock); 783 } 784 } 785 786 #ifdef DDB 787 /* 788 * Dump details about an interrupt handler 789 */ 790 static void 791 db_dump_intrhand(struct intr_handler *ih) 792 { 793 int comma; 794 795 db_printf("\t%-10s ", ih->ih_name); 796 switch (ih->ih_pri) { 797 case PI_REALTIME: 798 db_printf("CLK "); 799 break; 800 case PI_AV: 801 db_printf("AV "); 802 break; 803 case PI_TTYHIGH: 804 case PI_TTYLOW: 805 db_printf("TTY "); 806 break; 807 case PI_TAPE: 808 db_printf("TAPE"); 809 break; 810 case PI_NET: 811 db_printf("NET "); 812 break; 813 case PI_DISK: 814 case PI_DISKLOW: 815 db_printf("DISK"); 816 break; 817 case PI_DULL: 818 db_printf("DULL"); 819 break; 820 default: 821 if (ih->ih_pri >= PI_SOFT) 822 db_printf("SWI "); 823 else 824 db_printf("%4u", ih->ih_pri); 825 break; 826 } 827 db_printf(" "); 828 db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC); 829 db_printf("(%p)", ih->ih_argument); 830 if (ih->ih_need || 831 (ih->ih_flags & (IH_FAST | IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD | 832 IH_MPSAFE)) != 0) { 833 db_printf(" {"); 834 comma = 0; 835 if (ih->ih_flags & IH_FAST) { 836 db_printf("FAST"); 837 comma = 1; 838 } 839 if (ih->ih_flags & IH_EXCLUSIVE) { 840 if (comma) 841 db_printf(", "); 842 db_printf("EXCL"); 843 comma = 1; 844 } 845 if (ih->ih_flags & IH_ENTROPY) { 846 if (comma) 847 db_printf(", "); 848 db_printf("ENTROPY"); 849 comma = 1; 850 } 851 if (ih->ih_flags & IH_DEAD) { 852 if (comma) 853 db_printf(", "); 854 db_printf("DEAD"); 855 comma = 1; 856 } 857 if (ih->ih_flags & IH_MPSAFE) { 858 if (comma) 859 db_printf(", "); 860 db_printf("MPSAFE"); 861 comma = 1; 862 } 863 if (ih->ih_need) { 864 if (comma) 865 db_printf(", "); 866 db_printf("NEED"); 867 } 868 db_printf("}"); 869 } 870 db_printf("\n"); 871 } 872 873 /* 874 * Dump details about a event. 
875 */ 876 void 877 db_dump_intr_event(struct intr_event *ie, int handlers) 878 { 879 struct intr_handler *ih; 880 struct intr_thread *it; 881 int comma; 882 883 db_printf("%s ", ie->ie_fullname); 884 it = ie->ie_thread; 885 if (it != NULL) 886 db_printf("(pid %d)", it->it_thread->td_proc->p_pid); 887 else 888 db_printf("(no thread)"); 889 if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 || 890 (it != NULL && it->it_need)) { 891 db_printf(" {"); 892 comma = 0; 893 if (ie->ie_flags & IE_SOFT) { 894 db_printf("SOFT"); 895 comma = 1; 896 } 897 if (ie->ie_flags & IE_ENTROPY) { 898 if (comma) 899 db_printf(", "); 900 db_printf("ENTROPY"); 901 comma = 1; 902 } 903 if (ie->ie_flags & IE_ADDING_THREAD) { 904 if (comma) 905 db_printf(", "); 906 db_printf("ADDING_THREAD"); 907 comma = 1; 908 } 909 if (it != NULL && it->it_need) { 910 if (comma) 911 db_printf(", "); 912 db_printf("NEED"); 913 } 914 db_printf("}"); 915 } 916 db_printf("\n"); 917 918 if (handlers) 919 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) 920 db_dump_intrhand(ih); 921 } 922 923 /* 924 * Dump data about interrupt handlers 925 */ 926 DB_SHOW_COMMAND(intr, db_show_intr) 927 { 928 struct intr_event *ie; 929 int all, verbose; 930 931 verbose = index(modif, 'v') != NULL; 932 all = index(modif, 'a') != NULL; 933 TAILQ_FOREACH(ie, &event_list, ie_list) { 934 if (!all && TAILQ_EMPTY(&ie->ie_handlers)) 935 continue; 936 db_dump_intr_event(ie, verbose); 937 if (db_pager_quit) 938 break; 939 } 940 } 941 #endif /* DDB */ 942 943 /* 944 * Start standard software interrupt threads 945 */ 946 static void 947 start_softintr(void *dummy) 948 { 949 struct proc *p; 950 951 if (swi_add(&clk_intr_event, "clock", softclock, NULL, SWI_CLOCK, 952 INTR_MPSAFE, &softclock_ih) || 953 swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih)) 954 panic("died while creating standard software ithreads"); 955 956 p = clk_intr_event->ie_thread->it_thread->td_proc; 957 PROC_LOCK(p); 958 p->p_flag |= P_NOLOAD; 959 PROC_UNLOCK(p); 960 } 961 SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL) 962 963 /* 964 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt. 965 * The data for this machine dependent, and the declarations are in machine 966 * dependent code. The layout of intrnames and intrcnt however is machine 967 * independent. 968 * 969 * We do not know the length of intrcnt and intrnames at compile time, so 970 * calculate things at run time. 971 */ 972 static int 973 sysctl_intrnames(SYSCTL_HANDLER_ARGS) 974 { 975 return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames, 976 req)); 977 } 978 979 SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD, 980 NULL, 0, sysctl_intrnames, "", "Interrupt Names"); 981 982 static int 983 sysctl_intrcnt(SYSCTL_HANDLER_ARGS) 984 { 985 return (sysctl_handle_opaque(oidp, intrcnt, 986 (char *)eintrcnt - (char *)intrcnt, req)); 987 } 988 989 SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD, 990 NULL, 0, sysctl_intrcnt, "", "Interrupt Counts"); 991 992 #ifdef DDB 993 /* 994 * DDB command to dump the interrupt statistics. 995 */ 996 DB_SHOW_COMMAND(intrcnt, db_show_intrcnt) 997 { 998 u_long *i; 999 char *cp; 1000 1001 cp = intrnames; 1002 for (i = intrcnt; i != eintrcnt && !db_pager_quit; i++) { 1003 if (*cp == '\0') 1004 break; 1005 if (*i != 0) 1006 db_printf("%s\t%lu\n", cp, *i); 1007 cp += strlen(cp) + 1; 1008 } 1009 } 1010 #endif 1011