1 /*- 2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include "opt_ddb.h" 31 32 #include <sys/param.h> 33 #include <sys/bus.h> 34 #include <sys/conf.h> 35 #include <sys/rtprio.h> 36 #include <sys/systm.h> 37 #include <sys/interrupt.h> 38 #include <sys/kernel.h> 39 #include <sys/kthread.h> 40 #include <sys/ktr.h> 41 #include <sys/limits.h> 42 #include <sys/lock.h> 43 #include <sys/malloc.h> 44 #include <sys/mutex.h> 45 #include <sys/proc.h> 46 #include <sys/random.h> 47 #include <sys/resourcevar.h> 48 #include <sys/sched.h> 49 #include <sys/sysctl.h> 50 #include <sys/unistd.h> 51 #include <sys/vmmeter.h> 52 #include <machine/atomic.h> 53 #include <machine/cpu.h> 54 #include <machine/md_var.h> 55 #include <machine/stdarg.h> 56 #ifdef DDB 57 #include <ddb/ddb.h> 58 #include <ddb/db_sym.h> 59 #endif 60 61 /* 62 * Describe an interrupt thread. There is one of these per interrupt event. 63 */ 64 struct intr_thread { 65 struct intr_event *it_event; 66 struct thread *it_thread; /* Kernel thread. */ 67 int it_flags; /* (j) IT_* flags. */ 68 int it_need; /* Needs service. */ 69 }; 70 71 /* Interrupt thread flags kept in it_flags */ 72 #define IT_DEAD 0x000001 /* Thread is waiting to exit. */ 73 74 struct intr_entropy { 75 struct thread *td; 76 uintptr_t event; 77 }; 78 79 struct intr_event *clk_intr_event; 80 struct intr_event *tty_intr_event; 81 void *softclock_ih; 82 void *vm_ih; 83 84 static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads"); 85 86 static int intr_storm_threshold = 1000; 87 TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold); 88 SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW, 89 &intr_storm_threshold, 0, 90 "Number of consecutive interrupts before storm protection is enabled"); 91 static TAILQ_HEAD(, intr_event) event_list = 92 TAILQ_HEAD_INITIALIZER(event_list); 93 94 static void intr_event_update(struct intr_event *ie); 95 #ifdef INTR_FILTER 96 static struct intr_thread *ithread_create(const char *name, 97 struct intr_handler *ih); 98 #else 99 static struct intr_thread *ithread_create(const char *name); 100 #endif 101 static void ithread_destroy(struct intr_thread *ithread); 102 static void ithread_execute_handlers(struct proc *p, 103 struct intr_event *ie); 104 #ifdef INTR_FILTER 105 static void priv_ithread_execute_handler(struct proc *p, 106 struct intr_handler *ih); 107 #endif 108 static void ithread_loop(void *); 109 static void ithread_update(struct intr_thread *ithd); 110 static void start_softintr(void *); 111 112 /* Map an interrupt type to an ithread priority. */ 113 u_char 114 intr_priority(enum intr_type flags) 115 { 116 u_char pri; 117 118 flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET | 119 INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV); 120 switch (flags) { 121 case INTR_TYPE_TTY: 122 pri = PI_TTYLOW; 123 break; 124 case INTR_TYPE_BIO: 125 /* 126 * XXX We need to refine this. BSD/OS distinguishes 127 * between tape and disk priorities. 128 */ 129 pri = PI_DISK; 130 break; 131 case INTR_TYPE_NET: 132 pri = PI_NET; 133 break; 134 case INTR_TYPE_CAM: 135 pri = PI_DISK; /* XXX or PI_CAM? */ 136 break; 137 case INTR_TYPE_AV: /* Audio/video */ 138 pri = PI_AV; 139 break; 140 case INTR_TYPE_CLK: 141 pri = PI_REALTIME; 142 break; 143 case INTR_TYPE_MISC: 144 pri = PI_DULL; /* don't care */ 145 break; 146 default: 147 /* We didn't specify an interrupt level. */ 148 panic("intr_priority: no interrupt type in flags"); 149 } 150 151 return pri; 152 } 153 154 /* 155 * Update an ithread based on the associated intr_event. 156 */ 157 static void 158 ithread_update(struct intr_thread *ithd) 159 { 160 struct intr_event *ie; 161 struct thread *td; 162 u_char pri; 163 164 ie = ithd->it_event; 165 td = ithd->it_thread; 166 167 /* Determine the overall priority of this event. */ 168 if (TAILQ_EMPTY(&ie->ie_handlers)) 169 pri = PRI_MAX_ITHD; 170 else 171 pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri; 172 173 /* Update name and priority. */ 174 strlcpy(td->td_proc->p_comm, ie->ie_fullname, 175 sizeof(td->td_proc->p_comm)); 176 thread_lock(td); 177 sched_prio(td, pri); 178 thread_unlock(td); 179 } 180 181 /* 182 * Regenerate the full name of an interrupt event and update its priority. 183 */ 184 static void 185 intr_event_update(struct intr_event *ie) 186 { 187 struct intr_handler *ih; 188 char *last; 189 int missed, space; 190 191 /* Start off with no entropy and just the name of the event. */ 192 mtx_assert(&ie->ie_lock, MA_OWNED); 193 strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); 194 ie->ie_flags &= ~IE_ENTROPY; 195 missed = 0; 196 space = 1; 197 198 /* Run through all the handlers updating values. */ 199 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 200 if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 < 201 sizeof(ie->ie_fullname)) { 202 strcat(ie->ie_fullname, " "); 203 strcat(ie->ie_fullname, ih->ih_name); 204 space = 0; 205 } else 206 missed++; 207 if (ih->ih_flags & IH_ENTROPY) 208 ie->ie_flags |= IE_ENTROPY; 209 } 210 211 /* 212 * If the handler names were too long, add +'s to indicate missing 213 * names. If we run out of room and still have +'s to add, change 214 * the last character from a + to a *. 215 */ 216 last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2]; 217 while (missed-- > 0) { 218 if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) { 219 if (*last == '+') { 220 *last = '*'; 221 break; 222 } else 223 *last = '+'; 224 } else if (space) { 225 strcat(ie->ie_fullname, " +"); 226 space = 0; 227 } else 228 strcat(ie->ie_fullname, "+"); 229 } 230 231 /* 232 * If this event has an ithread, update it's priority and 233 * name. 234 */ 235 if (ie->ie_thread != NULL) 236 ithread_update(ie->ie_thread); 237 CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname); 238 } 239 240 #ifndef INTR_FILTER 241 int 242 intr_event_create(struct intr_event **event, void *source, int flags, 243 void (*enable)(void *), const char *fmt, ...) 244 { 245 struct intr_event *ie; 246 va_list ap; 247 248 /* The only valid flag during creation is IE_SOFT. */ 249 if ((flags & ~IE_SOFT) != 0) 250 return (EINVAL); 251 ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO); 252 ie->ie_source = source; 253 ie->ie_enable = enable; 254 ie->ie_flags = flags; 255 TAILQ_INIT(&ie->ie_handlers); 256 mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF); 257 258 va_start(ap, fmt); 259 vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap); 260 va_end(ap); 261 strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); 262 mtx_pool_lock(mtxpool_sleep, &event_list); 263 TAILQ_INSERT_TAIL(&event_list, ie, ie_list); 264 mtx_pool_unlock(mtxpool_sleep, &event_list); 265 if (event != NULL) 266 *event = ie; 267 CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name); 268 return (0); 269 } 270 #else 271 int 272 intr_event_create(struct intr_event **event, void *source, int flags, 273 void (*enable)(void *), void (*eoi)(void *), void (*disab)(void *), 274 const char *fmt, ...) 275 { 276 struct intr_event *ie; 277 va_list ap; 278 279 /* The only valid flag during creation is IE_SOFT. */ 280 if ((flags & ~IE_SOFT) != 0) 281 return (EINVAL); 282 ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO); 283 ie->ie_source = source; 284 ie->ie_enable = enable; 285 ie->ie_eoi = eoi; 286 ie->ie_disab = disab; 287 ie->ie_flags = flags; 288 TAILQ_INIT(&ie->ie_handlers); 289 mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF); 290 291 va_start(ap, fmt); 292 vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap); 293 va_end(ap); 294 strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); 295 mtx_pool_lock(mtxpool_sleep, &event_list); 296 TAILQ_INSERT_TAIL(&event_list, ie, ie_list); 297 mtx_pool_unlock(mtxpool_sleep, &event_list); 298 if (event != NULL) 299 *event = ie; 300 CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name); 301 return (0); 302 } 303 #endif 304 305 int 306 intr_event_destroy(struct intr_event *ie) 307 { 308 309 mtx_lock(&ie->ie_lock); 310 if (!TAILQ_EMPTY(&ie->ie_handlers)) { 311 mtx_unlock(&ie->ie_lock); 312 return (EBUSY); 313 } 314 mtx_pool_lock(mtxpool_sleep, &event_list); 315 TAILQ_REMOVE(&event_list, ie, ie_list); 316 mtx_pool_unlock(mtxpool_sleep, &event_list); 317 #ifndef notyet 318 if (ie->ie_thread != NULL) { 319 ithread_destroy(ie->ie_thread); 320 ie->ie_thread = NULL; 321 } 322 #endif 323 mtx_unlock(&ie->ie_lock); 324 mtx_destroy(&ie->ie_lock); 325 free(ie, M_ITHREAD); 326 return (0); 327 } 328 329 #ifndef INTR_FILTER 330 static struct intr_thread * 331 ithread_create(const char *name) 332 { 333 struct intr_thread *ithd; 334 struct thread *td; 335 struct proc *p; 336 int error; 337 338 ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO); 339 340 error = kthread_create(ithread_loop, ithd, &p, RFSTOPPED | RFHIGHPID, 341 0, "%s", name); 342 if (error) 343 panic("kthread_create() failed with %d", error); 344 td = FIRST_THREAD_IN_PROC(p); /* XXXKSE */ 345 thread_lock(td); 346 sched_class(td, PRI_ITHD); 347 TD_SET_IWAIT(td); 348 thread_unlock(td); 349 td->td_pflags |= TDP_ITHREAD; 350 ithd->it_thread = td; 351 CTR2(KTR_INTR, "%s: created %s", __func__, name); 352 return (ithd); 353 } 354 #else 355 static struct intr_thread * 356 ithread_create(const char *name, struct intr_handler *ih) 357 { 358 struct intr_thread *ithd; 359 struct thread *td; 360 struct proc *p; 361 int error; 362 363 ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO); 364 365 error = kthread_create(ithread_loop, ih, &p, RFSTOPPED | RFHIGHPID, 366 0, "%s", name); 367 if (error) 368 panic("kthread_create() failed with %d", error); 369 td = FIRST_THREAD_IN_PROC(p); /* XXXKSE */ 370 thread_lock(td); 371 sched_class(td, PRI_ITHD); 372 TD_SET_IWAIT(td); 373 thread_unlock(td); 374 td->td_pflags |= TDP_ITHREAD; 375 ithd->it_thread = td; 376 CTR2(KTR_INTR, "%s: created %s", __func__, name); 377 return (ithd); 378 } 379 #endif 380 381 static void 382 ithread_destroy(struct intr_thread *ithread) 383 { 384 struct thread *td; 385 386 CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name); 387 td = ithread->it_thread; 388 thread_lock(td); 389 ithread->it_flags |= IT_DEAD; 390 if (TD_AWAITING_INTR(td)) { 391 TD_CLR_IWAIT(td); 392 sched_add(td, SRQ_INTR); 393 } 394 thread_unlock(td); 395 } 396 397 #ifndef INTR_FILTER 398 int 399 intr_event_add_handler(struct intr_event *ie, const char *name, 400 driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri, 401 enum intr_type flags, void **cookiep) 402 { 403 struct intr_handler *ih, *temp_ih; 404 struct intr_thread *it; 405 406 if (ie == NULL || name == NULL || (handler == NULL && filter == NULL)) 407 return (EINVAL); 408 409 /* Allocate and populate an interrupt handler structure. */ 410 ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO); 411 ih->ih_filter = filter; 412 ih->ih_handler = handler; 413 ih->ih_argument = arg; 414 ih->ih_name = name; 415 ih->ih_event = ie; 416 ih->ih_pri = pri; 417 if (flags & INTR_EXCL) 418 ih->ih_flags = IH_EXCLUSIVE; 419 if (flags & INTR_MPSAFE) 420 ih->ih_flags |= IH_MPSAFE; 421 if (flags & INTR_ENTROPY) 422 ih->ih_flags |= IH_ENTROPY; 423 424 /* We can only have one exclusive handler in a event. */ 425 mtx_lock(&ie->ie_lock); 426 if (!TAILQ_EMPTY(&ie->ie_handlers)) { 427 if ((flags & INTR_EXCL) || 428 (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) { 429 mtx_unlock(&ie->ie_lock); 430 free(ih, M_ITHREAD); 431 return (EINVAL); 432 } 433 } 434 435 /* Add the new handler to the event in priority order. */ 436 TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) { 437 if (temp_ih->ih_pri > ih->ih_pri) 438 break; 439 } 440 if (temp_ih == NULL) 441 TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next); 442 else 443 TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next); 444 intr_event_update(ie); 445 446 /* Create a thread if we need one. */ 447 while (ie->ie_thread == NULL && handler != NULL) { 448 if (ie->ie_flags & IE_ADDING_THREAD) 449 msleep(ie, &ie->ie_lock, 0, "ithread", 0); 450 else { 451 ie->ie_flags |= IE_ADDING_THREAD; 452 mtx_unlock(&ie->ie_lock); 453 it = ithread_create("intr: newborn"); 454 mtx_lock(&ie->ie_lock); 455 ie->ie_flags &= ~IE_ADDING_THREAD; 456 ie->ie_thread = it; 457 it->it_event = ie; 458 ithread_update(it); 459 wakeup(ie); 460 } 461 } 462 CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name, 463 ie->ie_name); 464 mtx_unlock(&ie->ie_lock); 465 466 if (cookiep != NULL) 467 *cookiep = ih; 468 return (0); 469 } 470 #else 471 int 472 intr_event_add_handler(struct intr_event *ie, const char *name, 473 driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri, 474 enum intr_type flags, void **cookiep) 475 { 476 struct intr_handler *ih, *temp_ih; 477 struct intr_thread *it; 478 479 if (ie == NULL || name == NULL || (handler == NULL && filter == NULL)) 480 return (EINVAL); 481 482 /* Allocate and populate an interrupt handler structure. */ 483 ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO); 484 ih->ih_filter = filter; 485 ih->ih_handler = handler; 486 ih->ih_argument = arg; 487 ih->ih_name = name; 488 ih->ih_event = ie; 489 ih->ih_pri = pri; 490 if (flags & INTR_EXCL) 491 ih->ih_flags = IH_EXCLUSIVE; 492 if (flags & INTR_MPSAFE) 493 ih->ih_flags |= IH_MPSAFE; 494 if (flags & INTR_ENTROPY) 495 ih->ih_flags |= IH_ENTROPY; 496 497 /* We can only have one exclusive handler in a event. */ 498 mtx_lock(&ie->ie_lock); 499 if (!TAILQ_EMPTY(&ie->ie_handlers)) { 500 if ((flags & INTR_EXCL) || 501 (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) { 502 mtx_unlock(&ie->ie_lock); 503 free(ih, M_ITHREAD); 504 return (EINVAL); 505 } 506 } 507 508 /* Add the new handler to the event in priority order. */ 509 TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) { 510 if (temp_ih->ih_pri > ih->ih_pri) 511 break; 512 } 513 if (temp_ih == NULL) 514 TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next); 515 else 516 TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next); 517 intr_event_update(ie); 518 519 /* For filtered handlers, create a private ithread to run on. */ 520 if (filter != NULL && handler != NULL) { 521 mtx_unlock(&ie->ie_lock); 522 it = ithread_create("intr: newborn", ih); 523 mtx_lock(&ie->ie_lock); 524 it->it_event = ie; 525 ih->ih_thread = it; 526 ithread_update(it); // XXX - do we really need this?!?!? 527 } else { /* Create the global per-event thread if we need one. */ 528 while (ie->ie_thread == NULL && handler != NULL) { 529 if (ie->ie_flags & IE_ADDING_THREAD) 530 msleep(ie, &ie->ie_lock, 0, "ithread", 0); 531 else { 532 ie->ie_flags |= IE_ADDING_THREAD; 533 mtx_unlock(&ie->ie_lock); 534 it = ithread_create("intr: newborn", ih); 535 mtx_lock(&ie->ie_lock); 536 ie->ie_flags &= ~IE_ADDING_THREAD; 537 ie->ie_thread = it; 538 it->it_event = ie; 539 ithread_update(it); 540 wakeup(ie); 541 } 542 } 543 } 544 CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name, 545 ie->ie_name); 546 mtx_unlock(&ie->ie_lock); 547 548 if (cookiep != NULL) 549 *cookiep = ih; 550 return (0); 551 } 552 #endif 553 554 /* 555 * Return the ie_source field from the intr_event an intr_handler is 556 * associated with. 557 */ 558 void * 559 intr_handler_source(void *cookie) 560 { 561 struct intr_handler *ih; 562 struct intr_event *ie; 563 564 ih = (struct intr_handler *)cookie; 565 if (ih == NULL) 566 return (NULL); 567 ie = ih->ih_event; 568 KASSERT(ie != NULL, 569 ("interrupt handler \"%s\" has a NULL interrupt event", 570 ih->ih_name)); 571 return (ie->ie_source); 572 } 573 574 #ifndef INTR_FILTER 575 int 576 intr_event_remove_handler(void *cookie) 577 { 578 struct intr_handler *handler = (struct intr_handler *)cookie; 579 struct intr_event *ie; 580 #ifdef INVARIANTS 581 struct intr_handler *ih; 582 #endif 583 #ifdef notyet 584 int dead; 585 #endif 586 587 if (handler == NULL) 588 return (EINVAL); 589 ie = handler->ih_event; 590 KASSERT(ie != NULL, 591 ("interrupt handler \"%s\" has a NULL interrupt event", 592 handler->ih_name)); 593 mtx_lock(&ie->ie_lock); 594 CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name, 595 ie->ie_name); 596 #ifdef INVARIANTS 597 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) 598 if (ih == handler) 599 goto ok; 600 mtx_unlock(&ie->ie_lock); 601 panic("interrupt handler \"%s\" not found in interrupt event \"%s\"", 602 ih->ih_name, ie->ie_name); 603 ok: 604 #endif 605 /* 606 * If there is no ithread, then just remove the handler and return. 607 * XXX: Note that an INTR_FAST handler might be running on another 608 * CPU! 609 */ 610 if (ie->ie_thread == NULL) { 611 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 612 mtx_unlock(&ie->ie_lock); 613 free(handler, M_ITHREAD); 614 return (0); 615 } 616 617 /* 618 * If the interrupt thread is already running, then just mark this 619 * handler as being dead and let the ithread do the actual removal. 620 * 621 * During a cold boot while cold is set, msleep() does not sleep, 622 * so we have to remove the handler here rather than letting the 623 * thread do it. 624 */ 625 thread_lock(ie->ie_thread->it_thread); 626 if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) { 627 handler->ih_flags |= IH_DEAD; 628 629 /* 630 * Ensure that the thread will process the handler list 631 * again and remove this handler if it has already passed 632 * it on the list. 633 */ 634 ie->ie_thread->it_need = 1; 635 } else 636 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 637 thread_unlock(ie->ie_thread->it_thread); 638 while (handler->ih_flags & IH_DEAD) 639 msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0); 640 intr_event_update(ie); 641 #ifdef notyet 642 /* 643 * XXX: This could be bad in the case of ppbus(8). Also, I think 644 * this could lead to races of stale data when servicing an 645 * interrupt. 646 */ 647 dead = 1; 648 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 649 if (!(ih->ih_flags & IH_FAST)) { 650 dead = 0; 651 break; 652 } 653 } 654 if (dead) { 655 ithread_destroy(ie->ie_thread); 656 ie->ie_thread = NULL; 657 } 658 #endif 659 mtx_unlock(&ie->ie_lock); 660 free(handler, M_ITHREAD); 661 return (0); 662 } 663 664 int 665 intr_event_schedule_thread(struct intr_event *ie) 666 { 667 struct intr_entropy entropy; 668 struct intr_thread *it; 669 struct thread *td; 670 struct thread *ctd; 671 struct proc *p; 672 673 /* 674 * If no ithread or no handlers, then we have a stray interrupt. 675 */ 676 if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || 677 ie->ie_thread == NULL) 678 return (EINVAL); 679 680 ctd = curthread; 681 it = ie->ie_thread; 682 td = it->it_thread; 683 p = td->td_proc; 684 685 /* 686 * If any of the handlers for this ithread claim to be good 687 * sources of entropy, then gather some. 688 */ 689 if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) { 690 CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__, 691 p->p_pid, p->p_comm); 692 entropy.event = (uintptr_t)ie; 693 entropy.td = ctd; 694 random_harvest(&entropy, sizeof(entropy), 2, 0, 695 RANDOM_INTERRUPT); 696 } 697 698 KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name)); 699 700 /* 701 * Set it_need to tell the thread to keep running if it is already 702 * running. Then, lock the thread and see if we actually need to 703 * put it on the runqueue. 704 */ 705 it->it_need = 1; 706 thread_lock(td); 707 if (TD_AWAITING_INTR(td)) { 708 CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid, 709 p->p_comm); 710 TD_CLR_IWAIT(td); 711 sched_add(td, SRQ_INTR); 712 } else { 713 CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d", 714 __func__, p->p_pid, p->p_comm, it->it_need, td->td_state); 715 } 716 thread_unlock(td); 717 718 return (0); 719 } 720 #else 721 int 722 intr_event_remove_handler(void *cookie) 723 { 724 struct intr_handler *handler = (struct intr_handler *)cookie; 725 struct intr_event *ie; 726 struct intr_thread *it; 727 #ifdef INVARIANTS 728 struct intr_handler *ih; 729 #endif 730 #ifdef notyet 731 int dead; 732 #endif 733 734 if (handler == NULL) 735 return (EINVAL); 736 ie = handler->ih_event; 737 KASSERT(ie != NULL, 738 ("interrupt handler \"%s\" has a NULL interrupt event", 739 handler->ih_name)); 740 mtx_lock(&ie->ie_lock); 741 CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name, 742 ie->ie_name); 743 #ifdef INVARIANTS 744 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) 745 if (ih == handler) 746 goto ok; 747 mtx_unlock(&ie->ie_lock); 748 panic("interrupt handler \"%s\" not found in interrupt event \"%s\"", 749 ih->ih_name, ie->ie_name); 750 ok: 751 #endif 752 /* 753 * If there are no ithreads (per event and per handler), then 754 * just remove the handler and return. 755 * XXX: Note that an INTR_FAST handler might be running on another CPU! 756 */ 757 if (ie->ie_thread == NULL && handler->ih_thread == NULL) { 758 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 759 mtx_unlock(&ie->ie_lock); 760 free(handler, M_ITHREAD); 761 return (0); 762 } 763 764 /* Private or global ithread? */ 765 it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread; 766 /* 767 * If the interrupt thread is already running, then just mark this 768 * handler as being dead and let the ithread do the actual removal. 769 * 770 * During a cold boot while cold is set, msleep() does not sleep, 771 * so we have to remove the handler here rather than letting the 772 * thread do it. 773 */ 774 thread_lock(it->it_thread); 775 if (!TD_AWAITING_INTR(it->it_thread) && !cold) { 776 handler->ih_flags |= IH_DEAD; 777 778 /* 779 * Ensure that the thread will process the handler list 780 * again and remove this handler if it has already passed 781 * it on the list. 782 */ 783 it->it_need = 1; 784 } else 785 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 786 thread_unlock(it->it_thread); 787 while (handler->ih_flags & IH_DEAD) 788 msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0); 789 /* 790 * At this point, the handler has been disconnected from the event, 791 * so we can kill the private ithread if any. 792 */ 793 if (handler->ih_thread) { 794 ithread_destroy(handler->ih_thread); 795 handler->ih_thread = NULL; 796 } 797 intr_event_update(ie); 798 #ifdef notyet 799 /* 800 * XXX: This could be bad in the case of ppbus(8). Also, I think 801 * this could lead to races of stale data when servicing an 802 * interrupt. 803 */ 804 dead = 1; 805 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 806 if (handler != NULL) { 807 dead = 0; 808 break; 809 } 810 } 811 if (dead) { 812 ithread_destroy(ie->ie_thread); 813 ie->ie_thread = NULL; 814 } 815 #endif 816 mtx_unlock(&ie->ie_lock); 817 free(handler, M_ITHREAD); 818 return (0); 819 } 820 821 int 822 intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it) 823 { 824 struct intr_entropy entropy; 825 struct thread *td; 826 struct thread *ctd; 827 struct proc *p; 828 829 /* 830 * If no ithread or no handlers, then we have a stray interrupt. 831 */ 832 if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL) 833 return (EINVAL); 834 835 ctd = curthread; 836 td = it->it_thread; 837 p = td->td_proc; 838 839 /* 840 * If any of the handlers for this ithread claim to be good 841 * sources of entropy, then gather some. 842 */ 843 if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) { 844 CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__, 845 p->p_pid, p->p_comm); 846 entropy.event = (uintptr_t)ie; 847 entropy.td = ctd; 848 random_harvest(&entropy, sizeof(entropy), 2, 0, 849 RANDOM_INTERRUPT); 850 } 851 852 KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name)); 853 854 /* 855 * Set it_need to tell the thread to keep running if it is already 856 * running. Then, lock the thread and see if we actually need to 857 * put it on the runqueue. 858 */ 859 it->it_need = 1; 860 thread_lock(td); 861 if (TD_AWAITING_INTR(td)) { 862 CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid, 863 p->p_comm); 864 TD_CLR_IWAIT(td); 865 sched_add(td, SRQ_INTR); 866 } else { 867 CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d", 868 __func__, p->p_pid, p->p_comm, it->it_need, td->td_state); 869 } 870 thread_unlock(td); 871 872 return (0); 873 } 874 #endif 875 876 /* 877 * Add a software interrupt handler to a specified event. If a given event 878 * is not specified, then a new event is created. 879 */ 880 int 881 swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler, 882 void *arg, int pri, enum intr_type flags, void **cookiep) 883 { 884 struct intr_event *ie; 885 int error; 886 887 if (flags & INTR_ENTROPY) 888 return (EINVAL); 889 890 ie = (eventp != NULL) ? *eventp : NULL; 891 892 if (ie != NULL) { 893 if (!(ie->ie_flags & IE_SOFT)) 894 return (EINVAL); 895 } else { 896 #ifdef INTR_FILTER 897 error = intr_event_create(&ie, NULL, IE_SOFT, 898 NULL, NULL, NULL, "swi%d:", pri); 899 #else 900 error = intr_event_create(&ie, NULL, IE_SOFT, 901 NULL, "swi%d:", pri); 902 #endif 903 if (error) 904 return (error); 905 if (eventp != NULL) 906 *eventp = ie; 907 } 908 return (intr_event_add_handler(ie, name, NULL, handler, arg, 909 (pri * RQ_PPQ) + PI_SOFT, flags, cookiep)); 910 /* XXKSE.. think of a better way to get separate queues */ 911 } 912 913 /* 914 * Schedule a software interrupt thread. 915 */ 916 void 917 swi_sched(void *cookie, int flags) 918 { 919 struct intr_handler *ih = (struct intr_handler *)cookie; 920 struct intr_event *ie = ih->ih_event; 921 int error; 922 923 CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name, 924 ih->ih_need); 925 926 /* 927 * Set ih_need for this handler so that if the ithread is already 928 * running it will execute this handler on the next pass. Otherwise, 929 * it will execute it the next time it runs. 930 */ 931 atomic_store_rel_int(&ih->ih_need, 1); 932 933 if (!(flags & SWI_DELAY)) { 934 PCPU_INC(cnt.v_soft); 935 #ifdef INTR_FILTER 936 error = intr_event_schedule_thread(ie, ie->ie_thread); 937 #else 938 error = intr_event_schedule_thread(ie); 939 #endif 940 KASSERT(error == 0, ("stray software interrupt")); 941 } 942 } 943 944 /* 945 * Remove a software interrupt handler. Currently this code does not 946 * remove the associated interrupt event if it becomes empty. Calling code 947 * may do so manually via intr_event_destroy(), but that's not really 948 * an optimal interface. 949 */ 950 int 951 swi_remove(void *cookie) 952 { 953 954 return (intr_event_remove_handler(cookie)); 955 } 956 957 #ifdef INTR_FILTER 958 static void 959 priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih) 960 { 961 struct intr_event *ie; 962 963 ie = ih->ih_event; 964 /* 965 * If this handler is marked for death, remove it from 966 * the list of handlers and wake up the sleeper. 967 */ 968 if (ih->ih_flags & IH_DEAD) { 969 mtx_lock(&ie->ie_lock); 970 TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next); 971 ih->ih_flags &= ~IH_DEAD; 972 wakeup(ih); 973 mtx_unlock(&ie->ie_lock); 974 return; 975 } 976 977 /* Execute this handler. */ 978 CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x", 979 __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument, 980 ih->ih_name, ih->ih_flags); 981 982 if (!(ih->ih_flags & IH_MPSAFE)) 983 mtx_lock(&Giant); 984 ih->ih_handler(ih->ih_argument); 985 if (!(ih->ih_flags & IH_MPSAFE)) 986 mtx_unlock(&Giant); 987 } 988 #endif 989 990 static void 991 ithread_execute_handlers(struct proc *p, struct intr_event *ie) 992 { 993 struct intr_handler *ih, *ihn; 994 995 /* Interrupt handlers should not sleep. */ 996 if (!(ie->ie_flags & IE_SOFT)) 997 THREAD_NO_SLEEPING(); 998 TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) { 999 1000 /* 1001 * If this handler is marked for death, remove it from 1002 * the list of handlers and wake up the sleeper. 1003 */ 1004 if (ih->ih_flags & IH_DEAD) { 1005 mtx_lock(&ie->ie_lock); 1006 TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next); 1007 ih->ih_flags &= ~IH_DEAD; 1008 wakeup(ih); 1009 mtx_unlock(&ie->ie_lock); 1010 continue; 1011 } 1012 1013 /* Skip filter only handlers */ 1014 if (ih->ih_handler == NULL) 1015 continue; 1016 1017 /* 1018 * For software interrupt threads, we only execute 1019 * handlers that have their need flag set. Hardware 1020 * interrupt threads always invoke all of their handlers. 1021 */ 1022 if (ie->ie_flags & IE_SOFT) { 1023 if (!ih->ih_need) 1024 continue; 1025 else 1026 atomic_store_rel_int(&ih->ih_need, 0); 1027 } 1028 1029 /* Execute this handler. */ 1030 CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x", 1031 __func__, p->p_pid, (void *)ih->ih_handler, 1032 ih->ih_argument, ih->ih_name, ih->ih_flags); 1033 1034 if (!(ih->ih_flags & IH_MPSAFE)) 1035 mtx_lock(&Giant); 1036 ih->ih_handler(ih->ih_argument); 1037 if (!(ih->ih_flags & IH_MPSAFE)) 1038 mtx_unlock(&Giant); 1039 } 1040 if (!(ie->ie_flags & IE_SOFT)) 1041 THREAD_SLEEPING_OK(); 1042 1043 /* 1044 * Interrupt storm handling: 1045 * 1046 * If this interrupt source is currently storming, then throttle 1047 * it to only fire the handler once per clock tick. 1048 * 1049 * If this interrupt source is not currently storming, but the 1050 * number of back to back interrupts exceeds the storm threshold, 1051 * then enter storming mode. 1052 */ 1053 if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold && 1054 !(ie->ie_flags & IE_SOFT)) { 1055 /* Report the message only once every second. */ 1056 if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) { 1057 printf( 1058 "interrupt storm detected on \"%s\"; throttling interrupt source\n", 1059 ie->ie_name); 1060 } 1061 pause("istorm", 1); 1062 } else 1063 ie->ie_count++; 1064 1065 /* 1066 * Now that all the handlers have had a chance to run, reenable 1067 * the interrupt source. 1068 */ 1069 if (ie->ie_enable != NULL) 1070 ie->ie_enable(ie->ie_source); 1071 } 1072 1073 #ifndef INTR_FILTER 1074 /* 1075 * This is the main code for interrupt threads. 1076 */ 1077 static void 1078 ithread_loop(void *arg) 1079 { 1080 struct intr_thread *ithd; 1081 struct intr_event *ie; 1082 struct thread *td; 1083 struct proc *p; 1084 1085 td = curthread; 1086 p = td->td_proc; 1087 ithd = (struct intr_thread *)arg; 1088 KASSERT(ithd->it_thread == td, 1089 ("%s: ithread and proc linkage out of sync", __func__)); 1090 ie = ithd->it_event; 1091 ie->ie_count = 0; 1092 1093 /* 1094 * As long as we have interrupts outstanding, go through the 1095 * list of handlers, giving each one a go at it. 1096 */ 1097 for (;;) { 1098 /* 1099 * If we are an orphaned thread, then just die. 1100 */ 1101 if (ithd->it_flags & IT_DEAD) { 1102 CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__, 1103 p->p_pid, p->p_comm); 1104 free(ithd, M_ITHREAD); 1105 kthread_exit(0); 1106 } 1107 1108 /* 1109 * Service interrupts. If another interrupt arrives while 1110 * we are running, it will set it_need to note that we 1111 * should make another pass. 1112 */ 1113 while (ithd->it_need) { 1114 /* 1115 * This might need a full read and write barrier 1116 * to make sure that this write posts before any 1117 * of the memory or device accesses in the 1118 * handlers. 1119 */ 1120 atomic_store_rel_int(&ithd->it_need, 0); 1121 ithread_execute_handlers(p, ie); 1122 } 1123 WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread"); 1124 mtx_assert(&Giant, MA_NOTOWNED); 1125 1126 /* 1127 * Processed all our interrupts. Now get the sched 1128 * lock. This may take a while and it_need may get 1129 * set again, so we have to check it again. 1130 */ 1131 thread_lock(td); 1132 if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) { 1133 TD_SET_IWAIT(td); 1134 ie->ie_count = 0; 1135 mi_switch(SW_VOL, NULL); 1136 } 1137 thread_unlock(td); 1138 } 1139 } 1140 #else 1141 /* 1142 * This is the main code for interrupt threads. 1143 */ 1144 static void 1145 ithread_loop(void *arg) 1146 { 1147 struct intr_thread *ithd; 1148 struct intr_handler *ih; 1149 struct intr_event *ie; 1150 struct thread *td; 1151 struct proc *p; 1152 int priv; 1153 1154 td = curthread; 1155 p = td->td_proc; 1156 ih = (struct intr_handler *)arg; 1157 priv = (ih->ih_thread != NULL) ? 1 : 0; 1158 ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread; 1159 KASSERT(ithd->it_thread == td, 1160 ("%s: ithread and proc linkage out of sync", __func__)); 1161 ie = ithd->it_event; 1162 ie->ie_count = 0; 1163 1164 /* 1165 * As long as we have interrupts outstanding, go through the 1166 * list of handlers, giving each one a go at it. 1167 */ 1168 for (;;) { 1169 /* 1170 * If we are an orphaned thread, then just die. 1171 */ 1172 if (ithd->it_flags & IT_DEAD) { 1173 CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__, 1174 p->p_pid, p->p_comm); 1175 free(ithd, M_ITHREAD); 1176 kthread_exit(0); 1177 } 1178 1179 /* 1180 * Service interrupts. If another interrupt arrives while 1181 * we are running, it will set it_need to note that we 1182 * should make another pass. 1183 */ 1184 while (ithd->it_need) { 1185 /* 1186 * This might need a full read and write barrier 1187 * to make sure that this write posts before any 1188 * of the memory or device accesses in the 1189 * handlers. 1190 */ 1191 atomic_store_rel_int(&ithd->it_need, 0); 1192 if (priv) 1193 priv_ithread_execute_handler(p, ih); 1194 else 1195 ithread_execute_handlers(p, ie); 1196 } 1197 WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread"); 1198 mtx_assert(&Giant, MA_NOTOWNED); 1199 1200 /* 1201 * Processed all our interrupts. Now get the sched 1202 * lock. This may take a while and it_need may get 1203 * set again, so we have to check it again. 1204 */ 1205 thread_lock(td); 1206 if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) { 1207 TD_SET_IWAIT(td); 1208 ie->ie_count = 0; 1209 mi_switch(SW_VOL, NULL); 1210 } 1211 thread_unlock(td); 1212 } 1213 } 1214 1215 /* 1216 * Main loop for interrupt filter. 1217 * 1218 * Some architectures (i386, amd64 and arm) require the optional frame 1219 * parameter, and use it as the main argument for fast handler execution 1220 * when ih_argument == NULL. 1221 * 1222 * Return value: 1223 * o FILTER_STRAY: No filter recognized the event, and no 1224 * filter-less handler is registered on this 1225 * line. 1226 * o FILTER_HANDLED: A filter claimed the event and served it. 1227 * o FILTER_SCHEDULE_THREAD: No filter claimed the event, but there's at 1228 * least one filter-less handler on this line. 1229 * o FILTER_HANDLED | 1230 * FILTER_SCHEDULE_THREAD: A filter claimed the event, and asked for 1231 * scheduling the per-handler ithread. 1232 * 1233 * In case an ithread has to be scheduled, in *ithd there will be a 1234 * pointer to a struct intr_thread containing the thread to be 1235 * scheduled. 1236 */ 1237 1238 int 1239 intr_filter_loop(struct intr_event *ie, struct trapframe *frame, 1240 struct intr_thread **ithd) 1241 { 1242 struct intr_handler *ih; 1243 void *arg; 1244 int ret, thread_only; 1245 1246 ret = 0; 1247 thread_only = 0; 1248 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 1249 /* 1250 * Execute fast interrupt handlers directly. 1251 * To support clock handlers, if a handler registers 1252 * with a NULL argument, then we pass it a pointer to 1253 * a trapframe as its argument. 1254 */ 1255 arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument); 1256 1257 CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__, 1258 ih->ih_filter, ih->ih_handler, arg, ih->ih_name); 1259 1260 if (ih->ih_filter != NULL) 1261 ret = ih->ih_filter(arg); 1262 else { 1263 thread_only = 1; 1264 continue; 1265 } 1266 1267 if (ret & FILTER_STRAY) 1268 continue; 1269 else { 1270 *ithd = ih->ih_thread; 1271 return (ret); 1272 } 1273 } 1274 1275 /* 1276 * No filters handled the interrupt and we have at least 1277 * one handler without a filter. In this case, we schedule 1278 * all of the filter-less handlers to run in the ithread. 1279 */ 1280 if (thread_only) { 1281 *ithd = ie->ie_thread; 1282 return (FILTER_SCHEDULE_THREAD); 1283 } 1284 return (FILTER_STRAY); 1285 } 1286 1287 /* 1288 * Main interrupt handling body. 1289 * 1290 * Input: 1291 * o ie: the event connected to this interrupt. 1292 * o frame: some archs (i.e. i386) pass a frame to some. 1293 * handlers as their main argument. 1294 * Return value: 1295 * o 0: everything ok. 1296 * o EINVAL: stray interrupt. 1297 */ 1298 int 1299 intr_event_handle(struct intr_event *ie, struct trapframe *frame) 1300 { 1301 struct intr_thread *ithd; 1302 struct thread *td; 1303 int thread; 1304 1305 ithd = NULL; 1306 td = curthread; 1307 1308 if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers)) 1309 return (EINVAL); 1310 1311 td->td_intr_nesting_level++; 1312 thread = 0; 1313 critical_enter(); 1314 thread = intr_filter_loop(ie, frame, &ithd); 1315 1316 /* 1317 * If the interrupt was fully served, send it an EOI but leave 1318 * it unmasked. Otherwise, mask the source as well as sending 1319 * it an EOI. 1320 */ 1321 if (thread & FILTER_HANDLED) { 1322 if (ie->ie_eoi != NULL) 1323 ie->ie_eoi(ie->ie_source); 1324 } else { 1325 if (ie->ie_disab != NULL) 1326 ie->ie_disab(ie->ie_source); 1327 } 1328 critical_exit(); 1329 1330 /* Interrupt storm logic */ 1331 if (thread & FILTER_STRAY) { 1332 ie->ie_count++; 1333 if (ie->ie_count < intr_storm_threshold) 1334 printf("Interrupt stray detection not present\n"); 1335 } 1336 1337 /* Schedule an ithread if needed. */ 1338 if (thread & FILTER_SCHEDULE_THREAD) { 1339 if (intr_event_schedule_thread(ie, ithd) != 0) 1340 panic("%s: impossible stray interrupt", __func__); 1341 } 1342 td->td_intr_nesting_level--; 1343 return (0); 1344 } 1345 #endif 1346 1347 #ifdef DDB 1348 /* 1349 * Dump details about an interrupt handler 1350 */ 1351 static void 1352 db_dump_intrhand(struct intr_handler *ih) 1353 { 1354 int comma; 1355 1356 db_printf("\t%-10s ", ih->ih_name); 1357 switch (ih->ih_pri) { 1358 case PI_REALTIME: 1359 db_printf("CLK "); 1360 break; 1361 case PI_AV: 1362 db_printf("AV "); 1363 break; 1364 case PI_TTYHIGH: 1365 case PI_TTYLOW: 1366 db_printf("TTY "); 1367 break; 1368 case PI_TAPE: 1369 db_printf("TAPE"); 1370 break; 1371 case PI_NET: 1372 db_printf("NET "); 1373 break; 1374 case PI_DISK: 1375 case PI_DISKLOW: 1376 db_printf("DISK"); 1377 break; 1378 case PI_DULL: 1379 db_printf("DULL"); 1380 break; 1381 default: 1382 if (ih->ih_pri >= PI_SOFT) 1383 db_printf("SWI "); 1384 else 1385 db_printf("%4u", ih->ih_pri); 1386 break; 1387 } 1388 db_printf(" "); 1389 db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC); 1390 db_printf("(%p)", ih->ih_argument); 1391 if (ih->ih_need || 1392 (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD | 1393 IH_MPSAFE)) != 0) { 1394 db_printf(" {"); 1395 comma = 0; 1396 if (ih->ih_flags & IH_EXCLUSIVE) { 1397 if (comma) 1398 db_printf(", "); 1399 db_printf("EXCL"); 1400 comma = 1; 1401 } 1402 if (ih->ih_flags & IH_ENTROPY) { 1403 if (comma) 1404 db_printf(", "); 1405 db_printf("ENTROPY"); 1406 comma = 1; 1407 } 1408 if (ih->ih_flags & IH_DEAD) { 1409 if (comma) 1410 db_printf(", "); 1411 db_printf("DEAD"); 1412 comma = 1; 1413 } 1414 if (ih->ih_flags & IH_MPSAFE) { 1415 if (comma) 1416 db_printf(", "); 1417 db_printf("MPSAFE"); 1418 comma = 1; 1419 } 1420 if (ih->ih_need) { 1421 if (comma) 1422 db_printf(", "); 1423 db_printf("NEED"); 1424 } 1425 db_printf("}"); 1426 } 1427 db_printf("\n"); 1428 } 1429 1430 /* 1431 * Dump details about a event. 1432 */ 1433 void 1434 db_dump_intr_event(struct intr_event *ie, int handlers) 1435 { 1436 struct intr_handler *ih; 1437 struct intr_thread *it; 1438 int comma; 1439 1440 db_printf("%s ", ie->ie_fullname); 1441 it = ie->ie_thread; 1442 if (it != NULL) 1443 db_printf("(pid %d)", it->it_thread->td_proc->p_pid); 1444 else 1445 db_printf("(no thread)"); 1446 if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 || 1447 (it != NULL && it->it_need)) { 1448 db_printf(" {"); 1449 comma = 0; 1450 if (ie->ie_flags & IE_SOFT) { 1451 db_printf("SOFT"); 1452 comma = 1; 1453 } 1454 if (ie->ie_flags & IE_ENTROPY) { 1455 if (comma) 1456 db_printf(", "); 1457 db_printf("ENTROPY"); 1458 comma = 1; 1459 } 1460 if (ie->ie_flags & IE_ADDING_THREAD) { 1461 if (comma) 1462 db_printf(", "); 1463 db_printf("ADDING_THREAD"); 1464 comma = 1; 1465 } 1466 if (it != NULL && it->it_need) { 1467 if (comma) 1468 db_printf(", "); 1469 db_printf("NEED"); 1470 } 1471 db_printf("}"); 1472 } 1473 db_printf("\n"); 1474 1475 if (handlers) 1476 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) 1477 db_dump_intrhand(ih); 1478 } 1479 1480 /* 1481 * Dump data about interrupt handlers 1482 */ 1483 DB_SHOW_COMMAND(intr, db_show_intr) 1484 { 1485 struct intr_event *ie; 1486 int all, verbose; 1487 1488 verbose = index(modif, 'v') != NULL; 1489 all = index(modif, 'a') != NULL; 1490 TAILQ_FOREACH(ie, &event_list, ie_list) { 1491 if (!all && TAILQ_EMPTY(&ie->ie_handlers)) 1492 continue; 1493 db_dump_intr_event(ie, verbose); 1494 if (db_pager_quit) 1495 break; 1496 } 1497 } 1498 #endif /* DDB */ 1499 1500 /* 1501 * Start standard software interrupt threads 1502 */ 1503 static void 1504 start_softintr(void *dummy) 1505 { 1506 struct proc *p; 1507 1508 if (swi_add(&clk_intr_event, "clock", softclock, NULL, SWI_CLOCK, 1509 INTR_MPSAFE, &softclock_ih) || 1510 swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih)) 1511 panic("died while creating standard software ithreads"); 1512 1513 p = clk_intr_event->ie_thread->it_thread->td_proc; 1514 PROC_LOCK(p); 1515 p->p_flag |= P_NOLOAD; 1516 PROC_UNLOCK(p); 1517 } 1518 SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL) 1519 1520 /* 1521 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt. 1522 * The data for this machine dependent, and the declarations are in machine 1523 * dependent code. The layout of intrnames and intrcnt however is machine 1524 * independent. 1525 * 1526 * We do not know the length of intrcnt and intrnames at compile time, so 1527 * calculate things at run time. 1528 */ 1529 static int 1530 sysctl_intrnames(SYSCTL_HANDLER_ARGS) 1531 { 1532 return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames, 1533 req)); 1534 } 1535 1536 SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD, 1537 NULL, 0, sysctl_intrnames, "", "Interrupt Names"); 1538 1539 static int 1540 sysctl_intrcnt(SYSCTL_HANDLER_ARGS) 1541 { 1542 return (sysctl_handle_opaque(oidp, intrcnt, 1543 (char *)eintrcnt - (char *)intrcnt, req)); 1544 } 1545 1546 SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD, 1547 NULL, 0, sysctl_intrcnt, "", "Interrupt Counts"); 1548 1549 #ifdef DDB 1550 /* 1551 * DDB command to dump the interrupt statistics. 1552 */ 1553 DB_SHOW_COMMAND(intrcnt, db_show_intrcnt) 1554 { 1555 u_long *i; 1556 char *cp; 1557 1558 cp = intrnames; 1559 for (i = intrcnt; i != eintrcnt && !db_pager_quit; i++) { 1560 if (*cp == '\0') 1561 break; 1562 if (*i != 0) 1563 db_printf("%s\t%lu\n", cp, *i); 1564 cp += strlen(cp) + 1; 1565 } 1566 } 1567 #endif 1568