/*-
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
	struct intr_event *it_event;
	struct thread *it_thread;	/* Kernel thread. */
	int	it_flags;		/* (j) IT_* flags. */
	int	it_need;		/* Needs service. */
};

/* Interrupt thread flags kept in it_flags */
#define	IT_DEAD		0x000001	/* Thread is waiting to exit. */

struct intr_entropy {
	struct thread *td;
	uintptr_t event;
};

struct intr_event *clk_intr_event;
struct intr_event *tty_intr_event;
void	*softclock_ih;
void	*vm_ih;
struct proc *intrproc;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static int intr_storm_threshold = 1000;
TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);

static void	intr_event_update(struct intr_event *ie);
#ifdef INTR_FILTER
static struct intr_thread *ithread_create(const char *name,
    struct intr_handler *ih);
#else
static struct intr_thread *ithread_create(const char *name);
#endif
static void	ithread_destroy(struct intr_thread *ithread);
static void	ithread_execute_handlers(struct proc *p,
    struct intr_event *ie);
#ifdef INTR_FILTER
static void	priv_ithread_execute_handler(struct proc *p,
    struct intr_handler *ih);
#endif
static void	ithread_loop(void *);
static void	ithread_update(struct intr_thread *ithd);
static void	start_softintr(void *);

/* Map an interrupt type to an ithread priority. */
u_char
intr_priority(enum intr_type flags)
{
	u_char pri;

	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
	switch (flags) {
	case INTR_TYPE_TTY:
		pri = PI_TTYLOW;
		break;
	case INTR_TYPE_BIO:
		/*
		 * XXX We need to refine this.  BSD/OS distinguishes
		 * between tape and disk priorities.
		 */
		pri = PI_DISK;
		break;
	case INTR_TYPE_NET:
		pri = PI_NET;
		break;
	case INTR_TYPE_CAM:
		pri = PI_DISK;		/* XXX or PI_CAM? */
		break;
	case INTR_TYPE_AV:		/* Audio/video */
		pri = PI_AV;
		break;
	case INTR_TYPE_CLK:
		pri = PI_REALTIME;
		break;
	case INTR_TYPE_MISC:
		pri = PI_DULL;		/* don't care */
		break;
	default:
		/* We didn't specify an interrupt level. */
		panic("intr_priority: no interrupt type in flags");
	}

	return pri;
}
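
/*
 * For instance, a caller registering a network device interrupt with
 * INTR_TYPE_NET gets an ithread priority of PI_NET:
 *
 *	pri = intr_priority(INTR_TYPE_NET);	(illustrative; returns PI_NET)
 *
 * Note that passing zero or more than one INTR_TYPE_* bit falls through
 * to the default case and panics, so callers must pick exactly one type.
 */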

/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
	struct intr_event *ie;
	struct thread *td;
	u_char pri;

	ie = ithd->it_event;
	td = ithd->it_thread;

	/* Determine the overall priority of this event. */
	if (TAILQ_EMPTY(&ie->ie_handlers))
		pri = PRI_MAX_ITHD;
	else
		pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;

	/* Update name and priority. */
	strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
	thread_lock(td);
	sched_prio(td, pri);
	thread_unlock(td);
}

/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
	struct intr_handler *ih;
	char *last;
	int missed, space;

	/* Start off with no entropy and just the name of the event. */
	mtx_assert(&ie->ie_lock, MA_OWNED);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	ie->ie_flags &= ~IE_ENTROPY;
	missed = 0;
	space = 1;

	/* Run through all the handlers updating values. */
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
		    sizeof(ie->ie_fullname)) {
			strcat(ie->ie_fullname, " ");
			strcat(ie->ie_fullname, ih->ih_name);
			space = 0;
		} else
			missed++;
		if (ih->ih_flags & IH_ENTROPY)
			ie->ie_flags |= IE_ENTROPY;
	}

	/*
	 * If the handler names were too long, add +'s to indicate missing
	 * names.  If we run out of room and still have +'s to add, change
	 * the last character from a + to a *.
	 */
	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
	while (missed-- > 0) {
		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
			if (*last == '+') {
				*last = '*';
				break;
			} else
				*last = '+';
		} else if (space) {
			strcat(ie->ie_fullname, " +");
			space = 0;
		} else
			strcat(ie->ie_fullname, "+");
	}

	/*
	 * If this event has an ithread, update its priority and
	 * name.
	 */
	if (ie->ie_thread != NULL)
		ithread_update(ie->ie_thread);
	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}
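
/*
 * As an example (with hypothetical handler names): an event named
 * "irq10" with handlers "fxp0" and "sio0" yields the fullname
 * "irq10 fxp0 sio0".  Handlers whose names no longer fit are summarized
 * by a trailing "+", which becomes a "*" once even the "+" markers run
 * out of room.
 */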

#ifndef INTR_FILTER
int
intr_event_create(struct intr_event **event, void *source, int flags,
    void (*enable)(void *), const char *fmt, ...)
{
	struct intr_event *ie;
	va_list ap;

	/* The only valid flag during creation is IE_SOFT. */
	if ((flags & ~IE_SOFT) != 0)
		return (EINVAL);
	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
	ie->ie_source = source;
	ie->ie_enable = enable;
	ie->ie_flags = flags;
	TAILQ_INIT(&ie->ie_handlers);
	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

	va_start(ap, fmt);
	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
	va_end(ap);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	mtx_pool_lock(mtxpool_sleep, &event_list);
	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
	mtx_pool_unlock(mtxpool_sleep, &event_list);
	if (event != NULL)
		*event = ie;
	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
	return (0);
}
#else
int
intr_event_create(struct intr_event **event, void *source, int flags,
    void (*enable)(void *), void (*eoi)(void *), void (*disab)(void *),
    const char *fmt, ...)
{
	struct intr_event *ie;
	va_list ap;

	/* The only valid flag during creation is IE_SOFT. */
	if ((flags & ~IE_SOFT) != 0)
		return (EINVAL);
	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
	ie->ie_source = source;
	ie->ie_enable = enable;
	ie->ie_eoi = eoi;
	ie->ie_disab = disab;
	ie->ie_flags = flags;
	TAILQ_INIT(&ie->ie_handlers);
	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

	va_start(ap, fmt);
	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
	va_end(ap);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	mtx_pool_lock(mtxpool_sleep, &event_list);
	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
	mtx_pool_unlock(mtxpool_sleep, &event_list);
	if (event != NULL)
		*event = ie;
	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
	return (0);
}
#endif

int
intr_event_destroy(struct intr_event *ie)
{

	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		mtx_unlock(&ie->ie_lock);
		return (EBUSY);
	}
	mtx_pool_lock(mtxpool_sleep, &event_list);
	TAILQ_REMOVE(&event_list, ie, ie_list);
	mtx_pool_unlock(mtxpool_sleep, &event_list);
#ifndef notyet
	if (ie->ie_thread != NULL) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	mtx_destroy(&ie->ie_lock);
	free(ie, M_ITHREAD);
	return (0);
}
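
/*
 * Typical life cycle (a sketch; the real callers are the MD interrupt
 * code and bus_setup_intr(9)): intr_event_create() is invoked once per
 * interrupt source, drivers then attach and detach handlers with
 * intr_event_add_handler() and intr_event_remove_handler(), and
 * intr_event_destroy() only succeeds once the handler list is empty,
 * returning EBUSY otherwise as seen above.
 */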

#ifndef INTR_FILTER
static struct intr_thread *
ithread_create(const char *name)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
	    &td, RFSTOPPED | RFHIGHPID,
	    0, "intr", "%s", name);
	if (error)
		panic("kproc_create() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}
#else
static struct intr_thread *
ithread_create(const char *name, struct intr_handler *ih)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ih, &intrproc,
	    &td, RFSTOPPED | RFHIGHPID,
	    0, "intr", "%s", name);
	if (error)
		panic("kproc_create() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}
#endif

static void
ithread_destroy(struct intr_thread *ithread)
{
	struct thread *td;

	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
	td = ithread->it_thread;
	thread_lock(td);
	ithread->it_flags |= IT_DEAD;
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	}
	thread_unlock(td);
}

#ifndef INTR_FILTER
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	ih->ih_name = name;
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Add the new handler to the event in priority order. */
	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	intr_event_update(ie);

	/* Create a thread if we need one. */
	while (ie->ie_thread == NULL && handler != NULL) {
		if (ie->ie_flags & IE_ADDING_THREAD)
			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
		else {
			ie->ie_flags |= IE_ADDING_THREAD;
			mtx_unlock(&ie->ie_lock);
			it = ithread_create("intr: newborn");
			mtx_lock(&ie->ie_lock);
			ie->ie_flags &= ~IE_ADDING_THREAD;
			ie->ie_thread = it;
			it->it_event = ie;
			ithread_update(it);
			wakeup(ie);
		}
	}
	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}
#else
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	ih->ih_name = name;
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Add the new handler to the event in priority order. */
	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	intr_event_update(ie);

	/* For filtered handlers, create a private ithread to run on. */
	if (filter != NULL && handler != NULL) {
		mtx_unlock(&ie->ie_lock);
		it = ithread_create("intr: newborn", ih);
		mtx_lock(&ie->ie_lock);
		it->it_event = ie;
		ih->ih_thread = it;
		ithread_update(it); // XXX - do we really need this?!?!?
	} else { /* Create the global per-event thread if we need one. */
		while (ie->ie_thread == NULL && handler != NULL) {
			if (ie->ie_flags & IE_ADDING_THREAD)
				msleep(ie, &ie->ie_lock, 0, "ithread", 0);
			else {
				ie->ie_flags |= IE_ADDING_THREAD;
				mtx_unlock(&ie->ie_lock);
				it = ithread_create("intr: newborn", ih);
				mtx_lock(&ie->ie_lock);
				ie->ie_flags &= ~IE_ADDING_THREAD;
				ie->ie_thread = it;
				it->it_event = ie;
				ithread_update(it);
				wakeup(ie);
			}
		}
	}
	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}
#endif
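
/*
 * An example registration (hypothetical driver "foo"; most drivers get
 * here indirectly via bus_setup_intr(9) rather than calling this
 * directly):
 *
 *	void *cookie;
 *	error = intr_event_add_handler(ie, device_get_nameunit(dev),
 *	    NULL, foo_intr, sc, intr_priority(INTR_TYPE_NET),
 *	    INTR_TYPE_NET | INTR_MPSAFE, &cookie);
 *
 * With a NULL filter and a non-NULL handler, foo_intr always runs in
 * the event's ithread.
 */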

/*
 * Return the ie_source field from the intr_event an intr_handler is
 * associated with.
 */
void *
intr_handler_source(void *cookie)
{
	struct intr_handler *ih;
	struct intr_event *ie;

	ih = (struct intr_handler *)cookie;
	if (ih == NULL)
		return (NULL);
	ie = ih->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    ih->ih_name));
	return (ie->ie_source);
}
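
/*
 * The cookie handed back through cookiep above is what a caller later
 * passes to intr_event_remove_handler() and intr_handler_source(); MD
 * code can use the latter to recover the per-source state (ie_source)
 * stashed at intr_event_create() time from a handler cookie.
 */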

#ifndef INTR_FILTER
int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
#ifdef INVARIANTS
	struct intr_handler *ih;
#endif
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ie->ie_lock);
	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
	    handler->ih_name, ie->ie_name);
ok:
#endif
	/*
	 * If there is no ithread, then just remove the handler and return.
	 * XXX: Note that an INTR_FAST handler might be running on another
	 * CPU!
	 */
	if (ie->ie_thread == NULL) {
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 *
	 * During a cold boot while cold is set, msleep() does not sleep,
	 * so we have to remove the handler here rather than letting the
	 * thread do it.
	 */
	thread_lock(ie->ie_thread->it_thread);
	if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 */
		ie->ie_thread->it_need = 1;
	} else
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
	thread_unlock(ie->ie_thread->it_thread);
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	intr_event_update(ie);
#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(4).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (!(ih->ih_flags & IH_FAST)) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}

int
intr_event_schedule_thread(struct intr_event *ie)
{
	struct intr_entropy entropy;
	struct intr_thread *it;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
	    ie->ie_thread == NULL)
		return (EINVAL);

	ctd = curthread;
	it = ie->ie_thread;
	td = it->it_thread;
	p = td->td_proc;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
		    p->p_pid, td->td_name);
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest(&entropy, sizeof(entropy), 2, 0,
		    RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 */
	it->it_need = 1;
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
	}
	thread_unlock(td);

	return (0);
}
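
/*
 * Note the handshake above: it_need is set unconditionally, but the
 * thread is only put on the run queue when it is parked in
 * TD_AWAITING_INTR.  If the ithread is still running, it observes
 * it_need on its next loop iteration in ithread_loop() and makes
 * another pass over the handlers instead of going back to sleep.
 */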
#else
int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
	struct intr_thread *it;
#ifdef INVARIANTS
	struct intr_handler *ih;
#endif
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ie->ie_lock);
	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
	    handler->ih_name, ie->ie_name);
ok:
#endif
	/*
	 * If there are no ithreads (per event and per handler), then
	 * just remove the handler and return.
	 * XXX: Note that an INTR_FAST handler might be running on another CPU!
	 */
	if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/* Private or global ithread? */
	it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;
	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 *
	 * During a cold boot while cold is set, msleep() does not sleep,
	 * so we have to remove the handler here rather than letting the
	 * thread do it.
	 */
	thread_lock(it->it_thread);
	if (!TD_AWAITING_INTR(it->it_thread) && !cold) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 */
		it->it_need = 1;
	} else
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
	thread_unlock(it->it_thread);
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	/*
	 * At this point, the handler has been disconnected from the event,
	 * so we can kill the private ithread if any.
	 */
	if (handler->ih_thread) {
		ithread_destroy(handler->ih_thread);
		handler->ih_thread = NULL;
	}
	intr_event_update(ie);
#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(4).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (handler != NULL) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}

int
intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
{
	struct intr_entropy entropy;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
		return (EINVAL);

	ctd = curthread;
	td = it->it_thread;
	p = td->td_proc;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
		    p->p_pid, td->td_name);
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest(&entropy, sizeof(entropy), 2, 0,
		    RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 */
	it->it_need = 1;
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
	}
	thread_unlock(td);

	return (0);
}
#endif

/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
    void *arg, int pri, enum intr_type flags, void **cookiep)
{
	struct intr_event *ie;
	int error;

	if (flags & INTR_ENTROPY)
		return (EINVAL);

	ie = (eventp != NULL) ? *eventp : NULL;

	if (ie != NULL) {
		if (!(ie->ie_flags & IE_SOFT))
			return (EINVAL);
	} else {
#ifdef INTR_FILTER
		error = intr_event_create(&ie, NULL, IE_SOFT,
		    NULL, NULL, NULL, "swi%d:", pri);
#else
		error = intr_event_create(&ie, NULL, IE_SOFT,
		    NULL, "swi%d:", pri);
#endif
		if (error)
			return (error);
		if (eventp != NULL)
			*eventp = ie;
	}
	return (intr_event_add_handler(ie, name, NULL, handler, arg,
	    (pri * RQ_PPQ) + PI_SOFT, flags, cookiep));
		/* XXKSE.. think of a better way to get separate queues */
}

/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
	struct intr_handler *ih = (struct intr_handler *)cookie;
	struct intr_event *ie = ih->ih_event;
	int error;

	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
	    ih->ih_need);

	/*
	 * Set ih_need for this handler so that if the ithread is already
	 * running it will execute this handler on the next pass.  Otherwise,
	 * it will execute it the next time it runs.
	 */
	atomic_store_rel_int(&ih->ih_need, 1);

	if (!(flags & SWI_DELAY)) {
		PCPU_INC(cnt.v_soft);
#ifdef INTR_FILTER
		error = intr_event_schedule_thread(ie, ie->ie_thread);
#else
		error = intr_event_schedule_thread(ie);
#endif
		KASSERT(error == 0, ("stray software interrupt"));
	}
}

/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

	return (intr_event_remove_handler(cookie));
}
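
/*
 * Example software interrupt usage (hypothetical subsystem "foo", using
 * the taskqueue-style SWI_TQ priority):
 *
 *	static void *foo_ih;
 *
 *	swi_add(NULL, "foo", foo_swi, NULL, SWI_TQ, INTR_MPSAFE, &foo_ih);
 *	...
 *	swi_sched(foo_ih, 0);	(marks foo_swi pending and schedules the
 *				 ithread; SWI_DELAY would skip the
 *				 scheduling step)
 */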

#ifdef INTR_FILTER
static void
priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih)
{
	struct intr_event *ie;

	ie = ih->ih_event;
	/*
	 * If this handler is marked for death, remove it from
	 * the list of handlers and wake up the sleeper.
	 */
	if (ih->ih_flags & IH_DEAD) {
		mtx_lock(&ie->ie_lock);
		TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
		ih->ih_flags &= ~IH_DEAD;
		wakeup(ih);
		mtx_unlock(&ie->ie_lock);
		return;
	}

	/* Execute this handler. */
	CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
	    __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
	    ih->ih_name, ih->ih_flags);

	if (!(ih->ih_flags & IH_MPSAFE))
		mtx_lock(&Giant);
	ih->ih_handler(ih->ih_argument);
	if (!(ih->ih_flags & IH_MPSAFE))
		mtx_unlock(&Giant);
}
#endif

static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{
	struct intr_handler *ih, *ihn;

	/* Interrupt handlers should not sleep. */
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_NO_SLEEPING();
	TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
		/*
		 * If this handler is marked for death, remove it from
		 * the list of handlers and wake up the sleeper.
		 */
		if (ih->ih_flags & IH_DEAD) {
			mtx_lock(&ie->ie_lock);
			TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
			ih->ih_flags &= ~IH_DEAD;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
			continue;
		}

		/* Skip filter-only handlers */
		if (ih->ih_handler == NULL)
			continue;

		/*
		 * For software interrupt threads, we only execute
		 * handlers that have their need flag set.  Hardware
		 * interrupt threads always invoke all of their handlers.
		 */
		if (ie->ie_flags & IE_SOFT) {
			if (!ih->ih_need)
				continue;
			else
				atomic_store_rel_int(&ih->ih_need, 0);
		}

		/* Execute this handler. */
		CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
		    __func__, p->p_pid, (void *)ih->ih_handler,
		    ih->ih_argument, ih->ih_name, ih->ih_flags);

		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_lock(&Giant);
		ih->ih_handler(ih->ih_argument);
		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_unlock(&Giant);
	}
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_SLEEPING_OK();

	/*
	 * Interrupt storm handling:
	 *
	 * If this interrupt source is currently storming, then throttle
	 * it to only fire the handler once per clock tick.
	 *
	 * If this interrupt source is not currently storming, but the
	 * number of back to back interrupts exceeds the storm threshold,
	 * then enter storming mode.
	 */
	if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
	    !(ie->ie_flags & IE_SOFT)) {
		/* Report the message only once every second. */
		if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
			printf(
	"interrupt storm detected on \"%s\"; throttling interrupt source\n",
			    ie->ie_name);
		}
		pause("istorm", 1);
	} else
		ie->ie_count++;

	/*
	 * Now that all the handlers have had a chance to run, reenable
	 * the interrupt source.
	 */
	if (ie->ie_enable != NULL)
		ie->ie_enable(ie->ie_source);
}
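
/*
 * The throttle above bounds a storming source: once ie_count reaches
 * intr_storm_threshold (default 1000), each pass sleeps for one tick in
 * pause(), so the handlers run at most hz times per second until the
 * source calms down and ie_count is reset by the ithread.
 */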

#ifndef INTR_FILTER
/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	ithd = (struct intr_thread *)arg;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 */
		while (ithd->it_need) {
			/*
			 * This might need a full read and write barrier
			 * to make sure that this write posts before any
			 * of the memory or device accesses in the
			 * handlers.
			 */
			atomic_store_rel_int(&ithd->it_need, 0);
			ithread_execute_handlers(p, ie);
		}
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL, NULL);
		}
		thread_unlock(td);
	}
}
#else
/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_handler *ih;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	int priv;

	td = curthread;
	p = td->td_proc;
	ih = (struct intr_handler *)arg;
	priv = (ih->ih_thread != NULL) ? 1 : 0;
	ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 */
		while (ithd->it_need) {
			/*
			 * This might need a full read and write barrier
			 * to make sure that this write posts before any
			 * of the memory or device accesses in the
			 * handlers.
			 */
			atomic_store_rel_int(&ithd->it_need, 0);
			if (priv)
				priv_ithread_execute_handler(p, ih);
			else
				ithread_execute_handlers(p, ie);
		}
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL, NULL);
		}
		thread_unlock(td);
	}
}
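
/*
 * A filter routine suitable for the loop below might look like this
 * (hypothetical driver; the FOO_* register accessors are made up):
 *
 *	static int
 *	foo_filter(void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		if (!FOO_INTR_PENDING(sc))
 *			return (FILTER_STRAY);
 *		FOO_ACK_INTR(sc);
 *		return (FILTER_HANDLED | FILTER_SCHEDULE_THREAD);
 *	}
 */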

/*
 * Main loop for interrupt filter.
 *
 * Some architectures (i386, amd64 and arm) require the optional frame
 * parameter, and use it as the main argument for fast handler execution
 * when ih_argument == NULL.
 *
 * Return value:
 * o FILTER_STRAY:		No filter recognized the event, and no
 *				filter-less handler is registered on this
 *				line.
 * o FILTER_HANDLED:		A filter claimed the event and served it.
 * o FILTER_SCHEDULE_THREAD:	No filter claimed the event, but there's at
 *				least one filter-less handler on this line.
 * o FILTER_HANDLED |
 *   FILTER_SCHEDULE_THREAD:	A filter claimed the event, and asked for
 *				scheduling the per-handler ithread.
 *
 * In case an ithread has to be scheduled, in *ithd there will be a
 * pointer to a struct intr_thread containing the thread to be
 * scheduled.
 */

int
intr_filter_loop(struct intr_event *ie, struct trapframe *frame,
    struct intr_thread **ithd)
{
	struct intr_handler *ih;
	void *arg;
	int ret, thread_only;

	ret = 0;
	thread_only = 0;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		/*
		 * Execute fast interrupt handlers directly.
		 * To support clock handlers, if a handler registers
		 * with a NULL argument, then we pass it a pointer to
		 * a trapframe as its argument.
		 */
		arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument);

		CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__,
		    ih->ih_filter, ih->ih_handler, arg, ih->ih_name);

		if (ih->ih_filter != NULL)
			ret = ih->ih_filter(arg);
		else {
			thread_only = 1;
			continue;
		}

		if (ret & FILTER_STRAY)
			continue;
		else {
			*ithd = ih->ih_thread;
			return (ret);
		}
	}

	/*
	 * No filters handled the interrupt and we have at least
	 * one handler without a filter.  In this case, we schedule
	 * all of the filter-less handlers to run in the ithread.
	 */
	if (thread_only) {
		*ithd = ie->ie_thread;
		return (FILTER_SCHEDULE_THREAD);
	}
	return (FILTER_STRAY);
}

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:			the event connected to this interrupt.
 * o frame:			some archs (e.g. i386) pass a frame to some
 *				handlers as their main argument.
 * Return value:
 * o 0:				everything ok.
 * o EINVAL:			stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_thread *ithd;
	struct thread *td;
	int thread;

	ithd = NULL;
	td = curthread;

	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	td->td_intr_nesting_level++;
	thread = 0;
	critical_enter();
	thread = intr_filter_loop(ie, frame, &ithd);

	/*
	 * If the interrupt was fully served, send it an EOI but leave
	 * it unmasked.  Otherwise, mask the source as well as sending
	 * it an EOI.
	 */
	if (thread & FILTER_HANDLED) {
		if (ie->ie_eoi != NULL)
			ie->ie_eoi(ie->ie_source);
	} else {
		if (ie->ie_disab != NULL)
			ie->ie_disab(ie->ie_source);
	}
	critical_exit();

	/* Interrupt storm logic */
	if (thread & FILTER_STRAY) {
		ie->ie_count++;
		if (ie->ie_count < intr_storm_threshold)
			printf("Interrupt stray detection not present\n");
	}

	/* Schedule an ithread if needed. */
	if (thread & FILTER_SCHEDULE_THREAD) {
		if (intr_event_schedule_thread(ie, ithd) != 0)
			panic("%s: impossible stray interrupt", __func__);
	}
	td->td_intr_nesting_level--;
	return (0);
}
#endif

#ifdef DDB
/*
 * Dump details about an interrupt handler
 */
static void
db_dump_intrhand(struct intr_handler *ih)
{
	int comma;

	db_printf("\t%-10s ", ih->ih_name);
	switch (ih->ih_pri) {
	case PI_REALTIME:
		db_printf("CLK ");
		break;
	case PI_AV:
		db_printf("AV  ");
		break;
	case PI_TTYHIGH:
	case PI_TTYLOW:
		db_printf("TTY ");
		break;
	case PI_TAPE:
		db_printf("TAPE");
		break;
	case PI_NET:
		db_printf("NET ");
		break;
	case PI_DISK:
	case PI_DISKLOW:
		db_printf("DISK");
		break;
	case PI_DULL:
		db_printf("DULL");
		break;
	default:
		if (ih->ih_pri >= PI_SOFT)
			db_printf("SWI ");
		else
			db_printf("%4u", ih->ih_pri);
		break;
	}
	db_printf(" ");
	db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
	db_printf("(%p)", ih->ih_argument);
	if (ih->ih_need ||
	    (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
	    IH_MPSAFE)) != 0) {
		db_printf(" {");
		comma = 0;
		if (ih->ih_flags & IH_EXCLUSIVE) {
			if (comma)
				db_printf(", ");
			db_printf("EXCL");
			comma = 1;
		}
		if (ih->ih_flags & IH_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ih->ih_flags & IH_DEAD) {
			if (comma)
				db_printf(", ");
			db_printf("DEAD");
			comma = 1;
		}
		if (ih->ih_flags & IH_MPSAFE) {
			if (comma)
				db_printf(", ");
			db_printf("MPSAFE");
			comma = 1;
		}
		if (ih->ih_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");
}

/*
 * Dump details about an event.
 */
void
db_dump_intr_event(struct intr_event *ie, int handlers)
{
	struct intr_handler *ih;
	struct intr_thread *it;
	int comma;

	db_printf("%s ", ie->ie_fullname);
	it = ie->ie_thread;
	if (it != NULL)
		db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
	else
		db_printf("(no thread)");
	if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
	    (it != NULL && it->it_need)) {
		db_printf(" {");
		comma = 0;
		if (ie->ie_flags & IE_SOFT) {
			db_printf("SOFT");
			comma = 1;
		}
		if (ie->ie_flags & IE_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ie->ie_flags & IE_ADDING_THREAD) {
			if (comma)
				db_printf(", ");
			db_printf("ADDING_THREAD");
			comma = 1;
		}
		if (it != NULL && it->it_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");

	if (handlers)
		TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
			db_dump_intrhand(ih);
}

/*
 * Dump data about interrupt handlers
 */
DB_SHOW_COMMAND(intr, db_show_intr)
{
	struct intr_event *ie;
	int all, verbose;

	verbose = index(modif, 'v') != NULL;
	all = index(modif, 'a') != NULL;
	TAILQ_FOREACH(ie, &event_list, ie_list) {
		if (!all && TAILQ_EMPTY(&ie->ie_handlers))
			continue;
		db_dump_intr_event(ie, verbose);
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */

/*
 * Start standard software interrupt threads
 */
static void
start_softintr(void *dummy)
{
	struct proc *p;

	if (swi_add(&clk_intr_event, "clock", softclock, NULL, SWI_CLOCK,
		INTR_MPSAFE, &softclock_ih) ||
	    swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
		panic("died while creating standard software ithreads");

	p = clk_intr_event->ie_thread->it_thread->td_proc;
	PROC_LOCK(p);
	p->p_flag |= P_NOLOAD;
	PROC_UNLOCK(p);
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, NULL)

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however is
 * machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames,
	    req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrcnt,
	    (char *)eintrcnt - (char *)intrcnt, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");
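
/*
 * Userland consumers such as vmstat(8) and systat(1) read these two
 * opaque sysctls together: hw.intrnames is a packed list of
 * NUL-terminated names and hw.intrcnt the parallel array of u_long
 * counters, the same layout walked by the DDB command below.
 */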

#ifdef DDB
/*
 * DDB command to dump the interrupt statistics.
 */
DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
{
	u_long *i;
	char *cp;

	cp = intrnames;
	for (i = intrcnt; i != eintrcnt && !db_pager_quit; i++) {
		if (*cp == '\0')
			break;
		if (*i != 0)
			db_printf("%s\t%lu\n", cp, *i);
		cp += strlen(cp) + 1;
	}
}
#endif