1 /*- 2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include "opt_ddb.h" 31 32 #include <sys/param.h> 33 #include <sys/bus.h> 34 #include <sys/conf.h> 35 #include <sys/cpuset.h> 36 #include <sys/rtprio.h> 37 #include <sys/systm.h> 38 #include <sys/interrupt.h> 39 #include <sys/kernel.h> 40 #include <sys/kthread.h> 41 #include <sys/ktr.h> 42 #include <sys/limits.h> 43 #include <sys/lock.h> 44 #include <sys/malloc.h> 45 #include <sys/mutex.h> 46 #include <sys/proc.h> 47 #include <sys/random.h> 48 #include <sys/resourcevar.h> 49 #include <sys/sched.h> 50 #include <sys/smp.h> 51 #include <sys/sysctl.h> 52 #include <sys/syslog.h> 53 #include <sys/unistd.h> 54 #include <sys/vmmeter.h> 55 #include <machine/atomic.h> 56 #include <machine/cpu.h> 57 #include <machine/md_var.h> 58 #include <machine/stdarg.h> 59 #ifdef DDB 60 #include <ddb/ddb.h> 61 #include <ddb/db_sym.h> 62 #endif 63 64 /* 65 * Describe an interrupt thread. There is one of these per interrupt event. 66 */ 67 struct intr_thread { 68 struct intr_event *it_event; 69 struct thread *it_thread; /* Kernel thread. */ 70 int it_flags; /* (j) IT_* flags. */ 71 int it_need; /* Needs service. */ 72 }; 73 74 /* Interrupt thread flags kept in it_flags */ 75 #define IT_DEAD 0x000001 /* Thread is waiting to exit. 
*/ 76 77 struct intr_entropy { 78 struct thread *td; 79 uintptr_t event; 80 }; 81 82 struct intr_event *clk_intr_event; 83 struct intr_event *tty_intr_event; 84 void *vm_ih; 85 struct proc *intrproc; 86 87 static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads"); 88 89 static int intr_storm_threshold = 1000; 90 TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold); 91 SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW, 92 &intr_storm_threshold, 0, 93 "Number of consecutive interrupts before storm protection is enabled"); 94 static TAILQ_HEAD(, intr_event) event_list = 95 TAILQ_HEAD_INITIALIZER(event_list); 96 static struct mtx event_lock; 97 MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF); 98 99 static void intr_event_update(struct intr_event *ie); 100 #ifdef INTR_FILTER 101 static int intr_event_schedule_thread(struct intr_event *ie, 102 struct intr_thread *ithd); 103 static int intr_filter_loop(struct intr_event *ie, 104 struct trapframe *frame, struct intr_thread **ithd); 105 static struct intr_thread *ithread_create(const char *name, 106 struct intr_handler *ih); 107 #else 108 static int intr_event_schedule_thread(struct intr_event *ie); 109 static struct intr_thread *ithread_create(const char *name); 110 #endif 111 static void ithread_destroy(struct intr_thread *ithread); 112 static void ithread_execute_handlers(struct proc *p, 113 struct intr_event *ie); 114 #ifdef INTR_FILTER 115 static void priv_ithread_execute_handler(struct proc *p, 116 struct intr_handler *ih); 117 #endif 118 static void ithread_loop(void *); 119 static void ithread_update(struct intr_thread *ithd); 120 static void start_softintr(void *); 121 122 /* Map an interrupt type to an ithread priority. */ 123 u_char 124 intr_priority(enum intr_type flags) 125 { 126 u_char pri; 127 128 flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET | 129 INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV); 130 switch (flags) { 131 case INTR_TYPE_TTY: 132 pri = PI_TTYLOW; 133 break; 134 case INTR_TYPE_BIO: 135 /* 136 * XXX We need to refine this. BSD/OS distinguishes 137 * between tape and disk priorities. 138 */ 139 pri = PI_DISK; 140 break; 141 case INTR_TYPE_NET: 142 pri = PI_NET; 143 break; 144 case INTR_TYPE_CAM: 145 pri = PI_DISK; /* XXX or PI_CAM? */ 146 break; 147 case INTR_TYPE_AV: /* Audio/video */ 148 pri = PI_AV; 149 break; 150 case INTR_TYPE_CLK: 151 pri = PI_REALTIME; 152 break; 153 case INTR_TYPE_MISC: 154 pri = PI_DULL; /* don't care */ 155 break; 156 default: 157 /* We didn't specify an interrupt level. */ 158 panic("intr_priority: no interrupt type in flags"); 159 } 160 161 return pri; 162 } 163 164 /* 165 * Update an ithread based on the associated intr_event. 166 */ 167 static void 168 ithread_update(struct intr_thread *ithd) 169 { 170 struct intr_event *ie; 171 struct thread *td; 172 u_char pri; 173 174 ie = ithd->it_event; 175 td = ithd->it_thread; 176 177 /* Determine the overall priority of this event. */ 178 if (TAILQ_EMPTY(&ie->ie_handlers)) 179 pri = PRI_MAX_ITHD; 180 else 181 pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri; 182 183 /* Update name and priority. */ 184 strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name)); 185 thread_lock(td); 186 sched_prio(td, pri); 187 thread_unlock(td); 188 } 189 190 /* 191 * Regenerate the full name of an interrupt event and update its priority. 
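 *
 * For example (hypothetical names), an event created with the name "irq10:"
 * that has handlers named "em0" and "uhci0" ends up with the full name
 * "irq10: em0 uhci0".  Handlers whose names no longer fit are represented
 * by a trailing '+', and the last character becomes '*' once even the '+'
 * markers run out of room.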
 */
static void
intr_event_update(struct intr_event *ie)
{
	struct intr_handler *ih;
	char *last;
	int missed, space;

	/* Start off with no entropy and just the name of the event. */
	mtx_assert(&ie->ie_lock, MA_OWNED);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	ie->ie_flags &= ~IE_ENTROPY;
	missed = 0;
	space = 1;

	/* Run through all the handlers updating values. */
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
		    sizeof(ie->ie_fullname)) {
			strcat(ie->ie_fullname, " ");
			strcat(ie->ie_fullname, ih->ih_name);
			space = 0;
		} else
			missed++;
		if (ih->ih_flags & IH_ENTROPY)
			ie->ie_flags |= IE_ENTROPY;
	}

	/*
	 * If the handler names were too long, add +'s to indicate missing
	 * names.  If we run out of room and still have +'s to add, change
	 * the last character from a + to a *.
	 */
	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
	while (missed-- > 0) {
		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
			if (*last == '+') {
				*last = '*';
				break;
			} else
				*last = '+';
		} else if (space) {
			strcat(ie->ie_fullname, " +");
			space = 0;
		} else
			strcat(ie->ie_fullname, "+");
	}

	/*
	 * If this event has an ithread, update its priority and
	 * name.
	 */
	if (ie->ie_thread != NULL)
		ithread_update(ie->ie_thread);
	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}

int
intr_event_create(struct intr_event **event, void *source, int flags, int irq,
    void (*pre_ithread)(void *), void (*post_ithread)(void *),
    void (*post_filter)(void *), int (*assign_cpu)(void *, u_char),
    const char *fmt, ...)
{
	struct intr_event *ie;
	va_list ap;

	/* The only valid flag during creation is IE_SOFT. */
	if ((flags & ~IE_SOFT) != 0)
		return (EINVAL);
	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
	ie->ie_source = source;
	ie->ie_pre_ithread = pre_ithread;
	ie->ie_post_ithread = post_ithread;
	ie->ie_post_filter = post_filter;
	ie->ie_assign_cpu = assign_cpu;
	ie->ie_flags = flags;
	ie->ie_irq = irq;
	ie->ie_cpu = NOCPU;
	TAILQ_INIT(&ie->ie_handlers);
	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

	va_start(ap, fmt);
	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
	va_end(ap);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	mtx_lock(&event_lock);
	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
	mtx_unlock(&event_lock);
	if (event != NULL)
		*event = ie;
	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
	return (0);
}

/*
 * Bind an interrupt event to the specified CPU.  Note that not all
 * platforms support binding an interrupt to a CPU.  For those
 * platforms this request will fail.  For supported platforms, any
 * associated ithreads as well as the primary interrupt context will
 * be bound to the specified CPU.  Using a cpu id of NOCPU unbinds
 * the interrupt event.
 */
int
intr_event_bind(struct intr_event *ie, u_char cpu)
{
	cpuset_t mask;
	lwpid_t id;
	int error;

	/* Need a CPU to bind to.
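	 * (Callers normally arrive here through intr_setaffinity() below,
	 * which passes NOCPU when the supplied mask equals cpuset_root,
	 * i.e. "bind to all CPUs" means unbind, and a specific CPU id when
	 * exactly one CPU is set in the mask.)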
*/ 302 if (cpu != NOCPU && CPU_ABSENT(cpu)) 303 return (EINVAL); 304 305 if (ie->ie_assign_cpu == NULL) 306 return (EOPNOTSUPP); 307 /* 308 * If we have any ithreads try to set their mask first since this 309 * can fail. 310 */ 311 mtx_lock(&ie->ie_lock); 312 if (ie->ie_thread != NULL) { 313 CPU_ZERO(&mask); 314 if (cpu == NOCPU) 315 CPU_COPY(cpuset_root, &mask); 316 else 317 CPU_SET(cpu, &mask); 318 id = ie->ie_thread->it_thread->td_tid; 319 mtx_unlock(&ie->ie_lock); 320 error = cpuset_setthread(id, &mask); 321 if (error) 322 return (error); 323 } else 324 mtx_unlock(&ie->ie_lock); 325 error = ie->ie_assign_cpu(ie->ie_source, cpu); 326 if (error) 327 return (error); 328 mtx_lock(&ie->ie_lock); 329 ie->ie_cpu = cpu; 330 mtx_unlock(&ie->ie_lock); 331 332 return (error); 333 } 334 335 static struct intr_event * 336 intr_lookup(int irq) 337 { 338 struct intr_event *ie; 339 340 mtx_lock(&event_lock); 341 TAILQ_FOREACH(ie, &event_list, ie_list) 342 if (ie->ie_irq == irq && 343 (ie->ie_flags & IE_SOFT) == 0 && 344 TAILQ_FIRST(&ie->ie_handlers) != NULL) 345 break; 346 mtx_unlock(&event_lock); 347 return (ie); 348 } 349 350 int 351 intr_setaffinity(int irq, void *m) 352 { 353 struct intr_event *ie; 354 cpuset_t *mask; 355 u_char cpu; 356 int n; 357 358 mask = m; 359 cpu = NOCPU; 360 /* 361 * If we're setting all cpus we can unbind. Otherwise make sure 362 * only one cpu is in the set. 363 */ 364 if (CPU_CMP(cpuset_root, mask)) { 365 for (n = 0; n < CPU_SETSIZE; n++) { 366 if (!CPU_ISSET(n, mask)) 367 continue; 368 if (cpu != NOCPU) 369 return (EINVAL); 370 cpu = (u_char)n; 371 } 372 } 373 ie = intr_lookup(irq); 374 if (ie == NULL) 375 return (ESRCH); 376 intr_event_bind(ie, cpu); 377 return (0); 378 } 379 380 int 381 intr_getaffinity(int irq, void *m) 382 { 383 struct intr_event *ie; 384 cpuset_t *mask; 385 386 mask = m; 387 ie = intr_lookup(irq); 388 if (ie == NULL) 389 return (ESRCH); 390 CPU_ZERO(mask); 391 mtx_lock(&ie->ie_lock); 392 if (ie->ie_cpu == NOCPU) 393 CPU_COPY(cpuset_root, mask); 394 else 395 CPU_SET(ie->ie_cpu, mask); 396 mtx_unlock(&ie->ie_lock); 397 return (0); 398 } 399 400 int 401 intr_event_destroy(struct intr_event *ie) 402 { 403 404 mtx_lock(&event_lock); 405 mtx_lock(&ie->ie_lock); 406 if (!TAILQ_EMPTY(&ie->ie_handlers)) { 407 mtx_unlock(&ie->ie_lock); 408 mtx_unlock(&event_lock); 409 return (EBUSY); 410 } 411 TAILQ_REMOVE(&event_list, ie, ie_list); 412 #ifndef notyet 413 if (ie->ie_thread != NULL) { 414 ithread_destroy(ie->ie_thread); 415 ie->ie_thread = NULL; 416 } 417 #endif 418 mtx_unlock(&ie->ie_lock); 419 mtx_unlock(&event_lock); 420 mtx_destroy(&ie->ie_lock); 421 free(ie, M_ITHREAD); 422 return (0); 423 } 424 425 #ifndef INTR_FILTER 426 static struct intr_thread * 427 ithread_create(const char *name) 428 { 429 struct intr_thread *ithd; 430 struct thread *td; 431 int error; 432 433 ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO); 434 435 error = kproc_kthread_add(ithread_loop, ithd, &intrproc, 436 &td, RFSTOPPED | RFHIGHPID, 437 0, "intr", "%s", name); 438 if (error) 439 panic("kproc_create() failed with %d", error); 440 thread_lock(td); 441 sched_class(td, PRI_ITHD); 442 TD_SET_IWAIT(td); 443 thread_unlock(td); 444 td->td_pflags |= TDP_ITHREAD; 445 ithd->it_thread = td; 446 CTR2(KTR_INTR, "%s: created %s", __func__, name); 447 return (ithd); 448 } 449 #else 450 static struct intr_thread * 451 ithread_create(const char *name, struct intr_handler *ih) 452 { 453 struct intr_thread *ithd; 454 struct thread *td; 455 int error; 456 457 ithd = 
malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO); 458 459 error = kproc_kthread_add(ithread_loop, ih, &intrproc, 460 &td, RFSTOPPED | RFHIGHPID, 461 0, "intr", "%s", name); 462 if (error) 463 panic("kproc_create() failed with %d", error); 464 thread_lock(td); 465 sched_class(td, PRI_ITHD); 466 TD_SET_IWAIT(td); 467 thread_unlock(td); 468 td->td_pflags |= TDP_ITHREAD; 469 ithd->it_thread = td; 470 CTR2(KTR_INTR, "%s: created %s", __func__, name); 471 return (ithd); 472 } 473 #endif 474 475 static void 476 ithread_destroy(struct intr_thread *ithread) 477 { 478 struct thread *td; 479 480 CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name); 481 td = ithread->it_thread; 482 thread_lock(td); 483 ithread->it_flags |= IT_DEAD; 484 if (TD_AWAITING_INTR(td)) { 485 TD_CLR_IWAIT(td); 486 sched_add(td, SRQ_INTR); 487 } 488 thread_unlock(td); 489 } 490 491 #ifndef INTR_FILTER 492 int 493 intr_event_add_handler(struct intr_event *ie, const char *name, 494 driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri, 495 enum intr_type flags, void **cookiep) 496 { 497 struct intr_handler *ih, *temp_ih; 498 struct intr_thread *it; 499 500 if (ie == NULL || name == NULL || (handler == NULL && filter == NULL)) 501 return (EINVAL); 502 503 /* Allocate and populate an interrupt handler structure. */ 504 ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO); 505 ih->ih_filter = filter; 506 ih->ih_handler = handler; 507 ih->ih_argument = arg; 508 ih->ih_name = name; 509 ih->ih_event = ie; 510 ih->ih_pri = pri; 511 if (flags & INTR_EXCL) 512 ih->ih_flags = IH_EXCLUSIVE; 513 if (flags & INTR_MPSAFE) 514 ih->ih_flags |= IH_MPSAFE; 515 if (flags & INTR_ENTROPY) 516 ih->ih_flags |= IH_ENTROPY; 517 518 /* We can only have one exclusive handler in a event. */ 519 mtx_lock(&ie->ie_lock); 520 if (!TAILQ_EMPTY(&ie->ie_handlers)) { 521 if ((flags & INTR_EXCL) || 522 (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) { 523 mtx_unlock(&ie->ie_lock); 524 free(ih, M_ITHREAD); 525 return (EINVAL); 526 } 527 } 528 529 /* Add the new handler to the event in priority order. */ 530 TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) { 531 if (temp_ih->ih_pri > ih->ih_pri) 532 break; 533 } 534 if (temp_ih == NULL) 535 TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next); 536 else 537 TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next); 538 intr_event_update(ie); 539 540 /* Create a thread if we need one. */ 541 while (ie->ie_thread == NULL && handler != NULL) { 542 if (ie->ie_flags & IE_ADDING_THREAD) 543 msleep(ie, &ie->ie_lock, 0, "ithread", 0); 544 else { 545 ie->ie_flags |= IE_ADDING_THREAD; 546 mtx_unlock(&ie->ie_lock); 547 it = ithread_create("intr: newborn"); 548 mtx_lock(&ie->ie_lock); 549 ie->ie_flags &= ~IE_ADDING_THREAD; 550 ie->ie_thread = it; 551 it->it_event = ie; 552 ithread_update(it); 553 wakeup(ie); 554 } 555 } 556 CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name, 557 ie->ie_name); 558 mtx_unlock(&ie->ie_lock); 559 560 if (cookiep != NULL) 561 *cookiep = ih; 562 return (0); 563 } 564 #else 565 int 566 intr_event_add_handler(struct intr_event *ie, const char *name, 567 driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri, 568 enum intr_type flags, void **cookiep) 569 { 570 struct intr_handler *ih, *temp_ih; 571 struct intr_thread *it; 572 573 if (ie == NULL || name == NULL || (handler == NULL && filter == NULL)) 574 return (EINVAL); 575 576 /* Allocate and populate an interrupt handler structure. 
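	 * (The filter/handler/arg triple typically originates from a driver's
	 * bus_setup_intr(9) call; for example, with hypothetical driver names:
	 *
	 *	bus_setup_intr(dev, irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	 *	    foo_filter, foo_intr, sc, &sc->intr_cookie);
	 *
	 * which eventually lands here with both a filter and a handler.)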
*/ 577 ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO); 578 ih->ih_filter = filter; 579 ih->ih_handler = handler; 580 ih->ih_argument = arg; 581 ih->ih_name = name; 582 ih->ih_event = ie; 583 ih->ih_pri = pri; 584 if (flags & INTR_EXCL) 585 ih->ih_flags = IH_EXCLUSIVE; 586 if (flags & INTR_MPSAFE) 587 ih->ih_flags |= IH_MPSAFE; 588 if (flags & INTR_ENTROPY) 589 ih->ih_flags |= IH_ENTROPY; 590 591 /* We can only have one exclusive handler in a event. */ 592 mtx_lock(&ie->ie_lock); 593 if (!TAILQ_EMPTY(&ie->ie_handlers)) { 594 if ((flags & INTR_EXCL) || 595 (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) { 596 mtx_unlock(&ie->ie_lock); 597 free(ih, M_ITHREAD); 598 return (EINVAL); 599 } 600 } 601 602 /* Add the new handler to the event in priority order. */ 603 TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) { 604 if (temp_ih->ih_pri > ih->ih_pri) 605 break; 606 } 607 if (temp_ih == NULL) 608 TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next); 609 else 610 TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next); 611 intr_event_update(ie); 612 613 /* For filtered handlers, create a private ithread to run on. */ 614 if (filter != NULL && handler != NULL) { 615 mtx_unlock(&ie->ie_lock); 616 it = ithread_create("intr: newborn", ih); 617 mtx_lock(&ie->ie_lock); 618 it->it_event = ie; 619 ih->ih_thread = it; 620 ithread_update(it); // XXX - do we really need this?!?!? 621 } else { /* Create the global per-event thread if we need one. */ 622 while (ie->ie_thread == NULL && handler != NULL) { 623 if (ie->ie_flags & IE_ADDING_THREAD) 624 msleep(ie, &ie->ie_lock, 0, "ithread", 0); 625 else { 626 ie->ie_flags |= IE_ADDING_THREAD; 627 mtx_unlock(&ie->ie_lock); 628 it = ithread_create("intr: newborn", ih); 629 mtx_lock(&ie->ie_lock); 630 ie->ie_flags &= ~IE_ADDING_THREAD; 631 ie->ie_thread = it; 632 it->it_event = ie; 633 ithread_update(it); 634 wakeup(ie); 635 } 636 } 637 } 638 CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name, 639 ie->ie_name); 640 mtx_unlock(&ie->ie_lock); 641 642 if (cookiep != NULL) 643 *cookiep = ih; 644 return (0); 645 } 646 #endif 647 648 /* 649 * Return the ie_source field from the intr_event an intr_handler is 650 * associated with. 651 */ 652 void * 653 intr_handler_source(void *cookie) 654 { 655 struct intr_handler *ih; 656 struct intr_event *ie; 657 658 ih = (struct intr_handler *)cookie; 659 if (ih == NULL) 660 return (NULL); 661 ie = ih->ih_event; 662 KASSERT(ie != NULL, 663 ("interrupt handler \"%s\" has a NULL interrupt event", 664 ih->ih_name)); 665 return (ie->ie_source); 666 } 667 668 #ifndef INTR_FILTER 669 int 670 intr_event_remove_handler(void *cookie) 671 { 672 struct intr_handler *handler = (struct intr_handler *)cookie; 673 struct intr_event *ie; 674 #ifdef INVARIANTS 675 struct intr_handler *ih; 676 #endif 677 #ifdef notyet 678 int dead; 679 #endif 680 681 if (handler == NULL) 682 return (EINVAL); 683 ie = handler->ih_event; 684 KASSERT(ie != NULL, 685 ("interrupt handler \"%s\" has a NULL interrupt event", 686 handler->ih_name)); 687 mtx_lock(&ie->ie_lock); 688 CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name, 689 ie->ie_name); 690 #ifdef INVARIANTS 691 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) 692 if (ih == handler) 693 goto ok; 694 mtx_unlock(&ie->ie_lock); 695 panic("interrupt handler \"%s\" not found in interrupt event \"%s\"", 696 ih->ih_name, ie->ie_name); 697 ok: 698 #endif 699 /* 700 * If there is no ithread, then just remove the handler and return. 
701 * XXX: Note that an INTR_FAST handler might be running on another 702 * CPU! 703 */ 704 if (ie->ie_thread == NULL) { 705 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 706 mtx_unlock(&ie->ie_lock); 707 free(handler, M_ITHREAD); 708 return (0); 709 } 710 711 /* 712 * If the interrupt thread is already running, then just mark this 713 * handler as being dead and let the ithread do the actual removal. 714 * 715 * During a cold boot while cold is set, msleep() does not sleep, 716 * so we have to remove the handler here rather than letting the 717 * thread do it. 718 */ 719 thread_lock(ie->ie_thread->it_thread); 720 if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) { 721 handler->ih_flags |= IH_DEAD; 722 723 /* 724 * Ensure that the thread will process the handler list 725 * again and remove this handler if it has already passed 726 * it on the list. 727 */ 728 ie->ie_thread->it_need = 1; 729 } else 730 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 731 thread_unlock(ie->ie_thread->it_thread); 732 while (handler->ih_flags & IH_DEAD) 733 msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0); 734 intr_event_update(ie); 735 #ifdef notyet 736 /* 737 * XXX: This could be bad in the case of ppbus(8). Also, I think 738 * this could lead to races of stale data when servicing an 739 * interrupt. 740 */ 741 dead = 1; 742 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 743 if (!(ih->ih_flags & IH_FAST)) { 744 dead = 0; 745 break; 746 } 747 } 748 if (dead) { 749 ithread_destroy(ie->ie_thread); 750 ie->ie_thread = NULL; 751 } 752 #endif 753 mtx_unlock(&ie->ie_lock); 754 free(handler, M_ITHREAD); 755 return (0); 756 } 757 758 static int 759 intr_event_schedule_thread(struct intr_event *ie) 760 { 761 struct intr_entropy entropy; 762 struct intr_thread *it; 763 struct thread *td; 764 struct thread *ctd; 765 struct proc *p; 766 767 /* 768 * If no ithread or no handlers, then we have a stray interrupt. 769 */ 770 if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || 771 ie->ie_thread == NULL) 772 return (EINVAL); 773 774 ctd = curthread; 775 it = ie->ie_thread; 776 td = it->it_thread; 777 p = td->td_proc; 778 779 /* 780 * If any of the handlers for this ithread claim to be good 781 * sources of entropy, then gather some. 782 */ 783 if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) { 784 CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__, 785 p->p_pid, td->td_name); 786 entropy.event = (uintptr_t)ie; 787 entropy.td = ctd; 788 random_harvest(&entropy, sizeof(entropy), 2, 0, 789 RANDOM_INTERRUPT); 790 } 791 792 KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name)); 793 794 /* 795 * Set it_need to tell the thread to keep running if it is already 796 * running. Then, lock the thread and see if we actually need to 797 * put it on the runqueue. 
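	 * (ithread_loop() re-checks it_need while holding the thread lock
	 * before going back to sleep via TD_SET_IWAIT(), so storing it_need
	 * here before taking the lock below cannot lose a wakeup.)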
798 */ 799 it->it_need = 1; 800 thread_lock(td); 801 if (TD_AWAITING_INTR(td)) { 802 CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid, 803 td->td_name); 804 TD_CLR_IWAIT(td); 805 sched_add(td, SRQ_INTR); 806 } else { 807 CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d", 808 __func__, p->p_pid, td->td_name, it->it_need, td->td_state); 809 } 810 thread_unlock(td); 811 812 return (0); 813 } 814 #else 815 int 816 intr_event_remove_handler(void *cookie) 817 { 818 struct intr_handler *handler = (struct intr_handler *)cookie; 819 struct intr_event *ie; 820 struct intr_thread *it; 821 #ifdef INVARIANTS 822 struct intr_handler *ih; 823 #endif 824 #ifdef notyet 825 int dead; 826 #endif 827 828 if (handler == NULL) 829 return (EINVAL); 830 ie = handler->ih_event; 831 KASSERT(ie != NULL, 832 ("interrupt handler \"%s\" has a NULL interrupt event", 833 handler->ih_name)); 834 mtx_lock(&ie->ie_lock); 835 CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name, 836 ie->ie_name); 837 #ifdef INVARIANTS 838 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) 839 if (ih == handler) 840 goto ok; 841 mtx_unlock(&ie->ie_lock); 842 panic("interrupt handler \"%s\" not found in interrupt event \"%s\"", 843 ih->ih_name, ie->ie_name); 844 ok: 845 #endif 846 /* 847 * If there are no ithreads (per event and per handler), then 848 * just remove the handler and return. 849 * XXX: Note that an INTR_FAST handler might be running on another CPU! 850 */ 851 if (ie->ie_thread == NULL && handler->ih_thread == NULL) { 852 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 853 mtx_unlock(&ie->ie_lock); 854 free(handler, M_ITHREAD); 855 return (0); 856 } 857 858 /* Private or global ithread? */ 859 it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread; 860 /* 861 * If the interrupt thread is already running, then just mark this 862 * handler as being dead and let the ithread do the actual removal. 863 * 864 * During a cold boot while cold is set, msleep() does not sleep, 865 * so we have to remove the handler here rather than letting the 866 * thread do it. 867 */ 868 thread_lock(it->it_thread); 869 if (!TD_AWAITING_INTR(it->it_thread) && !cold) { 870 handler->ih_flags |= IH_DEAD; 871 872 /* 873 * Ensure that the thread will process the handler list 874 * again and remove this handler if it has already passed 875 * it on the list. 876 */ 877 it->it_need = 1; 878 } else 879 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 880 thread_unlock(it->it_thread); 881 while (handler->ih_flags & IH_DEAD) 882 msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0); 883 /* 884 * At this point, the handler has been disconnected from the event, 885 * so we can kill the private ithread if any. 886 */ 887 if (handler->ih_thread) { 888 ithread_destroy(handler->ih_thread); 889 handler->ih_thread = NULL; 890 } 891 intr_event_update(ie); 892 #ifdef notyet 893 /* 894 * XXX: This could be bad in the case of ppbus(8). Also, I think 895 * this could lead to races of stale data when servicing an 896 * interrupt. 
897 */ 898 dead = 1; 899 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 900 if (handler != NULL) { 901 dead = 0; 902 break; 903 } 904 } 905 if (dead) { 906 ithread_destroy(ie->ie_thread); 907 ie->ie_thread = NULL; 908 } 909 #endif 910 mtx_unlock(&ie->ie_lock); 911 free(handler, M_ITHREAD); 912 return (0); 913 } 914 915 static int 916 intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it) 917 { 918 struct intr_entropy entropy; 919 struct thread *td; 920 struct thread *ctd; 921 struct proc *p; 922 923 /* 924 * If no ithread or no handlers, then we have a stray interrupt. 925 */ 926 if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL) 927 return (EINVAL); 928 929 ctd = curthread; 930 td = it->it_thread; 931 p = td->td_proc; 932 933 /* 934 * If any of the handlers for this ithread claim to be good 935 * sources of entropy, then gather some. 936 */ 937 if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) { 938 CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__, 939 p->p_pid, td->td_name); 940 entropy.event = (uintptr_t)ie; 941 entropy.td = ctd; 942 random_harvest(&entropy, sizeof(entropy), 2, 0, 943 RANDOM_INTERRUPT); 944 } 945 946 KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name)); 947 948 /* 949 * Set it_need to tell the thread to keep running if it is already 950 * running. Then, lock the thread and see if we actually need to 951 * put it on the runqueue. 952 */ 953 it->it_need = 1; 954 thread_lock(td); 955 if (TD_AWAITING_INTR(td)) { 956 CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid, 957 td->td_name); 958 TD_CLR_IWAIT(td); 959 sched_add(td, SRQ_INTR); 960 } else { 961 CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d", 962 __func__, p->p_pid, td->td_name, it->it_need, td->td_state); 963 } 964 thread_unlock(td); 965 966 return (0); 967 } 968 #endif 969 970 /* 971 * Allow interrupt event binding for software interrupt handlers -- a no-op, 972 * since interrupts are generated in software rather than being directed by 973 * a PIC. 974 */ 975 static int 976 swi_assign_cpu(void *arg, u_char cpu) 977 { 978 979 return (0); 980 } 981 982 /* 983 * Add a software interrupt handler to a specified event. If a given event 984 * is not specified, then a new event is created. 985 */ 986 int 987 swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler, 988 void *arg, int pri, enum intr_type flags, void **cookiep) 989 { 990 struct intr_event *ie; 991 int error; 992 993 if (flags & INTR_ENTROPY) 994 return (EINVAL); 995 996 ie = (eventp != NULL) ? *eventp : NULL; 997 998 if (ie != NULL) { 999 if (!(ie->ie_flags & IE_SOFT)) 1000 return (EINVAL); 1001 } else { 1002 error = intr_event_create(&ie, NULL, IE_SOFT, 0, 1003 NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri); 1004 if (error) 1005 return (error); 1006 if (eventp != NULL) 1007 *eventp = ie; 1008 } 1009 error = intr_event_add_handler(ie, name, NULL, handler, arg, 1010 (pri * RQ_PPQ) + PI_SOFT, flags, cookiep); 1011 if (error) 1012 return (error); 1013 if (pri == SWI_CLOCK) { 1014 struct proc *p; 1015 p = ie->ie_thread->it_thread->td_proc; 1016 PROC_LOCK(p); 1017 p->p_flag |= P_NOLOAD; 1018 PROC_UNLOCK(p); 1019 } 1020 return (0); 1021 } 1022 1023 /* 1024 * Schedule a software interrupt thread. 
1025 */ 1026 void 1027 swi_sched(void *cookie, int flags) 1028 { 1029 struct intr_handler *ih = (struct intr_handler *)cookie; 1030 struct intr_event *ie = ih->ih_event; 1031 int error; 1032 1033 CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name, 1034 ih->ih_need); 1035 1036 /* 1037 * Set ih_need for this handler so that if the ithread is already 1038 * running it will execute this handler on the next pass. Otherwise, 1039 * it will execute it the next time it runs. 1040 */ 1041 atomic_store_rel_int(&ih->ih_need, 1); 1042 1043 if (!(flags & SWI_DELAY)) { 1044 PCPU_INC(cnt.v_soft); 1045 #ifdef INTR_FILTER 1046 error = intr_event_schedule_thread(ie, ie->ie_thread); 1047 #else 1048 error = intr_event_schedule_thread(ie); 1049 #endif 1050 KASSERT(error == 0, ("stray software interrupt")); 1051 } 1052 } 1053 1054 /* 1055 * Remove a software interrupt handler. Currently this code does not 1056 * remove the associated interrupt event if it becomes empty. Calling code 1057 * may do so manually via intr_event_destroy(), but that's not really 1058 * an optimal interface. 1059 */ 1060 int 1061 swi_remove(void *cookie) 1062 { 1063 1064 return (intr_event_remove_handler(cookie)); 1065 } 1066 1067 #ifdef INTR_FILTER 1068 static void 1069 priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih) 1070 { 1071 struct intr_event *ie; 1072 1073 ie = ih->ih_event; 1074 /* 1075 * If this handler is marked for death, remove it from 1076 * the list of handlers and wake up the sleeper. 1077 */ 1078 if (ih->ih_flags & IH_DEAD) { 1079 mtx_lock(&ie->ie_lock); 1080 TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next); 1081 ih->ih_flags &= ~IH_DEAD; 1082 wakeup(ih); 1083 mtx_unlock(&ie->ie_lock); 1084 return; 1085 } 1086 1087 /* Execute this handler. */ 1088 CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x", 1089 __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument, 1090 ih->ih_name, ih->ih_flags); 1091 1092 if (!(ih->ih_flags & IH_MPSAFE)) 1093 mtx_lock(&Giant); 1094 ih->ih_handler(ih->ih_argument); 1095 if (!(ih->ih_flags & IH_MPSAFE)) 1096 mtx_unlock(&Giant); 1097 } 1098 #endif 1099 1100 /* 1101 * This is a public function for use by drivers that mux interrupt 1102 * handlers for child devices from their interrupt handler. 1103 */ 1104 void 1105 intr_event_execute_handlers(struct proc *p, struct intr_event *ie) 1106 { 1107 struct intr_handler *ih, *ihn; 1108 1109 TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) { 1110 /* 1111 * If this handler is marked for death, remove it from 1112 * the list of handlers and wake up the sleeper. 1113 */ 1114 if (ih->ih_flags & IH_DEAD) { 1115 mtx_lock(&ie->ie_lock); 1116 TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next); 1117 ih->ih_flags &= ~IH_DEAD; 1118 wakeup(ih); 1119 mtx_unlock(&ie->ie_lock); 1120 continue; 1121 } 1122 1123 /* Skip filter only handlers */ 1124 if (ih->ih_handler == NULL) 1125 continue; 1126 1127 /* 1128 * For software interrupt threads, we only execute 1129 * handlers that have their need flag set. Hardware 1130 * interrupt threads always invoke all of their handlers. 1131 */ 1132 if (ie->ie_flags & IE_SOFT) { 1133 if (!ih->ih_need) 1134 continue; 1135 else 1136 atomic_store_rel_int(&ih->ih_need, 0); 1137 } 1138 1139 /* Execute this handler. 
*/ 1140 CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x", 1141 __func__, p->p_pid, (void *)ih->ih_handler, 1142 ih->ih_argument, ih->ih_name, ih->ih_flags); 1143 1144 if (!(ih->ih_flags & IH_MPSAFE)) 1145 mtx_lock(&Giant); 1146 ih->ih_handler(ih->ih_argument); 1147 if (!(ih->ih_flags & IH_MPSAFE)) 1148 mtx_unlock(&Giant); 1149 } 1150 } 1151 1152 static void 1153 ithread_execute_handlers(struct proc *p, struct intr_event *ie) 1154 { 1155 1156 /* Interrupt handlers should not sleep. */ 1157 if (!(ie->ie_flags & IE_SOFT)) 1158 THREAD_NO_SLEEPING(); 1159 intr_event_execute_handlers(p, ie); 1160 if (!(ie->ie_flags & IE_SOFT)) 1161 THREAD_SLEEPING_OK(); 1162 1163 /* 1164 * Interrupt storm handling: 1165 * 1166 * If this interrupt source is currently storming, then throttle 1167 * it to only fire the handler once per clock tick. 1168 * 1169 * If this interrupt source is not currently storming, but the 1170 * number of back to back interrupts exceeds the storm threshold, 1171 * then enter storming mode. 1172 */ 1173 if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold && 1174 !(ie->ie_flags & IE_SOFT)) { 1175 /* Report the message only once every second. */ 1176 if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) { 1177 printf( 1178 "interrupt storm detected on \"%s\"; throttling interrupt source\n", 1179 ie->ie_name); 1180 } 1181 pause("istorm", 1); 1182 } else 1183 ie->ie_count++; 1184 1185 /* 1186 * Now that all the handlers have had a chance to run, reenable 1187 * the interrupt source. 1188 */ 1189 if (ie->ie_post_ithread != NULL) 1190 ie->ie_post_ithread(ie->ie_source); 1191 } 1192 1193 #ifndef INTR_FILTER 1194 /* 1195 * This is the main code for interrupt threads. 1196 */ 1197 static void 1198 ithread_loop(void *arg) 1199 { 1200 struct intr_thread *ithd; 1201 struct intr_event *ie; 1202 struct thread *td; 1203 struct proc *p; 1204 1205 td = curthread; 1206 p = td->td_proc; 1207 ithd = (struct intr_thread *)arg; 1208 KASSERT(ithd->it_thread == td, 1209 ("%s: ithread and proc linkage out of sync", __func__)); 1210 ie = ithd->it_event; 1211 ie->ie_count = 0; 1212 1213 /* 1214 * As long as we have interrupts outstanding, go through the 1215 * list of handlers, giving each one a go at it. 1216 */ 1217 for (;;) { 1218 /* 1219 * If we are an orphaned thread, then just die. 1220 */ 1221 if (ithd->it_flags & IT_DEAD) { 1222 CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__, 1223 p->p_pid, td->td_name); 1224 free(ithd, M_ITHREAD); 1225 kthread_exit(); 1226 } 1227 1228 /* 1229 * Service interrupts. If another interrupt arrives while 1230 * we are running, it will set it_need to note that we 1231 * should make another pass. 1232 */ 1233 while (ithd->it_need) { 1234 /* 1235 * This might need a full read and write barrier 1236 * to make sure that this write posts before any 1237 * of the memory or device accesses in the 1238 * handlers. 1239 */ 1240 atomic_store_rel_int(&ithd->it_need, 0); 1241 ithread_execute_handlers(p, ie); 1242 } 1243 WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread"); 1244 mtx_assert(&Giant, MA_NOTOWNED); 1245 1246 /* 1247 * Processed all our interrupts. Now get the sched 1248 * lock. This may take a while and it_need may get 1249 * set again, so we have to check it again. 1250 */ 1251 thread_lock(td); 1252 if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) { 1253 TD_SET_IWAIT(td); 1254 ie->ie_count = 0; 1255 mi_switch(SW_VOL | SWT_IWAIT, NULL); 1256 } 1257 thread_unlock(td); 1258 } 1259 } 1260 1261 /* 1262 * Main interrupt handling body. 
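 * Filters for all registered handlers run here in interrupt context; if a
 * handler has no filter, or a filter returns FILTER_SCHEDULE_THREAD, the
 * event's ithread is scheduled once the loop completes.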
 *
 * Input:
 * o ie:			the event connected to this interrupt.
 * o frame:			some archs (e.g., i386) pass a frame to some
 *				handlers as their main argument.
 * Return value:
 * o 0:				everything ok.
 * o EINVAL:			stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_handler *ih;
	struct thread *td;
	int error, ret, thread;

	td = curthread;

	/* An interrupt with no event or handlers is a stray interrupt. */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	/*
	 * Execute fast interrupt handlers directly.
	 * To support clock handlers, if a handler registers
	 * with a NULL argument, then we pass it a pointer to
	 * a trapframe as its argument.
	 */
	td->td_intr_nesting_level++;
	thread = 0;
	ret = 0;
	critical_enter();
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih->ih_filter == NULL) {
			thread = 1;
			continue;
		}
		CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
		    ih->ih_filter, ih->ih_argument == NULL ? frame :
		    ih->ih_argument, ih->ih_name);
		if (ih->ih_argument == NULL)
			ret = ih->ih_filter(frame);
		else
			ret = ih->ih_filter(ih->ih_argument);
		/*
		 * Wrapper handler special handling:
		 *
		 * In some particular cases (like pccard and pccbb),
		 * the _real_ device handler is wrapped in a couple of
		 * functions - a filter wrapper and an ithread wrapper.
		 * In this case (and just in this case), the filter wrapper
		 * could ask the system to schedule the ithread and mask
		 * the interrupt source if the wrapped handler is composed
		 * of just an ithread handler.
		 *
		 * TODO: write a generic wrapper to avoid people rolling
		 * their own.
		 */
		if (!thread) {
			if (ret == FILTER_SCHEDULE_THREAD)
				thread = 1;
		}
	}

	if (thread) {
		if (ie->ie_pre_ithread != NULL)
			ie->ie_pre_ithread(ie->ie_source);
	} else {
		if (ie->ie_post_filter != NULL)
			ie->ie_post_filter(ie->ie_source);
	}

	/* Schedule the ithread if needed. */
	if (thread) {
		error = intr_event_schedule_thread(ie);
#ifndef XEN
		KASSERT(error == 0, ("bad stray interrupt"));
#else
		if (error != 0)
			log(LOG_WARNING, "bad stray interrupt");
#endif
	}
	critical_exit();
	td->td_intr_nesting_level--;
	return (0);
}
#else
/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_handler *ih;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	int priv;

	td = curthread;
	p = td->td_proc;
	ih = (struct intr_handler *)arg;
	priv = (ih->ih_thread != NULL) ? 1 : 0;
	ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
1380 */ 1381 if (ithd->it_flags & IT_DEAD) { 1382 CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__, 1383 p->p_pid, td->td_name); 1384 free(ithd, M_ITHREAD); 1385 kthread_exit(); 1386 } 1387 1388 /* 1389 * Service interrupts. If another interrupt arrives while 1390 * we are running, it will set it_need to note that we 1391 * should make another pass. 1392 */ 1393 while (ithd->it_need) { 1394 /* 1395 * This might need a full read and write barrier 1396 * to make sure that this write posts before any 1397 * of the memory or device accesses in the 1398 * handlers. 1399 */ 1400 atomic_store_rel_int(&ithd->it_need, 0); 1401 if (priv) 1402 priv_ithread_execute_handler(p, ih); 1403 else 1404 ithread_execute_handlers(p, ie); 1405 } 1406 WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread"); 1407 mtx_assert(&Giant, MA_NOTOWNED); 1408 1409 /* 1410 * Processed all our interrupts. Now get the sched 1411 * lock. This may take a while and it_need may get 1412 * set again, so we have to check it again. 1413 */ 1414 thread_lock(td); 1415 if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) { 1416 TD_SET_IWAIT(td); 1417 ie->ie_count = 0; 1418 mi_switch(SW_VOL | SWT_IWAIT, NULL); 1419 } 1420 thread_unlock(td); 1421 } 1422 } 1423 1424 /* 1425 * Main loop for interrupt filter. 1426 * 1427 * Some architectures (i386, amd64 and arm) require the optional frame 1428 * parameter, and use it as the main argument for fast handler execution 1429 * when ih_argument == NULL. 1430 * 1431 * Return value: 1432 * o FILTER_STRAY: No filter recognized the event, and no 1433 * filter-less handler is registered on this 1434 * line. 1435 * o FILTER_HANDLED: A filter claimed the event and served it. 1436 * o FILTER_SCHEDULE_THREAD: No filter claimed the event, but there's at 1437 * least one filter-less handler on this line. 1438 * o FILTER_HANDLED | 1439 * FILTER_SCHEDULE_THREAD: A filter claimed the event, and asked for 1440 * scheduling the per-handler ithread. 1441 * 1442 * In case an ithread has to be scheduled, in *ithd there will be a 1443 * pointer to a struct intr_thread containing the thread to be 1444 * scheduled. 1445 */ 1446 1447 static int 1448 intr_filter_loop(struct intr_event *ie, struct trapframe *frame, 1449 struct intr_thread **ithd) 1450 { 1451 struct intr_handler *ih; 1452 void *arg; 1453 int ret, thread_only; 1454 1455 ret = 0; 1456 thread_only = 0; 1457 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 1458 /* 1459 * Execute fast interrupt handlers directly. 1460 * To support clock handlers, if a handler registers 1461 * with a NULL argument, then we pass it a pointer to 1462 * a trapframe as its argument. 1463 */ 1464 arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument); 1465 1466 CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__, 1467 ih->ih_filter, ih->ih_handler, arg, ih->ih_name); 1468 1469 if (ih->ih_filter != NULL) 1470 ret = ih->ih_filter(arg); 1471 else { 1472 thread_only = 1; 1473 continue; 1474 } 1475 1476 if (ret & FILTER_STRAY) 1477 continue; 1478 else { 1479 *ithd = ih->ih_thread; 1480 return (ret); 1481 } 1482 } 1483 1484 /* 1485 * No filters handled the interrupt and we have at least 1486 * one handler without a filter. In this case, we schedule 1487 * all of the filter-less handlers to run in the ithread. 1488 */ 1489 if (thread_only) { 1490 *ithd = ie->ie_thread; 1491 return (FILTER_SCHEDULE_THREAD); 1492 } 1493 return (FILTER_STRAY); 1494 } 1495 1496 /* 1497 * Main interrupt handling body. 1498 * 1499 * Input: 1500 * o ie: the event connected to this interrupt. 
 * o frame:			some archs (e.g., i386) pass a frame to some
 *				handlers as their main argument.
 * Return value:
 * o 0:				everything ok.
 * o EINVAL:			stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_thread *ithd;
	struct thread *td;
	int thread;

	ithd = NULL;
	td = curthread;

	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	td->td_intr_nesting_level++;
	thread = 0;
	critical_enter();
	thread = intr_filter_loop(ie, frame, &ithd);
	if (thread & FILTER_HANDLED) {
		if (ie->ie_post_filter != NULL)
			ie->ie_post_filter(ie->ie_source);
	} else {
		if (ie->ie_pre_ithread != NULL)
			ie->ie_pre_ithread(ie->ie_source);
	}
	critical_exit();

	/* Interrupt storm logic */
	if (thread & FILTER_STRAY) {
		ie->ie_count++;
		if (ie->ie_count < intr_storm_threshold)
			printf("Interrupt stray detection not present\n");
	}

	/* Schedule an ithread if needed. */
	if (thread & FILTER_SCHEDULE_THREAD) {
		if (intr_event_schedule_thread(ie, ithd) != 0)
			panic("%s: impossible stray interrupt", __func__);
	}
	td->td_intr_nesting_level--;
	return (0);
}
#endif

#ifdef DDB
/*
 * Dump details about an interrupt handler
 */
static void
db_dump_intrhand(struct intr_handler *ih)
{
	int comma;

	db_printf("\t%-10s ", ih->ih_name);
	switch (ih->ih_pri) {
	case PI_REALTIME:
		db_printf("CLK ");
		break;
	case PI_AV:
		db_printf("AV  ");
		break;
	case PI_TTYHIGH:
	case PI_TTYLOW:
		db_printf("TTY ");
		break;
	case PI_TAPE:
		db_printf("TAPE");
		break;
	case PI_NET:
		db_printf("NET ");
		break;
	case PI_DISK:
	case PI_DISKLOW:
		db_printf("DISK");
		break;
	case PI_DULL:
		db_printf("DULL");
		break;
	default:
		if (ih->ih_pri >= PI_SOFT)
			db_printf("SWI ");
		else
			db_printf("%4u", ih->ih_pri);
		break;
	}
	db_printf(" ");
	db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
	db_printf("(%p)", ih->ih_argument);
	if (ih->ih_need ||
	    (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
	    IH_MPSAFE)) != 0) {
		db_printf(" {");
		comma = 0;
		if (ih->ih_flags & IH_EXCLUSIVE) {
			if (comma)
				db_printf(", ");
			db_printf("EXCL");
			comma = 1;
		}
		if (ih->ih_flags & IH_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ih->ih_flags & IH_DEAD) {
			if (comma)
				db_printf(", ");
			db_printf("DEAD");
			comma = 1;
		}
		if (ih->ih_flags & IH_MPSAFE) {
			if (comma)
				db_printf(", ");
			db_printf("MPSAFE");
			comma = 1;
		}
		if (ih->ih_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");
}

/*
 * Dump details about an event.
 */
void
db_dump_intr_event(struct intr_event *ie, int handlers)
{
	struct intr_handler *ih;
	struct intr_thread *it;
	int comma;

	db_printf("%s ", ie->ie_fullname);
	it = ie->ie_thread;
	if (it != NULL)
		db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
	else
		db_printf("(no thread)");
	if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
	    (it != NULL && it->it_need)) {
		db_printf(" {");
		comma = 0;
		if (ie->ie_flags & IE_SOFT) {
			db_printf("SOFT");
			comma = 1;
		}
		if (ie->ie_flags & IE_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ie->ie_flags & IE_ADDING_THREAD) {
			if (comma)
				db_printf(", ");
			db_printf("ADDING_THREAD");
			comma = 1;
		}
		if (it != NULL && it->it_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");

	if (handlers)
		TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
			db_dump_intrhand(ih);
}

/*
 * Dump data about interrupt handlers
 */
DB_SHOW_COMMAND(intr, db_show_intr)
{
	struct intr_event *ie;
	int all, verbose;

	verbose = index(modif, 'v') != NULL;
	all = index(modif, 'a') != NULL;
	TAILQ_FOREACH(ie, &event_list, ie_list) {
		if (!all && TAILQ_EMPTY(&ie->ie_handlers))
			continue;
		db_dump_intr_event(ie, verbose);
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */

/*
 * Start standard software interrupt threads
 */
static void
start_softintr(void *dummy)
{

	if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
		panic("died while creating vm swi ithread");
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
    NULL);

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however is
 * machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames,
	    req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrcnt,
	    (char *)eintrcnt - (char *)intrcnt, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");

#ifdef DDB
/*
 * DDB command to dump the interrupt statistics.
 */
DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
{
	u_long *i;
	char *cp;

	cp = intrnames;
	for (i = intrcnt; i != eintrcnt && !db_pager_quit; i++) {
		if (*cp == '\0')
			break;
		if (*i != 0)
			db_printf("%s\t%lu\n", cp, *i);
		cp += strlen(cp) + 1;
	}
}
#endif