/*-
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
	struct intr_event *it_event;
	struct thread *it_thread;	/* Kernel thread. */
	int	it_flags;		/* (j) IT_* flags. */
	int	it_need;		/* Needs service. */
};

/* Interrupt thread flags kept in it_flags */
#define	IT_DEAD		0x000001	/* Thread is waiting to exit. */

struct intr_entropy {
	struct thread *td;
	uintptr_t event;
};

struct intr_event *clk_intr_event;
struct intr_event *tty_intr_event;
void	*vm_ih;
struct proc *intrproc;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static int intr_storm_threshold = 1000;
TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);
static struct mtx event_lock;
MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);

static void	intr_event_update(struct intr_event *ie);
#ifdef INTR_FILTER
static int	intr_event_schedule_thread(struct intr_event *ie,
		    struct intr_thread *ithd);
static int	intr_filter_loop(struct intr_event *ie,
		    struct trapframe *frame, struct intr_thread **ithd);
static struct intr_thread *ithread_create(const char *name,
		    struct intr_handler *ih);
#else
static int	intr_event_schedule_thread(struct intr_event *ie);
static struct intr_thread *ithread_create(const char *name);
#endif
static void	ithread_destroy(struct intr_thread *ithread);
static void	ithread_execute_handlers(struct proc *p,
		    struct intr_event *ie);
#ifdef INTR_FILTER
static void	priv_ithread_execute_handler(struct proc *p,
		    struct intr_handler *ih);
#endif
static void	ithread_loop(void *);
static void	ithread_update(struct intr_thread *ithd);
static void	start_softintr(void *);

/* Map an interrupt type to an ithread priority. */
u_char
intr_priority(enum intr_type flags)
{
	u_char pri;

	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
	switch (flags) {
	case INTR_TYPE_TTY:
		pri = PI_TTYLOW;
		break;
	case INTR_TYPE_BIO:
		/*
		 * XXX We need to refine this.  BSD/OS distinguishes
		 * between tape and disk priorities.
		 */
		pri = PI_DISK;
		break;
	case INTR_TYPE_NET:
		pri = PI_NET;
		break;
	case INTR_TYPE_CAM:
		pri = PI_DISK;	/* XXX or PI_CAM? */
		break;
	case INTR_TYPE_AV:	/* Audio/video */
		pri = PI_AV;
		break;
	case INTR_TYPE_CLK:
		pri = PI_REALTIME;
		break;
	case INTR_TYPE_MISC:
		pri = PI_DULL;	/* don't care */
		break;
	default:
		/* We didn't specify an interrupt level. */
		panic("intr_priority: no interrupt type in flags");
	}

	return pri;
}

/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
	struct intr_event *ie;
	struct thread *td;
	u_char pri;

	ie = ithd->it_event;
	td = ithd->it_thread;

	/* Determine the overall priority of this event. */
	if (TAILQ_EMPTY(&ie->ie_handlers))
		pri = PRI_MAX_ITHD;
	else
		pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;

	/* Update name and priority. */
	strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
	thread_lock(td);
	sched_prio(td, pri);
	thread_unlock(td);
}

/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
	struct intr_handler *ih;
	char *last;
	int missed, space;

	/* Start off with no entropy and just the name of the event. */
	mtx_assert(&ie->ie_lock, MA_OWNED);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	ie->ie_flags &= ~IE_ENTROPY;
	missed = 0;
	space = 1;

	/* Run through all the handlers updating values. */
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
		    sizeof(ie->ie_fullname)) {
			strcat(ie->ie_fullname, " ");
			strcat(ie->ie_fullname, ih->ih_name);
			space = 0;
		} else
			missed++;
		if (ih->ih_flags & IH_ENTROPY)
			ie->ie_flags |= IE_ENTROPY;
	}

	/*
	 * If the handler names were too long, add +'s to indicate missing
	 * names.  If we run out of room and still have +'s to add, change
	 * the last character from a + to a *.
	 */
	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
	while (missed-- > 0) {
		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
			if (*last == '+') {
				*last = '*';
				break;
			} else
				*last = '+';
		} else if (space) {
			strcat(ie->ie_fullname, " +");
			space = 0;
		} else
			strcat(ie->ie_fullname, "+");
	}

	/*
	 * If this event has an ithread, update its priority and
	 * name.
	 */
	if (ie->ie_thread != NULL)
		ithread_update(ie->ie_thread);
	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}

int
intr_event_create(struct intr_event **event, void *source, int flags, int irq,
    void (*pre_ithread)(void *), void (*post_ithread)(void *),
    void (*post_filter)(void *), int (*assign_cpu)(void *, u_char),
    const char *fmt, ...)
{
	struct intr_event *ie;
	va_list ap;

	/* The only valid flag during creation is IE_SOFT. */
	if ((flags & ~IE_SOFT) != 0)
		return (EINVAL);
	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
	ie->ie_source = source;
	ie->ie_pre_ithread = pre_ithread;
	ie->ie_post_ithread = post_ithread;
	ie->ie_post_filter = post_filter;
	ie->ie_assign_cpu = assign_cpu;
	ie->ie_flags = flags;
	ie->ie_irq = irq;
	ie->ie_cpu = NOCPU;
	TAILQ_INIT(&ie->ie_handlers);
	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

	va_start(ap, fmt);
	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
	va_end(ap);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	mtx_lock(&event_lock);
	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
	mtx_unlock(&event_lock);
	if (event != NULL)
		*event = ie;
	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
	return (0);
}

/*
 * Bind an interrupt event to the specified CPU.  Note that not all
 * platforms support binding an interrupt to a CPU.  For those
 * platforms this request will fail.  For supported platforms, any
 * associated ithreads as well as the primary interrupt context will
 * be bound to the specified CPU.  Using a cpu id of NOCPU unbinds
 * the interrupt event.
 */
int
intr_event_bind(struct intr_event *ie, u_char cpu)
{
	cpuset_t mask;
	lwpid_t id;
	int error;

	/* Need a CPU to bind to. */
	if (cpu != NOCPU && CPU_ABSENT(cpu))
		return (EINVAL);

	if (ie->ie_assign_cpu == NULL)
		return (EOPNOTSUPP);
	/*
	 * If we have any ithreads try to set their mask first since this
	 * can fail.
	 */
	mtx_lock(&ie->ie_lock);
	if (ie->ie_thread != NULL) {
		CPU_ZERO(&mask);
		if (cpu == NOCPU)
			CPU_COPY(cpuset_root, &mask);
		else
			CPU_SET(cpu, &mask);
		id = ie->ie_thread->it_thread->td_tid;
		mtx_unlock(&ie->ie_lock);
		error = cpuset_setthread(id, &mask);
		if (error)
			return (error);
	} else
		mtx_unlock(&ie->ie_lock);
	error = ie->ie_assign_cpu(ie->ie_source, cpu);
	if (error)
		return (error);
	mtx_lock(&ie->ie_lock);
	ie->ie_cpu = cpu;
	mtx_unlock(&ie->ie_lock);

	return (error);
}

static struct intr_event *
intr_lookup(int irq)
{
	struct intr_event *ie;

	mtx_lock(&event_lock);
	TAILQ_FOREACH(ie, &event_list, ie_list)
		if (ie->ie_irq == irq &&
		    (ie->ie_flags & IE_SOFT) == 0 &&
		    TAILQ_FIRST(&ie->ie_handlers) != NULL)
			break;
	mtx_unlock(&event_lock);
	return (ie);
}

int
intr_setaffinity(int irq, void *m)
{
	struct intr_event *ie;
	cpuset_t *mask;
	u_char cpu;
	int n;

	mask = m;
	cpu = NOCPU;
	/*
	 * If we're setting all cpus we can unbind.  Otherwise make sure
	 * only one cpu is in the set.
	 */
	if (CPU_CMP(cpuset_root, mask)) {
		for (n = 0; n < CPU_SETSIZE; n++) {
			if (!CPU_ISSET(n, mask))
				continue;
			if (cpu != NOCPU)
				return (EINVAL);
			cpu = (u_char)n;
		}
	}
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);
	return (intr_event_bind(ie, cpu));
}

int
intr_getaffinity(int irq, void *m)
{
	struct intr_event *ie;
	cpuset_t *mask;

	mask = m;
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);
	CPU_ZERO(mask);
	mtx_lock(&ie->ie_lock);
	if (ie->ie_cpu == NOCPU)
		CPU_COPY(cpuset_root, mask);
	else
		CPU_SET(ie->ie_cpu, mask);
	mtx_unlock(&ie->ie_lock);
	return (0);
}

int
intr_event_destroy(struct intr_event *ie)
{

	mtx_lock(&event_lock);
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		mtx_unlock(&ie->ie_lock);
		mtx_unlock(&event_lock);
		return (EBUSY);
	}
	TAILQ_REMOVE(&event_list, ie, ie_list);
#ifndef notyet
	if (ie->ie_thread != NULL) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	mtx_unlock(&event_lock);
	mtx_destroy(&ie->ie_lock);
	free(ie, M_ITHREAD);
	return (0);
}

#ifndef INTR_FILTER
static struct intr_thread *
ithread_create(const char *name)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
	    &td, RFSTOPPED | RFHIGHPID,
	    0, "intr", "%s", name);
	if (error)
		panic("kproc_create() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}
#else
static struct intr_thread *
ithread_create(const char *name, struct intr_handler *ih)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ih, &intrproc,
	    &td, RFSTOPPED | RFHIGHPID,
	    0, "intr", "%s", name);
	if (error)
		panic("kproc_create() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}
#endif

static void
ithread_destroy(struct intr_thread *ithread)
{
	struct thread *td;

	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
	td = ithread->it_thread;
	thread_lock(td);
	ithread->it_flags |= IT_DEAD;
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	}
	thread_unlock(td);
}

#ifndef INTR_FILTER
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	ih->ih_name = name;
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Add the new handler to the event in priority order. */
	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	intr_event_update(ie);

	/* Create a thread if we need one. */
	while (ie->ie_thread == NULL && handler != NULL) {
		if (ie->ie_flags & IE_ADDING_THREAD)
			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
		else {
			ie->ie_flags |= IE_ADDING_THREAD;
			mtx_unlock(&ie->ie_lock);
			it = ithread_create("intr: newborn");
			mtx_lock(&ie->ie_lock);
			ie->ie_flags &= ~IE_ADDING_THREAD;
			ie->ie_thread = it;
			it->it_event = ie;
			ithread_update(it);
			wakeup(ie);
		}
	}
	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}
#else
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	ih->ih_name = name;
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Add the new handler to the event in priority order. */
	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	intr_event_update(ie);

	/* For filtered handlers, create a private ithread to run on. */
	if (filter != NULL && handler != NULL) {
		mtx_unlock(&ie->ie_lock);
		it = ithread_create("intr: newborn", ih);
		mtx_lock(&ie->ie_lock);
		it->it_event = ie;
		ih->ih_thread = it;
		ithread_update(it); // XXX - do we really need this?!?!?
	} else { /* Create the global per-event thread if we need one. */
		while (ie->ie_thread == NULL && handler != NULL) {
			if (ie->ie_flags & IE_ADDING_THREAD)
				msleep(ie, &ie->ie_lock, 0, "ithread", 0);
			else {
				ie->ie_flags |= IE_ADDING_THREAD;
				mtx_unlock(&ie->ie_lock);
				it = ithread_create("intr: newborn", ih);
				mtx_lock(&ie->ie_lock);
				ie->ie_flags &= ~IE_ADDING_THREAD;
				ie->ie_thread = it;
				it->it_event = ie;
				ithread_update(it);
				wakeup(ie);
			}
		}
	}
	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}
#endif

/*
 * Return the ie_source field from the intr_event an intr_handler is
 * associated with.
 */
void *
intr_handler_source(void *cookie)
{
	struct intr_handler *ih;
	struct intr_event *ie;

	ih = (struct intr_handler *)cookie;
	if (ih == NULL)
		return (NULL);
	ie = ih->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    ih->ih_name));
	return (ie->ie_source);
}

#ifndef INTR_FILTER
int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
#ifdef INVARIANTS
	struct intr_handler *ih;
#endif
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ie->ie_lock);
	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
	    ih->ih_name, ie->ie_name);
ok:
#endif
	/*
	 * If there is no ithread, then just remove the handler and return.
	 * XXX: Note that an INTR_FAST handler might be running on another
	 * CPU!
	 */
	if (ie->ie_thread == NULL) {
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 *
	 * During a cold boot while cold is set, msleep() does not sleep,
	 * so we have to remove the handler here rather than letting the
	 * thread do it.
	 */
	thread_lock(ie->ie_thread->it_thread);
	if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 */
		ie->ie_thread->it_need = 1;
	} else
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
	thread_unlock(ie->ie_thread->it_thread);
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	intr_event_update(ie);
#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(8).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (!(ih->ih_flags & IH_FAST)) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}

static int
intr_event_schedule_thread(struct intr_event *ie)
{
	struct intr_entropy entropy;
	struct intr_thread *it;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
	    ie->ie_thread == NULL)
		return (EINVAL);

	ctd = curthread;
	it = ie->ie_thread;
	td = it->it_thread;
	p = td->td_proc;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
		    p->p_pid, td->td_name);
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest(&entropy, sizeof(entropy), 2, 0,
		    RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 */
	it->it_need = 1;
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
	}
	thread_unlock(td);

	return (0);
}
#else
int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
	struct intr_thread *it;
#ifdef INVARIANTS
	struct intr_handler *ih;
#endif
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ie->ie_lock);
	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
	    ih->ih_name, ie->ie_name);
ok:
#endif
	/*
	 * If there are no ithreads (per event and per handler), then
	 * just remove the handler and return.
	 * XXX: Note that an INTR_FAST handler might be running on another CPU!
	 */
	if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/* Private or global ithread? */
	it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;
	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 *
	 * During a cold boot while cold is set, msleep() does not sleep,
	 * so we have to remove the handler here rather than letting the
	 * thread do it.
	 */
	thread_lock(it->it_thread);
	if (!TD_AWAITING_INTR(it->it_thread) && !cold) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 */
		it->it_need = 1;
	} else
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
	thread_unlock(it->it_thread);
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	/*
	 * At this point, the handler has been disconnected from the event,
	 * so we can kill the private ithread if any.
	 */
	if (handler->ih_thread) {
		ithread_destroy(handler->ih_thread);
		handler->ih_thread = NULL;
	}
	intr_event_update(ie);
#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(8).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih->ih_handler != NULL) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}

static int
intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
{
	struct intr_entropy entropy;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
		return (EINVAL);

	ctd = curthread;
	td = it->it_thread;
	p = td->td_proc;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
		    p->p_pid, td->td_name);
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest(&entropy, sizeof(entropy), 2, 0,
		    RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 */
	it->it_need = 1;
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
	}
	thread_unlock(td);

	return (0);
}
#endif

/*
 * Allow interrupt event binding for software interrupt handlers -- a no-op,
 * since interrupts are generated in software rather than being directed by
 * a PIC.
 */
static int
swi_assign_cpu(void *arg, u_char cpu)
{

	return (0);
}

/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
    void *arg, int pri, enum intr_type flags, void **cookiep)
{
	struct intr_event *ie;
	int error;

	if (flags & INTR_ENTROPY)
		return (EINVAL);

	ie = (eventp != NULL) ? *eventp : NULL;

	if (ie != NULL) {
		if (!(ie->ie_flags & IE_SOFT))
			return (EINVAL);
	} else {
		error = intr_event_create(&ie, NULL, IE_SOFT, 0,
		    NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
		if (error)
			return (error);
		if (eventp != NULL)
			*eventp = ie;
	}
	error = intr_event_add_handler(ie, name, NULL, handler, arg,
	    (pri * RQ_PPQ) + PI_SOFT, flags, cookiep);
	if (error)
		return (error);
	if (pri == SWI_CLOCK) {
		struct proc *p;
		p = ie->ie_thread->it_thread->td_proc;
		PROC_LOCK(p);
		p->p_flag |= P_NOLOAD;
		PROC_UNLOCK(p);
	}
	return (0);
}

/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
	struct intr_handler *ih = (struct intr_handler *)cookie;
	struct intr_event *ie = ih->ih_event;
	int error;

	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
	    ih->ih_need);

	/*
	 * Set ih_need for this handler so that if the ithread is already
	 * running it will execute this handler on the next pass.  Otherwise,
	 * it will execute it the next time it runs.
	 */
	atomic_store_rel_int(&ih->ih_need, 1);

	if (!(flags & SWI_DELAY)) {
		PCPU_INC(cnt.v_soft);
#ifdef INTR_FILTER
		error = intr_event_schedule_thread(ie, ie->ie_thread);
#else
		error = intr_event_schedule_thread(ie);
#endif
		KASSERT(error == 0, ("stray software interrupt"));
	}
}

/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

	return (intr_event_remove_handler(cookie));
}

#ifdef INTR_FILTER
static void
priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih)
{
	struct intr_event *ie;

	ie = ih->ih_event;
	/*
	 * If this handler is marked for death, remove it from
	 * the list of handlers and wake up the sleeper.
	 */
	if (ih->ih_flags & IH_DEAD) {
		mtx_lock(&ie->ie_lock);
		TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
		ih->ih_flags &= ~IH_DEAD;
		wakeup(ih);
		mtx_unlock(&ie->ie_lock);
		return;
	}

	/* Execute this handler. */
	CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
	    __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
	    ih->ih_name, ih->ih_flags);

	if (!(ih->ih_flags & IH_MPSAFE))
		mtx_lock(&Giant);
	ih->ih_handler(ih->ih_argument);
	if (!(ih->ih_flags & IH_MPSAFE))
		mtx_unlock(&Giant);
}
#endif

/*
 * This is a public function for use by drivers that mux interrupt
 * handlers for child devices from their interrupt handler.
 */
void
intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
{
	struct intr_handler *ih, *ihn;

	TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
		/*
		 * If this handler is marked for death, remove it from
		 * the list of handlers and wake up the sleeper.
		 */
		if (ih->ih_flags & IH_DEAD) {
			mtx_lock(&ie->ie_lock);
			TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
			ih->ih_flags &= ~IH_DEAD;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
			continue;
		}

		/* Skip filter-only handlers. */
		if (ih->ih_handler == NULL)
			continue;

		/*
		 * For software interrupt threads, we only execute
		 * handlers that have their need flag set.  Hardware
		 * interrupt threads always invoke all of their handlers.
		 */
		if (ie->ie_flags & IE_SOFT) {
			if (!ih->ih_need)
				continue;
			else
				atomic_store_rel_int(&ih->ih_need, 0);
		}

		/* Execute this handler. */
		CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
		    __func__, p->p_pid, (void *)ih->ih_handler,
		    ih->ih_argument, ih->ih_name, ih->ih_flags);

		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_lock(&Giant);
		ih->ih_handler(ih->ih_argument);
		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_unlock(&Giant);
	}
}

static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{

	/* Interrupt handlers should not sleep. */
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_NO_SLEEPING();
	intr_event_execute_handlers(p, ie);
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_SLEEPING_OK();

	/*
	 * Interrupt storm handling:
	 *
	 * If this interrupt source is currently storming, then throttle
	 * it to only fire the handler once per clock tick.
	 *
	 * If this interrupt source is not currently storming, but the
	 * number of back to back interrupts exceeds the storm threshold,
	 * then enter storming mode.
	 */
	if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
	    !(ie->ie_flags & IE_SOFT)) {
		/* Report the message only once every second. */
		if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
			printf(
	"interrupt storm detected on \"%s\"; throttling interrupt source\n",
			    ie->ie_name);
		}
		pause("istorm", 1);
	} else
		ie->ie_count++;

	/*
	 * Now that all the handlers have had a chance to run, reenable
	 * the interrupt source.
	 */
	if (ie->ie_post_ithread != NULL)
		ie->ie_post_ithread(ie->ie_source);
}

#ifndef INTR_FILTER
/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	ithd = (struct intr_thread *)arg;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 */
		while (ithd->it_need) {
			/*
			 * This might need a full read and write barrier
			 * to make sure that this write posts before any
			 * of the memory or device accesses in the
			 * handlers.
			 */
			atomic_store_rel_int(&ithd->it_need, 0);
			ithread_execute_handlers(p, ie);
		}
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL | SWT_IWAIT, NULL);
		}
		thread_unlock(td);
	}
}

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:                  the event connected to this interrupt.
 * o frame:               some archs (e.g., i386) pass a frame to some
 *                        handlers as their main argument.
 * Return value:
 * o 0:                   everything ok.
 * o EINVAL:              stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_handler *ih;
	struct thread *td;
	int error, ret, thread;

	td = curthread;

	/* An interrupt with no event or handlers is a stray interrupt. */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	/*
	 * Execute fast interrupt handlers directly.
	 * To support clock handlers, if a handler registers
	 * with a NULL argument, then we pass it a pointer to
	 * a trapframe as its argument.
	 */
	td->td_intr_nesting_level++;
	thread = 0;
	ret = 0;
	critical_enter();
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih->ih_filter == NULL) {
			thread = 1;
			continue;
		}
		CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
		    ih->ih_filter, ih->ih_argument == NULL ? frame :
		    ih->ih_argument, ih->ih_name);
		if (ih->ih_argument == NULL)
			ret = ih->ih_filter(frame);
		else
			ret = ih->ih_filter(ih->ih_argument);
		/*
		 * Wrapper handler special handling:
		 *
		 * in some particular cases (like pccard and pccbb),
		 * the _real_ device handler is wrapped in a couple of
		 * functions - a filter wrapper and an ithread wrapper.
		 * In this case (and just in this case), the filter wrapper
		 * could ask the system to schedule the ithread and mask
		 * the interrupt source if the wrapped handler is composed
		 * of just an ithread handler.
		 *
		 * TODO: write a generic wrapper to avoid people rolling
		 * their own
		 */
		if (!thread) {
			if (ret == FILTER_SCHEDULE_THREAD)
				thread = 1;
		}
	}

	if (thread) {
		if (ie->ie_pre_ithread != NULL)
			ie->ie_pre_ithread(ie->ie_source);
	} else {
		if (ie->ie_post_filter != NULL)
			ie->ie_post_filter(ie->ie_source);
	}

	/* Schedule the ithread if needed. */
	if (thread) {
		error = intr_event_schedule_thread(ie);
#ifndef XEN
		KASSERT(error == 0, ("bad stray interrupt"));
#else
		if (error != 0)
			log(LOG_WARNING, "bad stray interrupt");
#endif
	}
	critical_exit();
	td->td_intr_nesting_level--;
	return (0);
}
#else
/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_handler *ih;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	int priv;

	td = curthread;
	p = td->td_proc;
	ih = (struct intr_handler *)arg;
	priv = (ih->ih_thread != NULL) ? 1 : 0;
	ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 */
		while (ithd->it_need) {
			/*
			 * This might need a full read and write barrier
			 * to make sure that this write posts before any
			 * of the memory or device accesses in the
			 * handlers.
			 */
			atomic_store_rel_int(&ithd->it_need, 0);
			if (priv)
				priv_ithread_execute_handler(p, ih);
			else
				ithread_execute_handlers(p, ie);
		}
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL | SWT_IWAIT, NULL);
		}
		thread_unlock(td);
	}
}

/*
 * Main loop for interrupt filter.
 *
 * Some architectures (i386, amd64 and arm) require the optional frame
 * parameter, and use it as the main argument for fast handler execution
 * when ih_argument == NULL.
 *
 * Return value:
 * o FILTER_STRAY:              No filter recognized the event, and no
 *                              filter-less handler is registered on this
 *                              line.
 * o FILTER_HANDLED:            A filter claimed the event and served it.
 * o FILTER_SCHEDULE_THREAD:    No filter claimed the event, but there's at
 *                              least one filter-less handler on this line.
 * o FILTER_HANDLED |
 *   FILTER_SCHEDULE_THREAD:    A filter claimed the event, and asked for
 *                              scheduling the per-handler ithread.
 *
 * In case an ithread has to be scheduled, in *ithd there will be a
 * pointer to a struct intr_thread containing the thread to be
 * scheduled.
 */

static int
intr_filter_loop(struct intr_event *ie, struct trapframe *frame,
    struct intr_thread **ithd)
{
	struct intr_handler *ih;
	void *arg;
	int ret, thread_only;

	ret = 0;
	thread_only = 0;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		/*
		 * Execute fast interrupt handlers directly.
		 * To support clock handlers, if a handler registers
		 * with a NULL argument, then we pass it a pointer to
		 * a trapframe as its argument.
		 */
		arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument);

		CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__,
		    ih->ih_filter, ih->ih_handler, arg, ih->ih_name);

		if (ih->ih_filter != NULL)
			ret = ih->ih_filter(arg);
		else {
			thread_only = 1;
			continue;
		}

		if (ret & FILTER_STRAY)
			continue;
		else {
			*ithd = ih->ih_thread;
			return (ret);
		}
	}

	/*
	 * No filters handled the interrupt and we have at least
	 * one handler without a filter.  In this case, we schedule
	 * all of the filter-less handlers to run in the ithread.
	 */
	if (thread_only) {
		*ithd = ie->ie_thread;
		return (FILTER_SCHEDULE_THREAD);
	}
	return (FILTER_STRAY);
}

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:                  the event connected to this interrupt.
 * o frame:               some archs (e.g., i386) pass a frame to some
 *                        handlers as their main argument.
 * Return value:
 * o 0:                   everything ok.
 * o EINVAL:              stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_thread *ithd;
	struct thread *td;
	int thread;

	ithd = NULL;
	td = curthread;

	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	td->td_intr_nesting_level++;
	thread = 0;
	critical_enter();
	thread = intr_filter_loop(ie, frame, &ithd);
	if (thread & FILTER_HANDLED) {
		if (ie->ie_post_filter != NULL)
			ie->ie_post_filter(ie->ie_source);
	} else {
		if (ie->ie_pre_ithread != NULL)
			ie->ie_pre_ithread(ie->ie_source);
	}
	critical_exit();

	/* Interrupt storm logic */
	if (thread & FILTER_STRAY) {
		ie->ie_count++;
		if (ie->ie_count < intr_storm_threshold)
			printf("Interrupt stray detection not present\n");
	}

	/* Schedule an ithread if needed. */
	if (thread & FILTER_SCHEDULE_THREAD) {
		if (intr_event_schedule_thread(ie, ithd) != 0)
			panic("%s: impossible stray interrupt", __func__);
	}
	td->td_intr_nesting_level--;
	return (0);
}
#endif

#ifdef DDB
/*
 * Dump details about an interrupt handler
 */
static void
db_dump_intrhand(struct intr_handler *ih)
{
	int comma;

	db_printf("\t%-10s ", ih->ih_name);
	switch (ih->ih_pri) {
	case PI_REALTIME:
		db_printf("CLK ");
		break;
	case PI_AV:
		db_printf("AV  ");
		break;
	case PI_TTYHIGH:
	case PI_TTYLOW:
		db_printf("TTY ");
		break;
	case PI_TAPE:
		db_printf("TAPE");
		break;
	case PI_NET:
		db_printf("NET ");
		break;
	case PI_DISK:
	case PI_DISKLOW:
		db_printf("DISK");
		break;
	case PI_DULL:
		db_printf("DULL");
		break;
	default:
		if (ih->ih_pri >= PI_SOFT)
			db_printf("SWI ");
		else
			db_printf("%4u", ih->ih_pri);
		break;
	}
	db_printf(" ");
	db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
	db_printf("(%p)", ih->ih_argument);
	if (ih->ih_need ||
	    (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
	    IH_MPSAFE)) != 0) {
		db_printf(" {");
		comma = 0;
		if (ih->ih_flags & IH_EXCLUSIVE) {
			if (comma)
				db_printf(", ");
			db_printf("EXCL");
			comma = 1;
		}
		if (ih->ih_flags & IH_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ih->ih_flags & IH_DEAD) {
			if (comma)
				db_printf(", ");
			db_printf("DEAD");
			comma = 1;
		}
		if (ih->ih_flags & IH_MPSAFE) {
			if (comma)
				db_printf(", ");
			db_printf("MPSAFE");
			comma = 1;
		}
		if (ih->ih_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");
}

/*
 * Dump details about an event.
 */
void
db_dump_intr_event(struct intr_event *ie, int handlers)
{
	struct intr_handler *ih;
	struct intr_thread *it;
	int comma;

	db_printf("%s ", ie->ie_fullname);
	it = ie->ie_thread;
	if (it != NULL)
		db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
	else
		db_printf("(no thread)");
	if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
	    (it != NULL && it->it_need)) {
		db_printf(" {");
		comma = 0;
		if (ie->ie_flags & IE_SOFT) {
			db_printf("SOFT");
			comma = 1;
		}
		if (ie->ie_flags & IE_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ie->ie_flags & IE_ADDING_THREAD) {
			if (comma)
				db_printf(", ");
			db_printf("ADDING_THREAD");
			comma = 1;
		}
		if (it != NULL && it->it_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");

	if (handlers)
		TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
			db_dump_intrhand(ih);
}

/*
 * Dump data about interrupt handlers
 */
DB_SHOW_COMMAND(intr, db_show_intr)
{
	struct intr_event *ie;
	int all, verbose;

	verbose = index(modif, 'v') != NULL;
	all = index(modif, 'a') != NULL;
	TAILQ_FOREACH(ie, &event_list, ie_list) {
		if (!all && TAILQ_EMPTY(&ie->ie_handlers))
			continue;
		db_dump_intr_event(ie, verbose);
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */

/*
 * Start standard software interrupt threads
 */
static void
start_softintr(void *dummy)
{

	if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
		panic("died while creating vm swi ithread");
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
    NULL);

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however is
 * machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames,
	    req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrcnt,
	    (char *)eintrcnt - (char *)intrcnt, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");

#ifdef DDB
/*
 * DDB command to dump the interrupt statistics.
 */
DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
{
	u_long *i;
	char *cp;

	cp = intrnames;
	for (i = intrcnt; i != eintrcnt && !db_pager_quit; i++) {
		if (*cp == '\0')
			break;
		if (*i != 0)
			db_printf("%s\t%lu\n", cp, *i);
		cp += strlen(cp) + 1;
	}
}
#endif