/*-
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
	struct intr_event *it_event;
	struct thread *it_thread;	/* Kernel thread. */
	int	it_flags;		/* (j) IT_* flags. */
	int	it_need;		/* Needs service. */
};

/* Interrupt thread flags kept in it_flags */
#define	IT_DEAD		0x000001	/* Thread is waiting to exit. */

struct intr_entropy {
	struct thread *td;
	uintptr_t event;
};

struct intr_event *clk_intr_event;
struct intr_event *tty_intr_event;
void	*vm_ih;
struct proc *intrproc;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static int intr_storm_threshold = 1000;
TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);
static struct mtx event_lock;
MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);

static void	intr_event_update(struct intr_event *ie);
#ifdef INTR_FILTER
static int	intr_event_schedule_thread(struct intr_event *ie,
		    struct intr_thread *ithd);
static int	intr_filter_loop(struct intr_event *ie,
		    struct trapframe *frame, struct intr_thread **ithd);
static struct intr_thread *ithread_create(const char *name,
		    struct intr_handler *ih);
#else
static int	intr_event_schedule_thread(struct intr_event *ie);
static struct intr_thread *ithread_create(const char *name);
#endif
static void	ithread_destroy(struct intr_thread *ithread);
static void	ithread_execute_handlers(struct proc *p,
		    struct intr_event *ie);
#ifdef INTR_FILTER
static void	priv_ithread_execute_handler(struct proc *p,
		    struct intr_handler *ih);
#endif
static void	ithread_loop(void *);
static void	ithread_update(struct intr_thread *ithd);
static void	start_softintr(void *);

/* Map an interrupt type to an ithread priority. */
u_char
intr_priority(enum intr_type flags)
{
	u_char pri;

	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
	switch (flags) {
	case INTR_TYPE_TTY:
		pri = PI_TTYLOW;
		break;
	case INTR_TYPE_BIO:
		/*
		 * XXX We need to refine this.  BSD/OS distinguishes
		 * between tape and disk priorities.
		 */
		pri = PI_DISK;
		break;
	case INTR_TYPE_NET:
		pri = PI_NET;
		break;
	case INTR_TYPE_CAM:
		pri = PI_DISK;		/* XXX or PI_CAM? */
		break;
	case INTR_TYPE_AV:		/* Audio/video */
		pri = PI_AV;
		break;
	case INTR_TYPE_CLK:
		pri = PI_REALTIME;
		break;
	case INTR_TYPE_MISC:
		pri = PI_DULL;		/* don't care */
		break;
	default:
		/* We didn't specify an interrupt level. */
		panic("intr_priority: no interrupt type in flags");
	}

	return pri;
}

/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
	struct intr_event *ie;
	struct thread *td;
	u_char pri;

	ie = ithd->it_event;
	td = ithd->it_thread;

	/* Determine the overall priority of this event. */
	if (TAILQ_EMPTY(&ie->ie_handlers))
		pri = PRI_MAX_ITHD;
	else
		pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;

	/* Update name and priority. */
	strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
	thread_lock(td);
	sched_prio(td, pri);
	thread_unlock(td);
}

/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
	struct intr_handler *ih;
	char *last;
	int missed, space;

	/* Start off with no entropy and just the name of the event. */
	mtx_assert(&ie->ie_lock, MA_OWNED);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	ie->ie_flags &= ~IE_ENTROPY;
	missed = 0;
	space = 1;

	/* Run through all the handlers updating values. */
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
		    sizeof(ie->ie_fullname)) {
			strcat(ie->ie_fullname, " ");
			strcat(ie->ie_fullname, ih->ih_name);
			space = 0;
		} else
			missed++;
		if (ih->ih_flags & IH_ENTROPY)
			ie->ie_flags |= IE_ENTROPY;
	}

	/*
	 * If the handler names were too long, add +'s to indicate missing
	 * names.  If we run out of room and still have +'s to add, change
	 * the last character from a + to a *.
	 */
	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
	while (missed-- > 0) {
		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
			if (*last == '+') {
				*last = '*';
				break;
			} else
				*last = '+';
		} else if (space) {
			strcat(ie->ie_fullname, " +");
			space = 0;
		} else
			strcat(ie->ie_fullname, "+");
	}

	/*
	 * If this event has an ithread, update its priority and
	 * name.
	 */
	if (ie->ie_thread != NULL)
		ithread_update(ie->ie_thread);
	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}

int
intr_event_create(struct intr_event **event, void *source, int flags, int irq,
    void (*pre_ithread)(void *), void (*post_ithread)(void *),
    void (*post_filter)(void *), int (*assign_cpu)(void *, u_char),
    const char *fmt, ...)
{
	struct intr_event *ie;
	va_list ap;

	/* The only valid flag during creation is IE_SOFT. */
	if ((flags & ~IE_SOFT) != 0)
		return (EINVAL);
	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
	ie->ie_source = source;
	ie->ie_pre_ithread = pre_ithread;
	ie->ie_post_ithread = post_ithread;
	ie->ie_post_filter = post_filter;
	ie->ie_assign_cpu = assign_cpu;
	ie->ie_flags = flags;
	ie->ie_irq = irq;
	ie->ie_cpu = NOCPU;
	TAILQ_INIT(&ie->ie_handlers);
	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

	va_start(ap, fmt);
	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
	va_end(ap);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	mtx_lock(&event_lock);
	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
	mtx_unlock(&event_lock);
	if (event != NULL)
		*event = ie;
	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
	return (0);
}

/*
 * Bind an interrupt event to the specified CPU.  Note that not all
 * platforms support binding an interrupt to a CPU.  For those
 * platforms this request will fail.  For supported platforms, any
 * associated ithreads as well as the primary interrupt context will
 * be bound to the specified CPU.  Using a cpu id of NOCPU unbinds
 * the interrupt event.
 */
int
intr_event_bind(struct intr_event *ie, u_char cpu)
{
	cpuset_t mask;
	lwpid_t id;
	int error;

	/* Need a CPU to bind to. */
	if (cpu != NOCPU && CPU_ABSENT(cpu))
		return (EINVAL);

	if (ie->ie_assign_cpu == NULL)
		return (EOPNOTSUPP);

	error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
	if (error)
		return (error);

	/*
	 * If we have any ithreads try to set their mask first to verify
	 * permissions, etc.
	 */
	mtx_lock(&ie->ie_lock);
	if (ie->ie_thread != NULL) {
		CPU_ZERO(&mask);
		if (cpu == NOCPU)
			CPU_COPY(cpuset_root, &mask);
		else
			CPU_SET(cpu, &mask);
		id = ie->ie_thread->it_thread->td_tid;
		mtx_unlock(&ie->ie_lock);
		error = cpuset_setthread(id, &mask);
		if (error)
			return (error);
	} else
		mtx_unlock(&ie->ie_lock);
	error = ie->ie_assign_cpu(ie->ie_source, cpu);
	if (error) {
		mtx_lock(&ie->ie_lock);
		if (ie->ie_thread != NULL) {
			CPU_ZERO(&mask);
			if (ie->ie_cpu == NOCPU)
				CPU_COPY(cpuset_root, &mask);
			else
				CPU_SET(cpu, &mask);
			id = ie->ie_thread->it_thread->td_tid;
			mtx_unlock(&ie->ie_lock);
			(void)cpuset_setthread(id, &mask);
		} else
			mtx_unlock(&ie->ie_lock);
		return (error);
	}

	mtx_lock(&ie->ie_lock);
	ie->ie_cpu = cpu;
	mtx_unlock(&ie->ie_lock);

	return (error);
}

static struct intr_event *
intr_lookup(int irq)
{
	struct intr_event *ie;

	mtx_lock(&event_lock);
	TAILQ_FOREACH(ie, &event_list, ie_list)
		if (ie->ie_irq == irq &&
		    (ie->ie_flags & IE_SOFT) == 0 &&
		    TAILQ_FIRST(&ie->ie_handlers) != NULL)
			break;
	mtx_unlock(&event_lock);
	return (ie);
}

int
intr_setaffinity(int irq, void *m)
{
	struct intr_event *ie;
	cpuset_t *mask;
	u_char cpu;
	int n;

	mask = m;
	cpu = NOCPU;
	/*
	 * If we're setting all cpus we can unbind.  Otherwise make sure
	 * only one cpu is in the set.
	 */
	if (CPU_CMP(cpuset_root, mask)) {
		for (n = 0; n < CPU_SETSIZE; n++) {
			if (!CPU_ISSET(n, mask))
				continue;
			if (cpu != NOCPU)
				return (EINVAL);
			cpu = (u_char)n;
		}
	}
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);
	return (intr_event_bind(ie, cpu));
}

int
intr_getaffinity(int irq, void *m)
{
	struct intr_event *ie;
	cpuset_t *mask;

	mask = m;
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);
	CPU_ZERO(mask);
	mtx_lock(&ie->ie_lock);
	if (ie->ie_cpu == NOCPU)
		CPU_COPY(cpuset_root, mask);
	else
		CPU_SET(ie->ie_cpu, mask);
	mtx_unlock(&ie->ie_lock);
	return (0);
}

int
intr_event_destroy(struct intr_event *ie)
{

	mtx_lock(&event_lock);
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		mtx_unlock(&ie->ie_lock);
		mtx_unlock(&event_lock);
		return (EBUSY);
	}
	TAILQ_REMOVE(&event_list, ie, ie_list);
#ifndef notyet
	if (ie->ie_thread != NULL) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	mtx_unlock(&event_lock);
	mtx_destroy(&ie->ie_lock);
	free(ie, M_ITHREAD);
	return (0);
}

#ifndef INTR_FILTER
static struct intr_thread *
ithread_create(const char *name)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
	    &td, RFSTOPPED | RFHIGHPID,
	    0, "intr", "%s", name);
	if (error)
		panic("kproc_create() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}
#else
static struct intr_thread *
ithread_create(const char *name, struct intr_handler *ih)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ih, &intrproc,
	    &td, RFSTOPPED | RFHIGHPID,
	    0, "intr", "%s", name);
	if (error)
		panic("kproc_create() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}
#endif

static void
ithread_destroy(struct intr_thread *ithread)
{
	struct thread *td;

	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
	td = ithread->it_thread;
	thread_lock(td);
	ithread->it_flags |= IT_DEAD;
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	}
	thread_unlock(td);
}

#ifndef INTR_FILTER
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	ih->ih_name = name;
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Add the new handler to the event in priority order. */
	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	intr_event_update(ie);

	/* Create a thread if we need one. */
	while (ie->ie_thread == NULL && handler != NULL) {
		if (ie->ie_flags & IE_ADDING_THREAD)
			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
		else {
			ie->ie_flags |= IE_ADDING_THREAD;
			mtx_unlock(&ie->ie_lock);
			it = ithread_create("intr: newborn");
			mtx_lock(&ie->ie_lock);
			ie->ie_flags &= ~IE_ADDING_THREAD;
			ie->ie_thread = it;
			it->it_event = ie;
			ithread_update(it);
			wakeup(ie);
		}
	}
	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}
#else
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	ih->ih_name = name;
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Add the new handler to the event in priority order. */
	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	intr_event_update(ie);

	/* For filtered handlers, create a private ithread to run on. */
	if (filter != NULL && handler != NULL) {
		mtx_unlock(&ie->ie_lock);
		it = ithread_create("intr: newborn", ih);
		mtx_lock(&ie->ie_lock);
		it->it_event = ie;
		ih->ih_thread = it;
		ithread_update(it); // XXX - do we really need this?!?!?
	} else { /* Create the global per-event thread if we need one. */
		while (ie->ie_thread == NULL && handler != NULL) {
			if (ie->ie_flags & IE_ADDING_THREAD)
				msleep(ie, &ie->ie_lock, 0, "ithread", 0);
			else {
				ie->ie_flags |= IE_ADDING_THREAD;
				mtx_unlock(&ie->ie_lock);
				it = ithread_create("intr: newborn", ih);
				mtx_lock(&ie->ie_lock);
				ie->ie_flags &= ~IE_ADDING_THREAD;
				ie->ie_thread = it;
				it->it_event = ie;
				ithread_update(it);
				wakeup(ie);
			}
		}
	}
	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}
#endif

/*
 * Return the ie_source field from the intr_event an intr_handler is
 * associated with.
 */
void *
intr_handler_source(void *cookie)
{
	struct intr_handler *ih;
	struct intr_event *ie;

	ih = (struct intr_handler *)cookie;
	if (ih == NULL)
		return (NULL);
	ie = ih->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    ih->ih_name));
	return (ie->ie_source);
}

#ifndef INTR_FILTER
int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
#ifdef INVARIANTS
	struct intr_handler *ih;
#endif
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ie->ie_lock);
	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
	    ih->ih_name, ie->ie_name);
ok:
#endif
	/*
	 * If there is no ithread, then just remove the handler and return.
	 * XXX: Note that an INTR_FAST handler might be running on another
	 * CPU!
	 */
	if (ie->ie_thread == NULL) {
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 *
	 * During a cold boot while cold is set, msleep() does not sleep,
	 * so we have to remove the handler here rather than letting the
	 * thread do it.
	 */
	thread_lock(ie->ie_thread->it_thread);
	if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 */
		ie->ie_thread->it_need = 1;
	} else
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
	thread_unlock(ie->ie_thread->it_thread);
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	intr_event_update(ie);
#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(8).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (!(ih->ih_flags & IH_FAST)) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}

static int
intr_event_schedule_thread(struct intr_event *ie)
{
	struct intr_entropy entropy;
	struct intr_thread *it;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
	    ie->ie_thread == NULL)
		return (EINVAL);

	ctd = curthread;
	it = ie->ie_thread;
	td = it->it_thread;
	p = td->td_proc;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
		    p->p_pid, td->td_name);
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest(&entropy, sizeof(entropy), 2, 0,
		    RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 */
	it->it_need = 1;
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
	}
	thread_unlock(td);

	return (0);
}
#else
int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
	struct intr_thread *it;
#ifdef INVARIANTS
	struct intr_handler *ih;
#endif
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ie->ie_lock);
	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
	    ih->ih_name, ie->ie_name);
ok:
#endif
	/*
	 * If there are no ithreads (per event and per handler), then
	 * just remove the handler and return.
	 * XXX: Note that an INTR_FAST handler might be running on another CPU!
	 */
	if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/* Private or global ithread? */
	it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;
	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 *
	 * During a cold boot while cold is set, msleep() does not sleep,
	 * so we have to remove the handler here rather than letting the
	 * thread do it.
	 */
	thread_lock(it->it_thread);
	if (!TD_AWAITING_INTR(it->it_thread) && !cold) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 */
		it->it_need = 1;
	} else
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
	thread_unlock(it->it_thread);
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	/*
	 * At this point, the handler has been disconnected from the event,
	 * so we can kill the private ithread if any.
	 */
	if (handler->ih_thread) {
		ithread_destroy(handler->ih_thread);
		handler->ih_thread = NULL;
	}
	intr_event_update(ie);
#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(8).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (handler != NULL) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}

static int
intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
{
	struct intr_entropy entropy;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
		return (EINVAL);

	ctd = curthread;
	td = it->it_thread;
	p = td->td_proc;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
		    p->p_pid, td->td_name);
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest(&entropy, sizeof(entropy), 2, 0,
		    RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 */
	it->it_need = 1;
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
	}
	thread_unlock(td);

	return (0);
}
#endif

/*
 * Allow interrupt event binding for software interrupt handlers -- a no-op,
 * since interrupts are generated in software rather than being directed by
 * a PIC.
 */
static int
swi_assign_cpu(void *arg, u_char cpu)
{

	return (0);
}

/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
    void *arg, int pri, enum intr_type flags, void **cookiep)
{
	struct intr_event *ie;
	int error;

	if (flags & INTR_ENTROPY)
		return (EINVAL);

	ie = (eventp != NULL) ? *eventp : NULL;

	if (ie != NULL) {
		if (!(ie->ie_flags & IE_SOFT))
			return (EINVAL);
	} else {
		error = intr_event_create(&ie, NULL, IE_SOFT, 0,
		    NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
		if (error)
			return (error);
		if (eventp != NULL)
			*eventp = ie;
	}
	error = intr_event_add_handler(ie, name, NULL, handler, arg,
	    (pri * RQ_PPQ) + PI_SOFT, flags, cookiep);
	if (error)
		return (error);
	if (pri == SWI_CLOCK) {
		struct proc *p;
		p = ie->ie_thread->it_thread->td_proc;
		PROC_LOCK(p);
		p->p_flag |= P_NOLOAD;
		PROC_UNLOCK(p);
	}
	return (0);
}

/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
	struct intr_handler *ih = (struct intr_handler *)cookie;
	struct intr_event *ie = ih->ih_event;
	int error;

	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
	    ih->ih_need);

	/*
	 * Set ih_need for this handler so that if the ithread is already
	 * running it will execute this handler on the next pass.  Otherwise,
	 * it will execute it the next time it runs.
	 */
	atomic_store_rel_int(&ih->ih_need, 1);

	if (!(flags & SWI_DELAY)) {
		PCPU_INC(cnt.v_soft);
#ifdef INTR_FILTER
		error = intr_event_schedule_thread(ie, ie->ie_thread);
#else
		error = intr_event_schedule_thread(ie);
#endif
		KASSERT(error == 0, ("stray software interrupt"));
	}
}

/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

	return (intr_event_remove_handler(cookie));
}

#ifdef INTR_FILTER
static void
priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih)
{
	struct intr_event *ie;

	ie = ih->ih_event;
	/*
	 * If this handler is marked for death, remove it from
	 * the list of handlers and wake up the sleeper.
	 */
	if (ih->ih_flags & IH_DEAD) {
		mtx_lock(&ie->ie_lock);
		TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
		ih->ih_flags &= ~IH_DEAD;
		wakeup(ih);
		mtx_unlock(&ie->ie_lock);
		return;
	}

	/* Execute this handler. */
	CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
	    __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
	    ih->ih_name, ih->ih_flags);

	if (!(ih->ih_flags & IH_MPSAFE))
		mtx_lock(&Giant);
	ih->ih_handler(ih->ih_argument);
	if (!(ih->ih_flags & IH_MPSAFE))
		mtx_unlock(&Giant);
}
#endif

/*
 * This is a public function for use by drivers that mux interrupt
 * handlers for child devices from their interrupt handler.
 */
void
intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
{
	struct intr_handler *ih, *ihn;

	TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
		/*
		 * If this handler is marked for death, remove it from
		 * the list of handlers and wake up the sleeper.
		 */
		if (ih->ih_flags & IH_DEAD) {
			mtx_lock(&ie->ie_lock);
			TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
			ih->ih_flags &= ~IH_DEAD;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
			continue;
		}

		/* Skip filter only handlers */
		if (ih->ih_handler == NULL)
			continue;

		/*
		 * For software interrupt threads, we only execute
		 * handlers that have their need flag set.  Hardware
		 * interrupt threads always invoke all of their handlers.
		 */
		if (ie->ie_flags & IE_SOFT) {
			if (!ih->ih_need)
				continue;
			else
				atomic_store_rel_int(&ih->ih_need, 0);
		}

		/* Execute this handler. */
		CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
		    __func__, p->p_pid, (void *)ih->ih_handler,
		    ih->ih_argument, ih->ih_name, ih->ih_flags);

		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_lock(&Giant);
		ih->ih_handler(ih->ih_argument);
		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_unlock(&Giant);
	}
}

static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{

	/* Interrupt handlers should not sleep. */
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_NO_SLEEPING();
	intr_event_execute_handlers(p, ie);
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_SLEEPING_OK();

	/*
	 * Interrupt storm handling:
	 *
	 * If this interrupt source is currently storming, then throttle
	 * it to only fire the handler once per clock tick.
	 *
	 * If this interrupt source is not currently storming, but the
	 * number of back to back interrupts exceeds the storm threshold,
	 * then enter storming mode.
	 */
	if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
	    !(ie->ie_flags & IE_SOFT)) {
		/* Report the message only once every second. */
		if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
			printf(
	"interrupt storm detected on \"%s\"; throttling interrupt source\n",
			    ie->ie_name);
		}
		pause("istorm", 1);
	} else
		ie->ie_count++;

	/*
	 * Now that all the handlers have had a chance to run, reenable
	 * the interrupt source.
	 */
	if (ie->ie_post_ithread != NULL)
		ie->ie_post_ithread(ie->ie_source);
}

#ifndef INTR_FILTER
/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	ithd = (struct intr_thread *)arg;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 */
		while (ithd->it_need) {
			/*
			 * This might need a full read and write barrier
			 * to make sure that this write posts before any
			 * of the memory or device accesses in the
			 * handlers.
			 */
			atomic_store_rel_int(&ithd->it_need, 0);
			ithread_execute_handlers(p, ie);
		}
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL | SWT_IWAIT, NULL);
		}
		thread_unlock(td);
	}
}

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:	the event connected to this interrupt.
 * o frame:	some archs (i.e. i386) pass a frame to some
 *		handlers as their main argument.
 * Return value:
 * o 0:		everything ok.
 * o EINVAL:	stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_handler *ih;
	struct thread *td;
	int error, ret, thread;

	td = curthread;

	/* An interrupt with no event or handlers is a stray interrupt. */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	/*
	 * Execute fast interrupt handlers directly.
	 * To support clock handlers, if a handler registers
	 * with a NULL argument, then we pass it a pointer to
	 * a trapframe as its argument.
	 */
	td->td_intr_nesting_level++;
	thread = 0;
	ret = 0;
	critical_enter();
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih->ih_filter == NULL) {
			thread = 1;
			continue;
		}
		CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
		    ih->ih_filter, ih->ih_argument == NULL ? frame :
		    ih->ih_argument, ih->ih_name);
		if (ih->ih_argument == NULL)
			ret = ih->ih_filter(frame);
		else
			ret = ih->ih_filter(ih->ih_argument);
		/*
		 * Wrapper handler special handling:
		 *
		 * in some particular cases (like pccard and pccbb),
		 * the _real_ device handler is wrapped in a couple of
		 * functions - a filter wrapper and an ithread wrapper.
		 * In this case (and just in this case), the filter wrapper
		 * could ask the system to schedule the ithread and mask
		 * the interrupt source if the wrapped handler is composed
		 * of just an ithread handler.
		 *
		 * TODO: write a generic wrapper to avoid people rolling
		 * their own
		 */
		if (!thread) {
			if (ret == FILTER_SCHEDULE_THREAD)
				thread = 1;
		}
	}

	if (thread) {
		if (ie->ie_pre_ithread != NULL)
			ie->ie_pre_ithread(ie->ie_source);
	} else {
		if (ie->ie_post_filter != NULL)
			ie->ie_post_filter(ie->ie_source);
	}

	/* Schedule the ithread if needed. */
	if (thread) {
		error = intr_event_schedule_thread(ie);
#ifndef XEN
		KASSERT(error == 0, ("bad stray interrupt"));
#else
		if (error != 0)
			log(LOG_WARNING, "bad stray interrupt");
#endif
	}
	critical_exit();
	td->td_intr_nesting_level--;
	return (0);
}
#else
/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_handler *ih;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	int priv;

	td = curthread;
	p = td->td_proc;
	ih = (struct intr_handler *)arg;
	priv = (ih->ih_thread != NULL) ? 1 : 0;
	ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 */
		while (ithd->it_need) {
			/*
			 * This might need a full read and write barrier
			 * to make sure that this write posts before any
			 * of the memory or device accesses in the
			 * handlers.
			 */
			atomic_store_rel_int(&ithd->it_need, 0);
			if (priv)
				priv_ithread_execute_handler(p, ih);
			else
				ithread_execute_handlers(p, ie);
		}
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL | SWT_IWAIT, NULL);
		}
		thread_unlock(td);
	}
}

/*
 * Main loop for interrupt filter.
 *
 * Some architectures (i386, amd64 and arm) require the optional frame
 * parameter, and use it as the main argument for fast handler execution
 * when ih_argument == NULL.
 *
 * Return value:
 * o FILTER_STRAY:		No filter recognized the event, and no
 *				filter-less handler is registered on this
 *				line.
 * o FILTER_HANDLED:		A filter claimed the event and served it.
 * o FILTER_SCHEDULE_THREAD:	No filter claimed the event, but there's at
 *				least one filter-less handler on this line.
 * o FILTER_HANDLED |
 *   FILTER_SCHEDULE_THREAD:	A filter claimed the event, and asked for
 *				scheduling the per-handler ithread.
 *
 * In case an ithread has to be scheduled, in *ithd there will be a
 * pointer to a struct intr_thread containing the thread to be
 * scheduled.
 */

static int
intr_filter_loop(struct intr_event *ie, struct trapframe *frame,
    struct intr_thread **ithd)
{
	struct intr_handler *ih;
	void *arg;
	int ret, thread_only;

	ret = 0;
	thread_only = 0;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		/*
		 * Execute fast interrupt handlers directly.
		 * To support clock handlers, if a handler registers
		 * with a NULL argument, then we pass it a pointer to
		 * a trapframe as its argument.
		 */
		arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument);

		CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__,
		    ih->ih_filter, ih->ih_handler, arg, ih->ih_name);

		if (ih->ih_filter != NULL)
			ret = ih->ih_filter(arg);
		else {
			thread_only = 1;
			continue;
		}

		if (ret & FILTER_STRAY)
			continue;
		else {
			*ithd = ih->ih_thread;
			return (ret);
		}
	}

	/*
	 * No filters handled the interrupt and we have at least
	 * one handler without a filter.  In this case, we schedule
	 * all of the filter-less handlers to run in the ithread.
	 */
	if (thread_only) {
		*ithd = ie->ie_thread;
		return (FILTER_SCHEDULE_THREAD);
	}
	return (FILTER_STRAY);
}

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:	the event connected to this interrupt.
 * o frame:	some archs (i.e. i386) pass a frame to some
 *		handlers as their main argument.
 * Return value:
 * o 0:		everything ok.
 * o EINVAL:	stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_thread *ithd;
	struct thread *td;
	int thread;

	ithd = NULL;
	td = curthread;

	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	td->td_intr_nesting_level++;
	thread = 0;
	critical_enter();
	thread = intr_filter_loop(ie, frame, &ithd);
	if (thread & FILTER_HANDLED) {
		if (ie->ie_post_filter != NULL)
			ie->ie_post_filter(ie->ie_source);
	} else {
		if (ie->ie_pre_ithread != NULL)
			ie->ie_pre_ithread(ie->ie_source);
	}
	critical_exit();

	/* Interrupt storm logic */
	if (thread & FILTER_STRAY) {
		ie->ie_count++;
		if (ie->ie_count < intr_storm_threshold)
			printf("Interrupt stray detection not present\n");
	}

	/* Schedule an ithread if needed. */
	if (thread & FILTER_SCHEDULE_THREAD) {
		if (intr_event_schedule_thread(ie, ithd) != 0)
			panic("%s: impossible stray interrupt", __func__);
	}
	td->td_intr_nesting_level--;
	return (0);
}
#endif

#ifdef DDB
/*
 * Dump details about an interrupt handler
 */
static void
db_dump_intrhand(struct intr_handler *ih)
{
	int comma;

	db_printf("\t%-10s ", ih->ih_name);
	switch (ih->ih_pri) {
	case PI_REALTIME:
		db_printf("CLK ");
		break;
	case PI_AV:
		db_printf("AV  ");
		break;
	case PI_TTYHIGH:
	case PI_TTYLOW:
		db_printf("TTY ");
		break;
	case PI_TAPE:
		db_printf("TAPE");
		break;
	case PI_NET:
		db_printf("NET ");
		break;
	case PI_DISK:
	case PI_DISKLOW:
		db_printf("DISK");
		break;
	case PI_DULL:
		db_printf("DULL");
		break;
	default:
		if (ih->ih_pri >= PI_SOFT)
			db_printf("SWI ");
		else
			db_printf("%4u", ih->ih_pri);
		break;
	}
	db_printf(" ");
	db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
	db_printf("(%p)", ih->ih_argument);
	if (ih->ih_need ||
	    (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
	    IH_MPSAFE)) != 0) {
		db_printf(" {");
		comma = 0;
		if (ih->ih_flags & IH_EXCLUSIVE) {
			if (comma)
				db_printf(", ");
			db_printf("EXCL");
			comma = 1;
		}
		if (ih->ih_flags & IH_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ih->ih_flags & IH_DEAD) {
			if (comma)
				db_printf(", ");
			db_printf("DEAD");
			comma = 1;
		}
		if (ih->ih_flags & IH_MPSAFE) {
			if (comma)
				db_printf(", ");
			db_printf("MPSAFE");
			comma = 1;
		}
		if (ih->ih_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");
}

/*
 * Dump details about an event.
 */
void
db_dump_intr_event(struct intr_event *ie, int handlers)
{
	struct intr_handler *ih;
	struct intr_thread *it;
	int comma;

	db_printf("%s ", ie->ie_fullname);
	it = ie->ie_thread;
	if (it != NULL)
		db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
	else
		db_printf("(no thread)");
	if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
	    (it != NULL && it->it_need)) {
		db_printf(" {");
		comma = 0;
		if (ie->ie_flags & IE_SOFT) {
			db_printf("SOFT");
			comma = 1;
		}
		if (ie->ie_flags & IE_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ie->ie_flags & IE_ADDING_THREAD) {
			if (comma)
				db_printf(", ");
			db_printf("ADDING_THREAD");
			comma = 1;
		}
		if (it != NULL && it->it_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");

	if (handlers)
		TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
			db_dump_intrhand(ih);
}

/*
 * Dump data about interrupt handlers
 */
DB_SHOW_COMMAND(intr, db_show_intr)
{
	struct intr_event *ie;
	int all, verbose;

	verbose = index(modif, 'v') != NULL;
	all = index(modif, 'a') != NULL;
	TAILQ_FOREACH(ie, &event_list, ie_list) {
		if (!all && TAILQ_EMPTY(&ie->ie_handlers))
			continue;
		db_dump_intr_event(ie, verbose);
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */

/*
 * Start standard software interrupt threads
 */
static void
start_softintr(void *dummy)
{

	if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
		panic("died while creating vm swi ithread");
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
    NULL);

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however is
 * machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames,
	    req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrcnt,
	    (char *)eintrcnt - (char *)intrcnt, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");

#ifdef DDB
/*
 * DDB command to dump the interrupt statistics.
 */
DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
{
	u_long *i;
	char *cp;

	cp = intrnames;
	for (i = intrcnt; i != eintrcnt && !db_pager_quit; i++) {
		if (*cp == '\0')
			break;
		if (*i != 0)
			db_printf("%s\t%lu\n", cp, *i);
		cp += strlen(cp) + 1;
	}
}
#endif
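
#if 0
/*
 * Illustrative sketch, not part of the original file (hence guarded by
 * "#if 0"): how a driver might use the swi_add() and swi_sched() interfaces
 * implemented above to defer work to a software interrupt thread.  The names
 * foo_swi_cookie, foo_swi_handler and foo_swi_setup are hypothetical.
 */
static void *foo_swi_cookie;

/* Runs in the swi ithread; the deferred work goes here. */
static void
foo_swi_handler(void *arg)
{
}

static int
foo_swi_setup(void)
{

	/*
	 * Register the handler on a new IE_SOFT event at SWI_TQ priority.
	 * The returned cookie is later passed to swi_sched()/swi_remove().
	 */
	return (swi_add(NULL, "foo", foo_swi_handler, NULL, SWI_TQ,
	    INTR_MPSAFE, &foo_swi_cookie));
}

/*
 * From a hardware interrupt filter or any other context that must not
 * sleep, request that the handler run on its next pass:
 *
 *	swi_sched(foo_swi_cookie, 0);
 */
#endif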