/*-
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
	struct intr_event *it_event;
	struct thread *it_thread;	/* Kernel thread. */
	int	it_flags;		/* (j) IT_* flags. */
	int	it_need;		/* Needs service. */
};

/* Interrupt thread flags kept in it_flags */
#define	IT_DEAD		0x000001	/* Thread is waiting to exit. */

struct intr_entropy {
	struct thread *td;
	uintptr_t event;
};

struct intr_event *clk_intr_event;
struct intr_event *tty_intr_event;
void	*vm_ih;
struct proc *intrproc;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static int intr_storm_threshold = 1000;
TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);
static struct mtx event_lock;
MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);

static void	intr_event_update(struct intr_event *ie);
#ifdef INTR_FILTER
static int	intr_event_schedule_thread(struct intr_event *ie,
		    struct intr_thread *ithd);
static int	intr_filter_loop(struct intr_event *ie,
		    struct trapframe *frame, struct intr_thread **ithd);
static struct intr_thread *ithread_create(const char *name,
		    struct intr_handler *ih);
#else
static int	intr_event_schedule_thread(struct intr_event *ie);
static struct intr_thread *ithread_create(const char *name);
#endif
static void	ithread_destroy(struct intr_thread *ithread);
static void	ithread_execute_handlers(struct proc *p,
		    struct intr_event *ie);
#ifdef INTR_FILTER
static void	priv_ithread_execute_handler(struct proc *p,
		    struct intr_handler *ih);
#endif
static void	ithread_loop(void *);
static void	ithread_update(struct intr_thread *ithd);
static void	start_softintr(void *);

/* Map an interrupt type to an ithread priority. */
u_char
intr_priority(enum intr_type flags)
{
	u_char pri;

	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
	switch (flags) {
	case INTR_TYPE_TTY:
		pri = PI_TTY;
		break;
	case INTR_TYPE_BIO:
		pri = PI_DISK;
		break;
	case INTR_TYPE_NET:
		pri = PI_NET;
		break;
	case INTR_TYPE_CAM:
		pri = PI_DISK;
		break;
	case INTR_TYPE_AV:
		pri = PI_AV;
		break;
	case INTR_TYPE_CLK:
		pri = PI_REALTIME;
		break;
	case INTR_TYPE_MISC:
		pri = PI_DULL;	/* don't care */
		break;
	default:
		/* We didn't specify an interrupt level. */
		panic("intr_priority: no interrupt type in flags");
	}

	return pri;
}

/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
	struct intr_event *ie;
	struct thread *td;
	u_char pri;

	ie = ithd->it_event;
	td = ithd->it_thread;

	/* Determine the overall priority of this event. */
	if (TAILQ_EMPTY(&ie->ie_handlers))
		pri = PRI_MAX_ITHD;
	else
		pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;

	/* Update name and priority. */
	strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
	thread_lock(td);
	sched_prio(td, pri);
	thread_unlock(td);
}

/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
	struct intr_handler *ih;
	char *last;
	int missed, space;

	/* Start off with no entropy and just the name of the event. */
	mtx_assert(&ie->ie_lock, MA_OWNED);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	ie->ie_flags &= ~IE_ENTROPY;
	missed = 0;
	space = 1;

	/* Run through all the handlers updating values. */
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
		    sizeof(ie->ie_fullname)) {
			strcat(ie->ie_fullname, " ");
			strcat(ie->ie_fullname, ih->ih_name);
			space = 0;
		} else
			missed++;
		if (ih->ih_flags & IH_ENTROPY)
			ie->ie_flags |= IE_ENTROPY;
	}

	/*
	 * If the handler names were too long, add +'s to indicate missing
	 * names.  If we run out of room and still have +'s to add, change
	 * the last character from a + to a *.
	 */
	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
	while (missed-- > 0) {
		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
			if (*last == '+') {
				*last = '*';
				break;
			} else
				*last = '+';
		} else if (space) {
			strcat(ie->ie_fullname, " +");
			space = 0;
		} else
			strcat(ie->ie_fullname, "+");
	}

	/*
	 * If this event has an ithread, update its priority and
	 * name.
	 */
	if (ie->ie_thread != NULL)
		ithread_update(ie->ie_thread);
	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}

int
intr_event_create(struct intr_event **event, void *source, int flags, int irq,
    void (*pre_ithread)(void *), void (*post_ithread)(void *),
    void (*post_filter)(void *), int (*assign_cpu)(void *, u_char),
    const char *fmt, ...)
{
	struct intr_event *ie;
	va_list ap;

	/* The only valid flag during creation is IE_SOFT. */
	if ((flags & ~IE_SOFT) != 0)
		return (EINVAL);
	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
	ie->ie_source = source;
	ie->ie_pre_ithread = pre_ithread;
	ie->ie_post_ithread = post_ithread;
	ie->ie_post_filter = post_filter;
	ie->ie_assign_cpu = assign_cpu;
	ie->ie_flags = flags;
	ie->ie_irq = irq;
	ie->ie_cpu = NOCPU;
	TAILQ_INIT(&ie->ie_handlers);
	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

	va_start(ap, fmt);
	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
	va_end(ap);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	mtx_lock(&event_lock);
	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
	mtx_unlock(&event_lock);
	if (event != NULL)
		*event = ie;
	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
	return (0);
}
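
/*
 * Illustrative sketch, not part of this file: machine-dependent interrupt
 * setup code typically creates one event per interrupt line roughly like
 * this (the xyz_* callbacks and the source cookie are hypothetical):
 *
 *	struct intr_event *ie;
 *	int error;
 *
 *	error = intr_event_create(&ie, xyz_isrc, 0, irq,
 *	    xyz_pre_ithread, xyz_post_ithread, xyz_post_filter,
 *	    xyz_assign_cpu, "irq%d:", irq);
 *	if (error)
 *		panic("could not create event for irq%d", irq);
 *
 * Handlers are attached later with intr_event_add_handler(), and the event
 * is serviced from the low-level interrupt entry via intr_event_handle().
 */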

/*
 * Bind an interrupt event to the specified CPU.  Note that not all
 * platforms support binding an interrupt to a CPU.  For those
 * platforms this request will fail.  For supported platforms, any
 * associated ithreads as well as the primary interrupt context will
 * be bound to the specified CPU.  Using a cpu id of NOCPU unbinds
 * the interrupt event.
 */
int
intr_event_bind(struct intr_event *ie, u_char cpu)
{
	cpuset_t mask;
	lwpid_t id;
	int error;

	/* Need a CPU to bind to. */
	if (cpu != NOCPU && CPU_ABSENT(cpu))
		return (EINVAL);

	if (ie->ie_assign_cpu == NULL)
		return (EOPNOTSUPP);

	error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
	if (error)
		return (error);

	/*
	 * If we have any ithreads try to set their mask first to verify
	 * permissions, etc.
	 */
	mtx_lock(&ie->ie_lock);
	if (ie->ie_thread != NULL) {
		CPU_ZERO(&mask);
		if (cpu == NOCPU)
			CPU_COPY(cpuset_root, &mask);
		else
			CPU_SET(cpu, &mask);
		id = ie->ie_thread->it_thread->td_tid;
		mtx_unlock(&ie->ie_lock);
		error = cpuset_setthread(id, &mask);
		if (error)
			return (error);
	} else
		mtx_unlock(&ie->ie_lock);
	error = ie->ie_assign_cpu(ie->ie_source, cpu);
	if (error) {
		mtx_lock(&ie->ie_lock);
		if (ie->ie_thread != NULL) {
			/* Restore the previous binding on failure. */
			CPU_ZERO(&mask);
			if (ie->ie_cpu == NOCPU)
				CPU_COPY(cpuset_root, &mask);
			else
				CPU_SET(ie->ie_cpu, &mask);
			id = ie->ie_thread->it_thread->td_tid;
			mtx_unlock(&ie->ie_lock);
			(void)cpuset_setthread(id, &mask);
		} else
			mtx_unlock(&ie->ie_lock);
		return (error);
	}

	mtx_lock(&ie->ie_lock);
	ie->ie_cpu = cpu;
	mtx_unlock(&ie->ie_lock);

	return (error);
}

static struct intr_event *
intr_lookup(int irq)
{
	struct intr_event *ie;

	mtx_lock(&event_lock);
	TAILQ_FOREACH(ie, &event_list, ie_list)
		if (ie->ie_irq == irq &&
		    (ie->ie_flags & IE_SOFT) == 0 &&
		    TAILQ_FIRST(&ie->ie_handlers) != NULL)
			break;
	mtx_unlock(&event_lock);
	return (ie);
}

int
intr_setaffinity(int irq, void *m)
{
	struct intr_event *ie;
	cpuset_t *mask;
	u_char cpu;
	int n;

	mask = m;
	cpu = NOCPU;
	/*
	 * If we're setting all cpus we can unbind.  Otherwise make sure
	 * only one cpu is in the set.
	 */
	if (CPU_CMP(cpuset_root, mask)) {
		for (n = 0; n < CPU_SETSIZE; n++) {
			if (!CPU_ISSET(n, mask))
				continue;
			if (cpu != NOCPU)
				return (EINVAL);
			cpu = (u_char)n;
		}
	}
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);
	return (intr_event_bind(ie, cpu));
}

int
intr_getaffinity(int irq, void *m)
{
	struct intr_event *ie;
	cpuset_t *mask;

	mask = m;
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);
	CPU_ZERO(mask);
	mtx_lock(&ie->ie_lock);
	if (ie->ie_cpu == NOCPU)
		CPU_COPY(cpuset_root, mask);
	else
		CPU_SET(ie->ie_cpu, mask);
	mtx_unlock(&ie->ie_lock);
	return (0);
}

int
intr_event_destroy(struct intr_event *ie)
{

	mtx_lock(&event_lock);
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		mtx_unlock(&ie->ie_lock);
		mtx_unlock(&event_lock);
		return (EBUSY);
	}
	TAILQ_REMOVE(&event_list, ie, ie_list);
#ifndef notyet
	if (ie->ie_thread != NULL) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	mtx_unlock(&event_lock);
	mtx_destroy(&ie->ie_lock);
	free(ie, M_ITHREAD);
	return (0);
}

#ifndef INTR_FILTER
static struct intr_thread *
ithread_create(const char *name)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
	    &td, RFSTOPPED | RFHIGHPID,
	    0, "intr", "%s", name);
	if (error)
		panic("kproc_create() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}
#else
static struct intr_thread *
ithread_create(const char *name, struct intr_handler *ih)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ih, &intrproc,
	    &td, RFSTOPPED | RFHIGHPID,
	    0, "intr", "%s", name);
	if (error)
		panic("kproc_create() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}
#endif

static void
ithread_destroy(struct intr_thread *ithread)
{
	struct thread *td;

	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
	td = ithread->it_thread;
	thread_lock(td);
	ithread->it_flags |= IT_DEAD;
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	}
	thread_unlock(td);
}

#ifndef INTR_FILTER
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Add the new handler to the event in priority order. */
	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	intr_event_update(ie);

	/* Create a thread if we need one. */
	while (ie->ie_thread == NULL && handler != NULL) {
		if (ie->ie_flags & IE_ADDING_THREAD)
			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
		else {
			ie->ie_flags |= IE_ADDING_THREAD;
			mtx_unlock(&ie->ie_lock);
			it = ithread_create("intr: newborn");
			mtx_lock(&ie->ie_lock);
			ie->ie_flags &= ~IE_ADDING_THREAD;
			ie->ie_thread = it;
			it->it_event = ie;
			ithread_update(it);
			wakeup(ie);
		}
	}
	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}
#else
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Add the new handler to the event in priority order. */
	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	intr_event_update(ie);

	/* For filtered handlers, create a private ithread to run on. */
	if (filter != NULL && handler != NULL) {
		mtx_unlock(&ie->ie_lock);
		it = ithread_create("intr: newborn", ih);
		mtx_lock(&ie->ie_lock);
		it->it_event = ie;
		ih->ih_thread = it;
		ithread_update(it); // XXX - do we really need this?!?!?
	} else { /* Create the global per-event thread if we need one. */
		while (ie->ie_thread == NULL && handler != NULL) {
			if (ie->ie_flags & IE_ADDING_THREAD)
				msleep(ie, &ie->ie_lock, 0, "ithread", 0);
			else {
				ie->ie_flags |= IE_ADDING_THREAD;
				mtx_unlock(&ie->ie_lock);
				it = ithread_create("intr: newborn", ih);
				mtx_lock(&ie->ie_lock);
				ie->ie_flags &= ~IE_ADDING_THREAD;
				ie->ie_thread = it;
				it->it_event = ie;
				ithread_update(it);
				wakeup(ie);
			}
		}
	}
	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}
#endif
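
/*
 * Illustrative sketch, not part of this file: drivers do not call
 * intr_event_add_handler() directly.  They register handlers through
 * bus_setup_intr(9), and the bus/MD interrupt code attaches the handler
 * to the event for that interrupt line.  The driver-side call might look
 * roughly like this (the softc layout and handler names are hypothetical):
 *
 *	error = bus_setup_intr(dev, sc->irq_res,
 *	    INTR_TYPE_NET | INTR_MPSAFE,
 *	    xyz_intr_filter, xyz_intr_task, sc, &sc->irq_cookie);
 *
 * A non-NULL filter runs in primary interrupt context; a non-NULL handler
 * runs in the ithread machinery implemented above.
 */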

/*
 * Append a description preceded by a ':' to the name of the specified
 * interrupt handler.
 */
int
intr_event_describe_handler(struct intr_event *ie, void *cookie,
    const char *descr)
{
	struct intr_handler *ih;
	size_t space;
	char *start;

	mtx_lock(&ie->ie_lock);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih == cookie)
			break;
	}
	if (ih == NULL) {
		mtx_unlock(&ie->ie_lock);
		panic("handler %p not found in interrupt event %p", cookie, ie);
	}
#endif
	ih = cookie;

	/*
	 * Look for an existing description by checking for an
	 * existing ":".  This assumes device names do not include
	 * colons.  If one is found, prepare to insert the new
	 * description at that point.  If one is not found, find the
	 * end of the name to use as the insertion point.
	 */
	start = index(ih->ih_name, ':');
	if (start == NULL)
		start = index(ih->ih_name, 0);

	/*
	 * See if there is enough remaining room in the string for the
	 * description + ":".  The "- 1" leaves room for the trailing
	 * '\0'.  The "+ 1" accounts for the colon.
	 */
	space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1;
	if (strlen(descr) + 1 > space) {
		mtx_unlock(&ie->ie_lock);
		return (ENOSPC);
	}

	/* Append a colon followed by the description. */
	*start = ':';
	strcpy(start + 1, descr);
	intr_event_update(ie);
	mtx_unlock(&ie->ie_lock);
	return (0);
}

/*
 * Return the ie_source field from the intr_event an intr_handler is
 * associated with.
 */
void *
intr_handler_source(void *cookie)
{
	struct intr_handler *ih;
	struct intr_event *ie;

	ih = (struct intr_handler *)cookie;
	if (ih == NULL)
		return (NULL);
	ie = ih->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    ih->ih_name));
	return (ie->ie_source);
}

#ifndef INTR_FILTER
int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
#ifdef INVARIANTS
	struct intr_handler *ih;
#endif
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ie->ie_lock);
	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
	    ih->ih_name, ie->ie_name);
ok:
#endif
	/*
	 * If there is no ithread, then just remove the handler and return.
	 * XXX: Note that an INTR_FAST handler might be running on another
	 * CPU!
	 */
	if (ie->ie_thread == NULL) {
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 *
	 * During a cold boot while cold is set, msleep() does not sleep,
	 * so we have to remove the handler here rather than letting the
	 * thread do it.
	 */
	thread_lock(ie->ie_thread->it_thread);
	if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 */
		ie->ie_thread->it_need = 1;
	} else
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
	thread_unlock(ie->ie_thread->it_thread);
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	intr_event_update(ie);
#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(4).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (!(ih->ih_flags & IH_FAST)) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}

static int
intr_event_schedule_thread(struct intr_event *ie)
{
	struct intr_entropy entropy;
	struct intr_thread *it;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
	    ie->ie_thread == NULL)
		return (EINVAL);

	ctd = curthread;
	it = ie->ie_thread;
	td = it->it_thread;
	p = td->td_proc;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
		    p->p_pid, td->td_name);
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest(&entropy, sizeof(entropy), 2, 0,
		    RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 */
	it->it_need = 1;
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
	}
	thread_unlock(td);

	return (0);
}
#else
int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
	struct intr_thread *it;
#ifdef INVARIANTS
	struct intr_handler *ih;
#endif
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ie->ie_lock);
	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
	    ih->ih_name, ie->ie_name);
ok:
#endif
	/*
	 * If there are no ithreads (per event and per handler), then
	 * just remove the handler and return.
	 * XXX: Note that an INTR_FAST handler might be running on another CPU!
	 */
	if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/* Private or global ithread? */
	it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;
	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 *
	 * During a cold boot while cold is set, msleep() does not sleep,
	 * so we have to remove the handler here rather than letting the
	 * thread do it.
	 */
	thread_lock(it->it_thread);
	if (!TD_AWAITING_INTR(it->it_thread) && !cold) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 */
		it->it_need = 1;
	} else
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
	thread_unlock(it->it_thread);
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	/*
	 * At this point, the handler has been disconnected from the event,
	 * so we can kill the private ithread if any.
	 */
	if (handler->ih_thread) {
		ithread_destroy(handler->ih_thread);
		handler->ih_thread = NULL;
	}
	intr_event_update(ie);
#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(4).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (handler != NULL) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}

static int
intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
{
	struct intr_entropy entropy;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
		return (EINVAL);

	ctd = curthread;
	td = it->it_thread;
	p = td->td_proc;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
		    p->p_pid, td->td_name);
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest(&entropy, sizeof(entropy), 2, 0,
		    RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 */
	it->it_need = 1;
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
	}
	thread_unlock(td);

	return (0);
}
#endif

/*
 * Allow interrupt event binding for software interrupt handlers -- a no-op,
 * since interrupts are generated in software rather than being directed by
 * a PIC.
 */
static int
swi_assign_cpu(void *arg, u_char cpu)
{

	return (0);
}

/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
    void *arg, int pri, enum intr_type flags, void **cookiep)
{
	struct thread *td;
	struct intr_event *ie;
	int error;

	if (flags & INTR_ENTROPY)
		return (EINVAL);

	ie = (eventp != NULL) ? *eventp : NULL;

	if (ie != NULL) {
		if (!(ie->ie_flags & IE_SOFT))
			return (EINVAL);
	} else {
		error = intr_event_create(&ie, NULL, IE_SOFT, 0,
		    NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
		if (error)
			return (error);
		if (eventp != NULL)
			*eventp = ie;
	}
	error = intr_event_add_handler(ie, name, NULL, handler, arg,
	    PI_SWI(pri), flags, cookiep);
	if (error)
		return (error);
	if (pri == SWI_CLOCK) {
		td = ie->ie_thread->it_thread;
		thread_lock(td);
		td->td_flags |= TDF_NOLOAD;
		thread_unlock(td);
	}
	return (0);
}
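
/*
 * Illustrative sketch, not part of this file: a subsystem that wants
 * deferred software-interrupt processing might use swi_add() and
 * swi_sched() roughly as follows (the xyz_* names are hypothetical):
 *
 *	static void *xyz_swi_cookie;
 *
 *	static void
 *	xyz_swi_handler(void *arg)
 *	{
 *		... deferred work, runs in a swi ithread ...
 *	}
 *
 *	if (swi_add(NULL, "xyz", xyz_swi_handler, NULL, SWI_TQ,
 *	    INTR_MPSAFE, &xyz_swi_cookie))
 *		panic("could not add xyz swi handler");
 *
 * Later, from interrupt or top-half code, the handler is kicked with:
 *
 *	swi_sched(xyz_swi_cookie, 0);
 */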

/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
	struct intr_handler *ih = (struct intr_handler *)cookie;
	struct intr_event *ie = ih->ih_event;
	int error;

	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
	    ih->ih_need);

	/*
	 * Set ih_need for this handler so that if the ithread is already
	 * running it will execute this handler on the next pass.  Otherwise,
	 * it will execute it the next time it runs.
	 */
	atomic_store_rel_int(&ih->ih_need, 1);

	if (!(flags & SWI_DELAY)) {
		PCPU_INC(cnt.v_soft);
#ifdef INTR_FILTER
		error = intr_event_schedule_thread(ie, ie->ie_thread);
#else
		error = intr_event_schedule_thread(ie);
#endif
		KASSERT(error == 0, ("stray software interrupt"));
	}
}

/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

	return (intr_event_remove_handler(cookie));
}

#ifdef INTR_FILTER
static void
priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih)
{
	struct intr_event *ie;

	ie = ih->ih_event;
	/*
	 * If this handler is marked for death, remove it from
	 * the list of handlers and wake up the sleeper.
	 */
	if (ih->ih_flags & IH_DEAD) {
		mtx_lock(&ie->ie_lock);
		TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
		ih->ih_flags &= ~IH_DEAD;
		wakeup(ih);
		mtx_unlock(&ie->ie_lock);
		return;
	}

	/* Execute this handler. */
	CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
	    __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
	    ih->ih_name, ih->ih_flags);

	if (!(ih->ih_flags & IH_MPSAFE))
		mtx_lock(&Giant);
	ih->ih_handler(ih->ih_argument);
	if (!(ih->ih_flags & IH_MPSAFE))
		mtx_unlock(&Giant);
}
#endif

/*
 * This is a public function for use by drivers that mux interrupt
 * handlers for child devices from their interrupt handler.
 */
void
intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
{
	struct intr_handler *ih, *ihn;

	TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
		/*
		 * If this handler is marked for death, remove it from
		 * the list of handlers and wake up the sleeper.
		 */
		if (ih->ih_flags & IH_DEAD) {
			mtx_lock(&ie->ie_lock);
			TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
			ih->ih_flags &= ~IH_DEAD;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
			continue;
		}

		/* Skip filter only handlers */
		if (ih->ih_handler == NULL)
			continue;

		/*
		 * For software interrupt threads, we only execute
		 * handlers that have their need flag set.  Hardware
		 * interrupt threads always invoke all of their handlers.
		 */
		if (ie->ie_flags & IE_SOFT) {
			if (!ih->ih_need)
				continue;
			else
				atomic_store_rel_int(&ih->ih_need, 0);
		}

		/* Execute this handler. */
		CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
		    __func__, p->p_pid, (void *)ih->ih_handler,
		    ih->ih_argument, ih->ih_name, ih->ih_flags);

		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_lock(&Giant);
		ih->ih_handler(ih->ih_argument);
		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_unlock(&Giant);
	}
}

static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{

	/* Interrupt handlers should not sleep. */
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_NO_SLEEPING();
	intr_event_execute_handlers(p, ie);
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_SLEEPING_OK();

	/*
	 * Interrupt storm handling:
	 *
	 * If this interrupt source is currently storming, then throttle
	 * it to only fire the handler once per clock tick.
	 *
	 * If this interrupt source is not currently storming, but the
	 * number of back to back interrupts exceeds the storm threshold,
	 * then enter storming mode.
	 */
	if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
	    !(ie->ie_flags & IE_SOFT)) {
		/* Report the message only once every second. */
		if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
			printf(
	"interrupt storm detected on \"%s\"; throttling interrupt source\n",
			    ie->ie_name);
		}
		pause("istorm", 1);
	} else
		ie->ie_count++;

	/*
	 * Now that all the handlers have had a chance to run, reenable
	 * the interrupt source.
	 */
	if (ie->ie_post_ithread != NULL)
		ie->ie_post_ithread(ie->ie_source);
}

#ifndef INTR_FILTER
/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	ithd = (struct intr_thread *)arg;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 */
		while (ithd->it_need) {
			/*
			 * This might need a full read and write barrier
			 * to make sure that this write posts before any
			 * of the memory or device accesses in the
			 * handlers.
			 */
			atomic_store_rel_int(&ithd->it_need, 0);
			ithread_execute_handlers(p, ie);
		}
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL | SWT_IWAIT, NULL);
		}
		thread_unlock(td);
	}
}

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:                  the event connected to this interrupt.
 * o frame:               some archs (e.g. i386) pass a frame to some
 *                        handlers as their main argument.
 * Return value:
 * o 0:                   everything ok.
 * o EINVAL:              stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_handler *ih;
	struct trapframe *oldframe;
	struct thread *td;
	int error, ret, thread;

	td = curthread;

	/* An interrupt with no event or handlers is a stray interrupt. */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	/*
	 * Execute fast interrupt handlers directly.
	 * To support clock handlers, if a handler registers
	 * with a NULL argument, then we pass it a pointer to
	 * a trapframe as its argument.
	 */
	td->td_intr_nesting_level++;
	thread = 0;
	ret = 0;
	critical_enter();
	oldframe = td->td_intr_frame;
	td->td_intr_frame = frame;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih->ih_filter == NULL) {
			thread = 1;
			continue;
		}
		CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
		    ih->ih_filter, ih->ih_argument == NULL ? frame :
		    ih->ih_argument, ih->ih_name);
		if (ih->ih_argument == NULL)
			ret = ih->ih_filter(frame);
		else
			ret = ih->ih_filter(ih->ih_argument);
		KASSERT(ret == FILTER_STRAY ||
		    ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
		    (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
		    ("%s: incorrect return value %#x from %s", __func__, ret,
		    ih->ih_name));

		/*
		 * Wrapper handler special handling:
		 *
		 * in some particular cases (like pccard and pccbb),
		 * the _real_ device handler is wrapped in a couple of
		 * functions - a filter wrapper and an ithread wrapper.
		 * In this case (and just in this case), the filter wrapper
		 * could ask the system to schedule the ithread and mask
		 * the interrupt source if the wrapped handler is composed
		 * of just an ithread handler.
		 *
		 * TODO: write a generic wrapper to avoid people rolling
		 * their own
		 */
		if (!thread) {
			if (ret == FILTER_SCHEDULE_THREAD)
				thread = 1;
		}
	}
	td->td_intr_frame = oldframe;

	if (thread) {
		if (ie->ie_pre_ithread != NULL)
			ie->ie_pre_ithread(ie->ie_source);
	} else {
		if (ie->ie_post_filter != NULL)
			ie->ie_post_filter(ie->ie_source);
	}

	/* Schedule the ithread if needed. */
	if (thread) {
		error = intr_event_schedule_thread(ie);
#ifndef XEN
		KASSERT(error == 0, ("bad stray interrupt"));
#else
		if (error != 0)
			log(LOG_WARNING, "bad stray interrupt");
#endif
	}
	critical_exit();
	td->td_intr_nesting_level--;
	return (0);
}
#else
/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_handler *ih;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	int priv;

	td = curthread;
	p = td->td_proc;
	ih = (struct intr_handler *)arg;
	priv = (ih->ih_thread != NULL) ? 1 : 0;
	ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 */
		while (ithd->it_need) {
			/*
			 * This might need a full read and write barrier
			 * to make sure that this write posts before any
			 * of the memory or device accesses in the
			 * handlers.
			 */
			atomic_store_rel_int(&ithd->it_need, 0);
			if (priv)
				priv_ithread_execute_handler(p, ih);
			else
				ithread_execute_handlers(p, ie);
		}
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL | SWT_IWAIT, NULL);
		}
		thread_unlock(td);
	}
}

/*
 * Main loop for interrupt filter.
 *
 * Some architectures (i386, amd64 and arm) require the optional frame
 * parameter, and use it as the main argument for fast handler execution
 * when ih_argument == NULL.
 *
 * Return value:
 * o FILTER_STRAY:              No filter recognized the event, and no
 *                              filter-less handler is registered on this
 *                              line.
 * o FILTER_HANDLED:            A filter claimed the event and served it.
 * o FILTER_SCHEDULE_THREAD:    No filter claimed the event, but there's at
 *                              least one filter-less handler on this line.
 * o FILTER_HANDLED |
 *   FILTER_SCHEDULE_THREAD:    A filter claimed the event, and asked for
 *                              scheduling the per-handler ithread.
 *
 * In case an ithread has to be scheduled, in *ithd there will be a
 * pointer to a struct intr_thread containing the thread to be
 * scheduled.
 */

static int
intr_filter_loop(struct intr_event *ie, struct trapframe *frame,
    struct intr_thread **ithd)
{
	struct intr_handler *ih;
	void *arg;
	int ret, thread_only;

	ret = 0;
	thread_only = 0;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		/*
		 * Execute fast interrupt handlers directly.
		 * To support clock handlers, if a handler registers
		 * with a NULL argument, then we pass it a pointer to
		 * a trapframe as its argument.
		 */
		arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument);

		CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__,
		    ih->ih_filter, ih->ih_handler, arg, ih->ih_name);

		if (ih->ih_filter != NULL)
			ret = ih->ih_filter(arg);
		else {
			thread_only = 1;
			continue;
		}
		KASSERT(ret == FILTER_STRAY ||
		    ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
		    (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
		    ("%s: incorrect return value %#x from %s", __func__, ret,
		    ih->ih_name));
		if (ret & FILTER_STRAY)
			continue;
		else {
			*ithd = ih->ih_thread;
			return (ret);
		}
	}

	/*
	 * No filters handled the interrupt and we have at least
	 * one handler without a filter.  In this case, we schedule
	 * all of the filter-less handlers to run in the ithread.
	 */
	if (thread_only) {
		*ithd = ie->ie_thread;
		return (FILTER_SCHEDULE_THREAD);
	}
	return (FILTER_STRAY);
}

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:                  the event connected to this interrupt.
 * o frame:               some archs (e.g. i386) pass a frame to some
 *                        handlers as their main argument.
 * Return value:
 * o 0:                   everything ok.
 * o EINVAL:              stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_thread *ithd;
	struct trapframe *oldframe;
	struct thread *td;
	int thread;

	ithd = NULL;
	td = curthread;

	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	td->td_intr_nesting_level++;
	thread = 0;
	critical_enter();
	oldframe = td->td_intr_frame;
	td->td_intr_frame = frame;
	thread = intr_filter_loop(ie, frame, &ithd);
	if (thread & FILTER_HANDLED) {
		if (ie->ie_post_filter != NULL)
			ie->ie_post_filter(ie->ie_source);
	} else {
		if (ie->ie_pre_ithread != NULL)
			ie->ie_pre_ithread(ie->ie_source);
	}
	td->td_intr_frame = oldframe;
	critical_exit();

	/* Interrupt storm logic */
	if (thread & FILTER_STRAY) {
		ie->ie_count++;
		if (ie->ie_count < intr_storm_threshold)
			printf("Interrupt stray detection not present\n");
	}

	/* Schedule an ithread if needed. */
	if (thread & FILTER_SCHEDULE_THREAD) {
		if (intr_event_schedule_thread(ie, ithd) != 0)
			panic("%s: impossible stray interrupt", __func__);
	}
	td->td_intr_nesting_level--;
	return (0);
}
#endif

#ifdef DDB
/*
 * Dump details about an interrupt handler
 */
static void
db_dump_intrhand(struct intr_handler *ih)
{
	int comma;

	db_printf("\t%-10s ", ih->ih_name);
	switch (ih->ih_pri) {
	case PI_REALTIME:
		db_printf("CLK ");
		break;
	case PI_AV:
		db_printf("AV ");
		break;
	case PI_TTY:
		db_printf("TTY ");
		break;
	case PI_NET:
		db_printf("NET ");
		break;
	case PI_DISK:
		db_printf("DISK");
		break;
	case PI_DULL:
		db_printf("DULL");
		break;
	default:
		if (ih->ih_pri >= PI_SOFT)
			db_printf("SWI ");
		else
			db_printf("%4u", ih->ih_pri);
		break;
	}
	db_printf(" ");
	db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
	db_printf("(%p)", ih->ih_argument);
	if (ih->ih_need ||
	    (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
	    IH_MPSAFE)) != 0) {
		db_printf(" {");
		comma = 0;
		if (ih->ih_flags & IH_EXCLUSIVE) {
			if (comma)
				db_printf(", ");
			db_printf("EXCL");
			comma = 1;
		}
		if (ih->ih_flags & IH_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ih->ih_flags & IH_DEAD) {
			if (comma)
				db_printf(", ");
			db_printf("DEAD");
			comma = 1;
		}
		if (ih->ih_flags & IH_MPSAFE) {
			if (comma)
				db_printf(", ");
			db_printf("MPSAFE");
			comma = 1;
		}
		if (ih->ih_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");
}

/*
 * Dump details about an event.
 */
void
db_dump_intr_event(struct intr_event *ie, int handlers)
{
	struct intr_handler *ih;
	struct intr_thread *it;
	int comma;

	db_printf("%s ", ie->ie_fullname);
	it = ie->ie_thread;
	if (it != NULL)
		db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
	else
		db_printf("(no thread)");
	if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
	    (it != NULL && it->it_need)) {
		db_printf(" {");
		comma = 0;
		if (ie->ie_flags & IE_SOFT) {
			db_printf("SOFT");
			comma = 1;
		}
		if (ie->ie_flags & IE_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ie->ie_flags & IE_ADDING_THREAD) {
			if (comma)
				db_printf(", ");
			db_printf("ADDING_THREAD");
			comma = 1;
		}
		if (it != NULL && it->it_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");

	if (handlers)
		TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
			db_dump_intrhand(ih);
}

/*
 * Dump data about interrupt handlers
 */
DB_SHOW_COMMAND(intr, db_show_intr)
{
	struct intr_event *ie;
	int all, verbose;

	verbose = index(modif, 'v') != NULL;
	all = index(modif, 'a') != NULL;
	TAILQ_FOREACH(ie, &event_list, ie_list) {
		if (!all && TAILQ_EMPTY(&ie->ie_handlers))
			continue;
		db_dump_intr_event(ie, verbose);
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */

/*
 * Start standard software interrupt threads
 */
static void
start_softintr(void *dummy)
{

	if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
		panic("died while creating vm swi ithread");
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
    NULL);

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however is
 * machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames,
	    req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrcnt,
	    (char *)eintrcnt - (char *)intrcnt, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");

#ifdef DDB
/*
 * DDB command to dump the interrupt statistics.
 */
DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
{
	u_long *i;
	char *cp;

	cp = intrnames;
	for (i = intrcnt; i != eintrcnt && !db_pager_quit; i++) {
		if (*cp == '\0')
			break;
		if (*i != 0)
			db_printf("%s\t%lu\n", cp, *i);
		cp += strlen(cp) + 1;
	}
}
#endif