/*-
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
	struct intr_event *it_event;
	struct thread *it_thread;	/* Kernel thread. */
	int	it_flags;		/* (j) IT_* flags. */
	int	it_need;		/* Needs service. */
};

/* Interrupt thread flags kept in it_flags */
#define	IT_DEAD		0x000001	/* Thread is waiting to exit. */
#define	IT_WAIT		0x000002	/* Thread is waiting for completion. */
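
/*
 * Entropy sample passed to random_harvest() when an ithread is scheduled
 * for an event flagged IE_ENTROPY (see intr_event_schedule_thread()).
 */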
struct intr_entropy {
	struct thread *td;
	uintptr_t event;
};

struct intr_event *clk_intr_event;
struct intr_event *tty_intr_event;
void	*vm_ih;
struct	proc *intrproc;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static int intr_storm_threshold = 1000;
TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);
static struct mtx event_lock;
MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);

static void	intr_event_update(struct intr_event *ie);
#ifdef INTR_FILTER
static int	intr_event_schedule_thread(struct intr_event *ie,
		    struct intr_thread *ithd);
static int	intr_filter_loop(struct intr_event *ie,
		    struct trapframe *frame, struct intr_thread **ithd);
static struct intr_thread *ithread_create(const char *name,
		    struct intr_handler *ih);
#else
static int	intr_event_schedule_thread(struct intr_event *ie);
static struct intr_thread *ithread_create(const char *name);
#endif
static void	ithread_destroy(struct intr_thread *ithread);
static void	ithread_execute_handlers(struct proc *p,
		    struct intr_event *ie);
#ifdef INTR_FILTER
static void	priv_ithread_execute_handler(struct proc *p,
		    struct intr_handler *ih);
#endif
static void	ithread_loop(void *);
static void	ithread_update(struct intr_thread *ithd);
static void	start_softintr(void *);

/* Map an interrupt type to an ithread priority. */
u_char
intr_priority(enum intr_type flags)
{
	u_char pri;

	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
	switch (flags) {
	case INTR_TYPE_TTY:
		pri = PI_TTY;
		break;
	case INTR_TYPE_BIO:
		pri = PI_DISK;
		break;
	case INTR_TYPE_NET:
		pri = PI_NET;
		break;
	case INTR_TYPE_CAM:
		pri = PI_DISK;
		break;
	case INTR_TYPE_AV:
		pri = PI_AV;
		break;
	case INTR_TYPE_CLK:
		pri = PI_REALTIME;
		break;
	case INTR_TYPE_MISC:
		pri = PI_DULL;		/* don't care */
		break;
	default:
		/* We didn't specify an interrupt level. */
		panic("intr_priority: no interrupt type in flags");
	}

	return pri;
}

/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
	struct intr_event *ie;
	struct thread *td;
	u_char pri;

	ie = ithd->it_event;
	td = ithd->it_thread;

	/* Determine the overall priority of this event. */
	if (TAILQ_EMPTY(&ie->ie_handlers))
		pri = PRI_MAX_ITHD;
	else
		pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;

	/* Update name and priority. */
	strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
	thread_lock(td);
	sched_prio(td, pri);
	thread_unlock(td);
}

/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
	struct intr_handler *ih;
	char *last;
	int missed, space;

	/* Start off with no entropy and just the name of the event. */
	mtx_assert(&ie->ie_lock, MA_OWNED);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	ie->ie_flags &= ~IE_ENTROPY;
	missed = 0;
	space = 1;

	/* Run through all the handlers updating values. */
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
		    sizeof(ie->ie_fullname)) {
			strcat(ie->ie_fullname, " ");
			strcat(ie->ie_fullname, ih->ih_name);
			space = 0;
		} else
			missed++;
		if (ih->ih_flags & IH_ENTROPY)
			ie->ie_flags |= IE_ENTROPY;
	}

	/*
	 * If the handler names were too long, add +'s to indicate missing
	 * names.  If we run out of room and still have +'s to add, change
	 * the last character from a + to a *.
	 */
	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
	while (missed-- > 0) {
		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
			if (*last == '+') {
				*last = '*';
				break;
			} else
				*last = '+';
		} else if (space) {
			strcat(ie->ie_fullname, " +");
			space = 0;
		} else
			strcat(ie->ie_fullname, "+");
	}

	/*
	 * If this event has an ithread, update its priority and
	 * name.
	 */
	if (ie->ie_thread != NULL)
		ithread_update(ie->ie_thread);
	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}

int
intr_event_create(struct intr_event **event, void *source, int flags, int irq,
    void (*pre_ithread)(void *), void (*post_ithread)(void *),
    void (*post_filter)(void *), int (*assign_cpu)(void *, u_char),
    const char *fmt, ...)
{
	struct intr_event *ie;
	va_list ap;

	/* The only valid flag during creation is IE_SOFT. */
	if ((flags & ~IE_SOFT) != 0)
		return (EINVAL);
	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
	ie->ie_source = source;
	ie->ie_pre_ithread = pre_ithread;
	ie->ie_post_ithread = post_ithread;
	ie->ie_post_filter = post_filter;
	ie->ie_assign_cpu = assign_cpu;
	ie->ie_flags = flags;
	ie->ie_irq = irq;
	ie->ie_cpu = NOCPU;
	TAILQ_INIT(&ie->ie_handlers);
	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

	va_start(ap, fmt);
	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
	va_end(ap);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	mtx_lock(&event_lock);
	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
	mtx_unlock(&event_lock);
	if (event != NULL)
		*event = ie;
	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
	return (0);
}

/*
 * Bind an interrupt event to the specified CPU.  Note that not all
 * platforms support binding an interrupt to a CPU.  For those
 * platforms this request will fail.  For supported platforms, any
 * associated ithreads as well as the primary interrupt context will
 * be bound to the specified CPU.  Using a cpu id of NOCPU unbinds
 * the interrupt event.
 */
int
intr_event_bind(struct intr_event *ie, u_char cpu)
{
	cpuset_t mask;
	lwpid_t id;
	int error;

	/* Need a CPU to bind to. */
	if (cpu != NOCPU && CPU_ABSENT(cpu))
		return (EINVAL);

	if (ie->ie_assign_cpu == NULL)
		return (EOPNOTSUPP);

	error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
	if (error)
		return (error);

	/*
	 * If we have any ithreads try to set their mask first to verify
	 * permissions, etc.
	 */
	mtx_lock(&ie->ie_lock);
	if (ie->ie_thread != NULL) {
		CPU_ZERO(&mask);
		if (cpu == NOCPU)
			CPU_COPY(cpuset_root, &mask);
		else
			CPU_SET(cpu, &mask);
		id = ie->ie_thread->it_thread->td_tid;
		mtx_unlock(&ie->ie_lock);
		error = cpuset_setthread(id, &mask);
		if (error)
			return (error);
	} else
		mtx_unlock(&ie->ie_lock);
	error = ie->ie_assign_cpu(ie->ie_source, cpu);
	if (error) {
		mtx_lock(&ie->ie_lock);
		if (ie->ie_thread != NULL) {
			/* Restore the previous binding on failure. */
			CPU_ZERO(&mask);
			if (ie->ie_cpu == NOCPU)
				CPU_COPY(cpuset_root, &mask);
			else
				CPU_SET(ie->ie_cpu, &mask);
			id = ie->ie_thread->it_thread->td_tid;
			mtx_unlock(&ie->ie_lock);
			(void)cpuset_setthread(id, &mask);
		} else
			mtx_unlock(&ie->ie_lock);
		return (error);
	}

	mtx_lock(&ie->ie_lock);
	ie->ie_cpu = cpu;
	mtx_unlock(&ie->ie_lock);

	return (error);
}

static struct intr_event *
intr_lookup(int irq)
{
	struct intr_event *ie;

	mtx_lock(&event_lock);
	TAILQ_FOREACH(ie, &event_list, ie_list)
		if (ie->ie_irq == irq &&
		    (ie->ie_flags & IE_SOFT) == 0 &&
		    TAILQ_FIRST(&ie->ie_handlers) != NULL)
			break;
	mtx_unlock(&event_lock);
	return (ie);
}

int
intr_setaffinity(int irq, void *m)
{
	struct intr_event *ie;
	cpuset_t *mask;
	u_char cpu;
	int n;

	mask = m;
	cpu = NOCPU;
	/*
	 * If we're setting all cpus we can unbind.  Otherwise make sure
	 * only one cpu is in the set.
	 */
	if (CPU_CMP(cpuset_root, mask)) {
		for (n = 0; n < CPU_SETSIZE; n++) {
			if (!CPU_ISSET(n, mask))
				continue;
			if (cpu != NOCPU)
				return (EINVAL);
			cpu = (u_char)n;
		}
	}
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);
	return (intr_event_bind(ie, cpu));
}

int
intr_getaffinity(int irq, void *m)
{
	struct intr_event *ie;
	cpuset_t *mask;

	mask = m;
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);
	CPU_ZERO(mask);
	mtx_lock(&ie->ie_lock);
	if (ie->ie_cpu == NOCPU)
		CPU_COPY(cpuset_root, mask);
	else
		CPU_SET(ie->ie_cpu, mask);
	mtx_unlock(&ie->ie_lock);
	return (0);
}

int
intr_event_destroy(struct intr_event *ie)
{

	mtx_lock(&event_lock);
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		mtx_unlock(&ie->ie_lock);
		mtx_unlock(&event_lock);
		return (EBUSY);
	}
	TAILQ_REMOVE(&event_list, ie, ie_list);
#ifndef notyet
	if (ie->ie_thread != NULL) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	mtx_unlock(&event_lock);
	mtx_destroy(&ie->ie_lock);
	free(ie, M_ITHREAD);
	return (0);
}

#ifndef INTR_FILTER
static struct intr_thread *
ithread_create(const char *name)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
	    &td, RFSTOPPED | RFHIGHPID,
	    0, "intr", "%s", name);
	if (error)
		panic("kproc_create() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}
#else
static struct intr_thread *
ithread_create(const char *name, struct intr_handler *ih)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ih, &intrproc,
	    &td, RFSTOPPED | RFHIGHPID,
	    0, "intr", "%s", name);
	if (error)
		panic("kproc_create() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}
#endif

static void
ithread_destroy(struct intr_thread *ithread)
{
	struct thread *td;

	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
	td = ithread->it_thread;
	thread_lock(td);
	ithread->it_flags |= IT_DEAD;
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	}
	thread_unlock(td);
}

#ifndef INTR_FILTER
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Add the new handler to the event in priority order. */
	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	intr_event_update(ie);

	/* Create a thread if we need one. */
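	/*
	 * ie_lock is dropped around ithread_create() below, so the
	 * IE_ADDING_THREAD flag makes any concurrent caller sleep instead
	 * of creating a second thread for the same event.
	 */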
	while (ie->ie_thread == NULL && handler != NULL) {
		if (ie->ie_flags & IE_ADDING_THREAD)
			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
		else {
			ie->ie_flags |= IE_ADDING_THREAD;
			mtx_unlock(&ie->ie_lock);
			it = ithread_create("intr: newborn");
			mtx_lock(&ie->ie_lock);
			ie->ie_flags &= ~IE_ADDING_THREAD;
			ie->ie_thread = it;
			it->it_event = ie;
			ithread_update(it);
			wakeup(ie);
		}
	}
	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}
#else
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Add the new handler to the event in priority order. */
	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	intr_event_update(ie);

	/* For filtered handlers, create a private ithread to run on. */
	if (filter != NULL && handler != NULL) {
		mtx_unlock(&ie->ie_lock);
		it = ithread_create("intr: newborn", ih);
		mtx_lock(&ie->ie_lock);
		it->it_event = ie;
		ih->ih_thread = it;
		ithread_update(it); // XXX - do we really need this?!?!?
	} else { /* Create the global per-event thread if we need one. */
		while (ie->ie_thread == NULL && handler != NULL) {
			if (ie->ie_flags & IE_ADDING_THREAD)
				msleep(ie, &ie->ie_lock, 0, "ithread", 0);
			else {
				ie->ie_flags |= IE_ADDING_THREAD;
				mtx_unlock(&ie->ie_lock);
				it = ithread_create("intr: newborn", ih);
				mtx_lock(&ie->ie_lock);
				ie->ie_flags &= ~IE_ADDING_THREAD;
				ie->ie_thread = it;
				it->it_event = ie;
				ithread_update(it);
				wakeup(ie);
			}
		}
	}
	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}
#endif

/*
 * Append a description preceded by a ':' to the name of the specified
 * interrupt handler.
 */
int
intr_event_describe_handler(struct intr_event *ie, void *cookie,
    const char *descr)
{
	struct intr_handler *ih;
	size_t space;
	char *start;

	mtx_lock(&ie->ie_lock);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih == cookie)
			break;
	}
	if (ih == NULL) {
		mtx_unlock(&ie->ie_lock);
		panic("handler %p not found in interrupt event %p", cookie, ie);
	}
#endif
	ih = cookie;

	/*
	 * Look for an existing description by checking for an
	 * existing ":".  This assumes device names do not include
	 * colons.  If one is found, prepare to insert the new
	 * description at that point.  If one is not found, find the
	 * end of the name to use as the insertion point.
	 */
	start = index(ih->ih_name, ':');
	if (start == NULL)
		start = index(ih->ih_name, 0);

	/*
	 * See if there is enough remaining room in the string for the
	 * description + ":".  The "- 1" leaves room for the trailing
	 * '\0'.  The "+ 1" accounts for the colon.
	 */
	space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1;
	if (strlen(descr) + 1 > space) {
		mtx_unlock(&ie->ie_lock);
		return (ENOSPC);
	}

	/* Append a colon followed by the description. */
	*start = ':';
	strcpy(start + 1, descr);
	intr_event_update(ie);
	mtx_unlock(&ie->ie_lock);
	return (0);
}

/*
 * Return the ie_source field from the intr_event an intr_handler is
 * associated with.
 */
void *
intr_handler_source(void *cookie)
{
	struct intr_handler *ih;
	struct intr_event *ie;

	ih = (struct intr_handler *)cookie;
	if (ih == NULL)
		return (NULL);
	ie = ih->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    ih->ih_name));
	return (ie->ie_source);
}

/*
 * Sleep until an ithread finishes executing an interrupt handler.
 *
 * XXX Doesn't currently handle interrupt filters or fast interrupt
 * handlers.  This is intended for compatibility with linux drivers
 * only.  Do not use in BSD code.
 */
void
_intr_drain(int irq)
{
	struct mtx *mtx;
	struct intr_event *ie;
	struct intr_thread *ithd;
	struct thread *td;

	ie = intr_lookup(irq);
	if (ie == NULL)
		return;
	if (ie->ie_thread == NULL)
		return;
	ithd = ie->ie_thread;
	td = ithd->it_thread;
	thread_lock(td);
	mtx = td->td_lock;
	if (!TD_AWAITING_INTR(td)) {
		ithd->it_flags |= IT_WAIT;
		msleep_spin(ithd, mtx, "isync", 0);
	}
	mtx_unlock_spin(mtx);
	return;
}

#ifndef INTR_FILTER
int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
#ifdef INVARIANTS
	struct intr_handler *ih;
#endif
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ie->ie_lock);
	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
	    handler->ih_name, ie->ie_name);
ok:
#endif
	/*
	 * If there is no ithread, then just remove the handler and return.
	 * XXX: Note that an INTR_FAST handler might be running on another
	 * CPU!
	 */
	if (ie->ie_thread == NULL) {
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 *
	 * During a cold boot while cold is set, msleep() does not sleep,
	 * so we have to remove the handler here rather than letting the
	 * thread do it.
	 */
	thread_lock(ie->ie_thread->it_thread);
	if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 */
		ie->ie_thread->it_need = 1;
	} else
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
	thread_unlock(ie->ie_thread->it_thread);
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	intr_event_update(ie);
#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(8).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (!(ih->ih_flags & IH_FAST)) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}

static int
intr_event_schedule_thread(struct intr_event *ie)
{
	struct intr_entropy entropy;
	struct intr_thread *it;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
	    ie->ie_thread == NULL)
		return (EINVAL);

	ctd = curthread;
	it = ie->ie_thread;
	td = it->it_thread;
	p = td->td_proc;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
		    p->p_pid, td->td_name);
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest(&entropy, sizeof(entropy), 2, 0,
		    RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 */
	it->it_need = 1;
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
	}
	thread_unlock(td);

	return (0);
}
#else
int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
	struct intr_thread *it;
#ifdef INVARIANTS
	struct intr_handler *ih;
#endif
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ie->ie_lock);
	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
	    handler->ih_name, ie->ie_name);
ok:
#endif
	/*
	 * If there are no ithreads (per event and per handler), then
	 * just remove the handler and return.
	 * XXX: Note that an INTR_FAST handler might be running on another CPU!
	 */
	if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/* Private or global ithread? */
	it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;
	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 *
	 * During a cold boot while cold is set, msleep() does not sleep,
	 * so we have to remove the handler here rather than letting the
	 * thread do it.
	 */
	thread_lock(it->it_thread);
	if (!TD_AWAITING_INTR(it->it_thread) && !cold) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 */
		it->it_need = 1;
	} else
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
	thread_unlock(it->it_thread);
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	/*
	 * At this point, the handler has been disconnected from the event,
	 * so we can kill the private ithread if any.
	 */
	if (handler->ih_thread) {
		ithread_destroy(handler->ih_thread);
		handler->ih_thread = NULL;
	}
	intr_event_update(ie);
#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(8).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (handler != NULL) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}

static int
intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
{
	struct intr_entropy entropy;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
		return (EINVAL);

	ctd = curthread;
	td = it->it_thread;
	p = td->td_proc;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
		    p->p_pid, td->td_name);
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest(&entropy, sizeof(entropy), 2, 0,
		    RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 */
	it->it_need = 1;
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
	}
	thread_unlock(td);

	return (0);
}
#endif

/*
 * Allow interrupt event binding for software interrupt handlers -- a no-op,
 * since interrupts are generated in software rather than being directed by
 * a PIC.
 */
static int
swi_assign_cpu(void *arg, u_char cpu)
{

	return (0);
}

/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
    void *arg, int pri, enum intr_type flags, void **cookiep)
{
	struct thread *td;
	struct intr_event *ie;
	int error;

	if (flags & INTR_ENTROPY)
		return (EINVAL);

	ie = (eventp != NULL) ? *eventp : NULL;

	if (ie != NULL) {
		if (!(ie->ie_flags & IE_SOFT))
			return (EINVAL);
	} else {
		error = intr_event_create(&ie, NULL, IE_SOFT, 0,
		    NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
		if (error)
			return (error);
		if (eventp != NULL)
			*eventp = ie;
	}
	error = intr_event_add_handler(ie, name, NULL, handler, arg,
	    PI_SWI(pri), flags, cookiep);
	if (error)
		return (error);
	if (pri == SWI_CLOCK) {
		td = ie->ie_thread->it_thread;
		thread_lock(td);
		td->td_flags |= TDF_NOLOAD;
		thread_unlock(td);
	}
	return (0);
}

/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
	struct intr_handler *ih = (struct intr_handler *)cookie;
	struct intr_event *ie = ih->ih_event;
	int error;

	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
	    ih->ih_need);

	/*
	 * Set ih_need for this handler so that if the ithread is already
	 * running it will execute this handler on the next pass.  Otherwise,
	 * it will execute it the next time it runs.
	 */
	atomic_store_rel_int(&ih->ih_need, 1);

	if (!(flags & SWI_DELAY)) {
		PCPU_INC(cnt.v_soft);
#ifdef INTR_FILTER
		error = intr_event_schedule_thread(ie, ie->ie_thread);
#else
		error = intr_event_schedule_thread(ie);
#endif
		KASSERT(error == 0, ("stray software interrupt"));
	}
}

/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

	return (intr_event_remove_handler(cookie));
}

#ifdef INTR_FILTER
static void
priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih)
{
	struct intr_event *ie;

	ie = ih->ih_event;
	/*
	 * If this handler is marked for death, remove it from
	 * the list of handlers and wake up the sleeper.
	 */
	if (ih->ih_flags & IH_DEAD) {
		mtx_lock(&ie->ie_lock);
		TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
		ih->ih_flags &= ~IH_DEAD;
		wakeup(ih);
		mtx_unlock(&ie->ie_lock);
		return;
	}

	/* Execute this handler. */
	CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
	    __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument,
	    ih->ih_name, ih->ih_flags);

	if (!(ih->ih_flags & IH_MPSAFE))
		mtx_lock(&Giant);
	ih->ih_handler(ih->ih_argument);
	if (!(ih->ih_flags & IH_MPSAFE))
		mtx_unlock(&Giant);
}
#endif

/*
 * This is a public function for use by drivers that mux interrupt
 * handlers for child devices from their interrupt handler.
 */
void
intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
{
	struct intr_handler *ih, *ihn;

	TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
		/*
		 * If this handler is marked for death, remove it from
		 * the list of handlers and wake up the sleeper.
		 */
		if (ih->ih_flags & IH_DEAD) {
			mtx_lock(&ie->ie_lock);
			TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
			ih->ih_flags &= ~IH_DEAD;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
			continue;
		}

		/* Skip filter only handlers */
		if (ih->ih_handler == NULL)
			continue;

		/*
		 * For software interrupt threads, we only execute
		 * handlers that have their need flag set.  Hardware
		 * interrupt threads always invoke all of their handlers.
		 */
		if (ie->ie_flags & IE_SOFT) {
			if (!ih->ih_need)
				continue;
			else
				atomic_store_rel_int(&ih->ih_need, 0);
		}

		/* Execute this handler. */
		CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
		    __func__, p->p_pid, (void *)ih->ih_handler,
		    ih->ih_argument, ih->ih_name, ih->ih_flags);

		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_lock(&Giant);
		ih->ih_handler(ih->ih_argument);
		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_unlock(&Giant);
	}
}

static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{

	/* Interrupt handlers should not sleep. */
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_NO_SLEEPING();
	intr_event_execute_handlers(p, ie);
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_SLEEPING_OK();

	/*
	 * Interrupt storm handling:
	 *
	 * If this interrupt source is currently storming, then throttle
	 * it to only fire the handler once per clock tick.
	 *
	 * If this interrupt source is not currently storming, but the
	 * number of back to back interrupts exceeds the storm threshold,
	 * then enter storming mode.
	 */
	if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
	    !(ie->ie_flags & IE_SOFT)) {
		/* Report the message only once every second. */
		if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
			printf(
	"interrupt storm detected on \"%s\"; throttling interrupt source\n",
			    ie->ie_name);
		}
		pause("istorm", 1);
	} else
		ie->ie_count++;

	/*
	 * Now that all the handlers have had a chance to run, reenable
	 * the interrupt source.
	 */
	if (ie->ie_post_ithread != NULL)
		ie->ie_post_ithread(ie->ie_source);
}

#ifndef INTR_FILTER
/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	int wake;

	td = curthread;
	p = td->td_proc;
	ithd = (struct intr_thread *)arg;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;
	wake = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 */
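		/*
		 * Note that it_need is cleared before the handlers run, so
		 * an interrupt arriving mid-pass sets it again and forces
		 * one more trip through this loop.
		 */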
		while (ithd->it_need) {
			/*
			 * This might need a full read and write barrier
			 * to make sure that this write posts before any
			 * of the memory or device accesses in the
			 * handlers.
			 */
			atomic_store_rel_int(&ithd->it_need, 0);
			ithread_execute_handlers(p, ie);
		}
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (!ithd->it_need && !(ithd->it_flags & (IT_DEAD | IT_WAIT))) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL | SWT_IWAIT, NULL);
		}
		if (ithd->it_flags & IT_WAIT) {
			wake = 1;
			ithd->it_flags &= ~IT_WAIT;
		}
		thread_unlock(td);
		if (wake) {
			wakeup(ithd);
			wake = 0;
		}
	}
}

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:                      the event connected to this interrupt.
 * o frame:                   some archs (e.g. i386) pass a frame to some
 *                            handlers as their main argument.
 * Return value:
 * o 0:                       everything ok.
 * o EINVAL:                  stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_handler *ih;
	struct trapframe *oldframe;
	struct thread *td;
	int error, ret, thread;

	td = curthread;

	/* An interrupt with no event or handlers is a stray interrupt. */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	/*
	 * Execute fast interrupt handlers directly.
	 * To support clock handlers, if a handler registers
	 * with a NULL argument, then we pass it a pointer to
	 * a trapframe as its argument.
	 */
	td->td_intr_nesting_level++;
	thread = 0;
	ret = 0;
	critical_enter();
	oldframe = td->td_intr_frame;
	td->td_intr_frame = frame;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih->ih_filter == NULL) {
			thread = 1;
			continue;
		}
		CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
		    ih->ih_filter, ih->ih_argument == NULL ? frame :
		    ih->ih_argument, ih->ih_name);
		if (ih->ih_argument == NULL)
			ret = ih->ih_filter(frame);
		else
			ret = ih->ih_filter(ih->ih_argument);
		KASSERT(ret == FILTER_STRAY ||
		    ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
		    (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
		    ("%s: incorrect return value %#x from %s", __func__, ret,
		    ih->ih_name));

		/*
		 * Wrapper handler special handling:
		 *
		 * in some particular cases (like pccard and pccbb),
		 * the _real_ device handler is wrapped in a couple of
		 * functions - a filter wrapper and an ithread wrapper.
		 * In this case (and just in this case), the filter wrapper
		 * could ask the system to schedule the ithread and mask
		 * the interrupt source if the wrapped handler is composed
		 * of just an ithread handler.
1440 * 1441 * TODO: write a generic wrapper to avoid people rolling 1442 * their own 1443 */ 1444 if (!thread) { 1445 if (ret == FILTER_SCHEDULE_THREAD) 1446 thread = 1; 1447 } 1448 } 1449 td->td_intr_frame = oldframe; 1450 1451 if (thread) { 1452 if (ie->ie_pre_ithread != NULL) 1453 ie->ie_pre_ithread(ie->ie_source); 1454 } else { 1455 if (ie->ie_post_filter != NULL) 1456 ie->ie_post_filter(ie->ie_source); 1457 } 1458 1459 /* Schedule the ithread if needed. */ 1460 if (thread) { 1461 error = intr_event_schedule_thread(ie); 1462 #ifndef XEN 1463 KASSERT(error == 0, ("bad stray interrupt")); 1464 #else 1465 if (error != 0) 1466 log(LOG_WARNING, "bad stray interrupt"); 1467 #endif 1468 } 1469 critical_exit(); 1470 td->td_intr_nesting_level--; 1471 return (0); 1472 } 1473 #else 1474 /* 1475 * This is the main code for interrupt threads. 1476 */ 1477 static void 1478 ithread_loop(void *arg) 1479 { 1480 struct intr_thread *ithd; 1481 struct intr_handler *ih; 1482 struct intr_event *ie; 1483 struct thread *td; 1484 struct proc *p; 1485 int priv; 1486 int wake; 1487 1488 td = curthread; 1489 p = td->td_proc; 1490 ih = (struct intr_handler *)arg; 1491 priv = (ih->ih_thread != NULL) ? 1 : 0; 1492 ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread; 1493 KASSERT(ithd->it_thread == td, 1494 ("%s: ithread and proc linkage out of sync", __func__)); 1495 ie = ithd->it_event; 1496 ie->ie_count = 0; 1497 wake = 0; 1498 1499 /* 1500 * As long as we have interrupts outstanding, go through the 1501 * list of handlers, giving each one a go at it. 1502 */ 1503 for (;;) { 1504 /* 1505 * If we are an orphaned thread, then just die. 1506 */ 1507 if (ithd->it_flags & IT_DEAD) { 1508 CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__, 1509 p->p_pid, td->td_name); 1510 free(ithd, M_ITHREAD); 1511 kthread_exit(); 1512 } 1513 1514 /* 1515 * Service interrupts. If another interrupt arrives while 1516 * we are running, it will set it_need to note that we 1517 * should make another pass. 1518 */ 1519 while (ithd->it_need) { 1520 /* 1521 * This might need a full read and write barrier 1522 * to make sure that this write posts before any 1523 * of the memory or device accesses in the 1524 * handlers. 1525 */ 1526 atomic_store_rel_int(&ithd->it_need, 0); 1527 if (priv) 1528 priv_ithread_execute_handler(p, ih); 1529 else 1530 ithread_execute_handlers(p, ie); 1531 } 1532 WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread"); 1533 mtx_assert(&Giant, MA_NOTOWNED); 1534 1535 /* 1536 * Processed all our interrupts. Now get the sched 1537 * lock. This may take a while and it_need may get 1538 * set again, so we have to check it again. 1539 */ 1540 thread_lock(td); 1541 if (!ithd->it_need && !(ithd->it_flags & (IT_DEAD | IT_WAIT))) { 1542 TD_SET_IWAIT(td); 1543 ie->ie_count = 0; 1544 mi_switch(SW_VOL | SWT_IWAIT, NULL); 1545 } 1546 if (ithd->it_flags & IT_WAIT) { 1547 wake = 1; 1548 ithd->it_flags &= ~IT_WAIT; 1549 } 1550 thread_unlock(td); 1551 if (wake) { 1552 wakeup(ithd); 1553 wake = 0; 1554 } 1555 } 1556 } 1557 1558 /* 1559 * Main loop for interrupt filter. 1560 * 1561 * Some architectures (i386, amd64 and arm) require the optional frame 1562 * parameter, and use it as the main argument for fast handler execution 1563 * when ih_argument == NULL. 1564 * 1565 * Return value: 1566 * o FILTER_STRAY: No filter recognized the event, and no 1567 * filter-less handler is registered on this 1568 * line. 1569 * o FILTER_HANDLED: A filter claimed the event and served it. 
 * o FILTER_SCHEDULE_THREAD:    No filter claimed the event, but there's at
 *                              least one filter-less handler on this line.
 * o FILTER_HANDLED |
 *   FILTER_SCHEDULE_THREAD:    A filter claimed the event, and asked for
 *                              scheduling the per-handler ithread.
 *
 * In case an ithread has to be scheduled, in *ithd there will be a
 * pointer to a struct intr_thread containing the thread to be
 * scheduled.
 */

static int
intr_filter_loop(struct intr_event *ie, struct trapframe *frame,
    struct intr_thread **ithd)
{
	struct intr_handler *ih;
	void *arg;
	int ret, thread_only;

	ret = 0;
	thread_only = 0;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		/*
		 * Execute fast interrupt handlers directly.
		 * To support clock handlers, if a handler registers
		 * with a NULL argument, then we pass it a pointer to
		 * a trapframe as its argument.
		 */
		arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument);

		CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__,
		    ih->ih_filter, ih->ih_handler, arg, ih->ih_name);

		if (ih->ih_filter != NULL)
			ret = ih->ih_filter(arg);
		else {
			thread_only = 1;
			continue;
		}
		KASSERT(ret == FILTER_STRAY ||
		    ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
		    (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
		    ("%s: incorrect return value %#x from %s", __func__, ret,
		    ih->ih_name));
		if (ret & FILTER_STRAY)
			continue;
		else {
			*ithd = ih->ih_thread;
			return (ret);
		}
	}

	/*
	 * No filters handled the interrupt and we have at least
	 * one handler without a filter.  In this case, we schedule
	 * all of the filter-less handlers to run in the ithread.
	 */
	if (thread_only) {
		*ithd = ie->ie_thread;
		return (FILTER_SCHEDULE_THREAD);
	}
	return (FILTER_STRAY);
}

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:                      the event connected to this interrupt.
 * o frame:                   some archs (e.g. i386) pass a frame to some
 *                            handlers as their main argument.
 * Return value:
 * o 0:                       everything ok.
 * o EINVAL:                  stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_thread *ithd;
	struct trapframe *oldframe;
	struct thread *td;
	int thread;

	ithd = NULL;
	td = curthread;

	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	td->td_intr_nesting_level++;
	thread = 0;
	critical_enter();
	oldframe = td->td_intr_frame;
	td->td_intr_frame = frame;
	thread = intr_filter_loop(ie, frame, &ithd);
	if (thread & FILTER_HANDLED) {
		if (ie->ie_post_filter != NULL)
			ie->ie_post_filter(ie->ie_source);
	} else {
		if (ie->ie_pre_ithread != NULL)
			ie->ie_pre_ithread(ie->ie_source);
	}
	td->td_intr_frame = oldframe;
	critical_exit();

	/* Interrupt storm logic */
	if (thread & FILTER_STRAY) {
		ie->ie_count++;
		if (ie->ie_count < intr_storm_threshold)
			printf("Interrupt stray detection not present\n");
	}

	/* Schedule an ithread if needed. */
	if (thread & FILTER_SCHEDULE_THREAD) {
		if (intr_event_schedule_thread(ie, ithd) != 0)
			panic("%s: impossible stray interrupt", __func__);
	}
	td->td_intr_nesting_level--;
	return (0);
}
#endif

#ifdef DDB
/*
 * Dump details about an interrupt handler
 */
static void
db_dump_intrhand(struct intr_handler *ih)
{
	int comma;

	db_printf("\t%-10s ", ih->ih_name);
	switch (ih->ih_pri) {
	case PI_REALTIME:
		db_printf("CLK ");
		break;
	case PI_AV:
		db_printf("AV  ");
		break;
	case PI_TTY:
		db_printf("TTY ");
		break;
	case PI_NET:
		db_printf("NET ");
		break;
	case PI_DISK:
		db_printf("DISK");
		break;
	case PI_DULL:
		db_printf("DULL");
		break;
	default:
		if (ih->ih_pri >= PI_SOFT)
			db_printf("SWI ");
		else
			db_printf("%4u", ih->ih_pri);
		break;
	}
	db_printf(" ");
	db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
	db_printf("(%p)", ih->ih_argument);
	if (ih->ih_need ||
	    (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
	    IH_MPSAFE)) != 0) {
		db_printf(" {");
		comma = 0;
		if (ih->ih_flags & IH_EXCLUSIVE) {
			if (comma)
				db_printf(", ");
			db_printf("EXCL");
			comma = 1;
		}
		if (ih->ih_flags & IH_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ih->ih_flags & IH_DEAD) {
			if (comma)
				db_printf(", ");
			db_printf("DEAD");
			comma = 1;
		}
		if (ih->ih_flags & IH_MPSAFE) {
			if (comma)
				db_printf(", ");
			db_printf("MPSAFE");
			comma = 1;
		}
		if (ih->ih_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");
}

/*
 * Dump details about an event.
 */
void
db_dump_intr_event(struct intr_event *ie, int handlers)
{
	struct intr_handler *ih;
	struct intr_thread *it;
	int comma;

	db_printf("%s ", ie->ie_fullname);
	it = ie->ie_thread;
	if (it != NULL)
		db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
	else
		db_printf("(no thread)");
	if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
	    (it != NULL && it->it_need)) {
		db_printf(" {");
		comma = 0;
		if (ie->ie_flags & IE_SOFT) {
			db_printf("SOFT");
			comma = 1;
		}
		if (ie->ie_flags & IE_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ie->ie_flags & IE_ADDING_THREAD) {
			if (comma)
				db_printf(", ");
			db_printf("ADDING_THREAD");
			comma = 1;
		}
		if (it != NULL && it->it_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");

	if (handlers)
		TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
			db_dump_intrhand(ih);
}

/*
 * Dump data about interrupt handlers
 */
DB_SHOW_COMMAND(intr, db_show_intr)
{
	struct intr_event *ie;
	int all, verbose;

	verbose = index(modif, 'v') != NULL;
	all = index(modif, 'a') != NULL;
	TAILQ_FOREACH(ie, &event_list, ie_list) {
		if (!all && TAILQ_EMPTY(&ie->ie_handlers))
			continue;
		db_dump_intr_event(ie, verbose);
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */

/*
 * Start standard software interrupt threads
 */
static void
start_softintr(void *dummy)
{

	if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
		panic("died while creating vm swi ithread");
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
    NULL);

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however is
 * machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrnames, eintrnames - intrnames,
	    req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrcnt,
	    (char *)eintrcnt - (char *)intrcnt, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");

#ifdef DDB
/*
 * DDB command to dump the interrupt statistics.
 */
DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
{
	u_long *i;
	char *cp;

	cp = intrnames;
	for (i = intrcnt; i != eintrcnt && !db_pager_quit; i++) {
		if (*cp == '\0')
			break;
		if (*i != 0)
			db_printf("%s\t%lu\n", cp, *i);
		cp += strlen(cp) + 1;
	}
}
#endif