/*-
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
	struct intr_event *it_event;
	struct thread *it_thread;	/* Kernel thread. */
	int	it_flags;		/* (j) IT_* flags. */
	int	it_need;		/* Needs service. */
};

/* Interrupt thread flags kept in it_flags */
#define	IT_DEAD		0x000001	/* Thread is waiting to exit. */
#define	IT_WAIT		0x000002	/* Thread is waiting for completion. */

struct intr_entropy {
	struct thread *td;
	uintptr_t event;
};

struct intr_event *clk_intr_event;
struct intr_event *tty_intr_event;
void	*vm_ih;
struct proc *intrproc;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static int intr_storm_threshold = 1000;
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RWTUN,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);
static struct mtx event_lock;
MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);

static void	intr_event_update(struct intr_event *ie);
#ifdef INTR_FILTER
static int	intr_event_schedule_thread(struct intr_event *ie,
		    struct intr_thread *ithd);
static int	intr_filter_loop(struct intr_event *ie,
		    struct trapframe *frame, struct intr_thread **ithd);
static struct intr_thread *ithread_create(const char *name,
		    struct intr_handler *ih);
#else
static int	intr_event_schedule_thread(struct intr_event *ie);
static struct intr_thread *ithread_create(const char *name);
#endif
static void	ithread_destroy(struct intr_thread *ithread);
static void	ithread_execute_handlers(struct proc *p,
		    struct intr_event *ie);
#ifdef INTR_FILTER
static void	priv_ithread_execute_handler(struct proc *p,
		    struct intr_handler *ih);
#endif
static void	ithread_loop(void *);
static void	ithread_update(struct intr_thread *ithd);
static void	start_softintr(void *);

/* Map an interrupt type to an ithread priority. */
u_char
intr_priority(enum intr_type flags)
{
	u_char pri;

	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
	switch (flags) {
	case INTR_TYPE_TTY:
		pri = PI_TTY;
		break;
	case INTR_TYPE_BIO:
		pri = PI_DISK;
		break;
	case INTR_TYPE_NET:
		pri = PI_NET;
		break;
	case INTR_TYPE_CAM:
		pri = PI_DISK;
		break;
	case INTR_TYPE_AV:
		pri = PI_AV;
		break;
	case INTR_TYPE_CLK:
		pri = PI_REALTIME;
		break;
	case INTR_TYPE_MISC:
		pri = PI_DULL;	/* don't care */
		break;
	default:
		/* We didn't specify an interrupt level. */
		panic("intr_priority: no interrupt type in flags");
	}

	return (pri);
}
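
/*
 * Usage note (editor's addition, illustrative only): callers normally pass
 * exactly one of the INTR_TYPE_* bits from the bus_setup_intr(9) flags,
 * e.g. intr_priority(INTR_TYPE_NET) returns PI_NET.  Passing zero type
 * bits, or more than one, falls through to the panic above.
 */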

/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
	struct intr_event *ie;
	struct thread *td;
	u_char pri;

	ie = ithd->it_event;
	td = ithd->it_thread;

	/* Determine the overall priority of this event. */
	if (TAILQ_EMPTY(&ie->ie_handlers))
		pri = PRI_MAX_ITHD;
	else
		pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;

	/* Update name and priority. */
	strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
#ifdef KTR
	sched_clear_tdname(td);
#endif
	thread_lock(td);
	sched_prio(td, pri);
	thread_unlock(td);
}

/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
	struct intr_handler *ih;
	char *last;
	int missed, space;

	/* Start off with no entropy and just the name of the event. */
	mtx_assert(&ie->ie_lock, MA_OWNED);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	ie->ie_flags &= ~IE_ENTROPY;
	missed = 0;
	space = 1;

	/* Run through all the handlers updating values. */
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
		    sizeof(ie->ie_fullname)) {
			strcat(ie->ie_fullname, " ");
			strcat(ie->ie_fullname, ih->ih_name);
			space = 0;
		} else
			missed++;
		if (ih->ih_flags & IH_ENTROPY)
			ie->ie_flags |= IE_ENTROPY;
	}

	/*
	 * If the handler names were too long, add +'s to indicate missing
	 * names.  If we run out of room and still have +'s to add, change
	 * the last character from a + to a *.
	 */
	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
	while (missed-- > 0) {
		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
			if (*last == '+') {
				*last = '*';
				break;
			} else
				*last = '+';
		} else if (space) {
			strcat(ie->ie_fullname, " +");
			space = 0;
		} else
			strcat(ie->ie_fullname, "+");
	}

	/*
	 * If this event has an ithread, update its priority and
	 * name.
	 */
	if (ie->ie_thread != NULL)
		ithread_update(ie->ie_thread);
	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}

int
intr_event_create(struct intr_event **event, void *source, int flags, int irq,
    void (*pre_ithread)(void *), void (*post_ithread)(void *),
    void (*post_filter)(void *), int (*assign_cpu)(void *, u_char),
    const char *fmt, ...)
{
	struct intr_event *ie;
	va_list ap;

	/* The only valid flag during creation is IE_SOFT. */
	if ((flags & ~IE_SOFT) != 0)
		return (EINVAL);
	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
	ie->ie_source = source;
	ie->ie_pre_ithread = pre_ithread;
	ie->ie_post_ithread = post_ithread;
	ie->ie_post_filter = post_filter;
	ie->ie_assign_cpu = assign_cpu;
	ie->ie_flags = flags;
	ie->ie_irq = irq;
	ie->ie_cpu = NOCPU;
	TAILQ_INIT(&ie->ie_handlers);
	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

	va_start(ap, fmt);
	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
	va_end(ap);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	mtx_lock(&event_lock);
	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
	mtx_unlock(&event_lock);
	if (event != NULL)
		*event = ie;
	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
	return (0);
}
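
/*
 * Example (editor's addition; the mydev_* callbacks and softc are
 * hypothetical): an interrupt controller driver might create an event for
 * a hardware IRQ like this, supplying its own mask/unmask callbacks:
 *
 *	error = intr_event_create(&ie, sc, 0, irq,
 *	    mydev_pre_ithread, mydev_post_ithread, mydev_post_filter,
 *	    NULL, "irq%d:", irq);
 *
 * Passing NULL for assign_cpu means intr_event_bind() will return
 * EOPNOTSUPP for this event.
 */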

/*
 * Bind an interrupt event to the specified CPU.  Note that not all
 * platforms support binding an interrupt to a CPU.  For those
 * platforms this request will fail.  For supported platforms, any
 * associated ithreads as well as the primary interrupt context will
 * be bound to the specified CPU.  Using a cpu id of NOCPU unbinds
 * the interrupt event.
 */
int
intr_event_bind(struct intr_event *ie, u_char cpu)
{
	lwpid_t id;
	int error;

	/* Need a CPU to bind to. */
	if (cpu != NOCPU && CPU_ABSENT(cpu))
		return (EINVAL);

	if (ie->ie_assign_cpu == NULL)
		return (EOPNOTSUPP);

	error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
	if (error)
		return (error);

	/*
	 * If we have any ithreads try to set their mask first to verify
	 * permissions, etc.
	 */
	mtx_lock(&ie->ie_lock);
	if (ie->ie_thread != NULL) {
		id = ie->ie_thread->it_thread->td_tid;
		mtx_unlock(&ie->ie_lock);
		error = cpuset_setithread(id, cpu);
		if (error)
			return (error);
	} else
		mtx_unlock(&ie->ie_lock);
	error = ie->ie_assign_cpu(ie->ie_source, cpu);
	if (error) {
		mtx_lock(&ie->ie_lock);
		if (ie->ie_thread != NULL) {
			cpu = ie->ie_cpu;
			id = ie->ie_thread->it_thread->td_tid;
			mtx_unlock(&ie->ie_lock);
			(void)cpuset_setithread(id, cpu);
		} else
			mtx_unlock(&ie->ie_lock);
		return (error);
	}

	mtx_lock(&ie->ie_lock);
	ie->ie_cpu = cpu;
	mtx_unlock(&ie->ie_lock);

	return (error);
}
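
/*
 * Usage note (editor's addition, illustrative only): intr_event_bind(ie, 2)
 * binds the event and any associated ithread to CPU 2, while
 * intr_event_bind(ie, NOCPU) removes the binding again, as described in
 * the comment above.
 */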

static struct intr_event *
intr_lookup(int irq)
{
	struct intr_event *ie;

	mtx_lock(&event_lock);
	TAILQ_FOREACH(ie, &event_list, ie_list)
		if (ie->ie_irq == irq &&
		    (ie->ie_flags & IE_SOFT) == 0 &&
		    TAILQ_FIRST(&ie->ie_handlers) != NULL)
			break;
	mtx_unlock(&event_lock);
	return (ie);
}

int
intr_setaffinity(int irq, void *m)
{
	struct intr_event *ie;
	cpuset_t *mask;
	u_char cpu;
	int n;

	mask = m;
	cpu = NOCPU;
	/*
	 * If we're setting all cpus we can unbind.  Otherwise make sure
	 * only one cpu is in the set.
	 */
	if (CPU_CMP(cpuset_root, mask)) {
		for (n = 0; n < CPU_SETSIZE; n++) {
			if (!CPU_ISSET(n, mask))
				continue;
			if (cpu != NOCPU)
				return (EINVAL);
			cpu = (u_char)n;
		}
	}
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);
	return (intr_event_bind(ie, cpu));
}

int
intr_getaffinity(int irq, void *m)
{
	struct intr_event *ie;
	cpuset_t *mask;

	mask = m;
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);
	CPU_ZERO(mask);
	mtx_lock(&ie->ie_lock);
	if (ie->ie_cpu == NOCPU)
		CPU_COPY(cpuset_root, mask);
	else
		CPU_SET(ie->ie_cpu, mask);
	mtx_unlock(&ie->ie_lock);
	return (0);
}

int
intr_event_destroy(struct intr_event *ie)
{

	mtx_lock(&event_lock);
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		mtx_unlock(&ie->ie_lock);
		mtx_unlock(&event_lock);
		return (EBUSY);
	}
	TAILQ_REMOVE(&event_list, ie, ie_list);
#ifndef notyet
	if (ie->ie_thread != NULL) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	mtx_unlock(&event_lock);
	mtx_destroy(&ie->ie_lock);
	free(ie, M_ITHREAD);
	return (0);
}

#ifndef INTR_FILTER
static struct intr_thread *
ithread_create(const char *name)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
	    &td, RFSTOPPED | RFHIGHPID,
	    0, "intr", "%s", name);
	if (error)
		panic("kproc_kthread_add() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}
#else
static struct intr_thread *
ithread_create(const char *name, struct intr_handler *ih)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ih, &intrproc,
	    &td, RFSTOPPED | RFHIGHPID,
	    0, "intr", "%s", name);
	if (error)
		panic("kproc_kthread_add() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}
#endif

static void
ithread_destroy(struct intr_thread *ithread)
{
	struct thread *td;

	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
	td = ithread->it_thread;
	thread_lock(td);
	ithread->it_flags |= IT_DEAD;
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	}
	thread_unlock(td);
}

#ifndef INTR_FILTER
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Create a thread if we need one. */
	while (ie->ie_thread == NULL && handler != NULL) {
		if (ie->ie_flags & IE_ADDING_THREAD)
			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
		else {
			ie->ie_flags |= IE_ADDING_THREAD;
			mtx_unlock(&ie->ie_lock);
			it = ithread_create("intr: newborn");
			mtx_lock(&ie->ie_lock);
			ie->ie_flags &= ~IE_ADDING_THREAD;
			ie->ie_thread = it;
			it->it_event = ie;
			ithread_update(it);
			wakeup(ie);
		}
	}

	/* Add the new handler to the event in priority order. */
	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	intr_event_update(ie);

	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}
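
/*
 * Example (editor's addition; the mydev_* names are hypothetical): a
 * typical driver registers a threaded handler through this function,
 * usually indirectly via bus_setup_intr(9):
 *
 *	error = intr_event_add_handler(ie, "mydev", NULL, mydev_intr,
 *	    sc, intr_priority(INTR_TYPE_NET), INTR_TYPE_NET | INTR_MPSAFE,
 *	    &cookie);
 *
 * A NULL filter with a non-NULL handler runs the handler in the event's
 * ithread; supplying both runs the filter in primary interrupt context
 * first and the handler in the ithread only if the filter asks for it.
 */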
#else
int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* For filtered handlers, create a private ithread to run on. */
	if (filter != NULL && handler != NULL) {
		mtx_unlock(&ie->ie_lock);
		it = ithread_create("intr: newborn", ih);
		mtx_lock(&ie->ie_lock);
		it->it_event = ie;
		ih->ih_thread = it;
		ithread_update(it);	/* XXX - do we really need this?!?!? */
	} else {	/* Create the global per-event thread if we need one. */
		while (ie->ie_thread == NULL && handler != NULL) {
			if (ie->ie_flags & IE_ADDING_THREAD)
				msleep(ie, &ie->ie_lock, 0, "ithread", 0);
			else {
				ie->ie_flags |= IE_ADDING_THREAD;
				mtx_unlock(&ie->ie_lock);
				it = ithread_create("intr: newborn", ih);
				mtx_lock(&ie->ie_lock);
				ie->ie_flags &= ~IE_ADDING_THREAD;
				ie->ie_thread = it;
				it->it_event = ie;
				ithread_update(it);
				wakeup(ie);
			}
		}
	}

	/* Add the new handler to the event in priority order. */
	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	intr_event_update(ie);

	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}
#endif

/*
 * Append a description preceded by a ':' to the name of the specified
 * interrupt handler.
 */
int
intr_event_describe_handler(struct intr_event *ie, void *cookie,
    const char *descr)
{
	struct intr_handler *ih;
	size_t space;
	char *start;

	mtx_lock(&ie->ie_lock);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih == cookie)
			break;
	}
	if (ih == NULL) {
		mtx_unlock(&ie->ie_lock);
		panic("handler %p not found in interrupt event %p", cookie, ie);
	}
#endif
	ih = cookie;

	/*
	 * Look for an existing description by checking for an
	 * existing ":".  This assumes device names do not include
	 * colons.  If one is found, prepare to insert the new
	 * description at that point.  If one is not found, find the
	 * end of the name to use as the insertion point.
	 */
	start = strchr(ih->ih_name, ':');
	if (start == NULL)
		start = strchr(ih->ih_name, 0);

	/*
	 * See if there is enough remaining room in the string for the
	 * description + ":".  The "- 1" leaves room for the trailing
	 * '\0'.  The "+ 1" accounts for the colon.
	 */
	space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1;
	if (strlen(descr) + 1 > space) {
		mtx_unlock(&ie->ie_lock);
		return (ENOSPC);
	}

	/* Append a colon followed by the description. */
	*start = ':';
	strcpy(start + 1, descr);
	intr_event_update(ie);
	mtx_unlock(&ie->ie_lock);
	return (0);
}
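
/*
 * Usage note (editor's addition, illustrative only): a driver with
 * multiple interrupt vectors might call
 * intr_event_describe_handler(ie, cookie, "rx0") so its handler shows up
 * as e.g. "mydev0:rx0" in tools that display interrupt names.
 */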

/*
 * Return the ie_source field from the intr_event an intr_handler is
 * associated with.
 */
void *
intr_handler_source(void *cookie)
{
	struct intr_handler *ih;
	struct intr_event *ie;

	ih = (struct intr_handler *)cookie;
	if (ih == NULL)
		return (NULL);
	ie = ih->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    ih->ih_name));
	return (ie->ie_source);
}

/*
 * Sleep until an ithread finishes executing an interrupt handler.
 *
 * XXX Doesn't currently handle interrupt filters or fast interrupt
 * handlers.  This is intended for compatibility with linux drivers
 * only.  Do not use in BSD code.
 */
void
_intr_drain(int irq)
{
	struct intr_event *ie;
	struct intr_thread *ithd;
	struct thread *td;

	ie = intr_lookup(irq);
	if (ie == NULL)
		return;
	if (ie->ie_thread == NULL)
		return;
	ithd = ie->ie_thread;
	td = ithd->it_thread;
	/*
	 * We set the flag and wait for it to be cleared to avoid
	 * long delays with potentially busy interrupt handlers
	 * were we to only sample TD_AWAITING_INTR() every tick.
	 */
	thread_lock(td);
	if (!TD_AWAITING_INTR(td)) {
		ithd->it_flags |= IT_WAIT;
		while (ithd->it_flags & IT_WAIT) {
			thread_unlock(td);
			pause("idrain", 1);
			thread_lock(td);
		}
	}
	thread_unlock(td);
	return;
}


#ifndef INTR_FILTER
int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
#ifdef INVARIANTS
	struct intr_handler *ih;
#endif
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ie->ie_lock);
	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
	    handler->ih_name, ie->ie_name);
ok:
#endif
	/*
	 * If there is no ithread, then just remove the handler and return.
	 * XXX: Note that an INTR_FAST handler might be running on another
	 * CPU!
	 */
	if (ie->ie_thread == NULL) {
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 *
	 * During a cold boot while cold is set, msleep() does not sleep,
	 * so we have to remove the handler here rather than letting the
	 * thread do it.
	 */
	thread_lock(ie->ie_thread->it_thread);
	if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 */
		atomic_store_rel_int(&ie->ie_thread->it_need, 1);
	} else
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
	thread_unlock(ie->ie_thread->it_thread);
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	intr_event_update(ie);
#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(4).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (!(ih->ih_flags & IH_FAST)) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}
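
/*
 * Editor's summary of the removal handshake above (descriptive only):
 * if the ithread is running, the remover sets IH_DEAD and it_need and
 * then msleep()s on the handler; the ithread notices IH_DEAD in
 * intr_event_execute_handlers(), unlinks the handler, clears the flag
 * and wakeup()s the sleeper, after which the remover frees the handler.
 */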

static int
intr_event_schedule_thread(struct intr_event *ie)
{
	struct intr_entropy entropy;
	struct intr_thread *it;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
	    ie->ie_thread == NULL)
		return (EINVAL);

	ctd = curthread;
	it = ie->ie_thread;
	td = it->it_thread;
	p = td->td_proc;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
		    p->p_pid, td->td_name);
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest(&entropy, sizeof(entropy), 2,
		    RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 */
	atomic_store_rel_int(&it->it_need, 1);
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
	}
	thread_unlock(td);

	return (0);
}
#else
int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
	struct intr_thread *it;
#ifdef INVARIANTS
	struct intr_handler *ih;
#endif
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ie->ie_lock);
	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
	    handler->ih_name, ie->ie_name);
ok:
#endif
	/*
	 * If there are no ithreads (per event and per handler), then
	 * just remove the handler and return.
	 * XXX: Note that an INTR_FAST handler might be running on another CPU!
	 */
	if (ie->ie_thread == NULL && handler->ih_thread == NULL) {
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/* Private or global ithread? */
	it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread;
	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 *
	 * During a cold boot while cold is set, msleep() does not sleep,
	 * so we have to remove the handler here rather than letting the
	 * thread do it.
	 */
	thread_lock(it->it_thread);
	if (!TD_AWAITING_INTR(it->it_thread) && !cold) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 */
		atomic_store_rel_int(&it->it_need, 1);
	} else
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
	thread_unlock(it->it_thread);
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	/*
	 * At this point, the handler has been disconnected from the event,
	 * so we can kill the private ithread if any.
	 */
	if (handler->ih_thread) {
		ithread_destroy(handler->ih_thread);
		handler->ih_thread = NULL;
	}
	intr_event_update(ie);
#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(4).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (!(ih->ih_flags & IH_FAST)) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}

static int
intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
{
	struct intr_entropy entropy;
	struct thread *td;
	struct thread *ctd;
	struct proc *p;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL)
		return (EINVAL);

	ctd = curthread;
	td = it->it_thread;
	p = td->td_proc;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
		CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
		    p->p_pid, td->td_name);
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest(&entropy, sizeof(entropy), 2,
		    RANDOM_INTERRUPT);
	}

	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 */
	atomic_store_rel_int(&it->it_need, 1);
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
	}
	thread_unlock(td);

	return (0);
}
#endif

/*
 * Allow interrupt event binding for software interrupt handlers -- a no-op,
 * since interrupts are generated in software rather than being directed by
 * a PIC.
 */
static int
swi_assign_cpu(void *arg, u_char cpu)
{

	return (0);
}

/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
    void *arg, int pri, enum intr_type flags, void **cookiep)
{
	struct intr_event *ie;
	int error;

	if (flags & INTR_ENTROPY)
		return (EINVAL);

	ie = (eventp != NULL) ? *eventp : NULL;

	if (ie != NULL) {
		if (!(ie->ie_flags & IE_SOFT))
			return (EINVAL);
	} else {
		error = intr_event_create(&ie, NULL, IE_SOFT, 0,
		    NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
		if (error)
			return (error);
		if (eventp != NULL)
			*eventp = ie;
	}
	error = intr_event_add_handler(ie, name, NULL, handler, arg,
	    PI_SWI(pri), flags, cookiep);
	return (error);
}
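
/*
 * Example (editor's addition; the mydev_* names are hypothetical):
 * registering and raising a soft interrupt:
 *
 *	static void *mydev_ih;
 *	...
 *	swi_add(NULL, "mydev", mydev_swi, sc, SWI_CLOCK, INTR_MPSAFE,
 *	    &mydev_ih);
 *	...
 *	swi_sched(mydev_ih, 0);
 *
 * Passing SWI_DELAY to swi_sched() below only marks the handler as
 * needing service without scheduling the ithread.
 */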
1183 */ 1184 if (ih->ih_flags & IH_DEAD) { 1185 mtx_lock(&ie->ie_lock); 1186 TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next); 1187 ih->ih_flags &= ~IH_DEAD; 1188 wakeup(ih); 1189 mtx_unlock(&ie->ie_lock); 1190 return; 1191 } 1192 1193 /* Execute this handler. */ 1194 CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x", 1195 __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument, 1196 ih->ih_name, ih->ih_flags); 1197 1198 if (!(ih->ih_flags & IH_MPSAFE)) 1199 mtx_lock(&Giant); 1200 ih->ih_handler(ih->ih_argument); 1201 if (!(ih->ih_flags & IH_MPSAFE)) 1202 mtx_unlock(&Giant); 1203 } 1204 #endif 1205 1206 /* 1207 * This is a public function for use by drivers that mux interrupt 1208 * handlers for child devices from their interrupt handler. 1209 */ 1210 void 1211 intr_event_execute_handlers(struct proc *p, struct intr_event *ie) 1212 { 1213 struct intr_handler *ih, *ihn; 1214 1215 TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) { 1216 /* 1217 * If this handler is marked for death, remove it from 1218 * the list of handlers and wake up the sleeper. 1219 */ 1220 if (ih->ih_flags & IH_DEAD) { 1221 mtx_lock(&ie->ie_lock); 1222 TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next); 1223 ih->ih_flags &= ~IH_DEAD; 1224 wakeup(ih); 1225 mtx_unlock(&ie->ie_lock); 1226 continue; 1227 } 1228 1229 /* Skip filter only handlers */ 1230 if (ih->ih_handler == NULL) 1231 continue; 1232 1233 /* 1234 * For software interrupt threads, we only execute 1235 * handlers that have their need flag set. Hardware 1236 * interrupt threads always invoke all of their handlers. 1237 */ 1238 if (ie->ie_flags & IE_SOFT) { 1239 if (atomic_load_acq_int(&ih->ih_need) == 0) 1240 continue; 1241 else 1242 atomic_store_rel_int(&ih->ih_need, 0); 1243 } 1244 1245 /* Execute this handler. */ 1246 CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x", 1247 __func__, p->p_pid, (void *)ih->ih_handler, 1248 ih->ih_argument, ih->ih_name, ih->ih_flags); 1249 1250 if (!(ih->ih_flags & IH_MPSAFE)) 1251 mtx_lock(&Giant); 1252 ih->ih_handler(ih->ih_argument); 1253 if (!(ih->ih_flags & IH_MPSAFE)) 1254 mtx_unlock(&Giant); 1255 } 1256 } 1257 1258 static void 1259 ithread_execute_handlers(struct proc *p, struct intr_event *ie) 1260 { 1261 1262 /* Interrupt handlers should not sleep. */ 1263 if (!(ie->ie_flags & IE_SOFT)) 1264 THREAD_NO_SLEEPING(); 1265 intr_event_execute_handlers(p, ie); 1266 if (!(ie->ie_flags & IE_SOFT)) 1267 THREAD_SLEEPING_OK(); 1268 1269 /* 1270 * Interrupt storm handling: 1271 * 1272 * If this interrupt source is currently storming, then throttle 1273 * it to only fire the handler once per clock tick. 1274 * 1275 * If this interrupt source is not currently storming, but the 1276 * number of back to back interrupts exceeds the storm threshold, 1277 * then enter storming mode. 1278 */ 1279 if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold && 1280 !(ie->ie_flags & IE_SOFT)) { 1281 /* Report the message only once every second. */ 1282 if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) { 1283 printf( 1284 "interrupt storm detected on \"%s\"; throttling interrupt source\n", 1285 ie->ie_name); 1286 } 1287 pause("istorm", 1); 1288 } else 1289 ie->ie_count++; 1290 1291 /* 1292 * Now that all the handlers have had a chance to run, reenable 1293 * the interrupt source. 1294 */ 1295 if (ie->ie_post_ithread != NULL) 1296 ie->ie_post_ithread(ie->ie_source); 1297 } 1298 1299 #ifndef INTR_FILTER 1300 /* 1301 * This is the main code for interrupt threads. 
1302 */ 1303 static void 1304 ithread_loop(void *arg) 1305 { 1306 struct intr_thread *ithd; 1307 struct intr_event *ie; 1308 struct thread *td; 1309 struct proc *p; 1310 int wake; 1311 1312 td = curthread; 1313 p = td->td_proc; 1314 ithd = (struct intr_thread *)arg; 1315 KASSERT(ithd->it_thread == td, 1316 ("%s: ithread and proc linkage out of sync", __func__)); 1317 ie = ithd->it_event; 1318 ie->ie_count = 0; 1319 wake = 0; 1320 1321 /* 1322 * As long as we have interrupts outstanding, go through the 1323 * list of handlers, giving each one a go at it. 1324 */ 1325 for (;;) { 1326 /* 1327 * If we are an orphaned thread, then just die. 1328 */ 1329 if (ithd->it_flags & IT_DEAD) { 1330 CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__, 1331 p->p_pid, td->td_name); 1332 free(ithd, M_ITHREAD); 1333 kthread_exit(); 1334 } 1335 1336 /* 1337 * Service interrupts. If another interrupt arrives while 1338 * we are running, it will set it_need to note that we 1339 * should make another pass. 1340 */ 1341 while (atomic_load_acq_int(&ithd->it_need) != 0) { 1342 /* 1343 * This might need a full read and write barrier 1344 * to make sure that this write posts before any 1345 * of the memory or device accesses in the 1346 * handlers. 1347 */ 1348 atomic_store_rel_int(&ithd->it_need, 0); 1349 ithread_execute_handlers(p, ie); 1350 } 1351 WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread"); 1352 mtx_assert(&Giant, MA_NOTOWNED); 1353 1354 /* 1355 * Processed all our interrupts. Now get the sched 1356 * lock. This may take a while and it_need may get 1357 * set again, so we have to check it again. 1358 */ 1359 thread_lock(td); 1360 if ((atomic_load_acq_int(&ithd->it_need) == 0) && 1361 !(ithd->it_flags & (IT_DEAD | IT_WAIT))) { 1362 TD_SET_IWAIT(td); 1363 ie->ie_count = 0; 1364 mi_switch(SW_VOL | SWT_IWAIT, NULL); 1365 } 1366 if (ithd->it_flags & IT_WAIT) { 1367 wake = 1; 1368 ithd->it_flags &= ~IT_WAIT; 1369 } 1370 thread_unlock(td); 1371 if (wake) { 1372 wakeup(ithd); 1373 wake = 0; 1374 } 1375 } 1376 } 1377 1378 /* 1379 * Main interrupt handling body. 1380 * 1381 * Input: 1382 * o ie: the event connected to this interrupt. 1383 * o frame: some archs (i.e. i386) pass a frame to some. 1384 * handlers as their main argument. 1385 * Return value: 1386 * o 0: everything ok. 1387 * o EINVAL: stray interrupt. 1388 */ 1389 int 1390 intr_event_handle(struct intr_event *ie, struct trapframe *frame) 1391 { 1392 struct intr_handler *ih; 1393 struct trapframe *oldframe; 1394 struct thread *td; 1395 int error, ret, thread; 1396 1397 td = curthread; 1398 1399 /* An interrupt with no event or handlers is a stray interrupt. */ 1400 if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers)) 1401 return (EINVAL); 1402 1403 /* 1404 * Execute fast interrupt handlers directly. 1405 * To support clock handlers, if a handler registers 1406 * with a NULL argument, then we pass it a pointer to 1407 * a trapframe as its argument. 1408 */ 1409 td->td_intr_nesting_level++; 1410 thread = 0; 1411 ret = 0; 1412 critical_enter(); 1413 oldframe = td->td_intr_frame; 1414 td->td_intr_frame = frame; 1415 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 1416 if (ih->ih_filter == NULL) { 1417 thread = 1; 1418 continue; 1419 } 1420 CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__, 1421 ih->ih_filter, ih->ih_argument == NULL ? 

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:	the event connected to this interrupt.
 * o frame:	some archs (e.g., i386) pass a frame to some
 *		handlers as their main argument.
 * Return value:
 * o 0:		everything ok.
 * o EINVAL:	stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_handler *ih;
	struct trapframe *oldframe;
	struct thread *td;
	int error, ret, thread;

	td = curthread;

	/* An interrupt with no event or handlers is a stray interrupt. */
	if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	/*
	 * Execute fast interrupt handlers directly.
	 * To support clock handlers, if a handler registers
	 * with a NULL argument, then we pass it a pointer to
	 * a trapframe as its argument.
	 */
	td->td_intr_nesting_level++;
	thread = 0;
	ret = 0;
	critical_enter();
	oldframe = td->td_intr_frame;
	td->td_intr_frame = frame;
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih->ih_filter == NULL) {
			thread = 1;
			continue;
		}
		CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
		    ih->ih_filter, ih->ih_argument == NULL ? frame :
		    ih->ih_argument, ih->ih_name);
		if (ih->ih_argument == NULL)
			ret = ih->ih_filter(frame);
		else
			ret = ih->ih_filter(ih->ih_argument);
		KASSERT(ret == FILTER_STRAY ||
		    ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
		    (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
		    ("%s: incorrect return value %#x from %s", __func__, ret,
		    ih->ih_name));

		/*
		 * Wrapper handler special handling:
		 *
		 * in some particular cases (like pccard and pccbb),
		 * the _real_ device handler is wrapped in a couple of
		 * functions - a filter wrapper and an ithread wrapper.
		 * In this case (and just in this case), the filter wrapper
		 * could ask the system to schedule the ithread and mask
		 * the interrupt source if the wrapped handler is composed
		 * of just an ithread handler.
		 *
		 * TODO: write a generic wrapper to avoid people rolling
		 * their own
		 */
		if (!thread) {
			if (ret == FILTER_SCHEDULE_THREAD)
				thread = 1;
		}
	}
	td->td_intr_frame = oldframe;

	if (thread) {
		if (ie->ie_pre_ithread != NULL)
			ie->ie_pre_ithread(ie->ie_source);
	} else {
		if (ie->ie_post_filter != NULL)
			ie->ie_post_filter(ie->ie_source);
	}

	/* Schedule the ithread if needed. */
	if (thread) {
		error = intr_event_schedule_thread(ie);
#ifndef XEN
		KASSERT(error == 0, ("bad stray interrupt"));
#else
		if (error != 0)
			log(LOG_WARNING, "bad stray interrupt");
#endif
	}
	critical_exit();
	td->td_intr_nesting_level--;
	return (0);
}
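
/*
 * Example filter (editor's addition; the mydev_* names are hypothetical)
 * showing the return-value contract enforced by the KASSERT above:
 *
 *	static int
 *	mydev_filter(void *arg)
 *	{
 *		struct mydev_softc *sc = arg;
 *
 *		if (!mydev_intr_pending(sc))
 *			return (FILTER_STRAY);
 *		if (mydev_handle_in_filter(sc))
 *			return (FILTER_HANDLED);
 *		return (FILTER_SCHEDULE_THREAD);
 *	}
 */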
1542 */ 1543 thread_lock(td); 1544 if ((atomic_load_acq_int(&ithd->it_need) == 0) && 1545 !(ithd->it_flags & (IT_DEAD | IT_WAIT))) { 1546 TD_SET_IWAIT(td); 1547 ie->ie_count = 0; 1548 mi_switch(SW_VOL | SWT_IWAIT, NULL); 1549 } 1550 if (ithd->it_flags & IT_WAIT) { 1551 wake = 1; 1552 ithd->it_flags &= ~IT_WAIT; 1553 } 1554 thread_unlock(td); 1555 if (wake) { 1556 wakeup(ithd); 1557 wake = 0; 1558 } 1559 } 1560 } 1561 1562 /* 1563 * Main loop for interrupt filter. 1564 * 1565 * Some architectures (i386, amd64 and arm) require the optional frame 1566 * parameter, and use it as the main argument for fast handler execution 1567 * when ih_argument == NULL. 1568 * 1569 * Return value: 1570 * o FILTER_STRAY: No filter recognized the event, and no 1571 * filter-less handler is registered on this 1572 * line. 1573 * o FILTER_HANDLED: A filter claimed the event and served it. 1574 * o FILTER_SCHEDULE_THREAD: No filter claimed the event, but there's at 1575 * least one filter-less handler on this line. 1576 * o FILTER_HANDLED | 1577 * FILTER_SCHEDULE_THREAD: A filter claimed the event, and asked for 1578 * scheduling the per-handler ithread. 1579 * 1580 * In case an ithread has to be scheduled, in *ithd there will be a 1581 * pointer to a struct intr_thread containing the thread to be 1582 * scheduled. 1583 */ 1584 1585 static int 1586 intr_filter_loop(struct intr_event *ie, struct trapframe *frame, 1587 struct intr_thread **ithd) 1588 { 1589 struct intr_handler *ih; 1590 void *arg; 1591 int ret, thread_only; 1592 1593 ret = 0; 1594 thread_only = 0; 1595 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 1596 /* 1597 * Execute fast interrupt handlers directly. 1598 * To support clock handlers, if a handler registers 1599 * with a NULL argument, then we pass it a pointer to 1600 * a trapframe as its argument. 1601 */ 1602 arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument); 1603 1604 CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__, 1605 ih->ih_filter, ih->ih_handler, arg, ih->ih_name); 1606 1607 if (ih->ih_filter != NULL) 1608 ret = ih->ih_filter(arg); 1609 else { 1610 thread_only = 1; 1611 continue; 1612 } 1613 KASSERT(ret == FILTER_STRAY || 1614 ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 && 1615 (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0), 1616 ("%s: incorrect return value %#x from %s", __func__, ret, 1617 ih->ih_name)); 1618 if (ret & FILTER_STRAY) 1619 continue; 1620 else { 1621 *ithd = ih->ih_thread; 1622 return (ret); 1623 } 1624 } 1625 1626 /* 1627 * No filters handled the interrupt and we have at least 1628 * one handler without a filter. In this case, we schedule 1629 * all of the filter-less handlers to run in the ithread. 1630 */ 1631 if (thread_only) { 1632 *ithd = ie->ie_thread; 1633 return (FILTER_SCHEDULE_THREAD); 1634 } 1635 return (FILTER_STRAY); 1636 } 1637 1638 /* 1639 * Main interrupt handling body. 1640 * 1641 * Input: 1642 * o ie: the event connected to this interrupt. 1643 * o frame: some archs (i.e. i386) pass a frame to some. 1644 * handlers as their main argument. 1645 * Return value: 1646 * o 0: everything ok. 1647 * o EINVAL: stray interrupt. 
1648 */ 1649 int 1650 intr_event_handle(struct intr_event *ie, struct trapframe *frame) 1651 { 1652 struct intr_thread *ithd; 1653 struct trapframe *oldframe; 1654 struct thread *td; 1655 int thread; 1656 1657 ithd = NULL; 1658 td = curthread; 1659 1660 if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers)) 1661 return (EINVAL); 1662 1663 td->td_intr_nesting_level++; 1664 thread = 0; 1665 critical_enter(); 1666 oldframe = td->td_intr_frame; 1667 td->td_intr_frame = frame; 1668 thread = intr_filter_loop(ie, frame, &ithd); 1669 if (thread & FILTER_HANDLED) { 1670 if (ie->ie_post_filter != NULL) 1671 ie->ie_post_filter(ie->ie_source); 1672 } else { 1673 if (ie->ie_pre_ithread != NULL) 1674 ie->ie_pre_ithread(ie->ie_source); 1675 } 1676 td->td_intr_frame = oldframe; 1677 critical_exit(); 1678 1679 /* Interrupt storm logic */ 1680 if (thread & FILTER_STRAY) { 1681 ie->ie_count++; 1682 if (ie->ie_count < intr_storm_threshold) 1683 printf("Interrupt stray detection not present\n"); 1684 } 1685 1686 /* Schedule an ithread if needed. */ 1687 if (thread & FILTER_SCHEDULE_THREAD) { 1688 if (intr_event_schedule_thread(ie, ithd) != 0) 1689 panic("%s: impossible stray interrupt", __func__); 1690 } 1691 td->td_intr_nesting_level--; 1692 return (0); 1693 } 1694 #endif 1695 1696 #ifdef DDB 1697 /* 1698 * Dump details about an interrupt handler 1699 */ 1700 static void 1701 db_dump_intrhand(struct intr_handler *ih) 1702 { 1703 int comma; 1704 1705 db_printf("\t%-10s ", ih->ih_name); 1706 switch (ih->ih_pri) { 1707 case PI_REALTIME: 1708 db_printf("CLK "); 1709 break; 1710 case PI_AV: 1711 db_printf("AV "); 1712 break; 1713 case PI_TTY: 1714 db_printf("TTY "); 1715 break; 1716 case PI_NET: 1717 db_printf("NET "); 1718 break; 1719 case PI_DISK: 1720 db_printf("DISK"); 1721 break; 1722 case PI_DULL: 1723 db_printf("DULL"); 1724 break; 1725 default: 1726 if (ih->ih_pri >= PI_SOFT) 1727 db_printf("SWI "); 1728 else 1729 db_printf("%4u", ih->ih_pri); 1730 break; 1731 } 1732 db_printf(" "); 1733 if (ih->ih_filter != NULL) { 1734 db_printf("[F]"); 1735 db_printsym((uintptr_t)ih->ih_filter, DB_STGY_PROC); 1736 } 1737 if (ih->ih_handler != NULL) { 1738 if (ih->ih_filter != NULL) 1739 db_printf(","); 1740 db_printf("[H]"); 1741 db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC); 1742 } 1743 db_printf("(%p)", ih->ih_argument); 1744 if (ih->ih_need || 1745 (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD | 1746 IH_MPSAFE)) != 0) { 1747 db_printf(" {"); 1748 comma = 0; 1749 if (ih->ih_flags & IH_EXCLUSIVE) { 1750 if (comma) 1751 db_printf(", "); 1752 db_printf("EXCL"); 1753 comma = 1; 1754 } 1755 if (ih->ih_flags & IH_ENTROPY) { 1756 if (comma) 1757 db_printf(", "); 1758 db_printf("ENTROPY"); 1759 comma = 1; 1760 } 1761 if (ih->ih_flags & IH_DEAD) { 1762 if (comma) 1763 db_printf(", "); 1764 db_printf("DEAD"); 1765 comma = 1; 1766 } 1767 if (ih->ih_flags & IH_MPSAFE) { 1768 if (comma) 1769 db_printf(", "); 1770 db_printf("MPSAFE"); 1771 comma = 1; 1772 } 1773 if (ih->ih_need) { 1774 if (comma) 1775 db_printf(", "); 1776 db_printf("NEED"); 1777 } 1778 db_printf("}"); 1779 } 1780 db_printf("\n"); 1781 } 1782 1783 /* 1784 * Dump details about a event. 
1785 */ 1786 void 1787 db_dump_intr_event(struct intr_event *ie, int handlers) 1788 { 1789 struct intr_handler *ih; 1790 struct intr_thread *it; 1791 int comma; 1792 1793 db_printf("%s ", ie->ie_fullname); 1794 it = ie->ie_thread; 1795 if (it != NULL) 1796 db_printf("(pid %d)", it->it_thread->td_proc->p_pid); 1797 else 1798 db_printf("(no thread)"); 1799 if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 || 1800 (it != NULL && it->it_need)) { 1801 db_printf(" {"); 1802 comma = 0; 1803 if (ie->ie_flags & IE_SOFT) { 1804 db_printf("SOFT"); 1805 comma = 1; 1806 } 1807 if (ie->ie_flags & IE_ENTROPY) { 1808 if (comma) 1809 db_printf(", "); 1810 db_printf("ENTROPY"); 1811 comma = 1; 1812 } 1813 if (ie->ie_flags & IE_ADDING_THREAD) { 1814 if (comma) 1815 db_printf(", "); 1816 db_printf("ADDING_THREAD"); 1817 comma = 1; 1818 } 1819 if (it != NULL && it->it_need) { 1820 if (comma) 1821 db_printf(", "); 1822 db_printf("NEED"); 1823 } 1824 db_printf("}"); 1825 } 1826 db_printf("\n"); 1827 1828 if (handlers) 1829 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) 1830 db_dump_intrhand(ih); 1831 } 1832 1833 /* 1834 * Dump data about interrupt handlers 1835 */ 1836 DB_SHOW_COMMAND(intr, db_show_intr) 1837 { 1838 struct intr_event *ie; 1839 int all, verbose; 1840 1841 verbose = strchr(modif, 'v') != NULL; 1842 all = strchr(modif, 'a') != NULL; 1843 TAILQ_FOREACH(ie, &event_list, ie_list) { 1844 if (!all && TAILQ_EMPTY(&ie->ie_handlers)) 1845 continue; 1846 db_dump_intr_event(ie, verbose); 1847 if (db_pager_quit) 1848 break; 1849 } 1850 } 1851 #endif /* DDB */ 1852 1853 /* 1854 * Start standard software interrupt threads 1855 */ 1856 static void 1857 start_softintr(void *dummy) 1858 { 1859 1860 if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih)) 1861 panic("died while creating vm swi ithread"); 1862 } 1863 SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, 1864 NULL); 1865 1866 /* 1867 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt. 1868 * The data for this machine dependent, and the declarations are in machine 1869 * dependent code. The layout of intrnames and intrcnt however is machine 1870 * independent. 1871 * 1872 * We do not know the length of intrcnt and intrnames at compile time, so 1873 * calculate things at run time. 1874 */ 1875 static int 1876 sysctl_intrnames(SYSCTL_HANDLER_ARGS) 1877 { 1878 return (sysctl_handle_opaque(oidp, intrnames, sintrnames, req)); 1879 } 1880 1881 SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD, 1882 NULL, 0, sysctl_intrnames, "", "Interrupt Names"); 1883 1884 static int 1885 sysctl_intrcnt(SYSCTL_HANDLER_ARGS) 1886 { 1887 #ifdef SCTL_MASK32 1888 uint32_t *intrcnt32; 1889 unsigned i; 1890 int error; 1891 1892 if (req->flags & SCTL_MASK32) { 1893 if (!req->oldptr) 1894 return (sysctl_handle_opaque(oidp, NULL, sintrcnt / 2, req)); 1895 intrcnt32 = malloc(sintrcnt / 2, M_TEMP, M_NOWAIT); 1896 if (intrcnt32 == NULL) 1897 return (ENOMEM); 1898 for (i = 0; i < sintrcnt / sizeof (u_long); i++) 1899 intrcnt32[i] = intrcnt[i]; 1900 error = sysctl_handle_opaque(oidp, intrcnt32, sintrcnt / 2, req); 1901 free(intrcnt32, M_TEMP); 1902 return (error); 1903 } 1904 #endif 1905 return (sysctl_handle_opaque(oidp, intrcnt, sintrcnt, req)); 1906 } 1907 1908 SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD, 1909 NULL, 0, sysctl_intrcnt, "", "Interrupt Counts"); 1910 1911 #ifdef DDB 1912 /* 1913 * DDB command to dump the interrupt statistics. 
1914 */ 1915 DB_SHOW_COMMAND(intrcnt, db_show_intrcnt) 1916 { 1917 u_long *i; 1918 char *cp; 1919 u_int j; 1920 1921 cp = intrnames; 1922 j = 0; 1923 for (i = intrcnt; j < (sintrcnt / sizeof(u_long)) && !db_pager_quit; 1924 i++, j++) { 1925 if (*cp == '\0') 1926 break; 1927 if (*i != 0) 1928 db_printf("%s\t%lu\n", cp, *i); 1929 cp += strlen(cp) + 1; 1930 } 1931 } 1932 #endif 1933