1 /*- 2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice unmodified, this list of conditions, and the following 10 * disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include "opt_ddb.h" 31 32 #include <sys/param.h> 33 #include <sys/bus.h> 34 #include <sys/conf.h> 35 #include <sys/cpuset.h> 36 #include <sys/rtprio.h> 37 #include <sys/systm.h> 38 #include <sys/interrupt.h> 39 #include <sys/kernel.h> 40 #include <sys/kthread.h> 41 #include <sys/ktr.h> 42 #include <sys/limits.h> 43 #include <sys/lock.h> 44 #include <sys/malloc.h> 45 #include <sys/mutex.h> 46 #include <sys/priv.h> 47 #include <sys/proc.h> 48 #include <sys/random.h> 49 #include <sys/resourcevar.h> 50 #include <sys/sched.h> 51 #include <sys/smp.h> 52 #include <sys/sysctl.h> 53 #include <sys/syslog.h> 54 #include <sys/unistd.h> 55 #include <sys/vmmeter.h> 56 #include <machine/atomic.h> 57 #include <machine/cpu.h> 58 #include <machine/md_var.h> 59 #include <machine/stdarg.h> 60 #ifdef DDB 61 #include <ddb/ddb.h> 62 #include <ddb/db_sym.h> 63 #endif 64 65 /* 66 * Describe an interrupt thread. There is one of these per interrupt event. 67 */ 68 struct intr_thread { 69 struct intr_event *it_event; 70 struct thread *it_thread; /* Kernel thread. */ 71 int it_flags; /* (j) IT_* flags. */ 72 int it_need; /* Needs service. */ 73 }; 74 75 /* Interrupt thread flags kept in it_flags */ 76 #define IT_DEAD 0x000001 /* Thread is waiting to exit. */ 77 #define IT_WAIT 0x000002 /* Thread is waiting for completion. */ 78 79 struct intr_entropy { 80 struct thread *td; 81 uintptr_t event; 82 }; 83 84 struct intr_event *clk_intr_event; 85 struct intr_event *tty_intr_event; 86 void *vm_ih; 87 struct proc *intrproc; 88 89 static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads"); 90 91 static int intr_storm_threshold = 1000; 92 TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold); 93 SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW, 94 &intr_storm_threshold, 0, 95 "Number of consecutive interrupts before storm protection is enabled"); 96 static TAILQ_HEAD(, intr_event) event_list = 97 TAILQ_HEAD_INITIALIZER(event_list); 98 static struct mtx event_lock; 99 MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF); 100 101 static void intr_event_update(struct intr_event *ie); 102 #ifdef INTR_FILTER 103 static int intr_event_schedule_thread(struct intr_event *ie, 104 struct intr_thread *ithd); 105 static int intr_filter_loop(struct intr_event *ie, 106 struct trapframe *frame, struct intr_thread **ithd); 107 static struct intr_thread *ithread_create(const char *name, 108 struct intr_handler *ih); 109 #else 110 static int intr_event_schedule_thread(struct intr_event *ie); 111 static struct intr_thread *ithread_create(const char *name); 112 #endif 113 static void ithread_destroy(struct intr_thread *ithread); 114 static void ithread_execute_handlers(struct proc *p, 115 struct intr_event *ie); 116 #ifdef INTR_FILTER 117 static void priv_ithread_execute_handler(struct proc *p, 118 struct intr_handler *ih); 119 #endif 120 static void ithread_loop(void *); 121 static void ithread_update(struct intr_thread *ithd); 122 static void start_softintr(void *); 123 124 /* Map an interrupt type to an ithread priority. */ 125 u_char 126 intr_priority(enum intr_type flags) 127 { 128 u_char pri; 129 130 flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET | 131 INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV); 132 switch (flags) { 133 case INTR_TYPE_TTY: 134 pri = PI_TTY; 135 break; 136 case INTR_TYPE_BIO: 137 pri = PI_DISK; 138 break; 139 case INTR_TYPE_NET: 140 pri = PI_NET; 141 break; 142 case INTR_TYPE_CAM: 143 pri = PI_DISK; 144 break; 145 case INTR_TYPE_AV: 146 pri = PI_AV; 147 break; 148 case INTR_TYPE_CLK: 149 pri = PI_REALTIME; 150 break; 151 case INTR_TYPE_MISC: 152 pri = PI_DULL; /* don't care */ 153 break; 154 default: 155 /* We didn't specify an interrupt level. */ 156 panic("intr_priority: no interrupt type in flags"); 157 } 158 159 return pri; 160 } 161 162 /* 163 * Update an ithread based on the associated intr_event. 164 */ 165 static void 166 ithread_update(struct intr_thread *ithd) 167 { 168 struct intr_event *ie; 169 struct thread *td; 170 u_char pri; 171 172 ie = ithd->it_event; 173 td = ithd->it_thread; 174 175 /* Determine the overall priority of this event. */ 176 if (TAILQ_EMPTY(&ie->ie_handlers)) 177 pri = PRI_MAX_ITHD; 178 else 179 pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri; 180 181 /* Update name and priority. */ 182 strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name)); 183 #ifdef KTR 184 sched_clear_tdname(td); 185 #endif 186 thread_lock(td); 187 sched_prio(td, pri); 188 thread_unlock(td); 189 } 190 191 /* 192 * Regenerate the full name of an interrupt event and update its priority. 193 */ 194 static void 195 intr_event_update(struct intr_event *ie) 196 { 197 struct intr_handler *ih; 198 char *last; 199 int missed, space; 200 201 /* Start off with no entropy and just the name of the event. */ 202 mtx_assert(&ie->ie_lock, MA_OWNED); 203 strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); 204 ie->ie_flags &= ~IE_ENTROPY; 205 missed = 0; 206 space = 1; 207 208 /* Run through all the handlers updating values. */ 209 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 210 if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 < 211 sizeof(ie->ie_fullname)) { 212 strcat(ie->ie_fullname, " "); 213 strcat(ie->ie_fullname, ih->ih_name); 214 space = 0; 215 } else 216 missed++; 217 if (ih->ih_flags & IH_ENTROPY) 218 ie->ie_flags |= IE_ENTROPY; 219 } 220 221 /* 222 * If the handler names were too long, add +'s to indicate missing 223 * names. If we run out of room and still have +'s to add, change 224 * the last character from a + to a *. 225 */ 226 last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2]; 227 while (missed-- > 0) { 228 if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) { 229 if (*last == '+') { 230 *last = '*'; 231 break; 232 } else 233 *last = '+'; 234 } else if (space) { 235 strcat(ie->ie_fullname, " +"); 236 space = 0; 237 } else 238 strcat(ie->ie_fullname, "+"); 239 } 240 241 /* 242 * If this event has an ithread, update it's priority and 243 * name. 244 */ 245 if (ie->ie_thread != NULL) 246 ithread_update(ie->ie_thread); 247 CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname); 248 } 249 250 int 251 intr_event_create(struct intr_event **event, void *source, int flags, int irq, 252 void (*pre_ithread)(void *), void (*post_ithread)(void *), 253 void (*post_filter)(void *), int (*assign_cpu)(void *, u_char), 254 const char *fmt, ...) 255 { 256 struct intr_event *ie; 257 va_list ap; 258 259 /* The only valid flag during creation is IE_SOFT. */ 260 if ((flags & ~IE_SOFT) != 0) 261 return (EINVAL); 262 ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO); 263 ie->ie_source = source; 264 ie->ie_pre_ithread = pre_ithread; 265 ie->ie_post_ithread = post_ithread; 266 ie->ie_post_filter = post_filter; 267 ie->ie_assign_cpu = assign_cpu; 268 ie->ie_flags = flags; 269 ie->ie_irq = irq; 270 ie->ie_cpu = NOCPU; 271 TAILQ_INIT(&ie->ie_handlers); 272 mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF); 273 274 va_start(ap, fmt); 275 vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap); 276 va_end(ap); 277 strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname)); 278 mtx_lock(&event_lock); 279 TAILQ_INSERT_TAIL(&event_list, ie, ie_list); 280 mtx_unlock(&event_lock); 281 if (event != NULL) 282 *event = ie; 283 CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name); 284 return (0); 285 } 286 287 /* 288 * Bind an interrupt event to the specified CPU. Note that not all 289 * platforms support binding an interrupt to a CPU. For those 290 * platforms this request will fail. For supported platforms, any 291 * associated ithreads as well as the primary interrupt context will 292 * be bound to the specificed CPU. Using a cpu id of NOCPU unbinds 293 * the interrupt event. 294 */ 295 int 296 intr_event_bind(struct intr_event *ie, u_char cpu) 297 { 298 cpuset_t mask; 299 lwpid_t id; 300 int error; 301 302 /* Need a CPU to bind to. */ 303 if (cpu != NOCPU && CPU_ABSENT(cpu)) 304 return (EINVAL); 305 306 if (ie->ie_assign_cpu == NULL) 307 return (EOPNOTSUPP); 308 309 error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR); 310 if (error) 311 return (error); 312 313 /* 314 * If we have any ithreads try to set their mask first to verify 315 * permissions, etc. 316 */ 317 mtx_lock(&ie->ie_lock); 318 if (ie->ie_thread != NULL) { 319 CPU_ZERO(&mask); 320 if (cpu == NOCPU) 321 CPU_COPY(cpuset_root, &mask); 322 else 323 CPU_SET(cpu, &mask); 324 id = ie->ie_thread->it_thread->td_tid; 325 mtx_unlock(&ie->ie_lock); 326 error = cpuset_setthread(id, &mask); 327 if (error) 328 return (error); 329 } else 330 mtx_unlock(&ie->ie_lock); 331 error = ie->ie_assign_cpu(ie->ie_source, cpu); 332 if (error) { 333 mtx_lock(&ie->ie_lock); 334 if (ie->ie_thread != NULL) { 335 CPU_ZERO(&mask); 336 if (ie->ie_cpu == NOCPU) 337 CPU_COPY(cpuset_root, &mask); 338 else 339 CPU_SET(ie->ie_cpu, &mask); 340 id = ie->ie_thread->it_thread->td_tid; 341 mtx_unlock(&ie->ie_lock); 342 (void)cpuset_setthread(id, &mask); 343 } else 344 mtx_unlock(&ie->ie_lock); 345 return (error); 346 } 347 348 mtx_lock(&ie->ie_lock); 349 ie->ie_cpu = cpu; 350 mtx_unlock(&ie->ie_lock); 351 352 return (error); 353 } 354 355 static struct intr_event * 356 intr_lookup(int irq) 357 { 358 struct intr_event *ie; 359 360 mtx_lock(&event_lock); 361 TAILQ_FOREACH(ie, &event_list, ie_list) 362 if (ie->ie_irq == irq && 363 (ie->ie_flags & IE_SOFT) == 0 && 364 TAILQ_FIRST(&ie->ie_handlers) != NULL) 365 break; 366 mtx_unlock(&event_lock); 367 return (ie); 368 } 369 370 int 371 intr_setaffinity(int irq, void *m) 372 { 373 struct intr_event *ie; 374 cpuset_t *mask; 375 u_char cpu; 376 int n; 377 378 mask = m; 379 cpu = NOCPU; 380 /* 381 * If we're setting all cpus we can unbind. Otherwise make sure 382 * only one cpu is in the set. 383 */ 384 if (CPU_CMP(cpuset_root, mask)) { 385 for (n = 0; n < CPU_SETSIZE; n++) { 386 if (!CPU_ISSET(n, mask)) 387 continue; 388 if (cpu != NOCPU) 389 return (EINVAL); 390 cpu = (u_char)n; 391 } 392 } 393 ie = intr_lookup(irq); 394 if (ie == NULL) 395 return (ESRCH); 396 return (intr_event_bind(ie, cpu)); 397 } 398 399 int 400 intr_getaffinity(int irq, void *m) 401 { 402 struct intr_event *ie; 403 cpuset_t *mask; 404 405 mask = m; 406 ie = intr_lookup(irq); 407 if (ie == NULL) 408 return (ESRCH); 409 CPU_ZERO(mask); 410 mtx_lock(&ie->ie_lock); 411 if (ie->ie_cpu == NOCPU) 412 CPU_COPY(cpuset_root, mask); 413 else 414 CPU_SET(ie->ie_cpu, mask); 415 mtx_unlock(&ie->ie_lock); 416 return (0); 417 } 418 419 int 420 intr_event_destroy(struct intr_event *ie) 421 { 422 423 mtx_lock(&event_lock); 424 mtx_lock(&ie->ie_lock); 425 if (!TAILQ_EMPTY(&ie->ie_handlers)) { 426 mtx_unlock(&ie->ie_lock); 427 mtx_unlock(&event_lock); 428 return (EBUSY); 429 } 430 TAILQ_REMOVE(&event_list, ie, ie_list); 431 #ifndef notyet 432 if (ie->ie_thread != NULL) { 433 ithread_destroy(ie->ie_thread); 434 ie->ie_thread = NULL; 435 } 436 #endif 437 mtx_unlock(&ie->ie_lock); 438 mtx_unlock(&event_lock); 439 mtx_destroy(&ie->ie_lock); 440 free(ie, M_ITHREAD); 441 return (0); 442 } 443 444 #ifndef INTR_FILTER 445 static struct intr_thread * 446 ithread_create(const char *name) 447 { 448 struct intr_thread *ithd; 449 struct thread *td; 450 int error; 451 452 ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO); 453 454 error = kproc_kthread_add(ithread_loop, ithd, &intrproc, 455 &td, RFSTOPPED | RFHIGHPID, 456 0, "intr", "%s", name); 457 if (error) 458 panic("kproc_create() failed with %d", error); 459 thread_lock(td); 460 sched_class(td, PRI_ITHD); 461 TD_SET_IWAIT(td); 462 thread_unlock(td); 463 td->td_pflags |= TDP_ITHREAD; 464 ithd->it_thread = td; 465 CTR2(KTR_INTR, "%s: created %s", __func__, name); 466 return (ithd); 467 } 468 #else 469 static struct intr_thread * 470 ithread_create(const char *name, struct intr_handler *ih) 471 { 472 struct intr_thread *ithd; 473 struct thread *td; 474 int error; 475 476 ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO); 477 478 error = kproc_kthread_add(ithread_loop, ih, &intrproc, 479 &td, RFSTOPPED | RFHIGHPID, 480 0, "intr", "%s", name); 481 if (error) 482 panic("kproc_create() failed with %d", error); 483 thread_lock(td); 484 sched_class(td, PRI_ITHD); 485 TD_SET_IWAIT(td); 486 thread_unlock(td); 487 td->td_pflags |= TDP_ITHREAD; 488 ithd->it_thread = td; 489 CTR2(KTR_INTR, "%s: created %s", __func__, name); 490 return (ithd); 491 } 492 #endif 493 494 static void 495 ithread_destroy(struct intr_thread *ithread) 496 { 497 struct thread *td; 498 499 CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name); 500 td = ithread->it_thread; 501 thread_lock(td); 502 ithread->it_flags |= IT_DEAD; 503 if (TD_AWAITING_INTR(td)) { 504 TD_CLR_IWAIT(td); 505 sched_add(td, SRQ_INTR); 506 } 507 thread_unlock(td); 508 } 509 510 #ifndef INTR_FILTER 511 int 512 intr_event_add_handler(struct intr_event *ie, const char *name, 513 driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri, 514 enum intr_type flags, void **cookiep) 515 { 516 struct intr_handler *ih, *temp_ih; 517 struct intr_thread *it; 518 519 if (ie == NULL || name == NULL || (handler == NULL && filter == NULL)) 520 return (EINVAL); 521 522 /* Allocate and populate an interrupt handler structure. */ 523 ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO); 524 ih->ih_filter = filter; 525 ih->ih_handler = handler; 526 ih->ih_argument = arg; 527 strlcpy(ih->ih_name, name, sizeof(ih->ih_name)); 528 ih->ih_event = ie; 529 ih->ih_pri = pri; 530 if (flags & INTR_EXCL) 531 ih->ih_flags = IH_EXCLUSIVE; 532 if (flags & INTR_MPSAFE) 533 ih->ih_flags |= IH_MPSAFE; 534 if (flags & INTR_ENTROPY) 535 ih->ih_flags |= IH_ENTROPY; 536 537 /* We can only have one exclusive handler in a event. */ 538 mtx_lock(&ie->ie_lock); 539 if (!TAILQ_EMPTY(&ie->ie_handlers)) { 540 if ((flags & INTR_EXCL) || 541 (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) { 542 mtx_unlock(&ie->ie_lock); 543 free(ih, M_ITHREAD); 544 return (EINVAL); 545 } 546 } 547 548 /* Create a thread if we need one. */ 549 while (ie->ie_thread == NULL && handler != NULL) { 550 if (ie->ie_flags & IE_ADDING_THREAD) 551 msleep(ie, &ie->ie_lock, 0, "ithread", 0); 552 else { 553 ie->ie_flags |= IE_ADDING_THREAD; 554 mtx_unlock(&ie->ie_lock); 555 it = ithread_create("intr: newborn"); 556 mtx_lock(&ie->ie_lock); 557 ie->ie_flags &= ~IE_ADDING_THREAD; 558 ie->ie_thread = it; 559 it->it_event = ie; 560 ithread_update(it); 561 wakeup(ie); 562 } 563 } 564 565 /* Add the new handler to the event in priority order. */ 566 TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) { 567 if (temp_ih->ih_pri > ih->ih_pri) 568 break; 569 } 570 if (temp_ih == NULL) 571 TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next); 572 else 573 TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next); 574 intr_event_update(ie); 575 576 CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name, 577 ie->ie_name); 578 mtx_unlock(&ie->ie_lock); 579 580 if (cookiep != NULL) 581 *cookiep = ih; 582 return (0); 583 } 584 #else 585 int 586 intr_event_add_handler(struct intr_event *ie, const char *name, 587 driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri, 588 enum intr_type flags, void **cookiep) 589 { 590 struct intr_handler *ih, *temp_ih; 591 struct intr_thread *it; 592 593 if (ie == NULL || name == NULL || (handler == NULL && filter == NULL)) 594 return (EINVAL); 595 596 /* Allocate and populate an interrupt handler structure. */ 597 ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO); 598 ih->ih_filter = filter; 599 ih->ih_handler = handler; 600 ih->ih_argument = arg; 601 strlcpy(ih->ih_name, name, sizeof(ih->ih_name)); 602 ih->ih_event = ie; 603 ih->ih_pri = pri; 604 if (flags & INTR_EXCL) 605 ih->ih_flags = IH_EXCLUSIVE; 606 if (flags & INTR_MPSAFE) 607 ih->ih_flags |= IH_MPSAFE; 608 if (flags & INTR_ENTROPY) 609 ih->ih_flags |= IH_ENTROPY; 610 611 /* We can only have one exclusive handler in a event. */ 612 mtx_lock(&ie->ie_lock); 613 if (!TAILQ_EMPTY(&ie->ie_handlers)) { 614 if ((flags & INTR_EXCL) || 615 (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) { 616 mtx_unlock(&ie->ie_lock); 617 free(ih, M_ITHREAD); 618 return (EINVAL); 619 } 620 } 621 622 /* For filtered handlers, create a private ithread to run on. */ 623 if (filter != NULL && handler != NULL) { 624 mtx_unlock(&ie->ie_lock); 625 it = ithread_create("intr: newborn", ih); 626 mtx_lock(&ie->ie_lock); 627 it->it_event = ie; 628 ih->ih_thread = it; 629 ithread_update(it); /* XXX - do we really need this?!?!? */ 630 } else { /* Create the global per-event thread if we need one. */ 631 while (ie->ie_thread == NULL && handler != NULL) { 632 if (ie->ie_flags & IE_ADDING_THREAD) 633 msleep(ie, &ie->ie_lock, 0, "ithread", 0); 634 else { 635 ie->ie_flags |= IE_ADDING_THREAD; 636 mtx_unlock(&ie->ie_lock); 637 it = ithread_create("intr: newborn", ih); 638 mtx_lock(&ie->ie_lock); 639 ie->ie_flags &= ~IE_ADDING_THREAD; 640 ie->ie_thread = it; 641 it->it_event = ie; 642 ithread_update(it); 643 wakeup(ie); 644 } 645 } 646 } 647 648 /* Add the new handler to the event in priority order. */ 649 TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) { 650 if (temp_ih->ih_pri > ih->ih_pri) 651 break; 652 } 653 if (temp_ih == NULL) 654 TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next); 655 else 656 TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next); 657 intr_event_update(ie); 658 659 CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name, 660 ie->ie_name); 661 mtx_unlock(&ie->ie_lock); 662 663 if (cookiep != NULL) 664 *cookiep = ih; 665 return (0); 666 } 667 #endif 668 669 /* 670 * Append a description preceded by a ':' to the name of the specified 671 * interrupt handler. 672 */ 673 int 674 intr_event_describe_handler(struct intr_event *ie, void *cookie, 675 const char *descr) 676 { 677 struct intr_handler *ih; 678 size_t space; 679 char *start; 680 681 mtx_lock(&ie->ie_lock); 682 #ifdef INVARIANTS 683 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 684 if (ih == cookie) 685 break; 686 } 687 if (ih == NULL) { 688 mtx_unlock(&ie->ie_lock); 689 panic("handler %p not found in interrupt event %p", cookie, ie); 690 } 691 #endif 692 ih = cookie; 693 694 /* 695 * Look for an existing description by checking for an 696 * existing ":". This assumes device names do not include 697 * colons. If one is found, prepare to insert the new 698 * description at that point. If one is not found, find the 699 * end of the name to use as the insertion point. 700 */ 701 start = strchr(ih->ih_name, ':'); 702 if (start == NULL) 703 start = strchr(ih->ih_name, 0); 704 705 /* 706 * See if there is enough remaining room in the string for the 707 * description + ":". The "- 1" leaves room for the trailing 708 * '\0'. The "+ 1" accounts for the colon. 709 */ 710 space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1; 711 if (strlen(descr) + 1 > space) { 712 mtx_unlock(&ie->ie_lock); 713 return (ENOSPC); 714 } 715 716 /* Append a colon followed by the description. */ 717 *start = ':'; 718 strcpy(start + 1, descr); 719 intr_event_update(ie); 720 mtx_unlock(&ie->ie_lock); 721 return (0); 722 } 723 724 /* 725 * Return the ie_source field from the intr_event an intr_handler is 726 * associated with. 727 */ 728 void * 729 intr_handler_source(void *cookie) 730 { 731 struct intr_handler *ih; 732 struct intr_event *ie; 733 734 ih = (struct intr_handler *)cookie; 735 if (ih == NULL) 736 return (NULL); 737 ie = ih->ih_event; 738 KASSERT(ie != NULL, 739 ("interrupt handler \"%s\" has a NULL interrupt event", 740 ih->ih_name)); 741 return (ie->ie_source); 742 } 743 744 /* 745 * Sleep until an ithread finishes executing an interrupt handler. 746 * 747 * XXX Doesn't currently handle interrupt filters or fast interrupt 748 * handlers. This is intended for compatibility with linux drivers 749 * only. Do not use in BSD code. 750 */ 751 void 752 _intr_drain(int irq) 753 { 754 struct intr_event *ie; 755 struct intr_thread *ithd; 756 struct thread *td; 757 758 ie = intr_lookup(irq); 759 if (ie == NULL) 760 return; 761 if (ie->ie_thread == NULL) 762 return; 763 ithd = ie->ie_thread; 764 td = ithd->it_thread; 765 /* 766 * We set the flag and wait for it to be cleared to avoid 767 * long delays with potentially busy interrupt handlers 768 * were we to only sample TD_AWAITING_INTR() every tick. 769 */ 770 thread_lock(td); 771 if (!TD_AWAITING_INTR(td)) { 772 ithd->it_flags |= IT_WAIT; 773 while (ithd->it_flags & IT_WAIT) { 774 thread_unlock(td); 775 pause("idrain", 1); 776 thread_lock(td); 777 } 778 } 779 thread_unlock(td); 780 return; 781 } 782 783 784 #ifndef INTR_FILTER 785 int 786 intr_event_remove_handler(void *cookie) 787 { 788 struct intr_handler *handler = (struct intr_handler *)cookie; 789 struct intr_event *ie; 790 #ifdef INVARIANTS 791 struct intr_handler *ih; 792 #endif 793 #ifdef notyet 794 int dead; 795 #endif 796 797 if (handler == NULL) 798 return (EINVAL); 799 ie = handler->ih_event; 800 KASSERT(ie != NULL, 801 ("interrupt handler \"%s\" has a NULL interrupt event", 802 handler->ih_name)); 803 mtx_lock(&ie->ie_lock); 804 CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name, 805 ie->ie_name); 806 #ifdef INVARIANTS 807 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) 808 if (ih == handler) 809 goto ok; 810 mtx_unlock(&ie->ie_lock); 811 panic("interrupt handler \"%s\" not found in interrupt event \"%s\"", 812 ih->ih_name, ie->ie_name); 813 ok: 814 #endif 815 /* 816 * If there is no ithread, then just remove the handler and return. 817 * XXX: Note that an INTR_FAST handler might be running on another 818 * CPU! 819 */ 820 if (ie->ie_thread == NULL) { 821 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 822 mtx_unlock(&ie->ie_lock); 823 free(handler, M_ITHREAD); 824 return (0); 825 } 826 827 /* 828 * If the interrupt thread is already running, then just mark this 829 * handler as being dead and let the ithread do the actual removal. 830 * 831 * During a cold boot while cold is set, msleep() does not sleep, 832 * so we have to remove the handler here rather than letting the 833 * thread do it. 834 */ 835 thread_lock(ie->ie_thread->it_thread); 836 if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) { 837 handler->ih_flags |= IH_DEAD; 838 839 /* 840 * Ensure that the thread will process the handler list 841 * again and remove this handler if it has already passed 842 * it on the list. 843 */ 844 ie->ie_thread->it_need = 1; 845 } else 846 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 847 thread_unlock(ie->ie_thread->it_thread); 848 while (handler->ih_flags & IH_DEAD) 849 msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0); 850 intr_event_update(ie); 851 #ifdef notyet 852 /* 853 * XXX: This could be bad in the case of ppbus(8). Also, I think 854 * this could lead to races of stale data when servicing an 855 * interrupt. 856 */ 857 dead = 1; 858 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 859 if (!(ih->ih_flags & IH_FAST)) { 860 dead = 0; 861 break; 862 } 863 } 864 if (dead) { 865 ithread_destroy(ie->ie_thread); 866 ie->ie_thread = NULL; 867 } 868 #endif 869 mtx_unlock(&ie->ie_lock); 870 free(handler, M_ITHREAD); 871 return (0); 872 } 873 874 static int 875 intr_event_schedule_thread(struct intr_event *ie) 876 { 877 struct intr_entropy entropy; 878 struct intr_thread *it; 879 struct thread *td; 880 struct thread *ctd; 881 struct proc *p; 882 883 /* 884 * If no ithread or no handlers, then we have a stray interrupt. 885 */ 886 if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || 887 ie->ie_thread == NULL) 888 return (EINVAL); 889 890 ctd = curthread; 891 it = ie->ie_thread; 892 td = it->it_thread; 893 p = td->td_proc; 894 895 /* 896 * If any of the handlers for this ithread claim to be good 897 * sources of entropy, then gather some. 898 */ 899 if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) { 900 CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__, 901 p->p_pid, td->td_name); 902 entropy.event = (uintptr_t)ie; 903 entropy.td = ctd; 904 random_harvest(&entropy, sizeof(entropy), 2, 0, 905 RANDOM_INTERRUPT); 906 } 907 908 KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name)); 909 910 /* 911 * Set it_need to tell the thread to keep running if it is already 912 * running. Then, lock the thread and see if we actually need to 913 * put it on the runqueue. 914 */ 915 it->it_need = 1; 916 thread_lock(td); 917 if (TD_AWAITING_INTR(td)) { 918 CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid, 919 td->td_name); 920 TD_CLR_IWAIT(td); 921 sched_add(td, SRQ_INTR); 922 } else { 923 CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d", 924 __func__, p->p_pid, td->td_name, it->it_need, td->td_state); 925 } 926 thread_unlock(td); 927 928 return (0); 929 } 930 #else 931 int 932 intr_event_remove_handler(void *cookie) 933 { 934 struct intr_handler *handler = (struct intr_handler *)cookie; 935 struct intr_event *ie; 936 struct intr_thread *it; 937 #ifdef INVARIANTS 938 struct intr_handler *ih; 939 #endif 940 #ifdef notyet 941 int dead; 942 #endif 943 944 if (handler == NULL) 945 return (EINVAL); 946 ie = handler->ih_event; 947 KASSERT(ie != NULL, 948 ("interrupt handler \"%s\" has a NULL interrupt event", 949 handler->ih_name)); 950 mtx_lock(&ie->ie_lock); 951 CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name, 952 ie->ie_name); 953 #ifdef INVARIANTS 954 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) 955 if (ih == handler) 956 goto ok; 957 mtx_unlock(&ie->ie_lock); 958 panic("interrupt handler \"%s\" not found in interrupt event \"%s\"", 959 ih->ih_name, ie->ie_name); 960 ok: 961 #endif 962 /* 963 * If there are no ithreads (per event and per handler), then 964 * just remove the handler and return. 965 * XXX: Note that an INTR_FAST handler might be running on another CPU! 966 */ 967 if (ie->ie_thread == NULL && handler->ih_thread == NULL) { 968 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 969 mtx_unlock(&ie->ie_lock); 970 free(handler, M_ITHREAD); 971 return (0); 972 } 973 974 /* Private or global ithread? */ 975 it = (handler->ih_thread) ? handler->ih_thread : ie->ie_thread; 976 /* 977 * If the interrupt thread is already running, then just mark this 978 * handler as being dead and let the ithread do the actual removal. 979 * 980 * During a cold boot while cold is set, msleep() does not sleep, 981 * so we have to remove the handler here rather than letting the 982 * thread do it. 983 */ 984 thread_lock(it->it_thread); 985 if (!TD_AWAITING_INTR(it->it_thread) && !cold) { 986 handler->ih_flags |= IH_DEAD; 987 988 /* 989 * Ensure that the thread will process the handler list 990 * again and remove this handler if it has already passed 991 * it on the list. 992 */ 993 it->it_need = 1; 994 } else 995 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 996 thread_unlock(it->it_thread); 997 while (handler->ih_flags & IH_DEAD) 998 msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0); 999 /* 1000 * At this point, the handler has been disconnected from the event, 1001 * so we can kill the private ithread if any. 1002 */ 1003 if (handler->ih_thread) { 1004 ithread_destroy(handler->ih_thread); 1005 handler->ih_thread = NULL; 1006 } 1007 intr_event_update(ie); 1008 #ifdef notyet 1009 /* 1010 * XXX: This could be bad in the case of ppbus(8). Also, I think 1011 * this could lead to races of stale data when servicing an 1012 * interrupt. 1013 */ 1014 dead = 1; 1015 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 1016 if (handler != NULL) { 1017 dead = 0; 1018 break; 1019 } 1020 } 1021 if (dead) { 1022 ithread_destroy(ie->ie_thread); 1023 ie->ie_thread = NULL; 1024 } 1025 #endif 1026 mtx_unlock(&ie->ie_lock); 1027 free(handler, M_ITHREAD); 1028 return (0); 1029 } 1030 1031 static int 1032 intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it) 1033 { 1034 struct intr_entropy entropy; 1035 struct thread *td; 1036 struct thread *ctd; 1037 struct proc *p; 1038 1039 /* 1040 * If no ithread or no handlers, then we have a stray interrupt. 1041 */ 1042 if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || it == NULL) 1043 return (EINVAL); 1044 1045 ctd = curthread; 1046 td = it->it_thread; 1047 p = td->td_proc; 1048 1049 /* 1050 * If any of the handlers for this ithread claim to be good 1051 * sources of entropy, then gather some. 1052 */ 1053 if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) { 1054 CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__, 1055 p->p_pid, td->td_name); 1056 entropy.event = (uintptr_t)ie; 1057 entropy.td = ctd; 1058 random_harvest(&entropy, sizeof(entropy), 2, 0, 1059 RANDOM_INTERRUPT); 1060 } 1061 1062 KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name)); 1063 1064 /* 1065 * Set it_need to tell the thread to keep running if it is already 1066 * running. Then, lock the thread and see if we actually need to 1067 * put it on the runqueue. 1068 */ 1069 it->it_need = 1; 1070 thread_lock(td); 1071 if (TD_AWAITING_INTR(td)) { 1072 CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid, 1073 td->td_name); 1074 TD_CLR_IWAIT(td); 1075 sched_add(td, SRQ_INTR); 1076 } else { 1077 CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d", 1078 __func__, p->p_pid, td->td_name, it->it_need, td->td_state); 1079 } 1080 thread_unlock(td); 1081 1082 return (0); 1083 } 1084 #endif 1085 1086 /* 1087 * Allow interrupt event binding for software interrupt handlers -- a no-op, 1088 * since interrupts are generated in software rather than being directed by 1089 * a PIC. 1090 */ 1091 static int 1092 swi_assign_cpu(void *arg, u_char cpu) 1093 { 1094 1095 return (0); 1096 } 1097 1098 /* 1099 * Add a software interrupt handler to a specified event. If a given event 1100 * is not specified, then a new event is created. 1101 */ 1102 int 1103 swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler, 1104 void *arg, int pri, enum intr_type flags, void **cookiep) 1105 { 1106 struct intr_event *ie; 1107 int error; 1108 1109 if (flags & INTR_ENTROPY) 1110 return (EINVAL); 1111 1112 ie = (eventp != NULL) ? *eventp : NULL; 1113 1114 if (ie != NULL) { 1115 if (!(ie->ie_flags & IE_SOFT)) 1116 return (EINVAL); 1117 } else { 1118 error = intr_event_create(&ie, NULL, IE_SOFT, 0, 1119 NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri); 1120 if (error) 1121 return (error); 1122 if (eventp != NULL) 1123 *eventp = ie; 1124 } 1125 error = intr_event_add_handler(ie, name, NULL, handler, arg, 1126 PI_SWI(pri), flags, cookiep); 1127 return (error); 1128 } 1129 1130 /* 1131 * Schedule a software interrupt thread. 1132 */ 1133 void 1134 swi_sched(void *cookie, int flags) 1135 { 1136 struct intr_handler *ih = (struct intr_handler *)cookie; 1137 struct intr_event *ie = ih->ih_event; 1138 struct intr_entropy entropy; 1139 int error; 1140 1141 CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name, 1142 ih->ih_need); 1143 1144 if (harvest.swi) { 1145 CTR2(KTR_INTR, "swi_sched: pid %d (%s) gathering entropy", 1146 curproc->p_pid, curthread->td_name); 1147 entropy.event = (uintptr_t)ih; 1148 entropy.td = curthread; 1149 random_harvest(&entropy, sizeof(entropy), 1, 0, 1150 RANDOM_INTERRUPT); 1151 } 1152 1153 /* 1154 * Set ih_need for this handler so that if the ithread is already 1155 * running it will execute this handler on the next pass. Otherwise, 1156 * it will execute it the next time it runs. 1157 */ 1158 atomic_store_rel_int(&ih->ih_need, 1); 1159 1160 if (!(flags & SWI_DELAY)) { 1161 PCPU_INC(cnt.v_soft); 1162 #ifdef INTR_FILTER 1163 error = intr_event_schedule_thread(ie, ie->ie_thread); 1164 #else 1165 error = intr_event_schedule_thread(ie); 1166 #endif 1167 KASSERT(error == 0, ("stray software interrupt")); 1168 } 1169 } 1170 1171 /* 1172 * Remove a software interrupt handler. Currently this code does not 1173 * remove the associated interrupt event if it becomes empty. Calling code 1174 * may do so manually via intr_event_destroy(), but that's not really 1175 * an optimal interface. 1176 */ 1177 int 1178 swi_remove(void *cookie) 1179 { 1180 1181 return (intr_event_remove_handler(cookie)); 1182 } 1183 1184 #ifdef INTR_FILTER 1185 static void 1186 priv_ithread_execute_handler(struct proc *p, struct intr_handler *ih) 1187 { 1188 struct intr_event *ie; 1189 1190 ie = ih->ih_event; 1191 /* 1192 * If this handler is marked for death, remove it from 1193 * the list of handlers and wake up the sleeper. 1194 */ 1195 if (ih->ih_flags & IH_DEAD) { 1196 mtx_lock(&ie->ie_lock); 1197 TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next); 1198 ih->ih_flags &= ~IH_DEAD; 1199 wakeup(ih); 1200 mtx_unlock(&ie->ie_lock); 1201 return; 1202 } 1203 1204 /* Execute this handler. */ 1205 CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x", 1206 __func__, p->p_pid, (void *)ih->ih_handler, ih->ih_argument, 1207 ih->ih_name, ih->ih_flags); 1208 1209 if (!(ih->ih_flags & IH_MPSAFE)) 1210 mtx_lock(&Giant); 1211 ih->ih_handler(ih->ih_argument); 1212 if (!(ih->ih_flags & IH_MPSAFE)) 1213 mtx_unlock(&Giant); 1214 } 1215 #endif 1216 1217 /* 1218 * This is a public function for use by drivers that mux interrupt 1219 * handlers for child devices from their interrupt handler. 1220 */ 1221 void 1222 intr_event_execute_handlers(struct proc *p, struct intr_event *ie) 1223 { 1224 struct intr_handler *ih, *ihn; 1225 1226 TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) { 1227 /* 1228 * If this handler is marked for death, remove it from 1229 * the list of handlers and wake up the sleeper. 1230 */ 1231 if (ih->ih_flags & IH_DEAD) { 1232 mtx_lock(&ie->ie_lock); 1233 TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next); 1234 ih->ih_flags &= ~IH_DEAD; 1235 wakeup(ih); 1236 mtx_unlock(&ie->ie_lock); 1237 continue; 1238 } 1239 1240 /* Skip filter only handlers */ 1241 if (ih->ih_handler == NULL) 1242 continue; 1243 1244 /* 1245 * For software interrupt threads, we only execute 1246 * handlers that have their need flag set. Hardware 1247 * interrupt threads always invoke all of their handlers. 1248 */ 1249 if (ie->ie_flags & IE_SOFT) { 1250 if (!ih->ih_need) 1251 continue; 1252 else 1253 atomic_store_rel_int(&ih->ih_need, 0); 1254 } 1255 1256 /* Execute this handler. */ 1257 CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x", 1258 __func__, p->p_pid, (void *)ih->ih_handler, 1259 ih->ih_argument, ih->ih_name, ih->ih_flags); 1260 1261 if (!(ih->ih_flags & IH_MPSAFE)) 1262 mtx_lock(&Giant); 1263 ih->ih_handler(ih->ih_argument); 1264 if (!(ih->ih_flags & IH_MPSAFE)) 1265 mtx_unlock(&Giant); 1266 } 1267 } 1268 1269 static void 1270 ithread_execute_handlers(struct proc *p, struct intr_event *ie) 1271 { 1272 1273 /* Interrupt handlers should not sleep. */ 1274 if (!(ie->ie_flags & IE_SOFT)) 1275 THREAD_NO_SLEEPING(); 1276 intr_event_execute_handlers(p, ie); 1277 if (!(ie->ie_flags & IE_SOFT)) 1278 THREAD_SLEEPING_OK(); 1279 1280 /* 1281 * Interrupt storm handling: 1282 * 1283 * If this interrupt source is currently storming, then throttle 1284 * it to only fire the handler once per clock tick. 1285 * 1286 * If this interrupt source is not currently storming, but the 1287 * number of back to back interrupts exceeds the storm threshold, 1288 * then enter storming mode. 1289 */ 1290 if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold && 1291 !(ie->ie_flags & IE_SOFT)) { 1292 /* Report the message only once every second. */ 1293 if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) { 1294 printf( 1295 "interrupt storm detected on \"%s\"; throttling interrupt source\n", 1296 ie->ie_name); 1297 } 1298 pause("istorm", 1); 1299 } else 1300 ie->ie_count++; 1301 1302 /* 1303 * Now that all the handlers have had a chance to run, reenable 1304 * the interrupt source. 1305 */ 1306 if (ie->ie_post_ithread != NULL) 1307 ie->ie_post_ithread(ie->ie_source); 1308 } 1309 1310 #ifndef INTR_FILTER 1311 /* 1312 * This is the main code for interrupt threads. 1313 */ 1314 static void 1315 ithread_loop(void *arg) 1316 { 1317 struct intr_thread *ithd; 1318 struct intr_event *ie; 1319 struct thread *td; 1320 struct proc *p; 1321 int wake; 1322 1323 td = curthread; 1324 p = td->td_proc; 1325 ithd = (struct intr_thread *)arg; 1326 KASSERT(ithd->it_thread == td, 1327 ("%s: ithread and proc linkage out of sync", __func__)); 1328 ie = ithd->it_event; 1329 ie->ie_count = 0; 1330 wake = 0; 1331 1332 /* 1333 * As long as we have interrupts outstanding, go through the 1334 * list of handlers, giving each one a go at it. 1335 */ 1336 for (;;) { 1337 /* 1338 * If we are an orphaned thread, then just die. 1339 */ 1340 if (ithd->it_flags & IT_DEAD) { 1341 CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__, 1342 p->p_pid, td->td_name); 1343 free(ithd, M_ITHREAD); 1344 kthread_exit(); 1345 } 1346 1347 /* 1348 * Service interrupts. If another interrupt arrives while 1349 * we are running, it will set it_need to note that we 1350 * should make another pass. 1351 */ 1352 while (ithd->it_need) { 1353 /* 1354 * This might need a full read and write barrier 1355 * to make sure that this write posts before any 1356 * of the memory or device accesses in the 1357 * handlers. 1358 */ 1359 atomic_store_rel_int(&ithd->it_need, 0); 1360 ithread_execute_handlers(p, ie); 1361 } 1362 WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread"); 1363 mtx_assert(&Giant, MA_NOTOWNED); 1364 1365 /* 1366 * Processed all our interrupts. Now get the sched 1367 * lock. This may take a while and it_need may get 1368 * set again, so we have to check it again. 1369 */ 1370 thread_lock(td); 1371 if (!ithd->it_need && !(ithd->it_flags & (IT_DEAD | IT_WAIT))) { 1372 TD_SET_IWAIT(td); 1373 ie->ie_count = 0; 1374 mi_switch(SW_VOL | SWT_IWAIT, NULL); 1375 } 1376 if (ithd->it_flags & IT_WAIT) { 1377 wake = 1; 1378 ithd->it_flags &= ~IT_WAIT; 1379 } 1380 thread_unlock(td); 1381 if (wake) { 1382 wakeup(ithd); 1383 wake = 0; 1384 } 1385 } 1386 } 1387 1388 /* 1389 * Main interrupt handling body. 1390 * 1391 * Input: 1392 * o ie: the event connected to this interrupt. 1393 * o frame: some archs (i.e. i386) pass a frame to some. 1394 * handlers as their main argument. 1395 * Return value: 1396 * o 0: everything ok. 1397 * o EINVAL: stray interrupt. 1398 */ 1399 int 1400 intr_event_handle(struct intr_event *ie, struct trapframe *frame) 1401 { 1402 struct intr_handler *ih; 1403 struct trapframe *oldframe; 1404 struct thread *td; 1405 int error, ret, thread; 1406 1407 td = curthread; 1408 1409 /* An interrupt with no event or handlers is a stray interrupt. */ 1410 if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers)) 1411 return (EINVAL); 1412 1413 /* 1414 * Execute fast interrupt handlers directly. 1415 * To support clock handlers, if a handler registers 1416 * with a NULL argument, then we pass it a pointer to 1417 * a trapframe as its argument. 1418 */ 1419 td->td_intr_nesting_level++; 1420 thread = 0; 1421 ret = 0; 1422 critical_enter(); 1423 oldframe = td->td_intr_frame; 1424 td->td_intr_frame = frame; 1425 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 1426 if (ih->ih_filter == NULL) { 1427 thread = 1; 1428 continue; 1429 } 1430 CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__, 1431 ih->ih_filter, ih->ih_argument == NULL ? frame : 1432 ih->ih_argument, ih->ih_name); 1433 if (ih->ih_argument == NULL) 1434 ret = ih->ih_filter(frame); 1435 else 1436 ret = ih->ih_filter(ih->ih_argument); 1437 KASSERT(ret == FILTER_STRAY || 1438 ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 && 1439 (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0), 1440 ("%s: incorrect return value %#x from %s", __func__, ret, 1441 ih->ih_name)); 1442 1443 /* 1444 * Wrapper handler special handling: 1445 * 1446 * in some particular cases (like pccard and pccbb), 1447 * the _real_ device handler is wrapped in a couple of 1448 * functions - a filter wrapper and an ithread wrapper. 1449 * In this case (and just in this case), the filter wrapper 1450 * could ask the system to schedule the ithread and mask 1451 * the interrupt source if the wrapped handler is composed 1452 * of just an ithread handler. 1453 * 1454 * TODO: write a generic wrapper to avoid people rolling 1455 * their own 1456 */ 1457 if (!thread) { 1458 if (ret == FILTER_SCHEDULE_THREAD) 1459 thread = 1; 1460 } 1461 } 1462 td->td_intr_frame = oldframe; 1463 1464 if (thread) { 1465 if (ie->ie_pre_ithread != NULL) 1466 ie->ie_pre_ithread(ie->ie_source); 1467 } else { 1468 if (ie->ie_post_filter != NULL) 1469 ie->ie_post_filter(ie->ie_source); 1470 } 1471 1472 /* Schedule the ithread if needed. */ 1473 if (thread) { 1474 error = intr_event_schedule_thread(ie); 1475 #ifndef XEN 1476 KASSERT(error == 0, ("bad stray interrupt")); 1477 #else 1478 if (error != 0) 1479 log(LOG_WARNING, "bad stray interrupt"); 1480 #endif 1481 } 1482 critical_exit(); 1483 td->td_intr_nesting_level--; 1484 return (0); 1485 } 1486 #else 1487 /* 1488 * This is the main code for interrupt threads. 1489 */ 1490 static void 1491 ithread_loop(void *arg) 1492 { 1493 struct intr_thread *ithd; 1494 struct intr_handler *ih; 1495 struct intr_event *ie; 1496 struct thread *td; 1497 struct proc *p; 1498 int priv; 1499 int wake; 1500 1501 td = curthread; 1502 p = td->td_proc; 1503 ih = (struct intr_handler *)arg; 1504 priv = (ih->ih_thread != NULL) ? 1 : 0; 1505 ithd = (priv) ? ih->ih_thread : ih->ih_event->ie_thread; 1506 KASSERT(ithd->it_thread == td, 1507 ("%s: ithread and proc linkage out of sync", __func__)); 1508 ie = ithd->it_event; 1509 ie->ie_count = 0; 1510 wake = 0; 1511 1512 /* 1513 * As long as we have interrupts outstanding, go through the 1514 * list of handlers, giving each one a go at it. 1515 */ 1516 for (;;) { 1517 /* 1518 * If we are an orphaned thread, then just die. 1519 */ 1520 if (ithd->it_flags & IT_DEAD) { 1521 CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__, 1522 p->p_pid, td->td_name); 1523 free(ithd, M_ITHREAD); 1524 kthread_exit(); 1525 } 1526 1527 /* 1528 * Service interrupts. If another interrupt arrives while 1529 * we are running, it will set it_need to note that we 1530 * should make another pass. 1531 */ 1532 while (ithd->it_need) { 1533 /* 1534 * This might need a full read and write barrier 1535 * to make sure that this write posts before any 1536 * of the memory or device accesses in the 1537 * handlers. 1538 */ 1539 atomic_store_rel_int(&ithd->it_need, 0); 1540 if (priv) 1541 priv_ithread_execute_handler(p, ih); 1542 else 1543 ithread_execute_handlers(p, ie); 1544 } 1545 WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread"); 1546 mtx_assert(&Giant, MA_NOTOWNED); 1547 1548 /* 1549 * Processed all our interrupts. Now get the sched 1550 * lock. This may take a while and it_need may get 1551 * set again, so we have to check it again. 1552 */ 1553 thread_lock(td); 1554 if (!ithd->it_need && !(ithd->it_flags & (IT_DEAD | IT_WAIT))) { 1555 TD_SET_IWAIT(td); 1556 ie->ie_count = 0; 1557 mi_switch(SW_VOL | SWT_IWAIT, NULL); 1558 } 1559 if (ithd->it_flags & IT_WAIT) { 1560 wake = 1; 1561 ithd->it_flags &= ~IT_WAIT; 1562 } 1563 thread_unlock(td); 1564 if (wake) { 1565 wakeup(ithd); 1566 wake = 0; 1567 } 1568 } 1569 } 1570 1571 /* 1572 * Main loop for interrupt filter. 1573 * 1574 * Some architectures (i386, amd64 and arm) require the optional frame 1575 * parameter, and use it as the main argument for fast handler execution 1576 * when ih_argument == NULL. 1577 * 1578 * Return value: 1579 * o FILTER_STRAY: No filter recognized the event, and no 1580 * filter-less handler is registered on this 1581 * line. 1582 * o FILTER_HANDLED: A filter claimed the event and served it. 1583 * o FILTER_SCHEDULE_THREAD: No filter claimed the event, but there's at 1584 * least one filter-less handler on this line. 1585 * o FILTER_HANDLED | 1586 * FILTER_SCHEDULE_THREAD: A filter claimed the event, and asked for 1587 * scheduling the per-handler ithread. 1588 * 1589 * In case an ithread has to be scheduled, in *ithd there will be a 1590 * pointer to a struct intr_thread containing the thread to be 1591 * scheduled. 1592 */ 1593 1594 static int 1595 intr_filter_loop(struct intr_event *ie, struct trapframe *frame, 1596 struct intr_thread **ithd) 1597 { 1598 struct intr_handler *ih; 1599 void *arg; 1600 int ret, thread_only; 1601 1602 ret = 0; 1603 thread_only = 0; 1604 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 1605 /* 1606 * Execute fast interrupt handlers directly. 1607 * To support clock handlers, if a handler registers 1608 * with a NULL argument, then we pass it a pointer to 1609 * a trapframe as its argument. 1610 */ 1611 arg = ((ih->ih_argument == NULL) ? frame : ih->ih_argument); 1612 1613 CTR5(KTR_INTR, "%s: exec %p/%p(%p) for %s", __func__, 1614 ih->ih_filter, ih->ih_handler, arg, ih->ih_name); 1615 1616 if (ih->ih_filter != NULL) 1617 ret = ih->ih_filter(arg); 1618 else { 1619 thread_only = 1; 1620 continue; 1621 } 1622 KASSERT(ret == FILTER_STRAY || 1623 ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 && 1624 (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0), 1625 ("%s: incorrect return value %#x from %s", __func__, ret, 1626 ih->ih_name)); 1627 if (ret & FILTER_STRAY) 1628 continue; 1629 else { 1630 *ithd = ih->ih_thread; 1631 return (ret); 1632 } 1633 } 1634 1635 /* 1636 * No filters handled the interrupt and we have at least 1637 * one handler without a filter. In this case, we schedule 1638 * all of the filter-less handlers to run in the ithread. 1639 */ 1640 if (thread_only) { 1641 *ithd = ie->ie_thread; 1642 return (FILTER_SCHEDULE_THREAD); 1643 } 1644 return (FILTER_STRAY); 1645 } 1646 1647 /* 1648 * Main interrupt handling body. 1649 * 1650 * Input: 1651 * o ie: the event connected to this interrupt. 1652 * o frame: some archs (i.e. i386) pass a frame to some. 1653 * handlers as their main argument. 1654 * Return value: 1655 * o 0: everything ok. 1656 * o EINVAL: stray interrupt. 1657 */ 1658 int 1659 intr_event_handle(struct intr_event *ie, struct trapframe *frame) 1660 { 1661 struct intr_thread *ithd; 1662 struct trapframe *oldframe; 1663 struct thread *td; 1664 int thread; 1665 1666 ithd = NULL; 1667 td = curthread; 1668 1669 if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers)) 1670 return (EINVAL); 1671 1672 td->td_intr_nesting_level++; 1673 thread = 0; 1674 critical_enter(); 1675 oldframe = td->td_intr_frame; 1676 td->td_intr_frame = frame; 1677 thread = intr_filter_loop(ie, frame, &ithd); 1678 if (thread & FILTER_HANDLED) { 1679 if (ie->ie_post_filter != NULL) 1680 ie->ie_post_filter(ie->ie_source); 1681 } else { 1682 if (ie->ie_pre_ithread != NULL) 1683 ie->ie_pre_ithread(ie->ie_source); 1684 } 1685 td->td_intr_frame = oldframe; 1686 critical_exit(); 1687 1688 /* Interrupt storm logic */ 1689 if (thread & FILTER_STRAY) { 1690 ie->ie_count++; 1691 if (ie->ie_count < intr_storm_threshold) 1692 printf("Interrupt stray detection not present\n"); 1693 } 1694 1695 /* Schedule an ithread if needed. */ 1696 if (thread & FILTER_SCHEDULE_THREAD) { 1697 if (intr_event_schedule_thread(ie, ithd) != 0) 1698 panic("%s: impossible stray interrupt", __func__); 1699 } 1700 td->td_intr_nesting_level--; 1701 return (0); 1702 } 1703 #endif 1704 1705 #ifdef DDB 1706 /* 1707 * Dump details about an interrupt handler 1708 */ 1709 static void 1710 db_dump_intrhand(struct intr_handler *ih) 1711 { 1712 int comma; 1713 1714 db_printf("\t%-10s ", ih->ih_name); 1715 switch (ih->ih_pri) { 1716 case PI_REALTIME: 1717 db_printf("CLK "); 1718 break; 1719 case PI_AV: 1720 db_printf("AV "); 1721 break; 1722 case PI_TTY: 1723 db_printf("TTY "); 1724 break; 1725 case PI_NET: 1726 db_printf("NET "); 1727 break; 1728 case PI_DISK: 1729 db_printf("DISK"); 1730 break; 1731 case PI_DULL: 1732 db_printf("DULL"); 1733 break; 1734 default: 1735 if (ih->ih_pri >= PI_SOFT) 1736 db_printf("SWI "); 1737 else 1738 db_printf("%4u", ih->ih_pri); 1739 break; 1740 } 1741 db_printf(" "); 1742 if (ih->ih_filter != NULL) { 1743 db_printf("[F]"); 1744 db_printsym((uintptr_t)ih->ih_filter, DB_STGY_PROC); 1745 } 1746 if (ih->ih_handler != NULL) { 1747 if (ih->ih_filter != NULL) 1748 db_printf(","); 1749 db_printf("[H]"); 1750 db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC); 1751 } 1752 db_printf("(%p)", ih->ih_argument); 1753 if (ih->ih_need || 1754 (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD | 1755 IH_MPSAFE)) != 0) { 1756 db_printf(" {"); 1757 comma = 0; 1758 if (ih->ih_flags & IH_EXCLUSIVE) { 1759 if (comma) 1760 db_printf(", "); 1761 db_printf("EXCL"); 1762 comma = 1; 1763 } 1764 if (ih->ih_flags & IH_ENTROPY) { 1765 if (comma) 1766 db_printf(", "); 1767 db_printf("ENTROPY"); 1768 comma = 1; 1769 } 1770 if (ih->ih_flags & IH_DEAD) { 1771 if (comma) 1772 db_printf(", "); 1773 db_printf("DEAD"); 1774 comma = 1; 1775 } 1776 if (ih->ih_flags & IH_MPSAFE) { 1777 if (comma) 1778 db_printf(", "); 1779 db_printf("MPSAFE"); 1780 comma = 1; 1781 } 1782 if (ih->ih_need) { 1783 if (comma) 1784 db_printf(", "); 1785 db_printf("NEED"); 1786 } 1787 db_printf("}"); 1788 } 1789 db_printf("\n"); 1790 } 1791 1792 /* 1793 * Dump details about a event. 1794 */ 1795 void 1796 db_dump_intr_event(struct intr_event *ie, int handlers) 1797 { 1798 struct intr_handler *ih; 1799 struct intr_thread *it; 1800 int comma; 1801 1802 db_printf("%s ", ie->ie_fullname); 1803 it = ie->ie_thread; 1804 if (it != NULL) 1805 db_printf("(pid %d)", it->it_thread->td_proc->p_pid); 1806 else 1807 db_printf("(no thread)"); 1808 if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 || 1809 (it != NULL && it->it_need)) { 1810 db_printf(" {"); 1811 comma = 0; 1812 if (ie->ie_flags & IE_SOFT) { 1813 db_printf("SOFT"); 1814 comma = 1; 1815 } 1816 if (ie->ie_flags & IE_ENTROPY) { 1817 if (comma) 1818 db_printf(", "); 1819 db_printf("ENTROPY"); 1820 comma = 1; 1821 } 1822 if (ie->ie_flags & IE_ADDING_THREAD) { 1823 if (comma) 1824 db_printf(", "); 1825 db_printf("ADDING_THREAD"); 1826 comma = 1; 1827 } 1828 if (it != NULL && it->it_need) { 1829 if (comma) 1830 db_printf(", "); 1831 db_printf("NEED"); 1832 } 1833 db_printf("}"); 1834 } 1835 db_printf("\n"); 1836 1837 if (handlers) 1838 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) 1839 db_dump_intrhand(ih); 1840 } 1841 1842 /* 1843 * Dump data about interrupt handlers 1844 */ 1845 DB_SHOW_COMMAND(intr, db_show_intr) 1846 { 1847 struct intr_event *ie; 1848 int all, verbose; 1849 1850 verbose = strchr(modif, 'v') != NULL; 1851 all = strchr(modif, 'a') != NULL; 1852 TAILQ_FOREACH(ie, &event_list, ie_list) { 1853 if (!all && TAILQ_EMPTY(&ie->ie_handlers)) 1854 continue; 1855 db_dump_intr_event(ie, verbose); 1856 if (db_pager_quit) 1857 break; 1858 } 1859 } 1860 #endif /* DDB */ 1861 1862 /* 1863 * Start standard software interrupt threads 1864 */ 1865 static void 1866 start_softintr(void *dummy) 1867 { 1868 1869 if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih)) 1870 panic("died while creating vm swi ithread"); 1871 } 1872 SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, 1873 NULL); 1874 1875 /* 1876 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt. 1877 * The data for this machine dependent, and the declarations are in machine 1878 * dependent code. The layout of intrnames and intrcnt however is machine 1879 * independent. 1880 * 1881 * We do not know the length of intrcnt and intrnames at compile time, so 1882 * calculate things at run time. 1883 */ 1884 static int 1885 sysctl_intrnames(SYSCTL_HANDLER_ARGS) 1886 { 1887 return (sysctl_handle_opaque(oidp, intrnames, sintrnames, req)); 1888 } 1889 1890 SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD, 1891 NULL, 0, sysctl_intrnames, "", "Interrupt Names"); 1892 1893 static int 1894 sysctl_intrcnt(SYSCTL_HANDLER_ARGS) 1895 { 1896 #ifdef SCTL_MASK32 1897 uint32_t *intrcnt32; 1898 unsigned i; 1899 int error; 1900 1901 if (req->flags & SCTL_MASK32) { 1902 if (!req->oldptr) 1903 return (sysctl_handle_opaque(oidp, NULL, sintrcnt / 2, req)); 1904 intrcnt32 = malloc(sintrcnt / 2, M_TEMP, M_NOWAIT); 1905 if (intrcnt32 == NULL) 1906 return (ENOMEM); 1907 for (i = 0; i < sintrcnt / sizeof (u_long); i++) 1908 intrcnt32[i] = intrcnt[i]; 1909 error = sysctl_handle_opaque(oidp, intrcnt32, sintrcnt / 2, req); 1910 free(intrcnt32, M_TEMP); 1911 return (error); 1912 } 1913 #endif 1914 return (sysctl_handle_opaque(oidp, intrcnt, sintrcnt, req)); 1915 } 1916 1917 SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD, 1918 NULL, 0, sysctl_intrcnt, "", "Interrupt Counts"); 1919 1920 #ifdef DDB 1921 /* 1922 * DDB command to dump the interrupt statistics. 1923 */ 1924 DB_SHOW_COMMAND(intrcnt, db_show_intrcnt) 1925 { 1926 u_long *i; 1927 char *cp; 1928 u_int j; 1929 1930 cp = intrnames; 1931 j = 0; 1932 for (i = intrcnt; j < (sintrcnt / sizeof(u_long)) && !db_pager_quit; 1933 i++, j++) { 1934 if (*cp == '\0') 1935 break; 1936 if (*i != 0) 1937 db_printf("%s\t%lu\n", cp, *i); 1938 cp += strlen(cp) + 1; 1939 } 1940 } 1941 #endif 1942