/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_kstack_usage_prof.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/epoch.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
	struct intr_event *it_event;
	struct thread *it_thread;	/* Kernel thread. */
	int	it_flags;		/* (j) IT_* flags. */
	int	it_need;		/* Needs service. */
};

/* Interrupt thread flags kept in it_flags */
#define	IT_DEAD		0x000001	/* Thread is waiting to exit. */
#define	IT_WAIT		0x000002	/* Thread is waiting for completion. */
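
/*
 * struct intr_entropy (below) is the sample that gets fed to
 * random_harvest_queue() whenever a handler registered with INTR_ENTROPY
 * is scheduled (RANDOM_INTERRUPT) or a SWI is scheduled (RANDOM_SWI).
 */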

struct intr_entropy {
	struct thread *td;
	uintptr_t event;
};

struct intr_event *tty_intr_event;
void	*vm_ih;
struct proc *intrproc;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static int intr_storm_threshold = 0;
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RWTUN,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
static int intr_epoch_batch = 1000;
SYSCTL_INT(_hw, OID_AUTO, intr_epoch_batch, CTLFLAG_RWTUN, &intr_epoch_batch,
    0, "Maximum interrupt handler executions without re-entering epoch(9)");
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);
static struct mtx event_lock;
MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);

static void	intr_event_update(struct intr_event *ie);
static int	intr_event_schedule_thread(struct intr_event *ie);
static struct intr_thread *ithread_create(const char *name);
static void	ithread_destroy(struct intr_thread *ithread);
static void	ithread_execute_handlers(struct proc *p,
		    struct intr_event *ie);
static void	ithread_loop(void *);
static void	ithread_update(struct intr_thread *ithd);
static void	start_softintr(void *);

/* Map an interrupt type to an ithread priority. */
u_char
intr_priority(enum intr_type flags)
{
	u_char pri;

	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
	switch (flags) {
	case INTR_TYPE_TTY:
		pri = PI_TTY;
		break;
	case INTR_TYPE_BIO:
		pri = PI_DISK;
		break;
	case INTR_TYPE_NET:
		pri = PI_NET;
		break;
	case INTR_TYPE_CAM:
		pri = PI_DISK;
		break;
	case INTR_TYPE_AV:
		pri = PI_AV;
		break;
	case INTR_TYPE_CLK:
		pri = PI_REALTIME;
		break;
	case INTR_TYPE_MISC:
		pri = PI_DULL;		/* don't care */
		break;
	default:
		/* We didn't specify an interrupt level. */
		panic("intr_priority: no interrupt type in flags");
	}

	return pri;
}

/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
	struct intr_event *ie;
	struct thread *td;
	u_char pri;

	ie = ithd->it_event;
	td = ithd->it_thread;
	mtx_assert(&ie->ie_lock, MA_OWNED);

	/* Determine the overall priority of this event. */
	if (CK_SLIST_EMPTY(&ie->ie_handlers))
		pri = PRI_MAX_ITHD;
	else
		pri = CK_SLIST_FIRST(&ie->ie_handlers)->ih_pri;

	/* Update name and priority. */
	strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
#ifdef KTR
	sched_clear_tdname(td);
#endif
	thread_lock(td);
	sched_prio(td, pri);
	thread_unlock(td);
}

/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
	struct intr_handler *ih;
	char *last;
	int missed, space, flags;

	/* Start off with no entropy and just the name of the event. */
	mtx_assert(&ie->ie_lock, MA_OWNED);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	flags = 0;
	missed = 0;
	space = 1;

	/* Run through all the handlers updating values. */
	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
		    sizeof(ie->ie_fullname)) {
			strcat(ie->ie_fullname, " ");
			strcat(ie->ie_fullname, ih->ih_name);
			space = 0;
		} else
			missed++;
		flags |= ih->ih_flags;
	}
	ie->ie_hflags = flags;

	/*
	 * If there is only one handler and its name is too long, just copy in
	 * as much of the end of the name (includes the unit number) as will
	 * fit.  Otherwise, we have multiple handlers and not all of the names
	 * will fit.  Add +'s to indicate missing names.  If we run out of room
	 * and still have +'s to add, change the last character from a + to a *.
	 */
	if (missed == 1 && space == 1) {
		ih = CK_SLIST_FIRST(&ie->ie_handlers);
		missed = strlen(ie->ie_fullname) + strlen(ih->ih_name) + 2 -
		    sizeof(ie->ie_fullname);
		strcat(ie->ie_fullname, (missed == 0) ? " " : "-");
		strcat(ie->ie_fullname, &ih->ih_name[missed]);
		missed = 0;
	}
	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
	while (missed-- > 0) {
		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
			if (*last == '+') {
				*last = '*';
				break;
			} else
				*last = '+';
		} else if (space) {
			strcat(ie->ie_fullname, " +");
			space = 0;
		} else
			strcat(ie->ie_fullname, "+");
	}

	/*
	 * If this event has an ithread, update its priority and
	 * name.
	 */
	if (ie->ie_thread != NULL)
		ithread_update(ie->ie_thread);
	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}

int
intr_event_create(struct intr_event **event, void *source, int flags, int irq,
    void (*pre_ithread)(void *), void (*post_ithread)(void *),
    void (*post_filter)(void *), int (*assign_cpu)(void *, int),
    const char *fmt, ...)
{
	struct intr_event *ie;
	va_list ap;

	/* The only valid flag during creation is IE_SOFT. */
	if ((flags & ~IE_SOFT) != 0)
		return (EINVAL);
	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
	ie->ie_source = source;
	ie->ie_pre_ithread = pre_ithread;
	ie->ie_post_ithread = post_ithread;
	ie->ie_post_filter = post_filter;
	ie->ie_assign_cpu = assign_cpu;
	ie->ie_flags = flags;
	ie->ie_irq = irq;
	ie->ie_cpu = NOCPU;
	CK_SLIST_INIT(&ie->ie_handlers);
	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

	va_start(ap, fmt);
	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
	va_end(ap);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	mtx_lock(&event_lock);
	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
	mtx_unlock(&event_lock);
	if (event != NULL)
		*event = ie;
	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
	return (0);
}

/*
 * Bind an interrupt event to the specified CPU.  Note that not all
 * platforms support binding an interrupt to a CPU.  For those
 * platforms this request will fail.  Using a cpu id of NOCPU unbinds
 * the interrupt event.
 */
static int
_intr_event_bind(struct intr_event *ie, int cpu, bool bindirq, bool bindithread)
{
	lwpid_t id;
	int error;

	/* Need a CPU to bind to. */
	if (cpu != NOCPU && CPU_ABSENT(cpu))
		return (EINVAL);

	if (ie->ie_assign_cpu == NULL)
		return (EOPNOTSUPP);

	error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
	if (error)
		return (error);

	/*
	 * If we have any ithreads try to set their mask first to verify
	 * permissions, etc.
	 */
	if (bindithread) {
		mtx_lock(&ie->ie_lock);
		if (ie->ie_thread != NULL) {
			id = ie->ie_thread->it_thread->td_tid;
			mtx_unlock(&ie->ie_lock);
			error = cpuset_setithread(id, cpu);
			if (error)
				return (error);
		} else
			mtx_unlock(&ie->ie_lock);
	}
	if (bindirq)
		error = ie->ie_assign_cpu(ie->ie_source, cpu);
	if (error) {
		if (bindithread) {
			mtx_lock(&ie->ie_lock);
			if (ie->ie_thread != NULL) {
				cpu = ie->ie_cpu;
				id = ie->ie_thread->it_thread->td_tid;
				mtx_unlock(&ie->ie_lock);
				(void)cpuset_setithread(id, cpu);
			} else
				mtx_unlock(&ie->ie_lock);
		}
		return (error);
	}

	if (bindirq) {
		mtx_lock(&ie->ie_lock);
		ie->ie_cpu = cpu;
		mtx_unlock(&ie->ie_lock);
	}

	return (error);
}

/*
 * Bind an interrupt event to the specified CPU.  For supported platforms, any
 * associated ithreads as well as the primary interrupt context will be bound
 * to the specified CPU.
 */
int
intr_event_bind(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, true, true));
}

/*
 * Bind an interrupt event to the specified CPU, but do not bind associated
 * ithreads.
 */
int
intr_event_bind_irqonly(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, true, false));
}

/*
 * Bind an interrupt event's ithread to the specified CPU.
 */
int
intr_event_bind_ithread(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, false, true));
}

/*
 * Bind an interrupt event's ithread to the specified cpuset.
 */
int
intr_event_bind_ithread_cpuset(struct intr_event *ie, cpuset_t *cs)
{
	lwpid_t id;

	mtx_lock(&ie->ie_lock);
	if (ie->ie_thread != NULL) {
		id = ie->ie_thread->it_thread->td_tid;
		mtx_unlock(&ie->ie_lock);
		return (cpuset_setthread(id, cs));
	} else {
		mtx_unlock(&ie->ie_lock);
	}
	return (ENODEV);
}

static struct intr_event *
intr_lookup(int irq)
{
	struct intr_event *ie;

	mtx_lock(&event_lock);
	TAILQ_FOREACH(ie, &event_list, ie_list)
		if (ie->ie_irq == irq &&
		    (ie->ie_flags & IE_SOFT) == 0 &&
		    CK_SLIST_FIRST(&ie->ie_handlers) != NULL)
			break;
	mtx_unlock(&event_lock);
	return (ie);
}

int
intr_setaffinity(int irq, int mode, void *m)
{
	struct intr_event *ie;
	cpuset_t *mask;
	int cpu, n;

	mask = m;
	cpu = NOCPU;
	/*
	 * If we're setting all cpus we can unbind.  Otherwise make sure
	 * only one cpu is in the set.
	 */
	if (CPU_CMP(cpuset_root, mask)) {
		for (n = 0; n < CPU_SETSIZE; n++) {
			if (!CPU_ISSET(n, mask))
				continue;
			if (cpu != NOCPU)
				return (EINVAL);
			cpu = n;
		}
	}
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);
	switch (mode) {
	case CPU_WHICH_IRQ:
		return (intr_event_bind(ie, cpu));
	case CPU_WHICH_INTRHANDLER:
		return (intr_event_bind_irqonly(ie, cpu));
	case CPU_WHICH_ITHREAD:
		return (intr_event_bind_ithread(ie, cpu));
	default:
		return (EINVAL);
	}
}

int
intr_getaffinity(int irq, int mode, void *m)
{
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	cpuset_t *mask;
	lwpid_t id;
	int error;

	mask = m;
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);

	error = 0;
	CPU_ZERO(mask);
	switch (mode) {
	case CPU_WHICH_IRQ:
	case CPU_WHICH_INTRHANDLER:
		mtx_lock(&ie->ie_lock);
		if (ie->ie_cpu == NOCPU)
			CPU_COPY(cpuset_root, mask);
		else
			CPU_SET(ie->ie_cpu, mask);
		mtx_unlock(&ie->ie_lock);
		break;
	case CPU_WHICH_ITHREAD:
		mtx_lock(&ie->ie_lock);
		if (ie->ie_thread == NULL) {
			mtx_unlock(&ie->ie_lock);
			CPU_COPY(cpuset_root, mask);
		} else {
			id = ie->ie_thread->it_thread->td_tid;
			mtx_unlock(&ie->ie_lock);
			error = cpuset_which(CPU_WHICH_TID, id, &p, &td, NULL);
			if (error != 0)
				return (error);
			CPU_COPY(&td->td_cpuset->cs_mask, mask);
			PROC_UNLOCK(p);
		}
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

int
intr_event_destroy(struct intr_event *ie)
{

	mtx_lock(&event_lock);
	mtx_lock(&ie->ie_lock);
	if (!CK_SLIST_EMPTY(&ie->ie_handlers)) {
		mtx_unlock(&ie->ie_lock);
		mtx_unlock(&event_lock);
		return (EBUSY);
	}
	TAILQ_REMOVE(&event_list, ie, ie_list);
#ifndef notyet
	if (ie->ie_thread != NULL) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	mtx_unlock(&event_lock);
	mtx_destroy(&ie->ie_lock);
	free(ie, M_ITHREAD);
	return (0);
}

static struct intr_thread *
ithread_create(const char *name)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
	    &td, RFSTOPPED | RFHIGHPID,
	    0, "intr", "%s", name);
	if (error)
		panic("kproc_create() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}

static void
ithread_destroy(struct intr_thread *ithread)
{
	struct thread *td;

	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
	td = ithread->it_thread;
	thread_lock(td);
	ithread->it_flags |= IT_DEAD;
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else
		thread_unlock(td);
}

int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_handler **prevptr;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;
	if (flags & INTR_TYPE_NET)
		ih->ih_flags |= IH_NET;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!CK_SLIST_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (CK_SLIST_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Create a thread if we need one. */
	while (ie->ie_thread == NULL && handler != NULL) {
		if (ie->ie_flags & IE_ADDING_THREAD)
			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
		else {
			ie->ie_flags |= IE_ADDING_THREAD;
			mtx_unlock(&ie->ie_lock);
			it = ithread_create("intr: newborn");
			mtx_lock(&ie->ie_lock);
			ie->ie_flags &= ~IE_ADDING_THREAD;
			ie->ie_thread = it;
			it->it_event = ie;
			ithread_update(it);
			wakeup(ie);
		}
	}

	/* Add the new handler to the event in priority order. */
	CK_SLIST_FOREACH_PREVPTR(temp_ih, prevptr, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	CK_SLIST_INSERT_PREVPTR(prevptr, temp_ih, ih, ih_next);

	intr_event_update(ie);

	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}

/*
 * Append a description preceded by a ':' to the name of the specified
 * interrupt handler.
 */
int
intr_event_describe_handler(struct intr_event *ie, void *cookie,
    const char *descr)
{
	struct intr_handler *ih;
	size_t space;
	char *start;

	mtx_lock(&ie->ie_lock);
#ifdef INVARIANTS
	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih == cookie)
			break;
	}
	if (ih == NULL) {
		mtx_unlock(&ie->ie_lock);
		panic("handler %p not found in interrupt event %p", cookie, ie);
	}
#endif
	ih = cookie;

	/*
	 * Look for an existing description by checking for an
	 * existing ":".  This assumes device names do not include
	 * colons.  If one is found, prepare to insert the new
	 * description at that point.  If one is not found, find the
	 * end of the name to use as the insertion point.
	 */
	start = strchr(ih->ih_name, ':');
	if (start == NULL)
		start = strchr(ih->ih_name, 0);

	/*
	 * See if there is enough remaining room in the string for the
	 * description + ":".  The "- 1" leaves room for the trailing
	 * '\0'.  The "+ 1" accounts for the colon.
	 */
	space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1;
	if (strlen(descr) + 1 > space) {
		mtx_unlock(&ie->ie_lock);
		return (ENOSPC);
	}

	/* Append a colon followed by the description. */
	*start = ':';
	strcpy(start + 1, descr);
	intr_event_update(ie);
	mtx_unlock(&ie->ie_lock);
	return (0);
}

/*
 * Return the ie_source field from the intr_event an intr_handler is
 * associated with.
 */
void *
intr_handler_source(void *cookie)
{
	struct intr_handler *ih;
	struct intr_event *ie;

	ih = (struct intr_handler *)cookie;
	if (ih == NULL)
		return (NULL);
	ie = ih->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    ih->ih_name));
	return (ie->ie_source);
}

/*
 * If intr_event_handle() is running in the ISR context at the time of the call,
 * then wait for it to complete.
 */
static void
intr_event_barrier(struct intr_event *ie)
{
	int phase;

	mtx_assert(&ie->ie_lock, MA_OWNED);
	phase = ie->ie_phase;

	/*
	 * Switch phase to direct future interrupts to the other active counter.
	 * Make sure that any preceding stores are visible before the switch.
	 */
	KASSERT(ie->ie_active[!phase] == 0, ("idle phase has activity"));
	atomic_store_rel_int(&ie->ie_phase, !phase);

	/*
	 * This code cooperates with wait-free iteration of ie_handlers
	 * in intr_event_handle.
	 * Make sure that the removal and the phase update are not reordered
	 * with the active count check.
	 * Note that no combination of acquire and release fences can provide
	 * that guarantee as Store->Load sequences can always be reordered.
	 */
	atomic_thread_fence_seq_cst();

	/*
	 * Now wait on the inactive phase.
	 * The acquire fence is needed so that all post-barrier accesses
	 * are after the check.
	 */
	while (ie->ie_active[phase] > 0)
		cpu_spinwait();
	atomic_thread_fence_acq();
}

static void
intr_handler_barrier(struct intr_handler *handler)
{
	struct intr_event *ie;

	ie = handler->ih_event;
	mtx_assert(&ie->ie_lock, MA_OWNED);
	KASSERT((handler->ih_flags & IH_DEAD) == 0,
	    ("update for a removed handler"));

	if (ie->ie_thread == NULL) {
		intr_event_barrier(ie);
		return;
	}
	if ((handler->ih_flags & IH_CHANGED) == 0) {
		handler->ih_flags |= IH_CHANGED;
		intr_event_schedule_thread(ie);
	}
	while ((handler->ih_flags & IH_CHANGED) != 0)
		msleep(handler, &ie->ie_lock, 0, "ih_barr", 0);
}

/*
 * Sleep until an ithread finishes executing an interrupt handler.
 *
 * XXX Doesn't currently handle interrupt filters or fast interrupt
 * handlers.  This is intended for compatibility with Linux drivers
 * only.  Do not use in BSD code.
 */
void
_intr_drain(int irq)
{
	struct intr_event *ie;
	struct intr_thread *ithd;
	struct thread *td;

	ie = intr_lookup(irq);
	if (ie == NULL)
		return;
	if (ie->ie_thread == NULL)
		return;
	ithd = ie->ie_thread;
	td = ithd->it_thread;
	/*
	 * We set the flag and wait for it to be cleared to avoid
	 * long delays with potentially busy interrupt handlers
	 * were we to only sample TD_AWAITING_INTR() every tick.
	 */
	thread_lock(td);
	if (!TD_AWAITING_INTR(td)) {
		ithd->it_flags |= IT_WAIT;
		while (ithd->it_flags & IT_WAIT) {
			thread_unlock(td);
			pause("idrain", 1);
			thread_lock(td);
		}
	}
	thread_unlock(td);
	return;
}

int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
	struct intr_handler *ih;
	struct intr_handler **prevptr;
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));

	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
	CK_SLIST_FOREACH_PREVPTR(ih, prevptr, &ie->ie_handlers, ih_next) {
		if (ih == handler)
			break;
	}
	if (ih == NULL) {
		panic("interrupt handler \"%s\" not found in "
		    "interrupt event \"%s\"", handler->ih_name, ie->ie_name);
	}

	/*
	 * If there is no ithread, then directly remove the handler.  Note that
	 * intr_event_handle() iterates ie_handlers in a lock-less fashion, so
	 * care needs to be taken to keep ie_handlers consistent and to free
	 * the removed handler only when ie_handlers is quiescent.
	 */
	if (ie->ie_thread == NULL) {
		CK_SLIST_REMOVE_PREVPTR(prevptr, ih, ih_next);
		intr_event_barrier(ie);
		intr_event_update(ie);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/*
	 * Let the interrupt thread do the job.
	 * The interrupt source is disabled when the interrupt thread is
	 * running, so it does not have to worry about interaction with
	 * intr_event_handle().
	 */
	KASSERT((handler->ih_flags & IH_DEAD) == 0,
	    ("duplicate handle remove"));
	handler->ih_flags |= IH_DEAD;
	intr_event_schedule_thread(ie);
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	intr_event_update(ie);

#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(8).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih->ih_handler != NULL) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}

int
intr_event_suspend_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	handler->ih_flags |= IH_SUSP;
	intr_handler_barrier(handler);
	mtx_unlock(&ie->ie_lock);
	return (0);
}

int
intr_event_resume_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));

	/*
	 * intr_handler_barrier() acts not only as a barrier; it also allows
	 * checking for any pending interrupts.
	 */
	mtx_lock(&ie->ie_lock);
	handler->ih_flags &= ~IH_SUSP;
	intr_handler_barrier(handler);
	mtx_unlock(&ie->ie_lock);
	return (0);
}

static int
intr_event_schedule_thread(struct intr_event *ie)
{
	struct intr_entropy entropy;
	struct intr_thread *it;
	struct thread *td;
	struct thread *ctd;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers) ||
	    ie->ie_thread == NULL)
		return (EINVAL);

	ctd = curthread;
	it = ie->ie_thread;
	td = it->it_thread;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (ie->ie_hflags & IH_ENTROPY) {
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest_queue(&entropy, sizeof(entropy), RANDOM_INTERRUPT);
	}

	KASSERT(td->td_proc != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 *
	 * Use store_rel to arrange that the store to ih_need in
	 * swi_sched() is before the store to it_need and prepare for
	 * transfer of this order to loads in the ithread.
	 */
	atomic_store_rel_int(&it->it_need, 1);
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, td->td_proc->p_pid,
		    td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, td->td_proc->p_pid, td->td_name, it->it_need, td->td_state);
		thread_unlock(td);
	}

	return (0);
}

/*
 * Allow interrupt event binding for software interrupt handlers -- a no-op,
 * since interrupts are generated in software rather than being directed by
 * a PIC.
 */
static int
swi_assign_cpu(void *arg, int cpu)
{

	return (0);
}

/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
    void *arg, int pri, enum intr_type flags, void **cookiep)
{
	struct intr_event *ie;
	int error;

	if (flags & INTR_ENTROPY)
		return (EINVAL);

	ie = (eventp != NULL) ? *eventp : NULL;

	if (ie != NULL) {
		if (!(ie->ie_flags & IE_SOFT))
			return (EINVAL);
	} else {
		error = intr_event_create(&ie, NULL, IE_SOFT, 0,
		    NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
		if (error)
			return (error);
		if (eventp != NULL)
			*eventp = ie;
	}
	error = intr_event_add_handler(ie, name, NULL, handler, arg,
	    PI_SWI(pri), flags, cookiep);
	return (error);
}
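
/*
 * Illustrative sketch only (not part of the interfaces defined here): a
 * typical consumer registers a handler once with swi_add() and later defers
 * work to it with swi_sched().  The names below are hypothetical and the
 * priority is just an example:
 *
 *	static void *foo_ih;			// hypothetical cookie
 *
 *	static void
 *	foo_swi(void *arg)
 *	{
 *		// deferred work runs in the SWI ithread
 *	}
 *
 *	// at initialization time:
 *	swi_add(NULL, "foo", foo_swi, NULL, SWI_TQ, INTR_MPSAFE, &foo_ih);
 *
 *	// when work becomes pending:
 *	swi_sched(foo_ih, 0);
 */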

/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
	struct intr_handler *ih = (struct intr_handler *)cookie;
	struct intr_event *ie = ih->ih_event;
	struct intr_entropy entropy;
	int error __unused;

	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
	    ih->ih_need);

	entropy.event = (uintptr_t)ih;
	entropy.td = curthread;
	random_harvest_queue(&entropy, sizeof(entropy), RANDOM_SWI);

	/*
	 * Set ih_need for this handler so that if the ithread is already
	 * running it will execute this handler on the next pass.  Otherwise,
	 * it will execute it the next time it runs.
	 */
	ih->ih_need = 1;

	if (!(flags & SWI_DELAY)) {
		VM_CNT_INC(v_soft);
		error = intr_event_schedule_thread(ie);
		KASSERT(error == 0, ("stray software interrupt"));
	}
}

/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

	return (intr_event_remove_handler(cookie));
}

static void
intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
{
	struct intr_handler *ih, *ihn, *ihp;

	ihp = NULL;
	CK_SLIST_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
		/*
		 * If this handler is marked for death, remove it from
		 * the list of handlers and wake up the sleeper.
		 */
		if (ih->ih_flags & IH_DEAD) {
			mtx_lock(&ie->ie_lock);
			if (ihp == NULL)
				CK_SLIST_REMOVE_HEAD(&ie->ie_handlers, ih_next);
			else
				CK_SLIST_REMOVE_AFTER(ihp, ih_next);
			ih->ih_flags &= ~IH_DEAD;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
			continue;
		}

		/*
		 * Now that we know that the current element won't be removed
		 * update the previous element.
		 */
		ihp = ih;

		if ((ih->ih_flags & IH_CHANGED) != 0) {
			mtx_lock(&ie->ie_lock);
			ih->ih_flags &= ~IH_CHANGED;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
		}

		/* Skip filter only handlers */
		if (ih->ih_handler == NULL)
			continue;

		/* Skip suspended handlers */
		if ((ih->ih_flags & IH_SUSP) != 0)
			continue;

		/*
		 * For software interrupt threads, we only execute
		 * handlers that have their need flag set.  Hardware
		 * interrupt threads always invoke all of their handlers.
		 *
		 * ih_need can only be 0 or 1.  Failed cmpset below
		 * means that there is no request to execute handlers,
		 * so a retry of the cmpset is not needed.
		 */
		if ((ie->ie_flags & IE_SOFT) != 0 &&
		    atomic_cmpset_int(&ih->ih_need, 1, 0) == 0)
			continue;

		/* Execute this handler. */
		CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
		    __func__, p->p_pid, (void *)ih->ih_handler,
		    ih->ih_argument, ih->ih_name, ih->ih_flags);

		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_lock(&Giant);
		ih->ih_handler(ih->ih_argument);
		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_unlock(&Giant);
	}
}

static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{

	/* Interrupt handlers should not sleep. */
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_NO_SLEEPING();
	intr_event_execute_handlers(p, ie);
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_SLEEPING_OK();

	/*
	 * Interrupt storm handling:
	 *
	 * If this interrupt source is currently storming, then throttle
	 * it to only fire the handler once per clock tick.
	 *
	 * If this interrupt source is not currently storming, but the
	 * number of back to back interrupts exceeds the storm threshold,
	 * then enter storming mode.
	 */
	if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
	    !(ie->ie_flags & IE_SOFT)) {
		/* Report the message only once every second. */
		if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
			printf(
	"interrupt storm detected on \"%s\"; throttling interrupt source\n",
			    ie->ie_name);
		}
		pause("istorm", 1);
	} else
		ie->ie_count++;

	/*
	 * Now that all the handlers have had a chance to run, reenable
	 * the interrupt source.
	 */
	if (ie->ie_post_ithread != NULL)
		ie->ie_post_ithread(ie->ie_source);
}

/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct epoch_tracker et;
	struct intr_thread *ithd;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	int wake, epoch_count;
	bool needs_epoch;

	td = curthread;
	p = td->td_proc;
	ithd = (struct intr_thread *)arg;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;
	wake = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 *
		 * The load_acq part of the following cmpset ensures
		 * that the load of ih_need in ithread_execute_handlers()
		 * is ordered after the load of it_need here.
		 */
		needs_epoch =
		    (atomic_load_int(&ie->ie_hflags) & IH_NET) != 0;
		if (needs_epoch) {
			epoch_count = 0;
			NET_EPOCH_ENTER(et);
		}
		while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0) {
			ithread_execute_handlers(p, ie);
			if (needs_epoch &&
			    ++epoch_count >= intr_epoch_batch) {
				NET_EPOCH_EXIT(et);
				epoch_count = 0;
				NET_EPOCH_ENTER(et);
			}
		}
		if (needs_epoch)
			NET_EPOCH_EXIT(et);
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (atomic_load_acq_int(&ithd->it_need) == 0 &&
		    (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL | SWT_IWAIT);
		} else {
			if (ithd->it_flags & IT_WAIT) {
				wake = 1;
				ithd->it_flags &= ~IT_WAIT;
			}
			thread_unlock(td);
		}
		if (wake) {
			wakeup(ithd);
			wake = 0;
		}
	}
}

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:			the event connected to this interrupt.
 * o frame:			some archs (i.e. i386) pass a frame to some
 *				handlers as their main argument.
 * Return value:
 * o 0:				everything ok.
 * o EINVAL:			stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_handler *ih;
	struct trapframe *oldframe;
	struct thread *td;
	int phase;
	int ret;
	bool filter, thread;

	td = curthread;

#ifdef KSTACK_USAGE_PROF
	intr_prof_stack_use(td, frame);
#endif

	/* An interrupt with no event or handlers is a stray interrupt. */
	if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	/*
	 * Execute fast interrupt handlers directly.
	 * To support clock handlers, if a handler registers
	 * with a NULL argument, then we pass it a pointer to
	 * a trapframe as its argument.
	 */
	td->td_intr_nesting_level++;
	filter = false;
	thread = false;
	ret = 0;
	critical_enter();
	oldframe = td->td_intr_frame;
	td->td_intr_frame = frame;

	phase = ie->ie_phase;
	atomic_add_int(&ie->ie_active[phase], 1);

	/*
	 * This fence is required to ensure that no later loads are
	 * re-ordered before the ie_active store.
	 */
	atomic_thread_fence_seq_cst();

	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if ((ih->ih_flags & IH_SUSP) != 0)
			continue;
		if (ih->ih_filter == NULL) {
			thread = true;
			continue;
		}
		CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
		    ih->ih_filter, ih->ih_argument == NULL ? frame :
		    ih->ih_argument, ih->ih_name);
		if (ih->ih_argument == NULL)
			ret = ih->ih_filter(frame);
		else
			ret = ih->ih_filter(ih->ih_argument);
		KASSERT(ret == FILTER_STRAY ||
		    ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
		    (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
		    ("%s: incorrect return value %#x from %s", __func__, ret,
		    ih->ih_name));
		filter = filter || ret == FILTER_HANDLED;

		/*
		 * Wrapper handler special handling:
		 *
		 * in some particular cases (like pccard and pccbb),
		 * the _real_ device handler is wrapped in a couple of
		 * functions - a filter wrapper and an ithread wrapper.
		 * In this case (and just in this case), the filter wrapper
		 * could ask the system to schedule the ithread and mask
		 * the interrupt source if the wrapped handler is composed
		 * of just an ithread handler.
		 *
		 * TODO: write a generic wrapper to avoid people rolling
		 * their own.
		 */
		if (!thread) {
			if (ret == FILTER_SCHEDULE_THREAD)
				thread = true;
		}
	}
	atomic_add_rel_int(&ie->ie_active[phase], -1);

	td->td_intr_frame = oldframe;

	if (thread) {
		if (ie->ie_pre_ithread != NULL)
			ie->ie_pre_ithread(ie->ie_source);
	} else {
		if (ie->ie_post_filter != NULL)
			ie->ie_post_filter(ie->ie_source);
	}

	/* Schedule the ithread if needed. */
	if (thread) {
		int error __unused;

		error = intr_event_schedule_thread(ie);
		KASSERT(error == 0, ("bad stray interrupt"));
	}
	critical_exit();
	td->td_intr_nesting_level--;
#ifdef notyet
	/* The interrupt is not acknowledged by any filter and has no ithread. */
	if (!thread && !filter)
		return (EINVAL);
#endif
	return (0);
}

#ifdef DDB
/*
 * Dump details about an interrupt handler
 */
static void
db_dump_intrhand(struct intr_handler *ih)
{
	int comma;

	db_printf("\t%-10s ", ih->ih_name);
	switch (ih->ih_pri) {
	case PI_REALTIME:
		db_printf("CLK ");
		break;
	case PI_AV:
		db_printf("AV ");
		break;
	case PI_TTY:
		db_printf("TTY ");
		break;
	case PI_NET:
		db_printf("NET ");
		break;
	case PI_DISK:
		db_printf("DISK");
		break;
	case PI_DULL:
		db_printf("DULL");
		break;
	default:
		if (ih->ih_pri >= PI_SOFT)
			db_printf("SWI ");
		else
			db_printf("%4u", ih->ih_pri);
		break;
	}
	db_printf(" ");
	if (ih->ih_filter != NULL) {
		db_printf("[F]");
		db_printsym((uintptr_t)ih->ih_filter, DB_STGY_PROC);
	}
	if (ih->ih_handler != NULL) {
		if (ih->ih_filter != NULL)
			db_printf(",");
		db_printf("[H]");
		db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
	}
	db_printf("(%p)", ih->ih_argument);
	if (ih->ih_need ||
	    (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
	    IH_MPSAFE)) != 0) {
		db_printf(" {");
		comma = 0;
		if (ih->ih_flags & IH_EXCLUSIVE) {
			if (comma)
				db_printf(", ");
			db_printf("EXCL");
			comma = 1;
		}
		if (ih->ih_flags & IH_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ih->ih_flags & IH_DEAD) {
			if (comma)
				db_printf(", ");
			db_printf("DEAD");
			comma = 1;
		}
		if (ih->ih_flags & IH_MPSAFE) {
			if (comma)
				db_printf(", ");
			db_printf("MPSAFE");
			comma = 1;
		}
		if (ih->ih_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");
}

/*
 * Dump details about an event.
 */
void
db_dump_intr_event(struct intr_event *ie, int handlers)
{
	struct intr_handler *ih;
	struct intr_thread *it;
	int comma;

	db_printf("%s ", ie->ie_fullname);
	it = ie->ie_thread;
	if (it != NULL)
		db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
	else
		db_printf("(no thread)");
	if ((ie->ie_flags & (IE_SOFT | IE_ADDING_THREAD)) != 0 ||
	    (it != NULL && it->it_need)) {
		db_printf(" {");
		comma = 0;
		if (ie->ie_flags & IE_SOFT) {
			db_printf("SOFT");
			comma = 1;
		}
		if (ie->ie_flags & IE_ADDING_THREAD) {
			if (comma)
				db_printf(", ");
			db_printf("ADDING_THREAD");
			comma = 1;
		}
		if (it != NULL && it->it_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");

	if (handlers)
		CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next)
			db_dump_intrhand(ih);
}

/*
 * Dump data about interrupt handlers
 */
DB_SHOW_COMMAND(intr, db_show_intr)
{
	struct intr_event *ie;
	int all, verbose;

	verbose = strchr(modif, 'v') != NULL;
	all = strchr(modif, 'a') != NULL;
	TAILQ_FOREACH(ie, &event_list, ie_list) {
		if (!all && CK_SLIST_EMPTY(&ie->ie_handlers))
			continue;
		db_dump_intr_event(ie, verbose);
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */

/*
 * Start standard software interrupt threads
 */
static void
start_softintr(void *dummy)
{

	if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
		panic("died while creating vm swi ithread");
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
    NULL);

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in
 * machine dependent code.  The layout of intrnames and intrcnt however is
 * machine independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrnames, sintrnames, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames,
    CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_NEEDGIANT, NULL, 0,
    sysctl_intrnames, "",
    "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
#ifdef SCTL_MASK32
	uint32_t *intrcnt32;
	unsigned i;
	int error;

	if (req->flags & SCTL_MASK32) {
		if (!req->oldptr)
			return (sysctl_handle_opaque(oidp, NULL, sintrcnt / 2, req));
		intrcnt32 = malloc(sintrcnt / 2, M_TEMP, M_NOWAIT);
		if (intrcnt32 == NULL)
			return (ENOMEM);
		for (i = 0; i < sintrcnt / sizeof (u_long); i++)
			intrcnt32[i] = intrcnt[i];
		error = sysctl_handle_opaque(oidp, intrcnt32, sintrcnt / 2, req);
		free(intrcnt32, M_TEMP);
		return (error);
	}
#endif
	return (sysctl_handle_opaque(oidp, intrcnt, sintrcnt, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt,
    CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_NEEDGIANT, NULL, 0,
    sysctl_intrcnt, "",
    "Interrupt Counts");

#ifdef DDB
/*
 * DDB command to dump the interrupt statistics.
 */
DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
{
	u_long *i;
	char *cp;
	u_int j;

	cp = intrnames;
	j = 0;
	for (i = intrcnt; j < (sintrcnt / sizeof(u_long)) && !db_pager_quit;
	    i++, j++) {
		if (*cp == '\0')
			break;
		if (*i != 0)
			db_printf("%s\t%lu\n", cp, *i);
		cp += strlen(cp) + 1;
	}
}
#endif