/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_kstack_usage_prof.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
	struct intr_event *it_event;
	struct thread *it_thread;	/* Kernel thread. */
	int	it_flags;		/* (j) IT_* flags. */
	int	it_need;		/* Needs service. */
};

/* Interrupt thread flags kept in it_flags */
#define	IT_DEAD		0x000001	/* Thread is waiting to exit. */
#define	IT_WAIT		0x000002	/* Thread is waiting for completion. */

struct intr_entropy {
	struct thread *td;
	uintptr_t event;
};

struct intr_event *tty_intr_event;
void	*vm_ih;
struct proc *intrproc;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static int intr_storm_threshold = 0;
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RWTUN,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);
static struct mtx event_lock;
MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);

static void	intr_event_update(struct intr_event *ie);
static int	intr_event_schedule_thread(struct intr_event *ie);
static struct intr_thread *ithread_create(const char *name);
static void	ithread_destroy(struct intr_thread *ithread);
static void	ithread_execute_handlers(struct proc *p,
		    struct intr_event *ie);
static void	ithread_loop(void *);
static void	ithread_update(struct intr_thread *ithd);
static void	start_softintr(void *);

/* Map an interrupt type to an ithread priority. */
u_char
intr_priority(enum intr_type flags)
{
	u_char pri;

	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
	switch (flags) {
	case INTR_TYPE_TTY:
		pri = PI_TTY;
		break;
	case INTR_TYPE_BIO:
		pri = PI_DISK;
		break;
	case INTR_TYPE_NET:
		pri = PI_NET;
		break;
	case INTR_TYPE_CAM:
		pri = PI_DISK;
		break;
	case INTR_TYPE_AV:
		pri = PI_AV;
		break;
	case INTR_TYPE_CLK:
		pri = PI_REALTIME;
		break;
	case INTR_TYPE_MISC:
		pri = PI_DULL;		/* don't care */
		break;
	default:
		/* We didn't specify an interrupt level. */
		panic("intr_priority: no interrupt type in flags");
	}

	return pri;
}

/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
	struct intr_event *ie;
	struct thread *td;
	u_char pri;

	ie = ithd->it_event;
	td = ithd->it_thread;
	mtx_assert(&ie->ie_lock, MA_OWNED);

	/* Determine the overall priority of this event. */
	if (CK_SLIST_EMPTY(&ie->ie_handlers))
		pri = PRI_MAX_ITHD;
	else
		pri = CK_SLIST_FIRST(&ie->ie_handlers)->ih_pri;

	/* Update name and priority. */
	strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
#ifdef KTR
	sched_clear_tdname(td);
#endif
	thread_lock(td);
	sched_prio(td, pri);
	thread_unlock(td);
}

/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
	struct intr_handler *ih;
	char *last;
	int missed, space;

	/* Start off with no entropy and just the name of the event. */
	mtx_assert(&ie->ie_lock, MA_OWNED);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	ie->ie_flags &= ~IE_ENTROPY;
	missed = 0;
	space = 1;

	/* Run through all the handlers updating values. */
	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
		    sizeof(ie->ie_fullname)) {
			strcat(ie->ie_fullname, " ");
			strcat(ie->ie_fullname, ih->ih_name);
			space = 0;
		} else
			missed++;
		if (ih->ih_flags & IH_ENTROPY)
			ie->ie_flags |= IE_ENTROPY;
	}

	/*
	 * If there is only one handler and its name is too long, just copy in
	 * as much of the end of the name (includes the unit number) as will
	 * fit.  Otherwise, we have multiple handlers and not all of the names
	 * will fit.  Add +'s to indicate missing names.  If we run out of room
	 * and still have +'s to add, change the last character from a + to a *.
	 */
	if (missed == 1 && space == 1) {
		ih = CK_SLIST_FIRST(&ie->ie_handlers);
		missed = strlen(ie->ie_fullname) + strlen(ih->ih_name) + 2 -
		    sizeof(ie->ie_fullname);
		strcat(ie->ie_fullname, (missed == 0) ? " " : "-");
		strcat(ie->ie_fullname, &ih->ih_name[missed]);
		missed = 0;
	}
	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
	while (missed-- > 0) {
		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
			if (*last == '+') {
				*last = '*';
				break;
			} else
				*last = '+';
		} else if (space) {
			strcat(ie->ie_fullname, " +");
			space = 0;
		} else
			strcat(ie->ie_fullname, "+");
	}

	/*
	 * If this event has an ithread, update its priority and
	 * name.
	 */
	if (ie->ie_thread != NULL)
		ithread_update(ie->ie_thread);
	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}

int
intr_event_create(struct intr_event **event, void *source, int flags, int irq,
    void (*pre_ithread)(void *), void (*post_ithread)(void *),
    void (*post_filter)(void *), int (*assign_cpu)(void *, int),
    const char *fmt, ...)
{
	struct intr_event *ie;
	va_list ap;

	/* The only valid flag during creation is IE_SOFT. */
	if ((flags & ~IE_SOFT) != 0)
		return (EINVAL);
	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
	ie->ie_source = source;
	ie->ie_pre_ithread = pre_ithread;
	ie->ie_post_ithread = post_ithread;
	ie->ie_post_filter = post_filter;
	ie->ie_assign_cpu = assign_cpu;
	ie->ie_flags = flags;
	ie->ie_irq = irq;
	ie->ie_cpu = NOCPU;
	CK_SLIST_INIT(&ie->ie_handlers);
	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

	va_start(ap, fmt);
	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
	va_end(ap);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	mtx_lock(&event_lock);
	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
	mtx_unlock(&event_lock);
	if (event != NULL)
		*event = ie;
	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
	return (0);
}
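
/*
 * Illustrative sketch (not part of this file): interrupt-controller code
 * typically creates one event per IRQ with intr_event_create(), and driver
 * registrations arriving through bus_setup_intr(9) eventually reach
 * intr_event_add_handler() below.  All names here -- isrc, irq, dev, sc and
 * the my_* callbacks -- are hypothetical placeholders:
 *
 *	struct intr_event *ie;
 *	void *cookie;
 *	int error;
 *
 *	error = intr_event_create(&ie, isrc, 0, irq,
 *	    my_pre_ithread, my_post_ithread, my_post_filter,
 *	    my_assign_cpu, "irq%d:", irq);
 *	if (error == 0)
 *		error = intr_event_add_handler(ie, device_get_nameunit(dev),
 *		    my_filter, my_handler, sc, intr_priority(INTR_TYPE_NET),
 *		    INTR_TYPE_NET | INTR_MPSAFE, &cookie);
 */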

/*
 * Bind an interrupt event to the specified CPU.  Note that not all
 * platforms support binding an interrupt to a CPU.  For those
 * platforms this request will fail.  Using a cpu id of NOCPU unbinds
 * the interrupt event.
 */
static int
_intr_event_bind(struct intr_event *ie, int cpu, bool bindirq, bool bindithread)
{
	lwpid_t id;
	int error;

	/* Need a CPU to bind to. */
	if (cpu != NOCPU && CPU_ABSENT(cpu))
		return (EINVAL);

	if (ie->ie_assign_cpu == NULL)
		return (EOPNOTSUPP);

	error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
	if (error)
		return (error);

	/*
	 * If we have any ithreads try to set their mask first to verify
	 * permissions, etc.
	 */
	if (bindithread) {
		mtx_lock(&ie->ie_lock);
		if (ie->ie_thread != NULL) {
			id = ie->ie_thread->it_thread->td_tid;
			mtx_unlock(&ie->ie_lock);
			error = cpuset_setithread(id, cpu);
			if (error)
				return (error);
		} else
			mtx_unlock(&ie->ie_lock);
	}
	if (bindirq)
		error = ie->ie_assign_cpu(ie->ie_source, cpu);
	if (error) {
		if (bindithread) {
			mtx_lock(&ie->ie_lock);
			if (ie->ie_thread != NULL) {
				cpu = ie->ie_cpu;
				id = ie->ie_thread->it_thread->td_tid;
				mtx_unlock(&ie->ie_lock);
				(void)cpuset_setithread(id, cpu);
			} else
				mtx_unlock(&ie->ie_lock);
		}
		return (error);
	}

	if (bindirq) {
		mtx_lock(&ie->ie_lock);
		ie->ie_cpu = cpu;
		mtx_unlock(&ie->ie_lock);
	}

	return (error);
}

/*
 * Bind an interrupt event to the specified CPU.  For supported platforms, any
 * associated ithreads as well as the primary interrupt context will be bound
 * to the specified CPU.
 */
int
intr_event_bind(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, true, true));
}

/*
 * Bind an interrupt event to the specified CPU, but do not bind associated
 * ithreads.
 */
int
intr_event_bind_irqonly(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, true, false));
}

/*
 * Bind an interrupt event's ithread to the specified CPU.
 */
int
intr_event_bind_ithread(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, false, true));
}

/*
 * Bind an interrupt event's ithread to the specified cpuset.
 */
int
intr_event_bind_ithread_cpuset(struct intr_event *ie, cpuset_t *cs)
{
	lwpid_t id;

	mtx_lock(&ie->ie_lock);
	if (ie->ie_thread != NULL) {
		id = ie->ie_thread->it_thread->td_tid;
		mtx_unlock(&ie->ie_lock);
		return (cpuset_setthread(id, cs));
	} else {
		mtx_unlock(&ie->ie_lock);
	}
	return (ENODEV);
}

static struct intr_event *
intr_lookup(int irq)
{
	struct intr_event *ie;

	mtx_lock(&event_lock);
	TAILQ_FOREACH(ie, &event_list, ie_list)
		if (ie->ie_irq == irq &&
		    (ie->ie_flags & IE_SOFT) == 0 &&
		    CK_SLIST_FIRST(&ie->ie_handlers) != NULL)
			break;
	mtx_unlock(&event_lock);
	return (ie);
}

int
intr_setaffinity(int irq, int mode, void *m)
{
	struct intr_event *ie;
	cpuset_t *mask;
	int cpu, n;

	mask = m;
	cpu = NOCPU;
	/*
	 * If we're setting all cpus we can unbind.  Otherwise make sure
	 * only one cpu is in the set.
	 */
	if (CPU_CMP(cpuset_root, mask)) {
		for (n = 0; n < CPU_SETSIZE; n++) {
			if (!CPU_ISSET(n, mask))
				continue;
			if (cpu != NOCPU)
				return (EINVAL);
			cpu = n;
		}
	}
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);
	switch (mode) {
	case CPU_WHICH_IRQ:
		return (intr_event_bind(ie, cpu));
	case CPU_WHICH_INTRHANDLER:
		return (intr_event_bind_irqonly(ie, cpu));
	case CPU_WHICH_ITHREAD:
		return (intr_event_bind_ithread(ie, cpu));
	default:
		return (EINVAL);
	}
}

int
intr_getaffinity(int irq, int mode, void *m)
{
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	cpuset_t *mask;
	lwpid_t id;
	int error;

	mask = m;
	ie = intr_lookup(irq);
	if (ie == NULL)
		return (ESRCH);

	error = 0;
	CPU_ZERO(mask);
	switch (mode) {
	case CPU_WHICH_IRQ:
	case CPU_WHICH_INTRHANDLER:
		mtx_lock(&ie->ie_lock);
		if (ie->ie_cpu == NOCPU)
			CPU_COPY(cpuset_root, mask);
		else
			CPU_SET(ie->ie_cpu, mask);
		mtx_unlock(&ie->ie_lock);
		break;
	case CPU_WHICH_ITHREAD:
		mtx_lock(&ie->ie_lock);
		if (ie->ie_thread == NULL) {
			mtx_unlock(&ie->ie_lock);
			CPU_COPY(cpuset_root, mask);
		} else {
			id = ie->ie_thread->it_thread->td_tid;
			mtx_unlock(&ie->ie_lock);
			error = cpuset_which(CPU_WHICH_TID, id, &p, &td, NULL);
			if (error != 0)
				return (error);
			CPU_COPY(&td->td_cpuset->cs_mask, mask);
			PROC_UNLOCK(p);
		}
		break;
	default:
		return (EINVAL);
	}
	return (0);
}

int
intr_event_destroy(struct intr_event *ie)
{

	mtx_lock(&event_lock);
	mtx_lock(&ie->ie_lock);
	if (!CK_SLIST_EMPTY(&ie->ie_handlers)) {
		mtx_unlock(&ie->ie_lock);
		mtx_unlock(&event_lock);
		return (EBUSY);
	}
	TAILQ_REMOVE(&event_list, ie, ie_list);
#ifndef notyet
	if (ie->ie_thread != NULL) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	mtx_unlock(&event_lock);
	mtx_destroy(&ie->ie_lock);
	free(ie, M_ITHREAD);
	return (0);
}

static struct intr_thread *
ithread_create(const char *name)
{
	struct intr_thread *ithd;
	struct thread *td;
	int error;

	ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO);

	error = kproc_kthread_add(ithread_loop, ithd, &intrproc,
	    &td, RFSTOPPED | RFHIGHPID,
	    0, "intr", "%s", name);
	if (error)
		panic("kproc_create() failed with %d", error);
	thread_lock(td);
	sched_class(td, PRI_ITHD);
	TD_SET_IWAIT(td);
	thread_unlock(td);
	td->td_pflags |= TDP_ITHREAD;
	ithd->it_thread = td;
	CTR2(KTR_INTR, "%s: created %s", __func__, name);
	return (ithd);
}

static void
ithread_destroy(struct intr_thread *ithread)
{
	struct thread *td;

	CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name);
	td = ithread->it_thread;
	thread_lock(td);
	ithread->it_flags |= IT_DEAD;
	if (TD_AWAITING_INTR(td)) {
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else
		thread_unlock(td);
}

int
intr_event_add_handler(struct intr_event *ie, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri,
    enum intr_type flags, void **cookiep)
{
	struct intr_handler *ih, *temp_ih;
	struct intr_handler **prevptr;
	struct intr_thread *it;

	if (ie == NULL || name == NULL || (handler == NULL && filter == NULL))
		return (EINVAL);

	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!CK_SLIST_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (CK_SLIST_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Create a thread if we need one. */
	while (ie->ie_thread == NULL && handler != NULL) {
		if (ie->ie_flags & IE_ADDING_THREAD)
			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
		else {
			ie->ie_flags |= IE_ADDING_THREAD;
			mtx_unlock(&ie->ie_lock);
			it = ithread_create("intr: newborn");
			mtx_lock(&ie->ie_lock);
			ie->ie_flags &= ~IE_ADDING_THREAD;
			ie->ie_thread = it;
			it->it_event = ie;
			ithread_update(it);
			wakeup(ie);
		}
	}

	/* Add the new handler to the event in priority order. */
	CK_SLIST_FOREACH_PREVPTR(temp_ih, prevptr, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	CK_SLIST_INSERT_PREVPTR(prevptr, temp_ih, ih, ih_next);

	intr_event_update(ie);

	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}

/*
 * Append a description preceded by a ':' to the name of the specified
 * interrupt handler.
 */
int
intr_event_describe_handler(struct intr_event *ie, void *cookie,
    const char *descr)
{
	struct intr_handler *ih;
	size_t space;
	char *start;

	mtx_lock(&ie->ie_lock);
#ifdef INVARIANTS
	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih == cookie)
			break;
	}
	if (ih == NULL) {
		mtx_unlock(&ie->ie_lock);
		panic("handler %p not found in interrupt event %p", cookie, ie);
	}
#endif
	ih = cookie;

	/*
	 * Look for an existing description by checking for an
	 * existing ":".  This assumes device names do not include
	 * colons.  If one is found, prepare to insert the new
	 * description at that point.  If one is not found, find the
	 * end of the name to use as the insertion point.
	 */
	start = strchr(ih->ih_name, ':');
	if (start == NULL)
		start = strchr(ih->ih_name, 0);

	/*
	 * See if there is enough remaining room in the string for the
	 * description + ":".  The "- 1" leaves room for the trailing
	 * '\0'.  The "+ 1" accounts for the colon.
	 */
	space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1;
	if (strlen(descr) + 1 > space) {
		mtx_unlock(&ie->ie_lock);
		return (ENOSPC);
	}

	/* Append a colon followed by the description. */
	*start = ':';
	strcpy(start + 1, descr);
	intr_event_update(ie);
	mtx_unlock(&ie->ie_lock);
	return (0);
}

/*
 * Return the ie_source field from the intr_event an intr_handler is
 * associated with.
 */
void *
intr_handler_source(void *cookie)
{
	struct intr_handler *ih;
	struct intr_event *ie;

	ih = (struct intr_handler *)cookie;
	if (ih == NULL)
		return (NULL);
	ie = ih->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    ih->ih_name));
	return (ie->ie_source);
}

/*
 * If intr_event_handle() is running in the ISR context at the time of the call,
 * then wait for it to complete.
 */
static void
intr_event_barrier(struct intr_event *ie)
{
	int phase;

	mtx_assert(&ie->ie_lock, MA_OWNED);
	phase = ie->ie_phase;

	/*
	 * Switch phase to direct future interrupts to the other active counter.
	 * Make sure that any preceding stores are visible before the switch.
	 */
	KASSERT(ie->ie_active[!phase] == 0, ("idle phase has activity"));
	atomic_store_rel_int(&ie->ie_phase, !phase);

	/*
	 * This code cooperates with wait-free iteration of ie_handlers
	 * in intr_event_handle.
	 * Make sure that the removal and the phase update are not reordered
	 * with the active count check.
	 * Note that no combination of acquire and release fences can provide
	 * that guarantee as Store->Load sequences can always be reordered.
	 */
	atomic_thread_fence_seq_cst();

	/*
	 * Now wait on the inactive phase.
	 * The acquire fence is needed so that all post-barrier accesses
	 * are after the check.
	 */
	while (ie->ie_active[phase] > 0)
		cpu_spinwait();
	atomic_thread_fence_acq();
}

static void
intr_handler_barrier(struct intr_handler *handler)
{
	struct intr_event *ie;

	ie = handler->ih_event;
	mtx_assert(&ie->ie_lock, MA_OWNED);
	KASSERT((handler->ih_flags & IH_DEAD) == 0,
	    ("update for a removed handler"));

	if (ie->ie_thread == NULL) {
		intr_event_barrier(ie);
		return;
	}
	if ((handler->ih_flags & IH_CHANGED) == 0) {
		handler->ih_flags |= IH_CHANGED;
		intr_event_schedule_thread(ie);
	}
	while ((handler->ih_flags & IH_CHANGED) != 0)
		msleep(handler, &ie->ie_lock, 0, "ih_barr", 0);
}

/*
 * Sleep until an ithread finishes executing an interrupt handler.
 *
 * XXX Doesn't currently handle interrupt filters or fast interrupt
 * handlers.  This is intended for compatibility with Linux drivers
 * only.  Do not use in BSD code.
 */
void
_intr_drain(int irq)
{
	struct intr_event *ie;
	struct intr_thread *ithd;
	struct thread *td;

	ie = intr_lookup(irq);
	if (ie == NULL)
		return;
	if (ie->ie_thread == NULL)
		return;
	ithd = ie->ie_thread;
	td = ithd->it_thread;
	/*
	 * We set the flag and wait for it to be cleared to avoid
	 * long delays with potentially busy interrupt handlers
	 * were we to only sample TD_AWAITING_INTR() every tick.
	 */
	thread_lock(td);
	if (!TD_AWAITING_INTR(td)) {
		ithd->it_flags |= IT_WAIT;
		while (ithd->it_flags & IT_WAIT) {
			thread_unlock(td);
			pause("idrain", 1);
			thread_lock(td);
		}
	}
	thread_unlock(td);
	return;
}

int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
	struct intr_handler *ih;
	struct intr_handler **prevptr;
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));

	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
	CK_SLIST_FOREACH_PREVPTR(ih, prevptr, &ie->ie_handlers, ih_next) {
		if (ih == handler)
			break;
	}
	if (ih == NULL) {
		panic("interrupt handler \"%s\" not found in "
		    "interrupt event \"%s\"", handler->ih_name, ie->ie_name);
	}

	/*
	 * If there is no ithread, then directly remove the handler.  Note that
	 * intr_event_handle() iterates ie_handlers in a lock-less fashion, so
	 * care needs to be taken to keep ie_handlers consistent and to free
	 * the removed handler only when ie_handlers is quiescent.
	 */
	if (ie->ie_thread == NULL) {
		CK_SLIST_REMOVE_PREVPTR(prevptr, ih, ih_next);
		intr_event_barrier(ie);
		intr_event_update(ie);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/*
	 * Let the interrupt thread do the job.
	 * The interrupt source is disabled when the interrupt thread is
	 * running, so it does not have to worry about interaction with
	 * intr_event_handle().
	 */
	KASSERT((handler->ih_flags & IH_DEAD) == 0,
	    ("duplicate handle remove"));
	handler->ih_flags |= IH_DEAD;
	intr_event_schedule_thread(ie);
	while (handler->ih_flags & IH_DEAD)
		msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0);
	intr_event_update(ie);

#ifdef notyet
	/*
	 * XXX: This could be bad in the case of ppbus(8).  Also, I think
	 * this could lead to races of stale data when servicing an
	 * interrupt.
	 */
	dead = 1;
	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih->ih_handler != NULL) {
			dead = 0;
			break;
		}
	}
	if (dead) {
		ithread_destroy(ie->ie_thread);
		ie->ie_thread = NULL;
	}
#endif
	mtx_unlock(&ie->ie_lock);
	free(handler, M_ITHREAD);
	return (0);
}

int
intr_event_suspend_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	handler->ih_flags |= IH_SUSP;
	intr_handler_barrier(handler);
	mtx_unlock(&ie->ie_lock);
	return (0);
}

int
intr_event_resume_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));

	/*
	 * intr_handler_barrier() acts not only as a barrier, it also
	 * allows any pending interrupts to be checked for and handled.
	 */
	mtx_lock(&ie->ie_lock);
	handler->ih_flags &= ~IH_SUSP;
	intr_handler_barrier(handler);
	mtx_unlock(&ie->ie_lock);
	return (0);
}

static int
intr_event_schedule_thread(struct intr_event *ie)
{
	struct intr_entropy entropy;
	struct intr_thread *it;
	struct thread *td;
	struct thread *ctd;

	/*
	 * If no ithread or no handlers, then we have a stray interrupt.
	 */
	if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers) ||
	    ie->ie_thread == NULL)
		return (EINVAL);

	ctd = curthread;
	it = ie->ie_thread;
	td = it->it_thread;

	/*
	 * If any of the handlers for this ithread claim to be good
	 * sources of entropy, then gather some.
	 */
	if (ie->ie_flags & IE_ENTROPY) {
		entropy.event = (uintptr_t)ie;
		entropy.td = ctd;
		random_harvest_queue(&entropy, sizeof(entropy), RANDOM_INTERRUPT);
	}

	KASSERT(td->td_proc != NULL, ("ithread %s has no process", ie->ie_name));

	/*
	 * Set it_need to tell the thread to keep running if it is already
	 * running.  Then, lock the thread and see if we actually need to
	 * put it on the runqueue.
	 *
	 * Use store_rel to arrange that the store to ih_need in
	 * swi_sched() is before the store to it_need and prepare for
	 * transfer of this order to loads in the ithread.
	 */
	atomic_store_rel_int(&it->it_need, 1);
	thread_lock(td);
	if (TD_AWAITING_INTR(td)) {
		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__,
		    td->td_proc->p_pid, td->td_name);
		TD_CLR_IWAIT(td);
		sched_add(td, SRQ_INTR);
	} else {
		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
		    __func__, td->td_proc->p_pid, td->td_name, it->it_need,
		    td->td_state);
		thread_unlock(td);
	}

	return (0);
}

/*
 * Allow interrupt event binding for software interrupt handlers -- a no-op,
 * since interrupts are generated in software rather than being directed by
 * a PIC.
 */
static int
swi_assign_cpu(void *arg, int cpu)
{

	return (0);
}

/*
 * Add a software interrupt handler to a specified event.  If a given event
 * is not specified, then a new event is created.
 */
int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
    void *arg, int pri, enum intr_type flags, void **cookiep)
{
	struct intr_event *ie;
	int error;

	if (flags & INTR_ENTROPY)
		return (EINVAL);

	ie = (eventp != NULL) ? *eventp : NULL;

	if (ie != NULL) {
		if (!(ie->ie_flags & IE_SOFT))
			return (EINVAL);
	} else {
		error = intr_event_create(&ie, NULL, IE_SOFT, 0,
		    NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
		if (error)
			return (error);
		if (eventp != NULL)
			*eventp = ie;
	}
	error = intr_event_add_handler(ie, name, NULL, handler, arg,
	    PI_SWI(pri), flags, cookiep);
	return (error);
}
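
/*
 * A minimal usage sketch for the software-interrupt API (illustrative only;
 * "my_task_swi", "mytask" and "my_swi_cookie" are hypothetical placeholders,
 * and SWI_CLOCK stands in for whichever SWI_* priority level from
 * <sys/interrupt.h> fits the consumer -- see start_softintr() below for a
 * real registration):
 *
 *	static void *my_swi_cookie;
 *
 *	static void
 *	my_task_swi(void *arg)
 *	{
 *		... deferred work, runs in a software interrupt thread ...
 *	}
 *
 *	error = swi_add(NULL, "mytask", my_task_swi, NULL, SWI_CLOCK,
 *	    INTR_MPSAFE, &my_swi_cookie);
 *
 * Later, swi_sched(my_swi_cookie, 0) marks the handler as needing service
 * and schedules its ithread; swi_remove(my_swi_cookie) tears it down.
 */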

/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
	struct intr_handler *ih = (struct intr_handler *)cookie;
	struct intr_event *ie = ih->ih_event;
	struct intr_entropy entropy;
	int error __unused;

	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
	    ih->ih_need);

	entropy.event = (uintptr_t)ih;
	entropy.td = curthread;
	random_harvest_queue(&entropy, sizeof(entropy), RANDOM_SWI);

	/*
	 * Set ih_need for this handler so that if the ithread is already
	 * running it will execute this handler on the next pass.  Otherwise,
	 * it will execute it the next time it runs.
	 */
	ih->ih_need = 1;

	if (!(flags & SWI_DELAY)) {
		VM_CNT_INC(v_soft);
		error = intr_event_schedule_thread(ie);
		KASSERT(error == 0, ("stray software interrupt"));
	}
}

/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

	return (intr_event_remove_handler(cookie));
}

static void
intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
{
	struct intr_handler *ih, *ihn, *ihp;

	ihp = NULL;
	CK_SLIST_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
		/*
		 * If this handler is marked for death, remove it from
		 * the list of handlers and wake up the sleeper.
		 */
		if (ih->ih_flags & IH_DEAD) {
			mtx_lock(&ie->ie_lock);
			if (ihp == NULL)
				CK_SLIST_REMOVE_HEAD(&ie->ie_handlers, ih_next);
			else
				CK_SLIST_REMOVE_AFTER(ihp, ih_next);
			ih->ih_flags &= ~IH_DEAD;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
			continue;
		}

		/*
		 * Now that we know that the current element won't be removed,
		 * update the previous element.
		 */
		ihp = ih;

		if ((ih->ih_flags & IH_CHANGED) != 0) {
			mtx_lock(&ie->ie_lock);
			ih->ih_flags &= ~IH_CHANGED;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
		}

		/* Skip filter-only handlers. */
		if (ih->ih_handler == NULL)
			continue;

		/* Skip suspended handlers. */
		if ((ih->ih_flags & IH_SUSP) != 0)
			continue;

		/*
		 * For software interrupt threads, we only execute
		 * handlers that have their need flag set.  Hardware
		 * interrupt threads always invoke all of their handlers.
		 *
		 * ih_need can only be 0 or 1.  Failed cmpset below
		 * means that there is no request to execute handlers,
		 * so a retry of the cmpset is not needed.
		 */
		if ((ie->ie_flags & IE_SOFT) != 0 &&
		    atomic_cmpset_int(&ih->ih_need, 1, 0) == 0)
			continue;

		/* Execute this handler. */
		CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
		    __func__, p->p_pid, (void *)ih->ih_handler,
		    ih->ih_argument, ih->ih_name, ih->ih_flags);

		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_lock(&Giant);
		ih->ih_handler(ih->ih_argument);
		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_unlock(&Giant);
	}
}

static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{

	/* Interrupt handlers should not sleep. */
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_NO_SLEEPING();
	intr_event_execute_handlers(p, ie);
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_SLEEPING_OK();

	/*
	 * Interrupt storm handling:
	 *
	 * If this interrupt source is currently storming, then throttle
	 * it to only fire the handler once per clock tick.
	 *
	 * If this interrupt source is not currently storming, but the
	 * number of back to back interrupts exceeds the storm threshold,
	 * then enter storming mode.
	 */
	if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
	    !(ie->ie_flags & IE_SOFT)) {
		/* Report the message only once every second. */
		if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
			printf(
	"interrupt storm detected on \"%s\"; throttling interrupt source\n",
			    ie->ie_name);
		}
		pause("istorm", 1);
	} else
		ie->ie_count++;

	/*
	 * Now that all the handlers have had a chance to run, reenable
	 * the interrupt source.
	 */
	if (ie->ie_post_ithread != NULL)
		ie->ie_post_ithread(ie->ie_source);
}

/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	int wake;

	td = curthread;
	p = td->td_proc;
	ithd = (struct intr_thread *)arg;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;
	wake = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 *
		 * The load_acq part of the following cmpset ensures
		 * that the load of ih_need in ithread_execute_handlers()
		 * is ordered after the load of it_need here.
		 */
		while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0)
			ithread_execute_handlers(p, ie);
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (atomic_load_acq_int(&ithd->it_need) == 0 &&
		    (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL | SWT_IWAIT);
		} else {
			if (ithd->it_flags & IT_WAIT) {
				wake = 1;
				ithd->it_flags &= ~IT_WAIT;
			}
			thread_unlock(td);
		}
		if (wake) {
			wakeup(ithd);
			wake = 0;
		}
	}
}

/*
 * Main interrupt handling body.
 *
 * Input:
 * o ie:	the event connected to this interrupt.
 * o frame:	some archs (e.g. i386) pass a frame to some
 *		handlers as their main argument.
 * Return value:
 * o 0:		everything ok.
 * o EINVAL:	stray interrupt.
 */
int
intr_event_handle(struct intr_event *ie, struct trapframe *frame)
{
	struct intr_handler *ih;
	struct trapframe *oldframe;
	struct thread *td;
	int phase;
	int ret;
	bool filter, thread;

	td = curthread;

#ifdef KSTACK_USAGE_PROF
	intr_prof_stack_use(td, frame);
#endif

	/* An interrupt with no event or handlers is a stray interrupt. */
	if (ie == NULL || CK_SLIST_EMPTY(&ie->ie_handlers))
		return (EINVAL);

	/*
	 * Execute fast interrupt handlers directly.
	 * To support clock handlers, if a handler registers
	 * with a NULL argument, then we pass it a pointer to
	 * a trapframe as its argument.
	 */
	td->td_intr_nesting_level++;
	filter = false;
	thread = false;
	ret = 0;
	critical_enter();
	oldframe = td->td_intr_frame;
	td->td_intr_frame = frame;

	phase = ie->ie_phase;
	atomic_add_int(&ie->ie_active[phase], 1);

	/*
	 * This fence is required to ensure that no later loads are
	 * re-ordered before the ie_active store.
	 */
	atomic_thread_fence_seq_cst();

	CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if ((ih->ih_flags & IH_SUSP) != 0)
			continue;
		if (ih->ih_filter == NULL) {
			thread = true;
			continue;
		}
		CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
		    ih->ih_filter, ih->ih_argument == NULL ? frame :
		    ih->ih_argument, ih->ih_name);
		if (ih->ih_argument == NULL)
			ret = ih->ih_filter(frame);
		else
			ret = ih->ih_filter(ih->ih_argument);
		KASSERT(ret == FILTER_STRAY ||
		    ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 &&
		    (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
		    ("%s: incorrect return value %#x from %s", __func__, ret,
		    ih->ih_name));
		filter = filter || ret == FILTER_HANDLED;

		/*
		 * Wrapper handler special handling:
		 *
		 * in some particular cases (like pccard and pccbb),
		 * the _real_ device handler is wrapped in a couple of
		 * functions - a filter wrapper and an ithread wrapper.
		 * In this case (and just in this case), the filter wrapper
		 * could ask the system to schedule the ithread and mask
		 * the interrupt source if the wrapped handler is composed
		 * of just an ithread handler.
		 *
		 * TODO: write a generic wrapper to avoid people rolling
		 * their own.
		 */
		if (!thread) {
			if (ret == FILTER_SCHEDULE_THREAD)
				thread = true;
		}
	}
	atomic_add_rel_int(&ie->ie_active[phase], -1);

	td->td_intr_frame = oldframe;

	if (thread) {
		if (ie->ie_pre_ithread != NULL)
			ie->ie_pre_ithread(ie->ie_source);
	} else {
		if (ie->ie_post_filter != NULL)
			ie->ie_post_filter(ie->ie_source);
	}

	/* Schedule the ithread if needed. */
	if (thread) {
		int error __unused;

		error = intr_event_schedule_thread(ie);
		KASSERT(error == 0, ("bad stray interrupt"));
	}
	critical_exit();
	td->td_intr_nesting_level--;
#ifdef notyet
	/* The interrupt is not acknowledged by any filter and has no ithread. */
	if (!thread && !filter)
		return (EINVAL);
#endif
	return (0);
}

#ifdef DDB
/*
 * Dump details about an interrupt handler.
 */
static void
db_dump_intrhand(struct intr_handler *ih)
{
	int comma;

	db_printf("\t%-10s ", ih->ih_name);
	switch (ih->ih_pri) {
	case PI_REALTIME:
		db_printf("CLK ");
		break;
	case PI_AV:
		db_printf("AV ");
		break;
	case PI_TTY:
		db_printf("TTY ");
		break;
	case PI_NET:
		db_printf("NET ");
		break;
	case PI_DISK:
		db_printf("DISK");
		break;
	case PI_DULL:
		db_printf("DULL");
		break;
	default:
		if (ih->ih_pri >= PI_SOFT)
			db_printf("SWI ");
		else
			db_printf("%4u", ih->ih_pri);
		break;
	}
	db_printf(" ");
	if (ih->ih_filter != NULL) {
		db_printf("[F]");
		db_printsym((uintptr_t)ih->ih_filter, DB_STGY_PROC);
	}
	if (ih->ih_handler != NULL) {
		if (ih->ih_filter != NULL)
			db_printf(",");
		db_printf("[H]");
		db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
	}
	db_printf("(%p)", ih->ih_argument);
	if (ih->ih_need ||
	    (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
	    IH_MPSAFE)) != 0) {
		db_printf(" {");
		comma = 0;
		if (ih->ih_flags & IH_EXCLUSIVE) {
			if (comma)
				db_printf(", ");
			db_printf("EXCL");
			comma = 1;
		}
		if (ih->ih_flags & IH_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ih->ih_flags & IH_DEAD) {
			if (comma)
				db_printf(", ");
			db_printf("DEAD");
			comma = 1;
		}
		if (ih->ih_flags & IH_MPSAFE) {
			if (comma)
				db_printf(", ");
			db_printf("MPSAFE");
			comma = 1;
		}
		if (ih->ih_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");
}

/*
 * Dump details about an event.
 */
void
db_dump_intr_event(struct intr_event *ie, int handlers)
{
	struct intr_handler *ih;
	struct intr_thread *it;
	int comma;

	db_printf("%s ", ie->ie_fullname);
	it = ie->ie_thread;
	if (it != NULL)
		db_printf("(pid %d)", it->it_thread->td_proc->p_pid);
	else
		db_printf("(no thread)");
	if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 ||
	    (it != NULL && it->it_need)) {
		db_printf(" {");
		comma = 0;
		if (ie->ie_flags & IE_SOFT) {
			db_printf("SOFT");
			comma = 1;
		}
		if (ie->ie_flags & IE_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ie->ie_flags & IE_ADDING_THREAD) {
			if (comma)
				db_printf(", ");
			db_printf("ADDING_THREAD");
			comma = 1;
		}
		if (it != NULL && it->it_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");

	if (handlers)
		CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next)
			db_dump_intrhand(ih);
}

/*
 * Dump data about interrupt handlers.
 */
DB_SHOW_COMMAND(intr, db_show_intr)
{
	struct intr_event *ie;
	int all, verbose;

	verbose = strchr(modif, 'v') != NULL;
	all = strchr(modif, 'a') != NULL;
	TAILQ_FOREACH(ie, &event_list, ie_list) {
		if (!all && CK_SLIST_EMPTY(&ie->ie_handlers))
			continue;
		db_dump_intr_event(ie, verbose);
		if (db_pager_quit)
			break;
	}
}
#endif /* DDB */

/*
 * Start standard software interrupt threads.
 */
static void
start_softintr(void *dummy)
{

	if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih))
		panic("died while creating vm swi ithread");
}
SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr,
    NULL);

/*
 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt.
 * The data for this is machine dependent, and the declarations are in machine
 * dependent code.  The layout of intrnames and intrcnt however is machine
 * independent.
 *
 * We do not know the length of intrcnt and intrnames at compile time, so
 * calculate things at run time.
 */
static int
sysctl_intrnames(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, intrnames, sintrnames, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrnames, "", "Interrupt Names");

static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
#ifdef SCTL_MASK32
	uint32_t *intrcnt32;
	unsigned i;
	int error;

	if (req->flags & SCTL_MASK32) {
		if (!req->oldptr)
			return (sysctl_handle_opaque(oidp, NULL,
			    sintrcnt / 2, req));
		intrcnt32 = malloc(sintrcnt / 2, M_TEMP, M_NOWAIT);
		if (intrcnt32 == NULL)
			return (ENOMEM);
		for (i = 0; i < sintrcnt / sizeof (u_long); i++)
			intrcnt32[i] = intrcnt[i];
		error = sysctl_handle_opaque(oidp, intrcnt32, sintrcnt / 2, req);
		free(intrcnt32, M_TEMP);
		return (error);
	}
#endif
	return (sysctl_handle_opaque(oidp, intrcnt, sintrcnt, req));
}

SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD,
    NULL, 0, sysctl_intrcnt, "", "Interrupt Counts");

#ifdef DDB
/*
 * DDB command to dump the interrupt statistics.
 */
DB_SHOW_COMMAND(intrcnt, db_show_intrcnt)
{
	u_long *i;
	char *cp;
	u_int j;

	cp = intrnames;
	j = 0;
	for (i = intrcnt; j < (sintrcnt / sizeof(u_long)) && !db_pager_quit;
	    i++, j++) {
		if (*cp == '\0')
			break;
		if (*i != 0)
			db_printf("%s\t%lu\n", cp, *i);
		cp += strlen(cp) + 1;
	}
}
#endif