/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_kstack_usage_prof.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/rtprio.h>
#include <sys/systm.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

/*
 * Describe an interrupt thread.  There is one of these per interrupt event.
 */
struct intr_thread {
	struct intr_event *it_event;
	struct thread *it_thread;	/* Kernel thread. */
	int	it_flags;		/* (j) IT_* flags. */
	int	it_need;		/* Needs service. */
};

/* Interrupt thread flags kept in it_flags */
#define	IT_DEAD		0x000001	/* Thread is waiting to exit. */
#define	IT_WAIT		0x000002	/* Thread is waiting for completion. */

struct intr_entropy {
	struct thread *td;
	uintptr_t event;
};

struct intr_event *clk_intr_event;
struct intr_event *tty_intr_event;
void	*vm_ih;
struct proc *intrproc;

static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");

static int intr_storm_threshold = 1000;
SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RWTUN,
    &intr_storm_threshold, 0,
    "Number of consecutive interrupts before storm protection is enabled");
static TAILQ_HEAD(, intr_event) event_list =
    TAILQ_HEAD_INITIALIZER(event_list);
static struct mtx event_lock;
MTX_SYSINIT(intr_event_list, &event_lock, "intr event list", MTX_DEF);

static void	intr_event_update(struct intr_event *ie);
static int	intr_event_schedule_thread(struct intr_event *ie);
static struct intr_thread *ithread_create(const char *name);
static void	ithread_destroy(struct intr_thread *ithread);
static void	ithread_execute_handlers(struct proc *p,
		    struct intr_event *ie);
static void	ithread_loop(void *);
static void	ithread_update(struct intr_thread *ithd);
static void	start_softintr(void *);

/* Map an interrupt type to an ithread priority. */
u_char
intr_priority(enum intr_type flags)
{
	u_char pri;

	flags &= (INTR_TYPE_TTY | INTR_TYPE_BIO | INTR_TYPE_NET |
	    INTR_TYPE_CAM | INTR_TYPE_MISC | INTR_TYPE_CLK | INTR_TYPE_AV);
	switch (flags) {
	case INTR_TYPE_TTY:
		pri = PI_TTY;
		break;
	case INTR_TYPE_BIO:
		pri = PI_DISK;
		break;
	case INTR_TYPE_NET:
		pri = PI_NET;
		break;
	case INTR_TYPE_CAM:
		pri = PI_DISK;
		break;
	case INTR_TYPE_AV:
		pri = PI_AV;
		break;
	case INTR_TYPE_CLK:
		pri = PI_REALTIME;
		break;
	case INTR_TYPE_MISC:
		pri = PI_DULL;	/* don't care */
		break;
	default:
		/* We didn't specify an interrupt level. */
		panic("intr_priority: no interrupt type in flags");
	}

	return pri;
}
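
/*
 * Example (illustrative only, not part of this file's logic): machine-
 * dependent interrupt code typically derives the ithread priority for a new
 * handler from the driver's INTR_TYPE_* flags and hands the result to
 * intr_event_add_handler() below, roughly:
 *
 *	error = intr_event_add_handler(ie, name, filter, handler, arg,
 *	    intr_priority(flags), flags, cookiep);
 *
 * so a handler registered with INTR_TYPE_NET runs at PI_NET, one registered
 * with INTR_TYPE_CLK at PI_REALTIME, and so on.
 */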

/*
 * Update an ithread based on the associated intr_event.
 */
static void
ithread_update(struct intr_thread *ithd)
{
	struct intr_event *ie;
	struct thread *td;
	u_char pri;

	ie = ithd->it_event;
	td = ithd->it_thread;

	/* Determine the overall priority of this event. */
	if (TAILQ_EMPTY(&ie->ie_handlers))
		pri = PRI_MAX_ITHD;
	else
		pri = TAILQ_FIRST(&ie->ie_handlers)->ih_pri;

	/* Update name and priority. */
	strlcpy(td->td_name, ie->ie_fullname, sizeof(td->td_name));
#ifdef KTR
	sched_clear_tdname(td);
#endif
	thread_lock(td);
	sched_prio(td, pri);
	thread_unlock(td);
}

/*
 * Regenerate the full name of an interrupt event and update its priority.
 */
static void
intr_event_update(struct intr_event *ie)
{
	struct intr_handler *ih;
	char *last;
	int missed, space;

	/* Start off with no entropy and just the name of the event. */
	mtx_assert(&ie->ie_lock, MA_OWNED);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	ie->ie_flags &= ~IE_ENTROPY;
	missed = 0;
	space = 1;

	/* Run through all the handlers updating values. */
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (strlen(ie->ie_fullname) + strlen(ih->ih_name) + 1 <
		    sizeof(ie->ie_fullname)) {
			strcat(ie->ie_fullname, " ");
			strcat(ie->ie_fullname, ih->ih_name);
			space = 0;
		} else
			missed++;
		if (ih->ih_flags & IH_ENTROPY)
			ie->ie_flags |= IE_ENTROPY;
	}

	/*
	 * If the handler names were too long, add +'s to indicate missing
	 * names.  If we run out of room and still have +'s to add, change
	 * the last character from a + to a *.
	 */
	last = &ie->ie_fullname[sizeof(ie->ie_fullname) - 2];
	while (missed-- > 0) {
		if (strlen(ie->ie_fullname) + 1 == sizeof(ie->ie_fullname)) {
			if (*last == '+') {
				*last = '*';
				break;
			} else
				*last = '+';
		} else if (space) {
			strcat(ie->ie_fullname, " +");
			space = 0;
		} else
			strcat(ie->ie_fullname, "+");
	}

	/*
	 * If this event has an ithread, update its priority and
	 * name.
	 */
	if (ie->ie_thread != NULL)
		ithread_update(ie->ie_thread);
	CTR2(KTR_INTR, "%s: updated %s", __func__, ie->ie_fullname);
}

int
intr_event_create(struct intr_event **event, void *source, int flags, int irq,
    void (*pre_ithread)(void *), void (*post_ithread)(void *),
    void (*post_filter)(void *), int (*assign_cpu)(void *, int),
    const char *fmt, ...)
{
	struct intr_event *ie;
	va_list ap;

	/* The only valid flag during creation is IE_SOFT. */
	if ((flags & ~IE_SOFT) != 0)
		return (EINVAL);
	ie = malloc(sizeof(struct intr_event), M_ITHREAD, M_WAITOK | M_ZERO);
	ie->ie_source = source;
	ie->ie_pre_ithread = pre_ithread;
	ie->ie_post_ithread = post_ithread;
	ie->ie_post_filter = post_filter;
	ie->ie_assign_cpu = assign_cpu;
	ie->ie_flags = flags;
	ie->ie_irq = irq;
	ie->ie_cpu = NOCPU;
	TAILQ_INIT(&ie->ie_handlers);
	mtx_init(&ie->ie_lock, "intr event", NULL, MTX_DEF);

	va_start(ap, fmt);
	vsnprintf(ie->ie_name, sizeof(ie->ie_name), fmt, ap);
	va_end(ap);
	strlcpy(ie->ie_fullname, ie->ie_name, sizeof(ie->ie_fullname));
	mtx_lock(&event_lock);
	TAILQ_INSERT_TAIL(&event_list, ie, ie_list);
	mtx_unlock(&event_lock);
	if (event != NULL)
		*event = ie;
	CTR2(KTR_INTR, "%s: created %s", __func__, ie->ie_name);
	return (0);
}

/*
 * Bind an interrupt event to the specified CPU.  Note that not all
 * platforms support binding an interrupt to a CPU.  For those
 * platforms this request will fail.  Using a cpu id of NOCPU unbinds
 * the interrupt event.
 */
static int
_intr_event_bind(struct intr_event *ie, int cpu, bool bindirq, bool bindithread)
{
	lwpid_t id;
	int error;

	/* Need a CPU to bind to. */
	if (cpu != NOCPU && CPU_ABSENT(cpu))
		return (EINVAL);

	if (ie->ie_assign_cpu == NULL)
		return (EOPNOTSUPP);

	error = priv_check(curthread, PRIV_SCHED_CPUSET_INTR);
	if (error)
		return (error);

	/*
	 * If we have any ithreads try to set their mask first to verify
	 * permissions, etc.
	 */
	if (bindithread) {
		mtx_lock(&ie->ie_lock);
		if (ie->ie_thread != NULL) {
			id = ie->ie_thread->it_thread->td_tid;
			mtx_unlock(&ie->ie_lock);
			error = cpuset_setithread(id, cpu);
			if (error)
				return (error);
		} else
			mtx_unlock(&ie->ie_lock);
	}
	if (bindirq)
		error = ie->ie_assign_cpu(ie->ie_source, cpu);
	if (error) {
		if (bindithread) {
			mtx_lock(&ie->ie_lock);
			if (ie->ie_thread != NULL) {
				cpu = ie->ie_cpu;
				id = ie->ie_thread->it_thread->td_tid;
				mtx_unlock(&ie->ie_lock);
				(void)cpuset_setithread(id, cpu);
			} else
				mtx_unlock(&ie->ie_lock);
		}
		return (error);
	}

	if (bindirq) {
		mtx_lock(&ie->ie_lock);
		ie->ie_cpu = cpu;
		mtx_unlock(&ie->ie_lock);
	}

	return (error);
}

/*
 * Bind an interrupt event to the specified CPU.  For supported platforms, any
 * associated ithreads as well as the primary interrupt context will be bound
 * to the specified CPU.
 */
int
intr_event_bind(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, true, true));
}

/*
 * Bind an interrupt event to the specified CPU, but do not bind associated
 * ithreads.
 */
int
intr_event_bind_irqonly(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, true, false));
}

/*
 * Bind an interrupt event's ithread to the specified CPU.
 */
int
intr_event_bind_ithread(struct intr_event *ie, int cpu)
{

	return (_intr_event_bind(ie, cpu, false, true));
}

static struct intr_event *
intr_lookup(int irq)
{
	struct intr_event *ie;

	mtx_lock(&event_lock);
	TAILQ_FOREACH(ie, &event_list, ie_list)
		if (ie->ie_irq == irq &&
		    (ie->ie_flags & IE_SOFT) == 0 &&
		    TAILQ_FIRST(&ie->ie_handlers) != NULL)
			break;
	mtx_unlock(&event_lock);
	return (ie);
}

int
intr_setaffinity(int irq, int mode, void *m)
{
	struct intr_event *ie;
	cpuset_t *mask;
	int cpu, n;

	mask = m;
	cpu = NOCPU;
	/*
	 * If we're setting all cpus we can unbind.  Otherwise make sure
	 * only one cpu is in the set.
399 */ 400 if (CPU_CMP(cpuset_root, mask)) { 401 for (n = 0; n < CPU_SETSIZE; n++) { 402 if (!CPU_ISSET(n, mask)) 403 continue; 404 if (cpu != NOCPU) 405 return (EINVAL); 406 cpu = n; 407 } 408 } 409 ie = intr_lookup(irq); 410 if (ie == NULL) 411 return (ESRCH); 412 switch (mode) { 413 case CPU_WHICH_IRQ: 414 return (intr_event_bind(ie, cpu)); 415 case CPU_WHICH_INTRHANDLER: 416 return (intr_event_bind_irqonly(ie, cpu)); 417 case CPU_WHICH_ITHREAD: 418 return (intr_event_bind_ithread(ie, cpu)); 419 default: 420 return (EINVAL); 421 } 422 } 423 424 int 425 intr_getaffinity(int irq, int mode, void *m) 426 { 427 struct intr_event *ie; 428 struct thread *td; 429 struct proc *p; 430 cpuset_t *mask; 431 lwpid_t id; 432 int error; 433 434 mask = m; 435 ie = intr_lookup(irq); 436 if (ie == NULL) 437 return (ESRCH); 438 439 error = 0; 440 CPU_ZERO(mask); 441 switch (mode) { 442 case CPU_WHICH_IRQ: 443 case CPU_WHICH_INTRHANDLER: 444 mtx_lock(&ie->ie_lock); 445 if (ie->ie_cpu == NOCPU) 446 CPU_COPY(cpuset_root, mask); 447 else 448 CPU_SET(ie->ie_cpu, mask); 449 mtx_unlock(&ie->ie_lock); 450 break; 451 case CPU_WHICH_ITHREAD: 452 mtx_lock(&ie->ie_lock); 453 if (ie->ie_thread == NULL) { 454 mtx_unlock(&ie->ie_lock); 455 CPU_COPY(cpuset_root, mask); 456 } else { 457 id = ie->ie_thread->it_thread->td_tid; 458 mtx_unlock(&ie->ie_lock); 459 error = cpuset_which(CPU_WHICH_TID, id, &p, &td, NULL); 460 if (error != 0) 461 return (error); 462 CPU_COPY(&td->td_cpuset->cs_mask, mask); 463 PROC_UNLOCK(p); 464 } 465 default: 466 return (EINVAL); 467 } 468 return (0); 469 } 470 471 int 472 intr_event_destroy(struct intr_event *ie) 473 { 474 475 mtx_lock(&event_lock); 476 mtx_lock(&ie->ie_lock); 477 if (!TAILQ_EMPTY(&ie->ie_handlers)) { 478 mtx_unlock(&ie->ie_lock); 479 mtx_unlock(&event_lock); 480 return (EBUSY); 481 } 482 TAILQ_REMOVE(&event_list, ie, ie_list); 483 #ifndef notyet 484 if (ie->ie_thread != NULL) { 485 ithread_destroy(ie->ie_thread); 486 ie->ie_thread = NULL; 487 } 488 #endif 489 mtx_unlock(&ie->ie_lock); 490 mtx_unlock(&event_lock); 491 mtx_destroy(&ie->ie_lock); 492 free(ie, M_ITHREAD); 493 return (0); 494 } 495 496 static struct intr_thread * 497 ithread_create(const char *name) 498 { 499 struct intr_thread *ithd; 500 struct thread *td; 501 int error; 502 503 ithd = malloc(sizeof(struct intr_thread), M_ITHREAD, M_WAITOK | M_ZERO); 504 505 error = kproc_kthread_add(ithread_loop, ithd, &intrproc, 506 &td, RFSTOPPED | RFHIGHPID, 507 0, "intr", "%s", name); 508 if (error) 509 panic("kproc_create() failed with %d", error); 510 thread_lock(td); 511 sched_class(td, PRI_ITHD); 512 TD_SET_IWAIT(td); 513 thread_unlock(td); 514 td->td_pflags |= TDP_ITHREAD; 515 ithd->it_thread = td; 516 CTR2(KTR_INTR, "%s: created %s", __func__, name); 517 return (ithd); 518 } 519 520 static void 521 ithread_destroy(struct intr_thread *ithread) 522 { 523 struct thread *td; 524 525 CTR2(KTR_INTR, "%s: killing %s", __func__, ithread->it_event->ie_name); 526 td = ithread->it_thread; 527 thread_lock(td); 528 ithread->it_flags |= IT_DEAD; 529 if (TD_AWAITING_INTR(td)) { 530 TD_CLR_IWAIT(td); 531 sched_add(td, SRQ_INTR); 532 } 533 thread_unlock(td); 534 } 535 536 int 537 intr_event_add_handler(struct intr_event *ie, const char *name, 538 driver_filter_t filter, driver_intr_t handler, void *arg, u_char pri, 539 enum intr_type flags, void **cookiep) 540 { 541 struct intr_handler *ih, *temp_ih; 542 struct intr_thread *it; 543 544 if (ie == NULL || name == NULL || (handler == NULL && filter == NULL)) 545 return (EINVAL); 546 547 /* 
	/* Allocate and populate an interrupt handler structure. */
	ih = malloc(sizeof(struct intr_handler), M_ITHREAD, M_WAITOK | M_ZERO);
	ih->ih_filter = filter;
	ih->ih_handler = handler;
	ih->ih_argument = arg;
	strlcpy(ih->ih_name, name, sizeof(ih->ih_name));
	ih->ih_event = ie;
	ih->ih_pri = pri;
	if (flags & INTR_EXCL)
		ih->ih_flags = IH_EXCLUSIVE;
	if (flags & INTR_MPSAFE)
		ih->ih_flags |= IH_MPSAFE;
	if (flags & INTR_ENTROPY)
		ih->ih_flags |= IH_ENTROPY;

	/* We can only have one exclusive handler in an event. */
	mtx_lock(&ie->ie_lock);
	if (!TAILQ_EMPTY(&ie->ie_handlers)) {
		if ((flags & INTR_EXCL) ||
		    (TAILQ_FIRST(&ie->ie_handlers)->ih_flags & IH_EXCLUSIVE)) {
			mtx_unlock(&ie->ie_lock);
			free(ih, M_ITHREAD);
			return (EINVAL);
		}
	}

	/* Create a thread if we need one. */
	while (ie->ie_thread == NULL && handler != NULL) {
		if (ie->ie_flags & IE_ADDING_THREAD)
			msleep(ie, &ie->ie_lock, 0, "ithread", 0);
		else {
			ie->ie_flags |= IE_ADDING_THREAD;
			mtx_unlock(&ie->ie_lock);
			it = ithread_create("intr: newborn");
			mtx_lock(&ie->ie_lock);
			ie->ie_flags &= ~IE_ADDING_THREAD;
			ie->ie_thread = it;
			it->it_event = ie;
			ithread_update(it);
			wakeup(ie);
		}
	}

	/* Add the new handler to the event in priority order. */
	TAILQ_FOREACH(temp_ih, &ie->ie_handlers, ih_next) {
		if (temp_ih->ih_pri > ih->ih_pri)
			break;
	}
	if (temp_ih == NULL)
		TAILQ_INSERT_TAIL(&ie->ie_handlers, ih, ih_next);
	else
		TAILQ_INSERT_BEFORE(temp_ih, ih, ih_next);
	intr_event_update(ie);

	CTR3(KTR_INTR, "%s: added %s to %s", __func__, ih->ih_name,
	    ie->ie_name);
	mtx_unlock(&ie->ie_lock);

	if (cookiep != NULL)
		*cookiep = ih;
	return (0);
}

/*
 * Append a description preceded by a ':' to the name of the specified
 * interrupt handler.
 */
int
intr_event_describe_handler(struct intr_event *ie, void *cookie,
    const char *descr)
{
	struct intr_handler *ih;
	size_t space;
	char *start;

	mtx_lock(&ie->ie_lock);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) {
		if (ih == cookie)
			break;
	}
	if (ih == NULL) {
		mtx_unlock(&ie->ie_lock);
		panic("handler %p not found in interrupt event %p", cookie, ie);
	}
#endif
	ih = cookie;

	/*
	 * Look for an existing description by checking for an
	 * existing ":".  This assumes device names do not include
	 * colons.  If one is found, prepare to insert the new
	 * description at that point.  If one is not found, find the
	 * end of the name to use as the insertion point.
	 */
	start = strchr(ih->ih_name, ':');
	if (start == NULL)
		start = strchr(ih->ih_name, 0);

	/*
	 * See if there is enough remaining room in the string for the
	 * description + ":".  The "- 1" leaves room for the trailing
	 * '\0'.  The "+ 1" accounts for the colon.
	 */
	space = sizeof(ih->ih_name) - (start - ih->ih_name) - 1;
	if (strlen(descr) + 1 > space) {
		mtx_unlock(&ie->ie_lock);
		return (ENOSPC);
	}

	/* Append a colon followed by the description. */
	*start = ':';
	strcpy(start + 1, descr);
	intr_event_update(ie);
	mtx_unlock(&ie->ie_lock);
	return (0);
}
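
/*
 * Example (illustrative only, not part of this file's logic): attaching a
 * filter and an ithread handler to an event and then tagging the handler
 * with a per-queue description.  Device drivers normally reach this code
 * through bus_setup_intr(9) and bus_describe_intr(9); the direct calls below
 * use hypothetical "mydev" names and are only a sketch of the underlying
 * interface.
 *
 *	void *cookie;
 *	int error;
 *
 *	error = intr_event_add_handler(ie, "mydev0", mydev_filter,
 *	    mydev_ithread, sc, intr_priority(INTR_TYPE_NET),
 *	    INTR_TYPE_NET | INTR_MPSAFE, &cookie);
 *	if (error == 0)
 *		error = intr_event_describe_handler(ie, cookie, "rxq0");
 */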

/*
 * Return the ie_source field from the intr_event an intr_handler is
 * associated with.
 */
void *
intr_handler_source(void *cookie)
{
	struct intr_handler *ih;
	struct intr_event *ie;

	ih = (struct intr_handler *)cookie;
	if (ih == NULL)
		return (NULL);
	ie = ih->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    ih->ih_name));
	return (ie->ie_source);
}

/*
 * Sleep until an ithread finishes executing an interrupt handler.
 *
 * XXX Doesn't currently handle interrupt filters or fast interrupt
 * handlers.  This is intended for compatibility with Linux drivers
 * only.  Do not use in BSD code.
 */
void
_intr_drain(int irq)
{
	struct intr_event *ie;
	struct intr_thread *ithd;
	struct thread *td;

	ie = intr_lookup(irq);
	if (ie == NULL)
		return;
	if (ie->ie_thread == NULL)
		return;
	ithd = ie->ie_thread;
	td = ithd->it_thread;
	/*
	 * We set the flag and wait for it to be cleared to avoid
	 * long delays with potentially busy interrupt handlers
	 * were we to only sample TD_AWAITING_INTR() every tick.
	 */
	thread_lock(td);
	if (!TD_AWAITING_INTR(td)) {
		ithd->it_flags |= IT_WAIT;
		while (ithd->it_flags & IT_WAIT) {
			thread_unlock(td);
			pause("idrain", 1);
			thread_lock(td);
		}
	}
	thread_unlock(td);
	return;
}

int
intr_event_remove_handler(void *cookie)
{
	struct intr_handler *handler = (struct intr_handler *)cookie;
	struct intr_event *ie;
#ifdef INVARIANTS
	struct intr_handler *ih;
#endif
#ifdef notyet
	int dead;
#endif

	if (handler == NULL)
		return (EINVAL);
	ie = handler->ih_event;
	KASSERT(ie != NULL,
	    ("interrupt handler \"%s\" has a NULL interrupt event",
	    handler->ih_name));
	mtx_lock(&ie->ie_lock);
	CTR3(KTR_INTR, "%s: removing %s from %s", __func__, handler->ih_name,
	    ie->ie_name);
#ifdef INVARIANTS
	TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next)
		if (ih == handler)
			goto ok;
	mtx_unlock(&ie->ie_lock);
	panic("interrupt handler \"%s\" not found in interrupt event \"%s\"",
	    handler->ih_name, ie->ie_name);
ok:
#endif
	/*
	 * If there is no ithread, then just remove the handler and return.
	 * XXX: Note that an INTR_FAST handler might be running on another
	 * CPU!
	 */
	if (ie->ie_thread == NULL) {
		TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next);
		mtx_unlock(&ie->ie_lock);
		free(handler, M_ITHREAD);
		return (0);
	}

	/*
	 * If the interrupt thread is already running, then just mark this
	 * handler as being dead and let the ithread do the actual removal.
	 *
	 * During a cold boot while cold is set, msleep() does not sleep,
	 * so we have to remove the handler here rather than letting the
	 * thread do it.
	 */
	thread_lock(ie->ie_thread->it_thread);
	if (!TD_AWAITING_INTR(ie->ie_thread->it_thread) && !cold) {
		handler->ih_flags |= IH_DEAD;

		/*
		 * Ensure that the thread will process the handler list
		 * again and remove this handler if it has already passed
		 * it on the list.
		 *
		 * The release part of the following store ensures
		 * that the update of ih_flags is ordered before the
		 * it_need setting.  See the comment before the
		 * atomic_cmpset_acq(&ithd->it_need, ...) operation in
		 * ithread_loop().
789 */ 790 atomic_store_rel_int(&ie->ie_thread->it_need, 1); 791 } else 792 TAILQ_REMOVE(&ie->ie_handlers, handler, ih_next); 793 thread_unlock(ie->ie_thread->it_thread); 794 while (handler->ih_flags & IH_DEAD) 795 msleep(handler, &ie->ie_lock, 0, "iev_rmh", 0); 796 intr_event_update(ie); 797 #ifdef notyet 798 /* 799 * XXX: This could be bad in the case of ppbus(8). Also, I think 800 * this could lead to races of stale data when servicing an 801 * interrupt. 802 */ 803 dead = 1; 804 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 805 if (!(ih->ih_flags & IH_FAST)) { 806 dead = 0; 807 break; 808 } 809 } 810 if (dead) { 811 ithread_destroy(ie->ie_thread); 812 ie->ie_thread = NULL; 813 } 814 #endif 815 mtx_unlock(&ie->ie_lock); 816 free(handler, M_ITHREAD); 817 return (0); 818 } 819 820 static int 821 intr_event_schedule_thread(struct intr_event *ie) 822 { 823 struct intr_entropy entropy; 824 struct intr_thread *it; 825 struct thread *td; 826 struct thread *ctd; 827 828 /* 829 * If no ithread or no handlers, then we have a stray interrupt. 830 */ 831 if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) || 832 ie->ie_thread == NULL) 833 return (EINVAL); 834 835 ctd = curthread; 836 it = ie->ie_thread; 837 td = it->it_thread; 838 839 /* 840 * If any of the handlers for this ithread claim to be good 841 * sources of entropy, then gather some. 842 */ 843 if (ie->ie_flags & IE_ENTROPY) { 844 entropy.event = (uintptr_t)ie; 845 entropy.td = ctd; 846 random_harvest_queue(&entropy, sizeof(entropy), 2, RANDOM_INTERRUPT); 847 } 848 849 KASSERT(td->td_proc != NULL, ("ithread %s has no process", ie->ie_name)); 850 851 /* 852 * Set it_need to tell the thread to keep running if it is already 853 * running. Then, lock the thread and see if we actually need to 854 * put it on the runqueue. 855 * 856 * Use store_rel to arrange that the store to ih_need in 857 * swi_sched() is before the store to it_need and prepare for 858 * transfer of this order to loads in the ithread. 859 */ 860 atomic_store_rel_int(&it->it_need, 1); 861 thread_lock(td); 862 if (TD_AWAITING_INTR(td)) { 863 CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, td->td_proc->p_pid, 864 td->td_name); 865 TD_CLR_IWAIT(td); 866 sched_add(td, SRQ_INTR); 867 } else { 868 CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d", 869 __func__, td->td_proc->p_pid, td->td_name, it->it_need, td->td_state); 870 } 871 thread_unlock(td); 872 873 return (0); 874 } 875 876 /* 877 * Allow interrupt event binding for software interrupt handlers -- a no-op, 878 * since interrupts are generated in software rather than being directed by 879 * a PIC. 880 */ 881 static int 882 swi_assign_cpu(void *arg, int cpu) 883 { 884 885 return (0); 886 } 887 888 /* 889 * Add a software interrupt handler to a specified event. If a given event 890 * is not specified, then a new event is created. 891 */ 892 int 893 swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler, 894 void *arg, int pri, enum intr_type flags, void **cookiep) 895 { 896 struct intr_event *ie; 897 int error; 898 899 if (flags & INTR_ENTROPY) 900 return (EINVAL); 901 902 ie = (eventp != NULL) ? 
	ie = (eventp != NULL) ? *eventp : NULL;

	if (ie != NULL) {
		if (!(ie->ie_flags & IE_SOFT))
			return (EINVAL);
	} else {
		error = intr_event_create(&ie, NULL, IE_SOFT, 0,
		    NULL, NULL, NULL, swi_assign_cpu, "swi%d:", pri);
		if (error)
			return (error);
		if (eventp != NULL)
			*eventp = ie;
	}
	error = intr_event_add_handler(ie, name, NULL, handler, arg,
	    PI_SWI(pri), flags, cookiep);
	return (error);
}

/*
 * Schedule a software interrupt thread.
 */
void
swi_sched(void *cookie, int flags)
{
	struct intr_handler *ih = (struct intr_handler *)cookie;
	struct intr_event *ie = ih->ih_event;
	struct intr_entropy entropy;
	int error __unused;

	CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
	    ih->ih_need);

	entropy.event = (uintptr_t)ih;
	entropy.td = curthread;
	random_harvest_queue(&entropy, sizeof(entropy), 1, RANDOM_SWI);

	/*
	 * Set ih_need for this handler so that if the ithread is already
	 * running it will execute this handler on the next pass.  Otherwise,
	 * it will execute it the next time it runs.
	 */
	ih->ih_need = 1;

	if (!(flags & SWI_DELAY)) {
		VM_CNT_INC(v_soft);
		error = intr_event_schedule_thread(ie);
		KASSERT(error == 0, ("stray software interrupt"));
	}
}

/*
 * Remove a software interrupt handler.  Currently this code does not
 * remove the associated interrupt event if it becomes empty.  Calling code
 * may do so manually via intr_event_destroy(), but that's not really
 * an optimal interface.
 */
int
swi_remove(void *cookie)
{

	return (intr_event_remove_handler(cookie));
}
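
/*
 * Example (illustrative only, not part of this file's logic): registering
 * and raising a software interrupt.  The "mydata" names are hypothetical;
 * see swi_add(9) for the documented interface.
 *
 *	static void *mydata_ih;
 *
 *	static void
 *	mydata_swi(void *arg)
 *	{
 *		// runs later in a software interrupt thread
 *	}
 *
 *	if (swi_add(NULL, "mydata", mydata_swi, NULL, SWI_CLOCK,
 *	    INTR_MPSAFE, &mydata_ih) != 0)
 *		panic("could not add mydata swi");
 *
 *	// later, typically from another interrupt handler:
 *	swi_sched(mydata_ih, 0);
 */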

/*
 * This is a public function for use by drivers that mux interrupt
 * handlers for child devices from their interrupt handler.
 */
void
intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
{
	struct intr_handler *ih, *ihn;

	TAILQ_FOREACH_SAFE(ih, &ie->ie_handlers, ih_next, ihn) {
		/*
		 * If this handler is marked for death, remove it from
		 * the list of handlers and wake up the sleeper.
		 */
		if (ih->ih_flags & IH_DEAD) {
			mtx_lock(&ie->ie_lock);
			TAILQ_REMOVE(&ie->ie_handlers, ih, ih_next);
			ih->ih_flags &= ~IH_DEAD;
			wakeup(ih);
			mtx_unlock(&ie->ie_lock);
			continue;
		}

		/* Skip filter only handlers */
		if (ih->ih_handler == NULL)
			continue;

		/*
		 * For software interrupt threads, we only execute
		 * handlers that have their need flag set.  Hardware
		 * interrupt threads always invoke all of their handlers.
		 *
		 * ih_need can only be 0 or 1.  Failed cmpset below
		 * means that there is no request to execute handlers,
		 * so a retry of the cmpset is not needed.
		 */
		if ((ie->ie_flags & IE_SOFT) != 0 &&
		    atomic_cmpset_int(&ih->ih_need, 1, 0) == 0)
			continue;

		/* Execute this handler. */
		CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
		    __func__, p->p_pid, (void *)ih->ih_handler,
		    ih->ih_argument, ih->ih_name, ih->ih_flags);

		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_lock(&Giant);
		ih->ih_handler(ih->ih_argument);
		if (!(ih->ih_flags & IH_MPSAFE))
			mtx_unlock(&Giant);
	}
}

static void
ithread_execute_handlers(struct proc *p, struct intr_event *ie)
{

	/* Interrupt handlers should not sleep. */
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_NO_SLEEPING();
	intr_event_execute_handlers(p, ie);
	if (!(ie->ie_flags & IE_SOFT))
		THREAD_SLEEPING_OK();

	/*
	 * Interrupt storm handling:
	 *
	 * If this interrupt source is currently storming, then throttle
	 * it to only fire the handler once per clock tick.
	 *
	 * If this interrupt source is not currently storming, but the
	 * number of back to back interrupts exceeds the storm threshold,
	 * then enter storming mode.
	 */
	if (intr_storm_threshold != 0 && ie->ie_count >= intr_storm_threshold &&
	    !(ie->ie_flags & IE_SOFT)) {
		/* Report the message only once every second. */
		if (ppsratecheck(&ie->ie_warntm, &ie->ie_warncnt, 1)) {
			printf(
	"interrupt storm detected on \"%s\"; throttling interrupt source\n",
			    ie->ie_name);
		}
		pause("istorm", 1);
	} else
		ie->ie_count++;

	/*
	 * Now that all the handlers have had a chance to run, reenable
	 * the interrupt source.
	 */
	if (ie->ie_post_ithread != NULL)
		ie->ie_post_ithread(ie->ie_source);
}

/*
 * This is the main code for interrupt threads.
 */
static void
ithread_loop(void *arg)
{
	struct intr_thread *ithd;
	struct intr_event *ie;
	struct thread *td;
	struct proc *p;
	int wake;

	td = curthread;
	p = td->td_proc;
	ithd = (struct intr_thread *)arg;
	KASSERT(ithd->it_thread == td,
	    ("%s: ithread and proc linkage out of sync", __func__));
	ie = ithd->it_event;
	ie->ie_count = 0;
	wake = 0;

	/*
	 * As long as we have interrupts outstanding, go through the
	 * list of handlers, giving each one a go at it.
	 */
	for (;;) {
		/*
		 * If we are an orphaned thread, then just die.
		 */
		if (ithd->it_flags & IT_DEAD) {
			CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
			    p->p_pid, td->td_name);
			free(ithd, M_ITHREAD);
			kthread_exit();
		}

		/*
		 * Service interrupts.  If another interrupt arrives while
		 * we are running, it will set it_need to note that we
		 * should make another pass.
		 *
		 * The load_acq part of the following cmpset ensures
		 * that the load of ih_need in ithread_execute_handlers()
		 * is ordered after the load of it_need here.
		 */
		while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0)
			ithread_execute_handlers(p, ie);
		WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
		mtx_assert(&Giant, MA_NOTOWNED);

		/*
		 * Processed all our interrupts.  Now get the sched
		 * lock.  This may take a while and it_need may get
		 * set again, so we have to check it again.
		 */
		thread_lock(td);
		if (atomic_load_acq_int(&ithd->it_need) == 0 &&
		    (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) {
			TD_SET_IWAIT(td);
			ie->ie_count = 0;
			mi_switch(SW_VOL | SWT_IWAIT, NULL);
		}
		if (ithd->it_flags & IT_WAIT) {
			wake = 1;
			ithd->it_flags &= ~IT_WAIT;
		}
		thread_unlock(td);
		if (wake) {
			wakeup(ithd);
			wake = 0;
		}
	}
}
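
/*
 * Example (illustrative only, not part of this file's logic): the
 * return-value contract for filters run by intr_event_handle() below.  A
 * filter executes in primary interrupt context and returns FILTER_STRAY if
 * the interrupt did not come from its device, FILTER_HANDLED if it was
 * serviced completely, or FILTER_SCHEDULE_THREAD to have the handler's
 * ithread run as well.  The "mydev" names are hypothetical.
 *
 *	static int
 *	mydev_filter(void *arg)
 *	{
 *		struct mydev_softc *sc = arg;
 *
 *		if (!MYDEV_INTR_PENDING(sc))
 *			return (FILTER_STRAY);
 *		MYDEV_ACK_INTR(sc);
 *		return (FILTER_SCHEDULE_THREAD);
 *	}
 */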
1144 */ 1145 int 1146 intr_event_handle(struct intr_event *ie, struct trapframe *frame) 1147 { 1148 struct intr_handler *ih; 1149 struct trapframe *oldframe; 1150 struct thread *td; 1151 int ret, thread; 1152 1153 td = curthread; 1154 1155 #ifdef KSTACK_USAGE_PROF 1156 intr_prof_stack_use(td, frame); 1157 #endif 1158 1159 /* An interrupt with no event or handlers is a stray interrupt. */ 1160 if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers)) 1161 return (EINVAL); 1162 1163 /* 1164 * Execute fast interrupt handlers directly. 1165 * To support clock handlers, if a handler registers 1166 * with a NULL argument, then we pass it a pointer to 1167 * a trapframe as its argument. 1168 */ 1169 td->td_intr_nesting_level++; 1170 thread = 0; 1171 ret = 0; 1172 critical_enter(); 1173 oldframe = td->td_intr_frame; 1174 td->td_intr_frame = frame; 1175 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) { 1176 if (ih->ih_filter == NULL) { 1177 thread = 1; 1178 continue; 1179 } 1180 CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__, 1181 ih->ih_filter, ih->ih_argument == NULL ? frame : 1182 ih->ih_argument, ih->ih_name); 1183 if (ih->ih_argument == NULL) 1184 ret = ih->ih_filter(frame); 1185 else 1186 ret = ih->ih_filter(ih->ih_argument); 1187 KASSERT(ret == FILTER_STRAY || 1188 ((ret & (FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) != 0 && 1189 (ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0), 1190 ("%s: incorrect return value %#x from %s", __func__, ret, 1191 ih->ih_name)); 1192 1193 /* 1194 * Wrapper handler special handling: 1195 * 1196 * in some particular cases (like pccard and pccbb), 1197 * the _real_ device handler is wrapped in a couple of 1198 * functions - a filter wrapper and an ithread wrapper. 1199 * In this case (and just in this case), the filter wrapper 1200 * could ask the system to schedule the ithread and mask 1201 * the interrupt source if the wrapped handler is composed 1202 * of just an ithread handler. 1203 * 1204 * TODO: write a generic wrapper to avoid people rolling 1205 * their own 1206 */ 1207 if (!thread) { 1208 if (ret == FILTER_SCHEDULE_THREAD) 1209 thread = 1; 1210 } 1211 } 1212 td->td_intr_frame = oldframe; 1213 1214 if (thread) { 1215 if (ie->ie_pre_ithread != NULL) 1216 ie->ie_pre_ithread(ie->ie_source); 1217 } else { 1218 if (ie->ie_post_filter != NULL) 1219 ie->ie_post_filter(ie->ie_source); 1220 } 1221 1222 /* Schedule the ithread if needed. 

	/* Schedule the ithread if needed. */
	if (thread) {
		int error __unused;

		error = intr_event_schedule_thread(ie);
		KASSERT(error == 0, ("bad stray interrupt"));
	}
	critical_exit();
	td->td_intr_nesting_level--;
	return (0);
}

#ifdef DDB
/*
 * Dump details about an interrupt handler
 */
static void
db_dump_intrhand(struct intr_handler *ih)
{
	int comma;

	db_printf("\t%-10s ", ih->ih_name);
	switch (ih->ih_pri) {
	case PI_REALTIME:
		db_printf("CLK ");
		break;
	case PI_AV:
		db_printf("AV  ");
		break;
	case PI_TTY:
		db_printf("TTY ");
		break;
	case PI_NET:
		db_printf("NET ");
		break;
	case PI_DISK:
		db_printf("DISK");
		break;
	case PI_DULL:
		db_printf("DULL");
		break;
	default:
		if (ih->ih_pri >= PI_SOFT)
			db_printf("SWI ");
		else
			db_printf("%4u", ih->ih_pri);
		break;
	}
	db_printf(" ");
	if (ih->ih_filter != NULL) {
		db_printf("[F]");
		db_printsym((uintptr_t)ih->ih_filter, DB_STGY_PROC);
	}
	if (ih->ih_handler != NULL) {
		if (ih->ih_filter != NULL)
			db_printf(",");
		db_printf("[H]");
		db_printsym((uintptr_t)ih->ih_handler, DB_STGY_PROC);
	}
	db_printf("(%p)", ih->ih_argument);
	if (ih->ih_need ||
	    (ih->ih_flags & (IH_EXCLUSIVE | IH_ENTROPY | IH_DEAD |
	    IH_MPSAFE)) != 0) {
		db_printf(" {");
		comma = 0;
		if (ih->ih_flags & IH_EXCLUSIVE) {
			if (comma)
				db_printf(", ");
			db_printf("EXCL");
			comma = 1;
		}
		if (ih->ih_flags & IH_ENTROPY) {
			if (comma)
				db_printf(", ");
			db_printf("ENTROPY");
			comma = 1;
		}
		if (ih->ih_flags & IH_DEAD) {
			if (comma)
				db_printf(", ");
			db_printf("DEAD");
			comma = 1;
		}
		if (ih->ih_flags & IH_MPSAFE) {
			if (comma)
				db_printf(", ");
			db_printf("MPSAFE");
			comma = 1;
		}
		if (ih->ih_need) {
			if (comma)
				db_printf(", ");
			db_printf("NEED");
		}
		db_printf("}");
	}
	db_printf("\n");
}

/*
 * Dump details about an event.
 */
1323 */ 1324 void 1325 db_dump_intr_event(struct intr_event *ie, int handlers) 1326 { 1327 struct intr_handler *ih; 1328 struct intr_thread *it; 1329 int comma; 1330 1331 db_printf("%s ", ie->ie_fullname); 1332 it = ie->ie_thread; 1333 if (it != NULL) 1334 db_printf("(pid %d)", it->it_thread->td_proc->p_pid); 1335 else 1336 db_printf("(no thread)"); 1337 if ((ie->ie_flags & (IE_SOFT | IE_ENTROPY | IE_ADDING_THREAD)) != 0 || 1338 (it != NULL && it->it_need)) { 1339 db_printf(" {"); 1340 comma = 0; 1341 if (ie->ie_flags & IE_SOFT) { 1342 db_printf("SOFT"); 1343 comma = 1; 1344 } 1345 if (ie->ie_flags & IE_ENTROPY) { 1346 if (comma) 1347 db_printf(", "); 1348 db_printf("ENTROPY"); 1349 comma = 1; 1350 } 1351 if (ie->ie_flags & IE_ADDING_THREAD) { 1352 if (comma) 1353 db_printf(", "); 1354 db_printf("ADDING_THREAD"); 1355 comma = 1; 1356 } 1357 if (it != NULL && it->it_need) { 1358 if (comma) 1359 db_printf(", "); 1360 db_printf("NEED"); 1361 } 1362 db_printf("}"); 1363 } 1364 db_printf("\n"); 1365 1366 if (handlers) 1367 TAILQ_FOREACH(ih, &ie->ie_handlers, ih_next) 1368 db_dump_intrhand(ih); 1369 } 1370 1371 /* 1372 * Dump data about interrupt handlers 1373 */ 1374 DB_SHOW_COMMAND(intr, db_show_intr) 1375 { 1376 struct intr_event *ie; 1377 int all, verbose; 1378 1379 verbose = strchr(modif, 'v') != NULL; 1380 all = strchr(modif, 'a') != NULL; 1381 TAILQ_FOREACH(ie, &event_list, ie_list) { 1382 if (!all && TAILQ_EMPTY(&ie->ie_handlers)) 1383 continue; 1384 db_dump_intr_event(ie, verbose); 1385 if (db_pager_quit) 1386 break; 1387 } 1388 } 1389 #endif /* DDB */ 1390 1391 /* 1392 * Start standard software interrupt threads 1393 */ 1394 static void 1395 start_softintr(void *dummy) 1396 { 1397 1398 if (swi_add(NULL, "vm", swi_vm, NULL, SWI_VM, INTR_MPSAFE, &vm_ih)) 1399 panic("died while creating vm swi ithread"); 1400 } 1401 SYSINIT(start_softintr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softintr, 1402 NULL); 1403 1404 /* 1405 * Sysctls used by systat and others: hw.intrnames and hw.intrcnt. 1406 * The data for this machine dependent, and the declarations are in machine 1407 * dependent code. The layout of intrnames and intrcnt however is machine 1408 * independent. 1409 * 1410 * We do not know the length of intrcnt and intrnames at compile time, so 1411 * calculate things at run time. 1412 */ 1413 static int 1414 sysctl_intrnames(SYSCTL_HANDLER_ARGS) 1415 { 1416 return (sysctl_handle_opaque(oidp, intrnames, sintrnames, req)); 1417 } 1418 1419 SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD, 1420 NULL, 0, sysctl_intrnames, "", "Interrupt Names"); 1421 1422 static int 1423 sysctl_intrcnt(SYSCTL_HANDLER_ARGS) 1424 { 1425 #ifdef SCTL_MASK32 1426 uint32_t *intrcnt32; 1427 unsigned i; 1428 int error; 1429 1430 if (req->flags & SCTL_MASK32) { 1431 if (!req->oldptr) 1432 return (sysctl_handle_opaque(oidp, NULL, sintrcnt / 2, req)); 1433 intrcnt32 = malloc(sintrcnt / 2, M_TEMP, M_NOWAIT); 1434 if (intrcnt32 == NULL) 1435 return (ENOMEM); 1436 for (i = 0; i < sintrcnt / sizeof (u_long); i++) 1437 intrcnt32[i] = intrcnt[i]; 1438 error = sysctl_handle_opaque(oidp, intrcnt32, sintrcnt / 2, req); 1439 free(intrcnt32, M_TEMP); 1440 return (error); 1441 } 1442 #endif 1443 return (sysctl_handle_opaque(oidp, intrcnt, sintrcnt, req)); 1444 } 1445 1446 SYSCTL_PROC(_hw, OID_AUTO, intrcnt, CTLTYPE_OPAQUE | CTLFLAG_RD, 1447 NULL, 0, sysctl_intrcnt, "", "Interrupt Counts"); 1448 1449 #ifdef DDB 1450 /* 1451 * DDB command to dump the interrupt statistics. 
1452 */ 1453 DB_SHOW_COMMAND(intrcnt, db_show_intrcnt) 1454 { 1455 u_long *i; 1456 char *cp; 1457 u_int j; 1458 1459 cp = intrnames; 1460 j = 0; 1461 for (i = intrcnt; j < (sintrcnt / sizeof(u_long)) && !db_pager_quit; 1462 i++, j++) { 1463 if (*cp == '\0') 1464 break; 1465 if (*i != 0) 1466 db_printf("%s\t%lu\n", cp, *i); 1467 cp += strlen(cp) + 1; 1468 } 1469 } 1470 #endif 1471