/*-
 * Copyright (c) 2015-2016 Svatopluk Kraus
 * Copyright (c) 2015-2016 Michal Meloun
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * New-style Interrupt Framework
 *
 * TODO: - to support IPI (PPI) enabling on other CPUs if already started
 *       - to complete things for removable PICs
 */

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/rman.h>
#include <sys/sched.h>
#include <sys/smp.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <machine/atomic.h>
#include <machine/intr.h>
#include <machine/cpu.h>
#include <machine/smp.h>
#include <machine/stdarg.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include "pic_if.h"
#include "msi_if.h"

#define	INTRNAME_LEN	(2*MAXCOMLEN + 1)

#ifdef DEBUG
#define debugf(fmt, args...) do { printf("%s(): ", __func__);	\
    printf(fmt,##args); } while (0)
#else
#define debugf(fmt, args...)
#endif

MALLOC_DECLARE(M_INTRNG);
MALLOC_DEFINE(M_INTRNG, "intr", "intr interrupt handling");

/* Main interrupt handler called from assembler -> 'hidden' for C code. */
void intr_irq_handler(struct trapframe *tf);

/* Root interrupt controller stuff. */
device_t intr_irq_root_dev;
static intr_irq_filter_t *irq_root_filter;
static void *irq_root_arg;
static u_int irq_root_ipicount;

struct intr_pic_child {
	SLIST_ENTRY(intr_pic_child)	 pc_next;
	struct intr_pic			*pc_pic;
	intr_child_irq_filter_t		*pc_filter;
	void				*pc_filter_arg;
	uintptr_t			 pc_start;
	uintptr_t			 pc_length;
};
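/*
 * Illustrative example (values hypothetical): a child registered via
 * intr_pic_add_handler() with pc_start = 32 and pc_length = 16 claims
 * hardware IRQs 32..47; intr_child_irq_handler() forwards any IRQ in
 * that window to pc_filter with pc_filter_arg.
 */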
/* Interrupt controller definition. */
struct intr_pic {
	SLIST_ENTRY(intr_pic)	pic_next;
	intptr_t		pic_xref;	/* hardware identification */
	device_t		pic_dev;
#define	FLAG_PIC	(1 << 0)
#define	FLAG_MSI	(1 << 1)
	u_int			pic_flags;
	struct mtx		pic_child_lock;
	SLIST_HEAD(, intr_pic_child) pic_children;
};

static struct mtx pic_list_lock;
static SLIST_HEAD(, intr_pic) pic_list;

static struct intr_pic *pic_lookup(device_t dev, intptr_t xref);

/* Interrupt source definition. */
static struct mtx isrc_table_lock;
static struct intr_irqsrc *irq_sources[NIRQ];
u_int irq_next_free;

#ifdef SMP
static boolean_t irq_assign_cpu = FALSE;
#endif

/*
 * - 2 counters for each I/O interrupt.
 * - MAXCPU counters for each IPI for SMP.
 */
#ifdef SMP
#define INTRCNT_COUNT	(NIRQ * 2 + INTR_IPI_COUNT * MAXCPU)
#else
#define INTRCNT_COUNT	(NIRQ * 2)
#endif

/* Data for MI statistics reporting. */
u_long intrcnt[INTRCNT_COUNT];
char intrnames[INTRCNT_COUNT * INTRNAME_LEN];
size_t sintrcnt = sizeof(intrcnt);
size_t sintrnames = sizeof(intrnames);
static u_int intrcnt_index;

/*
 * Interrupt framework initialization routine.
 */
static void
intr_irq_init(void *dummy __unused)
{

	SLIST_INIT(&pic_list);
	mtx_init(&pic_list_lock, "intr pic list", NULL, MTX_DEF);

	mtx_init(&isrc_table_lock, "intr isrc table", NULL, MTX_DEF);
}
SYSINIT(intr_irq_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_irq_init, NULL);

static void
intrcnt_setname(const char *name, int index)
{

	snprintf(intrnames + INTRNAME_LEN * index, INTRNAME_LEN, "%-*s",
	    INTRNAME_LEN - 1, name);
}

/*
 * Update name for interrupt source with interrupt event.
 */
static void
intrcnt_updatename(struct intr_irqsrc *isrc)
{

	/* QQQ: What about stray counter name? */
	mtx_assert(&isrc_table_lock, MA_OWNED);
	intrcnt_setname(isrc->isrc_event->ie_fullname, isrc->isrc_index);
}

/*
 * Virtualization for interrupt source interrupt counter increment.
 */
static inline void
isrc_increment_count(struct intr_irqsrc *isrc)
{

	if (isrc->isrc_flags & INTR_ISRCF_PPI)
		atomic_add_long(&isrc->isrc_count[0], 1);
	else
		isrc->isrc_count[0]++;
}

/*
 * Virtualization for interrupt source interrupt stray counter increment.
 */
static inline void
isrc_increment_straycount(struct intr_irqsrc *isrc)
{

	isrc->isrc_count[1]++;
}

/*
 * Virtualization for interrupt source interrupt name update.
 */
static void
isrc_update_name(struct intr_irqsrc *isrc, const char *name)
{
	char str[INTRNAME_LEN];

	mtx_assert(&isrc_table_lock, MA_OWNED);

	if (name != NULL) {
		snprintf(str, INTRNAME_LEN, "%s: %s", isrc->isrc_name, name);
		intrcnt_setname(str, isrc->isrc_index);
		snprintf(str, INTRNAME_LEN, "stray %s: %s", isrc->isrc_name,
		    name);
		intrcnt_setname(str, isrc->isrc_index + 1);
	} else {
		snprintf(str, INTRNAME_LEN, "%s:", isrc->isrc_name);
		intrcnt_setname(str, isrc->isrc_index);
		snprintf(str, INTRNAME_LEN, "stray %s:", isrc->isrc_name);
		intrcnt_setname(str, isrc->isrc_index + 1);
	}
}
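/*
 * Counter layout, spelled out (derived from the code above): an I/O
 * source assigned isrc_index N owns intrcnt[N] (named "<name>:") and
 * intrcnt[N + 1] (named "stray <name>:"), which is why
 * isrc_setup_counters() below advances intrcnt_index by 2 per source.
 */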
/*
 * Virtualization for interrupt source interrupt counters setup.
 */
static void
isrc_setup_counters(struct intr_irqsrc *isrc)
{
	u_int index;

	/*
	 * XXX - it does not work well with removable controllers and
	 *	 interrupt sources !!!
	 */
	index = atomic_fetchadd_int(&intrcnt_index, 2);
	isrc->isrc_index = index;
	isrc->isrc_count = &intrcnt[index];
	isrc_update_name(isrc, NULL);
}

/*
 * Virtualization for interrupt source interrupt counters release.
 */
static void
isrc_release_counters(struct intr_irqsrc *isrc)
{

	panic("%s: not implemented", __func__);
}

#ifdef SMP
/*
 * Virtualization for interrupt source IPI counters setup.
 */
u_long *
intr_ipi_setup_counters(const char *name)
{
	u_int index, i;
	char str[INTRNAME_LEN];

	index = atomic_fetchadd_int(&intrcnt_index, MAXCPU);
	for (i = 0; i < MAXCPU; i++) {
		snprintf(str, INTRNAME_LEN, "cpu%d:%s", i, name);
		intrcnt_setname(str, index + i);
	}
	return (&intrcnt[index]);
}
#endif

/*
 * Main interrupt dispatch handler. It is called straight
 * from the assembler, where the CPU interrupt is serviced.
 */
void
intr_irq_handler(struct trapframe *tf)
{
	struct trapframe * oldframe;
	struct thread * td;

	KASSERT(irq_root_filter != NULL, ("%s: no filter", __func__));

	PCPU_INC(cnt.v_intr);
	critical_enter();
	td = curthread;
	oldframe = td->td_intr_frame;
	td->td_intr_frame = tf;
	irq_root_filter(irq_root_arg);
	td->td_intr_frame = oldframe;
	critical_exit();
#ifdef HWPMC_HOOKS
	if (pmc_hook && TRAPF_USERMODE(tf) &&
	    (PCPU_GET(curthread)->td_pflags & TDP_CALLCHAIN))
		pmc_hook(PCPU_GET(curthread), PMC_FN_USER_CALLCHAIN, tf);
#endif
}

int
intr_child_irq_handler(struct intr_pic *parent, uintptr_t irq)
{
	struct intr_pic_child *child;
	bool found;

	found = false;
	mtx_lock_spin(&parent->pic_child_lock);
	SLIST_FOREACH(child, &parent->pic_children, pc_next) {
		if (child->pc_start <= irq &&
		    irq < (child->pc_start + child->pc_length)) {
			found = true;
			break;
		}
	}
	mtx_unlock_spin(&parent->pic_child_lock);

	if (found)
		return (child->pc_filter(child->pc_filter_arg, irq));

	return (FILTER_STRAY);
}

/*
 * Interrupt controller dispatch function for interrupts. It should
 * be called straight from the interrupt controller, once the associated
 * interrupt source is learned.
 */
int
intr_isrc_dispatch(struct intr_irqsrc *isrc, struct trapframe *tf)
{

	KASSERT(isrc != NULL, ("%s: no source", __func__));

	isrc_increment_count(isrc);

#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL) {
		int error;
		error = isrc->isrc_filter(isrc->isrc_arg, tf);
		PIC_POST_FILTER(isrc->isrc_dev, isrc);
		if (error == FILTER_HANDLED)
			return (0);
	} else
#endif
	if (isrc->isrc_event != NULL) {
		if (intr_event_handle(isrc->isrc_event, tf) == 0)
			return (0);
	}

	isrc_increment_straycount(isrc);
	return (EINVAL);
}
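/*
 * Illustrative sketch with hypothetical driver names: a leaf PIC's
 * filter decodes the pending hardware interrupt and hands the matching
 * source to intr_isrc_dispatch():
 *
 *	static int
 *	foo_pic_filter(void *arg)
 *	{
 *		struct foo_pic_softc *sc = arg;
 *		u_int hwirq = foo_pic_read_pending(sc);
 *
 *		if (intr_isrc_dispatch(&sc->foo_isrcs[hwirq].fi_isrc,
 *		    curthread->td_intr_frame) != 0)
 *			return (FILTER_STRAY);
 *		return (FILTER_HANDLED);
 *	}
 */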
363 */ 364 static inline int 365 isrc_alloc_irq(struct intr_irqsrc *isrc) 366 { 367 u_int maxirqs, irq; 368 369 mtx_assert(&isrc_table_lock, MA_OWNED); 370 371 maxirqs = nitems(irq_sources); 372 if (irq_next_free >= maxirqs) 373 return (ENOSPC); 374 375 for (irq = irq_next_free; irq < maxirqs; irq++) { 376 if (irq_sources[irq] == NULL) 377 goto found; 378 } 379 for (irq = 0; irq < irq_next_free; irq++) { 380 if (irq_sources[irq] == NULL) 381 goto found; 382 } 383 384 irq_next_free = maxirqs; 385 return (ENOSPC); 386 387 found: 388 isrc->isrc_irq = irq; 389 irq_sources[irq] = isrc; 390 391 irq_next_free = irq + 1; 392 if (irq_next_free >= maxirqs) 393 irq_next_free = 0; 394 return (0); 395 } 396 397 /* 398 * Free unique interrupt number (resource handle) from interrupt source. 399 */ 400 static inline int 401 isrc_free_irq(struct intr_irqsrc *isrc) 402 { 403 404 mtx_assert(&isrc_table_lock, MA_OWNED); 405 406 if (isrc->isrc_irq >= nitems(irq_sources)) 407 return (EINVAL); 408 if (irq_sources[isrc->isrc_irq] != isrc) 409 return (EINVAL); 410 411 irq_sources[isrc->isrc_irq] = NULL; 412 isrc->isrc_irq = INTR_IRQ_INVALID; /* just to be safe */ 413 return (0); 414 } 415 416 /* 417 * Lookup interrupt source by interrupt number (resource handle). 418 */ 419 static inline struct intr_irqsrc * 420 isrc_lookup(u_int irq) 421 { 422 423 if (irq < nitems(irq_sources)) 424 return (irq_sources[irq]); 425 return (NULL); 426 } 427 428 /* 429 * Initialize interrupt source and register it into global interrupt table. 430 */ 431 int 432 intr_isrc_register(struct intr_irqsrc *isrc, device_t dev, u_int flags, 433 const char *fmt, ...) 434 { 435 int error; 436 va_list ap; 437 438 bzero(isrc, sizeof(struct intr_irqsrc)); 439 isrc->isrc_dev = dev; 440 isrc->isrc_irq = INTR_IRQ_INVALID; /* just to be safe */ 441 isrc->isrc_flags = flags; 442 443 va_start(ap, fmt); 444 vsnprintf(isrc->isrc_name, INTR_ISRC_NAMELEN, fmt, ap); 445 va_end(ap); 446 447 mtx_lock(&isrc_table_lock); 448 error = isrc_alloc_irq(isrc); 449 if (error != 0) { 450 mtx_unlock(&isrc_table_lock); 451 return (error); 452 } 453 /* 454 * Setup interrupt counters, but not for IPI sources. Those are setup 455 * later and only for used ones (up to INTR_IPI_COUNT) to not exhaust 456 * our counter pool. 457 */ 458 if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0) 459 isrc_setup_counters(isrc); 460 mtx_unlock(&isrc_table_lock); 461 return (0); 462 } 463 464 /* 465 * Deregister interrupt source from global interrupt table. 466 */ 467 int 468 intr_isrc_deregister(struct intr_irqsrc *isrc) 469 { 470 int error; 471 472 mtx_lock(&isrc_table_lock); 473 if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0) 474 isrc_release_counters(isrc); 475 error = isrc_free_irq(isrc); 476 mtx_unlock(&isrc_table_lock); 477 return (error); 478 } 479 480 #ifdef SMP 481 /* 482 * A support function for a PIC to decide if provided ISRC should be inited 483 * on given cpu. The logic of INTR_ISRCF_BOUND flag and isrc_cpu member of 484 * struct intr_irqsrc is the following: 485 * 486 * If INTR_ISRCF_BOUND is set, the ISRC should be inited only on cpus 487 * set in isrc_cpu. If not, the ISRC should be inited on every cpu and 488 * isrc_cpu is kept consistent with it. Thus isrc_cpu is always correct. 
489 */ 490 bool 491 intr_isrc_init_on_cpu(struct intr_irqsrc *isrc, u_int cpu) 492 { 493 494 if (isrc->isrc_handlers == 0) 495 return (false); 496 if ((isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI)) == 0) 497 return (false); 498 if (isrc->isrc_flags & INTR_ISRCF_BOUND) 499 return (CPU_ISSET(cpu, &isrc->isrc_cpu)); 500 501 CPU_SET(cpu, &isrc->isrc_cpu); 502 return (true); 503 } 504 #endif 505 506 #ifdef INTR_SOLO 507 /* 508 * Setup filter into interrupt source. 509 */ 510 static int 511 iscr_setup_filter(struct intr_irqsrc *isrc, const char *name, 512 intr_irq_filter_t *filter, void *arg, void **cookiep) 513 { 514 515 if (filter == NULL) 516 return (EINVAL); 517 518 mtx_lock(&isrc_table_lock); 519 /* 520 * Make sure that we do not mix the two ways 521 * how we handle interrupt sources. 522 */ 523 if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) { 524 mtx_unlock(&isrc_table_lock); 525 return (EBUSY); 526 } 527 isrc->isrc_filter = filter; 528 isrc->isrc_arg = arg; 529 isrc_update_name(isrc, name); 530 mtx_unlock(&isrc_table_lock); 531 532 *cookiep = isrc; 533 return (0); 534 } 535 #endif 536 537 /* 538 * Interrupt source pre_ithread method for MI interrupt framework. 539 */ 540 static void 541 intr_isrc_pre_ithread(void *arg) 542 { 543 struct intr_irqsrc *isrc = arg; 544 545 PIC_PRE_ITHREAD(isrc->isrc_dev, isrc); 546 } 547 548 /* 549 * Interrupt source post_ithread method for MI interrupt framework. 550 */ 551 static void 552 intr_isrc_post_ithread(void *arg) 553 { 554 struct intr_irqsrc *isrc = arg; 555 556 PIC_POST_ITHREAD(isrc->isrc_dev, isrc); 557 } 558 559 /* 560 * Interrupt source post_filter method for MI interrupt framework. 561 */ 562 static void 563 intr_isrc_post_filter(void *arg) 564 { 565 struct intr_irqsrc *isrc = arg; 566 567 PIC_POST_FILTER(isrc->isrc_dev, isrc); 568 } 569 570 /* 571 * Interrupt source assign_cpu method for MI interrupt framework. 572 */ 573 static int 574 intr_isrc_assign_cpu(void *arg, int cpu) 575 { 576 #ifdef SMP 577 struct intr_irqsrc *isrc = arg; 578 int error; 579 580 if (isrc->isrc_dev != intr_irq_root_dev) 581 return (EINVAL); 582 583 mtx_lock(&isrc_table_lock); 584 if (cpu == NOCPU) { 585 CPU_ZERO(&isrc->isrc_cpu); 586 isrc->isrc_flags &= ~INTR_ISRCF_BOUND; 587 } else { 588 CPU_SETOF(cpu, &isrc->isrc_cpu); 589 isrc->isrc_flags |= INTR_ISRCF_BOUND; 590 } 591 592 /* 593 * In NOCPU case, it's up to PIC to either leave ISRC on same CPU or 594 * re-balance it to another CPU or enable it on more CPUs. However, 595 * PIC is expected to change isrc_cpu appropriately to keep us well 596 * informed if the call is successful. 597 */ 598 if (irq_assign_cpu) { 599 error = PIC_BIND_INTR(isrc->isrc_dev, isrc); 600 if (error) { 601 CPU_ZERO(&isrc->isrc_cpu); 602 mtx_unlock(&isrc_table_lock); 603 return (error); 604 } 605 } 606 mtx_unlock(&isrc_table_lock); 607 return (0); 608 #else 609 return (EOPNOTSUPP); 610 #endif 611 } 612 613 /* 614 * Create interrupt event for interrupt source. 615 */ 616 static int 617 isrc_event_create(struct intr_irqsrc *isrc) 618 { 619 struct intr_event *ie; 620 int error; 621 622 error = intr_event_create(&ie, isrc, 0, isrc->isrc_irq, 623 intr_isrc_pre_ithread, intr_isrc_post_ithread, intr_isrc_post_filter, 624 intr_isrc_assign_cpu, "%s:", isrc->isrc_name); 625 if (error) 626 return (error); 627 628 mtx_lock(&isrc_table_lock); 629 /* 630 * Make sure that we do not mix the two ways 631 * how we handle interrupt sources. Let contested event wins. 
632 */ 633 #ifdef INTR_SOLO 634 if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) { 635 #else 636 if (isrc->isrc_event != NULL) { 637 #endif 638 mtx_unlock(&isrc_table_lock); 639 intr_event_destroy(ie); 640 return (isrc->isrc_event != NULL ? EBUSY : 0); 641 } 642 isrc->isrc_event = ie; 643 mtx_unlock(&isrc_table_lock); 644 645 return (0); 646 } 647 #ifdef notyet 648 /* 649 * Destroy interrupt event for interrupt source. 650 */ 651 static void 652 isrc_event_destroy(struct intr_irqsrc *isrc) 653 { 654 struct intr_event *ie; 655 656 mtx_lock(&isrc_table_lock); 657 ie = isrc->isrc_event; 658 isrc->isrc_event = NULL; 659 mtx_unlock(&isrc_table_lock); 660 661 if (ie != NULL) 662 intr_event_destroy(ie); 663 } 664 #endif 665 /* 666 * Add handler to interrupt source. 667 */ 668 static int 669 isrc_add_handler(struct intr_irqsrc *isrc, const char *name, 670 driver_filter_t filter, driver_intr_t handler, void *arg, 671 enum intr_type flags, void **cookiep) 672 { 673 int error; 674 675 if (isrc->isrc_event == NULL) { 676 error = isrc_event_create(isrc); 677 if (error) 678 return (error); 679 } 680 681 error = intr_event_add_handler(isrc->isrc_event, name, filter, handler, 682 arg, intr_priority(flags), flags, cookiep); 683 if (error == 0) { 684 mtx_lock(&isrc_table_lock); 685 intrcnt_updatename(isrc); 686 mtx_unlock(&isrc_table_lock); 687 } 688 689 return (error); 690 } 691 692 /* 693 * Lookup interrupt controller locked. 694 */ 695 static inline struct intr_pic * 696 pic_lookup_locked(device_t dev, intptr_t xref) 697 { 698 struct intr_pic *pic; 699 700 mtx_assert(&pic_list_lock, MA_OWNED); 701 702 if (dev == NULL && xref == 0) 703 return (NULL); 704 705 /* Note that pic->pic_dev is never NULL on registered PIC. */ 706 SLIST_FOREACH(pic, &pic_list, pic_next) { 707 if (dev == NULL) { 708 if (xref == pic->pic_xref) 709 return (pic); 710 } else if (xref == 0 || pic->pic_xref == 0) { 711 if (dev == pic->pic_dev) 712 return (pic); 713 } else if (xref == pic->pic_xref && dev == pic->pic_dev) 714 return (pic); 715 } 716 return (NULL); 717 } 718 719 /* 720 * Lookup interrupt controller. 721 */ 722 static struct intr_pic * 723 pic_lookup(device_t dev, intptr_t xref) 724 { 725 struct intr_pic *pic; 726 727 mtx_lock(&pic_list_lock); 728 pic = pic_lookup_locked(dev, xref); 729 mtx_unlock(&pic_list_lock); 730 return (pic); 731 } 732 733 /* 734 * Create interrupt controller. 735 */ 736 static struct intr_pic * 737 pic_create(device_t dev, intptr_t xref) 738 { 739 struct intr_pic *pic; 740 741 mtx_lock(&pic_list_lock); 742 pic = pic_lookup_locked(dev, xref); 743 if (pic != NULL) { 744 mtx_unlock(&pic_list_lock); 745 return (pic); 746 } 747 pic = malloc(sizeof(*pic), M_INTRNG, M_NOWAIT | M_ZERO); 748 if (pic == NULL) { 749 mtx_unlock(&pic_list_lock); 750 return (NULL); 751 } 752 pic->pic_xref = xref; 753 pic->pic_dev = dev; 754 mtx_init(&pic->pic_child_lock, "pic child lock", NULL, MTX_SPIN); 755 SLIST_INSERT_HEAD(&pic_list, pic, pic_next); 756 mtx_unlock(&pic_list_lock); 757 758 return (pic); 759 } 760 #ifdef notyet 761 /* 762 * Destroy interrupt controller. 763 */ 764 static void 765 pic_destroy(device_t dev, intptr_t xref) 766 { 767 struct intr_pic *pic; 768 769 mtx_lock(&pic_list_lock); 770 pic = pic_lookup_locked(dev, xref); 771 if (pic == NULL) { 772 mtx_unlock(&pic_list_lock); 773 return; 774 } 775 SLIST_REMOVE(&pic_list, pic, intr_pic, pic_next); 776 mtx_unlock(&pic_list_lock); 777 778 free(pic, M_INTRNG); 779 } 780 #endif 781 /* 782 * Register interrupt controller. 
783 */ 784 struct intr_pic * 785 intr_pic_register(device_t dev, intptr_t xref) 786 { 787 struct intr_pic *pic; 788 789 if (dev == NULL) 790 return (NULL); 791 pic = pic_create(dev, xref); 792 if (pic == NULL) 793 return (NULL); 794 795 pic->pic_flags |= FLAG_PIC; 796 797 debugf("PIC %p registered for %s <dev %p, xref %x>\n", pic, 798 device_get_nameunit(dev), dev, xref); 799 return (pic); 800 } 801 802 /* 803 * Unregister interrupt controller. 804 */ 805 int 806 intr_pic_deregister(device_t dev, intptr_t xref) 807 { 808 809 panic("%s: not implemented", __func__); 810 } 811 812 /* 813 * Mark interrupt controller (itself) as a root one. 814 * 815 * Note that only an interrupt controller can really know its position 816 * in interrupt controller's tree. So root PIC must claim itself as a root. 817 * 818 * In FDT case, according to ePAPR approved version 1.1 from 08 April 2011, 819 * page 30: 820 * "The root of the interrupt tree is determined when traversal 821 * of the interrupt tree reaches an interrupt controller node without 822 * an interrupts property and thus no explicit interrupt parent." 823 */ 824 int 825 intr_pic_claim_root(device_t dev, intptr_t xref, intr_irq_filter_t *filter, 826 void *arg, u_int ipicount) 827 { 828 struct intr_pic *pic; 829 830 pic = pic_lookup(dev, xref); 831 if (pic == NULL) { 832 device_printf(dev, "not registered\n"); 833 return (EINVAL); 834 } 835 836 KASSERT((pic->pic_flags & FLAG_PIC) != 0, 837 ("%s: Found a non-PIC controller: %s", __func__, 838 device_get_name(pic->pic_dev))); 839 840 if (filter == NULL) { 841 device_printf(dev, "filter missing\n"); 842 return (EINVAL); 843 } 844 845 /* 846 * Only one interrupt controllers could be on the root for now. 847 * Note that we further suppose that there is not threaded interrupt 848 * routine (handler) on the root. See intr_irq_handler(). 849 */ 850 if (intr_irq_root_dev != NULL) { 851 device_printf(dev, "another root already set\n"); 852 return (EBUSY); 853 } 854 855 intr_irq_root_dev = dev; 856 irq_root_filter = filter; 857 irq_root_arg = arg; 858 irq_root_ipicount = ipicount; 859 860 debugf("irq root set to %s\n", device_get_nameunit(dev)); 861 return (0); 862 } 863 864 /* 865 * Add a handler to manage a sub range of a parents interrupts. 
866 */ 867 struct intr_pic * 868 intr_pic_add_handler(device_t parent, struct intr_pic *pic, 869 intr_child_irq_filter_t *filter, void *arg, uintptr_t start, 870 uintptr_t length) 871 { 872 struct intr_pic *parent_pic; 873 struct intr_pic_child *newchild; 874 #ifdef INVARIANTS 875 struct intr_pic_child *child; 876 #endif 877 878 parent_pic = pic_lookup(parent, 0); 879 if (parent_pic == NULL) 880 return (NULL); 881 882 newchild = malloc(sizeof(*newchild), M_INTRNG, M_WAITOK | M_ZERO); 883 newchild->pc_pic = pic; 884 newchild->pc_filter = filter; 885 newchild->pc_filter_arg = arg; 886 newchild->pc_start = start; 887 newchild->pc_length = length; 888 889 mtx_lock_spin(&parent_pic->pic_child_lock); 890 #ifdef INVARIANTS 891 SLIST_FOREACH(child, &parent_pic->pic_children, pc_next) { 892 KASSERT(child->pc_pic != pic, ("%s: Adding a child PIC twice", 893 __func__)); 894 } 895 #endif 896 SLIST_INSERT_HEAD(&parent_pic->pic_children, newchild, pc_next); 897 mtx_unlock_spin(&parent_pic->pic_child_lock); 898 899 return (pic); 900 } 901 902 int 903 intr_map_irq(device_t dev, intptr_t xref, struct intr_map_data *data, 904 u_int *irqp) 905 { 906 int error; 907 struct intr_irqsrc *isrc; 908 struct intr_pic *pic; 909 910 if (data == NULL) 911 return (EINVAL); 912 913 pic = pic_lookup(dev, xref); 914 if (pic == NULL) 915 return (ESRCH); 916 917 KASSERT((pic->pic_flags & FLAG_PIC) != 0, 918 ("%s: Found a non-PIC controller: %s", __func__, 919 device_get_name(pic->pic_dev))); 920 921 error = PIC_MAP_INTR(pic->pic_dev, data, &isrc); 922 if (error == 0) 923 *irqp = isrc->isrc_irq; 924 return (error); 925 } 926 927 int 928 intr_alloc_irq(device_t dev, struct resource *res) 929 { 930 struct intr_map_data *data; 931 struct intr_irqsrc *isrc; 932 933 KASSERT(rman_get_start(res) == rman_get_end(res), 934 ("%s: more interrupts in resource", __func__)); 935 936 isrc = isrc_lookup(rman_get_start(res)); 937 if (isrc == NULL) 938 return (EINVAL); 939 940 data = rman_get_virtual(res); 941 return (PIC_ALLOC_INTR(isrc->isrc_dev, isrc, res, data)); 942 } 943 944 int 945 intr_release_irq(device_t dev, struct resource *res) 946 { 947 struct intr_map_data *data; 948 struct intr_irqsrc *isrc; 949 950 KASSERT(rman_get_start(res) == rman_get_end(res), 951 ("%s: more interrupts in resource", __func__)); 952 953 isrc = isrc_lookup(rman_get_start(res)); 954 if (isrc == NULL) 955 return (EINVAL); 956 957 data = rman_get_virtual(res); 958 return (PIC_RELEASE_INTR(isrc->isrc_dev, isrc, res, data)); 959 } 960 961 int 962 intr_setup_irq(device_t dev, struct resource *res, driver_filter_t filt, 963 driver_intr_t hand, void *arg, int flags, void **cookiep) 964 { 965 int error; 966 struct intr_map_data *data; 967 struct intr_irqsrc *isrc; 968 const char *name; 969 970 KASSERT(rman_get_start(res) == rman_get_end(res), 971 ("%s: more interrupts in resource", __func__)); 972 973 isrc = isrc_lookup(rman_get_start(res)); 974 if (isrc == NULL) 975 return (EINVAL); 976 977 data = rman_get_virtual(res); 978 name = device_get_nameunit(dev); 979 980 #ifdef INTR_SOLO 981 /* 982 * Standard handling is done through MI interrupt framework. However, 983 * some interrupts could request solely own special handling. This 984 * non standard handling can be used for interrupt controllers without 985 * handler (filter only), so in case that interrupt controllers are 986 * chained, MI interrupt framework is called only in leaf controller. 987 * 988 * Note that root interrupt controller routine is served as well, 989 * however in intr_irq_handler(), i.e. 
int
intr_setup_irq(device_t dev, struct resource *res, driver_filter_t filt,
    driver_intr_t hand, void *arg, int flags, void **cookiep)
{
	int error;
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;
	const char *name;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	isrc = isrc_lookup(rman_get_start(res));
	if (isrc == NULL)
		return (EINVAL);

	data = rman_get_virtual(res);
	name = device_get_nameunit(dev);

#ifdef INTR_SOLO
	/*
	 * Standard handling is done through the MI interrupt framework.
	 * However, some interrupts could request solely their own special
	 * handling. This non-standard handling can be used for interrupt
	 * controllers without a handler (filter only), so in case interrupt
	 * controllers are chained, the MI interrupt framework is called only
	 * in the leaf controller.
	 *
	 * Note that the root interrupt controller routine is served as well,
	 * however in intr_irq_handler(), i.e. the main system dispatch
	 * routine.
	 */
	if (flags & INTR_SOLO && hand != NULL) {
		debugf("irq %u cannot solo on %s\n",
		    (u_int)rman_get_start(res), name);
		return (EINVAL);
	}

	if (flags & INTR_SOLO) {
		error = isrc_setup_filter(isrc, name, (intr_irq_filter_t *)filt,
		    arg, cookiep);
		debugf("irq %u setup filter error %d on %s\n",
		    (u_int)rman_get_start(res), error, name);
	} else
#endif
	{
		error = isrc_add_handler(isrc, name, filt, hand, arg, flags,
		    cookiep);
		debugf("irq %u add handler error %d on %s\n",
		    (u_int)rman_get_start(res), error, name);
	}
	if (error != 0)
		return (error);

	mtx_lock(&isrc_table_lock);
	error = PIC_SETUP_INTR(isrc->isrc_dev, isrc, res, data);
	if (error == 0) {
		isrc->isrc_handlers++;
		if (isrc->isrc_handlers == 1)
			PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
	}
	mtx_unlock(&isrc_table_lock);
	if (error != 0)
		intr_event_remove_handler(*cookiep);
	return (error);
}

int
intr_teardown_irq(device_t dev, struct resource *res, void *cookie)
{
	int error;
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	isrc = isrc_lookup(rman_get_start(res));
	if (isrc == NULL || isrc->isrc_handlers == 0)
		return (EINVAL);

	data = rman_get_virtual(res);

#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL) {
		if (isrc != cookie)
			return (EINVAL);

		mtx_lock(&isrc_table_lock);
		isrc->isrc_filter = NULL;
		isrc->isrc_arg = NULL;
		isrc->isrc_handlers = 0;
		PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
		PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data);
		isrc_update_name(isrc, NULL);
		mtx_unlock(&isrc_table_lock);
		return (0);
	}
#endif
	if (isrc != intr_handler_source(cookie))
		return (EINVAL);

	error = intr_event_remove_handler(cookie);
	if (error == 0) {
		mtx_lock(&isrc_table_lock);
		isrc->isrc_handlers--;
		if (isrc->isrc_handlers == 0)
			PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
		PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data);
		intrcnt_updatename(isrc);
		mtx_unlock(&isrc_table_lock);
	}
	return (error);
}

int
intr_describe_irq(device_t dev, struct resource *res, void *cookie,
    const char *descr)
{
	int error;
	struct intr_irqsrc *isrc;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	isrc = isrc_lookup(rman_get_start(res));
	if (isrc == NULL || isrc->isrc_handlers == 0)
		return (EINVAL);
#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL) {
		if (isrc != cookie)
			return (EINVAL);

		mtx_lock(&isrc_table_lock);
		isrc_update_name(isrc, descr);
		mtx_unlock(&isrc_table_lock);
		return (0);
	}
#endif
	error = intr_event_describe_handler(isrc->isrc_event, cookie, descr);
	if (error == 0) {
		mtx_lock(&isrc_table_lock);
		intrcnt_updatename(isrc);
		mtx_unlock(&isrc_table_lock);
	}
	return (error);
}
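/*
 * Illustrative: a driver binds an already set up interrupt to a CPU with
 *
 *	error = bus_bind_intr(dev, res, cpu);
 *
 * which the MD bus code routes to intr_bind_irq() below; passing
 * cpu == NOCPU removes the binding again.
 */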
#ifdef SMP
int
intr_bind_irq(device_t dev, struct resource *res, int cpu)
{
	struct intr_irqsrc *isrc;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	isrc = isrc_lookup(rman_get_start(res));
	if (isrc == NULL || isrc->isrc_handlers == 0)
		return (EINVAL);
#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL)
		return (intr_isrc_assign_cpu(isrc, cpu));
#endif
	return (intr_event_bind(isrc->isrc_event, cpu));
}

/*
 * Return the CPU that the next interrupt source should use.
 * For now just returns the next CPU according to round-robin.
 */
u_int
intr_irq_next_cpu(u_int last_cpu, cpuset_t *cpumask)
{

	if (!irq_assign_cpu || mp_ncpus == 1)
		return (PCPU_GET(cpuid));

	do {
		last_cpu++;
		if (last_cpu > mp_maxid)
			last_cpu = 0;
	} while (!CPU_ISSET(last_cpu, cpumask));
	return (last_cpu);
}

/*
 * Distribute all the interrupt sources among the available
 * CPUs once the APs have been launched.
 */
static void
intr_irq_shuffle(void *arg __unused)
{
	struct intr_irqsrc *isrc;
	u_int i;

	if (mp_ncpus == 1)
		return;

	mtx_lock(&isrc_table_lock);
	irq_assign_cpu = TRUE;
	for (i = 0; i < NIRQ; i++) {
		isrc = irq_sources[i];
		if (isrc == NULL || isrc->isrc_handlers == 0 ||
		    isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI))
			continue;

		if (isrc->isrc_event != NULL &&
		    isrc->isrc_flags & INTR_ISRCF_BOUND &&
		    isrc->isrc_event->ie_cpu != CPU_FFS(&isrc->isrc_cpu) - 1)
			panic("%s: CPU inconsistency", __func__);

		if ((isrc->isrc_flags & INTR_ISRCF_BOUND) == 0)
			CPU_ZERO(&isrc->isrc_cpu); /* start again */

		/*
		 * We are in wicked position here if the following call fails
		 * for bound ISRC. The best thing we can do is to clear
		 * isrc_cpu so inconsistency with ie_cpu will be detectable.
		 */
		if (PIC_BIND_INTR(isrc->isrc_dev, isrc) != 0)
			CPU_ZERO(&isrc->isrc_cpu);
	}
	mtx_unlock(&isrc_table_lock);
}
SYSINIT(intr_irq_shuffle, SI_SUB_SMP, SI_ORDER_SECOND, intr_irq_shuffle, NULL);

#else
u_int
intr_irq_next_cpu(u_int current_cpu, cpuset_t *cpumask)
{

	return (PCPU_GET(cpuid));
}
#endif

/*
 * Register an MSI/MSI-X interrupt controller.
 */
int
intr_msi_register(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	if (dev == NULL)
		return (EINVAL);
	pic = pic_create(dev, xref);
	if (pic == NULL)
		return (ENOMEM);

	pic->pic_flags |= FLAG_MSI;

	debugf("PIC %p registered for %s <dev %p, xref %jx>\n", pic,
	    device_get_nameunit(dev), dev, (uintmax_t)xref);
	return (0);
}

int
intr_alloc_msi(device_t pci, device_t child, intptr_t xref, int count,
    int maxcount, int *irqs)
{
	struct intr_irqsrc **isrc;
	struct intr_pic *pic;
	device_t pdev;
	int err, i;

	pic = pic_lookup(NULL, xref);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_MSI) != 0,
	    ("%s: Found a non-MSI controller: %s", __func__,
	    device_get_name(pic->pic_dev)));

	isrc = malloc(sizeof(*isrc) * count, M_INTRNG, M_WAITOK);
	err = MSI_ALLOC_MSI(pic->pic_dev, child, count, maxcount, &pdev, isrc);
	if (err == 0) {
		for (i = 0; i < count; i++) {
			irqs[i] = isrc[i]->isrc_irq;
		}
	}

	free(isrc, M_INTRNG);

	return (err);
}
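/*
 * Illustrative (hypothetical values): a PCI bus driver asking the MSI
 * controller identified by xref for two messages out of an optional
 * four would call intr_alloc_msi() above as
 *
 *	int irqs[4];
 *	error = intr_alloc_msi(pcib, child, xref, 2, 4, irqs);
 *
 * and on success irqs[0..1] hold freshly allocated interrupt numbers
 * (resource handles) suitable for SYS_RES_IRQ resources.
 */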
int
intr_release_msi(device_t pci, device_t child, intptr_t xref, int count,
    int *irqs)
{
	struct intr_irqsrc **isrc;
	struct intr_pic *pic;
	int i, err;

	pic = pic_lookup(NULL, xref);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_MSI) != 0,
	    ("%s: Found a non-MSI controller: %s", __func__,
	    device_get_name(pic->pic_dev)));

	isrc = malloc(sizeof(*isrc) * count, M_INTRNG, M_WAITOK);

	for (i = 0; i < count; i++) {
		isrc[i] = isrc_lookup(irqs[i]);
		if (isrc[i] == NULL) {
			free(isrc, M_INTRNG);
			return (EINVAL);
		}
	}

	err = MSI_RELEASE_MSI(pic->pic_dev, child, count, isrc);
	free(isrc, M_INTRNG);
	return (err);
}

int
intr_alloc_msix(device_t pci, device_t child, intptr_t xref, int *irq)
{
	struct intr_irqsrc *isrc;
	struct intr_pic *pic;
	device_t pdev;
	int err;

	pic = pic_lookup(NULL, xref);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_MSI) != 0,
	    ("%s: Found a non-MSI controller: %s", __func__,
	    device_get_name(pic->pic_dev)));

	err = MSI_ALLOC_MSIX(pic->pic_dev, child, &pdev, &isrc);
	if (err != 0)
		return (err);

	*irq = isrc->isrc_irq;
	return (0);
}

int
intr_release_msix(device_t pci, device_t child, intptr_t xref, int irq)
{
	struct intr_irqsrc *isrc;
	struct intr_pic *pic;
	int err;

	pic = pic_lookup(NULL, xref);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_MSI) != 0,
	    ("%s: Found a non-MSI controller: %s", __func__,
	    device_get_name(pic->pic_dev)));

	isrc = isrc_lookup(irq);
	if (isrc == NULL)
		return (EINVAL);

	err = MSI_RELEASE_MSIX(pic->pic_dev, child, isrc);
	return (err);
}

int
intr_map_msi(device_t pci, device_t child, intptr_t xref, int irq,
    uint64_t *addr, uint32_t *data)
{
	struct intr_irqsrc *isrc;
	struct intr_pic *pic;
	int err;

	pic = pic_lookup(NULL, xref);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_MSI) != 0,
	    ("%s: Found a non-MSI controller: %s", __func__,
	    device_get_name(pic->pic_dev)));

	isrc = isrc_lookup(irq);
	if (isrc == NULL)
		return (EINVAL);

	err = MSI_MAP_MSI(pic->pic_dev, child, isrc, addr, data);
	return (err);
}

void dosoftints(void);
void
dosoftints(void)
{
}

#ifdef SMP
/*
 * Init interrupt controller on another CPU.
 */
void
intr_pic_init_secondary(void)
{

	/*
	 * QQQ: Only root PIC is aware of other CPUs ???
	 */
	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));

	//mtx_lock(&isrc_table_lock);
	PIC_INIT_SECONDARY(intr_irq_root_dev);
	//mtx_unlock(&isrc_table_lock);
}
#endif

#ifdef DDB
DB_SHOW_COMMAND(irqs, db_show_irqs)
{
	u_int i, irqsum;
	u_long num;
	struct intr_irqsrc *isrc;

	for (irqsum = 0, i = 0; i < NIRQ; i++) {
		isrc = irq_sources[i];
		if (isrc == NULL)
			continue;

		num = isrc->isrc_count != NULL ? isrc->isrc_count[0] : 0;
		db_printf("irq%-3u <%s>: cpu %02lx%s cnt %lu\n", i,
		    isrc->isrc_name, isrc->isrc_cpu.__bits[0],
		    isrc->isrc_flags & INTR_ISRCF_BOUND ? " (bound)" : "", num);
		irqsum += num;
	}
	db_printf("irq total %u\n", irqsum);
}
#endif