/*-
 * Copyright (c) 2015-2016 Svatopluk Kraus
 * Copyright (c) 2015-2016 Michal Meloun
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * New-style Interrupt Framework
 *
 *  TODO: - add support for disconnected PICs.
 *        - to support IPI (PPI) enabling on other CPUs if already started.
 *        - to complete things for removable PICs.
 */

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/rman.h>
#include <sys/sched.h>
#include <sys/smp.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <machine/atomic.h>
#include <machine/intr.h>
#include <machine/cpu.h>
#include <machine/smp.h>
#include <machine/stdarg.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include "pic_if.h"
#include "msi_if.h"

#define INTRNAME_LEN	(2*MAXCOMLEN + 1)

#ifdef DEBUG
#define debugf(fmt, args...) do { printf("%s(): ", __func__);	\
    printf(fmt,##args); } while (0)
#else
#define debugf(fmt, args...)
#endif

MALLOC_DECLARE(M_INTRNG);
MALLOC_DEFINE(M_INTRNG, "intr", "intr interrupt handling");

/* Main interrupt handler called from assembly; 'hidden' from C code. */
void intr_irq_handler(struct trapframe *tf);

/* Root interrupt controller stuff. */
device_t intr_irq_root_dev;
static intr_irq_filter_t *irq_root_filter;
static void *irq_root_arg;
static u_int irq_root_ipicount;
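
/*
 * Record of a child PIC attached to a parent PIC. The pc_start and
 * pc_length members describe the range of the parent's interrupt
 * numbers that is dispatched to the child via pc_filter; see
 * intr_child_irq_handler().
 */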
struct intr_pic_child {
	SLIST_ENTRY(intr_pic_child) pc_next;
	struct intr_pic *pc_pic;
	intr_child_irq_filter_t *pc_filter;
	void *pc_filter_arg;
	uintptr_t pc_start;
	uintptr_t pc_length;
};

/* Interrupt controller definition. */
struct intr_pic {
	SLIST_ENTRY(intr_pic) pic_next;
	intptr_t pic_xref;	/* hardware identification */
	device_t pic_dev;
#define FLAG_PIC	(1 << 0)
#define FLAG_MSI	(1 << 1)
	u_int pic_flags;
	struct mtx pic_child_lock;
	SLIST_HEAD(, intr_pic_child) pic_children;
};

static struct mtx pic_list_lock;
static SLIST_HEAD(, intr_pic) pic_list;

static struct intr_pic *pic_lookup(device_t dev, intptr_t xref);

/* Interrupt source definition. */
static struct mtx isrc_table_lock;
static struct intr_irqsrc *irq_sources[NIRQ];
u_int irq_next_free;

#ifdef SMP
static boolean_t irq_assign_cpu = FALSE;
#endif

/*
 * - 2 counters for each I/O interrupt.
 * - MAXCPU counters for each IPI for SMP.
 */
#ifdef SMP
#define INTRCNT_COUNT	(NIRQ * 2 + INTR_IPI_COUNT * MAXCPU)
#else
#define INTRCNT_COUNT	(NIRQ * 2)
#endif

/* Data for MI statistics reporting. */
u_long intrcnt[INTRCNT_COUNT];
char intrnames[INTRCNT_COUNT * INTRNAME_LEN];
size_t sintrcnt = sizeof(intrcnt);
size_t sintrnames = sizeof(intrnames);
static u_int intrcnt_index;

static struct intr_irqsrc *intr_map_get_isrc(u_int res_id);
static void intr_map_set_isrc(u_int res_id, struct intr_irqsrc *isrc);
static void intr_map_copy_map_data(u_int res_id, device_t *dev, intptr_t *xref,
    struct intr_map_data **data);

/*
 * Interrupt framework initialization routine.
 */
static void
intr_irq_init(void *dummy __unused)
{

	SLIST_INIT(&pic_list);
	mtx_init(&pic_list_lock, "intr pic list", NULL, MTX_DEF);

	mtx_init(&isrc_table_lock, "intr isrc table", NULL, MTX_DEF);
}
SYSINIT(intr_irq_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_irq_init, NULL);

static void
intrcnt_setname(const char *name, int index)
{

	snprintf(intrnames + INTRNAME_LEN * index, INTRNAME_LEN, "%-*s",
	    INTRNAME_LEN - 1, name);
}

/*
 * Update name for interrupt source with interrupt event.
 */
static void
intrcnt_updatename(struct intr_irqsrc *isrc)
{

	/* QQQ: What about stray counter name? */
	mtx_assert(&isrc_table_lock, MA_OWNED);
	intrcnt_setname(isrc->isrc_event->ie_fullname, isrc->isrc_index);
}

/*
 * Virtualization for interrupt source interrupt counter increment.
 */
static inline void
isrc_increment_count(struct intr_irqsrc *isrc)
{

	if (isrc->isrc_flags & INTR_ISRCF_PPI)
		atomic_add_long(&isrc->isrc_count[0], 1);
	else
		isrc->isrc_count[0]++;
}

/*
 * Virtualization for interrupt source interrupt stray counter increment.
 */
static inline void
isrc_increment_straycount(struct intr_irqsrc *isrc)
{

	isrc->isrc_count[1]++;
}

/*
 * Virtualization for interrupt source interrupt name update.
 */
static void
isrc_update_name(struct intr_irqsrc *isrc, const char *name)
{
	char str[INTRNAME_LEN];

	mtx_assert(&isrc_table_lock, MA_OWNED);

	if (name != NULL) {
		snprintf(str, INTRNAME_LEN, "%s: %s", isrc->isrc_name, name);
		intrcnt_setname(str, isrc->isrc_index);
		snprintf(str, INTRNAME_LEN, "stray %s: %s", isrc->isrc_name,
		    name);
		intrcnt_setname(str, isrc->isrc_index + 1);
	} else {
		snprintf(str, INTRNAME_LEN, "%s:", isrc->isrc_name);
		intrcnt_setname(str, isrc->isrc_index);
		snprintf(str, INTRNAME_LEN, "stray %s:", isrc->isrc_name);
		intrcnt_setname(str, isrc->isrc_index + 1);
	}
}

/*
 * Virtualization for interrupt source interrupt counters setup.
 */
static void
isrc_setup_counters(struct intr_irqsrc *isrc)
{
	u_int index;

	/*
	 * XXX - it does not work well with removable controllers and
	 *	 interrupt sources !!!
	 */
	index = atomic_fetchadd_int(&intrcnt_index, 2);
	isrc->isrc_index = index;
	isrc->isrc_count = &intrcnt[index];
	isrc_update_name(isrc, NULL);
}

/*
 * Virtualization for interrupt source interrupt counters release.
 */
static void
isrc_release_counters(struct intr_irqsrc *isrc)
{

	panic("%s: not implemented", __func__);
}

#ifdef SMP
/*
 * Virtualization for interrupt source IPI counters setup.
 */
u_long *
intr_ipi_setup_counters(const char *name)
{
	u_int index, i;
	char str[INTRNAME_LEN];

	index = atomic_fetchadd_int(&intrcnt_index, MAXCPU);
	for (i = 0; i < MAXCPU; i++) {
		snprintf(str, INTRNAME_LEN, "cpu%d:%s", i, name);
		intrcnt_setname(str, index + i);
	}
	return (&intrcnt[index]);
}
#endif

/*
 * Main interrupt dispatch handler. It's called straight
 * from the assembly, where the CPU interrupt is taken.
 */
void
intr_irq_handler(struct trapframe *tf)
{
	struct trapframe *oldframe;
	struct thread *td;

	KASSERT(irq_root_filter != NULL, ("%s: no filter", __func__));

	PCPU_INC(cnt.v_intr);
	critical_enter();
	td = curthread;
	oldframe = td->td_intr_frame;
	td->td_intr_frame = tf;
	irq_root_filter(irq_root_arg);
	td->td_intr_frame = oldframe;
	critical_exit();
#ifdef HWPMC_HOOKS
	if (pmc_hook && TRAPF_USERMODE(tf) &&
	    (PCPU_GET(curthread)->td_pflags & TDP_CALLCHAIN))
		pmc_hook(PCPU_GET(curthread), PMC_FN_USER_CALLCHAIN, tf);
#endif
}
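
/*
 * Dispatch an interrupt taken on a parent PIC to the child PIC that
 * claims the given IRQ range, or return FILTER_STRAY if no child
 * claims it.
 */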
int
intr_child_irq_handler(struct intr_pic *parent, uintptr_t irq)
{
	struct intr_pic_child *child;
	bool found;

	found = false;
	mtx_lock_spin(&parent->pic_child_lock);
	SLIST_FOREACH(child, &parent->pic_children, pc_next) {
		if (child->pc_start <= irq &&
		    irq < (child->pc_start + child->pc_length)) {
			found = true;
			break;
		}
	}
	mtx_unlock_spin(&parent->pic_child_lock);

	if (found)
		return (child->pc_filter(child->pc_filter_arg, irq));

	return (FILTER_STRAY);
}

/*
 * Interrupt controller dispatch function for interrupts. It should
 * be called straight from the interrupt controller, once the associated
 * interrupt source is learned.
 */
int
intr_isrc_dispatch(struct intr_irqsrc *isrc, struct trapframe *tf)
{

	KASSERT(isrc != NULL, ("%s: no source", __func__));

	isrc_increment_count(isrc);

#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL) {
		int error;
		error = isrc->isrc_filter(isrc->isrc_arg, tf);
		PIC_POST_FILTER(isrc->isrc_dev, isrc);
		if (error == FILTER_HANDLED)
			return (0);
	} else
#endif
	if (isrc->isrc_event != NULL) {
		if (intr_event_handle(isrc->isrc_event, tf) == 0)
			return (0);
	}

	isrc_increment_straycount(isrc);
	return (EINVAL);
}

/*
 * Allocate a unique interrupt number (resource handle) for an interrupt
 * source.
 *
 * There are various possible strategies for allocating a free interrupt
 * number (resource handle) for a new interrupt source.
 *
 * 1. Handles are always allocated forward, so handles are not recycled
 *    immediately. However, if only one free handle is left, it gets
 *    reused constantly...
 */
static inline int
isrc_alloc_irq(struct intr_irqsrc *isrc)
{
	u_int maxirqs, irq;

	mtx_assert(&isrc_table_lock, MA_OWNED);

	maxirqs = nitems(irq_sources);
	if (irq_next_free >= maxirqs)
		return (ENOSPC);

	for (irq = irq_next_free; irq < maxirqs; irq++) {
		if (irq_sources[irq] == NULL)
			goto found;
	}
	for (irq = 0; irq < irq_next_free; irq++) {
		if (irq_sources[irq] == NULL)
			goto found;
	}

	irq_next_free = maxirqs;
	return (ENOSPC);

found:
	isrc->isrc_irq = irq;
	irq_sources[irq] = isrc;

	irq_next_free = irq + 1;
	if (irq_next_free >= maxirqs)
		irq_next_free = 0;
	return (0);
}

/*
 * Free a unique interrupt number (resource handle) of an interrupt source.
 */
static inline int
isrc_free_irq(struct intr_irqsrc *isrc)
{

	mtx_assert(&isrc_table_lock, MA_OWNED);

	if (isrc->isrc_irq >= nitems(irq_sources))
		return (EINVAL);
	if (irq_sources[isrc->isrc_irq] != isrc)
		return (EINVAL);

	irq_sources[isrc->isrc_irq] = NULL;
	isrc->isrc_irq = INTR_IRQ_INVALID;	/* just to be safe */
	return (0);
}

/*
 * Initialize an interrupt source and register it into the global interrupt
 * table.
 */
int
intr_isrc_register(struct intr_irqsrc *isrc, device_t dev, u_int flags,
    const char *fmt, ...)
{
	int error;
	va_list ap;

	bzero(isrc, sizeof(struct intr_irqsrc));
	isrc->isrc_dev = dev;
	isrc->isrc_irq = INTR_IRQ_INVALID;	/* just to be safe */
	isrc->isrc_flags = flags;

	va_start(ap, fmt);
	vsnprintf(isrc->isrc_name, INTR_ISRC_NAMELEN, fmt, ap);
	va_end(ap);

	mtx_lock(&isrc_table_lock);
	error = isrc_alloc_irq(isrc);
	if (error != 0) {
		mtx_unlock(&isrc_table_lock);
		return (error);
	}
	/*
	 * Setup interrupt counters, but not for IPI sources. Those are set up
	 * later and only for used ones (up to INTR_IPI_COUNT) so as not to
	 * exhaust our counter pool.
	 */
	if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
		isrc_setup_counters(isrc);
	mtx_unlock(&isrc_table_lock);
	return (0);
}

/*
 * Deregister an interrupt source from the global interrupt table.
 */
int
intr_isrc_deregister(struct intr_irqsrc *isrc)
{
	int error;

	mtx_lock(&isrc_table_lock);
	if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
		isrc_release_counters(isrc);
	error = isrc_free_irq(isrc);
	mtx_unlock(&isrc_table_lock);
	return (error);
}

#ifdef SMP
/*
 * A support function for a PIC to decide whether the provided ISRC should
 * be initialized on the given CPU. The logic of the INTR_ISRCF_BOUND flag
 * and the isrc_cpu member of struct intr_irqsrc is as follows:
 *
 *     If INTR_ISRCF_BOUND is set, the ISRC should be initialized only on
 *     CPUs set in isrc_cpu. If not, the ISRC should be initialized on every
 *     CPU and isrc_cpu is kept consistent with that. Thus isrc_cpu is always
 *     correct.
 */
bool
intr_isrc_init_on_cpu(struct intr_irqsrc *isrc, u_int cpu)
{

	if (isrc->isrc_handlers == 0)
		return (false);
	if ((isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI)) == 0)
		return (false);
	if (isrc->isrc_flags & INTR_ISRCF_BOUND)
		return (CPU_ISSET(cpu, &isrc->isrc_cpu));

	CPU_SET(cpu, &isrc->isrc_cpu);
	return (true);
}
#endif

#ifdef INTR_SOLO
/*
 * Set up a filter on an interrupt source.
 */
static int
isrc_setup_filter(struct intr_irqsrc *isrc, const char *name,
    intr_irq_filter_t *filter, void *arg, void **cookiep)
{

	if (filter == NULL)
		return (EINVAL);

	mtx_lock(&isrc_table_lock);
	/*
	 * Make sure that we do not mix the two ways
	 * of handling interrupt sources.
	 */
	if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
		mtx_unlock(&isrc_table_lock);
		return (EBUSY);
	}
	isrc->isrc_filter = filter;
	isrc->isrc_arg = arg;
	isrc_update_name(isrc, name);
	mtx_unlock(&isrc_table_lock);

	*cookiep = isrc;
	return (0);
}
#endif

/*
 * Interrupt source pre_ithread method for MI interrupt framework.
 */
static void
intr_isrc_pre_ithread(void *arg)
{
	struct intr_irqsrc *isrc = arg;

	PIC_PRE_ITHREAD(isrc->isrc_dev, isrc);
}

/*
 * Interrupt source post_ithread method for MI interrupt framework.
 */
static void
intr_isrc_post_ithread(void *arg)
{
	struct intr_irqsrc *isrc = arg;

	PIC_POST_ITHREAD(isrc->isrc_dev, isrc);
}

/*
 * Interrupt source post_filter method for MI interrupt framework.
 */
static void
intr_isrc_post_filter(void *arg)
{
	struct intr_irqsrc *isrc = arg;

	PIC_POST_FILTER(isrc->isrc_dev, isrc);
}

/*
 * Interrupt source assign_cpu method for MI interrupt framework.
 */
static int
intr_isrc_assign_cpu(void *arg, int cpu)
{
#ifdef SMP
	struct intr_irqsrc *isrc = arg;
	int error;

	if (isrc->isrc_dev != intr_irq_root_dev)
		return (EINVAL);

	mtx_lock(&isrc_table_lock);
	if (cpu == NOCPU) {
		CPU_ZERO(&isrc->isrc_cpu);
		isrc->isrc_flags &= ~INTR_ISRCF_BOUND;
	} else {
		CPU_SETOF(cpu, &isrc->isrc_cpu);
		isrc->isrc_flags |= INTR_ISRCF_BOUND;
	}

	/*
	 * In the NOCPU case, it's up to the PIC to either leave the ISRC on
	 * the same CPU, re-balance it to another CPU, or enable it on more
	 * CPUs. However, the PIC is expected to change isrc_cpu appropriately
	 * to keep us well informed if the call is successful.
	 */
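	/*
	 * irq_assign_cpu stays false until the APs are launched (see
	 * intr_irq_shuffle()), so earlier requests only record the binding
	 * in isrc_cpu and isrc_flags.
	 */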
	if (irq_assign_cpu) {
		error = PIC_BIND_INTR(isrc->isrc_dev, isrc);
		if (error) {
			CPU_ZERO(&isrc->isrc_cpu);
			mtx_unlock(&isrc_table_lock);
			return (error);
		}
	}
	mtx_unlock(&isrc_table_lock);
	return (0);
#else
	return (EOPNOTSUPP);
#endif
}

/*
 * Create an interrupt event for an interrupt source.
 */
static int
isrc_event_create(struct intr_irqsrc *isrc)
{
	struct intr_event *ie;
	int error;

	error = intr_event_create(&ie, isrc, 0, isrc->isrc_irq,
	    intr_isrc_pre_ithread, intr_isrc_post_ithread,
	    intr_isrc_post_filter, intr_isrc_assign_cpu, "%s:",
	    isrc->isrc_name);
	if (error)
		return (error);

	mtx_lock(&isrc_table_lock);
	/*
	 * Make sure that we do not mix the two ways
	 * of handling interrupt sources. Let the contested event win.
	 */
#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
#else
	if (isrc->isrc_event != NULL) {
#endif
		mtx_unlock(&isrc_table_lock);
		intr_event_destroy(ie);
		return (isrc->isrc_event != NULL ? EBUSY : 0);
	}
	isrc->isrc_event = ie;
	mtx_unlock(&isrc_table_lock);

	return (0);
}

#ifdef notyet
/*
 * Destroy an interrupt event of an interrupt source.
 */
static void
isrc_event_destroy(struct intr_irqsrc *isrc)
{
	struct intr_event *ie;

	mtx_lock(&isrc_table_lock);
	ie = isrc->isrc_event;
	isrc->isrc_event = NULL;
	mtx_unlock(&isrc_table_lock);

	if (ie != NULL)
		intr_event_destroy(ie);
}
#endif

/*
 * Add a handler to an interrupt source.
 */
static int
isrc_add_handler(struct intr_irqsrc *isrc, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, void **cookiep)
{
	int error;

	if (isrc->isrc_event == NULL) {
		error = isrc_event_create(isrc);
		if (error)
			return (error);
	}

	error = intr_event_add_handler(isrc->isrc_event, name, filter, handler,
	    arg, intr_priority(flags), flags, cookiep);
	if (error == 0) {
		mtx_lock(&isrc_table_lock);
		intrcnt_updatename(isrc);
		mtx_unlock(&isrc_table_lock);
	}

	return (error);
}

/*
 * Look up an interrupt controller with the pic_list_lock held.
 */
static inline struct intr_pic *
pic_lookup_locked(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	mtx_assert(&pic_list_lock, MA_OWNED);

	if (dev == NULL && xref == 0)
		return (NULL);

	/* Note that pic->pic_dev is never NULL on registered PIC. */
	SLIST_FOREACH(pic, &pic_list, pic_next) {
		if (dev == NULL) {
			if (xref == pic->pic_xref)
				return (pic);
		} else if (xref == 0 || pic->pic_xref == 0) {
			if (dev == pic->pic_dev)
				return (pic);
		} else if (xref == pic->pic_xref && dev == pic->pic_dev)
			return (pic);
	}
	return (NULL);
}

/*
 * Look up an interrupt controller.
 */
static struct intr_pic *
pic_lookup(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	mtx_lock(&pic_list_lock);
	pic = pic_lookup_locked(dev, xref);
	mtx_unlock(&pic_list_lock);
	return (pic);
}

/*
 * Create an interrupt controller.
 */
static struct intr_pic *
pic_create(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	mtx_lock(&pic_list_lock);
	pic = pic_lookup_locked(dev, xref);
	if (pic != NULL) {
		mtx_unlock(&pic_list_lock);
		return (pic);
	}
	pic = malloc(sizeof(*pic), M_INTRNG, M_NOWAIT | M_ZERO);
	if (pic == NULL) {
		mtx_unlock(&pic_list_lock);
		return (NULL);
	}
	pic->pic_xref = xref;
	pic->pic_dev = dev;
	mtx_init(&pic->pic_child_lock, "pic child lock", NULL, MTX_SPIN);
	SLIST_INSERT_HEAD(&pic_list, pic, pic_next);
	mtx_unlock(&pic_list_lock);

	return (pic);
}

#ifdef notyet
/*
 * Destroy an interrupt controller.
 */
static void
pic_destroy(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	mtx_lock(&pic_list_lock);
	pic = pic_lookup_locked(dev, xref);
	if (pic == NULL) {
		mtx_unlock(&pic_list_lock);
		return;
	}
	SLIST_REMOVE(&pic_list, pic, intr_pic, pic_next);
	mtx_unlock(&pic_list_lock);

	free(pic, M_INTRNG);
}
#endif

/*
 * Register an interrupt controller.
 */
struct intr_pic *
intr_pic_register(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	if (dev == NULL)
		return (NULL);
	pic = pic_create(dev, xref);
	if (pic == NULL)
		return (NULL);

	pic->pic_flags |= FLAG_PIC;

	debugf("PIC %p registered for %s <dev %p, xref %jx>\n", pic,
	    device_get_nameunit(dev), dev, (uintmax_t)xref);
	return (pic);
}

/*
 * Unregister an interrupt controller.
 */
int
intr_pic_deregister(device_t dev, intptr_t xref)
{

	panic("%s: not implemented", __func__);
}

/*
 * Mark an interrupt controller (itself) as the root one.
 *
 * Note that only an interrupt controller can really know its position
 * in the interrupt controller tree. So the root PIC must claim itself
 * as the root.
 *
 * In the FDT case, according to ePAPR approved version 1.1 from 08 April
 * 2011, page 30:
 *    "The root of the interrupt tree is determined when traversal
 *     of the interrupt tree reaches an interrupt controller node without
 *     an interrupts property and thus no explicit interrupt parent."
 */
int
intr_pic_claim_root(device_t dev, intptr_t xref, intr_irq_filter_t *filter,
    void *arg, u_int ipicount)
{
	struct intr_pic *pic;

	pic = pic_lookup(dev, xref);
	if (pic == NULL) {
		device_printf(dev, "not registered\n");
		return (EINVAL);
	}

	KASSERT((pic->pic_flags & FLAG_PIC) != 0,
	    ("%s: Found a non-PIC controller: %s", __func__,
	    device_get_name(pic->pic_dev)));

	if (filter == NULL) {
		device_printf(dev, "filter missing\n");
		return (EINVAL);
	}

	/*
	 * Only one interrupt controller can be the root for now.
	 * Note that we further suppose that there is no threaded interrupt
	 * routine (handler) on the root. See intr_irq_handler().
	 */
	if (intr_irq_root_dev != NULL) {
		device_printf(dev, "another root already set\n");
		return (EBUSY);
	}

	intr_irq_root_dev = dev;
	irq_root_filter = filter;
	irq_root_arg = arg;
	irq_root_ipicount = ipicount;

	debugf("irq root set to %s\n", device_get_nameunit(dev));
	return (0);
}

/*
 * Add a handler to manage a subrange of a parent's interrupts.
 */
struct intr_pic *
intr_pic_add_handler(device_t parent, struct intr_pic *pic,
    intr_child_irq_filter_t *filter, void *arg, uintptr_t start,
    uintptr_t length)
{
	struct intr_pic *parent_pic;
	struct intr_pic_child *newchild;
#ifdef INVARIANTS
	struct intr_pic_child *child;
#endif

	parent_pic = pic_lookup(parent, 0);
	if (parent_pic == NULL)
		return (NULL);

	newchild = malloc(sizeof(*newchild), M_INTRNG, M_WAITOK | M_ZERO);
	newchild->pc_pic = pic;
	newchild->pc_filter = filter;
	newchild->pc_filter_arg = arg;
	newchild->pc_start = start;
	newchild->pc_length = length;

	mtx_lock_spin(&parent_pic->pic_child_lock);
#ifdef INVARIANTS
	SLIST_FOREACH(child, &parent_pic->pic_children, pc_next) {
		KASSERT(child->pc_pic != pic, ("%s: Adding a child PIC twice",
		    __func__));
	}
#endif
	SLIST_INSERT_HEAD(&parent_pic->pic_children, newchild, pc_next);
	mtx_unlock_spin(&parent_pic->pic_child_lock);

	return (pic);
}

static int
intr_resolve_irq(device_t dev, intptr_t xref, struct intr_map_data *data,
    struct intr_irqsrc **isrc)
{
	struct intr_pic *pic;
	struct intr_map_data_msi *msi;

	if (data == NULL)
		return (EINVAL);

	pic = pic_lookup(dev, xref);
	if (pic == NULL)
		return (ESRCH);

	switch (data->type) {
	case INTR_MAP_DATA_MSI:
		KASSERT((pic->pic_flags & FLAG_MSI) != 0,
		    ("%s: Found a non-MSI controller: %s", __func__,
		    device_get_name(pic->pic_dev)));
		msi = (struct intr_map_data_msi *)data;
		*isrc = msi->isrc;
		return (0);

	default:
		KASSERT((pic->pic_flags & FLAG_PIC) != 0,
		    ("%s: Found a non-PIC controller: %s", __func__,
		    device_get_name(pic->pic_dev)));
		return (PIC_MAP_INTR(pic->pic_dev, data, isrc));
	}
}

int
intr_activate_irq(device_t dev, struct resource *res)
{
	device_t map_dev;
	intptr_t map_xref;
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;
	u_int res_id;
	int error;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	if (intr_map_get_isrc(res_id) != NULL)
		panic("Attempt to double activation of resource id: %u\n",
		    res_id);
	intr_map_copy_map_data(res_id, &map_dev, &map_xref, &data);
	error = intr_resolve_irq(map_dev, map_xref, data, &isrc);
	if (error != 0) {
		free(data, M_INTRNG);
		/* XXX TODO DISCONNECTED PICs */
		/* if (error == EINVAL) return(0); */
		return (error);
	}
	intr_map_set_isrc(res_id, isrc);
	rman_set_virtual(res, data);
	return (PIC_ACTIVATE_INTR(isrc->isrc_dev, isrc, res, data));
}

int
intr_deactivate_irq(device_t dev, struct resource *res)
{
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;
	u_int res_id;
	int error;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	isrc = intr_map_get_isrc(res_id);
	if (isrc == NULL)
		panic("Attempt to deactivate non-active resource id: %u\n",
		    res_id);

	data = rman_get_virtual(res);
	error = PIC_DEACTIVATE_INTR(isrc->isrc_dev, isrc, res, data);
	intr_map_set_isrc(res_id, NULL);
	rman_set_virtual(res, NULL);
	free(data, M_INTRNG);
	return (error);
}
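
/*
 * Add a filter and/or handler routine for the interrupt mapped to the
 * given resource. When the first handler is installed, the PIC is asked
 * to set up and enable the interrupt source.
 */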
int
intr_setup_irq(device_t dev, struct resource *res, driver_filter_t filt,
    driver_intr_t hand, void *arg, int flags, void **cookiep)
{
	int error;
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;
	const char *name;
	u_int res_id;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	isrc = intr_map_get_isrc(res_id);
	if (isrc == NULL) {
		/* XXX TODO DISCONNECTED PICs */
		return (EINVAL);
	}

	data = rman_get_virtual(res);
	name = device_get_nameunit(dev);

#ifdef INTR_SOLO
	/*
	 * Standard handling is done through the MI interrupt framework.
	 * However, some interrupts can request solely their own special
	 * handling. This non-standard handling can be used for interrupt
	 * controllers without a handler (filter only), so when interrupt
	 * controllers are chained, the MI interrupt framework is called
	 * only in the leaf controller.
	 *
	 * Note that the root interrupt controller routine is served as well,
	 * however in intr_irq_handler(), i.e. the main system dispatch
	 * routine.
	 */
	if (flags & INTR_SOLO && hand != NULL) {
		debugf("irq %u cannot solo on %s\n", res_id, name);
		return (EINVAL);
	}

	if (flags & INTR_SOLO) {
		error = isrc_setup_filter(isrc, name, (intr_irq_filter_t *)filt,
		    arg, cookiep);
		debugf("irq %u setup filter error %d on %s\n", res_id, error,
		    name);
	} else
#endif
	{
		error = isrc_add_handler(isrc, name, filt, hand, arg, flags,
		    cookiep);
		debugf("irq %u add handler error %d on %s\n", res_id, error,
		    name);
	}
	if (error != 0)
		return (error);

	mtx_lock(&isrc_table_lock);
	error = PIC_SETUP_INTR(isrc->isrc_dev, isrc, res, data);
	if (error == 0) {
		isrc->isrc_handlers++;
		if (isrc->isrc_handlers == 1)
			PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
	}
	mtx_unlock(&isrc_table_lock);
	if (error != 0)
		intr_event_remove_handler(*cookiep);
	return (error);
}

int
intr_teardown_irq(device_t dev, struct resource *res, void *cookie)
{
	int error;
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;
	u_int res_id;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	isrc = intr_map_get_isrc(res_id);
	if (isrc == NULL || isrc->isrc_handlers == 0)
		return (EINVAL);

	data = rman_get_virtual(res);

#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL) {
		if (isrc != cookie)
			return (EINVAL);

		mtx_lock(&isrc_table_lock);
		isrc->isrc_filter = NULL;
		isrc->isrc_arg = NULL;
		isrc->isrc_handlers = 0;
		PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
		PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data);
		isrc_update_name(isrc, NULL);
		mtx_unlock(&isrc_table_lock);
		return (0);
	}
#endif
	if (isrc != intr_handler_source(cookie))
		return (EINVAL);

	error = intr_event_remove_handler(cookie);
	if (error == 0) {
		mtx_lock(&isrc_table_lock);
		isrc->isrc_handlers--;
		if (isrc->isrc_handlers == 0)
			PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
		PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data);
		intrcnt_updatename(isrc);
		mtx_unlock(&isrc_table_lock);
	}
	return (error);
}
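
/*
 * Attach a description to an installed interrupt handler and refresh
 * the name used for interrupt counting.
 */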
int
intr_describe_irq(device_t dev, struct resource *res, void *cookie,
    const char *descr)
{
	int error;
	struct intr_irqsrc *isrc;
	u_int res_id;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	isrc = intr_map_get_isrc(res_id);
	if (isrc == NULL || isrc->isrc_handlers == 0)
		return (EINVAL);
#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL) {
		if (isrc != cookie)
			return (EINVAL);

		mtx_lock(&isrc_table_lock);
		isrc_update_name(isrc, descr);
		mtx_unlock(&isrc_table_lock);
		return (0);
	}
#endif
	error = intr_event_describe_handler(isrc->isrc_event, cookie, descr);
	if (error == 0) {
		mtx_lock(&isrc_table_lock);
		intrcnt_updatename(isrc);
		mtx_unlock(&isrc_table_lock);
	}
	return (error);
}

#ifdef SMP
int
intr_bind_irq(device_t dev, struct resource *res, int cpu)
{
	struct intr_irqsrc *isrc;
	u_int res_id;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	isrc = intr_map_get_isrc(res_id);
	if (isrc == NULL || isrc->isrc_handlers == 0)
		return (EINVAL);
#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL)
		return (intr_isrc_assign_cpu(isrc, cpu));
#endif
	return (intr_event_bind(isrc->isrc_event, cpu));
}

/*
 * Return the CPU that the next interrupt source should use.
 * For now just returns the next CPU according to round-robin.
 */
u_int
intr_irq_next_cpu(u_int last_cpu, cpuset_t *cpumask)
{

	if (!irq_assign_cpu || mp_ncpus == 1)
		return (PCPU_GET(cpuid));

	do {
		last_cpu++;
		if (last_cpu > mp_maxid)
			last_cpu = 0;
	} while (!CPU_ISSET(last_cpu, cpumask));
	return (last_cpu);
}

/*
 * Distribute all the interrupt sources among the available
 * CPUs once the APs have been launched.
 */
static void
intr_irq_shuffle(void *arg __unused)
{
	struct intr_irqsrc *isrc;
	u_int i;

	if (mp_ncpus == 1)
		return;

	mtx_lock(&isrc_table_lock);
	irq_assign_cpu = TRUE;
	for (i = 0; i < NIRQ; i++) {
		isrc = irq_sources[i];
		if (isrc == NULL || isrc->isrc_handlers == 0 ||
		    isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI))
			continue;

		if (isrc->isrc_event != NULL &&
		    isrc->isrc_flags & INTR_ISRCF_BOUND &&
		    isrc->isrc_event->ie_cpu != CPU_FFS(&isrc->isrc_cpu) - 1)
			panic("%s: CPU inconsistency", __func__);

		if ((isrc->isrc_flags & INTR_ISRCF_BOUND) == 0)
			CPU_ZERO(&isrc->isrc_cpu);	/* start again */

		/*
		 * We are in a wicked position here if the following call
		 * fails for a bound ISRC. The best thing we can do is to
		 * clear isrc_cpu so the inconsistency with ie_cpu will be
		 * detectable.
		 */
		if (PIC_BIND_INTR(isrc->isrc_dev, isrc) != 0)
			CPU_ZERO(&isrc->isrc_cpu);
	}
	mtx_unlock(&isrc_table_lock);
}
SYSINIT(intr_irq_shuffle, SI_SUB_SMP, SI_ORDER_SECOND, intr_irq_shuffle, NULL);

#else
u_int
intr_irq_next_cpu(u_int current_cpu, cpuset_t *cpumask)
{

	return (PCPU_GET(cpuid));
}
#endif

/*
 * Allocate memory for a new intr_map_data structure.
 * Initialize common fields.
 */
struct intr_map_data *
intr_alloc_map_data(enum intr_map_data_type type, size_t len, int flags)
{
	struct intr_map_data *data;

	data = malloc(len, M_INTRNG, flags);
	data->type = type;
	data->len = len;
	return (data);
}

void
intr_free_intr_map_data(struct intr_map_data *data)
{

	free(data, M_INTRNG);
}

/*
 * Register an MSI/MSI-X interrupt controller.
 */
int
intr_msi_register(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	if (dev == NULL)
		return (EINVAL);
	pic = pic_create(dev, xref);
	if (pic == NULL)
		return (ENOMEM);

	pic->pic_flags |= FLAG_MSI;

	debugf("PIC %p registered for %s <dev %p, xref %jx>\n", pic,
	    device_get_nameunit(dev), dev, (uintmax_t)xref);
	return (0);
}

int
intr_alloc_msi(device_t pci, device_t child, intptr_t xref, int count,
    int maxcount, int *irqs)
{
	struct intr_irqsrc **isrc;
	struct intr_pic *pic;
	device_t pdev;
	struct intr_map_data_msi *msi;
	int err, i;

	pic = pic_lookup(NULL, xref);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_MSI) != 0,
	    ("%s: Found a non-MSI controller: %s", __func__,
	    device_get_name(pic->pic_dev)));

	isrc = malloc(sizeof(*isrc) * count, M_INTRNG, M_WAITOK);
	err = MSI_ALLOC_MSI(pic->pic_dev, child, count, maxcount, &pdev, isrc);
	if (err != 0) {
		free(isrc, M_INTRNG);
		return (err);
	}

	for (i = 0; i < count; i++) {
		msi = (struct intr_map_data_msi *)intr_alloc_map_data(
		    INTR_MAP_DATA_MSI, sizeof(*msi), M_WAITOK | M_ZERO);
		msi->isrc = isrc[i];
		irqs[i] = intr_map_irq(pic->pic_dev, xref,
		    (struct intr_map_data *)msi);
	}
	free(isrc, M_INTRNG);

	return (err);
}

int
intr_release_msi(device_t pci, device_t child, intptr_t xref, int count,
    int *irqs)
{
	struct intr_irqsrc **isrc;
	struct intr_pic *pic;
	int i, err;

	pic = pic_lookup(NULL, xref);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_MSI) != 0,
	    ("%s: Found a non-MSI controller: %s", __func__,
	    device_get_name(pic->pic_dev)));

	isrc = malloc(sizeof(*isrc) * count, M_INTRNG, M_WAITOK);

	for (i = 0; i < count; i++)
		isrc[i] = intr_map_get_isrc(irqs[i]);

	err = MSI_RELEASE_MSI(pic->pic_dev, child, count, isrc);

	for (i = 0; i < count; i++) {
		if (isrc[i] != NULL)
			intr_unmap_irq(irqs[i]);
	}

	free(isrc, M_INTRNG);
	return (err);
}

int
intr_alloc_msix(device_t pci, device_t child, intptr_t xref, int *irq)
{
	struct intr_irqsrc *isrc;
	struct intr_pic *pic;
	device_t pdev;
	struct intr_map_data_msi *msi;
	int err;

	pic = pic_lookup(NULL, xref);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_MSI) != 0,
	    ("%s: Found a non-MSI controller: %s", __func__,
	    device_get_name(pic->pic_dev)));

	err = MSI_ALLOC_MSIX(pic->pic_dev, child, &pdev, &isrc);
	if (err != 0)
		return (err);

	msi = (struct intr_map_data_msi *)intr_alloc_map_data(
	    INTR_MAP_DATA_MSI, sizeof(*msi), M_WAITOK | M_ZERO);
	msi->isrc = isrc;
	*irq = intr_map_irq(pic->pic_dev, xref, (struct intr_map_data *)msi);
	return (0);
}
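
/*
 * Release an MSI-X interrupt allocated by intr_alloc_msix() and remove
 * its irq_map entry.
 */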
int
intr_release_msix(device_t pci, device_t child, intptr_t xref, int irq)
{
	struct intr_irqsrc *isrc;
	struct intr_pic *pic;
	int err;

	pic = pic_lookup(NULL, xref);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_MSI) != 0,
	    ("%s: Found a non-MSI controller: %s", __func__,
	    device_get_name(pic->pic_dev)));

	isrc = intr_map_get_isrc(irq);
	if (isrc == NULL) {
		intr_unmap_irq(irq);
		return (EINVAL);
	}

	err = MSI_RELEASE_MSIX(pic->pic_dev, child, isrc);
	intr_unmap_irq(irq);

	return (err);
}

int
intr_map_msi(device_t pci, device_t child, intptr_t xref, int irq,
    uint64_t *addr, uint32_t *data)
{
	struct intr_irqsrc *isrc;
	struct intr_pic *pic;
	int err;

	pic = pic_lookup(NULL, xref);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_MSI) != 0,
	    ("%s: Found a non-MSI controller: %s", __func__,
	    device_get_name(pic->pic_dev)));

	isrc = intr_map_get_isrc(irq);
	if (isrc == NULL)
		return (EINVAL);

	err = MSI_MAP_MSI(pic->pic_dev, child, isrc, addr, data);
	return (err);
}

void dosoftints(void);
void
dosoftints(void)
{
}

#ifdef SMP
/*
 * Initialize the interrupt controller on another CPU.
 */
void
intr_pic_init_secondary(void)
{

	/*
	 * QQQ: Only root PIC is aware of other CPUs ???
	 */
	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));

	//mtx_lock(&isrc_table_lock);
	PIC_INIT_SECONDARY(intr_irq_root_dev);
	//mtx_unlock(&isrc_table_lock);
}
#endif

#ifdef DDB
DB_SHOW_COMMAND(irqs, db_show_irqs)
{
	u_int i, irqsum;
	u_long num;
	struct intr_irqsrc *isrc;

	for (irqsum = 0, i = 0; i < NIRQ; i++) {
		isrc = irq_sources[i];
		if (isrc == NULL)
			continue;

		num = isrc->isrc_count != NULL ? isrc->isrc_count[0] : 0;
		db_printf("irq%-3u <%s>: cpu %02lx%s cnt %lu\n", i,
		    isrc->isrc_name, isrc->isrc_cpu.__bits[0],
		    isrc->isrc_flags & INTR_ISRCF_BOUND ? " (bound)" : "", num);
		irqsum += num;
	}
	db_printf("irq total %u\n", irqsum);
}
#endif

/*
 * Interrupt mapping table functions.
 *
 * Please keep this part separate; it can be transformed into an extension
 * of standard resources.
 */
struct intr_map_entry
{
	device_t dev;
	intptr_t xref;
	struct intr_map_data *map_data;
	struct intr_irqsrc *isrc;
	/* XXX TODO DISCONNECTED PICs */
	/*int flags */
};
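
/*
 * The index into irq_map[] is the IRQ number (resource ID) that
 * intr_map_irq() hands out to consumers.
 */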
/* XXX Convert irq_map[] to a dynamically expandable one. */
static struct intr_map_entry *irq_map[2 * NIRQ];
static int irq_map_count = nitems(irq_map);
static int irq_map_first_free_idx;
static struct mtx irq_map_lock;

static struct intr_irqsrc *
intr_map_get_isrc(u_int res_id)
{
	struct intr_irqsrc *isrc;

	mtx_lock(&irq_map_lock);
	if ((res_id >= irq_map_count) || (irq_map[res_id] == NULL)) {
		mtx_unlock(&irq_map_lock);
		return (NULL);
	}
	isrc = irq_map[res_id]->isrc;
	mtx_unlock(&irq_map_lock);
	return (isrc);
}

static void
intr_map_set_isrc(u_int res_id, struct intr_irqsrc *isrc)
{

	mtx_lock(&irq_map_lock);
	if ((res_id >= irq_map_count) || (irq_map[res_id] == NULL)) {
		mtx_unlock(&irq_map_lock);
		return;
	}
	irq_map[res_id]->isrc = isrc;
	mtx_unlock(&irq_map_lock);
}

/*
 * Get a copy of the intr_map_entry data.
 */
static void
intr_map_copy_map_data(u_int res_id, device_t *map_dev, intptr_t *map_xref,
    struct intr_map_data **data)
{
	size_t len;

	len = 0;
	mtx_lock(&irq_map_lock);
	if (res_id >= irq_map_count || irq_map[res_id] == NULL)
		panic("Attempt to copy invalid resource id: %u\n", res_id);
	if (irq_map[res_id]->map_data != NULL)
		len = irq_map[res_id]->map_data->len;
	mtx_unlock(&irq_map_lock);

	if (len == 0)
		*data = NULL;
	else
		*data = malloc(len, M_INTRNG, M_WAITOK | M_ZERO);
	mtx_lock(&irq_map_lock);
	if (irq_map[res_id] == NULL)
		panic("Attempt to copy invalid resource id: %u\n", res_id);
	if (len != 0) {
		if (len != irq_map[res_id]->map_data->len)
			panic("Resource id: %u has changed.\n", res_id);
		memcpy(*data, irq_map[res_id]->map_data, len);
	}
	*map_dev = irq_map[res_id]->dev;
	*map_xref = irq_map[res_id]->xref;
	mtx_unlock(&irq_map_lock);
}

/*
 * Allocate and fill a new entry in the irq_map table.
 */
u_int
intr_map_irq(device_t dev, intptr_t xref, struct intr_map_data *data)
{
	u_int i;
	struct intr_map_entry *entry;

	/* Prepare the new entry first. */
	entry = malloc(sizeof(*entry), M_INTRNG, M_WAITOK | M_ZERO);

	entry->dev = dev;
	entry->xref = xref;
	entry->map_data = data;
	entry->isrc = NULL;

	mtx_lock(&irq_map_lock);
	for (i = irq_map_first_free_idx; i < irq_map_count; i++) {
		if (irq_map[i] == NULL) {
			irq_map[i] = entry;
			irq_map_first_free_idx = i + 1;
			mtx_unlock(&irq_map_lock);
			return (i);
		}
	}
	mtx_unlock(&irq_map_lock);

	/* XXX Expand irq_map table */
	panic("IRQ mapping table is full.");
}

/*
 * Remove and free a mapping entry.
 */
void
intr_unmap_irq(u_int res_id)
{
	struct intr_map_entry *entry;

	mtx_lock(&irq_map_lock);
	if ((res_id >= irq_map_count) || (irq_map[res_id] == NULL))
		panic("Attempt to unmap invalid resource id: %u\n", res_id);
	entry = irq_map[res_id];
	irq_map[res_id] = NULL;
	irq_map_first_free_idx = res_id;
	mtx_unlock(&irq_map_lock);
	intr_free_intr_map_data(entry->map_data);
	free(entry, M_INTRNG);
}

/*
 * Clone mapping entry.
 */
u_int
intr_map_clone_irq(u_int old_res_id)
{
	device_t map_dev;
	intptr_t map_xref;
	struct intr_map_data *data;

	intr_map_copy_map_data(old_res_id, &map_dev, &map_xref, &data);
	return (intr_map_irq(map_dev, map_xref, data));
}

static void
intr_map_init(void *dummy __unused)
{

	mtx_init(&irq_map_lock, "intr map table", NULL, MTX_DEF);
}
SYSINIT(intr_map_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_map_init, NULL);