/*-
 * Copyright (c) 2015-2016 Svatopluk Kraus
 * Copyright (c) 2015-2016 Michal Meloun
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * New-style Interrupt Framework
 *
 * TODO: - add support for disconnected PICs.
 *       - support IPI (PPI) enabling on other CPUs if already started.
 *       - complete the support for removable PICs.
 */

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"
#include "opt_iommu.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/rman.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <machine/atomic.h>
#include <machine/intr.h>
#include <machine/cpu.h>
#include <machine/smp.h>
#include <machine/stdarg.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifdef IOMMU
#include <dev/iommu/iommu_msi.h>
#endif

#include "pic_if.h"
#include "msi_if.h"

#define	INTRNAME_LEN	(2*MAXCOMLEN + 1)

#ifdef DEBUG
#define debugf(fmt, args...) do { printf("%s(): ", __func__);	\
    printf(fmt,##args); } while (0)
#else
#define debugf(fmt, args...)
#endif

MALLOC_DECLARE(M_INTRNG);
MALLOC_DEFINE(M_INTRNG, "intr", "intr interrupt handling");

/* Main interrupt handler, called from assembly and thus 'hidden' from C code. */
void intr_irq_handler(struct trapframe *tf);
/*
 * Root interrupt controller stuff.
 */
device_t intr_irq_root_dev;
static intr_irq_filter_t *irq_root_filter;
static void *irq_root_arg;
static u_int irq_root_ipicount;

struct intr_pic_child {
	SLIST_ENTRY(intr_pic_child) pc_next;
	struct intr_pic *pc_pic;
	intr_child_irq_filter_t *pc_filter;
	void *pc_filter_arg;
	uintptr_t pc_start;
	uintptr_t pc_length;
};

/* Interrupt controller definition. */
struct intr_pic {
	SLIST_ENTRY(intr_pic) pic_next;
	intptr_t pic_xref;	/* hardware identification */
	device_t pic_dev;
/* Only one of FLAG_PIC or FLAG_MSI may be set. */
#define	FLAG_PIC	(1 << 0)
#define	FLAG_MSI	(1 << 1)
#define	FLAG_TYPE_MASK	(FLAG_PIC | FLAG_MSI)
	u_int pic_flags;
	struct mtx pic_child_lock;
	SLIST_HEAD(, intr_pic_child) pic_children;
};

static struct mtx pic_list_lock;
static SLIST_HEAD(, intr_pic) pic_list;

static struct intr_pic *pic_lookup(device_t dev, intptr_t xref, int flags);

/* Interrupt source definition. */
static struct mtx isrc_table_lock;
static struct intr_irqsrc *irq_sources[NIRQ];
u_int irq_next_free;

#ifdef SMP
#ifdef EARLY_AP_STARTUP
static bool irq_assign_cpu = true;
#else
static bool irq_assign_cpu = false;
#endif
#endif

/*
 * - 2 counters for each I/O interrupt.
 * - MAXCPU counters for each IPI for SMP.
 */
#ifdef SMP
#define INTRCNT_COUNT	(NIRQ * 2 + INTR_IPI_COUNT * MAXCPU)
#else
#define INTRCNT_COUNT	(NIRQ * 2)
#endif

/* Data for MI statistics reporting. */
u_long intrcnt[INTRCNT_COUNT];
char intrnames[INTRCNT_COUNT * INTRNAME_LEN];
size_t sintrcnt = sizeof(intrcnt);
size_t sintrnames = sizeof(intrnames);
static u_int intrcnt_index;

static struct intr_irqsrc *intr_map_get_isrc(u_int res_id);
static void intr_map_set_isrc(u_int res_id, struct intr_irqsrc *isrc);
static struct intr_map_data * intr_map_get_map_data(u_int res_id);
static void intr_map_copy_map_data(u_int res_id, device_t *dev, intptr_t *xref,
    struct intr_map_data **data);

/*
 * Interrupt framework initialization routine.
 */
static void
intr_irq_init(void *dummy __unused)
{

	SLIST_INIT(&pic_list);
	mtx_init(&pic_list_lock, "intr pic list", NULL, MTX_DEF);

	mtx_init(&isrc_table_lock, "intr isrc table", NULL, MTX_DEF);
}
SYSINIT(intr_irq_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_irq_init, NULL);

static void
intrcnt_setname(const char *name, int index)
{

	snprintf(intrnames + INTRNAME_LEN * index, INTRNAME_LEN, "%-*s",
	    INTRNAME_LEN - 1, name);
}

/*
 * Update the name of an interrupt source from its interrupt event.
 */
static void
intrcnt_updatename(struct intr_irqsrc *isrc)
{

	/* QQQ: What about the stray counter name? */
	mtx_assert(&isrc_table_lock, MA_OWNED);
	intrcnt_setname(isrc->isrc_event->ie_fullname, isrc->isrc_index);
}

/*
 * Virtualization for the interrupt counter increment of an interrupt source.
 */
static inline void
isrc_increment_count(struct intr_irqsrc *isrc)
{

	if (isrc->isrc_flags & INTR_ISRCF_PPI)
		atomic_add_long(&isrc->isrc_count[0], 1);
	else
		isrc->isrc_count[0]++;
}
/*
 * Virtualization for the stray interrupt counter increment of an interrupt
 * source.
 */
static inline void
isrc_increment_straycount(struct intr_irqsrc *isrc)
{

	isrc->isrc_count[1]++;
}

/*
 * Virtualization for the interrupt name update of an interrupt source.
 */
static void
isrc_update_name(struct intr_irqsrc *isrc, const char *name)
{
	char str[INTRNAME_LEN];

	mtx_assert(&isrc_table_lock, MA_OWNED);

	if (name != NULL) {
		snprintf(str, INTRNAME_LEN, "%s: %s", isrc->isrc_name, name);
		intrcnt_setname(str, isrc->isrc_index);
		snprintf(str, INTRNAME_LEN, "stray %s: %s", isrc->isrc_name,
		    name);
		intrcnt_setname(str, isrc->isrc_index + 1);
	} else {
		snprintf(str, INTRNAME_LEN, "%s:", isrc->isrc_name);
		intrcnt_setname(str, isrc->isrc_index);
		snprintf(str, INTRNAME_LEN, "stray %s:", isrc->isrc_name);
		intrcnt_setname(str, isrc->isrc_index + 1);
	}
}

/*
 * Virtualization for the interrupt counters setup of an interrupt source.
 */
static void
isrc_setup_counters(struct intr_irqsrc *isrc)
{
	u_int index;

	/*
	 * XXX - this does not work well with removable controllers and
	 *	 interrupt sources!
	 */
	index = atomic_fetchadd_int(&intrcnt_index, 2);
	isrc->isrc_index = index;
	isrc->isrc_count = &intrcnt[index];
	isrc_update_name(isrc, NULL);
}

/*
 * Virtualization for the interrupt counters release of an interrupt source.
 */
static void
isrc_release_counters(struct intr_irqsrc *isrc)
{

	panic("%s: not implemented", __func__);
}

#ifdef SMP
/*
 * Virtualization for the IPI counters setup.
 */
u_long *
intr_ipi_setup_counters(const char *name)
{
	u_int index, i;
	char str[INTRNAME_LEN];

	index = atomic_fetchadd_int(&intrcnt_index, MAXCPU);
	for (i = 0; i < MAXCPU; i++) {
		snprintf(str, INTRNAME_LEN, "cpu%d:%s", i, name);
		intrcnt_setname(str, index + i);
	}
	return (&intrcnt[index]);
}
#endif

/*
 * Main interrupt dispatch handler. It is called directly from assembly,
 * where the CPU interrupt is taken.
 */
void
intr_irq_handler(struct trapframe *tf)
{
	struct trapframe *oldframe;
	struct thread *td;

	KASSERT(irq_root_filter != NULL, ("%s: no filter", __func__));

	VM_CNT_INC(v_intr);
	critical_enter();
	td = curthread;
	oldframe = td->td_intr_frame;
	td->td_intr_frame = tf;
	irq_root_filter(irq_root_arg);
	td->td_intr_frame = oldframe;
	critical_exit();
#ifdef HWPMC_HOOKS
	if (pmc_hook && TRAPF_USERMODE(tf) &&
	    (PCPU_GET(curthread)->td_pflags & TDP_CALLCHAIN))
		pmc_hook(PCPU_GET(curthread), PMC_FN_USER_CALLCHAIN, tf);
#endif
}

int
intr_child_irq_handler(struct intr_pic *parent, uintptr_t irq)
{
	struct intr_pic_child *child;
	bool found;

	found = false;
	mtx_lock_spin(&parent->pic_child_lock);
	SLIST_FOREACH(child, &parent->pic_children, pc_next) {
		if (child->pc_start <= irq &&
		    irq < (child->pc_start + child->pc_length)) {
			found = true;
			break;
		}
	}
	mtx_unlock_spin(&parent->pic_child_lock);

	if (found)
		return (child->pc_filter(child->pc_filter_arg, irq));

	return (FILTER_STRAY);
}
/*
 * Interrupt controller dispatch function. It should be called directly
 * from the interrupt controller, once the associated interrupt source
 * has been determined.
 */
int
intr_isrc_dispatch(struct intr_irqsrc *isrc, struct trapframe *tf)
{

	KASSERT(isrc != NULL, ("%s: no source", __func__));

	isrc_increment_count(isrc);

#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL) {
		int error;
		error = isrc->isrc_filter(isrc->isrc_arg, tf);
		PIC_POST_FILTER(isrc->isrc_dev, isrc);
		if (error == FILTER_HANDLED)
			return (0);
	} else
#endif
	if (isrc->isrc_event != NULL) {
		if (intr_event_handle(isrc->isrc_event, tf) == 0)
			return (0);
	}

	isrc_increment_straycount(isrc);
	return (EINVAL);
}

/*
 * Allocate a unique interrupt number (resource handle) for an interrupt
 * source.
 *
 * There are various possible strategies for allocating a free interrupt
 * number (resource handle) for a new interrupt source.
 *
 * 1. Handles are always allocated forward, so handles are not recycled
 *    immediately. However, if only one free handle is left, it is reused
 *    constantly...
 */
static inline int
isrc_alloc_irq(struct intr_irqsrc *isrc)
{
	u_int maxirqs, irq;

	mtx_assert(&isrc_table_lock, MA_OWNED);

	maxirqs = nitems(irq_sources);
	if (irq_next_free >= maxirqs)
		return (ENOSPC);

	for (irq = irq_next_free; irq < maxirqs; irq++) {
		if (irq_sources[irq] == NULL)
			goto found;
	}
	for (irq = 0; irq < irq_next_free; irq++) {
		if (irq_sources[irq] == NULL)
			goto found;
	}

	irq_next_free = maxirqs;
	return (ENOSPC);

found:
	isrc->isrc_irq = irq;
	irq_sources[irq] = isrc;

	irq_next_free = irq + 1;
	if (irq_next_free >= maxirqs)
		irq_next_free = 0;
	return (0);
}

/*
 * Free the unique interrupt number (resource handle) of an interrupt source.
 */
static inline int
isrc_free_irq(struct intr_irqsrc *isrc)
{

	mtx_assert(&isrc_table_lock, MA_OWNED);

	if (isrc->isrc_irq >= nitems(irq_sources))
		return (EINVAL);
	if (irq_sources[isrc->isrc_irq] != isrc)
		return (EINVAL);

	irq_sources[isrc->isrc_irq] = NULL;
	isrc->isrc_irq = INTR_IRQ_INVALID;	/* just to be safe */
	return (0);
}

/*
 * Initialize an interrupt source and register it into the global interrupt
 * table.
 */
int
intr_isrc_register(struct intr_irqsrc *isrc, device_t dev, u_int flags,
    const char *fmt, ...)
{
	int error;
	va_list ap;

	bzero(isrc, sizeof(struct intr_irqsrc));
	isrc->isrc_dev = dev;
	isrc->isrc_irq = INTR_IRQ_INVALID;	/* just to be safe */
	isrc->isrc_flags = flags;

	va_start(ap, fmt);
	vsnprintf(isrc->isrc_name, INTR_ISRC_NAMELEN, fmt, ap);
	va_end(ap);

	mtx_lock(&isrc_table_lock);
	error = isrc_alloc_irq(isrc);
	if (error != 0) {
		mtx_unlock(&isrc_table_lock);
		return (error);
	}
	/*
	 * Set up interrupt counters, but not for IPI sources. Those are set
	 * up later and only for the used ones (up to INTR_IPI_COUNT), so as
	 * not to exhaust our counter pool.
	 */
	if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
		isrc_setup_counters(isrc);
	mtx_unlock(&isrc_table_lock);
	return (0);
}
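/*
 * Example (illustrative only, not part of this framework): a hypothetical
 * PIC driver conventionally embeds struct intr_irqsrc as the first member
 * of its own per-interrupt structure, so the isrc pointer can be cast back,
 * and registers one source per hardware pin from its attach method. All
 * "mypic" names and MYPIC_NIRQS are made up.
 *
 *	struct mypic_irqsrc {
 *		struct intr_irqsrc mpi_isrc;	(conventionally first)
 *		u_int mpi_irq;			(hardware pin number)
 *	};
 *
 *	static int
 *	mypic_register_isrcs(struct mypic_softc *sc)
 *	{
 *		const char *name = device_get_nameunit(sc->mpi_dev);
 *		int error;
 *		u_int irq;
 *
 *		for (irq = 0; irq < MYPIC_NIRQS; irq++) {
 *			sc->mpi_isrcs[irq].mpi_irq = irq;
 *			error = intr_isrc_register(
 *			    &sc->mpi_isrcs[irq].mpi_isrc, sc->mpi_dev, 0,
 *			    "%s,i%u", name, irq);
 *			if (error != 0)
 *				return (error);
 *		}
 *		return (0);
 *	}
 *
 * The registered sources are later resolved by the PIC's PIC_MAP_INTR
 * method and handed back to this framework, e.g. via intr_isrc_dispatch().
 */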
/*
 * Deregister an interrupt source from the global interrupt table.
 */
int
intr_isrc_deregister(struct intr_irqsrc *isrc)
{
	int error;

	mtx_lock(&isrc_table_lock);
	if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
		isrc_release_counters(isrc);
	error = isrc_free_irq(isrc);
	mtx_unlock(&isrc_table_lock);
	return (error);
}

#ifdef SMP
/*
 * A support function for a PIC to decide whether a provided ISRC should be
 * initialized on the given CPU. The logic of the INTR_ISRCF_BOUND flag and
 * of the isrc_cpu member of struct intr_irqsrc is as follows:
 *
 *     If INTR_ISRCF_BOUND is set, the ISRC should be initialized only on
 *     the CPUs set in isrc_cpu. If not, the ISRC should be initialized on
 *     every CPU and isrc_cpu is kept consistent with that. Thus isrc_cpu
 *     is always correct.
 */
bool
intr_isrc_init_on_cpu(struct intr_irqsrc *isrc, u_int cpu)
{

	if (isrc->isrc_handlers == 0)
		return (false);
	if ((isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI)) == 0)
		return (false);
	if (isrc->isrc_flags & INTR_ISRCF_BOUND)
		return (CPU_ISSET(cpu, &isrc->isrc_cpu));

	CPU_SET(cpu, &isrc->isrc_cpu);
	return (true);
}
#endif
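/*
 * Example (illustrative only): a hypothetical PIC driver could use
 * intr_isrc_init_on_cpu() from its PIC_INIT_SECONDARY method to decide
 * which per-CPU (PPI/IPI) sources to unmask on a newly started CPU.
 * MYPIC_NPPIS and mypic_isrc_enable_hw() are made-up names.
 *
 *	static void
 *	mypic_init_secondary(device_t dev)
 *	{
 *		struct mypic_softc *sc = device_get_softc(dev);
 *		u_int cpu = PCPU_GET(cpuid);
 *		u_int irq;
 *
 *		for (irq = 0; irq < MYPIC_NPPIS; irq++) {
 *			struct intr_irqsrc *isrc =
 *			    &sc->mpi_isrcs[irq].mpi_isrc;
 *
 *			if (intr_isrc_init_on_cpu(isrc, cpu))
 *				mypic_isrc_enable_hw(sc, irq);
 *		}
 *	}
 */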
#ifdef INTR_SOLO
/*
 * Set up a filter for an interrupt source.
 */
static int
isrc_setup_filter(struct intr_irqsrc *isrc, const char *name,
    intr_irq_filter_t *filter, void *arg, void **cookiep)
{

	if (filter == NULL)
		return (EINVAL);

	mtx_lock(&isrc_table_lock);
	/*
	 * Make sure that we do not mix the two ways
	 * in which we handle interrupt sources.
	 */
	if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
		mtx_unlock(&isrc_table_lock);
		return (EBUSY);
	}
	isrc->isrc_filter = filter;
	isrc->isrc_arg = arg;
	isrc_update_name(isrc, name);
	mtx_unlock(&isrc_table_lock);

	*cookiep = isrc;
	return (0);
}
#endif

/*
 * Interrupt source pre_ithread method for the MI interrupt framework.
 */
static void
intr_isrc_pre_ithread(void *arg)
{
	struct intr_irqsrc *isrc = arg;

	PIC_PRE_ITHREAD(isrc->isrc_dev, isrc);
}

/*
 * Interrupt source post_ithread method for the MI interrupt framework.
 */
static void
intr_isrc_post_ithread(void *arg)
{
	struct intr_irqsrc *isrc = arg;

	PIC_POST_ITHREAD(isrc->isrc_dev, isrc);
}

/*
 * Interrupt source post_filter method for the MI interrupt framework.
 */
static void
intr_isrc_post_filter(void *arg)
{
	struct intr_irqsrc *isrc = arg;

	PIC_POST_FILTER(isrc->isrc_dev, isrc);
}

/*
 * Interrupt source assign_cpu method for the MI interrupt framework.
 */
static int
intr_isrc_assign_cpu(void *arg, int cpu)
{
#ifdef SMP
	struct intr_irqsrc *isrc = arg;
	int error;

	mtx_lock(&isrc_table_lock);
	if (cpu == NOCPU) {
		CPU_ZERO(&isrc->isrc_cpu);
		isrc->isrc_flags &= ~INTR_ISRCF_BOUND;
	} else {
		CPU_SETOF(cpu, &isrc->isrc_cpu);
		isrc->isrc_flags |= INTR_ISRCF_BOUND;
	}

	/*
	 * In the NOCPU case, it is up to the PIC to either leave the ISRC
	 * on the same CPU, re-balance it to another CPU, or enable it on
	 * more CPUs. However, the PIC is expected to change isrc_cpu
	 * appropriately to keep us well informed if the call is successful.
	 */
	if (irq_assign_cpu) {
		error = PIC_BIND_INTR(isrc->isrc_dev, isrc);
		if (error) {
			CPU_ZERO(&isrc->isrc_cpu);
			mtx_unlock(&isrc_table_lock);
			return (error);
		}
	}
	mtx_unlock(&isrc_table_lock);
	return (0);
#else
	return (EOPNOTSUPP);
#endif
}

/*
 * Create an interrupt event for an interrupt source.
 */
static int
isrc_event_create(struct intr_irqsrc *isrc)
{
	struct intr_event *ie;
	int error;

	error = intr_event_create(&ie, isrc, 0, isrc->isrc_irq,
	    intr_isrc_pre_ithread, intr_isrc_post_ithread,
	    intr_isrc_post_filter, intr_isrc_assign_cpu, "%s:",
	    isrc->isrc_name);
	if (error)
		return (error);

	mtx_lock(&isrc_table_lock);
	/*
	 * Make sure that we do not mix the two ways
	 * in which we handle interrupt sources. Let the contested event win.
	 */
#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
#else
	if (isrc->isrc_event != NULL) {
#endif
		mtx_unlock(&isrc_table_lock);
		intr_event_destroy(ie);
		return (isrc->isrc_event != NULL ? EBUSY : 0);
	}
	isrc->isrc_event = ie;
	mtx_unlock(&isrc_table_lock);

	return (0);
}

#ifdef notyet
/*
 * Destroy the interrupt event of an interrupt source.
 */
static void
isrc_event_destroy(struct intr_irqsrc *isrc)
{
	struct intr_event *ie;

	mtx_lock(&isrc_table_lock);
	ie = isrc->isrc_event;
	isrc->isrc_event = NULL;
	mtx_unlock(&isrc_table_lock);

	if (ie != NULL)
		intr_event_destroy(ie);
}
#endif

/*
 * Add a handler to an interrupt source.
 */
static int
isrc_add_handler(struct intr_irqsrc *isrc, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, void **cookiep)
{
	int error;

	if (isrc->isrc_event == NULL) {
		error = isrc_event_create(isrc);
		if (error)
			return (error);
	}

	error = intr_event_add_handler(isrc->isrc_event, name, filter, handler,
	    arg, intr_priority(flags), flags, cookiep);
	if (error == 0) {
		mtx_lock(&isrc_table_lock);
		intrcnt_updatename(isrc);
		mtx_unlock(&isrc_table_lock);
	}

	return (error);
}

/*
 * Lookup an interrupt controller (locked variant).
 */
static inline struct intr_pic *
pic_lookup_locked(device_t dev, intptr_t xref, int flags)
{
	struct intr_pic *pic;

	mtx_assert(&pic_list_lock, MA_OWNED);

	if (dev == NULL && xref == 0)
		return (NULL);

	/* Note that pic->pic_dev is never NULL on a registered PIC. */
	SLIST_FOREACH(pic, &pic_list, pic_next) {
		if ((pic->pic_flags & FLAG_TYPE_MASK) !=
		    (flags & FLAG_TYPE_MASK))
			continue;

		if (dev == NULL) {
			if (xref == pic->pic_xref)
				return (pic);
		} else if (xref == 0 || pic->pic_xref == 0) {
			if (dev == pic->pic_dev)
				return (pic);
		} else if (xref == pic->pic_xref && dev == pic->pic_dev)
			return (pic);
	}
	return (NULL);
}

/*
 * Lookup an interrupt controller.
 */
static struct intr_pic *
pic_lookup(device_t dev, intptr_t xref, int flags)
{
	struct intr_pic *pic;

	mtx_lock(&pic_list_lock);
	pic = pic_lookup_locked(dev, xref, flags);
	mtx_unlock(&pic_list_lock);
	return (pic);
}
/*
 * Create an interrupt controller.
 */
static struct intr_pic *
pic_create(device_t dev, intptr_t xref, int flags)
{
	struct intr_pic *pic;

	mtx_lock(&pic_list_lock);
	pic = pic_lookup_locked(dev, xref, flags);
	if (pic != NULL) {
		mtx_unlock(&pic_list_lock);
		return (pic);
	}
	pic = malloc(sizeof(*pic), M_INTRNG, M_NOWAIT | M_ZERO);
	if (pic == NULL) {
		mtx_unlock(&pic_list_lock);
		return (NULL);
	}
	pic->pic_xref = xref;
	pic->pic_dev = dev;
	pic->pic_flags = flags;
	mtx_init(&pic->pic_child_lock, "pic child lock", NULL, MTX_SPIN);
	SLIST_INSERT_HEAD(&pic_list, pic, pic_next);
	mtx_unlock(&pic_list_lock);

	return (pic);
}

#ifdef notyet
/*
 * Destroy an interrupt controller.
 */
static void
pic_destroy(device_t dev, intptr_t xref, int flags)
{
	struct intr_pic *pic;

	mtx_lock(&pic_list_lock);
	pic = pic_lookup_locked(dev, xref, flags);
	if (pic == NULL) {
		mtx_unlock(&pic_list_lock);
		return;
	}
	SLIST_REMOVE(&pic_list, pic, intr_pic, pic_next);
	mtx_unlock(&pic_list_lock);

	free(pic, M_INTRNG);
}
#endif

/*
 * Register an interrupt controller.
 */
struct intr_pic *
intr_pic_register(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	if (dev == NULL)
		return (NULL);
	pic = pic_create(dev, xref, FLAG_PIC);
	if (pic == NULL)
		return (NULL);

	debugf("PIC %p registered for %s <dev %p, xref %jx>\n", pic,
	    device_get_nameunit(dev), dev, (uintmax_t)xref);
	return (pic);
}

/*
 * Unregister an interrupt controller.
 */
int
intr_pic_deregister(device_t dev, intptr_t xref)
{

	panic("%s: not implemented", __func__);
}

/*
 * Mark an interrupt controller (itself) as a root one.
 *
 * Note that only an interrupt controller can really know its position
 * in the interrupt controllers' tree. So the root PIC must claim itself
 * as the root.
 *
 * In the FDT case, according to ePAPR approved version 1.1 from
 * 08 April 2011, page 30:
 *    "The root of the interrupt tree is determined when traversal
 *     of the interrupt tree reaches an interrupt controller node without
 *     an interrupts property and thus no explicit interrupt parent."
 */
int
intr_pic_claim_root(device_t dev, intptr_t xref, intr_irq_filter_t *filter,
    void *arg, u_int ipicount)
{
	struct intr_pic *pic;

	pic = pic_lookup(dev, xref, FLAG_PIC);
	if (pic == NULL) {
		device_printf(dev, "not registered\n");
		return (EINVAL);
	}

	KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_PIC,
	    ("%s: Found a non-PIC controller: %s", __func__,
	    device_get_name(pic->pic_dev)));

	if (filter == NULL) {
		device_printf(dev, "filter missing\n");
		return (EINVAL);
	}

	/*
	 * Only one interrupt controller can be on the root for now.
	 * Note that we further assume that there is no threaded interrupt
	 * routine (handler) on the root. See intr_irq_handler().
	 */
	if (intr_irq_root_dev != NULL) {
		device_printf(dev, "another root already set\n");
		return (EBUSY);
	}

	intr_irq_root_dev = dev;
	irq_root_filter = filter;
	irq_root_arg = arg;
	irq_root_ipicount = ipicount;

	debugf("irq root set to %s\n", device_get_nameunit(dev));
	return (0);
}
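/*
 * Example (illustrative only): a hypothetical root PIC registers itself
 * and then claims the root, passing a filter that decodes the pending
 * hardware interrupt and hands it to intr_isrc_dispatch(). The accessors
 * mypic_read_pending() and mypic_mask_hw() are made up, and passing 0 as
 * ipicount assumes this PIC provides no IPIs.
 *
 *	static int
 *	mypic_intr(void *arg)
 *	{
 *		struct mypic_softc *sc = arg;
 *		u_int irq;
 *
 *		irq = mypic_read_pending(sc);
 *		if (irq == MYPIC_NO_IRQ)
 *			return (FILTER_STRAY);
 *		if (intr_isrc_dispatch(&sc->mpi_isrcs[irq].mpi_isrc,
 *		    curthread->td_intr_frame) != 0) {
 *			mypic_mask_hw(sc, irq);
 *			device_printf(sc->mpi_dev, "stray irq %u\n", irq);
 *		}
 *		return (FILTER_HANDLED);
 *	}
 *
 * Then, in mypic_attach(), after mypic_register_isrcs():
 *
 *	if (intr_pic_register(dev, xref) == NULL)
 *		return (ENXIO);
 *	...
 *	(only when the firmware data marks this PIC as the root)
 *	error = intr_pic_claim_root(dev, xref, mypic_intr, sc, 0);
 */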
/*
 * Add a handler to manage a subrange of a parent's interrupts.
 */
struct intr_pic *
intr_pic_add_handler(device_t parent, struct intr_pic *pic,
    intr_child_irq_filter_t *filter, void *arg, uintptr_t start,
    uintptr_t length)
{
	struct intr_pic *parent_pic;
	struct intr_pic_child *newchild;
#ifdef INVARIANTS
	struct intr_pic_child *child;
#endif

	/* Find the parent PIC. */
	parent_pic = pic_lookup(parent, 0, FLAG_PIC);
	if (parent_pic == NULL)
		return (NULL);

	newchild = malloc(sizeof(*newchild), M_INTRNG, M_WAITOK | M_ZERO);
	newchild->pc_pic = pic;
	newchild->pc_filter = filter;
	newchild->pc_filter_arg = arg;
	newchild->pc_start = start;
	newchild->pc_length = length;

	mtx_lock_spin(&parent_pic->pic_child_lock);
#ifdef INVARIANTS
	SLIST_FOREACH(child, &parent_pic->pic_children, pc_next) {
		KASSERT(child->pc_pic != pic, ("%s: Adding a child PIC twice",
		    __func__));
	}
#endif
	SLIST_INSERT_HEAD(&parent_pic->pic_children, newchild, pc_next);
	mtx_unlock_spin(&parent_pic->pic_child_lock);

	return (pic);
}
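/*
 * Example (illustrative only): a hypothetical cascaded PIC that owns a
 * window of its parent's interrupt space registers itself as a child
 * handler; the parent then forwards interrupts from that window through
 * intr_child_irq_handler() in its own filter. All "mychild" names,
 * MYCHILD_IRQ_BASE and MYCHILD_NIRQS are made up; the parent is assumed
 * to have kept the struct intr_pic pointer from its intr_pic_register()
 * call, here as sc->mp_pic.
 *
 * In the child's attach:
 *
 *	pic = intr_pic_register(dev, xref);
 *	if (pic == NULL ||
 *	    intr_pic_add_handler(parent_dev, pic, mychild_intr, sc,
 *	    MYCHILD_IRQ_BASE, MYCHILD_NIRQS) == NULL)
 *		return (ENXIO);
 *
 *	static int
 *	mychild_intr(void *arg, uintptr_t irq)
 *	{
 *		struct mychild_softc *sc = arg;
 *		struct intr_irqsrc *isrc;
 *
 *		isrc = &sc->mc_isrcs[irq - MYCHILD_IRQ_BASE].mci_isrc;
 *		if (intr_isrc_dispatch(isrc, curthread->td_intr_frame) != 0)
 *			return (FILTER_STRAY);
 *		return (FILTER_HANDLED);
 *	}
 *
 * And in the parent's filter, for an interrupt number it does not own:
 *
 *	return (intr_child_irq_handler(sc->mp_pic, irq));
 */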
static int
intr_resolve_irq(device_t dev, intptr_t xref, struct intr_map_data *data,
    struct intr_irqsrc **isrc)
{
	struct intr_pic *pic;
	struct intr_map_data_msi *msi;

	if (data == NULL)
		return (EINVAL);

	pic = pic_lookup(dev, xref,
	    (data->type == INTR_MAP_DATA_MSI) ? FLAG_MSI : FLAG_PIC);
	if (pic == NULL)
		return (ESRCH);

	switch (data->type) {
	case INTR_MAP_DATA_MSI:
		KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
		    ("%s: Found a non-MSI controller: %s", __func__,
		    device_get_name(pic->pic_dev)));
		msi = (struct intr_map_data_msi *)data;
		*isrc = msi->isrc;
		return (0);

	default:
		KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_PIC,
		    ("%s: Found a non-PIC controller: %s", __func__,
		    device_get_name(pic->pic_dev)));
		return (PIC_MAP_INTR(pic->pic_dev, data, isrc));
	}
}

bool
intr_is_per_cpu(struct resource *res)
{
	u_int res_id;
	struct intr_irqsrc *isrc;

	res_id = (u_int)rman_get_start(res);
	isrc = intr_map_get_isrc(res_id);

	if (isrc == NULL)
		panic("Attempt to get isrc for non-active resource id: %u\n",
		    res_id);
	return ((isrc->isrc_flags & INTR_ISRCF_PPI) != 0);
}

int
intr_activate_irq(device_t dev, struct resource *res)
{
	device_t map_dev;
	intptr_t map_xref;
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;
	u_int res_id;
	int error;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	if (intr_map_get_isrc(res_id) != NULL)
		panic("Attempt to double activation of resource id: %u\n",
		    res_id);
	intr_map_copy_map_data(res_id, &map_dev, &map_xref, &data);
	error = intr_resolve_irq(map_dev, map_xref, data, &isrc);
	if (error != 0) {
		free(data, M_INTRNG);
		/* XXX TODO DISCONNECTED PICs */
		/* if (error == EINVAL) return(0); */
		return (error);
	}
	intr_map_set_isrc(res_id, isrc);
	rman_set_virtual(res, data);
	return (PIC_ACTIVATE_INTR(isrc->isrc_dev, isrc, res, data));
}

int
intr_deactivate_irq(device_t dev, struct resource *res)
{
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;
	u_int res_id;
	int error;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	isrc = intr_map_get_isrc(res_id);
	if (isrc == NULL)
		panic("Attempt to deactivate non-active resource id: %u\n",
		    res_id);

	data = rman_get_virtual(res);
	error = PIC_DEACTIVATE_INTR(isrc->isrc_dev, isrc, res, data);
	intr_map_set_isrc(res_id, NULL);
	rman_set_virtual(res, NULL);
	free(data, M_INTRNG);
	return (error);
}

int
intr_setup_irq(device_t dev, struct resource *res, driver_filter_t filt,
    driver_intr_t hand, void *arg, int flags, void **cookiep)
{
	int error;
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;
	const char *name;
	u_int res_id;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	isrc = intr_map_get_isrc(res_id);
	if (isrc == NULL) {
		/* XXX TODO DISCONNECTED PICs */
		return (EINVAL);
	}

	data = rman_get_virtual(res);
	name = device_get_nameunit(dev);

#ifdef INTR_SOLO
	/*
	 * Standard handling is done through the MI interrupt framework.
	 * However, some interrupts may request solely their own special
	 * handling. This non-standard handling can be used for interrupt
	 * controllers without a handler (filter only), so when interrupt
	 * controllers are chained, the MI interrupt framework is called
	 * only in the leaf controller.
	 *
	 * Note that the root interrupt controller routine is served as
	 * well, however in intr_irq_handler(), i.e. the main system
	 * dispatch routine.
	 */
	if (flags & INTR_SOLO && hand != NULL) {
		debugf("irq %u cannot solo on %s\n", isrc->isrc_irq, name);
		return (EINVAL);
	}

	if (flags & INTR_SOLO) {
		error = isrc_setup_filter(isrc, name,
		    (intr_irq_filter_t *)filt, arg, cookiep);
		debugf("irq %u setup filter error %d on %s\n", isrc->isrc_irq,
		    error, name);
	} else
#endif
	{
		error = isrc_add_handler(isrc, name, filt, hand, arg, flags,
		    cookiep);
		debugf("irq %u add handler error %d on %s\n", isrc->isrc_irq,
		    error, name);
	}
	if (error != 0)
		return (error);

	mtx_lock(&isrc_table_lock);
	error = PIC_SETUP_INTR(isrc->isrc_dev, isrc, res, data);
	if (error == 0) {
		isrc->isrc_handlers++;
		if (isrc->isrc_handlers == 1)
			PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
	}
	mtx_unlock(&isrc_table_lock);
	if (error != 0)
		intr_event_remove_handler(*cookiep);
	return (error);
}
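/*
 * Example (illustrative only): the usual life cycle of an interrupt
 * resource on this framework. A bus driver translates firmware interrupt
 * data into a struct intr_map_data and maps it; the returned value becomes
 * the rman start/end of the SYS_RES_IRQ resource. Activation and handler
 * setup then go through intr_activate_irq() and intr_setup_irq() above
 * (called from the nexus methods), not directly from device drivers. The
 * INTR_MAP_DATA_FDT layout sketched here is an assumption; check
 * dev/ofw/ofw_bus_subr.h for the real structure.
 *
 *	struct intr_map_data_fdt *fdt;
 *	u_int rid;
 *
 *	fdt = (struct intr_map_data_fdt *)intr_alloc_map_data(
 *	    INTR_MAP_DATA_FDT, sizeof(*fdt) + ncells * sizeof(pcell_t),
 *	    M_WAITOK | M_ZERO);
 *	fdt->ncells = ncells;
 *	memcpy(fdt->cells, cells, ncells * sizeof(pcell_t));
 *	rid = intr_map_irq(NULL, iparent_xref, (struct intr_map_data *)fdt);
 *
 * A device driver then allocates SYS_RES_IRQ with that value and calls
 * bus_setup_intr(), which ends up here via the nexus.
 */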
int
intr_teardown_irq(device_t dev, struct resource *res, void *cookie)
{
	int error;
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;
	u_int res_id;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	isrc = intr_map_get_isrc(res_id);
	if (isrc == NULL || isrc->isrc_handlers == 0)
		return (EINVAL);

	data = rman_get_virtual(res);

#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL) {
		if (isrc != cookie)
			return (EINVAL);

		mtx_lock(&isrc_table_lock);
		isrc->isrc_filter = NULL;
		isrc->isrc_arg = NULL;
		isrc->isrc_handlers = 0;
		PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
		PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data);
		isrc_update_name(isrc, NULL);
		mtx_unlock(&isrc_table_lock);
		return (0);
	}
#endif
	if (isrc != intr_handler_source(cookie))
		return (EINVAL);

	error = intr_event_remove_handler(cookie);
	if (error == 0) {
		mtx_lock(&isrc_table_lock);
		isrc->isrc_handlers--;
		if (isrc->isrc_handlers == 0)
			PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
		PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data);
		intrcnt_updatename(isrc);
		mtx_unlock(&isrc_table_lock);
	}
	return (error);
}

int
intr_describe_irq(device_t dev, struct resource *res, void *cookie,
    const char *descr)
{
	int error;
	struct intr_irqsrc *isrc;
	u_int res_id;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	isrc = intr_map_get_isrc(res_id);
	if (isrc == NULL || isrc->isrc_handlers == 0)
		return (EINVAL);
#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL) {
		if (isrc != cookie)
			return (EINVAL);

		mtx_lock(&isrc_table_lock);
		isrc_update_name(isrc, descr);
		mtx_unlock(&isrc_table_lock);
		return (0);
	}
#endif
	error = intr_event_describe_handler(isrc->isrc_event, cookie, descr);
	if (error == 0) {
		mtx_lock(&isrc_table_lock);
		intrcnt_updatename(isrc);
		mtx_unlock(&isrc_table_lock);
	}
	return (error);
}

#ifdef SMP
int
intr_bind_irq(device_t dev, struct resource *res, int cpu)
{
	struct intr_irqsrc *isrc;
	u_int res_id;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	isrc = intr_map_get_isrc(res_id);
	if (isrc == NULL || isrc->isrc_handlers == 0)
		return (EINVAL);
#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL)
		return (intr_isrc_assign_cpu(isrc, cpu));
#endif
	return (intr_event_bind(isrc->isrc_event, cpu));
}

/*
 * Return the CPU that the next interrupt source should use.
 * For now just returns the next CPU according to round-robin.
 */
u_int
intr_irq_next_cpu(u_int last_cpu, cpuset_t *cpumask)
{
	u_int cpu;

	KASSERT(!CPU_EMPTY(cpumask), ("%s: Empty CPU mask", __func__));
	if (!irq_assign_cpu || mp_ncpus == 1) {
		cpu = PCPU_GET(cpuid);

		if (CPU_ISSET(cpu, cpumask))
			return (cpu);

		return (CPU_FFS(cpumask) - 1);
	}

	do {
		last_cpu++;
		if (last_cpu > mp_maxid)
			last_cpu = 0;
	} while (!CPU_ISSET(last_cpu, cpumask));
	return (last_cpu);
}
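/*
 * Example (illustrative only): a hypothetical PIC's PIC_BIND_INTR method
 * can use intr_irq_next_cpu() to spread unbound sources across CPUs while
 * honoring an explicit binding recorded in isrc_cpu. It also updates
 * isrc_cpu for unbound sources, as the framework expects (see
 * intr_isrc_assign_cpu() above). mypic_set_target_hw() and the
 * mpi_last_cpu softc member are made up.
 *
 *	static int
 *	mypic_bind_intr(device_t dev, struct intr_irqsrc *isrc)
 *	{
 *		struct mypic_softc *sc = device_get_softc(dev);
 *		u_int cpu;
 *
 *		if (CPU_EMPTY(&isrc->isrc_cpu)) {
 *			sc->mpi_last_cpu = intr_irq_next_cpu(
 *			    sc->mpi_last_cpu, &all_cpus);
 *			CPU_SETOF(sc->mpi_last_cpu, &isrc->isrc_cpu);
 *			cpu = sc->mpi_last_cpu;
 *		} else
 *			cpu = CPU_FFS(&isrc->isrc_cpu) - 1;
 *		return (mypic_set_target_hw(sc, isrc, cpu));
 *	}
 */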
#ifndef EARLY_AP_STARTUP
/*
 * Distribute all the interrupt sources among the available
 * CPUs once the APs have been launched.
 */
static void
intr_irq_shuffle(void *arg __unused)
{
	struct intr_irqsrc *isrc;
	u_int i;

	if (mp_ncpus == 1)
		return;

	mtx_lock(&isrc_table_lock);
	irq_assign_cpu = true;
	for (i = 0; i < NIRQ; i++) {
		isrc = irq_sources[i];
		if (isrc == NULL || isrc->isrc_handlers == 0 ||
		    isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI))
			continue;

		if (isrc->isrc_event != NULL &&
		    isrc->isrc_flags & INTR_ISRCF_BOUND &&
		    isrc->isrc_event->ie_cpu != CPU_FFS(&isrc->isrc_cpu) - 1)
			panic("%s: CPU inconsistency", __func__);

		if ((isrc->isrc_flags & INTR_ISRCF_BOUND) == 0)
			CPU_ZERO(&isrc->isrc_cpu); /* start again */

		/*
		 * We are in an awkward position here if the following call
		 * fails for a bound ISRC. The best thing we can do is to
		 * clear isrc_cpu so the inconsistency with ie_cpu will be
		 * detectable.
		 */
		if (PIC_BIND_INTR(isrc->isrc_dev, isrc) != 0)
			CPU_ZERO(&isrc->isrc_cpu);
	}
	mtx_unlock(&isrc_table_lock);
}
SYSINIT(intr_irq_shuffle, SI_SUB_SMP, SI_ORDER_SECOND, intr_irq_shuffle, NULL);
#endif /* !EARLY_AP_STARTUP */

#else
u_int
intr_irq_next_cpu(u_int current_cpu, cpuset_t *cpumask)
{

	return (PCPU_GET(cpuid));
}
#endif /* SMP */

/*
 * Allocate memory for a new intr_map_data structure.
 * Initialize its common fields.
 */
struct intr_map_data *
intr_alloc_map_data(enum intr_map_data_type type, size_t len, int flags)
{
	struct intr_map_data *data;

	data = malloc(len, M_INTRNG, flags);
	data->type = type;
	data->len = len;
	return (data);
}

void
intr_free_intr_map_data(struct intr_map_data *data)
{

	free(data, M_INTRNG);
}

/*
 * Register an MSI/MSI-X interrupt controller.
 */
int
intr_msi_register(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	if (dev == NULL)
		return (EINVAL);
	pic = pic_create(dev, xref, FLAG_MSI);
	if (pic == NULL)
		return (ENOMEM);

	debugf("PIC %p registered for %s <dev %p, xref %jx>\n", pic,
	    device_get_nameunit(dev), dev, (uintmax_t)xref);
	return (0);
}

int
intr_alloc_msi(device_t pci, device_t child, intptr_t xref, int count,
    int maxcount, int *irqs)
{
	struct iommu_domain *domain;
	struct intr_irqsrc **isrc;
	struct intr_pic *pic;
	device_t pdev;
	struct intr_map_data_msi *msi;
	int err, i;

	pic = pic_lookup(NULL, xref, FLAG_MSI);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
	    ("%s: Found a non-MSI controller: %s", __func__,
	    device_get_name(pic->pic_dev)));
	/*
	 * If this is the first time we have used this context, ask the
	 * interrupt controller to map the memory the MSI source will need.
	 */
	err = MSI_IOMMU_INIT(pic->pic_dev, child, &domain);
	if (err != 0)
		return (err);

	isrc = malloc(sizeof(*isrc) * count, M_INTRNG, M_WAITOK);
	err = MSI_ALLOC_MSI(pic->pic_dev, child, count, maxcount, &pdev, isrc);
	if (err != 0) {
		free(isrc, M_INTRNG);
		return (err);
	}

	for (i = 0; i < count; i++) {
		isrc[i]->isrc_iommu = domain;
		msi = (struct intr_map_data_msi *)intr_alloc_map_data(
		    INTR_MAP_DATA_MSI, sizeof(*msi), M_WAITOK | M_ZERO);
		msi->isrc = isrc[i];

		irqs[i] = intr_map_irq(pic->pic_dev, xref,
		    (struct intr_map_data *)msi);
	}
	free(isrc, M_INTRNG);

	return (err);
}

int
intr_release_msi(device_t pci, device_t child, intptr_t xref, int count,
    int *irqs)
{
	struct intr_irqsrc **isrc;
	struct intr_pic *pic;
	struct intr_map_data_msi *msi;
	int i, err;

	pic = pic_lookup(NULL, xref, FLAG_MSI);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
	    ("%s: Found a non-MSI controller: %s", __func__,
	    device_get_name(pic->pic_dev)));

	isrc = malloc(sizeof(*isrc) * count, M_INTRNG, M_WAITOK);

	for (i = 0; i < count; i++) {
		msi = (struct intr_map_data_msi *)
		    intr_map_get_map_data(irqs[i]);
		KASSERT(msi->hdr.type == INTR_MAP_DATA_MSI,
		    ("%s: irq %d map data is not MSI", __func__,
		    irqs[i]));
		isrc[i] = msi->isrc;
	}

	MSI_IOMMU_DEINIT(pic->pic_dev, child);

	err = MSI_RELEASE_MSI(pic->pic_dev, child, count, isrc);

	for (i = 0; i < count; i++) {
		if (isrc[i] != NULL)
			intr_unmap_irq(irqs[i]);
	}

	free(isrc, M_INTRNG);
	return (err);
}
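/*
 * Example (illustrative only): a hypothetical PCIe host bridge driver
 * implements its pcib MSI methods by delegating to this framework, with
 * the MSI controller identified by an xref obtained from the firmware
 * data (e.g. via ofw_bus_msimap() in the FDT case). All "mypcib" names
 * are made up.
 *
 *	static int
 *	mypcib_alloc_msi(device_t pcib, device_t child, int count,
 *	    int maxcount, int *irqs)
 *	{
 *		struct mypcib_softc *sc = device_get_softc(pcib);
 *
 *		return (intr_alloc_msi(pcib, child, sc->mp_msi_xref, count,
 *		    maxcount, irqs));
 *	}
 *
 *	static int
 *	mypcib_release_msi(device_t pcib, device_t child, int count,
 *	    int *irqs)
 *	{
 *		struct mypcib_softc *sc = device_get_softc(pcib);
 *
 *		return (intr_release_msi(pcib, child, sc->mp_msi_xref,
 *		    count, irqs));
 *	}
 *
 * The irqs returned here are again irq_map resource ids, so the normal
 * bus_alloc_resource()/bus_setup_intr() path works for MSI vectors too.
 */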
int
intr_alloc_msix(device_t pci, device_t child, intptr_t xref, int *irq)
{
	struct iommu_domain *domain;
	struct intr_irqsrc *isrc;
	struct intr_pic *pic;
	device_t pdev;
	struct intr_map_data_msi *msi;
	int err;

	pic = pic_lookup(NULL, xref, FLAG_MSI);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
	    ("%s: Found a non-MSI controller: %s", __func__,
	    device_get_name(pic->pic_dev)));

	/*
	 * If this is the first time we have used this context, ask the
	 * interrupt controller to map the memory the MSI source will need.
	 */
	err = MSI_IOMMU_INIT(pic->pic_dev, child, &domain);
	if (err != 0)
		return (err);

	err = MSI_ALLOC_MSIX(pic->pic_dev, child, &pdev, &isrc);
	if (err != 0)
		return (err);

	isrc->isrc_iommu = domain;
	msi = (struct intr_map_data_msi *)intr_alloc_map_data(
	    INTR_MAP_DATA_MSI, sizeof(*msi), M_WAITOK | M_ZERO);
	msi->isrc = isrc;
	*irq = intr_map_irq(pic->pic_dev, xref, (struct intr_map_data *)msi);
	return (0);
}

int
intr_release_msix(device_t pci, device_t child, intptr_t xref, int irq)
{
	struct intr_irqsrc *isrc;
	struct intr_pic *pic;
	struct intr_map_data_msi *msi;
	int err;

	pic = pic_lookup(NULL, xref, FLAG_MSI);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
	    ("%s: Found a non-MSI controller: %s", __func__,
	    device_get_name(pic->pic_dev)));

	msi = (struct intr_map_data_msi *)
	    intr_map_get_map_data(irq);
	KASSERT(msi->hdr.type == INTR_MAP_DATA_MSI,
	    ("%s: irq %d map data is not MSI", __func__,
	    irq));
	isrc = msi->isrc;
	if (isrc == NULL) {
		intr_unmap_irq(irq);
		return (EINVAL);
	}

	MSI_IOMMU_DEINIT(pic->pic_dev, child);

	err = MSI_RELEASE_MSIX(pic->pic_dev, child, isrc);
	intr_unmap_irq(irq);

	return (err);
}

int
intr_map_msi(device_t pci, device_t child, intptr_t xref, int irq,
    uint64_t *addr, uint32_t *data)
{
	struct intr_irqsrc *isrc;
	struct intr_pic *pic;
	int err;

	pic = pic_lookup(NULL, xref, FLAG_MSI);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
	    ("%s: Found a non-MSI controller: %s", __func__,
	    device_get_name(pic->pic_dev)));

	isrc = intr_map_get_isrc(irq);
	if (isrc == NULL)
		return (EINVAL);

	err = MSI_MAP_MSI(pic->pic_dev, child, isrc, addr, data);

#ifdef IOMMU
	if (isrc->isrc_iommu != NULL)
		iommu_translate_msi(isrc->isrc_iommu, addr);
#endif

	return (err);
}

void dosoftints(void);
void
dosoftints(void)
{
}

#ifdef SMP
/*
 * Init an interrupt controller on another CPU.
 */
void
intr_pic_init_secondary(void)
{

	/*
	 * QQQ: Only the root PIC is aware of other CPUs ???
	 */
	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));

	//mtx_lock(&isrc_table_lock);
	PIC_INIT_SECONDARY(intr_irq_root_dev);
	//mtx_unlock(&isrc_table_lock);
}
#endif

#ifdef DDB
DB_SHOW_COMMAND(irqs, db_show_irqs)
{
	u_int i, irqsum;
	u_long num;
	struct intr_irqsrc *isrc;

	for (irqsum = 0, i = 0; i < NIRQ; i++) {
		isrc = irq_sources[i];
		if (isrc == NULL)
			continue;

		num = isrc->isrc_count != NULL ? isrc->isrc_count[0] : 0;
		db_printf("irq%-3u <%s>: cpu %02lx%s cnt %lu\n", i,
		    isrc->isrc_name, isrc->isrc_cpu.__bits[0],
		    isrc->isrc_flags & INTR_ISRCF_BOUND ? " (bound)" : "",
		    num);
		irqsum += num;
	}
	db_printf("irq total %u\n", irqsum);
}
#endif
/*
 * Interrupt mapping table functions.
 *
 * Please keep this part separate; it may be turned into an extension of
 * the standard resources.
 */
struct intr_map_entry
{
	device_t dev;
	intptr_t xref;
	struct intr_map_data *map_data;
	struct intr_irqsrc *isrc;
	/* XXX TODO DISCONNECTED PICs */
	/*int flags */
};

/* XXX Convert irq_map[] to a dynamically expandable one. */
static struct intr_map_entry *irq_map[2 * NIRQ];
static int irq_map_count = nitems(irq_map);
static int irq_map_first_free_idx;
static struct mtx irq_map_lock;

static struct intr_irqsrc *
intr_map_get_isrc(u_int res_id)
{
	struct intr_irqsrc *isrc;

	isrc = NULL;
	mtx_lock(&irq_map_lock);
	if (res_id < irq_map_count && irq_map[res_id] != NULL)
		isrc = irq_map[res_id]->isrc;
	mtx_unlock(&irq_map_lock);

	return (isrc);
}

static void
intr_map_set_isrc(u_int res_id, struct intr_irqsrc *isrc)
{

	mtx_lock(&irq_map_lock);
	if (res_id < irq_map_count && irq_map[res_id] != NULL)
		irq_map[res_id]->isrc = isrc;
	mtx_unlock(&irq_map_lock);
}

/*
 * Get the intr_map_entry's map data (a pointer, not a copy).
 */
static struct intr_map_data *
intr_map_get_map_data(u_int res_id)
{
	struct intr_map_data *data;

	data = NULL;
	mtx_lock(&irq_map_lock);
	if (res_id >= irq_map_count || irq_map[res_id] == NULL)
		panic("Attempt to copy invalid resource id: %u\n", res_id);
	data = irq_map[res_id]->map_data;
	mtx_unlock(&irq_map_lock);

	return (data);
}

/*
 * Get a copy of the intr_map_entry data.
 */
static void
intr_map_copy_map_data(u_int res_id, device_t *map_dev, intptr_t *map_xref,
    struct intr_map_data **data)
{
	size_t len;

	len = 0;
	mtx_lock(&irq_map_lock);
	if (res_id >= irq_map_count || irq_map[res_id] == NULL)
		panic("Attempt to copy invalid resource id: %u\n", res_id);
	if (irq_map[res_id]->map_data != NULL)
		len = irq_map[res_id]->map_data->len;
	mtx_unlock(&irq_map_lock);

	if (len == 0)
		*data = NULL;
	else
		*data = malloc(len, M_INTRNG, M_WAITOK | M_ZERO);
	mtx_lock(&irq_map_lock);
	if (irq_map[res_id] == NULL)
		panic("Attempt to copy invalid resource id: %u\n", res_id);
	if (len != 0) {
		if (len != irq_map[res_id]->map_data->len)
			panic("Resource id: %u has changed.\n", res_id);
		memcpy(*data, irq_map[res_id]->map_data, len);
	}
	*map_dev = irq_map[res_id]->dev;
	*map_xref = irq_map[res_id]->xref;
	mtx_unlock(&irq_map_lock);
}

/*
 * Allocate and fill a new entry in the irq_map table.
 */
u_int
intr_map_irq(device_t dev, intptr_t xref, struct intr_map_data *data)
{
	u_int i;
	struct intr_map_entry *entry;

	/* Prepare the new entry first. */
	entry = malloc(sizeof(*entry), M_INTRNG, M_WAITOK | M_ZERO);

	entry->dev = dev;
	entry->xref = xref;
	entry->map_data = data;
	entry->isrc = NULL;

	mtx_lock(&irq_map_lock);
	for (i = irq_map_first_free_idx; i < irq_map_count; i++) {
		if (irq_map[i] == NULL) {
			irq_map[i] = entry;
			irq_map_first_free_idx = i + 1;
			mtx_unlock(&irq_map_lock);
			return (i);
		}
	}
	mtx_unlock(&irq_map_lock);

	/* XXX Expand the irq_map table. */
	panic("IRQ mapping table is full.");
}
/*
 * Remove and free a mapping entry.
 */
void
intr_unmap_irq(u_int res_id)
{
	struct intr_map_entry *entry;

	mtx_lock(&irq_map_lock);
	if ((res_id >= irq_map_count) || (irq_map[res_id] == NULL))
		panic("Attempt to unmap invalid resource id: %u\n", res_id);
	entry = irq_map[res_id];
	irq_map[res_id] = NULL;
	irq_map_first_free_idx = res_id;
	mtx_unlock(&irq_map_lock);
	intr_free_intr_map_data(entry->map_data);
	free(entry, M_INTRNG);
}

/*
 * Clone a mapping entry.
 */
u_int
intr_map_clone_irq(u_int old_res_id)
{
	device_t map_dev;
	intptr_t map_xref;
	struct intr_map_data *data;

	intr_map_copy_map_data(old_res_id, &map_dev, &map_xref, &data);
	return (intr_map_irq(map_dev, map_xref, data));
}

static void
intr_map_init(void *dummy __unused)
{

	mtx_init(&irq_map_lock, "intr map table", NULL, MTX_DEF);
}
SYSINIT(intr_map_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_map_init, NULL);