/*-
 * Copyright (c) 2015-2016 Svatopluk Kraus
 * Copyright (c) 2015-2016 Michal Meloun
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * New-style Interrupt Framework
 *
 * TODO: - add support for disconnected PICs.
 *       - support IPI (PPI) enabling on other CPUs if already started.
 *       - complete things for removable PICs.
 */

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/rman.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <machine/atomic.h>
#include <machine/intr.h>
#include <machine/cpu.h>
#include <machine/smp.h>
#include <machine/stdarg.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include "pic_if.h"
#include "msi_if.h"

#define	INTRNAME_LEN	(2*MAXCOMLEN + 1)

#ifdef DEBUG
#define debugf(fmt, args...) do { printf("%s(): ", __func__);	\
    printf(fmt,##args); } while (0)
#else
#define debugf(fmt, args...)
#endif

MALLOC_DECLARE(M_INTRNG);
MALLOC_DEFINE(M_INTRNG, "intr", "intr interrupt handling");

/* Main interrupt handler called from assembler -> 'hidden' for C code. */
void intr_irq_handler(struct trapframe *tf);

/* Root interrupt controller stuff. */
device_t intr_irq_root_dev;
static intr_irq_filter_t *irq_root_filter;
static void *irq_root_arg;
static u_int irq_root_ipicount;

struct intr_pic_child {
	SLIST_ENTRY(intr_pic_child)	 pc_next;
	struct intr_pic			*pc_pic;
	intr_child_irq_filter_t		*pc_filter;
	void				*pc_filter_arg;
	uintptr_t			 pc_start;
	uintptr_t			 pc_length;
};
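/*
 * An intr_pic_child covers parent interrupts in the half-open range
 * [pc_start, pc_start + pc_length). For illustration (hypothetical
 * numbers): a cascaded controller registered with start 64 and length 32
 * has its pc_filter called for parent IRQs 64..95. See
 * intr_child_irq_handler() and intr_pic_add_handler() below.
 */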
/* Interrupt controller definition. */
struct intr_pic {
	SLIST_ENTRY(intr_pic)	pic_next;
	intptr_t		pic_xref;	/* hardware identification */
	device_t		pic_dev;
/* Only one of FLAG_PIC or FLAG_MSI may be set */
#define	FLAG_PIC	(1 << 0)
#define	FLAG_MSI	(1 << 1)
#define	FLAG_TYPE_MASK	(FLAG_PIC | FLAG_MSI)
	u_int			pic_flags;
	struct mtx		pic_child_lock;
	SLIST_HEAD(, intr_pic_child) pic_children;
};

static struct mtx pic_list_lock;
static SLIST_HEAD(, intr_pic) pic_list;

static struct intr_pic *pic_lookup(device_t dev, intptr_t xref, int flags);

/* Interrupt source definition. */
static struct mtx isrc_table_lock;
static struct intr_irqsrc *irq_sources[NIRQ];
u_int irq_next_free;

#ifdef SMP
static boolean_t irq_assign_cpu = FALSE;
#endif

/*
 * - 2 counters for each I/O interrupt.
 * - MAXCPU counters for each IPI for SMP.
 */
#ifdef SMP
#define INTRCNT_COUNT	(NIRQ * 2 + INTR_IPI_COUNT * MAXCPU)
#else
#define INTRCNT_COUNT	(NIRQ * 2)
#endif

/* Data for MI statistics reporting. */
u_long intrcnt[INTRCNT_COUNT];
char intrnames[INTRCNT_COUNT * INTRNAME_LEN];
size_t sintrcnt = sizeof(intrcnt);
size_t sintrnames = sizeof(intrnames);
static u_int intrcnt_index;

static struct intr_irqsrc *intr_map_get_isrc(u_int res_id);
static void intr_map_set_isrc(u_int res_id, struct intr_irqsrc *isrc);
static struct intr_map_data * intr_map_get_map_data(u_int res_id);
static void intr_map_copy_map_data(u_int res_id, device_t *dev, intptr_t *xref,
    struct intr_map_data **data);

/*
 * Interrupt framework initialization routine.
 */
static void
intr_irq_init(void *dummy __unused)
{

	SLIST_INIT(&pic_list);
	mtx_init(&pic_list_lock, "intr pic list", NULL, MTX_DEF);

	mtx_init(&isrc_table_lock, "intr isrc table", NULL, MTX_DEF);
}
SYSINIT(intr_irq_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_irq_init, NULL);

static void
intrcnt_setname(const char *name, int index)
{

	snprintf(intrnames + INTRNAME_LEN * index, INTRNAME_LEN, "%-*s",
	    INTRNAME_LEN - 1, name);
}

/*
 * Update name for interrupt source with interrupt event.
 */
static void
intrcnt_updatename(struct intr_irqsrc *isrc)
{

	/* QQQ: What about stray counter name? */
	mtx_assert(&isrc_table_lock, MA_OWNED);
	intrcnt_setname(isrc->isrc_event->ie_fullname, isrc->isrc_index);
}

/*
 * Virtualization for interrupt source interrupt counter increment.
 */
static inline void
isrc_increment_count(struct intr_irqsrc *isrc)
{

	if (isrc->isrc_flags & INTR_ISRCF_PPI)
		atomic_add_long(&isrc->isrc_count[0], 1);
	else
		isrc->isrc_count[0]++;
}

/*
 * Virtualization for interrupt source interrupt stray counter increment.
 */
static inline void
isrc_increment_straycount(struct intr_irqsrc *isrc)
{

	isrc->isrc_count[1]++;
}
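/*
 * Note on the counter layout used above: isrc_setup_counters() below gives
 * every interrupt source two consecutive slots in intrcnt[], so that
 * isrc_count[0] is the regular count and isrc_count[1] the stray count.
 * A PPI source may fire concurrently on several CPUs, hence the atomic
 * increment in that case only.
 */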
/*
 * Virtualization for interrupt source interrupt name update.
 */
static void
isrc_update_name(struct intr_irqsrc *isrc, const char *name)
{
	char str[INTRNAME_LEN];

	mtx_assert(&isrc_table_lock, MA_OWNED);

	if (name != NULL) {
		snprintf(str, INTRNAME_LEN, "%s: %s", isrc->isrc_name, name);
		intrcnt_setname(str, isrc->isrc_index);
		snprintf(str, INTRNAME_LEN, "stray %s: %s", isrc->isrc_name,
		    name);
		intrcnt_setname(str, isrc->isrc_index + 1);
	} else {
		snprintf(str, INTRNAME_LEN, "%s:", isrc->isrc_name);
		intrcnt_setname(str, isrc->isrc_index);
		snprintf(str, INTRNAME_LEN, "stray %s:", isrc->isrc_name);
		intrcnt_setname(str, isrc->isrc_index + 1);
	}
}

/*
 * Virtualization for interrupt source interrupt counters setup.
 */
static void
isrc_setup_counters(struct intr_irqsrc *isrc)
{
	u_int index;

	/*
	 * XXX - it does not work well with removable controllers and
	 *       interrupt sources !!!
	 */
	index = atomic_fetchadd_int(&intrcnt_index, 2);
	isrc->isrc_index = index;
	isrc->isrc_count = &intrcnt[index];
	isrc_update_name(isrc, NULL);
}

/*
 * Virtualization for interrupt source interrupt counters release.
 */
static void
isrc_release_counters(struct intr_irqsrc *isrc)
{

	panic("%s: not implemented", __func__);
}

#ifdef SMP
/*
 * Virtualization for interrupt source IPI counters setup.
 */
u_long *
intr_ipi_setup_counters(const char *name)
{
	u_int index, i;
	char str[INTRNAME_LEN];

	index = atomic_fetchadd_int(&intrcnt_index, MAXCPU);
	for (i = 0; i < MAXCPU; i++) {
		snprintf(str, INTRNAME_LEN, "cpu%d:%s", i, name);
		intrcnt_setname(str, index + i);
	}
	return (&intrcnt[index]);
}
#endif

/*
 * Main interrupt dispatch handler. It is called straight
 * from the assembler, where the CPU interrupt is served.
 */
void
intr_irq_handler(struct trapframe *tf)
{
	struct trapframe * oldframe;
	struct thread * td;

	KASSERT(irq_root_filter != NULL, ("%s: no filter", __func__));

	VM_CNT_INC(v_intr);
	critical_enter();
	td = curthread;
	oldframe = td->td_intr_frame;
	td->td_intr_frame = tf;
	irq_root_filter(irq_root_arg);
	td->td_intr_frame = oldframe;
	critical_exit();
#ifdef HWPMC_HOOKS
	if (pmc_hook && TRAPF_USERMODE(tf) &&
	    (PCPU_GET(curthread)->td_pflags & TDP_CALLCHAIN))
		pmc_hook(PCPU_GET(curthread), PMC_FN_USER_CALLCHAIN, tf);
#endif
}

int
intr_child_irq_handler(struct intr_pic *parent, uintptr_t irq)
{
	struct intr_pic_child *child;
	bool found;

	found = false;
	mtx_lock_spin(&parent->pic_child_lock);
	SLIST_FOREACH(child, &parent->pic_children, pc_next) {
		if (child->pc_start <= irq &&
		    irq < (child->pc_start + child->pc_length)) {
			found = true;
			break;
		}
	}
	mtx_unlock_spin(&parent->pic_child_lock);

	if (found)
		return (child->pc_filter(child->pc_filter_arg, irq));

	return (FILTER_STRAY);
}
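/*
 * Example of the chained dispatch path above (a hedged sketch; the "foo"
 * names are hypothetical): a cascaded controller's filter on the parent
 * reads its hardware pending state and hands the number to
 * intr_child_irq_handler(), which finds the owning child range:
 *
 *	static int
 *	foo_intr(void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *		uintptr_t irq;
 *
 *		irq = FOO_RD4(sc, FOO_PENDING_REG);
 *		return (intr_child_irq_handler(sc->sc_pic, irq));
 *	}
 */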
/*
 * Interrupt controller dispatch function for interrupts. It should
 * be called straight from the interrupt controller, when the associated
 * interrupt source is learned.
 */
int
intr_isrc_dispatch(struct intr_irqsrc *isrc, struct trapframe *tf)
{

	KASSERT(isrc != NULL, ("%s: no source", __func__));

	isrc_increment_count(isrc);

#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL) {
		int error;
		error = isrc->isrc_filter(isrc->isrc_arg, tf);
		PIC_POST_FILTER(isrc->isrc_dev, isrc);
		if (error == FILTER_HANDLED)
			return (0);
	} else
#endif
	if (isrc->isrc_event != NULL) {
		if (intr_event_handle(isrc->isrc_event, tf) == 0)
			return (0);
	}

	isrc_increment_straycount(isrc);
	return (EINVAL);
}

/*
 * Allocate a unique interrupt number (resource handle) for an interrupt
 * source.
 *
 * There could be various strategies for allocating free interrupt numbers
 * (resource handles) for new interrupt sources.
 *
 * 1. Handles are always allocated forward, so handles are not recycled
 *    immediately. This degrades once only one free handle is left and it
 *    gets reused constantly.
 */
static inline int
isrc_alloc_irq(struct intr_irqsrc *isrc)
{
	u_int maxirqs, irq;

	mtx_assert(&isrc_table_lock, MA_OWNED);

	maxirqs = nitems(irq_sources);
	if (irq_next_free >= maxirqs)
		return (ENOSPC);

	for (irq = irq_next_free; irq < maxirqs; irq++) {
		if (irq_sources[irq] == NULL)
			goto found;
	}
	for (irq = 0; irq < irq_next_free; irq++) {
		if (irq_sources[irq] == NULL)
			goto found;
	}

	irq_next_free = maxirqs;
	return (ENOSPC);

found:
	isrc->isrc_irq = irq;
	irq_sources[irq] = isrc;

	irq_next_free = irq + 1;
	if (irq_next_free >= maxirqs)
		irq_next_free = 0;
	return (0);
}

/*
 * Free the unique interrupt number (resource handle) of an interrupt source.
 */
static inline int
isrc_free_irq(struct intr_irqsrc *isrc)
{

	mtx_assert(&isrc_table_lock, MA_OWNED);

	if (isrc->isrc_irq >= nitems(irq_sources))
		return (EINVAL);
	if (irq_sources[isrc->isrc_irq] != isrc)
		return (EINVAL);

	irq_sources[isrc->isrc_irq] = NULL;
	isrc->isrc_irq = INTR_IRQ_INVALID;	/* just to be safe */
	return (0);
}

/*
 * Initialize an interrupt source and register it into the global interrupt
 * table.
 */
int
intr_isrc_register(struct intr_irqsrc *isrc, device_t dev, u_int flags,
    const char *fmt, ...)
{
	int error;
	va_list ap;

	bzero(isrc, sizeof(struct intr_irqsrc));
	isrc->isrc_dev = dev;
	isrc->isrc_irq = INTR_IRQ_INVALID;	/* just to be safe */
	isrc->isrc_flags = flags;

	va_start(ap, fmt);
	vsnprintf(isrc->isrc_name, INTR_ISRC_NAMELEN, fmt, ap);
	va_end(ap);

	mtx_lock(&isrc_table_lock);
	error = isrc_alloc_irq(isrc);
	if (error != 0) {
		mtx_unlock(&isrc_table_lock);
		return (error);
	}
	/*
	 * Setup interrupt counters, but not for IPI sources. Those are set up
	 * later and only for used ones (up to INTR_IPI_COUNT) to not exhaust
	 * our counter pool.
	 */
	if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
		isrc_setup_counters(isrc);
	mtx_unlock(&isrc_table_lock);
	return (0);
}
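/*
 * Usage sketch for intr_isrc_register() (hedged; the "foo" names are
 * hypothetical): a PIC driver typically embeds struct intr_irqsrc in its
 * per-interrupt state and registers every source at attach time:
 *
 *	for (irq = 0; irq < FOO_NIRQS; irq++) {
 *		sc->sc_irqs[irq].fi_irq = irq;
 *		error = intr_isrc_register(&sc->sc_irqs[irq].fi_isrc,
 *		    sc->sc_dev, 0, "%s,%u", name, irq);
 *		if (error != 0)
 *			return (error);
 *	}
 */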
/*
 * Deregister an interrupt source from the global interrupt table.
 */
int
intr_isrc_deregister(struct intr_irqsrc *isrc)
{
	int error;

	mtx_lock(&isrc_table_lock);
	if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
		isrc_release_counters(isrc);
	error = isrc_free_irq(isrc);
	mtx_unlock(&isrc_table_lock);
	return (error);
}

#ifdef SMP
/*
 * A support function for a PIC to decide if the provided ISRC should be
 * inited on a given cpu. The logic of the INTR_ISRCF_BOUND flag and the
 * isrc_cpu member of struct intr_irqsrc is the following:
 *
 *     If INTR_ISRCF_BOUND is set, the ISRC should be inited only on cpus
 *     set in isrc_cpu. If not, the ISRC should be inited on every cpu and
 *     isrc_cpu is kept consistent with it. Thus isrc_cpu is always correct.
 */
bool
intr_isrc_init_on_cpu(struct intr_irqsrc *isrc, u_int cpu)
{

	if (isrc->isrc_handlers == 0)
		return (false);
	if ((isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI)) == 0)
		return (false);
	if (isrc->isrc_flags & INTR_ISRCF_BOUND)
		return (CPU_ISSET(cpu, &isrc->isrc_cpu));

	CPU_SET(cpu, &isrc->isrc_cpu);
	return (true);
}
#endif

#ifdef INTR_SOLO
/*
 * Setup a filter on the interrupt source.
 */
static int
isrc_setup_filter(struct intr_irqsrc *isrc, const char *name,
    intr_irq_filter_t *filter, void *arg, void **cookiep)
{

	if (filter == NULL)
		return (EINVAL);

	mtx_lock(&isrc_table_lock);
	/*
	 * Make sure that we do not mix the two ways
	 * how we handle interrupt sources.
	 */
	if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
		mtx_unlock(&isrc_table_lock);
		return (EBUSY);
	}
	isrc->isrc_filter = filter;
	isrc->isrc_arg = arg;
	isrc_update_name(isrc, name);
	mtx_unlock(&isrc_table_lock);

	*cookiep = isrc;
	return (0);
}
#endif

/*
 * Interrupt source pre_ithread method for MI interrupt framework.
 */
static void
intr_isrc_pre_ithread(void *arg)
{
	struct intr_irqsrc *isrc = arg;

	PIC_PRE_ITHREAD(isrc->isrc_dev, isrc);
}

/*
 * Interrupt source post_ithread method for MI interrupt framework.
 */
static void
intr_isrc_post_ithread(void *arg)
{
	struct intr_irqsrc *isrc = arg;

	PIC_POST_ITHREAD(isrc->isrc_dev, isrc);
}

/*
 * Interrupt source post_filter method for MI interrupt framework.
 */
static void
intr_isrc_post_filter(void *arg)
{
	struct intr_irqsrc *isrc = arg;

	PIC_POST_FILTER(isrc->isrc_dev, isrc);
}
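/*
 * The three methods above, together with intr_isrc_assign_cpu() below, are
 * handed to intr_event_create() in isrc_event_create(); they are the glue
 * that lets the MI intr_event code drive the PIC through the pic_if
 * interface.
 */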
/*
 * Interrupt source assign_cpu method for MI interrupt framework.
 */
static int
intr_isrc_assign_cpu(void *arg, int cpu)
{
#ifdef SMP
	struct intr_irqsrc *isrc = arg;
	int error;

	if (isrc->isrc_dev != intr_irq_root_dev)
		return (EINVAL);

	mtx_lock(&isrc_table_lock);
	if (cpu == NOCPU) {
		CPU_ZERO(&isrc->isrc_cpu);
		isrc->isrc_flags &= ~INTR_ISRCF_BOUND;
	} else {
		CPU_SETOF(cpu, &isrc->isrc_cpu);
		isrc->isrc_flags |= INTR_ISRCF_BOUND;
	}

	/*
	 * In the NOCPU case, it's up to the PIC to either leave the ISRC on
	 * the same CPU, re-balance it to another CPU, or enable it on more
	 * CPUs. However, the PIC is expected to change isrc_cpu appropriately
	 * to keep us well informed if the call is successful.
	 */
	if (irq_assign_cpu) {
		error = PIC_BIND_INTR(isrc->isrc_dev, isrc);
		if (error) {
			CPU_ZERO(&isrc->isrc_cpu);
			mtx_unlock(&isrc_table_lock);
			return (error);
		}
	}
	mtx_unlock(&isrc_table_lock);
	return (0);
#else
	return (EOPNOTSUPP);
#endif
}

/*
 * Create interrupt event for interrupt source.
 */
static int
isrc_event_create(struct intr_irqsrc *isrc)
{
	struct intr_event *ie;
	int error;

	error = intr_event_create(&ie, isrc, 0, isrc->isrc_irq,
	    intr_isrc_pre_ithread, intr_isrc_post_ithread, intr_isrc_post_filter,
	    intr_isrc_assign_cpu, "%s:", isrc->isrc_name);
	if (error)
		return (error);

	mtx_lock(&isrc_table_lock);
	/*
	 * Make sure that we do not mix the two ways
	 * how we handle interrupt sources. Let the existing event win.
	 */
#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
#else
	if (isrc->isrc_event != NULL) {
#endif
		mtx_unlock(&isrc_table_lock);
		intr_event_destroy(ie);
		return (isrc->isrc_event != NULL ? EBUSY : 0);
	}
	isrc->isrc_event = ie;
	mtx_unlock(&isrc_table_lock);

	return (0);
}
#ifdef notyet
/*
 * Destroy interrupt event for interrupt source.
 */
static void
isrc_event_destroy(struct intr_irqsrc *isrc)
{
	struct intr_event *ie;

	mtx_lock(&isrc_table_lock);
	ie = isrc->isrc_event;
	isrc->isrc_event = NULL;
	mtx_unlock(&isrc_table_lock);

	if (ie != NULL)
		intr_event_destroy(ie);
}
#endif
/*
 * Add a handler to the interrupt source.
 */
static int
isrc_add_handler(struct intr_irqsrc *isrc, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, void **cookiep)
{
	int error;

	if (isrc->isrc_event == NULL) {
		error = isrc_event_create(isrc);
		if (error)
			return (error);
	}

	error = intr_event_add_handler(isrc->isrc_event, name, filter, handler,
	    arg, intr_priority(flags), flags, cookiep);
	if (error == 0) {
		mtx_lock(&isrc_table_lock);
		intrcnt_updatename(isrc);
		mtx_unlock(&isrc_table_lock);
	}

	return (error);
}

/*
 * Lookup interrupt controller locked.
 */
static inline struct intr_pic *
pic_lookup_locked(device_t dev, intptr_t xref, int flags)
{
	struct intr_pic *pic;

	mtx_assert(&pic_list_lock, MA_OWNED);

	if (dev == NULL && xref == 0)
		return (NULL);

	/* Note that pic->pic_dev is never NULL on registered PIC. */
	SLIST_FOREACH(pic, &pic_list, pic_next) {
		if ((pic->pic_flags & FLAG_TYPE_MASK) !=
		    (flags & FLAG_TYPE_MASK))
			continue;

		if (dev == NULL) {
			if (xref == pic->pic_xref)
				return (pic);
		} else if (xref == 0 || pic->pic_xref == 0) {
			if (dev == pic->pic_dev)
				return (pic);
		} else if (xref == pic->pic_xref && dev == pic->pic_dev)
				return (pic);
	}
	return (NULL);
}

/*
 * Lookup interrupt controller.
 */
static struct intr_pic *
pic_lookup(device_t dev, intptr_t xref, int flags)
{
	struct intr_pic *pic;

	mtx_lock(&pic_list_lock);
	pic = pic_lookup_locked(dev, xref, flags);
	mtx_unlock(&pic_list_lock);
	return (pic);
}
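/*
 * Lookup note: either key may be wildcarded. For instance, the MSI code
 * below uses pic_lookup(NULL, xref, FLAG_MSI) to find a controller by xref
 * alone, while intr_pic_add_handler() uses pic_lookup(parent, 0, FLAG_PIC)
 * to find one by device alone.
 */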
/*
 * Create interrupt controller.
 */
static struct intr_pic *
pic_create(device_t dev, intptr_t xref, int flags)
{
	struct intr_pic *pic;

	mtx_lock(&pic_list_lock);
	pic = pic_lookup_locked(dev, xref, flags);
	if (pic != NULL) {
		mtx_unlock(&pic_list_lock);
		return (pic);
	}
	pic = malloc(sizeof(*pic), M_INTRNG, M_NOWAIT | M_ZERO);
	if (pic == NULL) {
		mtx_unlock(&pic_list_lock);
		return (NULL);
	}
	pic->pic_xref = xref;
	pic->pic_dev = dev;
	pic->pic_flags = flags;
	mtx_init(&pic->pic_child_lock, "pic child lock", NULL, MTX_SPIN);
	SLIST_INSERT_HEAD(&pic_list, pic, pic_next);
	mtx_unlock(&pic_list_lock);

	return (pic);
}
#ifdef notyet
/*
 * Destroy interrupt controller.
 */
static void
pic_destroy(device_t dev, intptr_t xref, int flags)
{
	struct intr_pic *pic;

	mtx_lock(&pic_list_lock);
	pic = pic_lookup_locked(dev, xref, flags);
	if (pic == NULL) {
		mtx_unlock(&pic_list_lock);
		return;
	}
	SLIST_REMOVE(&pic_list, pic, intr_pic, pic_next);
	mtx_unlock(&pic_list_lock);

	free(pic, M_INTRNG);
}
#endif
/*
 * Register interrupt controller.
 */
struct intr_pic *
intr_pic_register(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	if (dev == NULL)
		return (NULL);
	pic = pic_create(dev, xref, FLAG_PIC);
	if (pic == NULL)
		return (NULL);

	debugf("PIC %p registered for %s <dev %p, xref %jx>\n", pic,
	    device_get_nameunit(dev), dev, (uintmax_t)xref);
	return (pic);
}

/*
 * Unregister interrupt controller.
 */
int
intr_pic_deregister(device_t dev, intptr_t xref)
{

	panic("%s: not implemented", __func__);
}

/*
 * Mark interrupt controller (itself) as a root one.
 *
 * Note that only an interrupt controller can really know its position
 * in the interrupt controllers' tree. So a root PIC must claim itself
 * as a root.
 *
 * In the FDT case, according to ePAPR approved version 1.1 from
 * 08 April 2011, page 30:
 *     "The root of the interrupt tree is determined when traversal
 *      of the interrupt tree reaches an interrupt controller node without
 *      an interrupts property and thus no explicit interrupt parent."
 */
int
intr_pic_claim_root(device_t dev, intptr_t xref, intr_irq_filter_t *filter,
    void *arg, u_int ipicount)
{
	struct intr_pic *pic;

	pic = pic_lookup(dev, xref, FLAG_PIC);
	if (pic == NULL) {
		device_printf(dev, "not registered\n");
		return (EINVAL);
	}

	KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_PIC,
	    ("%s: Found a non-PIC controller: %s", __func__,
	     device_get_name(pic->pic_dev)));

	if (filter == NULL) {
		device_printf(dev, "filter missing\n");
		return (EINVAL);
	}

	/*
	 * Only one interrupt controller can be the root for now.
	 * Note that we further suppose that there is no threaded interrupt
	 * routine (handler) on the root. See intr_irq_handler().
	 */
	if (intr_irq_root_dev != NULL) {
		device_printf(dev, "another root already set\n");
		return (EBUSY);
	}

	intr_irq_root_dev = dev;
	irq_root_filter = filter;
	irq_root_arg = arg;
	irq_root_ipicount = ipicount;

	debugf("irq root set to %s\n", device_get_nameunit(dev));
	return (0);
}
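/*
 * Typical root-claim sequence in a root PIC driver's attach method (a
 * hedged sketch; the "foo" names are hypothetical):
 *
 *	sc->sc_pic = intr_pic_register(dev, xref);
 *	if (sc->sc_pic == NULL)
 *		return (ENXIO);
 *	error = intr_pic_claim_root(dev, xref, foo_intr, sc, FOO_NIPIS);
 *
 * After a successful claim, foo_intr becomes irq_root_filter and is
 * invoked on every CPU interrupt from intr_irq_handler() above.
 */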
/*
 * Add a handler to manage a subrange of a parent's interrupts.
 */
struct intr_pic *
intr_pic_add_handler(device_t parent, struct intr_pic *pic,
    intr_child_irq_filter_t *filter, void *arg, uintptr_t start,
    uintptr_t length)
{
	struct intr_pic *parent_pic;
	struct intr_pic_child *newchild;
#ifdef INVARIANTS
	struct intr_pic_child *child;
#endif

	/* Find the parent PIC */
	parent_pic = pic_lookup(parent, 0, FLAG_PIC);
	if (parent_pic == NULL)
		return (NULL);

	newchild = malloc(sizeof(*newchild), M_INTRNG, M_WAITOK | M_ZERO);
	newchild->pc_pic = pic;
	newchild->pc_filter = filter;
	newchild->pc_filter_arg = arg;
	newchild->pc_start = start;
	newchild->pc_length = length;

	mtx_lock_spin(&parent_pic->pic_child_lock);
#ifdef INVARIANTS
	SLIST_FOREACH(child, &parent_pic->pic_children, pc_next) {
		KASSERT(child->pc_pic != pic, ("%s: Adding a child PIC twice",
		    __func__));
	}
#endif
	SLIST_INSERT_HEAD(&parent_pic->pic_children, newchild, pc_next);
	mtx_unlock_spin(&parent_pic->pic_child_lock);

	return (pic);
}
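/*
 * A cascaded controller typically combines the two registrations (a hedged
 * sketch; the "foo" names and numbers are hypothetical):
 *
 *	sc->sc_pic = intr_pic_register(dev, xref);
 *	intr_pic_add_handler(parent_pic_dev, sc->sc_pic, foo_child_intr,
 *	    sc, FOO_FIRST_PARENT_IRQ, FOO_NPARENT_IRQS);
 *
 * after which foo_child_intr() is called from intr_child_irq_handler()
 * for parent IRQs inside the registered range.
 */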
static int
intr_resolve_irq(device_t dev, intptr_t xref, struct intr_map_data *data,
    struct intr_irqsrc **isrc)
{
	struct intr_pic *pic;
	struct intr_map_data_msi *msi;

	if (data == NULL)
		return (EINVAL);

	pic = pic_lookup(dev, xref,
	    (data->type == INTR_MAP_DATA_MSI) ? FLAG_MSI : FLAG_PIC);
	if (pic == NULL)
		return (ESRCH);

	switch (data->type) {
	case INTR_MAP_DATA_MSI:
		KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
		    ("%s: Found a non-MSI controller: %s", __func__,
		     device_get_name(pic->pic_dev)));
		msi = (struct intr_map_data_msi *)data;
		*isrc = msi->isrc;
		return (0);

	default:
		KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_PIC,
		    ("%s: Found a non-PIC controller: %s", __func__,
		     device_get_name(pic->pic_dev)));
		return (PIC_MAP_INTR(pic->pic_dev, data, isrc));
	}
}

int
intr_activate_irq(device_t dev, struct resource *res)
{
	device_t map_dev;
	intptr_t map_xref;
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;
	u_int res_id;
	int error;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	if (intr_map_get_isrc(res_id) != NULL)
		panic("Attempt to double activation of resource id: %u\n",
		    res_id);
	intr_map_copy_map_data(res_id, &map_dev, &map_xref, &data);
	error = intr_resolve_irq(map_dev, map_xref, data, &isrc);
	if (error != 0) {
		free(data, M_INTRNG);
		/* XXX TODO DISCONNECTED PICs */
		/* if (error == EINVAL) return(0); */
		return (error);
	}
	intr_map_set_isrc(res_id, isrc);
	rman_set_virtual(res, data);
	return (PIC_ACTIVATE_INTR(isrc->isrc_dev, isrc, res, data));
}

int
intr_deactivate_irq(device_t dev, struct resource *res)
{
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;
	u_int res_id;
	int error;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	isrc = intr_map_get_isrc(res_id);
	if (isrc == NULL)
		panic("Attempt to deactivate non-active resource id: %u\n",
		    res_id);

	data = rman_get_virtual(res);
	error = PIC_DEACTIVATE_INTR(isrc->isrc_dev, isrc, res, data);
	intr_map_set_isrc(res_id, NULL);
	rman_set_virtual(res, NULL);
	free(data, M_INTRNG);
	return (error);
}

int
intr_setup_irq(device_t dev, struct resource *res, driver_filter_t filt,
    driver_intr_t hand, void *arg, int flags, void **cookiep)
{
	int error;
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;
	const char *name;
	u_int res_id;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	isrc = intr_map_get_isrc(res_id);
	if (isrc == NULL) {
		/* XXX TODO DISCONNECTED PICs */
		return (EINVAL);
	}

	data = rman_get_virtual(res);
	name = device_get_nameunit(dev);

#ifdef INTR_SOLO
	/*
	 * Standard handling is done through the MI interrupt framework.
	 * However, some interrupts could request solely their own special
	 * handling. This non-standard handling can be used for interrupt
	 * controllers without a handler (filter only), so in case interrupt
	 * controllers are chained, the MI interrupt framework is called only
	 * in the leaf controller.
	 *
	 * Note that the root interrupt controller routine is served as well,
	 * however in intr_irq_handler(), i.e. the main system dispatch
	 * routine.
	 */
	if (flags & INTR_SOLO && hand != NULL) {
		debugf("irq %u cannot solo on %s\n", isrc->isrc_irq, name);
		return (EINVAL);
	}

	if (flags & INTR_SOLO) {
		error = isrc_setup_filter(isrc, name, (intr_irq_filter_t *)filt,
		    arg, cookiep);
		debugf("irq %u setup filter error %d on %s\n", isrc->isrc_irq,
		    error, name);
	} else
#endif
	{
		error = isrc_add_handler(isrc, name, filt, hand, arg, flags,
		    cookiep);
		debugf("irq %u add handler error %d on %s\n", isrc->isrc_irq,
		    error, name);
	}
	if (error != 0)
		return (error);

	mtx_lock(&isrc_table_lock);
	error = PIC_SETUP_INTR(isrc->isrc_dev, isrc, res, data);
	if (error == 0) {
		isrc->isrc_handlers++;
		if (isrc->isrc_handlers == 1)
			PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
	}
	mtx_unlock(&isrc_table_lock);
	if (error != 0)
		intr_event_remove_handler(*cookiep);
	return (error);
}
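/*
 * Drivers do not call intr_activate_irq()/intr_setup_irq() directly; they
 * arrive here via the standard newbus path, e.g.:
 *
 *	sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
 *	    RF_ACTIVE);
 *	error = bus_setup_intr(dev, sc->sc_res,
 *	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, foo_intr, sc, &sc->sc_ih);
 *
 * ("foo_intr" is a hypothetical handler; the resource id carried by
 * sc->sc_res is the handle produced by intr_map_irq() below.)
 */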
int
intr_teardown_irq(device_t dev, struct resource *res, void *cookie)
{
	int error;
	struct intr_map_data *data;
	struct intr_irqsrc *isrc;
	u_int res_id;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	isrc = intr_map_get_isrc(res_id);
	if (isrc == NULL || isrc->isrc_handlers == 0)
		return (EINVAL);

	data = rman_get_virtual(res);

#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL) {
		if (isrc != cookie)
			return (EINVAL);

		mtx_lock(&isrc_table_lock);
		isrc->isrc_filter = NULL;
		isrc->isrc_arg = NULL;
		isrc->isrc_handlers = 0;
		PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
		PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data);
		isrc_update_name(isrc, NULL);
		mtx_unlock(&isrc_table_lock);
		return (0);
	}
#endif
	if (isrc != intr_handler_source(cookie))
		return (EINVAL);

	error = intr_event_remove_handler(cookie);
	if (error == 0) {
		mtx_lock(&isrc_table_lock);
		isrc->isrc_handlers--;
		if (isrc->isrc_handlers == 0)
			PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
		PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data);
		intrcnt_updatename(isrc);
		mtx_unlock(&isrc_table_lock);
	}
	return (error);
}

int
intr_describe_irq(device_t dev, struct resource *res, void *cookie,
    const char *descr)
{
	int error;
	struct intr_irqsrc *isrc;
	u_int res_id;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	isrc = intr_map_get_isrc(res_id);
	if (isrc == NULL || isrc->isrc_handlers == 0)
		return (EINVAL);
#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL) {
		if (isrc != cookie)
			return (EINVAL);

		mtx_lock(&isrc_table_lock);
		isrc_update_name(isrc, descr);
		mtx_unlock(&isrc_table_lock);
		return (0);
	}
#endif
	error = intr_event_describe_handler(isrc->isrc_event, cookie, descr);
	if (error == 0) {
		mtx_lock(&isrc_table_lock);
		intrcnt_updatename(isrc);
		mtx_unlock(&isrc_table_lock);
	}
	return (error);
}

#ifdef SMP
int
intr_bind_irq(device_t dev, struct resource *res, int cpu)
{
	struct intr_irqsrc *isrc;
	u_int res_id;

	KASSERT(rman_get_start(res) == rman_get_end(res),
	    ("%s: more interrupts in resource", __func__));

	res_id = (u_int)rman_get_start(res);
	isrc = intr_map_get_isrc(res_id);
	if (isrc == NULL || isrc->isrc_handlers == 0)
		return (EINVAL);
#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL)
		return (intr_isrc_assign_cpu(isrc, cpu));
#endif
	return (intr_event_bind(isrc->isrc_event, cpu));
}

/*
 * Return the CPU that the next interrupt source should use.
 * For now just returns the next CPU according to round-robin.
 */
u_int
intr_irq_next_cpu(u_int last_cpu, cpuset_t *cpumask)
{
	u_int cpu;

	KASSERT(!CPU_EMPTY(cpumask), ("%s: Empty CPU mask", __func__));
	if (!irq_assign_cpu || mp_ncpus == 1) {
		cpu = PCPU_GET(cpuid);

		if (CPU_ISSET(cpu, cpumask))
			return (curcpu);

		return (CPU_FFS(cpumask) - 1);
	}

	do {
		last_cpu++;
		if (last_cpu > mp_maxid)
			last_cpu = 0;
	} while (!CPU_ISSET(last_cpu, cpumask));
	return (last_cpu);
}
/*
 * Distribute all the interrupt sources among the available
 * CPUs once the APs have been launched.
 */
static void
intr_irq_shuffle(void *arg __unused)
{
	struct intr_irqsrc *isrc;
	u_int i;

	if (mp_ncpus == 1)
		return;

	mtx_lock(&isrc_table_lock);
	irq_assign_cpu = TRUE;
	for (i = 0; i < NIRQ; i++) {
		isrc = irq_sources[i];
		if (isrc == NULL || isrc->isrc_handlers == 0 ||
		    isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI))
			continue;

		if (isrc->isrc_event != NULL &&
		    isrc->isrc_flags & INTR_ISRCF_BOUND &&
		    isrc->isrc_event->ie_cpu != CPU_FFS(&isrc->isrc_cpu) - 1)
			panic("%s: CPU inconsistency", __func__);

		if ((isrc->isrc_flags & INTR_ISRCF_BOUND) == 0)
			CPU_ZERO(&isrc->isrc_cpu); /* start again */

		/*
		 * We are in a wicked position here if the following call fails
		 * for a bound ISRC. The best thing we can do is to clear
		 * isrc_cpu so the inconsistency with ie_cpu will be
		 * detectable.
		 */
		if (PIC_BIND_INTR(isrc->isrc_dev, isrc) != 0)
			CPU_ZERO(&isrc->isrc_cpu);
	}
	mtx_unlock(&isrc_table_lock);
}
SYSINIT(intr_irq_shuffle, SI_SUB_SMP, SI_ORDER_SECOND, intr_irq_shuffle, NULL);

#else
u_int
intr_irq_next_cpu(u_int current_cpu, cpuset_t *cpumask)
{

	return (PCPU_GET(cpuid));
}
#endif

/*
 * Allocate memory for a new intr_map_data structure.
 * Initialize common fields.
 */
struct intr_map_data *
intr_alloc_map_data(enum intr_map_data_type type, size_t len, int flags)
{
	struct intr_map_data *data;

	data = malloc(len, M_INTRNG, flags);
	data->type = type;
	data->len = len;
	return (data);
}

void
intr_free_intr_map_data(struct intr_map_data *data)
{

	free(data, M_INTRNG);
}

/*
 * Register a MSI/MSI-X interrupt controller.
 */
int
intr_msi_register(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	if (dev == NULL)
		return (EINVAL);
	pic = pic_create(dev, xref, FLAG_MSI);
	if (pic == NULL)
		return (ENOMEM);

	debugf("PIC %p registered for %s <dev %p, xref %jx>\n", pic,
	    device_get_nameunit(dev), dev, (uintmax_t)xref);
	return (0);
}

int
intr_alloc_msi(device_t pci, device_t child, intptr_t xref, int count,
    int maxcount, int *irqs)
{
	struct intr_irqsrc **isrc;
	struct intr_pic *pic;
	device_t pdev;
	struct intr_map_data_msi *msi;
	int err, i;

	pic = pic_lookup(NULL, xref, FLAG_MSI);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
	    ("%s: Found a non-MSI controller: %s", __func__,
	     device_get_name(pic->pic_dev)));

	isrc = malloc(sizeof(*isrc) * count, M_INTRNG, M_WAITOK);
	err = MSI_ALLOC_MSI(pic->pic_dev, child, count, maxcount, &pdev, isrc);
	if (err != 0) {
		free(isrc, M_INTRNG);
		return (err);
	}

	for (i = 0; i < count; i++) {
		msi = (struct intr_map_data_msi *)intr_alloc_map_data(
		    INTR_MAP_DATA_MSI, sizeof(*msi), M_WAITOK | M_ZERO);
		msi->isrc = isrc[i];
		irqs[i] = intr_map_irq(pic->pic_dev, xref,
		    (struct intr_map_data *)msi);
	}
	free(isrc, M_INTRNG);

	return (err);
}

int
intr_release_msi(device_t pci, device_t child, intptr_t xref, int count,
    int *irqs)
{
	struct intr_irqsrc **isrc;
	struct intr_pic *pic;
	struct intr_map_data_msi *msi;
	int i, err;

	pic = pic_lookup(NULL, xref, FLAG_MSI);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
	    ("%s: Found a non-MSI controller: %s", __func__,
	     device_get_name(pic->pic_dev)));

	isrc = malloc(sizeof(*isrc) * count, M_INTRNG, M_WAITOK);

	for (i = 0; i < count; i++) {
		msi = (struct intr_map_data_msi *)
		    intr_map_get_map_data(irqs[i]);
		KASSERT(msi->hdr.type == INTR_MAP_DATA_MSI,
		    ("%s: irq %d map data is not MSI", __func__,
		    irqs[i]));
		isrc[i] = msi->isrc;
	}

	err = MSI_RELEASE_MSI(pic->pic_dev, child, count, isrc);

	for (i = 0; i < count; i++) {
		if (isrc[i] != NULL)
			intr_unmap_irq(irqs[i]);
	}

	free(isrc, M_INTRNG);
	return (err);
}
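/*
 * The MSI/MSI-X helpers above and below are normally reached through a PCI
 * bridge driver, whose pcib interface methods wrap them (a hedged sketch;
 * the "foo" names are hypothetical):
 *
 *	static int
 *	foo_pcib_alloc_msi(device_t pcib, device_t child, int count,
 *	    int maxcount, int *irqs)
 *	{
 *		struct foo_softc *sc = device_get_softc(pcib);
 *
 *		return (intr_alloc_msi(pcib, child, sc->sc_msi_xref, count,
 *		    maxcount, irqs));
 *	}
 */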
int
intr_alloc_msix(device_t pci, device_t child, intptr_t xref, int *irq)
{
	struct intr_irqsrc *isrc;
	struct intr_pic *pic;
	device_t pdev;
	struct intr_map_data_msi *msi;
	int err;

	pic = pic_lookup(NULL, xref, FLAG_MSI);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
	    ("%s: Found a non-MSI controller: %s", __func__,
	     device_get_name(pic->pic_dev)));

	err = MSI_ALLOC_MSIX(pic->pic_dev, child, &pdev, &isrc);
	if (err != 0)
		return (err);

	msi = (struct intr_map_data_msi *)intr_alloc_map_data(
	    INTR_MAP_DATA_MSI, sizeof(*msi), M_WAITOK | M_ZERO);
	msi->isrc = isrc;
	*irq = intr_map_irq(pic->pic_dev, xref, (struct intr_map_data *)msi);
	return (0);
}

int
intr_release_msix(device_t pci, device_t child, intptr_t xref, int irq)
{
	struct intr_irqsrc *isrc;
	struct intr_pic *pic;
	struct intr_map_data_msi *msi;
	int err;

	pic = pic_lookup(NULL, xref, FLAG_MSI);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
	    ("%s: Found a non-MSI controller: %s", __func__,
	     device_get_name(pic->pic_dev)));

	msi = (struct intr_map_data_msi *)
	    intr_map_get_map_data(irq);
	KASSERT(msi->hdr.type == INTR_MAP_DATA_MSI,
	    ("%s: irq %d map data is not MSI", __func__,
	    irq));
	isrc = msi->isrc;
	if (isrc == NULL) {
		intr_unmap_irq(irq);
		return (EINVAL);
	}

	err = MSI_RELEASE_MSIX(pic->pic_dev, child, isrc);
	intr_unmap_irq(irq);

	return (err);
}

int
intr_map_msi(device_t pci, device_t child, intptr_t xref, int irq,
    uint64_t *addr, uint32_t *data)
{
	struct intr_irqsrc *isrc;
	struct intr_pic *pic;
	int err;

	pic = pic_lookup(NULL, xref, FLAG_MSI);
	if (pic == NULL)
		return (ESRCH);

	KASSERT((pic->pic_flags & FLAG_TYPE_MASK) == FLAG_MSI,
	    ("%s: Found a non-MSI controller: %s", __func__,
	     device_get_name(pic->pic_dev)));

	isrc = intr_map_get_isrc(irq);
	if (isrc == NULL)
		return (EINVAL);

	err = MSI_MAP_MSI(pic->pic_dev, child, isrc, addr, data);
	return (err);
}

void dosoftints(void);
void
dosoftints(void)
{
}

#ifdef SMP
/*
 * Init interrupt controller on another CPU.
 */
void
intr_pic_init_secondary(void)
{

	/*
	 * QQQ: Only root PIC is aware of other CPUs ???
	 */
	KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));

	//mtx_lock(&isrc_table_lock);
	PIC_INIT_SECONDARY(intr_irq_root_dev);
	//mtx_unlock(&isrc_table_lock);
}
#endif

#ifdef DDB
DB_SHOW_COMMAND(irqs, db_show_irqs)
{
	u_int i, irqsum;
	u_long num;
	struct intr_irqsrc *isrc;

	for (irqsum = 0, i = 0; i < NIRQ; i++) {
		isrc = irq_sources[i];
		if (isrc == NULL)
			continue;

		num = isrc->isrc_count != NULL ? isrc->isrc_count[0] : 0;
		db_printf("irq%-3u <%s>: cpu %02lx%s cnt %lu\n", i,
		    isrc->isrc_name, isrc->isrc_cpu.__bits[0],
		    isrc->isrc_flags & INTR_ISRCF_BOUND ? " (bound)" : "", num);
		irqsum += num;
	}
	db_printf("irq total %u\n", irqsum);
}
#endif
/*
 * Interrupt mapping table functions.
 *
 * Please, keep this part separate; it can be transformed into
 * an extension of standard resources.
 */
struct intr_map_entry
{
	device_t 		dev;
	intptr_t 		xref;
	struct intr_map_data 	*map_data;
	struct intr_irqsrc 	*isrc;
	/* XXX TODO DISCONNECTED PICs */
	/*int			flags */
};

/* XXX Convert irq_map[] to a dynamically expandable one. */
static struct intr_map_entry *irq_map[2 * NIRQ];
static int irq_map_count = nitems(irq_map);
static int irq_map_first_free_idx;
static struct mtx irq_map_lock;

static struct intr_irqsrc *
intr_map_get_isrc(u_int res_id)
{
	struct intr_irqsrc *isrc;

	mtx_lock(&irq_map_lock);
	if ((res_id >= irq_map_count) || (irq_map[res_id] == NULL)) {
		mtx_unlock(&irq_map_lock);
		return (NULL);
	}
	isrc = irq_map[res_id]->isrc;
	mtx_unlock(&irq_map_lock);
	return (isrc);
}

static void
intr_map_set_isrc(u_int res_id, struct intr_irqsrc *isrc)
{

	mtx_lock(&irq_map_lock);
	if ((res_id >= irq_map_count) || (irq_map[res_id] == NULL)) {
		mtx_unlock(&irq_map_lock);
		return;
	}
	irq_map[res_id]->isrc = isrc;
	mtx_unlock(&irq_map_lock);
}

/*
 * Get the intr_map_entry's map data pointer (not a copy).
 */
static struct intr_map_data *
intr_map_get_map_data(u_int res_id)
{
	struct intr_map_data *data;

	data = NULL;
	mtx_lock(&irq_map_lock);
	if (res_id >= irq_map_count || irq_map[res_id] == NULL)
		panic("Attempt to copy invalid resource id: %u\n", res_id);
	data = irq_map[res_id]->map_data;
	mtx_unlock(&irq_map_lock);

	return (data);
}

/*
 * Get a copy of intr_map_entry data.
 */
static void
intr_map_copy_map_data(u_int res_id, device_t *map_dev, intptr_t *map_xref,
    struct intr_map_data **data)
{
	size_t len;

	len = 0;
	mtx_lock(&irq_map_lock);
	if (res_id >= irq_map_count || irq_map[res_id] == NULL)
		panic("Attempt to copy invalid resource id: %u\n", res_id);
	if (irq_map[res_id]->map_data != NULL)
		len = irq_map[res_id]->map_data->len;
	mtx_unlock(&irq_map_lock);

	if (len == 0)
		*data = NULL;
	else
		*data = malloc(len, M_INTRNG, M_WAITOK | M_ZERO);
	mtx_lock(&irq_map_lock);
	if (irq_map[res_id] == NULL)
		panic("Attempt to copy invalid resource id: %u\n", res_id);
	if (len != 0) {
		if (len != irq_map[res_id]->map_data->len)
			panic("Resource id: %u has changed.\n", res_id);
		memcpy(*data, irq_map[res_id]->map_data, len);
	}
	*map_dev = irq_map[res_id]->dev;
	*map_xref = irq_map[res_id]->xref;
	mtx_unlock(&irq_map_lock);
}

/*
 * Allocate and fill a new entry in the irq_map table.
 */
u_int
intr_map_irq(device_t dev, intptr_t xref, struct intr_map_data *data)
{
	u_int i;
	struct intr_map_entry *entry;

	/* Prepare new entry first. */
	entry = malloc(sizeof(*entry), M_INTRNG, M_WAITOK | M_ZERO);

	entry->dev = dev;
	entry->xref = xref;
	entry->map_data = data;
	entry->isrc = NULL;

	mtx_lock(&irq_map_lock);
	for (i = irq_map_first_free_idx; i < irq_map_count; i++) {
		if (irq_map[i] == NULL) {
			irq_map[i] = entry;
			irq_map_first_free_idx = i + 1;
			mtx_unlock(&irq_map_lock);
			return (i);
		}
	}
	mtx_unlock(&irq_map_lock);

	/* XXX Expand irq_map table */
	panic("IRQ mapping table is full.");
}
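/*
 * Example producer of map entries (a hedged sketch): an FDT bus front end
 * allocates bus-specific map data, fills in the interrupt cells, and turns
 * it into the resource handle that later becomes the SYS_RES_IRQ range:
 *
 *	fdt_data = (struct intr_map_data_fdt *)intr_alloc_map_data(
 *	    INTR_MAP_DATA_FDT, size, M_WAITOK | M_ZERO);
 *	... fill in fdt_data->ncells / fdt_data->cells ...
 *	rid = intr_map_irq(NULL, iparent_xref,
 *	    (struct intr_map_data *)fdt_data);
 */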
/*
 * Remove and free a mapping entry.
 */
void
intr_unmap_irq(u_int res_id)
{
	struct intr_map_entry *entry;

	mtx_lock(&irq_map_lock);
	if ((res_id >= irq_map_count) || (irq_map[res_id] == NULL))
		panic("Attempt to unmap invalid resource id: %u\n", res_id);
	entry = irq_map[res_id];
	irq_map[res_id] = NULL;
	irq_map_first_free_idx = res_id;
	mtx_unlock(&irq_map_lock);
	intr_free_intr_map_data(entry->map_data);
	free(entry, M_INTRNG);
}

/*
 * Clone a mapping entry.
 */
u_int
intr_map_clone_irq(u_int old_res_id)
{
	device_t map_dev;
	intptr_t map_xref;
	struct intr_map_data *data;

	intr_map_copy_map_data(old_res_id, &map_dev, &map_xref, &data);
	return (intr_map_irq(map_dev, map_xref, data));
}

static void
intr_map_init(void *dummy __unused)
{

	mtx_init(&irq_map_lock, "intr map table", NULL, MTX_DEF);
}
SYSINIT(intr_map_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_map_init, NULL);