1 /*- 2 * Copyright (c) 2015-2016 Svatopluk Kraus 3 * Copyright (c) 2015-2016 Michal Meloun 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28 #include <sys/cdefs.h> 29 __FBSDID("$FreeBSD$"); 30 31 /* 32 * New-style Interrupt Framework 33 * 34 * TODO: - to support IPI (PPI) enabling on other CPUs if already started 35 * - to complete things for removable PICs 36 */ 37 38 #include "opt_acpi.h" 39 #include "opt_ddb.h" 40 #include "opt_platform.h" 41 42 #include <sys/param.h> 43 #include <sys/systm.h> 44 #include <sys/kernel.h> 45 #include <sys/syslog.h> 46 #include <sys/malloc.h> 47 #include <sys/proc.h> 48 #include <sys/queue.h> 49 #include <sys/bus.h> 50 #include <sys/interrupt.h> 51 #include <sys/conf.h> 52 #include <sys/cpuset.h> 53 #include <sys/rman.h> 54 #include <sys/sched.h> 55 #include <sys/smp.h> 56 #include <machine/atomic.h> 57 #include <machine/intr.h> 58 #include <machine/cpu.h> 59 #include <machine/smp.h> 60 #include <machine/stdarg.h> 61 62 #ifdef FDT 63 #include <dev/ofw/openfirm.h> 64 #include <dev/ofw/ofw_bus.h> 65 #include <dev/ofw/ofw_bus_subr.h> 66 #endif 67 68 #ifdef DDB 69 #include <ddb/ddb.h> 70 #endif 71 72 #include "pic_if.h" 73 74 #define INTRNAME_LEN (2*MAXCOMLEN + 1) 75 76 #ifdef DEBUG 77 #define debugf(fmt, args...) do { printf("%s(): ", __func__); \ 78 printf(fmt,##args); } while (0) 79 #else 80 #define debugf(fmt, args...) 81 #endif 82 83 MALLOC_DECLARE(M_INTRNG); 84 MALLOC_DEFINE(M_INTRNG, "intr", "intr interrupt handling"); 85 86 /* Main interrupt handler called from assembler -> 'hidden' for C code. */ 87 void intr_irq_handler(struct trapframe *tf); 88 89 /* Root interrupt controller stuff. */ 90 device_t intr_irq_root_dev; 91 static intr_irq_filter_t *irq_root_filter; 92 static void *irq_root_arg; 93 static u_int irq_root_ipicount; 94 95 /* Interrupt controller definition. */ 96 struct intr_pic { 97 SLIST_ENTRY(intr_pic) pic_next; 98 intptr_t pic_xref; /* hardware identification */ 99 device_t pic_dev; 100 }; 101 102 static struct mtx pic_list_lock; 103 static SLIST_HEAD(, intr_pic) pic_list; 104 105 static struct intr_pic *pic_lookup(device_t dev, intptr_t xref); 106 107 /* Interrupt source definition. 

/* Interrupt source definition. */
static struct mtx isrc_table_lock;
static struct intr_irqsrc *irq_sources[NIRQ];
u_int irq_next_free;

#define IRQ_INVALID	nitems(irq_sources)

/*
 * XXX - All stuff around struct intr_dev_data is considered as temporary
 * until a better place for storing struct intr_map_data is found.
 *
 * For now, there are two global interrupt number spaces:
 * <0, NIRQ)                      ... interrupts without config data
 *                                    managed in irq_sources[]
 * IRQ_DDATA_BASE + <0, 2 * NIRQ) ... interrupts with config data
 *                                    managed in intr_ddata_tab[]
 *
 * Read intr_ddata_lookup() to see how these spaces are worked with.
 * Note that, at this moment, each interrupt number from the second space
 * duplicates some number from the first space.  An interrupt number from
 * the first space can even be duplicated multiple times in the second
 * space.
 */
struct intr_dev_data {
        device_t		idd_dev;
        intptr_t		idd_xref;
        u_int			idd_irq;
        struct intr_map_data	idd_data;
        struct intr_irqsrc *	idd_isrc;
};

static struct intr_dev_data *intr_ddata_tab[2 * NIRQ];
static u_int intr_ddata_first_unused;

#define IRQ_DDATA_BASE	10000
CTASSERT(IRQ_DDATA_BASE > IRQ_INVALID);

#ifdef SMP
static boolean_t irq_assign_cpu = FALSE;
#endif

/*
 * - 2 counters for each I/O interrupt.
 * - MAXCPU counters for each IPI for SMP.
 */
#ifdef SMP
#define INTRCNT_COUNT	(NIRQ * 2 + INTR_IPI_COUNT * MAXCPU)
#else
#define INTRCNT_COUNT	(NIRQ * 2)
#endif

/* Data for MI statistics reporting. */
u_long intrcnt[INTRCNT_COUNT];
char intrnames[INTRCNT_COUNT * INTRNAME_LEN];
size_t sintrcnt = sizeof(intrcnt);
size_t sintrnames = sizeof(intrnames);
static u_int intrcnt_index;

/*
 * Interrupt framework initialization routine.
 */
static void
intr_irq_init(void *dummy __unused)
{

        SLIST_INIT(&pic_list);
        mtx_init(&pic_list_lock, "intr pic list", NULL, MTX_DEF);
        mtx_init(&isrc_table_lock, "intr isrc table", NULL, MTX_DEF);
}
SYSINIT(intr_irq_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_irq_init, NULL);

static void
intrcnt_setname(const char *name, int index)
{

        snprintf(intrnames + INTRNAME_LEN * index, INTRNAME_LEN, "%-*s",
            INTRNAME_LEN - 1, name);
}

/*
 * Update name for interrupt source with interrupt event.
 */
static void
intrcnt_updatename(struct intr_irqsrc *isrc)
{

        /* QQQ: What about stray counter name? */
        mtx_assert(&isrc_table_lock, MA_OWNED);
        intrcnt_setname(isrc->isrc_event->ie_fullname, isrc->isrc_index);
}

/*
 * Virtualization for interrupt source interrupt counter increment.
 */
static inline void
isrc_increment_count(struct intr_irqsrc *isrc)
{

        /*
         * A PPI source can fire on several CPUs concurrently, so its
         * counter must be updated atomically.
         */
        if (isrc->isrc_flags & INTR_ISRCF_PPI)
                atomic_add_long(&isrc->isrc_count[0], 1);
        else
                isrc->isrc_count[0]++;
}

/*
 * Virtualization for interrupt source interrupt stray counter increment.
 */
static inline void
isrc_increment_straycount(struct intr_irqsrc *isrc)
{

        isrc->isrc_count[1]++;
}
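
/*
 * Each I/O interrupt source owns two consecutive intrcnt[] slots:
 * isrc_count[0] is the delivery counter and isrc_count[1] the stray
 * counter.  isrc_update_name() below keeps intrnames[] in sync, which
 * is how the pair shows up in tools such as "vmstat -i".
 */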

/*
 * Virtualization for interrupt source interrupt name update.
 */
static void
isrc_update_name(struct intr_irqsrc *isrc, const char *name)
{
        char str[INTRNAME_LEN];

        mtx_assert(&isrc_table_lock, MA_OWNED);

        if (name != NULL) {
                snprintf(str, INTRNAME_LEN, "%s: %s", isrc->isrc_name, name);
                intrcnt_setname(str, isrc->isrc_index);
                snprintf(str, INTRNAME_LEN, "stray %s: %s", isrc->isrc_name,
                    name);
                intrcnt_setname(str, isrc->isrc_index + 1);
        } else {
                snprintf(str, INTRNAME_LEN, "%s:", isrc->isrc_name);
                intrcnt_setname(str, isrc->isrc_index);
                snprintf(str, INTRNAME_LEN, "stray %s:", isrc->isrc_name);
                intrcnt_setname(str, isrc->isrc_index + 1);
        }
}

/*
 * Virtualization for interrupt source interrupt counters setup.
 */
static void
isrc_setup_counters(struct intr_irqsrc *isrc)
{
        u_int index;

        /*
         * XXX - it does not work well with removable controllers and
         *       interrupt sources !!!
         */
        index = atomic_fetchadd_int(&intrcnt_index, 2);
        isrc->isrc_index = index;
        isrc->isrc_count = &intrcnt[index];
        isrc_update_name(isrc, NULL);
}

/*
 * Virtualization for interrupt source interrupt counters release.
 */
static void
isrc_release_counters(struct intr_irqsrc *isrc)
{

        panic("%s: not implemented", __func__);
}

#ifdef SMP
/*
 * Virtualization for interrupt source IPI counters setup.
 */
u_long *
intr_ipi_setup_counters(const char *name)
{
        u_int index, i;
        char str[INTRNAME_LEN];

        index = atomic_fetchadd_int(&intrcnt_index, MAXCPU);
        for (i = 0; i < MAXCPU; i++) {
                snprintf(str, INTRNAME_LEN, "cpu%d:%s", i, name);
                intrcnt_setname(str, index + i);
        }
        return (&intrcnt[index]);
}
#endif

/*
 * Main interrupt dispatch handler.  It's called straight
 * from the assembler, where the CPU interrupt is served.
 */
void
intr_irq_handler(struct trapframe *tf)
{
        struct trapframe * oldframe;
        struct thread * td;

        KASSERT(irq_root_filter != NULL, ("%s: no filter", __func__));

        PCPU_INC(cnt.v_intr);
        critical_enter();
        td = curthread;
        oldframe = td->td_intr_frame;
        td->td_intr_frame = tf;
        irq_root_filter(irq_root_arg);
        td->td_intr_frame = oldframe;
        critical_exit();
}

/*
 * Interrupt controller dispatch function for interrupts.  It should
 * be called straight from the interrupt controller, when the associated
 * interrupt source is learned.
 */
int
intr_isrc_dispatch(struct intr_irqsrc *isrc, struct trapframe *tf)
{

        KASSERT(isrc != NULL, ("%s: no source", __func__));

        isrc_increment_count(isrc);

#ifdef INTR_SOLO
        if (isrc->isrc_filter != NULL) {
                int error;
                error = isrc->isrc_filter(isrc->isrc_arg, tf);
                PIC_POST_FILTER(isrc->isrc_dev, isrc);
                if (error == FILTER_HANDLED)
                        return (0);
        } else
#endif
        if (isrc->isrc_event != NULL) {
                if (intr_event_handle(isrc->isrc_event, tf) == 0)
                        return (0);
        }

        isrc_increment_straycount(isrc);
        return (EINVAL);
}
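
/*
 * Example: a PIC driver's root filter typically decodes the hardware IRQ
 * number and hands off here (a minimal sketch; "sc", the "isrcs" array and
 * READ_IAR() are hypothetical driver state and register access):
 *
 *	irq = READ_IAR(sc);
 *	if (intr_isrc_dispatch(&sc->isrcs[irq].isrc,
 *	    curthread->td_intr_frame) != 0)
 *		device_printf(sc->dev, "stray irq %u\n", irq);
 */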

/*
 * Allocate a unique interrupt number (resource handle) for an interrupt
 * source.
 *
 * There could be various strategies for allocating a free interrupt number
 * (resource handle) for a new interrupt source.
 *
 * 1. Handles are always allocated forward, so handles are not recycled
 *    immediately.  However, if only one free handle is left, it is
 *    reused constantly...
 */
static inline int
isrc_alloc_irq(struct intr_irqsrc *isrc)
{
        u_int maxirqs, irq;

        mtx_assert(&isrc_table_lock, MA_OWNED);

        maxirqs = nitems(irq_sources);
        if (irq_next_free >= maxirqs)
                return (ENOSPC);

        for (irq = irq_next_free; irq < maxirqs; irq++) {
                if (irq_sources[irq] == NULL)
                        goto found;
        }
        for (irq = 0; irq < irq_next_free; irq++) {
                if (irq_sources[irq] == NULL)
                        goto found;
        }

        irq_next_free = maxirqs;
        return (ENOSPC);

found:
        isrc->isrc_irq = irq;
        irq_sources[irq] = isrc;

        irq_next_free = irq + 1;
        if (irq_next_free >= maxirqs)
                irq_next_free = 0;
        return (0);
}

/*
 * Free the unique interrupt number (resource handle) of an interrupt source.
 */
static inline int
isrc_free_irq(struct intr_irqsrc *isrc)
{

        mtx_assert(&isrc_table_lock, MA_OWNED);

        if (isrc->isrc_irq >= nitems(irq_sources))
                return (EINVAL);
        if (irq_sources[isrc->isrc_irq] != isrc)
                return (EINVAL);

        irq_sources[isrc->isrc_irq] = NULL;
        isrc->isrc_irq = IRQ_INVALID;	/* just to be safe */
        return (0);
}

/*
 * Lookup an interrupt source by interrupt number (resource handle).
 */
static inline struct intr_irqsrc *
isrc_lookup(u_int irq)
{

        if (irq < nitems(irq_sources))
                return (irq_sources[irq]);
        return (NULL);
}

/*
 * Initialize an interrupt source and register it into the global interrupt
 * table.
 */
int
intr_isrc_register(struct intr_irqsrc *isrc, device_t dev, u_int flags,
    const char *fmt, ...)
{
        int error;
        va_list ap;

        bzero(isrc, sizeof(struct intr_irqsrc));
        isrc->isrc_dev = dev;
        isrc->isrc_irq = IRQ_INVALID;	/* just to be safe */
        isrc->isrc_flags = flags;

        va_start(ap, fmt);
        vsnprintf(isrc->isrc_name, INTR_ISRC_NAMELEN, fmt, ap);
        va_end(ap);

        mtx_lock(&isrc_table_lock);
        error = isrc_alloc_irq(isrc);
        if (error != 0) {
                mtx_unlock(&isrc_table_lock);
                return (error);
        }
        /*
         * Setup interrupt counters, but not for IPI sources.  Those are
         * setup later and only for used ones (up to INTR_IPI_COUNT) to not
         * exhaust our counter pool.
         */
        if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
                isrc_setup_counters(isrc);
        mtx_unlock(&isrc_table_lock);
        return (0);
}

/*
 * Deregister an interrupt source from the global interrupt table.
 */
int
intr_isrc_deregister(struct intr_irqsrc *isrc)
{
        int error;

        mtx_lock(&isrc_table_lock);
        if ((isrc->isrc_flags & INTR_ISRCF_IPI) == 0)
                isrc_release_counters(isrc);
        error = isrc_free_irq(isrc);
        mtx_unlock(&isrc_table_lock);
        return (error);
}
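
/*
 * Example: a PIC driver usually embeds struct intr_irqsrc at the start of
 * its per-interrupt structure and registers each source during attach
 * (a minimal sketch; "sc" and its members are hypothetical driver state):
 *
 *	for (irq = 0; irq < sc->nirqs; irq++) {
 *		error = intr_isrc_register(&sc->isrcs[irq].isrc, sc->dev,
 *		    0, "%s,i%u", device_get_nameunit(sc->dev), irq);
 *		if (error != 0)
 *			return (error);
 *	}
 */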

#ifdef SMP
/*
 * A support function for a PIC to decide whether the provided ISRC should
 * be initialized on the given CPU.  The logic of the INTR_ISRCF_BOUND flag
 * and the isrc_cpu member of struct intr_irqsrc is the following:
 *
 *     If INTR_ISRCF_BOUND is set, the ISRC should be initialized only on
 *     CPUs set in isrc_cpu.  If not, the ISRC should be initialized on
 *     every CPU and isrc_cpu is kept consistent with that.  Thus isrc_cpu
 *     is always correct.
 */
bool
intr_isrc_init_on_cpu(struct intr_irqsrc *isrc, u_int cpu)
{

        if (isrc->isrc_handlers == 0)
                return (false);
        if ((isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI)) == 0)
                return (false);
        if (isrc->isrc_flags & INTR_ISRCF_BOUND)
                return (CPU_ISSET(cpu, &isrc->isrc_cpu));

        CPU_SET(cpu, &isrc->isrc_cpu);
        return (true);
}
#endif

static struct intr_dev_data *
intr_ddata_alloc(u_int extsize)
{
        struct intr_dev_data *ddata;

        ddata = malloc(sizeof(*ddata) + extsize, M_INTRNG, M_WAITOK | M_ZERO);

        mtx_lock(&isrc_table_lock);
        if (intr_ddata_first_unused >= nitems(intr_ddata_tab)) {
                mtx_unlock(&isrc_table_lock);
                free(ddata, M_INTRNG);
                return (NULL);
        }
        intr_ddata_tab[intr_ddata_first_unused] = ddata;
        ddata->idd_irq = IRQ_DDATA_BASE + intr_ddata_first_unused++;
        mtx_unlock(&isrc_table_lock);
        return (ddata);
}

static struct intr_irqsrc *
intr_ddata_lookup(u_int irq, struct intr_map_data **datap)
{
        int error;
        struct intr_irqsrc *isrc;
        struct intr_dev_data *ddata;

        isrc = isrc_lookup(irq);
        if (isrc != NULL) {
                if (datap != NULL)
                        *datap = NULL;
                return (isrc);
        }

        if (irq < IRQ_DDATA_BASE)
                return (NULL);

        irq -= IRQ_DDATA_BASE;
        if (irq >= nitems(intr_ddata_tab))
                return (NULL);

        ddata = intr_ddata_tab[irq];
        if (ddata == NULL)
                return (NULL);	/* slot not allocated yet */
        if (ddata->idd_isrc == NULL) {
                error = intr_map_irq(ddata->idd_dev, ddata->idd_xref,
                    &ddata->idd_data, &irq);
                if (error != 0)
                        return (NULL);
                ddata->idd_isrc = isrc_lookup(irq);
        }
        if (datap != NULL)
                *datap = &ddata->idd_data;
        return (ddata->idd_isrc);
}

#ifdef DEV_ACPI
/*
 * Map an interrupt source according to ACPI info into the framework.  If
 * such a mapping does not exist, create it.  Return the unique interrupt
 * number (resource handle) associated with the mapped interrupt source.
 */
u_int
intr_acpi_map_irq(device_t dev, u_int irq, enum intr_polarity pol,
    enum intr_trigger trig)
{
        struct intr_dev_data *ddata;

        ddata = intr_ddata_alloc(0);
        if (ddata == NULL)
                return (0xFFFFFFFF);	/* no space left */

        ddata->idd_dev = dev;
        ddata->idd_data.type = INTR_MAP_DATA_ACPI;
        ddata->idd_data.acpi.irq = irq;
        ddata->idd_data.acpi.pol = pol;
        ddata->idd_data.acpi.trig = trig;
        return (ddata->idd_irq);
}
#endif
#ifdef FDT
/*
 * Map an interrupt source according to FDT data into the framework.  If
 * such a mapping does not exist, create it.  Return the unique interrupt
 * number (resource handle) associated with the mapped interrupt source.
 */
u_int
intr_fdt_map_irq(phandle_t node, pcell_t *cells, u_int ncells)
{
        struct intr_dev_data *ddata;
        u_int cellsize;

        cellsize = ncells * sizeof(*cells);
        ddata = intr_ddata_alloc(cellsize);
        if (ddata == NULL)
                return (0xFFFFFFFF);	/* no space left */

        ddata->idd_xref = (intptr_t)node;
        ddata->idd_data.type = INTR_MAP_DATA_FDT;
        ddata->idd_data.fdt.ncells = ncells;
        ddata->idd_data.fdt.cells = (pcell_t *)(ddata + 1);
        memcpy(ddata->idd_data.fdt.cells, cells, cellsize);
        return (ddata->idd_irq);
}
#endif
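
/*
 * Example: OFW bus code translating an "interrupts" property typically
 * allocates a handle this way and hands it to the child as a SYS_RES_IRQ
 * resource (a sketch; "iparent", "cells" and "ncells" come from
 * hypothetical device-tree parsing):
 *
 *	irq = intr_fdt_map_irq(iparent, cells, ncells);
 *	bus_set_resource(child, SYS_RES_IRQ, rid, irq, 1);
 */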

#ifdef INTR_SOLO
/*
 * Setup a filter on the interrupt source.
 */
static int
isrc_setup_filter(struct intr_irqsrc *isrc, const char *name,
    intr_irq_filter_t *filter, void *arg, void **cookiep)
{

        if (filter == NULL)
                return (EINVAL);

        mtx_lock(&isrc_table_lock);
        /*
         * Make sure that we do not mix the two ways
         * how we handle interrupt sources.
         */
        if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
                mtx_unlock(&isrc_table_lock);
                return (EBUSY);
        }
        isrc->isrc_filter = filter;
        isrc->isrc_arg = arg;
        isrc_update_name(isrc, name);
        mtx_unlock(&isrc_table_lock);

        *cookiep = isrc;
        return (0);
}
#endif

/*
 * Interrupt source pre_ithread method for MI interrupt framework.
 */
static void
intr_isrc_pre_ithread(void *arg)
{
        struct intr_irqsrc *isrc = arg;

        PIC_PRE_ITHREAD(isrc->isrc_dev, isrc);
}

/*
 * Interrupt source post_ithread method for MI interrupt framework.
 */
static void
intr_isrc_post_ithread(void *arg)
{
        struct intr_irqsrc *isrc = arg;

        PIC_POST_ITHREAD(isrc->isrc_dev, isrc);
}

/*
 * Interrupt source post_filter method for MI interrupt framework.
 */
static void
intr_isrc_post_filter(void *arg)
{
        struct intr_irqsrc *isrc = arg;

        PIC_POST_FILTER(isrc->isrc_dev, isrc);
}

/*
 * Interrupt source assign_cpu method for MI interrupt framework.
 */
static int
intr_isrc_assign_cpu(void *arg, int cpu)
{
#ifdef SMP
        struct intr_irqsrc *isrc = arg;
        int error;

        if (isrc->isrc_dev != intr_irq_root_dev)
                return (EINVAL);

        mtx_lock(&isrc_table_lock);
        if (cpu == NOCPU) {
                CPU_ZERO(&isrc->isrc_cpu);
                isrc->isrc_flags &= ~INTR_ISRCF_BOUND;
        } else {
                CPU_SETOF(cpu, &isrc->isrc_cpu);
                isrc->isrc_flags |= INTR_ISRCF_BOUND;
        }

        /*
         * In the NOCPU case, it's up to the PIC to either leave the ISRC
         * on the same CPU, re-balance it to another CPU, or enable it on
         * more CPUs.  However, the PIC is expected to change isrc_cpu
         * appropriately to keep us well informed if the call is successful.
         */
        if (irq_assign_cpu) {
                error = PIC_BIND_INTR(isrc->isrc_dev, isrc);
                if (error) {
                        CPU_ZERO(&isrc->isrc_cpu);
                        mtx_unlock(&isrc_table_lock);
                        return (error);
                }
        }
        mtx_unlock(&isrc_table_lock);
        return (0);
#else
        return (EOPNOTSUPP);
#endif
}

/*
 * Create an interrupt event for the interrupt source.
 */
static int
isrc_event_create(struct intr_irqsrc *isrc)
{
        struct intr_event *ie;
        int error;

        error = intr_event_create(&ie, isrc, 0, isrc->isrc_irq,
            intr_isrc_pre_ithread, intr_isrc_post_ithread, intr_isrc_post_filter,
            intr_isrc_assign_cpu, "%s:", isrc->isrc_name);
        if (error)
                return (error);

        mtx_lock(&isrc_table_lock);
        /*
         * Make sure that we do not mix the two ways
         * how we handle interrupt sources.  Let the contested event win,
         * i.e. if we lost a race with a concurrent creator, drop our event
         * and report success; only a mix with a solo filter is an error.
         */
#ifdef INTR_SOLO
        if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
#else
        if (isrc->isrc_event != NULL) {
#endif
                mtx_unlock(&isrc_table_lock);
                intr_event_destroy(ie);
                return (isrc->isrc_event == NULL ? EBUSY : 0);
        }
        isrc->isrc_event = ie;
        mtx_unlock(&isrc_table_lock);

        return (0);
}
#ifdef notyet
/*
 * Destroy the interrupt event of an interrupt source.
 */
static void
isrc_event_destroy(struct intr_irqsrc *isrc)
{
        struct intr_event *ie;

        mtx_lock(&isrc_table_lock);
        ie = isrc->isrc_event;
        isrc->isrc_event = NULL;
        mtx_unlock(&isrc_table_lock);

        if (ie != NULL)
                intr_event_destroy(ie);
}
#endif
/*
 * Add a handler to the interrupt source.
 */
static int
isrc_add_handler(struct intr_irqsrc *isrc, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, void **cookiep)
{
        int error;

        if (isrc->isrc_event == NULL) {
                error = isrc_event_create(isrc);
                if (error)
                        return (error);
        }

        error = intr_event_add_handler(isrc->isrc_event, name, filter, handler,
            arg, intr_priority(flags), flags, cookiep);
        if (error == 0) {
                mtx_lock(&isrc_table_lock);
                intrcnt_updatename(isrc);
                mtx_unlock(&isrc_table_lock);
        }

        return (error);
}

/*
 * Lookup an interrupt controller with the list lock held.
 */
static inline struct intr_pic *
pic_lookup_locked(device_t dev, intptr_t xref)
{
        struct intr_pic *pic;

        mtx_assert(&pic_list_lock, MA_OWNED);

        if (dev == NULL && xref == 0)
                return (NULL);

        /* Note that pic->pic_dev is never NULL on a registered PIC. */
        SLIST_FOREACH(pic, &pic_list, pic_next) {
                if (dev == NULL) {
                        if (xref == pic->pic_xref)
                                return (pic);
                } else if (xref == 0 || pic->pic_xref == 0) {
                        if (dev == pic->pic_dev)
                                return (pic);
                } else if (xref == pic->pic_xref && dev == pic->pic_dev)
                        return (pic);
        }
        return (NULL);
}

/*
 * Lookup an interrupt controller.
 */
static struct intr_pic *
pic_lookup(device_t dev, intptr_t xref)
{
        struct intr_pic *pic;

        mtx_lock(&pic_list_lock);
        pic = pic_lookup_locked(dev, xref);
        mtx_unlock(&pic_list_lock);
        return (pic);
}

/*
 * Create an interrupt controller.
 */
static struct intr_pic *
pic_create(device_t dev, intptr_t xref)
{
        struct intr_pic *pic;

        mtx_lock(&pic_list_lock);
        pic = pic_lookup_locked(dev, xref);
        if (pic != NULL) {
                mtx_unlock(&pic_list_lock);
                return (pic);
        }
        pic = malloc(sizeof(*pic), M_INTRNG, M_NOWAIT | M_ZERO);
        if (pic == NULL) {
                /* M_NOWAIT allocation may fail; do not dereference NULL. */
                mtx_unlock(&pic_list_lock);
                return (NULL);
        }
        pic->pic_xref = xref;
        pic->pic_dev = dev;
        SLIST_INSERT_HEAD(&pic_list, pic, pic_next);
        mtx_unlock(&pic_list_lock);

        return (pic);
}
#ifdef notyet
/*
 * Destroy an interrupt controller.
 */
static void
pic_destroy(device_t dev, intptr_t xref)
{
        struct intr_pic *pic;

        mtx_lock(&pic_list_lock);
        pic = pic_lookup_locked(dev, xref);
        if (pic == NULL) {
                mtx_unlock(&pic_list_lock);
                return;
        }
        SLIST_REMOVE(&pic_list, pic, intr_pic, pic_next);
        mtx_unlock(&pic_list_lock);

        free(pic, M_INTRNG);
}
#endif
/*
 * Register an interrupt controller.
 */
int
intr_pic_register(device_t dev, intptr_t xref)
{
        struct intr_pic *pic;

        if (dev == NULL)
                return (EINVAL);
        pic = pic_create(dev, xref);
        if (pic == NULL)
                return (ENOMEM);

        debugf("PIC %p registered for %s <dev %p, xref %jx>\n", pic,
            device_get_nameunit(dev), dev, (uintmax_t)xref);
        return (0);
}

/*
 * Unregister an interrupt controller.
 */
int
intr_pic_deregister(device_t dev, intptr_t xref)
{

        panic("%s: not implemented", __func__);
}
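
/*
 * Example: a PIC driver's attach method typically registers itself and,
 * when it sits at the top of the interrupt tree, claims the root with its
 * own dispatch filter (a minimal sketch; "my_pic_filter", "sc" and the
 * ipicount value are hypothetical):
 *
 *	xref = (intptr_t)OF_xref_from_node(ofw_bus_get_node(dev));
 *	if (intr_pic_register(dev, xref) != 0)
 *		return (ENXIO);
 *	if (is_root_controller)
 *		return (intr_pic_claim_root(dev, xref, my_pic_filter, sc,
 *		    INTR_IPI_COUNT));
 */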

/*
 * Mark an interrupt controller (itself) as the root one.
 *
 * Note that only an interrupt controller can really know its position
 * in the interrupt controllers' tree.  So a root PIC must claim itself
 * as the root.
 *
 * In the FDT case, according to ePAPR approved version 1.1 from
 * 08 April 2011, page 30:
 *     "The root of the interrupt tree is determined when traversal
 *     of the interrupt tree reaches an interrupt controller node without
 *     an interrupts property and thus no explicit interrupt parent."
 */
int
intr_pic_claim_root(device_t dev, intptr_t xref, intr_irq_filter_t *filter,
    void *arg, u_int ipicount)
{

        if (pic_lookup(dev, xref) == NULL) {
                device_printf(dev, "not registered\n");
                return (EINVAL);
        }
        if (filter == NULL) {
                device_printf(dev, "filter missing\n");
                return (EINVAL);
        }

        /*
         * Only one interrupt controller can be on the root for now.
         * Note that we further suppose that there is no threaded interrupt
         * routine (handler) on the root.  See intr_irq_handler().
         */
        if (intr_irq_root_dev != NULL) {
                device_printf(dev, "another root already set\n");
                return (EBUSY);
        }

        intr_irq_root_dev = dev;
        irq_root_filter = filter;
        irq_root_arg = arg;
        irq_root_ipicount = ipicount;

        debugf("irq root set to %s\n", device_get_nameunit(dev));
        return (0);
}

int
intr_map_irq(device_t dev, intptr_t xref, struct intr_map_data *data,
    u_int *irqp)
{
        int error;
        struct intr_irqsrc *isrc;
        struct intr_pic *pic;

        if (data == NULL)
                return (EINVAL);

        pic = pic_lookup(dev, xref);
        if (pic == NULL || pic->pic_dev == NULL)
                return (ESRCH);

        error = PIC_MAP_INTR(pic->pic_dev, data, &isrc);
        if (error == 0)
                *irqp = isrc->isrc_irq;
        return (error);
}

int
intr_alloc_irq(device_t dev, struct resource *res)
{
        struct intr_map_data *data;
        struct intr_irqsrc *isrc;

        KASSERT(rman_get_start(res) == rman_get_end(res),
            ("%s: more interrupts in resource", __func__));

        isrc = intr_ddata_lookup(rman_get_start(res), &data);
        if (isrc == NULL)
                return (EINVAL);

        return (PIC_ALLOC_INTR(isrc->isrc_dev, isrc, res, data));
}

int
intr_release_irq(device_t dev, struct resource *res)
{
        struct intr_map_data *data;
        struct intr_irqsrc *isrc;

        KASSERT(rman_get_start(res) == rman_get_end(res),
            ("%s: more interrupts in resource", __func__));

        isrc = intr_ddata_lookup(rman_get_start(res), &data);
        if (isrc == NULL)
                return (EINVAL);

        return (PIC_RELEASE_INTR(isrc->isrc_dev, isrc, res, data));
}
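
/*
 * Example: leaf device drivers do not call intr_setup_irq() below
 * directly; they reach it through the standard newbus path (a sketch
 * with hypothetical softc members and handler):
 *
 *	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
 *	    RF_ACTIVE);
 *	error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC |
 *	    INTR_MPSAFE, NULL, my_intr_handler, sc, &sc->intrhand);
 */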

int
intr_setup_irq(device_t dev, struct resource *res, driver_filter_t filt,
    driver_intr_t hand, void *arg, int flags, void **cookiep)
{
        int error;
        struct intr_map_data *data;
        struct intr_irqsrc *isrc;
        const char *name;

        KASSERT(rman_get_start(res) == rman_get_end(res),
            ("%s: more interrupts in resource", __func__));

        isrc = intr_ddata_lookup(rman_get_start(res), &data);
        if (isrc == NULL)
                return (EINVAL);

        name = device_get_nameunit(dev);

#ifdef INTR_SOLO
        /*
         * Standard handling is done through the MI interrupt framework.
         * However, some interrupts may request solely their own special
         * handling.  This non-standard handling can be used for interrupt
         * controllers without a handler (filter only), so in case interrupt
         * controllers are chained, the MI interrupt framework is called
         * only in the leaf controller.
         *
         * Note that the root interrupt controller routine is served as
         * well, however in intr_irq_handler(), i.e. the main system
         * dispatch routine.
         */
        if (flags & INTR_SOLO && hand != NULL) {
                debugf("irq %u cannot solo on %s\n", isrc->isrc_irq, name);
                return (EINVAL);
        }

        if (flags & INTR_SOLO) {
                error = isrc_setup_filter(isrc, name,
                    (intr_irq_filter_t *)filt, arg, cookiep);
                debugf("irq %u setup filter error %d on %s\n",
                    isrc->isrc_irq, error, name);
        } else
#endif
        {
                error = isrc_add_handler(isrc, name, filt, hand, arg, flags,
                    cookiep);
                debugf("irq %u add handler error %d on %s\n",
                    isrc->isrc_irq, error, name);
        }
        if (error != 0)
                return (error);

        mtx_lock(&isrc_table_lock);
        error = PIC_SETUP_INTR(isrc->isrc_dev, isrc, res, data);
        if (error == 0) {
                isrc->isrc_handlers++;
                if (isrc->isrc_handlers == 1)
                        PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
        }
        mtx_unlock(&isrc_table_lock);
        if (error != 0)
                intr_event_remove_handler(*cookiep);
        return (error);
}

int
intr_teardown_irq(device_t dev, struct resource *res, void *cookie)
{
        int error;
        struct intr_map_data *data;
        struct intr_irqsrc *isrc;

        KASSERT(rman_get_start(res) == rman_get_end(res),
            ("%s: more interrupts in resource", __func__));

        isrc = intr_ddata_lookup(rman_get_start(res), &data);
        if (isrc == NULL || isrc->isrc_handlers == 0)
                return (EINVAL);

#ifdef INTR_SOLO
        if (isrc->isrc_filter != NULL) {
                if (isrc != cookie)
                        return (EINVAL);

                mtx_lock(&isrc_table_lock);
                isrc->isrc_filter = NULL;
                isrc->isrc_arg = NULL;
                isrc->isrc_handlers = 0;
                PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
                PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data);
                isrc_update_name(isrc, NULL);
                mtx_unlock(&isrc_table_lock);
                return (0);
        }
#endif
        if (isrc != intr_handler_source(cookie))
                return (EINVAL);

        error = intr_event_remove_handler(cookie);
        if (error == 0) {
                mtx_lock(&isrc_table_lock);
                isrc->isrc_handlers--;
                if (isrc->isrc_handlers == 0)
                        PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
                PIC_TEARDOWN_INTR(isrc->isrc_dev, isrc, res, data);
                intrcnt_updatename(isrc);
                mtx_unlock(&isrc_table_lock);
        }
        return (error);
}

int
intr_describe_irq(device_t dev, struct resource *res, void *cookie,
    const char *descr)
{
        int error;
        struct intr_irqsrc *isrc;

        KASSERT(rman_get_start(res) == rman_get_end(res),
            ("%s: more interrupts in resource", __func__));

        isrc = intr_ddata_lookup(rman_get_start(res), NULL);
        if (isrc == NULL || isrc->isrc_handlers == 0)
                return (EINVAL);
#ifdef INTR_SOLO
        if (isrc->isrc_filter != NULL) {
                if (isrc != cookie)
                        return (EINVAL);

                mtx_lock(&isrc_table_lock);
                isrc_update_name(isrc, descr);
                mtx_unlock(&isrc_table_lock);
                return (0);
        }
#endif
        error = intr_event_describe_handler(isrc->isrc_event, cookie, descr);
        if (error == 0) {
                mtx_lock(&isrc_table_lock);
                intrcnt_updatename(isrc);
                mtx_unlock(&isrc_table_lock);
        }
        return (error);
}

#ifdef SMP
int
intr_bind_irq(device_t dev, struct resource *res, int cpu)
{
        struct intr_irqsrc *isrc;

        KASSERT(rman_get_start(res) == rman_get_end(res),
            ("%s: more interrupts in resource", __func__));

        isrc = intr_ddata_lookup(rman_get_start(res), NULL);
        if (isrc == NULL || isrc->isrc_handlers == 0)
                return (EINVAL);
#ifdef INTR_SOLO
        if (isrc->isrc_filter != NULL)
                return (intr_isrc_assign_cpu(isrc, cpu));
#endif
        return (intr_event_bind(isrc->isrc_event, cpu));
}

/*
 * Return the CPU that the next interrupt source should use.
 * For now just returns the next CPU according to round-robin.
 */
u_int
intr_irq_next_cpu(u_int last_cpu, cpuset_t *cpumask)
{

        if (!irq_assign_cpu || mp_ncpus == 1)
                return (PCPU_GET(cpuid));

        do {
                last_cpu++;
                if (last_cpu > mp_maxid)
                        last_cpu = 0;
        } while (!CPU_ISSET(last_cpu, cpumask));
        return (last_cpu);
}

/*
 * Distribute all the interrupt sources among the available
 * CPUs once the APs have been launched.
 */
static void
intr_irq_shuffle(void *arg __unused)
{
        struct intr_irqsrc *isrc;
        u_int i;

        if (mp_ncpus == 1)
                return;

        mtx_lock(&isrc_table_lock);
        irq_assign_cpu = TRUE;
        for (i = 0; i < NIRQ; i++) {
                isrc = irq_sources[i];
                if (isrc == NULL || isrc->isrc_handlers == 0 ||
                    isrc->isrc_flags & (INTR_ISRCF_PPI | INTR_ISRCF_IPI))
                        continue;

                if (isrc->isrc_event != NULL &&
                    isrc->isrc_flags & INTR_ISRCF_BOUND &&
                    isrc->isrc_event->ie_cpu != CPU_FFS(&isrc->isrc_cpu) - 1)
                        panic("%s: CPU inconsistency", __func__);

                if ((isrc->isrc_flags & INTR_ISRCF_BOUND) == 0)
                        CPU_ZERO(&isrc->isrc_cpu);	/* start again */

                /*
                 * We are in a wicked position here if the following call
                 * fails for a bound ISRC.  The best thing we can do is to
                 * clear isrc_cpu so the inconsistency with ie_cpu will be
                 * detectable.
                 */
                if (PIC_BIND_INTR(isrc->isrc_dev, isrc) != 0)
                        CPU_ZERO(&isrc->isrc_cpu);
        }
        mtx_unlock(&isrc_table_lock);
}
SYSINIT(intr_irq_shuffle, SI_SUB_SMP, SI_ORDER_SECOND, intr_irq_shuffle, NULL);

#else
u_int
intr_irq_next_cpu(u_int current_cpu, cpuset_t *cpumask)
{

        return (PCPU_GET(cpuid));
}
#endif

void dosoftints(void);
void
dosoftints(void)
{
}

#ifdef SMP
/*
 * Init an interrupt controller on another CPU.
 */
void
intr_pic_init_secondary(void)
{

        /*
         * QQQ: Only the root PIC is aware of other CPUs ???
         */
        KASSERT(intr_irq_root_dev != NULL, ("%s: no root attached", __func__));

        //mtx_lock(&isrc_table_lock);
        PIC_INIT_SECONDARY(intr_irq_root_dev);
        //mtx_unlock(&isrc_table_lock);
}
#endif

#ifdef DDB
DB_SHOW_COMMAND(irqs, db_show_irqs)
{
        u_int i;
        u_long irqsum, num;
        struct intr_irqsrc *isrc;

        for (irqsum = 0, i = 0; i < NIRQ; i++) {
                isrc = irq_sources[i];
                if (isrc == NULL)
                        continue;

                num = isrc->isrc_count != NULL ? isrc->isrc_count[0] : 0;
                db_printf("irq%-3u <%s>: cpu %02lx%s cnt %lu\n", i,
                    isrc->isrc_name, isrc->isrc_cpu.__bits[0],
                    isrc->isrc_flags & INTR_ISRCF_BOUND ? " (bound)" : "",
                    num);
                irqsum += num;
        }
        db_printf("irq total %lu\n", irqsum);
}
#endif