/*-
 * Copyright (c) 2012-2014 Jakub Wojciech Klama <jceel@FreeBSD.org>.
 * Copyright (c) 2015 Svatopluk Kraus
 * Copyright (c) 2015 Michal Meloun
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * New-style Interrupt Framework
 *
 * TODO: - to support IPI (PPI) enabling on other CPUs if already started
 *       - to complete things for removable PICs
 */

#include "opt_ddb.h"
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <machine/atomic.h>
#include <machine/intr.h>
#include <machine/cpu.h>
#include <machine/smp.h>
#include <machine/stdarg.h>

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include "pic_if.h"

#define INTRNAME_LEN	(2*MAXCOMLEN + 1)

#ifdef DEBUG
#define debugf(fmt, args...) do { printf("%s(): ", __func__);	\
    printf(fmt,##args); } while (0)
#else
#define debugf(fmt, args...)
#endif

MALLOC_DECLARE(M_INTRNG);
MALLOC_DEFINE(M_INTRNG, "intr", "intr interrupt handling");

/* Main interrupt handler called from assembler -> 'hidden' for C code. */
void intr_irq_handler(struct trapframe *tf);

/* Root interrupt controller stuff. */
static struct intr_irqsrc *irq_root_isrc;
static device_t irq_root_dev;
static intr_irq_filter_t *irq_root_filter;
static void *irq_root_arg;
static u_int irq_root_ipicount;

/* Interrupt controller definition. */
struct intr_pic {
	SLIST_ENTRY(intr_pic) pic_next;
	intptr_t pic_xref;	/* hardware identification */
	device_t pic_dev;
};

static struct mtx pic_list_lock;
static SLIST_HEAD(, intr_pic) pic_list;

static struct intr_pic *pic_lookup(device_t dev, intptr_t xref);

/* Interrupt source definition. */
static struct mtx isrc_table_lock;
static struct intr_irqsrc *irq_sources[NIRQ];
u_int irq_next_free;

#define IRQ_INVALID	nitems(irq_sources)

#ifdef SMP
static boolean_t irq_assign_cpu = FALSE;

static struct intr_irqsrc ipi_sources[INTR_IPI_COUNT];
static u_int ipi_next_num;
#endif

/*
 * - 2 counters for each I/O interrupt (one actual, one stray).
 * - MAXCPU counters for each IPI for SMP.
 */
#ifdef SMP
#define INTRCNT_COUNT	(NIRQ * 2 + INTR_IPI_COUNT * MAXCPU)
#else
#define INTRCNT_COUNT	(NIRQ * 2)
#endif

/* Data for MI statistics reporting. */
u_long intrcnt[INTRCNT_COUNT];
char intrnames[INTRCNT_COUNT * INTRNAME_LEN];
size_t sintrcnt = sizeof(intrcnt);
size_t sintrnames = sizeof(intrnames);
static u_int intrcnt_index;

/*
 * Interrupt framework initialization routine.
 */
static void
intr_irq_init(void *dummy __unused)
{

	SLIST_INIT(&pic_list);
	mtx_init(&pic_list_lock, "intr pic list", NULL, MTX_DEF);
	mtx_init(&isrc_table_lock, "intr isrc table", NULL, MTX_DEF);
}
SYSINIT(intr_irq_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_irq_init, NULL);

static void
intrcnt_setname(const char *name, int index)
{

	snprintf(intrnames + INTRNAME_LEN * index, INTRNAME_LEN, "%-*s",
	    INTRNAME_LEN - 1, name);
}

/*
 * Update name for interrupt source with interrupt event.
 */
static void
intrcnt_updatename(struct intr_irqsrc *isrc)
{

	/* QQQ: What about stray counter name? */
	mtx_assert(&isrc_table_lock, MA_OWNED);
	intrcnt_setname(isrc->isrc_event->ie_fullname, isrc->isrc_index);
}

/*
 * Virtualization for interrupt source interrupt counter increment.
 */
static inline void
isrc_increment_count(struct intr_irqsrc *isrc)
{

	/*
	 * XXX - It should be atomic for PPI interrupts. It was proven that
	 *       the loss is easily measurable for timer PPI interrupts.
	 */
	isrc->isrc_count[0]++;
	/*atomic_add_long(&isrc->isrc_count[0], 1);*/
}

/*
 * Virtualization for interrupt source interrupt stray counter increment.
 */
static inline void
isrc_increment_straycount(struct intr_irqsrc *isrc)
{

	isrc->isrc_count[1]++;
}

/*
 * Virtualization for interrupt source interrupt name update.
 */
static void
isrc_update_name(struct intr_irqsrc *isrc, const char *name)
{
	char str[INTRNAME_LEN];

	mtx_assert(&isrc_table_lock, MA_OWNED);

	if (name != NULL) {
		snprintf(str, INTRNAME_LEN, "%s: %s", isrc->isrc_name, name);
		intrcnt_setname(str, isrc->isrc_index);
		snprintf(str, INTRNAME_LEN, "stray %s: %s", isrc->isrc_name,
		    name);
		intrcnt_setname(str, isrc->isrc_index + 1);
	} else {
		snprintf(str, INTRNAME_LEN, "%s:", isrc->isrc_name);
		intrcnt_setname(str, isrc->isrc_index);
		snprintf(str, INTRNAME_LEN, "stray %s:", isrc->isrc_name);
		intrcnt_setname(str, isrc->isrc_index + 1);
	}
}

/*
 * Virtualization for interrupt source interrupt counters setup.
 */
static void
isrc_setup_counters(struct intr_irqsrc *isrc)
{
	u_int index;

	/*
	 * XXX - it does not work well with removable controllers and
	 *       interrupt sources !!!
	 */
	index = atomic_fetchadd_int(&intrcnt_index, 2);
	isrc->isrc_index = index;
	isrc->isrc_count = &intrcnt[index];
	isrc_update_name(isrc, NULL);
}

#ifdef SMP
/*
 * Virtualization for interrupt source IPI counter increment.
 */
static inline void
isrc_increment_ipi_count(struct intr_irqsrc *isrc, u_int cpu)
{

	isrc->isrc_count[cpu]++;
}

/*
 * Virtualization for interrupt source IPI counters setup.
 */
static void
isrc_setup_ipi_counters(struct intr_irqsrc *isrc, const char *name)
{
	u_int index, i;
	char str[INTRNAME_LEN];

	index = atomic_fetchadd_int(&intrcnt_index, MAXCPU);
	isrc->isrc_index = index;
	isrc->isrc_count = &intrcnt[index];

	for (i = 0; i < MAXCPU; i++) {
		/*
		 * We do not expect any race in the IPI case here,
		 * so locking is not needed.
		 */
		snprintf(str, INTRNAME_LEN, "cpu%d:%s", i, name);
		intrcnt_setname(str, index + i);
	}
}
#endif

/*
 * Main interrupt dispatch handler. It is called straight
 * from the assembler, where the CPU interrupt is served.
 */
void
intr_irq_handler(struct trapframe *tf)
{
	struct trapframe * oldframe;
	struct thread * td;

	KASSERT(irq_root_filter != NULL, ("%s: no filter", __func__));

	PCPU_INC(cnt.v_intr);
	critical_enter();
	td = curthread;
	oldframe = td->td_intr_frame;
	td->td_intr_frame = tf;
	irq_root_filter(irq_root_arg);
	td->td_intr_frame = oldframe;
	critical_exit();
}

/*
 * Interrupt controller dispatch function for interrupts. It should
 * be called straight from the interrupt controller, when the associated
 * interrupt source is learned.
 */
void
intr_irq_dispatch(struct intr_irqsrc *isrc, struct trapframe *tf)
{

	KASSERT(isrc != NULL, ("%s: no source", __func__));

	isrc_increment_count(isrc);

#ifdef INTR_SOLO
	if (isrc->isrc_filter != NULL) {
		int error;
		error = isrc->isrc_filter(isrc->isrc_arg, tf);
		PIC_POST_FILTER(isrc->isrc_dev, isrc);
		if (error == FILTER_HANDLED)
			return;
	} else
#endif
	if (isrc->isrc_event != NULL) {
		if (intr_event_handle(isrc->isrc_event, tf) == 0)
			return;
	}

	isrc_increment_straycount(isrc);
	PIC_DISABLE_SOURCE(isrc->isrc_dev, isrc);

	device_printf(isrc->isrc_dev, "stray irq <%s> disabled\n",
	    isrc->isrc_name);
}
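
/*
 * For illustration, a leaf PIC driver's own filter would typically decode
 * the hardware cause register and hand the pending source over to the
 * framework roughly as sketched below. This is hypothetical driver code;
 * my_pic_softc, MY_PIC_READ_CAUSE, and the sc->isrcs[] array of
 * struct intr_irqsrc pointers are not part of this file. The trapframe
 * is taken from curthread->td_intr_frame, which intr_irq_handler() above
 * has set up before the root filter chain runs.
 *
 *	static int
 *	my_pic_filter(void *arg)
 *	{
 *		struct my_pic_softc *sc = arg;
 *		u_int hwirq = MY_PIC_READ_CAUSE(sc);
 *
 *		intr_irq_dispatch(sc->isrcs[hwirq],
 *		    curthread->td_intr_frame);
 *		return (FILTER_HANDLED);
 *	}
 */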

/*
 * Allocate interrupt source.
 */
static struct intr_irqsrc *
isrc_alloc(u_int type, u_int extsize)
{
	struct intr_irqsrc *isrc;

	isrc = malloc(sizeof(*isrc) + extsize, M_INTRNG, M_WAITOK | M_ZERO);
	isrc->isrc_irq = IRQ_INVALID;	/* just to be safe */
	isrc->isrc_type = type;
	isrc->isrc_nspc_type = INTR_IRQ_NSPC_NONE;
	isrc->isrc_trig = INTR_TRIGGER_CONFORM;
	isrc->isrc_pol = INTR_POLARITY_CONFORM;
	CPU_ZERO(&isrc->isrc_cpu);
	return (isrc);
}

/*
 * Free interrupt source.
 */
static void
isrc_free(struct intr_irqsrc *isrc)
{

	free(isrc, M_INTRNG);
}

void
intr_irq_set_name(struct intr_irqsrc *isrc, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(isrc->isrc_name, INTR_ISRC_NAMELEN, fmt, ap);
	va_end(ap);
}

/*
 * Allocate a unique interrupt number (resource handle) for an interrupt
 * source.
 *
 * There could be various strategies for allocating a free interrupt number
 * (resource handle) for a new interrupt source.
 *
 * 1. Handles are always allocated forward, so handles are not recycled
 *    immediately. However, if only one free handle is left, it is reused
 *    constantly...
 */
static int
isrc_alloc_irq_locked(struct intr_irqsrc *isrc)
{
	u_int maxirqs, irq;

	mtx_assert(&isrc_table_lock, MA_OWNED);

	maxirqs = nitems(irq_sources);
	if (irq_next_free >= maxirqs)
		return (ENOSPC);

	for (irq = irq_next_free; irq < maxirqs; irq++) {
		if (irq_sources[irq] == NULL)
			goto found;
	}
	for (irq = 0; irq < irq_next_free; irq++) {
		if (irq_sources[irq] == NULL)
			goto found;
	}

	irq_next_free = maxirqs;
	return (ENOSPC);

found:
	isrc->isrc_irq = irq;
	irq_sources[irq] = isrc;

	intr_irq_set_name(isrc, "irq%u", irq);
	isrc_setup_counters(isrc);

	irq_next_free = irq + 1;
	if (irq_next_free >= maxirqs)
		irq_next_free = 0;
	return (0);
}

#ifdef notyet
/*
 * Free unique interrupt number (resource handle) from interrupt source.
 */
static int
isrc_free_irq(struct intr_irqsrc *isrc)
{
	u_int maxirqs;

	mtx_assert(&isrc_table_lock, MA_NOTOWNED);

	maxirqs = nitems(irq_sources);
	if (isrc->isrc_irq >= maxirqs)
		return (EINVAL);

	mtx_lock(&isrc_table_lock);
	if (irq_sources[isrc->isrc_irq] != isrc) {
		mtx_unlock(&isrc_table_lock);
		return (EINVAL);
	}

	irq_sources[isrc->isrc_irq] = NULL;
	isrc->isrc_irq = IRQ_INVALID;	/* just to be safe */
	mtx_unlock(&isrc_table_lock);

	return (0);
}
#endif

/*
 * Lookup interrupt source by interrupt number (resource handle).
 */
static struct intr_irqsrc *
isrc_lookup(u_int irq)
{

	if (irq < nitems(irq_sources))
		return (irq_sources[irq]);
	return (NULL);
}

/*
 * Lookup interrupt source by namespace description.
 */
static struct intr_irqsrc *
isrc_namespace_lookup(device_t dev, uint16_t type, uint16_t num)
{
	u_int irq;
	struct intr_irqsrc *isrc;

	mtx_assert(&isrc_table_lock, MA_OWNED);

	for (irq = 0; irq < nitems(irq_sources); irq++) {
		isrc = irq_sources[irq];
		if (isrc != NULL && isrc->isrc_dev == dev &&
		    isrc->isrc_nspc_type == type && isrc->isrc_nspc_num == num)
			return (isrc);
	}
	return (NULL);
}

/*
 * Map interrupt source according to namespace into framework. If such mapping
 * does not exist, create it. Return unique interrupt number (resource handle)
 * associated with mapped interrupt source.
 */
u_int
intr_namespace_map_irq(device_t dev, uint16_t type, uint16_t num)
{
	struct intr_irqsrc *isrc, *new_isrc;
	int error;

	new_isrc = isrc_alloc(INTR_ISRCT_NAMESPACE, 0);

	mtx_lock(&isrc_table_lock);
	isrc = isrc_namespace_lookup(dev, type, num);
	if (isrc != NULL) {
		mtx_unlock(&isrc_table_lock);
		isrc_free(new_isrc);
		return (isrc->isrc_irq);	/* already mapped */
	}

	error = isrc_alloc_irq_locked(new_isrc);
	if (error != 0) {
		mtx_unlock(&isrc_table_lock);
		isrc_free(new_isrc);
		return (IRQ_INVALID);	/* no space left */
	}

	new_isrc->isrc_dev = dev;
	new_isrc->isrc_nspc_type = type;
	new_isrc->isrc_nspc_num = num;
	mtx_unlock(&isrc_table_lock);

	return (new_isrc->isrc_irq);
}
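
/*
 * A minimal usage sketch: platform code that numbers interrupts in a
 * private namespace can translate a hardware interrupt number into a
 * framework resource handle along these lines. This is hypothetical
 * code; the namespace type INTR_IRQ_NSPC_PLAIN is assumed to be
 * provided by machine/intr.h alongside INTR_IRQ_NSPC_NONE and
 * INTR_IRQ_NSPC_IPI used in this file.
 *
 *	u_int irq;
 *
 *	irq = intr_namespace_map_irq(pic_dev, INTR_IRQ_NSPC_PLAIN, hwirq);
 *	if (irq == IRQ_INVALID)
 *		return (ENOMEM);
 *	... hand 'irq' to the bus as a SYS_RES_IRQ resource ...
 */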

#ifdef FDT
/*
 * Lookup interrupt source by FDT description.
 */
static struct intr_irqsrc *
isrc_fdt_lookup(intptr_t xref, pcell_t *cells, u_int ncells)
{
	u_int irq, cellsize;
	struct intr_irqsrc *isrc;

	mtx_assert(&isrc_table_lock, MA_OWNED);

	cellsize = ncells * sizeof(*cells);
	for (irq = 0; irq < nitems(irq_sources); irq++) {
		isrc = irq_sources[irq];
		if (isrc != NULL && isrc->isrc_type == INTR_ISRCT_FDT &&
		    isrc->isrc_xref == xref && isrc->isrc_ncells == ncells &&
		    memcmp(isrc->isrc_cells, cells, cellsize) == 0)
			return (isrc);
	}
	return (NULL);
}

/*
 * Map interrupt source according to FDT data into framework. If such mapping
 * does not exist, create it. Return unique interrupt number (resource handle)
 * associated with mapped interrupt source.
 */
u_int
intr_fdt_map_irq(phandle_t node, pcell_t *cells, u_int ncells)
{
	struct intr_irqsrc *isrc, *new_isrc;
	u_int cellsize;
	intptr_t xref;
	int error;

	xref = (intptr_t)node;	/* It's so simple for now. */

	cellsize = ncells * sizeof(*cells);
	new_isrc = isrc_alloc(INTR_ISRCT_FDT, cellsize);

	mtx_lock(&isrc_table_lock);
	isrc = isrc_fdt_lookup(xref, cells, ncells);
	if (isrc != NULL) {
		mtx_unlock(&isrc_table_lock);
		isrc_free(new_isrc);
		return (isrc->isrc_irq);	/* already mapped */
	}

	error = isrc_alloc_irq_locked(new_isrc);
	if (error != 0) {
		mtx_unlock(&isrc_table_lock);
		isrc_free(new_isrc);
		return (IRQ_INVALID);	/* no space left */
	}

	new_isrc->isrc_xref = xref;
	new_isrc->isrc_ncells = ncells;
	memcpy(new_isrc->isrc_cells, cells, cellsize);
	mtx_unlock(&isrc_table_lock);

	return (new_isrc->isrc_irq);
}
#endif
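
/*
 * For illustration, an FDT-enabled bus would typically feed the raw cells
 * of a device's "interrupts" property into intr_fdt_map_irq() together
 * with the node of the interrupt parent. A simplified, hypothetical
 * sketch (error handling omitted; OF_getencprop() and
 * ofw_bus_find_iparent() come from the dev/ofw headers included above):
 *
 *	phandle_t iparent;
 *	pcell_t cells[8];
 *	int ncells;
 *	u_int irq;
 *
 *	ncells = OF_getencprop(node, "interrupts", cells,
 *	    sizeof(cells)) / sizeof(cells[0]);
 *	iparent = ofw_bus_find_iparent(node);
 *	irq = intr_fdt_map_irq(iparent, cells, ncells);
 */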

/*
 * Register interrupt source into interrupt controller.
 */
static int
isrc_register(struct intr_irqsrc *isrc)
{
	struct intr_pic *pic;
	boolean_t is_percpu;
	int error;

	if (isrc->isrc_flags & INTR_ISRCF_REGISTERED)
		return (0);

	if (isrc->isrc_dev == NULL) {
		pic = pic_lookup(NULL, isrc->isrc_xref);
		if (pic == NULL || pic->pic_dev == NULL)
			return (ESRCH);
		isrc->isrc_dev = pic->pic_dev;
	}

	error = PIC_REGISTER(isrc->isrc_dev, isrc, &is_percpu);
	if (error != 0)
		return (error);

	mtx_lock(&isrc_table_lock);
	isrc->isrc_flags |= INTR_ISRCF_REGISTERED;
	if (is_percpu)
		isrc->isrc_flags |= INTR_ISRCF_PERCPU;
	isrc_update_name(isrc, NULL);
	mtx_unlock(&isrc_table_lock);
	return (0);
}

#ifdef INTR_SOLO
/*
 * Set up filter for interrupt source.
 */
static int
isrc_setup_filter(struct intr_irqsrc *isrc, const char *name,
    intr_irq_filter_t *filter, void *arg, void **cookiep)
{

	if (filter == NULL)
		return (EINVAL);

	mtx_lock(&isrc_table_lock);
	/*
	 * Make sure that we do not mix the two ways
	 * how we handle interrupt sources.
	 */
	if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
		mtx_unlock(&isrc_table_lock);
		return (EBUSY);
	}
	isrc->isrc_filter = filter;
	isrc->isrc_arg = arg;
	isrc_update_name(isrc, name);
	mtx_unlock(&isrc_table_lock);

	*cookiep = isrc;
	return (0);
}
#endif
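
/*
 * A hedged sketch of how a solo (filter-only) handler is installed when
 * the kernel is built with INTR_SOLO: the INTR_SOLO flag is simply passed
 * to intr_irq_add_handler() below, which routes the request here instead
 * of creating an interrupt event. Hypothetical driver code; my_solo_filter
 * is assumed to have the intr_irq_filter_t signature, hence the cast:
 *
 *	error = intr_irq_add_handler(dev,
 *	    (driver_filter_t *)my_solo_filter, NULL, sc, irq,
 *	    INTR_TYPE_MISC | INTR_SOLO, &sc->ih);
 */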

/*
 * Interrupt source pre_ithread method for MI interrupt framework.
 */
static void
intr_isrc_pre_ithread(void *arg)
{
	struct intr_irqsrc *isrc = arg;

	PIC_PRE_ITHREAD(isrc->isrc_dev, isrc);
}

/*
 * Interrupt source post_ithread method for MI interrupt framework.
 */
static void
intr_isrc_post_ithread(void *arg)
{
	struct intr_irqsrc *isrc = arg;

	PIC_POST_ITHREAD(isrc->isrc_dev, isrc);
}

/*
 * Interrupt source post_filter method for MI interrupt framework.
 */
static void
intr_isrc_post_filter(void *arg)
{
	struct intr_irqsrc *isrc = arg;

	PIC_POST_FILTER(isrc->isrc_dev, isrc);
}

/*
 * Interrupt source assign_cpu method for MI interrupt framework.
 */
static int
intr_isrc_assign_cpu(void *arg, int cpu)
{
#ifdef SMP
	struct intr_irqsrc *isrc = arg;
	int error;

	if (isrc->isrc_dev != irq_root_dev)
		return (EINVAL);

	mtx_lock(&isrc_table_lock);
	if (cpu == NOCPU) {
		CPU_ZERO(&isrc->isrc_cpu);
		isrc->isrc_flags &= ~INTR_ISRCF_BOUND;
	} else {
		CPU_SETOF(cpu, &isrc->isrc_cpu);
		isrc->isrc_flags |= INTR_ISRCF_BOUND;
	}

	/*
	 * In the NOCPU case, it's up to the PIC to either leave the ISRC on
	 * the same CPU, re-balance it to another CPU, or enable it on more
	 * CPUs. However, the PIC is expected to change isrc_cpu appropriately
	 * to keep us well informed if the call is successful.
	 */
	if (irq_assign_cpu) {
		error = PIC_BIND(isrc->isrc_dev, isrc);
		if (error) {
			CPU_ZERO(&isrc->isrc_cpu);
			mtx_unlock(&isrc_table_lock);
			return (error);
		}
	}
	mtx_unlock(&isrc_table_lock);
	return (0);
#else
	return (EOPNOTSUPP);
#endif
}

/*
 * Create interrupt event for interrupt source.
 */
static int
isrc_event_create(struct intr_irqsrc *isrc)
{
	struct intr_event *ie;
	int error;

	error = intr_event_create(&ie, isrc, 0, isrc->isrc_irq,
	    intr_isrc_pre_ithread, intr_isrc_post_ithread,
	    intr_isrc_post_filter, intr_isrc_assign_cpu, "%s:",
	    isrc->isrc_name);
	if (error)
		return (error);

	mtx_lock(&isrc_table_lock);
	/*
	 * Make sure that we do not mix the two ways
	 * how we handle interrupt sources. Let the contested event win.
	 */
	if (isrc->isrc_filter != NULL || isrc->isrc_event != NULL) {
		mtx_unlock(&isrc_table_lock);
		intr_event_destroy(ie);
		return (isrc->isrc_event != NULL ? EBUSY : 0);
	}
	isrc->isrc_event = ie;
	mtx_unlock(&isrc_table_lock);

	return (0);
}

#ifdef notyet
/*
 * Destroy interrupt event for interrupt source.
 */
static void
isrc_event_destroy(struct intr_irqsrc *isrc)
{
	struct intr_event *ie;

	mtx_lock(&isrc_table_lock);
	ie = isrc->isrc_event;
	isrc->isrc_event = NULL;
	mtx_unlock(&isrc_table_lock);

	if (ie != NULL)
		intr_event_destroy(ie);
}
#endif

/*
 * Add handler to interrupt source.
 */
static int
isrc_add_handler(struct intr_irqsrc *isrc, const char *name,
    driver_filter_t filter, driver_intr_t handler, void *arg,
    enum intr_type flags, void **cookiep)
{
	int error;

	if (isrc->isrc_event == NULL) {
		error = isrc_event_create(isrc);
		if (error)
			return (error);
	}

	error = intr_event_add_handler(isrc->isrc_event, name, filter, handler,
	    arg, intr_priority(flags), flags, cookiep);
	if (error == 0) {
		mtx_lock(&isrc_table_lock);
		intrcnt_updatename(isrc);
		mtx_unlock(&isrc_table_lock);
	}

	return (error);
}

/*
 * Lookup interrupt controller locked.
 */
static struct intr_pic *
pic_lookup_locked(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	mtx_assert(&pic_list_lock, MA_OWNED);

	SLIST_FOREACH(pic, &pic_list, pic_next) {
		if (pic->pic_xref != xref)
			continue;
		if (pic->pic_xref != 0 || pic->pic_dev == dev)
			return (pic);
	}
	return (NULL);
}

/*
 * Lookup interrupt controller.
 */
static struct intr_pic *
pic_lookup(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	mtx_lock(&pic_list_lock);
	pic = pic_lookup_locked(dev, xref);
	mtx_unlock(&pic_list_lock);

	return (pic);
}

/*
 * Create interrupt controller.
 */
static struct intr_pic *
pic_create(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	mtx_lock(&pic_list_lock);
	pic = pic_lookup_locked(dev, xref);
	if (pic != NULL) {
		mtx_unlock(&pic_list_lock);
		return (pic);
	}
	pic = malloc(sizeof(*pic), M_INTRNG, M_NOWAIT | M_ZERO);
	if (pic == NULL) {
		mtx_unlock(&pic_list_lock);
		return (NULL);
	}
	pic->pic_xref = xref;
	pic->pic_dev = dev;
	SLIST_INSERT_HEAD(&pic_list, pic, pic_next);
	mtx_unlock(&pic_list_lock);

	return (pic);
}

#ifdef notyet
/*
 * Destroy interrupt controller.
 */
static void
pic_destroy(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	mtx_lock(&pic_list_lock);
	pic = pic_lookup_locked(dev, xref);
	if (pic == NULL) {
		mtx_unlock(&pic_list_lock);
		return;
	}
	SLIST_REMOVE(&pic_list, pic, intr_pic, pic_next);
	mtx_unlock(&pic_list_lock);

	free(pic, M_INTRNG);
}
#endif

/*
 * Register interrupt controller.
 */
int
intr_pic_register(device_t dev, intptr_t xref)
{
	struct intr_pic *pic;

	pic = pic_create(dev, xref);
	if (pic == NULL)
		return (ENOMEM);
	if (pic->pic_dev != dev)
		return (EINVAL);	/* XXX it could be many things. */

	debugf("PIC %p registered for %s <xref %jx>\n", pic,
	    device_get_nameunit(dev), (uintmax_t)xref);
	return (0);
}
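
/*
 * Typically a PIC driver registers itself near the end of its attach
 * method, using its OFW node xref in the FDT case. A hypothetical sketch
 * continuing the my_pic driver from above (OF_xref_from_node() and
 * ofw_bus_get_node() come from the dev/ofw headers):
 *
 *	static int
 *	my_pic_attach(device_t dev)
 *	{
 *		...
 *		if (intr_pic_register(dev,
 *		    OF_xref_from_node(ofw_bus_get_node(dev))) != 0)
 *			return (ENXIO);
 *		return (0);
 *	}
 */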

/*
 * Unregister interrupt controller.
 */
int
intr_pic_unregister(device_t dev, intptr_t xref)
{

	panic("%s: not implemented", __func__);
}

/*
 * Mark interrupt controller (itself) as a root one.
 *
 * Note that only an interrupt controller can really know its position
 * in the interrupt controller tree. So a root PIC must claim itself
 * as a root.
 *
 * In the FDT case, according to ePAPR approved version 1.1 from
 * 08 April 2011, page 30:
 *    "The root of the interrupt tree is determined when traversal
 *     of the interrupt tree reaches an interrupt controller node without
 *     an interrupts property and thus no explicit interrupt parent."
 */
int
intr_pic_claim_root(device_t dev, intptr_t xref, intr_irq_filter_t *filter,
    void *arg, u_int ipicount)
{
	int error;
	u_int rootirq;

	if (pic_lookup(dev, xref) == NULL) {
		device_printf(dev, "not registered\n");
		return (EINVAL);
	}
	if (filter == NULL) {
		device_printf(dev, "filter missing\n");
		return (EINVAL);
	}

	/*
	 * Only one interrupt controller can be on the root for now.
	 * Note that we further suppose that there is no threaded interrupt
	 * routine (handler) on the root. See intr_irq_handler().
	 */
	if (irq_root_dev != NULL) {
		device_printf(dev, "another root already set\n");
		return (EBUSY);
	}

	rootirq = intr_namespace_map_irq(device_get_parent(dev), 0, 0);
	if (rootirq == IRQ_INVALID) {
		device_printf(dev, "failed to map an irq for the root pic\n");
		return (ENOMEM);
	}

	/* Create the isrc. */
	irq_root_isrc = isrc_lookup(rootirq);

	/* XXX "register" with the PIC. We are the "pic" here, so fake it. */
	irq_root_isrc->isrc_flags |= INTR_ISRCF_REGISTERED;

	error = intr_irq_add_handler(device_get_parent(dev),
	    (void*)filter, NULL, arg, rootirq, INTR_TYPE_CLK, NULL);
	if (error != 0) {
		device_printf(dev, "failed to install root pic handler\n");
		return (error);
	}
	irq_root_dev = dev;
	irq_root_filter = filter;
	irq_root_arg = arg;
	irq_root_ipicount = ipicount;

	debugf("irq root set to %s\n", device_get_nameunit(dev));
	return (0);
}
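
/*
 * Sketch of a root PIC claiming itself once attached and registered,
 * continuing the hypothetical my_pic driver from above. MY_PIC_NIPIS is
 * an assumed driver constant, and my_pic_filter takes the softc as its
 * only argument, matching the irq_root_filter(irq_root_arg) call in
 * intr_irq_handler():
 *
 *	error = intr_pic_claim_root(dev,
 *	    OF_xref_from_node(ofw_bus_get_node(dev)), my_pic_filter, sc,
 *	    MY_PIC_NIPIS);
 */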

int
intr_irq_add_handler(device_t dev, driver_filter_t filt, driver_intr_t hand,
    void *arg, u_int irq, int flags, void **cookiep)
{
	const char *name;
	struct intr_irqsrc *isrc;
	int error;

	name = device_get_nameunit(dev);

#ifdef INTR_SOLO
	/*
	 * Standard handling is done through the MI interrupt framework.
	 * However, some interrupts may request their own special handling.
	 * This non-standard handling can be used for interrupt controllers
	 * without a handler (filter only), so when interrupt controllers
	 * are chained, the MI interrupt framework is entered only in the
	 * leaf controller.
	 *
	 * Note that the root interrupt controller routine is served this
	 * way as well, however in intr_irq_handler(), i.e. the main system
	 * dispatch routine.
	 */
	if (flags & INTR_SOLO && hand != NULL) {
		debugf("irq %u cannot solo on %s\n", irq, name);
		return (EINVAL);
	}
#endif

	isrc = isrc_lookup(irq);
	if (isrc == NULL) {
		debugf("irq %u without source on %s\n", irq, name);
		return (EINVAL);
	}

	error = isrc_register(isrc);
	if (error != 0) {
		debugf("irq %u map error %d on %s\n", irq, error, name);
		return (error);
	}

#ifdef INTR_SOLO
	if (flags & INTR_SOLO) {
		error = isrc_setup_filter(isrc, name,
		    (intr_irq_filter_t *)filt, arg, cookiep);
		debugf("irq %u setup filter error %d on %s\n", irq, error,
		    name);
	} else
#endif
	{
		error = isrc_add_handler(isrc, name, filt, hand, arg, flags,
		    cookiep);
		debugf("irq %u add handler error %d on %s\n", irq, error, name);
	}
	if (error != 0)
		return (error);

	mtx_lock(&isrc_table_lock);
	isrc->isrc_handlers++;
	if (isrc->isrc_handlers == 1) {
		PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
		PIC_ENABLE_SOURCE(isrc->isrc_dev, isrc);
	}
	mtx_unlock(&isrc_table_lock);
	return (0);
}

int
intr_irq_remove_handler(device_t dev, u_int irq, void *cookie)
{
	struct intr_irqsrc *isrc;
	int error;

	isrc = isrc_lookup(irq);
	if (isrc == NULL || isrc->isrc_handlers == 0)
		return (EINVAL);

	if (isrc->isrc_filter != NULL) {
		if (isrc != cookie)
			return (EINVAL);

		mtx_lock(&isrc_table_lock);
		isrc->isrc_filter = NULL;
		isrc->isrc_arg = NULL;
		isrc->isrc_handlers = 0;
		PIC_DISABLE_SOURCE(isrc->isrc_dev, isrc);
		PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
		isrc_update_name(isrc, NULL);
		mtx_unlock(&isrc_table_lock);
		return (0);
	}

	if (isrc != intr_handler_source(cookie))
		return (EINVAL);

	error = intr_event_remove_handler(cookie);
	if (error == 0) {
		mtx_lock(&isrc_table_lock);
		isrc->isrc_handlers--;
		if (isrc->isrc_handlers == 0) {
			PIC_DISABLE_SOURCE(isrc->isrc_dev, isrc);
			PIC_DISABLE_INTR(isrc->isrc_dev, isrc);
		}
		intrcnt_updatename(isrc);
		mtx_unlock(&isrc_table_lock);
	}
	return (error);
}
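
/*
 * A device driver normally reaches intr_irq_add_handler() and
 * intr_irq_remove_handler() indirectly, through the bus methods. A
 * minimal, hypothetical calling sequence:
 *
 *	sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE);
 *	error = bus_setup_intr(dev, sc->res, INTR_TYPE_MISC | INTR_MPSAFE,
 *	    NULL, my_intr, sc, &sc->ih);
 *	...
 *	bus_teardown_intr(dev, sc->res, sc->ih);
 */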

int
intr_irq_config(u_int irq, enum intr_trigger trig, enum intr_polarity pol)
{
	struct intr_irqsrc *isrc;

	isrc = isrc_lookup(irq);
	if (isrc == NULL)
		return (EINVAL);

	if (isrc->isrc_handlers != 0)
		return (EBUSY);	/* interrupt is enabled (active) */

	/*
	 * Once an interrupt is enabled, we do not change its configuration.
	 * A controller PIC_ENABLE_INTR() method is called when an interrupt
	 * is going to be enabled. In this method, a controller should setup
	 * the interrupt according to saved configuration parameters.
	 */
	isrc->isrc_trig = trig;
	isrc->isrc_pol = pol;

	return (0);
}

int
intr_irq_describe(u_int irq, void *cookie, const char *descr)
{
	struct intr_irqsrc *isrc;
	int error;

	isrc = isrc_lookup(irq);
	if (isrc == NULL || isrc->isrc_handlers == 0)
		return (EINVAL);

	if (isrc->isrc_filter != NULL) {
		if (isrc != cookie)
			return (EINVAL);

		mtx_lock(&isrc_table_lock);
		isrc_update_name(isrc, descr);
		mtx_unlock(&isrc_table_lock);
		return (0);
	}

	error = intr_event_describe_handler(isrc->isrc_event, cookie, descr);
	if (error == 0) {
		mtx_lock(&isrc_table_lock);
		intrcnt_updatename(isrc);
		mtx_unlock(&isrc_table_lock);
	}
	return (error);
}
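
/*
 * Sketch: configuring an edge-triggered, active-high interrupt before the
 * first handler is installed (the trigger and polarity values come from
 * the enums in sys/bus.h):
 *
 *	error = intr_irq_config(irq, INTR_TRIGGER_EDGE, INTR_POLARITY_HIGH);
 */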

#ifdef SMP
int
intr_irq_bind(u_int irq, int cpu)
{
	struct intr_irqsrc *isrc;

	isrc = isrc_lookup(irq);
	if (isrc == NULL || isrc->isrc_handlers == 0)
		return (EINVAL);

	if (isrc->isrc_filter != NULL)
		return (intr_isrc_assign_cpu(isrc, cpu));

	return (intr_event_bind(isrc->isrc_event, cpu));
}

/*
 * Return the CPU that the next interrupt source should use.
 * For now just returns the next CPU according to round-robin.
 */
u_int
intr_irq_next_cpu(u_int last_cpu, cpuset_t *cpumask)
{

	if (!irq_assign_cpu || mp_ncpus == 1)
		return (PCPU_GET(cpuid));

	do {
		last_cpu++;
		if (last_cpu > mp_maxid)
			last_cpu = 0;
	} while (!CPU_ISSET(last_cpu, cpumask));
	return (last_cpu);
}

/*
 * Distribute all the interrupt sources among the available
 * CPUs once the APs have been launched.
 */
static void
intr_irq_shuffle(void *arg __unused)
{
	struct intr_irqsrc *isrc;
	u_int i;

	if (mp_ncpus == 1)
		return;

	mtx_lock(&isrc_table_lock);
	irq_assign_cpu = TRUE;
	for (i = 0; i < NIRQ; i++) {
		isrc = irq_sources[i];
		if (isrc == NULL || isrc->isrc_handlers == 0 ||
		    isrc->isrc_flags & INTR_ISRCF_PERCPU)
			continue;

		if (isrc->isrc_event != NULL &&
		    isrc->isrc_flags & INTR_ISRCF_BOUND &&
		    isrc->isrc_event->ie_cpu != CPU_FFS(&isrc->isrc_cpu) - 1)
			panic("%s: CPU inconsistency", __func__);

		if ((isrc->isrc_flags & INTR_ISRCF_BOUND) == 0)
			CPU_ZERO(&isrc->isrc_cpu);	/* start again */

		/*
		 * We are in a wicked position here if the following call
		 * fails for a bound ISRC. The best thing we can do is to
		 * clear isrc_cpu so the inconsistency with ie_cpu will be
		 * detectable.
		 */
		if (PIC_BIND(isrc->isrc_dev, isrc) != 0)
			CPU_ZERO(&isrc->isrc_cpu);
	}
	mtx_unlock(&isrc_table_lock);
}
SYSINIT(intr_irq_shuffle, SI_SUB_SMP, SI_ORDER_SECOND, intr_irq_shuffle, NULL);

#else
u_int
intr_irq_next_cpu(u_int current_cpu, cpuset_t *cpumask)
{

	return (PCPU_GET(cpuid));
}
#endif

void dosoftints(void);
void
dosoftints(void)
{
}

#ifdef SMP
/*
 * Lookup IPI source.
 */
static struct intr_irqsrc *
intr_ipi_lookup(u_int ipi)
{

	if (ipi >= INTR_IPI_COUNT)
		panic("%s: no such IPI %u", __func__, ipi);

	return (&ipi_sources[ipi]);
}

/*
 * Interrupt controller dispatch function for IPIs. It should
 * be called straight from the interrupt controller, when the associated
 * interrupt source is learned. Or from anybody who has an interrupt
 * source mapped.
 */
void
intr_ipi_dispatch(struct intr_irqsrc *isrc, struct trapframe *tf)
{
	void *arg;

	KASSERT(isrc != NULL, ("%s: no source", __func__));

	isrc_increment_ipi_count(isrc, PCPU_GET(cpuid));

	/*
	 * Supply the IPI filter with the trapframe argument
	 * if no argument is registered.
	 */
	arg = isrc->isrc_arg != NULL ? isrc->isrc_arg : tf;
	isrc->isrc_ipifilter(arg);
}

/*
 * Map IPI into interrupt controller.
 *
 * Not SMP coherent.
 */
static int
ipi_map(struct intr_irqsrc *isrc, u_int ipi)
{
	boolean_t is_percpu;
	int error;

	if (ipi >= INTR_IPI_COUNT)
		panic("%s: no such IPI %u", __func__, ipi);

	KASSERT(irq_root_dev != NULL, ("%s: no root attached", __func__));

	isrc->isrc_type = INTR_ISRCT_NAMESPACE;
	isrc->isrc_nspc_type = INTR_IRQ_NSPC_IPI;
	isrc->isrc_nspc_num = ipi_next_num;

	error = PIC_REGISTER(irq_root_dev, isrc, &is_percpu);

	debugf("ipi %u mapped to %u on %s - error %d\n", ipi, ipi_next_num,
	    device_get_nameunit(irq_root_dev), error);

	if (error == 0) {
		isrc->isrc_dev = irq_root_dev;
		ipi_next_num++;
	}
	return (error);
}

/*
 * Set up an IPI handler on an interrupt source.
 *
 * Note that there could be more ways to send and receive IPIs on a
 * platform, for example fast interrupts. In that case, one can call
 * this function with the AISHF_NOALLOC flag set and then call
 * intr_ipi_dispatch() when appropriate.
 *
 * Not SMP coherent.
 */
int
intr_ipi_set_handler(u_int ipi, const char *name, intr_ipi_filter_t *filter,
    void *arg, u_int flags)
{
	struct intr_irqsrc *isrc;
	int error;

	if (filter == NULL)
		return (EINVAL);

	isrc = intr_ipi_lookup(ipi);
	if (isrc->isrc_ipifilter != NULL)
		return (EEXIST);

	if ((flags & AISHF_NOALLOC) == 0) {
		error = ipi_map(isrc, ipi);
		if (error != 0)
			return (error);
	}

	isrc->isrc_ipifilter = filter;
	isrc->isrc_arg = arg;
	isrc->isrc_handlers = 1;
	isrc_setup_ipi_counters(isrc, name);

	if (isrc->isrc_dev != NULL) {
		mtx_lock(&isrc_table_lock);
		PIC_ENABLE_INTR(isrc->isrc_dev, isrc);
		PIC_ENABLE_SOURCE(isrc->isrc_dev, isrc);
		mtx_unlock(&isrc_table_lock);
	}
	return (0);
}

/*
 * Send IPI thru interrupt controller.
 */
void
pic_ipi_send(cpuset_t cpus, u_int ipi)
{
	struct intr_irqsrc *isrc;

	isrc = intr_ipi_lookup(ipi);

	KASSERT(irq_root_dev != NULL, ("%s: no root attached", __func__));
	PIC_IPI_SEND(irq_root_dev, isrc, cpus);
}
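
/*
 * Sketch of platform IPI glue built on this interface. The filter name
 * and the use of IPI_RENDEZVOUS are illustrative only; the filter is
 * written as void-returning, matching how intr_ipi_dispatch() above
 * ignores the filter's result, and smp_rendezvous_action() is the
 * standard MI routine from sys/smp.h:
 *
 *	static void
 *	ipi_rendezvous(void *dummy)
 *	{
 *		smp_rendezvous_action();
 *	}
 *	...
 *	intr_ipi_set_handler(IPI_RENDEZVOUS, "rendezvous",
 *	    ipi_rendezvous, NULL, 0);
 *	...
 *	pic_ipi_send(other_cpus, IPI_RENDEZVOUS);
 */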

/*
 * Init interrupt controller on another CPU.
 */
void
intr_pic_init_secondary(void)
{

	/*
	 * QQQ: Only root PIC is aware of other CPUs ???
	 */
	KASSERT(irq_root_dev != NULL, ("%s: no root attached", __func__));

	//mtx_lock(&isrc_table_lock);
	PIC_INIT_SECONDARY(irq_root_dev);
	//mtx_unlock(&isrc_table_lock);
}
#endif

#ifdef DDB
DB_SHOW_COMMAND(irqs, db_show_irqs)
{
	u_int i, irqsum;
	struct intr_irqsrc *isrc;

#ifdef SMP
	for (i = 0; i <= mp_maxid; i++) {
		struct pcpu *pc;
		u_int ipi, ipisum;

		pc = pcpu_find(i);
		if (pc != NULL) {
			for (ipisum = 0, ipi = 0; ipi < INTR_IPI_COUNT;
			    ipi++) {
				isrc = intr_ipi_lookup(ipi);
				if (isrc->isrc_count != NULL)
					ipisum += isrc->isrc_count[i];
			}
			db_printf("cpu%u: total %u ipis %u\n", i,
			    pc->pc_cnt.v_intr, ipisum);
		}
	}
	db_printf("\n");
#endif

	for (irqsum = 0, i = 0; i < NIRQ; i++) {
		isrc = irq_sources[i];
		if (isrc == NULL)
			continue;

		db_printf("irq%-3u <%s>: cpu %02lx%s cnt %lu\n", i,
		    isrc->isrc_name, isrc->isrc_cpu.__bits[0],
		    isrc->isrc_flags & INTR_ISRCF_BOUND ? " (bound)" : "",
		    isrc->isrc_count[0]);
		irqsum += isrc->isrc_count[0];
	}
	db_printf("irq total %u\n", irqsum);
}
#endif