1 /* 2 * Derived from arch/i386/kernel/irq.c 3 * Copyright (C) 1992 Linus Torvalds 4 * Adapted from arch/i386 by Gary Thomas 5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 6 * Updated and modified by Cort Dougan <cort@fsmlabs.com> 7 * Copyright (C) 1996-2001 Cort Dougan 8 * Adapted for Power Macintosh by Paul Mackerras 9 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au) 10 * 11 * This program is free software; you can redistribute it and/or 12 * modify it under the terms of the GNU General Public License 13 * as published by the Free Software Foundation; either version 14 * 2 of the License, or (at your option) any later version. 15 * 16 * This file contains the code used by various IRQ handling routines: 17 * asking for different IRQ's should be done through these routines 18 * instead of just grabbing them. Thus setups with different IRQ numbers 19 * shouldn't result in any weird surprises, and installing new handlers 20 * should be easier. 21 * 22 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the 23 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit 24 * mask register (of which only 16 are defined), hence the weird shifting 25 * and complement of the cached_irq_mask. I want to be able to stuff 26 * this right into the SIU SMASK register. 27 * Many of the prep/chrp functions are conditional compiled on CONFIG_8xx 28 * to reduce code space and undefined function references. 29 */ 30 31 #undef DEBUG 32 33 #include <linux/module.h> 34 #include <linux/threads.h> 35 #include <linux/kernel_stat.h> 36 #include <linux/signal.h> 37 #include <linux/sched.h> 38 #include <linux/ptrace.h> 39 #include <linux/ioport.h> 40 #include <linux/interrupt.h> 41 #include <linux/timex.h> 42 #include <linux/init.h> 43 #include <linux/slab.h> 44 #include <linux/delay.h> 45 #include <linux/irq.h> 46 #include <linux/seq_file.h> 47 #include <linux/cpumask.h> 48 #include <linux/profile.h> 49 #include <linux/bitops.h> 50 #include <linux/list.h> 51 #include <linux/radix-tree.h> 52 #include <linux/mutex.h> 53 #include <linux/bootmem.h> 54 #include <linux/pci.h> 55 #include <linux/debugfs.h> 56 #include <linux/of.h> 57 #include <linux/of_irq.h> 58 59 #include <asm/uaccess.h> 60 #include <asm/system.h> 61 #include <asm/io.h> 62 #include <asm/pgtable.h> 63 #include <asm/irq.h> 64 #include <asm/cache.h> 65 #include <asm/prom.h> 66 #include <asm/ptrace.h> 67 #include <asm/machdep.h> 68 #include <asm/udbg.h> 69 #include <asm/dbell.h> 70 #include <asm/smp.h> 71 72 #ifdef CONFIG_PPC64 73 #include <asm/paca.h> 74 #include <asm/firmware.h> 75 #include <asm/lv1call.h> 76 #endif 77 #define CREATE_TRACE_POINTS 78 #include <asm/trace.h> 79 80 DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); 81 EXPORT_PER_CPU_SYMBOL(irq_stat); 82 83 int __irq_offset_value; 84 85 #ifdef CONFIG_PPC32 86 EXPORT_SYMBOL(__irq_offset_value); 87 atomic_t ppc_n_lost_interrupts; 88 89 #ifdef CONFIG_TAU_INT 90 extern int tau_initialized; 91 extern int tau_interrupts(int); 92 #endif 93 #endif /* CONFIG_PPC32 */ 94 95 #ifdef CONFIG_PPC64 96 97 #ifndef CONFIG_SPARSE_IRQ 98 EXPORT_SYMBOL(irq_desc); 99 #endif 100 101 int distribute_irqs = 1; 102 103 static inline notrace unsigned long get_hard_enabled(void) 104 { 105 unsigned long enabled; 106 107 __asm__ __volatile__("lbz %0,%1(13)" 108 : "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled))); 109 110 return enabled; 111 } 112 113 static inline notrace void set_soft_enabled(unsigned long enable) 114 { 115 __asm__ __volatile__("stb %0,%1(13)" 116 : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))); 117 } 118 119 notrace void raw_local_irq_restore(unsigned long en) 120 { 121 /* 122 * get_paca()->soft_enabled = en; 123 * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1? 124 * That was allowed before, and in such a case we do need to take care 125 * that gcc will set soft_enabled directly via r13, not choose to use 126 * an intermediate register, lest we're preempted to a different cpu. 127 */ 128 set_soft_enabled(en); 129 if (!en) 130 return; 131 132 #ifdef CONFIG_PPC_STD_MMU_64 133 if (firmware_has_feature(FW_FEATURE_ISERIES)) { 134 /* 135 * Do we need to disable preemption here? Not really: in the 136 * unlikely event that we're preempted to a different cpu in 137 * between getting r13, loading its lppaca_ptr, and loading 138 * its any_int, we might call iseries_handle_interrupts without 139 * an interrupt pending on the new cpu, but that's no disaster, 140 * is it? And the business of preempting us off the old cpu 141 * would itself involve a local_irq_restore which handles the 142 * interrupt to that cpu. 143 * 144 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()" 145 * to avoid any preemption checking added into get_paca(). 146 */ 147 if (local_paca->lppaca_ptr->int_dword.any_int) 148 iseries_handle_interrupts(); 149 } 150 #endif /* CONFIG_PPC_STD_MMU_64 */ 151 152 /* 153 * if (get_paca()->hard_enabled) return; 154 * But again we need to take care that gcc gets hard_enabled directly 155 * via r13, not choose to use an intermediate register, lest we're 156 * preempted to a different cpu in between the two instructions. 157 */ 158 if (get_hard_enabled()) 159 return; 160 161 #if defined(CONFIG_BOOKE) && defined(CONFIG_SMP) 162 /* Check for pending doorbell interrupts and resend to ourself */ 163 doorbell_check_self(); 164 #endif 165 166 /* 167 * Need to hard-enable interrupts here. Since currently disabled, 168 * no need to take further asm precautions against preemption; but 169 * use local_paca instead of get_paca() to avoid preemption checking. 170 */ 171 local_paca->hard_enabled = en; 172 173 #ifndef CONFIG_BOOKE 174 /* On server, re-trigger the decrementer if it went negative since 175 * some processors only trigger on edge transitions of the sign bit. 176 * 177 * BookE has a level sensitive decrementer (latches in TSR) so we 178 * don't need that 179 */ 180 if ((int)mfspr(SPRN_DEC) < 0) 181 mtspr(SPRN_DEC, 1); 182 #endif /* CONFIG_BOOKE */ 183 184 /* 185 * Force the delivery of pending soft-disabled interrupts on PS3. 186 * Any HV call will have this side effect. 187 */ 188 if (firmware_has_feature(FW_FEATURE_PS3_LV1)) { 189 u64 tmp; 190 lv1_get_version_info(&tmp); 191 } 192 193 __hard_irq_enable(); 194 } 195 EXPORT_SYMBOL(raw_local_irq_restore); 196 #endif /* CONFIG_PPC64 */ 197 198 static int show_other_interrupts(struct seq_file *p, int prec) 199 { 200 int j; 201 202 #if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT) 203 if (tau_initialized) { 204 seq_printf(p, "%*s: ", prec, "TAU"); 205 for_each_online_cpu(j) 206 seq_printf(p, "%10u ", tau_interrupts(j)); 207 seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n"); 208 } 209 #endif /* CONFIG_PPC32 && CONFIG_TAU_INT */ 210 211 seq_printf(p, "%*s: ", prec, "LOC"); 212 for_each_online_cpu(j) 213 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs); 214 seq_printf(p, " Local timer interrupts\n"); 215 216 seq_printf(p, "%*s: ", prec, "SPU"); 217 for_each_online_cpu(j) 218 seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs); 219 seq_printf(p, " Spurious interrupts\n"); 220 221 seq_printf(p, "%*s: ", prec, "CNT"); 222 for_each_online_cpu(j) 223 seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs); 224 seq_printf(p, " Performance monitoring interrupts\n"); 225 226 seq_printf(p, "%*s: ", prec, "MCE"); 227 for_each_online_cpu(j) 228 seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions); 229 seq_printf(p, " Machine check exceptions\n"); 230 231 return 0; 232 } 233 234 int show_interrupts(struct seq_file *p, void *v) 235 { 236 unsigned long flags, any_count = 0; 237 int i = *(loff_t *) v, j, prec; 238 struct irqaction *action; 239 struct irq_desc *desc; 240 241 if (i > nr_irqs) 242 return 0; 243 244 for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec) 245 j *= 10; 246 247 if (i == nr_irqs) 248 return show_other_interrupts(p, prec); 249 250 /* print header */ 251 if (i == 0) { 252 seq_printf(p, "%*s", prec + 8, ""); 253 for_each_online_cpu(j) 254 seq_printf(p, "CPU%-8d", j); 255 seq_putc(p, '\n'); 256 } 257 258 desc = irq_to_desc(i); 259 if (!desc) 260 return 0; 261 262 raw_spin_lock_irqsave(&desc->lock, flags); 263 for_each_online_cpu(j) 264 any_count |= kstat_irqs_cpu(i, j); 265 action = desc->action; 266 if (!action && !any_count) 267 goto out; 268 269 seq_printf(p, "%*d: ", prec, i); 270 for_each_online_cpu(j) 271 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 272 273 if (desc->chip) 274 seq_printf(p, " %-16s", desc->chip->name); 275 else 276 seq_printf(p, " %-16s", "None"); 277 seq_printf(p, " %-8s", (desc->status & IRQ_LEVEL) ? "Level" : "Edge"); 278 279 if (action) { 280 seq_printf(p, " %s", action->name); 281 while ((action = action->next) != NULL) 282 seq_printf(p, ", %s", action->name); 283 } 284 285 seq_putc(p, '\n'); 286 out: 287 raw_spin_unlock_irqrestore(&desc->lock, flags); 288 return 0; 289 } 290 291 /* 292 * /proc/stat helpers 293 */ 294 u64 arch_irq_stat_cpu(unsigned int cpu) 295 { 296 u64 sum = per_cpu(irq_stat, cpu).timer_irqs; 297 298 sum += per_cpu(irq_stat, cpu).pmu_irqs; 299 sum += per_cpu(irq_stat, cpu).mce_exceptions; 300 sum += per_cpu(irq_stat, cpu).spurious_irqs; 301 302 return sum; 303 } 304 305 #ifdef CONFIG_HOTPLUG_CPU 306 void fixup_irqs(const struct cpumask *map) 307 { 308 struct irq_desc *desc; 309 unsigned int irq; 310 static int warned; 311 cpumask_var_t mask; 312 313 alloc_cpumask_var(&mask, GFP_KERNEL); 314 315 for_each_irq(irq) { 316 desc = irq_to_desc(irq); 317 if (!desc) 318 continue; 319 320 if (desc->status & IRQ_PER_CPU) 321 continue; 322 323 cpumask_and(mask, desc->affinity, map); 324 if (cpumask_any(mask) >= nr_cpu_ids) { 325 printk("Breaking affinity for irq %i\n", irq); 326 cpumask_copy(mask, map); 327 } 328 if (desc->chip->set_affinity) 329 desc->chip->set_affinity(irq, mask); 330 else if (desc->action && !(warned++)) 331 printk("Cannot set affinity for irq %i\n", irq); 332 } 333 334 free_cpumask_var(mask); 335 336 local_irq_enable(); 337 mdelay(1); 338 local_irq_disable(); 339 } 340 #endif 341 342 static inline void handle_one_irq(unsigned int irq) 343 { 344 struct thread_info *curtp, *irqtp; 345 unsigned long saved_sp_limit; 346 struct irq_desc *desc; 347 348 /* Switch to the irq stack to handle this */ 349 curtp = current_thread_info(); 350 irqtp = hardirq_ctx[smp_processor_id()]; 351 352 if (curtp == irqtp) { 353 /* We're already on the irq stack, just handle it */ 354 generic_handle_irq(irq); 355 return; 356 } 357 358 desc = irq_to_desc(irq); 359 saved_sp_limit = current->thread.ksp_limit; 360 361 irqtp->task = curtp->task; 362 irqtp->flags = 0; 363 364 /* Copy the softirq bits in preempt_count so that the 365 * softirq checks work in the hardirq context. */ 366 irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) | 367 (curtp->preempt_count & SOFTIRQ_MASK); 368 369 current->thread.ksp_limit = (unsigned long)irqtp + 370 _ALIGN_UP(sizeof(struct thread_info), 16); 371 372 call_handle_irq(irq, desc, irqtp, desc->handle_irq); 373 current->thread.ksp_limit = saved_sp_limit; 374 irqtp->task = NULL; 375 376 /* Set any flag that may have been set on the 377 * alternate stack 378 */ 379 if (irqtp->flags) 380 set_bits(irqtp->flags, &curtp->flags); 381 } 382 383 static inline void check_stack_overflow(void) 384 { 385 #ifdef CONFIG_DEBUG_STACKOVERFLOW 386 long sp; 387 388 sp = __get_SP() & (THREAD_SIZE-1); 389 390 /* check for stack overflow: is there less than 2KB free? */ 391 if (unlikely(sp < (sizeof(struct thread_info) + 2048))) { 392 printk("do_IRQ: stack overflow: %ld\n", 393 sp - sizeof(struct thread_info)); 394 dump_stack(); 395 } 396 #endif 397 } 398 399 void do_IRQ(struct pt_regs *regs) 400 { 401 struct pt_regs *old_regs = set_irq_regs(regs); 402 unsigned int irq; 403 404 trace_irq_entry(regs); 405 406 irq_enter(); 407 408 check_stack_overflow(); 409 410 irq = ppc_md.get_irq(); 411 412 if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) 413 handle_one_irq(irq); 414 else if (irq != NO_IRQ_IGNORE) 415 __get_cpu_var(irq_stat).spurious_irqs++; 416 417 irq_exit(); 418 set_irq_regs(old_regs); 419 420 #ifdef CONFIG_PPC_ISERIES 421 if (firmware_has_feature(FW_FEATURE_ISERIES) && 422 get_lppaca()->int_dword.fields.decr_int) { 423 get_lppaca()->int_dword.fields.decr_int = 0; 424 /* Signal a fake decrementer interrupt */ 425 timer_interrupt(regs); 426 } 427 #endif 428 429 trace_irq_exit(regs); 430 } 431 432 void __init init_IRQ(void) 433 { 434 if (ppc_md.init_IRQ) 435 ppc_md.init_IRQ(); 436 437 exc_lvl_ctx_init(); 438 439 irq_ctx_init(); 440 } 441 442 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) 443 struct thread_info *critirq_ctx[NR_CPUS] __read_mostly; 444 struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly; 445 struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly; 446 447 void exc_lvl_ctx_init(void) 448 { 449 struct thread_info *tp; 450 int i, hw_cpu; 451 452 for_each_possible_cpu(i) { 453 hw_cpu = get_hard_smp_processor_id(i); 454 memset((void *)critirq_ctx[hw_cpu], 0, THREAD_SIZE); 455 tp = critirq_ctx[hw_cpu]; 456 tp->cpu = i; 457 tp->preempt_count = 0; 458 459 #ifdef CONFIG_BOOKE 460 memset((void *)dbgirq_ctx[hw_cpu], 0, THREAD_SIZE); 461 tp = dbgirq_ctx[hw_cpu]; 462 tp->cpu = i; 463 tp->preempt_count = 0; 464 465 memset((void *)mcheckirq_ctx[hw_cpu], 0, THREAD_SIZE); 466 tp = mcheckirq_ctx[hw_cpu]; 467 tp->cpu = i; 468 tp->preempt_count = HARDIRQ_OFFSET; 469 #endif 470 } 471 } 472 #endif 473 474 struct thread_info *softirq_ctx[NR_CPUS] __read_mostly; 475 struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly; 476 477 void irq_ctx_init(void) 478 { 479 struct thread_info *tp; 480 int i; 481 482 for_each_possible_cpu(i) { 483 memset((void *)softirq_ctx[i], 0, THREAD_SIZE); 484 tp = softirq_ctx[i]; 485 tp->cpu = i; 486 tp->preempt_count = 0; 487 488 memset((void *)hardirq_ctx[i], 0, THREAD_SIZE); 489 tp = hardirq_ctx[i]; 490 tp->cpu = i; 491 tp->preempt_count = HARDIRQ_OFFSET; 492 } 493 } 494 495 static inline void do_softirq_onstack(void) 496 { 497 struct thread_info *curtp, *irqtp; 498 unsigned long saved_sp_limit = current->thread.ksp_limit; 499 500 curtp = current_thread_info(); 501 irqtp = softirq_ctx[smp_processor_id()]; 502 irqtp->task = curtp->task; 503 current->thread.ksp_limit = (unsigned long)irqtp + 504 _ALIGN_UP(sizeof(struct thread_info), 16); 505 call_do_softirq(irqtp); 506 current->thread.ksp_limit = saved_sp_limit; 507 irqtp->task = NULL; 508 } 509 510 void do_softirq(void) 511 { 512 unsigned long flags; 513 514 if (in_interrupt()) 515 return; 516 517 local_irq_save(flags); 518 519 if (local_softirq_pending()) 520 do_softirq_onstack(); 521 522 local_irq_restore(flags); 523 } 524 525 526 /* 527 * IRQ controller and virtual interrupts 528 */ 529 530 static LIST_HEAD(irq_hosts); 531 static DEFINE_RAW_SPINLOCK(irq_big_lock); 532 static unsigned int revmap_trees_allocated; 533 static DEFINE_MUTEX(revmap_trees_mutex); 534 struct irq_map_entry irq_map[NR_IRQS]; 535 static unsigned int irq_virq_count = NR_IRQS; 536 static struct irq_host *irq_default_host; 537 538 irq_hw_number_t virq_to_hw(unsigned int virq) 539 { 540 return irq_map[virq].hwirq; 541 } 542 EXPORT_SYMBOL_GPL(virq_to_hw); 543 544 static int default_irq_host_match(struct irq_host *h, struct device_node *np) 545 { 546 return h->of_node != NULL && h->of_node == np; 547 } 548 549 struct irq_host *irq_alloc_host(struct device_node *of_node, 550 unsigned int revmap_type, 551 unsigned int revmap_arg, 552 struct irq_host_ops *ops, 553 irq_hw_number_t inval_irq) 554 { 555 struct irq_host *host; 556 unsigned int size = sizeof(struct irq_host); 557 unsigned int i; 558 unsigned int *rmap; 559 unsigned long flags; 560 561 /* Allocate structure and revmap table if using linear mapping */ 562 if (revmap_type == IRQ_HOST_MAP_LINEAR) 563 size += revmap_arg * sizeof(unsigned int); 564 host = zalloc_maybe_bootmem(size, GFP_KERNEL); 565 if (host == NULL) 566 return NULL; 567 568 /* Fill structure */ 569 host->revmap_type = revmap_type; 570 host->inval_irq = inval_irq; 571 host->ops = ops; 572 host->of_node = of_node_get(of_node); 573 574 if (host->ops->match == NULL) 575 host->ops->match = default_irq_host_match; 576 577 raw_spin_lock_irqsave(&irq_big_lock, flags); 578 579 /* If it's a legacy controller, check for duplicates and 580 * mark it as allocated (we use irq 0 host pointer for that 581 */ 582 if (revmap_type == IRQ_HOST_MAP_LEGACY) { 583 if (irq_map[0].host != NULL) { 584 raw_spin_unlock_irqrestore(&irq_big_lock, flags); 585 /* If we are early boot, we can't free the structure, 586 * too bad... 587 * this will be fixed once slab is made available early 588 * instead of the current cruft 589 */ 590 if (mem_init_done) 591 kfree(host); 592 return NULL; 593 } 594 irq_map[0].host = host; 595 } 596 597 list_add(&host->link, &irq_hosts); 598 raw_spin_unlock_irqrestore(&irq_big_lock, flags); 599 600 /* Additional setups per revmap type */ 601 switch(revmap_type) { 602 case IRQ_HOST_MAP_LEGACY: 603 /* 0 is always the invalid number for legacy */ 604 host->inval_irq = 0; 605 /* setup us as the host for all legacy interrupts */ 606 for (i = 1; i < NUM_ISA_INTERRUPTS; i++) { 607 irq_map[i].hwirq = i; 608 smp_wmb(); 609 irq_map[i].host = host; 610 smp_wmb(); 611 612 /* Clear norequest flags */ 613 irq_to_desc(i)->status &= ~IRQ_NOREQUEST; 614 615 /* Legacy flags are left to default at this point, 616 * one can then use irq_create_mapping() to 617 * explicitly change them 618 */ 619 ops->map(host, i, i); 620 } 621 break; 622 case IRQ_HOST_MAP_LINEAR: 623 rmap = (unsigned int *)(host + 1); 624 for (i = 0; i < revmap_arg; i++) 625 rmap[i] = NO_IRQ; 626 host->revmap_data.linear.size = revmap_arg; 627 smp_wmb(); 628 host->revmap_data.linear.revmap = rmap; 629 break; 630 default: 631 break; 632 } 633 634 pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host); 635 636 return host; 637 } 638 639 struct irq_host *irq_find_host(struct device_node *node) 640 { 641 struct irq_host *h, *found = NULL; 642 unsigned long flags; 643 644 /* We might want to match the legacy controller last since 645 * it might potentially be set to match all interrupts in 646 * the absence of a device node. This isn't a problem so far 647 * yet though... 648 */ 649 raw_spin_lock_irqsave(&irq_big_lock, flags); 650 list_for_each_entry(h, &irq_hosts, link) 651 if (h->ops->match(h, node)) { 652 found = h; 653 break; 654 } 655 raw_spin_unlock_irqrestore(&irq_big_lock, flags); 656 return found; 657 } 658 EXPORT_SYMBOL_GPL(irq_find_host); 659 660 void irq_set_default_host(struct irq_host *host) 661 { 662 pr_debug("irq: Default host set to @0x%p\n", host); 663 664 irq_default_host = host; 665 } 666 667 void irq_set_virq_count(unsigned int count) 668 { 669 pr_debug("irq: Trying to set virq count to %d\n", count); 670 671 BUG_ON(count < NUM_ISA_INTERRUPTS); 672 if (count < NR_IRQS) 673 irq_virq_count = count; 674 } 675 676 static int irq_setup_virq(struct irq_host *host, unsigned int virq, 677 irq_hw_number_t hwirq) 678 { 679 struct irq_desc *desc; 680 681 desc = irq_to_desc_alloc_node(virq, 0); 682 if (!desc) { 683 pr_debug("irq: -> allocating desc failed\n"); 684 goto error; 685 } 686 687 /* Clear IRQ_NOREQUEST flag */ 688 desc->status &= ~IRQ_NOREQUEST; 689 690 /* map it */ 691 smp_wmb(); 692 irq_map[virq].hwirq = hwirq; 693 smp_mb(); 694 695 if (host->ops->map(host, virq, hwirq)) { 696 pr_debug("irq: -> mapping failed, freeing\n"); 697 goto error; 698 } 699 700 return 0; 701 702 error: 703 irq_free_virt(virq, 1); 704 return -1; 705 } 706 707 unsigned int irq_create_direct_mapping(struct irq_host *host) 708 { 709 unsigned int virq; 710 711 if (host == NULL) 712 host = irq_default_host; 713 714 BUG_ON(host == NULL); 715 WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP); 716 717 virq = irq_alloc_virt(host, 1, 0); 718 if (virq == NO_IRQ) { 719 pr_debug("irq: create_direct virq allocation failed\n"); 720 return NO_IRQ; 721 } 722 723 pr_debug("irq: create_direct obtained virq %d\n", virq); 724 725 if (irq_setup_virq(host, virq, virq)) 726 return NO_IRQ; 727 728 return virq; 729 } 730 731 unsigned int irq_create_mapping(struct irq_host *host, 732 irq_hw_number_t hwirq) 733 { 734 unsigned int virq, hint; 735 736 pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq); 737 738 /* Look for default host if nececssary */ 739 if (host == NULL) 740 host = irq_default_host; 741 if (host == NULL) { 742 printk(KERN_WARNING "irq_create_mapping called for" 743 " NULL host, hwirq=%lx\n", hwirq); 744 WARN_ON(1); 745 return NO_IRQ; 746 } 747 pr_debug("irq: -> using host @%p\n", host); 748 749 /* Check if mapping already exist, if it does, call 750 * host->ops->map() to update the flags 751 */ 752 virq = irq_find_mapping(host, hwirq); 753 if (virq != NO_IRQ) { 754 if (host->ops->remap) 755 host->ops->remap(host, virq, hwirq); 756 pr_debug("irq: -> existing mapping on virq %d\n", virq); 757 return virq; 758 } 759 760 /* Get a virtual interrupt number */ 761 if (host->revmap_type == IRQ_HOST_MAP_LEGACY) { 762 /* Handle legacy */ 763 virq = (unsigned int)hwirq; 764 if (virq == 0 || virq >= NUM_ISA_INTERRUPTS) 765 return NO_IRQ; 766 return virq; 767 } else { 768 /* Allocate a virtual interrupt number */ 769 hint = hwirq % irq_virq_count; 770 virq = irq_alloc_virt(host, 1, hint); 771 if (virq == NO_IRQ) { 772 pr_debug("irq: -> virq allocation failed\n"); 773 return NO_IRQ; 774 } 775 } 776 777 if (irq_setup_virq(host, virq, hwirq)) 778 return NO_IRQ; 779 780 printk(KERN_DEBUG "irq: irq %lu on host %s mapped to virtual irq %u\n", 781 hwirq, host->of_node ? host->of_node->full_name : "null", virq); 782 783 return virq; 784 } 785 EXPORT_SYMBOL_GPL(irq_create_mapping); 786 787 unsigned int irq_create_of_mapping(struct device_node *controller, 788 const u32 *intspec, unsigned int intsize) 789 { 790 struct irq_host *host; 791 irq_hw_number_t hwirq; 792 unsigned int type = IRQ_TYPE_NONE; 793 unsigned int virq; 794 795 if (controller == NULL) 796 host = irq_default_host; 797 else 798 host = irq_find_host(controller); 799 if (host == NULL) { 800 printk(KERN_WARNING "irq: no irq host found for %s !\n", 801 controller->full_name); 802 return NO_IRQ; 803 } 804 805 /* If host has no translation, then we assume interrupt line */ 806 if (host->ops->xlate == NULL) 807 hwirq = intspec[0]; 808 else { 809 if (host->ops->xlate(host, controller, intspec, intsize, 810 &hwirq, &type)) 811 return NO_IRQ; 812 } 813 814 /* Create mapping */ 815 virq = irq_create_mapping(host, hwirq); 816 if (virq == NO_IRQ) 817 return virq; 818 819 /* Set type if specified and different than the current one */ 820 if (type != IRQ_TYPE_NONE && 821 type != (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK)) 822 set_irq_type(virq, type); 823 return virq; 824 } 825 EXPORT_SYMBOL_GPL(irq_create_of_mapping); 826 827 void irq_dispose_mapping(unsigned int virq) 828 { 829 struct irq_host *host; 830 irq_hw_number_t hwirq; 831 832 if (virq == NO_IRQ) 833 return; 834 835 host = irq_map[virq].host; 836 WARN_ON (host == NULL); 837 if (host == NULL) 838 return; 839 840 /* Never unmap legacy interrupts */ 841 if (host->revmap_type == IRQ_HOST_MAP_LEGACY) 842 return; 843 844 /* remove chip and handler */ 845 set_irq_chip_and_handler(virq, NULL, NULL); 846 847 /* Make sure it's completed */ 848 synchronize_irq(virq); 849 850 /* Tell the PIC about it */ 851 if (host->ops->unmap) 852 host->ops->unmap(host, virq); 853 smp_mb(); 854 855 /* Clear reverse map */ 856 hwirq = irq_map[virq].hwirq; 857 switch(host->revmap_type) { 858 case IRQ_HOST_MAP_LINEAR: 859 if (hwirq < host->revmap_data.linear.size) 860 host->revmap_data.linear.revmap[hwirq] = NO_IRQ; 861 break; 862 case IRQ_HOST_MAP_TREE: 863 /* 864 * Check if radix tree allocated yet, if not then nothing to 865 * remove. 866 */ 867 smp_rmb(); 868 if (revmap_trees_allocated < 1) 869 break; 870 mutex_lock(&revmap_trees_mutex); 871 radix_tree_delete(&host->revmap_data.tree, hwirq); 872 mutex_unlock(&revmap_trees_mutex); 873 break; 874 } 875 876 /* Destroy map */ 877 smp_mb(); 878 irq_map[virq].hwirq = host->inval_irq; 879 880 /* Set some flags */ 881 irq_to_desc(virq)->status |= IRQ_NOREQUEST; 882 883 /* Free it */ 884 irq_free_virt(virq, 1); 885 } 886 EXPORT_SYMBOL_GPL(irq_dispose_mapping); 887 888 unsigned int irq_find_mapping(struct irq_host *host, 889 irq_hw_number_t hwirq) 890 { 891 unsigned int i; 892 unsigned int hint = hwirq % irq_virq_count; 893 894 /* Look for default host if nececssary */ 895 if (host == NULL) 896 host = irq_default_host; 897 if (host == NULL) 898 return NO_IRQ; 899 900 /* legacy -> bail early */ 901 if (host->revmap_type == IRQ_HOST_MAP_LEGACY) 902 return hwirq; 903 904 /* Slow path does a linear search of the map */ 905 if (hint < NUM_ISA_INTERRUPTS) 906 hint = NUM_ISA_INTERRUPTS; 907 i = hint; 908 do { 909 if (irq_map[i].host == host && 910 irq_map[i].hwirq == hwirq) 911 return i; 912 i++; 913 if (i >= irq_virq_count) 914 i = NUM_ISA_INTERRUPTS; 915 } while(i != hint); 916 return NO_IRQ; 917 } 918 EXPORT_SYMBOL_GPL(irq_find_mapping); 919 920 921 unsigned int irq_radix_revmap_lookup(struct irq_host *host, 922 irq_hw_number_t hwirq) 923 { 924 struct irq_map_entry *ptr; 925 unsigned int virq; 926 927 WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); 928 929 /* 930 * Check if the radix tree exists and has bee initialized. 931 * If not, we fallback to slow mode 932 */ 933 if (revmap_trees_allocated < 2) 934 return irq_find_mapping(host, hwirq); 935 936 /* Now try to resolve */ 937 /* 938 * No rcu_read_lock(ing) needed, the ptr returned can't go under us 939 * as it's referencing an entry in the static irq_map table. 940 */ 941 ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq); 942 943 /* 944 * If found in radix tree, then fine. 945 * Else fallback to linear lookup - this should not happen in practice 946 * as it means that we failed to insert the node in the radix tree. 947 */ 948 if (ptr) 949 virq = ptr - irq_map; 950 else 951 virq = irq_find_mapping(host, hwirq); 952 953 return virq; 954 } 955 956 void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq, 957 irq_hw_number_t hwirq) 958 { 959 960 WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); 961 962 /* 963 * Check if the radix tree exists yet. 964 * If not, then the irq will be inserted into the tree when it gets 965 * initialized. 966 */ 967 smp_rmb(); 968 if (revmap_trees_allocated < 1) 969 return; 970 971 if (virq != NO_IRQ) { 972 mutex_lock(&revmap_trees_mutex); 973 radix_tree_insert(&host->revmap_data.tree, hwirq, 974 &irq_map[virq]); 975 mutex_unlock(&revmap_trees_mutex); 976 } 977 } 978 979 unsigned int irq_linear_revmap(struct irq_host *host, 980 irq_hw_number_t hwirq) 981 { 982 unsigned int *revmap; 983 984 WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR); 985 986 /* Check revmap bounds */ 987 if (unlikely(hwirq >= host->revmap_data.linear.size)) 988 return irq_find_mapping(host, hwirq); 989 990 /* Check if revmap was allocated */ 991 revmap = host->revmap_data.linear.revmap; 992 if (unlikely(revmap == NULL)) 993 return irq_find_mapping(host, hwirq); 994 995 /* Fill up revmap with slow path if no mapping found */ 996 if (unlikely(revmap[hwirq] == NO_IRQ)) 997 revmap[hwirq] = irq_find_mapping(host, hwirq); 998 999 return revmap[hwirq]; 1000 } 1001 1002 unsigned int irq_alloc_virt(struct irq_host *host, 1003 unsigned int count, 1004 unsigned int hint) 1005 { 1006 unsigned long flags; 1007 unsigned int i, j, found = NO_IRQ; 1008 1009 if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS)) 1010 return NO_IRQ; 1011 1012 raw_spin_lock_irqsave(&irq_big_lock, flags); 1013 1014 /* Use hint for 1 interrupt if any */ 1015 if (count == 1 && hint >= NUM_ISA_INTERRUPTS && 1016 hint < irq_virq_count && irq_map[hint].host == NULL) { 1017 found = hint; 1018 goto hint_found; 1019 } 1020 1021 /* Look for count consecutive numbers in the allocatable 1022 * (non-legacy) space 1023 */ 1024 for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) { 1025 if (irq_map[i].host != NULL) 1026 j = 0; 1027 else 1028 j++; 1029 1030 if (j == count) { 1031 found = i - count + 1; 1032 break; 1033 } 1034 } 1035 if (found == NO_IRQ) { 1036 raw_spin_unlock_irqrestore(&irq_big_lock, flags); 1037 return NO_IRQ; 1038 } 1039 hint_found: 1040 for (i = found; i < (found + count); i++) { 1041 irq_map[i].hwirq = host->inval_irq; 1042 smp_wmb(); 1043 irq_map[i].host = host; 1044 } 1045 raw_spin_unlock_irqrestore(&irq_big_lock, flags); 1046 return found; 1047 } 1048 1049 void irq_free_virt(unsigned int virq, unsigned int count) 1050 { 1051 unsigned long flags; 1052 unsigned int i; 1053 1054 WARN_ON (virq < NUM_ISA_INTERRUPTS); 1055 WARN_ON (count == 0 || (virq + count) > irq_virq_count); 1056 1057 raw_spin_lock_irqsave(&irq_big_lock, flags); 1058 for (i = virq; i < (virq + count); i++) { 1059 struct irq_host *host; 1060 1061 if (i < NUM_ISA_INTERRUPTS || 1062 (virq + count) > irq_virq_count) 1063 continue; 1064 1065 host = irq_map[i].host; 1066 irq_map[i].hwirq = host->inval_irq; 1067 smp_wmb(); 1068 irq_map[i].host = NULL; 1069 } 1070 raw_spin_unlock_irqrestore(&irq_big_lock, flags); 1071 } 1072 1073 int arch_early_irq_init(void) 1074 { 1075 struct irq_desc *desc; 1076 int i; 1077 1078 for (i = 0; i < NR_IRQS; i++) { 1079 desc = irq_to_desc(i); 1080 if (desc) 1081 desc->status |= IRQ_NOREQUEST; 1082 } 1083 1084 return 0; 1085 } 1086 1087 int arch_init_chip_data(struct irq_desc *desc, int node) 1088 { 1089 desc->status |= IRQ_NOREQUEST; 1090 return 0; 1091 } 1092 1093 /* We need to create the radix trees late */ 1094 static int irq_late_init(void) 1095 { 1096 struct irq_host *h; 1097 unsigned int i; 1098 1099 /* 1100 * No mutual exclusion with respect to accessors of the tree is needed 1101 * here as the synchronization is done via the state variable 1102 * revmap_trees_allocated. 1103 */ 1104 list_for_each_entry(h, &irq_hosts, link) { 1105 if (h->revmap_type == IRQ_HOST_MAP_TREE) 1106 INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL); 1107 } 1108 1109 /* 1110 * Make sure the radix trees inits are visible before setting 1111 * the flag 1112 */ 1113 smp_wmb(); 1114 revmap_trees_allocated = 1; 1115 1116 /* 1117 * Insert the reverse mapping for those interrupts already present 1118 * in irq_map[]. 1119 */ 1120 mutex_lock(&revmap_trees_mutex); 1121 for (i = 0; i < irq_virq_count; i++) { 1122 if (irq_map[i].host && 1123 (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE)) 1124 radix_tree_insert(&irq_map[i].host->revmap_data.tree, 1125 irq_map[i].hwirq, &irq_map[i]); 1126 } 1127 mutex_unlock(&revmap_trees_mutex); 1128 1129 /* 1130 * Make sure the radix trees insertions are visible before setting 1131 * the flag 1132 */ 1133 smp_wmb(); 1134 revmap_trees_allocated = 2; 1135 1136 return 0; 1137 } 1138 arch_initcall(irq_late_init); 1139 1140 #ifdef CONFIG_VIRQ_DEBUG 1141 static int virq_debug_show(struct seq_file *m, void *private) 1142 { 1143 unsigned long flags; 1144 struct irq_desc *desc; 1145 const char *p; 1146 char none[] = "none"; 1147 int i; 1148 1149 seq_printf(m, "%-5s %-7s %-15s %s\n", "virq", "hwirq", 1150 "chip name", "host name"); 1151 1152 for (i = 1; i < nr_irqs; i++) { 1153 desc = irq_to_desc(i); 1154 if (!desc) 1155 continue; 1156 1157 raw_spin_lock_irqsave(&desc->lock, flags); 1158 1159 if (desc->action && desc->action->handler) { 1160 seq_printf(m, "%5d ", i); 1161 seq_printf(m, "0x%05lx ", virq_to_hw(i)); 1162 1163 if (desc->chip && desc->chip->name) 1164 p = desc->chip->name; 1165 else 1166 p = none; 1167 seq_printf(m, "%-15s ", p); 1168 1169 if (irq_map[i].host && irq_map[i].host->of_node) 1170 p = irq_map[i].host->of_node->full_name; 1171 else 1172 p = none; 1173 seq_printf(m, "%s\n", p); 1174 } 1175 1176 raw_spin_unlock_irqrestore(&desc->lock, flags); 1177 } 1178 1179 return 0; 1180 } 1181 1182 static int virq_debug_open(struct inode *inode, struct file *file) 1183 { 1184 return single_open(file, virq_debug_show, inode->i_private); 1185 } 1186 1187 static const struct file_operations virq_debug_fops = { 1188 .open = virq_debug_open, 1189 .read = seq_read, 1190 .llseek = seq_lseek, 1191 .release = single_release, 1192 }; 1193 1194 static int __init irq_debugfs_init(void) 1195 { 1196 if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root, 1197 NULL, &virq_debug_fops) == NULL) 1198 return -ENOMEM; 1199 1200 return 0; 1201 } 1202 __initcall(irq_debugfs_init); 1203 #endif /* CONFIG_VIRQ_DEBUG */ 1204 1205 #ifdef CONFIG_PPC64 1206 static int __init setup_noirqdistrib(char *str) 1207 { 1208 distribute_irqs = 0; 1209 return 1; 1210 } 1211 1212 __setup("noirqdistrib", setup_noirqdistrib); 1213 #endif /* CONFIG_PPC64 */ 1214