1 /* 2 * Derived from arch/i386/kernel/irq.c 3 * Copyright (C) 1992 Linus Torvalds 4 * Adapted from arch/i386 by Gary Thomas 5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 6 * Updated and modified by Cort Dougan <cort@fsmlabs.com> 7 * Copyright (C) 1996-2001 Cort Dougan 8 * Adapted for Power Macintosh by Paul Mackerras 9 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au) 10 * 11 * This program is free software; you can redistribute it and/or 12 * modify it under the terms of the GNU General Public License 13 * as published by the Free Software Foundation; either version 14 * 2 of the License, or (at your option) any later version. 15 * 16 * This file contains the code used by various IRQ handling routines: 17 * asking for different IRQ's should be done through these routines 18 * instead of just grabbing them. Thus setups with different IRQ numbers 19 * shouldn't result in any weird surprises, and installing new handlers 20 * should be easier. 21 * 22 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the 23 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit 24 * mask register (of which only 16 are defined), hence the weird shifting 25 * and complement of the cached_irq_mask. I want to be able to stuff 26 * this right into the SIU SMASK register. 27 * Many of the prep/chrp functions are conditional compiled on CONFIG_8xx 28 * to reduce code space and undefined function references. 29 */ 30 31 #undef DEBUG 32 33 #include <linux/module.h> 34 #include <linux/threads.h> 35 #include <linux/kernel_stat.h> 36 #include <linux/signal.h> 37 #include <linux/sched.h> 38 #include <linux/ptrace.h> 39 #include <linux/ioport.h> 40 #include <linux/interrupt.h> 41 #include <linux/timex.h> 42 #include <linux/init.h> 43 #include <linux/slab.h> 44 #include <linux/delay.h> 45 #include <linux/irq.h> 46 #include <linux/seq_file.h> 47 #include <linux/cpumask.h> 48 #include <linux/profile.h> 49 #include <linux/bitops.h> 50 #include <linux/list.h> 51 #include <linux/radix-tree.h> 52 #include <linux/mutex.h> 53 #include <linux/bootmem.h> 54 #include <linux/pci.h> 55 #include <linux/debugfs.h> 56 57 #include <asm/uaccess.h> 58 #include <asm/system.h> 59 #include <asm/io.h> 60 #include <asm/pgtable.h> 61 #include <asm/irq.h> 62 #include <asm/cache.h> 63 #include <asm/prom.h> 64 #include <asm/ptrace.h> 65 #include <asm/machdep.h> 66 #include <asm/udbg.h> 67 #ifdef CONFIG_PPC64 68 #include <asm/paca.h> 69 #include <asm/firmware.h> 70 #include <asm/lv1call.h> 71 #endif 72 #define CREATE_TRACE_POINTS 73 #include <asm/trace.h> 74 75 DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); 76 EXPORT_PER_CPU_SYMBOL(irq_stat); 77 78 int __irq_offset_value; 79 80 #ifdef CONFIG_PPC32 81 EXPORT_SYMBOL(__irq_offset_value); 82 atomic_t ppc_n_lost_interrupts; 83 84 #ifdef CONFIG_TAU_INT 85 extern int tau_initialized; 86 extern int tau_interrupts(int); 87 #endif 88 #endif /* CONFIG_PPC32 */ 89 90 #ifdef CONFIG_PPC64 91 92 #ifndef CONFIG_SPARSE_IRQ 93 EXPORT_SYMBOL(irq_desc); 94 #endif 95 96 int distribute_irqs = 1; 97 98 static inline notrace unsigned long get_hard_enabled(void) 99 { 100 unsigned long enabled; 101 102 __asm__ __volatile__("lbz %0,%1(13)" 103 : "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled))); 104 105 return enabled; 106 } 107 108 static inline notrace void set_soft_enabled(unsigned long enable) 109 { 110 __asm__ __volatile__("stb %0,%1(13)" 111 : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))); 112 } 113 114 notrace void raw_local_irq_restore(unsigned long en) 115 { 116 /* 117 * get_paca()->soft_enabled = en; 118 * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1? 119 * That was allowed before, and in such a case we do need to take care 120 * that gcc will set soft_enabled directly via r13, not choose to use 121 * an intermediate register, lest we're preempted to a different cpu. 122 */ 123 set_soft_enabled(en); 124 if (!en) 125 return; 126 127 #ifdef CONFIG_PPC_STD_MMU_64 128 if (firmware_has_feature(FW_FEATURE_ISERIES)) { 129 /* 130 * Do we need to disable preemption here? Not really: in the 131 * unlikely event that we're preempted to a different cpu in 132 * between getting r13, loading its lppaca_ptr, and loading 133 * its any_int, we might call iseries_handle_interrupts without 134 * an interrupt pending on the new cpu, but that's no disaster, 135 * is it? And the business of preempting us off the old cpu 136 * would itself involve a local_irq_restore which handles the 137 * interrupt to that cpu. 138 * 139 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()" 140 * to avoid any preemption checking added into get_paca(). 141 */ 142 if (local_paca->lppaca_ptr->int_dword.any_int) 143 iseries_handle_interrupts(); 144 } 145 #endif /* CONFIG_PPC_STD_MMU_64 */ 146 147 /* 148 * if (get_paca()->hard_enabled) return; 149 * But again we need to take care that gcc gets hard_enabled directly 150 * via r13, not choose to use an intermediate register, lest we're 151 * preempted to a different cpu in between the two instructions. 152 */ 153 if (get_hard_enabled()) 154 return; 155 156 /* 157 * Need to hard-enable interrupts here. Since currently disabled, 158 * no need to take further asm precautions against preemption; but 159 * use local_paca instead of get_paca() to avoid preemption checking. 160 */ 161 local_paca->hard_enabled = en; 162 if ((int)mfspr(SPRN_DEC) < 0) 163 mtspr(SPRN_DEC, 1); 164 165 /* 166 * Force the delivery of pending soft-disabled interrupts on PS3. 167 * Any HV call will have this side effect. 168 */ 169 if (firmware_has_feature(FW_FEATURE_PS3_LV1)) { 170 u64 tmp; 171 lv1_get_version_info(&tmp); 172 } 173 174 __hard_irq_enable(); 175 } 176 EXPORT_SYMBOL(raw_local_irq_restore); 177 #endif /* CONFIG_PPC64 */ 178 179 static int show_other_interrupts(struct seq_file *p, int prec) 180 { 181 int j; 182 183 #if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT) 184 if (tau_initialized) { 185 seq_printf(p, "%*s: ", prec, "TAU"); 186 for_each_online_cpu(j) 187 seq_printf(p, "%10u ", tau_interrupts(j)); 188 seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n"); 189 } 190 #endif /* CONFIG_PPC32 && CONFIG_TAU_INT */ 191 192 seq_printf(p, "%*s: ", prec, "LOC"); 193 for_each_online_cpu(j) 194 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs); 195 seq_printf(p, " Local timer interrupts\n"); 196 197 seq_printf(p, "%*s: ", prec, "SPU"); 198 for_each_online_cpu(j) 199 seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs); 200 seq_printf(p, " Spurious interrupts\n"); 201 202 seq_printf(p, "%*s: ", prec, "CNT"); 203 for_each_online_cpu(j) 204 seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs); 205 seq_printf(p, " Performance monitoring interrupts\n"); 206 207 seq_printf(p, "%*s: ", prec, "MCE"); 208 for_each_online_cpu(j) 209 seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions); 210 seq_printf(p, " Machine check exceptions\n"); 211 212 return 0; 213 } 214 215 int show_interrupts(struct seq_file *p, void *v) 216 { 217 unsigned long flags, any_count = 0; 218 int i = *(loff_t *) v, j, prec; 219 struct irqaction *action; 220 struct irq_desc *desc; 221 222 if (i > nr_irqs) 223 return 0; 224 225 for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec) 226 j *= 10; 227 228 if (i == nr_irqs) 229 return show_other_interrupts(p, prec); 230 231 /* print header */ 232 if (i == 0) { 233 seq_printf(p, "%*s", prec + 8, ""); 234 for_each_online_cpu(j) 235 seq_printf(p, "CPU%-8d", j); 236 seq_putc(p, '\n'); 237 } 238 239 desc = irq_to_desc(i); 240 if (!desc) 241 return 0; 242 243 raw_spin_lock_irqsave(&desc->lock, flags); 244 for_each_online_cpu(j) 245 any_count |= kstat_irqs_cpu(i, j); 246 action = desc->action; 247 if (!action && !any_count) 248 goto out; 249 250 seq_printf(p, "%*d: ", prec, i); 251 for_each_online_cpu(j) 252 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 253 254 if (desc->chip) 255 seq_printf(p, " %-16s", desc->chip->name); 256 else 257 seq_printf(p, " %-16s", "None"); 258 seq_printf(p, " %-8s", (desc->status & IRQ_LEVEL) ? "Level" : "Edge"); 259 260 if (action) { 261 seq_printf(p, " %s", action->name); 262 while ((action = action->next) != NULL) 263 seq_printf(p, ", %s", action->name); 264 } 265 266 seq_putc(p, '\n'); 267 out: 268 raw_spin_unlock_irqrestore(&desc->lock, flags); 269 return 0; 270 } 271 272 /* 273 * /proc/stat helpers 274 */ 275 u64 arch_irq_stat_cpu(unsigned int cpu) 276 { 277 u64 sum = per_cpu(irq_stat, cpu).timer_irqs; 278 279 sum += per_cpu(irq_stat, cpu).pmu_irqs; 280 sum += per_cpu(irq_stat, cpu).mce_exceptions; 281 sum += per_cpu(irq_stat, cpu).spurious_irqs; 282 283 return sum; 284 } 285 286 #ifdef CONFIG_HOTPLUG_CPU 287 void fixup_irqs(const struct cpumask *map) 288 { 289 struct irq_desc *desc; 290 unsigned int irq; 291 static int warned; 292 cpumask_var_t mask; 293 294 alloc_cpumask_var(&mask, GFP_KERNEL); 295 296 for_each_irq(irq) { 297 desc = irq_to_desc(irq); 298 if (!desc) 299 continue; 300 301 if (desc->status & IRQ_PER_CPU) 302 continue; 303 304 cpumask_and(mask, desc->affinity, map); 305 if (cpumask_any(mask) >= nr_cpu_ids) { 306 printk("Breaking affinity for irq %i\n", irq); 307 cpumask_copy(mask, map); 308 } 309 if (desc->chip->set_affinity) 310 desc->chip->set_affinity(irq, mask); 311 else if (desc->action && !(warned++)) 312 printk("Cannot set affinity for irq %i\n", irq); 313 } 314 315 free_cpumask_var(mask); 316 317 local_irq_enable(); 318 mdelay(1); 319 local_irq_disable(); 320 } 321 #endif 322 323 static inline void handle_one_irq(unsigned int irq) 324 { 325 struct thread_info *curtp, *irqtp; 326 unsigned long saved_sp_limit; 327 struct irq_desc *desc; 328 329 /* Switch to the irq stack to handle this */ 330 curtp = current_thread_info(); 331 irqtp = hardirq_ctx[smp_processor_id()]; 332 333 if (curtp == irqtp) { 334 /* We're already on the irq stack, just handle it */ 335 generic_handle_irq(irq); 336 return; 337 } 338 339 desc = irq_to_desc(irq); 340 saved_sp_limit = current->thread.ksp_limit; 341 342 irqtp->task = curtp->task; 343 irqtp->flags = 0; 344 345 /* Copy the softirq bits in preempt_count so that the 346 * softirq checks work in the hardirq context. */ 347 irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) | 348 (curtp->preempt_count & SOFTIRQ_MASK); 349 350 current->thread.ksp_limit = (unsigned long)irqtp + 351 _ALIGN_UP(sizeof(struct thread_info), 16); 352 353 call_handle_irq(irq, desc, irqtp, desc->handle_irq); 354 current->thread.ksp_limit = saved_sp_limit; 355 irqtp->task = NULL; 356 357 /* Set any flag that may have been set on the 358 * alternate stack 359 */ 360 if (irqtp->flags) 361 set_bits(irqtp->flags, &curtp->flags); 362 } 363 364 static inline void check_stack_overflow(void) 365 { 366 #ifdef CONFIG_DEBUG_STACKOVERFLOW 367 long sp; 368 369 sp = __get_SP() & (THREAD_SIZE-1); 370 371 /* check for stack overflow: is there less than 2KB free? */ 372 if (unlikely(sp < (sizeof(struct thread_info) + 2048))) { 373 printk("do_IRQ: stack overflow: %ld\n", 374 sp - sizeof(struct thread_info)); 375 dump_stack(); 376 } 377 #endif 378 } 379 380 void do_IRQ(struct pt_regs *regs) 381 { 382 struct pt_regs *old_regs = set_irq_regs(regs); 383 unsigned int irq; 384 385 trace_irq_entry(regs); 386 387 irq_enter(); 388 389 check_stack_overflow(); 390 391 irq = ppc_md.get_irq(); 392 393 if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) 394 handle_one_irq(irq); 395 else if (irq != NO_IRQ_IGNORE) 396 __get_cpu_var(irq_stat).spurious_irqs++; 397 398 irq_exit(); 399 set_irq_regs(old_regs); 400 401 #ifdef CONFIG_PPC_ISERIES 402 if (firmware_has_feature(FW_FEATURE_ISERIES) && 403 get_lppaca()->int_dword.fields.decr_int) { 404 get_lppaca()->int_dword.fields.decr_int = 0; 405 /* Signal a fake decrementer interrupt */ 406 timer_interrupt(regs); 407 } 408 #endif 409 410 trace_irq_exit(regs); 411 } 412 413 void __init init_IRQ(void) 414 { 415 if (ppc_md.init_IRQ) 416 ppc_md.init_IRQ(); 417 418 exc_lvl_ctx_init(); 419 420 irq_ctx_init(); 421 } 422 423 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) 424 struct thread_info *critirq_ctx[NR_CPUS] __read_mostly; 425 struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly; 426 struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly; 427 428 void exc_lvl_ctx_init(void) 429 { 430 struct thread_info *tp; 431 int i; 432 433 for_each_possible_cpu(i) { 434 memset((void *)critirq_ctx[i], 0, THREAD_SIZE); 435 tp = critirq_ctx[i]; 436 tp->cpu = i; 437 tp->preempt_count = 0; 438 439 #ifdef CONFIG_BOOKE 440 memset((void *)dbgirq_ctx[i], 0, THREAD_SIZE); 441 tp = dbgirq_ctx[i]; 442 tp->cpu = i; 443 tp->preempt_count = 0; 444 445 memset((void *)mcheckirq_ctx[i], 0, THREAD_SIZE); 446 tp = mcheckirq_ctx[i]; 447 tp->cpu = i; 448 tp->preempt_count = HARDIRQ_OFFSET; 449 #endif 450 } 451 } 452 #endif 453 454 struct thread_info *softirq_ctx[NR_CPUS] __read_mostly; 455 struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly; 456 457 void irq_ctx_init(void) 458 { 459 struct thread_info *tp; 460 int i; 461 462 for_each_possible_cpu(i) { 463 memset((void *)softirq_ctx[i], 0, THREAD_SIZE); 464 tp = softirq_ctx[i]; 465 tp->cpu = i; 466 tp->preempt_count = 0; 467 468 memset((void *)hardirq_ctx[i], 0, THREAD_SIZE); 469 tp = hardirq_ctx[i]; 470 tp->cpu = i; 471 tp->preempt_count = HARDIRQ_OFFSET; 472 } 473 } 474 475 static inline void do_softirq_onstack(void) 476 { 477 struct thread_info *curtp, *irqtp; 478 unsigned long saved_sp_limit = current->thread.ksp_limit; 479 480 curtp = current_thread_info(); 481 irqtp = softirq_ctx[smp_processor_id()]; 482 irqtp->task = curtp->task; 483 current->thread.ksp_limit = (unsigned long)irqtp + 484 _ALIGN_UP(sizeof(struct thread_info), 16); 485 call_do_softirq(irqtp); 486 current->thread.ksp_limit = saved_sp_limit; 487 irqtp->task = NULL; 488 } 489 490 void do_softirq(void) 491 { 492 unsigned long flags; 493 494 if (in_interrupt()) 495 return; 496 497 local_irq_save(flags); 498 499 if (local_softirq_pending()) 500 do_softirq_onstack(); 501 502 local_irq_restore(flags); 503 } 504 505 506 /* 507 * IRQ controller and virtual interrupts 508 */ 509 510 static LIST_HEAD(irq_hosts); 511 static DEFINE_RAW_SPINLOCK(irq_big_lock); 512 static unsigned int revmap_trees_allocated; 513 static DEFINE_MUTEX(revmap_trees_mutex); 514 struct irq_map_entry irq_map[NR_IRQS]; 515 static unsigned int irq_virq_count = NR_IRQS; 516 static struct irq_host *irq_default_host; 517 518 irq_hw_number_t virq_to_hw(unsigned int virq) 519 { 520 return irq_map[virq].hwirq; 521 } 522 EXPORT_SYMBOL_GPL(virq_to_hw); 523 524 static int default_irq_host_match(struct irq_host *h, struct device_node *np) 525 { 526 return h->of_node != NULL && h->of_node == np; 527 } 528 529 struct irq_host *irq_alloc_host(struct device_node *of_node, 530 unsigned int revmap_type, 531 unsigned int revmap_arg, 532 struct irq_host_ops *ops, 533 irq_hw_number_t inval_irq) 534 { 535 struct irq_host *host; 536 unsigned int size = sizeof(struct irq_host); 537 unsigned int i; 538 unsigned int *rmap; 539 unsigned long flags; 540 541 /* Allocate structure and revmap table if using linear mapping */ 542 if (revmap_type == IRQ_HOST_MAP_LINEAR) 543 size += revmap_arg * sizeof(unsigned int); 544 host = zalloc_maybe_bootmem(size, GFP_KERNEL); 545 if (host == NULL) 546 return NULL; 547 548 /* Fill structure */ 549 host->revmap_type = revmap_type; 550 host->inval_irq = inval_irq; 551 host->ops = ops; 552 host->of_node = of_node_get(of_node); 553 554 if (host->ops->match == NULL) 555 host->ops->match = default_irq_host_match; 556 557 raw_spin_lock_irqsave(&irq_big_lock, flags); 558 559 /* If it's a legacy controller, check for duplicates and 560 * mark it as allocated (we use irq 0 host pointer for that 561 */ 562 if (revmap_type == IRQ_HOST_MAP_LEGACY) { 563 if (irq_map[0].host != NULL) { 564 raw_spin_unlock_irqrestore(&irq_big_lock, flags); 565 /* If we are early boot, we can't free the structure, 566 * too bad... 567 * this will be fixed once slab is made available early 568 * instead of the current cruft 569 */ 570 if (mem_init_done) 571 kfree(host); 572 return NULL; 573 } 574 irq_map[0].host = host; 575 } 576 577 list_add(&host->link, &irq_hosts); 578 raw_spin_unlock_irqrestore(&irq_big_lock, flags); 579 580 /* Additional setups per revmap type */ 581 switch(revmap_type) { 582 case IRQ_HOST_MAP_LEGACY: 583 /* 0 is always the invalid number for legacy */ 584 host->inval_irq = 0; 585 /* setup us as the host for all legacy interrupts */ 586 for (i = 1; i < NUM_ISA_INTERRUPTS; i++) { 587 irq_map[i].hwirq = i; 588 smp_wmb(); 589 irq_map[i].host = host; 590 smp_wmb(); 591 592 /* Clear norequest flags */ 593 irq_to_desc(i)->status &= ~IRQ_NOREQUEST; 594 595 /* Legacy flags are left to default at this point, 596 * one can then use irq_create_mapping() to 597 * explicitly change them 598 */ 599 ops->map(host, i, i); 600 } 601 break; 602 case IRQ_HOST_MAP_LINEAR: 603 rmap = (unsigned int *)(host + 1); 604 for (i = 0; i < revmap_arg; i++) 605 rmap[i] = NO_IRQ; 606 host->revmap_data.linear.size = revmap_arg; 607 smp_wmb(); 608 host->revmap_data.linear.revmap = rmap; 609 break; 610 default: 611 break; 612 } 613 614 pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host); 615 616 return host; 617 } 618 619 struct irq_host *irq_find_host(struct device_node *node) 620 { 621 struct irq_host *h, *found = NULL; 622 unsigned long flags; 623 624 /* We might want to match the legacy controller last since 625 * it might potentially be set to match all interrupts in 626 * the absence of a device node. This isn't a problem so far 627 * yet though... 628 */ 629 raw_spin_lock_irqsave(&irq_big_lock, flags); 630 list_for_each_entry(h, &irq_hosts, link) 631 if (h->ops->match(h, node)) { 632 found = h; 633 break; 634 } 635 raw_spin_unlock_irqrestore(&irq_big_lock, flags); 636 return found; 637 } 638 EXPORT_SYMBOL_GPL(irq_find_host); 639 640 void irq_set_default_host(struct irq_host *host) 641 { 642 pr_debug("irq: Default host set to @0x%p\n", host); 643 644 irq_default_host = host; 645 } 646 647 void irq_set_virq_count(unsigned int count) 648 { 649 pr_debug("irq: Trying to set virq count to %d\n", count); 650 651 BUG_ON(count < NUM_ISA_INTERRUPTS); 652 if (count < NR_IRQS) 653 irq_virq_count = count; 654 } 655 656 static int irq_setup_virq(struct irq_host *host, unsigned int virq, 657 irq_hw_number_t hwirq) 658 { 659 struct irq_desc *desc; 660 661 desc = irq_to_desc_alloc_node(virq, 0); 662 if (!desc) { 663 pr_debug("irq: -> allocating desc failed\n"); 664 goto error; 665 } 666 667 /* Clear IRQ_NOREQUEST flag */ 668 desc->status &= ~IRQ_NOREQUEST; 669 670 /* map it */ 671 smp_wmb(); 672 irq_map[virq].hwirq = hwirq; 673 smp_mb(); 674 675 if (host->ops->map(host, virq, hwirq)) { 676 pr_debug("irq: -> mapping failed, freeing\n"); 677 goto error; 678 } 679 680 return 0; 681 682 error: 683 irq_free_virt(virq, 1); 684 return -1; 685 } 686 687 unsigned int irq_create_direct_mapping(struct irq_host *host) 688 { 689 unsigned int virq; 690 691 if (host == NULL) 692 host = irq_default_host; 693 694 BUG_ON(host == NULL); 695 WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP); 696 697 virq = irq_alloc_virt(host, 1, 0); 698 if (virq == NO_IRQ) { 699 pr_debug("irq: create_direct virq allocation failed\n"); 700 return NO_IRQ; 701 } 702 703 pr_debug("irq: create_direct obtained virq %d\n", virq); 704 705 if (irq_setup_virq(host, virq, virq)) 706 return NO_IRQ; 707 708 return virq; 709 } 710 711 unsigned int irq_create_mapping(struct irq_host *host, 712 irq_hw_number_t hwirq) 713 { 714 unsigned int virq, hint; 715 716 pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq); 717 718 /* Look for default host if nececssary */ 719 if (host == NULL) 720 host = irq_default_host; 721 if (host == NULL) { 722 printk(KERN_WARNING "irq_create_mapping called for" 723 " NULL host, hwirq=%lx\n", hwirq); 724 WARN_ON(1); 725 return NO_IRQ; 726 } 727 pr_debug("irq: -> using host @%p\n", host); 728 729 /* Check if mapping already exist, if it does, call 730 * host->ops->map() to update the flags 731 */ 732 virq = irq_find_mapping(host, hwirq); 733 if (virq != NO_IRQ) { 734 if (host->ops->remap) 735 host->ops->remap(host, virq, hwirq); 736 pr_debug("irq: -> existing mapping on virq %d\n", virq); 737 return virq; 738 } 739 740 /* Get a virtual interrupt number */ 741 if (host->revmap_type == IRQ_HOST_MAP_LEGACY) { 742 /* Handle legacy */ 743 virq = (unsigned int)hwirq; 744 if (virq == 0 || virq >= NUM_ISA_INTERRUPTS) 745 return NO_IRQ; 746 return virq; 747 } else { 748 /* Allocate a virtual interrupt number */ 749 hint = hwirq % irq_virq_count; 750 virq = irq_alloc_virt(host, 1, hint); 751 if (virq == NO_IRQ) { 752 pr_debug("irq: -> virq allocation failed\n"); 753 return NO_IRQ; 754 } 755 } 756 757 if (irq_setup_virq(host, virq, hwirq)) 758 return NO_IRQ; 759 760 printk(KERN_DEBUG "irq: irq %lu on host %s mapped to virtual irq %u\n", 761 hwirq, host->of_node ? host->of_node->full_name : "null", virq); 762 763 return virq; 764 } 765 EXPORT_SYMBOL_GPL(irq_create_mapping); 766 767 unsigned int irq_create_of_mapping(struct device_node *controller, 768 const u32 *intspec, unsigned int intsize) 769 { 770 struct irq_host *host; 771 irq_hw_number_t hwirq; 772 unsigned int type = IRQ_TYPE_NONE; 773 unsigned int virq; 774 775 if (controller == NULL) 776 host = irq_default_host; 777 else 778 host = irq_find_host(controller); 779 if (host == NULL) { 780 printk(KERN_WARNING "irq: no irq host found for %s !\n", 781 controller->full_name); 782 return NO_IRQ; 783 } 784 785 /* If host has no translation, then we assume interrupt line */ 786 if (host->ops->xlate == NULL) 787 hwirq = intspec[0]; 788 else { 789 if (host->ops->xlate(host, controller, intspec, intsize, 790 &hwirq, &type)) 791 return NO_IRQ; 792 } 793 794 /* Create mapping */ 795 virq = irq_create_mapping(host, hwirq); 796 if (virq == NO_IRQ) 797 return virq; 798 799 /* Set type if specified and different than the current one */ 800 if (type != IRQ_TYPE_NONE && 801 type != (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK)) 802 set_irq_type(virq, type); 803 return virq; 804 } 805 EXPORT_SYMBOL_GPL(irq_create_of_mapping); 806 807 unsigned int irq_of_parse_and_map(struct device_node *dev, int index) 808 { 809 struct of_irq oirq; 810 811 if (of_irq_map_one(dev, index, &oirq)) 812 return NO_IRQ; 813 814 return irq_create_of_mapping(oirq.controller, oirq.specifier, 815 oirq.size); 816 } 817 EXPORT_SYMBOL_GPL(irq_of_parse_and_map); 818 819 void irq_dispose_mapping(unsigned int virq) 820 { 821 struct irq_host *host; 822 irq_hw_number_t hwirq; 823 824 if (virq == NO_IRQ) 825 return; 826 827 host = irq_map[virq].host; 828 WARN_ON (host == NULL); 829 if (host == NULL) 830 return; 831 832 /* Never unmap legacy interrupts */ 833 if (host->revmap_type == IRQ_HOST_MAP_LEGACY) 834 return; 835 836 /* remove chip and handler */ 837 set_irq_chip_and_handler(virq, NULL, NULL); 838 839 /* Make sure it's completed */ 840 synchronize_irq(virq); 841 842 /* Tell the PIC about it */ 843 if (host->ops->unmap) 844 host->ops->unmap(host, virq); 845 smp_mb(); 846 847 /* Clear reverse map */ 848 hwirq = irq_map[virq].hwirq; 849 switch(host->revmap_type) { 850 case IRQ_HOST_MAP_LINEAR: 851 if (hwirq < host->revmap_data.linear.size) 852 host->revmap_data.linear.revmap[hwirq] = NO_IRQ; 853 break; 854 case IRQ_HOST_MAP_TREE: 855 /* 856 * Check if radix tree allocated yet, if not then nothing to 857 * remove. 858 */ 859 smp_rmb(); 860 if (revmap_trees_allocated < 1) 861 break; 862 mutex_lock(&revmap_trees_mutex); 863 radix_tree_delete(&host->revmap_data.tree, hwirq); 864 mutex_unlock(&revmap_trees_mutex); 865 break; 866 } 867 868 /* Destroy map */ 869 smp_mb(); 870 irq_map[virq].hwirq = host->inval_irq; 871 872 /* Set some flags */ 873 irq_to_desc(virq)->status |= IRQ_NOREQUEST; 874 875 /* Free it */ 876 irq_free_virt(virq, 1); 877 } 878 EXPORT_SYMBOL_GPL(irq_dispose_mapping); 879 880 unsigned int irq_find_mapping(struct irq_host *host, 881 irq_hw_number_t hwirq) 882 { 883 unsigned int i; 884 unsigned int hint = hwirq % irq_virq_count; 885 886 /* Look for default host if nececssary */ 887 if (host == NULL) 888 host = irq_default_host; 889 if (host == NULL) 890 return NO_IRQ; 891 892 /* legacy -> bail early */ 893 if (host->revmap_type == IRQ_HOST_MAP_LEGACY) 894 return hwirq; 895 896 /* Slow path does a linear search of the map */ 897 if (hint < NUM_ISA_INTERRUPTS) 898 hint = NUM_ISA_INTERRUPTS; 899 i = hint; 900 do { 901 if (irq_map[i].host == host && 902 irq_map[i].hwirq == hwirq) 903 return i; 904 i++; 905 if (i >= irq_virq_count) 906 i = NUM_ISA_INTERRUPTS; 907 } while(i != hint); 908 return NO_IRQ; 909 } 910 EXPORT_SYMBOL_GPL(irq_find_mapping); 911 912 913 unsigned int irq_radix_revmap_lookup(struct irq_host *host, 914 irq_hw_number_t hwirq) 915 { 916 struct irq_map_entry *ptr; 917 unsigned int virq; 918 919 WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); 920 921 /* 922 * Check if the radix tree exists and has bee initialized. 923 * If not, we fallback to slow mode 924 */ 925 if (revmap_trees_allocated < 2) 926 return irq_find_mapping(host, hwirq); 927 928 /* Now try to resolve */ 929 /* 930 * No rcu_read_lock(ing) needed, the ptr returned can't go under us 931 * as it's referencing an entry in the static irq_map table. 932 */ 933 ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq); 934 935 /* 936 * If found in radix tree, then fine. 937 * Else fallback to linear lookup - this should not happen in practice 938 * as it means that we failed to insert the node in the radix tree. 939 */ 940 if (ptr) 941 virq = ptr - irq_map; 942 else 943 virq = irq_find_mapping(host, hwirq); 944 945 return virq; 946 } 947 948 void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq, 949 irq_hw_number_t hwirq) 950 { 951 952 WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); 953 954 /* 955 * Check if the radix tree exists yet. 956 * If not, then the irq will be inserted into the tree when it gets 957 * initialized. 958 */ 959 smp_rmb(); 960 if (revmap_trees_allocated < 1) 961 return; 962 963 if (virq != NO_IRQ) { 964 mutex_lock(&revmap_trees_mutex); 965 radix_tree_insert(&host->revmap_data.tree, hwirq, 966 &irq_map[virq]); 967 mutex_unlock(&revmap_trees_mutex); 968 } 969 } 970 971 unsigned int irq_linear_revmap(struct irq_host *host, 972 irq_hw_number_t hwirq) 973 { 974 unsigned int *revmap; 975 976 WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR); 977 978 /* Check revmap bounds */ 979 if (unlikely(hwirq >= host->revmap_data.linear.size)) 980 return irq_find_mapping(host, hwirq); 981 982 /* Check if revmap was allocated */ 983 revmap = host->revmap_data.linear.revmap; 984 if (unlikely(revmap == NULL)) 985 return irq_find_mapping(host, hwirq); 986 987 /* Fill up revmap with slow path if no mapping found */ 988 if (unlikely(revmap[hwirq] == NO_IRQ)) 989 revmap[hwirq] = irq_find_mapping(host, hwirq); 990 991 return revmap[hwirq]; 992 } 993 994 unsigned int irq_alloc_virt(struct irq_host *host, 995 unsigned int count, 996 unsigned int hint) 997 { 998 unsigned long flags; 999 unsigned int i, j, found = NO_IRQ; 1000 1001 if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS)) 1002 return NO_IRQ; 1003 1004 raw_spin_lock_irqsave(&irq_big_lock, flags); 1005 1006 /* Use hint for 1 interrupt if any */ 1007 if (count == 1 && hint >= NUM_ISA_INTERRUPTS && 1008 hint < irq_virq_count && irq_map[hint].host == NULL) { 1009 found = hint; 1010 goto hint_found; 1011 } 1012 1013 /* Look for count consecutive numbers in the allocatable 1014 * (non-legacy) space 1015 */ 1016 for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) { 1017 if (irq_map[i].host != NULL) 1018 j = 0; 1019 else 1020 j++; 1021 1022 if (j == count) { 1023 found = i - count + 1; 1024 break; 1025 } 1026 } 1027 if (found == NO_IRQ) { 1028 raw_spin_unlock_irqrestore(&irq_big_lock, flags); 1029 return NO_IRQ; 1030 } 1031 hint_found: 1032 for (i = found; i < (found + count); i++) { 1033 irq_map[i].hwirq = host->inval_irq; 1034 smp_wmb(); 1035 irq_map[i].host = host; 1036 } 1037 raw_spin_unlock_irqrestore(&irq_big_lock, flags); 1038 return found; 1039 } 1040 1041 void irq_free_virt(unsigned int virq, unsigned int count) 1042 { 1043 unsigned long flags; 1044 unsigned int i; 1045 1046 WARN_ON (virq < NUM_ISA_INTERRUPTS); 1047 WARN_ON (count == 0 || (virq + count) > irq_virq_count); 1048 1049 raw_spin_lock_irqsave(&irq_big_lock, flags); 1050 for (i = virq; i < (virq + count); i++) { 1051 struct irq_host *host; 1052 1053 if (i < NUM_ISA_INTERRUPTS || 1054 (virq + count) > irq_virq_count) 1055 continue; 1056 1057 host = irq_map[i].host; 1058 irq_map[i].hwirq = host->inval_irq; 1059 smp_wmb(); 1060 irq_map[i].host = NULL; 1061 } 1062 raw_spin_unlock_irqrestore(&irq_big_lock, flags); 1063 } 1064 1065 int arch_early_irq_init(void) 1066 { 1067 struct irq_desc *desc; 1068 int i; 1069 1070 for (i = 0; i < NR_IRQS; i++) { 1071 desc = irq_to_desc(i); 1072 if (desc) 1073 desc->status |= IRQ_NOREQUEST; 1074 } 1075 1076 return 0; 1077 } 1078 1079 int arch_init_chip_data(struct irq_desc *desc, int node) 1080 { 1081 desc->status |= IRQ_NOREQUEST; 1082 return 0; 1083 } 1084 1085 /* We need to create the radix trees late */ 1086 static int irq_late_init(void) 1087 { 1088 struct irq_host *h; 1089 unsigned int i; 1090 1091 /* 1092 * No mutual exclusion with respect to accessors of the tree is needed 1093 * here as the synchronization is done via the state variable 1094 * revmap_trees_allocated. 1095 */ 1096 list_for_each_entry(h, &irq_hosts, link) { 1097 if (h->revmap_type == IRQ_HOST_MAP_TREE) 1098 INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL); 1099 } 1100 1101 /* 1102 * Make sure the radix trees inits are visible before setting 1103 * the flag 1104 */ 1105 smp_wmb(); 1106 revmap_trees_allocated = 1; 1107 1108 /* 1109 * Insert the reverse mapping for those interrupts already present 1110 * in irq_map[]. 1111 */ 1112 mutex_lock(&revmap_trees_mutex); 1113 for (i = 0; i < irq_virq_count; i++) { 1114 if (irq_map[i].host && 1115 (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE)) 1116 radix_tree_insert(&irq_map[i].host->revmap_data.tree, 1117 irq_map[i].hwirq, &irq_map[i]); 1118 } 1119 mutex_unlock(&revmap_trees_mutex); 1120 1121 /* 1122 * Make sure the radix trees insertions are visible before setting 1123 * the flag 1124 */ 1125 smp_wmb(); 1126 revmap_trees_allocated = 2; 1127 1128 return 0; 1129 } 1130 arch_initcall(irq_late_init); 1131 1132 #ifdef CONFIG_VIRQ_DEBUG 1133 static int virq_debug_show(struct seq_file *m, void *private) 1134 { 1135 unsigned long flags; 1136 struct irq_desc *desc; 1137 const char *p; 1138 char none[] = "none"; 1139 int i; 1140 1141 seq_printf(m, "%-5s %-7s %-15s %s\n", "virq", "hwirq", 1142 "chip name", "host name"); 1143 1144 for (i = 1; i < nr_irqs; i++) { 1145 desc = irq_to_desc(i); 1146 if (!desc) 1147 continue; 1148 1149 raw_spin_lock_irqsave(&desc->lock, flags); 1150 1151 if (desc->action && desc->action->handler) { 1152 seq_printf(m, "%5d ", i); 1153 seq_printf(m, "0x%05lx ", virq_to_hw(i)); 1154 1155 if (desc->chip && desc->chip->name) 1156 p = desc->chip->name; 1157 else 1158 p = none; 1159 seq_printf(m, "%-15s ", p); 1160 1161 if (irq_map[i].host && irq_map[i].host->of_node) 1162 p = irq_map[i].host->of_node->full_name; 1163 else 1164 p = none; 1165 seq_printf(m, "%s\n", p); 1166 } 1167 1168 raw_spin_unlock_irqrestore(&desc->lock, flags); 1169 } 1170 1171 return 0; 1172 } 1173 1174 static int virq_debug_open(struct inode *inode, struct file *file) 1175 { 1176 return single_open(file, virq_debug_show, inode->i_private); 1177 } 1178 1179 static const struct file_operations virq_debug_fops = { 1180 .open = virq_debug_open, 1181 .read = seq_read, 1182 .llseek = seq_lseek, 1183 .release = single_release, 1184 }; 1185 1186 static int __init irq_debugfs_init(void) 1187 { 1188 if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root, 1189 NULL, &virq_debug_fops) == NULL) 1190 return -ENOMEM; 1191 1192 return 0; 1193 } 1194 __initcall(irq_debugfs_init); 1195 #endif /* CONFIG_VIRQ_DEBUG */ 1196 1197 #ifdef CONFIG_PPC64 1198 static int __init setup_noirqdistrib(char *str) 1199 { 1200 distribute_irqs = 0; 1201 return 1; 1202 } 1203 1204 __setup("noirqdistrib", setup_noirqdistrib); 1205 #endif /* CONFIG_PPC64 */ 1206