/*
 *  arch/ppc/kernel/irq.c
 *
 *  Derived from arch/i386/kernel/irq.c
 *    Copyright (C) 1992 Linus Torvalds
 *  Adapted from arch/i386 by Gary Thomas
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *    Copyright (C) 1996-2001 Cort Dougan
 *  Adapted for Power Macintosh by Paul Mackerras
 *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them.  Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#ifdef CONFIG_PPC64
#include <linux/kallsyms.h>
#endif

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#ifdef CONFIG_PPC64
#include <asm/iseries/it_lp_queue.h>
#include <asm/paca.h>
#endif

int __irq_offset_value;
#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
#endif

static int ppc_spurious_interrupts;

#if defined(CONFIG_PPC_ISERIES) && defined(CONFIG_SMP)
extern void iSeries_smp_message_recv(struct pt_regs *);
#endif

#ifdef CONFIG_PPC32
#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)

unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif

#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
extern atomic_t ipi_recv;
extern atomic_t ipi_sent;
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64
EXPORT_SYMBOL(irq_desc);

int distribute_irqs = 1;
u64 ppc64_interrupt_controller;
#endif /* CONFIG_PPC64 */
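/*
 * Note: show_interrupts() below is the seq_file ->show() method that
 * backs /proc/interrupts: one row per active IRQ, with a count column
 * for each online cpu, followed by summary rows (TAU, IPI, BAD) emitted
 * once the iterator reaches i == NR_IRQS.
 */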
int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *)v, j;
        struct irqaction *action;
        irq_desc_t *desc;
        unsigned long flags;

        if (i == 0) {
                seq_puts(p, " ");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%d ", j);
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                desc = get_irq_desc(i);
                spin_lock_irqsave(&desc->lock, flags);
                action = desc->action;
                if (!action || !action->handler)
                        goto skip;
                seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#else
                seq_printf(p, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
                if (desc->handler)
                        seq_printf(p, " %s ", desc->handler->typename);
                else
                        seq_puts(p, " None ");
                seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge ");
                seq_printf(p, " %s", action->name);
                for (action = action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);
                seq_putc(p, '\n');
skip:
                spin_unlock_irqrestore(&desc->lock, flags);
        } else if (i == NR_IRQS) {
#ifdef CONFIG_PPC32
#ifdef CONFIG_TAU_INT
                if (tau_initialized) {
                        seq_puts(p, "TAU: ");
                        for (j = 0; j < NR_CPUS; j++)
                                if (cpu_online(j))
                                        seq_printf(p, "%10u ", tau_interrupts(j));
                        seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
                }
#endif
#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
                /* should this be per processor send/receive? */
                seq_printf(p, "IPI (recv/sent): %10u/%u\n",
                           atomic_read(&ipi_recv), atomic_read(&ipi_sent));
#endif
#endif /* CONFIG_PPC32 */
                seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
        }
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(cpumask_t map)
{
        unsigned int irq;
        static int warned;

        for_each_irq(irq) {
                cpumask_t mask;

                if (irq_desc[irq].status & IRQ_PER_CPU)
                        continue;

                cpus_and(mask, irq_affinity[irq], map);
                if (any_online_cpu(mask) == NR_CPUS) {
                        printk("Breaking affinity for irq %i\n", irq);
                        mask = map;
                }
                if (irq_desc[irq].handler->set_affinity)
                        irq_desc[irq].handler->set_affinity(irq, mask);
                else if (irq_desc[irq].action && !(warned++))
                        printk("Cannot set affinity for irq %i\n", irq);
        }

        local_irq_enable();
        mdelay(1);
        local_irq_disable();
}
#endif
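/*
 * Two do_IRQ() implementations follow.  Legacy iSeries has no directly
 * visible interrupt controller: the hypervisor posts events through the
 * lpaca, so that variant polls the pending IPI and decrementer flags
 * and drains the hvlpevent queue.  All other platforms ask
 * ppc_md.get_irq() which interrupt is pending and dispatch it through
 * the generic __do_IRQ() path, optionally on a dedicated irq stack.
 */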
#ifdef CONFIG_PPC_ISERIES
void do_IRQ(struct pt_regs *regs)
{
        struct paca_struct *lpaca;

        irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
        /* Debugging check for stack overflow: is there less than 2KB free? */
        {
                long sp;

                sp = __get_SP() & (THREAD_SIZE-1);

                if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
                        printk("do_IRQ: stack overflow: %ld\n",
                               sp - sizeof(struct thread_info));
                        dump_stack();
                }
        }
#endif

        lpaca = get_paca();
#ifdef CONFIG_SMP
        if (lpaca->lppaca.int_dword.fields.ipi_cnt) {
                lpaca->lppaca.int_dword.fields.ipi_cnt = 0;
                iSeries_smp_message_recv(regs);
        }
#endif /* CONFIG_SMP */
        if (hvlpevent_is_pending())
                process_hvlpevents(regs);

        irq_exit();

        if (lpaca->lppaca.int_dword.fields.decr_int) {
                lpaca->lppaca.int_dword.fields.decr_int = 0;
                /* Signal a fake decrementer interrupt */
                timer_interrupt(regs);
        }
}

#else /* CONFIG_PPC_ISERIES */

void do_IRQ(struct pt_regs *regs)
{
        int irq;
#ifdef CONFIG_IRQSTACKS
        struct thread_info *curtp, *irqtp;
#endif

        irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
        /* Debugging check for stack overflow: is there less than 2KB free? */
        {
                long sp;

                sp = __get_SP() & (THREAD_SIZE-1);

                if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
                        printk("do_IRQ: stack overflow: %ld\n",
                               sp - sizeof(struct thread_info));
                        dump_stack();
                }
        }
#endif

        /*
         * Every platform is required to implement ppc_md.get_irq.
         * This function will either return an irq number or -1 to
         * indicate there are no more pending.
         * The value -2 is for buggy hardware and means that this IRQ
         * has already been handled. -- Tom
         */
        irq = ppc_md.get_irq(regs);

        if (irq >= 0) {
#ifdef CONFIG_IRQSTACKS
                /* Switch to the irq stack to handle this */
                curtp = current_thread_info();
                irqtp = hardirq_ctx[smp_processor_id()];
                if (curtp != irqtp) {
                        irqtp->task = curtp->task;
                        irqtp->flags = 0;
                        call___do_IRQ(irq, regs, irqtp);
                        irqtp->task = NULL;
                        if (irqtp->flags)
                                set_bits(irqtp->flags, &curtp->flags);
                } else
#endif
                        __do_IRQ(irq, regs);
        } else
#ifdef CONFIG_PPC32
                if (irq != -2)
#endif
                        /* That's not SMP safe ... but who cares ? */
                        ppc_spurious_interrupts++;

        irq_exit();
}

#endif /* CONFIG_PPC_ISERIES */

void __init init_IRQ(void)
{
#ifdef CONFIG_PPC64
        /* make repeat calls a no-op on 64-bit */
        static int once = 0;

        if (once)
                return;

        once++;
#endif
        ppc_md.init_IRQ();
#ifdef CONFIG_PPC64
        irq_ctx_init();
#endif
}

#ifdef CONFIG_PPC64
/*
 * Virtual IRQ mapping code, used on systems with XICS interrupt controllers.
 */

#define UNDEFINED_IRQ 0xffffffff
unsigned int virt_irq_to_real_map[NR_IRQS];

/*
 * Don't use virtual irqs 0, 1, 2 for devices.
 * The pcnet32 driver considers interrupt numbers < 2 to be invalid,
 * and 2 is the XICS IPI interrupt.
 * We limit virtual irqs to 17 less than NR_IRQS so that when we
 * offset them by 16 (to reserve the first 16 for ISA interrupts)
 * we don't end up with an interrupt number >= NR_IRQS.
 */
#define MIN_VIRT_IRQ	3
#define MAX_VIRT_IRQ	(NR_IRQS - NUM_ISA_INTERRUPTS - 1)
#define NR_VIRT_IRQS	(MAX_VIRT_IRQ - MIN_VIRT_IRQ + 1)

void
virt_irq_init(void)
{
        int i;
        for (i = 0; i < NR_IRQS; i++)
                virt_irq_to_real_map[i] = UNDEFINED_IRQ;
}
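/*
 * The table is effectively an open-addressed hash with linear probing:
 * real irq numbers that fit the virtual range are used as-is, larger
 * ones are folded into [MIN_VIRT_IRQ, MAX_VIRT_IRQ] and probed upwards
 * from there.  A worked example, assuming NR_IRQS = 512 and
 * NUM_ISA_INTERRUPTS = 16 (so MAX_VIRT_IRQ = 495, NR_VIRT_IRQS = 493):
 * real_irq 1000 first probes virq (1000 % 493) + 3 = 17, then 18, 19,
 * and so on until a free or matching slot turns up.
 */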
/*
 * Create a mapping for a real_irq if it doesn't already exist.
 * Return the virtual irq as a convenience.
 */
int virt_irq_create_mapping(unsigned int real_irq)
{
        unsigned int virq, first_virq;
        static int warned;

        if (ppc64_interrupt_controller == IC_OPEN_PIC)
                return real_irq;	/* no mapping for openpic (for now) */

        if (ppc64_interrupt_controller == IC_CELL_PIC)
                return real_irq;	/* no mapping for iic either */

        /* don't map interrupts < MIN_VIRT_IRQ */
        if (real_irq < MIN_VIRT_IRQ) {
                virt_irq_to_real_map[real_irq] = real_irq;
                return real_irq;
        }

        /* map to a number between MIN_VIRT_IRQ and MAX_VIRT_IRQ */
        virq = real_irq;
        if (virq > MAX_VIRT_IRQ)
                virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ;

        /* search for this number or a free slot */
        first_virq = virq;
        while (virt_irq_to_real_map[virq] != UNDEFINED_IRQ) {
                if (virt_irq_to_real_map[virq] == real_irq)
                        return virq;
                if (++virq > MAX_VIRT_IRQ)
                        virq = MIN_VIRT_IRQ;
                if (virq == first_virq)
                        goto nospace;	/* oops, no free slots */
        }

        virt_irq_to_real_map[virq] = real_irq;
        return virq;

nospace:
        if (!warned) {
                printk(KERN_CRIT "Interrupt table is full\n");
                printk(KERN_CRIT "Increase NR_IRQS (currently %d) "
                       "in your kernel sources and rebuild.\n", NR_IRQS);
                warned = 1;
        }
        return NO_IRQ;
}

/*
 * In most cases this will get a hit on the very first slot checked in
 * virt_irq_to_real_map.  Only when there are a large number of IRQs
 * will the probe be expensive.
 */
unsigned int real_irq_to_virt_slowpath(unsigned int real_irq)
{
        unsigned int virq;
        unsigned int first_virq;

        virq = real_irq;

        if (virq > MAX_VIRT_IRQ)
                virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ;

        first_virq = virq;

        do {
                if (virt_irq_to_real_map[virq] == real_irq)
                        return virq;

                /* wrap the probe exactly as virt_irq_create_mapping()
                 * does, so a mapping stored at MAX_VIRT_IRQ is found too */
                if (++virq > MAX_VIRT_IRQ)
                        virq = MIN_VIRT_IRQ;
        } while (first_virq != virq);

        return NO_IRQ;
}

#ifdef CONFIG_IRQSTACKS
struct thread_info *softirq_ctx[NR_CPUS];
struct thread_info *hardirq_ctx[NR_CPUS];

void irq_ctx_init(void)
{
        struct thread_info *tp;
        int i;

        for_each_cpu(i) {
                memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
                tp = softirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = SOFTIRQ_OFFSET;

                memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
                tp = hardirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = HARDIRQ_OFFSET;
        }
}

void do_softirq(void)
{
        unsigned long flags;
        struct thread_info *curtp, *irqtp;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending()) {
                curtp = current_thread_info();
                irqtp = softirq_ctx[smp_processor_id()];
                irqtp->task = curtp->task;
                call_do_softirq(irqtp);
                irqtp->task = NULL;
        }

        local_irq_restore(flags);
}
EXPORT_SYMBOL(do_softirq);

#endif /* CONFIG_IRQSTACKS */

/* "noirqdistrib" on the kernel command line clears distribute_irqs,
 * which the platform interrupt-controller setup consults when deciding
 * whether to spread device interrupts across cpus. */
static int __init setup_noirqdistrib(char *str)
{
        distribute_irqs = 0;
        return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */