/*
 * Derived from arch/i386/kernel/irq.c
 * Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 * Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them.  Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */

#include <linux/module.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#ifdef CONFIG_PPC_ISERIES
#include <asm/paca.h>
#endif

int __irq_offset_value;
#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
#endif

static int ppc_spurious_interrupts;

#ifdef CONFIG_PPC32
#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)

unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif

#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
extern atomic_t ipi_recv;
extern atomic_t ipi_sent;
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64
EXPORT_SYMBOL(irq_desc);

int distribute_irqs = 1;
u64 ppc64_interrupt_controller;
#endif /* CONFIG_PPC64 */

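/*
 * show_interrupts() below implements /proc/interrupts through the
 * seq_file interface.  Illustrative output (the numbers, controller
 * and device names here are made up; spacing is approximate):
 *
 *	     CPU0 CPU1
 *	 16:  12345  6789  OpenPIC  Level  eth0
 *	 17:    310     0  OpenPIC  Edge   ide0
 *	BAD:          0
 */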
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *)v, j;
	struct irqaction *action;
	irq_desc_t *desc;
	unsigned long flags;

	if (i == 0) {
		seq_puts(p, " ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d ", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		desc = get_irq_desc(i);
		spin_lock_irqsave(&desc->lock, flags);
		action = desc->action;
		if (!action || !action->handler)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#else
		seq_printf(p, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
		if (desc->handler)
			seq_printf(p, " %s ", desc->handler->typename);
		else
			seq_puts(p, " None ");
		seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge ");
		seq_printf(p, " %s", action->name);
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&desc->lock, flags);
	} else if (i == NR_IRQS) {
#ifdef CONFIG_PPC32
#ifdef CONFIG_TAU_INT
		if (tau_initialized) {
			seq_puts(p, "TAU: ");
			for_each_online_cpu(j)
				seq_printf(p, "%10u ", tau_interrupts(j));
			seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
		}
#endif
#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
		/* should this be per processor send/receive? */
		seq_printf(p, "IPI (recv/sent): %10u/%u\n",
			   atomic_read(&ipi_recv), atomic_read(&ipi_sent));
#endif
#endif /* CONFIG_PPC32 */
		seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
	}
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Re-route interrupts away from CPUs that are leaving the online map;
 * "map" is the set of CPUs that will remain online.
 */
void fixup_irqs(cpumask_t map)
{
	unsigned int irq;
	static int warned;

	for_each_irq(irq) {
		cpumask_t mask;

		if (irq_desc[irq].status & IRQ_PER_CPU)
			continue;

		cpus_and(mask, irq_affinity[irq], map);
		if (any_online_cpu(mask) == NR_CPUS) {
			printk("Breaking affinity for irq %i\n", irq);
			mask = map;
		}
		if (irq_desc[irq].handler->set_affinity)
			irq_desc[irq].handler->set_affinity(irq, mask);
		else if (irq_desc[irq].action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
	}

	/* let any pending interrupts be delivered before we return */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();
}
#endif

void do_IRQ(struct pt_regs *regs)
{
	int irq;
#ifdef CONFIG_IRQSTACKS
	struct thread_info *curtp, *irqtp;
#endif

	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: is there less than 2KB free? */
	{
		long sp;

		sp = __get_SP() & (THREAD_SIZE-1);

		if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
			printk("do_IRQ: stack overflow: %ld\n",
				sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif

	/*
	 * Every platform is required to implement ppc_md.get_irq.
	 * This function will either return an irq number or -1 to
	 * indicate there are no more pending interrupts.
	 * The value -2 is for buggy hardware and means that this IRQ
	 * has already been handled. -- Tom
	 */
	irq = ppc_md.get_irq(regs);

	if (irq >= 0) {
#ifdef CONFIG_IRQSTACKS
		/* Switch to the irq stack to handle this */
		curtp = current_thread_info();
		irqtp = hardirq_ctx[smp_processor_id()];
		if (curtp != irqtp) {
			irqtp->task = curtp->task;
			irqtp->flags = 0;
			call___do_IRQ(irq, regs, irqtp);
			irqtp->task = NULL;
			/* propagate flag bits set while on the irq stack */
			if (irqtp->flags)
				set_bits(irqtp->flags, &curtp->flags);
		} else
#endif
			__do_IRQ(irq, regs);
	} else if (irq != -2)
		/* That's not SMP safe ... but who cares? */
		ppc_spurious_interrupts++;

	irq_exit();

#ifdef CONFIG_PPC_ISERIES
	if (get_lppaca()->int_dword.fields.decr_int) {
		get_lppaca()->int_dword.fields.decr_int = 0;
		/* Signal a fake decrementer interrupt */
		timer_interrupt(regs);
	}
#endif
}

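/*
 * Rough call flow for an external interrupt, summarizing the code above:
 *
 *	exception entry
 *	  do_IRQ()
 *	    ppc_md.get_irq()	platform code identifies the pending irq
 *	    call___do_IRQ()	switch to the per-cpu hardirq stack first,
 *				if CONFIG_IRQSTACKS and not already on it
 *	    __do_IRQ()		generic dispatch to the irqaction handlers
 *	  irq_exit()		may run pending softirqs (see do_softirq)
 */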
void __init init_IRQ(void)
{
#ifdef CONFIG_PPC64
	static int once = 0;

	/* guard against being called a second time on ppc64 */
	if (once)
		return;

	once++;
#endif
	ppc_md.init_IRQ();
#ifdef CONFIG_PPC64
	irq_ctx_init();
#endif
}

#ifdef CONFIG_PPC64
/*
 * Virtual IRQ mapping code, used on systems with XICS interrupt controllers.
 */

#define UNDEFINED_IRQ 0xffffffff
unsigned int virt_irq_to_real_map[NR_IRQS];

/*
 * Don't use virtual irqs 0, 1, 2 for devices.
 * The pcnet32 driver considers interrupt numbers < 2 to be invalid,
 * and 2 is the XICS IPI interrupt.
 * We limit virtual irqs to __irq_offset_value less than virt_irq_max so
 * that when we offset them we don't end up with an interrupt
 * number >= virt_irq_max.
 */
#define MIN_VIRT_IRQ	3

unsigned int virt_irq_max;
static unsigned int max_virt_irq;
static unsigned int nr_virt_irqs;

void
virt_irq_init(void)
{
	int i;

	if ((virt_irq_max == 0) || (virt_irq_max > (NR_IRQS - 1)))
		virt_irq_max = NR_IRQS - 1;
	max_virt_irq = virt_irq_max - __irq_offset_value;
	nr_virt_irqs = max_virt_irq - MIN_VIRT_IRQ + 1;

	for (i = 0; i < NR_IRQS; i++)
		virt_irq_to_real_map[i] = UNDEFINED_IRQ;
}

/* Create a mapping for a real_irq if it doesn't already exist.
 * Return the virtual irq as a convenience.
 */
int virt_irq_create_mapping(unsigned int real_irq)
{
	unsigned int virq, first_virq;
	static int warned;

	if (ppc64_interrupt_controller == IC_OPEN_PIC)
		return real_irq;	/* no mapping for openpic (for now) */

	if (ppc64_interrupt_controller == IC_CELL_PIC)
		return real_irq;	/* no mapping for iic either */

	/* don't map interrupts < MIN_VIRT_IRQ */
	if (real_irq < MIN_VIRT_IRQ) {
		virt_irq_to_real_map[real_irq] = real_irq;
		return real_irq;
	}

	/* map to a number between MIN_VIRT_IRQ and max_virt_irq */
	virq = real_irq;
	if (virq > max_virt_irq)
		virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ;

	/* search for this number or a free slot */
	first_virq = virq;
	while (virt_irq_to_real_map[virq] != UNDEFINED_IRQ) {
		if (virt_irq_to_real_map[virq] == real_irq)
			return virq;
		if (++virq > max_virt_irq)
			virq = MIN_VIRT_IRQ;
		if (virq == first_virq)
			goto nospace;	/* oops, no free slots */
	}

	virt_irq_to_real_map[virq] = real_irq;
	return virq;

 nospace:
	if (!warned) {
		printk(KERN_CRIT "Interrupt table is full\n");
		printk(KERN_CRIT "Increase virt_irq_max (currently %d) "
		       "in your kernel sources and rebuild.\n", virt_irq_max);
		warned = 1;
	}
	return NO_IRQ;
}

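/*
 * Worked example of the hashing above, with made-up numbers: assume
 * NR_IRQS = 512, virt_irq_max = 511 and __irq_offset_value = 16, so
 * max_virt_irq = 495 and nr_virt_irqs = 493.  A real_irq of 4660
 * (> max_virt_irq) first hashes to (4660 % 493) + 3 = 226; if slot 226
 * already holds a different real_irq, the search probes 227, 228, ...
 * linearly, wrapping from max_virt_irq back to MIN_VIRT_IRQ, until it
 * finds the same real_irq or a free slot.
 */
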
/*
 * In most cases we will get a hit on the very first slot checked in
 * virt_irq_to_real_map.  Only when there are a large number of
 * IRQs will this be expensive.
 */
unsigned int real_irq_to_virt_slowpath(unsigned int real_irq)
{
	unsigned int virq;
	unsigned int first_virq;

	virq = real_irq;

	if (virq > max_virt_irq)
		virq = (virq % nr_virt_irqs) + MIN_VIRT_IRQ;

	first_virq = virq;

	do {
		if (virt_irq_to_real_map[virq] == real_irq)
			return virq;

		virq++;

		/*
		 * Wrap to 0 (not MIN_VIRT_IRQ) so the identity-mapped
		 * low slots are scanned too.  Slot max_virt_irq itself
		 * is a valid probe target (see virt_irq_create_mapping),
		 * so only wrap once we have gone past it.
		 */
		if (virq > max_virt_irq)
			virq = 0;
	} while (first_virq != virq);

	return NO_IRQ;
}
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_IRQSTACKS
struct thread_info *softirq_ctx[NR_CPUS];
struct thread_info *hardirq_ctx[NR_CPUS];

void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = SOFTIRQ_OFFSET;

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
	}
}

static inline void do_softirq_onstack(void)
{
	struct thread_info *curtp, *irqtp;

	curtp = current_thread_info();
	irqtp = softirq_ctx[smp_processor_id()];
	irqtp->task = curtp->task;
	call_do_softirq(irqtp);
	irqtp->task = NULL;
}

#else
#define do_softirq_onstack()	__do_softirq()
#endif /* CONFIG_IRQSTACKS */

void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		account_system_vtime(current);
		local_bh_disable();
		do_softirq_onstack();
		account_system_vtime(current);
		__local_bh_enable();
	}

	local_irq_restore(flags);
}
EXPORT_SYMBOL(do_softirq);

#ifdef CONFIG_PPC64
/* "noirqdistrib" on the command line stops irqs being spread across cpus */
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */
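
/*
 * Usage sketch (hypothetical driver code): as the header comment says,
 * drivers ask for these interrupts through the generic routines rather
 * than grabbing them directly, using the handler signature this
 * kernel's __do_IRQ() expects, e.g.
 *
 *	static irqreturn_t my_intr(int irq, void *dev_id, struct pt_regs *regs)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_irq(virq, my_intr, 0, "mydev", NULL);
 *
 * where virq on ppc64 is a virtual number from virt_irq_create_mapping();
 * "my_intr" and "mydev" are made-up names for illustration.
 */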