/* arch/sparc/kernel/irq.c:  Interrupt request handling routines. On the
 * Sparc the IRQs are basically 'cast in stone'
 * and you are supposed to probe the prom's device
 * node trees to find out who's got which IRQ.
 *
 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
 *  Copyright (C) 1995,2002 Pete A. Zaitcev (zaitcev@yahoo.com)
 *  Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
 *  Copyright (C) 1998-2000 Anton Blanchard (anton@samba.org)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/psr.h>
#include <asm/smp.h>
#include <asm/vaddrs.h>
#include <asm/timer.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/traps.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/pcic.h>
#include <asm/cacheflush.h>
#include <asm/irq_regs.h>

#include "kernel.h"
#include "irq.h"

/* On SMP, extra nops are inserted between the %psr read and the
 * following use (Sun4m + Cypress + SMP bug, per the comments below);
 * on UP they expand to nothing.
 */
#ifdef CONFIG_SMP
#define SMP_NOP2 "nop; nop;\n\t"
#define SMP_NOP3 "nop; nop; nop;\n\t"
#endif /* SMP */
#ifndef CONFIG_SMP
#define SMP_NOP2
#define SMP_NOP3
#endif
/* Disable local interrupt delivery by setting every PSR_PIL bit in
 * %psr, and return the previous %psr value for a later
 * raw_local_irq_restore().
 */
unsigned long __raw_local_irq_save(void)
{
	unsigned long retval;
	unsigned long tmp;

	__asm__ __volatile__(
		"rd	%%psr, %0\n\t"
		SMP_NOP3	/* Sun4m + Cypress + SMP bug */
		"or	%0, %2, %1\n\t"		/* raise all PSR_PIL bits */
		"wr	%1, 0, %%psr\n\t"
		"nop; nop; nop\n"		/* let the %psr write settle */
		: "=&r" (retval), "=r" (tmp)
		: "i" (PSR_PIL)
		: "memory");

	return retval;
}

/* Re-enable local interrupt delivery by clearing the PSR_PIL field
 * of %psr.
 */
void raw_local_irq_enable(void)
{
	unsigned long tmp;

	__asm__ __volatile__(
		"rd	%%psr, %0\n\t"
		SMP_NOP3	/* Sun4m + Cypress + SMP bug */
		"andn	%0, %1, %0\n\t"		/* clear PSR_PIL bits */
		"wr	%0, 0, %%psr\n\t"
		"nop; nop; nop\n"
		: "=&r" (tmp)
		: "i" (PSR_PIL)
		: "memory");
}

/* Restore the PSR_PIL field of %psr from a value previously returned
 * by __raw_local_irq_save(); other %psr bits keep their current value.
 */
void raw_local_irq_restore(unsigned long old_psr)
{
	unsigned long tmp;

	__asm__ __volatile__(
		"rd	%%psr, %0\n\t"
		"and	%2, %1, %2\n\t"		/* isolate PIL bits of old_psr */
		SMP_NOP2	/* Sun4m + Cypress + SMP bug */
		"andn	%0, %1, %0\n\t"		/* clear current PIL */
		"wr	%0, %2, %%psr\n\t"	/* wr xors its operands; PIL was cleared, so this ors old PIL back in */
		"nop; nop; nop\n"
		: "=&r" (tmp)
		: "i" (PSR_PIL), "r" (old_psr)
		: "memory");
}

EXPORT_SYMBOL(__raw_local_irq_save);
EXPORT_SYMBOL(raw_local_irq_enable);
EXPORT_SYMBOL(raw_local_irq_restore);

/*
 * Dave Redman (djhr@tadpole.co.uk)
 *
 * IRQ numbers.. These are no longer restricted to 15..
 *
 * this is done to enable SBUS cards and onboard IO to be masked
 * correctly. using the interrupt level isn't good enough.
 *
 * For example:
 *   A device interrupting at sbus level6 and the Floppy both come in
 *   at IRQ11, but enabling and disabling them requires writing to
 *   different bits in the SLAVIO/SEC.
 *
 * As a result of these changes sun4m machines could now support
 * directed CPU interrupts using the existing enable/disable irq code
 * with tweaks.
 *
 */

/* Default hook body for sparc_init_timers: machines whose IRQ setup
 * never replaced it print a diagnostic via the PROM and halt.
 */
static void irq_panic(void)
{
	extern char *cputypval;
	prom_printf("machine: %s doesn't have irq handlers defined!\n",cputypval);
	prom_halt();
}

/* Timer-init hook, assigned by the per-machine IRQ setup code;
 * defaults to the fatal irq_panic() above.
 */
void (*sparc_init_timers)(irq_handler_t ) =
    (void (*)(irq_handler_t )) irq_panic;

/*
 * Dave Redman (djhr@tadpole.co.uk)
 *
 * There used to be extern calls and hard coded values here.. very sucky!
145 * instead, because some of the devices attach very early, I do something 146 * equally sucky but at least we'll never try to free statically allocated 147 * space or call kmalloc before kmalloc_init :(. 148 * 149 * In fact it's the timer10 that attaches first.. then timer14 150 * then kmalloc_init is called.. then the tty interrupts attach. 151 * hmmm.... 152 * 153 */ 154 #define MAX_STATIC_ALLOC 4 155 struct irqaction static_irqaction[MAX_STATIC_ALLOC]; 156 int static_irq_count; 157 158 static struct { 159 struct irqaction *action; 160 int flags; 161 } sparc_irq[NR_IRQS]; 162 #define SPARC_IRQ_INPROGRESS 1 163 164 /* Used to protect the IRQ action lists */ 165 DEFINE_SPINLOCK(irq_action_lock); 166 167 int show_interrupts(struct seq_file *p, void *v) 168 { 169 int i = *(loff_t *) v; 170 struct irqaction * action; 171 unsigned long flags; 172 #ifdef CONFIG_SMP 173 int j; 174 #endif 175 176 if (sparc_cpu_model == sun4d) { 177 extern int show_sun4d_interrupts(struct seq_file *, void *); 178 179 return show_sun4d_interrupts(p, v); 180 } 181 spin_lock_irqsave(&irq_action_lock, flags); 182 if (i < NR_IRQS) { 183 action = sparc_irq[i].action; 184 if (!action) 185 goto out_unlock; 186 seq_printf(p, "%3d: ", i); 187 #ifndef CONFIG_SMP 188 seq_printf(p, "%10u ", kstat_irqs(i)); 189 #else 190 for_each_online_cpu(j) { 191 seq_printf(p, "%10u ", 192 kstat_cpu(j).irqs[i]); 193 } 194 #endif 195 seq_printf(p, " %c %s", 196 (action->flags & IRQF_DISABLED) ? '+' : ' ', 197 action->name); 198 for (action=action->next; action; action = action->next) { 199 seq_printf(p, ",%s %s", 200 (action->flags & IRQF_DISABLED) ? 
" +" : "", 201 action->name); 202 } 203 seq_putc(p, '\n'); 204 } 205 out_unlock: 206 spin_unlock_irqrestore(&irq_action_lock, flags); 207 return 0; 208 } 209 210 void free_irq(unsigned int irq, void *dev_id) 211 { 212 struct irqaction * action; 213 struct irqaction **actionp; 214 unsigned long flags; 215 unsigned int cpu_irq; 216 217 if (sparc_cpu_model == sun4d) { 218 extern void sun4d_free_irq(unsigned int, void *); 219 220 sun4d_free_irq(irq, dev_id); 221 return; 222 } 223 cpu_irq = irq & (NR_IRQS - 1); 224 if (cpu_irq > 14) { /* 14 irq levels on the sparc */ 225 printk("Trying to free bogus IRQ %d\n", irq); 226 return; 227 } 228 229 spin_lock_irqsave(&irq_action_lock, flags); 230 231 actionp = &sparc_irq[cpu_irq].action; 232 action = *actionp; 233 234 if (!action->handler) { 235 printk("Trying to free free IRQ%d\n",irq); 236 goto out_unlock; 237 } 238 if (dev_id) { 239 for (; action; action = action->next) { 240 if (action->dev_id == dev_id) 241 break; 242 actionp = &action->next; 243 } 244 if (!action) { 245 printk("Trying to free free shared IRQ%d\n",irq); 246 goto out_unlock; 247 } 248 } else if (action->flags & IRQF_SHARED) { 249 printk("Trying to free shared IRQ%d with NULL device ID\n", irq); 250 goto out_unlock; 251 } 252 if (action->flags & SA_STATIC_ALLOC) 253 { 254 /* This interrupt is marked as specially allocated 255 * so it is a bad idea to free it. 256 */ 257 printk("Attempt to free statically allocated IRQ%d (%s)\n", 258 irq, action->name); 259 goto out_unlock; 260 } 261 262 *actionp = action->next; 263 264 spin_unlock_irqrestore(&irq_action_lock, flags); 265 266 synchronize_irq(irq); 267 268 spin_lock_irqsave(&irq_action_lock, flags); 269 270 kfree(action); 271 272 if (!sparc_irq[cpu_irq].action) 273 __disable_irq(irq); 274 275 out_unlock: 276 spin_unlock_irqrestore(&irq_action_lock, flags); 277 } 278 279 EXPORT_SYMBOL(free_irq); 280 281 /* 282 * This is called when we want to synchronize with 283 * interrupts. 
 * We may for example tell a device to
 * stop sending interrupts: but to make sure there
 * are no interrupts that are executing on another
 * CPU we need to call this function.
 */
#ifdef CONFIG_SMP
/* Busy-wait until no CPU is inside the handler chain for @irq;
 * handler_irq() holds SPARC_IRQ_INPROGRESS around the chain walk.
 */
void synchronize_irq(unsigned int irq)
{
	unsigned int cpu_irq;

	cpu_irq = irq & (NR_IRQS - 1);
	while (sparc_irq[cpu_irq].flags & SPARC_IRQ_INPROGRESS)
		cpu_relax();
}
#endif /* SMP */

/* Fatal path for an interrupt with no usable handler: dump PC/NPC/FP
 * and the registered names, then panic.
 * NOTE(review): the loop below tests the same head action 16 times
 * rather than walking action->next -- looks like a long-standing quirk;
 * harmless since we panic immediately afterwards.
 */
void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs)
{
	int i;
	struct irqaction * action;
	unsigned int cpu_irq;

	cpu_irq = irq & (NR_IRQS - 1);
	action = sparc_irq[cpu_irq].action;

	printk("IO device interrupt, irq = %d\n", irq);
	printk("PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc,
	       regs->npc, regs->u_regs[14]);
	if (action) {
		printk("Expecting: ");
		for (i = 0; i < 16; i++)
			if (action->handler)
				printk("[%s:%d:0x%x] ", action->name,
				       (int) i, (unsigned int) action->handler);
	}
	printk("AIEEE\n");
	panic("bogus interrupt received");
}

/* Main C entry for a (slow) interrupt: mask this priority level, flag
 * the level in-progress for synchronize_irq(), account it, and run
 * every handler chained on the level.
 */
void handler_irq(int irq, struct pt_regs * regs)
{
	struct pt_regs *old_regs;
	struct irqaction * action;
	int cpu = smp_processor_id();
#ifdef CONFIG_SMP
	extern void smp4m_irq_rotate(int cpu);
#endif

	old_regs = set_irq_regs(regs);
	irq_enter();
	disable_pil_irq(irq);
#ifdef CONFIG_SMP
	/* Only rotate on lower priority IRQs (scsi, ethernet, etc.). */
	if((sparc_cpu_model==sun4m) && (irq < 10))
		smp4m_irq_rotate(cpu);
#endif
	action = sparc_irq[irq].action;
	sparc_irq[irq].flags |= SPARC_IRQ_INPROGRESS;
	kstat_cpu(cpu).irqs[irq]++;
	do {
		/* unexpected_irq() panics, so it never returns here */
		if (!action || !action->handler)
			unexpected_irq(irq, NULL, regs);
		action->handler(irq, action->dev_id);
		action = action->next;
	} while (action);
	sparc_irq[irq].flags &= ~SPARC_IRQ_INPROGRESS;
	enable_pil_irq(irq);
	irq_exit();
	set_irq_regs(old_regs);
}

#if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE)

/* Fast IRQs on the Sparc can only have one routine attached to them,
 * thus no sharing possible.  The handler is branched to straight from
 * the trap table (see INSTANTIATE below), bypassing handler_irq().
 */
static int request_fast_irq(unsigned int irq,
			    void (*handler)(void),
			    unsigned long irqflags, const char *devname)
{
	struct irqaction *action;
	unsigned long flags;
	unsigned int cpu_irq;
	int ret;
#ifdef CONFIG_SMP
	struct tt_entry *trap_table;
	extern struct tt_entry trapbase_cpu1, trapbase_cpu2, trapbase_cpu3;
#endif

	cpu_irq = irq & (NR_IRQS - 1);
	if(cpu_irq > 14) {	/* 14 irq levels on the sparc */
		ret = -EINVAL;
		goto out;
	}
	if(!handler) {
		ret = -EINVAL;
		goto out;
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	/* A fast IRQ cannot coexist with any other handler on the level. */
	action = sparc_irq[cpu_irq].action;
	if(action) {
		if(action->flags & IRQF_SHARED)
			panic("Trying to register fast irq when already shared.\n");
		if(irqflags & IRQF_SHARED)
			panic("Trying to register fast irq as shared.\n");

		/* Anyway, someone already owns it so cannot be made fast. */
		printk("request_fast_irq: Trying to register yet already owned.\n");
		ret = -EBUSY;
		goto out_unlock;
	}

	/* If this is flagged as statically allocated then we use our
	 * private struct which is never freed.
	 */
	if (irqflags & SA_STATIC_ALLOC) {
		if (static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Fast IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
			       irq, devname);
	}

	if (action == NULL)
		action = kmalloc(sizeof(struct irqaction),
				 GFP_ATOMIC);

	if (!action) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* Dork with trap table if we get this far.  Patch the four
	 * instructions of this level's trap slot so the CPU branches
	 * directly to @handler, with no C-level dispatch in between.
	 */
#define INSTANTIATE(table) \
	table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_one = SPARC_RD_PSR_L0; \
	table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two = \
		SPARC_BRANCH((unsigned long) handler, \
			     (unsigned long) &table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two);\
	table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_three = SPARC_RD_WIM_L3; \
	table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP;

	INSTANTIATE(sparc_ttable)
#ifdef CONFIG_SMP
	/* Every CPU has its own trap table; patch them all. */
	trap_table = &trapbase_cpu1; INSTANTIATE(trap_table)
	trap_table = &trapbase_cpu2; INSTANTIATE(trap_table)
	trap_table = &trapbase_cpu3; INSTANTIATE(trap_table)
#endif
#undef INSTANTIATE
	/*
	 * XXX Correct thing whould be to flush only I- and D-cache lines
	 * which contain the handler in question. But as of time of the
	 * writing we have no CPU-neutral interface to fine-grained flushes.
	 */
	flush_cache_all();

	/* NOTE(review): action->handler is deliberately left unset --
	 * the patched trap table invokes @handler directly.
	 */
	action->flags = irqflags;
	cpus_clear(action->mask);
	action->name = devname;
	action->dev_id = NULL;
	action->next = NULL;

	sparc_irq[cpu_irq].action = action;

	__enable_irq(irq);

	ret = 0;
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);
out:
	return ret;
}

/* These variables are used to access state from the assembler
 * interrupt handler, floppy_hardint, so we cannot put these in
 * the floppy driver image because that would not work in the
 * modular case.
 */
volatile unsigned char *fdc_status;
EXPORT_SYMBOL(fdc_status);

char *pdma_vaddr;
EXPORT_SYMBOL(pdma_vaddr);

unsigned long pdma_size;
EXPORT_SYMBOL(pdma_size);

volatile int doing_pdma;
EXPORT_SYMBOL(doing_pdma);

char *pdma_base;
EXPORT_SYMBOL(pdma_base);

unsigned long pdma_areasize;
EXPORT_SYMBOL(pdma_areasize);

extern void floppy_hardint(void);

/* C-level floppy handler; installed by sparc_floppy_request_irq() and
 * called from sparc_floppy_irq() below.
 */
static irq_handler_t floppy_irq_handler;

/* Slow-path floppy interrupt entry: account the IRQ and hand off to
 * the handler registered via sparc_floppy_request_irq().
 */
void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	int cpu = smp_processor_id();

	old_regs = set_irq_regs(regs);
	disable_pil_irq(irq);
	irq_enter();
	kstat_cpu(cpu).irqs[irq]++;
	floppy_irq_handler(irq, dev_id);
	irq_exit();
	enable_pil_irq(irq);
	set_irq_regs(old_regs);
	// XXX Eek, it's totally changed with preempt_count() and such
	// if (softirq_pending(cpu))
	//	do_softirq();
}

/* Register the assembler fast handler floppy_hardint on @irq and
 * remember @irq_handler for the C-level path above.
 */
int sparc_floppy_request_irq(int irq, unsigned long flags,
			     irq_handler_t irq_handler)
{
	floppy_irq_handler = irq_handler;
	return request_fast_irq(irq, floppy_hardint, flags, "floppy");
}
EXPORT_SYMBOL(sparc_floppy_request_irq);

#endif

/* Attach @handler to @irq.  Sharing is allowed only when both the
 * existing and the new action carry IRQF_SHARED.  sun4d machines are
 * delegated to their own implementation.
 */
int request_irq(unsigned int irq,
		irq_handler_t handler,
		unsigned long irqflags, const char * devname, void *dev_id)
{
	struct irqaction * action, **actionp;
	unsigned long flags;
	unsigned int cpu_irq;
	int ret;

	if (sparc_cpu_model == sun4d) {
		extern int sun4d_request_irq(unsigned int,
					     irq_handler_t ,
					     unsigned long, const char *, void *);
		return sun4d_request_irq(irq, handler, irqflags, devname, dev_id);
	}
	cpu_irq = irq & (NR_IRQS - 1);
	if(cpu_irq > 14) {	/* 14 irq levels on the sparc */
		ret = -EINVAL;
		goto out;
	}
	if (!handler) {
		ret = -EINVAL;
		goto out;
	}

	spin_lock_irqsave(&irq_action_lock, flags);

	actionp = &sparc_irq[cpu_irq].action;
	action = *actionp;

	if (action) {
		/* Both sides must agree to share, and fast (IRQF_DISABLED)
		 * and slow handlers cannot be mixed on one level.
		 */
		if (!(action->flags & IRQF_SHARED) || !(irqflags & IRQF_SHARED)) {
			ret = -EBUSY;
			goto out_unlock;
		}
		if ((action->flags & IRQF_DISABLED) != (irqflags & IRQF_DISABLED)) {
			printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
			ret = -EBUSY;
			goto out_unlock;
		}
		/* Walk to the list tail so the new action is appended. */
		for ( ; action; action = *actionp)
			actionp = &action->next;
	}

	/* If this is flagged as statically allocated then we use our
	 * private struct which is never freed.
	 */
	if (irqflags & SA_STATIC_ALLOC) {
		if (static_irq_count < MAX_STATIC_ALLOC)
			action = &static_irqaction[static_irq_count++];
		else
			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", irq, devname);
	}

	if (action == NULL)
		action = kmalloc(sizeof(struct irqaction),
				 GFP_ATOMIC);

	if (!action) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	action->handler = handler;
	action->flags = irqflags;
	cpus_clear(action->mask);
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	*actionp = action;

	__enable_irq(irq);

	ret = 0;
out_unlock:
	spin_unlock_irqrestore(&irq_action_lock, flags);
out:
	return ret;
}

EXPORT_SYMBOL(request_irq);

void disable_irq_nosync(unsigned int irq)
{
	__disable_irq(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/* NOTE(review): identical to disable_irq_nosync() above -- unlike the
 * generic kernel disable_irq(), this does not wait for in-flight
 * handlers to complete.
 */
void disable_irq(unsigned int irq)
{
	__disable_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

void enable_irq(unsigned int irq)
{
	__enable_irq(irq);
}

EXPORT_SYMBOL(enable_irq);

/* We really don't need these at all on the Sparc.  We only have
 * stubs here because they are exported to modules.
 */
unsigned long probe_irq_on(void)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_on);

int probe_irq_off(unsigned long mask)
{
	return 0;
}

EXPORT_SYMBOL(probe_irq_off);

/* djhr
 * This could probably be made indirect too and assigned in the CPU
 * bits of the code.  That would be much nicer I think and would also
 * fit in with the idea of being able to tune your kernel for your machine
 * by removing unrequired machine and device support.
 *
 */

/* Select and run the IRQ controller setup for the detected machine
 * type; on sun4m a PCI controller found by pcic_probe() takes
 * precedence.  Finishes with btfixup() to patch the chosen routines in.
 */
void __init init_IRQ(void)
{
	extern void sun4c_init_IRQ( void );
	extern void sun4m_init_IRQ( void );
	extern void sun4d_init_IRQ( void );

	switch(sparc_cpu_model) {
	case sun4c:
	case sun4:
		sun4c_init_IRQ();
		break;

	case sun4m:
#ifdef CONFIG_PCI
		pcic_probe();
		if (pcic_present()) {
			sun4m_pci_init_IRQ();
			break;
		}
#endif
		sun4m_init_IRQ();
		break;

	case sun4d:
		sun4d_init_IRQ();
		break;

	default:
		prom_printf("Cannot initialize IRQs on this Sun machine...");
		break;
	}
	btfixup();
}

#ifdef CONFIG_PROC_FS
void init_irq_proc(void)
{
	/* For now, nothing... */
}
#endif /* CONFIG_PROC_FS */