/*
 * linux/arch/alpha/kernel/irq.c
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/profile.h>
#include <linux/bitops.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/uaccess.h>

/*
 * Controller mappings for all interrupt sources:
 */
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
	[0 ... NR_IRQS-1] = {
		.handler = &no_irq_type,
		.lock = SPIN_LOCK_UNLOCKED
	}
};

static void register_irq_proc(unsigned int irq);

volatile unsigned long irq_err_count;

/*
 * Special irq handlers.
 */

irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
{
	return IRQ_NONE;
}

/*
 * Generic no controller code
 */

static void no_irq_enable_disable(unsigned int irq) { }
static unsigned int no_irq_startup(unsigned int irq) { return 0; }

static void
no_irq_ack(unsigned int irq)
{
	irq_err_count++;
	printk(KERN_CRIT "Unexpected IRQ trap at vector %u\n", irq);
}

struct hw_interrupt_type no_irq_type = {
	.typename	= "none",
	.startup	= no_irq_startup,
	.shutdown	= no_irq_enable_disable,
	.enable		= no_irq_enable_disable,
	.disable	= no_irq_enable_disable,
	.ack		= no_irq_ack,
	.end		= no_irq_enable_disable,
};

int
handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
		 struct irqaction *action)
{
	int status = 1;	/* Force the "do bottom halves" bit */
	int ret;

	do {
		/* SA_INTERRUPT handlers run with interrupts disabled;
		   all others run with interrupts enabled.  */
		if (!(action->flags & SA_INTERRUPT))
			local_irq_enable();
		else
			local_irq_disable();

		ret = action->handler(irq, action->dev_id, regs);
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		action = action->next;
	} while (action);
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();

	return status;
}
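
/*
 * Handler contract sketch (editorial note, not from the original
 * file; all "foo" names are hypothetical): handle_IRQ_event() above
 * or-s action->flags into "status" only when the handler returns
 * IRQ_HANDLED, so a device sharing a line should answer IRQ_NONE
 * when it has nothing pending:
 *
 *	static irqreturn_t
 *	foo_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 *	{
 *		struct foo_dev *dev = dev_id;
 *
 *		if (!foo_irq_pending(dev))	// hypothetical helper
 *			return IRQ_NONE;	// not ours (shared line)
 *		foo_handle(dev);		// hypothetical helper
 *		return IRQ_HANDLED;
 *	}
 */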

/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */
inline void
disable_irq_nosync(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}

/*
 * Synchronous version of the above, making sure the IRQ is
 * no longer running on any other CPU.
 */
void
disable_irq(unsigned int irq)
{
	disable_irq_nosync(irq);
	synchronize_irq(irq);
}

void
enable_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	switch (desc->depth) {
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;
		desc->status = status;
		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
			desc->status = status | IRQ_REPLAY;
			hw_resend_irq(desc->handler, irq);
		}
		desc->handler->enable(irq);
		/* fall-through */
	}
	default:
		desc->depth--;
		break;
	case 0:
		printk(KERN_ERR "enable_irq() unbalanced from %p\n",
		       __builtin_return_address(0));
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}

int
setup_irq(unsigned int irq, struct irqaction *new)
{
	int shared = 0;
	struct irqaction *old, **p;
	unsigned long flags;
	irq_desc_t *desc = irq_desc + irq;

	if (desc->handler == &no_irq_type)
		return -ENOSYS;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, so we want to call it
		 * first, outside of the atomic block.  Yes, this might
		 * clear the entropy pool if the wrong driver is
		 * attempted to be loaded, without actually installing
		 * a new handler, but is that really a problem?  Only
		 * the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically.
	 */
	spin_lock_irqsave(&desc->lock, flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to. */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&desc->lock, flags);
			return -EBUSY;
		}

		/* Add the new interrupt at the end of the irq queue. */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		desc->depth = 0;
		desc->status &=
			~(IRQ_DISABLED|IRQ_AUTODETECT|IRQ_WAITING|IRQ_INPROGRESS);
		desc->handler->startup(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);

	return 0;
}
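
/*
 * Sharing sketch (editorial note): setup_irq() refuses to chain a
 * second action unless *both* the installed action and the new one
 * set SA_SHIRQ.  With hypothetical handlers a, b and c:
 *
 *	request_irq(irq, a_handler, SA_SHIRQ, "a", a_dev);	// installs
 *	request_irq(irq, b_handler, SA_SHIRQ, "b", b_dev);	// chains
 *	request_irq(irq, c_handler, 0, "c", c_dev);		// -EBUSY
 */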

static struct proc_dir_entry *root_irq_dir;
static struct proc_dir_entry *irq_dir[NR_IRQS];

#ifdef CONFIG_SMP
static struct proc_dir_entry *smp_affinity_entry[NR_IRQS];
static char irq_user_affinity[NR_IRQS];
static cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };

static void
select_smp_affinity(int irq)
{
	static int last_cpu;
	int cpu = last_cpu + 1;

	if (!irq_desc[irq].handler->set_affinity || irq_user_affinity[irq])
		return;

	/* Round-robin over the possible CPUs. */
	while (!cpu_possible(cpu))
		cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
	last_cpu = cpu;

	irq_affinity[irq] = cpumask_of_cpu(cpu);
	irq_desc[irq].handler->set_affinity(irq, cpumask_of_cpu(cpu));
}

static int
irq_affinity_read_proc(char *page, char **start, off_t off,
		       int count, int *eof, void *data)
{
	int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]);
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}

static int
irq_affinity_write_proc(struct file *file, const char __user *buffer,
			unsigned long count, void *data)
{
	int irq = (long) data, full_count = count, err;
	cpumask_t new_value;

	if (!irq_desc[irq].handler->set_affinity)
		return -EIO;

	err = cpumask_parse(buffer, count, new_value);
	if (err)
		return err;

	/* The special value 0 means release control of the
	   affinity to the kernel.  */
	cpus_and(new_value, new_value, cpu_online_map);
	if (cpus_empty(new_value)) {
		irq_user_affinity[irq] = 0;
		select_smp_affinity(irq);
	}
	/* Do not allow disabling IRQs completely - it's too easy a
	   way to make the system unusable accidentally :-)  At least
	   one online CPU still has to be targeted.  */
	else {
		irq_affinity[irq] = new_value;
		irq_user_affinity[irq] = 1;
		irq_desc[irq].handler->set_affinity(irq, new_value);
	}

	return full_count;
}

#endif /* CONFIG_SMP */

#define MAX_NAMELEN 10

static void
register_irq_proc(unsigned int irq)
{
	char name[MAX_NAMELEN];

	if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) ||
	    irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);

#ifdef CONFIG_SMP
	if (irq_desc[irq].handler->set_affinity) {
		struct proc_dir_entry *entry;
		/* create /proc/irq/1234/smp_affinity */
		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

		if (entry) {
			entry->nlink = 1;
			entry->data = (void *)(long)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}

		smp_affinity_entry[irq] = entry;
	}
#endif
}

void
init_irq_proc(void)
{
	int i;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);

#ifdef CONFIG_SMP
	/* create /proc/irq/prof_cpu_mask */
	create_prof_cpu_mask(root_irq_dir);
#endif

	/*
	 * Create entries for all existing IRQs.
	 */
	for (i = 0; i < ACTUAL_NR_IRQS; i++) {
		if (irq_desc[i].handler == &no_irq_type)
			continue;
		register_irq_proc(i);
	}
}
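
/*
 * /proc interface sketch (editorial note; irq number 10 is
 * hypothetical): smp_affinity takes a hex cpumask, and writing a
 * mask with no online CPUs (e.g. 0) returns control of the affinity
 * to the kernel, per irq_affinity_write_proc() above:
 *
 *	# cat /proc/irq/10/smp_affinity
 *	3
 *	# echo 1 > /proc/irq/10/smp_affinity	(pin to CPU 0)
 *	# echo 0 > /proc/irq/10/smp_affinity	(let the kernel choose)
 */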

int
request_irq(unsigned int irq,
	    irqreturn_t (*handler)(int, void *, struct pt_regs *),
	    unsigned long irqflags, const char *devname, void *dev_id)
{
	int retval;
	struct irqaction *action;

	if (irq >= ACTUAL_NR_IRQS)
		return -EINVAL;
	if (!handler)
		return -EINVAL;

#if 1
	/*
	 * Sanity-check: shared interrupts should REALLY pass in
	 * a real dev-ID, otherwise we'll have trouble later trying
	 * to figure out which interrupt is which (messes up the
	 * interrupt freeing logic etc).
	 */
	if ((irqflags & SA_SHIRQ) && !dev_id) {
		printk(KERN_ERR
		       "Bad boy: %s (at %p) called us without a dev_id!\n",
		       devname, __builtin_return_address(0));
	}
#endif

	action = (struct irqaction *)
			kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags;
	cpus_clear(action->mask);
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

#ifdef CONFIG_SMP
	select_smp_affinity(irq);
#endif

	retval = setup_irq(irq, action);
	if (retval)
		kfree(action);
	return retval;
}

EXPORT_SYMBOL(request_irq);

void
free_irq(unsigned int irq, void *dev_id)
{
	irq_desc_t *desc;
	struct irqaction **p;
	unsigned long flags;

	if (irq >= ACTUAL_NR_IRQS) {
		printk(KERN_CRIT "Trying to free IRQ%d\n", irq);
		return;
	}

	desc = irq_desc + irq;
	spin_lock_irqsave(&desc->lock, flags);
	p = &desc->action;
	for (;;) {
		struct irqaction *action = *p;
		if (action) {
			struct irqaction **pp = p;
			p = &action->next;
			if (action->dev_id != dev_id)
				continue;

			/* Found it - now remove it from the list of entries. */
			*pp = action->next;
			if (!desc->action) {
				desc->status |= IRQ_DISABLED;
				desc->handler->shutdown(irq);
			}
			spin_unlock_irqrestore(&desc->lock, flags);

#ifdef CONFIG_SMP
			/* Wait to make sure it's not being used on
			   another CPU.  */
			while (desc->status & IRQ_INPROGRESS)
				barrier();
#endif
			kfree(action);
			return;
		}
		printk(KERN_ERR "Trying to free already-free IRQ%d\n", irq);
		spin_unlock_irqrestore(&desc->lock, flags);
		return;
	}
}

EXPORT_SYMBOL(free_irq);
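
/*
 * Registration sketch (editorial note; "foo" names hypothetical):
 * dev_id must match between request_irq() and free_irq(), since
 * free_irq() walks the action list comparing dev_id:
 *
 *	err = request_irq(dev->irq, foo_interrupt,
 *			  SA_SHIRQ | SA_SAMPLE_RANDOM, "foo", dev);
 *	if (err)
 *		goto fail;
 *	...
 *	free_irq(dev->irq, dev);	// same dev_id as registered
 */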

int
show_interrupts(struct seq_file *p, void *v)
{
#ifdef CONFIG_SMP
	int j;
#endif
	int i = *(loff_t *) v;
	struct irqaction *action;
	unsigned long flags;

#ifdef CONFIG_SMP
	if (i == 0) {
		seq_puts(p, "           ");
		for (i = 0; i < NR_CPUS; i++)
			if (cpu_online(i))
				seq_printf(p, "CPU%d       ", i);
		seq_putc(p, '\n');
	}
#endif

	if (i < ACTUAL_NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto unlock;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %14s", irq_desc[i].handler->typename);
		seq_printf(p, " %c%s",
			   (action->flags & SA_INTERRUPT) ? '+' : ' ',
			   action->name);

		for (action = action->next; action; action = action->next) {
			seq_printf(p, ", %c%s",
				   (action->flags & SA_INTERRUPT) ? '+' : ' ',
				   action->name);
		}

		seq_putc(p, '\n');
unlock:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == ACTUAL_NR_IRQS) {
#ifdef CONFIG_SMP
		seq_puts(p, "IPI: ");
		for (i = 0; i < NR_CPUS; i++)
			if (cpu_online(i))
				seq_printf(p, "%10lu ", cpu_data[i].ipi_count);
		seq_putc(p, '\n');
#endif
		seq_printf(p, "ERR: %10lu\n", irq_err_count);
	}
	return 0;
}

/*
 * handle_irq handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */

#define MAX_ILLEGAL_IRQS 16

void
handle_irq(int irq, struct pt_regs *regs)
{
	/*
	 * We ack quickly, we don't want the irq controller
	 * thinking we're snobs just because some other CPU has
	 * disabled global interrupts (we have already done the
	 * INT_ACK cycles, it's too late to try to pretend to the
	 * controller that we aren't taking the interrupt).
	 *
	 * A 0 return value from ->startup()/ack means that this irq
	 * is already being handled by some other CPU (or is disabled).
	 */
	int cpu = smp_processor_id();
	irq_desc_t *desc = irq_desc + irq;
	struct irqaction *action;
	unsigned int status;
	static unsigned int illegal_count = 0;

	/* Reject out-of-range vectors, rate-limiting the message to
	   the first MAX_ILLEGAL_IRQS occurrences.  */
	if ((unsigned) irq >= ACTUAL_NR_IRQS) {
		irq_err_count++;
		if (illegal_count < MAX_ILLEGAL_IRQS) {
			illegal_count++;
			printk(KERN_CRIT
			       "device_interrupt: invalid interrupt %d\n",
			       irq);
		}
		return;
	}

	irq_enter();
	kstat_cpu(cpu).irqs[irq]++;
	spin_lock_irq(&desc->lock); /* mask also the higher prio events */
	desc->handler->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier.
	 * WAITING is used by probe to mark irqs that are being tested.
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (!action)
		goto out;

	/*
	 * Edge triggered interrupts need to remember pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in handle_irq
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		spin_unlock(&desc->lock);
		handle_IRQ_event(irq, regs, action);
		spin_lock(&desc->lock);

		if (!(desc->status & IRQ_PENDING)
		    || (desc->status & IRQ_LEVEL))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;
out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->handler->end(irq);
	spin_unlock(&desc->lock);

	irq_exit();
}
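
/*
 * SMP replay sketch (editorial walk-through of the loop above):
 *
 *	CPU A: takes irq N, sets INPROGRESS, drops the lock, runs
 *	       the handlers in handle_IRQ_event().
 *	CPU B: takes a second edge on irq N, finds INPROGRESS set,
 *	       leaves PENDING set and exits via out:.
 *	CPU A: re-takes the lock, sees PENDING, clears it and runs
 *	       the handlers once more - the second edge is not lost.
 */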

/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */
unsigned long
probe_irq_on(void)
{
	int i;
	irq_desc_t *desc;
	unsigned long delay;
	unsigned long val;

	/* Something may have generated an irq long ago and we want to
	   flush such a longstanding irq before considering it as
	   spurious.  */
	for (i = NR_IRQS-1; i >= 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!desc->action)
			desc->handler->startup(i);
		spin_unlock_irq(&desc->lock);
	}

	/* Wait for longstanding interrupts to trigger.  */
	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
		/* about 20ms delay */ barrier();

	/* Now enable any unassigned irqs.  We must startup again here
	   because if a longstanding irq happened in the previous stage,
	   it may have masked itself.  */
	for (i = NR_IRQS-1; i >= 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!desc->action) {
			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
			if (desc->handler->startup(i))
				desc->status |= IRQ_PENDING;
		}
		spin_unlock_irq(&desc->lock);
	}

	/*
	 * Wait for spurious interrupts to trigger.
	 */
	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
		/* about 100ms delay */ barrier();

	/*
	 * Now filter out any obviously spurious interrupts.
	 */
	val = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			/* It triggered already - consider it spurious.  */
			if (!(status & IRQ_WAITING)) {
				desc->status = status & ~IRQ_AUTODETECT;
				desc->handler->shutdown(i);
			} else
				if (i < 32)
					val |= 1 << i;
		}
		spin_unlock_irq(&desc->lock);
	}

	return val;
}

EXPORT_SYMBOL(probe_irq_on);

/*
 * Return a mask of triggered interrupts (this
 * can handle only legacy ISA interrupts).
 */
unsigned int
probe_irq_mask(unsigned long val)
{
	int i;
	unsigned int mask;

	mask = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			/* We only react to ISA interrupts.  */
			if (!(status & IRQ_WAITING)) {
				if (i < 16)
					mask |= 1 << i;
			}

			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}

	return mask & val;
}

/*
 * Get the result of the IRQ probe.. A negative result means that
 * we have several candidates (but we return the lowest-numbered
 * one).
 */

int
probe_irq_off(unsigned long val)
{
	int i, irq_found, nr_irqs;

	nr_irqs = 0;
	irq_found = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			if (!(status & IRQ_WAITING)) {
				if (!nr_irqs)
					irq_found = i;
				nr_irqs++;
			}
			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}

	if (nr_irqs > 1)
		irq_found = -irq_found;
	return irq_found;
}

EXPORT_SYMBOL(probe_irq_off);

#ifdef CONFIG_SMP
void
synchronize_irq(unsigned int irq)
{
	/* Is there anything to synchronize with?  */
	if (!irq_desc[irq].action)
		return;

	while (irq_desc[irq].status & IRQ_INPROGRESS)
		barrier();
}
#endif
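
/*
 * Autodetect sketch (editorial note; helper names hypothetical):
 * the classic probe sequence a legacy ISA driver would run against
 * the functions above:
 *
 *	unsigned long mask = probe_irq_on();
 *	foo_trigger_irq(dev);		// hypothetical: make the
 *					// device raise its interrupt
 *	mdelay(10);			// give it time to arrive
 *	irq = probe_irq_off(mask);	// > 0: unique irq found
 *					// < 0: several candidates,
 *					//      lowest one negated
 *					//   0: none triggered
 */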