/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994 - 2000 Ralf Baechle
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>

#include <linux/atomic.h>
#include <asm/system.h>
#include <asm/uaccess.h>

#ifdef CONFIG_KGDB
int kgdb_early_setup;
#endif

static unsigned long irq_map[NR_IRQS / BITS_PER_LONG];

int allocate_irqno(void)
{
	int irq;

again:
	irq = find_first_zero_bit(irq_map, NR_IRQS);

	if (irq >= NR_IRQS)
		return -ENOSPC;

	if (test_and_set_bit(irq, irq_map))
		goto again;

	return irq;
}

/*
 * Allocate the 16 legacy interrupts for i8259 devices.  This happens early
 * in the kernel initialization so treating allocation failure as BUG() is
 * ok.
 */
void __init alloc_legacy_irqno(void)
{
	int i;

	for (i = 0; i <= 16; i++)
		BUG_ON(test_and_set_bit(i, irq_map));
}

void free_irqno(unsigned int irq)
{
	smp_mb__before_clear_bit();
	clear_bit(irq, irq_map);
	smp_mb__after_clear_bit();
}

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
	smtc_im_ack_irq(irq);
	printk("unexpected IRQ # %d\n", irq);
}

atomic_t irq_err_count;

int arch_show_interrupts(struct seq_file *p, int prec)
{
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
	return 0;
}

asmlinkage void spurious_interrupt(void)
{
	atomic_inc(&irq_err_count);
}

void __init init_IRQ(void)
{
	int i;

#ifdef CONFIG_KGDB
	if (kgdb_early_setup)
		return;
#endif

	for (i = 0; i < NR_IRQS; i++)
		irq_set_noprobe(i);

	arch_init_irq();

#ifdef CONFIG_KGDB
	if (!kgdb_early_setup)
		kgdb_early_setup = 1;
#endif
}

#ifdef DEBUG_STACKOVERFLOW
static inline void check_stack_overflow(void)
{
	unsigned long sp;

	__asm__ __volatile__("move %0, $sp" : "=r" (sp));
	sp &= THREAD_MASK;

	/*
	 * Check for stack overflow: is there less than STACK_WARN free?
	 * STACK_WARN is defined as 1/8 of THREAD_SIZE by default.
	 */
	if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
		printk("do_IRQ: stack overflow: %ld\n",
		       sp - sizeof(struct thread_info));
		dump_stack();
	}
}
#else
static inline void check_stack_overflow(void) {}
#endif


/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
void __irq_entry do_IRQ(unsigned int irq)
{
	irq_enter();
	check_stack_overflow();
	if (!smtc_handle_on_other_cpu(irq))
		generic_handle_irq(irq);
	irq_exit();
}

#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
/*
 * To avoid inefficient and in some cases pathological re-checking of
 * IRQ affinity, we have this variant that skips the affinity check.
 */

void __irq_entry do_IRQ_no_affinity(unsigned int irq)
{
	irq_enter();
	smtc_im_backstop(irq);
	generic_handle_irq(irq);
	irq_exit();
}

#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
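/*
 * Usage sketch (not part of the original file): the dynamic IRQ number
 * allocator above would typically be driven by platform interrupt code.
 * The my_board_* names below are hypothetical and only illustrate the
 * intended pairing of allocate_irqno() with free_irqno(); the error path
 * assumes the -ENOSPC return value of allocate_irqno() shown above.
 */
#if 0
static int my_board_setup_irq(void)
{
	int virq;

	/* Reserve a free IRQ number from the global irq_map bitmap. */
	virq = allocate_irqno();
	if (virq < 0)
		return virq;	/* -ENOSPC: no free IRQ numbers left */

	/*
	 * The caller would now bind a chip and flow handler to virq
	 * (e.g. via irq_set_chip_and_handler()) before any driver
	 * requests it.
	 */

	return virq;
}

static void my_board_teardown_irq(int virq)
{
	/* Return the IRQ number to the allocator once it is unused. */
	free_irqno(virq);
}
#endif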