/*
 * linux/kernel/irq/handle.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include "internals.h"

/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @irq:  the interrupt number
 * @desc: description of the interrupt
 *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
 */
void fastcall
handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
        print_irq_desc(irq, desc);
        kstat_this_cpu.irqs[irq]++;
        ack_bad_irq(irq);
}

/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic or
 * having to touch the generic code.
 *
 * Controller mappings for all interrupt sources:
 */
struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned = {
        [0 ... NR_IRQS-1] = {
                .status = IRQ_DISABLED,
                .chip = &no_irq_chip,
                .handle_irq = handle_bad_irq,
                .depth = 1,
                .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
#ifdef CONFIG_SMP
                .affinity = CPU_MASK_ALL
#endif
        }
};
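/*
 * Illustrative sketch (not part of this file): at boot, architecture or
 * platform code normally replaces the no_irq_chip/handle_bad_irq defaults
 * above with a real controller and flow handler. Assuming a hypothetical
 * PIC with the usual ack/mask/unmask callbacks, that binding looks
 * roughly like this:
 *
 *      static struct irq_chip my_pic_chip = {
 *              .name   = "MY-PIC",
 *              .ack    = my_pic_ack,
 *              .mask   = my_pic_mask,
 *              .unmask = my_pic_unmask,
 *      };
 *
 *      void __init my_pic_init_irqs(void)
 *      {
 *              int irq;
 *
 *              for (irq = 0; irq < 16; irq++)
 *                      set_irq_chip_and_handler(irq, &my_pic_chip,
 *                                               handle_level_irq);
 *      }
 */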
/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this itself.
 */
static void ack_bad(unsigned int irq)
{
        print_irq_desc(irq, irq_desc + irq);
        ack_bad_irq(irq);
}

/*
 * NOP functions
 */
static void noop(unsigned int irq)
{
}

static unsigned int noop_ret(unsigned int irq)
{
        return 0;
}

/*
 * Generic no controller implementation
 */
struct irq_chip no_irq_chip = {
        .name = "none",
        .startup = noop_ret,
        .shutdown = noop,
        .enable = noop,
        .disable = noop,
        .ack = ack_bad,
        .end = noop,
};

/*
 * Generic dummy implementation which can be used for
 * really dumb interrupt sources
 */
struct irq_chip dummy_irq_chip = {
        .name = "dummy",
        .startup = noop_ret,
        .shutdown = noop,
        .enable = noop,
        .disable = noop,
        .ack = noop,
        .mask = noop,
        .unmask = noop,
        .end = noop,
};

/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
        return IRQ_NONE;
}

/**
 * handle_IRQ_event - irq action chain handler
 * @irq:    the interrupt number
 * @action: the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
        irqreturn_t ret, retval = IRQ_NONE;
        unsigned int status = 0;

        handle_dynamic_tick(action);

        if (!(action->flags & IRQF_DISABLED))
                local_irq_enable_in_hardirq();

        do {
                ret = action->handler(irq, action->dev_id);
                if (ret == IRQ_HANDLED)
                        status |= action->flags;
                retval |= ret;
                action = action->next;
        } while (action);

        if (status & IRQF_SAMPLE_RANDOM)
                add_interrupt_randomness(irq);
        local_irq_disable();

        return retval;
}
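/*
 * Illustrative sketch (not part of this file): the action chain walked by
 * handle_IRQ_event() above is built by request_irq(). With a shared line,
 * each driver contributes one irqaction, and each handler reports whether
 * its own device raised the interrupt. The driver name, the my_dev
 * structure and its helpers are hypothetical:
 *
 *      static irqreturn_t my_dev_interrupt(int irq, void *dev_id)
 *      {
 *              struct my_dev *dev = dev_id;
 *
 *              if (!my_dev_irq_pending(dev))
 *                      return IRQ_NONE;        // not ours, next action is tried
 *
 *              my_dev_handle_irq(dev);
 *              return IRQ_HANDLED;
 *      }
 *
 *      ret = request_irq(dev->irq, my_dev_interrupt, IRQF_SHARED,
 *                        "my_dev", dev);
 */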
#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
/**
 * __do_IRQ - original all-in-one high-level IRQ handler
 * @irq: the interrupt number
 *
 * __do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
fastcall unsigned int __do_IRQ(unsigned int irq)
{
        struct irq_desc *desc = irq_desc + irq;
        struct irqaction *action;
        unsigned int status;

        kstat_this_cpu.irqs[irq]++;
        if (CHECK_IRQ_PER_CPU(desc->status)) {
                irqreturn_t action_ret;

                /*
                 * No locking required for CPU-local interrupts:
                 */
                if (desc->chip->ack)
                        desc->chip->ack(irq);
                action_ret = handle_IRQ_event(irq, desc->action);
                desc->chip->end(irq);
                return 1;
        }

        spin_lock(&desc->lock);
        if (desc->chip->ack)
                desc->chip->ack(irq);
        /*
         * REPLAY is when Linux resends an IRQ that was dropped earlier
         * WAITING is used by probe to mark irqs that are being tested
         */
        status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
        status |= IRQ_PENDING; /* we _want_ to handle it */

        /*
         * If the IRQ is disabled for whatever reason, we cannot
         * use the action we have.
         */
        action = NULL;
        if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
                action = desc->action;
                status &= ~IRQ_PENDING; /* we commit to handling */
                status |= IRQ_INPROGRESS; /* we are handling it */
        }
        desc->status = status;

        /*
         * If there is no IRQ handler or it was disabled, exit early.
         * Since we set PENDING, if another processor is handling
         * a different instance of this same irq, the other processor
         * will take care of it.
         */
        if (unlikely(!action))
                goto out;

        /*
         * Edge-triggered interrupts need to remember pending events.
         * This applies to any hw interrupts that allow a second
         * instance of the same irq to arrive while we are in do_IRQ
         * or in the handler. But the code here only handles the _second_
         * instance of the irq, not the third or fourth. So it is mostly
         * useful for irq hardware that does not mask cleanly in an
         * SMP environment.
         */
        for (;;) {
                irqreturn_t action_ret;

                spin_unlock(&desc->lock);

                action_ret = handle_IRQ_event(irq, action);
                if (!noirqdebug)
                        note_interrupt(irq, desc, action_ret);

                spin_lock(&desc->lock);
                if (likely(!(desc->status & IRQ_PENDING)))
                        break;
                desc->status &= ~IRQ_PENDING;
        }
        desc->status &= ~IRQ_INPROGRESS;

out:
        /*
         * The ->end() handler has to deal with interrupts which got
         * disabled while the handler was running.
         */
        desc->chip->end(irq);
        spin_unlock(&desc->lock);

        return 1;
}
#endif

#ifdef CONFIG_TRACE_IRQFLAGS

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

void early_init_irq_lock_class(void)
{
        int i;

        for (i = 0; i < NR_IRQS; i++)
                lockdep_set_class(&irq_desc[i].lock, &irq_desc_lock_class);
}

#endif
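/*
 * Illustrative sketch (not part of this file): on an architecture that
 * still uses __do_IRQ(), the low-level interrupt entry code typically
 * wraps it in irq_enter()/irq_exit() after decoding the irq number from
 * the hardware. The exact do_IRQ() signature is architecture-specific;
 * this is only a rough outline:
 *
 *      unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs)
 *      {
 *              struct pt_regs *old_regs = set_irq_regs(regs);
 *
 *              irq_enter();
 *              __do_IRQ(irq);
 *              irq_exit();
 *              set_irq_regs(old_regs);
 *
 *              return 1;
 *      }
 */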