/*
 * linux/kernel/irq/handle.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;

/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @irq:	the interrupt number
 * @desc:	description of the interrupt
 *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
 */
void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
	print_irq_desc(irq, desc);
	kstat_incr_irqs_this_cpu(irq, desc);
	ack_bad_irq(irq);
}

/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic or
 * having to touch the generic code.
 *
 * Controller mappings for all interrupt sources:
 */
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

#ifdef CONFIG_SPARSE_IRQ
static struct irq_desc irq_desc_init = {
	.irq	    = -1,
	.status	    = IRQ_DISABLED,
	.chip	    = &no_irq_chip,
	.handle_irq = handle_bad_irq,
	.depth	    = 1,
	.lock	    = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
#ifdef CONFIG_SMP
	.affinity   = CPU_MASK_ALL
#endif
};

void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
{
	unsigned long bytes;
	char *ptr;
	int node;

	/* Compute how many bytes we need per irq and allocate them */
	bytes = nr * sizeof(unsigned int);

	node = cpu_to_node(cpu);
	ptr = kzalloc_node(bytes, GFP_ATOMIC, node);
	printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n", cpu, node);

	if (ptr)
		desc->kstat_irqs = (unsigned int *)ptr;
}

static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
{
	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

	spin_lock_init(&desc->lock);
	desc->irq = irq;
#ifdef CONFIG_SMP
	desc->cpu = cpu;
#endif
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	init_kstat_irqs(desc, cpu, nr_cpu_ids);
	if (!desc->kstat_irqs) {
		printk(KERN_ERR "can not alloc kstat_irqs\n");
		BUG_ON(1);
	}
	arch_init_chip_data(desc, cpu);
}

/*
 * Protect the sparse_irqs:
 */
DEFINE_SPINLOCK(sparse_irq_lock);

struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;

static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS_LEGACY-1] = {
		.irq	    = -1,
		.status	    = IRQ_DISABLED,
		.chip	    = &no_irq_chip,
		.handle_irq = handle_bad_irq,
		.depth	    = 1,
		.lock	    = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
#ifdef CONFIG_SMP
		.affinity   = CPU_MASK_ALL
#endif
	}
};

/* FIXME: use bootmem alloc ...*/
static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];

int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int legacy_count;
	int i;

	desc = irq_desc_legacy;
	legacy_count = ARRAY_SIZE(irq_desc_legacy);

	for (i = 0; i < legacy_count; i++) {
		desc[i].irq = i;
		desc[i].kstat_irqs = kstat_irqs_legacy[i];
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);

		irq_desc_ptrs[i] = desc + i;
	}

	for (i = legacy_count; i < NR_IRQS; i++)
		irq_desc_ptrs[i] = NULL;

	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
}

struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc;
	unsigned long flags;
	int node;

	if (irq >= NR_IRQS) {
		printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
			irq, NR_IRQS);
		WARN_ON(1);
		return NULL;
	}

	desc = irq_desc_ptrs[irq];
	if (desc)
		return desc;

	spin_lock_irqsave(&sparse_irq_lock, flags);

	/* We have to check it to avoid races with another CPU */
	desc = irq_desc_ptrs[irq];
	if (desc)
		goto out_unlock;

	node = cpu_to_node(cpu);
	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
	printk(KERN_DEBUG " alloc irq_desc for %d on cpu %d node %d\n",
		irq, cpu, node);
	if (!desc) {
		printk(KERN_ERR "can not alloc irq_desc\n");
		BUG_ON(1);
	}
	init_one_irq_desc(irq, desc, cpu);

	irq_desc_ptrs[irq] = desc;

out_unlock:
	spin_unlock_irqrestore(&sparse_irq_lock, flags);

	return desc;
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.status	    = IRQ_DISABLED,
		.chip	    = &no_irq_chip,
		.handle_irq = handle_bad_irq,
		.depth	    = 1,
		.lock	    = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
#ifdef CONFIG_SMP
		.affinity   = CPU_MASK_ALL
#endif
	}
};

int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int count;
	int i;

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++)
		desc[i].irq = i;

	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
{
	return irq_to_desc(irq);
}
#endif /* !CONFIG_SPARSE_IRQ */

/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this itself.
 */
static void ack_bad(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	print_irq_desc(irq, desc);
	ack_bad_irq(irq);
}

/*
 * NOP functions
 */
static void noop(unsigned int irq)
{
}

static unsigned int noop_ret(unsigned int irq)
{
	return 0;
}

/*
 * Generic no controller implementation
 */
struct irq_chip no_irq_chip = {
	.name		= "none",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= ack_bad,
	.end		= noop,
};

/*
 * Generic dummy implementation which can be used for
 * real dumb interrupt sources
 */
struct irq_chip dummy_irq_chip = {
	.name		= "dummy",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= noop,
	.mask		= noop,
	.unmask		= noop,
	.end		= noop,
};

/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
	return IRQ_NONE;
}

/**
 * handle_IRQ_event - irq action chain handler
 * @irq:	the interrupt number
 * @action:	the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
	irqreturn_t ret, retval = IRQ_NONE;
	unsigned int status = 0;

	if (!(action->flags & IRQF_DISABLED))
		local_irq_enable_in_hardirq();

	do {
		ret = action->handler(irq, action->dev_id);
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		retval |= ret;
		action = action->next;
	} while (action);

	if (status & IRQF_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();

	return retval;
}

#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
/**
 * __do_IRQ - original all-in-one high-level IRQ handler
 * @irq:	the interrupt number
 *
 * __do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
unsigned int __do_IRQ(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned int status;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (CHECK_IRQ_PER_CPU(desc->status)) {
		irqreturn_t action_ret;

		/*
		 * No locking required for CPU-local interrupts:
		 */
		if (desc->chip->ack) {
			desc->chip->ack(irq);
			/* get new one */
			desc = irq_remap_to_desc(irq, desc);
		}
		if (likely(!(desc->status & IRQ_DISABLED))) {
			action_ret = handle_IRQ_event(irq, desc->action);
			if (!noirqdebug)
				note_interrupt(irq, desc, action_ret);
		}
		desc->chip->end(irq);
		return 1;
	}

	spin_lock(&desc->lock);
	if (desc->chip->ack) {
		desc->chip->ack(irq);
		desc = irq_remap_to_desc(irq, desc);
	}
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		irqreturn_t action_ret;

		spin_unlock(&desc->lock);

		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);

		spin_lock(&desc->lock);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;

out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->chip->end(irq);
	spin_unlock(&desc->lock);

	return 1;
}
#endif

void early_init_irq_lock_class(void)
{
	struct irq_desc *desc;
	int i;

	for_each_irq_desc(i, desc) {
		lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	}
}

#ifdef CONFIG_SPARSE_IRQ
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);
	return desc ? desc->kstat_irqs[cpu] : 0;
}
#endif
EXPORT_SYMBOL(kstat_irqs_cpu);
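
/*
 * Illustrative, non-compiled sketch of how a driver-side handler feeds the
 * action chain that handle_IRQ_event() walks above: request_irq() appends an
 * irqaction to desc->action, each handler's IRQ_HANDLED/IRQ_NONE return value
 * is OR'ed into the chain's result, and note_interrupt() uses it for
 * spurious-IRQ accounting. The device structure, register offsets and names
 * below are made-up placeholders, not part of this file or any real driver.
 */
#if 0	/* example sketch only, never compiled */
struct example_dev {
	unsigned int	irq;
	void __iomem	*regs;
};

static irqreturn_t example_dev_isr(int irq, void *dev_id)
{
	struct example_dev *dev = dev_id;

	/* hypothetical status register: check whether this device raised the irq */
	if (!(readl(dev->regs + 0x04) & 0x1))
		return IRQ_NONE;	/* not ours: lets other shared actions run */

	/* hypothetical write-to-clear ack of the device interrupt */
	writel(0x1, dev->regs + 0x04);
	return IRQ_HANDLED;		/* consumed: counted as a handled event */
}

static int example_dev_setup(struct example_dev *dev)
{
	/* IRQF_SHARED permits several actions on the same interrupt line */
	return request_irq(dev->irq, example_dev_isr, IRQF_SHARED,
			   "example_dev", dev);
}
#endif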