/*
 * linux/kernel/irq/handle.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */

#include <linux/irq.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>
#include <linux/radix-tree.h>
#include <trace/events/irq.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;

/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @irq:  the interrupt number
 * @desc: description of the interrupt
 *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
 */
void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
        print_irq_desc(irq, desc);
        kstat_incr_irqs_this_cpu(irq, desc);
        ack_bad_irq(irq);
}

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
        alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
        cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template', that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic or
 * having to touch the generic code.
 *
 * Controller mappings for all interrupt sources:
 */
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);
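
/*
 * Illustrative sketch, assuming a hypothetical driver "foo" (not code
 * from this file): the layering described above means a driver only
 * works with an interrupt number and a handler; which irq_chip sits
 * behind that number is invisible to it.  foo_*() are made-up helpers.
 *
 *      static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *      {
 *              struct foo_device *dev = dev_id;
 *
 *              if (!foo_device_raised_irq(dev))
 *                      return IRQ_NONE;
 *              foo_handle_device_event(dev);
 *              return IRQ_HANDLED;
 *      }
 *
 *      err = request_irq(irq, foo_interrupt, IRQF_SHARED, "foo", dev);
 *      ...
 *      free_irq(irq, dev);
 */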

#ifdef CONFIG_SPARSE_IRQ

static struct irq_desc irq_desc_init = {
        .irq        = -1,
        .status     = IRQ_DISABLED,
        .chip       = &no_irq_chip,
        .handle_irq = handle_bad_irq,
        .depth      = 1,
        .lock       = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
};

void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
{
        void *ptr;

        ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
                           GFP_ATOMIC, node);

        /*
         * don't overwrite if we can not get a new one
         * init_copy_kstat_irqs() could still use old one
         */
        if (ptr) {
                printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node);
                desc->kstat_irqs = ptr;
        }
}

static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
{
        memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

        raw_spin_lock_init(&desc->lock);
        desc->irq = irq;
#ifdef CONFIG_SMP
        desc->node = node;
#endif
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        init_kstat_irqs(desc, node, nr_cpu_ids);
        if (!desc->kstat_irqs) {
                printk(KERN_ERR "can not alloc kstat_irqs\n");
                BUG_ON(1);
        }
        if (!alloc_desc_masks(desc, node, false)) {
                printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
                BUG_ON(1);
        }
        init_desc_masks(desc);
        arch_init_chip_data(desc, node);
}

/*
 * Protect the sparse_irqs:
 */
DEFINE_RAW_SPINLOCK(sparse_irq_lock);

static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);

static void set_irq_desc(unsigned int irq, struct irq_desc *desc)
{
        radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return radix_tree_lookup(&irq_desc_tree, irq);
}

void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
{
        void **ptr;

        ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
        if (ptr)
                radix_tree_replace_slot(ptr, desc);
}
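
/*
 * Illustrative sketch, assuming a hypothetical caller (not code from
 * this file): generic code that wants to inspect an interrupt looks
 * its descriptor up by number and takes desc->lock before touching the
 * per-descriptor state, roughly:
 *
 *      struct irq_desc *desc = irq_to_desc(irq);
 *      unsigned long flags;
 *
 *      if (!desc)
 *              return;                 (hole in the sparse irq space)
 *      raw_spin_lock_irqsave(&desc->lock, flags);
 *      ...read desc->status, desc->action, desc->kstat_irqs[]...
 *      raw_spin_unlock_irqrestore(&desc->lock, flags);
 */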

static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS_LEGACY-1] = {
                .irq        = -1,
                .status     = IRQ_DISABLED,
                .chip       = &no_irq_chip,
                .handle_irq = handle_bad_irq,
                .depth      = 1,
                .lock       = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
        }
};

static unsigned int *kstat_irqs_legacy;

int __init early_irq_init(void)
{
        struct irq_desc *desc;
        int legacy_count;
        int node;
        int i;

        init_irq_default_affinity();

        /* initialize nr_irqs based on nr_cpu_ids */
        arch_probe_nr_irqs();
        printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);

        desc = irq_desc_legacy;
        legacy_count = ARRAY_SIZE(irq_desc_legacy);
        node = first_online_node;

        /* allocate based on nr_cpu_ids */
        kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
                                         sizeof(int), GFP_NOWAIT, node);

        for (i = 0; i < legacy_count; i++) {
                desc[i].irq = i;
#ifdef CONFIG_SMP
                desc[i].node = node;
#endif
                desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
                alloc_desc_masks(&desc[i], node, true);
                init_desc_masks(&desc[i]);
                set_irq_desc(i, &desc[i]);
        }

        return arch_early_irq_init();
}

struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
        struct irq_desc *desc;
        unsigned long flags;

        if (irq >= nr_irqs) {
                WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
                        irq, nr_irqs);
                return NULL;
        }

        desc = irq_to_desc(irq);
        if (desc)
                return desc;

        raw_spin_lock_irqsave(&sparse_irq_lock, flags);

        /* We have to check it to avoid races with another CPU */
        desc = irq_to_desc(irq);
        if (desc)
                goto out_unlock;

        desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);

        printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node);
        if (!desc) {
                printk(KERN_ERR "can not alloc irq_desc\n");
                BUG_ON(1);
        }
        init_one_irq_desc(irq, desc, node);

        set_irq_desc(irq, desc);

out_unlock:
        raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);

        return desc;
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS-1] = {
                .status     = IRQ_DISABLED,
                .chip       = &no_irq_chip,
                .handle_irq = handle_bad_irq,
                .depth      = 1,
                .lock       = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
        }
};

static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
int __init early_irq_init(void)
{
        struct irq_desc *desc;
        int count;
        int i;

        init_irq_default_affinity();

        printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);

        for (i = 0; i < count; i++) {
                desc[i].irq = i;
                alloc_desc_masks(&desc[i], 0, true);
                init_desc_masks(&desc[i]);
                desc[i].kstat_irqs = kstat_irqs_all[i];
        }
        return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
        return irq_to_desc(irq);
}
#endif /* !CONFIG_SPARSE_IRQ */

void clear_kstat_irqs(struct irq_desc *desc)
{
        memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
}
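
/*
 * Illustrative sketch, assuming a hypothetical arch setup path (not
 * code from this file): with CONFIG_SPARSE_IRQ the descriptor for a
 * freshly assigned interrupt number must exist before the irq can be
 * wired up.  "some_arch_chip" stands in for the real controller chip.
 *
 *      struct irq_desc *desc = irq_to_desc_alloc_node(irq, node);
 *
 *      if (!desc)
 *              return -EINVAL;         (irq >= nr_irqs)
 *      set_irq_chip_and_handler(irq, &some_arch_chip, handle_edge_irq);
 */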

/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this itself.
 */
static void ack_bad(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        print_irq_desc(irq, desc);
        ack_bad_irq(irq);
}

/*
 * NOP functions
 */
static void noop(unsigned int irq)
{
}

static unsigned int noop_ret(unsigned int irq)
{
        return 0;
}

/*
 * Generic no controller implementation
 */
struct irq_chip no_irq_chip = {
        .name     = "none",
        .startup  = noop_ret,
        .shutdown = noop,
        .enable   = noop,
        .disable  = noop,
        .ack      = ack_bad,
        .end      = noop,
};

/*
 * Generic dummy implementation which can be used for
 * real dumb interrupt sources
 */
struct irq_chip dummy_irq_chip = {
        .name     = "dummy",
        .startup  = noop_ret,
        .shutdown = noop,
        .enable   = noop,
        .disable  = noop,
        .ack      = noop,
        .mask     = noop,
        .unmask   = noop,
        .end      = noop,
};

/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
        return IRQ_NONE;
}

static void warn_no_thread(unsigned int irq, struct irqaction *action)
{
        if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags))
                return;

        printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD "
               "but no thread function available.", irq, action->name);
}

/**
 * handle_IRQ_event - irq action chain handler
 * @irq:    the interrupt number
 * @action: the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
        irqreturn_t ret, retval = IRQ_NONE;
        unsigned int status = 0;

        if (!(action->flags & IRQF_DISABLED))
                local_irq_enable_in_hardirq();

        do {
                trace_irq_handler_entry(irq, action);
                ret = action->handler(irq, action->dev_id);
                trace_irq_handler_exit(irq, action, ret);

                switch (ret) {
                case IRQ_WAKE_THREAD:
                        /*
                         * Set result to handled so the spurious check
                         * does not trigger.
                         */
                        ret = IRQ_HANDLED;

                        /*
                         * Catch drivers which return WAKE_THREAD but
                         * did not set up a thread function
                         */
                        if (unlikely(!action->thread_fn)) {
                                warn_no_thread(irq, action);
                                break;
                        }

                        /*
                         * Wake up the handler thread for this
                         * action. In case the thread crashed and was
                         * killed we just pretend that we handled the
                         * interrupt. The hardirq handler above has
                         * disabled the device interrupt, so no irq
                         * storm is lurking.
                         */
                        if (likely(!test_bit(IRQTF_DIED,
                                             &action->thread_flags))) {
                                set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
                                wake_up_process(action->thread);
                        }

                        /* Fall through to add to randomness */
                case IRQ_HANDLED:
                        status |= action->flags;
                        break;

                default:
                        break;
                }

                retval |= ret;
                action = action->next;
        } while (action);

        if (status & IRQF_SAMPLE_RANDOM)
                add_interrupt_randomness(irq);
        local_irq_disable();

        return retval;
}
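
/*
 * Illustrative sketch, assuming a hypothetical driver (not code from
 * this file): the IRQ_WAKE_THREAD case above is what a handler pair
 * registered with request_threaded_irq() relies on.  All foo_*()
 * helpers are made up; only the return values matter for this flow.
 *
 *      static irqreturn_t foo_quick_check(int irq, void *dev_id)
 *      {
 *              if (!foo_device_raised_irq(dev_id))
 *                      return IRQ_NONE;
 *              foo_mask_device_irq(dev_id);
 *              return IRQ_WAKE_THREAD;         (wakes foo_thread_fn)
 *      }
 *
 *      static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *      {
 *              foo_do_slow_work(dev_id);
 *              foo_unmask_device_irq(dev_id);
 *              return IRQ_HANDLED;
 *      }
 *
 *      err = request_threaded_irq(irq, foo_quick_check, foo_thread_fn,
 *                                 0, "foo", dev);
 */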

#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ

#ifdef CONFIG_ENABLE_WARN_DEPRECATED
# warning __do_IRQ is deprecated. Please convert to proper flow handlers
#endif

/**
 * __do_IRQ - original all in one highlevel IRQ handler
 * @irq: the interrupt number
 *
 * __do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
unsigned int __do_IRQ(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;
        unsigned int status;

        kstat_incr_irqs_this_cpu(irq, desc);

        if (CHECK_IRQ_PER_CPU(desc->status)) {
                irqreturn_t action_ret;

                /*
                 * No locking required for CPU-local interrupts:
                 */
                if (desc->chip->ack)
                        desc->chip->ack(irq);
                if (likely(!(desc->status & IRQ_DISABLED))) {
                        action_ret = handle_IRQ_event(irq, desc->action);
                        if (!noirqdebug)
                                note_interrupt(irq, desc, action_ret);
                }
                desc->chip->end(irq);
                return 1;
        }

        raw_spin_lock(&desc->lock);
        if (desc->chip->ack)
                desc->chip->ack(irq);
        /*
         * REPLAY is when Linux resends an IRQ that was dropped earlier
         * WAITING is used by probe to mark irqs that are being tested
         */
        status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
        status |= IRQ_PENDING; /* we _want_ to handle it */

        /*
         * If the IRQ is disabled for whatever reason, we cannot
         * use the action we have.
         */
        action = NULL;
        if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
                action = desc->action;
                status &= ~IRQ_PENDING; /* we commit to handling */
                status |= IRQ_INPROGRESS; /* we are handling it */
        }
        desc->status = status;

        /*
         * If there is no IRQ handler or it was disabled, exit early.
         * Since we set PENDING, if another processor is handling
         * a different instance of this same irq, the other processor
         * will take care of it.
         */
        if (unlikely(!action))
                goto out;

        /*
         * Edge triggered interrupts need to remember
         * pending events.
         * This applies to any hw interrupts that allow a second
         * instance of the same irq to arrive while we are in do_IRQ
         * or in the handler. But the code here only handles the _second_
         * instance of the irq, not the third or fourth. So it is mostly
         * useful for irq hardware that does not mask cleanly in an
         * SMP environment.
         */
        for (;;) {
                irqreturn_t action_ret;

                raw_spin_unlock(&desc->lock);

                action_ret = handle_IRQ_event(irq, action);
                if (!noirqdebug)
                        note_interrupt(irq, desc, action_ret);

                raw_spin_lock(&desc->lock);
                if (likely(!(desc->status & IRQ_PENDING)))
                        break;
                desc->status &= ~IRQ_PENDING;
        }
        desc->status &= ~IRQ_INPROGRESS;

out:
        /*
         * The ->end() handler has to deal with interrupts which got
         * disabled while the handler was running.
         */
        desc->chip->end(irq);
        raw_spin_unlock(&desc->lock);

        return 1;
}
#endif

void early_init_irq_lock_class(void)
{
        struct irq_desc *desc;
        int i;

        for_each_irq_desc(i, desc) {
                lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        }
}

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);
        return desc ? desc->kstat_irqs[cpu] : 0;
}
EXPORT_SYMBOL(kstat_irqs_cpu);
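
/*
 * Illustrative sketch, assuming a hypothetical helper (not code from
 * this file): a caller that wants a single total for one interrupt
 * sums the per-cpu counters exposed by kstat_irqs_cpu(), roughly:
 *
 *      static unsigned int foo_irq_total(unsigned int irq)
 *      {
 *              unsigned int sum = 0;
 *              int cpu;
 *
 *              for_each_possible_cpu(cpu)
 *                      sum += kstat_irqs_cpu(irq, cpu);
 *              return sum;
 *      }
 */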