1 /* 2 * linux/kernel/irq/manage.c 3 * 4 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar 5 * Copyright (C) 2005-2006 Thomas Gleixner 6 * 7 * This file contains driver APIs to the irq subsystem. 8 */ 9 10 #include <linux/irq.h> 11 #include <linux/kthread.h> 12 #include <linux/module.h> 13 #include <linux/random.h> 14 #include <linux/interrupt.h> 15 #include <linux/slab.h> 16 #include <linux/sched.h> 17 18 #include "internals.h" 19 20 /** 21 * synchronize_irq - wait for pending IRQ handlers (on other CPUs) 22 * @irq: interrupt number to wait for 23 * 24 * This function waits for any pending IRQ handlers for this interrupt 25 * to complete before returning. If you use this function while 26 * holding a resource the IRQ handler may need you will deadlock. 27 * 28 * This function may be called - with care - from IRQ context. 29 */ 30 void synchronize_irq(unsigned int irq) 31 { 32 struct irq_desc *desc = irq_to_desc(irq); 33 unsigned int status; 34 35 if (!desc) 36 return; 37 38 do { 39 unsigned long flags; 40 41 /* 42 * Wait until we're out of the critical section. This might 43 * give the wrong answer due to the lack of memory barriers. 44 */ 45 while (desc->status & IRQ_INPROGRESS) 46 cpu_relax(); 47 48 /* Ok, that indicated we're done: double-check carefully. */ 49 raw_spin_lock_irqsave(&desc->lock, flags); 50 status = desc->status; 51 raw_spin_unlock_irqrestore(&desc->lock, flags); 52 53 /* Oops, that failed? */ 54 } while (status & IRQ_INPROGRESS); 55 56 /* 57 * We made sure that no hardirq handler is running. Now verify 58 * that no threaded handlers are active. 59 */ 60 wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active)); 61 } 62 EXPORT_SYMBOL(synchronize_irq); 63 64 #ifdef CONFIG_SMP 65 cpumask_var_t irq_default_affinity; 66 67 /** 68 * irq_can_set_affinity - Check if the affinity of a given irq can be set 69 * @irq: Interrupt to check 70 * 71 */ 72 int irq_can_set_affinity(unsigned int irq) 73 { 74 struct irq_desc *desc = irq_to_desc(irq); 75 76 if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip || 77 !desc->chip->set_affinity) 78 return 0; 79 80 return 1; 81 } 82 83 /** 84 * irq_set_thread_affinity - Notify irq threads to adjust affinity 85 * @desc: irq descriptor which has affitnity changed 86 * 87 * We just set IRQTF_AFFINITY and delegate the affinity setting 88 * to the interrupt thread itself. We can not call 89 * set_cpus_allowed_ptr() here as we hold desc->lock and this 90 * code can be called from hard interrupt context. 91 */ 92 void irq_set_thread_affinity(struct irq_desc *desc) 93 { 94 struct irqaction *action = desc->action; 95 96 while (action) { 97 if (action->thread) 98 set_bit(IRQTF_AFFINITY, &action->thread_flags); 99 action = action->next; 100 } 101 } 102 103 /** 104 * irq_set_affinity - Set the irq affinity of a given irq 105 * @irq: Interrupt to set affinity 106 * @cpumask: cpumask 107 * 108 */ 109 int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) 110 { 111 struct irq_desc *desc = irq_to_desc(irq); 112 unsigned long flags; 113 114 if (!desc->chip->set_affinity) 115 return -EINVAL; 116 117 raw_spin_lock_irqsave(&desc->lock, flags); 118 119 #ifdef CONFIG_GENERIC_PENDING_IRQ 120 if (desc->status & IRQ_MOVE_PCNTXT) { 121 if (!desc->chip->set_affinity(irq, cpumask)) { 122 cpumask_copy(desc->affinity, cpumask); 123 irq_set_thread_affinity(desc); 124 } 125 } 126 else { 127 desc->status |= IRQ_MOVE_PENDING; 128 cpumask_copy(desc->pending_mask, cpumask); 129 } 130 #else 131 if (!desc->chip->set_affinity(irq, cpumask)) { 132 cpumask_copy(desc->affinity, cpumask); 133 irq_set_thread_affinity(desc); 134 } 135 #endif 136 desc->status |= IRQ_AFFINITY_SET; 137 raw_spin_unlock_irqrestore(&desc->lock, flags); 138 return 0; 139 } 140 141 #ifndef CONFIG_AUTO_IRQ_AFFINITY 142 /* 143 * Generic version of the affinity autoselector. 144 */ 145 static int setup_affinity(unsigned int irq, struct irq_desc *desc) 146 { 147 if (!irq_can_set_affinity(irq)) 148 return 0; 149 150 /* 151 * Preserve an userspace affinity setup, but make sure that 152 * one of the targets is online. 153 */ 154 if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { 155 if (cpumask_any_and(desc->affinity, cpu_online_mask) 156 < nr_cpu_ids) 157 goto set_affinity; 158 else 159 desc->status &= ~IRQ_AFFINITY_SET; 160 } 161 162 cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity); 163 set_affinity: 164 desc->chip->set_affinity(irq, desc->affinity); 165 166 return 0; 167 } 168 #else 169 static inline int setup_affinity(unsigned int irq, struct irq_desc *d) 170 { 171 return irq_select_affinity(irq); 172 } 173 #endif 174 175 /* 176 * Called when affinity is set via /proc/irq 177 */ 178 int irq_select_affinity_usr(unsigned int irq) 179 { 180 struct irq_desc *desc = irq_to_desc(irq); 181 unsigned long flags; 182 int ret; 183 184 raw_spin_lock_irqsave(&desc->lock, flags); 185 ret = setup_affinity(irq, desc); 186 if (!ret) 187 irq_set_thread_affinity(desc); 188 raw_spin_unlock_irqrestore(&desc->lock, flags); 189 190 return ret; 191 } 192 193 #else 194 static inline int setup_affinity(unsigned int irq, struct irq_desc *desc) 195 { 196 return 0; 197 } 198 #endif 199 200 void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) 201 { 202 if (suspend) { 203 if (!desc->action || (desc->action->flags & IRQF_TIMER)) 204 return; 205 desc->status |= IRQ_SUSPENDED; 206 } 207 208 if (!desc->depth++) { 209 desc->status |= IRQ_DISABLED; 210 desc->chip->disable(irq); 211 } 212 } 213 214 /** 215 * disable_irq_nosync - disable an irq without waiting 216 * @irq: Interrupt to disable 217 * 218 * Disable the selected interrupt line. Disables and Enables are 219 * nested. 220 * Unlike disable_irq(), this function does not ensure existing 221 * instances of the IRQ handler have completed before returning. 222 * 223 * This function may be called from IRQ context. 224 */ 225 void disable_irq_nosync(unsigned int irq) 226 { 227 struct irq_desc *desc = irq_to_desc(irq); 228 unsigned long flags; 229 230 if (!desc) 231 return; 232 233 chip_bus_lock(irq, desc); 234 raw_spin_lock_irqsave(&desc->lock, flags); 235 __disable_irq(desc, irq, false); 236 raw_spin_unlock_irqrestore(&desc->lock, flags); 237 chip_bus_sync_unlock(irq, desc); 238 } 239 EXPORT_SYMBOL(disable_irq_nosync); 240 241 /** 242 * disable_irq - disable an irq and wait for completion 243 * @irq: Interrupt to disable 244 * 245 * Disable the selected interrupt line. Enables and Disables are 246 * nested. 247 * This function waits for any pending IRQ handlers for this interrupt 248 * to complete before returning. If you use this function while 249 * holding a resource the IRQ handler may need you will deadlock. 250 * 251 * This function may be called - with care - from IRQ context. 252 */ 253 void disable_irq(unsigned int irq) 254 { 255 struct irq_desc *desc = irq_to_desc(irq); 256 257 if (!desc) 258 return; 259 260 disable_irq_nosync(irq); 261 if (desc->action) 262 synchronize_irq(irq); 263 } 264 EXPORT_SYMBOL(disable_irq); 265 266 void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) 267 { 268 if (resume) 269 desc->status &= ~IRQ_SUSPENDED; 270 271 switch (desc->depth) { 272 case 0: 273 err_out: 274 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); 275 break; 276 case 1: { 277 unsigned int status = desc->status & ~IRQ_DISABLED; 278 279 if (desc->status & IRQ_SUSPENDED) 280 goto err_out; 281 /* Prevent probing on this irq: */ 282 desc->status = status | IRQ_NOPROBE; 283 check_irq_resend(desc, irq); 284 /* fall-through */ 285 } 286 default: 287 desc->depth--; 288 } 289 } 290 291 /** 292 * enable_irq - enable handling of an irq 293 * @irq: Interrupt to enable 294 * 295 * Undoes the effect of one call to disable_irq(). If this 296 * matches the last disable, processing of interrupts on this 297 * IRQ line is re-enabled. 298 * 299 * This function may be called from IRQ context only when 300 * desc->chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! 301 */ 302 void enable_irq(unsigned int irq) 303 { 304 struct irq_desc *desc = irq_to_desc(irq); 305 unsigned long flags; 306 307 if (!desc) 308 return; 309 310 chip_bus_lock(irq, desc); 311 raw_spin_lock_irqsave(&desc->lock, flags); 312 __enable_irq(desc, irq, false); 313 raw_spin_unlock_irqrestore(&desc->lock, flags); 314 chip_bus_sync_unlock(irq, desc); 315 } 316 EXPORT_SYMBOL(enable_irq); 317 318 static int set_irq_wake_real(unsigned int irq, unsigned int on) 319 { 320 struct irq_desc *desc = irq_to_desc(irq); 321 int ret = -ENXIO; 322 323 if (desc->chip->set_wake) 324 ret = desc->chip->set_wake(irq, on); 325 326 return ret; 327 } 328 329 /** 330 * set_irq_wake - control irq power management wakeup 331 * @irq: interrupt to control 332 * @on: enable/disable power management wakeup 333 * 334 * Enable/disable power management wakeup mode, which is 335 * disabled by default. Enables and disables must match, 336 * just as they match for non-wakeup mode support. 337 * 338 * Wakeup mode lets this IRQ wake the system from sleep 339 * states like "suspend to RAM". 340 */ 341 int set_irq_wake(unsigned int irq, unsigned int on) 342 { 343 struct irq_desc *desc = irq_to_desc(irq); 344 unsigned long flags; 345 int ret = 0; 346 347 /* wakeup-capable irqs can be shared between drivers that 348 * don't need to have the same sleep mode behaviors. 349 */ 350 raw_spin_lock_irqsave(&desc->lock, flags); 351 if (on) { 352 if (desc->wake_depth++ == 0) { 353 ret = set_irq_wake_real(irq, on); 354 if (ret) 355 desc->wake_depth = 0; 356 else 357 desc->status |= IRQ_WAKEUP; 358 } 359 } else { 360 if (desc->wake_depth == 0) { 361 WARN(1, "Unbalanced IRQ %d wake disable\n", irq); 362 } else if (--desc->wake_depth == 0) { 363 ret = set_irq_wake_real(irq, on); 364 if (ret) 365 desc->wake_depth = 1; 366 else 367 desc->status &= ~IRQ_WAKEUP; 368 } 369 } 370 371 raw_spin_unlock_irqrestore(&desc->lock, flags); 372 return ret; 373 } 374 EXPORT_SYMBOL(set_irq_wake); 375 376 /* 377 * Internal function that tells the architecture code whether a 378 * particular irq has been exclusively allocated or is available 379 * for driver use. 380 */ 381 int can_request_irq(unsigned int irq, unsigned long irqflags) 382 { 383 struct irq_desc *desc = irq_to_desc(irq); 384 struct irqaction *action; 385 unsigned long flags; 386 387 if (!desc) 388 return 0; 389 390 if (desc->status & IRQ_NOREQUEST) 391 return 0; 392 393 raw_spin_lock_irqsave(&desc->lock, flags); 394 action = desc->action; 395 if (action) 396 if (irqflags & action->flags & IRQF_SHARED) 397 action = NULL; 398 399 raw_spin_unlock_irqrestore(&desc->lock, flags); 400 401 return !action; 402 } 403 404 void compat_irq_chip_set_default_handler(struct irq_desc *desc) 405 { 406 /* 407 * If the architecture still has not overriden 408 * the flow handler then zap the default. This 409 * should catch incorrect flow-type setting. 410 */ 411 if (desc->handle_irq == &handle_bad_irq) 412 desc->handle_irq = NULL; 413 } 414 415 int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, 416 unsigned long flags) 417 { 418 int ret; 419 struct irq_chip *chip = desc->chip; 420 421 if (!chip || !chip->set_type) { 422 /* 423 * IRQF_TRIGGER_* but the PIC does not support multiple 424 * flow-types? 425 */ 426 pr_debug("No set_type function for IRQ %d (%s)\n", irq, 427 chip ? (chip->name ? : "unknown") : "unknown"); 428 return 0; 429 } 430 431 /* caller masked out all except trigger mode flags */ 432 ret = chip->set_type(irq, flags); 433 434 if (ret) 435 pr_err("setting trigger mode %d for irq %u failed (%pF)\n", 436 (int)flags, irq, chip->set_type); 437 else { 438 if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) 439 flags |= IRQ_LEVEL; 440 /* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */ 441 desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK); 442 desc->status |= flags; 443 } 444 445 return ret; 446 } 447 448 /* 449 * Default primary interrupt handler for threaded interrupts. Is 450 * assigned as primary handler when request_threaded_irq is called 451 * with handler == NULL. Useful for oneshot interrupts. 452 */ 453 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id) 454 { 455 return IRQ_WAKE_THREAD; 456 } 457 458 /* 459 * Primary handler for nested threaded interrupts. Should never be 460 * called. 461 */ 462 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id) 463 { 464 WARN(1, "Primary handler called for nested irq %d\n", irq); 465 return IRQ_NONE; 466 } 467 468 static int irq_wait_for_interrupt(struct irqaction *action) 469 { 470 while (!kthread_should_stop()) { 471 set_current_state(TASK_INTERRUPTIBLE); 472 473 if (test_and_clear_bit(IRQTF_RUNTHREAD, 474 &action->thread_flags)) { 475 __set_current_state(TASK_RUNNING); 476 return 0; 477 } 478 schedule(); 479 } 480 return -1; 481 } 482 483 /* 484 * Oneshot interrupts keep the irq line masked until the threaded 485 * handler finished. unmask if the interrupt has not been disabled and 486 * is marked MASKED. 487 */ 488 static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc) 489 { 490 again: 491 chip_bus_lock(irq, desc); 492 raw_spin_lock_irq(&desc->lock); 493 494 /* 495 * Implausible though it may be we need to protect us against 496 * the following scenario: 497 * 498 * The thread is faster done than the hard interrupt handler 499 * on the other CPU. If we unmask the irq line then the 500 * interrupt can come in again and masks the line, leaves due 501 * to IRQ_INPROGRESS and the irq line is masked forever. 502 */ 503 if (unlikely(desc->status & IRQ_INPROGRESS)) { 504 raw_spin_unlock_irq(&desc->lock); 505 chip_bus_sync_unlock(irq, desc); 506 cpu_relax(); 507 goto again; 508 } 509 510 if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) { 511 desc->status &= ~IRQ_MASKED; 512 desc->chip->unmask(irq); 513 } 514 raw_spin_unlock_irq(&desc->lock); 515 chip_bus_sync_unlock(irq, desc); 516 } 517 518 #ifdef CONFIG_SMP 519 /* 520 * Check whether we need to change the affinity of the interrupt thread. 521 */ 522 static void 523 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) 524 { 525 cpumask_var_t mask; 526 527 if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags)) 528 return; 529 530 /* 531 * In case we are out of memory we set IRQTF_AFFINITY again and 532 * try again next time 533 */ 534 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { 535 set_bit(IRQTF_AFFINITY, &action->thread_flags); 536 return; 537 } 538 539 raw_spin_lock_irq(&desc->lock); 540 cpumask_copy(mask, desc->affinity); 541 raw_spin_unlock_irq(&desc->lock); 542 543 set_cpus_allowed_ptr(current, mask); 544 free_cpumask_var(mask); 545 } 546 #else 547 static inline void 548 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } 549 #endif 550 551 /* 552 * Interrupt handler thread 553 */ 554 static int irq_thread(void *data) 555 { 556 struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, }; 557 struct irqaction *action = data; 558 struct irq_desc *desc = irq_to_desc(action->irq); 559 int wake, oneshot = desc->status & IRQ_ONESHOT; 560 561 sched_setscheduler(current, SCHED_FIFO, ¶m); 562 current->irqaction = action; 563 564 while (!irq_wait_for_interrupt(action)) { 565 566 irq_thread_check_affinity(desc, action); 567 568 atomic_inc(&desc->threads_active); 569 570 raw_spin_lock_irq(&desc->lock); 571 if (unlikely(desc->status & IRQ_DISABLED)) { 572 /* 573 * CHECKME: We might need a dedicated 574 * IRQ_THREAD_PENDING flag here, which 575 * retriggers the thread in check_irq_resend() 576 * but AFAICT IRQ_PENDING should be fine as it 577 * retriggers the interrupt itself --- tglx 578 */ 579 desc->status |= IRQ_PENDING; 580 raw_spin_unlock_irq(&desc->lock); 581 } else { 582 raw_spin_unlock_irq(&desc->lock); 583 584 action->thread_fn(action->irq, action->dev_id); 585 586 if (oneshot) 587 irq_finalize_oneshot(action->irq, desc); 588 } 589 590 wake = atomic_dec_and_test(&desc->threads_active); 591 592 if (wake && waitqueue_active(&desc->wait_for_threads)) 593 wake_up(&desc->wait_for_threads); 594 } 595 596 /* 597 * Clear irqaction. Otherwise exit_irq_thread() would make 598 * fuzz about an active irq thread going into nirvana. 599 */ 600 current->irqaction = NULL; 601 return 0; 602 } 603 604 /* 605 * Called from do_exit() 606 */ 607 void exit_irq_thread(void) 608 { 609 struct task_struct *tsk = current; 610 611 if (!tsk->irqaction) 612 return; 613 614 printk(KERN_ERR 615 "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", 616 tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq); 617 618 /* 619 * Set the THREAD DIED flag to prevent further wakeups of the 620 * soon to be gone threaded handler. 621 */ 622 set_bit(IRQTF_DIED, &tsk->irqaction->flags); 623 } 624 625 /* 626 * Internal function to register an irqaction - typically used to 627 * allocate special interrupts that are part of the architecture. 628 */ 629 static int 630 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) 631 { 632 struct irqaction *old, **old_ptr; 633 const char *old_name = NULL; 634 unsigned long flags; 635 int nested, shared = 0; 636 int ret; 637 638 if (!desc) 639 return -EINVAL; 640 641 if (desc->chip == &no_irq_chip) 642 return -ENOSYS; 643 /* 644 * Some drivers like serial.c use request_irq() heavily, 645 * so we have to be careful not to interfere with a 646 * running system. 647 */ 648 if (new->flags & IRQF_SAMPLE_RANDOM) { 649 /* 650 * This function might sleep, we want to call it first, 651 * outside of the atomic block. 652 * Yes, this might clear the entropy pool if the wrong 653 * driver is attempted to be loaded, without actually 654 * installing a new handler, but is this really a problem, 655 * only the sysadmin is able to do this. 656 */ 657 rand_initialize_irq(irq); 658 } 659 660 /* Oneshot interrupts are not allowed with shared */ 661 if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED)) 662 return -EINVAL; 663 664 /* 665 * Check whether the interrupt nests into another interrupt 666 * thread. 667 */ 668 nested = desc->status & IRQ_NESTED_THREAD; 669 if (nested) { 670 if (!new->thread_fn) 671 return -EINVAL; 672 /* 673 * Replace the primary handler which was provided from 674 * the driver for non nested interrupt handling by the 675 * dummy function which warns when called. 676 */ 677 new->handler = irq_nested_primary_handler; 678 } 679 680 /* 681 * Create a handler thread when a thread function is supplied 682 * and the interrupt does not nest into another interrupt 683 * thread. 684 */ 685 if (new->thread_fn && !nested) { 686 struct task_struct *t; 687 688 t = kthread_create(irq_thread, new, "irq/%d-%s", irq, 689 new->name); 690 if (IS_ERR(t)) 691 return PTR_ERR(t); 692 /* 693 * We keep the reference to the task struct even if 694 * the thread dies to avoid that the interrupt code 695 * references an already freed task_struct. 696 */ 697 get_task_struct(t); 698 new->thread = t; 699 } 700 701 /* 702 * The following block of code has to be executed atomically 703 */ 704 raw_spin_lock_irqsave(&desc->lock, flags); 705 old_ptr = &desc->action; 706 old = *old_ptr; 707 if (old) { 708 /* 709 * Can't share interrupts unless both agree to and are 710 * the same type (level, edge, polarity). So both flag 711 * fields must have IRQF_SHARED set and the bits which 712 * set the trigger type must match. 713 */ 714 if (!((old->flags & new->flags) & IRQF_SHARED) || 715 ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) { 716 old_name = old->name; 717 goto mismatch; 718 } 719 720 #if defined(CONFIG_IRQ_PER_CPU) 721 /* All handlers must agree on per-cpuness */ 722 if ((old->flags & IRQF_PERCPU) != 723 (new->flags & IRQF_PERCPU)) 724 goto mismatch; 725 #endif 726 727 /* add new interrupt at end of irq queue */ 728 do { 729 old_ptr = &old->next; 730 old = *old_ptr; 731 } while (old); 732 shared = 1; 733 } 734 735 if (!shared) { 736 irq_chip_set_defaults(desc->chip); 737 738 init_waitqueue_head(&desc->wait_for_threads); 739 740 /* Setup the type (level, edge polarity) if configured: */ 741 if (new->flags & IRQF_TRIGGER_MASK) { 742 ret = __irq_set_trigger(desc, irq, 743 new->flags & IRQF_TRIGGER_MASK); 744 745 if (ret) 746 goto out_thread; 747 } else 748 compat_irq_chip_set_default_handler(desc); 749 #if defined(CONFIG_IRQ_PER_CPU) 750 if (new->flags & IRQF_PERCPU) 751 desc->status |= IRQ_PER_CPU; 752 #endif 753 754 desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | IRQ_ONESHOT | 755 IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED); 756 757 if (new->flags & IRQF_ONESHOT) 758 desc->status |= IRQ_ONESHOT; 759 760 /* 761 * Force MSI interrupts to run with interrupts 762 * disabled. The multi vector cards can cause stack 763 * overflows due to nested interrupts when enough of 764 * them are directed to a core and fire at the same 765 * time. 766 */ 767 if (desc->msi_desc) 768 new->flags |= IRQF_DISABLED; 769 770 if (!(desc->status & IRQ_NOAUTOEN)) { 771 desc->depth = 0; 772 desc->status &= ~IRQ_DISABLED; 773 desc->chip->startup(irq); 774 } else 775 /* Undo nested disables: */ 776 desc->depth = 1; 777 778 /* Exclude IRQ from balancing if requested */ 779 if (new->flags & IRQF_NOBALANCING) 780 desc->status |= IRQ_NO_BALANCING; 781 782 /* Set default affinity mask once everything is setup */ 783 setup_affinity(irq, desc); 784 785 } else if ((new->flags & IRQF_TRIGGER_MASK) 786 && (new->flags & IRQF_TRIGGER_MASK) 787 != (desc->status & IRQ_TYPE_SENSE_MASK)) { 788 /* hope the handler works with the actual trigger mode... */ 789 pr_warning("IRQ %d uses trigger mode %d; requested %d\n", 790 irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK), 791 (int)(new->flags & IRQF_TRIGGER_MASK)); 792 } 793 794 new->irq = irq; 795 *old_ptr = new; 796 797 /* Reset broken irq detection when installing new handler */ 798 desc->irq_count = 0; 799 desc->irqs_unhandled = 0; 800 801 /* 802 * Check whether we disabled the irq via the spurious handler 803 * before. Reenable it and give it another chance. 804 */ 805 if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) { 806 desc->status &= ~IRQ_SPURIOUS_DISABLED; 807 __enable_irq(desc, irq, false); 808 } 809 810 raw_spin_unlock_irqrestore(&desc->lock, flags); 811 812 /* 813 * Strictly no need to wake it up, but hung_task complains 814 * when no hard interrupt wakes the thread up. 815 */ 816 if (new->thread) 817 wake_up_process(new->thread); 818 819 register_irq_proc(irq, desc); 820 new->dir = NULL; 821 register_handler_proc(irq, new); 822 823 return 0; 824 825 mismatch: 826 #ifdef CONFIG_DEBUG_SHIRQ 827 if (!(new->flags & IRQF_PROBE_SHARED)) { 828 printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq); 829 if (old_name) 830 printk(KERN_ERR "current handler: %s\n", old_name); 831 dump_stack(); 832 } 833 #endif 834 ret = -EBUSY; 835 836 out_thread: 837 raw_spin_unlock_irqrestore(&desc->lock, flags); 838 if (new->thread) { 839 struct task_struct *t = new->thread; 840 841 new->thread = NULL; 842 if (likely(!test_bit(IRQTF_DIED, &new->thread_flags))) 843 kthread_stop(t); 844 put_task_struct(t); 845 } 846 return ret; 847 } 848 849 /** 850 * setup_irq - setup an interrupt 851 * @irq: Interrupt line to setup 852 * @act: irqaction for the interrupt 853 * 854 * Used to statically setup interrupts in the early boot process. 855 */ 856 int setup_irq(unsigned int irq, struct irqaction *act) 857 { 858 struct irq_desc *desc = irq_to_desc(irq); 859 860 return __setup_irq(irq, desc, act); 861 } 862 EXPORT_SYMBOL_GPL(setup_irq); 863 864 /* 865 * Internal function to unregister an irqaction - used to free 866 * regular and special interrupts that are part of the architecture. 867 */ 868 static struct irqaction *__free_irq(unsigned int irq, void *dev_id) 869 { 870 struct irq_desc *desc = irq_to_desc(irq); 871 struct irqaction *action, **action_ptr; 872 unsigned long flags; 873 874 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); 875 876 if (!desc) 877 return NULL; 878 879 raw_spin_lock_irqsave(&desc->lock, flags); 880 881 /* 882 * There can be multiple actions per IRQ descriptor, find the right 883 * one based on the dev_id: 884 */ 885 action_ptr = &desc->action; 886 for (;;) { 887 action = *action_ptr; 888 889 if (!action) { 890 WARN(1, "Trying to free already-free IRQ %d\n", irq); 891 raw_spin_unlock_irqrestore(&desc->lock, flags); 892 893 return NULL; 894 } 895 896 if (action->dev_id == dev_id) 897 break; 898 action_ptr = &action->next; 899 } 900 901 /* Found it - now remove it from the list of entries: */ 902 *action_ptr = action->next; 903 904 /* Currently used only by UML, might disappear one day: */ 905 #ifdef CONFIG_IRQ_RELEASE_METHOD 906 if (desc->chip->release) 907 desc->chip->release(irq, dev_id); 908 #endif 909 910 /* If this was the last handler, shut down the IRQ line: */ 911 if (!desc->action) { 912 desc->status |= IRQ_DISABLED; 913 if (desc->chip->shutdown) 914 desc->chip->shutdown(irq); 915 else 916 desc->chip->disable(irq); 917 } 918 919 raw_spin_unlock_irqrestore(&desc->lock, flags); 920 921 unregister_handler_proc(irq, action); 922 923 /* Make sure it's not being used on another CPU: */ 924 synchronize_irq(irq); 925 926 #ifdef CONFIG_DEBUG_SHIRQ 927 /* 928 * It's a shared IRQ -- the driver ought to be prepared for an IRQ 929 * event to happen even now it's being freed, so let's make sure that 930 * is so by doing an extra call to the handler .... 931 * 932 * ( We do this after actually deregistering it, to make sure that a 933 * 'real' IRQ doesn't run in * parallel with our fake. ) 934 */ 935 if (action->flags & IRQF_SHARED) { 936 local_irq_save(flags); 937 action->handler(irq, dev_id); 938 local_irq_restore(flags); 939 } 940 #endif 941 942 if (action->thread) { 943 if (!test_bit(IRQTF_DIED, &action->thread_flags)) 944 kthread_stop(action->thread); 945 put_task_struct(action->thread); 946 } 947 948 return action; 949 } 950 951 /** 952 * remove_irq - free an interrupt 953 * @irq: Interrupt line to free 954 * @act: irqaction for the interrupt 955 * 956 * Used to remove interrupts statically setup by the early boot process. 957 */ 958 void remove_irq(unsigned int irq, struct irqaction *act) 959 { 960 __free_irq(irq, act->dev_id); 961 } 962 EXPORT_SYMBOL_GPL(remove_irq); 963 964 /** 965 * free_irq - free an interrupt allocated with request_irq 966 * @irq: Interrupt line to free 967 * @dev_id: Device identity to free 968 * 969 * Remove an interrupt handler. The handler is removed and if the 970 * interrupt line is no longer in use by any driver it is disabled. 971 * On a shared IRQ the caller must ensure the interrupt is disabled 972 * on the card it drives before calling this function. The function 973 * does not return until any executing interrupts for this IRQ 974 * have completed. 975 * 976 * This function must not be called from interrupt context. 977 */ 978 void free_irq(unsigned int irq, void *dev_id) 979 { 980 struct irq_desc *desc = irq_to_desc(irq); 981 982 if (!desc) 983 return; 984 985 chip_bus_lock(irq, desc); 986 kfree(__free_irq(irq, dev_id)); 987 chip_bus_sync_unlock(irq, desc); 988 } 989 EXPORT_SYMBOL(free_irq); 990 991 /** 992 * request_threaded_irq - allocate an interrupt line 993 * @irq: Interrupt line to allocate 994 * @handler: Function to be called when the IRQ occurs. 995 * Primary handler for threaded interrupts 996 * If NULL and thread_fn != NULL the default 997 * primary handler is installed 998 * @thread_fn: Function called from the irq handler thread 999 * If NULL, no irq thread is created 1000 * @irqflags: Interrupt type flags 1001 * @devname: An ascii name for the claiming device 1002 * @dev_id: A cookie passed back to the handler function 1003 * 1004 * This call allocates interrupt resources and enables the 1005 * interrupt line and IRQ handling. From the point this 1006 * call is made your handler function may be invoked. Since 1007 * your handler function must clear any interrupt the board 1008 * raises, you must take care both to initialise your hardware 1009 * and to set up the interrupt handler in the right order. 1010 * 1011 * If you want to set up a threaded irq handler for your device 1012 * then you need to supply @handler and @thread_fn. @handler ist 1013 * still called in hard interrupt context and has to check 1014 * whether the interrupt originates from the device. If yes it 1015 * needs to disable the interrupt on the device and return 1016 * IRQ_WAKE_THREAD which will wake up the handler thread and run 1017 * @thread_fn. This split handler design is necessary to support 1018 * shared interrupts. 1019 * 1020 * Dev_id must be globally unique. Normally the address of the 1021 * device data structure is used as the cookie. Since the handler 1022 * receives this value it makes sense to use it. 1023 * 1024 * If your interrupt is shared you must pass a non NULL dev_id 1025 * as this is required when freeing the interrupt. 1026 * 1027 * Flags: 1028 * 1029 * IRQF_SHARED Interrupt is shared 1030 * IRQF_DISABLED Disable local interrupts while processing 1031 * IRQF_SAMPLE_RANDOM The interrupt can be used for entropy 1032 * IRQF_TRIGGER_* Specify active edge(s) or level 1033 * 1034 */ 1035 int request_threaded_irq(unsigned int irq, irq_handler_t handler, 1036 irq_handler_t thread_fn, unsigned long irqflags, 1037 const char *devname, void *dev_id) 1038 { 1039 struct irqaction *action; 1040 struct irq_desc *desc; 1041 int retval; 1042 1043 /* 1044 * handle_IRQ_event() always ignores IRQF_DISABLED except for 1045 * the _first_ irqaction (sigh). That can cause oopsing, but 1046 * the behavior is classified as "will not fix" so we need to 1047 * start nudging drivers away from using that idiom. 1048 */ 1049 if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) == 1050 (IRQF_SHARED|IRQF_DISABLED)) { 1051 pr_warning( 1052 "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n", 1053 irq, devname); 1054 } 1055 1056 #ifdef CONFIG_LOCKDEP 1057 /* 1058 * Lockdep wants atomic interrupt handlers: 1059 */ 1060 irqflags |= IRQF_DISABLED; 1061 #endif 1062 /* 1063 * Sanity-check: shared interrupts must pass in a real dev-ID, 1064 * otherwise we'll have trouble later trying to figure out 1065 * which interrupt is which (messes up the interrupt freeing 1066 * logic etc). 1067 */ 1068 if ((irqflags & IRQF_SHARED) && !dev_id) 1069 return -EINVAL; 1070 1071 desc = irq_to_desc(irq); 1072 if (!desc) 1073 return -EINVAL; 1074 1075 if (desc->status & IRQ_NOREQUEST) 1076 return -EINVAL; 1077 1078 if (!handler) { 1079 if (!thread_fn) 1080 return -EINVAL; 1081 handler = irq_default_primary_handler; 1082 } 1083 1084 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); 1085 if (!action) 1086 return -ENOMEM; 1087 1088 action->handler = handler; 1089 action->thread_fn = thread_fn; 1090 action->flags = irqflags; 1091 action->name = devname; 1092 action->dev_id = dev_id; 1093 1094 chip_bus_lock(irq, desc); 1095 retval = __setup_irq(irq, desc, action); 1096 chip_bus_sync_unlock(irq, desc); 1097 1098 if (retval) 1099 kfree(action); 1100 1101 #ifdef CONFIG_DEBUG_SHIRQ 1102 if (!retval && (irqflags & IRQF_SHARED)) { 1103 /* 1104 * It's a shared IRQ -- the driver ought to be prepared for it 1105 * to happen immediately, so let's make sure.... 1106 * We disable the irq to make sure that a 'real' IRQ doesn't 1107 * run in parallel with our fake. 1108 */ 1109 unsigned long flags; 1110 1111 disable_irq(irq); 1112 local_irq_save(flags); 1113 1114 handler(irq, dev_id); 1115 1116 local_irq_restore(flags); 1117 enable_irq(irq); 1118 } 1119 #endif 1120 return retval; 1121 } 1122 EXPORT_SYMBOL(request_threaded_irq); 1123