/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "internals.h"

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned int status;

	if (!desc)
		return;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (desc->status & IRQ_INPROGRESS)
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		status = desc->status;
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (status & IRQ_INPROGRESS);

	/*
	 * We made sure that no hardirq handler is running. Now verify
	 * that no threaded handlers are active.
	 */
	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq: Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (CHECK_IRQ_PER_CPU(desc->status) || !desc->irq_data.chip ||
	    !desc->irq_data.chip->irq_set_affinity)
		return 0;

	return 1;
}
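
/*
 * Example: the typical teardown ordering that relies on synchronize_irq().
 * This is an illustrative sketch only; "foo" and its members are
 * hypothetical and not part of this file. The point is that the handler
 * must be quiesced before the resources it touches are freed:
 *
 *	disable_irq_nosync(foo->irq);	// stop new invocations
 *	synchronize_irq(foo->irq);	// wait out handlers already running
 *	foo_free_buffers(foo);		// now safe: no handler can touch them
 *
 * Calling synchronize_irq() while holding a lock the handler also takes
 * would deadlock, as the kerneldoc above warns.
 */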

/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc:	irq descriptor which has affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action = desc->action;

	while (action) {
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
		action = action->next;
	}
}

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_chip *chip = desc->irq_data.chip;
	unsigned long flags;

	if (!chip->irq_set_affinity)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (desc->status & IRQ_MOVE_PCNTXT) {
		if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) {
			cpumask_copy(desc->irq_data.affinity, cpumask);
			irq_set_thread_affinity(desc);
		}
	} else {
		desc->status |= IRQ_MOVE_PENDING;
		cpumask_copy(desc->pending_mask, cpumask);
	}
#else
	if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) {
		cpumask_copy(desc->irq_data.affinity, cpumask);
		irq_set_thread_affinity(desc);
	}
#endif
	desc->status |= IRQ_AFFINITY_SET;
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->affinity_hint = m;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
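
/*
 * Example: pinning an interrupt to one CPU from driver code. Illustrative
 * sketch only; "foo" is hypothetical. cpumask_of() yields a mask
 * containing a single CPU:
 *
 *	ret = irq_set_affinity(foo->irq, cpumask_of(2));
 *	if (ret)
 *		dev_warn(foo->dev, "could not move irq %d\n", foo->irq);
 *
 * Note that with CONFIG_GENERIC_PENDING_IRQ the move may be deferred
 * (IRQ_MOVE_PENDING) until the next interrupt instead of taking effect
 * immediately.
 */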

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
		if (cpumask_any_and(desc->irq_data.affinity, cpu_online_mask)
		    < nr_cpu_ids)
			goto set_affinity;
		else
			desc->status &= ~IRQ_AFFINITY_SET;
	}

	cpumask_and(desc->irq_data.affinity, cpu_online_mask, irq_default_affinity);
set_affinity:
	desc->irq_data.chip->irq_set_affinity(&desc->irq_data, desc->irq_data.affinity, false);

	return 0;
}
#else
static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
{
	return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(irq, desc);
	if (!ret)
		irq_set_thread_affinity(desc);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return ret;
}

#else
static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
	return 0;
}
#endif

void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
{
	if (suspend) {
		if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
			return;
		desc->status |= IRQ_SUSPENDED;
	}

	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	}
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return;

	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);
	__disable_irq(desc, irq, false);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(disable_irq_nosync);
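
/*
 * Example: disable/enable calls nest via desc->depth, so paired callers
 * do not interfere with each other. Illustrative sketch, hypothetical irq:
 *
 *	disable_irq_nosync(irq);	// depth 0 -> 1, line masked
 *	disable_irq_nosync(irq);	// depth 1 -> 2, still masked
 *	enable_irq(irq);		// depth 2 -> 1, still masked
 *	enable_irq(irq);		// depth 1 -> 0, line unmasked
 *
 * The nosync variant is safe in IRQ context precisely because it does
 * not call synchronize_irq() and therefore never waits on a handler.
 */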

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return;

	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
	if (resume)
		desc->status &= ~IRQ_SUSPENDED;

	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
		break;
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;

		if (desc->status & IRQ_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		desc->status = status | IRQ_NOPROBE;
		check_irq_resend(desc, irq);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return;

	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);
	__enable_irq(desc, irq, false);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(enable_irq);

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 * set_irq_wake - control irq power management wakeup
 * @irq:	interrupt to control
 * @on:		enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int set_irq_wake(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret = 0;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				desc->status |= IRQ_WAKEUP;
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				desc->status &= ~IRQ_WAKEUP;
		}
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(set_irq_wake);
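
/*
 * Example: marking a wakeup source across suspend/resume. Illustrative
 * sketch with a hypothetical driver; enables and disables must pair up
 * because desc->wake_depth is a refcount:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			set_irq_wake(foo->irq, 1);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			set_irq_wake(foo->irq, 0);
 *		return 0;
 *	}
 */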

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc)
		return 0;

	if (desc->status & IRQ_NOREQUEST)
		return 0;

	raw_spin_lock_irqsave(&desc->lock, flags);
	action = desc->action;
	if (action)
		if (irqflags & action->flags & IRQF_SHARED)
			action = NULL;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return !action;
}

void compat_irq_chip_set_default_handler(struct irq_desc *desc)
{
	/*
	 * If the architecture still has not overridden
	 * the flow handler then zap the default. This
	 * should catch incorrect flow-type setting.
	 */
	if (desc->handle_irq == &handle_bad_irq)
		desc->handle_irq = NULL;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		unsigned long flags)
{
	int ret;
	struct irq_chip *chip = desc->irq_data.chip;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	/* caller masked out all except trigger mode flags */
	ret = chip->irq_set_type(&desc->irq_data, flags);

	if (ret)
		pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq, chip->irq_set_type);
	else {
		if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
			flags |= IRQ_LEVEL;
		/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
		desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
		desc->status |= flags;

		if (chip != desc->irq_data.chip)
			irq_chip_set_defaults(desc->irq_data.chip);
	}

	return ret;
}

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
	}
	return -1;
}
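
/*
 * Example: the split primary/threaded handler pattern that
 * irq_default_primary_handler() above supports. Illustrative sketch with
 * a hypothetical device; the primary runs in hard interrupt context,
 * checks and quiets the device, and defers the slow work to the thread:
 *
 *	static irqreturn_t foo_quick_check(int irq, void *dev_id)
 *	{
 *		struct foo *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))
 *			return IRQ_NONE;	// shared line, not ours
 *		foo_mask_device_irq(foo);	// quiet the device
 *		return IRQ_WAKE_THREAD;		// run the thread_fn
 *	}
 */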

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler finished. Unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */
static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
{
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be, we need to protect against
	 * the following scenario:
	 *
	 * The thread finishes faster than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and masks the line, leaves due
	 * to IRQ_INPROGRESS and the irq line is masked forever.
	 */
	if (unlikely(desc->status & IRQ_INPROGRESS)) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
		desc->status &= ~IRQ_MASKED;
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	}
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	cpumask_copy(mask, desc->irq_data.affinity);
	raw_spin_unlock_irq(&desc->lock);

	set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, };
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	int wake, oneshot = desc->status & IRQ_ONESHOT;

	sched_setscheduler(current, SCHED_FIFO, &param);
	current->irqaction = action;

	while (!irq_wait_for_interrupt(action)) {

		irq_thread_check_affinity(desc, action);

		atomic_inc(&desc->threads_active);

		raw_spin_lock_irq(&desc->lock);
		if (unlikely(desc->status & IRQ_DISABLED)) {
			/*
			 * CHECKME: We might need a dedicated
			 * IRQ_THREAD_PENDING flag here, which
			 * retriggers the thread in check_irq_resend()
			 * but AFAICT IRQ_PENDING should be fine as it
			 * retriggers the interrupt itself --- tglx
			 */
			desc->status |= IRQ_PENDING;
			raw_spin_unlock_irq(&desc->lock);
		} else {
			raw_spin_unlock_irq(&desc->lock);

			action->thread_fn(action->irq, action->dev_id);

			if (oneshot)
				irq_finalize_oneshot(action->irq, desc);
		}

		wake = atomic_dec_and_test(&desc->threads_active);

		if (wake && waitqueue_active(&desc->wait_for_threads))
			wake_up(&desc->wait_for_threads);
	}

	/*
	 * Clear irqaction. Otherwise exit_irq_thread() would complain
	 * about an active irq thread going into nirvana.
	 */
	current->irqaction = NULL;
	return 0;
}

/*
 * Called from do_exit()
 */
void exit_irq_thread(void)
{
	struct task_struct *tsk = current;

	if (!tsk->irqaction)
		return;

	printk(KERN_ERR
	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);

	/*
	 * Set the THREAD DIED flag to prevent further wakeups of the
	 * soon to be gone threaded handler.
	 */
	set_bit(IRQTF_DIED, &tsk->irqaction->flags);
}
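
/*
 * Example: a thread_fn runs in the irq_thread() kthread above and may
 * therefore sleep, unlike a primary handler. Illustrative sketch with
 * hypothetical helpers; a typical use is draining a slow bus from the
 * thread:
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo *foo = dev_id;
 *
 *		foo_read_fifo_over_i2c(foo);	// may sleep: fine here
 *		foo_unmask_device_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 */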

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	const char *old_name = NULL;
	unsigned long flags;
	int nested, shared = 0;
	int ret;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & IRQF_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is loaded without actually installing a new
		 * handler, but is this really a problem? Only the
		 * sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/* Oneshot interrupts are not allowed with shared */
	if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED))
		return -EINVAL;

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = desc->status & IRQ_NESTED_THREAD;
	if (nested) {
		if (!new->thread_fn)
			return -EINVAL;
		/*
		 * Replace the primary handler, which was provided by
		 * the driver for non-nested interrupt handling, with
		 * the dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		struct task_struct *t;

		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
		if (IS_ERR(t))
			return PTR_ERR(t);
		/*
		 * We keep the reference to the task struct even if
		 * the thread dies, so that the interrupt code never
		 * references an already freed task_struct.
		 */
		get_task_struct(t);
		new->thread = t;
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
			old_name = old->name;
			goto mismatch;
		}

#if defined(CONFIG_IRQ_PER_CPU)
		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;
#endif

		/* add new interrupt at end of irq queue */
		do {
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	if (!shared) {
		irq_chip_set_defaults(desc->irq_data.chip);

		init_waitqueue_head(&desc->wait_for_threads);

		/* Set up the trigger type (level, edge, polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc, irq,
					new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_thread;
		} else
			compat_irq_chip_set_default_handler(desc);
#if defined(CONFIG_IRQ_PER_CPU)
		if (new->flags & IRQF_PERCPU)
			desc->status |= IRQ_PER_CPU;
#endif

		desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | IRQ_ONESHOT |
				  IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);

		if (new->flags & IRQF_ONESHOT)
			desc->status |= IRQ_ONESHOT;

		if (!(desc->status & IRQ_NOAUTOEN)) {
			desc->depth = 0;
			desc->status &= ~IRQ_DISABLED;
			desc->irq_data.chip->irq_startup(&desc->irq_data);
		} else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING)
			desc->status |= IRQ_NO_BALANCING;

		/* Set default affinity mask once everything is set up */
		setup_affinity(irq, desc);

	} else if ((new->flags & IRQF_TRIGGER_MASK)
			&& (new->flags & IRQF_TRIGGER_MASK)
				!= (desc->status & IRQ_TYPE_SENSE_MASK)) {
		/* hope the handler works with the actual trigger mode... */
		pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
			   irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
			   (int)(new->flags & IRQF_TRIGGER_MASK));
	}

	new->irq = irq;
	*old_ptr = new;

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
		desc->status &= ~IRQ_SPURIOUS_DISABLED;
		__enable_irq(desc, irq, false);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);

	return 0;

mismatch:
#ifdef CONFIG_DEBUG_SHIRQ
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
		if (old_name)
			printk(KERN_ERR "current handler: %s\n", old_name);
		dump_stack();
	}
#endif
	ret = -EBUSY;

out_thread:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
			kthread_stop(t);
		put_task_struct(t);
	}
	return ret;
}

/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically set up interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __setup_irq(irq, desc, act);
}
EXPORT_SYMBOL_GPL(setup_irq);
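
/*
 * Example: the classic arch-code use of setup_irq() with a statically
 * allocated irqaction, registered before kmalloc()/request_irq() are
 * usable. Illustrative sketch only; the names below are hypothetical:
 *
 *	static struct irqaction timer_action = {
 *		.handler = timer_interrupt,
 *		.flags	 = IRQF_DISABLED | IRQF_TIMER,
 *		.name	 = "timer",
 *	};
 *
 *	void __init arch_time_init_irq(void)
 *	{
 *		setup_irq(0, &timer_action);
 *	}
 */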

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);

			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	/* Currently used only by UML, might disappear one day: */
#ifdef CONFIG_IRQ_RELEASE_METHOD
	if (desc->irq_data.chip->release)
		desc->irq_data.chip->release(irq, dev_id);
#endif

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		desc->status |= IRQ_DISABLED;
		if (desc->irq_data.chip->irq_shutdown)
			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
		else
			desc->irq_data.chip->irq_disable(&desc->irq_data);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now that it's being freed, so let's make
	 * sure that is so by doing an extra call to the handler....
	 *
	 * (We do this after actually deregistering it, to make sure that
	 * a 'real' IRQ doesn't run in parallel with our fake.)
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		if (!test_bit(IRQTF_DIED, &action->thread_flags))
			kthread_stop(action->thread);
		put_task_struct(action->thread);
	}

	return action;
}

/**
 * remove_irq - free an interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically set up by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return;

	chip_bus_lock(desc);
	kfree(__free_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);
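
/*
 * Example: on a shared line, dev_id is what selects which irqaction to
 * remove, so it must be the same cookie that was passed to request_irq().
 * Illustrative sketch, hypothetical driver:
 *
 *	request_irq(foo->irq, foo_handler, IRQF_SHARED, "foo", foo);
 *	...
 *	foo_hw_disable_irq(foo);	// quiet our device first
 *	free_irq(foo->irq, foo);	// same cookie as request_irq()
 *
 * free_irq() calls synchronize_irq() internally, which is one reason it
 * must not be used from interrupt context.
 */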

/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Primary handler for threaded interrupts.
 *	     If NULL and thread_fn != NULL the default
 *	     primary handler is installed.
 * @thread_fn: Function called from the irq handler thread.
 *	       If NULL, no irq thread is created.
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non-NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 */
	if ((irqflags & IRQF_SHARED) && !dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (desc->status & IRQ_NOREQUEST)
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);

/**
 * request_any_context_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Threaded handler for threaded interrupts.
 * @flags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It selects either a
 * hardirq or threaded handling method depending on the
 * context.
 *
 * On failure, it returns a negative value. On success,
 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret;

	if (!desc)
		return -EINVAL;

	if (desc->status & IRQ_NESTED_THREAD) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
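
/*
 * Example: the common request_threaded_irq() pattern with a NULL primary
 * handler, letting irq_default_primary_handler() wake the thread while
 * IRQF_ONESHOT keeps the line masked until foo_thread_fn() returns.
 * Illustrative sketch; "foo" and its handlers are hypothetical:
 *
 *	ret = request_threaded_irq(foo->irq, NULL, foo_thread_fn,
 *				   IRQF_ONESHOT | IRQF_TRIGGER_LOW,
 *				   "foo", foo);
 *	if (ret)
 *		return ret;
 *	...
 *	free_irq(foo->irq, foo);	// pairs with the request above
 */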