/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/task_work.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

static void __synchronize_hardirq(struct irq_desc *desc)
{
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this
 * function while holding a resource the IRQ handler may need you
 * will deadlock. It does not take associated threaded handlers
 * into account.
 *
 * Do not use this for shutdown scenarios where you must be sure
 * that all parts (hardirq and threaded handler) have completed.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc)
		__synchronize_hardirq(desc);
}
EXPORT_SYMBOL(synchronize_hardirq);

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);
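/*
 * Illustrative sketch (not part of the original file): a caller must not
 * hold a lock its handler takes while calling synchronize_irq(), or it
 * will deadlock. A typical device stop path looks roughly like this;
 * "my_dev", "my_stop_hw" and "dev->irq" are hypothetical driver names:
 *
 *	static void my_stop(struct my_dev *dev)
 *	{
 *		my_stop_hw(dev);		// device no longer raises IRQs
 *		synchronize_irq(dev->irq);	// wait out running handlers
 *		// now it is safe to tear down data the handlers used
 *	}
 */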
#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq: Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return 0;

	return 1;
}

/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc: irq descriptor which has affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action = desc->action;

	while (action) {
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
		action = action->next;
	}
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
#else
static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
static inline bool irq_move_pending(struct irq_data *data) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	ret = chip->irq_set_affinity(data, mask, force);
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(data->affinity, mask);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = irq_do_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
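/*
 * Illustrative sketch (not in the original file): pinning an interrupt to
 * one CPU from process context via the irq_set_affinity() inline wrapper
 * in <linux/interrupt.h>, which calls __irq_set_affinity(irq, mask, false).
 * "irq" is assumed to be a valid, already requested line:
 *
 *	int err = irq_set_affinity(irq, cpumask_of(2));
 *	if (err)
 *		pr_warn("could not move irq %u: %d\n", irq, err);
 */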
int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	/* set the initial affinity to prevent every interrupt being on CPU0 */
	if (m)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq: Interrupt for which to enable/disable notification
 * @notify: Context for notification, or %NULL to disable
 *          notification. Function pointers must be initialised;
 *          the other fields will be initialised by this function.
 *
 * Must be called in process context. Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
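/*
 * Illustrative sketch (not in the original file): registering an affinity
 * notifier. The caller embeds struct irq_affinity_notify and fills in the
 * two callbacks before calling irq_set_affinity_notifier(); "my_notify"
 * and "my_release" are hypothetical names:
 *
 *	static void my_notify(struct irq_affinity_notify *n,
 *			      const cpumask_t *mask)
 *	{
 *		// re-steer per-CPU resources to the new mask
 *	}
 *
 *	static void my_release(struct kref *ref)
 *	{
 *		// last reference dropped; free the containing object
 *	}
 *
 *	notify->notify = my_notify;
 *	notify->release = my_release;
 *	irq_set_affinity_notifier(irq, notify);
 *	...
 *	irq_set_affinity_notifier(irq, NULL);	// before free_irq()
 */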
#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	struct cpumask *set = irq_default_affinity;
	int node = desc->irq_data.node;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_data.affinity,
				       cpu_online_mask))
			set = desc->irq_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(mask, nodemask))
			cpumask_and(mask, mask, nodemask);
	}
	irq_do_set_affinity(&desc->irq_data, mask, false);
	return 0;
}
#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
{
	return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(irq, desc, mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	return 0;
}
#endif

void __disable_irq(struct irq_desc *desc, unsigned int irq)
{
	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc, irq);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
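/*
 * Illustrative sketch (not in the original file): disable/enable calls
 * nest via desc->depth, so every disable_irq() needs a matching
 * enable_irq() before the line is live again:
 *
 *	disable_irq(irq);	// depth 0 -> 1, line masked
 *	disable_irq(irq);	// depth 1 -> 2, still masked
 *	enable_irq(irq);	// depth 2 -> 1, still masked
 *	enable_irq(irq);	// depth 1 -> 0, line unmasked
 */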
void __enable_irq(struct irq_desc *desc, unsigned int irq)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		irq_enable(desc);
		check_irq_resend(desc, irq);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->irq_data.chip->bus_sync_unlock
 * are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc, irq);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
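/*
 * Illustrative sketch (not in the original file): drivers usually reach
 * this via the enable_irq_wake()/disable_irq_wake() wrappers from
 * <linux/interrupt.h>, which call irq_set_irq_wake(irq, 1/0), typically
 * from suspend/resume hooks; "my_suspend"/"my_resume"/"my_irq" are
 * hypothetical names:
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(my_irq);	// arm for wakeup
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(my_irq);	// matching disable
 *		return 0;
 *	}
 */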
/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		      unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	flags &= IRQ_TYPE_SENSE_MASK;

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* caller masked out all except trigger mode flags */
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		/* fall through */

	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq, chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
#endif
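/*
 * Illustrative sketch (not in the original file): drivers normally pick
 * the trigger mode by passing IRQF_TRIGGER_* flags to request_irq();
 * __setup_irq() then feeds them into __irq_set_trigger() above. "my_isr"
 * is a hypothetical handler:
 *
 *	err = request_irq(irq, my_isr, IRQF_TRIGGER_RISING,
 *			  "my-device", dev);
 */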
/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask if the interrupt has not been disabled
 * and is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT))
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be, we need to protect against
	 * the following scenario:
	 *
	 * The thread finishes before the hard interrupt handler on
	 * the other CPU. If we unmask the irq line now, the next
	 * interrupt can come in and mask the line again, but its
	 * handler leaves early due to IRQS_INPROGRESS, so the irq
	 * line stays masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}
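/*
 * Illustrative sketch (not in the original file): requesting a purely
 * threaded interrupt. With handler == NULL the default primary handler
 * above is installed, and IRQF_ONESHOT keeps the line masked until the
 * thread function has run; "my_thread_fn" is a hypothetical name:
 *
 *	err = request_threaded_irq(irq, NULL, my_thread_fn,
 *				   IRQF_ONESHOT, "my-device", dev);
 */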
#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (desc->irq_data.affinity)
		cpumask_copy(mask, desc->irq_data.affinity);
	else
		valid = false;
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
				  struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					 &action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, false);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_HANDLED)
			atomic_inc(&desc->threads_handled);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set. We cannot verify that as we
	 * cannot touch the oneshot mask at this point anymore as
	 * __setup_irq() might have given out current's thread_mask
	 * again.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}

/**
 * irq_wake_thread - wake the irq thread for the action identified by dev_id
 * @irq: Interrupt line
 * @dev_id: Device identity for which the thread should be woken
 *
 */
void irq_wake_thread(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for (action = desc->action; action; action = action->next) {
		if (action->dev_id == dev_id) {
			if (action->thread)
				__irq_wake_thread(desc, action);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);
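/*
 * Illustrative sketch (not in the original file): a primary handler that
 * handles the hard irq itself but still wants its thread to run can kick
 * the thread explicitly instead of returning IRQ_WAKE_THREAD; "my_dev" is
 * a hypothetical cookie matching the one passed at request time:
 *
 *	static irqreturn_t my_primary(int irq, void *my_dev)
 *	{
 *		// ack the device, then push the heavy work to the thread
 *		irq_wake_thread(irq, my_dev);
 *		return IRQ_HANDLED;
 *	}
 */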
static void irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return;

	new->flags |= IRQF_ONESHOT;

	if (!new->thread_fn) {
		set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
		new->thread_fn = new->handler;
		new->handler = irq_default_primary_handler;
	}
}
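/*
 * Illustrative note (not in the original file): forced threading is armed
 * at boot via the "threadirqs" command line parameter (see
 * setup_forced_irqthreads() at the top of this file), e.g.:
 *
 *	linux ... threadirqs
 *
 * Handlers marked IRQF_NO_THREAD, IRQF_PERCPU or IRQF_ONESHOT are left
 * alone; everything else gets its primary handler pushed into a thread.
 */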
static int irq_request_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
}

static void irq_release_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	if (c->irq_release_resources)
		c->irq_release_resources(d);
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;
	cpumask_var_t mask;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc))
			irq_setup_forced_threading(new);
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		struct task_struct *t;
		static const struct sched_param param = {
			.sched_priority = MAX_USER_RT_PRIO/2,
		};

		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
		if (IS_ERR(t)) {
			ret = PTR_ERR(t);
			goto out_mput;
		}

		sched_setscheduler_nocheck(t, SCHED_FIFO, &param);

		/*
		 * We keep the reference to the task struct even if
		 * the thread dies to avoid that the interrupt code
		 * references an already freed task_struct.
		 */
		get_task_struct(t);
		new->thread = t;
		/*
		 * Tell the thread to set its affinity. This is
		 * important for shared interrupt handlers as we do
		 * not invoke setup_affinity() for the secondary
		 * handlers as everything is already set up. Even for
		 * interrupts marked with IRQF_NOBALANCING this is
		 * correct as we want the thread to move to the cpu(s)
		 * on which the requesting code placed the interrupt.
		 */
		set_bit(IRQTF_AFFINITY, &new->thread_flags);
	}

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto out_thread;
	}

	/*
	 * Drivers are often written to work w/o knowledge about the
	 * underlying irq chip implementation, so a request for a
	 * threaded irq without a primary hard irq context handler
	 * requires the ONESHOT flag to be set. Some irq chips like
	 * MSI based interrupts are per se one shot safe. Check the
	 * chip flags, so we can avoid the unmask dance at the end of
	 * the threaded handler for those.
	 */
	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
		new->flags &= ~IRQF_ONESHOT;

	/*
	 * The following block of code has to be executed atomically
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 or 64 irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_mask;
		}
		/*
		 * The thread_mask for the action is or'ed to
		 * desc->threads_oneshot to indicate that the
		 * IRQF_ONESHOT thread handler has been woken, but not
		 * yet finished. The bit is cleared when a thread
		 * completes. When all threads of a shared interrupt
		 * line have completed desc->threads_oneshot becomes
		 * zero and the interrupt line is unmasked. See
		 * handle.c:__irq_wake_thread() for further information.
		 *
		 * If no thread is woken by primary (hard irq context)
		 * interrupt handlers, then desc->threads_active is
		 * also checked for zero to unmask the irq line in the
		 * affected hard irq flow handlers
		 * (handle_[fasteoi|level]_irq).
		 *
		 * The new action gets the first zero bit of
		 * thread_mask assigned. See the loop above which or's
		 * all existing action->thread_mask bits.
		 */
		new->thread_mask = 1 << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler &&
		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is re-enabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
		       irq);
		ret = -EINVAL;
		goto out_mask;
	}

	if (!shared) {
		ret = irq_request_resources(desc);
		if (ret) {
			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
			       new->name, irq, desc->irq_data.chip->name);
			goto out_mask;
		}

		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge, polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc, irq,
						new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_mask;
		}

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		if (irq_settings_can_autoenable(desc))
			irq_startup(desc, true);
		else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		/* Set default affinity mask once everything is setup */
		setup_affinity(irq, desc, mask);

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irq_settings_get_trigger_mask(desc);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warning("irq %d uses trigger mode %u; requested %u\n",
				   irq, nmsk, omsk);
	}

	new->irq = irq;
	*old_ptr = new;

	irq_pm_install_action(desc, new);

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc, irq);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	free_cpumask_var(mask);

	return 0;

mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_mask:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	free_cpumask_var(mask);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}
/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;
	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);
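/*
 * Illustrative sketch (not in the original file): architecture code can
 * install a boot-time interrupt with a static irqaction instead of the
 * kmalloc'ed one request_irq() builds; "timer_interrupt" and "TIMER_IRQ"
 * are hypothetical names:
 *
 *	static struct irqaction timer_action = {
 *		.handler = timer_interrupt,
 *		.flags   = IRQF_TIMER,
 *		.name    = "timer",
 *	};
 *
 *	setup_irq(TIMER_IRQ, &timer_action);
 */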
/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);

			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	irq_pm_remove_action(desc, action);

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		irq_shutdown(desc);
		irq_release_resources(desc);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even while it is being freed, so let's make sure
	 * that is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that
	 *   a 'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
	}

	module_put(desc->owner);
	return action;
}

/**
 * remove_irq - free an interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);
/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	chip_bus_lock(desc);
	kfree(__free_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);
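/*
 * Illustrative sketch (not in the original file): teardown order for a
 * shared line. Silence the device first so it cannot assert the shared
 * IRQ, then free the handler with the same dev_id cookie used at request
 * time; "my_hw_mask_irqs" is a hypothetical helper:
 *
 *	my_hw_mask_irqs(dev);		// device stops raising the line
 *	free_irq(dev->irq, dev);	// waits for running handlers
 */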
/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Primary handler for threaded interrupts
 *	     If NULL and thread_fn != NULL the default
 *	     primary handler is installed
 * @thread_fn: Function called from the irq handler thread
 *	       If NULL, no irq thread is created
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 */
	if ((irqflags & IRQF_SHARED) && !dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
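/*
 * Illustrative sketch (not in the original file): the split primary /
 * thread design described above. The primary handler only checks and
 * quiesces the device; the slow work sleeps in the thread. "my_dev",
 * "my_irq_pending" and "my_irq_disable" are hypothetical names:
 *
 *	static irqreturn_t my_primary(int irq, void *cookie)
 *	{
 *		struct my_dev *dev = cookie;
 *
 *		if (!my_irq_pending(dev))	// shared line: not ours
 *			return IRQ_NONE;
 *		my_irq_disable(dev);		// silence the device
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t my_thread(int irq, void *cookie)
 *	{
 *		// may sleep: talk to the device over a slow bus
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(irq, my_primary, my_thread,
 *				   IRQF_SHARED, "my-device", dev);
 */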
/**
 * request_any_context_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Threaded handler for threaded interrupts.
 * @flags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It selects either a
 * hardirq or threaded handling method depending on the
 * context.
 *
 * On failure, it returns a negative value. On success,
 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret;

	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
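/*
 * Illustrative sketch (not in the original file): unlike request_irq(),
 * a positive return here means success, so callers should only treat
 * negative values as errors:
 *
 *	ret = request_any_context_irq(irq, my_handler, 0, "my-device", dev);
 *	if (ret < 0)
 *		return ret;	// IRQC_IS_HARDIRQ/IRQC_IS_NESTED = success
 */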
void enable_percpu_irq(unsigned int irq, unsigned int type)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	type &= IRQ_TYPE_SENSE_MASK;
	if (type != IRQ_TYPE_NONE) {
		int ret;

		ret = __irq_set_trigger(desc, irq, type);

		if (ret) {
			WARN(1, "failed to set type for IRQ%d\n", irq);
			goto out;
		}
	}

	irq_percpu_enable(desc, cpu);
out:
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(enable_percpu_irq);

void disable_percpu_irq(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	irq_percpu_disable(desc, cpu);
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(disable_percpu_irq);

/*
 * Internal function to unregister a percpu irqaction.
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	action = desc->action;
	if (!action || action->percpu_dev_id != dev_id) {
		WARN(1, "Trying to free already-free IRQ %d\n", irq);
		goto bad;
	}

	if (!cpumask_empty(desc->percpu_enabled)) {
		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
		     irq, cpumask_first(desc->percpu_enabled));
		goto bad;
	}

	/* Found it - now remove it from the list of entries: */
	desc->action = NULL;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	module_put(desc->owner);
	return action;

bad:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return NULL;
}

/**
 * remove_percpu_irq - free a per-cpu interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && irq_settings_is_per_cpu_devid(desc))
		__free_percpu_irq(irq, act->percpu_dev_id);
}

/**
 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove a percpu interrupt handler. The handler is removed, but
 * the interrupt line is not disabled. This must be done on each
 * CPU before calling this function. The function does not return
 * until any executing interrupts for this IRQ have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	chip_bus_lock(desc);
	kfree(__free_percpu_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}

/**
 * setup_percpu_irq - setup a per-cpu interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup per-cpu interrupts in the early boot process.
 */
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int retval;

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;
	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}

/**
 * request_percpu_irq - allocate a percpu interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 * @devname: An ascii name for the claiming device
 * @dev_id: A percpu cookie passed back to the handler function
 *
 * This call allocates interrupt resources, but doesn't
 * automatically enable the interrupt. It has to be done on each
 * CPU using enable_percpu_irq().
 *
 * Dev_id must be globally unique. It is a per-cpu variable, and
 * the handler gets called with the interrupted CPU's instance of
 * that variable.
 */
int request_percpu_irq(unsigned int irq, irq_handler_t handler,
		       const char *devname, void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (!dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
	action->name = devname;
	action->percpu_dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

	return retval;
}
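/*
 * Illustrative sketch (not in the original file): the percpu request /
 * enable split. The line is requested once with a __percpu cookie and
 * then enabled on each CPU that should receive it; "my_pcpu_handler" and
 * "my_pcpu_state" are hypothetical names:
 *
 *	static DEFINE_PER_CPU(struct my_state, my_pcpu_state);
 *
 *	err = request_percpu_irq(irq, my_pcpu_handler, "my-timer",
 *				 &my_pcpu_state);
 *
 *	// later, on each CPU (e.g. from a hotplug notifier):
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 *	...
 *	disable_percpu_irq(irq);
 */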