/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	bool inprogress;

	if (!desc)
		return;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);

	/*
	 * We made sure that no hardirq handler is running. Now verify
	 * that no threaded handlers are active.
	 */
	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq: Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return 0;

	return 1;
}

/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc: irq descriptor which has affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action = desc->action;

	while (action) {
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
		action = action->next;
	}
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
#else
static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
static inline bool irq_move_pending(struct irq_data *data) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif

int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = chip->irq_set_affinity(data, mask, false);
		switch (ret) {
		case IRQ_SET_MASK_OK:
			cpumask_copy(data->affinity, mask);
			/* fall through */
		case IRQ_SET_MASK_OK_NOCOPY:
			irq_set_thread_affinity(desc);
			ret = 0;
		}
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq: Interrupt to set affinity
 * @mask: cpumask
 *
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
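/*
 * Example (illustrative sketch, not part of this file): a driver that owns
 * interrupt "my_irq" might pin it to one CPU and publish the same mask as
 * an affinity hint for user space (e.g. irqbalance). All my_* names below
 * are hypothetical. Note that irq_set_affinity_hint() stores the pointer,
 * so the mask must stay valid until the hint is cleared again with
 * irq_set_affinity_hint(my_irq, NULL) before the irq is freed.
 *
 *	static struct cpumask my_irq_mask;
 *
 *	static int my_driver_pin_irq(unsigned int my_irq, int cpu)
 *	{
 *		int ret;
 *
 *		cpumask_clear(&my_irq_mask);
 *		cpumask_set_cpu(cpu, &my_irq_mask);
 *
 *		ret = irq_set_affinity(my_irq, &my_irq_mask);
 *		if (ret)
 *			return ret;
 *
 *		return irq_set_affinity_hint(my_irq, &my_irq_mask);
 *	}
 */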
static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq: Interrupt for which to enable/disable notification
 * @notify: Context for notification, or %NULL to disable
 *          notification. Function pointers must be initialised;
 *          the other fields will be initialised by this function.
 *
 * Must be called in process context. Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
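/*
 * Example (illustrative sketch, not part of this file): a driver can embed
 * a struct irq_affinity_notify in its private data to be told when the
 * affinity of its interrupt changes, e.g. to rebalance per-CPU resources.
 * The my_dev structure and both callbacks are hypothetical; only ->notify
 * and ->release need to be filled in before the call, the remaining fields
 * are initialised by irq_set_affinity_notifier() itself.
 *
 *	static void my_affinity_notify(struct irq_affinity_notify *notify,
 *				       const cpumask_t *mask)
 *	{
 *		struct my_dev *dev = container_of(notify, struct my_dev,
 *						  affinity_notify);
 *
 *		(update per-CPU queues or steering registers for mask here)
 *	}
 *
 *	static void my_affinity_release(struct kref *ref)
 *	{
 *		(free a dynamically allocated notifier here, if any)
 *	}
 *
 *	dev->affinity_notify.notify = my_affinity_notify;
 *	dev->affinity_notify.release = my_affinity_release;
 *	err = irq_set_affinity_notifier(dev->irq, &dev->affinity_notify);
 *
 * and before free_irq():
 *
 *	irq_set_affinity_notifier(dev->irq, NULL);
 */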
#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct cpumask *set = irq_default_affinity;
	int ret, node = desc->irq_data.node;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_data.affinity,
				       cpu_online_mask))
			set = desc->irq_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(mask, nodemask))
			cpumask_and(mask, mask, nodemask);
	}
	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
	switch (ret) {
	case IRQ_SET_MASK_OK:
		cpumask_copy(desc->irq_data.affinity, mask);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
	}
	return 0;
}
#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
{
	return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(irq, desc, mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	return 0;
}
#endif

void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
{
	if (suspend) {
		if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
			return;
		desc->istate |= IRQS_SUSPENDED;
	}

	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc, irq, false);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
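/*
 * Example (illustrative sketch, not part of this file): disable_irq() and
 * enable_irq() nest, so a driver can bracket a critical reconfiguration of
 * its hardware like this (my_dev is hypothetical). Because disable_irq()
 * waits for a running handler to finish, it must not be called while
 * holding a lock that the handler itself takes; use disable_irq_nosync()
 * in that situation.
 *
 *	disable_irq(my_dev->irq);
 *	(reprogram the device; the handler is guaranteed not to run here)
 *	enable_irq(my_dev->irq);
 */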
void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
	if (resume) {
		if (!(desc->istate & IRQS_SUSPENDED)) {
			if (!desc->action)
				return;
			if (!(desc->action->flags & IRQF_FORCE_RESUME))
				return;
			/* Pretend that it got disabled ! */
			desc->depth++;
		}
		desc->istate &= ~IRQS_SUSPENDED;
	}

	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		irq_enable(desc);
		check_irq_resend(desc, irq);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc, irq, false);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
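/*
 * Example (illustrative sketch, not part of this file): a driver whose
 * interrupt should be able to wake the system typically toggles wakeup
 * mode from its suspend/resume callbacks via the enable_irq_wake() /
 * disable_irq_wake() wrappers around irq_set_irq_wake(). The calls must
 * balance, just like enable_irq()/disable_irq(). The my_dev structure and
 * the surrounding driver are hypothetical.
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		struct my_dev *md = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(md->irq);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *dev)
 *	{
 *		struct my_dev *md = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(md->irq);
 *		return 0;
 *	}
 */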
/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (desc->action)
			if (irqflags & desc->action->flags & IRQF_SHARED)
				canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		      unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	flags &= IRQ_TYPE_SENSE_MASK;

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* caller masked out all except trigger mode flags */
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq, chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask if the interrupt has not been disabled
 * and is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT))
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be, we need to protect ourselves
	 * against the following scenario:
	 *
	 * The thread is done faster than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and masks the line, leaves due
	 * to IRQS_INPROGRESS and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	cpumask_copy(mask, desc->irq_data.affinity);
	raw_spin_unlock_irq(&desc->lock);

	set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow buses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active) &&
	    waitqueue_active(&desc->wait_for_threads))
		wake_up(&desc->wait_for_threads);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	static const struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
				  struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					 &action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	sched_setscheduler(current, SCHED_FIFO, &param);
	current->irq_thread = 1;

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (!noirqdebug)
			note_interrupt(action->irq, desc, action_ret);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set. We cannot verify that as we
	 * cannot touch the oneshot mask at this point anymore as
	 * __setup_irq() might have given out current's thread_mask
	 * again.
	 *
	 * Clear irq_thread. Otherwise exit_irq_thread() would make
	 * fuzz about an active irq thread going into nirvana.
	 */
	current->irq_thread = 0;
	return 0;
}

/*
 * Called from do_exit()
 */
void exit_irq_thread(void)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (!tsk->irq_thread)
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm ? tsk->comm : "", tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);

	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

static void irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return;

	new->flags |= IRQF_ONESHOT;

	if (!new->thread_fn) {
		set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
		new->thread_fn = new->handler;
		new->handler = irq_default_primary_handler;
	}
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;
	cpumask_var_t mask;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & IRQF_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but that is hardly a problem,
		 * as only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided by
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc))
			irq_setup_forced_threading(new);
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		struct task_struct *t;

		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
		if (IS_ERR(t)) {
			ret = PTR_ERR(t);
			goto out_mput;
		}
		/*
		 * We keep the reference to the task struct even if
		 * the thread dies to avoid having the interrupt code
		 * reference an already freed task_struct.
		 */
		get_task_struct(t);
		new->thread = t;
	}

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto out_thread;
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 resp 64 irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_mask;
		}
		/*
		 * The thread_mask for the action is or'ed to
		 * desc->thread_active to indicate that the
		 * IRQF_ONESHOT thread handler has been woken, but not
		 * yet finished. The bit is cleared when a thread
		 * completes. When all threads of a shared interrupt
		 * line have completed desc->threads_active becomes
		 * zero and the interrupt line is unmasked. See
		 * handle.c:irq_wake_thread() for further information.
		 *
		 * If no thread is woken by primary (hard irq context)
		 * interrupt handlers, then desc->threads_active is
		 * also checked for zero to unmask the irq line in the
		 * affected hard irq flow handlers
		 * (handle_[fasteoi|level]_irq).
		 *
		 * The new action gets the first zero bit of
		 * thread_mask assigned. See the loop above which or's
		 * all existing action->thread_mask bits.
		 */
		new->thread_mask = 1 << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is re-enabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
		       irq);
		ret = -EINVAL;
		goto out_mask;
	}

	if (!shared) {
		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc, irq,
					new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_mask;
		}

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		if (irq_settings_can_autoenable(desc))
			irq_startup(desc, true);
		else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		/* Set default affinity mask once everything is setup */
		setup_affinity(irq, desc, mask);

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irq_settings_get_trigger_mask(desc);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warning("irq %d uses trigger mode %u; requested %u\n",
				   irq, nmsk, omsk);
	}

	new->irq = irq;
	*old_ptr = new;

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc, irq, false);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	free_cpumask_var(mask);

	return 0;

mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_mask:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	free_cpumask_var(mask);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}

/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;
	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);

			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action)
		irq_shutdown(desc);

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
	}

	module_put(desc->owner);
	return action;
}

/**
 * remove_irq - free an interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	chip_bus_lock(desc);
	kfree(__free_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);
/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *           Primary handler for threaded interrupts
 *           If NULL and thread_fn != NULL the default
 *           primary handler is installed
 * @thread_fn: Function called from the irq handler thread
 *             If NULL, no irq thread is created
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 */
	if ((irqflags & IRQF_SHARED) && !dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
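/*
 * Example (illustrative sketch, not part of this file): the split
 * primary/threaded handler pattern described above. The primary handler
 * runs in hard interrupt context, checks whether its device raised the
 * interrupt, quiets it and returns IRQ_WAKE_THREAD; the slow work (e.g.
 * talking to the device over a slow bus) happens in the thread. All my_*
 * names are hypothetical.
 *
 *	static irqreturn_t my_hardirq(int irq, void *dev_id)
 *	{
 *		struct my_dev *md = dev_id;
 *
 *		if (!my_dev_irq_pending(md))
 *			return IRQ_NONE;
 *
 *		my_dev_mask_irq(md);
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t my_thread_fn(int irq, void *dev_id)
 *	{
 *		struct my_dev *md = dev_id;
 *
 *		my_dev_handle_slow_work(md);
 *		my_dev_unmask_irq(md);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(md->irq, my_hardirq, my_thread_fn,
 *				   IRQF_SHARED, "my_dev", md);
 *	...
 *	free_irq(md->irq, md);
 */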
/**
 * request_any_context_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *           Threaded handler for threaded interrupts.
 * @flags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It selects either a
 * hardirq or threaded handling method depending on the
 * context.
 *
 * On failure, it returns a negative value. On success,
 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret;

	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);

void enable_percpu_irq(unsigned int irq, unsigned int type)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	type &= IRQ_TYPE_SENSE_MASK;
	if (type != IRQ_TYPE_NONE) {
		int ret;

		ret = __irq_set_trigger(desc, irq, type);

		if (ret) {
			WARN(1, "failed to set type for IRQ%d\n", irq);
			goto out;
		}
	}

	irq_percpu_enable(desc, cpu);
out:
	irq_put_desc_unlock(desc, flags);
}

void disable_percpu_irq(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	irq_percpu_disable(desc, cpu);
	irq_put_desc_unlock(desc, flags);
}

/*
 * Internal function to unregister a percpu irqaction.
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	action = desc->action;
	if (!action || action->percpu_dev_id != dev_id) {
		WARN(1, "Trying to free already-free IRQ %d\n", irq);
		goto bad;
	}

	if (!cpumask_empty(desc->percpu_enabled)) {
		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
		     irq, cpumask_first(desc->percpu_enabled));
		goto bad;
	}

	/* Found it - now remove it from the list of entries: */
	desc->action = NULL;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	module_put(desc->owner);
	return action;

bad:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return NULL;
}

/**
 * remove_percpu_irq - free a per-cpu interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && irq_settings_is_per_cpu_devid(desc))
		__free_percpu_irq(irq, act->percpu_dev_id);
}

/**
 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove a percpu interrupt handler. The handler is removed, but
 * the interrupt line is not disabled. This must be done on each
 * CPU before calling this function. The function does not return
 * until any executing interrupts for this IRQ have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	chip_bus_lock(desc);
	kfree(__free_percpu_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}

/**
 * setup_percpu_irq - setup a per-cpu interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup per-cpu interrupts in the early boot process.
 */
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int retval;

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;
	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}

/**
 * request_percpu_irq - allocate a percpu interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 * @devname: An ascii name for the claiming device
 * @dev_id: A percpu cookie passed back to the handler function
 *
 * This call allocates interrupt resources, but doesn't
 * automatically enable the interrupt. It has to be done on each
 * CPU using enable_percpu_irq().
 *
 * Dev_id must be globally unique. It is a per-cpu variable, and
 * the handler gets called with the interrupted CPU's instance of
 * that variable.
 */
int request_percpu_irq(unsigned int irq, irq_handler_t handler,
		       const char *devname, void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (!dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
	action->name = devname;
	action->percpu_dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

	return retval;
}
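/*
 * Example (illustrative sketch, not part of this file): per-cpu interrupts
 * such as local timers are requested once with a __percpu cookie and then
 * enabled individually on each CPU, typically from the CPU bringup path.
 * All my_* names are hypothetical.
 *
 *	static DEFINE_PER_CPU(struct my_percpu_state, my_state);
 *
 *	ret = request_percpu_irq(my_irq, my_percpu_handler, "my_timer",
 *				 &my_state);
 *
 * then, on each CPU that should receive the interrupt:
 *
 *	enable_percpu_irq(my_irq, IRQ_TYPE_NONE);
 *
 * and before that CPU goes down:
 *
 *	disable_percpu_irq(my_irq);
 */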