// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip based
 * architectures. Detailed information is available in
 * Documentation/core-api/genericirq.rst
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>

#include <trace/events/irq.h>

#include "internals.h"

static irqreturn_t bad_chained_irq(int irq, void *dev_id)
{
	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
	return IRQ_NONE;
}

/*
 * Chained handlers should never call action on their IRQ. This default
 * action will emit a warning if such a thing happens.
 */
struct irqaction chained_action = {
	.handler = bad_chained_irq,
};

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq:	irq number
 * @chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, const struct irq_chip *chip)
{
	int ret = -EINVAL;

	scoped_irqdesc_get_and_lock(irq, 0) {
		scoped_irqdesc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip);
		ret = 0;
	}
	/* For !CONFIG_SPARSE_IRQ make the irq show up in allocated_irqs. */
	if (!ret)
		irq_mark_irq(irq);
	return ret;
}
EXPORT_SYMBOL(irq_set_chip);

/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq:	irq number
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL)
		return __irq_set_trigger(scoped_irqdesc, type);
	return -EINVAL;
}
EXPORT_SYMBOL(irq_set_irq_type);

/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to interrupt specific data
 *
 * Set the hardware irq controller data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	scoped_irqdesc_get_and_lock(irq, 0) {
		scoped_irqdesc->irq_common_data.handler_data = data;
		return 0;
	}
	return -EINVAL;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 * @irq_base:	Interrupt number base
 * @irq_offset:	Interrupt number offset
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, struct msi_desc *entry)
{
	scoped_irqdesc_get_and_lock(irq_base + irq_offset, IRQ_GET_DESC_CHECK_GLOBAL) {
		scoped_irqdesc->irq_common_data.msi_desc = entry;
		if (entry && !irq_offset)
			entry->irq = irq_base;
		return 0;
	}
	return -EINVAL;
}

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq:	Interrupt number
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}
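
/*
 * A minimal sketch of how a controller driver might combine the setters
 * above when wiring up a Linux interrupt number. The names example_chip and
 * example_priv are hypothetical, not kernel APIs.
 */
static __maybe_unused void example_wire_up_irq(unsigned int irq,
					       const struct irq_chip *example_chip,
					       void *example_priv)
{
	irq_set_chip(irq, example_chip);		/* select the irq_chip callbacks */
	irq_set_chip_data(irq, example_priv);		/* per-irq controller private data */
	irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);	/* program the trigger type */
}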

/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	scoped_irqdesc_get_and_lock(irq, 0) {
		scoped_irqdesc->irq_data.chip_data = data;
		return 0;
	}
	return -EINVAL;
}
EXPORT_SYMBOL(irq_set_chip_data);

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_clr_started(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
}

static void irq_state_set_started(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
}

enum {
	IRQ_STARTUP_NORMAL,
	IRQ_STARTUP_MANAGED,
	IRQ_STARTUP_ABORT,
};

#ifdef CONFIG_SMP
static int
__irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
		      bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return IRQ_STARTUP_NORMAL;

	irqd_clr_managed_shutdown(d);

	if (!cpumask_intersects(aff, cpu_online_mask)) {
		/*
		 * Catch code which fiddles with enable_irq() on a managed
		 * and potentially shutdown IRQ. Chained interrupt
		 * installment or irq auto probing should not happen on
		 * managed irqs either.
		 */
		if (WARN_ON_ONCE(force))
			return IRQ_STARTUP_ABORT;
		/*
		 * The interrupt was requested, but there is no online CPU
		 * in its affinity mask. Put it into managed shutdown
		 * state and let the cpu hotplug mechanism start it up once
		 * a CPU in the mask becomes available.
		 */
		return IRQ_STARTUP_ABORT;
	}
	/*
	 * Managed interrupts have reserved resources, so this should not
	 * happen.
	 */
	if (WARN_ON(irq_domain_activate_irq(d, false)))
		return IRQ_STARTUP_ABORT;
	return IRQ_STARTUP_MANAGED;
}

void irq_startup_managed(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	/*
	 * Clear managed-shutdown flag, so we don't repeat managed-startup for
	 * multiple hotplugs, and cause imbalanced disable depth.
	 */
	irqd_clr_managed_shutdown(d);

	/*
	 * Only start it up when the disable depth is 1, so that a disable,
	 * hotunplug, hotplug sequence does not end up enabling it during
	 * hotplug unconditionally.
	 */
	desc->depth--;
	if (!desc->depth)
		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
}

#else
static __always_inline int
__irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
		      bool force)
{
	return IRQ_STARTUP_NORMAL;
}
#endif

static void irq_enable(struct irq_desc *desc)
{
	if (!irqd_irq_disabled(&desc->irq_data)) {
		unmask_irq(desc);
	} else {
		irq_state_clr_disabled(desc);
		if (desc->irq_data.chip->irq_enable) {
			desc->irq_data.chip->irq_enable(&desc->irq_data);
			irq_state_clr_masked(desc);
		} else {
			unmask_irq(desc);
		}
	}
}

static int __irq_startup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	int ret = 0;

	/* Warn if this interrupt is not activated but try nevertheless */
	WARN_ON_ONCE(!irqd_is_activated(d));

	if (d->chip->irq_startup) {
		ret = d->chip->irq_startup(d);
		irq_state_clr_disabled(desc);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	irq_state_set_started(desc);
	return ret;
}

int irq_startup(struct irq_desc *desc, bool resend, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *aff = irq_data_get_affinity_mask(d);
	int ret = 0;

	desc->depth = 0;

	if (irqd_is_started(d)) {
		irq_enable(desc);
	} else {
		switch (__irq_startup_managed(desc, aff, force)) {
		case IRQ_STARTUP_NORMAL:
			if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)
				irq_setup_affinity(desc);
			ret = __irq_startup(desc);
			if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP))
				irq_setup_affinity(desc);
			break;
		case IRQ_STARTUP_MANAGED:
			irq_do_set_affinity(d, aff, false);
			ret = __irq_startup(desc);
			break;
		case IRQ_STARTUP_ABORT:
			desc->depth = 1;
			irqd_set_managed_shutdown(d);
			return 0;
		}
	}
	if (resend)
		check_irq_resend(desc, false);

	return ret;
}

int irq_activate(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return irq_domain_activate_irq(d, false);
	return 0;
}

int irq_activate_and_startup(struct irq_desc *desc, bool resend)
{
	if (WARN_ON(irq_activate(desc)))
		return 0;
	return irq_startup(desc, resend, IRQ_START_FORCE);
}

static void __irq_disable(struct irq_desc *desc, bool mask);

void irq_shutdown(struct irq_desc *desc)
{
	if (irqd_is_started(&desc->irq_data)) {
		clear_irq_resend(desc);
		/*
		 * Increment disable depth, so that a managed shutdown on
		 * CPU hotunplug preserves the actual disabled state when the
		 * CPU comes back online. See irq_startup_managed().
		 */
		desc->depth++;

		if (desc->irq_data.chip->irq_shutdown) {
			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
			irq_state_set_disabled(desc);
			irq_state_set_masked(desc);
		} else {
			__irq_disable(desc, true);
		}
		irq_state_clr_started(desc);
	}
}
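
/*
 * Illustrative sketch: the depth counter managed above backs the nestable
 * driver-facing API. Each disable_irq() must be balanced by an enable_irq();
 * the line is only re-enabled through the enable/startup paths once the
 * depth drops back to zero. Purely hypothetical usage, shown for clarity.
 */
static __maybe_unused void example_nested_disable(unsigned int irq)
{
	disable_irq(irq);	/* depth 0 -> 1, interrupt marked disabled */
	disable_irq(irq);	/* depth 1 -> 2, no further hardware access */
	enable_irq(irq);	/* depth 2 -> 1, still disabled */
	enable_irq(irq);	/* depth 1 -> 0, irq_enable()/irq_startup() runs */
}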

void irq_shutdown_and_deactivate(struct irq_desc *desc)
{
	irq_shutdown(desc);
	/*
	 * This must be called even if the interrupt was never started up,
	 * because the activation can happen before the interrupt is
	 * available for request/startup. It has its own state tracking so
	 * it's safe to call it unconditionally.
	 */
	irq_domain_deactivate_irq(&desc->irq_data);
}

static void __irq_disable(struct irq_desc *desc, bool mask)
{
	if (irqd_irq_disabled(&desc->irq_data)) {
		if (mask)
			mask_irq(desc);
	} else {
		irq_state_set_disabled(desc);
		if (desc->irq_data.chip->irq_disable) {
			desc->irq_data.chip->irq_disable(&desc->irq_data);
			irq_state_set_masked(desc);
		} else if (mask) {
			mask_irq(desc);
		}
	}
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 *
 * If the interrupt chip does not implement the irq_disable callback,
 * a driver can disable the lazy approach for a particular irq line by
 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 * be used for devices which cannot disable the interrupt at the
 * device level under certain circumstances and have to use
 * disable_irq[_nosync] instead.
 */
void irq_disable(struct irq_desc *desc)
{
	__irq_disable(desc, irq_settings_disable_unlazy(desc));
}

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack) {
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
		irq_state_set_masked(desc);
	} else {
		mask_irq(desc);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
}

void mask_irq(struct irq_desc *desc)
{
	if (irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (!irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	unmask_irq(desc);
}
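
/*
 * Illustrative sketch of what the irq_disable() comment above describes: a
 * driver whose device cannot gate the interrupt at the device level can opt
 * out of the lazy disable optimization so disable_irq() masks the line
 * immediately. example_handler and example_dev are hypothetical.
 */
static __maybe_unused int example_request_unlazy(unsigned int irq,
						 irq_handler_t example_handler,
						 void *example_dev)
{
	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
	return request_irq(irq, example_handler, 0, "example-unlazy", example_dev);
}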

/* Busy wait until INPROGRESS is cleared */
static bool irq_wait_on_inprogress(struct irq_desc *desc)
{
	if (IS_ENABLED(CONFIG_SMP)) {
		do {
			raw_spin_unlock(&desc->lock);
			while (irqd_irq_inprogress(&desc->irq_data))
				cpu_relax();
			raw_spin_lock(&desc->lock);
		} while (irqd_irq_inprogress(&desc->irq_data));

		/* Might have been disabled in the meantime */
		return !irqd_irq_disabled(&desc->irq_data) && desc->action;
	}
	return false;
}

static bool irq_can_handle_pm(struct irq_desc *desc)
{
	struct irq_data *irqd = &desc->irq_data;
	const struct cpumask *aff;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(irqd, IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED))
		return true;

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (unlikely(irqd_has_set(irqd, IRQD_WAKEUP_ARMED))) {
		irq_pm_handle_wakeup(desc);
		return false;
	}

	/* Check whether the interrupt is polled on another CPU */
	if (unlikely(desc->istate & IRQS_POLL_INPROGRESS)) {
		if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
			      "irq poll in progress on cpu %d for irq %d\n",
			      smp_processor_id(), desc->irq_data.irq))
			return false;
		return irq_wait_on_inprogress(desc);
	}

	/* The below works only for single target interrupts */
	if (!IS_ENABLED(CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK) ||
	    !irqd_is_single_target(irqd) || desc->handle_irq != handle_edge_irq)
		return false;

	/*
	 * If the interrupt affinity was moved to this CPU and the
	 * interrupt is currently handled on the previous target CPU, then
	 * busy wait for INPROGRESS to be cleared. Otherwise for edge type
	 * interrupts the handler might get stuck on the previous target:
	 *
	 * CPU 0			CPU 1 (new target)
	 * handle_edge_irq()
	 * repeat:
	 *	handle_event()		handle_edge_irq()
	 *				if (INPROGRESS) {
	 *				  set(PENDING);
	 *				  mask();
	 *				  return;
	 *				}
	 *	if (PENDING) {
	 *	  clear(PENDING);
	 *	  unmask();
	 *	  goto repeat;
	 *	}
	 *
	 * This happens when the device raises interrupts with a high rate
	 * and always before handle_event() completes and the CPU0 handler
	 * can clear INPROGRESS. This has been observed in virtual machines.
	 */
	aff = irq_data_get_effective_affinity_mask(irqd);
	if (cpumask_first(aff) != smp_processor_id())
		return false;
	return irq_wait_on_inprogress(desc);
}

static inline bool irq_can_handle_actions(struct irq_desc *desc)
{
	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		return false;
	}
	return true;
}

static inline bool irq_can_handle(struct irq_desc *desc)
{
	if (!irq_can_handle_pm(desc))
		return false;

	return irq_can_handle_actions(desc);
}

/**
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq:	the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling thread's
 * context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	scoped_guard(raw_spinlock_irq, &desc->lock) {
		if (!irq_can_handle_actions(desc))
			return;

		action = desc->action;
		kstat_incr_irqs_this_cpu(desc);
		atomic_inc(&desc->threads_active);
	}

	action_ret = IRQ_NONE;
	for_each_action_of_desc(desc, action)
		action_ret |= action->thread_fn(action->irq, action->dev_id);

	if (!irq_settings_no_debug(desc))
		note_interrupt(desc, action_ret);

	wake_threads_waitq(desc);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @desc:	the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control is
 * necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and unmask
 * issues if necessary.
 */
void handle_simple_irq(struct irq_desc *desc)
{
	guard(raw_spinlock)(&desc->lock);

	if (!irq_can_handle_pm(desc)) {
		if (irqd_needs_resend_when_in_progress(&desc->irq_data))
			desc->istate |= IRQS_PENDING;
		return;
	}

	if (!irq_can_handle_actions(desc))
		return;

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);

/**
 * handle_untracked_irq - Simple and software-decoded IRQs.
 * @desc:	the interrupt description structure for this irq
 *
 * Untracked interrupts are sent from a demultiplexing interrupt handler
 * when the demultiplexer does not know which device in its multiplexed irq
 * domain generated the interrupt. IRQs handled through here are not
 * subjected to stats tracking, randomness, or spurious interrupt
 * detection.
 *
 * Note: Like handle_simple_irq, the caller is expected to handle the ack,
 * clear, mask and unmask issues if necessary.
 */
void handle_untracked_irq(struct irq_desc *desc)
{
	scoped_guard(raw_spinlock, &desc->lock) {
		if (!irq_can_handle(desc))
			return;

		desc->istate &= ~IRQS_PENDING;
		irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	}

	__handle_irq_event_percpu(desc);

	scoped_guard(raw_spinlock, &desc->lock)
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
}
EXPORT_SYMBOL_GPL(handle_untracked_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}
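
/*
 * Illustrative sketch: a threaded demultiplexer (e.g. an I2C GPIO expander)
 * calls handle_nested_irq() above from its own interrupt thread for every
 * child it finds pending. The pending value and the 8 bit width are
 * hypothetical; a real handler would read them from the device.
 */
static __maybe_unused irqreturn_t example_demux_thread_fn(int irq, void *data)
{
	struct irq_domain *example_child_domain = data;
	unsigned long pending = 0x05;	/* hypothetical, read from hardware */
	int hwirq;

	for_each_set_bit(hwirq, &pending, 8)
		handle_nested_irq(irq_find_mapping(example_child_domain, hwirq));

	return IRQ_HANDLED;
}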

/**
 * handle_level_irq - Level type irq handler
 * @desc:	the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has the
 * active level. This may require masking the interrupt and unmasking it
 * after the associated handler has acknowledged the device, so the
 * interrupt line is back to inactive.
 */
void handle_level_irq(struct irq_desc *desc)
{
	guard(raw_spinlock)(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_can_handle(desc))
		return;

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

	cond_unmask_irq(desc);
}
EXPORT_SYMBOL_GPL(handle_level_irq);

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

static inline void cond_eoi_irq(struct irq_chip *chip, struct irq_data *data)
{
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(data);
}

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @desc:	the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi() call when
 * the interrupt has been serviced. This enables support for modern forms
 * of interrupt handlers, which handle the flow details in hardware,
 * transparently.
 */
void handle_fasteoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	guard(raw_spinlock)(&desc->lock);

	/*
	 * When an affinity change races with IRQ handling, the next interrupt
	 * can arrive on the new CPU before the original CPU has completed
	 * handling the previous one - it may need to be resent.
	 */
	if (!irq_can_handle_pm(desc)) {
		if (irqd_needs_resend_when_in_progress(&desc->irq_data))
			desc->istate |= IRQS_PENDING;
		cond_eoi_irq(chip, &desc->irq_data);
		return;
	}

	if (!irq_can_handle_actions(desc)) {
		mask_irq(desc);
		cond_eoi_irq(chip, &desc->irq_data);
		return;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	/*
	 * When the race described above happens this will resend the interrupt.
	 */
	if (unlikely(desc->istate & IRQS_PENDING))
		check_irq_resend(desc, false);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
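
/*
 * Illustrative sketch: an irqdomain .map() callback for a controller that
 * only needs an end-of-interrupt would typically install the fasteoi flow
 * handler above. example_eoi_chip is hypothetical and would provide at
 * least .irq_eoi (and usually .irq_mask/.irq_unmask).
 */
static __maybe_unused int example_fasteoi_map(struct irq_domain *d, unsigned int virq,
					      irq_hw_number_t hwirq)
{
	static struct irq_chip example_eoi_chip = {
		.name = "example-eoi",
		/* a real chip would set .irq_eoi, .irq_mask, .irq_unmask here */
	};

	irq_set_chip_and_handler(virq, &example_eoi_chip, handle_fasteoi_irq);
	irq_set_chip_data(virq, d->host_data);
	return 0;
}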

/**
 * handle_fasteoi_nmi - irq handler for NMI interrupt lines
 * @desc:	the interrupt description structure for this irq
 *
 * A simple NMI-safe handler, considering the restrictions
 * from request_nmi.
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void handle_fasteoi_nmi(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	__kstat_incr_irqs_this_cpu(desc);

	trace_irq_handler_entry(irq, action);
	/*
	 * NMIs cannot be shared, there is only one action.
	 */
	res = action->handler(irq, action->dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_nmi);

/**
 * handle_edge_irq - edge type IRQ handler
 * @desc:	the interrupt description structure for this irq
 *
 * Interrupt occurs on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware and
 * must be acked in order to be reenabled. After the ack another interrupt
 * can happen on the same source even before the first one is handled by
 * the associated event handler. If this happens it might be necessary to
 * disable (mask) the interrupt depending on the controller hardware. This
 * requires reenabling the interrupt inside of the loop which handles the
 * interrupts which have arrived while the handler was running. If all
 * pending interrupts are handled, the loop is left.
 */
void handle_edge_irq(struct irq_desc *desc)
{
	guard(raw_spinlock)(&desc->lock);

	if (!irq_can_handle(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		return;
	}

	kstat_incr_irqs_this_cpu(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			return;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Reenable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) && !irqd_irq_disabled(&desc->irq_data));
}
EXPORT_SYMBOL(handle_edge_irq);

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	/*
	 * PER CPU interrupts are not serialized. Do not touch
	 * desc->tot_count.
	 */
	__kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
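
/*
 * Illustrative sketch: handle_edge_irq() above requires a chip that can
 * acknowledge the latched edge, so a controller driver pairs an .irq_ack
 * capable chip with the edge flow handler when mapping a line.
 * example_edge_chip is hypothetical.
 */
static __maybe_unused void example_map_edge_irq(unsigned int virq,
						const struct irq_chip *example_edge_chip)
{
	/* example_edge_chip must provide at least .irq_ack, .irq_mask and .irq_unmask */
	irq_set_chip_and_handler_name(virq, example_edge_chip, handle_edge_irq, "edge");
}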

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int irq = irq_desc_get_irq(desc);
	unsigned int cpu = smp_processor_id();
	struct irqaction *action;
	irqreturn_t res;

	/*
	 * PER CPU interrupts are not serialized. Do not touch
	 * desc->tot_count.
	 */
	__kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	for (action = desc->action; action; action = action->next)
		if (cpumask_test_cpu(cpu, action->affinity))
			break;

	if (likely(action)) {
		trace_irq_handler_entry(irq, action);
		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
		trace_irq_handler_exit(irq, action, res);
	} else {
		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);

		if (enabled)
			irq_percpu_disable(desc, cpu);

		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
			    enabled ? " and unmasked" : "", irq, cpu);
	}

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
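
/*
 * Illustrative sketch: per-CPU device-id interrupts as handled above are
 * requested once with a percpu pointer and then enabled on each CPU that
 * should receive them. example_percpu_handler and example_pcpu_dev are
 * hypothetical.
 */
static __maybe_unused int example_request_percpu(unsigned int irq,
						 irq_handler_t example_percpu_handler,
						 void __percpu *example_pcpu_dev)
{
	int ret = request_percpu_irq(irq, example_percpu_handler, "example-percpu",
				     example_pcpu_dev);

	if (!ret)
		enable_percpu_irq(irq, IRQ_TYPE_NONE);	/* enables it on the local CPU */
	return ret;
}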

static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
		     int is_chained, const char *name)
{
	if (!handle) {
		handle = handle_bad_irq;
	} else {
		struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/*
		 * With hierarchical domains we might run into a
		 * situation where the outermost chip is not yet set
		 * up, but the inner chips are there. Instead of
		 * bailing we install the handler, but obviously we
		 * cannot enable/startup the interrupt at this point.
		 */
		while (irq_data) {
			if (irq_data->chip != &no_irq_chip)
				break;
			/*
			 * Bail out if the outer chip is not set up
			 * and the interrupt is supposed to be started
			 * right away.
			 */
			if (WARN_ON(is_chained))
				return;
			/* Try the parent */
			irq_data = irq_data->parent_data;
		}
#endif
		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
			return;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		if (is_chained) {
			desc->action = NULL;
			WARN_ON(irq_chip_pm_put(irq_desc_get_irq_data(desc)));
		}
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		unsigned int type = irqd_get_trigger_type(&desc->irq_data);

		/*
		 * We're about to start this interrupt immediately,
		 * hence the need to set the trigger configuration.
		 * But the .set_type callback may have overridden the
		 * flow handler, ignoring that we're dealing with a
		 * chained interrupt. Reset it immediately because we
		 * do know better.
		 */
		if (type != IRQ_TYPE_NONE) {
			__irq_set_trigger(desc, type);
			desc->handle_irq = handle;
		}

		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		desc->action = &chained_action;
		WARN_ON(irq_chip_pm_get(irq_desc_get_irq_data(desc)));
		irq_activate_and_startup(desc, IRQ_RESEND);
	}
}

void __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		       const char *name)
{
	scoped_irqdesc_get_and_buslock(irq, 0)
		__irq_do_set_handler(scoped_irqdesc, handle, is_chained, name);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
				      void *data)
{
	scoped_irqdesc_get_and_buslock(irq, 0) {
		struct irq_desc *desc = scoped_irqdesc;

		desc->irq_common_data.handler_data = data;
		__irq_do_set_handler(desc, handle, 1, NULL);
	}
}
EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);

void
irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	scoped_irqdesc_get_and_lock(irq, 0) {
		struct irq_desc *desc = scoped_irqdesc;
		unsigned long trigger, tmp;
		/*
		 * Warn when a driver sets the no autoenable flag on an already
		 * active interrupt.
		 */
		WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));

		irq_settings_clr_and_set(desc, clr, set);

		trigger = irqd_get_trigger_type(&desc->irq_data);

		irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
			   IRQD_TRIGGER_MASK | IRQD_LEVEL);
		if (irq_settings_has_no_balance_set(desc))
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		if (irq_settings_is_per_cpu(desc))
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
		if (irq_settings_is_level(desc))
			irqd_set(&desc->irq_data, IRQD_LEVEL);

		tmp = irq_settings_get_trigger_mask(desc);
		if (tmp != IRQ_TYPE_NONE)
			trigger = tmp;

		irqd_set(&desc->irq_data, trigger);
	}
}
EXPORT_SYMBOL_GPL(irq_modify_status);
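
/*
 * Illustrative sketch: a GPIO style demultiplexer hooks its parent line up
 * with a chained flow handler via irq_set_chained_handler_and_data() above
 * instead of request_irq(). The pending value and 8 bit width are
 * hypothetical; a real handler reads them from the demux hardware and
 * brackets the loop with chained_irq_enter()/chained_irq_exit() from
 * <linux/irqchip/chained_irq.h>.
 */
static __maybe_unused void example_chained_flow_handler(struct irq_desc *desc)
{
	struct irq_domain *example_child_domain = irq_desc_get_handler_data(desc);
	unsigned long pending = 0x03;	/* hypothetical, read from hardware */
	int hwirq;

	for_each_set_bit(hwirq, &pending, 8)
		generic_handle_domain_irq(example_child_domain, hwirq);
}

static __maybe_unused void example_install_chained(unsigned int parent_irq,
						   struct irq_domain *example_child_domain)
{
	irq_set_chained_handler_and_data(parent_irq, example_chained_flow_handler,
					 example_child_domain);
}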

#ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
	unsigned int irq;

	for_each_active_irq(irq) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_chip *chip;

		if (!desc)
			continue;

		guard(raw_spinlock_irqsave)(&desc->lock);
		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);
	}
}

/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	unsigned int irq;

	for_each_active_irq(irq) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_chip *chip;

		if (!desc)
			continue;

		guard(raw_spinlock_irqsave)(&desc->lock);
		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);
	}
}
#endif

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY

#ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
/**
 * handle_fasteoi_ack_irq - irq handler for edge hierarchy stacked on
 *			    transparent controllers
 *
 * @desc:	the interrupt description structure for this irq
 *
 * Like handle_fasteoi_irq(), but for use with hierarchy where the irq_chip
 * also needs to have its ->irq_ack() function called.
 */
void handle_fasteoi_ack_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	guard(raw_spinlock)(&desc->lock);

	if (!irq_can_handle_pm(desc)) {
		cond_eoi_irq(chip, &desc->irq_data);
		return;
	}

	if (unlikely(!irq_can_handle_actions(desc))) {
		mask_irq(desc);
		cond_eoi_irq(chip, &desc->irq_data);
		return;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	desc->irq_data.chip->irq_ack(&desc->irq_data);

	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);

/**
 * handle_fasteoi_mask_irq - irq handler for level hierarchy stacked on
 *			     transparent controllers
 *
 * @desc:	the interrupt description structure for this irq
 *
 * Like handle_fasteoi_irq(), but for use with hierarchy where the irq_chip
 * also needs to have its ->irq_mask_ack() function called.
 */
void handle_fasteoi_mask_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	guard(raw_spinlock)(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_can_handle(desc)) {
		cond_eoi_irq(chip, &desc->irq_data);
		return;
	}

	kstat_incr_irqs_this_cpu(desc);

	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);

#endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */

/**
 * irq_chip_set_parent_state - set the state of a parent interrupt.
 *
 * @data:	Pointer to interrupt specific data
 * @which:	State to be restored (one of IRQCHIP_STATE_*)
 * @val:	Value corresponding to @which
 *
 * Conditional success: treated as success if the underlying irqchip does
 * not implement it.
 */
int irq_chip_set_parent_state(struct irq_data *data,
			      enum irqchip_irq_state which,
			      bool val)
{
	data = data->parent_data;

	if (!data || !data->chip->irq_set_irqchip_state)
		return 0;

	return data->chip->irq_set_irqchip_state(data, which, val);
}
EXPORT_SYMBOL_GPL(irq_chip_set_parent_state);

/**
 * irq_chip_get_parent_state - get the state of a parent interrupt.
 *
 * @data:	Pointer to interrupt specific data
 * @which:	one of IRQCHIP_STATE_* the caller wants to know
 * @state:	a pointer to a boolean where the state is to be stored
 *
 * Conditional success: treated as success if the underlying irqchip does
 * not implement it.
 */
int irq_chip_get_parent_state(struct irq_data *data,
			      enum irqchip_irq_state which,
			      bool *state)
{
	data = data->parent_data;

	if (!data || !data->chip->irq_get_irqchip_state)
		return 0;

	return data->chip->irq_get_irqchip_state(data, which, state);
}
EXPORT_SYMBOL_GPL(irq_chip_get_parent_state);

/**
 * irq_chip_shutdown_parent - Shutdown the parent interrupt
 * @data:	Pointer to interrupt specific data
 *
 * Invokes the irq_shutdown() callback of the parent if available or falls
 * back to irq_chip_disable_parent().
 */
void irq_chip_shutdown_parent(struct irq_data *data)
{
	struct irq_data *parent = data->parent_data;

	if (parent->chip->irq_shutdown)
		parent->chip->irq_shutdown(parent);
	else
		irq_chip_disable_parent(data);
}
EXPORT_SYMBOL_GPL(irq_chip_shutdown_parent);

/**
 * irq_chip_startup_parent - Startup the parent interrupt
 * @data:	Pointer to interrupt specific data
 *
 * Invokes the irq_startup() callback of the parent if available or falls
 * back to irq_chip_enable_parent().
 */
unsigned int irq_chip_startup_parent(struct irq_data *data)
{
	struct irq_data *parent = data->parent_data;

	if (parent->chip->irq_startup)
		return parent->chip->irq_startup(parent);

	irq_chip_enable_parent(data);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_chip_startup_parent);

/**
 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
 * NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_enable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_enable)
		data->chip->irq_enable(data);
	else
		data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_enable_parent);

/**
 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
 * NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_disable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_disable)
		data->chip->irq_disable(data);
	else
		data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_disable_parent);

/**
 * irq_chip_ack_parent - Acknowledge the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_ack_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_ack_parent);

/**
 * irq_chip_mask_parent - Mask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_mask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_parent);

/**
 * irq_chip_mask_ack_parent - Mask and acknowledge the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_mask_ack_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_mask_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_ack_parent);

/**
 * irq_chip_unmask_parent - Unmask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_unmask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);

/**
 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_eoi_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_eoi(data);
}
EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);

/**
 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @dest:	The affinity mask to set
 * @force:	Flag to enforce setting (disable online checks)
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_affinity_parent(struct irq_data *data,
				 const struct cpumask *dest, bool force)
{
	data = data->parent_data;
	if (data->chip->irq_set_affinity)
		return data->chip->irq_set_affinity(data, dest, force);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent);

/**
 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
{
	data = data->parent_data;

	if (data->chip->irq_set_type)
		return data->chip->irq_set_type(data, type);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);

/**
 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
 * @data:	Pointer to interrupt specific data
 *
 * Iterate through the domain hierarchy of the interrupt and check
 * whether a hw retrigger function exists. If yes, invoke it.
 */
int irq_chip_retrigger_hierarchy(struct irq_data *data)
{
	for (data = data->parent_data; data; data = data->parent_data)
		if (data->chip && data->chip->irq_retrigger)
			return data->chip->irq_retrigger(data);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_chip_retrigger_hierarchy);

/**
 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @vcpu_info:	The vcpu affinity information
 */
int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
{
	data = data->parent_data;
	if (data->chip->irq_set_vcpu_affinity)
		return data->chip->irq_set_vcpu_affinity(data, vcpu_info);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_vcpu_affinity_parent);
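
/*
 * Illustrative sketch: a chip for a stacked (hierarchical) domain often has
 * nothing to do at its own level and simply forwards to its parent using the
 * helpers above. example_stacked_chip is hypothetical.
 */
static __maybe_unused struct irq_chip example_stacked_chip = {
	.name			= "example-stacked",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
};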

/**
 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @on:		Whether to set or reset the wake-up capability of this irq
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
{
	data = data->parent_data;

	if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (data->chip->irq_set_wake)
		return data->chip->irq_set_wake(data, on);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_wake_parent);

/**
 * irq_chip_request_resources_parent - Request resources on the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
int irq_chip_request_resources_parent(struct irq_data *data)
{
	data = data->parent_data;

	if (data->chip->irq_request_resources)
		return data->chip->irq_request_resources(data);

	/* no error on missing optional irq_chip::irq_request_resources */
	return 0;
}
EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent);

/**
 * irq_chip_release_resources_parent - Release resources on the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_release_resources_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_release_resources)
		data->chip->irq_release_resources(data);
}
EXPORT_SYMBOL_GPL(irq_chip_release_resources_parent);
#endif

/**
 * irq_chip_compose_msi_msg - Compose msi message for an irq chip
 * @data:	Pointer to interrupt specific data
 * @msg:	Pointer to the MSI message
 *
 * For hierarchical domains we find the first chip in the hierarchy
 * which implements the irq_compose_msi_msg callback. For non-hierarchical
 * domains we use the top level chip.
 */
int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct irq_data *pos;

	for (pos = NULL; !pos && data; data = irqd_get_parent_data(data)) {
		if (data->chip && data->chip->irq_compose_msi_msg)
			pos = data;
	}

	if (!pos)
		return -ENOSYS;

	pos->chip->irq_compose_msi_msg(pos, msg);
	return 0;
}

static struct device *irq_get_pm_device(struct irq_data *data)
{
	if (data->domain)
		return data->domain->pm_dev;

	return NULL;
}

/**
 * irq_chip_pm_get - Enable power for an IRQ chip
 * @data:	Pointer to interrupt specific data
 *
 * Enable the power to the IRQ chip referenced by the interrupt data
 * structure.
 */
int irq_chip_pm_get(struct irq_data *data)
{
	struct device *dev = irq_get_pm_device(data);
	int retval = 0;

	if (IS_ENABLED(CONFIG_PM) && dev)
		retval = pm_runtime_resume_and_get(dev);

	return retval;
}

/**
 * irq_chip_pm_put - Disable power for an IRQ chip
 * @data:	Pointer to interrupt specific data
 *
 * Disable the power to the IRQ chip referenced by the interrupt data
 * structure. Note that power will only be disabled once this function has
 * been called for all IRQs that have called irq_chip_pm_get().
 */
int irq_chip_pm_put(struct irq_data *data)
{
	struct device *dev = irq_get_pm_device(data);
	int retval = 0;

	if (IS_ENABLED(CONFIG_PM) && dev)
		retval = pm_runtime_put(dev);

	return (retval < 0) ? retval : 0;
}
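
/*
 * Illustrative sketch: the PM hooks above only do something when the
 * irqdomain has a power managing device associated with it, which an irqchip
 * driver typically sets up once at probe time. example_domain and example_dev
 * are hypothetical; the helper irq_domain_set_pm_device() is assumed from
 * <linux/irqdomain.h>.
 */
static __maybe_unused void example_bind_pm_device(struct irq_domain *example_domain,
						  struct device *example_dev)
{
	/*
	 * From now on, request_irq()/free_irq() on interrupts of this domain
	 * will runtime resume/suspend example_dev through irq_chip_pm_get()
	 * and irq_chip_pm_put().
	 */
	irq_domain_set_pm_device(example_domain, example_dev);
}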