// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip based
 * architectures. Detailed information is available in
 * Documentation/core-api/genericirq.rst
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>

#include <trace/events/irq.h>

#include "internals.h"

static irqreturn_t bad_chained_irq(int irq, void *dev_id)
{
	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
	return IRQ_NONE;
}

/*
 * Chained handlers should never call action on their IRQ. This default
 * action will emit a warning if such a thing happens.
 */
struct irqaction chained_action = {
	.handler = bad_chained_irq,
};

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq: irq number
 * @chip: pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, const struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip);
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs.
	 */
	irq_mark_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);

/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq: irq number
 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	ret = __irq_set_trigger(desc, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);

/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq: Interrupt number
 * @data: Pointer to interrupt specific data
 *
 * Set the hardware irq controller data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 * @irq_base: Interrupt number base
 * @irq_offset: Interrupt number offset
 * @entry: Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq: Interrupt number
 * @entry: Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}

/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq: Interrupt number
 * @data: Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_clr_started(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
}

static void irq_state_set_started(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
}

enum {
	IRQ_STARTUP_NORMAL,
	IRQ_STARTUP_MANAGED,
	IRQ_STARTUP_ABORT,
};

#ifdef CONFIG_SMP
static int
__irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
		      bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return IRQ_STARTUP_NORMAL;

	irqd_clr_managed_shutdown(d);

	if (!cpumask_intersects(aff, cpu_online_mask)) {
		/*
		 * Catch code which fiddles with enable_irq() on a managed
		 * and potentially shutdown IRQ. Chained interrupt
		 * installment or irq auto probing should not happen on
		 * managed irqs either.
		 */
		if (WARN_ON_ONCE(force))
			return IRQ_STARTUP_ABORT;
		/*
		 * The interrupt was requested, but there is no online CPU
		 * in its affinity mask. Put it into managed shutdown
		 * state and let the cpu hotplug mechanism start it up once
		 * a CPU in the mask becomes available.
		 */
		return IRQ_STARTUP_ABORT;
	}
	/*
	 * Managed interrupts have reserved resources, so this should not
	 * happen.
	 */
	if (WARN_ON(irq_domain_activate_irq(d, false)))
		return IRQ_STARTUP_ABORT;
	return IRQ_STARTUP_MANAGED;
}
#else
static __always_inline int
__irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
		      bool force)
{
	return IRQ_STARTUP_NORMAL;
}
#endif

static int __irq_startup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	int ret = 0;

	/* Warn if this interrupt is not activated but try nevertheless */
	WARN_ON_ONCE(!irqd_is_activated(d));

	if (d->chip->irq_startup) {
		ret = d->chip->irq_startup(d);
		irq_state_clr_disabled(desc);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	irq_state_set_started(desc);
	return ret;
}

int irq_startup(struct irq_desc *desc, bool resend, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *aff = irq_data_get_affinity_mask(d);
	int ret = 0;

	desc->depth = 0;

	if (irqd_is_started(d)) {
		irq_enable(desc);
	} else {
		switch (__irq_startup_managed(desc, aff, force)) {
		case IRQ_STARTUP_NORMAL:
			if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)
				irq_setup_affinity(desc);
			ret = __irq_startup(desc);
			if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP))
				irq_setup_affinity(desc);
			break;
		case IRQ_STARTUP_MANAGED:
			irq_do_set_affinity(d, aff, false);
			ret = __irq_startup(desc);
			break;
		case IRQ_STARTUP_ABORT:
			irqd_set_managed_shutdown(d);
			return 0;
		}
	}
	if (resend)
		check_irq_resend(desc, false);

	return ret;
}

int irq_activate(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return irq_domain_activate_irq(d, false);
	return 0;
}

int irq_activate_and_startup(struct irq_desc *desc, bool resend)
{
	if (WARN_ON(irq_activate(desc)))
		return 0;
	return irq_startup(desc, resend, IRQ_START_FORCE);
}

static void __irq_disable(struct irq_desc *desc, bool mask);

void irq_shutdown(struct irq_desc *desc)
{
	if (irqd_is_started(&desc->irq_data)) {
		clear_irq_resend(desc);
		desc->depth = 1;
		if (desc->irq_data.chip->irq_shutdown) {
			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
			irq_state_set_disabled(desc);
			irq_state_set_masked(desc);
		} else {
			__irq_disable(desc, true);
		}
		irq_state_clr_started(desc);
	}
}

void irq_shutdown_and_deactivate(struct irq_desc *desc)
{
	irq_shutdown(desc);
	/*
	 * This must be called even if the interrupt was never started up,
	 * because the activation can happen before the interrupt is
	 * available for request/startup. It has its own state tracking so
	 * it's safe to call it unconditionally.
	 */
	irq_domain_deactivate_irq(&desc->irq_data);
}

void irq_enable(struct irq_desc *desc)
{
	if (!irqd_irq_disabled(&desc->irq_data)) {
		unmask_irq(desc);
	} else {
		irq_state_clr_disabled(desc);
		if (desc->irq_data.chip->irq_enable) {
			desc->irq_data.chip->irq_enable(&desc->irq_data);
			irq_state_clr_masked(desc);
		} else {
			unmask_irq(desc);
		}
	}
}

static void __irq_disable(struct irq_desc *desc, bool mask)
{
	if (irqd_irq_disabled(&desc->irq_data)) {
		if (mask)
			mask_irq(desc);
	} else {
		irq_state_set_disabled(desc);
		if (desc->irq_data.chip->irq_disable) {
			desc->irq_data.chip->irq_disable(&desc->irq_data);
			irq_state_set_masked(desc);
		} else if (mask) {
			mask_irq(desc);
		}
	}
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc: irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 *
 * If the interrupt chip does not implement the irq_disable callback,
 * a driver can disable the lazy approach for a particular irq line by
 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 * be used for devices which cannot disable the interrupt at the
 * device level under certain circumstances and have to use
 * disable_irq[_nosync] instead.
 */
void irq_disable(struct irq_desc *desc)
{
	__irq_disable(desc, irq_settings_disable_unlazy(desc));
}

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack) {
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
		irq_state_set_masked(desc);
	} else {
		mask_irq(desc);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
}

void mask_irq(struct irq_desc *desc)
{
	if (irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (!irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	unmask_irq(desc);
}

/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq: the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		raw_spin_unlock_irq(&desc->lock);
		return;
	}

	kstat_incr_irqs_this_cpu(desc);
	atomic_inc(&desc->threads_active);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = IRQ_NONE;
	for_each_action_of_desc(desc, action)
		action_ret |= action->thread_fn(action->irq, action->dev_id);

	if (!irq_settings_no_debug(desc))
		note_interrupt(desc, action_ret);

	wake_threads_waitq(desc);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

static bool irq_may_run(struct irq_desc *desc)
{
	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(&desc->irq_data, mask))
		return true;

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (irq_pm_check_wakeup(desc))
		return false;

	/*
	 * Handle a potential concurrent poll on a different core.
	 */
	return irq_check_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @desc: the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void handle_simple_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);

/**
 * handle_untracked_irq - Simple and software-decoded IRQs.
 * @desc: the interrupt description structure for this irq
 *
 * Untracked interrupts are sent from a demultiplexing interrupt
 * handler when the demultiplexer does not know which device in its
 * multiplexed irq domain generated the interrupt. IRQs handled
 * through here are not subjected to stats tracking, randomness, or
 * spurious interrupt detection.
 *
 * Note: Like handle_simple_irq, the caller is expected to handle
 * the ack, clear, mask and unmask issues if necessary.
 */
void handle_untracked_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	desc->istate &= ~IRQS_PENDING;
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);

	__handle_irq_event_percpu(desc);

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_untracked_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 * handle_level_irq - Level type irq handler
 * @desc: the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and
 * unmasking it after the associated handler has acknowledged the
 * device, so the interrupt line is back to inactive.
 */
void handle_level_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here.
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @desc: the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void handle_fasteoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	/*
	 * When an affinity change races with IRQ handling, the next interrupt
	 * can arrive on the new CPU before the original CPU has completed
	 * handling the previous one - it may need to be resent.
	 */
	if (!irq_may_run(desc)) {
		if (irqd_needs_resend_when_in_progress(&desc->irq_data))
			desc->istate |= IRQS_PENDING;
		goto out;
	}

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	/*
	 * When the race described above happens this will resend the interrupt.
	 */
	if (unlikely(desc->istate & IRQS_PENDING))
		check_irq_resend(desc, false);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);

/**
 * handle_fasteoi_nmi - irq handler for NMI interrupt lines
 * @desc: the interrupt description structure for this irq
 *
 * A simple NMI-safe handler, considering the restrictions
 * from request_nmi.
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void handle_fasteoi_nmi(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	__kstat_incr_irqs_this_cpu(desc);

	trace_irq_handler_entry(irq, action);
	/*
	 * NMIs cannot be shared, there is only one action.
	 */
	res = action->handler(irq, action->dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_nmi);

/**
 * handle_edge_irq - edge type IRQ handler
 * @desc: the interrupt description structure for this irq
 *
 * Interrupts occur on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be reenabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires reenabling the interrupt inside
 * the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void handle_edge_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If it's disabled or no action is available, then mask it and
	 * get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 * handle_edge_eoi_irq - edge eoi type IRQ handler
 * @desc: the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq above, but using eoi and without the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If it's disabled or no action is available, mark it pending
	 * and get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @desc: the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	/*
	 * PER CPU interrupts are not serialized. Do not touch
	 * desc->tot_count.
	 */
	__kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @desc: the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	/*
	 * PER CPU interrupts are not serialized. Do not touch
	 * desc->tot_count.
	 */
	__kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	if (likely(action)) {
		trace_irq_handler_entry(irq, action);
		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
		trace_irq_handler_exit(irq, action, res);
	} else {
		unsigned int cpu = smp_processor_id();
		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);

		if (enabled)
			irq_percpu_disable(desc, cpu);

		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
			    enabled ? " and unmasked" : "", irq, cpu);
	}

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
 *				     dev ids
 * @desc: the interrupt description structure for this irq
 *
 * Similar to handle_fasteoi_nmi, but handling the dev_id cookie
 * as a percpu pointer.
 */
void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	__kstat_incr_irqs_this_cpu(desc);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
		     int is_chained, const char *name)
{
	if (!handle) {
		handle = handle_bad_irq;
	} else {
		struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/*
		 * With hierarchical domains we might run into a
		 * situation where the outermost chip is not yet set
		 * up, but the inner chips are there. Instead of
		 * bailing we install the handler, but obviously we
		 * cannot enable/startup the interrupt at this point.
		 */
		while (irq_data) {
			if (irq_data->chip != &no_irq_chip)
				break;
			/*
			 * Bail out if the outer chip is not set up
			 * and the interrupt is supposed to be started
			 * right away.
			 */
			if (WARN_ON(is_chained))
				return;
			/* Try the parent */
			irq_data = irq_data->parent_data;
		}
#endif
		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
			return;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		if (is_chained) {
			desc->action = NULL;
			WARN_ON(irq_chip_pm_put(irq_desc_get_irq_data(desc)));
		}
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		unsigned int type = irqd_get_trigger_type(&desc->irq_data);

		/*
		 * We're about to start this interrupt immediately,
		 * hence the need to set the trigger configuration.
		 * But the .set_type callback may have overridden the
		 * flow handler, ignoring that we're dealing with a
		 * chained interrupt. Reset it immediately because we
		 * do know better.
		 */
		if (type != IRQ_TYPE_NONE) {
			__irq_set_trigger(desc, type);
			desc->handle_irq = handle;
		}

		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		desc->action = &chained_action;
		WARN_ON(irq_chip_pm_get(irq_desc_get_irq_data(desc)));
		irq_activate_and_startup(desc, IRQ_RESEND);
	}
}

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	__irq_do_set_handler(desc, handle, is_chained, name);
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
				 void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	desc->irq_common_data.handler_data = data;
	__irq_do_set_handler(desc, handle, 1, NULL);

	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);

void
irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags, trigger, tmp;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;

	/*
	 * Warn when a driver sets the no autoenable flag on an already
	 * active interrupt.
	 */
	WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));

	irq_settings_clr_and_set(desc, clr, set);

	trigger = irqd_get_trigger_type(&desc->irq_data);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	tmp = irq_settings_get_trigger_mask(desc);
	if (tmp != IRQ_TYPE_NONE)
		trigger = tmp;

	irqd_set(&desc->irq_data, trigger);

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);

#ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}
#endif

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY

#ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
/**
 * handle_fasteoi_ack_irq - irq handler for edge hierarchy
 *			    stacked on transparent controllers
 *
 * @desc: the interrupt description structure for this irq
 *
 * Like handle_fasteoi_irq(), but for use with hierarchy where
 * the irq_chip also needs to have its ->irq_ack() function
 * called.
 */
void handle_fasteoi_ack_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);

/**
 * handle_fasteoi_mask_irq - irq handler for level hierarchy
 *			     stacked on transparent controllers
 *
 * @desc: the interrupt description structure for this irq
 *
 * Like handle_fasteoi_irq(), but for use with hierarchy where
 * the irq_chip also needs to have its ->irq_mask_ack() function
 * called.
 */
void handle_fasteoi_mask_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);

#endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */

/**
 * irq_chip_set_parent_state - set the state of a parent interrupt.
 *
 * @data: Pointer to interrupt specific data
 * @which: State to be restored (one of IRQCHIP_STATE_*)
 * @val: Value corresponding to @which
 *
 * Conditional success, if the underlying irqchip does not implement it.
 */
int irq_chip_get_parent_state(struct irq_data *data,
			      enum irqchip_irq_state which,
			      bool *state)
{
	data = data->parent_data;

	if (!data || !data->chip->irq_get_irqchip_state)
		return 0;

	return data->chip->irq_get_irqchip_state(data, which, state);
}
EXPORT_SYMBOL_GPL(irq_chip_get_parent_state);

/**
 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
 *			    NULL)
 * @data: Pointer to interrupt specific data
 */
void irq_chip_enable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_enable)
		data->chip->irq_enable(data);
	else
		data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_enable_parent);

/**
 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
 *			     NULL)
 * @data: Pointer to interrupt specific data
 */
void irq_chip_disable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_disable)
		data->chip->irq_disable(data);
	else
		data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_disable_parent);

/**
 * irq_chip_ack_parent - Acknowledge the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_ack_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_ack_parent);

/**
 * irq_chip_mask_parent - Mask the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_mask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_parent);

/**
 * irq_chip_mask_ack_parent - Mask and acknowledge the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_mask_ack_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_mask_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_ack_parent);

/**
 * irq_chip_unmask_parent - Unmask the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_unmask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);

/**
 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_eoi_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_eoi(data);
}
EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);

/**
 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
 * @data: Pointer to interrupt specific data
 * @dest: The affinity mask to set
 * @force: Flag to enforce setting (disable online checks)
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_affinity_parent(struct irq_data *data,
				 const struct cpumask *dest, bool force)
{
	data = data->parent_data;
	if (data->chip->irq_set_affinity)
		return data->chip->irq_set_affinity(data, dest, force);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent);

/**
 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
 * @data: Pointer to interrupt specific data
 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
{
	data = data->parent_data;

	if (data->chip->irq_set_type)
		return data->chip->irq_set_type(data, type);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);

/**
 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
 * @data: Pointer to interrupt specific data
 *
 * Iterate through the domain hierarchy of the interrupt and check
 * whether a hw retrigger function exists. If yes, invoke it.
 */
int irq_chip_retrigger_hierarchy(struct irq_data *data)
{
	for (data = data->parent_data; data; data = data->parent_data)
		if (data->chip && data->chip->irq_retrigger)
			return data->chip->irq_retrigger(data);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_chip_retrigger_hierarchy);

/**
 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
 * @data: Pointer to interrupt specific data
 * @vcpu_info: The vcpu affinity information
 */
int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
{
	data = data->parent_data;
	if (data->chip->irq_set_vcpu_affinity)
		return data->chip->irq_set_vcpu_affinity(data, vcpu_info);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_vcpu_affinity_parent);

/**
 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
 * @data: Pointer to interrupt specific data
 * @on: Whether to set or reset the wake-up capability of this irq
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
{
	data = data->parent_data;

	if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (data->chip->irq_set_wake)
		return data->chip->irq_set_wake(data, on);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_wake_parent);

/**
 * irq_chip_request_resources_parent - Request resources on the parent interrupt
 * @data: Pointer to interrupt specific data
 */
int irq_chip_request_resources_parent(struct irq_data *data)
{
	data = data->parent_data;

	if (data->chip->irq_request_resources)
		return data->chip->irq_request_resources(data);

	/* no error on missing optional irq_chip::irq_request_resources */
	return 0;
}
EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent);

/**
 * irq_chip_release_resources_parent - Release resources on the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_release_resources_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_release_resources)
		data->chip->irq_release_resources(data);
}
EXPORT_SYMBOL_GPL(irq_chip_release_resources_parent);
#endif

/**
 * irq_chip_compose_msi_msg - Compose msi message for an irq chip
 * @data: Pointer to interrupt specific data
 * @msg: Pointer to the MSI message
 *
 * For hierarchical domains we find the first chip in the hierarchy
 * which implements the irq_compose_msi_msg callback. For non-hierarchical
 * domains we use the top level chip.
 */
int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct irq_data *pos;

	for (pos = NULL; !pos && data; data = irqd_get_parent_data(data)) {
		if (data->chip && data->chip->irq_compose_msi_msg)
			pos = data;
	}

	if (!pos)
		return -ENOSYS;

	pos->chip->irq_compose_msi_msg(pos, msg);
	return 0;
}

static struct device *irq_get_pm_device(struct irq_data *data)
{
	if (data->domain)
		return data->domain->pm_dev;

	return NULL;
}

/**
 * irq_chip_pm_get - Enable power for an IRQ chip
 * @data: Pointer to interrupt specific data
 *
 * Enable the power to the IRQ chip referenced by the interrupt data
 * structure.
 */
int irq_chip_pm_get(struct irq_data *data)
{
	struct device *dev = irq_get_pm_device(data);
	int retval = 0;

	if (IS_ENABLED(CONFIG_PM) && dev)
		retval = pm_runtime_resume_and_get(dev);

	return retval;
}

/**
 * irq_chip_pm_put - Disable power for an IRQ chip
 * @data: Pointer to interrupt specific data
 *
 * Disable the power to the IRQ chip referenced by the interrupt data
 * structure. Note that power will only be disabled once this function
 * has been called for all IRQs that have called irq_chip_pm_get().
 */
int irq_chip_pm_put(struct irq_data *data)
{
	struct device *dev = irq_get_pm_device(data);
	int retval = 0;

	if (IS_ENABLED(CONFIG_PM) && dev)
		retval = pm_runtime_put(dev);

	return (retval < 0) ? retval : 0;
}