// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip based
 * architectures. Detailed information is available in
 * Documentation/core-api/genericirq.rst
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>

#include <trace/events/irq.h>

#include "internals.h"

static irqreturn_t bad_chained_irq(int irq, void *dev_id)
{
	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
	return IRQ_NONE;
}

/*
 * Chained handlers should never call action on their IRQ. This default
 * action will emit a warning if such a thing happens.
 */
struct irqaction chained_action = {
	.handler = bad_chained_irq,
};

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq:	irq number
 * @chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, const struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip);
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs.
	 */
	irq_mark_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);

/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq:	irq number
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	ret = __irq_set_trigger(desc, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
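
/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * irqchip driver wiring up a freshly mapped interrupt with the setters
 * above. "my_chip", "my_mask", "my_unmask" and "virq" are assumed driver
 * names, not kernel symbols.
 *
 *	static struct irq_chip my_chip = {
 *		.name		= "my-intc",
 *		.irq_mask	= my_mask,
 *		.irq_unmask	= my_unmask,
 *	};
 *
 *	ret = irq_set_chip(virq, &my_chip);
 *	if (!ret)
 *		ret = irq_set_irq_type(virq, IRQ_TYPE_LEVEL_HIGH);
 */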

/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to interrupt specific data
 *
 * Set the handler data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 * @irq_base:	Interrupt number base
 * @irq_offset:	Interrupt number offset
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq:	Interrupt number
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}

/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);
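
/*
 * Example (illustrative sketch, not part of this file): chip specific data
 * stored with irq_set_chip_data() is typically retrieved again inside the
 * irq_chip callbacks via irq_data_get_irq_chip_data(). "my_priv",
 * "my_mask" and "MY_MASK_REG" are assumed driver-local names.
 *
 *	irq_set_chip_data(virq, priv);
 *
 *	static void my_mask(struct irq_data *d)
 *	{
 *		struct my_priv *priv = irq_data_get_irq_chip_data(d);
 *
 *		writel(BIT(d->hwirq), priv->base + MY_MASK_REG);
 *	}
 */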

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_clr_started(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
}

static void irq_state_set_started(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
}

enum {
	IRQ_STARTUP_NORMAL,
	IRQ_STARTUP_MANAGED,
	IRQ_STARTUP_ABORT,
};

#ifdef CONFIG_SMP
static int
__irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
		      bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return IRQ_STARTUP_NORMAL;

	irqd_clr_managed_shutdown(d);

	if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * Catch code which fiddles with enable_irq() on a managed
		 * and potentially shutdown IRQ. Chained interrupt
		 * installation or irq auto probing should not happen on
		 * managed irqs either.
		 */
		if (WARN_ON_ONCE(force))
			return IRQ_STARTUP_ABORT;
		/*
		 * The interrupt was requested, but there is no online CPU
		 * in its affinity mask. Put it into managed shutdown
		 * state and let the cpu hotplug mechanism start it up once
		 * a CPU in the mask becomes available.
		 */
		return IRQ_STARTUP_ABORT;
	}
	/*
	 * Managed interrupts have reserved resources, so this should not
	 * happen.
	 */
	if (WARN_ON(irq_domain_activate_irq(d, false)))
		return IRQ_STARTUP_ABORT;
	return IRQ_STARTUP_MANAGED;
}
#else
static __always_inline int
__irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
		      bool force)
{
	return IRQ_STARTUP_NORMAL;
}
#endif

static int __irq_startup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	int ret = 0;

	/* Warn if this interrupt is not activated but try nevertheless */
	WARN_ON_ONCE(!irqd_is_activated(d));

	if (d->chip->irq_startup) {
		ret = d->chip->irq_startup(d);
		irq_state_clr_disabled(desc);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	irq_state_set_started(desc);
	return ret;
}

int irq_startup(struct irq_desc *desc, bool resend, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *aff = irq_data_get_affinity_mask(d);
	int ret = 0;

	desc->depth = 0;

	if (irqd_is_started(d)) {
		irq_enable(desc);
	} else {
		switch (__irq_startup_managed(desc, aff, force)) {
		case IRQ_STARTUP_NORMAL:
			if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)
				irq_setup_affinity(desc);
			ret = __irq_startup(desc);
			if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP))
				irq_setup_affinity(desc);
			break;
		case IRQ_STARTUP_MANAGED:
			irq_do_set_affinity(d, aff, false);
			ret = __irq_startup(desc);
			break;
		case IRQ_STARTUP_ABORT:
			irqd_set_managed_shutdown(d);
			return 0;
		}
	}
	if (resend)
		check_irq_resend(desc, false);

	return ret;
}

int irq_activate(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return irq_domain_activate_irq(d, false);
	return 0;
}

int irq_activate_and_startup(struct irq_desc *desc, bool resend)
{
	if (WARN_ON(irq_activate(desc)))
		return 0;
	return irq_startup(desc, resend, IRQ_START_FORCE);
}

static void __irq_disable(struct irq_desc *desc, bool mask);

void irq_shutdown(struct irq_desc *desc)
{
	if (irqd_is_started(&desc->irq_data)) {
		clear_irq_resend(desc);
		desc->depth = 1;
		if (desc->irq_data.chip->irq_shutdown) {
			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
			irq_state_set_disabled(desc);
			irq_state_set_masked(desc);
		} else {
			__irq_disable(desc, true);
		}
		irq_state_clr_started(desc);
	}
}

void irq_shutdown_and_deactivate(struct irq_desc *desc)
{
	irq_shutdown(desc);
	/*
	 * This must be called even if the interrupt was never started up,
	 * because the activation can happen before the interrupt is
	 * available for request/startup. It has its own state tracking so
	 * it's safe to call it unconditionally.
	 */
	irq_domain_deactivate_irq(&desc->irq_data);
}

void irq_enable(struct irq_desc *desc)
{
	if (!irqd_irq_disabled(&desc->irq_data)) {
		unmask_irq(desc);
	} else {
		irq_state_clr_disabled(desc);
		if (desc->irq_data.chip->irq_enable) {
			desc->irq_data.chip->irq_enable(&desc->irq_data);
			irq_state_clr_masked(desc);
		} else {
			unmask_irq(desc);
		}
	}
}

static void __irq_disable(struct irq_desc *desc, bool mask)
{
	if (irqd_irq_disabled(&desc->irq_data)) {
		if (mask)
			mask_irq(desc);
	} else {
		irq_state_set_disabled(desc);
		if (desc->irq_data.chip->irq_disable) {
			desc->irq_data.chip->irq_disable(&desc->irq_data);
			irq_state_set_masked(desc);
		} else if (mask) {
			mask_irq(desc);
		}
	}
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 *
 * If the interrupt chip does not implement the irq_disable callback,
 * a driver can disable the lazy approach for a particular irq line by
 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 * be used for devices which cannot disable the interrupt at the
 * device level under certain circumstances and have to use
 * disable_irq[_nosync] instead.
 */
void irq_disable(struct irq_desc *desc)
{
	__irq_disable(desc, irq_settings_disable_unlazy(desc));
}
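
/*
 * Example (illustrative sketch, not part of this file): a driver whose
 * device cannot mask the interrupt at the device level can opt out of the
 * lazy disable behaviour described above, so that disable_irq() masks the
 * line at the irq chip right away. "virq" is an assumed variable name.
 *
 *	irq_set_status_flags(virq, IRQ_DISABLE_UNLAZY);
 *	...
 *	disable_irq(virq);
 *	...
 *	enable_irq(virq);
 */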

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack) {
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
		irq_state_set_masked(desc);
	} else {
		mask_irq(desc);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
}

void mask_irq(struct irq_desc *desc)
{
	if (irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (!irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	unmask_irq(desc);
}

/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq:	the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = IRQ_NONE;
	for_each_action_of_desc(desc, action)
		action_ret |= action->thread_fn(action->irq, action->dev_id);

	if (!irq_settings_no_debug(desc))
		note_interrupt(desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

static bool irq_may_run(struct irq_desc *desc)
{
	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(&desc->irq_data, mask))
		return true;

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (irq_pm_check_wakeup(desc))
		return false;

	/*
	 * Handle a potential concurrent poll on a different core.
	 */
	return irq_check_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @desc:	the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void handle_simple_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
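
/*
 * Example (illustrative sketch, not part of this file): interrupts flowing
 * through handle_simple_irq() typically come from a demultiplexing parent
 * handler which decodes a status register and forwards each set bit to the
 * matching child interrupt. "my_priv", "MY_STATUS" and the other my_*
 * names are assumed driver-local identifiers.
 *
 *	static irqreturn_t my_demux_handler(int irq, void *data)
 *	{
 *		struct my_priv *priv = data;
 *		unsigned long status = readl(priv->base + MY_STATUS);
 *		int bit;
 *
 *		for_each_set_bit(bit, &status, priv->nr_irqs)
 *			generic_handle_domain_irq(priv->domain, bit);
 *
 *		return status ? IRQ_HANDLED : IRQ_NONE;
 *	}
 */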

/**
 * handle_untracked_irq - Simple and software-decoded IRQs.
 * @desc:	the interrupt description structure for this irq
 *
 * Untracked interrupts are sent from a demultiplexing interrupt
 * handler when the demultiplexer does not know which device in its
 * multiplexed irq domain generated the interrupt. IRQs handled
 * through here are not subjected to stats tracking, randomness, or
 * spurious interrupt detection.
 *
 * Note: Like handle_simple_irq, the caller is expected to handle
 * the ack, clear, mask and unmask issues if necessary.
 */
void handle_untracked_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	desc->istate &= ~IRQS_PENDING;
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);

	__handle_irq_event_percpu(desc);

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_untracked_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 * handle_level_irq - Level type irq handler
 * @desc:	the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and
 * unmasking it after the associated handler has acknowledged the
 * device, so the interrupt line is back to inactive.
 */
void handle_level_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
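
/*
 * Example (illustrative sketch, not part of this file): an irq domain's
 * ->map() callback for a level triggered controller would typically pick
 * handle_level_irq as the flow handler. "my_chip" and "my_domain_map" are
 * assumed driver names.
 *
 *	static int my_domain_map(struct irq_domain *d, unsigned int virq,
 *				 irq_hw_number_t hwirq)
 *	{
 *		irq_set_chip_and_handler(virq, &my_chip, handle_level_irq);
 *		irq_set_chip_data(virq, d->host_data);
 *		return 0;
 *	}
 */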

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @desc:	the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void handle_fasteoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	/*
	 * When an affinity change races with IRQ handling, the next interrupt
	 * can arrive on the new CPU before the original CPU has completed
	 * handling the previous one - it may need to be resent.
	 */
	if (!irq_may_run(desc)) {
		if (irqd_needs_resend_when_in_progress(&desc->irq_data))
			desc->istate |= IRQS_PENDING;
		goto out;
	}

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	/*
	 * When the race described above happens this will resend the interrupt.
	 */
	if (unlikely(desc->istate & IRQS_PENDING))
		check_irq_resend(desc, false);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);

/**
 * handle_fasteoi_nmi - irq handler for NMI interrupt lines
 * @desc:	the interrupt description structure for this irq
 *
 * A simple NMI-safe handler, considering the restrictions
 * from request_nmi.
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void handle_fasteoi_nmi(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	__kstat_incr_irqs_this_cpu(desc);

	trace_irq_handler_entry(irq, action);
	/*
	 * NMIs cannot be shared, there is only one action.
	 */
	res = action->handler(irq, action->dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_nmi);

/**
 * handle_edge_irq - edge type IRQ handler
 * @desc:	the interrupt description structure for this irq
 *
 * Interrupts occur on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be reenabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires reenabling the interrupt inside
 * the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void handle_edge_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If it's disabled or no action is available then mask it and get
	 * out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Reenable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 * handle_edge_eoi_irq - edge eoi type IRQ handler
 * @desc:	the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq above, but using eoi and without the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If it's disabled or no action is available then mask it and get
	 * out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	/*
	 * PER CPU interrupts are not serialized. Do not touch
	 * desc->tot_count.
	 */
	__kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called.
 */
void handle_percpu_devid_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	/*
	 * PER CPU interrupts are not serialized. Do not touch
	 * desc->tot_count.
	 */
	__kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	if (likely(action)) {
		trace_irq_handler_entry(irq, action);
		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
		trace_irq_handler_exit(irq, action, res);
	} else {
		unsigned int cpu = smp_processor_id();
		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);

		if (enabled)
			irq_percpu_disable(desc, cpu);

		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
			    enabled ? " and unmasked" : "", irq, cpu);
	}

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
 *				     dev ids
 * @desc:	the interrupt description structure for this irq
 *
 * Similar to handle_fasteoi_nmi, but handling the dev_id cookie
 * as a percpu pointer.
 */
void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	__kstat_incr_irqs_this_cpu(desc);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
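
/*
 * Example (illustrative sketch, not part of this file): per-CPU interrupts
 * handled by handle_percpu_devid_irq() are set up with a per-CPU dev id,
 * requested with the percpu request API and then enabled on each CPU that
 * uses them. "my_chip", "my_handler", "my_percpu_data" and "virq" are
 * assumed driver names.
 *
 *	irq_set_percpu_devid(virq);
 *	irq_set_chip_and_handler(virq, &my_chip, handle_percpu_devid_irq);
 *
 *	ret = request_percpu_irq(virq, my_handler, "my-timer",
 *				 my_percpu_data);
 *
 *	On each CPU, e.g. from a CPU hotplug callback:
 *
 *	enable_percpu_irq(virq, IRQ_TYPE_NONE);
 */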

static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
		     int is_chained, const char *name)
{
	if (!handle) {
		handle = handle_bad_irq;
	} else {
		struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/*
		 * With hierarchical domains we might run into a
		 * situation where the outermost chip is not yet set
		 * up, but the inner chips are there. Instead of
		 * bailing we install the handler, but obviously we
		 * cannot enable/startup the interrupt at this point.
		 */
		while (irq_data) {
			if (irq_data->chip != &no_irq_chip)
				break;
			/*
			 * Bail out if the outer chip is not set up
			 * and the interrupt is supposed to be started
			 * right away.
			 */
			if (WARN_ON(is_chained))
				return;
			/* Try the parent */
			irq_data = irq_data->parent_data;
		}
#endif
		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
			return;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		if (is_chained) {
			desc->action = NULL;
			WARN_ON(irq_chip_pm_put(irq_desc_get_irq_data(desc)));
		}
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		unsigned int type = irqd_get_trigger_type(&desc->irq_data);

		/*
		 * We're about to start this interrupt immediately,
		 * hence the need to set the trigger configuration.
		 * But the .set_type callback may have overridden the
		 * flow handler, ignoring that we're dealing with a
		 * chained interrupt. Reset it immediately because we
		 * do know better.
		 */
		if (type != IRQ_TYPE_NONE) {
			__irq_set_trigger(desc, type);
			desc->handle_irq = handle;
		}

		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		desc->action = &chained_action;
		WARN_ON(irq_chip_pm_get(irq_desc_get_irq_data(desc)));
		irq_activate_and_startup(desc, IRQ_RESEND);
	}
}

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	__irq_do_set_handler(desc, handle, is_chained, name);
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
				 void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	desc->irq_common_data.handler_data = data;
	__irq_do_set_handler(desc, handle, 1, NULL);

	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);
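
/*
 * Example (illustrative sketch, not part of this file): a secondary
 * interrupt controller hanging off a parent line typically installs a
 * chained flow handler on the parent irq and demultiplexes in it.
 * "my_priv", "MY_STATUS" and the other my_* names are assumed driver
 * identifiers.
 *
 *	static void my_chained_handler(struct irq_desc *desc)
 *	{
 *		struct my_priv *priv = irq_desc_get_handler_data(desc);
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		unsigned long status;
 *		int bit;
 *
 *		chained_irq_enter(chip, desc);
 *
 *		status = readl(priv->base + MY_STATUS);
 *		for_each_set_bit(bit, &status, priv->nr_irqs)
 *			generic_handle_domain_irq(priv->domain, bit);
 *
 *		chained_irq_exit(chip, desc);
 *	}
 *
 *	irq_set_chained_handler_and_data(parent_irq, my_chained_handler,
 *					 priv);
 */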
1112 */ 1113 WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN)); 1114 1115 irq_settings_clr_and_set(desc, clr, set); 1116 1117 trigger = irqd_get_trigger_type(&desc->irq_data); 1118 1119 irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU | 1120 IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT); 1121 if (irq_settings_has_no_balance_set(desc)) 1122 irqd_set(&desc->irq_data, IRQD_NO_BALANCING); 1123 if (irq_settings_is_per_cpu(desc)) 1124 irqd_set(&desc->irq_data, IRQD_PER_CPU); 1125 if (irq_settings_can_move_pcntxt(desc)) 1126 irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT); 1127 if (irq_settings_is_level(desc)) 1128 irqd_set(&desc->irq_data, IRQD_LEVEL); 1129 1130 tmp = irq_settings_get_trigger_mask(desc); 1131 if (tmp != IRQ_TYPE_NONE) 1132 trigger = tmp; 1133 1134 irqd_set(&desc->irq_data, trigger); 1135 1136 irq_put_desc_unlock(desc, flags); 1137 } 1138 EXPORT_SYMBOL_GPL(irq_modify_status); 1139 1140 #ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE 1141 /** 1142 * irq_cpu_online - Invoke all irq_cpu_online functions. 1143 * 1144 * Iterate through all irqs and invoke the chip.irq_cpu_online() 1145 * for each. 1146 */ 1147 void irq_cpu_online(void) 1148 { 1149 struct irq_desc *desc; 1150 struct irq_chip *chip; 1151 unsigned long flags; 1152 unsigned int irq; 1153 1154 for_each_active_irq(irq) { 1155 desc = irq_to_desc(irq); 1156 if (!desc) 1157 continue; 1158 1159 raw_spin_lock_irqsave(&desc->lock, flags); 1160 1161 chip = irq_data_get_irq_chip(&desc->irq_data); 1162 if (chip && chip->irq_cpu_online && 1163 (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || 1164 !irqd_irq_disabled(&desc->irq_data))) 1165 chip->irq_cpu_online(&desc->irq_data); 1166 1167 raw_spin_unlock_irqrestore(&desc->lock, flags); 1168 } 1169 } 1170 1171 /** 1172 * irq_cpu_offline - Invoke all irq_cpu_offline functions. 1173 * 1174 * Iterate through all irqs and invoke the chip.irq_cpu_offline() 1175 * for each. 1176 */ 1177 void irq_cpu_offline(void) 1178 { 1179 struct irq_desc *desc; 1180 struct irq_chip *chip; 1181 unsigned long flags; 1182 unsigned int irq; 1183 1184 for_each_active_irq(irq) { 1185 desc = irq_to_desc(irq); 1186 if (!desc) 1187 continue; 1188 1189 raw_spin_lock_irqsave(&desc->lock, flags); 1190 1191 chip = irq_data_get_irq_chip(&desc->irq_data); 1192 if (chip && chip->irq_cpu_offline && 1193 (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || 1194 !irqd_irq_disabled(&desc->irq_data))) 1195 chip->irq_cpu_offline(&desc->irq_data); 1196 1197 raw_spin_unlock_irqrestore(&desc->lock, flags); 1198 } 1199 } 1200 #endif 1201 1202 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 1203 1204 #ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS 1205 /** 1206 * handle_fasteoi_ack_irq - irq handler for edge hierarchy 1207 * stacked on transparent controllers 1208 * 1209 * @desc: the interrupt description structure for this irq 1210 * 1211 * Like handle_fasteoi_irq(), but for use with hierarchy where 1212 * the irq_chip also needs to have its ->irq_ack() function 1213 * called. 
1214 */ 1215 void handle_fasteoi_ack_irq(struct irq_desc *desc) 1216 { 1217 struct irq_chip *chip = desc->irq_data.chip; 1218 1219 raw_spin_lock(&desc->lock); 1220 1221 if (!irq_may_run(desc)) 1222 goto out; 1223 1224 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 1225 1226 /* 1227 * If its disabled or no action available 1228 * then mask it and get out of here: 1229 */ 1230 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { 1231 desc->istate |= IRQS_PENDING; 1232 mask_irq(desc); 1233 goto out; 1234 } 1235 1236 kstat_incr_irqs_this_cpu(desc); 1237 if (desc->istate & IRQS_ONESHOT) 1238 mask_irq(desc); 1239 1240 /* Start handling the irq */ 1241 desc->irq_data.chip->irq_ack(&desc->irq_data); 1242 1243 handle_irq_event(desc); 1244 1245 cond_unmask_eoi_irq(desc, chip); 1246 1247 raw_spin_unlock(&desc->lock); 1248 return; 1249 out: 1250 if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) 1251 chip->irq_eoi(&desc->irq_data); 1252 raw_spin_unlock(&desc->lock); 1253 } 1254 EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq); 1255 1256 /** 1257 * handle_fasteoi_mask_irq - irq handler for level hierarchy 1258 * stacked on transparent controllers 1259 * 1260 * @desc: the interrupt description structure for this irq 1261 * 1262 * Like handle_fasteoi_irq(), but for use with hierarchy where 1263 * the irq_chip also needs to have its ->irq_mask_ack() function 1264 * called. 1265 */ 1266 void handle_fasteoi_mask_irq(struct irq_desc *desc) 1267 { 1268 struct irq_chip *chip = desc->irq_data.chip; 1269 1270 raw_spin_lock(&desc->lock); 1271 mask_ack_irq(desc); 1272 1273 if (!irq_may_run(desc)) 1274 goto out; 1275 1276 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 1277 1278 /* 1279 * If its disabled or no action available 1280 * then mask it and get out of here: 1281 */ 1282 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { 1283 desc->istate |= IRQS_PENDING; 1284 mask_irq(desc); 1285 goto out; 1286 } 1287 1288 kstat_incr_irqs_this_cpu(desc); 1289 if (desc->istate & IRQS_ONESHOT) 1290 mask_irq(desc); 1291 1292 handle_irq_event(desc); 1293 1294 cond_unmask_eoi_irq(desc, chip); 1295 1296 raw_spin_unlock(&desc->lock); 1297 return; 1298 out: 1299 if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) 1300 chip->irq_eoi(&desc->irq_data); 1301 raw_spin_unlock(&desc->lock); 1302 } 1303 EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq); 1304 1305 #endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */ 1306 1307 /** 1308 * irq_chip_set_parent_state - set the state of a parent interrupt. 1309 * 1310 * @data: Pointer to interrupt specific data 1311 * @which: State to be restored (one of IRQCHIP_STATE_*) 1312 * @val: Value corresponding to @which 1313 * 1314 * Conditional success, if the underlying irqchip does not implement it. 1315 */ 1316 int irq_chip_set_parent_state(struct irq_data *data, 1317 enum irqchip_irq_state which, 1318 bool val) 1319 { 1320 data = data->parent_data; 1321 1322 if (!data || !data->chip->irq_set_irqchip_state) 1323 return 0; 1324 1325 return data->chip->irq_set_irqchip_state(data, which, val); 1326 } 1327 EXPORT_SYMBOL_GPL(irq_chip_set_parent_state); 1328 1329 /** 1330 * irq_chip_get_parent_state - get the state of a parent interrupt. 1331 * 1332 * @data: Pointer to interrupt specific data 1333 * @which: one of IRQCHIP_STATE_* the caller wants to know 1334 * @state: a pointer to a boolean where the state is to be stored 1335 * 1336 * Conditional success, if the underlying irqchip does not implement it. 
1337 */ 1338 int irq_chip_get_parent_state(struct irq_data *data, 1339 enum irqchip_irq_state which, 1340 bool *state) 1341 { 1342 data = data->parent_data; 1343 1344 if (!data || !data->chip->irq_get_irqchip_state) 1345 return 0; 1346 1347 return data->chip->irq_get_irqchip_state(data, which, state); 1348 } 1349 EXPORT_SYMBOL_GPL(irq_chip_get_parent_state); 1350 1351 /** 1352 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if 1353 * NULL) 1354 * @data: Pointer to interrupt specific data 1355 */ 1356 void irq_chip_enable_parent(struct irq_data *data) 1357 { 1358 data = data->parent_data; 1359 if (data->chip->irq_enable) 1360 data->chip->irq_enable(data); 1361 else 1362 data->chip->irq_unmask(data); 1363 } 1364 EXPORT_SYMBOL_GPL(irq_chip_enable_parent); 1365 1366 /** 1367 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if 1368 * NULL) 1369 * @data: Pointer to interrupt specific data 1370 */ 1371 void irq_chip_disable_parent(struct irq_data *data) 1372 { 1373 data = data->parent_data; 1374 if (data->chip->irq_disable) 1375 data->chip->irq_disable(data); 1376 else 1377 data->chip->irq_mask(data); 1378 } 1379 EXPORT_SYMBOL_GPL(irq_chip_disable_parent); 1380 1381 /** 1382 * irq_chip_ack_parent - Acknowledge the parent interrupt 1383 * @data: Pointer to interrupt specific data 1384 */ 1385 void irq_chip_ack_parent(struct irq_data *data) 1386 { 1387 data = data->parent_data; 1388 data->chip->irq_ack(data); 1389 } 1390 EXPORT_SYMBOL_GPL(irq_chip_ack_parent); 1391 1392 /** 1393 * irq_chip_mask_parent - Mask the parent interrupt 1394 * @data: Pointer to interrupt specific data 1395 */ 1396 void irq_chip_mask_parent(struct irq_data *data) 1397 { 1398 data = data->parent_data; 1399 data->chip->irq_mask(data); 1400 } 1401 EXPORT_SYMBOL_GPL(irq_chip_mask_parent); 1402 1403 /** 1404 * irq_chip_mask_ack_parent - Mask and acknowledge the parent interrupt 1405 * @data: Pointer to interrupt specific data 1406 */ 1407 void irq_chip_mask_ack_parent(struct irq_data *data) 1408 { 1409 data = data->parent_data; 1410 data->chip->irq_mask_ack(data); 1411 } 1412 EXPORT_SYMBOL_GPL(irq_chip_mask_ack_parent); 1413 1414 /** 1415 * irq_chip_unmask_parent - Unmask the parent interrupt 1416 * @data: Pointer to interrupt specific data 1417 */ 1418 void irq_chip_unmask_parent(struct irq_data *data) 1419 { 1420 data = data->parent_data; 1421 data->chip->irq_unmask(data); 1422 } 1423 EXPORT_SYMBOL_GPL(irq_chip_unmask_parent); 1424 1425 /** 1426 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt 1427 * @data: Pointer to interrupt specific data 1428 */ 1429 void irq_chip_eoi_parent(struct irq_data *data) 1430 { 1431 data = data->parent_data; 1432 data->chip->irq_eoi(data); 1433 } 1434 EXPORT_SYMBOL_GPL(irq_chip_eoi_parent); 1435 1436 /** 1437 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt 1438 * @data: Pointer to interrupt specific data 1439 * @dest: The affinity mask to set 1440 * @force: Flag to enforce setting (disable online checks) 1441 * 1442 * Conditional, as the underlying parent chip might not implement it. 
1443 */ 1444 int irq_chip_set_affinity_parent(struct irq_data *data, 1445 const struct cpumask *dest, bool force) 1446 { 1447 data = data->parent_data; 1448 if (data->chip->irq_set_affinity) 1449 return data->chip->irq_set_affinity(data, dest, force); 1450 1451 return -ENOSYS; 1452 } 1453 EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent); 1454 1455 /** 1456 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt 1457 * @data: Pointer to interrupt specific data 1458 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h 1459 * 1460 * Conditional, as the underlying parent chip might not implement it. 1461 */ 1462 int irq_chip_set_type_parent(struct irq_data *data, unsigned int type) 1463 { 1464 data = data->parent_data; 1465 1466 if (data->chip->irq_set_type) 1467 return data->chip->irq_set_type(data, type); 1468 1469 return -ENOSYS; 1470 } 1471 EXPORT_SYMBOL_GPL(irq_chip_set_type_parent); 1472 1473 /** 1474 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware 1475 * @data: Pointer to interrupt specific data 1476 * 1477 * Iterate through the domain hierarchy of the interrupt and check 1478 * whether a hw retrigger function exists. If yes, invoke it. 1479 */ 1480 int irq_chip_retrigger_hierarchy(struct irq_data *data) 1481 { 1482 for (data = data->parent_data; data; data = data->parent_data) 1483 if (data->chip && data->chip->irq_retrigger) 1484 return data->chip->irq_retrigger(data); 1485 1486 return 0; 1487 } 1488 EXPORT_SYMBOL_GPL(irq_chip_retrigger_hierarchy); 1489 1490 /** 1491 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt 1492 * @data: Pointer to interrupt specific data 1493 * @vcpu_info: The vcpu affinity information 1494 */ 1495 int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info) 1496 { 1497 data = data->parent_data; 1498 if (data->chip->irq_set_vcpu_affinity) 1499 return data->chip->irq_set_vcpu_affinity(data, vcpu_info); 1500 1501 return -ENOSYS; 1502 } 1503 EXPORT_SYMBOL_GPL(irq_chip_set_vcpu_affinity_parent); 1504 /** 1505 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt 1506 * @data: Pointer to interrupt specific data 1507 * @on: Whether to set or reset the wake-up capability of this irq 1508 * 1509 * Conditional, as the underlying parent chip might not implement it. 
1510 */ 1511 int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on) 1512 { 1513 data = data->parent_data; 1514 1515 if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE) 1516 return 0; 1517 1518 if (data->chip->irq_set_wake) 1519 return data->chip->irq_set_wake(data, on); 1520 1521 return -ENOSYS; 1522 } 1523 EXPORT_SYMBOL_GPL(irq_chip_set_wake_parent); 1524 1525 /** 1526 * irq_chip_request_resources_parent - Request resources on the parent interrupt 1527 * @data: Pointer to interrupt specific data 1528 */ 1529 int irq_chip_request_resources_parent(struct irq_data *data) 1530 { 1531 data = data->parent_data; 1532 1533 if (data->chip->irq_request_resources) 1534 return data->chip->irq_request_resources(data); 1535 1536 /* no error on missing optional irq_chip::irq_request_resources */ 1537 return 0; 1538 } 1539 EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent); 1540 1541 /** 1542 * irq_chip_release_resources_parent - Release resources on the parent interrupt 1543 * @data: Pointer to interrupt specific data 1544 */ 1545 void irq_chip_release_resources_parent(struct irq_data *data) 1546 { 1547 data = data->parent_data; 1548 if (data->chip->irq_release_resources) 1549 data->chip->irq_release_resources(data); 1550 } 1551 EXPORT_SYMBOL_GPL(irq_chip_release_resources_parent); 1552 #endif 1553 1554 /** 1555 * irq_chip_compose_msi_msg - Compose msi message for a irq chip 1556 * @data: Pointer to interrupt specific data 1557 * @msg: Pointer to the MSI message 1558 * 1559 * For hierarchical domains we find the first chip in the hierarchy 1560 * which implements the irq_compose_msi_msg callback. For non 1561 * hierarchical we use the top level chip. 1562 */ 1563 int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) 1564 { 1565 struct irq_data *pos; 1566 1567 for (pos = NULL; !pos && data; data = irqd_get_parent_data(data)) { 1568 if (data->chip && data->chip->irq_compose_msi_msg) 1569 pos = data; 1570 } 1571 1572 if (!pos) 1573 return -ENOSYS; 1574 1575 pos->chip->irq_compose_msi_msg(pos, msg); 1576 return 0; 1577 } 1578 1579 static struct device *irq_get_pm_device(struct irq_data *data) 1580 { 1581 if (data->domain) 1582 return data->domain->pm_dev; 1583 1584 return NULL; 1585 } 1586 1587 /** 1588 * irq_chip_pm_get - Enable power for an IRQ chip 1589 * @data: Pointer to interrupt specific data 1590 * 1591 * Enable the power to the IRQ chip referenced by the interrupt data 1592 * structure. 1593 */ 1594 int irq_chip_pm_get(struct irq_data *data) 1595 { 1596 struct device *dev = irq_get_pm_device(data); 1597 int retval = 0; 1598 1599 if (IS_ENABLED(CONFIG_PM) && dev) 1600 retval = pm_runtime_resume_and_get(dev); 1601 1602 return retval; 1603 } 1604 1605 /** 1606 * irq_chip_pm_put - Disable power for an IRQ chip 1607 * @data: Pointer to interrupt specific data 1608 * 1609 * Disable the power to the IRQ chip referenced by the interrupt data 1610 * structure, belongs. Note that power will only be disabled, once this 1611 * function has been called for all IRQs that have called irq_chip_pm_get(). 1612 */ 1613 int irq_chip_pm_put(struct irq_data *data) 1614 { 1615 struct device *dev = irq_get_pm_device(data); 1616 int retval = 0; 1617 1618 if (IS_ENABLED(CONFIG_PM) && dev) 1619 retval = pm_runtime_put(dev); 1620 1621 return (retval < 0) ? retval : 0; 1622 } 1623