Lines matching +full:on +full:- +full:chip in kernel/irq/chip.c
1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
4 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
6 * This file contains the core interrupt handling code, for irq-chip based
8 * Documentation/core-api/genericirq.rst
29 * Chained handlers should never call action on their IRQ. This default
37 * irq_set_chip - set the irq chip for an irq
39 * @chip: pointer to irq chip description structure
41 int irq_set_chip(unsigned int irq, const struct irq_chip *chip) in irq_set_chip() argument
43 int ret = -EINVAL; in irq_set_chip()
46 scoped_irqdesc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip); in irq_set_chip()
57 * irq_set_irq_type - set the irq trigger type for an irq
59 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
65 return -EINVAL; in irq_set_irq_type()
70 * irq_set_handler_data - set irq handler data for an irq
79 scoped_irqdesc->irq_common_data.handler_data = data; in irq_set_handler_data()
82 return -EINVAL; in irq_set_handler_data()
87 * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
97 scoped_irqdesc->irq_common_data.msi_desc = entry; in irq_set_msi_desc_off()
99 entry->irq = irq_base; in irq_set_msi_desc_off()
102 return -EINVAL; in irq_set_msi_desc_off()
106 * irq_set_msi_desc - set MSI descriptor data for an irq
118 * irq_set_chip_data - set irq chip data for an irq
120 * @data: Pointer to chip specific data
122 * Set the hardware irq chip data for an irq
127 scoped_irqdesc->irq_data.chip_data = data; in irq_set_chip_data()
130 return -EINVAL; in irq_set_chip_data()
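
The setters in this group (irq_set_chip(), irq_set_chip_data() and friends) are typically called by an irqchip driver from its irq_domain ->map() callback. A minimal sketch, assuming a hypothetical my_chip; the genirq calls are the ones listed above plus irq_set_handler():

#include <linux/irq.h>
#include <linux/irqdomain.h>

static struct irq_chip my_chip = {		/* hypothetical chip */
	.name	= "my-chip",
};

static int my_domain_map(struct irq_domain *d, unsigned int virq,
			 irq_hw_number_t hwirq)
{
	irq_set_chip(virq, &my_chip);
	irq_set_chip_data(virq, d->host_data);	/* driver state for the chip callbacks */
	irq_set_handler(virq, handle_level_irq);
	return 0;
}

The combined irq_set_chip_and_handler_name() shown further down does the chip+handler part in a single call.
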
138 return desc ? &desc->irq_data : NULL; in irq_get_irq_data()
144 irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED); in irq_state_clr_disabled()
149 irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED); in irq_state_clr_masked()
154 irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED); in irq_state_clr_started()
159 irqd_set(&desc->irq_data, IRQD_IRQ_STARTED); in irq_state_set_started()
182 * Catch code which fiddles with enable_irq() on a managed in __irq_startup_managed()
184 * installment or irq auto probing should not happen on in __irq_startup_managed()
211 * Clear managed-shutdown flag, so we don't repeat managed-startup for in irq_startup_managed()
221 desc->depth--; in irq_startup_managed()
222 if (!desc->depth) in irq_startup_managed()
237 if (!irqd_irq_disabled(&desc->irq_data)) { in irq_enable()
241 if (desc->irq_data.chip->irq_enable) { in irq_enable()
242 desc->irq_data.chip->irq_enable(&desc->irq_data); in irq_enable()
258 if (d->chip->irq_startup) { in __irq_startup()
259 ret = d->chip->irq_startup(d); in __irq_startup()
275 desc->depth = 0; in irq_startup()
282 if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP) in irq_startup()
285 if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)) in irq_startup()
293 desc->depth = 1; in irq_startup()
324 if (irqd_is_started(&desc->irq_data)) { in irq_shutdown()
327 * Increment disable depth, so that a managed shutdown on in irq_shutdown()
331 desc->depth++; in irq_shutdown()
333 if (desc->irq_data.chip->irq_shutdown) { in irq_shutdown()
334 desc->irq_data.chip->irq_shutdown(&desc->irq_data); in irq_shutdown()
354 irq_domain_deactivate_irq(&desc->irq_data); in irq_shutdown_and_deactivate()
359 if (irqd_irq_disabled(&desc->irq_data)) { in __irq_disable()
364 if (desc->irq_data.chip->irq_disable) { in __irq_disable()
365 desc->irq_data.chip->irq_disable(&desc->irq_data); in __irq_disable()
374 * irq_disable - Mark interrupt disabled
377 * If the chip does not implement the irq_disable callback, we
386 * If the interrupt chip does not implement the irq_disable callback,
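
irq_disable() and the depth handling around it back the driver-facing disable_irq()/enable_irq() pair, which nests: the line is only re-enabled once the disable count drops back to zero. A short sketch of that contract (my_irq is hypothetical):

#include <linux/interrupt.h>

static void my_quiesce_and_resume(unsigned int my_irq)
{
	disable_irq(my_irq);	/* depth 0 -> 1: line disabled (possibly lazily) */
	disable_irq(my_irq);	/* depth 1 -> 2: nothing further happens in hardware */

	enable_irq(my_irq);	/* depth 2 -> 1: still disabled */
	enable_irq(my_irq);	/* depth 1 -> 0: irq_enable() runs, line is live again */
}
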
400 if (desc->irq_data.chip->irq_enable) in irq_percpu_enable()
401 desc->irq_data.chip->irq_enable(&desc->irq_data); in irq_percpu_enable()
403 desc->irq_data.chip->irq_unmask(&desc->irq_data); in irq_percpu_enable()
404 cpumask_set_cpu(cpu, desc->percpu_enabled); in irq_percpu_enable()
409 if (desc->irq_data.chip->irq_disable) in irq_percpu_disable()
410 desc->irq_data.chip->irq_disable(&desc->irq_data); in irq_percpu_disable()
412 desc->irq_data.chip->irq_mask(&desc->irq_data); in irq_percpu_disable()
413 cpumask_clear_cpu(cpu, desc->percpu_enabled); in irq_percpu_disable()
418 if (desc->irq_data.chip->irq_mask_ack) { in mask_ack_irq()
419 desc->irq_data.chip->irq_mask_ack(&desc->irq_data); in mask_ack_irq()
423 if (desc->irq_data.chip->irq_ack) in mask_ack_irq()
424 desc->irq_data.chip->irq_ack(&desc->irq_data); in mask_ack_irq()
430 if (irqd_irq_masked(&desc->irq_data)) in mask_irq()
433 if (desc->irq_data.chip->irq_mask) { in mask_irq()
434 desc->irq_data.chip->irq_mask(&desc->irq_data); in mask_irq()
441 if (!irqd_irq_masked(&desc->irq_data)) in unmask_irq()
444 if (desc->irq_data.chip->irq_unmask) { in unmask_irq()
445 desc->irq_data.chip->irq_unmask(&desc->irq_data); in unmask_irq()
452 struct irq_chip *chip = desc->irq_data.chip; in unmask_threaded_irq() local
454 if (chip->flags & IRQCHIP_EOI_THREADED) in unmask_threaded_irq()
455 chip->irq_eoi(&desc->irq_data); in unmask_threaded_irq()
465 raw_spin_unlock(&desc->lock); in irq_wait_on_inprogress()
466 while (irqd_irq_inprogress(&desc->irq_data)) in irq_wait_on_inprogress()
468 raw_spin_lock(&desc->lock); in irq_wait_on_inprogress()
469 } while (irqd_irq_inprogress(&desc->irq_data)); in irq_wait_on_inprogress()
472 return !irqd_irq_disabled(&desc->irq_data) && desc->action; in irq_wait_on_inprogress()
479 struct irq_data *irqd = &desc->irq_data; in irq_can_handle_pm()
499 /* Check whether the interrupt is polled on another CPU */ in irq_can_handle_pm()
500 if (unlikely(desc->istate & IRQS_POLL_INPROGRESS)) { in irq_can_handle_pm()
502 "irq poll in progress on cpu %d for irq %d\n", in irq_can_handle_pm()
503 smp_processor_id(), desc->irq_data.irq)) in irq_can_handle_pm()
510 !irqd_is_single_target(irqd) || desc->handle_irq != handle_edge_irq) in irq_can_handle_pm()
515 * interrupt is currently handled on the previous target CPU, then in irq_can_handle_pm()
517 * interrupts the handler might get stuck on the previous target: in irq_can_handle_pm()
546 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); in irq_can_handle_actions()
548 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { in irq_can_handle_actions()
549 desc->istate |= IRQS_PENDING; in irq_can_handle_actions()
 564  * handle_nested_irq - Handle a nested irq from an irq thread
579 scoped_guard(raw_spinlock_irq, &desc->lock) { in handle_nested_irq()
583 action = desc->action; in handle_nested_irq()
585 atomic_inc(&desc->threads_active); in handle_nested_irq()
590 action_ret |= action->thread_fn(action->irq, action->dev_id); in handle_nested_irq()
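
handle_nested_irq() is meant to be called from the threaded handler of a demultiplexing parent (an I2C GPIO expander is the classic case); only the thread_fn of the nested action runs, as the loop above shows. A sketch under those assumptions; my_expander and its status read are hypothetical, handle_nested_irq() and irq_find_mapping() are the real API:

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

struct my_expander {				/* hypothetical device state */
	struct irq_domain *domain;
};

static unsigned long my_expander_read_pending(struct my_expander *chip)
{
	return 0;	/* hypothetical: would read the expander's status register */
}

/* thread_fn of the parent interrupt, requested with IRQF_ONESHOT */
static irqreturn_t my_expander_irq_thread(int irq, void *dev_id)
{
	struct my_expander *chip = dev_id;
	unsigned long pending = my_expander_read_pending(chip);
	int bit;

	for_each_set_bit(bit, &pending, 8)
		handle_nested_irq(irq_find_mapping(chip->domain, bit));

	return IRQ_HANDLED;
}

The child lines themselves are marked with irq_set_nested_thread(virq, true) so that requesting a threaded handler on them does not spawn another thread.
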
600 * handle_simple_irq - Simple and software-decoded IRQs.
612 guard(raw_spinlock)(&desc->lock); in handle_simple_irq()
615 if (irqd_needs_resend_when_in_progress(&desc->irq_data)) in handle_simple_irq()
616 desc->istate |= IRQS_PENDING; in handle_simple_irq()
629 * handle_untracked_irq - Simple and software-decoded IRQs.
643 scoped_guard(raw_spinlock, &desc->lock) { in handle_untracked_irq()
647 desc->istate &= ~IRQS_PENDING; in handle_untracked_irq()
648 irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); in handle_untracked_irq()
653 scoped_guard(raw_spinlock, &desc->lock) in handle_untracked_irq()
654 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); in handle_untracked_irq()
666 * - Standard level irq (IRQF_ONESHOT is not set) in cond_unmask_irq()
667 * - Oneshot irq which did not wake the thread (caused by a in cond_unmask_irq()
671 if (!irqd_irq_disabled(&desc->irq_data) && in cond_unmask_irq()
672 irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) in cond_unmask_irq()
677 * handle_level_irq - Level type irq handler
687 guard(raw_spinlock)(&desc->lock); in handle_level_irq()
700 static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip) in cond_unmask_eoi_irq() argument
702 if (!(desc->istate & IRQS_ONESHOT)) { in cond_unmask_eoi_irq()
703 chip->irq_eoi(&desc->irq_data); in cond_unmask_eoi_irq()
708 * - Oneshot irq which did not wake the thread (caused by a in cond_unmask_eoi_irq()
712 if (!irqd_irq_disabled(&desc->irq_data) && in cond_unmask_eoi_irq()
713 irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) { in cond_unmask_eoi_irq()
714 chip->irq_eoi(&desc->irq_data); in cond_unmask_eoi_irq()
716 } else if (!(chip->flags & IRQCHIP_EOI_THREADED)) { in cond_unmask_eoi_irq()
717 chip->irq_eoi(&desc->irq_data); in cond_unmask_eoi_irq()
721 static inline void cond_eoi_irq(struct irq_chip *chip, struct irq_data *data) in cond_eoi_irq() argument
723 if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) in cond_eoi_irq()
724 chip->irq_eoi(data); in cond_eoi_irq()
728 * handle_fasteoi_irq - irq handler for transparent controllers
731 * Only a single callback will be issued to the chip: an ->eoi() call when
738 struct irq_chip *chip = desc->irq_data.chip; in handle_fasteoi_irq() local
740 guard(raw_spinlock)(&desc->lock); in handle_fasteoi_irq()
744 * can arrive on the new CPU before the original CPU has completed in handle_fasteoi_irq()
745 * handling the previous one - it may need to be resent. in handle_fasteoi_irq()
748 if (irqd_needs_resend_when_in_progress(&desc->irq_data)) in handle_fasteoi_irq()
749 desc->istate |= IRQS_PENDING; in handle_fasteoi_irq()
750 cond_eoi_irq(chip, &desc->irq_data); in handle_fasteoi_irq()
756 cond_eoi_irq(chip, &desc->irq_data); in handle_fasteoi_irq()
761 if (desc->istate & IRQS_ONESHOT) in handle_fasteoi_irq()
766 cond_unmask_eoi_irq(desc, chip); in handle_fasteoi_irq()
771 if (unlikely(desc->istate & IRQS_PENDING)) in handle_fasteoi_irq()
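
handle_fasteoi_irq() suits controllers that only need a final ->irq_eoi() from the flow handler (GIC-style). A sketch of a chip wired up for it; everything prefixed my_ is hypothetical, while the chip fields, the IRQCHIP_EOI_IF_HANDLED flag handled by cond_eoi_irq() above and the flow handler are real:

#include <linux/irq.h>

static void my_eoi(struct irq_data *d)
{
	/* hypothetical: write the controller's end-of-interrupt register */
}

static struct irq_chip my_fasteoi_chip = {
	.name		= "my-fasteoi",
	.irq_eoi	= my_eoi,
	.flags		= IRQCHIP_EOI_IF_HANDLED,	/* skip the EOI for unhandled irqs */
};

The flow handler is then installed with irq_set_chip_and_handler(virq, &my_fasteoi_chip, handle_fasteoi_irq).
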
777 * handle_fasteoi_nmi - irq handler for NMI interrupt lines
780 * A simple NMI-safe handler, considering the restrictions
783 * Only a single callback will be issued to the chip: an ->eoi()
790 struct irq_chip *chip = irq_desc_get_chip(desc); in handle_fasteoi_nmi() local
791 struct irqaction *action = desc->action; in handle_fasteoi_nmi()
801 res = action->handler(irq, action->dev_id); in handle_fasteoi_nmi()
804 if (chip->irq_eoi) in handle_fasteoi_nmi()
805 chip->irq_eoi(&desc->irq_data); in handle_fasteoi_nmi()
810 * handle_edge_irq - edge type IRQ handler
813 * Interrupt occurs on the falling and/or rising edge of a hardware
816 * can happen on the same source even before the first one is handled by
818 * disable (mask) the interrupt depending on the controller hardware. This
825 guard(raw_spinlock)(&desc->lock); in handle_edge_irq()
828 desc->istate |= IRQS_PENDING; in handle_edge_irq()
836 desc->irq_data.chip->irq_ack(&desc->irq_data); in handle_edge_irq()
839 if (unlikely(!desc->action)) { in handle_edge_irq()
849 if (unlikely(desc->istate & IRQS_PENDING)) { in handle_edge_irq()
850 if (!irqd_irq_disabled(&desc->irq_data) && in handle_edge_irq()
851 irqd_irq_masked(&desc->irq_data)) in handle_edge_irq()
857 } while ((desc->istate & IRQS_PENDING) && !irqd_irq_disabled(&desc->irq_data)); in handle_edge_irq()
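
When a controller supports both trigger modes, handle_level_irq() and handle_edge_irq() are usually swapped at runtime from the chip's ->irq_set_type() callback rather than fixed at map time. A sketch of that pattern; programming the hardware is left hypothetical, irq_set_handler_locked() and the two flow handlers are the real API:

#include <linux/irq.h>

static int my_set_type(struct irq_data *d, unsigned int type)
{
	/* hypothetical: program the trigger mode into the controller here */

	if (type & IRQ_TYPE_LEVEL_MASK)
		irq_set_handler_locked(d, handle_level_irq);
	else
		irq_set_handler_locked(d, handle_edge_irq);

	return IRQ_SET_MASK_OK;
}
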
862 * handle_percpu_irq - Per CPU local irq handler
865 * Per CPU interrupts on SMP machines without locking requirements
869 struct irq_chip *chip = irq_desc_get_chip(desc); in handle_percpu_irq() local
873 * desc->tot_count. in handle_percpu_irq()
877 if (chip->irq_ack) in handle_percpu_irq()
878 chip->irq_ack(&desc->irq_data); in handle_percpu_irq()
882 if (chip->irq_eoi) in handle_percpu_irq()
883 chip->irq_eoi(&desc->irq_data); in handle_percpu_irq()
887 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
890 * Per CPU interrupts on SMP machines without locking requirements. Same as
893 * action->percpu_dev_id is a pointer to percpu variables which
894 * contain the real device id for the cpu on which this handler is
899 struct irq_chip *chip = irq_desc_get_chip(desc); in handle_percpu_devid_irq() local
900 struct irqaction *action = desc->action; in handle_percpu_devid_irq()
906 * desc->tot_count. in handle_percpu_devid_irq()
910 if (chip->irq_ack) in handle_percpu_devid_irq()
911 chip->irq_ack(&desc->irq_data); in handle_percpu_devid_irq()
915 res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id)); in handle_percpu_devid_irq()
919 bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled); in handle_percpu_devid_irq()
924 pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n", in handle_percpu_devid_irq()
928 if (chip->irq_eoi) in handle_percpu_devid_irq()
929 chip->irq_eoi(&desc->irq_data); in handle_percpu_devid_irq()
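
handle_percpu_devid_irq() pairs with request_percpu_irq(): every CPU gets its own dev_id through the percpu pointer (the raw_cpu_ptr() dereference above), and each CPU enables its copy of the line locally. A consumer-side sketch; struct my_evt and the handler body are hypothetical:

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>

struct my_evt { int count; };			/* hypothetical per-CPU state */
static DEFINE_PER_CPU(struct my_evt, my_evt);

static irqreturn_t my_percpu_handler(int irq, void *dev_id)
{
	struct my_evt *evt = dev_id;		/* this CPU's instance */

	evt->count++;
	return IRQ_HANDLED;
}

static int my_percpu_setup(unsigned int virq)
{
	int ret = request_percpu_irq(virq, my_percpu_handler, "my-evt", &my_evt);

	if (ret)
		return ret;

	/* must run on each CPU that should receive the interrupt */
	enable_percpu_irq(virq, IRQ_TYPE_NONE);
	return 0;
}

On the irqchip side the line is prepared with irq_set_percpu_devid(virq) and irq_set_chip_and_handler(virq, &chip, handle_percpu_devid_irq).
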
933 * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
942 struct irq_chip *chip = irq_desc_get_chip(desc); in handle_percpu_devid_fasteoi_nmi() local
943 struct irqaction *action = desc->action; in handle_percpu_devid_fasteoi_nmi()
950 res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id)); in handle_percpu_devid_fasteoi_nmi()
953 if (chip->irq_eoi) in handle_percpu_devid_fasteoi_nmi()
954 chip->irq_eoi(&desc->irq_data); in handle_percpu_devid_fasteoi_nmi()
964 struct irq_data *irq_data = &desc->irq_data; in __irq_do_set_handler()
968 * situation where the outermost chip is not yet set in __irq_do_set_handler()
974 if (irq_data->chip != &no_irq_chip) in __irq_do_set_handler()
977 * Bail out if the outer chip is not set up in __irq_do_set_handler()
984 irq_data = irq_data->parent_data; in __irq_do_set_handler()
987 if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip)) in __irq_do_set_handler()
993 if (desc->irq_data.chip != &no_irq_chip) in __irq_do_set_handler()
997 desc->action = NULL; in __irq_do_set_handler()
1000 desc->depth = 1; in __irq_do_set_handler()
1002 desc->handle_irq = handle; in __irq_do_set_handler()
1003 desc->name = name; in __irq_do_set_handler()
1006 unsigned int type = irqd_get_trigger_type(&desc->irq_data); in __irq_do_set_handler()
1018 desc->handle_irq = handle; in __irq_do_set_handler()
1024 desc->action = &chained_action; in __irq_do_set_handler()
1044 desc->irq_common_data.handler_data = data; in irq_set_chained_handler_and_data()
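
irq_set_chained_handler_and_data() installs a chained (non-threaded, no irqaction) demultiplex handler on the parent line; __irq_do_set_handler() above is what marks it started and noprobe. A sketch of the usual pattern, assuming a hypothetical secondary controller with its own irq_domain; chained_irq_enter()/chained_irq_exit() and generic_handle_domain_irq() are the real helpers:

#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>

struct my_mux {					/* hypothetical demux state */
	struct irq_domain *domain;
};

static void my_mux_cascade(struct irq_desc *desc)
{
	struct my_mux *mux = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long pending = 0;		/* hypothetical: read the mux status here */
	int hwirq;

	chained_irq_enter(chip, desc);		/* ack/mask the parent as its flow requires */

	for_each_set_bit(hwirq, &pending, 32)
		generic_handle_domain_irq(mux->domain, hwirq);

	chained_irq_exit(chip, desc);		/* eoi/unmask the parent */
}

At probe time the driver installs it with irq_set_chained_handler_and_data(parent_irq, my_mux_cascade, mux); child lines behind such a mux are often mapped with irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq).
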
1051 irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip, in irq_set_chip_and_handler_name() argument
1054 irq_set_chip(irq, chip); in irq_set_chip_and_handler_name()
1065 * Warn when a driver sets the no autoenable flag on an already in irq_modify_status()
1068 WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN)); in irq_modify_status()
1072 trigger = irqd_get_trigger_type(&desc->irq_data); in irq_modify_status()
1074 irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU | in irq_modify_status()
1077 irqd_set(&desc->irq_data, IRQD_NO_BALANCING); in irq_modify_status()
1079 irqd_set(&desc->irq_data, IRQD_PER_CPU); in irq_modify_status()
1081 irqd_set(&desc->irq_data, IRQD_LEVEL); in irq_modify_status()
1087 irqd_set(&desc->irq_data, trigger); in irq_modify_status()
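
irq_modify_status() is normally reached through the irq_set_status_flags()/irq_clear_status_flags() wrappers; the warning above catches a driver setting IRQ_NOAUTOEN on a line that is already enabled. A sketch of the intended order (the "my-dev" naming is hypothetical):

#include <linux/interrupt.h>
#include <linux/irq.h>

static int my_request_irq_off(unsigned int irq, irq_handler_t handler, void *dev)
{
	int ret;

	/* keep the line off across request_irq(); it is enabled explicitly below */
	irq_set_status_flags(irq, IRQ_NOAUTOEN);

	ret = request_irq(irq, handler, 0, "my-dev", dev);
	if (ret)
		return ret;

	/* ... finish hardware setup ... */

	enable_irq(irq);	/* depth goes 1 -> 0 and the line starts */
	return 0;
}
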
1094 * irq_cpu_online - Invoke all irq_cpu_online functions.
1096 * Iterate through all irqs and invoke the chip.irq_cpu_online()
1105 struct irq_chip *chip; in irq_cpu_online() local
1110 guard(raw_spinlock_irqsave)(&desc->lock); in irq_cpu_online()
1111 chip = irq_data_get_irq_chip(&desc->irq_data); in irq_cpu_online()
1112 if (chip && chip->irq_cpu_online && in irq_cpu_online()
1113 (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || in irq_cpu_online()
1114 !irqd_irq_disabled(&desc->irq_data))) in irq_cpu_online()
1115 chip->irq_cpu_online(&desc->irq_data); in irq_cpu_online()
1120 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
1122 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
1131 struct irq_chip *chip; in irq_cpu_offline() local
1136 guard(raw_spinlock_irqsave)(&desc->lock); in irq_cpu_offline()
1137 chip = irq_data_get_irq_chip(&desc->irq_data); in irq_cpu_offline()
1138 if (chip && chip->irq_cpu_offline && in irq_cpu_offline()
1139 (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || in irq_cpu_offline()
1140 !irqd_irq_disabled(&desc->irq_data))) in irq_cpu_offline()
1141 chip->irq_cpu_offline(&desc->irq_data); in irq_cpu_offline()
1150 * handle_fasteoi_ack_irq - irq handler for edge hierarchy stacked on
1156 * also needs to have its ->irq_ack() function called.
1160 struct irq_chip *chip = desc->irq_data.chip; in handle_fasteoi_ack_irq() local
1162 guard(raw_spinlock)(&desc->lock); in handle_fasteoi_ack_irq()
1165 cond_eoi_irq(chip, &desc->irq_data); in handle_fasteoi_ack_irq()
1171 cond_eoi_irq(chip, &desc->irq_data); in handle_fasteoi_ack_irq()
1176 if (desc->istate & IRQS_ONESHOT) in handle_fasteoi_ack_irq()
1179 desc->irq_data.chip->irq_ack(&desc->irq_data); in handle_fasteoi_ack_irq()
1183 cond_unmask_eoi_irq(desc, chip); in handle_fasteoi_ack_irq()
1188 * handle_fasteoi_mask_irq - irq handler for level hierarchy stacked on
1194 * also needs to have its ->irq_mask_ack() function called.
1198 struct irq_chip *chip = desc->irq_data.chip; in handle_fasteoi_mask_irq() local
1200 guard(raw_spinlock)(&desc->lock); in handle_fasteoi_mask_irq()
1204 cond_eoi_irq(chip, &desc->irq_data); in handle_fasteoi_mask_irq()
1212 cond_unmask_eoi_irq(desc, chip); in handle_fasteoi_mask_irq()
1219 * irq_chip_set_parent_state - set the state of a parent interrupt.
1231 data = data->parent_data; in irq_chip_set_parent_state()
1233 if (!data || !data->chip->irq_set_irqchip_state) in irq_chip_set_parent_state()
1236 return data->chip->irq_set_irqchip_state(data, which, val); in irq_chip_set_parent_state()
1241 * irq_chip_get_parent_state - get the state of a parent interrupt.
1253 data = data->parent_data; in irq_chip_get_parent_state()
1255 if (!data || !data->chip->irq_get_irqchip_state) in irq_chip_get_parent_state()
1258 return data->chip->irq_get_irqchip_state(data, which, state); in irq_chip_get_parent_state()
1263 * irq_chip_shutdown_parent - Shutdown the parent interrupt
1271 struct irq_data *parent = data->parent_data; in irq_chip_shutdown_parent()
1273 if (parent->chip->irq_shutdown) in irq_chip_shutdown_parent()
1274 parent->chip->irq_shutdown(parent); in irq_chip_shutdown_parent()
1281 * irq_chip_startup_parent - Startup the parent interrupt
1289 struct irq_data *parent = data->parent_data; in irq_chip_startup_parent()
1291 if (parent->chip->irq_startup) in irq_chip_startup_parent()
1292 return parent->chip->irq_startup(parent); in irq_chip_startup_parent()
1300 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
1306 data = data->parent_data; in irq_chip_enable_parent()
1307 if (data->chip->irq_enable) in irq_chip_enable_parent()
1308 data->chip->irq_enable(data); in irq_chip_enable_parent()
1310 data->chip->irq_unmask(data); in irq_chip_enable_parent()
1315 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
1321 data = data->parent_data; in irq_chip_disable_parent()
1322 if (data->chip->irq_disable) in irq_chip_disable_parent()
1323 data->chip->irq_disable(data); in irq_chip_disable_parent()
1325 data->chip->irq_mask(data); in irq_chip_disable_parent()
1330 * irq_chip_ack_parent - Acknowledge the parent interrupt
1335 data = data->parent_data; in irq_chip_ack_parent()
1336 data->chip->irq_ack(data); in irq_chip_ack_parent()
1341 * irq_chip_mask_parent - Mask the parent interrupt
1346 data = data->parent_data; in irq_chip_mask_parent()
1347 data->chip->irq_mask(data); in irq_chip_mask_parent()
1352 * irq_chip_mask_ack_parent - Mask and acknowledge the parent interrupt
1357 data = data->parent_data; in irq_chip_mask_ack_parent()
1358 data->chip->irq_mask_ack(data); in irq_chip_mask_ack_parent()
1363 * irq_chip_unmask_parent - Unmask the parent interrupt
1368 data = data->parent_data; in irq_chip_unmask_parent()
1369 data->chip->irq_unmask(data); in irq_chip_unmask_parent()
1374 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
1379 data = data->parent_data; in irq_chip_eoi_parent()
1380 data->chip->irq_eoi(data); in irq_chip_eoi_parent()
1385 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
1390 * Conditional, as the underlying parent chip might not implement it.
1395 data = data->parent_data; in irq_chip_set_affinity_parent()
1396 if (data->chip->irq_set_affinity) in irq_chip_set_affinity_parent()
1397 return data->chip->irq_set_affinity(data, dest, force); in irq_chip_set_affinity_parent()
1399 return -ENOSYS; in irq_chip_set_affinity_parent()
1404 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
1406 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
1408 * Conditional, as the underlying parent chip might not implement it.
1412 data = data->parent_data; in irq_chip_set_type_parent()
1414 if (data->chip->irq_set_type) in irq_chip_set_type_parent()
1415 return data->chip->irq_set_type(data, type); in irq_chip_set_type_parent()
1417 return -ENOSYS; in irq_chip_set_type_parent()
1422 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
1430 for (data = data->parent_data; data; data = data->parent_data) in irq_chip_retrigger_hierarchy()
1431 if (data->chip && data->chip->irq_retrigger) in irq_chip_retrigger_hierarchy()
1432 return data->chip->irq_retrigger(data); in irq_chip_retrigger_hierarchy()
1439 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
1445 data = data->parent_data; in irq_chip_set_vcpu_affinity_parent()
1446 if (data->chip->irq_set_vcpu_affinity) in irq_chip_set_vcpu_affinity_parent()
1447 return data->chip->irq_set_vcpu_affinity(data, vcpu_info); in irq_chip_set_vcpu_affinity_parent()
1449 return -ENOSYS; in irq_chip_set_vcpu_affinity_parent()
1453 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
1455 * @on: Whether to set or reset the wake-up capability of this irq
1457 * Conditional, as the underlying parent chip might not implement it.
1459 int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on) in irq_chip_set_wake_parent() argument
1461 data = data->parent_data; in irq_chip_set_wake_parent()
1463 if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE) in irq_chip_set_wake_parent()
1466 if (data->chip->irq_set_wake) in irq_chip_set_wake_parent()
1467 return data->chip->irq_set_wake(data, on); in irq_chip_set_wake_parent()
1469 return -ENOSYS; in irq_chip_set_wake_parent()
1474 * irq_chip_request_resources_parent - Request resources on the parent interrupt
1479 data = data->parent_data; in irq_chip_request_resources_parent()
1481 if (data->chip->irq_request_resources) in irq_chip_request_resources_parent()
1482 return data->chip->irq_request_resources(data); in irq_chip_request_resources_parent()
1484 /* no error on missing optional irq_chip::irq_request_resources */ in irq_chip_request_resources_parent()
1490 * irq_chip_release_resources_parent - Release resources on the parent interrupt
1495 data = data->parent_data; in irq_chip_release_resources_parent()
1496 if (data->chip->irq_release_resources) in irq_chip_release_resources_parent()
1497 data->chip->irq_release_resources(data); in irq_chip_release_resources_parent()
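
The irq_chip_*_parent() helpers above let a chip in a hierarchical domain (for instance a GPIO or MSI chip stacked on a GIC) forward the operations it does not modify. A sketch of such a child chip; the masking callback and the chip itself are hypothetical, each assigned helper is the real genirq function shown above:

#include <linux/irq.h>

static void my_child_mask(struct irq_data *d)
{
	/* hypothetical: mask at this level ... */
	irq_chip_mask_parent(d);		/* ... then let the parent chip mask too */
}

static struct irq_chip my_child_chip = {
	.name			= "my-child",
	.irq_mask		= my_child_mask,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_wake		= irq_chip_set_wake_parent,
};
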
1503 * irq_chip_compose_msi_msg - Compose msi message for a irq chip
1507 * For hierarchical domains we find the first chip in the hierarchy
1509 * hierarchical we use the top level chip.
1516 if (data->chip && data->chip->irq_compose_msi_msg) in irq_chip_compose_msi_msg()
1521 return -ENOSYS; in irq_chip_compose_msi_msg()
1523 pos->chip->irq_compose_msi_msg(pos, msg); in irq_chip_compose_msi_msg()
1529 if (data->domain) in irq_get_pm_device()
1530 return data->domain->pm_dev; in irq_get_pm_device()
1536 * irq_chip_pm_get - Enable power for an IRQ chip
1539 * Enable the power to the IRQ chip referenced by the interrupt data
1554 * irq_chip_pm_put - Disable power for an IRQ chip
1557 * Disable the power to the IRQ chip referenced by the interrupt data
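
irq_chip_pm_get()/irq_chip_pm_put() are called by the genirq core from the interrupt request/free paths and resolve the device through irq_data->domain->pm_dev, as irq_get_pm_device() above shows. A sketch of how an irqchip driver might opt in, assuming a hypothetical platform driver; my_domain_ops is hypothetical and would carry the usual map/alloc callbacks in a real driver:

#include <linux/errno.h>
#include <linux/irqdomain.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>

static const struct irq_domain_ops my_domain_ops = {
	/* hypothetical: map/alloc callbacks elided */
};

static int my_irqchip_probe(struct platform_device *pdev)
{
	struct irq_domain *domain;

	pm_runtime_enable(&pdev->dev);

	domain = irq_domain_create_linear(dev_fwnode(&pdev->dev), 32,
					  &my_domain_ops, NULL);
	if (!domain)
		return -ENOMEM;

	/* let irq_chip_pm_get()/irq_chip_pm_put() runtime-resume this device */
	domain->pm_dev = &pdev->dev;
	return 0;
}
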