xref: /linux/kernel/irq/chip.c (revision fcab107abe1ab5be9dbe874baa722372da8f4f73)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
4  * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
5  *
6  * This file contains the core interrupt handling code, for irq-chip based
7  * architectures. Detailed information is available in
8  * Documentation/core-api/genericirq.rst
9  */
10 
11 #include <linux/irq.h>
12 #include <linux/msi.h>
13 #include <linux/module.h>
14 #include <linux/interrupt.h>
15 #include <linux/kernel_stat.h>
16 #include <linux/irqdomain.h>
17 
18 #include <trace/events/irq.h>
19 
20 #include "internals.h"
21 
22 static irqreturn_t bad_chained_irq(int irq, void *dev_id)
23 {
24 	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
25 	return IRQ_NONE;
26 }
27 
28 /*
29  * Chained handlers should never call an action handler on their IRQ. This
30  * default action emits a warning if that happens.
31  */
32 struct irqaction chained_action = {
33 	.handler = bad_chained_irq,
34 };
35 
36 /**
37  * irq_set_chip - set the irq chip for an irq
38  * @irq:	irq number
39  * @chip:	pointer to irq chip description structure
40  */
41 int irq_set_chip(unsigned int irq, const struct irq_chip *chip)
42 {
43 	int ret = -EINVAL;
44 
45 	scoped_irqdesc_get_and_lock(irq, 0) {
46 		scoped_irqdesc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip);
47 		ret = 0;
48 	}
49 	/* For !CONFIG_SPARSE_IRQ make the irq show up in allocated_irqs. */
50 	if (!ret)
51 		irq_mark_irq(irq);
52 	return ret;
53 }
54 EXPORT_SYMBOL(irq_set_chip);
55 
56 /**
57  * irq_set_irq_type - set the irq trigger type for an irq
58  * @irq:	irq number
59  * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
60  */
61 int irq_set_irq_type(unsigned int irq, unsigned int type)
62 {
63 	scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL)
64 		return __irq_set_trigger(scoped_irqdesc, type);
65 	return -EINVAL;
66 }
67 EXPORT_SYMBOL(irq_set_irq_type);
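
/*
 * Illustrative sketch (not part of this file): a driver configuring the
 * trigger type of a line before requesting it.  'my_irq', 'my_handler',
 * 'my_dev' and "mydev" are hypothetical.  Most drivers let request_irq()
 * set the trigger via IRQF_TRIGGER_* flags instead; calling
 * irq_set_irq_type() directly is typically done by platform setup code.
 *
 *	ret = irq_set_irq_type(my_irq, IRQ_TYPE_EDGE_RISING);
 *	if (ret)
 *		return ret;
 *	ret = request_irq(my_irq, my_handler, 0, "mydev", my_dev);
 */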
68 
69 /**
70  * irq_set_handler_data - set irq handler data for an irq
71  * @irq:	Interrupt number
72  * @data:	Pointer to interrupt specific data
73  *
74  * Set the per interrupt flow handler data for an irq.
75  */
76 int irq_set_handler_data(unsigned int irq, void *data)
77 {
78 	scoped_irqdesc_get_and_lock(irq, 0) {
79 		scoped_irqdesc->irq_common_data.handler_data = data;
80 		return 0;
81 	}
82 	return -EINVAL;
83 }
84 EXPORT_SYMBOL(irq_set_handler_data);
85 
86 /**
87  * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
88  * @irq_base:	Interrupt number base
89  * @irq_offset:	Interrupt number offset
90  * @entry:		Pointer to MSI descriptor data
91  *
92  * Set the MSI descriptor entry for an irq at offset
93  */
94 int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, struct msi_desc *entry)
95 {
96 	scoped_irqdesc_get_and_lock(irq_base + irq_offset, IRQ_GET_DESC_CHECK_GLOBAL) {
97 		scoped_irqdesc->irq_common_data.msi_desc = entry;
98 		if (entry && !irq_offset)
99 			entry->irq = irq_base;
100 		return 0;
101 	}
102 	return -EINVAL;
103 }
104 
105 /**
106  * irq_set_msi_desc - set MSI descriptor data for an irq
107  * @irq:	Interrupt number
108  * @entry:	Pointer to MSI descriptor data
109  *
110  * Set the MSI descriptor entry for an irq
111  */
112 int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
113 {
114 	return irq_set_msi_desc_off(irq, 0, entry);
115 }
116 
117 /**
118  * irq_set_chip_data - set irq chip data for an irq
119  * @irq:	Interrupt number
120  * @data:	Pointer to chip specific data
121  *
122  * Set the hardware irq chip data for an irq
123  */
124 int irq_set_chip_data(unsigned int irq, void *data)
125 {
126 	scoped_irqdesc_get_and_lock(irq, 0) {
127 		scoped_irqdesc->irq_data.chip_data = data;
128 		return 0;
129 	}
130 	return -EINVAL;
131 }
132 EXPORT_SYMBOL(irq_set_chip_data);
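
/*
 * Illustrative sketch (not part of this file): an irqchip driver attaching
 * per-line chip data and reading it back in a callback.  'struct my_chip',
 * 'my_irq_mask()' and the MY_MASK_SET register are hypothetical assumptions.
 *
 *	irq_set_chip_data(virq, my_chip);
 *
 *	static void my_irq_mask(struct irq_data *d)
 *	{
 *		struct my_chip *mc = irq_data_get_irq_chip_data(d);
 *
 *		writel(BIT(d->hwirq), mc->base + MY_MASK_SET);
 *	}
 */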
133 
134 struct irq_data *irq_get_irq_data(unsigned int irq)
135 {
136 	struct irq_desc *desc = irq_to_desc(irq);
137 
138 	return desc ? &desc->irq_data : NULL;
139 }
140 EXPORT_SYMBOL_GPL(irq_get_irq_data);
141 
142 static void irq_state_clr_disabled(struct irq_desc *desc)
143 {
144 	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
145 }
146 
147 static void irq_state_clr_masked(struct irq_desc *desc)
148 {
149 	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
150 }
151 
152 static void irq_state_clr_started(struct irq_desc *desc)
153 {
154 	irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
155 }
156 
157 static void irq_state_set_started(struct irq_desc *desc)
158 {
159 	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
160 }
161 
162 enum {
163 	IRQ_STARTUP_NORMAL,
164 	IRQ_STARTUP_MANAGED,
165 	IRQ_STARTUP_ABORT,
166 };
167 
168 #ifdef CONFIG_SMP
169 static int
170 __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
171 		      bool force)
172 {
173 	struct irq_data *d = irq_desc_get_irq_data(desc);
174 
175 	if (!irqd_affinity_is_managed(d))
176 		return IRQ_STARTUP_NORMAL;
177 
178 	irqd_clr_managed_shutdown(d);
179 
180 	if (!cpumask_intersects(aff, cpu_online_mask)) {
181 		/*
182 		 * Catch code which fiddles with enable_irq() on a managed
183 		 * and potentially shutdown IRQ. Chained interrupt
184 		 * installment or irq auto probing should not happen on
185 		 * managed irqs either.
186 		 */
187 		if (WARN_ON_ONCE(force))
188 			return IRQ_STARTUP_ABORT;
189 		/*
190 		 * The interrupt was requested, but there is no online CPU
191 		 * in its affinity mask. Put it into managed shutdown
192 		 * state and let the cpu hotplug mechanism start it up once
193 		 * a CPU in the mask becomes available.
194 		 */
195 		return IRQ_STARTUP_ABORT;
196 	}
197 	/*
198 	 * Managed interrupts have reserved resources, so this should not
199 	 * happen.
200 	 */
201 	if (WARN_ON(irq_domain_activate_irq(d, false)))
202 		return IRQ_STARTUP_ABORT;
203 	return IRQ_STARTUP_MANAGED;
204 }
205 
206 void irq_startup_managed(struct irq_desc *desc)
207 {
208 	/*
209 	 * Only start it up when the disable depth is 1, so that a disable,
210 	 * hotunplug, hotplug sequence does not end up enabling it during
211 	 * hotplug unconditionally.
212 	 */
213 	desc->depth--;
214 	if (!desc->depth)
215 		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
216 }
217 
218 #else
219 static __always_inline int
220 __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
221 		      bool force)
222 {
223 	return IRQ_STARTUP_NORMAL;
224 }
225 #endif
226 
227 static void irq_enable(struct irq_desc *desc)
228 {
229 	if (!irqd_irq_disabled(&desc->irq_data)) {
230 		unmask_irq(desc);
231 	} else {
232 		irq_state_clr_disabled(desc);
233 		if (desc->irq_data.chip->irq_enable) {
234 			desc->irq_data.chip->irq_enable(&desc->irq_data);
235 			irq_state_clr_masked(desc);
236 		} else {
237 			unmask_irq(desc);
238 		}
239 	}
240 }
241 
242 static int __irq_startup(struct irq_desc *desc)
243 {
244 	struct irq_data *d = irq_desc_get_irq_data(desc);
245 	int ret = 0;
246 
247 	/* Warn if this interrupt is not activated but try nevertheless */
248 	WARN_ON_ONCE(!irqd_is_activated(d));
249 
250 	if (d->chip->irq_startup) {
251 		ret = d->chip->irq_startup(d);
252 		irq_state_clr_disabled(desc);
253 		irq_state_clr_masked(desc);
254 	} else {
255 		irq_enable(desc);
256 	}
257 	irq_state_set_started(desc);
258 	return ret;
259 }
260 
261 int irq_startup(struct irq_desc *desc, bool resend, bool force)
262 {
263 	struct irq_data *d = irq_desc_get_irq_data(desc);
264 	const struct cpumask *aff = irq_data_get_affinity_mask(d);
265 	int ret = 0;
266 
267 	desc->depth = 0;
268 
269 	if (irqd_is_started(d)) {
270 		irq_enable(desc);
271 	} else {
272 		switch (__irq_startup_managed(desc, aff, force)) {
273 		case IRQ_STARTUP_NORMAL:
274 			if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)
275 				irq_setup_affinity(desc);
276 			ret = __irq_startup(desc);
277 			if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP))
278 				irq_setup_affinity(desc);
279 			break;
280 		case IRQ_STARTUP_MANAGED:
281 			irq_do_set_affinity(d, aff, false);
282 			ret = __irq_startup(desc);
283 			break;
284 		case IRQ_STARTUP_ABORT:
285 			desc->depth = 1;
286 			irqd_set_managed_shutdown(d);
287 			return 0;
288 		}
289 	}
290 	if (resend)
291 		check_irq_resend(desc, false);
292 
293 	return ret;
294 }
295 
296 int irq_activate(struct irq_desc *desc)
297 {
298 	struct irq_data *d = irq_desc_get_irq_data(desc);
299 
300 	if (!irqd_affinity_is_managed(d))
301 		return irq_domain_activate_irq(d, false);
302 	return 0;
303 }
304 
305 int irq_activate_and_startup(struct irq_desc *desc, bool resend)
306 {
307 	if (WARN_ON(irq_activate(desc)))
308 		return 0;
309 	return irq_startup(desc, resend, IRQ_START_FORCE);
310 }
311 
312 static void __irq_disable(struct irq_desc *desc, bool mask);
313 
314 void irq_shutdown(struct irq_desc *desc)
315 {
316 	if (irqd_is_started(&desc->irq_data)) {
317 		clear_irq_resend(desc);
318 		/*
319 		 * Increment disable depth, so that a managed shutdown on
320 		 * CPU hotunplug preserves the actual disabled state when the
321 		 * CPU comes back online. See irq_startup_managed().
322 		 */
323 		desc->depth++;
324 
325 		if (desc->irq_data.chip->irq_shutdown) {
326 			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
327 			irq_state_set_disabled(desc);
328 			irq_state_set_masked(desc);
329 		} else {
330 			__irq_disable(desc, true);
331 		}
332 		irq_state_clr_started(desc);
333 	}
334 }
335 
336 
337 void irq_shutdown_and_deactivate(struct irq_desc *desc)
338 {
339 	irq_shutdown(desc);
340 	/*
341 	 * This must be called even if the interrupt was never started up,
342 	 * because the activation can happen before the interrupt is
343  * available for request/startup. It has its own state tracking so
344 	 * it's safe to call it unconditionally.
345 	 */
346 	irq_domain_deactivate_irq(&desc->irq_data);
347 }
348 
349 static void __irq_disable(struct irq_desc *desc, bool mask)
350 {
351 	if (irqd_irq_disabled(&desc->irq_data)) {
352 		if (mask)
353 			mask_irq(desc);
354 	} else {
355 		irq_state_set_disabled(desc);
356 		if (desc->irq_data.chip->irq_disable) {
357 			desc->irq_data.chip->irq_disable(&desc->irq_data);
358 			irq_state_set_masked(desc);
359 		} else if (mask) {
360 			mask_irq(desc);
361 		}
362 	}
363 }
364 
365 /**
366  * irq_disable - Mark interrupt disabled
367  * @desc:	irq descriptor which should be disabled
368  *
369  * If the chip does not implement the irq_disable callback, we
370  * use a lazy disable approach. That means we mark the interrupt
371  * disabled, but leave the hardware unmasked. That's an
372  * optimization because we avoid the hardware access for the
373  * common case where no interrupt happens after we marked it
374  * disabled. If an interrupt happens, then the interrupt flow
375  * handler masks the line at the hardware level and marks it
376  * pending.
377  *
378  * If the interrupt chip does not implement the irq_disable callback,
379  * a driver can disable the lazy approach for a particular irq line by
380  * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
381  * be used for devices which cannot disable the interrupt at the
382  * device level under certain circumstances and have to use
383  * disable_irq[_nosync] instead.
384  */
385 void irq_disable(struct irq_desc *desc)
386 {
387 	__irq_disable(desc, irq_settings_disable_unlazy(desc));
388 }
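
/*
 * Illustrative sketch (not part of this file): a driver which cannot mask the
 * interrupt at the device level opting out of the lazy disable described
 * above.  'my_irq' is a hypothetical linux irq number.
 *
 *	irq_set_status_flags(my_irq, IRQ_DISABLE_UNLAZY);
 *	...
 *	disable_irq(my_irq);
 *
 * With IRQ_DISABLE_UNLAZY set, disable_irq() masks the line at the irq chip
 * immediately instead of lazily.
 */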
389 
390 void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
391 {
392 	if (desc->irq_data.chip->irq_enable)
393 		desc->irq_data.chip->irq_enable(&desc->irq_data);
394 	else
395 		desc->irq_data.chip->irq_unmask(&desc->irq_data);
396 	cpumask_set_cpu(cpu, desc->percpu_enabled);
397 }
398 
399 void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
400 {
401 	if (desc->irq_data.chip->irq_disable)
402 		desc->irq_data.chip->irq_disable(&desc->irq_data);
403 	else
404 		desc->irq_data.chip->irq_mask(&desc->irq_data);
405 	cpumask_clear_cpu(cpu, desc->percpu_enabled);
406 }
407 
408 static inline void mask_ack_irq(struct irq_desc *desc)
409 {
410 	if (desc->irq_data.chip->irq_mask_ack) {
411 		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
412 		irq_state_set_masked(desc);
413 	} else {
414 		mask_irq(desc);
415 		if (desc->irq_data.chip->irq_ack)
416 			desc->irq_data.chip->irq_ack(&desc->irq_data);
417 	}
418 }
419 
420 void mask_irq(struct irq_desc *desc)
421 {
422 	if (irqd_irq_masked(&desc->irq_data))
423 		return;
424 
425 	if (desc->irq_data.chip->irq_mask) {
426 		desc->irq_data.chip->irq_mask(&desc->irq_data);
427 		irq_state_set_masked(desc);
428 	}
429 }
430 
431 void unmask_irq(struct irq_desc *desc)
432 {
433 	if (!irqd_irq_masked(&desc->irq_data))
434 		return;
435 
436 	if (desc->irq_data.chip->irq_unmask) {
437 		desc->irq_data.chip->irq_unmask(&desc->irq_data);
438 		irq_state_clr_masked(desc);
439 	}
440 }
441 
442 void unmask_threaded_irq(struct irq_desc *desc)
443 {
444 	struct irq_chip *chip = desc->irq_data.chip;
445 
446 	if (chip->flags & IRQCHIP_EOI_THREADED)
447 		chip->irq_eoi(&desc->irq_data);
448 
449 	unmask_irq(desc);
450 }
451 
452 static bool irq_check_poll(struct irq_desc *desc)
453 {
454 	if (!(desc->istate & IRQS_POLL_INPROGRESS))
455 		return false;
456 	return irq_wait_for_poll(desc);
457 }
458 
459 static bool irq_can_handle_pm(struct irq_desc *desc)
460 {
461 	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;
462 
463 	/*
464 	 * If the interrupt is not in progress and is not an armed
465 	 * wakeup interrupt, proceed.
466 	 */
467 	if (!irqd_has_set(&desc->irq_data, mask))
468 		return true;
469 
470 	/*
471 	 * If the interrupt is an armed wakeup source, mark it pending
472 	 * and suspended, disable it and notify the pm core about the
473 	 * event.
474 	 */
475 	if (irq_pm_check_wakeup(desc))
476 		return false;
477 
478 	/*
479 	 * Handle a potential concurrent poll on a different core.
480 	 */
481 	return irq_check_poll(desc);
482 }
483 
484 static inline bool irq_can_handle_actions(struct irq_desc *desc)
485 {
486 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
487 
488 	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
489 		desc->istate |= IRQS_PENDING;
490 		return false;
491 	}
492 	return true;
493 }
494 
495 static inline bool irq_can_handle(struct irq_desc *desc)
496 {
497 	if (!irq_can_handle_pm(desc))
498 		return false;
499 
500 	return irq_can_handle_actions(desc);
501 }
502 
503 /**
504  * handle_nested_irq - Handle a nested irq from a irq thread
505  * @irq:	the interrupt number
506  *
507  * Handle interrupts which are nested into a threaded interrupt
508  * handler. The handler function is called inside the calling threads
509  * context.
510  */
511 void handle_nested_irq(unsigned int irq)
512 {
513 	struct irq_desc *desc = irq_to_desc(irq);
514 	struct irqaction *action;
515 	irqreturn_t action_ret;
516 
517 	might_sleep();
518 
519 	scoped_guard(raw_spinlock_irq, &desc->lock) {
520 		if (!irq_can_handle_actions(desc))
521 			return;
522 
523 		action = desc->action;
524 		kstat_incr_irqs_this_cpu(desc);
525 		atomic_inc(&desc->threads_active);
526 	}
527 
528 	action_ret = IRQ_NONE;
529 	for_each_action_of_desc(desc, action)
530 		action_ret |= action->thread_fn(action->irq, action->dev_id);
531 
532 	if (!irq_settings_no_debug(desc))
533 		note_interrupt(desc, action_ret);
534 
535 	wake_threads_waitq(desc);
536 }
537 EXPORT_SYMBOL_GPL(handle_nested_irq);
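
/*
 * Illustrative sketch (not part of this file): the threaded handler of an
 * interrupt controller sitting behind a slow bus demultiplexing into nested
 * child interrupts.  'struct my_chip', 'my_read_pending()' and 'mc->domain'
 * are hypothetical; the children must have been set up as nested threaded
 * interrupts.
 *
 *	static irqreturn_t my_demux_thread(int irq, void *data)
 *	{
 *		struct my_chip *mc = data;
 *		unsigned long pending = my_read_pending(mc);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, mc->nr_irqs)
 *			handle_nested_irq(irq_find_mapping(mc->domain, bit));
 *
 *		return IRQ_HANDLED;
 *	}
 */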
538 
539 /**
540  * handle_simple_irq - Simple and software-decoded IRQs.
541  * @desc:	the interrupt description structure for this irq
542  *
543  * Simple interrupts are either sent from a demultiplexing interrupt
544  * handler or come from hardware, where no interrupt hardware control is
545  * necessary.
546  *
547  * Note: The caller is expected to handle the ack, clear, mask and unmask
548  * issues if necessary.
549  */
550 void handle_simple_irq(struct irq_desc *desc)
551 {
552 	guard(raw_spinlock)(&desc->lock);
553 
554 	if (!irq_can_handle(desc))
555 		return;
556 
557 	kstat_incr_irqs_this_cpu(desc);
558 	handle_irq_event(desc);
559 }
560 EXPORT_SYMBOL_GPL(handle_simple_irq);
561 
562 /**
563  * handle_untracked_irq - Simple and software-decoded IRQs.
564  * @desc:	the interrupt description structure for this irq
565  *
566  * Untracked interrupts are sent from a demultiplexing interrupt handler
567  * when the demultiplexer does not know which device it its multiplexed irq
568  * domain generated the interrupt. IRQ's handled through here are not
569  * subjected to stats tracking, randomness, or spurious interrupt
570  * detection.
571  *
572  * Note: Like handle_simple_irq, the caller is expected to handle the ack,
573  * clear, mask and unmask issues if necessary.
574  */
575 void handle_untracked_irq(struct irq_desc *desc)
576 {
577 	scoped_guard(raw_spinlock, &desc->lock) {
578 		if (!irq_can_handle(desc))
579 			return;
580 
581 		desc->istate &= ~IRQS_PENDING;
582 		irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
583 	}
584 
585 	__handle_irq_event_percpu(desc);
586 
587 	scoped_guard(raw_spinlock, &desc->lock)
588 		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
589 }
590 EXPORT_SYMBOL_GPL(handle_untracked_irq);
591 
592 /*
593  * Called unconditionally from handle_level_irq() and only for oneshot
594  * interrupts from handle_fasteoi_irq()
595  */
596 static void cond_unmask_irq(struct irq_desc *desc)
597 {
598 	/*
599 	 * We need to unmask in the following cases:
600 	 * - Standard level irq (IRQF_ONESHOT is not set)
601 	 * - Oneshot irq which did not wake the thread (caused by a
602 	 *   spurious interrupt or a primary handler handling it
603 	 *   completely).
604 	 */
605 	if (!irqd_irq_disabled(&desc->irq_data) &&
606 	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
607 		unmask_irq(desc);
608 }
609 
610 /**
611  * handle_level_irq - Level type irq handler
612  * @desc:	the interrupt description structure for this irq
613  *
614  * Level type interrupts are active as long as the hardware line has the
615  * active level. This may require masking the interrupt and unmasking it
616  * once the associated handler has acknowledged the device, so that the
617  * interrupt line returns to the inactive level.
618  */
619 void handle_level_irq(struct irq_desc *desc)
620 {
621 	guard(raw_spinlock)(&desc->lock);
622 	mask_ack_irq(desc);
623 
624 	if (!irq_can_handle(desc))
625 		return;
626 
627 	kstat_incr_irqs_this_cpu(desc);
628 	handle_irq_event(desc);
629 
630 	cond_unmask_irq(desc);
631 }
632 EXPORT_SYMBOL_GPL(handle_level_irq);
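
/*
 * Illustrative sketch (not part of this file): an irqdomain .map() callback
 * selecting this flow handler for a level triggered line.  'my_irq_chip' and
 * "my-level" are hypothetical.
 *
 *	static int my_domain_map(struct irq_domain *d, unsigned int virq,
 *				 irq_hw_number_t hwirq)
 *	{
 *		irq_set_chip_and_handler_name(virq, &my_irq_chip,
 *					      handle_level_irq, "my-level");
 *		irq_set_chip_data(virq, d->host_data);
 *		return 0;
 *	}
 */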
633 
634 static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
635 {
636 	if (!(desc->istate & IRQS_ONESHOT)) {
637 		chip->irq_eoi(&desc->irq_data);
638 		return;
639 	}
640 	/*
641 	 * We need to unmask in the following cases:
642 	 * - Oneshot irq which did not wake the thread (caused by a
643 	 *   spurious interrupt or a primary handler handling it
644 	 *   completely).
645 	 */
646 	if (!irqd_irq_disabled(&desc->irq_data) &&
647 	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
648 		chip->irq_eoi(&desc->irq_data);
649 		unmask_irq(desc);
650 	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
651 		chip->irq_eoi(&desc->irq_data);
652 	}
653 }
654 
655 static inline void cond_eoi_irq(struct irq_chip *chip, struct irq_data *data)
656 {
657 	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
658 		chip->irq_eoi(data);
659 }
660 
661 /**
662  * handle_fasteoi_irq - irq handler for transparent controllers
663  * @desc:	the interrupt description structure for this irq
664  *
665  * Only a single callback will be issued to the chip: an ->eoi() call when
666  * the interrupt has been serviced. This enables support for modern forms
667  * of interrupt handlers, which handle the flow details in hardware,
668  * transparently.
669  */
670 void handle_fasteoi_irq(struct irq_desc *desc)
671 {
672 	struct irq_chip *chip = desc->irq_data.chip;
673 
674 	guard(raw_spinlock)(&desc->lock);
675 
676 	/*
677 	 * When an affinity change races with IRQ handling, the next interrupt
678 	 * can arrive on the new CPU before the original CPU has completed
679 	 * handling the previous one - it may need to be resent.
680 	 */
681 	if (!irq_can_handle_pm(desc)) {
682 		if (irqd_needs_resend_when_in_progress(&desc->irq_data))
683 			desc->istate |= IRQS_PENDING;
684 		cond_eoi_irq(chip, &desc->irq_data);
685 		return;
686 	}
687 
688 	if (!irq_can_handle_actions(desc)) {
689 		mask_irq(desc);
690 		cond_eoi_irq(chip, &desc->irq_data);
691 		return;
692 	}
693 
694 	kstat_incr_irqs_this_cpu(desc);
695 	if (desc->istate & IRQS_ONESHOT)
696 		mask_irq(desc);
697 
698 	handle_irq_event(desc);
699 
700 	cond_unmask_eoi_irq(desc, chip);
701 
702 	/*
703 	 * When the race described above happens this will resend the interrupt.
704 	 */
705 	if (unlikely(desc->istate & IRQS_PENDING))
706 		check_irq_resend(desc, false);
707 }
708 EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
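
/*
 * Illustrative sketch (not part of this file): a hierarchical irqdomain
 * .alloc() callback installing this flow handler for an EOI based chip.
 * 'my_eoi_chip', 'mc' and the hwirq resolution are hypothetical assumptions.
 *
 *	irq_domain_set_info(domain, virq, hwirq, &my_eoi_chip, mc,
 *			    handle_fasteoi_irq, NULL, NULL);
 */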
709 
710 /**
711  *	handle_fasteoi_nmi - irq handler for NMI interrupt lines
712  *	@desc:	the interrupt description structure for this irq
713  *
714  *	A simple NMI-safe handler, considering the restrictions
715  *	from request_nmi.
716  *
717  *	Only a single callback will be issued to the chip: an ->eoi()
718  *	call when the interrupt has been serviced. This enables support
719  *	for modern forms of interrupt handlers, which handle the flow
720  *	details in hardware, transparently.
721  */
722 void handle_fasteoi_nmi(struct irq_desc *desc)
723 {
724 	struct irq_chip *chip = irq_desc_get_chip(desc);
725 	struct irqaction *action = desc->action;
726 	unsigned int irq = irq_desc_get_irq(desc);
727 	irqreturn_t res;
728 
729 	__kstat_incr_irqs_this_cpu(desc);
730 
731 	trace_irq_handler_entry(irq, action);
732 	/*
733 	 * NMIs cannot be shared, there is only one action.
734 	 */
735 	res = action->handler(irq, action->dev_id);
736 	trace_irq_handler_exit(irq, action, res);
737 
738 	if (chip->irq_eoi)
739 		chip->irq_eoi(&desc->irq_data);
740 }
741 EXPORT_SYMBOL_GPL(handle_fasteoi_nmi);
742 
743 /**
744  * handle_edge_irq - edge type IRQ handler
745  * @desc:	the interrupt description structure for this irq
746  *
747  * Interrupt occurs on the falling and/or rising edge of a hardware
748  * signal. The occurrence is latched into the irq controller hardware and
749  * must be acked in order to be reenabled. After the ack another interrupt
750  * can happen on the same source even before the first one has been handled
751  * by the associated event handler. If this happens it might be necessary to
752  * disable (mask) the interrupt depending on the controller hardware. This
753  * requires reenabling the interrupt inside the loop which handles the
754  * interrupts that arrived while the handler was running. Once all
755  * pending interrupts are handled, the loop is left.
756  */
757 void handle_edge_irq(struct irq_desc *desc)
758 {
759 	guard(raw_spinlock)(&desc->lock);
760 
761 	if (!irq_can_handle(desc)) {
762 		desc->istate |= IRQS_PENDING;
763 		mask_ack_irq(desc);
764 		return;
765 	}
766 
767 	kstat_incr_irqs_this_cpu(desc);
768 
769 	/* Start handling the irq */
770 	desc->irq_data.chip->irq_ack(&desc->irq_data);
771 
772 	do {
773 		if (unlikely(!desc->action)) {
774 			mask_irq(desc);
775 			return;
776 		}
777 
778 		/*
779 		 * When another irq arrived while we were handling
780 		 * one, we could have masked the irq.
781 		 * Reenable it, if it was not disabled in meantime.
782 		 * Reenable it, if it was not disabled in the meantime.
783 		if (unlikely(desc->istate & IRQS_PENDING)) {
784 			if (!irqd_irq_disabled(&desc->irq_data) &&
785 			    irqd_irq_masked(&desc->irq_data))
786 				unmask_irq(desc);
787 		}
788 
789 		handle_irq_event(desc);
790 
791 	} while ((desc->istate & IRQS_PENDING) && !irqd_irq_disabled(&desc->irq_data));
792 }
793 EXPORT_SYMBOL(handle_edge_irq);
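
/*
 * Illustrative sketch (not part of this file): an irq_chip .irq_set_type()
 * callback programming the hardware and switching between the level and edge
 * flow handlers accordingly.  'my_hw_set_type()' is a hypothetical helper.
 *
 *	static int my_irq_set_type(struct irq_data *d, unsigned int type)
 *	{
 *		my_hw_set_type(d, type);
 *
 *		if (type & IRQ_TYPE_LEVEL_MASK)
 *			irq_set_handler_locked(d, handle_level_irq);
 *		else
 *			irq_set_handler_locked(d, handle_edge_irq);
 *
 *		return 0;
 *	}
 */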
794 
795 /**
796  *	handle_percpu_irq - Per CPU local irq handler
797  *	@desc:	the interrupt description structure for this irq
798  *
799  *	Per CPU interrupts on SMP machines without locking requirements
800  */
801 void handle_percpu_irq(struct irq_desc *desc)
802 {
803 	struct irq_chip *chip = irq_desc_get_chip(desc);
804 
805 	/*
806 	 * PER CPU interrupts are not serialized. Do not touch
807 	 * desc->tot_count.
808 	 */
809 	__kstat_incr_irqs_this_cpu(desc);
810 
811 	if (chip->irq_ack)
812 		chip->irq_ack(&desc->irq_data);
813 
814 	handle_irq_event_percpu(desc);
815 
816 	if (chip->irq_eoi)
817 		chip->irq_eoi(&desc->irq_data);
818 }
819 
820 /**
821  * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
822  * @desc:	the interrupt description structure for this irq
823  *
824  * Per CPU interrupts on SMP machines without locking requirements. Same as
825  * handle_percpu_irq() above but with the following extras:
826  *
827  * action->percpu_dev_id is a pointer to percpu variables which
828  * contain the real device id for the cpu on which this handler is
829  * called.
830  */
831 void handle_percpu_devid_irq(struct irq_desc *desc)
832 {
833 	struct irq_chip *chip = irq_desc_get_chip(desc);
834 	struct irqaction *action = desc->action;
835 	unsigned int irq = irq_desc_get_irq(desc);
836 	irqreturn_t res;
837 
838 	/*
839 	 * PER CPU interrupts are not serialized. Do not touch
840 	 * desc->tot_count.
841 	 */
842 	__kstat_incr_irqs_this_cpu(desc);
843 
844 	if (chip->irq_ack)
845 		chip->irq_ack(&desc->irq_data);
846 
847 	if (likely(action)) {
848 		trace_irq_handler_entry(irq, action);
849 		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
850 		trace_irq_handler_exit(irq, action, res);
851 	} else {
852 		unsigned int cpu = smp_processor_id();
853 		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
854 
855 		if (enabled)
856 			irq_percpu_disable(desc, cpu);
857 
858 		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
859 			    enabled ? " and unmasked" : "", irq, cpu);
860 	}
861 
862 	if (chip->irq_eoi)
863 		chip->irq_eoi(&desc->irq_data);
864 }
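
/*
 * Illustrative sketch (not part of this file): requesting a per-CPU interrupt
 * with per-CPU device ids and enabling it on the local CPU.  'my_percpu_irq',
 * 'my_handler' and 'my_percpu_data' are hypothetical; enable_percpu_irq()
 * has to be called on each CPU that should receive the interrupt, typically
 * from a CPU hotplug "starting" callback.
 *
 *	ret = request_percpu_irq(my_percpu_irq, my_handler, "my-timer",
 *				 my_percpu_data);
 *	if (!ret)
 *		enable_percpu_irq(my_percpu_irq, IRQ_TYPE_NONE);
 */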
865 
866 /**
867  * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
868  *				     dev ids
869  * @desc:	the interrupt description structure for this irq
870  *
871  * Similar to handle_fasteoi_nmi, but handling the dev_id cookie
872  * as a percpu pointer.
873  */
874 void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc)
875 {
876 	struct irq_chip *chip = irq_desc_get_chip(desc);
877 	struct irqaction *action = desc->action;
878 	unsigned int irq = irq_desc_get_irq(desc);
879 	irqreturn_t res;
880 
881 	__kstat_incr_irqs_this_cpu(desc);
882 
883 	trace_irq_handler_entry(irq, action);
884 	res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
885 	trace_irq_handler_exit(irq, action, res);
886 
887 	if (chip->irq_eoi)
888 		chip->irq_eoi(&desc->irq_data);
889 }
890 
891 static void
892 __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
893 		     int is_chained, const char *name)
894 {
895 	if (!handle) {
896 		handle = handle_bad_irq;
897 	} else {
898 		struct irq_data *irq_data = &desc->irq_data;
899 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
900 		/*
901 		 * With hierarchical domains we might run into a
902 		 * situation where the outermost chip is not yet set
903 		 * up, but the inner chips are there.  Instead of
904 		 * bailing we install the handler, but obviously we
905 		 * cannot enable/startup the interrupt at this point.
906 		 */
907 		while (irq_data) {
908 			if (irq_data->chip != &no_irq_chip)
909 				break;
910 			/*
911 			 * Bail out if the outer chip is not set up
912 			 * and the interrupt is supposed to be started
913 			 * right away.
914 			 */
915 			if (WARN_ON(is_chained))
916 				return;
917 			/* Try the parent */
918 			irq_data = irq_data->parent_data;
919 		}
920 #endif
921 		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
922 			return;
923 	}
924 
925 	/* Uninstall? */
926 	if (handle == handle_bad_irq) {
927 		if (desc->irq_data.chip != &no_irq_chip)
928 			mask_ack_irq(desc);
929 		irq_state_set_disabled(desc);
930 		if (is_chained) {
931 			desc->action = NULL;
932 			WARN_ON(irq_chip_pm_put(irq_desc_get_irq_data(desc)));
933 		}
934 		desc->depth = 1;
935 	}
936 	desc->handle_irq = handle;
937 	desc->name = name;
938 
939 	if (handle != handle_bad_irq && is_chained) {
940 		unsigned int type = irqd_get_trigger_type(&desc->irq_data);
941 
942 		/*
943 		 * We're about to start this interrupt immediately,
944 		 * hence the need to set the trigger configuration.
945 		 * But the .set_type callback may have overridden the
946 		 * flow handler, ignoring that we're dealing with a
947 		 * chained interrupt. Reset it immediately because we
948 		 * do know better.
949 		 */
950 		if (type != IRQ_TYPE_NONE) {
951 			__irq_set_trigger(desc, type);
952 			desc->handle_irq = handle;
953 		}
954 
955 		irq_settings_set_noprobe(desc);
956 		irq_settings_set_norequest(desc);
957 		irq_settings_set_nothread(desc);
958 		desc->action = &chained_action;
959 		WARN_ON(irq_chip_pm_get(irq_desc_get_irq_data(desc)));
960 		irq_activate_and_startup(desc, IRQ_RESEND);
961 	}
962 }
963 
964 void __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
965 		       const char *name)
966 {
967 	scoped_irqdesc_get_and_lock(irq, 0)
968 		__irq_do_set_handler(scoped_irqdesc, handle, is_chained, name);
969 }
970 EXPORT_SYMBOL_GPL(__irq_set_handler);
971 
972 void irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
973 				      void *data)
974 {
975 	scoped_irqdesc_get_and_buslock(irq, 0) {
976 		struct irq_desc *desc = scoped_irqdesc;
977 
978 		desc->irq_common_data.handler_data = data;
979 		__irq_do_set_handler(desc, handle, 1, NULL);
980 	}
981 }
982 EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);
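
/*
 * Illustrative sketch (not part of this file): a GPIO style controller
 * installing a chained demultiplex handler on its parent interrupt.  The
 * handler runs in the flow handler context of the parent irq, so it must not
 * sleep.  'struct my_gc', 'my_read_pending()' and the extra include
 * (<linux/irqchip/chained_irq.h>) are assumptions for the example.
 *
 *	static void my_gpio_demux(struct irq_desc *desc)
 *	{
 *		struct my_gc *gc = irq_desc_get_handler_data(desc);
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		unsigned long pending;
 *		int bit;
 *
 *		chained_irq_enter(chip, desc);
 *		pending = my_read_pending(gc);
 *		for_each_set_bit(bit, &pending, gc->ngpio)
 *			generic_handle_domain_irq(gc->domain, bit);
 *		chained_irq_exit(chip, desc);
 *	}
 *
 *	irq_set_chained_handler_and_data(parent_irq, my_gpio_demux, gc);
 */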
983 
984 void
985 irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip,
986 			      irq_flow_handler_t handle, const char *name)
987 {
988 	irq_set_chip(irq, chip);
989 	__irq_set_handler(irq, handle, 0, name);
990 }
991 EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
992 
993 void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
994 {
995 	scoped_irqdesc_get_and_lock(irq, 0) {
996 		struct irq_desc *desc = scoped_irqdesc;
997 		unsigned long trigger, tmp;
998 		/*
999 		 * Warn when a driver sets the no autoenable flag on an already
1000 		 * active interrupt.
1001 		 */
1002 		WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));
1003 
1004 		irq_settings_clr_and_set(desc, clr, set);
1005 
1006 		trigger = irqd_get_trigger_type(&desc->irq_data);
1007 
1008 		irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
1009 			   IRQD_TRIGGER_MASK | IRQD_LEVEL);
1010 		if (irq_settings_has_no_balance_set(desc))
1011 			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1012 		if (irq_settings_is_per_cpu(desc))
1013 			irqd_set(&desc->irq_data, IRQD_PER_CPU);
1014 		if (irq_settings_is_level(desc))
1015 			irqd_set(&desc->irq_data, IRQD_LEVEL);
1016 
1017 		tmp = irq_settings_get_trigger_mask(desc);
1018 		if (tmp != IRQ_TYPE_NONE)
1019 			trigger = tmp;
1020 
1021 		irqd_set(&desc->irq_data, trigger);
1022 	}
1023 }
1024 EXPORT_SYMBOL_GPL(irq_modify_status);
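
/*
 * Illustrative sketch (not part of this file): drivers usually reach this
 * function through the irq_set_status_flags()/irq_clear_status_flags()
 * wrappers, e.g. to keep a line from being enabled automatically by
 * request_irq().  'my_irq', 'my_handler' and 'my_dev' are hypothetical.
 *
 *	irq_set_status_flags(my_irq, IRQ_NOAUTOEN);
 *	ret = request_irq(my_irq, my_handler, 0, "mydev", my_dev);
 *
 * The line stays masked until the driver is ready and calls enable_irq().
 */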
1025 
1026 #ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
1027 /**
1028  *	irq_cpu_online - Invoke all irq_cpu_online functions.
1029  *
1030  *	Iterate through all irqs and invoke the chip.irq_cpu_online()
1031  *	for each.
1032  */
1033 void irq_cpu_online(void)
1034 {
1035 	unsigned int irq;
1036 
1037 	for_each_active_irq(irq) {
1038 		struct irq_desc *desc = irq_to_desc(irq);
1039 		struct irq_chip *chip;
1040 
1041 		if (!desc)
1042 			continue;
1043 
1044 		guard(raw_spinlock_irqsave)(&desc->lock);
1045 		chip = irq_data_get_irq_chip(&desc->irq_data);
1046 		if (chip && chip->irq_cpu_online &&
1047 		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
1048 		     !irqd_irq_disabled(&desc->irq_data)))
1049 			chip->irq_cpu_online(&desc->irq_data);
1050 	}
1051 }
1052 
1053 /**
1054  *	irq_cpu_offline - Invoke all irq_cpu_offline functions.
1055  *
1056  *	Iterate through all irqs and invoke the chip.irq_cpu_offline()
1057  *	for each.
1058  */
1059 void irq_cpu_offline(void)
1060 {
1061 	unsigned int irq;
1062 
1063 	for_each_active_irq(irq) {
1064 		struct irq_desc *desc = irq_to_desc(irq);
1065 		struct irq_chip *chip;
1066 
1067 		if (!desc)
1068 			continue;
1069 
1070 		guard(raw_spinlock_irqsave)(&desc->lock);
1071 		chip = irq_data_get_irq_chip(&desc->irq_data);
1072 		if (chip && chip->irq_cpu_offline &&
1073 		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
1074 		     !irqd_irq_disabled(&desc->irq_data)))
1075 			chip->irq_cpu_offline(&desc->irq_data);
1076 	}
1077 }
1078 #endif
1079 
1080 #ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
1081 
1082 #ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
1083 /**
1084  * handle_fasteoi_ack_irq - irq handler for edge hierarchy stacked on
1085  *			    transparent controllers
1086  *
1087  * @desc:	the interrupt description structure for this irq
1088  *
1089  * Like handle_fasteoi_irq(), but for use with hierarchy where the irq_chip
1090  * also needs to have its ->irq_ack() function called.
1091  */
1092 void handle_fasteoi_ack_irq(struct irq_desc *desc)
1093 {
1094 	struct irq_chip *chip = desc->irq_data.chip;
1095 
1096 	guard(raw_spinlock)(&desc->lock);
1097 
1098 	if (!irq_can_handle_pm(desc)) {
1099 		cond_eoi_irq(chip, &desc->irq_data);
1100 		return;
1101 	}
1102 
1103 	if (unlikely(!irq_can_handle_actions(desc))) {
1104 		mask_irq(desc);
1105 		cond_eoi_irq(chip, &desc->irq_data);
1106 		return;
1107 	}
1108 
1109 	kstat_incr_irqs_this_cpu(desc);
1110 	if (desc->istate & IRQS_ONESHOT)
1111 		mask_irq(desc);
1112 
1113 	desc->irq_data.chip->irq_ack(&desc->irq_data);
1114 
1115 	handle_irq_event(desc);
1116 
1117 	cond_unmask_eoi_irq(desc, chip);
1118 }
1119 EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);
1120 
1121 /**
1122  * handle_fasteoi_mask_irq - irq handler for level hierarchy stacked on
1123  *			     transparent controllers
1124  *
1125  * @desc:	the interrupt description structure for this irq
1126  *
1127  * Like handle_fasteoi_irq(), but for use with hierarchy where the irq_chip
1128  * also needs to have its ->irq_mask_ack() function called.
1129  */
1130 void handle_fasteoi_mask_irq(struct irq_desc *desc)
1131 {
1132 	struct irq_chip *chip = desc->irq_data.chip;
1133 
1134 	guard(raw_spinlock)(&desc->lock);
1135 	mask_ack_irq(desc);
1136 
1137 	if (!irq_can_handle(desc)) {
1138 		cond_eoi_irq(chip, &desc->irq_data);
1139 		return;
1140 	}
1141 
1142 	kstat_incr_irqs_this_cpu(desc);
1143 
1144 	handle_irq_event(desc);
1145 
1146 	cond_unmask_eoi_irq(desc, chip);
1147 }
1148 EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);
1149 
1150 #endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */
1151 
1152 /**
1153  * irq_chip_set_parent_state - set the state of a parent interrupt.
1154  *
1155  * @data: Pointer to interrupt specific data
1156  * @which: State to be restored (one of IRQCHIP_STATE_*)
1157  * @val: Value corresponding to @which
1158  *
1159  * Returns success (0) if the underlying irqchip does not implement it.
1160  */
1161 int irq_chip_set_parent_state(struct irq_data *data,
1162 			      enum irqchip_irq_state which,
1163 			      bool val)
1164 {
1165 	data = data->parent_data;
1166 
1167 	if (!data || !data->chip->irq_set_irqchip_state)
1168 		return 0;
1169 
1170 	return data->chip->irq_set_irqchip_state(data, which, val);
1171 }
1172 EXPORT_SYMBOL_GPL(irq_chip_set_parent_state);
1173 
1174 /**
1175  * irq_chip_get_parent_state - get the state of a parent interrupt.
1176  *
1177  * @data: Pointer to interrupt specific data
1178  * @which: one of IRQCHIP_STATE_* the caller wants to know
1179  * @state: a pointer to a boolean where the state is to be stored
1180  *
1181  * Returns success (0) if the underlying irqchip does not implement it.
1182  */
1183 int irq_chip_get_parent_state(struct irq_data *data,
1184 			      enum irqchip_irq_state which,
1185 			      bool *state)
1186 {
1187 	data = data->parent_data;
1188 
1189 	if (!data || !data->chip->irq_get_irqchip_state)
1190 		return 0;
1191 
1192 	return data->chip->irq_get_irqchip_state(data, which, state);
1193 }
1194 EXPORT_SYMBOL_GPL(irq_chip_get_parent_state);
1195 
1196 /**
1197  * irq_chip_enable_parent - Enable the parent interrupt (falls back to unmask
1198  * if ->irq_enable is NULL)
1199  * @data:	Pointer to interrupt specific data
1200  */
1201 void irq_chip_enable_parent(struct irq_data *data)
1202 {
1203 	data = data->parent_data;
1204 	if (data->chip->irq_enable)
1205 		data->chip->irq_enable(data);
1206 	else
1207 		data->chip->irq_unmask(data);
1208 }
1209 EXPORT_SYMBOL_GPL(irq_chip_enable_parent);
1210 
1211 /**
1212  * irq_chip_disable_parent - Disable the parent interrupt (falls back to mask
1213  * if ->irq_disable is NULL)
1214  * @data:	Pointer to interrupt specific data
1215  */
1216 void irq_chip_disable_parent(struct irq_data *data)
1217 {
1218 	data = data->parent_data;
1219 	if (data->chip->irq_disable)
1220 		data->chip->irq_disable(data);
1221 	else
1222 		data->chip->irq_mask(data);
1223 }
1224 EXPORT_SYMBOL_GPL(irq_chip_disable_parent);
1225 
1226 /**
1227  * irq_chip_ack_parent - Acknowledge the parent interrupt
1228  * @data:	Pointer to interrupt specific data
1229  */
1230 void irq_chip_ack_parent(struct irq_data *data)
1231 {
1232 	data = data->parent_data;
1233 	data->chip->irq_ack(data);
1234 }
1235 EXPORT_SYMBOL_GPL(irq_chip_ack_parent);
1236 
1237 /**
1238  * irq_chip_mask_parent - Mask the parent interrupt
1239  * @data:	Pointer to interrupt specific data
1240  */
1241 void irq_chip_mask_parent(struct irq_data *data)
1242 {
1243 	data = data->parent_data;
1244 	data->chip->irq_mask(data);
1245 }
1246 EXPORT_SYMBOL_GPL(irq_chip_mask_parent);
1247 
1248 /**
1249  * irq_chip_mask_ack_parent - Mask and acknowledge the parent interrupt
1250  * @data:	Pointer to interrupt specific data
1251  */
1252 void irq_chip_mask_ack_parent(struct irq_data *data)
1253 {
1254 	data = data->parent_data;
1255 	data->chip->irq_mask_ack(data);
1256 }
1257 EXPORT_SYMBOL_GPL(irq_chip_mask_ack_parent);
1258 
1259 /**
1260  * irq_chip_unmask_parent - Unmask the parent interrupt
1261  * @data:	Pointer to interrupt specific data
1262  */
1263 void irq_chip_unmask_parent(struct irq_data *data)
1264 {
1265 	data = data->parent_data;
1266 	data->chip->irq_unmask(data);
1267 }
1268 EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);
1269 
1270 /**
1271  * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
1272  * @data:	Pointer to interrupt specific data
1273  */
1274 void irq_chip_eoi_parent(struct irq_data *data)
1275 {
1276 	data = data->parent_data;
1277 	data->chip->irq_eoi(data);
1278 }
1279 EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);
1280 
1281 /**
1282  * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
1283  * @data:	Pointer to interrupt specific data
1284  * @dest:	The affinity mask to set
1285  * @force:	Flag to enforce setting (disable online checks)
1286  *
1287  * Conditional, as the underlying parent chip might not implement it.
1288  */
1289 int irq_chip_set_affinity_parent(struct irq_data *data,
1290 				 const struct cpumask *dest, bool force)
1291 {
1292 	data = data->parent_data;
1293 	if (data->chip->irq_set_affinity)
1294 		return data->chip->irq_set_affinity(data, dest, force);
1295 
1296 	return -ENOSYS;
1297 }
1298 EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent);
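
/*
 * Illustrative sketch (not part of this file): a chip in a hierarchical
 * domain that has no per-line registers of its own can simply delegate to
 * its parent with the helpers above.  "my-stacked" is a hypothetical name.
 *
 *	static struct irq_chip my_stacked_chip = {
 *		.name			= "my-stacked",
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_eoi		= irq_chip_eoi_parent,
 *		.irq_set_affinity	= irq_chip_set_affinity_parent,
 *		.irq_set_type		= irq_chip_set_type_parent,
 *	};
 */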
1299 
1300 /**
1301  * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
1302  * @data:	Pointer to interrupt specific data
1303  * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
1304  *
1305  * Conditional, as the underlying parent chip might not implement it.
1306  */
1307 int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
1308 {
1309 	data = data->parent_data;
1310 
1311 	if (data->chip->irq_set_type)
1312 		return data->chip->irq_set_type(data, type);
1313 
1314 	return -ENOSYS;
1315 }
1316 EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);
1317 
1318 /**
1319  * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
1320  * @data:	Pointer to interrupt specific data
1321  *
1322  * Iterate through the domain hierarchy of the interrupt and check
1323  * whether a hw retrigger function exists. If yes, invoke it.
1324  */
1325 int irq_chip_retrigger_hierarchy(struct irq_data *data)
1326 {
1327 	for (data = data->parent_data; data; data = data->parent_data)
1328 		if (data->chip && data->chip->irq_retrigger)
1329 			return data->chip->irq_retrigger(data);
1330 
1331 	return 0;
1332 }
1333 EXPORT_SYMBOL_GPL(irq_chip_retrigger_hierarchy);
1334 
1335 /**
1336  * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
1337  * @data:	Pointer to interrupt specific data
1338  * @vcpu_info:	The vcpu affinity information
1339  */
1340 int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
1341 {
1342 	data = data->parent_data;
1343 	if (data->chip->irq_set_vcpu_affinity)
1344 		return data->chip->irq_set_vcpu_affinity(data, vcpu_info);
1345 
1346 	return -ENOSYS;
1347 }
1348 EXPORT_SYMBOL_GPL(irq_chip_set_vcpu_affinity_parent);
1349 /**
1350  * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
1351  * @data:	Pointer to interrupt specific data
1352  * @on:		Whether to set or reset the wake-up capability of this irq
1353  *
1354  * Conditional, as the underlying parent chip might not implement it.
1355  */
1356 int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
1357 {
1358 	data = data->parent_data;
1359 
1360 	if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
1361 		return 0;
1362 
1363 	if (data->chip->irq_set_wake)
1364 		return data->chip->irq_set_wake(data, on);
1365 
1366 	return -ENOSYS;
1367 }
1368 EXPORT_SYMBOL_GPL(irq_chip_set_wake_parent);
1369 
1370 /**
1371  * irq_chip_request_resources_parent - Request resources on the parent interrupt
1372  * @data:	Pointer to interrupt specific data
1373  */
1374 int irq_chip_request_resources_parent(struct irq_data *data)
1375 {
1376 	data = data->parent_data;
1377 
1378 	if (data->chip->irq_request_resources)
1379 		return data->chip->irq_request_resources(data);
1380 
1381 	/* no error on missing optional irq_chip::irq_request_resources */
1382 	return 0;
1383 }
1384 EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent);
1385 
1386 /**
1387  * irq_chip_release_resources_parent - Release resources on the parent interrupt
1388  * @data:	Pointer to interrupt specific data
1389  */
1390 void irq_chip_release_resources_parent(struct irq_data *data)
1391 {
1392 	data = data->parent_data;
1393 	if (data->chip->irq_release_resources)
1394 		data->chip->irq_release_resources(data);
1395 }
1396 EXPORT_SYMBOL_GPL(irq_chip_release_resources_parent);
1397 #endif
1398 
1399 /**
1400  * irq_chip_compose_msi_msg - Compose msi message for a irq chip
1401  * @data:	Pointer to interrupt specific data
1402  * @msg:	Pointer to the MSI message
1403  *
1404  * For hierarchical domains we find the first chip in the hierarchy
1405  * which implements the irq_compose_msi_msg callback. For non-hierarchical
1406  * domains we use the top level chip.
1407  */
1408 int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1409 {
1410 	struct irq_data *pos;
1411 
1412 	for (pos = NULL; !pos && data; data = irqd_get_parent_data(data)) {
1413 		if (data->chip && data->chip->irq_compose_msi_msg)
1414 			pos = data;
1415 	}
1416 
1417 	if (!pos)
1418 		return -ENOSYS;
1419 
1420 	pos->chip->irq_compose_msi_msg(pos, msg);
1421 	return 0;
1422 }
1423 
1424 static struct device *irq_get_pm_device(struct irq_data *data)
1425 {
1426 	if (data->domain)
1427 		return data->domain->pm_dev;
1428 
1429 	return NULL;
1430 }
1431 
1432 /**
1433  * irq_chip_pm_get - Enable power for an IRQ chip
1434  * @data:	Pointer to interrupt specific data
1435  *
1436  * Enable the power to the IRQ chip referenced by the interrupt data
1437  * structure.
1438  */
1439 int irq_chip_pm_get(struct irq_data *data)
1440 {
1441 	struct device *dev = irq_get_pm_device(data);
1442 	int retval = 0;
1443 
1444 	if (IS_ENABLED(CONFIG_PM) && dev)
1445 		retval = pm_runtime_resume_and_get(dev);
1446 
1447 	return retval;
1448 }
1449 
1450 /**
1451  * irq_chip_pm_put - Disable power for an IRQ chip
1452  * @data:	Pointer to interrupt specific data
1453  *
1454  * Disable the power to the IRQ chip referenced by the interrupt data
1455  * structure. Note that power will only be disabled once this
1456  * function has been called for all IRQs that have called irq_chip_pm_get().
1457  */
1458 int irq_chip_pm_put(struct irq_data *data)
1459 {
1460 	struct device *dev = irq_get_pm_device(data);
1461 	int retval = 0;
1462 
1463 	if (IS_ENABLED(CONFIG_PM) && dev)
1464 		retval = pm_runtime_put(dev);
1465 
1466 	return (retval < 0) ? retval : 0;
1467 }
1468