Lines Matching +full:compound +full:- +full:device
1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
33 guard(raw_spinlock)(&desc->lock); in try_one_irq()
47 if (irqd_irq_disabled(&desc->irq_data) && !force) in try_one_irq()
54 action = desc->action; in try_one_irq()
55 if (!action || !(action->flags & IRQF_SHARED) || (action->flags & __IRQF_TIMER)) in try_one_irq()
59 if (irqd_irq_inprogress(&desc->irq_data)) { in try_one_irq()
64 desc->istate |= IRQS_PENDING; in try_one_irq()
69 desc->istate |= IRQS_POLL_INPROGRESS; in try_one_irq()
74 action = desc->action; in try_one_irq()
75 } while ((desc->istate & IRQS_PENDING) && action); in try_one_irq()
76 desc->istate &= ~IRQS_POLL_INPROGRESS; in try_one_irq()
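The try_one_irq() fragments above are the core of the misrouted-IRQ poller: a shared, non-timer line that is not already in progress has its whole handler chain re-run, and IRQS_PENDING set while the handlers were running forces another pass. A minimal sketch of that retry loop, reconstructed around the fragments (the handle_irq_event() call and the ret handling are elided in the listing, so their exact form here is an assumption):

    do {
        if (handle_irq_event(desc) == IRQ_HANDLED)
            ret = IRQ_HANDLED;
        /* Handlers ran with desc->lock dropped, so re-read the
         * action chain before deciding on another pass. */
        action = desc->action;
    } while ((desc->istate & IRQS_PENDING) && action);
    desc->istate &= ~IRQS_POLL_INPROGRESS;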
122 state = READ_ONCE(desc->istate); in poll_spurious_irqs()
149 * (The other 100-of-100,000 interrupts may have been a correctly
150 * functioning device sharing an IRQ with the failing one)
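That parenthetical is why the threshold further down is 99,900 out of 100,000 rather than every single interrupt: on a shared line a healthy co-sharer keeps getting its interrupts handled, which keeps the ratio under the limit. This only works if each handler on a shared line reports honestly whether its own device raised the interrupt. A hedged driver-side sketch (the foo_* and FOO_* names and register layout are hypothetical):

    #include <linux/interrupt.h>
    #include <linux/io.h>

    struct foo_dev { void __iomem *regs; };     /* hypothetical device state */
    #define FOO_IRQ_STATUS   0x00               /* hypothetical register offsets */
    #define FOO_IRQ_ACK      0x04
    #define FOO_IRQ_PENDING  0x01

    static irqreturn_t foo_irq(int irq, void *dev_id)
    {
        struct foo_dev *foo = dev_id;

        /* Not our interrupt: say so. Returning IRQ_NONE is what lets
         * note_interrupt() spot a line that nobody ever claims. */
        if (!(readl(foo->regs + FOO_IRQ_STATUS) & FOO_IRQ_PENDING))
            return IRQ_NONE;

        /* Acknowledge and handle our event. */
        writel(FOO_IRQ_PENDING, foo->regs + FOO_IRQ_ACK);
        return IRQ_HANDLED;
    }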
165 * We need to take desc->lock here. note_interrupt() is called in __report_bad_irq()
166 * w/o desc->lock held, but IRQ_PROGRESS set. We might race in __report_bad_irq()
167 * with something else removing an action. It's ok to take in __report_bad_irq()
168 * desc->lock here. See synchronize_irq(). in __report_bad_irq()
170 guard(raw_spinlock_irqsave)(&desc->lock); in __report_bad_irq()
172 pr_err("[<%p>] %ps", action->handler, action->handler); in __report_bad_irq()
173 if (action->thread_fn) in __report_bad_irq()
174 pr_cont(" threaded [<%p>] %ps", action->thread_fn, action->thread_fn); in __report_bad_irq()
184 count--; in report_bad_irq()
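The lone count-- above is the tail of a simple rate limit around __report_bad_irq(): a static budget lets only the first batch of reports through, then the function goes silent. Reconstructed from memory (the initial value of 100 is an assumption), the surrounding function is roughly:

    static void report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
    {
        static int count = 100;         /* assumed initial report budget */

        if (count > 0) {
            count--;
            __report_bad_irq(desc, action_ret);
        }
    }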
197 /* We didn't actually handle the IRQ - see if it was misrouted? */ in try_misrouted_irq()
216 action = READ_ONCE(desc->action); in try_misrouted_irq()
217 return action && (action->flags & IRQF_IRQPOLL); in try_misrouted_irq()
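IRQF_IRQPOLL is the opt-in being tested here: as far as these fragments show, misrouted-IRQ polling is only attempted when the first handler installed on the line carries that flag. A hedged example of a driver requesting such a line from its probe path (foo->irq, foo_irq() and the "foo" name are placeholders carried over from the sketch above):

    #include <linux/interrupt.h>

    /* Inside a hypothetical foo_probe(); error handling trimmed. */
    int ret;

    ret = request_irq(foo->irq, foo_irq, IRQF_SHARED | IRQF_IRQPOLL,
                      "foo", foo);
    if (ret)
        pr_err("foo: request_irq(%d) failed: %d\n", foo->irq, ret);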
226 if (desc->istate & IRQS_POLL_INPROGRESS || irq_settings_is_polled(desc)) in note_interrupt()
236 * because we need to look at the compound of all handlers in note_interrupt()
271 if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) { in note_interrupt()
272 desc->threads_handled_last |= SPURIOUS_DEFERRED; in note_interrupt()
287 handled = atomic_read(&desc->threads_handled); in note_interrupt()
289 if (handled != desc->threads_handled_last) { in note_interrupt()
299 desc->threads_handled_last = handled; in note_interrupt()
330 desc->threads_handled_last &= ~SPURIOUS_DEFERRED; in note_interrupt()
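Fragments 271-330 implement the deferred verdict for threaded handlers: when the primary handler only returns IRQ_WAKE_THREAD, judgement is postponed (SPURIOUS_DEFERRED) and, on the next hardware interrupt, desc->threads_handled is compared with the last snapshot to see whether any IRQ thread reported success in the meantime. The producer side of that counter is not in this file; to the best of my recollection the per-action IRQ thread in kernel/irq/manage.c does roughly:

    /* Simplified from irq_thread(); the exact code may differ. */
    action_ret = handler_fn(desc, action);
    if (action_ret == IRQ_HANDLED)
        atomic_inc(&desc->threads_handled);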
341 if (time_after(jiffies, desc->last_unhandled + HZ/10)) in note_interrupt()
342 desc->irqs_unhandled = 1; in note_interrupt()
343 else in note_interrupt()
344 desc->irqs_unhandled++; in note_interrupt()
345 desc->last_unhandled = jiffies; in note_interrupt()
352 desc->irqs_unhandled -= ok; in note_interrupt()
355 if (likely(!desc->irqs_unhandled)) in note_interrupt()
359 desc->irq_count++; in note_interrupt()
360 if (likely(desc->irq_count < 100000)) in note_interrupt()
363 desc->irq_count = 0; in note_interrupt()
364 if (unlikely(desc->irqs_unhandled > 99900)) { in note_interrupt()
373 desc->istate |= IRQS_SPURIOUS_DISABLED; in note_interrupt()
374 desc->depth++; in note_interrupt()
379 desc->irqs_unhandled = 0; in note_interrupt()
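Taken together, fragments 341-379 are the disable policy: unhandled events only accumulate when they arrive less than 100ms (HZ/10 jiffies) apart, interrupts are only counted while some suspicion is on record, and when a 100,000-interrupt window fills with more than 99,900 unhandled events the line is reported, marked IRQS_SPURIOUS_DISABLED and shut off (the poll timer then keeps servicing it). A toy, standalone sketch of just the windowing arithmetic (not the kernel code; the 100ms ageing rule is left out):

    #include <stdbool.h>

    struct spurious_stats { unsigned int irq_count, irqs_unhandled; };

    /* Returns true when the simulated line should be disabled. */
    static bool note_one(struct spurious_stats *s, bool handled)
    {
        bool disable = false;

        if (!handled)
            s->irqs_unhandled++;
        if (!s->irqs_unhandled)
            return false;               /* nothing suspicious on record */
        if (++s->irq_count < 100000)
            return false;               /* window not full yet */
        s->irq_count = 0;
        if (s->irqs_unhandled > 99900)
            disable = true;             /* effectively stuck: report and disable */
        s->irqs_unhandled = 0;
        return disable;
    }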