xref: /linux/kernel/irq/migration.c (revision b8bb76713ec50df2f11efee386e16f93d51e1076)

#include <linux/irq.h>

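/*
 * move_masked_irq - carry out a pending affinity change for @irq
 *
 * Clears IRQ_MOVE_PENDING and, if the pending mask intersects the online
 * CPUs, reprograms the affinity to that intersection via
 * chip->set_affinity().  The caller must hold desc->lock and must have
 * the irq masked.
 */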
void move_masked_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
		return;

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (CHECK_IRQ_PER_CPU(desc->status)) {
		WARN_ON(1);
		return;
	}

	desc->status &= ~IRQ_MOVE_PENDING;

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!desc->chip->set_affinity)
		return;

	assert_spin_locked(&desc->lock);
	/*
	 * If there was a valid mask to work with, do the
	 * disable, re-program, enable sequence.
	 * This is *not* particularly important for level-triggered
	 * interrupts, but in an edge-triggered case we might be
	 * setting the RTE while an active trigger is coming in,
	 * which could cause some IO-APICs to malfunction.
	 * Being paranoid, I guess!
	 *
	 * For correct operation this depends on the caller
	 * masking the irqs.
	 */
	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
		   < nr_cpu_ids)) {
		cpumask_and(desc->affinity,
			    desc->pending_mask, cpu_online_mask);
		desc->chip->set_affinity(irq, desc->affinity);
	}
	cpumask_clear(desc->pending_mask);
}

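/*
 * move_native_irq - carry out a pending affinity change with masking
 *
 * Wraps move_masked_irq() in a chip->mask()/chip->unmask() pair so the
 * route can be reprogrammed without racing against an incoming edge.
 * Bails out early if the irq is disabled, since the unmask here would
 * otherwise undo the disable.
 */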
void move_native_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
		return;

	if (unlikely(desc->status & IRQ_DISABLED))
		return;

	desc->chip->mask(irq);
	move_masked_irq(irq);
	desc->chip->unmask(irq);
}