xref: /linux/kernel/irq/migration.c (revision 14b42963f64b98ab61fa9723c03d71aa5ef4f862)

#include <linux/irq.h>

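/*
 * set_pending_irq - request a delayed affinity change for an interrupt
 *
 * Record the target CPU mask and flag the descriptor; the actual
 * re-programming is done later by move_native_irq().
 */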
void set_pending_irq(unsigned int irq, cpumask_t mask)
{
	struct irq_desc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	desc->move_irq = 1;
	desc->pending_mask = mask;
	spin_unlock_irqrestore(&desc->lock, flags);
}

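/*
 * move_native_irq - carry out a pending affinity change
 *
 * Must be called with desc->lock held.  If set_pending_irq() has flagged
 * this interrupt, re-program its affinity to the intersection of the
 * pending mask and the currently online CPUs.
 */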
void move_native_irq(int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	cpumask_t tmp;

	if (likely(!desc->move_irq))
		return;

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (CHECK_IRQ_PER_CPU(desc->status)) {
		WARN_ON(1);
		return;
	}

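	/*
	 * Consume the request up front: if the pending mask turns out to
	 * be unusable below, the move is dropped rather than retried.
	 */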
	desc->move_irq = 0;

	if (unlikely(cpus_empty(desc->pending_mask)))
		return;

	if (!desc->chip->set_affinity)
		return;

	assert_spin_locked(&desc->lock);

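	/* Only consider target CPUs that are still online. */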
	cpus_and(tmp, desc->pending_mask, cpu_online_map);

	/*
	 * If there is a valid mask to work with, do the
	 * disable, re-program, enable sequence.
	 * This is not particularly important for level-triggered
	 * interrupts, but in the edge-triggered case we might be
	 * writing the RTE while an active trigger is coming in,
	 * which could cause some IO-APICs to malfunction.
	 * Being paranoid, I guess!
	 */
	if (likely(!cpus_empty(tmp))) {
		if (likely(!(desc->status & IRQ_DISABLED)))
			desc->chip->disable(irq);

		desc->chip->set_affinity(irq, tmp);

		if (likely(!(desc->status & IRQ_DISABLED)))
			desc->chip->enable(irq);
	}
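	/* Clear the request whether or not the move was applied. */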
	cpus_clear(desc->pending_mask);
}