#include <linux/irq.h>

#if defined(CONFIG_GENERIC_PENDING_IRQ)

/*
 * Record a pending affinity change for @irq; the new mask is applied
 * later, from interrupt context, by move_native_irq().
 */
void set_pending_irq(unsigned int irq, cpumask_t mask)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	desc->move_irq = 1;
	pending_irq_cpumask[irq] = mask;
	spin_unlock_irqrestore(&desc->lock, flags);
}

/*
 * Apply a pending affinity change for @irq, if one was recorded.
 * Must be called with desc->lock held.
 */
void move_native_irq(int irq)
{
	cpumask_t tmp;
	irq_desc_t *desc = irq_descp(irq);

	if (likely(!desc->move_irq))
		return;

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (CHECK_IRQ_PER_CPU(desc->status)) {
		WARN_ON(1);
		return;
	}

	desc->move_irq = 0;

	if (likely(cpus_empty(pending_irq_cpumask[irq])))
		return;

	if (!desc->handler->set_affinity)
		return;

	assert_spin_locked(&desc->lock);

	cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);

	/*
	 * If there was a valid mask to work with, do the
	 * disable, re-program, enable sequence.
	 * This is *not* particularly important for level-triggered
	 * interrupts, but in an edge-triggered case we might be setting
	 * the RTE while an active trigger is coming in.  This could
	 * cause some IO-APICs to malfunction.
	 * Being paranoid, I guess!
	 */
	if (unlikely(!cpus_empty(tmp))) {
		if (likely(!(desc->status & IRQ_DISABLED)))
			desc->handler->disable(irq);

		desc->handler->set_affinity(irq, tmp);

		if (likely(!(desc->status & IRQ_DISABLED)))
			desc->handler->enable(irq);
	}
	cpus_clear(pending_irq_cpumask[irq]);
}

#endif
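
/*
 * Usage sketch (illustrative only, not part of the original file): an
 * architecture that selects CONFIG_GENERIC_PENDING_IRQ is assumed to defer
 * affinity changes by calling set_pending_irq() from its affinity-setting
 * path, and to call move_native_irq() later from interrupt context, where
 * desc->lock is held and the line is known to be quiescent.  The
 * example_arch_* functions below are hypothetical names, not real kernel
 * APIs.
 */
#if 0	/* illustrative sketch, not compiled */
static void example_arch_set_affinity(unsigned int irq, cpumask_t mask)
{
	/* Record the request; the hardware is re-programmed later. */
	set_pending_irq(irq, mask);
}

static void example_arch_ack_irq(unsigned int irq)
{
	/* Safe point: apply any pending move before acking the interrupt. */
	move_native_irq(irq);
	/* ... acknowledge the interrupt at the controller here ... */
}
#endif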