// SPDX-License-Identifier: GPL-2.0

#include <linux/irq.h>
#include <linux/interrupt.h>

#include "internals.h"

/**
 * irq_fixup_move_pending - Cleanup irq move pending from a dying CPU
 * @desc:	Interrupt descriptor to clean up
 * @force_clear: If set, clear the move pending bit unconditionally.
 *		 If not set, clear it only when the dying CPU is the
 *		 last one in the pending mask.
 *
 * Returns true if the pending bit was set and the pending mask contains an
 * online CPU other than the dying CPU.
 */
bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);

	if (!irqd_is_setaffinity_pending(data))
		return false;

	/*
	 * The outgoing CPU might be the last online target in a pending
	 * interrupt move. If that's the case, clear the pending move bit.
	 */
	if (!cpumask_intersects(desc->pending_mask, cpu_online_mask)) {
		irqd_clr_move_pending(data);
		return false;
	}
	if (force_clear)
		irqd_clr_move_pending(data);
	return true;
}

/**
 * irq_move_masked_irq - Apply a pending affinity change to a masked interrupt
 * @idata:	Interrupt data of the interrupt to move
 *
 * Must be called with desc->lock held and the interrupt masked. If an
 * affinity change is pending, reprogram the interrupt to the target CPUs
 * in desc->pending_mask.
 */
void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_data *data = &desc->irq_data;
	struct irq_chip *chip = data->chip;

	if (likely(!irqd_is_setaffinity_pending(data)))
		return;

	irqd_clr_move_pending(data);

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (irqd_is_per_cpu(data)) {
		WARN_ON(1);
		return;
	}

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there is a valid mask to work with, do the disable,
	 * re-program, enable sequence. This is *not* particularly
	 * important for level-triggered interrupts, but in an
	 * edge-triggered case we might be writing the RTE while an
	 * active trigger is coming in. This could cause some IO-APICs
	 * to malfunction. Being paranoid, I guess!
	 *
	 * For correct operation this depends on the caller masking
	 * the irqs.
	 */
	if (cpumask_intersects(desc->pending_mask, cpu_online_mask)) {
		int ret;

		ret = irq_do_set_affinity(data, desc->pending_mask, false);
		/*
		 * If there is a cleanup pending in the underlying
		 * vector management, reschedule the move for the next
		 * interrupt. Leave desc->pending_mask intact.
		 */
		if (ret == -EBUSY) {
			irqd_set_move_pending(data);
			return;
		}
	}
	cpumask_clear(desc->pending_mask);
}

/**
 * __irq_move_irq - Move an interrupt from interrupt context
 * @idata:	Interrupt data of the interrupt to move
 *
 * Masks the interrupt if it is not already masked, applies a pending
 * affinity change via irq_move_masked_irq() and unmasks it again.
 */
void __irq_move_irq(struct irq_data *idata)
{
	bool masked;

	/*
	 * Get the top level irq_data when CONFIG_IRQ_DOMAIN_HIERARCHY is
	 * enabled. This should be optimized away when
	 * CONFIG_IRQ_DOMAIN_HIERARCHY is disabled, so we avoid an
	 * "#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY" here.
	 */
	idata = irq_desc_get_irq_data(irq_data_to_desc(idata));

	if (unlikely(irqd_irq_disabled(idata)))
		return;

	/*
	 * Be careful vs. already masked interrupts. If this is a
	 * threaded interrupt with ONESHOT set, we can end up with an
	 * interrupt storm.
	 */
	masked = irqd_irq_masked(idata);
	if (!masked)
		idata->chip->irq_mask(idata);
	irq_move_masked_irq(idata);
	if (!masked)
		idata->chip->irq_unmask(idata);
}
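
/*
 * Usage sketch (illustrative only, not part of this file): the pending
 * move is typically driven from an irq_chip's ack path, so the affinity
 * change is applied from interrupt context while the line can safely be
 * masked. irq_move_irq() is the inline wrapper from <linux/irq.h> that
 * calls __irq_move_irq() only when irqd_is_setaffinity_pending() is
 * true; on x86 this role is played by the APIC ack path. The chip and
 * helper names below are hypothetical.
 */
#if 0
static void example_irq_ack(struct irq_data *data)
{
	/* Apply a pending affinity change while handling the interrupt */
	irq_move_irq(data);

	/* Hypothetical hardware acknowledge */
	example_hw_ack(data);
}

static struct irq_chip example_irq_chip = {
	.name			= "example",
	.irq_ack		= example_irq_ack,
	.irq_mask		= example_irq_mask,		/* hypothetical */
	.irq_unmask		= example_irq_unmask,		/* hypothetical */
	.irq_set_affinity	= example_irq_set_affinity,	/* hypothetical */
};
#endif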