xref: /linux/kernel/irq/cpuhotplug.c (revision 24bce201d79807b668bf9d9e0aca801c5c0d5f78)
// SPDX-License-Identifier: GPL-2.0
/*
 * Generic cpu hotunplug interrupt migration code copied from the
 * arch/arm implementation
 *
 * Copyright (C) Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>
#include <linux/sched/isolation.h>

#include "internals.h"

/* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at the general affinity mask */
static inline bool irq_needs_fixup(struct irq_data *d)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	/*
	 * The cpumask_empty() check is a workaround for interrupt chips,
	 * which do not implement effective affinity, but the architecture has
	 * enabled the config switch. Use the general affinity mask instead.
	 */
	if (cpumask_empty(m))
		m = irq_data_get_affinity_mask(d);

	/*
	 * Sanity check. If the mask is not empty when excluding the outgoing
	 * CPU then it must contain at least one online CPU. The outgoing CPU
	 * has been removed from the online mask already.
	 */
	if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
	    cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If this happens then there was a missed IRQ fixup at some
		 * point. Warn about it and enforce fixup.
		 */
		pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
			cpumask_pr_args(m), d->irq, cpu);
		return true;
	}
#endif
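	/*
	 * A fixup is only needed if the outgoing CPU is still part of the
	 * (effective) affinity mask.
	 */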
	return cpumask_test_cpu(cpu, m);
}

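/*
 * Migrate a single interrupt away from the outgoing CPU. Returns true if
 * the original affinity setting had to be broken to find an online target.
 */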
static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);
	const struct cpumask *affinity;
	bool brokeaff = false;
	int err;

	/*
	 * The IRQ chip might already be torn down, but the irq descriptor
	 * is still in the radix tree. Also, if the chip has no affinity
	 * setter, nothing can be done here.
	 */
	if (!chip || !chip->irq_set_affinity) {
		pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
		return false;
	}

	/*
	 * No move is required if:
	 * - the interrupt is per CPU
	 * - the interrupt is not started
	 * - the affinity mask does not include this CPU
	 *
	 * Note: Do not check desc->action as this might be a chained
	 * interrupt.
	 */
	if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {
		/*
		 * If an irq move is pending, abort it if the dying CPU is
		 * the sole target.
		 */
		irq_fixup_move_pending(desc, false);
		return false;
	}

	/*
	 * Complete any pending irq move cleanup. If this interrupt was
	 * moved in hard interrupt context, the vectors need to be cleaned
	 * up here; the cleanup cannot wait until the interrupt fires again,
	 * because this CPU is about to go away.
	 */
	irq_force_complete_move(desc);

	/*
	 * If there is a setaffinity pending, then try to reuse the pending
	 * mask, so that the last affinity change does not get lost. If
	 * there is no move pending or the pending mask does not contain
	 * any online CPU, use the current affinity mask.
	 */
	if (irq_fixup_move_pending(desc, true))
		affinity = irq_desc_get_pending_mask(desc);
	else
		affinity = irq_data_get_affinity_mask(d);

	/* Mask the chip for interrupts which cannot move in process context */
	if (maskchip && chip->irq_mask)
		chip->irq_mask(d);

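	/* Check whether the selected target mask still contains an online CPU */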
	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If the interrupt is managed, then shut it down and leave
		 * the affinity untouched.
		 */
		if (irqd_affinity_is_managed(d)) {
			irqd_set_managed_shutdown(d);
			irq_shutdown_and_deactivate(desc);
			return false;
		}
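		/*
		 * No online CPU is left in the requested mask and the
		 * interrupt is not managed: fall back to the full online
		 * mask and report the affinity as broken.
		 */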
		affinity = cpu_online_mask;
		brokeaff = true;
	}
	/*
	 * Do not set the force argument of irq_do_set_affinity() as this
	 * disables the masking of offline CPUs from the supplied affinity
	 * mask and therefore might keep/reassign the irq to the outgoing
	 * CPU.
	 */
	err = irq_do_set_affinity(d, affinity, false);
	if (err) {
		pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
				    d->irq, err);
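		/* The affinity was not changed, so do not report it as broken */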
		brokeaff = false;
	}

	if (maskchip && chip->irq_unmask)
		chip->irq_unmask(d);

	return brokeaff;
}

/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu
 *
 * The current CPU has been marked offline.  Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void irq_migrate_all_off_this_cpu(void)
{
	struct irq_desc *desc;
	unsigned int irq;

	for_each_active_irq(irq) {
		bool affinity_broken;

		desc = irq_to_desc(irq);
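		/*
		 * This runs on the outgoing CPU with interrupts already
		 * disabled, so the plain raw_spin_lock() (no irqsave) is
		 * sufficient here.
		 */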
		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken) {
			pr_warn_ratelimited("IRQ %u: no longer affine to CPU%u\n",
					    irq, smp_processor_id());
		}
	}
}

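/*
 * Decide whether a managed interrupt should be pulled onto housekeeping
 * CPUs: true if the upcoming CPU is a housekeeping CPU and the effective
 * affinity currently extends beyond the housekeeping mask.
 */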
static bool hk_should_isolate(struct irq_data *data, unsigned int cpu)
{
	const struct cpumask *hk_mask;

	if (!housekeeping_enabled(HK_TYPE_MANAGED_IRQ))
		return false;

	hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);
	if (cpumask_subset(irq_data_get_effective_affinity_mask(data), hk_mask))
		return false;

	return cpumask_test_cpu(cpu, hk_mask);
}

static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = irq_data_get_affinity_mask(data);

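	/*
	 * Only managed interrupts which have an action, a chip and the
	 * upcoming CPU in their affinity mask are of interest here.
	 */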
	if (!irqd_affinity_is_managed(data) || !desc->action ||
	    !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
		return;

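	/*
	 * The interrupt was shut down when the last CPU in its affinity
	 * mask went offline. Start it up again now that a valid target
	 * CPU is available.
	 */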
	if (irqd_is_managed_and_shutdown(data)) {
		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		return;
	}

	/*
	 * If the interrupt can only be directed to a single target
	 * CPU then it is already assigned to a CPU in the affinity
	 * mask. No point in trying to move it around unless the
	 * isolation mechanism requests to move it to an upcoming
	 * housekeeping CPU.
	 */
	if (!irqd_is_single_target(data) || hk_should_isolate(data, cpu))
		irq_set_affinity_locked(data, affinity, false);
}

/**
 * irq_affinity_online_cpu - Restore affinity for managed interrupts
 * @cpu:	Upcoming CPU for which interrupts should be restored
 */
int irq_affinity_online_cpu(unsigned int cpu)
{
	struct irq_desc *desc;
	unsigned int irq;

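	/* Prevent irq descriptors from being allocated or freed during the walk */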
	irq_lock_sparse();
	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		raw_spin_lock_irq(&desc->lock);
		irq_restore_affinity_of_irq(desc, cpu);
		raw_spin_unlock_irq(&desc->lock);
	}
	irq_unlock_sparse();

	return 0;
}