// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Intel SMP support routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 *	(c) 1998-99, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 *	(c) 2002,2003 Andi Kleen, SuSE Labs.
 *
 *	i386 and x86_64 integration by Glauber Costa <gcosta@redhat.com>
 */

#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/gfp.h>
#include <linux/kexec.h>

#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/idtentry.h>
#include <asm/nmi.h>
#include <asm/mce.h>
#include <asm/trace/irq_vectors.h>
#include <asm/kexec.h>
#include <asm/reboot.h>
#include <asm/virt.h>

/*
 *	Some notes on x86 processor bugs affecting SMP operation:
 *
 *	Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 *	The Linux implications for SMP are handled as follows:
 *
 *	Pentium III / [Xeon]
 *		None of the E1AP-E3AP errata are visible to the user.
 *
 *	E1AP.	see PII A1AP
 *	E2AP.	see PII A2AP
 *	E3AP.	see PII A3AP
 *
 *	Pentium II / [Xeon]
 *		None of the A1AP-A3AP errata are visible to the user.
 *
 *	A1AP.	see PPro 1AP
 *	A2AP.	see PPro 2AP
 *	A3AP.	see PPro 7AP
 *
 *	Pentium Pro
 *		None of the 1AP-9AP errata are visible to the normal user,
 *	except occasional delivery of 'spurious interrupt' as trap #15.
 *	This is very rare and a non-problem.
 *
 *	1AP.	Linux maps APIC as non-cacheable
 *	2AP.	worked around in hardware
 *	3AP.	fixed in C0 and later steppings by microcode update.
 *		Linux does not use excessive STARTUP_IPIs.
 *	4AP.	worked around in hardware
 *	5AP.	symmetric IO mode (normal Linux operation) not affected.
 *		'noapic' mode has vector 0xf filled out properly.
 *	6AP.	'noapic' mode might be affected - fixed in later steppings
 *	7AP.	We do not assume writes to the LVT deasserting IRQs
 *	8AP.	We do not enable low power mode (deep sleep) during MP bootup
 *	9AP.	We do not use mixed mode
 *
 *	Pentium
 *		There is a marginal case where REP MOVS on 100MHz SMP
 *	machines with B stepping processors can fail. XXX should provide
 *	an L1cache=Writethrough or L1cache=off option.
 *
 *		B stepping CPUs may hang. There are hardware workarounds
 *	for this. We warn about it in case your board doesn't have the
 *	workarounds. Basically that's so I can tell anyone with a B stepping
 *	CPU and SMP problems "tough".
 *
 *	Specific items [From Pentium Processor Specification Update]
 *
 *	1AP.	Linux doesn't use remote read
 *	2AP.	Linux doesn't trust APIC errors
 *	3AP.	We work around this
 *	4AP.	Linux never generates 3 interrupts of the same priority
 *		to cause a lost local interrupt.
 *	5AP.	Remote read is never used
 *	6AP.	not affected - worked around in hardware
 *	7AP.	not affected - worked around in hardware
 *	8AP.	worked around in hardware - we get explicit CS errors if not
 *	9AP.	only 'noapic' mode affected. Might generate spurious
 *		interrupts, we log only the first one and count the
 *		rest silently.
 *	10AP.	not affected - worked around in hardware
 *	11AP.	Linux reads the APIC between writes to avoid this, as per
 *		the documentation. Make sure you preserve this as it affects
 *		the C stepping chips too.
 *	12AP.	not affected - worked around in hardware
 *	13AP.	not affected - worked around in hardware
 *	14AP.	we always deassert INIT during bootup
 *	15AP.	not affected - worked around in hardware
 *	16AP.	not affected - worked around in hardware
 *	17AP.	not affected - worked around in hardware
 *	18AP.	not affected - worked around in hardware
 *	19AP.	not affected - worked around in BIOS
 *
 *	If this sounds worrying, believe me these bugs are either ___RARE___
 *	or are signal timing bugs worked around in hardware, and there is
 *	nothing of note from the C stepping onwards.
 */

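/*
 * stopping_cpu records which CPU won the race to stop the others (-1
 * while no shutdown is in progress); smp_no_nmi_ipi suppresses the NMI
 * fallback when "nonmi_ipi" is given on the kernel command line.
 */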
static atomic_t stopping_cpu = ATOMIC_INIT(-1);
static bool smp_no_nmi_ipi = false;

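/*
 * NMI fallback used when the REBOOT_VECTOR IPI fails to stop a CPU.
 * Returning NMI_HANDLED on the stopping CPU as well swallows the NMI
 * there instead of reporting it as an unknown NMI.
 */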
static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
{
	/* We are registered on the stopping CPU too, avoid spurious NMI */
	if (raw_smp_processor_id() == atomic_read(&stopping_cpu))
		return NMI_HANDLED;

	x86_virt_emergency_disable_virtualization_cpu();
	stop_this_cpu(NULL);

	return NMI_HANDLED;
}

/*
 * Handler for the REBOOT_VECTOR IPI: each CPU that receives it disables
 * virtualization and parks itself in stop_this_cpu().
 */
DEFINE_IDTENTRY_SYSVEC(sysvec_reboot)
{
	apic_eoi();
	x86_virt_emergency_disable_virtualization_cpu();
	stop_this_cpu(NULL);
}

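/*
 * Register smp_stop_nmi_callback at the head of the NMI handler chain
 * (NMI_FLAG_FIRST) so that a stop request is processed before any other
 * NMI users get a chance to run.
 */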
static int register_stop_handler(void)
{
	return register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
				    NMI_FLAG_FIRST, "smp_stop");
}

static void native_stop_other_cpus(int wait)
{
	unsigned int old_cpu, this_cpu;
	unsigned long flags, timeout;

	if (reboot_force)
		return;

	/* Only proceed if this is the first CPU to reach this code */
	old_cpu = -1;
	this_cpu = smp_processor_id();
	if (!atomic_try_cmpxchg(&stopping_cpu, &old_cpu, this_cpu))
		return;

	/* For kexec, ensure that offline CPUs are out of MWAIT and in HLT */
	if (kexec_in_progress)
		smp_kick_mwait_play_dead();

	/*
	 * 1) Send an IPI on the reboot vector to all other CPUs.
	 *
	 *    The other CPUs should react on it after leaving critical
	 *    sections and re-enabling interrupts. They might still hold
	 *    locks, but there is nothing which can be done about that.
	 *
	 * 2) Wait for all other CPUs to report that they reached the
	 *    HLT loop in stop_this_cpu()
	 *
	 * 3) If #2 timed out, send an NMI to the CPUs which have not
	 *    reported yet
	 *
	 * 4) Wait for all other CPUs to report that they reached the
	 *    HLT loop in stop_this_cpu()
	 *
	 * #3 can obviously race against a CPU reaching the HLT loop late.
	 * That CPU will have reported already and the "have all CPUs
	 * reached HLT" condition will be true despite the fact that the
	 * other CPU is still handling the NMI. Again, there is no
	 * protection against that as "disabled" APICs still respond to
	 * NMIs.
	 */
	cpumask_copy(&cpus_stop_mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, &cpus_stop_mask);

	if (!cpumask_empty(&cpus_stop_mask)) {
		apic_send_IPI_allbutself(REBOOT_VECTOR);

		/*
		 * Don't wait longer than a second for IPI completion. The
		 * wait request is not checked here because that would
		 * prevent an NMI shutdown attempt in case not all CPUs
		 * reach shutdown state.
		 */
		timeout = USEC_PER_SEC;
		while (!cpumask_empty(&cpus_stop_mask) && timeout--)
			udelay(1);
	}

	/* If the REBOOT_VECTOR didn't work, try with the NMI */
	if (!cpumask_empty(&cpus_stop_mask)) {
		/*
		 * If NMI IPI is enabled, try to register the stop handler
		 * and send the IPI. In any case try to wait for the other
		 * CPUs to stop.
		 */
		if (!smp_no_nmi_ipi && !register_stop_handler()) {
			unsigned int cpu;

			pr_emerg("Shutting down cpus with NMI\n");

			for_each_cpu(cpu, &cpus_stop_mask)
				__apic_send_IPI(cpu, NMI_VECTOR);
		}
		/*
		 * Don't wait longer than 10 ms if the caller didn't
		 * request it. If wait is true, the machine hangs here if
		 * one or more CPUs do not reach shutdown state.
		 */
		timeout = USEC_PER_MSEC * 10;
		while (!cpumask_empty(&cpus_stop_mask) && (wait || timeout--))
			udelay(1);
	}

	local_irq_save(flags);
	disable_local_APIC();
	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
	local_irq_restore(flags);

	/*
	 * Ensure that the cpus_stop_mask cache lines are invalidated on
	 * the other CPUs. See comment vs. SME in stop_this_cpu().
	 */
	cpumask_clear(&cpus_stop_mask);
}
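
/*
 * A minimal sketch of how this function is reached; the real wrapper
 * lives in arch/x86/include/asm/smp.h and is invoked from the machine
 * shutdown/reboot paths:
 *
 *	static inline void stop_other_cpus(void)
 *	{
 *		smp_ops.stop_other_cpus(1);	// wait for CPUs to stop
 *	}
 */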

/*
 * Reschedule callback. KVM uses this interrupt to force a CPU out of
 * guest mode.
 */
DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_reschedule_ipi)
{
	apic_eoi();
	trace_reschedule_entry(RESCHEDULE_VECTOR);
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();
	trace_reschedule_exit(RESCHEDULE_VECTOR);
}
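
/* irq_resched_count is what shows up as the "RES" row in /proc/interrupts. */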

DEFINE_IDTENTRY_SYSVEC(sysvec_call_function)
{
	apic_eoi();
	trace_call_function_entry(CALL_FUNCTION_VECTOR);
	inc_irq_stat(irq_call_count);
	generic_smp_call_function_interrupt();
	trace_call_function_exit(CALL_FUNCTION_VECTOR);
}

DEFINE_IDTENTRY_SYSVEC(sysvec_call_function_single)
{
	apic_eoi();
	trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR);
	inc_irq_stat(irq_call_count);
	generic_smp_call_function_single_interrupt();
	trace_call_function_single_exit(CALL_FUNCTION_SINGLE_VECTOR);
}
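
/*
 * Both handlers above account into irq_call_count, which shows up as the
 * "CAL" row in /proc/interrupts.
 */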

static int __init nonmi_ipi_setup(char *str)
{
	smp_no_nmi_ipi = true;
	return 1;
}

__setup("nonmi_ipi", nonmi_ipi_setup);
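
/*
 * Usage example: booting with "nonmi_ipi" on the kernel command line,
 * e.g. "vmlinuz ... nonmi_ipi", disables the NMI fallback above so that
 * only the REBOOT_VECTOR IPI is used to stop other CPUs.
 */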

struct smp_ops smp_ops = {
	.smp_prepare_boot_cpu	= native_smp_prepare_boot_cpu,
	.smp_prepare_cpus	= native_smp_prepare_cpus,
	.smp_cpus_done		= native_smp_cpus_done,

	.stop_other_cpus	= native_stop_other_cpus,
#if defined(CONFIG_CRASH_DUMP)
	.crash_stop_other_cpus	= kdump_nmi_shootdown_cpus,
#endif
	.smp_send_reschedule	= native_smp_send_reschedule,

	.kick_ap_alive		= native_kick_ap,
	.cpu_disable		= native_cpu_disable,
	.play_dead		= native_play_dead,

	.send_call_func_ipi	= native_send_call_func_ipi,
	.send_call_func_single_ipi = native_send_call_func_single_ipi,
};
EXPORT_SYMBOL_GPL(smp_ops);
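
/*
 * These are the default native callbacks; paravirtualized guests such as
 * Xen replace individual entries at boot. A minimal sketch of how the
 * indirection is consumed by the generic wrappers in
 * arch/x86/include/asm/smp.h (the exact wrapper name here is an
 * assumption of this sketch):
 *
 *	static inline void arch_smp_send_reschedule(int cpu)
 *	{
 *		smp_ops.smp_send_reschedule(cpu);
 *	}
 */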

int arch_cpu_rescan_dead_smt_siblings(void)
{
	enum cpuhp_smt_control old = cpu_smt_control;
	int ret;

	/*
	 * If SMT has been disabled and the SMT siblings are in HLT, bring
	 * them back online and offline them again so that they end up
	 * properly in MWAIT.
	 *
	 * Called with hotplug enabled.
	 */
	if (old != CPU_SMT_DISABLED && old != CPU_SMT_FORCE_DISABLED)
		return 0;

	ret = cpuhp_smt_enable();
	if (ret)
		return ret;

	ret = cpuhp_smt_disable(old);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_cpu_rescan_dead_smt_siblings);
327