// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	x86 SMP booting functions
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 *	(c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 *	Copyright 2001 Andi Kleen, SuSE Labs.
 *
 *	Much of the core SMP work is based on previous work by Thomas Radke, to
 *	whom a great many thanks are extended.
 *
 *	Thanks to Intel for making available several different Pentium,
 *	Pentium Pro and Pentium-II/Xeon MP machines.
 *	Original development of Linux SMP code supported by Caldera.
 *
 *	Fixes
 *		Felix Koop	:	NR_CPUS used properly
 *		Jose Renau	:	Handle single CPU case.
 *		Alan Cox	:	By repeated request 8) - Total BogoMIPS report.
 *		Greg Wright	:	Fix for kernel stacks panic.
 *		Erich Boleyn	:	MP v1.4 and additional changes.
 *	Matthias Sattler	:	Changes for 2.1 kernel map.
 *	Michel Lespinasse	:	Changes for 2.1 kernel map.
 *	Michael Chastain	:	Change trampoline.S to gnu as.
 *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *		Ingo Molnar	:	Added APIC timers, based on code
 *					from Jose Renau
 *		Ingo Molnar	:	various cleanups and rewrites
 *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
 *	Andi Kleen		:	Changed for SMP boot into long mode.
 *		Martin J. Bligh	:	Added support for multi-quad systems
 *		Dave Jones	:	Report invalid combinations of Athlon CPUs.
 *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
 *		Andi Kleen	:	Converted to new state machine.
 *	Ashok Raj		:	CPU hotplug support
 *	Glauber Costa		:	i386 and x86_64 integration
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/nmi.h>
#include <linux/tboot.h>
#include <linux/stackprotector.h>
#include <linux/gfp.h>
#include <linux/cpuidle.h>
#include <linux/numa.h>

#include <asm/acpi.h>
#include <asm/desc.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/realmode.h>
#include <asm/cpu.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/mwait.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/fpu/internal.h>
#include <asm/setup.h>
#include <asm/uv/uv.h>
#include <linux/mc146818rtc.h>
#include <asm/i8259.h>
#include <asm/misc.h>
#include <asm/qspinlock.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#include <asm/spec-ctrl.h>
#include <asm/hw_irq.h>

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* representing HT, core, and die siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map);
EXPORT_PER_CPU_SYMBOL(cpu_die_map);

DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

/* Logical package management. We might want to allocate that dynamically */
unsigned int __max_logical_packages __read_mostly;
EXPORT_SYMBOL(__max_logical_packages);
static unsigned int logical_packages __read_mostly;
static unsigned int logical_die __read_mostly;

/* Maximum number of SMT threads on any online core */
int __read_mostly __max_smt_threads = 1;

/* Flag to indicate if a complete sched domain rebuild is required */
bool x86_topology_update;

int arch_update_cpu_topology(void)
{
	int retval = x86_topology_update;

	x86_topology_update = false;
	return retval;
}

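/*
 * Background note (standard PC/AT convention, not defined by this file):
 * writing 0xA to CMOS register 0xF selects the "warm reset via 40:67
 * vector" shutdown code, and TRAMPOLINE_PHYS_HIGH/LOW are understood to
 * be the segment:offset halves of that BIOS reset vector at physical
 * 0x467, which the BIOS jumps through after the CPU comes out of INIT.
 */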
static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
{
	unsigned long flags;

	spin_lock_irqsave(&rtc_lock, flags);
	CMOS_WRITE(0xa, 0xf);
	spin_unlock_irqrestore(&rtc_lock, flags);
	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
							start_eip >> 4;
	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
							start_eip & 0xf;
}

static inline void smpboot_restore_warm_reset_vector(void)
{
	unsigned long flags;

	/*
	 * Paranoid:  Set warm reset code and vector here back
	 * to default values.
	 */
	spin_lock_irqsave(&rtc_lock, flags);
	CMOS_WRITE(0, 0xf);
	spin_unlock_irqrestore(&rtc_lock, flags);

	*((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
}

/*
 * Report back to the Boot Processor during boot time or to the caller processor
 * during CPU online.
 */
static void smp_callin(void)
{
	int cpuid;

	/*
	 * If woken up by an INIT in an 82489DX configuration,
	 * cpu_callout_mask guarantees we don't get here before
	 * an INIT_deassert IPI reaches our local APIC, so it is
	 * now safe to touch our local APIC.
	 */
	cpuid = smp_processor_id();

	/*
	 * The boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */
	apic_ap_setup();

	/*
	 * Save our processor parameters. Note: this information
	 * is needed for clock calibration.
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * The topology information must be up to date before
	 * calibrate_delay() and notify_cpu_starting().
	 */
	set_cpu_sibling_map(raw_smp_processor_id());

	/*
	 * Get our bogomips.
	 * Update loops_per_jiffy in cpu_data. Previous call to
	 * smp_store_cpu_info() stored a value that is close but not as
	 * accurate as the value just calculated.
	 */
	calibrate_delay();
	cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
	pr_debug("Stack at about %p\n", &cpuid);

	wmb();

	notify_cpu_starting(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpumask_set_cpu(cpuid, cpu_callin_mask);
}

static int cpu0_logical_apicid;
static int enable_start_cpu0;
/*
 * Activate a secondary processor.
 */
static void notrace start_secondary(void *unused)
{
	/*
	 * Don't put *anything* except direct CPU state initialization
	 * before cpu_init(). SMP booting is so fragile that we want to
	 * limit the things done here to the most necessary things.
	 */
	if (boot_cpu_has(X86_FEATURE_PCID))
		__write_cr4(__read_cr4() | X86_CR4_PCIDE);

#ifdef CONFIG_X86_32
	/* switch away from the initial page table */
	load_cr3(swapper_pg_dir);
	/*
	 * Initialize the CR4 shadow before doing anything that could
	 * try to read it.
	 */
	cr4_init_shadow();
	__flush_tlb_all();
#endif
	load_current_idt();
	cpu_init();
	x86_cpuinit.early_percpu_clock_init();
	preempt_disable();
	smp_callin();

	enable_start_cpu0 = 0;

	/* otherwise gcc will move up smp_processor_id() before cpu_init() */
	barrier();
	/*
	 * Check TSC synchronization with the boot CPU:
	 */
	check_tsc_sync_target();

	speculative_store_bypass_ht_init();

	/*
	 * Lock vector_lock, set CPU online and bring the vector
	 * allocator online. Online must be set with vector_lock held
	 * to prevent a concurrent irq setup/teardown from seeing a
	 * half valid vector space.
	 */
	lock_vector_lock();
	set_cpu_online(smp_processor_id(), true);
	lapic_online();
	unlock_vector_lock();
	cpu_set_state_online(smp_processor_id());
	x86_platform.nmi_init();

	/* enable local interrupts */
	local_irq_enable();

	/* to prevent fake stack check failure in clock setup */
	boot_init_stack_canary();

	x86_cpuinit.setup_percpu_clockev();

	wmb();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
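
/*
 * Note: start_secondary() ends in the idle loop; from there the common
 * CPU hotplug state machine (CPUHP_AP_ONLINE_IDLE onwards) completes
 * the bringup on behalf of the new CPU.
 */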

/**
 * topology_is_primary_thread - Check whether CPU is the primary SMT thread
 * @cpu:	CPU to check
 */
bool topology_is_primary_thread(unsigned int cpu)
{
	return apic_id_is_primary_thread(per_cpu(x86_cpu_to_apicid, cpu));
}

/**
 * topology_smt_supported - Check whether SMT is supported by the CPUs
 */
bool topology_smt_supported(void)
{
	return smp_num_siblings > 1;
}

/**
 * topology_phys_to_logical_pkg - Map a physical package id to a logical one
 * @phys_pkg:	The physical package id to map
 *
 * Returns logical package id or -1 if not found
 */
int topology_phys_to_logical_pkg(unsigned int phys_pkg)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_x86 *c = &cpu_data(cpu);

		if (c->initialized && c->phys_proc_id == phys_pkg)
			return c->logical_proc_id;
	}
	return -1;
}
EXPORT_SYMBOL(topology_phys_to_logical_pkg);

/**
 * topology_phys_to_logical_die - Map a physical die id to a logical one
 * @die_id:	The physical die id to map
 * @cur_cpu:	The CPU for which the mapping is done
 *
 * Returns logical die id or -1 if not found
 */
int topology_phys_to_logical_die(unsigned int die_id, unsigned int cur_cpu)
{
	int cpu;
	int proc_id = cpu_data(cur_cpu).phys_proc_id;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_x86 *c = &cpu_data(cpu);

		if (c->initialized && c->cpu_die_id == die_id &&
		    c->phys_proc_id == proc_id)
			return c->logical_die_id;
	}
	return -1;
}
EXPORT_SYMBOL(topology_phys_to_logical_die);

/**
 * topology_update_package_map - Update the physical to logical package map
 * @pkg:	The physical package id as retrieved via CPUID
 * @cpu:	The cpu for which this is updated
 */
int topology_update_package_map(unsigned int pkg, unsigned int cpu)
{
	int new;

	/* Already available somewhere? */
	new = topology_phys_to_logical_pkg(pkg);
	if (new >= 0)
		goto found;

	new = logical_packages++;
	if (new != pkg) {
		pr_info("CPU %u Converting physical %u to logical package %u\n",
			cpu, pkg, new);
	}
found:
	cpu_data(cpu).logical_proc_id = new;
	return 0;
}

/**
 * topology_update_die_map - Update the physical to logical die map
 * @die:	The die id as retrieved via CPUID
 * @cpu:	The cpu for which this is updated
 */
int topology_update_die_map(unsigned int die, unsigned int cpu)
{
	int new;

	/* Already available somewhere? */
	new = topology_phys_to_logical_die(die, cpu);
	if (new >= 0)
		goto found;

	new = logical_die++;
	if (new != die) {
		pr_info("CPU %u Converting physical %u to logical die %u\n",
			cpu, die, new);
	}
found:
	cpu_data(cpu).logical_die_id = new;
	return 0;
}

void __init smp_store_boot_cpu_info(void)
{
	int id = 0; /* CPU 0 */
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;
	c->cpu_index = id;
	topology_update_package_map(c->phys_proc_id, id);
	topology_update_die_map(c->cpu_die_id, id);
	c->initialized = true;
}

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU.
 */
void smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	/* Copy boot_cpu_data only on the first bringup */
	if (!c->initialized)
		*c = boot_cpu_data;
	c->cpu_index = id;
	/*
	 * During boot time, CPU0 has this setup already. Save the info when
	 * bringing up an AP or an offlined CPU0.
	 */
	identify_secondary_cpu(c);
	c->initialized = true;
}

static bool
topology_same_node(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

	return (cpu_to_node(cpu1) == cpu_to_node(cpu2));
}

static bool
topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
{
	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

	return !WARN_ONCE(!topology_same_node(c, o),
		"sched: CPU #%d's %s-sibling CPU #%d is not on the same node! "
		"[node: %d != %d]. Ignoring dependency.\n",
		cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2));
}

#define link_mask(mfunc, c1, c2)					\
do {									\
	cpumask_set_cpu((c1), mfunc(c2));				\
	cpumask_set_cpu((c2), mfunc(c1));				\
} while (0)
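
/*
 * Illustrative example (not an actual call site in this file):
 * link_mask(topology_sibling_cpumask, 2, 3) marks CPUs 2 and 3 as SMT
 * siblings of each other, keeping the two per-CPU masks symmetric.
 */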

static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

		if (c->phys_proc_id == o->phys_proc_id &&
		    c->cpu_die_id == o->cpu_die_id &&
		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
			if (c->cpu_core_id == o->cpu_core_id)
				return topology_sane(c, o, "smt");

			if ((c->cu_id != 0xff) &&
			    (o->cu_id != 0xff) &&
			    (c->cu_id == o->cu_id))
				return topology_sane(c, o, "smt");
		}

	} else if (c->phys_proc_id == o->phys_proc_id &&
		   c->cpu_die_id == o->cpu_die_id &&
		   c->cpu_core_id == o->cpu_core_id) {
		return topology_sane(c, o, "smt");
	}

	return false;
}

/*
 * Define snc_cpu[] for SNC (Sub-NUMA Cluster) CPUs.
 *
 * These are Intel CPUs that enumerate an LLC that is shared by
 * multiple NUMA nodes. The LLC on these systems is shared for
 * off-package data access but private to the NUMA node (half
 * of the package) for on-package access.
 *
 * CPUID (the source of the information about the LLC) can only
 * enumerate the cache as being shared *or* unshared, but not
 * this particular configuration. The CPU in this case enumerates
 * the cache to be shared across the entire package (spanning both
 * NUMA nodes).
 */

static const struct x86_cpu_id snc_cpu[] = {
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X },
	{}
};

static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

	/* Do not match if we do not have a valid APICID for cpu: */
	if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID)
		return false;

	/* Do not match if LLC id does not match: */
	if (per_cpu(cpu_llc_id, cpu1) != per_cpu(cpu_llc_id, cpu2))
		return false;

	/*
	 * Allow the SNC topology without warning. Return of false
	 * means 'c' does not share the LLC of 'o'. This will be
	 * reflected to userspace.
	 */
	if (!topology_same_node(c, o) && x86_match_cpu(snc_cpu))
		return false;

	return topology_sane(c, o, "llc");
}

/*
 * Unlike the other levels, we do not enforce keeping a
 * multicore group inside a NUMA node.  If this happens, we will
 * discard the MC level of the topology later.
 */
static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	if (c->phys_proc_id == o->phys_proc_id)
		return true;
	return false;
}

static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	if ((c->phys_proc_id == o->phys_proc_id) &&
		(c->cpu_die_id == o->cpu_die_id))
		return true;
	return false;
}


#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC)
static inline int x86_sched_itmt_flags(void)
{
	return sysctl_sched_itmt_enabled ? SD_ASYM_PACKING : 0;
}

#ifdef CONFIG_SCHED_MC
static int x86_core_flags(void)
{
	return cpu_core_flags() | x86_sched_itmt_flags();
}
#endif
#ifdef CONFIG_SCHED_SMT
static int x86_smt_flags(void)
{
	return cpu_smt_flags() | x86_sched_itmt_flags();
}
#endif
#endif

static struct sched_domain_topology_level x86_numa_in_package_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) },
#endif
	{ NULL, },
};

static struct sched_domain_topology_level x86_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};
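
/*
 * With both config options enabled the default hierarchy above is
 * SMT -> MC -> DIE; x86_numa_in_package_topology deliberately omits the
 * DIE level so the NUMA nodes inside a package form the top level
 * instead (see native_smp_cpus_done()).
 */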

/*
 * Set if a package/die has multiple NUMA nodes inside.
 * AMD Magny-Cours, Intel Cluster-on-Die, and Intel
 * Sub-NUMA Clustering have this.
 */
static bool x86_has_numa_in_package;

void set_cpu_sibling_map(int cpu)
{
	bool has_smt = smp_num_siblings > 1;
	bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct cpuinfo_x86 *o;
	int i, threads;

	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);

	if (!has_mp) {
		cpumask_set_cpu(cpu, topology_sibling_cpumask(cpu));
		cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
		cpumask_set_cpu(cpu, topology_core_cpumask(cpu));
		cpumask_set_cpu(cpu, topology_die_cpumask(cpu));
		c->booted_cores = 1;
		return;
	}

	for_each_cpu(i, cpu_sibling_setup_mask) {
		o = &cpu_data(i);

		if ((i == cpu) || (has_smt && match_smt(c, o)))
			link_mask(topology_sibling_cpumask, cpu, i);

		if ((i == cpu) || (has_mp && match_llc(c, o)))
			link_mask(cpu_llc_shared_mask, cpu, i);

	}

	/*
	 * This needs a separate iteration over the cpus because we rely on all
	 * topology_sibling_cpumask links to be set-up.
	 */
	for_each_cpu(i, cpu_sibling_setup_mask) {
		o = &cpu_data(i);

		if ((i == cpu) || (has_mp && match_pkg(c, o))) {
			link_mask(topology_core_cpumask, cpu, i);

			/*
			 * Does this new cpu bring up a new core?
			 */
			if (cpumask_weight(
			    topology_sibling_cpumask(cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (cpumask_first(
				    topology_sibling_cpumask(i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
		if (match_pkg(c, o) && !topology_same_node(c, o))
			x86_has_numa_in_package = true;

		if ((i == cpu) || (has_mp && match_die(c, o)))
			link_mask(topology_die_cpumask, cpu, i);
	}

	threads = cpumask_weight(topology_sibling_cpumask(cpu));
	if (threads > __max_smt_threads)
		__max_smt_threads = threads;
}

/* maps the cpu to the sched domain representing multi-core */
const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return cpu_llc_shared_mask(cpu);
}

static void impress_friends(void)
{
	int cpu;
	unsigned long bogosum = 0;
	/*
	 * Allow the user to impress friends.
	 */
	pr_debug("Before bogomips\n");
	for_each_possible_cpu(cpu)
		if (cpumask_test_cpu(cpu, cpu_callout_mask))
			bogosum += cpu_data(cpu).loops_per_jiffy;
	pr_info("Total of %d processors activated (%lu.%02lu BogoMIPS)\n",
		num_online_cpus(),
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	pr_debug("Before bogocount - setting activated=1\n");
}

void __inquire_remote_apic(int apicid)
{
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	const char * const names[] = { "ID", "VERSION", "SPIV" };
	int timeout;
	u32 status;

	pr_info("Inquiring remote APIC 0x%x...\n", apicid);

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		pr_info("... APIC 0x%x %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		status = safe_apic_wait_icr_idle();
		if (status)
			pr_cont("a previous APIC delivery may have failed\n");

		apic_icr_write(APIC_DM_REMRD | regs[i], apicid);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			pr_cont("%08x\n", status);
			break;
		default:
			pr_cont("failed\n");
		}
	}
}

/*
 * The Multiprocessor Specification 1.4 (1997) example code suggests
 * that there should be a 10ms delay between the BSP asserting INIT
 * and de-asserting INIT, when starting a remote processor.
 * But that slows boot and resume on modern processors, which include
 * many cores and don't require that delay.
 *
 * Cmdline "cpu_init_udelay=" is available to override this delay.
 * Modern processor families are quirked to remove the delay entirely.
 */
#define UDELAY_10MS_DEFAULT 10000

static unsigned int init_udelay = UINT_MAX;

static int __init cpu_init_udelay(char *str)
{
	get_option(&str, &init_udelay);

	return 0;
}
early_param("cpu_init_udelay", cpu_init_udelay);

static void __init smp_quirk_init_udelay(void)
{
	/* if cmdline changed it from default, leave it alone */
	if (init_udelay != UINT_MAX)
		return;

	/* if modern processor, use no delay */
	if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
	    ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && (boot_cpu_data.x86 >= 0x18)) ||
	    ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
		init_udelay = 0;
		return;
	}
	/* else, use legacy delay */
	init_udelay = UDELAY_10MS_DEFAULT;
}
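
/*
 * Example: booting with "cpu_init_udelay=10000" forces the MP spec 10ms
 * INIT deassert delay even on processor families that the quirk above
 * would otherwise reduce to zero.
 */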

/*
 * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
 * won't ... remember to clear down the APIC, etc later.
 */
int
wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip)
{
	unsigned long send_status, accept_status = 0;
	int maxlvt;

	/* Target chip */
	/* Boot on the stack */
	/* Kick the second */
	apic_icr_write(APIC_DM_NMI | apic->dest_logical, apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	/*
	 * Give the other CPU some time to accept the IPI.
	 */
	udelay(200);
	if (APIC_INTEGRATED(boot_cpu_apic_version)) {
		maxlvt = lapic_get_maxlvt();
		if (maxlvt > 3)			/* Due to the Pentium erratum 3AP.  */
			apic_write(APIC_ESR, 0);
		accept_status = (apic_read(APIC_ESR) & 0xEF);
	}
	pr_debug("NMI sent\n");

	if (send_status)
		pr_err("APIC never delivered???\n");
	if (accept_status)
		pr_err("APIC delivery error (%lx)\n", accept_status);

	return (send_status | accept_status);
}

static int
wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
{
	unsigned long send_status = 0, accept_status = 0;
	int maxlvt, num_starts, j;

	maxlvt = lapic_get_maxlvt();

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(boot_cpu_apic_version)) {
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
			apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	pr_debug("Asserting INIT\n");

	/*
	 * Turn INIT on target chip
	 */
	/*
	 * Send IPI
	 */
	apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
		       phys_apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	udelay(init_udelay);

	pr_debug("Deasserting INIT\n");

	/* Target chip */
	/* Send IPI */
	apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mb();

	/*
	 * Should we send STARTUP IPIs?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(boot_cpu_apic_version))
		num_starts = 2;
	else
		num_starts = 0;

	/*
	 * Run STARTUP IPI loop.
	 */
	pr_debug("#startup loops: %d\n", num_starts);

	for (j = 1; j <= num_starts; j++) {
		pr_debug("Sending STARTUP #%d\n", j);
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
			apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		pr_debug("After apic_write\n");

		/*
		 * STARTUP IPI
		 */

		/* Target chip */
		/* Boot on the stack */
		/* Kick the second */
		apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
			       phys_apicid);

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		if (init_udelay == 0)
			udelay(10);
		else
			udelay(300);

		pr_debug("Startup point 1\n");

		pr_debug("Waiting for send to finish...\n");
		send_status = safe_apic_wait_icr_idle();

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		if (init_udelay == 0)
			udelay(10);
		else
			udelay(200);

		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
			apic_write(APIC_ESR, 0);
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	pr_debug("After Startup\n");

	if (send_status)
		pr_err("APIC never delivered???\n");
	if (accept_status)
		pr_err("APIC delivery error (%lx)\n", accept_status);

	return (send_status | accept_status);
}
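
/*
 * Note on the STARTUP IPI above: the vector field carries start_eip >> 12,
 * i.e. the 4K page number of the real-mode trampoline, which is why
 * do_boot_cpu() insists that start_ip be page-aligned.
 */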

/* reduce the number of lines printed when booting a large cpu count system */
static void announce_cpu(int cpu, int apicid)
{
	static int current_node = NUMA_NO_NODE;
	int node = early_cpu_to_node(cpu);
	static int width, node_width;

	if (!width)
		width = num_digits(num_possible_cpus()) + 1; /* + '#' sign */

	if (!node_width)
		node_width = num_digits(num_possible_nodes()) + 1; /* + '#' */

	if (cpu == 1)
		printk(KERN_INFO "x86: Booting SMP configuration:\n");

	if (system_state < SYSTEM_RUNNING) {
		if (node != current_node) {
			if (current_node > (-1))
				pr_cont("\n");
			current_node = node;

			printk(KERN_INFO ".... node %*s#%d, CPUs:  ",
			       node_width - num_digits(node), " ", node);
		}

		/* Add padding for the BSP */
		if (cpu == 1)
			pr_cont("%*s", width + 1, " ");

		pr_cont("%*s#%d", width - num_digits(cpu), " ", cpu);

	} else
		pr_info("Booting Node %d Processor %d APIC 0x%x\n",
			node, cpu, apicid);
}

static int wakeup_cpu0_nmi(unsigned int cmd, struct pt_regs *regs)
{
	int cpu;

	cpu = smp_processor_id();
	if (cpu == 0 && !cpu_online(cpu) && enable_start_cpu0)
		return NMI_HANDLED;

	return NMI_DONE;
}

/*
 * Wake up AP by INIT, INIT, STARTUP sequence.
 *
 * Instead of waiting for STARTUP after INITs, BSP will execute the BIOS
 * boot-strap code which is not a desired behavior for waking up BSP. To
 * avoid the boot-strap code, wake up CPU0 by NMI instead.
 *
 * This works to wake up soft offlined CPU0 only. If CPU0 is hard offlined
 * (i.e. physically hot removed and then hot added), NMI won't wake it up.
 * We'll change this code in the future to wake up hard offlined CPU0 if
 * real platform and request are available.
 */
static int
wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid,
	       int *cpu0_nmi_registered)
{
	int id;
	int boot_error;

	preempt_disable();

	/*
	 * Wake up AP by INIT, INIT, STARTUP sequence.
	 */
	if (cpu) {
		boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);
		goto out;
	}

	/*
	 * Wake up BSP by NMI.
	 *
	 * Register an NMI handler to help wake up CPU0.
	 */
	boot_error = register_nmi_handler(NMI_LOCAL,
					  wakeup_cpu0_nmi, 0, "wake_cpu0");

	if (!boot_error) {
		enable_start_cpu0 = 1;
		*cpu0_nmi_registered = 1;
		if (apic->dest_logical == APIC_DEST_LOGICAL)
			id = cpu0_logical_apicid;
		else
			id = apicid;
		boot_error = wakeup_secondary_cpu_via_nmi(id, start_ip);
	}

out:
	preempt_enable();

	return boot_error;
}

int common_cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/* Just in case we booted with a single CPU. */
	alternatives_enable_smp();

	per_cpu(current_task, cpu) = idle;

	/* Initialize the interrupt stack(s) */
	ret = irq_init_percpu_irqstack(cpu);
	if (ret)
		return ret;

#ifdef CONFIG_X86_32
	/* Stack for startup_32 can be just as for start_secondary onwards */
	per_cpu(cpu_current_top_of_stack, cpu) = task_top_of_stack(idle);
#else
	initial_gs = per_cpu_offset(cpu);
#endif
	return 0;
}

/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (i.e. clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if CPU booted OK, else error code from
 * ->wakeup_secondary_cpu.
 */
static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
		       int *cpu0_nmi_registered)
{
	volatile u32 *trampoline_status =
		(volatile u32 *) __va(real_mode_header->trampoline_status);
	/* start_ip had better be page-aligned! */
	unsigned long start_ip = real_mode_header->trampoline_start;

	unsigned long boot_error = 0;
	unsigned long timeout;

	idle->thread.sp = (unsigned long)task_pt_regs(idle);
	early_gdt_descr.address = (unsigned long)get_cpu_gdt_rw(cpu);
	initial_code = (unsigned long)start_secondary;
	initial_stack  = idle->thread.sp;

	/* Enable the espfix hack for this CPU */
	init_espfix_ap(cpu);

	/* So we see what's up */
	announce_cpu(cpu, apicid);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	if (x86_platform.legacy.warm_reset) {

		pr_debug("Setting warm reset code and vector.\n");

		smpboot_setup_warm_reset_vector(start_ip);
		/*
		 * Be paranoid about clearing APIC errors.
		 */
		if (APIC_INTEGRATED(boot_cpu_apic_version)) {
			apic_write(APIC_ESR, 0);
			apic_read(APIC_ESR);
		}
	}

	/*
	 * AP might wait on cpu_callout_mask in cpu_init() with
	 * cpu_initialized_mask set if previous attempt to online
	 * it timed-out. Clear cpu_initialized_mask so that after
	 * INIT/SIPI it could start with a clean state.
	 */
	cpumask_clear_cpu(cpu, cpu_initialized_mask);
	smp_mb();

	/*
	 * Wake up a CPU in different cases:
	 * - Use the method in the APIC driver if it's defined
	 * Otherwise,
	 * - Use an INIT boot APIC message for APs or NMI for BSP.
	 */
	if (apic->wakeup_secondary_cpu)
		boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
	else
		boot_error = wakeup_cpu_via_init_nmi(cpu, start_ip, apicid,
						     cpu0_nmi_registered);

	if (!boot_error) {
		/*
		 * Wait 10s total for first sign of life from AP
		 */
		boot_error = -1;
		timeout = jiffies + 10*HZ;
		while (time_before(jiffies, timeout)) {
			if (cpumask_test_cpu(cpu, cpu_initialized_mask)) {
				/*
				 * Tell AP to proceed with initialization
				 */
				cpumask_set_cpu(cpu, cpu_callout_mask);
				boot_error = 0;
				break;
			}
			schedule();
		}
	}

	if (!boot_error) {
		/*
		 * Wait till AP completes initial initialization
		 */
		while (!cpumask_test_cpu(cpu, cpu_callin_mask)) {
			/*
			 * Allow other tasks to run while we wait for the
			 * AP to come online. This also gives a chance
			 * for the MTRR work (triggered by the AP coming online)
			 * to be completed in the stop machine context.
			 */
			schedule();
		}
	}

	/* mark "stuck" area as not stuck */
	*trampoline_status = 0;

	if (x86_platform.legacy.warm_reset) {
		/*
		 * Cleanup possible dangling ends...
		 */
		smpboot_restore_warm_reset_vector();
	}

	return boot_error;
}

int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int apicid = apic->cpu_present_to_apicid(cpu);
	int cpu0_nmi_registered = 0;
	unsigned long flags;
	int err, ret = 0;

	lockdep_assert_irqs_enabled();

	pr_debug("++++++++++++++++++++=_---CPU UP  %u\n", cpu);

	if (apicid == BAD_APICID ||
	    !physid_isset(apicid, phys_cpu_present_map) ||
	    !apic->apic_id_valid(apicid)) {
		pr_err("%s: bad cpu %d\n", __func__, cpu);
		return -EINVAL;
	}

	/*
	 * Already booted CPU?
	 */
	if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
		pr_debug("do_boot_cpu %d Already started\n", cpu);
		return -ENOSYS;
	}

	/*
	 * Save current MTRR state in case it was changed since early boot
	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
	 */
	mtrr_save_state();

	/* x86 CPUs take themselves offline, so delayed offline is OK. */
	err = cpu_check_up_prepare(cpu);
	if (err && err != -EBUSY)
		return err;

	/* the FPU context is blank, nobody can own it */
	per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;

	err = common_cpu_up(cpu, tidle);
	if (err)
		return err;

	err = do_boot_cpu(apicid, cpu, tidle, &cpu0_nmi_registered);
	if (err) {
		pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
		ret = -EIO;
		goto unreg_nmi;
	}

	/*
	 * Check TSC synchronization with the AP (keep irqs disabled
	 * while doing so):
	 */
	local_irq_save(flags);
	check_tsc_sync_source(cpu);
	local_irq_restore(flags);

	while (!cpu_online(cpu)) {
		cpu_relax();
		touch_nmi_watchdog();
	}

unreg_nmi:
	/*
	 * Clean up the NMI handler. Do this after the callin and callout sync
	 * to avoid impact of possible long unregister time.
	 */
	if (cpu0_nmi_registered)
		unregister_nmi_handler(NMI_LOCAL, "wake_cpu0");

	return ret;
}

/**
 * arch_disable_smp_support() - disables SMP support for x86 at runtime
 */
void arch_disable_smp_support(void)
{
	disable_ioapic_support();
}

/*
 * Fall back to non-SMP mode after errors.
 *
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __init void disable_smp(void)
{
	pr_info("SMP disabled\n");

	disable_ioapic_support();

	init_cpu_present(cpumask_of(0));
	init_cpu_possible(cpumask_of(0));

	if (smp_found_config)
		physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
	else
		physid_set_mask_of_physid(0, &phys_cpu_present_map);
	cpumask_set_cpu(0, topology_sibling_cpumask(0));
	cpumask_set_cpu(0, topology_core_cpumask(0));
	cpumask_set_cpu(0, topology_die_cpumask(0));
}

/*
 * Various sanity checks.
 */
static void __init smp_sanity_check(void)
{
	preempt_disable();

#if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32)
	if (def_to_bigsmp && nr_cpu_ids > 8) {
		unsigned int cpu;
		unsigned nr;

		pr_warn("More than 8 CPUs detected - skipping them\n"
			"Use CONFIG_X86_BIGSMP\n");

		nr = 0;
		for_each_present_cpu(cpu) {
			if (nr >= 8)
				set_cpu_present(cpu, false);
			nr++;
		}

		nr = 0;
		for_each_possible_cpu(cpu) {
			if (nr >= 8)
				set_cpu_possible(cpu, false);
			nr++;
		}

		nr_cpu_ids = 8;
	}
#endif

	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		pr_warn("weird, boot CPU (#%d) not listed by the BIOS\n",
			hard_smp_processor_id());

		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 */
	if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) {
		pr_notice("weird, boot CPU (#%d) not listed by the BIOS\n",
			  boot_cpu_physical_apicid);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}
	preempt_enable();
}

static void __init smp_cpu_index_default(void)
{
	int i;
	struct cpuinfo_x86 *c;

	for_each_possible_cpu(i) {
		c = &cpu_data(i);
		/* mark all to hotplug */
		c->cpu_index = nr_cpu_ids;
	}
}

static void __init smp_get_logical_apicid(void)
{
	if (x2apic_mode)
		cpu0_logical_apicid = apic_read(APIC_LDR);
	else
		cpu0_logical_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
}
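
/*
 * Note: in x2APIC mode the LDR holds a full 32-bit logical ID and is
 * used as-is; in xAPIC mode GET_APIC_LOGICAL_ID() extracts the 8-bit
 * logical ID field from the register.
 */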

/*
 * Prepare for SMP bootup.
 * @max_cpus: configured maximum number of CPUs. It is a legacy parameter
 *            kept for common interface support.
 */
void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int i;

	smp_cpu_index_default();

	/*
	 * Setup boot CPU information
	 */
	smp_store_boot_cpu_info(); /* Final full version of the data */
	cpumask_copy(cpu_callin_mask, cpumask_of(0));
	mb();

	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_die_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
	}

	/*
	 * Set 'default' x86 topology, this matches default_topology() in that
	 * it has NUMA nodes as a topology level. See also
	 * native_smp_cpus_done().
	 *
	 * Must be done before set_cpu_sibling_map() is run.
	 */
	set_sched_topology(x86_topology);

	set_cpu_sibling_map(0);

	smp_sanity_check();

	switch (apic_intr_mode) {
	case APIC_PIC:
	case APIC_VIRTUAL_WIRE_NO_CONFIG:
		disable_smp();
		return;
	case APIC_SYMMETRIC_IO_NO_ROUTING:
		disable_smp();
		/* Setup local timer */
		x86_init.timers.setup_percpu_clockev();
		return;
	case APIC_VIRTUAL_WIRE:
	case APIC_SYMMETRIC_IO:
		break;
	}

	/* Setup local timer */
	x86_init.timers.setup_percpu_clockev();

	smp_get_logical_apicid();

	pr_info("CPU0: ");
	print_cpu_info(&cpu_data(0));

	native_pv_lock_init();

	uv_system_init();

	set_mtrr_aps_delayed_init();

	smp_quirk_init_udelay();

	speculative_store_bypass_ht_init();
}

void arch_enable_nonboot_cpus_begin(void)
{
	set_mtrr_aps_delayed_init();
}

void arch_enable_nonboot_cpus_end(void)
{
	mtrr_aps_init();
}

/*
 * Early setup to make printk work.
 */
void __init native_smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();
	switch_to_new_gdt(me);
	/* already set me in cpu_online_mask in boot_cpu_init() */
	cpumask_set_cpu(me, cpu_callout_mask);
	cpu_set_state_online(me);
}

void __init calculate_max_logical_packages(void)
{
	int ncpus;

	/*
	 * Today neither Intel nor AMD support heterogeneous systems so
	 * extrapolate the boot cpu's data to all packages.
	 */
	ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
	__max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
	pr_info("Max logical packages: %u\n", __max_logical_packages);
}

void __init native_smp_cpus_done(unsigned int max_cpus)
{
	pr_debug("Boot done\n");

	calculate_max_logical_packages();

	if (x86_has_numa_in_package)
		set_sched_topology(x86_numa_in_package_topology);

	nmi_selftest();
	impress_friends();
	mtrr_aps_init();
}

static int __initdata setup_possible_cpus = -1;
static int __init _setup_possible_cpus(char *str)
{
	get_option(&str, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);


/*
 * cpu_possible_mask should be static: it cannot change as CPUs
 * are onlined or offlined. The reason is that per-cpu data structures
 * are allocated by some modules at init time, and they don't expect to
 * do this dynamically on cpu arrival/departure.
 * cpu_present_mask on the other hand can change dynamically.
 * When cpu_hotplug is not compiled in, we fall back to the current
 * behaviour, where cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
 * - The user can overwrite it with possible_cpus=NUM
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 * -AK
 */
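/*
 * Example: booting with "possible_cpus=8" sizes cpu_possible_mask for 8
 * CPUs even if fewer are present at boot, leaving the remainder as
 * hotplug slots.
 */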
__init void prefill_possible_map(void)
{
	int i, possible;

	/* No boot processor was found in mptable or ACPI MADT */
	if (!num_processors) {
		if (boot_cpu_has(X86_FEATURE_APIC)) {
			int apicid = boot_cpu_physical_apicid;
			int cpu = hard_smp_processor_id();

			pr_warn("Boot CPU (id %d) not listed by BIOS\n", cpu);

			/* Make sure boot cpu is enumerated */
			if (apic->cpu_present_to_apicid(0) == BAD_APICID &&
			    apic->apic_id_valid(apicid))
				generic_processor_info(apicid, boot_cpu_apic_version);
		}

		if (!num_processors)
			num_processors = 1;
	}

	i = setup_max_cpus ?: 1;
	if (setup_possible_cpus == -1) {
		possible = num_processors;
#ifdef CONFIG_HOTPLUG_CPU
		if (setup_max_cpus)
			possible += disabled_cpus;
#else
		if (possible > i)
			possible = i;
#endif
	} else
		possible = setup_possible_cpus;

	total_cpus = max_t(int, possible, num_processors + disabled_cpus);

	/* nr_cpu_ids could be reduced via nr_cpus= */
	if (possible > nr_cpu_ids) {
		pr_warn("%d Processors exceeds NR_CPUS limit of %u\n",
			possible, nr_cpu_ids);
		possible = nr_cpu_ids;
	}

#ifdef CONFIG_HOTPLUG_CPU
	if (!setup_max_cpus)
#endif
	if (possible > i) {
		pr_warn("%d Processors exceeds max_cpus limit of %u\n",
			possible, setup_max_cpus);
		possible = i;
	}

	nr_cpu_ids = possible;

	pr_info("Allowing %d CPUs, %d hotplug CPUs\n",
		possible, max_t(int, possible - num_processors, 0));

	reset_cpu_possible_mask();

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
}

#ifdef CONFIG_HOTPLUG_CPU

/* Recompute SMT state for all CPUs on offline */
static void recompute_smt_state(void)
{
	int max_threads, cpu;

	max_threads = 0;
	for_each_online_cpu (cpu) {
		int threads = cpumask_weight(topology_sibling_cpumask(cpu));

		if (threads > max_threads)
			max_threads = threads;
	}
	__max_smt_threads = max_threads;
}

static void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu(sibling, topology_core_cpumask(cpu)) {
		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu(sibling, topology_die_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_die_cpumask(sibling));
	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
	for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
		cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
	cpumask_clear(cpu_llc_shared_mask(cpu));
	cpumask_clear(topology_sibling_cpumask(cpu));
	cpumask_clear(topology_core_cpumask(cpu));
	cpumask_clear(topology_die_cpumask(cpu));
	c->cpu_core_id = 0;
	c->booted_cores = 0;
	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
	recompute_smt_state();
}

static void remove_cpu_from_maps(int cpu)
{
	set_cpu_online(cpu, false);
	cpumask_clear_cpu(cpu, cpu_callout_mask);
	cpumask_clear_cpu(cpu, cpu_callin_mask);
	/* was set by cpu_init() */
	cpumask_clear_cpu(cpu, cpu_initialized_mask);
	numa_remove_cpu(cpu);
}

void cpu_disable_common(void)
{
	int cpu = smp_processor_id();

	remove_siblinginfo(cpu);

	/* It's now safe to remove this processor from the online map */
	lock_vector_lock();
	remove_cpu_from_maps(cpu);
	unlock_vector_lock();
	fixup_irqs();
	lapic_offline();
}

int native_cpu_disable(void)
{
	int ret;

	ret = lapic_can_unplug_cpu();
	if (ret)
		return ret;

	clear_local_APIC();
	cpu_disable_common();

	return 0;
}

int common_cpu_die(unsigned int cpu)
{
	int ret = 0;

	/* We don't do anything here: idle task is faking death itself. */

	/* They ack this in play_dead() by setting CPU_DEAD */
	if (cpu_wait_death(cpu, 5)) {
		if (system_state == SYSTEM_RUNNING)
			pr_info("CPU %u is now offline\n", cpu);
	} else {
		pr_err("CPU %u didn't die...\n", cpu);
		ret = -1;
	}

	return ret;
}

void native_cpu_die(unsigned int cpu)
{
	common_cpu_die(cpu);
}

void play_dead_common(void)
{
	idle_task_exit();

	/* Ack it */
	(void)cpu_report_death();

	/*
	 * With physical CPU hotplug, we should halt the cpu
	 */
	local_irq_disable();
}

static bool wakeup_cpu0(void)
{
	if (smp_processor_id() == 0 && enable_start_cpu0)
		return true;

	return false;
}

/*
 * We need to flush the caches before going to sleep, lest we have
 * dirty data in our caches when we come back up.
 */
static inline void mwait_play_dead(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int highest_cstate = 0;
	unsigned int highest_subcstate = 0;
	void *mwait_ptr;
	int i;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		return;
	if (!this_cpu_has(X86_FEATURE_MWAIT))
		return;
	if (!this_cpu_has(X86_FEATURE_CLFLUSH))
		return;
	if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
		return;

	eax = CPUID_MWAIT_LEAF;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);

	/*
	 * eax will be 0 if EDX enumeration is not valid.
	 * Otherwise it is initialized below to the (cstate, sub_cstate)
	 * MWAIT hint built from the EDX enumeration.
	 */
	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
		eax = 0;
	} else {
		edx >>= MWAIT_SUBSTATE_SIZE;
		for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
			if (edx & MWAIT_SUBSTATE_MASK) {
				highest_cstate = i;
				highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
			}
		}
		eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
			(highest_subcstate - 1);
	}
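	/*
	 * Illustrative example: if the deepest enumerated C-state is at
	 * index 2 with two sub-states, the hint above works out to
	 * (2 << MWAIT_SUBSTATE_SIZE) | 1.
	 */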

	/*
	 * This should be a memory location in a cache line which is
	 * unlikely to be touched by other processors.  The actual
	 * content is immaterial as it is not actually modified in any way.
	 */
	mwait_ptr = &current_thread_info()->flags;

	wbinvd();

	while (1) {
		/*
		 * The CLFLUSH is a workaround for erratum AAI65 for
		 * the Xeon 7400 series.  It's not clear it is actually
		 * needed, but it should be harmless in either case.
		 * The WBINVD is insufficient due to the spurious-wakeup
		 * case where we return around the loop.
		 */
		mb();
		clflush(mwait_ptr);
		mb();
		__monitor(mwait_ptr, 0, 0);
		mb();
		__mwait(eax, 0);
		/*
		 * If NMI wants to wake up CPU0, start CPU0.
		 */
		if (wakeup_cpu0())
			start_cpu0();
	}
}

void hlt_play_dead(void)
{
	if (__this_cpu_read(cpu_info.x86) >= 4)
		wbinvd();

	while (1) {
		native_halt();
		/*
		 * If NMI wants to wake up CPU0, start CPU0.
		 */
		if (wakeup_cpu0())
			start_cpu0();
	}
}

void native_play_dead(void)
{
	play_dead_common();
	tboot_shutdown(TB_SHUTDOWN_WFS);

	mwait_play_dead();	/* Only returns on failure */
	if (cpuidle_play_dead())
		hlt_play_dead();
}

#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(void)
{
	return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

void native_play_dead(void)
{
	BUG();
}

#endif