/*
 *	x86 SMP booting functions
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 *	(c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 *	Copyright 2001 Andi Kleen, SuSE Labs.
 *
 *	Much of the core SMP work is based on previous work by Thomas Radke, to
 *	whom a great many thanks are extended.
 *
 *	Thanks to Intel for making available several different Pentium,
 *	Pentium Pro and Pentium-II/Xeon MP machines.
 *	Original development of Linux SMP code supported by Caldera.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 *
 *	Fixes
 *		Felix Koop	:	NR_CPUS used properly
 *		Jose Renau	:	Handle single CPU case.
 *		Alan Cox	:	By repeated request 8) - Total BogoMIPS report.
 *		Greg Wright	:	Fix for kernel stacks panic.
 *		Erich Boleyn	:	MP v1.4 and additional changes.
 *	Matthias Sattler	:	Changes for 2.1 kernel map.
 *	Michel Lespinasse	:	Changes for 2.1 kernel map.
 *	Michael Chastain	:	Change trampoline.S to gnu as.
 *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *		Ingo Molnar	:	Added APIC timers, based on code
 *					from Jose Renau
 *		Ingo Molnar	:	various cleanups and rewrites
 *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
 *	Andi Kleen		:	Changed for SMP boot into long mode.
 *		Martin J. Bligh	:	Added support for multi-quad systems
 *		Dave Jones	:	Report invalid combinations of Athlon CPUs.
 *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
 *		Andi Kleen	:	Converted to new state machine.
 *	Ashok Raj		:	CPU hotplug support
 *	Glauber Costa		:	i386 and x86_64 integration
 */

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/nmi.h>
#include <linux/tboot.h>
#include <linux/stackprotector.h>
#include <linux/gfp.h>
#include <linux/cpuidle.h>

#include <asm/acpi.h>
#include <asm/desc.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/realmode.h>
#include <asm/cpu.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/mwait.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/setup.h>
#include <asm/uv/uv.h>
#include <linux/mc146818rtc.h>

#include <asm/smpboot_hooks.h>
#include <asm/i8259.h>

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

#ifdef CONFIG_HOTPLUG_CPU
/*
 * We need this for trampoline_base protection from concurrent accesses when
 * off- and onlining cores wildly.
 */
static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);

void cpu_hotplug_driver_lock(void)
{
	mutex_lock(&x86_cpu_hotplug_driver_mutex);
}

void cpu_hotplug_driver_unlock(void)
{
	mutex_unlock(&x86_cpu_hotplug_driver_mutex);
}

ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
#endif
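
/*
 * Intended locking pattern (an assumption inferred from the comment above,
 * not from any caller in this file): a physical CPU hotplug driver is
 * expected to bracket its work so that two CPUs are never run through the
 * shared trampoline at once, e.g.
 *
 *	cpu_hotplug_driver_lock();
 *	err = cpu_up(cpu);		(or cpu_down(cpu))
 *	cpu_hotplug_driver_unlock();
 */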

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

DEFINE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

atomic_t init_deasserted;

/*
 * Report back to the Boot Processor.
 * Running on AP.
 */
static void __cpuinit smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC.  We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	if (apic->wait_for_init_deassert)
		apic->wait_for_init_deassert(&init_deasserted);

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = read_apic_id();
	cpuid = smp_processor_id();
	if (cpumask_test_cpu(cpuid, cpu_callin_mask)) {
		panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
					phys_id, cpuid);
	}
	pr_debug("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Complete APIC bus
	 * silence for 1 second overestimates, by a factor of two,
	 * the time the boot CPU spends sending the up to 2 STARTUP
	 * IPIs. This should be enough.
	 */

	/*
	 * Wait 2s total for startup (udelay is not yet working).
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished its STARTUP sequence?
		 */
		if (cpumask_test_cpu(cpuid, cpu_callout_mask))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		panic("%s: CPU%d started up but did not get a callout!\n",
		      __func__, cpuid);
	}

	/*
	 * The boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (This is probably redundant on most
	 * boards.)
	 */

	pr_debug("CALLIN, before setup_local_APIC().\n");
	if (apic->smp_callin_clear_local_apic)
		apic->smp_callin_clear_local_apic();
	setup_local_APIC();
	end_local_APIC_setup();

	/*
	 * Need to set up vector mappings before we enable interrupts.
	 */
	setup_vector_irq(smp_processor_id());

	/*
	 * Save our processor parameters. Note: this information
	 * is needed for clock calibration.
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Get our bogomips.
	 * Update loops_per_jiffy in cpu_data. The previous call to
	 * smp_store_cpu_info() stored a value that is close but not as
	 * accurate as the value just calculated.
	 */
	calibrate_delay();
	cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
	pr_debug("Stack at about %p\n", &cpuid);

	/*
	 * This must be done before setting cpu_online_mask
	 * or calling notify_cpu_starting.
	 */
	set_cpu_sibling_map(raw_smp_processor_id());
	wmb();

	notify_cpu_starting(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpumask_set_cpu(cpuid, cpu_callin_mask);
}
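
/*
 * The BSP/AP handshake above, in short (summarized from this file, with
 * do_boot_cpu() below as the other half):
 *
 *	BSP: do_boot_cpu()		AP: smp_callin()
 *	  sends INIT/STARTUP IPIs
 *	  sets cpu_callout_mask bit --->  spins until its callout bit is set
 *	  spins on cpu_callin_mask  <---  sets its cpu_callin_mask bit
 *	  sees callin bit: AP is up
 */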

/*
 * Activate a secondary processor.
 */
notrace static void __cpuinit start_secondary(void *unused)
{
	/*
	 * Don't put *anything* before cpu_init(); SMP booting is so
	 * fragile that we want to limit the things done here to the
	 * most necessary things.
	 */
	cpu_init();
	x86_cpuinit.early_percpu_clock_init();
	preempt_disable();
	smp_callin();

#ifdef CONFIG_X86_32
	/* switch away from the initial page table */
	load_cr3(swapper_pg_dir);
	__flush_tlb_all();
#endif

	/* otherwise gcc will move up smp_processor_id() before cpu_init() */
	barrier();
	/*
	 * Check TSC synchronization with the BP:
	 */
	check_tsc_sync_target();

	/*
	 * We need to hold call_lock, so there is no inconsistency
	 * between the time smp_call_function() determines the number of
	 * IPI recipients, and the time when the determination is made
	 * for which cpus receive the IPI. Holding this
	 * lock helps us to not include this cpu in a currently in progress
	 * smp_call_function().
	 *
	 * We need to hold vector_lock so that the set of online cpus
	 * does not change while we are assigning vectors to cpus.  Holding
	 * this lock ensures we don't half assign or remove an irq from a cpu.
	 */
	ipi_call_lock();
	lock_vector_lock();
	set_cpu_online(smp_processor_id(), true);
	unlock_vector_lock();
	ipi_call_unlock();
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
	x86_platform.nmi_init();

	/* enable local interrupts */
	local_irq_enable();

	/* to prevent fake stack check failure in clock setup */
	boot_init_stack_canary();

	x86_cpuinit.setup_percpu_clockev();

	wmb();
	cpu_idle();
}

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU.
 */

void __cpuinit smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;
	c->cpu_index = id;
	if (id != 0)
		identify_secondary_cpu(c);
}

static bool __cpuinit
topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
{
	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

	return !WARN_ONCE(cpu_to_node(cpu1) != cpu_to_node(cpu2),
		"sched: CPU #%d's %s-sibling CPU #%d is not on the same node! "
		"[node: %d != %d]. Ignoring dependency.\n",
		cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2));
}

#define link_mask(_m, c1, c2)						\
do {									\
	cpumask_set_cpu((c1), cpu_##_m##_mask(c2));			\
	cpumask_set_cpu((c2), cpu_##_m##_mask(c1));			\
} while (0)
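
/*
 * For example (illustrative only): link_mask(sibling, 2, 3) expands to a
 * pair of cpumask_set_cpu() calls that mark CPU 2 and CPU 3 as members of
 * each other's cpu_sibling_mask, keeping the relation symmetric.
 */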

static bool __cpuinit match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
		int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

		if (c->phys_proc_id == o->phys_proc_id &&
		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) &&
		    c->compute_unit_id == o->compute_unit_id)
			return topology_sane(c, o, "smt");

	} else if (c->phys_proc_id == o->phys_proc_id &&
		   c->cpu_core_id == o->cpu_core_id) {
		return topology_sane(c, o, "smt");
	}

	return false;
}

static bool __cpuinit match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

	if (per_cpu(cpu_llc_id, cpu1) != BAD_APICID &&
	    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2))
		return topology_sane(c, o, "llc");

	return false;
}

static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	if (c->phys_proc_id == o->phys_proc_id)
		return topology_sane(c, o, "mc");

	return false;
}

void __cpuinit set_cpu_sibling_map(int cpu)
{
	bool has_mc = boot_cpu_data.x86_max_cores > 1;
	bool has_smt = smp_num_siblings > 1;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct cpuinfo_x86 *o;
	int i;

	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);

	if (!has_smt && !has_mc) {
		cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
		cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
		cpumask_set_cpu(cpu, cpu_core_mask(cpu));
		c->booted_cores = 1;
		return;
	}

	for_each_cpu(i, cpu_sibling_setup_mask) {
		o = &cpu_data(i);

		if ((i == cpu) || (has_smt && match_smt(c, o)))
			link_mask(sibling, cpu, i);

		if ((i == cpu) || (has_mc && match_llc(c, o)))
			link_mask(llc_shared, cpu, i);

		if ((i == cpu) || (has_mc && match_mc(c, o))) {
			link_mask(core, cpu, i);

			/*
			 * Does this new cpu bring up a new core?
			 */
			if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (cpumask_first(cpu_sibling_mask(i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
	}
}
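
/*
 * A worked example (hypothetical machine, not derived from this file): on a
 * single-socket, 2-core, 2-thread-per-core box, booting CPUs 0..3 leaves
 *
 *	cpu_sibling_mask(0) = { 0, 2 }		(HT siblings, same core)
 *	cpu_core_mask(0)    = { 0, 1, 2, 3 }	(same package)
 *	booted_cores        = 2
 *
 * assuming an enumeration where thread siblings of core 0 are CPUs 0 and 2.
 */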

/* maps the cpu to the sched domain representing multi-core */
const struct cpumask *cpu_coregroup_mask(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	/*
	 * For perf, we return the last level cache shared map.
	 * And for power savings, we return cpu_core_map.
	 */
	if (!(cpu_has(c, X86_FEATURE_AMD_DCM)))
		return cpu_core_mask(cpu);
	else
		return cpu_llc_shared_mask(cpu);
}

static void impress_friends(void)
{
	int cpu;
	unsigned long bogosum = 0;
	/*
	 * Allow the user to impress friends.
	 */
	pr_debug("Before bogomips.\n");
	for_each_possible_cpu(cpu)
		if (cpumask_test_cpu(cpu, cpu_callout_mask))
			bogosum += cpu_data(cpu).loops_per_jiffy;
	printk(KERN_INFO
		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		num_online_cpus(),
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	pr_debug("Before bogocount - setting activated=1.\n");
}
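
/*
 * The arithmetic above: BogoMIPS = loops_per_jiffy * HZ / 500000, so
 * bogosum / (500000 / HZ) yields the integer part and
 * (bogosum / (5000 / HZ)) % 100 the two decimal places. E.g. with HZ=1000
 * and a summed loops_per_jiffy of 2,394,000 this prints "4788.00 BogoMIPS".
 */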

void __inquire_remote_apic(int apicid)
{
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	const char * const names[] = { "ID", "VERSION", "SPIV" };
	int timeout;
	u32 status;

	printk(KERN_INFO "Inquiring remote APIC 0x%x...\n", apicid);

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		printk(KERN_INFO "... APIC 0x%x %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		status = safe_apic_wait_icr_idle();
		if (status)
			printk(KERN_CONT
			       "a previous APIC delivery may have failed\n");

		apic_icr_write(APIC_DM_REMRD | regs[i], apicid);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			printk(KERN_CONT "%08x\n", status);
			break;
		default:
			printk(KERN_CONT "failed\n");
		}
	}
}

/*
 * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
 * won't ... remember to clear down the APIC, etc later.
 */
int __cpuinit
wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
{
	unsigned long send_status, accept_status = 0;
	int maxlvt;

	/* Target chip */
	/* Boot on the stack */
	/* Kick the second */
	apic_icr_write(APIC_DM_NMI | apic->dest_logical, logical_apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	/*
	 * Give the other CPU some time to accept the IPI.
	 */
	udelay(200);
	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
		maxlvt = lapic_get_maxlvt();
		if (maxlvt > 3)			/* Due to the Pentium erratum 3AP.  */
			apic_write(APIC_ESR, 0);
		accept_status = (apic_read(APIC_ESR) & 0xEF);
	}
	pr_debug("NMI sent.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}

static int __cpuinit
wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
{
	unsigned long send_status, accept_status = 0;
	int maxlvt, num_starts, j;

	maxlvt = lapic_get_maxlvt();

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
			apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	pr_debug("Asserting INIT.\n");

	/*
	 * Turn INIT on target chip
	 */
	/*
	 * Send IPI
	 */
	apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
		       phys_apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mdelay(10);

	pr_debug("Deasserting INIT.\n");

	/* Target chip */
	/* Send IPI */
	apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mb();
	atomic_set(&init_deasserted, 1);

	/*
	 * Should we send STARTUP IPIs ?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid]))
		num_starts = 2;
	else
		num_starts = 0;

	/*
	 * Paravirt / VMI wants a startup IPI hook here to set up the
	 * target processor state.
	 */
	startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
			 stack_start);

	/*
	 * Run STARTUP IPI loop.
	 */
	pr_debug("#startup loops: %d.\n", num_starts);

	for (j = 1; j <= num_starts; j++) {
		pr_debug("Sending STARTUP #%d.\n", j);
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
			apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		pr_debug("After apic_write.\n");

		/*
		 * STARTUP IPI
		 */

		/* Target chip */
		/* Boot on the stack */
		/* Kick the second */
		apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
			       phys_apicid);

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(300);

		pr_debug("Startup point 1.\n");

		pr_debug("Waiting for send to finish...\n");
		send_status = safe_apic_wait_icr_idle();

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(200);
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
			apic_write(APIC_ESR, 0);
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	pr_debug("After Startup.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
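
/*
 * Protocol note (standard x86 MP startup, summarized): the sequence above is
 * the classic INIT-SIPI-SIPI dance. The STARTUP IPI carries (start_eip >> 12)
 * in its vector field, i.e. the 4KiB page number of the real-mode entry
 * point, which is why the trampoline must be page-aligned and sit below 1MB
 * where a real-mode CPU can execute it.
 */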

/* reduce the number of lines printed when booting a large cpu count system */
static void __cpuinit announce_cpu(int cpu, int apicid)
{
	static int current_node = -1;
	int node = early_cpu_to_node(cpu);

	if (system_state == SYSTEM_BOOTING) {
		if (node != current_node) {
			if (current_node > (-1))
				pr_cont(" Ok.\n");
			current_node = node;
			pr_info("Booting Node %3d, Processors ", node);
		}
		pr_cont(" #%d%s", cpu, cpu == (nr_cpu_ids - 1) ? " Ok.\n" : "");
		return;
	} else
		pr_info("Booting Node %d Processor %d APIC 0x%x\n",
			node, cpu, apicid);
}

/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (i.e. clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if CPU booted OK, else error code from
 * ->wakeup_secondary_cpu.
 */
static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
{
	volatile u32 *trampoline_status =
		(volatile u32 *) __va(real_mode_header->trampoline_status);
	/* start_ip had better be page-aligned! */
	unsigned long start_ip = real_mode_header->trampoline_start;

	unsigned long boot_error = 0;
	int timeout;

	alternatives_smp_switch(1);

	idle->thread.sp = (unsigned long) (((struct pt_regs *)
			  (THREAD_SIZE +  task_stack_page(idle))) - 1);
	per_cpu(current_task, cpu) = idle;

#ifdef CONFIG_X86_32
	/* Stack for startup_32 can be just as for start_secondary onwards */
	irq_ctx_init(cpu);
#else
	clear_tsk_thread_flag(idle, TIF_FORK);
	initial_gs = per_cpu_offset(cpu);
	per_cpu(kernel_stack, cpu) =
		(unsigned long)task_stack_page(idle) -
		KERNEL_STACK_OFFSET + THREAD_SIZE;
#endif
	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
	initial_code = (unsigned long)start_secondary;
	stack_start  = idle->thread.sp;

	/* So we see what's up */
	announce_cpu(cpu, apicid);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	atomic_set(&init_deasserted, 0);

	if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {

		pr_debug("Setting warm reset code and vector.\n");

		smpboot_setup_warm_reset_vector(start_ip);
		/*
		 * Be paranoid about clearing APIC errors.
		 */
		if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
			apic_write(APIC_ESR, 0);
			apic_read(APIC_ESR);
		}
	}

	/*
	 * Kick the secondary CPU. Use the method in the APIC driver
	 * if it's defined - or use an INIT boot APIC message otherwise:
	 */
	if (apic->wakeup_secondary_cpu)
		boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
	else
		boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);

	if (!boot_error) {
		/*
		 * Allow APs to start initializing.
		 */
		pr_debug("Before Callout %d.\n", cpu);
		cpumask_set_cpu(cpu, cpu_callout_mask);
		pr_debug("After Callout %d.\n", cpu);

		/*
		 * Wait 5s total for a response.
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpumask_test_cpu(cpu, cpu_callin_mask))
				break;	/* It has booted */
			udelay(100);
			/*
			 * Allow other tasks to run while we wait for the
			 * AP to come online. This also gives a chance
			 * for the MTRR work (triggered by the AP coming online)
			 * to be completed in the stop machine context.
			 */
			schedule();
		}

		if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
			print_cpu_msr(&cpu_data(cpu));
			pr_debug("CPU%d: has booted.\n", cpu);
		} else {
			boot_error = 1;
			if (*trampoline_status == 0xA5A5A5A5)
				/* trampoline started but...? */
				pr_err("CPU%d: Stuck ??\n", cpu);
			else
				/* trampoline code not run */
				pr_err("CPU%d: Not responding.\n", cpu);
			if (apic->inquire_remote_apic)
				apic->inquire_remote_apic(apicid);
		}
	}

	if (boot_error) {
		/* Try to put things back the way they were before ... */
		numa_remove_cpu(cpu); /* was set by numa_add_cpu */

		/* was set by do_boot_cpu() */
		cpumask_clear_cpu(cpu, cpu_callout_mask);

		/* was set by cpu_init() */
		cpumask_clear_cpu(cpu, cpu_initialized_mask);

		set_cpu_present(cpu, false);
		per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
	}

	/* mark "stuck" area as not stuck */
	*trampoline_status = 0;

	if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
		/*
		 * Clean up possible dangling ends...
		 */
		smpboot_restore_warm_reset_vector();
	}
	return boot_error;
}
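
/*
 * Note on the trampoline handshake (as read from do_boot_cpu() above): the
 * real-mode trampoline writes the magic value 0xA5A5A5A5 into
 * trampoline_status once it starts executing. So "magic seen but no callin"
 * means the AP entered the trampoline and then got stuck, while "no magic"
 * means the STARTUP IPI never got the AP into the trampoline at all.
 */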

int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int apicid = apic->cpu_present_to_apicid(cpu);
	unsigned long flags;
	int err;

	WARN_ON(irqs_disabled());

	pr_debug("++++++++++++++++++++=_---CPU UP  %u\n", cpu);

	if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid ||
	    !physid_isset(apicid, phys_cpu_present_map) ||
	    !apic->apic_id_valid(apicid)) {
		printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu);
		return -EINVAL;
	}

	/*
	 * Already booted CPU?
	 */
	if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
		pr_debug("do_boot_cpu %d Already started\n", cpu);
		return -ENOSYS;
	}

	/*
	 * Save current MTRR state in case it was changed since early boot
	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
	 */
	mtrr_save_state();

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	err = do_boot_cpu(apicid, cpu, tidle);
	if (err) {
		pr_debug("do_boot_cpu failed %d\n", err);
		return -EIO;
	}

	/*
	 * Check TSC synchronization with the AP (keep irqs disabled
	 * while doing so):
	 */
	local_irq_save(flags);
	check_tsc_sync_source(cpu);
	local_irq_restore(flags);

	while (!cpu_online(cpu)) {
		cpu_relax();
		touch_nmi_watchdog();
	}

	return 0;
}
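
/*
 * How this is reached (general kernel plumbing, stated as background rather
 * than anything defined in this file): native_cpu_up() is installed as
 * smp_ops.cpu_up, so a hotplug request such as
 *
 *	echo 1 > /sys/devices/system/cpu/cpu1/online
 *
 * funnels through cpu_up() -> __cpu_up() -> smp_ops.cpu_up() to this
 * function, with tidle being the idle task pre-allocated for the new CPU.
 */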

/**
 * arch_disable_smp_support() - disables SMP support for x86 at runtime
 */
void arch_disable_smp_support(void)
{
	disable_ioapic_support();
}

/*
 * Fall back to non SMP mode after errors.
 *
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __init void disable_smp(void)
{
	init_cpu_present(cpumask_of(0));
	init_cpu_possible(cpumask_of(0));
	smpboot_clear_io_apic_irqs();

	if (smp_found_config)
		physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
	else
		physid_set_mask_of_physid(0, &phys_cpu_present_map);
	cpumask_set_cpu(0, cpu_sibling_mask(0));
	cpumask_set_cpu(0, cpu_core_mask(0));
}

/*
 * Various sanity checks.
 */
static int __init smp_sanity_check(unsigned max_cpus)
{
	preempt_disable();

#if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32)
	if (def_to_bigsmp && nr_cpu_ids > 8) {
		unsigned int cpu;
		unsigned nr;

		printk(KERN_WARNING
		       "More than 8 CPUs detected - skipping them.\n"
		       "Use CONFIG_X86_BIGSMP.\n");

		nr = 0;
		for_each_present_cpu(cpu) {
			if (nr >= 8)
				set_cpu_present(cpu, false);
			nr++;
		}

		nr = 0;
		for_each_possible_cpu(cpu) {
			if (nr >= 8)
				set_cpu_possible(cpu, false);
			nr++;
		}

		nr_cpu_ids = 8;
	}
#endif

	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		printk(KERN_WARNING
			"weird, boot CPU (#%d) not listed by the BIOS.\n",
			hard_smp_processor_id());

		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config && !acpi_lapic) {
		preempt_enable();
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		disable_smp();
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		return -1;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 */
	if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) {
		printk(KERN_NOTICE
			"weird, boot CPU (#%d) not listed by the BIOS.\n",
			boot_cpu_physical_apicid);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}
	preempt_enable();

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) &&
	    !cpu_has_apic) {
		if (!disable_apic) {
			pr_err("BIOS bug, local APIC #%d not detected!...\n",
				boot_cpu_physical_apicid);
			pr_err("... forcing use of dummy APIC emulation. "
				"(tell your hw vendor)\n");
		}
		smpboot_clear_io_apic();
		disable_ioapic_support();
		return -1;
	}

	verify_local_APIC();

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		printk(KERN_INFO "SMP mode deactivated.\n");
		smpboot_clear_io_apic();

		connect_bsp_APIC();
		setup_local_APIC();
		bsp_end_local_APIC_setup();
		return -1;
	}

	return 0;
}

static void __init smp_cpu_index_default(void)
{
	int i;
	struct cpuinfo_x86 *c;

	for_each_possible_cpu(i) {
		c = &cpu_data(i);
		/* mark all to hotplug */
		c->cpu_index = nr_cpu_ids;
	}
}

/*
 * Prepare for SMP bootup.  The MP table or ACPI has been read
 * earlier.  Just do some sanity checking here and enable APIC mode.
 */
void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int i;

	preempt_disable();
	smp_cpu_index_default();

	/*
	 * Set up boot CPU information.
	 */
	smp_store_cpu_info(0); /* Final full version of the data */
	cpumask_copy(cpu_callin_mask, cpumask_of(0));
	mb();

	current_thread_info()->cpu = 0;  /* needed? */
	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
	}
	set_cpu_sibling_map(0);

	if (smp_sanity_check(max_cpus) < 0) {
		printk(KERN_INFO "SMP disabled\n");
		disable_smp();
		goto out;
	}

	default_setup_apic_routing();

	preempt_disable();
	if (read_apic_id() != boot_cpu_physical_apicid) {
		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
		     read_apic_id(), boot_cpu_physical_apicid);
		/* Or can we switch back to PIC here? */
	}
	preempt_enable();

	connect_bsp_APIC();

	/*
	 * Switch from PIC to APIC mode.
	 */
	setup_local_APIC();

	/*
	 * Enable IO APIC before setting up error vector.
	 */
	if (!skip_ioapic_setup && nr_ioapics)
		enable_IO_APIC();

	bsp_end_local_APIC_setup();

	if (apic->setup_portio_remap)
		apic->setup_portio_remap();

	smpboot_setup_io_apic();
	/*
	 * Set up local APIC timer on boot CPU.
	 */

	printk(KERN_INFO "CPU%d: ", 0);
	print_cpu_info(&cpu_data(0));
	x86_init.timers.setup_percpu_clockev();

	if (is_uv_system())
		uv_system_init();

	set_mtrr_aps_delayed_init();
out:
	preempt_enable();
}

void arch_disable_nonboot_cpus_begin(void)
{
	/*
	 * Avoid the smp alternatives switch during the disable_nonboot_cpus().
	 * In the suspend path, we will be back in the SMP mode shortly anyways.
	 */
	skip_smp_alternatives = true;
}

void arch_disable_nonboot_cpus_end(void)
{
	skip_smp_alternatives = false;
}

void arch_enable_nonboot_cpus_begin(void)
{
	set_mtrr_aps_delayed_init();
}

void arch_enable_nonboot_cpus_end(void)
{
	mtrr_aps_init();
}

/*
 * Early setup to make printk work.
 */
void __init native_smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();
	switch_to_new_gdt(me);
	/* already set me in cpu_online_mask in boot_cpu_init() */
	cpumask_set_cpu(me, cpu_callout_mask);
	per_cpu(cpu_state, me) = CPU_ONLINE;
}

void __init native_smp_cpus_done(unsigned int max_cpus)
{
	pr_debug("Boot done.\n");

	nmi_selftest();
	impress_friends();
#ifdef CONFIG_X86_IO_APIC
	setup_ioapic_dest();
#endif
	mtrr_aps_init();
}

static int __initdata setup_possible_cpus = -1;
static int __init _setup_possible_cpus(char *str)
{
	get_option(&str, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);
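
/*
 * Example (boot-time usage, assuming a hotplug-capable kernel): booting with
 * "possible_cpus=8" on the kernel command line sizes cpu_possible_mask for
 * eight CPUs even if fewer are present, leaving headroom for physical
 * hotplug; prefill_possible_map() below consumes the parsed value.
 */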

/*
 * cpu_possible_mask should be static: it cannot change as CPUs are
 * onlined or offlined. The reason is that per-cpu data structures
 * are allocated by some modules at init time, and they don't expect
 * to do this dynamically on cpu arrival/departure.
 * cpu_present_mask on the other hand can change dynamically.
 * If CPU hotplug is not compiled in, we resort to the current
 * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
 * - The user can override it with possible_cpus=NUM.
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 * -AK
 */
__init void prefill_possible_map(void)
{
	int i, possible;

	/* no processor from mptable or madt */
	if (!num_processors)
		num_processors = 1;

	i = setup_max_cpus ?: 1;
	if (setup_possible_cpus == -1) {
		possible = num_processors;
#ifdef CONFIG_HOTPLUG_CPU
		if (setup_max_cpus)
			possible += disabled_cpus;
#else
		if (possible > i)
			possible = i;
#endif
	} else
		possible = setup_possible_cpus;

	total_cpus = max_t(int, possible, num_processors + disabled_cpus);

	/* nr_cpu_ids could be reduced via nr_cpus= */
	if (possible > nr_cpu_ids) {
		printk(KERN_WARNING
			"%d Processors exceeds NR_CPUS limit of %d\n",
			possible, nr_cpu_ids);
		possible = nr_cpu_ids;
	}

#ifdef CONFIG_HOTPLUG_CPU
	if (!setup_max_cpus)
#endif
	if (possible > i) {
		printk(KERN_WARNING
			"%d Processors exceeds max_cpus limit of %u\n",
			possible, setup_max_cpus);
		possible = i;
	}

	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
		possible, max_t(int, possible - num_processors, 0));

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);

	nr_cpu_ids = possible;
}
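
/*
 * A worked instance of the sizing above (numbers invented for illustration):
 * with 4 processors enumerated, 2 listed as disabled, no "possible_cpus="
 * override and setup_max_cpus nonzero, a CONFIG_HOTPLUG_CPU kernel computes
 * possible = 4 + 2 = 6 and prints "SMP: Allowing 6 CPUs, 2 hotplug CPUs".
 */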

#ifdef CONFIG_HOTPLUG_CPU

static void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu(sibling, cpu_core_mask(cpu)) {
		cpumask_clear_cpu(cpu, cpu_core_mask(sibling));
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpumask_weight(cpu_sibling_mask(cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu(sibling, cpu_sibling_mask(cpu))
		cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling));
	cpumask_clear(cpu_sibling_mask(cpu));
	cpumask_clear(cpu_core_mask(cpu));
	c->phys_proc_id = 0;
	c->cpu_core_id = 0;
	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
}

static void __ref remove_cpu_from_maps(int cpu)
{
	set_cpu_online(cpu, false);
	cpumask_clear_cpu(cpu, cpu_callout_mask);
	cpumask_clear_cpu(cpu, cpu_callin_mask);
	/* was set by cpu_init() */
	cpumask_clear_cpu(cpu, cpu_initialized_mask);
	numa_remove_cpu(cpu);
}

void cpu_disable_common(void)
{
	int cpu = smp_processor_id();

	remove_siblinginfo(cpu);

	/* It's now safe to remove this processor from the online map */
	lock_vector_lock();
	remove_cpu_from_maps(cpu);
	unlock_vector_lock();
	fixup_irqs();
}

int native_cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * Perhaps use cpufreq to drop frequency, but that could go
	 * into generic code.
	 *
	 * We won't take down the boot processor on i386 due to some
	 * interrupts only being able to be serviced by the BSP.
	 * Especially so if we're not using an IOAPIC	-zwane
	 */
	if (cpu == 0)
		return -EBUSY;

	clear_local_APIC();

	cpu_disable_common();
	return 0;
}

void native_cpu_die(unsigned int cpu)
{
	/* We don't do anything here: the idle task is faking death itself. */
	unsigned int i;

	for (i = 0; i < 10; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			if (system_state == SYSTEM_RUNNING)
				pr_info("CPU %u is now offline\n", cpu);

			if (1 == num_online_cpus())
				alternatives_smp_switch(0);
			return;
		}
		msleep(100);
	}
	pr_err("CPU %u didn't die...\n", cpu);
}

void play_dead_common(void)
{
	idle_task_exit();
	reset_lazy_tlbstate();
	amd_e400_remove_cpu(raw_smp_processor_id());

	mb();
	/* Ack it */
	__this_cpu_write(cpu_state, CPU_DEAD);

	/*
	 * With physical CPU hotplug, we should halt the cpu
	 */
	local_irq_disable();
}

/*
 * We need to flush the caches before going to sleep, lest we have
 * dirty data in our caches when we come back up.
 */
static inline void mwait_play_dead(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int highest_cstate = 0;
	unsigned int highest_subcstate = 0;
	int i;
	void *mwait_ptr;
	struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);

	if (!(this_cpu_has(X86_FEATURE_MWAIT) && mwait_usable(c)))
		return;
	if (!this_cpu_has(X86_FEATURE_CLFLSH))
		return;
	if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
		return;

	eax = CPUID_MWAIT_LEAF;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);

	/*
	 * eax will be 0 if EDX enumeration is not valid.
	 * It is initialized below to the (cstate, sub_cstate) value when
	 * EDX is valid.
	 */
	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
		eax = 0;
	} else {
		edx >>= MWAIT_SUBSTATE_SIZE;
		for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
			if (edx & MWAIT_SUBSTATE_MASK) {
				highest_cstate = i;
				highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
			}
		}
		eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
			(highest_subcstate - 1);
	}

	/*
	 * This should be a memory location in a cache line which is
	 * unlikely to be touched by other processors.  The actual
	 * content is immaterial as it is not actually modified in any way.
	 */
	mwait_ptr = &current_thread_info()->flags;

	wbinvd();

	while (1) {
		/*
		 * The CLFLUSH is a workaround for erratum AAI65 for
		 * the Xeon 7400 series.  It's not clear it is actually
		 * needed, but it should be harmless in either case.
		 * The WBINVD is insufficient due to the spurious-wakeup
		 * case where we return around the loop.
		 */
		clflush(mwait_ptr);
		__monitor(mwait_ptr, 0, 0);
		mb();
		__mwait(eax, 0);
	}
}
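
/*
 * MWAIT hint layout, as used above (MWAIT_SUBSTATE_SIZE is 4): bits 7:4 of
 * eax select the target C-state and bits 3:0 the sub-state, so e.g. a
 * deepest enumerated state of C3 sub-state 2 yields eax = (3 << 4) | 1.
 */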

static inline void hlt_play_dead(void)
{
	if (__this_cpu_read(cpu_info.x86) >= 4)
		wbinvd();

	while (1) {
		native_halt();
	}
}

void native_play_dead(void)
{
	play_dead_common();
	tboot_shutdown(TB_SHUTDOWN_WFS);

	mwait_play_dead();	/* Only returns on failure */
	if (cpuidle_play_dead())
		hlt_play_dead();
}

#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(void)
{
	return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

void native_play_dead(void)
{
	BUG();
}

#endif
1401