xref: /linux/arch/x86/kernel/cpu/intel.c (revision 092e0e7e520a1fca03e13c9f2d157432a8657ff2)
#include <linux/init.h>
#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#include <asm/numa_64.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif

static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
	/* Unmask CPUID levels if masked: */
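	/*
	 * Some BIOSes set IA32_MISC_ENABLE.LIMIT_CPUID to make the CPU
	 * claim a very low maximum CPUID level for the benefit of old
	 * OSes.  After clearing it, the maximum level and the feature
	 * bits must be re-read, which is what cpuid_eax(0) and
	 * get_cpu_cap() below do.
	 */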
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		u64 misc_enable;

		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

		if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
			misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
			c->cpuid_level = cpuid_eax(0);
			get_cpu_cap(c);
		}
	}

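	/*
	 * Family 0xf model 3 (Prescott) and family 6 model 0xe (Yonah)
	 * onwards have a TSC that ticks at a constant rate regardless
	 * of P-state transitions.
	 */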
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
		(c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page.  This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2) {
		u32 ucode, junk;

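		/*
		 * Standard microcode revision read protocol: zero the
		 * MSR, serialize with CPUID (sync_core()), then read it
		 * back; the revision lands in the high 32 bits (EDX).
		 */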
		wrmsr(MSR_IA32_UCODE_REV, 0, 0);
		sync_core();
		rdmsr(MSR_IA32_UCODE_REV, junk, ucode);

		if (ucode < 0x20e) {
			printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
			clear_cpu_cap(c, X86_FEATURE_PSE);
		}
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
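	/*
	 * (These early Prescott steppings apparently misreport their
	 * physical address width via CPUID; 36 bits is what the parts
	 * actually implement.)
	 */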
	if (c->x86 == 0xF && c->x86_model == 0x3
	    && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is CPUID 8000_0007 edx. Bit 8 indicates that the
	 * TSC runs at a constant rate across P- and T-state transitions
	 * and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets (but not across
	 * cabinets - we turn it off explicitly in that case).
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
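		/*
		 * With an invariant TSC, sched_clock() can be treated as
		 * stable unless the TSC has already been flagged unstable.
		 */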
		if (!check_tsc_unstable())
			sched_clock_stable = 1;
	}

	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs.
	 * " Page with PAT set to WC while associated MTRR is UC
	 *   may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);

#ifdef CONFIG_KMEMCHECK
	/*
	 * P4s have a "fast strings" feature which causes single-
	 * stepping REP instructions to only generate a #DB on
	 * cache-line boundaries.
	 *
	 * Ingo Molnar reported a Pentium D (model 6) and a Xeon
	 * (model 2) with the same problem.
	 */
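	/*
	 * kmemcheck needs a #DB for every iteration of a single-stepped
	 * REP instruction in order to track each access, so fast
	 * strings have to be turned off.
	 */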
	if (c->x86 == 15) {
		u64 misc_enable;

		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

		if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
			printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");

			misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		}
	}
#endif
}

#ifdef CONFIG_X86_32
/*
 *	Early probe support logic for ppro memory erratum #50
 *
 *	This is called before we do cpu ident work
 */

int __cpuinit ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask < 8) {
		printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}

#ifdef CONFIG_X86_F00F_BUG
static void __cpuinit trap_init_f00f_bug(void)
{
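	/*
	 * The F0 0F erratum (lock cmpxchg8b with a register operand)
	 * can hang the CPU.  With the IDT mapped read-only, the bogus
	 * instruction instead ends in a page fault that the fault
	 * handler can recognize and turn into the #UD it deserves.
	 */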
	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

	/*
	 * Update the IDT descriptor and reload the IDT so that
	 * it uses the read-only mapped virtual address.
	 */
	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
	load_idt(&idt_descr);
}
#endif

static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	/*
	 * The boot CPU (cpu_index 0) is not brought up through
	 * identify_secondary_cpu(), so there is nothing to check.
	 */
	if (!c->cpu_index)
		return;

	/*
	 * B stepping (mask 1-4) Pentium, but not Pentium MMX
	 */
	if (c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
			     "with B stepping processors.\n");
	}
#endif
}

static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All current models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system.
	 * Note that the workaround should only be initialized once...
	 */
	c->f00f_bug = 0;
	if (!paravirt_enabled() && c->x86 == 5) {
		static int f00f_workaround_enabled;

		c->f00f_bug = 1;
		if (!f00f_workaround_enabled) {
			trap_init_f00f_bug();
			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
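	/* e.g. family 6, model 3, stepping 3 packs to (6 << 8) | (3 << 4) | 3 = 0x633 */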
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * P4 Xeon erratum 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
			printk(KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
			printk(KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
			lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
			wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
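	/* (5 << 8) | (2 << 4) == 0x520, i.e. family 5, model 2 (P54C). */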
	if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
		set_cpu_cap(c, X86_FEATURE_11AP);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
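	/*
	 * A mask of 7 tells the usercopy code to avoid movsl unless the
	 * buffers are suitably aligned to 8 bytes.
	 */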
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

#ifdef CONFIG_X86_NUMAQ
	numaq_tsc_disable();
#endif

	intel_smp_check(c);
}
#else
static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
	unsigned node;
	int cpu = smp_processor_id();
	int apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid;

	/*
	 * Don't do the funky fallback heuristics the AMD version
	 * employs for now.
	 */
	node = apicid_to_node[apicid];
	if (node == NUMA_NO_NODE)
		node = first_node(node_online_map);
	else if (!node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}

/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	if (c->cpuid_level < 4)
		return 1;

	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
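	/*
	 * CPUID.4 EAX[4:0] is the cache type (0 = no more caches);
	 * EAX[31:26] is the maximum number of cores per package, minus one.
	 */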
	if (eax & 0x1f)
		return (eax >> 26) + 1;
	else
		return 1;
}

static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
	/* Intel VMX MSR indicated features */
#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW	0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI		0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS	0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC	0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT		0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID		0x00000020

	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;

	clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	clear_cpu_cap(c, X86_FEATURE_VNMI);
	clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
	clear_cpu_cap(c, X86_FEATURE_EPT);
	clear_cpu_cap(c, X86_FEATURE_VPID);

	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
	msr_ctl = vmx_msr_high | vmx_msr_low;
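	/*
	 * The capability MSR reports the allowed zero-settings in the
	 * low dword and the allowed one-settings in the high dword;
	 * OR-ing the halves yields every control bit that can be set.
	 */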
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
		set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
		set_cpu_cap(c, X86_FEATURE_VNMI);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
		      vmx_msr_low, vmx_msr_high);
		msr_ctl2 = vmx_msr_high | vmx_msr_low;
		if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
		    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
			set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
			set_cpu_cap(c, X86_FEATURE_EPT);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
			set_cpu_cap(c, X86_FEATURE_VPID);
	}
}

static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	unsigned int l2 = 0;

	early_init_intel(c);

	intel_workarounds(c);

	/*
	 * Detect the extended topology information if available. This
	 * will reinitialise the initial_apicid which will be used
	 * in init_intel_cacheinfo().
	 */
	detect_extended_topology(c);

	l2 = init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
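		/*
		 * CPUID.0xA EAX[7:0] is the architectural perfmon
		 * version, EAX[15:8] the number of general-purpose
		 * counters.
		 */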
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has_xmm2)
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	if (cpu_has_ds) {
		unsigned int l1;
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
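		/*
		 * IA32_MISC_ENABLE bit 11 reads 1 when BTS is
		 * unavailable, bit 12 when PEBS is unavailable.
		 */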
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

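	/*
	 * Model 29 is the Xeon 7400 series (Dunnington); reportedly its
	 * MWAIT can miss a wakeup unless the monitored line is flushed
	 * first, hence CLFLUSH_MONITOR.
	 */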
	if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
		set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);

#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (c->x86_mask == 0) {
				if (l2 == 0)
					p = "Celeron (Covington)";
				else if (l2 == 256)
					p = "Mobile Pentium II (Dixon)";
			}
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_mask == 0 || c->x86_mask == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}

	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_P4);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_P3);
#endif

	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
		/*
		 * Use the legacy CPUID leaves 0x1 and 0x4 for topology
		 * detection.
		 */
		c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
		detect_ht(c);
#endif
	}

	/* Detect the NUMA node this CPU belongs to: */
	srat_detect_node(c);

	if (cpu_has(c, X86_FEATURE_VMX))
		detect_vmx_virtcap(c);
}

#ifdef CONFIG_X86_32
static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256KB of cache, the other 512KB. We have no way
	 * to determine which, so we use a boot-time override
	 * for the 512KB model, and assume 256KB otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;
	return size;
}
#endif

static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.c_models = {
		{ .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.c_size_cache	= intel_size_cache,
#endif
	.c_early_init   = early_init_intel,
	.c_init		= init_intel,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

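/*
 * cpu_dev_register() drops a pointer to intel_cpu_dev into a dedicated
 * init section that the common CPU setup code scans during early boot
 * to match the vendor string.
 */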
cpu_dev_register(intel_cpu_dev);