#include <linux/init.h>
#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/ds.h>
#include <asm/bugs.h>
#include <asm/cpu.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#include <asm/numa_64.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif

static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
	/* Unmask CPUID levels if masked: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		u64 misc_enable;

		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

		if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
			misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
			c->cpuid_level = cpuid_eax(0);
		}
	}

	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3 &&
	    (c->x86_mask == 0x3 || c->x86_mask == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets. (but not across
	 * cabinets - we turn it off in that case explicitly.)
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
		set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
		sched_clock_stable = 1;
	}

	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs.
	 * " Page with PAT set to WC while associated MTRR is UC
	 * may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);

#ifdef CONFIG_KMEMCHECK
	/*
	 * P4s have a "fast strings" feature which causes single-
	 * stepping REP instructions to only generate a #DB on
	 * cache-line boundaries.
	 *
	 * Ingo Molnar reported a Pentium D (model 6) and a Xeon
	 * (model 2) with the same problem.
	 */
	if (c->x86 == 15) {
		u64 misc_enable;

		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

		if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
			printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");

			misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		}
	}
#endif
}
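/*
 * Illustrative note (editorial addition, not from the original source):
 * with MSR_IA32_MISC_ENABLE.LIMIT_CPUID set by firmware, CPUID leaf 0
 * under-reports the maximum supported leaf (the SDM documents a cap of
 * 3, a compatibility knob for old OSes), which is why early_init_intel()
 * re-reads cpuid_eax(0) into c->cpuid_level after clearing the bit.
 */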

#ifdef CONFIG_X86_32
/*
 * Early probe support logic for ppro memory erratum #50
 *
 * This is called before we do cpu ident work
 */

int __cpuinit ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask < 8) {
		printk(KERN_INFO "Pentium Pro with Erratum #50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}

#ifdef CONFIG_X86_F00F_BUG
static void __cpuinit trap_init_f00f_bug(void)
{
	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

	/*
	 * Update the IDT descriptor and reload the IDT so that
	 * it uses the read-only mapped virtual address.
	 */
	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
	load_idt(&idt_descr);
}
#endif

static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	/* Called from identify_secondary_cpu()? */
	if (c->cpu_index == boot_cpu_id)
		return;

	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
			     "with B stepping processors.\n");
	}
#endif
}

static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All current models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system.
	 * Note that the workaround only should be initialized once...
	 */
	c->f00f_bug = 0;
	if (!paravirt_enabled() && c->x86 == 5) {
		static int f00f_workaround_enabled;

		c->f00f_bug = 1;
		if (!f00f_workaround_enabled) {
			trap_init_f00f_bug();
			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * P4 Xeon errata 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
			printk(KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
			printk(KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
			lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
			wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
	if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
		set_cpu_cap(c, X86_FEATURE_11AP);
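
	/*
	 * Illustrative note (editorial addition, not from the original
	 * source): the 11AP check above and the SEP check earlier pack
	 * family/model/stepping into a single integer for comparison.
	 * E.g. (5 << 8) | (2 << 4) == 0x520 matches family 5, model 2
	 * (P54C), and 0x633 corresponds to family 6, model 3, stepping 3,
	 * the first parts with working SYSENTER.
	 */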

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

#ifdef CONFIG_X86_NUMAQ
	numaq_tsc_disable();
#endif

	intel_smp_check(c);
}
#else
static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
	unsigned node;
	int cpu = smp_processor_id();
	int apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid;

	/*
	 * Don't do the funky fallback heuristics the AMD version
	 * employs for now.
	 */
	node = apicid_to_node[apicid];
	if (node == NUMA_NO_NODE)
		node = first_node(node_online_map);
	else if (!node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
#endif
}

/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	if (c->cpuid_level < 4)
		return 1;

	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
	if (eax & 0x1f)
		return (eax >> 26) + 1;
	else
		return 1;
}
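/*
 * Illustrative note (editorial addition, not from the original source):
 * CPUID leaf 4, subleaf 0 (selected via %ecx, hence cpuid_count())
 * describes the first cache level. EAX[4:0] is the cache type (0 means
 * no cache, i.e. the leaf is unpopulated), and EAX[31:26] holds the
 * maximum cores per physical package minus one, so a dual-core die
 * reports 1 there and (eax >> 26) + 1 evaluates to 2.
 */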

static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
	/* Intel VMX MSR indicated features */
#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW	0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI		0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS	0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC	0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT		0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID		0x00000020

	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;

	clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	clear_cpu_cap(c, X86_FEATURE_VNMI);
	clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
	clear_cpu_cap(c, X86_FEATURE_EPT);
	clear_cpu_cap(c, X86_FEATURE_VPID);

	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
	msr_ctl = vmx_msr_high | vmx_msr_low;
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
		set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
		set_cpu_cap(c, X86_FEATURE_VNMI);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
		      vmx_msr_low, vmx_msr_high);
		msr_ctl2 = vmx_msr_high | vmx_msr_low;
		if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
		    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
			set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
			set_cpu_cap(c, X86_FEATURE_EPT);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
			set_cpu_cap(c, X86_FEATURE_VPID);
	}
}
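/*
 * Illustrative note (editorial addition, not from the original source):
 * in the VMX capability MSRs the low 32 bits report controls that must
 * be 1 (allowed 0-settings) and the high 32 bits report controls that
 * may be 1 (allowed 1-settings), so OR-ing the two halves, as
 * detect_vmx_virtcap() does above, yields the set of controls the
 * hardware can enable.
 */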

static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	unsigned int l2 = 0;

	early_init_intel(c);

	intel_workarounds(c);

	/*
	 * Detect the extended topology information if available. This
	 * will reinitialise the initial_apicid which will be used
	 * in init_intel_cacheinfo()
	 */
	detect_extended_topology(c);

	l2 = init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);

		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (c->cpuid_level > 6) {
		unsigned ecx = cpuid_ecx(6);

		if (ecx & 0x01)
			set_cpu_cap(c, X86_FEATURE_APERFMPERF);
	}

	if (cpu_has_xmm2)
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	if (cpu_has_ds) {
		unsigned int l1;

		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
		ds_init_intel(c);
	}

	if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
		set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);

#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (c->x86_mask == 0) {
				if (l2 == 0)
					p = "Celeron (Covington)";
				else if (l2 == 256)
					p = "Mobile Pentium II (Dixon)";
			}
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_mask == 0 || c->x86_mask == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}

	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_P4);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_P3);
#endif

	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
		/*
		 * let's use the legacy cpuid leaves 0x1 and 0x4 for
		 * topology detection.
		 */
		c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
		detect_ht(c);
#endif
	}

	/* Work around errata */
	srat_detect_node(c);

	if (cpu_has(c, X86_FEATURE_VMX))
		detect_vmx_virtcap(c);
}

#ifdef CONFIG_X86_32
static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kB of cache, the other 512kB. We have no way
	 * to determine which, so we use a boot-time override
	 * for the 512kB model, and assume 256kB otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;
	return size;
}
#endif

static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.c_models = {
		{ .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.c_size_cache	= intel_size_cache,
#endif
	.c_early_init	= early_init_intel,
	.c_init		= init_intel,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);