1 #include <linux/init.h> 2 #include <linux/kernel.h> 3 4 #include <linux/string.h> 5 #include <linux/bitops.h> 6 #include <linux/smp.h> 7 #include <linux/thread_info.h> 8 #include <linux/module.h> 9 10 #include <asm/processor.h> 11 #include <asm/pgtable.h> 12 #include <asm/msr.h> 13 #include <asm/uaccess.h> 14 #include <asm/ptrace.h> 15 #include <asm/ds.h> 16 #include <asm/bugs.h> 17 18 #ifdef CONFIG_X86_64 19 #include <asm/topology.h> 20 #include <asm/numa_64.h> 21 #endif 22 23 #include "cpu.h" 24 25 #ifdef CONFIG_X86_LOCAL_APIC 26 #include <asm/mpspec.h> 27 #include <asm/apic.h> 28 #include <mach_apic.h> 29 #endif 30 31 static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) 32 { 33 if ((c->x86 == 0xf && c->x86_model >= 0x03) || 34 (c->x86 == 0x6 && c->x86_model >= 0x0e)) 35 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); 36 37 #ifdef CONFIG_X86_64 38 set_cpu_cap(c, X86_FEATURE_SYSENTER32); 39 #else 40 /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */ 41 if (c->x86 == 15 && c->x86_cache_alignment == 64) 42 c->x86_cache_alignment = 128; 43 #endif 44 } 45 46 #ifdef CONFIG_X86_32 47 /* 48 * Early probe support logic for ppro memory erratum #50 49 * 50 * This is called before we do cpu ident work 51 */ 52 53 int __cpuinit ppro_with_ram_bug(void) 54 { 55 /* Uses data from early_cpu_detect now */ 56 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && 57 boot_cpu_data.x86 == 6 && 58 boot_cpu_data.x86_model == 1 && 59 boot_cpu_data.x86_mask < 8) { 60 printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n"); 61 return 1; 62 } 63 return 0; 64 } 65 66 #ifdef CONFIG_X86_F00F_BUG 67 static void __cpuinit trap_init_f00f_bug(void) 68 { 69 __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO); 70 71 /* 72 * Update the IDT descriptor and reload the IDT so that 73 * it uses the read-only mapped virtual address. 74 */ 75 idt_descr.address = fix_to_virt(FIX_F00F_IDT); 76 load_idt(&idt_descr); 77 } 78 #endif 79 80 static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) 81 { 82 unsigned long lo, hi; 83 84 #ifdef CONFIG_X86_F00F_BUG 85 /* 86 * All current models of Pentium and Pentium with MMX technology CPUs 87 * have the F0 0F bug, which lets nonprivileged users lock up the system. 88 * Note that the workaround only should be initialized once... 89 */ 90 c->f00f_bug = 0; 91 if (!paravirt_enabled() && c->x86 == 5) { 92 static int f00f_workaround_enabled; 93 94 c->f00f_bug = 1; 95 if (!f00f_workaround_enabled) { 96 trap_init_f00f_bug(); 97 printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n"); 98 f00f_workaround_enabled = 1; 99 } 100 } 101 #endif 102 103 /* 104 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until 105 * model 3 mask 3 106 */ 107 if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633) 108 clear_cpu_cap(c, X86_FEATURE_SEP); 109 110 /* 111 * P4 Xeon errata 037 workaround. 112 * Hardware prefetcher may cause stale data to be loaded into the cache. 113 */ 114 if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { 115 rdmsr(MSR_IA32_MISC_ENABLE, lo, hi); 116 if ((lo & (1<<9)) == 0) { 117 printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); 118 printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); 119 lo |= (1<<9); /* Disable hw prefetching */ 120 wrmsr (MSR_IA32_MISC_ENABLE, lo, hi); 121 } 122 } 123 124 /* 125 * See if we have a good local APIC by checking for buggy Pentia, 126 * i.e. all B steppings and the C2 stepping of P54C when using their 127 * integrated APIC (see 11AP erratum in "Pentium Processor 128 * Specification Update"). 129 */ 130 if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 && 131 (c->x86_mask < 0x6 || c->x86_mask == 0xb)) 132 set_cpu_cap(c, X86_FEATURE_11AP); 133 134 135 #ifdef CONFIG_X86_INTEL_USERCOPY 136 /* 137 * Set up the preferred alignment for movsl bulk memory moves 138 */ 139 switch (c->x86) { 140 case 4: /* 486: untested */ 141 break; 142 case 5: /* Old Pentia: untested */ 143 break; 144 case 6: /* PII/PIII only like movsl with 8-byte alignment */ 145 movsl_mask.mask = 7; 146 break; 147 case 15: /* P4 is OK down to 8-byte alignment */ 148 movsl_mask.mask = 7; 149 break; 150 } 151 #endif 152 153 #ifdef CONFIG_X86_NUMAQ 154 numaq_tsc_disable(); 155 #endif 156 } 157 #else 158 static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) 159 { 160 } 161 #endif 162 163 static void __cpuinit srat_detect_node(void) 164 { 165 #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) 166 unsigned node; 167 int cpu = smp_processor_id(); 168 int apicid = hard_smp_processor_id(); 169 170 /* Don't do the funky fallback heuristics the AMD version employs 171 for now. */ 172 node = apicid_to_node[apicid]; 173 if (node == NUMA_NO_NODE || !node_online(node)) 174 node = first_node(node_online_map); 175 numa_set_node(cpu, node); 176 177 printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node); 178 #endif 179 } 180 181 /* 182 * find out the number of processor cores on the die 183 */ 184 static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) 185 { 186 unsigned int eax, ebx, ecx, edx; 187 188 if (c->cpuid_level < 4) 189 return 1; 190 191 /* Intel has a non-standard dependency on %ecx for this CPUID level. */ 192 cpuid_count(4, 0, &eax, &ebx, &ecx, &edx); 193 if (eax & 0x1f) 194 return ((eax >> 26) + 1); 195 else 196 return 1; 197 } 198 199 static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c) 200 { 201 /* Intel VMX MSR indicated features */ 202 #define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW 0x00200000 203 #define X86_VMX_FEATURE_PROC_CTLS_VNMI 0x00400000 204 #define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS 0x80000000 205 #define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC 0x00000001 206 #define X86_VMX_FEATURE_PROC_CTLS2_EPT 0x00000002 207 #define X86_VMX_FEATURE_PROC_CTLS2_VPID 0x00000020 208 209 u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2; 210 211 clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW); 212 clear_cpu_cap(c, X86_FEATURE_VNMI); 213 clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY); 214 clear_cpu_cap(c, X86_FEATURE_EPT); 215 clear_cpu_cap(c, X86_FEATURE_VPID); 216 217 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high); 218 msr_ctl = vmx_msr_high | vmx_msr_low; 219 if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW) 220 set_cpu_cap(c, X86_FEATURE_TPR_SHADOW); 221 if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI) 222 set_cpu_cap(c, X86_FEATURE_VNMI); 223 if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) { 224 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, 225 vmx_msr_low, vmx_msr_high); 226 msr_ctl2 = vmx_msr_high | vmx_msr_low; 227 if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) && 228 (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)) 229 set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY); 230 if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT) 231 set_cpu_cap(c, X86_FEATURE_EPT); 232 if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID) 233 set_cpu_cap(c, X86_FEATURE_VPID); 234 } 235 } 236 237 static void __cpuinit init_intel(struct cpuinfo_x86 *c) 238 { 239 unsigned int l2 = 0; 240 241 early_init_intel(c); 242 243 intel_workarounds(c); 244 245 l2 = init_intel_cacheinfo(c); 246 if (c->cpuid_level > 9) { 247 unsigned eax = cpuid_eax(10); 248 /* Check for version and the number of counters */ 249 if ((eax & 0xff) && (((eax>>8) & 0xff) > 1)) 250 set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); 251 } 252 253 if (cpu_has_xmm2) 254 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); 255 if (cpu_has_ds) { 256 unsigned int l1; 257 rdmsr(MSR_IA32_MISC_ENABLE, l1, l2); 258 if (!(l1 & (1<<11))) 259 set_cpu_cap(c, X86_FEATURE_BTS); 260 if (!(l1 & (1<<12))) 261 set_cpu_cap(c, X86_FEATURE_PEBS); 262 ds_init_intel(c); 263 } 264 265 #ifdef CONFIG_X86_64 266 if (c->x86 == 15) 267 c->x86_cache_alignment = c->x86_clflush_size * 2; 268 if (c->x86 == 6) 269 set_cpu_cap(c, X86_FEATURE_REP_GOOD); 270 #else 271 /* 272 * Names for the Pentium II/Celeron processors 273 * detectable only by also checking the cache size. 274 * Dixon is NOT a Celeron. 275 */ 276 if (c->x86 == 6) { 277 char *p = NULL; 278 279 switch (c->x86_model) { 280 case 5: 281 if (c->x86_mask == 0) { 282 if (l2 == 0) 283 p = "Celeron (Covington)"; 284 else if (l2 == 256) 285 p = "Mobile Pentium II (Dixon)"; 286 } 287 break; 288 289 case 6: 290 if (l2 == 128) 291 p = "Celeron (Mendocino)"; 292 else if (c->x86_mask == 0 || c->x86_mask == 5) 293 p = "Celeron-A"; 294 break; 295 296 case 8: 297 if (l2 == 128) 298 p = "Celeron (Coppermine)"; 299 break; 300 } 301 302 if (p) 303 strcpy(c->x86_model_id, p); 304 } 305 306 if (c->x86 == 15) 307 set_cpu_cap(c, X86_FEATURE_P4); 308 if (c->x86 == 6) 309 set_cpu_cap(c, X86_FEATURE_P3); 310 311 if (cpu_has_bts) 312 ptrace_bts_init_intel(c); 313 314 #endif 315 316 detect_extended_topology(c); 317 if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) { 318 /* 319 * let's use the legacy cpuid vector 0x1 and 0x4 for topology 320 * detection. 321 */ 322 c->x86_max_cores = intel_num_cpu_cores(c); 323 #ifdef CONFIG_X86_32 324 detect_ht(c); 325 #endif 326 } 327 328 /* Work around errata */ 329 srat_detect_node(); 330 331 if (cpu_has(c, X86_FEATURE_VMX)) 332 detect_vmx_virtcap(c); 333 } 334 335 #ifdef CONFIG_X86_32 336 static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) 337 { 338 /* 339 * Intel PIII Tualatin. This comes in two flavours. 340 * One has 256kb of cache, the other 512. We have no way 341 * to determine which, so we use a boottime override 342 * for the 512kb model, and assume 256 otherwise. 343 */ 344 if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0)) 345 size = 256; 346 return size; 347 } 348 #endif 349 350 static struct cpu_dev intel_cpu_dev __cpuinitdata = { 351 .c_vendor = "Intel", 352 .c_ident = { "GenuineIntel" }, 353 #ifdef CONFIG_X86_32 354 .c_models = { 355 { .vendor = X86_VENDOR_INTEL, .family = 4, .model_names = 356 { 357 [0] = "486 DX-25/33", 358 [1] = "486 DX-50", 359 [2] = "486 SX", 360 [3] = "486 DX/2", 361 [4] = "486 SL", 362 [5] = "486 SX/2", 363 [7] = "486 DX/2-WB", 364 [8] = "486 DX/4", 365 [9] = "486 DX/4-WB" 366 } 367 }, 368 { .vendor = X86_VENDOR_INTEL, .family = 5, .model_names = 369 { 370 [0] = "Pentium 60/66 A-step", 371 [1] = "Pentium 60/66", 372 [2] = "Pentium 75 - 200", 373 [3] = "OverDrive PODP5V83", 374 [4] = "Pentium MMX", 375 [7] = "Mobile Pentium 75 - 200", 376 [8] = "Mobile Pentium MMX" 377 } 378 }, 379 { .vendor = X86_VENDOR_INTEL, .family = 6, .model_names = 380 { 381 [0] = "Pentium Pro A-step", 382 [1] = "Pentium Pro", 383 [3] = "Pentium II (Klamath)", 384 [4] = "Pentium II (Deschutes)", 385 [5] = "Pentium II (Deschutes)", 386 [6] = "Mobile Pentium II", 387 [7] = "Pentium III (Katmai)", 388 [8] = "Pentium III (Coppermine)", 389 [10] = "Pentium III (Cascades)", 390 [11] = "Pentium III (Tualatin)", 391 } 392 }, 393 { .vendor = X86_VENDOR_INTEL, .family = 15, .model_names = 394 { 395 [0] = "Pentium 4 (Unknown)", 396 [1] = "Pentium 4 (Willamette)", 397 [2] = "Pentium 4 (Northwood)", 398 [4] = "Pentium 4 (Foster)", 399 [5] = "Pentium 4 (Foster)", 400 } 401 }, 402 }, 403 .c_size_cache = intel_size_cache, 404 #endif 405 .c_early_init = early_init_intel, 406 .c_init = init_intel, 407 .c_x86_vendor = X86_VENDOR_INTEL, 408 }; 409 410 cpu_dev_register(intel_cpu_dev); 411 412