// SPDX-License-Identifier: GPL-2.0

#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/minmax.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/types.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#endif

#include <asm/bugs.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/cpu.h>
#include <asm/cpuid/api.h>
#include <asm/hwcap2.h>
#include <asm/intel-family.h>
#include <asm/microcode.h>
#include <asm/msr.h>
#include <asm/numa.h>
#include <asm/resctrl.h>
#include <asm/thermal.h>
#include <asm/uaccess.h>

#include "cpu.h"

/*
 * Processors which have self-snooping capability can handle conflicting
 * memory types across CPUs by snooping their own caches. However, there
 * exist CPU models on which conflicting memory types still lead to
 * unpredictable behavior, machine check errors, or hangs. Clear this
 * feature to prevent its use on machines with known errata.
 */
static void check_memory_type_self_snoop_errata(struct cpuinfo_x86 *c)
{
	switch (c->x86_vfm) {
	case INTEL_CORE_YONAH:
	case INTEL_CORE2_MEROM:
	case INTEL_CORE2_MEROM_L:
	case INTEL_CORE2_PENRYN:
	case INTEL_CORE2_DUNNINGTON:
	case INTEL_NEHALEM:
	case INTEL_NEHALEM_G:
	case INTEL_NEHALEM_EP:
	case INTEL_NEHALEM_EX:
	case INTEL_WESTMERE:
	case INTEL_WESTMERE_EP:
	case INTEL_SANDYBRIDGE:
		setup_clear_cpu_cap(X86_FEATURE_SELFSNOOP);
	}
}

static bool ring3mwait_disabled __read_mostly;

static int __init ring3mwait_disable(char *__unused)
{
	ring3mwait_disabled = true;
	return 1;
}
__setup("ring3mwait=disable", ring3mwait_disable);

static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
{
	/*
	 * The Ring 3 MONITOR/MWAIT feature cannot be detected without a
	 * CPU family and model comparison.
	 */
	if (c->x86 != 6)
		return;
	switch (c->x86_vfm) {
	case INTEL_XEON_PHI_KNL:
	case INTEL_XEON_PHI_KNM:
		break;
	default:
		return;
	}

	if (ring3mwait_disabled)
		return;

	set_cpu_cap(c, X86_FEATURE_RING3MWAIT);
	this_cpu_or(msr_misc_features_shadow,
		    1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT);

	if (c == &boot_cpu_data)
		ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
}
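
/*
 * Illustrative sketch, not part of this file: once HWCAP2_RING3MWAIT is
 * advertised above, userspace can detect it through the auxiliary vector,
 * e.g.:
 *
 *	#include <sys/auxv.h>
 *	#include <asm/hwcap2.h>
 *
 *	if (getauxval(AT_HWCAP2) & HWCAP2_RING3MWAIT)
 *		// MONITOR/MWAIT are usable from ring 3
 */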

/*
 * Early microcode releases for the Spectre v2 mitigation were broken.
 * Information taken from:
 * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
 * - https://kb.vmware.com/s/article/52345
 * - Microcode revisions observed in the wild
 * - Release note from 20180108 microcode release
 */
struct sku_microcode {
	u32 vfm;
	u8 stepping;
	u32 microcode;
};
static const struct sku_microcode spectre_bad_microcodes[] = {
	{ INTEL_KABYLAKE,	0x0B,	0x80 },
	{ INTEL_KABYLAKE,	0x0A,	0x80 },
	{ INTEL_KABYLAKE,	0x09,	0x80 },
	{ INTEL_KABYLAKE_L,	0x0A,	0x80 },
	{ INTEL_KABYLAKE_L,	0x09,	0x80 },
	{ INTEL_SKYLAKE_X,	0x03,	0x0100013e },
	{ INTEL_SKYLAKE_X,	0x04,	0x0200003c },
	{ INTEL_BROADWELL,	0x04,	0x28 },
	{ INTEL_BROADWELL_G,	0x01,	0x1b },
	{ INTEL_BROADWELL_D,	0x02,	0x14 },
	{ INTEL_BROADWELL_D,	0x03,	0x07000011 },
	{ INTEL_BROADWELL_X,	0x01,	0x0b000025 },
	{ INTEL_HASWELL_L,	0x01,	0x21 },
	{ INTEL_HASWELL_G,	0x01,	0x18 },
	{ INTEL_HASWELL,	0x03,	0x23 },
	{ INTEL_HASWELL_X,	0x02,	0x3b },
	{ INTEL_HASWELL_X,	0x04,	0x10 },
	{ INTEL_IVYBRIDGE_X,	0x04,	0x42a },
	/* Observed in the wild */
	{ INTEL_SANDYBRIDGE_X,	0x06,	0x61b },
	{ INTEL_SANDYBRIDGE_X,	0x07,	0x712 },
};

static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
{
	int i;

	/*
	 * We know that hypervisors lie about the microcode version, so
	 * we may as well hope that they are running the correct version.
	 */
	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
		return false;

	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
		if (c->x86_vfm == spectre_bad_microcodes[i].vfm &&
		    c->x86_stepping == spectre_bad_microcodes[i].stepping)
			return (c->microcode <= spectre_bad_microcodes[i].microcode);
	}
	return false;
}
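
/*
 * Layout sketch of MSR_IA32_TME_ACTIVATE, covering only the fields that
 * the helpers below decode (illustrative, not the full MSR definition):
 *
 *	bit  0		TME lock (set once by BIOS, locks the MSR)
 *	bit  1		TME encryption enable
 *	bits 35:32	number of KeyID bits reserved for MKTME
 */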
#define MSR_IA32_TME_ACTIVATE		0x982

/* Helpers to access TME_ACTIVATE MSR */
#define TME_ACTIVATE_LOCKED(x)		(x & 0x1)
#define TME_ACTIVATE_ENABLED(x)		(x & 0x2)

#define TME_ACTIVATE_KEYID_BITS(x)	((x >> 32) & 0xf)	/* Bits 35:32 */

static void detect_tme_early(struct cpuinfo_x86 *c)
{
	u64 tme_activate;
	int keyid_bits;

	rdmsrq(MSR_IA32_TME_ACTIVATE, tme_activate);

	if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
		pr_info_once("x86/tme: not enabled by BIOS\n");
		clear_cpu_cap(c, X86_FEATURE_TME);
		return;
	}
	pr_info_once("x86/tme: enabled by BIOS\n");
	keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
	if (!keyid_bits)
		return;

	/*
	 * KeyID bits are set by BIOS and can be present regardless
	 * of whether the kernel is using them. They effectively lower
	 * the number of physical address bits.
	 *
	 * Update cpuinfo_x86::x86_phys_bits accordingly.
	 */
	c->x86_phys_bits -= keyid_bits;
	pr_info_once("x86/mktme: BIOS enabled: x86_phys_bits reduced by %d\n",
		     keyid_bits);
}

void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;

	if (c->x86_vfm < INTEL_PENTIUM_M_DOTHAN)
		return;

	/*
	 * The BIOS can have limited CPUID to leaf 2, which breaks feature
	 * enumeration. Unlock it and update the maximum leaf info.
	 */
	if (msr_clear_bit(MSR_IA32_MISC_ENABLE, MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0)
		c->cpuid_level = cpuid_eax(0);
}
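
/*
 * Note on the c->x86_vfm range checks used throughout this file: x86_vfm
 * packs vendor, family and model into one integer (model in the low bits,
 * family above it, vendor on top - see VFM_MAKE() in asm/cpu_device_id.h),
 * so within one vendor a comparison like
 *
 *	c->x86_vfm >= INTEL_PENTIUM_M_DOTHAN
 *
 * orders CPUs by family first and model second.
 */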
static void early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
		c->microcode = intel_get_microcode_revision();

	c->intel_platform_id = intel_get_platform_id();

	/* Now if any of them are set, check the blacklist and clear the lot */
	if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
	     cpu_has(c, X86_FEATURE_INTEL_STIBP) ||
	     cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
	     cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
		pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
		setup_clear_cpu_cap(X86_FEATURE_IBRS);
		setup_clear_cpu_cap(X86_FEATURE_IBPB);
		setup_clear_cpu_cap(X86_FEATURE_STIBP);
		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
		setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
		setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
		setup_clear_cpu_cap(X86_FEATURE_SSBD);
		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
	}

	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page. This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86_vfm == INTEL_ATOM_BONNELL && c->x86_stepping <= 2 &&
	    c->microcode < 0x20e) {
		pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}

#ifndef CONFIG_X86_64
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86_vfm == INTEL_P4_PRESCOTT &&
	    (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets. (but not across
	 * cabinets - we turn it off in that case explicitly.)
	 *
	 * Use a model-specific check for some older CPUs that have invariant
	 * TSC but may not report it architecturally via 8000_0007.
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	} else if ((c->x86_vfm >= INTEL_P4_PRESCOTT && c->x86_vfm <= INTEL_P4_CEDARMILL) ||
		   (c->x86_vfm >= INTEL_CORE_YONAH && c->x86_vfm <= INTEL_IVYBRIDGE)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
	}

	/* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
	switch (c->x86_vfm) {
	case INTEL_ATOM_SALTWELL_MID:
	case INTEL_ATOM_SALTWELL_TABLET:
	case INTEL_ATOM_SILVERMONT_MID:
	case INTEL_ATOM_AIRMONT_NP:
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
		break;
	}

	/*
	 * PAT is broken on early family 6 CPUs, the last of which
	 * is "Yonah" where the erratum is named "AN7":
	 *
	 *	Page with PAT (Page Attribute Table) Set to USWC
	 *	(Uncacheable Speculative Write Combine) While
	 *	Associated MTRR (Memory Type Range Register) Is UC
	 *	(Uncacheable) May Consolidate to UC
	 *
	 * Disable PAT and fall back to MTRR on these CPUs.
	 */
	if (c->x86_vfm >= INTEL_PENTIUM_PRO &&
	    c->x86_vfm <= INTEL_CORE_YONAH)
		clear_cpu_cap(c, X86_FEATURE_PAT);

	/*
	 * Modern CPUs are generally expected to have a sane fast string
	 * implementation. However, BIOSes typically have a knob to tweak
	 * the architectural MISC_ENABLE.FAST_STRING enable bit.
	 *
	 * Adhere to the preference and program the Linux-defined fast
	 * string flag and enhanced fast string capabilities accordingly.
	 */
	if (c->x86_vfm >= INTEL_PENTIUM_M_DOTHAN) {
		rdmsrq(MSR_IA32_MISC_ENABLE, misc_enable);
		if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
			/* X86_FEATURE_ERMS is set based on CPUID */
			set_cpu_cap(c, X86_FEATURE_REP_GOOD);
		} else {
			pr_info("Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}

	/*
	 * Intel Quark Core DevMan_001.pdf section 6.4.11
	 * "The operating system also is required to invalidate (i.e., flush)
	 *  the TLB when any changes are made to any of the page table entries.
	 *  The operating system must reload CR3 to cause the TLB to be flushed"
	 *
	 * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
	 * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
	 * to be modified.
	 */
	if (c->x86_vfm == INTEL_QUARK_X1000) {
		pr_info("Disabling PGE capability bit\n");
		setup_clear_cpu_cap(X86_FEATURE_PGE);
	}

	check_memory_type_self_snoop_errata(c);

	/*
	 * Adjust the number of physical bits early because it affects the
	 * valid bits of the MTRR mask registers.
	 */
	if (cpu_has(c, X86_FEATURE_TME))
		detect_tme_early(c);
}

static void bsp_init_intel(struct cpuinfo_x86 *c)
{
	resctrl_cpu_detect(c);
}
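
/*
 * Summary of the boot parameters handled in this file (both registered
 * with __setup(); see ring3mwait_disable() above and forcepae_setup()
 * below):
 *
 *	ring3mwait=disable	keep ring 3 MONITOR/MWAIT off on Xeon Phi
 *	forcepae		trust PAE on old Pentium M despite the
 *				missing CPUID bit (32-bit kernels only)
 */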

#ifdef CONFIG_X86_32
/*
 * Early probe support logic for ppro memory erratum #50
 *
 * This is called before we do cpu ident work
 */

int ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vfm == INTEL_PENTIUM_PRO &&
	    boot_cpu_data.x86_stepping < 8) {
		pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}

static void intel_smp_check(struct cpuinfo_x86 *c)
{
	/* Nothing to do on the boot CPU; this runs from identify_secondary_cpu() */
	if (!c->cpu_index)
		return;

	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86_vfm >= INTEL_FAM5_START && c->x86_vfm < INTEL_PENTIUM_MMX &&
	    c->x86_stepping >= 1 && c->x86_stepping <= 4) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable with B stepping processors.\n");
	}
}

static int forcepae;
static int __init forcepae_setup(char *__unused)
{
	forcepae = 1;
	return 1;
}
__setup("forcepae", forcepae_setup);

static void intel_workarounds(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system. Announce that the fault handler will be checking for it.
	 * The Quark is also family 5, but does not have the same bug.
	 */
	clear_cpu_bug(c, X86_BUG_F00F);
	if (c->x86_vfm >= INTEL_FAM5_START && c->x86_vfm < INTEL_QUARK_X1000) {
		static int f00f_workaround_enabled;

		set_cpu_bug(c, X86_BUG_F00F);
		if (!f00f_workaround_enabled) {
			pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
	if ((c->x86_vfm == INTEL_PENTIUM_II_KLAMATH && c->x86_stepping < 3) ||
	    c->x86_vfm < INTEL_PENTIUM_II_KLAMATH)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * PAE CPUID issue: many Pentium M report no PAE but may have a
	 * functionally usable PAE implementation.
	 * Forcefully enable PAE if kernel parameter "forcepae" is present.
	 */
	if (forcepae) {
		pr_warn("PAE forced!\n");
		set_cpu_cap(c, X86_FEATURE_PAE);
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
	}

	/*
	 * P4 Xeon erratum 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if (c->x86_vfm == INTEL_P4_WILLAMETTE && c->x86_stepping == 1) {
		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
			pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
	if (boot_cpu_has(X86_FEATURE_APIC) && c->x86_vfm == INTEL_PENTIUM_75 &&
	    (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
		set_cpu_bug(c, X86_BUG_11AP);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * MOVSL bulk memory moves can be slow when source and dest are not
	 * both 8-byte aligned. PII/PIII only like MOVSL with 8-byte alignment.
	 *
	 * Set the preferred alignment for Pentium Pro and newer processors, as
	 * it has only been tested on these.
	 */
	if (c->x86_vfm >= INTEL_PENTIUM_PRO)
		movsl_mask.mask = 7;
#endif

	intel_smp_check(c);
}
#else
static void intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();

	/*
	 * Don't do the funky fallback heuristics the AMD version employs
	 * for now.
	 */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE || !node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}
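
/*
 * Illustrative sketch, not from this file: when init_cpuid_fault() below
 * sets X86_FEATURE_CPUID_FAULT, a task can ask the kernel to make CPUID
 * fault, which is how tools hide CPUID from sandboxed code, e.g.:
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	syscall(SYS_arch_prctl, ARCH_SET_CPUID, 0);
 *	// a subsequent CPUID instruction now raises SIGSEGV
 *
 * The actual ARCH_SET_CPUID handling lives in arch/x86/kernel/process.c.
 */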
static void init_cpuid_fault(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (!rdmsrq_safe(MSR_PLATFORM_INFO, &msr)) {
		if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
			set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
	}
}

static void init_intel_misc_features(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (rdmsrq_safe(MSR_MISC_FEATURES_ENABLES, &msr))
		return;

	/* Clear all MISC features */
	this_cpu_write(msr_misc_features_shadow, 0);

	/* Check features and update capabilities and shadow control bits */
	init_cpuid_fault(c);
	probe_xeon_phi_r3mwait(c);

	msr = this_cpu_read(msr_misc_features_shadow);
	wrmsrq(MSR_MISC_FEATURES_ENABLES, msr);
}

/*
 * This is a list of Intel CPUs that are known to suffer from downclocking when
 * ZMM registers (512-bit vectors) are used. On these CPUs, when the kernel
 * executes SIMD-optimized code such as cryptography functions or CRCs, it
 * should prefer 256-bit (YMM) code to 512-bit (ZMM) code.
 */
static const struct x86_cpu_id zmm_exclusion_list[] = {
	X86_MATCH_VFM(INTEL_SKYLAKE_X, 0),
	X86_MATCH_VFM(INTEL_ICELAKE_X, 0),
	X86_MATCH_VFM(INTEL_ICELAKE_D, 0),
	X86_MATCH_VFM(INTEL_ICELAKE, 0),
	X86_MATCH_VFM(INTEL_ICELAKE_L, 0),
	X86_MATCH_VFM(INTEL_ICELAKE_NNPI, 0),
	X86_MATCH_VFM(INTEL_TIGERLAKE_L, 0),
	X86_MATCH_VFM(INTEL_TIGERLAKE, 0),
	/* Allow Rocket Lake and later, and Sapphire Rapids and later. */
	{},
};
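
/*
 * Illustrative consumer sketch, not from this file: code with both YMM and
 * ZMM paths is expected to consult the flag set below, roughly like so:
 *
 *	if (cpu_feature_enabled(X86_FEATURE_PREFER_YMM))
 *		use_256bit_vector_path();	// hypothetical helpers
 *	else
 *		use_512bit_vector_path();
 */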
static void init_intel(struct cpuinfo_x86 *c)
{
	early_init_intel(c);

	intel_workarounds(c);

	init_intel_cacheinfo(c);

	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);

		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax >> 8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has(c, X86_FEATURE_XMM2))
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

	if (boot_cpu_has(X86_FEATURE_DS)) {
		unsigned int l1, l2;

		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & MSR_IA32_MISC_ENABLE_BTS_UNAVAIL))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (boot_cpu_has(X86_FEATURE_CLFLUSH) &&
	    (c->x86_vfm == INTEL_CORE2_DUNNINGTON ||
	     c->x86_vfm == INTEL_NEHALEM_EX ||
	     c->x86_vfm == INTEL_WESTMERE_EX))
		set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);

	if (boot_cpu_has(X86_FEATURE_MWAIT) &&
	    (c->x86_vfm == INTEL_ATOM_GOLDMONT ||
	     c->x86_vfm == INTEL_LUNARLAKE_M))
		set_cpu_bug(c, X86_BUG_MONITOR);

#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		unsigned int l2 = c->x86_cache_size;
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (l2 == 0)
				p = "Celeron (Covington)";
			else if (l2 == 256)
				p = "Mobile Pentium II (Dixon)";
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_stepping == 0 || c->x86_stepping == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}
#endif

	if (x86_match_cpu(zmm_exclusion_list))
		set_cpu_cap(c, X86_FEATURE_PREFER_YMM);

	/* Work around errata */
	srat_detect_node(c);

	init_ia32_feat_ctl(c);

	init_intel_misc_features(c);

	split_lock_init();

	intel_init_thermal(c);
}

#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if (c->x86_vfm == INTEL_PENTIUM_III_TUALATIN && size == 0)
		size = 256;

	/*
	 * Intel Quark SoC X1000 contains a 4-way set associative
	 * 16K cache with a 16 byte cache line and 256 lines per tag
	 */
	if (c->x86_vfm == INTEL_QUARK_X1000)
		size = 16;
	return size;
}
#endif
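
/*
 * Illustrative note, not from this file: CPUID leaf 0x2 returns a set of
 * one-byte descriptors packed into EAX/EBX/ECX/EDX, each naming a cache or
 * TLB. For example, descriptor 0x63 describes a data TLB covering 2M/4M
 * pages plus a separate 1G-page array, hence TLB_0x63_2M_4M_ENTRIES below.
 * The cpuid_leaf_0x2()/for_each_cpuid_0x2_desc() helpers (pulled in via
 * asm/cpuid/api.h) walk those descriptors, and intel_tlb_lookup() folds
 * each one into the global tlb_lli_* (instruction) and tlb_lld_* (data)
 * maxima that the kernel reports at boot.
 */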
static void intel_tlb_lookup(const struct leaf_0x2_table *desc)
{
	short entries = desc->entries;

	switch (desc->t_type) {
	case STLB_4K:
		tlb_lli_4k = max(tlb_lli_4k, entries);
		tlb_lld_4k = max(tlb_lld_4k, entries);
		break;
	case STLB_4K_2M:
		tlb_lli_4k = max(tlb_lli_4k, entries);
		tlb_lld_4k = max(tlb_lld_4k, entries);
		tlb_lli_2m = max(tlb_lli_2m, entries);
		tlb_lld_2m = max(tlb_lld_2m, entries);
		tlb_lli_4m = max(tlb_lli_4m, entries);
		tlb_lld_4m = max(tlb_lld_4m, entries);
		break;
	case TLB_INST_ALL:
		tlb_lli_4k = max(tlb_lli_4k, entries);
		tlb_lli_2m = max(tlb_lli_2m, entries);
		tlb_lli_4m = max(tlb_lli_4m, entries);
		break;
	case TLB_INST_4K:
		tlb_lli_4k = max(tlb_lli_4k, entries);
		break;
	case TLB_INST_4M:
		tlb_lli_4m = max(tlb_lli_4m, entries);
		break;
	case TLB_INST_2M_4M:
		tlb_lli_2m = max(tlb_lli_2m, entries);
		tlb_lli_4m = max(tlb_lli_4m, entries);
		break;
	case TLB_DATA_4K:
	case TLB_DATA0_4K:
		tlb_lld_4k = max(tlb_lld_4k, entries);
		break;
	case TLB_DATA_4M:
	case TLB_DATA0_4M:
		tlb_lld_4m = max(tlb_lld_4m, entries);
		break;
	case TLB_DATA_2M_4M:
	case TLB_DATA0_2M_4M:
		tlb_lld_2m = max(tlb_lld_2m, entries);
		tlb_lld_4m = max(tlb_lld_4m, entries);
		break;
	case TLB_DATA_4K_4M:
		tlb_lld_4k = max(tlb_lld_4k, entries);
		tlb_lld_4m = max(tlb_lld_4m, entries);
		break;
	case TLB_DATA_1G_2M_4M:
		tlb_lld_2m = max(tlb_lld_2m, TLB_0x63_2M_4M_ENTRIES);
		tlb_lld_4m = max(tlb_lld_4m, TLB_0x63_2M_4M_ENTRIES);
		fallthrough;
	case TLB_DATA_1G:
		tlb_lld_1g = max(tlb_lld_1g, entries);
		break;
	}
}

static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
	const struct leaf_0x2_table *desc;
	union leaf_0x2_regs regs;
	u8 *ptr;

	if (c->cpuid_level < 2)
		return;

	cpuid_leaf_0x2(&regs);
	for_each_cpuid_0x2_desc(regs, ptr, desc)
		intel_tlb_lookup(desc);
}

static const struct cpu_dev intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX",
			  [9] = "Quark SoC X1000",
		  }
		},
		{ .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.legacy_cache_size = intel_size_cache,
#endif
	.c_detect_tlb	= intel_detect_tlb,
	.c_early_init	= early_init_intel,
	.c_bsp_init	= bsp_init_intel,
	.c_init		= init_intel,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);