// SPDX-License-Identifier: GPL-2.0

#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/minmax.h>
#include <linux/smp.h>
#include <linux/string.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#endif

#include <asm/bugs.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/cpu.h>
#include <asm/hwcap2.h>
#include <asm/intel-family.h>
#include <asm/microcode.h>
#include <asm/msr.h>
#include <asm/numa.h>
#include <asm/resctrl.h>
#include <asm/thermal.h>
#include <asm/uaccess.h>

#include "cpu.h"
/*
 * Processors which have self-snooping capability can handle conflicting
 * memory types across CPUs by snooping their own caches. However, there
 * exist CPU models in which having conflicting memory types still leads
 * to unpredictable behavior, machine check errors, or hangs. Clear this
 * feature to prevent its use on machines with known errata.
 */
static void check_memory_type_self_snoop_errata(struct cpuinfo_x86 *c)
{
	switch (c->x86_vfm) {
	case INTEL_CORE_YONAH:
	case INTEL_CORE2_MEROM:
	case INTEL_CORE2_MEROM_L:
	case INTEL_CORE2_PENRYN:
	case INTEL_CORE2_DUNNINGTON:
	case INTEL_NEHALEM:
	case INTEL_NEHALEM_G:
	case INTEL_NEHALEM_EP:
	case INTEL_NEHALEM_EX:
	case INTEL_WESTMERE:
	case INTEL_WESTMERE_EP:
	case INTEL_SANDYBRIDGE:
		setup_clear_cpu_cap(X86_FEATURE_SELFSNOOP);
	}
}

static bool ring3mwait_disabled __read_mostly;

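/*
 * Booting with "ring3mwait=disable" on the kernel command line opts out
 * of exposing MONITOR/MWAIT to ring 3 on the Xeon Phi models probed in
 * probe_xeon_phi_r3mwait() below.
 */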
static int __init ring3mwait_disable(char *__unused)
{
	ring3mwait_disabled = true;
	return 1;
}
__setup("ring3mwait=disable", ring3mwait_disable);

static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
{
	/*
	 * Ring 3 MONITOR/MWAIT feature cannot be detected without
	 * cpu model and family comparison.
	 */
	if (c->x86 != 6)
		return;
	switch (c->x86_vfm) {
	case INTEL_XEON_PHI_KNL:
	case INTEL_XEON_PHI_KNM:
		break;
	default:
		return;
	}

	if (ring3mwait_disabled)
		return;

	set_cpu_cap(c, X86_FEATURE_RING3MWAIT);
	this_cpu_or(msr_misc_features_shadow,
		    1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT);

	if (c == &boot_cpu_data)
		ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
}

/*
 * Early microcode releases for the Spectre v2 mitigation were broken.
 * Information taken from:
 * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
 * - https://kb.vmware.com/s/article/52345
 * - Microcode revisions observed in the wild
 * - Release note from 20180108 microcode release
 */
struct sku_microcode {
	u32 vfm;
	u8 stepping;
	u32 microcode;
};
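/* Microcode revisions up to and including ->microcode are considered bad */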
static const struct sku_microcode spectre_bad_microcodes[] = {
	{ INTEL_KABYLAKE,	0x0B,	0x80 },
	{ INTEL_KABYLAKE,	0x0A,	0x80 },
	{ INTEL_KABYLAKE,	0x09,	0x80 },
	{ INTEL_KABYLAKE_L,	0x0A,	0x80 },
	{ INTEL_KABYLAKE_L,	0x09,	0x80 },
	{ INTEL_SKYLAKE_X,	0x03,	0x0100013e },
	{ INTEL_SKYLAKE_X,	0x04,	0x0200003c },
	{ INTEL_BROADWELL,	0x04,	0x28 },
	{ INTEL_BROADWELL_G,	0x01,	0x1b },
	{ INTEL_BROADWELL_D,	0x02,	0x14 },
	{ INTEL_BROADWELL_D,	0x03,	0x07000011 },
	{ INTEL_BROADWELL_X,	0x01,	0x0b000025 },
	{ INTEL_HASWELL_L,	0x01,	0x21 },
	{ INTEL_HASWELL_G,	0x01,	0x18 },
	{ INTEL_HASWELL,	0x03,	0x23 },
	{ INTEL_HASWELL_X,	0x02,	0x3b },
	{ INTEL_HASWELL_X,	0x04,	0x10 },
	{ INTEL_IVYBRIDGE_X,	0x04,	0x42a },
	/* Observed in the wild */
	{ INTEL_SANDYBRIDGE_X,	0x06,	0x61b },
	{ INTEL_SANDYBRIDGE_X,	0x07,	0x712 },
};

static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
{
	int i;

	/*
	 * We know that hypervisors lie to us about the microcode version,
	 * so we may as well hope that they are running the correct version.
	 */
	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
		return false;

	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
		if (c->x86_vfm == spectre_bad_microcodes[i].vfm &&
		    c->x86_stepping == spectre_bad_microcodes[i].stepping)
			return (c->microcode <= spectre_bad_microcodes[i].microcode);
	}
	return false;
}
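/* IA32_TME_ACTIVATE: BIOS-programmed Total Memory Encryption activation MSR */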
#define MSR_IA32_TME_ACTIVATE		0x982

/* Helpers to access TME_ACTIVATE MSR */
#define TME_ACTIVATE_LOCKED(x)		(x & 0x1)
#define TME_ACTIVATE_ENABLED(x)		(x & 0x2)

#define TME_ACTIVATE_KEYID_BITS(x)	((x >> 32) & 0xf)	/* Bits 35:32 */

static void detect_tme_early(struct cpuinfo_x86 *c)
{
	u64 tme_activate;
	int keyid_bits;

	rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);

	if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
		pr_info_once("x86/tme: not enabled by BIOS\n");
		clear_cpu_cap(c, X86_FEATURE_TME);
		return;
	}
	pr_info_once("x86/tme: enabled by BIOS\n");
	keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
	if (!keyid_bits)
		return;

	/*
	 * KeyID bits are set by BIOS and can be present regardless
	 * of whether the kernel is using them. They effectively lower
	 * the number of physical address bits.
	 *
	 * Update cpuinfo_x86::x86_phys_bits accordingly.
	 */
	c->x86_phys_bits -= keyid_bits;
	pr_info_once("x86/mktme: BIOS enabled: x86_phys_bits reduced by %d\n",
		     keyid_bits);
}

void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;

	if (c->x86_vfm < INTEL_PENTIUM_M_DOTHAN)
		return;

	/*
	 * The BIOS can have limited CPUID to leaf 2, which breaks feature
	 * enumeration. Unlock it and update the maximum leaf info.
	 */
	if (msr_clear_bit(MSR_IA32_MISC_ENABLE, MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0)
		c->cpuid_level = cpuid_eax(0);
}

static void early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
		c->microcode = intel_get_microcode_revision();

	/* Now if any of them are set, check the blacklist and clear the lot */
	if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
	     cpu_has(c, X86_FEATURE_INTEL_STIBP) ||
	     cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
	     cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
		pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
		setup_clear_cpu_cap(X86_FEATURE_IBRS);
		setup_clear_cpu_cap(X86_FEATURE_IBPB);
		setup_clear_cpu_cap(X86_FEATURE_STIBP);
		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
		setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
		setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
		setup_clear_cpu_cap(X86_FEATURE_SSBD);
		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
	}

	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page. This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86_vfm == INTEL_ATOM_BONNELL && c->x86_stepping <= 2 &&
	    c->microcode < 0x20e) {
		pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86_vfm == INTEL_P4_PRESCOTT &&
	    (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets. (but not across
	 * cabinets - we turn it off in that case explicitly.)
	 *
	 * Use a model-specific check for some older CPUs that have invariant
	 * TSC but may not report it architecturally via 8000_0007.
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	} else if ((c->x86_vfm >= INTEL_P4_PRESCOTT && c->x86_vfm <= INTEL_P4_CEDARMILL) ||
		   (c->x86_vfm >= INTEL_CORE_YONAH && c->x86_vfm <= INTEL_IVYBRIDGE)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
	}

	/* Penwell and Cloverview have a TSC which doesn't sleep on S3 */
	switch (c->x86_vfm) {
	case INTEL_ATOM_SALTWELL_MID:
	case INTEL_ATOM_SALTWELL_TABLET:
	case INTEL_ATOM_SILVERMONT_MID:
	case INTEL_ATOM_AIRMONT_NP:
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
		break;
	}

	/*
	 * PAT is broken on early family 6 CPUs, the last of which
	 * is "Yonah" where the erratum is named "AN7":
	 *
	 *	Page with PAT (Page Attribute Table) Set to USWC
	 *	(Uncacheable Speculative Write Combine) While
	 *	Associated MTRR (Memory Type Range Register) Is UC
	 *	(Uncacheable) May Consolidate to UC
	 *
	 * Disable PAT and fall back to MTRR on these CPUs.
	 */
	if (c->x86_vfm >= INTEL_PENTIUM_PRO &&
	    c->x86_vfm <= INTEL_CORE_YONAH)
		clear_cpu_cap(c, X86_FEATURE_PAT);

	/*
	 * Modern CPUs are generally expected to have a sane fast string
	 * implementation. However, BIOSes typically have a knob to tweak
	 * the architectural MISC_ENABLE.FAST_STRING enable bit.
	 *
	 * Adhere to the preference and program the Linux-defined fast
	 * string flag and enhanced fast string capabilities accordingly.
	 */
	if (c->x86_vfm >= INTEL_PENTIUM_M_DOTHAN) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
			/* X86_FEATURE_ERMS is set based on CPUID */
			set_cpu_cap(c, X86_FEATURE_REP_GOOD);
		} else {
			pr_info("Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}

	/*
	 * Intel Quark Core DevMan_001.pdf section 6.4.11
	 * "The operating system also is required to invalidate (i.e., flush)
	 * the TLB when any changes are made to any of the page table entries.
	 * The operating system must reload CR3 to cause the TLB to be flushed"
	 *
	 * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
	 * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
	 * to be modified.
	 */
	if (c->x86_vfm == INTEL_QUARK_X1000) {
		pr_info("Disabling PGE capability bit\n");
		setup_clear_cpu_cap(X86_FEATURE_PGE);
	}

	check_memory_type_self_snoop_errata(c);

	/*
	 * Adjust the number of physical bits early because it affects the
	 * valid bits of the MTRR mask registers.
	 */
	if (cpu_has(c, X86_FEATURE_TME))
		detect_tme_early(c);
}
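/* Runs once on the boot CPU; detect resctrl (RDT) capabilities early. */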
static void bsp_init_intel(struct cpuinfo_x86 *c)
{
	resctrl_cpu_detect(c);
}

#ifdef CONFIG_X86_32
/*
 * Early probe support logic for ppro memory erratum #50
 *
 * This is called before we do cpu ident work
 */

int ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vfm == INTEL_PENTIUM_PRO &&
	    boot_cpu_data.x86_stepping < 8) {
		pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}

static void intel_smp_check(struct cpuinfo_x86 *c)
{
	/* Called from identify_secondary_cpu()? The boot CPU has cpu_index 0. */
	if (!c->cpu_index)
		return;

	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86_vfm >= INTEL_FAM5_START && c->x86_vfm < INTEL_PENTIUM_MMX &&
	    c->x86_stepping >= 1 && c->x86_stepping <= 4) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
			     "with B stepping processors.\n");
	}
}

static int forcepae;
static int __init forcepae_setup(char *__unused)
{
	forcepae = 1;
	return 1;
}
__setup("forcepae", forcepae_setup);

static void intel_workarounds(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system. Announce that the fault handler will be checking for it.
	 * The Quark is also family 5, but does not have the same bug.
	 */
	clear_cpu_bug(c, X86_BUG_F00F);
	if (c->x86_vfm >= INTEL_FAM5_START && c->x86_vfm < INTEL_QUARK_X1000) {
		static int f00f_workaround_enabled;

		set_cpu_bug(c, X86_BUG_F00F);
		if (!f00f_workaround_enabled) {
			pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
	if ((c->x86_vfm == INTEL_PENTIUM_II_KLAMATH && c->x86_stepping < 3) ||
	    c->x86_vfm < INTEL_PENTIUM_II_KLAMATH)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * PAE CPUID issue: many Pentium M report no PAE but may have a
	 * functionally usable PAE implementation.
	 * Forcefully enable PAE if kernel parameter "forcepae" is present.
	 */
	if (forcepae) {
		pr_warn("PAE forced!\n");
		set_cpu_cap(c, X86_FEATURE_PAE);
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
	}

	/*
	 * P4 Xeon erratum 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if (c->x86_vfm == INTEL_P4_WILLAMETTE && c->x86_stepping == 1) {
		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
			pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
	if (boot_cpu_has(X86_FEATURE_APIC) && c->x86_vfm == INTEL_PENTIUM_75 &&
	    (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
		set_cpu_bug(c, X86_BUG_11AP);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * MOVSL bulk memory moves can be slow when source and dest are not
	 * both 8-byte aligned. PII/PIII only like MOVSL with 8-byte alignment.
	 *
	 * Set the preferred alignment for Pentium Pro and newer processors, as
	 * it has only been tested on these.
	 */
	if (c->x86_vfm >= INTEL_PENTIUM_PRO)
		movsl_mask.mask = 7;
#endif

	intel_smp_check(c);
}
#else
static void intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();

	/*
	 * Don't do the funky fallback heuristics the AMD version employs
	 * for now.
	 */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE || !node_online(node)) {
		/* Reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}
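/*
 * CPUID faulting makes CPUID executed at CPL > 0 raise #GP when enabled
 * via MSR_MISC_FEATURES_ENABLES. The capability bit set here is what
 * lets arch_prctl(ARCH_SET_CPUID, 0) succeed on this CPU.
 */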
static void init_cpuid_fault(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) {
		if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
			set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
	}
}

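/*
 * Reset the MISC_FEATURES_ENABLES shadow, let the probes below set the
 * bits they need, then program the MSR once with the combined result.
 */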
static void init_intel_misc_features(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr))
		return;

	/* Clear all MISC features */
	this_cpu_write(msr_misc_features_shadow, 0);

	/* Check features and update capabilities and shadow control bits */
	init_cpuid_fault(c);
	probe_xeon_phi_r3mwait(c);

	msr = this_cpu_read(msr_misc_features_shadow);
	wrmsrl(MSR_MISC_FEATURES_ENABLES, msr);
}

/*
 * This is a list of Intel CPUs that are known to suffer from downclocking when
 * ZMM registers (512-bit vectors) are used. On these CPUs, when the kernel
 * executes SIMD-optimized code such as cryptography functions or CRCs, it
 * should prefer 256-bit (YMM) code to 512-bit (ZMM) code.
 */
static const struct x86_cpu_id zmm_exclusion_list[] = {
	X86_MATCH_VFM(INTEL_SKYLAKE_X,		0),
	X86_MATCH_VFM(INTEL_ICELAKE_X,		0),
	X86_MATCH_VFM(INTEL_ICELAKE_D,		0),
	X86_MATCH_VFM(INTEL_ICELAKE,		0),
	X86_MATCH_VFM(INTEL_ICELAKE_L,		0),
	X86_MATCH_VFM(INTEL_ICELAKE_NNPI,	0),
	X86_MATCH_VFM(INTEL_TIGERLAKE_L,	0),
	X86_MATCH_VFM(INTEL_TIGERLAKE,		0),
	/* Allow Rocket Lake and later, and Sapphire Rapids and later. */
	{},
};

static void init_intel(struct cpuinfo_x86 *c)
{
	early_init_intel(c);

	intel_workarounds(c);

	init_intel_cacheinfo(c);

	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has(c, X86_FEATURE_XMM2))
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

	if (boot_cpu_has(X86_FEATURE_DS)) {
		unsigned int l1, l2;

		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & MSR_IA32_MISC_ENABLE_BTS_UNAVAIL))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (boot_cpu_has(X86_FEATURE_CLFLUSH) &&
	    (c->x86_vfm == INTEL_CORE2_DUNNINGTON ||
	     c->x86_vfm == INTEL_NEHALEM_EX ||
	     c->x86_vfm == INTEL_WESTMERE_EX))
		set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);

	if (boot_cpu_has(X86_FEATURE_MWAIT) &&
	    (c->x86_vfm == INTEL_ATOM_GOLDMONT ||
	     c->x86_vfm == INTEL_LUNARLAKE_M))
		set_cpu_bug(c, X86_BUG_MONITOR);

#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		unsigned int l2 = c->x86_cache_size;
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (l2 == 0)
				p = "Celeron (Covington)";
			else if (l2 == 256)
				p = "Mobile Pentium II (Dixon)";
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_stepping == 0 || c->x86_stepping == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}
#endif

	if (x86_match_cpu(zmm_exclusion_list))
		set_cpu_cap(c, X86_FEATURE_PREFER_YMM);

	/* Work around errata */
	srat_detect_node(c);

	init_ia32_feat_ctl(c);

	init_intel_misc_features(c);

	split_lock_init();

	intel_init_thermal(c);
}

#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if (c->x86_vfm == INTEL_PENTIUM_III_TUALATIN && size == 0)
		size = 256;

	/*
	 * Intel Quark SoC X1000 contains a 4-way set associative
	 * 16K cache with a 16 byte cache line and 256 lines per tag
	 */
	if (c->x86_vfm == INTEL_QUARK_X1000)
		size = 16;
	return size;
}
#endif

#define TLB_INST_4K		0x01
#define TLB_INST_4M		0x02
#define TLB_INST_2M_4M		0x03

#define TLB_INST_ALL		0x05
#define TLB_INST_1G		0x06

#define TLB_DATA_4K		0x11
#define TLB_DATA_4M		0x12
#define TLB_DATA_2M_4M		0x13
#define TLB_DATA_4K_4M		0x14

#define TLB_DATA_1G		0x16
#define TLB_DATA_1G_2M_4M	0x17

#define TLB_DATA0_4K		0x21
#define TLB_DATA0_4M		0x22
#define TLB_DATA0_2M_4M		0x23

#define STLB_4K			0x41
#define STLB_4K_2M		0x42
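
/*
 * The values above are kernel-internal type tokens, not CPUID leaf 0x2
 * descriptors: 0x0x selects the instruction TLB, 0x1x the data TLB,
 * 0x2x the "data TLB0" variants (folded into the same data-TLB counts
 * by intel_tlb_lookup() below), and 0x4x the shared second-level TLB.
 */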

/*
 * All of leaf 0x2's one-byte TLB descriptors imply the same number of
 * entries for their respective TLB types. The 0x63 descriptor is an
 * exception: it implies 4 dTLB entries for 1GB pages and 32 dTLB entries
 * for 2MB or 4MB pages. Encode descriptor 0x63's dTLB entry count for
 * 2MB/4MB pages here, as its count for dTLB 1GB pages is already in the
 * intel_tlb_table[] mapping.
 */
#define TLB_0x63_2M_4M_ENTRIES	32
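/*
 * Maps one CPUID leaf 0x2 descriptor byte to a TLB type token above and
 * the number of entries that descriptor implies.
 */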
struct _tlb_table {
	unsigned char descriptor;
	char tlb_type;
	unsigned int entries;
};

static const struct _tlb_table intel_tlb_table[] = {
	{ 0x01, TLB_INST_4K,		32 },	/* TLB_INST 4 KByte pages, 4-way set associative */
	{ 0x02, TLB_INST_4M,		2 },	/* TLB_INST 4 MByte pages, fully associative */
	{ 0x03, TLB_DATA_4K,		64 },	/* TLB_DATA 4 KByte pages, 4-way set associative */
	{ 0x04, TLB_DATA_4M,		8 },	/* TLB_DATA 4 MByte pages, 4-way set associative */
	{ 0x05, TLB_DATA_4M,		32 },	/* TLB_DATA 4 MByte pages, 4-way set associative */
	{ 0x0b, TLB_INST_4M,		4 },	/* TLB_INST 4 MByte pages, 4-way set associative */
	{ 0x4f, TLB_INST_4K,		32 },	/* TLB_INST 4 KByte pages */
	{ 0x50, TLB_INST_ALL,		64 },	/* TLB_INST 4 KByte and 2-MByte or 4-MByte pages */
	{ 0x51, TLB_INST_ALL,		128 },	/* TLB_INST 4 KByte and 2-MByte or 4-MByte pages */
	{ 0x52, TLB_INST_ALL,		256 },	/* TLB_INST 4 KByte and 2-MByte or 4-MByte pages */
	{ 0x55, TLB_INST_2M_4M,		7 },	/* TLB_INST 2-MByte or 4-MByte pages, fully associative */
	{ 0x56, TLB_DATA0_4M,		16 },	/* TLB_DATA0 4 MByte pages, 4-way set associative */
	{ 0x57, TLB_DATA0_4K,		16 },	/* TLB_DATA0 4 KByte pages, 4-way associative */
	{ 0x59, TLB_DATA0_4K,		16 },	/* TLB_DATA0 4 KByte pages, fully associative */
	{ 0x5a, TLB_DATA0_2M_4M,	32 },	/* TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative */
	{ 0x5b, TLB_DATA_4K_4M,		64 },	/* TLB_DATA 4 KByte and 4 MByte pages */
	{ 0x5c, TLB_DATA_4K_4M,		128 },	/* TLB_DATA 4 KByte and 4 MByte pages */
	{ 0x5d, TLB_DATA_4K_4M,		256 },	/* TLB_DATA 4 KByte and 4 MByte pages */
	{ 0x61, TLB_INST_4K,		48 },	/* TLB_INST 4 KByte pages, fully associative */
	{ 0x63, TLB_DATA_1G_2M_4M,	4 },	/* TLB_DATA 1 GByte pages, 4-way set associative
						 * (plus 32 entries TLB_DATA 2 MByte or 4 MByte pages, not encoded here) */
	{ 0x6b, TLB_DATA_4K,		256 },	/* TLB_DATA 4 KByte pages, 8-way associative */
	{ 0x6c, TLB_DATA_2M_4M,		128 },	/* TLB_DATA 2 MByte or 4 MByte pages, 8-way associative */
	{ 0x6d, TLB_DATA_1G,		16 },	/* TLB_DATA 1 GByte pages, fully associative */
	{ 0x76, TLB_INST_2M_4M,		8 },	/* TLB_INST 2-MByte or 4-MByte pages, fully associative */
	{ 0xb0, TLB_INST_4K,		128 },	/* TLB_INST 4 KByte pages, 4-way set associative */
	{ 0xb1, TLB_INST_2M_4M,		4 },	/* TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries */
	{ 0xb2, TLB_INST_4K,		64 },	/* TLB_INST 4KByte pages, 4-way set associative */
	{ 0xb3, TLB_DATA_4K,		128 },	/* TLB_DATA 4 KByte pages, 4-way set associative */
	{ 0xb4, TLB_DATA_4K,		256 },	/* TLB_DATA 4 KByte pages, 4-way associative */
	{ 0xb5, TLB_INST_4K,		64 },	/* TLB_INST 4 KByte pages, 8-way set associative */
	{ 0xb6, TLB_INST_4K,		128 },	/* TLB_INST 4 KByte pages, 8-way set associative */
	{ 0xba, TLB_DATA_4K,		64 },	/* TLB_DATA 4 KByte pages, 4-way associative */
	{ 0xc0, TLB_DATA_4K_4M,		8 },	/* TLB_DATA 4 KByte and 4 MByte pages, 4-way associative */
	{ 0xc1, STLB_4K_2M,		1024 },	/* STLB 4 KByte and 2 MByte pages, 8-way associative */
	{ 0xc2, TLB_DATA_2M_4M,		16 },	/* TLB_DATA 2 MByte/4MByte pages, 4-way associative */
	{ 0xca, STLB_4K,		512 },	/* STLB 4 KByte pages, 4-way associative */
	{ 0x00, 0, 0 }
};

static void intel_tlb_lookup(const unsigned char desc)
{
	unsigned int entries;
	unsigned char k;

	if (desc == 0)
		return;

	/* look up this descriptor in the table */
	for (k = 0; intel_tlb_table[k].descriptor != desc &&
	     intel_tlb_table[k].descriptor != 0; k++)
		;

	if (intel_tlb_table[k].tlb_type == 0)
		return;

	entries = intel_tlb_table[k].entries;
	switch (intel_tlb_table[k].tlb_type) {
	case STLB_4K:
		tlb_lli_4k = max(tlb_lli_4k, entries);
		tlb_lld_4k = max(tlb_lld_4k, entries);
		break;
	case STLB_4K_2M:
		tlb_lli_4k = max(tlb_lli_4k, entries);
		tlb_lld_4k = max(tlb_lld_4k, entries);
		tlb_lli_2m = max(tlb_lli_2m, entries);
		tlb_lld_2m = max(tlb_lld_2m, entries);
		tlb_lli_4m = max(tlb_lli_4m, entries);
		tlb_lld_4m = max(tlb_lld_4m, entries);
		break;
	case TLB_INST_ALL:
		tlb_lli_4k = max(tlb_lli_4k, entries);
		tlb_lli_2m = max(tlb_lli_2m, entries);
		tlb_lli_4m = max(tlb_lli_4m, entries);
		break;
	case TLB_INST_4K:
		tlb_lli_4k = max(tlb_lli_4k, entries);
		break;
	case TLB_INST_4M:
		tlb_lli_4m = max(tlb_lli_4m, entries);
		break;
	case TLB_INST_2M_4M:
		tlb_lli_2m = max(tlb_lli_2m, entries);
		tlb_lli_4m = max(tlb_lli_4m, entries);
		break;
	case TLB_DATA_4K:
	case TLB_DATA0_4K:
		tlb_lld_4k = max(tlb_lld_4k, entries);
		break;
	case TLB_DATA_4M:
	case TLB_DATA0_4M:
		tlb_lld_4m = max(tlb_lld_4m, entries);
		break;
	case TLB_DATA_2M_4M:
	case TLB_DATA0_2M_4M:
		tlb_lld_2m = max(tlb_lld_2m, entries);
		tlb_lld_4m = max(tlb_lld_4m, entries);
		break;
	case TLB_DATA_4K_4M:
		tlb_lld_4k = max(tlb_lld_4k, entries);
		tlb_lld_4m = max(tlb_lld_4m, entries);
		break;
	case TLB_DATA_1G_2M_4M:
		tlb_lld_2m = max(tlb_lld_2m, TLB_0x63_2M_4M_ENTRIES);
		tlb_lld_4m = max(tlb_lld_4m, TLB_0x63_2M_4M_ENTRIES);
		fallthrough;
	case TLB_DATA_1G:
		tlb_lld_1g = max(tlb_lld_1g, entries);
		break;
	}
}

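/*
 * CPUID leaf 0x2 packs up to 15 one-byte descriptors into EAX..EDX per
 * invocation; the low byte of EAX is the number of times the leaf must
 * be queried, not a descriptor.
 */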
static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
	int i, j, n;
	unsigned int regs[4];
	unsigned char *desc = (unsigned char *)regs;

	if (c->cpuid_level < 2)
		return;

	/* Number of times to iterate */
	n = cpuid_eax(2) & 0xFF;

	for (i = 0 ; i < n ; i++) {
		cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

		/* If bit 31 is set, this is an unknown format */
		for (j = 0 ; j < 4 ; j++)
			if (regs[j] & (1 << 31))
				regs[j] = 0;

		/* Byte 0 is level count, not a descriptor */
		for (j = 1 ; j < 16 ; j++)
			intel_tlb_lookup(desc[j]);
	}
}

static const struct cpu_dev intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX",
			  [9] = "Quark SoC X1000",
		  }
		},
		{ .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.legacy_cache_size = intel_size_cache,
#endif
	.c_detect_tlb	= intel_detect_tlb,
	.c_early_init	= early_init_intel,
	.c_bsp_init	= bsp_init_intel,
	.c_init		= init_intel,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);
