1 // SPDX-License-Identifier: GPL-2.0-only
2 /* cpu_feature_enabled() cannot be used this early */
3 #define USE_EARLY_PGTABLE_L5
4
5 #include <linux/memblock.h>
6 #include <linux/linkage.h>
7 #include <linux/bitops.h>
8 #include <linux/kernel.h>
9 #include <linux/export.h>
10 #include <linux/kvm_types.h>
11 #include <linux/percpu.h>
12 #include <linux/string.h>
13 #include <linux/ctype.h>
14 #include <linux/delay.h>
15 #include <linux/sched/mm.h>
16 #include <linux/sched/clock.h>
17 #include <linux/sched/task.h>
18 #include <linux/sched/smt.h>
19 #include <linux/init.h>
20 #include <linux/kprobes.h>
21 #include <linux/kgdb.h>
22 #include <linux/mem_encrypt.h>
23 #include <linux/smp.h>
24 #include <linux/cpu.h>
25 #include <linux/io.h>
26 #include <linux/syscore_ops.h>
27 #include <linux/pgtable.h>
28 #include <linux/stackprotector.h>
29 #include <linux/utsname.h>
30 #include <linux/efi.h>
31
32 #include <asm/alternative.h>
33 #include <asm/cmdline.h>
34 #include <asm/cpuid/api.h>
35 #include <asm/perf_event.h>
36 #include <asm/mmu_context.h>
37 #include <asm/doublefault.h>
38 #include <asm/archrandom.h>
39 #include <asm/hypervisor.h>
40 #include <asm/processor.h>
41 #include <asm/tlbflush.h>
42 #include <asm/debugreg.h>
43 #include <asm/sections.h>
44 #include <asm/vsyscall.h>
45 #include <linux/topology.h>
46 #include <linux/cpumask.h>
47 #include <linux/atomic.h>
48 #include <asm/proto.h>
49 #include <asm/setup.h>
50 #include <asm/apic.h>
51 #include <asm/desc.h>
52 #include <asm/fpu/api.h>
53 #include <asm/mtrr.h>
54 #include <asm/hwcap2.h>
55 #include <linux/numa.h>
56 #include <asm/numa.h>
57 #include <asm/asm.h>
58 #include <asm/bugs.h>
59 #include <asm/cpu.h>
60 #include <asm/mce.h>
61 #include <asm/msr.h>
62 #include <asm/cacheinfo.h>
63 #include <asm/memtype.h>
64 #include <asm/microcode.h>
65 #include <asm/intel-family.h>
66 #include <asm/cpu_device_id.h>
67 #include <asm/fred.h>
68 #include <asm/uv/uv.h>
69 #include <asm/ia32.h>
70 #include <asm/set_memory.h>
71 #include <asm/traps.h>
72 #include <asm/sev.h>
73 #include <asm/tdx.h>
74 #include <asm/posted_intr.h>
75 #include <asm/runtime-const.h>
76
77 #include "cpu.h"
78
79 DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
80 EXPORT_PER_CPU_SYMBOL(cpu_info);
81
82 /* Used for modules: built-in code uses runtime constants */
83 unsigned long USER_PTR_MAX;
84 EXPORT_SYMBOL(USER_PTR_MAX);
85
86 u32 elf_hwcap2 __read_mostly;
87
88 /* Maximum number of SMT threads (siblings) per CPU core */
89 unsigned int __max_threads_per_core __ro_after_init = 1;
90 EXPORT_SYMBOL(__max_threads_per_core);
91
92 unsigned int __max_dies_per_package __ro_after_init = 1;
93 EXPORT_SYMBOL(__max_dies_per_package);
94
95 unsigned int __max_logical_packages __ro_after_init = 1;
96 EXPORT_SYMBOL(__max_logical_packages);
97
98 unsigned int __num_nodes_per_package __ro_after_init = 1;
99 EXPORT_SYMBOL(__num_nodes_per_package);
100
101 unsigned int __num_cores_per_package __ro_after_init = 1;
102 EXPORT_SYMBOL(__num_cores_per_package);
103
104 unsigned int __num_threads_per_package __ro_after_init = 1;
105 EXPORT_SYMBOL(__num_threads_per_package);
106
107 static struct ppin_info {
108 int feature;
109 int msr_ppin_ctl;
110 int msr_ppin;
111 } ppin_info[] = {
112 [X86_VENDOR_INTEL] = {
113 .feature = X86_FEATURE_INTEL_PPIN,
114 .msr_ppin_ctl = MSR_PPIN_CTL,
115 .msr_ppin = MSR_PPIN
116 },
117 [X86_VENDOR_AMD] = {
118 .feature = X86_FEATURE_AMD_PPIN,
119 .msr_ppin_ctl = MSR_AMD_PPIN_CTL,
120 .msr_ppin = MSR_AMD_PPIN
121 },
122 };
123
124 static const struct x86_cpu_id ppin_cpuids[] = {
125 X86_MATCH_FEATURE(X86_FEATURE_AMD_PPIN, &ppin_info[X86_VENDOR_AMD]),
126 X86_MATCH_FEATURE(X86_FEATURE_INTEL_PPIN, &ppin_info[X86_VENDOR_INTEL]),
127
128 /* Legacy models without CPUID enumeration */
129 X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &ppin_info[X86_VENDOR_INTEL]),
130 X86_MATCH_VFM(INTEL_HASWELL_X, &ppin_info[X86_VENDOR_INTEL]),
131 X86_MATCH_VFM(INTEL_BROADWELL_D, &ppin_info[X86_VENDOR_INTEL]),
132 X86_MATCH_VFM(INTEL_BROADWELL_X, &ppin_info[X86_VENDOR_INTEL]),
133 X86_MATCH_VFM(INTEL_SKYLAKE_X, &ppin_info[X86_VENDOR_INTEL]),
134 X86_MATCH_VFM(INTEL_ICELAKE_X, &ppin_info[X86_VENDOR_INTEL]),
135 X86_MATCH_VFM(INTEL_ICELAKE_D, &ppin_info[X86_VENDOR_INTEL]),
136 X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &ppin_info[X86_VENDOR_INTEL]),
137 X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &ppin_info[X86_VENDOR_INTEL]),
138 X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &ppin_info[X86_VENDOR_INTEL]),
139 X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &ppin_info[X86_VENDOR_INTEL]),
140
141 {}
142 };
143
144 static void ppin_init(struct cpuinfo_x86 *c)
145 {
146 const struct x86_cpu_id *id;
147 unsigned long long val;
148 struct ppin_info *info;
149
150 id = x86_match_cpu(ppin_cpuids);
151 if (!id)
152 return;
153
154 /*
155 * Testing the presence of the MSR is not enough. Need to check
156 * that the PPIN_CTL allows reading of the PPIN.
157 */
158 info = (struct ppin_info *)id->driver_data;
159
160 if (rdmsrq_safe(info->msr_ppin_ctl, &val))
161 goto clear_ppin;
162
163 if ((val & 3UL) == 1UL) {
164 /* PPIN locked in disabled mode */
165 goto clear_ppin;
166 }
167
168 /* If PPIN is disabled, try to enable */
169 if (!(val & 2UL)) {
170 wrmsrq_safe(info->msr_ppin_ctl, val | 2UL);
171 rdmsrq_safe(info->msr_ppin_ctl, &val);
172 }
173
174 /* Is the enable bit set? */
175 if (val & 2UL) {
176 c->ppin = native_rdmsrq(info->msr_ppin);
177 set_cpu_cap(c, info->feature);
178 return;
179 }
180
181 clear_ppin:
182 setup_clear_cpu_cap(info->feature);
183 }
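
/*
 * Usage sketch (illustrative, not part of this file): once ppin_init()
 * has cached the value and set the vendor PPIN feature bit, consumers
 * only need:
 *
 *	if (cpu_has(c, X86_FEATURE_INTEL_PPIN) ||
 *	    cpu_has(c, X86_FEATURE_AMD_PPIN))
 *		pr_info("PPIN: %016llx\n", c->ppin);
 *
 * which is broadly how the MCE code includes the PPIN in error records.
 */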
184
185 static void default_init(struct cpuinfo_x86 *c)
186 {
187 #ifdef CONFIG_X86_64
188 cpu_detect_cache_sizes(c);
189 #else
190 /* Not much we can do here... */
191 /* Check if at least it has cpuid */
192 if (c->cpuid_level == -1) {
193 /* No cpuid. It must be an ancient CPU */
194 if (c->x86 == 4)
195 strcpy(c->x86_model_id, "486");
196 else if (c->x86 == 3)
197 strcpy(c->x86_model_id, "386");
198 }
199 #endif
200 }
201
202 static const struct cpu_dev default_cpu = {
203 .c_init = default_init,
204 .c_vendor = "Unknown",
205 .c_x86_vendor = X86_VENDOR_UNKNOWN,
206 };
207
208 static const struct cpu_dev *this_cpu = &default_cpu;
209
210 DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
211 #ifdef CONFIG_X86_64
212 /*
213 * We need valid kernel segments for data and code in long mode too
214 * IRET will check the segment types kkeil 2000/10/28
215 * Also sysret mandates a special GDT layout
216 *
217 * TLS descriptors are currently at a different place compared to i386.
218 * Hopefully nobody expects them at a fixed place (Wine?)
219 */
220 [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(DESC_CODE32, 0, 0xfffff),
221 [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(DESC_CODE64, 0, 0xfffff),
222 [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(DESC_DATA64, 0, 0xfffff),
223 [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(DESC_CODE32 | DESC_USER, 0, 0xfffff),
224 [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(DESC_DATA64 | DESC_USER, 0, 0xfffff),
225 [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(DESC_CODE64 | DESC_USER, 0, 0xfffff),
226 #else
227 [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(DESC_CODE32, 0, 0xfffff),
228 [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(DESC_DATA32, 0, 0xfffff),
229 [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(DESC_CODE32 | DESC_USER, 0, 0xfffff),
230 [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(DESC_DATA32 | DESC_USER, 0, 0xfffff),
231 /*
232 * Segments used for calling PnP BIOS have byte granularity.
233 * The code and data segments have fixed 64k limits,
234 * the transfer segment sizes are set at run time.
235 */
236 [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(DESC_CODE32_BIOS, 0, 0xffff),
237 [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(DESC_CODE16, 0, 0xffff),
238 [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(DESC_DATA16, 0, 0xffff),
239 [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(DESC_DATA16, 0, 0),
240 [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(DESC_DATA16, 0, 0),
241 /*
242 * The APM segments have byte granularity and their bases
243 * are set at run time. All have 64k limits.
244 */
245 [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(DESC_CODE32_BIOS, 0, 0xffff),
246 [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(DESC_CODE16, 0, 0xffff),
247 [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(DESC_DATA32_BIOS, 0, 0xffff),
248
249 [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(DESC_DATA32, 0, 0xfffff),
250 [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(DESC_DATA32, 0, 0xfffff),
251 #endif
252 } };
253 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
254 SYM_PIC_ALIAS(gdt_page);
255
256 #ifdef CONFIG_X86_64
257 static int __init x86_nopcid_setup(char *s)
258 {
259 /* nopcid doesn't accept parameters */
260 if (s)
261 return -EINVAL;
262
263 /* do not emit a message if the feature is not present */
264 if (!boot_cpu_has(X86_FEATURE_PCID))
265 return 0;
266
267 setup_clear_cpu_cap(X86_FEATURE_PCID);
268 pr_info("nopcid: PCID feature disabled\n");
269 return 0;
270 }
271 early_param("nopcid", x86_nopcid_setup);
272 #endif
273
274 static int __init x86_noinvpcid_setup(char *s)
275 {
276 /* noinvpcid doesn't accept parameters */
277 if (s)
278 return -EINVAL;
279
280 /* do not emit a message if the feature is not present */
281 if (!boot_cpu_has(X86_FEATURE_INVPCID))
282 return 0;
283
284 setup_clear_cpu_cap(X86_FEATURE_INVPCID);
285 pr_info("noinvpcid: INVPCID feature disabled\n");
286 return 0;
287 }
288 early_param("noinvpcid", x86_noinvpcid_setup);
289
290 /* Standard macro to see if a specific flag is changeable */
291 static inline bool flag_is_changeable_p(unsigned long flag)
292 {
293 unsigned long f1, f2;
294
295 if (!IS_ENABLED(CONFIG_X86_32))
296 return true;
297
298 /*
299 * Cyrix and IDT cpus allow disabling of CPUID
300 * so the code below may return different results
301 * when it is executed before and after enabling
302 * the CPUID. Add "volatile" to not allow gcc to
303 * optimize the subsequent calls to this function.
304 */
305 asm volatile ("pushfl \n\t"
306 "pushfl \n\t"
307 "popl %0 \n\t"
308 "movl %0, %1 \n\t"
309 "xorl %2, %0 \n\t"
310 "pushl %0 \n\t"
311 "popfl \n\t"
312 "pushfl \n\t"
313 "popl %0 \n\t"
314 "popfl \n\t"
315
316 : "=&r" (f1), "=&r" (f2)
317 : "ir" (flag));
318
319 return (f1 ^ f2) & flag;
320 }
321
322 #ifdef CONFIG_X86_32
323 static int cachesize_override = -1;
324 static int disable_x86_serial_nr = 1;
325
326 static int __init cachesize_setup(char *str)
327 {
328 get_option(&str, &cachesize_override);
329 return 1;
330 }
331 __setup("cachesize=", cachesize_setup);
332
333 /* Probe for the CPUID instruction */
334 bool cpuid_feature(void)
335 {
336 return flag_is_changeable_p(X86_EFLAGS_ID);
337 }
338
339 static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
340 {
341 unsigned long lo, hi;
342
343 if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
344 return;
345
346 /* Disable processor serial number: */
347
348 rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
349 lo |= 0x200000;
350 wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
351
352 pr_notice("CPU serial number disabled.\n");
353 clear_cpu_cap(c, X86_FEATURE_PN);
354
355 /* Disabling the serial number may affect the cpuid level */
356 c->cpuid_level = cpuid_eax(0);
357 }
358
359 static int __init x86_serial_nr_setup(char *s)
360 {
361 disable_x86_serial_nr = 0;
362 return 1;
363 }
364 __setup("serialnumber", x86_serial_nr_setup);
365 #else
366 static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
367 {
368 }
369 #endif
370
371 static __always_inline void setup_smep(struct cpuinfo_x86 *c)
372 {
373 if (cpu_has(c, X86_FEATURE_SMEP))
374 cr4_set_bits(X86_CR4_SMEP);
375 }
376
377 static __always_inline void setup_smap(struct cpuinfo_x86 *c)
378 {
379 unsigned long eflags = native_save_fl();
380
381 /* This should have been cleared long ago */
382 BUG_ON(eflags & X86_EFLAGS_AC);
383
384 if (cpu_has(c, X86_FEATURE_SMAP))
385 cr4_set_bits(X86_CR4_SMAP);
386 }
387
388 static __always_inline void setup_umip(struct cpuinfo_x86 *c)
389 {
390 /* Check the boot processor, plus build option for UMIP. */
391 if (!cpu_feature_enabled(X86_FEATURE_UMIP))
392 goto out;
393
394 /* Check the current processor's cpuid bits. */
395 if (!cpu_has(c, X86_FEATURE_UMIP))
396 goto out;
397
398 cr4_set_bits(X86_CR4_UMIP);
399
400 pr_info_once("x86/cpu: User Mode Instruction Prevention (UMIP) activated\n");
401
402 return;
403
404 out:
405 /*
406 * Make sure UMIP is disabled in case it was enabled in a
407 * previous boot (e.g., via kexec).
408 */
409 cr4_clear_bits(X86_CR4_UMIP);
410 }
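
/*
 * For reference: with CR4.UMIP set, SGDT, SIDT, SLDT, SMSW and STR
 * raise #GP when executed at CPL > 0.  A minimal user-space probe
 * (illustrative only) would be:
 *
 *	unsigned char gdtr[10];
 *	asm volatile("sgdt %0" : "=m" (gdtr));	// faults with UMIP enabled
 *
 * The kernel can emulate some of these instructions for legacy user
 * space; that emulation lives in the UMIP emulation code, not here.
 */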
411
412 static __always_inline void setup_lass(struct cpuinfo_x86 *c)
413 {
414 if (!cpu_feature_enabled(X86_FEATURE_LASS))
415 return;
416
417 /*
418 * Legacy vsyscall page access causes a #GP when LASS is active.
419 * Disable LASS because the #GP handler doesn't support vsyscall
420 * emulation.
421 *
422 * Also disable LASS when running under EFI, as some runtime and
423 * boot services rely on 1:1 mappings in the lower half.
424 */
425 if (IS_ENABLED(CONFIG_X86_VSYSCALL_EMULATION) ||
426 IS_ENABLED(CONFIG_EFI)) {
427 setup_clear_cpu_cap(X86_FEATURE_LASS);
428 return;
429 }
430
431 cr4_set_bits(X86_CR4_LASS);
432 }
433
434 /* These bits should not change their value after CPU init is finished. */
435 static const unsigned long cr4_pinned_mask = X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP |
436 X86_CR4_FSGSBASE | X86_CR4_CET;
437
438 /*
439 * The CR pinning protects against ROP on the 'mov %reg, %CRn' instruction(s).
440 * Since you can ROP directly to these instructions (barring shadow stack),
441 * any protection must follow immediately and unconditionally after that.
442 *
443 * Specifically, the CR[04] write functions below will have the value
444 * validation controlled by the @cr_pinning static_branch which is
445 * __ro_after_init, just like the cr4_pinned_bits value.
446 *
447 * Once set, an attacker will have to defeat page-tables to get around these
448 * restrictions. Which is a much bigger ask than 'simple' ROP.
449 */
450 static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
451 static unsigned long cr4_pinned_bits __ro_after_init;
452
453 void native_write_cr0(unsigned long val)
454 {
455 unsigned long bits_missing = 0;
456
457 set_register:
458 asm volatile("mov %0,%%cr0": "+r" (val) : : "memory");
459
460 if (static_branch_likely(&cr_pinning)) {
461 if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) {
462 bits_missing = X86_CR0_WP;
463 val |= bits_missing;
464 goto set_register;
465 }
466 /* Warn after we've set the missing bits. */
467 WARN_ONCE(bits_missing, "CR0 WP bit went missing!?\n");
468 }
469 }
470 EXPORT_SYMBOL(native_write_cr0);
471
472 void __no_profile native_write_cr4(unsigned long val)
473 {
474 unsigned long bits_changed = 0;
475
476 set_register:
477 asm volatile("mov %0,%%cr4": "+r" (val) : : "memory");
478
479 if (static_branch_likely(&cr_pinning)) {
480 if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
481 bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits;
482 val = (val & ~cr4_pinned_mask) | cr4_pinned_bits;
483 goto set_register;
484 }
485 /* Warn after we've corrected the changed bits. */
486 WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n",
487 bits_changed);
488 }
489 }
490 #if IS_MODULE(CONFIG_LKDTM)
491 EXPORT_SYMBOL_GPL(native_write_cr4);
492 #endif
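
/*
 * Illustrative sketch of what the pinning above defends against; this
 * is roughly what the LKDTM module exercises, not code used here:
 *
 *	unsigned long cr4 = native_read_cr4();
 *
 *	native_write_cr4(cr4 & ~X86_CR4_SMEP);	// attacker-style write
 *
 * With cr_pinning enabled, native_write_cr4() notices the cleared
 * pinned bit, rewrites CR4 with the bit restored and emits the
 * WARN_ONCE(), so SMEP stays set.
 */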
493
494 void cr4_update_irqsoff(unsigned long set, unsigned long clear)
495 {
496 unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);
497
498 lockdep_assert_irqs_disabled();
499
500 newval = (cr4 & ~clear) | set;
501 if (newval != cr4) {
502 this_cpu_write(cpu_tlbstate.cr4, newval);
503 __write_cr4(newval);
504 }
505 }
506 EXPORT_SYMBOL_FOR_KVM(cr4_update_irqsoff);
507
508 /* Read the CR4 shadow. */
509 unsigned long cr4_read_shadow(void)
510 {
511 return this_cpu_read(cpu_tlbstate.cr4);
512 }
513 EXPORT_SYMBOL_FOR_KVM(cr4_read_shadow);
514
515 void cr4_init(void)
516 {
517 unsigned long cr4 = __read_cr4();
518
519 if (boot_cpu_has(X86_FEATURE_PCID))
520 cr4 |= X86_CR4_PCIDE;
521 if (static_branch_likely(&cr_pinning))
522 cr4 = (cr4 & ~cr4_pinned_mask) | cr4_pinned_bits;
523
524 __write_cr4(cr4);
525
526 /* Initialize cr4 shadow for this CPU. */
527 this_cpu_write(cpu_tlbstate.cr4, cr4);
528 }
529
530 /*
531 * Once CPU feature detection is finished (and boot params have been
532 * parsed), record any of the sensitive CR bits that are set, and
533 * enable CR pinning.
534 */
535 static void __init setup_cr_pinning(void)
536 {
537 cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & cr4_pinned_mask;
538 static_key_enable(&cr_pinning.key);
539 }
540
541 static __init int x86_nofsgsbase_setup(char *arg)
542 {
543 /* Require an exact match without trailing characters. */
544 if (strlen(arg))
545 return 0;
546
547 /* Do not emit a message if the feature is not present. */
548 if (!boot_cpu_has(X86_FEATURE_FSGSBASE))
549 return 1;
550
551 setup_clear_cpu_cap(X86_FEATURE_FSGSBASE);
552 pr_info("FSGSBASE disabled via kernel command line\n");
553 return 1;
554 }
555 __setup("nofsgsbase", x86_nofsgsbase_setup);
556
557 /*
558 * Protection Keys are not available in 32-bit mode.
559 */
560 static bool pku_disabled;
561
562 static __always_inline void setup_pku(struct cpuinfo_x86 *c)
563 {
564 if (c == &boot_cpu_data) {
565 if (pku_disabled || !cpu_feature_enabled(X86_FEATURE_PKU))
566 return;
567 /*
568 * Setting CR4.PKE will cause the X86_FEATURE_OSPKE cpuid
569 * bit to be set. Enforce it.
570 */
571 setup_force_cpu_cap(X86_FEATURE_OSPKE);
572
573 } else if (!cpu_feature_enabled(X86_FEATURE_OSPKE)) {
574 return;
575 }
576
577 cr4_set_bits(X86_CR4_PKE);
578 /* Load the default PKRU value */
579 pkru_write_default();
580 }
581
582 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
583 static __init int setup_disable_pku(char *arg)
584 {
585 /*
586 * Do not clear the X86_FEATURE_PKU bit. All of the
587 * runtime checks are against OSPKE so clearing the
588 * bit does nothing.
589 *
590 * This way, we will see "pku" in cpuinfo, but not
591 * "ospke", which is exactly what we want. It shows
592 * that the CPU has PKU, but the OS has not enabled it.
593 * This happens to be exactly how a system would look
594 * if we disabled the config option.
595 */
596 pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n");
597 pku_disabled = true;
598 return 1;
599 }
600 __setup("nopku", setup_disable_pku);
601 #endif
602
603 #ifdef CONFIG_X86_KERNEL_IBT
604
605 __noendbr u64 ibt_save(bool disable)
606 {
607 u64 msr = 0;
608
609 if (cpu_feature_enabled(X86_FEATURE_IBT)) {
610 rdmsrq(MSR_IA32_S_CET, msr);
611 if (disable)
612 wrmsrq(MSR_IA32_S_CET, msr & ~CET_ENDBR_EN);
613 }
614
615 return msr;
616 }
617
618 __noendbr void ibt_restore(u64 save)
619 {
620 u64 msr;
621
622 if (cpu_feature_enabled(X86_FEATURE_IBT)) {
623 rdmsrq(MSR_IA32_S_CET, msr);
624 msr &= ~CET_ENDBR_EN;
625 msr |= (save & CET_ENDBR_EN);
626 wrmsrq(MSR_IA32_S_CET, msr);
627 }
628 }
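
/*
 * Typical save/restore pattern (sketch; the EFI runtime wrappers are an
 * assumed example of a caller that must run firmware code which is not
 * IBT safe):
 *
 *	u64 msr = ibt_save(true);	// snapshot S_CET, clear ENDBR_EN
 *	firmware_call();		// hypothetical non-IBT-safe callee
 *	ibt_restore(msr);		// put ENDBR_EN back as it was
 *
 * Only the CET_ENDBR_EN bit is touched; the rest of MSR_IA32_S_CET is
 * preserved across the pair.
 */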
629
630 #endif
631
632 static __always_inline void setup_cet(struct cpuinfo_x86 *c)
633 {
634 bool user_shstk, kernel_ibt;
635
636 if (!IS_ENABLED(CONFIG_X86_CET))
637 return;
638
639 kernel_ibt = HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT);
640 user_shstk = cpu_feature_enabled(X86_FEATURE_SHSTK) &&
641 IS_ENABLED(CONFIG_X86_USER_SHADOW_STACK);
642
643 if (!kernel_ibt && !user_shstk)
644 return;
645
646 if (user_shstk)
647 set_cpu_cap(c, X86_FEATURE_USER_SHSTK);
648
649 if (kernel_ibt)
650 wrmsrq(MSR_IA32_S_CET, CET_ENDBR_EN);
651 else
652 wrmsrq(MSR_IA32_S_CET, 0);
653
654 cr4_set_bits(X86_CR4_CET);
655
656 if (kernel_ibt && ibt_selftest()) {
657 pr_err("IBT selftest: Failed!\n");
658 wrmsrq(MSR_IA32_S_CET, 0);
659 setup_clear_cpu_cap(X86_FEATURE_IBT);
660 }
661 }
662
663 __noendbr void cet_disable(void)
664 {
665 if (!(cpu_feature_enabled(X86_FEATURE_IBT) ||
666 cpu_feature_enabled(X86_FEATURE_SHSTK)))
667 return;
668
669 wrmsrq(MSR_IA32_S_CET, 0);
670 wrmsrq(MSR_IA32_U_CET, 0);
671 }
672
673 /*
674 * Some CPU features depend on higher CPUID levels, which may not always
675 * be available due to CPUID level capping or broken virtualization
676 * software. Add those features to this table to auto-disable them.
677 */
678 struct cpuid_dependent_feature {
679 u32 feature;
680 u32 level;
681 };
682
683 static const struct cpuid_dependent_feature
684 cpuid_dependent_features[] = {
685 { X86_FEATURE_MWAIT, CPUID_LEAF_MWAIT },
686 { X86_FEATURE_DCA, CPUID_LEAF_DCA },
687 { X86_FEATURE_XSAVE, CPUID_LEAF_XSTATE },
688 { 0, 0 }
689 };
690
691 static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
692 {
693 const struct cpuid_dependent_feature *df;
694
695 for (df = cpuid_dependent_features; df->feature; df++) {
696
697 if (!cpu_has(c, df->feature))
698 continue;
699 /*
700 * Note: cpuid_level is set to -1 if unavailable, but
701 * extended_cpuid_level is set to 0 if unavailable
702 * and the legitimate extended levels are all negative
703 * when signed; hence the weird messing around with
704 * signs here...
705 */
706 if (!((s32)df->level < 0 ?
707 (u32)df->level > (u32)c->extended_cpuid_level :
708 (s32)df->level > (s32)c->cpuid_level))
709 continue;
710
711 clear_cpu_cap(c, df->feature);
712 if (!warn)
713 continue;
714
715 pr_warn("CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
716 x86_cap_flags[df->feature], df->level);
717 }
718 }
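
/*
 * Worked example of the signed comparison above (illustrative):
 * X86_FEATURE_XSAVE depends on CPUID_LEAF_XSTATE (0xd).  0xd is
 * positive when signed, so it is compared against cpuid_level; on a
 * system where a hypervisor caps cpuid_level at 0x4,
 *
 *	(s32)0xd > (s32)0x4
 *
 * is true and X86_FEATURE_XSAVE is cleared.  A dependent feature in
 * the 0x8000xxxx range is negative when signed and is instead compared,
 * as unsigned, against extended_cpuid_level.
 */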
719
720 /*
721 * Naming convention should be: <Name> [(<Codename>)]
722 * This table is only used if init_<vendor>() below doesn't set it;
723 * in particular, if CPUID levels 0x80000002..4 are supported, this
724 * isn't used
725 */
726
727 /* Look up CPU names by table lookup. */
728 static const char *table_lookup_model(struct cpuinfo_x86 *c)
729 {
730 #ifdef CONFIG_X86_32
731 const struct legacy_cpu_model_info *info;
732
733 if (c->x86_model >= 16)
734 return NULL; /* Range check */
735
736 if (!this_cpu)
737 return NULL;
738
739 info = this_cpu->legacy_models;
740
741 while (info->family) {
742 if (info->family == c->x86)
743 return info->model_names[c->x86_model];
744 info++;
745 }
746 #endif
747 return NULL; /* Not found */
748 }
749
750 /* Aligned to unsigned long to avoid split lock in atomic bitmap ops */
751 __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
752 __u32 cpu_caps_set[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
753
754 #ifdef CONFIG_X86_32
755 /* The 32-bit entry code needs to find cpu_entry_area. */
756 DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
757 #endif
758
759 /* Load the original GDT from the per-cpu structure */
760 void load_direct_gdt(int cpu)
761 {
762 struct desc_ptr gdt_descr;
763
764 gdt_descr.address = (long)get_cpu_gdt_rw(cpu);
765 gdt_descr.size = GDT_SIZE - 1;
766 load_gdt(&gdt_descr);
767 }
768 EXPORT_SYMBOL_FOR_KVM(load_direct_gdt);
769
770 /* Load a fixmap remapping of the per-cpu GDT */
771 void load_fixmap_gdt(int cpu)
772 {
773 struct desc_ptr gdt_descr;
774
775 gdt_descr.address = (long)get_cpu_gdt_ro(cpu);
776 gdt_descr.size = GDT_SIZE - 1;
777 load_gdt(&gdt_descr);
778 }
779 EXPORT_SYMBOL_GPL(load_fixmap_gdt);
780
781 /**
782 * switch_gdt_and_percpu_base - Switch to direct GDT and runtime per CPU base
783 * @cpu: The CPU number for which this is invoked
784 *
785 * Invoked during early boot to switch from early GDT and early per CPU to
786 * the direct GDT and the runtime per CPU area. On 32-bit the percpu base
787 * switch is implicit by loading the direct GDT. On 64bit this requires
788 * to update GSBASE.
789 */
790 void __init switch_gdt_and_percpu_base(int cpu)
791 {
792 load_direct_gdt(cpu);
793
794 #ifdef CONFIG_X86_64
795 /*
796 * No need to load %gs. It is already correct.
797 *
798 * Writing %gs on 64bit would zero GSBASE which would make any per
799 * CPU operation up to the point of the wrmsrq() fault.
800 *
801 * Set GSBASE to the new offset. Until the wrmsrq() happens the
802 * early mapping is still valid. That means the GSBASE update will
803 * lose any prior per CPU data which was not copied over in
804 * setup_per_cpu_areas().
805 *
806 * This works even with stackprotector enabled because the
807 * per CPU stack canary is 0 in both per CPU areas.
808 */
809 wrmsrq(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
810 #else
811 /*
812 * %fs is already set to __KERNEL_PERCPU, but after switching GDT
813 * it is required to load FS again so that the 'hidden' part is
814 * updated from the new GDT. Up to this point the early per CPU
815 * translation is active. Any content of the early per CPU data
816 * which was not copied over in setup_per_cpu_areas() is lost.
817 */
818 loadsegment(fs, __KERNEL_PERCPU);
819 #endif
820 }
821
822 static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
823
824 static void get_model_name(struct cpuinfo_x86 *c)
825 {
826 unsigned int *v;
827 char *p, *q, *s;
828
829 if (c->extended_cpuid_level < 0x80000004)
830 return;
831
832 v = (unsigned int *)c->x86_model_id;
833 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
834 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
835 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
836 c->x86_model_id[48] = 0;
837
838 /* Trim whitespace */
839 p = q = s = &c->x86_model_id[0];
840
841 while (*p == ' ')
842 p++;
843
844 while (*p) {
845 /* Note the last non-whitespace index */
846 if (!isspace(*p))
847 s = q;
848
849 *q++ = *p++;
850 }
851
852 *(s + 1) = '\0';
853 }
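
/*
 * Example of the trimming above, with an illustrative brand string:
 * "  Intel(R) Xeon(R) CPU  E5-2690  " from CPUID becomes
 * "Intel(R) Xeon(R) CPU  E5-2690"; leading blanks are skipped by the
 * first loop, internal blanks are copied verbatim, and the string is
 * terminated one past the last non-space character tracked in 's'.
 */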
854
855 void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
856 {
857 unsigned int n, dummy, ebx, ecx, edx, l2size;
858
859 n = c->extended_cpuid_level;
860
861 if (n >= 0x80000005) {
862 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
863 c->x86_cache_size = (ecx>>24) + (edx>>24);
864 #ifdef CONFIG_X86_64
865 /* On K8 L1 TLB is inclusive, so don't count it */
866 c->x86_tlbsize = 0;
867 #endif
868 }
869
870 if (n < 0x80000006) /* Some chips just have a large L1. */
871 return;
872
873 cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
874 l2size = ecx >> 16;
875
876 #ifdef CONFIG_X86_64
877 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
878 #else
879 /* do processor-specific cache resizing */
880 if (this_cpu->legacy_cache_size)
881 l2size = this_cpu->legacy_cache_size(c, l2size);
882
883 /* Allow user to override all this if necessary. */
884 if (cachesize_override != -1)
885 l2size = cachesize_override;
886
887 if (l2size == 0)
888 return; /* Again, no L2 cache is possible */
889 #endif
890
891 c->x86_cache_size = l2size;
892 }
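
/*
 * Worked example with an illustrative register value: CPUID 0x80000006
 * reports the unified L2 size in KB in ECX bits 31..16, so an ECX of
 * 0x01006140 yields
 *
 *	l2size = 0x01006140 >> 16 = 0x0100 = 256 KB
 *
 * which is what lands in c->x86_cache_size when no cachesize= override
 * or vendor legacy_cache_size() hook applies.
 */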
893
894 u16 __read_mostly tlb_lli_4k;
895 u16 __read_mostly tlb_lli_2m;
896 u16 __read_mostly tlb_lli_4m;
897 u16 __read_mostly tlb_lld_4k;
898 u16 __read_mostly tlb_lld_2m;
899 u16 __read_mostly tlb_lld_4m;
900 u16 __read_mostly tlb_lld_1g;
901
902 static void cpu_detect_tlb(struct cpuinfo_x86 *c)
903 {
904 if (this_cpu->c_detect_tlb)
905 this_cpu->c_detect_tlb(c);
906
907 pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
908 tlb_lli_4k, tlb_lli_2m, tlb_lli_4m);
909
910 pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
911 tlb_lld_4k, tlb_lld_2m, tlb_lld_4m, tlb_lld_1g);
912 }
913
914 void get_cpu_vendor(struct cpuinfo_x86 *c)
915 {
916 char *v = c->x86_vendor_id;
917 int i;
918
919 for (i = 0; i < X86_VENDOR_NUM; i++) {
920 if (!cpu_devs[i])
921 break;
922
923 if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
924 (cpu_devs[i]->c_ident[1] &&
925 !strcmp(v, cpu_devs[i]->c_ident[1]))) {
926
927 this_cpu = cpu_devs[i];
928 c->x86_vendor = this_cpu->c_x86_vendor;
929 return;
930 }
931 }
932
933 pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
934 "CPU: Your system may be unstable.\n", v);
935
936 c->x86_vendor = X86_VENDOR_UNKNOWN;
937 this_cpu = &default_cpu;
938 }
939
940 void cpu_detect(struct cpuinfo_x86 *c)
941 {
942 /* Get vendor name */
943 cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
944 (unsigned int *)&c->x86_vendor_id[0],
945 (unsigned int *)&c->x86_vendor_id[8],
946 (unsigned int *)&c->x86_vendor_id[4]);
947
948 c->x86 = 4;
949 /* Intel-defined flags: level 0x00000001 */
950 if (c->cpuid_level >= 0x00000001) {
951 u32 junk, tfms, cap0, misc;
952
953 cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
954 c->x86 = x86_family(tfms);
955 c->x86_model = x86_model(tfms);
956 c->x86_stepping = x86_stepping(tfms);
957
958 if (cap0 & (1<<19)) {
959 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
960 c->x86_cache_alignment = c->x86_clflush_size;
961 }
962 }
963 }
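
/*
 * For reference, the CPUID leaf 1 EAX ("tfms") layout decoded above
 * (the extended fields only contribute for families 6 and 15, which the
 * x86_family()/x86_model() helpers handle):
 *
 *	bits  3:0   stepping   -> x86_stepping(tfms)
 *	bits  7:4   model      \
 *	bits 19:16  ext model   > x86_model(tfms)
 *	bits 11:8   family     \
 *	bits 27:20  ext family  > x86_family(tfms)
 *
 * e.g. tfms = 0x000306a9 decodes to family 6, model 0x3a, stepping 9.
 */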
964
965 static void apply_forced_caps(struct cpuinfo_x86 *c)
966 {
967 int i;
968
969 for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
970 c->x86_capability[i] &= ~cpu_caps_cleared[i];
971 c->x86_capability[i] |= cpu_caps_set[i];
972 }
973 }
974
975 static void init_speculation_control(struct cpuinfo_x86 *c)
976 {
977 /*
978 * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
979 * and they also have a different bit for STIBP support. Also,
980 * a hypervisor might have set the individual AMD bits even on
981 * Intel CPUs, for finer-grained selection of what's available.
982 */
983 if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
984 set_cpu_cap(c, X86_FEATURE_IBRS);
985 set_cpu_cap(c, X86_FEATURE_IBPB);
986 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
987 }
988
989 if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
990 set_cpu_cap(c, X86_FEATURE_STIBP);
991
992 if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
993 cpu_has(c, X86_FEATURE_VIRT_SSBD))
994 set_cpu_cap(c, X86_FEATURE_SSBD);
995
996 if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
997 set_cpu_cap(c, X86_FEATURE_IBRS);
998 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
999 }
1000
1001 if (cpu_has(c, X86_FEATURE_AMD_IBPB))
1002 set_cpu_cap(c, X86_FEATURE_IBPB);
1003
1004 if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
1005 set_cpu_cap(c, X86_FEATURE_STIBP);
1006 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
1007 }
1008
1009 if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
1010 set_cpu_cap(c, X86_FEATURE_SSBD);
1011 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
1012 clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
1013 }
1014 }
1015
1016 void get_cpu_cap(struct cpuinfo_x86 *c)
1017 {
1018 u32 eax, ebx, ecx, edx;
1019
1020 /* Intel-defined flags: level 0x00000001 */
1021 if (c->cpuid_level >= 0x00000001) {
1022 cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
1023
1024 c->x86_capability[CPUID_1_ECX] = ecx;
1025 c->x86_capability[CPUID_1_EDX] = edx;
1026 }
1027
1028 /* Thermal and Power Management Leaf: level 0x00000006 (eax) */
1029 if (c->cpuid_level >= 0x00000006)
1030 c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
1031
1032 /* Additional Intel-defined flags: level 0x00000007 */
1033 if (c->cpuid_level >= 0x00000007) {
1034 cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
1035 c->x86_capability[CPUID_7_0_EBX] = ebx;
1036 c->x86_capability[CPUID_7_ECX] = ecx;
1037 c->x86_capability[CPUID_7_EDX] = edx;
1038
1039 /* Check valid sub-leaf index before accessing it */
1040 if (eax >= 1) {
1041 cpuid_count(0x00000007, 1, &eax, &ebx, &ecx, &edx);
1042 c->x86_capability[CPUID_7_1_EAX] = eax;
1043 }
1044 }
1045
1046 /* Extended state features: level 0x0000000d */
1047 if (c->cpuid_level >= 0x0000000d) {
1048 cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);
1049
1050 c->x86_capability[CPUID_D_1_EAX] = eax;
1051 }
1052
1053 /*
1054 * Check if extended CPUID leaves are implemented: Max extended
1055 * CPUID leaf must be in the 0x80000001-0x8000ffff range.
1056 */
1057 eax = cpuid_eax(0x80000000);
1058 c->extended_cpuid_level = ((eax & 0xffff0000) == 0x80000000) ? eax : 0;
1059
1060 if (c->extended_cpuid_level >= 0x80000001) {
1061 cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
1062
1063 c->x86_capability[CPUID_8000_0001_ECX] = ecx;
1064 c->x86_capability[CPUID_8000_0001_EDX] = edx;
1065 }
1066
1067 if (c->extended_cpuid_level >= 0x80000007)
1068 c->x86_power = cpuid_edx(0x80000007);
1069
1070 if (c->extended_cpuid_level >= 0x80000008) {
1071 cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
1072 c->x86_capability[CPUID_8000_0008_EBX] = ebx;
1073 }
1074
1075 if (c->extended_cpuid_level >= 0x8000000a)
1076 c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
1077
1078 if (c->extended_cpuid_level >= 0x8000001f)
1079 c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f);
1080
1081 if (c->extended_cpuid_level >= 0x80000021)
1082 c->x86_capability[CPUID_8000_0021_EAX] = cpuid_eax(0x80000021);
1083
1084 init_scattered_cpuid_features(c);
1085 init_speculation_control(c);
1086
1087 if (IS_ENABLED(CONFIG_X86_64) || cpu_has(c, X86_FEATURE_SEP))
1088 set_cpu_cap(c, X86_FEATURE_SYSFAST32);
1089
1090 /*
1091 * Clear/Set all flags overridden by options, after probe.
1092 * This needs to happen each time we re-probe, which may happen
1093 * several times during CPU initialization.
1094 */
1095 apply_forced_caps(c);
1096 }
1097
1098 void get_cpu_address_sizes(struct cpuinfo_x86 *c)
1099 {
1100 u32 eax, ebx, ecx, edx;
1101
1102 if (!cpu_has(c, X86_FEATURE_CPUID) ||
1103 (c->extended_cpuid_level < 0x80000008)) {
1104 if (IS_ENABLED(CONFIG_X86_64)) {
1105 c->x86_clflush_size = 64;
1106 c->x86_phys_bits = 36;
1107 c->x86_virt_bits = 48;
1108 } else {
1109 c->x86_clflush_size = 32;
1110 c->x86_virt_bits = 32;
1111 c->x86_phys_bits = 32;
1112
1113 if (cpu_has(c, X86_FEATURE_PAE) ||
1114 cpu_has(c, X86_FEATURE_PSE36))
1115 c->x86_phys_bits = 36;
1116 }
1117 } else {
1118 cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
1119
1120 c->x86_virt_bits = (eax >> 8) & 0xff;
1121 c->x86_phys_bits = eax & 0xff;
1122
1123 /* Provide a sane default if not enumerated: */
1124 if (!c->x86_clflush_size)
1125 c->x86_clflush_size = 32;
1126 }
1127
1128 c->x86_cache_bits = c->x86_phys_bits;
1129 c->x86_cache_alignment = c->x86_clflush_size;
1130 }
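
/*
 * Summary of the fallbacks above when CPUID leaf 0x80000008 is not
 * available (values straight from the code):
 *
 *	64-bit: clflush 64, phys_bits 36, virt_bits 48
 *	32-bit: clflush 32, phys_bits 32 (36 with PAE/PSE36), virt_bits 32
 *
 * With the leaf present, EAX[7:0] is the physical and EAX[15:8] the
 * virtual address width.
 */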
1131
1132 static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
1133 {
1134 int i;
1135
1136 /*
1137 * First of all, decide if this is a 486 or higher
1138 * It's a 486 if we can modify the AC flag
1139 */
1140 if (flag_is_changeable_p(X86_EFLAGS_AC))
1141 c->x86 = 4;
1142 else
1143 c->x86 = 3;
1144
1145 for (i = 0; i < X86_VENDOR_NUM; i++)
1146 if (cpu_devs[i] && cpu_devs[i]->c_identify) {
1147 c->x86_vendor_id[0] = 0;
1148 cpu_devs[i]->c_identify(c);
1149 if (c->x86_vendor_id[0]) {
1150 get_cpu_vendor(c);
1151 break;
1152 }
1153 }
1154 }
1155
1156 #define NO_SPECULATION BIT(0)
1157 #define NO_MELTDOWN BIT(1)
1158 #define NO_SSB BIT(2)
1159 #define NO_L1TF BIT(3)
1160 #define NO_MDS BIT(4)
1161 #define MSBDS_ONLY BIT(5)
1162 #define NO_SWAPGS BIT(6)
1163 #define NO_ITLB_MULTIHIT BIT(7)
1164 #define NO_SPECTRE_V2 BIT(8)
1165 #define NO_MMIO BIT(9)
1166 #define NO_EIBRS_PBRSB BIT(10)
1167 #define NO_BHI BIT(11)
1168
1169 #define VULNWL(vendor, family, model, whitelist) \
1170 X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)
1171
1172 #define VULNWL_INTEL(vfm, whitelist) \
1173 X86_MATCH_VFM(vfm, whitelist)
1174
1175 #define VULNWL_AMD(family, whitelist) \
1176 VULNWL(AMD, family, X86_MODEL_ANY, whitelist)
1177
1178 #define VULNWL_HYGON(family, whitelist) \
1179 VULNWL(HYGON, family, X86_MODEL_ANY, whitelist)
1180
1181 static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
1182 VULNWL(ANY, 4, X86_MODEL_ANY, NO_SPECULATION),
1183 VULNWL(CENTAUR, 5, X86_MODEL_ANY, NO_SPECULATION),
1184 VULNWL(INTEL, 5, X86_MODEL_ANY, NO_SPECULATION),
1185 VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION),
1186 VULNWL(VORTEX, 5, X86_MODEL_ANY, NO_SPECULATION),
1187 VULNWL(VORTEX, 6, X86_MODEL_ANY, NO_SPECULATION),
1188
1189 /* Intel Family 6 */
1190 VULNWL_INTEL(INTEL_TIGERLAKE, NO_MMIO),
1191 VULNWL_INTEL(INTEL_TIGERLAKE_L, NO_MMIO),
1192 VULNWL_INTEL(INTEL_ALDERLAKE, NO_MMIO),
1193 VULNWL_INTEL(INTEL_ALDERLAKE_L, NO_MMIO),
1194
1195 VULNWL_INTEL(INTEL_ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT),
1196 VULNWL_INTEL(INTEL_ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT),
1197 VULNWL_INTEL(INTEL_ATOM_SALTWELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT),
1198 VULNWL_INTEL(INTEL_ATOM_BONNELL, NO_SPECULATION | NO_ITLB_MULTIHIT),
1199 VULNWL_INTEL(INTEL_ATOM_BONNELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT),
1200
1201 VULNWL_INTEL(INTEL_ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1202 VULNWL_INTEL(INTEL_ATOM_SILVERMONT_D, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1203 VULNWL_INTEL(INTEL_ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1204 VULNWL_INTEL(INTEL_ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1205 VULNWL_INTEL(INTEL_XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1206 VULNWL_INTEL(INTEL_XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1207
1208 VULNWL_INTEL(INTEL_CORE_YONAH, NO_SSB),
1209
1210 VULNWL_INTEL(INTEL_ATOM_SILVERMONT_MID2,NO_SSB | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | MSBDS_ONLY),
1211 VULNWL_INTEL(INTEL_ATOM_AIRMONT_NP, NO_SSB | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
1212
1213 VULNWL_INTEL(INTEL_ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
1214 VULNWL_INTEL(INTEL_ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
1215 VULNWL_INTEL(INTEL_ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
1216
1217 /*
1218 * Technically, swapgs isn't serializing on AMD (despite it previously
1219 * being documented as such in the APM). But according to AMD, %gs is
1220 * updated non-speculatively, and the issuing of %gs-relative memory
1221 * operands will be blocked until the %gs update completes, which is
1222 * good enough for our purposes.
1223 */
1224
1225 VULNWL_INTEL(INTEL_ATOM_TREMONT, NO_EIBRS_PBRSB),
1226 VULNWL_INTEL(INTEL_ATOM_TREMONT_L, NO_EIBRS_PBRSB),
1227 VULNWL_INTEL(INTEL_ATOM_TREMONT_D, NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
1228
1229 /* AMD Family 0xf - 0x12 */
1230 VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
1231 VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
1232 VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
1233 VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
1234
1235 /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
1236 VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB | NO_BHI),
1237 VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB | NO_BHI),
1238
1239 /* Zhaoxin Family 7 */
1240 VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO | NO_BHI),
1241 VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO | NO_BHI),
1242 {}
1243 };
1244
1245 #define VULNBL(vendor, family, model, blacklist) \
1246 X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, blacklist)
1247
1248 #define VULNBL_INTEL_STEPS(vfm, max_stepping, issues) \
1249 X86_MATCH_VFM_STEPS(vfm, X86_STEP_MIN, max_stepping, issues)
1250
1251 #define VULNBL_INTEL_TYPE(vfm, cpu_type, issues) \
1252 X86_MATCH_VFM_CPU_TYPE(vfm, INTEL_CPU_TYPE_##cpu_type, issues)
1253
1254 #define VULNBL_AMD(family, blacklist) \
1255 VULNBL(AMD, family, X86_MODEL_ANY, blacklist)
1256
1257 #define VULNBL_HYGON(family, blacklist) \
1258 VULNBL(HYGON, family, X86_MODEL_ANY, blacklist)
1259
1260 #define SRBDS BIT(0)
1261 /* CPU is affected by X86_BUG_MMIO_STALE_DATA */
1262 #define MMIO BIT(1)
1263 /* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */
1264 #define MMIO_SBDS BIT(2)
1265 /* CPU is affected by RETbleed, speculating where you would not expect it */
1266 #define RETBLEED BIT(3)
1267 /* CPU is affected by SMT (cross-thread) return predictions */
1268 #define SMT_RSB BIT(4)
1269 /* CPU is affected by SRSO */
1270 #define SRSO BIT(5)
1271 /* CPU is affected by GDS */
1272 #define GDS BIT(6)
1273 /* CPU is affected by Register File Data Sampling */
1274 #define RFDS BIT(7)
1275 /* CPU is affected by Indirect Target Selection */
1276 #define ITS BIT(8)
1277 /* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */
1278 #define ITS_NATIVE_ONLY BIT(9)
1279 /* CPU is affected by Transient Scheduler Attacks */
1280 #define TSA BIT(10)
1281 /* CPU is affected by VMSCAPE */
1282 #define VMSCAPE BIT(11)
1283
1284 static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
1285 VULNBL_INTEL_STEPS(INTEL_SANDYBRIDGE_X, X86_STEP_MAX, VMSCAPE),
1286 VULNBL_INTEL_STEPS(INTEL_SANDYBRIDGE, X86_STEP_MAX, VMSCAPE),
1287 VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE_X, X86_STEP_MAX, VMSCAPE),
1288 VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE, X86_STEP_MAX, SRBDS | VMSCAPE),
1289 VULNBL_INTEL_STEPS(INTEL_HASWELL, X86_STEP_MAX, SRBDS | VMSCAPE),
1290 VULNBL_INTEL_STEPS(INTEL_HASWELL_L, X86_STEP_MAX, SRBDS | VMSCAPE),
1291 VULNBL_INTEL_STEPS(INTEL_HASWELL_G, X86_STEP_MAX, SRBDS | VMSCAPE),
1292 VULNBL_INTEL_STEPS(INTEL_HASWELL_X, X86_STEP_MAX, MMIO | VMSCAPE),
1293 VULNBL_INTEL_STEPS(INTEL_BROADWELL_D, X86_STEP_MAX, MMIO | VMSCAPE),
1294 VULNBL_INTEL_STEPS(INTEL_BROADWELL_X, X86_STEP_MAX, MMIO | VMSCAPE),
1295 VULNBL_INTEL_STEPS(INTEL_BROADWELL_G, X86_STEP_MAX, SRBDS | VMSCAPE),
1296 VULNBL_INTEL_STEPS(INTEL_BROADWELL, X86_STEP_MAX, SRBDS | VMSCAPE),
1297 VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X, 0x5, MMIO | RETBLEED | GDS | VMSCAPE),
1298 VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X, X86_STEP_MAX, MMIO | RETBLEED | GDS | ITS | VMSCAPE),
1299 VULNBL_INTEL_STEPS(INTEL_SKYLAKE_L, X86_STEP_MAX, MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
1300 VULNBL_INTEL_STEPS(INTEL_SKYLAKE, X86_STEP_MAX, MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
1301 VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L, 0xb, MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
1302 VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L, X86_STEP_MAX, MMIO | RETBLEED | GDS | SRBDS | ITS | VMSCAPE),
1303 VULNBL_INTEL_STEPS(INTEL_KABYLAKE, 0xc, MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
1304 VULNBL_INTEL_STEPS(INTEL_KABYLAKE, X86_STEP_MAX, MMIO | RETBLEED | GDS | SRBDS | ITS | VMSCAPE),
1305 VULNBL_INTEL_STEPS(INTEL_CANNONLAKE_L, X86_STEP_MAX, RETBLEED | VMSCAPE),
1306 VULNBL_INTEL_STEPS(INTEL_ICELAKE_L, X86_STEP_MAX, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
1307 VULNBL_INTEL_STEPS(INTEL_ICELAKE_D, X86_STEP_MAX, MMIO | GDS | ITS | ITS_NATIVE_ONLY),
1308 VULNBL_INTEL_STEPS(INTEL_ICELAKE_X, X86_STEP_MAX, MMIO | GDS | ITS | ITS_NATIVE_ONLY),
1309 VULNBL_INTEL_STEPS(INTEL_COMETLAKE, X86_STEP_MAX, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | VMSCAPE),
1310 VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L, 0x0, MMIO | RETBLEED | ITS | VMSCAPE),
1311 VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L, X86_STEP_MAX, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | VMSCAPE),
1312 VULNBL_INTEL_STEPS(INTEL_TIGERLAKE_L, X86_STEP_MAX, GDS | ITS | ITS_NATIVE_ONLY),
1313 VULNBL_INTEL_STEPS(INTEL_TIGERLAKE, X86_STEP_MAX, GDS | ITS | ITS_NATIVE_ONLY),
1314 VULNBL_INTEL_STEPS(INTEL_LAKEFIELD, X86_STEP_MAX, MMIO | MMIO_SBDS | RETBLEED),
1315 VULNBL_INTEL_STEPS(INTEL_ROCKETLAKE, X86_STEP_MAX, MMIO | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
1316 VULNBL_INTEL_TYPE(INTEL_ALDERLAKE, ATOM, RFDS | VMSCAPE),
1317 VULNBL_INTEL_STEPS(INTEL_ALDERLAKE, X86_STEP_MAX, VMSCAPE),
1318 VULNBL_INTEL_STEPS(INTEL_ALDERLAKE_L, X86_STEP_MAX, RFDS | VMSCAPE),
1319 VULNBL_INTEL_TYPE(INTEL_RAPTORLAKE, ATOM, RFDS | VMSCAPE),
1320 VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE, X86_STEP_MAX, VMSCAPE),
1321 VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE_P, X86_STEP_MAX, RFDS | VMSCAPE),
1322 VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE_S, X86_STEP_MAX, RFDS | VMSCAPE),
1323 VULNBL_INTEL_STEPS(INTEL_METEORLAKE_L, X86_STEP_MAX, VMSCAPE),
1324 VULNBL_INTEL_STEPS(INTEL_ARROWLAKE_H, X86_STEP_MAX, VMSCAPE),
1325 VULNBL_INTEL_STEPS(INTEL_ARROWLAKE, X86_STEP_MAX, VMSCAPE),
1326 VULNBL_INTEL_STEPS(INTEL_ARROWLAKE_U, X86_STEP_MAX, VMSCAPE),
1327 VULNBL_INTEL_STEPS(INTEL_LUNARLAKE_M, X86_STEP_MAX, VMSCAPE),
1328 VULNBL_INTEL_STEPS(INTEL_SAPPHIRERAPIDS_X, X86_STEP_MAX, VMSCAPE),
1329 VULNBL_INTEL_STEPS(INTEL_GRANITERAPIDS_X, X86_STEP_MAX, VMSCAPE),
1330 VULNBL_INTEL_STEPS(INTEL_EMERALDRAPIDS_X, X86_STEP_MAX, VMSCAPE),
1331 VULNBL_INTEL_STEPS(INTEL_ATOM_GRACEMONT, X86_STEP_MAX, RFDS | VMSCAPE),
1332 VULNBL_INTEL_STEPS(INTEL_ATOM_TREMONT, X86_STEP_MAX, MMIO | MMIO_SBDS | RFDS),
1333 VULNBL_INTEL_STEPS(INTEL_ATOM_TREMONT_D, X86_STEP_MAX, MMIO | RFDS),
1334 VULNBL_INTEL_STEPS(INTEL_ATOM_TREMONT_L, X86_STEP_MAX, MMIO | MMIO_SBDS | RFDS),
1335 VULNBL_INTEL_STEPS(INTEL_ATOM_GOLDMONT, X86_STEP_MAX, RFDS),
1336 VULNBL_INTEL_STEPS(INTEL_ATOM_GOLDMONT_D, X86_STEP_MAX, RFDS),
1337 VULNBL_INTEL_STEPS(INTEL_ATOM_GOLDMONT_PLUS, X86_STEP_MAX, RFDS),
1338 VULNBL_INTEL_STEPS(INTEL_ATOM_CRESTMONT_X, X86_STEP_MAX, VMSCAPE),
1339
1340 VULNBL_AMD(0x15, RETBLEED),
1341 VULNBL_AMD(0x16, RETBLEED),
1342 VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO | VMSCAPE),
1343 VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO | VMSCAPE),
1344 VULNBL_AMD(0x19, SRSO | TSA | VMSCAPE),
1345 VULNBL_AMD(0x1a, SRSO | VMSCAPE),
1346 {}
1347 };
1348
1349 static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which)
1350 {
1351 const struct x86_cpu_id *m = x86_match_cpu(table);
1352
1353 return m && !!(m->driver_data & which);
1354 }
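
/*
 * Usage sketch: callers pass one of the match tables plus the bit they
 * care about, e.g.
 *
 *	if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
 *		return;
 *	if (cpu_matches(cpu_vuln_blacklist, RETBLEED))
 *		setup_force_cpu_bug(X86_BUG_RETBLEED);
 *
 * x86_match_cpu() returns the first matching entry, so table order
 * matters: the FAMILY_ANY entries must stay last.
 */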
1355
1356 u64 x86_read_arch_cap_msr(void)
1357 {
1358 u64 x86_arch_cap_msr = 0;
1359
1360 if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
1361 rdmsrq(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr);
1362
1363 return x86_arch_cap_msr;
1364 }
1365
1366 static bool arch_cap_mmio_immune(u64 x86_arch_cap_msr)
1367 {
1368 return (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO &&
1369 x86_arch_cap_msr & ARCH_CAP_PSDP_NO &&
1370 x86_arch_cap_msr & ARCH_CAP_SBDR_SSDP_NO);
1371 }
1372
1373 static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
1374 {
1375 /* The "immunity" bit trumps everything else: */
1376 if (x86_arch_cap_msr & ARCH_CAP_RFDS_NO)
1377 return false;
1378
1379 /*
1380 * VMMs set ARCH_CAP_RFDS_CLEAR for processors not in the blacklist to
1381 * indicate that mitigation is needed because the guest is running on
1382 * vulnerable hardware or may migrate to such hardware:
1383 */
1384 if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
1385 return true;
1386
1387 /* Only consult the blacklist when there is no enumeration: */
1388 return cpu_matches(cpu_vuln_blacklist, RFDS);
1389 }
1390
1391 static bool __init vulnerable_to_its(u64 x86_arch_cap_msr)
1392 {
1393 /* The "immunity" bit trumps everything else: */
1394 if (x86_arch_cap_msr & ARCH_CAP_ITS_NO)
1395 return false;
1396 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
1397 return false;
1398
1399 /* None of the affected CPUs have BHI_CTRL */
1400 if (boot_cpu_has(X86_FEATURE_BHI_CTRL))
1401 return false;
1402
1403 /*
1404 * If a VMM did not expose ITS_NO, assume that a guest could
1405 * be running on vulnerable hardware or may migrate to such
1406 * hardware.
1407 */
1408 if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
1409 return true;
1410
1411 if (cpu_matches(cpu_vuln_blacklist, ITS))
1412 return true;
1413
1414 return false;
1415 }
1416
1417 static struct x86_cpu_id cpu_latest_microcode[] = {
1418 #include "microcode/intel-ucode-defs.h"
1419 {}
1420 };
1421
1422 static bool __init cpu_has_old_microcode(void)
1423 {
1424 const struct x86_cpu_id *m = x86_match_cpu(cpu_latest_microcode);
1425
1426 /* Give unknown CPUs a pass: */
1427 if (!m) {
1428 /* Intel CPUs should be in the list. Warn if not: */
1429 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
1430 pr_info("x86/CPU: Model not found in latest microcode list\n");
1431 return false;
1432 }
1433
1434 /*
1435 * Hosts usually lie to guests with a super high microcode
1436 * version. Just ignore what hosts tell guests:
1437 */
1438 if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
1439 return false;
1440
1441 /* Consider all debug microcode to be old: */
1442 if (boot_cpu_data.microcode & BIT(31))
1443 return true;
1444
1445 /* Give new microcode a pass: */
1446 if (boot_cpu_data.microcode >= m->driver_data)
1447 return false;
1448
1449 /* Uh oh, too old: */
1450 return true;
1451 }
1452
1453 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
1454 {
1455 u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
1456
1457 if (cpu_has_old_microcode()) {
1458 pr_warn("x86/CPU: Running old microcode\n");
1459 setup_force_cpu_bug(X86_BUG_OLD_MICROCODE);
1460 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
1461 }
1462
1463 /* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
1464 if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
1465 !(x86_arch_cap_msr & ARCH_CAP_PSCHANGE_MC_NO))
1466 setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
1467
1468 if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
1469 return;
1470
1471 setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
1472
1473 if (!cpu_matches(cpu_vuln_whitelist, NO_SPECTRE_V2)) {
1474 setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
1475 setup_force_cpu_bug(X86_BUG_SPECTRE_V2_USER);
1476 }
1477
1478 if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
1479 !(x86_arch_cap_msr & ARCH_CAP_SSB_NO) &&
1480 !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
1481 setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
1482
1483 /*
1484 * AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature
1485 * flag and protect from vendor-specific bugs via the whitelist.
1486 *
1487 * Don't use AutoIBRS when SNP is enabled because it degrades host
1488 * userspace indirect branch performance.
1489 */
1490 if ((x86_arch_cap_msr & ARCH_CAP_IBRS_ALL) ||
1491 (cpu_has(c, X86_FEATURE_AUTOIBRS) &&
1492 !cpu_feature_enabled(X86_FEATURE_SEV_SNP))) {
1493 setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
1494 if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
1495 !(x86_arch_cap_msr & ARCH_CAP_PBRSB_NO))
1496 setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
1497 }
1498
1499 if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
1500 !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)) {
1501 setup_force_cpu_bug(X86_BUG_MDS);
1502 if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
1503 setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
1504 }
1505
1506 if (!cpu_matches(cpu_vuln_whitelist, NO_SWAPGS))
1507 setup_force_cpu_bug(X86_BUG_SWAPGS);
1508
1509 /*
1510 * When the CPU is not mitigated for TAA (TAA_NO=0) set TAA bug when:
1511 * - TSX is supported or
1512 * - TSX_CTRL is present
1513 *
1514 * TSX_CTRL check is needed for cases when TSX could be disabled before
1515 * the kernel boot e.g. kexec.
1516 * TSX_CTRL check alone is not sufficient for cases when the microcode
1517 * update is not present, or when running as a guest that doesn't get TSX_CTRL.
1518 */
1519 if (!(x86_arch_cap_msr & ARCH_CAP_TAA_NO) &&
1520 (cpu_has(c, X86_FEATURE_RTM) ||
1521 (x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR)))
1522 setup_force_cpu_bug(X86_BUG_TAA);
1523
1524 /*
1525 * SRBDS affects CPUs which support RDRAND or RDSEED and are listed
1526 * in the vulnerability blacklist.
1527 *
1528 * Some of the implications and mitigation of Shared Buffers Data
1529 * Sampling (SBDS) are similar to SRBDS. Give SBDS same treatment as
1530 * SRBDS.
1531 */
1532 if ((cpu_has(c, X86_FEATURE_RDRAND) ||
1533 cpu_has(c, X86_FEATURE_RDSEED)) &&
1534 cpu_matches(cpu_vuln_blacklist, SRBDS | MMIO_SBDS))
1535 setup_force_cpu_bug(X86_BUG_SRBDS);
1536
1537 /*
1538 * Processor MMIO Stale Data bug enumeration
1539 *
1540 * The affected CPU list is generally enough to enumerate the vulnerability,
1541 * but in the virtualization case also check the ARCH_CAP MSR bits, as the
1542 * VMM may not want the guest to enumerate the bug.
1543 */
1544 if (!arch_cap_mmio_immune(x86_arch_cap_msr)) {
1545 if (cpu_matches(cpu_vuln_blacklist, MMIO))
1546 setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
1547 }
1548
1549 if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
1550 if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (x86_arch_cap_msr & ARCH_CAP_RSBA))
1551 setup_force_cpu_bug(X86_BUG_RETBLEED);
1552 }
1553
1554 if (cpu_matches(cpu_vuln_blacklist, SMT_RSB))
1555 setup_force_cpu_bug(X86_BUG_SMT_RSB);
1556
1557 if (!cpu_has(c, X86_FEATURE_SRSO_NO)) {
1558 if (cpu_matches(cpu_vuln_blacklist, SRSO))
1559 setup_force_cpu_bug(X86_BUG_SRSO);
1560 }
1561
1562 /*
1563 * Check if CPU is vulnerable to GDS. If running in a virtual machine on
1564 * an affected processor, the VMM may have disabled the use of GATHER by
1565 * disabling AVX2. The only way to do this in HW is to clear XCR0[2],
1566 * which means that AVX will be disabled.
1567 */
1568 if (cpu_matches(cpu_vuln_blacklist, GDS) && !(x86_arch_cap_msr & ARCH_CAP_GDS_NO) &&
1569 boot_cpu_has(X86_FEATURE_AVX))
1570 setup_force_cpu_bug(X86_BUG_GDS);
1571
1572 if (vulnerable_to_rfds(x86_arch_cap_msr))
1573 setup_force_cpu_bug(X86_BUG_RFDS);
1574
1575 /*
1576 * Intel parts with eIBRS are vulnerable to BHI attacks. Parts with
1577 * BHI_NO still need to use the BHI mitigation to prevent Intra-mode
1578 * attacks. When virtualized, eIBRS could be hidden, assume vulnerable.
1579 */
1580 if (!cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
1581 (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
1582 boot_cpu_has(X86_FEATURE_HYPERVISOR)))
1583 setup_force_cpu_bug(X86_BUG_BHI);
1584
1585 if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET))
1586 setup_force_cpu_bug(X86_BUG_IBPB_NO_RET);
1587
1588 if (vulnerable_to_its(x86_arch_cap_msr)) {
1589 setup_force_cpu_bug(X86_BUG_ITS);
1590 if (cpu_matches(cpu_vuln_blacklist, ITS_NATIVE_ONLY))
1591 setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY);
1592 }
1593
1594 if (c->x86_vendor == X86_VENDOR_AMD) {
1595 if (!cpu_has(c, X86_FEATURE_TSA_SQ_NO) ||
1596 !cpu_has(c, X86_FEATURE_TSA_L1_NO)) {
1597 if (cpu_matches(cpu_vuln_blacklist, TSA) ||
1598 /* Enable bug on Zen guests to allow for live migration. */
1599 (cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_ZEN)))
1600 setup_force_cpu_bug(X86_BUG_TSA);
1601 }
1602 }
1603
1604 /*
1605 * Set the bug only on bare-metal. A nested hypervisor should already be
1606 * deploying IBPB to isolate itself from nested guests.
1607 */
1608 if (cpu_matches(cpu_vuln_blacklist, VMSCAPE) &&
1609 !boot_cpu_has(X86_FEATURE_HYPERVISOR))
1610 setup_force_cpu_bug(X86_BUG_VMSCAPE);
1611
1612 if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
1613 return;
1614
1615 /* Rogue Data Cache Load? No! */
1616 if (x86_arch_cap_msr & ARCH_CAP_RDCL_NO)
1617 return;
1618
1619 setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
1620
1621 if (cpu_matches(cpu_vuln_whitelist, NO_L1TF))
1622 return;
1623
1624 setup_force_cpu_bug(X86_BUG_L1TF);
1625 }
1626
1627 /*
1628 * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
1629 * unfortunately, that's not true in practice because of early VIA
1630 * chips and (more importantly) broken virtualizers that are not easy
1631 * to detect. In the latter case it doesn't even *fail* reliably, so
1632 * probing for it doesn't even work. Disable it completely on 32-bit
1633 * unless we can find a reliable way to detect all the broken cases.
1634 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
1635 */
1636 static void detect_nopl(void)
1637 {
1638 #ifdef CONFIG_X86_32
1639 setup_clear_cpu_cap(X86_FEATURE_NOPL);
1640 #else
1641 setup_force_cpu_cap(X86_FEATURE_NOPL);
1642 #endif
1643 }
1644
1645 static inline bool parse_set_clear_cpuid(char *arg, bool set)
1646 {
1647 char *opt;
1648 int taint = 0;
1649
1650 while (arg) {
1651 bool found __maybe_unused = false;
1652 unsigned int bit;
1653
1654 opt = strsep(&arg, ",");
1655
1656 /*
1657 * Handle naked numbers first for feature flags which don't
1658 * have names. It doesn't make sense for a bug not to have a
1659 * name so don't handle bug flags here.
1660 */
1661 if (!kstrtouint(opt, 10, &bit)) {
1662 if (bit < NCAPINTS * 32) {
1663
1664 if (set) {
1665 pr_warn("setcpuid: force-enabling CPU feature flag:");
1666 setup_force_cpu_cap(bit);
1667 } else {
1668 pr_warn("clearcpuid: force-disabling CPU feature flag:");
1669 setup_clear_cpu_cap(bit);
1670 }
1671 /* empty-string, i.e., ""-defined feature flags */
1672 if (!x86_cap_flags[bit])
1673 pr_cont(" %d:%d\n", bit >> 5, bit & 31);
1674 else
1675 pr_cont(" %s\n", x86_cap_flags[bit]);
1676
1677 taint++;
1678 }
1679 /*
1680 			 * The assumption is that no feature name consists only of
1681 			 * numbers, so go on to the next argument.
1682 */
1683 continue;
1684 }
1685
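		/* Not a plain number: match the option against known feature and bug flag names. */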
1686 for (bit = 0; bit < 32 * (NCAPINTS + NBUGINTS); bit++) {
1687 const char *flag;
1688 const char *kind;
1689
1690 if (bit < 32 * NCAPINTS) {
1691 flag = x86_cap_flags[bit];
1692 kind = "feature";
1693 } else {
1694 kind = "bug";
1695 flag = x86_bug_flags[bit - (32 * NCAPINTS)];
1696 }
1697
1698 if (!flag)
1699 continue;
1700
1701 if (strcmp(flag, opt))
1702 continue;
1703
1704 if (set) {
1705 pr_warn("setcpuid: force-enabling CPU %s flag: %s\n",
1706 kind, flag);
1707 setup_force_cpu_cap(bit);
1708 } else {
1709 pr_warn("clearcpuid: force-disabling CPU %s flag: %s\n",
1710 kind, flag);
1711 setup_clear_cpu_cap(bit);
1712 }
1713 taint++;
1714 found = true;
1715 break;
1716 }
1717
1718 if (!found)
1719 pr_warn("%s: unknown CPU flag: %s", set ? "setcpuid" : "clearcpuid", opt);
1720 }
1721
1722 return taint;
1723 }
1724
1725
1726 /*
1727 * We parse cpu parameters early because fpu__init_system() is executed
1728 * before parse_early_param().
1729 */
1730 static void __init cpu_parse_early_param(void)
1731 {
1732 bool cpuid_taint = false;
1733 char arg[128];
1734 int arglen;
1735
1736 #ifdef CONFIG_X86_32
1737 if (cmdline_find_option_bool(boot_command_line, "no387"))
1738 #ifdef CONFIG_MATH_EMULATION
1739 setup_clear_cpu_cap(X86_FEATURE_FPU);
1740 #else
1741 		pr_err("Option 'no387' requires CONFIG_MATH_EMULATION to be enabled.\n");
1742 #endif
1743
1744 if (cmdline_find_option_bool(boot_command_line, "nofxsr"))
1745 setup_clear_cpu_cap(X86_FEATURE_FXSR);
1746 #endif
1747
1748 if (cmdline_find_option_bool(boot_command_line, "noxsave"))
1749 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
1750
1751 if (cmdline_find_option_bool(boot_command_line, "noxsaveopt"))
1752 setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
1753
1754 if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
1755 setup_clear_cpu_cap(X86_FEATURE_XSAVES);
1756
1757 if (cmdline_find_option_bool(boot_command_line, "nousershstk"))
1758 setup_clear_cpu_cap(X86_FEATURE_USER_SHSTK);
1759
1760 	/* Minimize the window between FRED being detected as available and being marked disabled. */
1761 arglen = cmdline_find_option(boot_command_line, "fred", arg, sizeof(arg));
1762 if (arglen != 2 || strncmp(arg, "on", 2))
1763 setup_clear_cpu_cap(X86_FEATURE_FRED);
1764
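	/*
	 * Both options take a comma-separated list of feature/bug flag names
	 * or raw feature bit numbers, e.g. "clearcpuid=smap,123" (illustrative
	 * example only).
	 */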
1765 arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg));
1766 if (arglen > 0)
1767 cpuid_taint |= parse_set_clear_cpuid(arg, false);
1768
1769 arglen = cmdline_find_option(boot_command_line, "setcpuid", arg, sizeof(arg));
1770 if (arglen > 0)
1771 cpuid_taint |= parse_set_clear_cpuid(arg, true);
1772
1773 if (cpuid_taint) {
1774 pr_warn("!!! setcpuid=/clearcpuid= in use, this is for TESTING ONLY, may break things horribly. Tainting kernel.\n");
1775 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
1776 }
1777 }
1778
1779 /*
1780 * Do minimum CPU detection early.
1781 * Fields really needed: vendor, cpuid_level, family, model, mask,
1782 * cache alignment.
1783 * The others are not touched to avoid unwanted side effects.
1784 *
1785 * WARNING: this function is only called on the boot CPU. Don't add code
1786 * here that is supposed to run on all CPUs.
1787 */
1788 static void __init early_identify_cpu(struct cpuinfo_x86 *c)
1789 {
1790 memset(&c->x86_capability, 0, sizeof(c->x86_capability));
1791 c->extended_cpuid_level = 0;
1792
1793 if (!cpuid_feature())
1794 identify_cpu_without_cpuid(c);
1795
1796 	/* Cyrix could have CPUID enabled via c_identify() */
1797 if (cpuid_feature()) {
1798 cpu_detect(c);
1799 get_cpu_vendor(c);
1800 intel_unlock_cpuid_leafs(c);
1801 get_cpu_cap(c);
1802 setup_force_cpu_cap(X86_FEATURE_CPUID);
1803 get_cpu_address_sizes(c);
1804 cpu_parse_early_param();
1805
1806 cpu_init_topology(c);
1807
1808 if (this_cpu->c_early_init)
1809 this_cpu->c_early_init(c);
1810
1811 c->cpu_index = 0;
1812 filter_cpuid_features(c, false);
1813 check_cpufeature_deps(c);
1814
1815 if (this_cpu->c_bsp_init)
1816 this_cpu->c_bsp_init(c);
1817 } else {
1818 setup_clear_cpu_cap(X86_FEATURE_CPUID);
1819 get_cpu_address_sizes(c);
1820 cpu_init_topology(c);
1821 }
1822
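	/* X86_FEATURE_ALWAYS is a synthetic, always-set flag relied upon by the cpu_has()/static_cpu_has() machinery. */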
1823 setup_force_cpu_cap(X86_FEATURE_ALWAYS);
1824
1825 cpu_set_bug_bits(c);
1826
1827 sld_setup(c);
1828
1829 #ifdef CONFIG_X86_32
1830 /*
1831 * Regardless of whether PCID is enumerated, the SDM says
1832 * that it can't be enabled in 32-bit mode.
1833 */
1834 setup_clear_cpu_cap(X86_FEATURE_PCID);
1835
1836 /*
1837 * Never use SYSCALL on a 32-bit kernel
1838 */
1839 setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
1840 #endif
1841
1842 /*
1843 * Later in the boot process pgtable_l5_enabled() relies on
1844 * cpu_feature_enabled(X86_FEATURE_LA57). If 5-level paging is not
1845 * enabled by this point we need to clear the feature bit to avoid
1846 * false-positives at the later stage.
1847 *
1848 * pgtable_l5_enabled() can be false here for several reasons:
1849 * - 5-level paging is disabled compile-time;
1850 * - it's 32-bit kernel;
1851 * - machine doesn't support 5-level paging;
1852 * - user specified 'no5lvl' in kernel command line.
1853 */
1854 if (!pgtable_l5_enabled())
1855 setup_clear_cpu_cap(X86_FEATURE_LA57);
1856
1857 detect_nopl();
1858 mca_bsp_init(c);
1859 }
1860
1861 void __init init_cpu_devs(void)
1862 {
1863 const struct cpu_dev *const *cdev;
1864 int count = 0;
1865
1866 for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
1867 const struct cpu_dev *cpudev = *cdev;
1868
1869 if (count >= X86_VENDOR_NUM)
1870 break;
1871 cpu_devs[count] = cpudev;
1872 count++;
1873 }
1874 }
1875
1876 void __init early_cpu_init(void)
1877 {
1878 #ifdef CONFIG_PROCESSOR_SELECT
1879 unsigned int i, j;
1880
1881 pr_info("KERNEL supported cpus:\n");
1882 #endif
1883
1884 init_cpu_devs();
1885
1886 #ifdef CONFIG_PROCESSOR_SELECT
1887 for (i = 0; i < X86_VENDOR_NUM && cpu_devs[i]; i++) {
1888 for (j = 0; j < 2; j++) {
1889 if (!cpu_devs[i]->c_ident[j])
1890 continue;
1891 pr_info(" %s %s\n", cpu_devs[i]->c_vendor,
1892 cpu_devs[i]->c_ident[j]);
1893 }
1894 }
1895 #endif
1896
1897 early_identify_cpu(&boot_cpu_data);
1898 }
1899
1900 static bool detect_null_seg_behavior(void)
1901 {
1902 /*
1903 * Empirically, writing zero to a segment selector on AMD does
1904 * not clear the base, whereas writing zero to a segment
1905 * selector on Intel does clear the base. Intel's behavior
1906 * allows slightly faster context switches in the common case
1907 * where GS is unused by the prev and next threads.
1908 *
1909 * Since neither vendor documents this anywhere that I can see,
1910 * detect it directly instead of hard-coding the choice by
1911 * vendor.
1912 *
1913 * I've designated AMD's behavior as the "bug" because it's
1914 * counterintuitive and less friendly.
1915 */
1916
1917 unsigned long old_base, tmp;
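	/* Set a non-zero FS base, load a null selector into FS, and check whether the base was cleared. */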
1918 rdmsrq(MSR_FS_BASE, old_base);
1919 wrmsrq(MSR_FS_BASE, 1);
1920 loadsegment(fs, 0);
1921 rdmsrq(MSR_FS_BASE, tmp);
1922 wrmsrq(MSR_FS_BASE, old_base);
1923 return tmp == 0;
1924 }
1925
1926 void check_null_seg_clears_base(struct cpuinfo_x86 *c)
1927 {
1928 /* BUG_NULL_SEG is only relevant with 64bit userspace */
1929 if (!IS_ENABLED(CONFIG_X86_64))
1930 return;
1931
1932 if (cpu_has(c, X86_FEATURE_NULL_SEL_CLR_BASE))
1933 return;
1934
1935 /*
1936 * CPUID bit above wasn't set. If this kernel is still running
1937 	 * as an HV guest, then the HV has decided not to advertise
1938 * that CPUID bit for whatever reason. For example, one
1939 * member of the migration pool might be vulnerable. Which
1940 * means, the bug is present: set the BUG flag and return.
1941 */
1942 if (cpu_has(c, X86_FEATURE_HYPERVISOR)) {
1943 set_cpu_bug(c, X86_BUG_NULL_SEG);
1944 return;
1945 }
1946
1947 /*
1948 * Zen2 CPUs also have this behaviour, but no CPUID bit.
1949 * 0x18 is the respective family for Hygon.
1950 */
1951 if ((c->x86 == 0x17 || c->x86 == 0x18) &&
1952 detect_null_seg_behavior())
1953 return;
1954
1955 /* All the remaining ones are affected */
1956 set_cpu_bug(c, X86_BUG_NULL_SEG);
1957 }
1958
1959 static void generic_identify(struct cpuinfo_x86 *c)
1960 {
1961 c->extended_cpuid_level = 0;
1962
1963 if (!cpuid_feature())
1964 identify_cpu_without_cpuid(c);
1965
1966 	/* Cyrix could have CPUID enabled via c_identify() */
1967 if (!cpuid_feature())
1968 return;
1969
1970 cpu_detect(c);
1971
1972 get_cpu_vendor(c);
1973 intel_unlock_cpuid_leafs(c);
1974 get_cpu_cap(c);
1975
1976 get_cpu_address_sizes(c);
1977
1978 get_model_name(c); /* Default name */
1979
1980 /*
1981 * ESPFIX is a strange bug. All real CPUs have it. Paravirt
1982 * systems that run Linux at CPL > 0 may or may not have the
1983 * issue, but, even if they have the issue, there's absolutely
1984 * nothing we can do about it because we can't use the real IRET
1985 * instruction.
1986 *
1987 * NB: For the time being, only 32-bit kernels support
1988 * X86_BUG_ESPFIX as such. 64-bit kernels directly choose
1989 * whether to apply espfix using paravirt hooks. If any
1990 * non-paravirt system ever shows up that does *not* have the
1991 * ESPFIX issue, we can change this.
1992 */
1993 #ifdef CONFIG_X86_32
1994 set_cpu_bug(c, X86_BUG_ESPFIX);
1995 #endif
1996 }
1997
1998 /*
1999 * This does the hard work of actually picking apart the CPU stuff...
2000 */
2001 static void identify_cpu(struct cpuinfo_x86 *c)
2002 {
2003 int i;
2004
2005 c->loops_per_jiffy = loops_per_jiffy;
2006 c->x86_cache_size = 0;
2007 c->x86_vendor = X86_VENDOR_UNKNOWN;
2008 c->x86_model = c->x86_stepping = 0; /* So far unknown... */
2009 c->x86_vendor_id[0] = '\0'; /* Unset */
2010 c->x86_model_id[0] = '\0'; /* Unset */
2011 #ifdef CONFIG_X86_64
2012 c->x86_clflush_size = 64;
2013 c->x86_phys_bits = 36;
2014 c->x86_virt_bits = 48;
2015 #else
2016 c->cpuid_level = -1; /* CPUID not detected */
2017 c->x86_clflush_size = 32;
2018 c->x86_phys_bits = 32;
2019 c->x86_virt_bits = 32;
2020 #endif
2021 c->x86_cache_alignment = c->x86_clflush_size;
2022 memset(&c->x86_capability, 0, sizeof(c->x86_capability));
2023 #ifdef CONFIG_X86_VMX_FEATURE_NAMES
2024 memset(&c->vmx_capability, 0, sizeof(c->vmx_capability));
2025 #endif
2026
2027 generic_identify(c);
2028
2029 cpu_parse_topology(c);
2030
2031 if (this_cpu->c_identify)
2032 this_cpu->c_identify(c);
2033
2034 /* Clear/Set all flags overridden by options, after probe */
2035 apply_forced_caps(c);
2036
2037 /*
2038 * Set default APIC and TSC_DEADLINE MSR fencing flag. AMD and
2039 * Hygon will clear it in ->c_init() below.
2040 */
2041 set_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
2042
2043 /*
2044 * Vendor-specific initialization. In this section we
2045 * canonicalize the feature flags, meaning if there are
2046 * features a certain CPU supports which CPUID doesn't
2047 * tell us, CPUID claiming incorrect flags, or other bugs,
2048 * we handle them here.
2049 *
2050 * At the end of this section, c->x86_capability better
2051 * indicate the features this CPU genuinely supports!
2052 */
2053 if (this_cpu->c_init)
2054 this_cpu->c_init(c);
2055
2056 bus_lock_init();
2057
2058 /* Disable the PN if appropriate */
2059 squash_the_stupid_serial_number(c);
2060
2061 setup_smep(c);
2062 setup_smap(c);
2063 setup_umip(c);
2064 setup_lass(c);
2065
2066 /*
2067 * The vendor-specific functions might have changed features.
2068 * Now we do "generic changes."
2069 */
2070
2071 /* Filter out anything that depends on CPUID levels we don't have */
2072 filter_cpuid_features(c, true);
2073
2074 /* Check for unmet dependencies based on the CPUID dependency table */
2075 check_cpufeature_deps(c);
2076
2077 /* If the model name is still unset, do table lookup. */
2078 if (!c->x86_model_id[0]) {
2079 const char *p;
2080 p = table_lookup_model(c);
2081 if (p)
2082 strcpy(c->x86_model_id, p);
2083 else
2084 /* Last resort... */
2085 sprintf(c->x86_model_id, "%02x/%02x",
2086 c->x86, c->x86_model);
2087 }
2088
2089 x86_init_rdrand(c);
2090 setup_pku(c);
2091 setup_cet(c);
2092
2093 /*
2094 	 * Clear/Set all flags overridden by options; this needs to happen
2095 	 * before the SMP AND of all CPUs' capabilities below.
2096 */
2097 apply_forced_caps(c);
2098
2099 /*
2100 * On SMP, boot_cpu_data holds the common feature set between
2101 * all CPUs; so make sure that we indicate which features are
2102 * common between the CPUs. The first time this routine gets
2103 * executed, c == &boot_cpu_data.
2104 */
2105 if (c != &boot_cpu_data) {
2106 /* AND the already accumulated flags with these */
2107 for (i = 0; i < NCAPINTS; i++)
2108 boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
2109
2110 /* OR, i.e. replicate the bug flags */
2111 for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
2112 c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
2113 }
2114
2115 ppin_init(c);
2116
2117 /* Init Machine Check Exception if available. */
2118 mcheck_cpu_init(c);
2119
2120 numa_add_cpu(smp_processor_id());
2121 }
2122
2123 /*
2124 * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
2125 * on 32-bit kernels:
2126 */
2127 #ifdef CONFIG_X86_32
2128 void enable_sep_cpu(void)
2129 {
2130 struct tss_struct *tss;
2131 int cpu;
2132
2133 if (!boot_cpu_has(X86_FEATURE_SEP))
2134 return;
2135
2136 cpu = get_cpu();
2137 tss = &per_cpu(cpu_tss_rw, cpu);
2138
2139 /*
2140 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
2141 * see the big comment in struct x86_hw_tss's definition.
2142 */
2143
2144 tss->x86_tss.ss1 = __KERNEL_CS;
2145 wrmsrq(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1);
2146 wrmsrq(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1));
2147 wrmsrq(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32);
2148
2149 put_cpu();
2150 }
2151 #endif
2152
2153 static __init void identify_boot_cpu(void)
2154 {
2155 identify_cpu(&boot_cpu_data);
2156 if (HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT))
2157 pr_info("CET detected: Indirect Branch Tracking enabled\n");
2158 #ifdef CONFIG_X86_32
2159 enable_sep_cpu();
2160 #endif
2161 cpu_detect_tlb(&boot_cpu_data);
2162 setup_cr_pinning();
2163
2164 tsx_init();
2165 tdx_init();
2166 lkgs_init();
2167 }
2168
2169 void identify_secondary_cpu(unsigned int cpu)
2170 {
2171 struct cpuinfo_x86 *c = &cpu_data(cpu);
2172
2173 /* Copy boot_cpu_data only on the first bringup */
2174 if (!c->initialized)
2175 *c = boot_cpu_data;
2176 c->cpu_index = cpu;
2177
2178 identify_cpu(c);
2179 #ifdef CONFIG_X86_32
2180 enable_sep_cpu();
2181 #endif
2182 x86_spec_ctrl_setup_ap();
2183 update_srbds_msr();
2184 if (boot_cpu_has_bug(X86_BUG_GDS))
2185 update_gds_msr();
2186
2187 tsx_ap_init();
2188 c->initialized = true;
2189 }
2190
2191 void print_cpu_info(struct cpuinfo_x86 *c)
2192 {
2193 const char *vendor = NULL;
2194
2195 if (c->x86_vendor < X86_VENDOR_NUM) {
2196 vendor = this_cpu->c_vendor;
2197 } else {
2198 if (c->cpuid_level >= 0)
2199 vendor = c->x86_vendor_id;
2200 }
2201
2202 if (vendor && !strstr(c->x86_model_id, vendor))
2203 pr_cont("%s ", vendor);
2204
2205 if (c->x86_model_id[0])
2206 pr_cont("%s", c->x86_model_id);
2207 else
2208 pr_cont("%d86", c->x86);
2209
2210 pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
2211
2212 if (c->x86_stepping || c->cpuid_level >= 0)
2213 pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
2214 else
2215 pr_cont(")\n");
2216 }
2217
2218 /*
2219 * clearcpuid= and setcpuid= were already parsed in cpu_parse_early_param().
2220  * These dummy functions prevent them from becoming environment variables for
2221 * init.
2222 */
2223
2224 static __init int setup_clearcpuid(char *arg)
2225 {
2226 return 1;
2227 }
2228 __setup("clearcpuid=", setup_clearcpuid);
2229
2230 static __init int setup_setcpuid(char *arg)
2231 {
2232 return 1;
2233 }
2234 __setup("setcpuid=", setup_setcpuid);
2235
2236 DEFINE_PER_CPU_CACHE_HOT(struct task_struct *, current_task) = &init_task;
2237 EXPORT_PER_CPU_SYMBOL(current_task);
2238 EXPORT_PER_CPU_SYMBOL(const_current_task);
2239
2240 DEFINE_PER_CPU_CACHE_HOT(int, __preempt_count) = INIT_PREEMPT_COUNT;
2241 EXPORT_PER_CPU_SYMBOL(__preempt_count);
2242
2243 DEFINE_PER_CPU_CACHE_HOT(unsigned long, cpu_current_top_of_stack) = TOP_OF_INIT_STACK;
2244
2245 #ifdef CONFIG_X86_64
2246 /*
2247  * Note: Do not make this dependent on CONFIG_MITIGATION_CALL_DEPTH_TRACKING
2248 * so that this space is reserved in the hot cache section even when the
2249 * mitigation is disabled.
2250 */
2251 DEFINE_PER_CPU_CACHE_HOT(u64, __x86_call_depth);
2252 EXPORT_PER_CPU_SYMBOL(__x86_call_depth);
2253
2254 static void wrmsrq_cstar(unsigned long val)
2255 {
2256 /*
2257 * Intel CPUs do not support 32-bit SYSCALL. Writing to MSR_CSTAR
2258 * is so far ignored by the CPU, but raises a #VE trap in a TDX
2259 * guest. Avoid the pointless write on all Intel CPUs.
2260 */
2261 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
2262 wrmsrq(MSR_CSTAR, val);
2263 }
2264
2265 static inline void idt_syscall_init(void)
2266 {
2267 wrmsrq(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
2268
2269 if (ia32_enabled()) {
2270 wrmsrq_cstar((unsigned long)entry_SYSCALL_compat);
2271 /*
2272 * This only works on Intel CPUs.
2273 	 * On AMD CPUs these MSRs are 32-bit, so the CPU truncates MSR_IA32_SYSENTER_EIP.
2274 * This does not cause SYSENTER to jump to the wrong location, because
2275 * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
2276 */
2277 wrmsrq_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
2278 wrmsrq_safe(MSR_IA32_SYSENTER_ESP,
2279 (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1));
2280 wrmsrq_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
2281 } else {
2282 wrmsrq_cstar((unsigned long)entry_SYSCALL32_ignore);
2283 wrmsrq_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
2284 wrmsrq_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
2285 wrmsrq_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
2286 }
2287
2288 /*
2289 * Flags to clear on syscall; clear as much as possible
2290 * to minimize user space-kernel interference.
2291 */
2292 wrmsrq(MSR_SYSCALL_MASK,
2293 X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF|
2294 X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_TF|
2295 X86_EFLAGS_IF|X86_EFLAGS_DF|X86_EFLAGS_OF|
2296 X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_RF|
2297 X86_EFLAGS_AC|X86_EFLAGS_ID);
2298 }
2299
2300 /* May not be marked __init: used by software suspend */
2301 void syscall_init(void)
2302 {
2303 /* The default user and kernel segments */
2304 wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
2305
2306 /*
2307 	 * Except for the IA32_STAR MSR, there is NO need to set up SYSCALL and
2308 * SYSENTER MSRs for FRED, because FRED uses the ring 3 FRED
2309 * entrypoint for SYSCALL and SYSENTER, and ERETU is the only legit
2310 * instruction to return to ring 3 (both sysexit and sysret cause
2311 * #UD when FRED is enabled).
2312 */
2313 if (!cpu_feature_enabled(X86_FEATURE_FRED))
2314 idt_syscall_init();
2315 }
2316 #endif /* CONFIG_X86_64 */
2317
2318 #ifdef CONFIG_STACKPROTECTOR
2319 DEFINE_PER_CPU_CACHE_HOT(unsigned long, __stack_chk_guard);
2320 #ifndef CONFIG_SMP
2321 EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
2322 #endif
2323 #endif
2324
2325 static void initialize_debug_regs(void)
2326 {
2327 /* Control register first -- to make sure everything is disabled. */
2328 set_debugreg(DR7_FIXED_1, 7);
2329 set_debugreg(DR6_RESERVED, 6);
2330 /* dr5 and dr4 don't exist */
2331 set_debugreg(0, 3);
2332 set_debugreg(0, 2);
2333 set_debugreg(0, 1);
2334 set_debugreg(0, 0);
2335 }
2336
2337 #ifdef CONFIG_KGDB
2338 /*
2339 * Restore debug regs if using kgdbwait and you have a kernel debugger
2340 * connection established.
2341 */
2342 static void dbg_restore_debug_regs(void)
2343 {
2344 if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
2345 arch_kgdb_ops.correct_hw_break();
2346 }
2347 #else /* ! CONFIG_KGDB */
2348 #define dbg_restore_debug_regs()
2349 #endif /* ! CONFIG_KGDB */
2350
2351 static inline void setup_getcpu(int cpu)
2352 {
2353 unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
2354 struct desc_struct d = { };
2355
2356 if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID))
2357 wrmsrq(MSR_TSC_AUX, cpudata);
2358
2359 /* Store CPU and node number in limit. */
2360 d.limit0 = cpudata;
2361 d.limit1 = cpudata >> 16;
2362
2363 d.type = 5; /* RO data, expand down, accessed */
2364 d.dpl = 3; /* Visible to user code */
2365 d.s = 1; /* Not a system segment */
2366 d.p = 1; /* Present */
2367 d.d = 1; /* 32-bit */
2368
2369 write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S);
2370 }
2371
2372 #ifdef CONFIG_X86_64
2373 static inline void tss_setup_ist(struct tss_struct *tss)
2374 {
2375 /* Set up the per-CPU TSS IST stacks */
2376 tss->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF);
2377 tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI);
2378 tss->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB);
2379 tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);
2380 /* Only mapped when SEV-ES is active */
2381 tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC);
2382 }
2383 #else /* CONFIG_X86_64 */
2384 static inline void tss_setup_ist(struct tss_struct *tss) { }
2385 #endif /* !CONFIG_X86_64 */
2386
2387 static inline void tss_setup_io_bitmap(struct tss_struct *tss)
2388 {
2389 tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET_INVALID;
2390
2391 #ifdef CONFIG_X86_IOPL_IOPERM
2392 tss->io_bitmap.prev_max = 0;
2393 tss->io_bitmap.prev_sequence = 0;
2394 memset(tss->io_bitmap.bitmap, 0xff, sizeof(tss->io_bitmap.bitmap));
2395 /*
2396 * Invalidate the extra array entry past the end of the all
2397 * permission bitmap as required by the hardware.
2398 */
2399 tss->io_bitmap.mapall[IO_BITMAP_LONGS] = ~0UL;
2400 #endif
2401 }
2402
2403 /*
2404 * Setup everything needed to handle exceptions from the IDT, including the IST
2405 * exceptions which use paranoid_entry().
2406 */
2407 void cpu_init_exception_handling(bool boot_cpu)
2408 {
2409 struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
2410 int cpu = raw_smp_processor_id();
2411
2412 /* paranoid_entry() gets the CPU number from the GDT */
2413 setup_getcpu(cpu);
2414
2415 /* For IDT mode, IST vectors need to be set in TSS. */
2416 if (!cpu_feature_enabled(X86_FEATURE_FRED))
2417 tss_setup_ist(tss);
2418 tss_setup_io_bitmap(tss);
2419 set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
2420
2421 load_TR_desc();
2422
2423 /* GHCB needs to be setup to handle #VC. */
2424 setup_ghcb();
2425
2426 /*
2427 * On CPUs with FSGSBASE support, paranoid_entry() uses
2428 * ALTERNATIVE-patched RDGSBASE/WRGSBASE instructions. Secondary CPUs
2429 * boot after alternatives are patched globally, so early exceptions
2430 * execute patched code that depends on FSGSBASE. Enable the feature
2431 * before any exceptions occur.
2432 */
2433 if (cpu_feature_enabled(X86_FEATURE_FSGSBASE)) {
2434 cr4_set_bits(X86_CR4_FSGSBASE);
2435 elf_hwcap2 |= HWCAP2_FSGSBASE;
2436 }
2437
2438 if (cpu_feature_enabled(X86_FEATURE_FRED)) {
2439 /* The boot CPU has enabled FRED during early boot */
2440 if (!boot_cpu)
2441 cpu_init_fred_exceptions();
2442
2443 cpu_init_fred_rsps();
2444 } else {
2445 load_current_idt();
2446 }
2447 }
2448
2449 void __init cpu_init_replace_early_idt(void)
2450 {
2451 if (cpu_feature_enabled(X86_FEATURE_FRED))
2452 cpu_init_fred_exceptions();
2453 else
2454 idt_setup_early_pf();
2455 }
2456
2457 /*
2458 * cpu_init() initializes state that is per-CPU. Some data is already
2459 * initialized (naturally) in the bootstrap process, such as the GDT. We
2460 * reload it nevertheless, this function acts as a 'CPU state barrier',
2461  * reload it nevertheless; this function acts as a 'CPU state barrier':
2462  * nothing should get across.
2463 void cpu_init(void)
2464 {
2465 struct task_struct *cur = current;
2466 int cpu = raw_smp_processor_id();
2467
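	/* Associate this CPU with its NUMA node as early as the node is known. */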
2468 #ifdef CONFIG_NUMA
2469 if (this_cpu_read(numa_node) == 0 &&
2470 early_cpu_to_node(cpu) != NUMA_NO_NODE)
2471 set_numa_node(early_cpu_to_node(cpu));
2472 #endif
2473 pr_debug("Initializing CPU#%d\n", cpu);
2474
2475 if (IS_ENABLED(CONFIG_X86_64) || cpu_feature_enabled(X86_FEATURE_VME) ||
2476 boot_cpu_has(X86_FEATURE_TSC) || boot_cpu_has(X86_FEATURE_DE))
2477 cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
2478
2479 if (IS_ENABLED(CONFIG_X86_64)) {
2480 loadsegment(fs, 0);
2481 memset(cur->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
2482 syscall_init();
2483
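		/* Start this CPU with a clean FS base and inactive GS base. */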
2484 wrmsrq(MSR_FS_BASE, 0);
2485 wrmsrq(MSR_KERNEL_GS_BASE, 0);
2486 barrier();
2487
2488 x2apic_setup();
2489
2490 intel_posted_msi_init();
2491 }
2492
2493 mmgrab(&init_mm);
2494 cur->active_mm = &init_mm;
2495 BUG_ON(cur->mm);
2496 initialize_tlbstate_and_flush();
2497 enter_lazy_tlb(&init_mm, cur);
2498
2499 /*
2500 * sp0 points to the entry trampoline stack regardless of what task
2501 * is running.
2502 */
2503 load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
2504
2505 load_mm_ldt(&init_mm);
2506
2507 initialize_debug_regs();
2508 dbg_restore_debug_regs();
2509
2510 doublefault_init_cpu_tss();
2511
2512 if (is_uv_system())
2513 uv_cpu_init();
2514
2515 load_fixmap_gdt(cpu);
2516 }
2517
2518 #ifdef CONFIG_MICROCODE_LATE_LOADING
2519 /**
2520 * store_cpu_caps() - Store a snapshot of CPU capabilities
2521 * @curr_info: Pointer where to store it
2522 *
2523 * Returns: None
2524 */
2525 void store_cpu_caps(struct cpuinfo_x86 *curr_info)
2526 {
2527 /* Reload CPUID max function as it might've changed. */
2528 curr_info->cpuid_level = cpuid_eax(0);
2529
2530 /* Copy all capability leafs and pick up the synthetic ones. */
2531 memcpy(&curr_info->x86_capability, &boot_cpu_data.x86_capability,
2532 sizeof(curr_info->x86_capability));
2533
2534 /* Get the hardware CPUID leafs */
2535 get_cpu_cap(curr_info);
2536 }
2537
2538 /**
2539 * microcode_check() - Check if any CPU capabilities changed after an update.
2540 * @prev_info: CPU capabilities stored before an update.
2541 *
2542  * The microcode loader calls this upon late microcode load to recheck features,
2543  * but only when microcode has actually been updated. The caller holds the CPU hotplug lock.
2544 *
2545 * Return: None
2546 */
2547 void microcode_check(struct cpuinfo_x86 *prev_info)
2548 {
2549 struct cpuinfo_x86 curr_info;
2550
2551 perf_check_microcode();
2552
2553 amd_check_microcode();
2554
2555 store_cpu_caps(&curr_info);
2556
2557 if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability,
2558 sizeof(prev_info->x86_capability)))
2559 return;
2560
2561 pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
2562 pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
2563 }
2564 #endif
2565
2566 /*
2567 * Invoked from core CPU hotplug code after hotplug operations
2568 */
2569 void arch_smt_update(void)
2570 {
2571 /* Handle the speculative execution misfeatures */
2572 cpu_bugs_smt_update();
2573 /* Check whether IPI broadcasting can be enabled */
2574 apic_smt_update();
2575 }
2576
2577 void __init arch_cpu_finalize_init(void)
2578 {
2579 struct cpuinfo_x86 *c = this_cpu_ptr(&cpu_info);
2580
2581 identify_boot_cpu();
2582
2583 select_idle_routine();
2584
2585 /*
2586 	 * identify_boot_cpu() initialized the SMT support information; let
2587 	 * the core code know.
2588 */
2589 cpu_smt_set_num_threads(__max_threads_per_core, __max_threads_per_core);
2590
2591 if (!IS_ENABLED(CONFIG_SMP)) {
2592 pr_info("CPU: ");
2593 print_cpu_info(&boot_cpu_data);
2594 }
2595
2596 cpu_select_mitigations();
2597
2598 arch_smt_update();
2599
2600 if (IS_ENABLED(CONFIG_X86_32)) {
2601 /*
2602 		 * Check whether this is a real i386, which is no longer
2603 		 * supported, and fix up the utsname.
2604 */
2605 if (boot_cpu_data.x86 < 4)
2606 panic("Kernel requires i486+ for 'invlpg' and other features");
2607
2608 init_utsname()->machine[1] =
2609 '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
2610 }
2611
2612 /*
2613 * Must be before alternatives because it might set or clear
2614 * feature bits.
2615 */
2616 fpu__init_system();
2617 fpu__init_cpu();
2618
2619 /*
2620 	 * This needs to follow the FPU initialization, since EFI depends on it.
2621 */
2622 if (efi_enabled(EFI_RUNTIME_SERVICES))
2623 efi_enter_virtual_mode();
2624
2625 /*
2626 * Ensure that access to the per CPU representation has the initial
2627 * boot CPU configuration.
2628 */
2629 *c = boot_cpu_data;
2630 c->initialized = true;
2631
2632 alternative_instructions();
2633
2634 if (IS_ENABLED(CONFIG_X86_64)) {
2635 USER_PTR_MAX = TASK_SIZE_MAX;
2636
2637 /*
2638 * Enable this when LAM is gated on LASS support
2639 if (cpu_feature_enabled(X86_FEATURE_LAM))
2640 USER_PTR_MAX = (1ul << 63) - PAGE_SIZE;
2641 */
2642 runtime_const_init(ptr, USER_PTR_MAX);
2643
2644 /*
2645 		 * Make sure the first 2MB area is not mapped by huge pages.
2646 		 * There are typically fixed size MTRRs in there and overlapping
2647 		 * MTRRs into large pages causes slowdowns.
2648 		 *
2649 		 * Right now we don't do that with gbpages because there seems
2650 		 * to be very little benefit for that case.
2651 */
2652 if (!direct_gbpages)
2653 set_memory_4k((unsigned long)__va(0), 1);
2654 } else {
2655 fpu__init_check_bugs();
2656 }
2657
2658 /*
2659 * This needs to be called before any devices perform DMA
2660 * operations that might use the SWIOTLB bounce buffers. It will
2661 * mark the bounce buffers as decrypted so that their usage will
2662 * not cause "plain-text" data to be decrypted when accessed. It
2663 * must be called after late_time_init() so that Hyper-V x86/x64
2664 * hypercalls work when the SWIOTLB bounce buffers are decrypted.
2665 */
2666 mem_encrypt_init();
2667 }
2668