xref: /linux/arch/x86/kernel/cpu/common.c (revision 5567fc9dcd7ed46678cd68e6ca0662331d42f0ac)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* cpu_feature_enabled() cannot be used this early */
3 #define USE_EARLY_PGTABLE_L5
4 
5 #include <linux/memblock.h>
6 #include <linux/linkage.h>
7 #include <linux/bitops.h>
8 #include <linux/kernel.h>
9 #include <linux/export.h>
10 #include <linux/kvm_types.h>
11 #include <linux/percpu.h>
12 #include <linux/string.h>
13 #include <linux/ctype.h>
14 #include <linux/delay.h>
15 #include <linux/sched/mm.h>
16 #include <linux/sched/clock.h>
17 #include <linux/sched/task.h>
18 #include <linux/sched/smt.h>
19 #include <linux/init.h>
20 #include <linux/kprobes.h>
21 #include <linux/kgdb.h>
22 #include <linux/mem_encrypt.h>
23 #include <linux/smp.h>
24 #include <linux/cpu.h>
25 #include <linux/io.h>
26 #include <linux/syscore_ops.h>
27 #include <linux/pgtable.h>
28 #include <linux/stackprotector.h>
29 #include <linux/utsname.h>
30 #include <linux/efi.h>
31 
32 #include <asm/alternative.h>
33 #include <asm/cmdline.h>
34 #include <asm/cpuid/api.h>
35 #include <asm/perf_event.h>
36 #include <asm/mmu_context.h>
37 #include <asm/doublefault.h>
38 #include <asm/archrandom.h>
39 #include <asm/hypervisor.h>
40 #include <asm/processor.h>
41 #include <asm/tlbflush.h>
42 #include <asm/debugreg.h>
43 #include <asm/sections.h>
44 #include <asm/vsyscall.h>
45 #include <linux/topology.h>
46 #include <linux/cpumask.h>
47 #include <linux/atomic.h>
48 #include <asm/proto.h>
49 #include <asm/setup.h>
50 #include <asm/apic.h>
51 #include <asm/desc.h>
52 #include <asm/fpu/api.h>
53 #include <asm/mtrr.h>
54 #include <asm/hwcap2.h>
55 #include <linux/numa.h>
56 #include <asm/numa.h>
57 #include <asm/asm.h>
58 #include <asm/bugs.h>
59 #include <asm/cpu.h>
60 #include <asm/mce.h>
61 #include <asm/msr.h>
62 #include <asm/cacheinfo.h>
63 #include <asm/memtype.h>
64 #include <asm/microcode.h>
65 #include <asm/intel-family.h>
66 #include <asm/cpu_device_id.h>
67 #include <asm/fred.h>
68 #include <asm/uv/uv.h>
69 #include <asm/ia32.h>
70 #include <asm/set_memory.h>
71 #include <asm/traps.h>
72 #include <asm/sev.h>
73 #include <asm/tdx.h>
74 #include <asm/virt.h>
75 #include <asm/posted_intr.h>
76 #include <asm/runtime-const.h>
77 
78 #include "cpu.h"
79 
80 DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
81 EXPORT_PER_CPU_SYMBOL(cpu_info);
82 
83 /* Used for modules: built-in code uses runtime constants */
84 unsigned long USER_PTR_MAX;
85 EXPORT_SYMBOL(USER_PTR_MAX);
86 
87 u32 elf_hwcap2 __read_mostly;
88 
89 /* Maximum number of threads per core */
90 unsigned int __max_threads_per_core __ro_after_init = 1;
91 EXPORT_SYMBOL(__max_threads_per_core);
92 
93 unsigned int __max_dies_per_package __ro_after_init = 1;
94 EXPORT_SYMBOL(__max_dies_per_package);
95 
96 unsigned int __max_logical_packages __ro_after_init = 1;
97 EXPORT_SYMBOL(__max_logical_packages);
98 
99 unsigned int __num_nodes_per_package __ro_after_init = 1;
100 EXPORT_SYMBOL(__num_nodes_per_package);
101 
102 unsigned int __num_cores_per_package __ro_after_init = 1;
103 EXPORT_SYMBOL(__num_cores_per_package);
104 
105 unsigned int __num_threads_per_package __ro_after_init = 1;
106 EXPORT_SYMBOL(__num_threads_per_package);
107 
108 static struct ppin_info {
109 	int	feature;
110 	int	msr_ppin_ctl;
111 	int	msr_ppin;
112 } ppin_info[] = {
113 	[X86_VENDOR_INTEL] = {
114 		.feature = X86_FEATURE_INTEL_PPIN,
115 		.msr_ppin_ctl = MSR_PPIN_CTL,
116 		.msr_ppin = MSR_PPIN
117 	},
118 	[X86_VENDOR_AMD] = {
119 		.feature = X86_FEATURE_AMD_PPIN,
120 		.msr_ppin_ctl = MSR_AMD_PPIN_CTL,
121 		.msr_ppin = MSR_AMD_PPIN
122 	},
123 };
124 
125 static const struct x86_cpu_id ppin_cpuids[] = {
126 	X86_MATCH_FEATURE(X86_FEATURE_AMD_PPIN, &ppin_info[X86_VENDOR_AMD]),
127 	X86_MATCH_FEATURE(X86_FEATURE_INTEL_PPIN, &ppin_info[X86_VENDOR_INTEL]),
128 
129 	/* Legacy models without CPUID enumeration */
130 	X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &ppin_info[X86_VENDOR_INTEL]),
131 	X86_MATCH_VFM(INTEL_HASWELL_X, &ppin_info[X86_VENDOR_INTEL]),
132 	X86_MATCH_VFM(INTEL_BROADWELL_D, &ppin_info[X86_VENDOR_INTEL]),
133 	X86_MATCH_VFM(INTEL_BROADWELL_X, &ppin_info[X86_VENDOR_INTEL]),
134 	X86_MATCH_VFM(INTEL_SKYLAKE_X, &ppin_info[X86_VENDOR_INTEL]),
135 	X86_MATCH_VFM(INTEL_ICELAKE_X, &ppin_info[X86_VENDOR_INTEL]),
136 	X86_MATCH_VFM(INTEL_ICELAKE_D, &ppin_info[X86_VENDOR_INTEL]),
137 	X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &ppin_info[X86_VENDOR_INTEL]),
138 	X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &ppin_info[X86_VENDOR_INTEL]),
139 	X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &ppin_info[X86_VENDOR_INTEL]),
140 	X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &ppin_info[X86_VENDOR_INTEL]),
141 
142 	{}
143 };
144 
145 static void ppin_init(struct cpuinfo_x86 *c)
146 {
147 	const struct x86_cpu_id *id;
148 	unsigned long long val;
149 	struct ppin_info *info;
150 
151 	id = x86_match_cpu(ppin_cpuids);
152 	if (!id)
153 		return;
154 
155 	/*
156 	 * Testing the presence of the MSR is not enough. Need to check
157 	 * that the PPIN_CTL allows reading of the PPIN.
158 	 */
159 	info = (struct ppin_info *)id->driver_data;
160 
161 	if (rdmsrq_safe(info->msr_ppin_ctl, &val))
162 		goto clear_ppin;
163 
164 	if ((val & 3UL) == 1UL) {
165 		/* PPIN locked in disabled mode */
166 		goto clear_ppin;
167 	}
168 
169 	/* If PPIN is disabled, try to enable */
170 	if (!(val & 2UL)) {
171 		wrmsrq_safe(info->msr_ppin_ctl,  val | 2UL);
172 		rdmsrq_safe(info->msr_ppin_ctl, &val);
173 	}
174 
175 	/* Is the enable bit set? */
176 	if (val & 2UL) {
177 		c->ppin = native_rdmsrq(info->msr_ppin);
178 		set_cpu_cap(c, info->feature);
179 		return;
180 	}
181 
182 clear_ppin:
183 	setup_clear_cpu_cap(info->feature);
184 }
185 
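/*
 * Editorial sketch of the PPIN_CTL states handled above, assuming the
 * usual layout of bit 0 = LockOut and bit 1 = Enable (values illustrative):
 *
 *	val == 0x0: disabled, unlocked  -> try to set the enable bit
 *	val == 0x1: disabled and locked -> give up, clear the feature
 *	val == 0x2: enabled             -> read MSR_PPIN into c->ppin
 *	val == 0x3: enabled and locked  -> still readable, same as 0x2
 */
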
186 static void default_init(struct cpuinfo_x86 *c)
187 {
188 #ifdef CONFIG_X86_64
189 	cpu_detect_cache_sizes(c);
190 #else
191 	/* Not much we can do here... */
192 	/* Check if it at least has CPUID */
193 	if (c->cpuid_level == -1) {
194 		/* No cpuid. It must be an ancient CPU */
195 		if (c->x86 == 4)
196 			strcpy(c->x86_model_id, "486");
197 		else if (c->x86 == 3)
198 			strcpy(c->x86_model_id, "386");
199 	}
200 #endif
201 }
202 
203 static const struct cpu_dev default_cpu = {
204 	.c_init		= default_init,
205 	.c_vendor	= "Unknown",
206 	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
207 };
208 
209 static const struct cpu_dev *this_cpu = &default_cpu;
210 
211 DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
212 #ifdef CONFIG_X86_64
213 	/*
214 	 * We need valid kernel segments for data and code in long mode too
215 	 * IRET will check the segment types  kkeil 2000/10/28
216 	 * Also sysret mandates a special GDT layout
217 	 *
218 	 * TLS descriptors are currently at a different place compared to i386.
219 	 * Hopefully nobody expects them at a fixed place (Wine?)
220 	 */
221 	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(DESC_CODE32, 0, 0xfffff),
222 	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(DESC_CODE64, 0, 0xfffff),
223 	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(DESC_DATA64, 0, 0xfffff),
224 	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(DESC_CODE32 | DESC_USER, 0, 0xfffff),
225 	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(DESC_DATA64 | DESC_USER, 0, 0xfffff),
226 	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(DESC_CODE64 | DESC_USER, 0, 0xfffff),
227 #else
228 	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(DESC_CODE32, 0, 0xfffff),
229 	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(DESC_DATA32, 0, 0xfffff),
230 	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(DESC_CODE32 | DESC_USER, 0, 0xfffff),
231 	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(DESC_DATA32 | DESC_USER, 0, 0xfffff),
232 	/*
233 	 * Segments used for calling PnP BIOS have byte granularity.
234 	 * The code segments and data segments have fixed 64k limits,
235 	 * the transfer segment sizes are set at run time.
236 	 */
237 	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(DESC_CODE32_BIOS, 0, 0xffff),
238 	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(DESC_CODE16, 0, 0xffff),
239 	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(DESC_DATA16, 0, 0xffff),
240 	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(DESC_DATA16, 0, 0),
241 	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(DESC_DATA16, 0, 0),
242 	/*
243 	 * The APM segments have byte granularity and their bases
244 	 * are set at run time.  All have 64k limits.
245 	 */
246 	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(DESC_CODE32_BIOS, 0, 0xffff),
247 	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(DESC_CODE16, 0, 0xffff),
248 	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(DESC_DATA32_BIOS, 0, 0xffff),
249 
250 	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(DESC_DATA32, 0, 0xfffff),
251 	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(DESC_DATA32, 0, 0xfffff),
252 #endif
253 } };
254 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
255 SYM_PIC_ALIAS(gdt_page);
256 
257 #ifdef CONFIG_X86_64
258 static int __init x86_nopcid_setup(char *s)
259 {
260 	/* nopcid doesn't accept parameters */
261 	if (s)
262 		return -EINVAL;
263 
264 	/* do not emit a message if the feature is not present */
265 	if (!boot_cpu_has(X86_FEATURE_PCID))
266 		return 0;
267 
268 	setup_clear_cpu_cap(X86_FEATURE_PCID);
269 	pr_info("nopcid: PCID feature disabled\n");
270 	return 0;
271 }
272 early_param("nopcid", x86_nopcid_setup);
273 #endif
274 
275 static int __init x86_noinvpcid_setup(char *s)
276 {
277 	/* noinvpcid doesn't accept parameters */
278 	if (s)
279 		return -EINVAL;
280 
281 	/* do not emit a message if the feature is not present */
282 	if (!boot_cpu_has(X86_FEATURE_INVPCID))
283 		return 0;
284 
285 	setup_clear_cpu_cap(X86_FEATURE_INVPCID);
286 	pr_info("noinvpcid: INVPCID feature disabled\n");
287 	return 0;
288 }
289 early_param("noinvpcid", x86_noinvpcid_setup);
290 
291 /* Standard macro to see if a specific flag is changeable */
292 static inline bool flag_is_changeable_p(unsigned long flag)
293 {
294 	unsigned long f1, f2;
295 
296 	if (!IS_ENABLED(CONFIG_X86_32))
297 		return true;
298 
299 	/*
300 	 * Cyrix and IDT CPUs allow disabling of CPUID, so the code
301 	 * below may return different results when it is executed
302 	 * before and after CPUID is enabled. Add "volatile" to
303 	 * prevent gcc from optimizing away the subsequent calls to
304 	 * this function.
305 	 */
306 	asm volatile ("pushfl		\n\t"
307 		      "pushfl		\n\t"
308 		      "popl %0		\n\t"
309 		      "movl %0, %1	\n\t"
310 		      "xorl %2, %0	\n\t"
311 		      "pushl %0		\n\t"
312 		      "popfl		\n\t"
313 		      "pushfl		\n\t"
314 		      "popl %0		\n\t"
315 		      "popfl		\n\t"
316 
317 		      : "=&r" (f1), "=&r" (f2)
318 		      : "ir" (flag));
319 
320 	return (f1 ^ f2) & flag;
321 }
322 
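/*
 * The asm above, paraphrased as pseudo-C (editorial sketch):
 *
 *	f1 = EFLAGS;			// pushfl; popl
 *	f2 = f1;
 *	EFLAGS = f1 ^ flag;		// xorl; pushl; popfl
 *	f1 = EFLAGS;			// pushfl; popl
 *	EFLAGS = saved copy;		// final popfl
 *	return (f1 ^ f2) & flag;	// bit toggled => changeable
 */
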
323 #ifdef CONFIG_X86_32
324 static int cachesize_override = -1;
325 static int disable_x86_serial_nr = 1;
326 
327 static int __init cachesize_setup(char *str)
328 {
329 	get_option(&str, &cachesize_override);
330 	return 1;
331 }
332 __setup("cachesize=", cachesize_setup);
333 
334 /* Probe for the CPUID instruction */
335 bool cpuid_feature(void)
336 {
337 	return flag_is_changeable_p(X86_EFLAGS_ID);
338 }
339 
340 static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
341 {
342 	unsigned long lo, hi;
343 
344 	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
345 		return;
346 
347 	/* Disable processor serial number: */
348 
349 	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
350 	lo |= 0x200000;
351 	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
352 
353 	pr_notice("CPU serial number disabled.\n");
354 	clear_cpu_cap(c, X86_FEATURE_PN);
355 
356 	/* Disabling the serial number may affect the cpuid level */
357 	c->cpuid_level = cpuid_eax(0);
358 }
359 
360 static int __init x86_serial_nr_setup(char *s)
361 {
362 	disable_x86_serial_nr = 0;
363 	return 1;
364 }
365 __setup("serialnumber", x86_serial_nr_setup);
366 #else
367 static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
368 {
369 }
370 #endif
371 
372 static __always_inline void setup_smep(struct cpuinfo_x86 *c)
373 {
374 	if (cpu_has(c, X86_FEATURE_SMEP))
375 		cr4_set_bits(X86_CR4_SMEP);
376 }
377 
378 static __always_inline void setup_smap(struct cpuinfo_x86 *c)
379 {
380 	unsigned long eflags = native_save_fl();
381 
382 	/* This should have been cleared long ago */
383 	BUG_ON(eflags & X86_EFLAGS_AC);
384 
385 	if (cpu_has(c, X86_FEATURE_SMAP))
386 		cr4_set_bits(X86_CR4_SMAP);
387 }
388 
389 static __always_inline void setup_umip(struct cpuinfo_x86 *c)
390 {
391 	/* Check the boot processor, plus build option for UMIP. */
392 	if (!cpu_feature_enabled(X86_FEATURE_UMIP))
393 		goto out;
394 
395 	/* Check the current processor's cpuid bits. */
396 	if (!cpu_has(c, X86_FEATURE_UMIP))
397 		goto out;
398 
399 	cr4_set_bits(X86_CR4_UMIP);
400 
401 	pr_info_once("x86/cpu: User Mode Instruction Prevention (UMIP) activated\n");
402 
403 	return;
404 
405 out:
406 	/*
407 	 * Make sure UMIP is disabled in case it was enabled in a
408 	 * previous boot (e.g., via kexec).
409 	 */
410 	cr4_clear_bits(X86_CR4_UMIP);
411 }
412 
413 static __always_inline void setup_lass(struct cpuinfo_x86 *c)
414 {
415 	if (!cpu_feature_enabled(X86_FEATURE_LASS))
416 		return;
417 
418 	/*
419 	 * Legacy vsyscall page access causes a #GP when LASS is active.
420 	 * Disable LASS because the #GP handler doesn't support vsyscall
421 	 * emulation.
422 	 *
423 	 * Also disable LASS when running under EFI, as some runtime and
424 	 * boot services rely on 1:1 mappings in the lower half.
425 	 */
426 	if (IS_ENABLED(CONFIG_X86_VSYSCALL_EMULATION) ||
427 	    IS_ENABLED(CONFIG_EFI)) {
428 		setup_clear_cpu_cap(X86_FEATURE_LASS);
429 		return;
430 	}
431 
432 	cr4_set_bits(X86_CR4_LASS);
433 }
434 
435 /* These bits should not change their value after CPU init is finished. */
436 static const unsigned long cr4_pinned_mask = X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP |
437 					     X86_CR4_FSGSBASE | X86_CR4_CET;
438 
439 /*
440  * The CR pinning protects against ROP on the 'mov %reg, %CRn' instruction(s).
441  * Since you can ROP directly to these instructions (barring shadow stack),
442  * any protection must follow immediately and unconditionally after that.
443  *
444  * Specifically, the CR[04] write functions below will have the value
445  * validation controlled by the @cr_pinning static_branch which is
446  * __ro_after_init, just like the cr4_pinned_bits value.
447  *
448  * Once set, an attacker will have to defeat page-tables to get around these
449  * restrictions. Which is a much bigger ask than 'simple' ROP.
450  */
451 static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
452 static unsigned long cr4_pinned_bits __ro_after_init;
453 
454 void native_write_cr0(unsigned long val)
455 {
456 	unsigned long bits_missing = 0;
457 
458 set_register:
459 	asm volatile("mov %0,%%cr0": "+r" (val) : : "memory");
460 
461 	if (static_branch_likely(&cr_pinning)) {
462 		if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) {
463 			bits_missing = X86_CR0_WP;
464 			val |= bits_missing;
465 			goto set_register;
466 		}
467 		/* Warn after we've set the missing bits. */
468 		WARN_ONCE(bits_missing, "CR0 WP bit went missing!?\n");
469 	}
470 }
471 EXPORT_SYMBOL(native_write_cr0);
472 
473 void __no_profile native_write_cr4(unsigned long val)
474 {
475 	unsigned long bits_changed = 0;
476 
477 set_register:
478 	asm volatile("mov %0,%%cr4": "+r" (val) : : "memory");
479 
480 	if (static_branch_likely(&cr_pinning)) {
481 		if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
482 			bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits;
483 			val = (val & ~cr4_pinned_mask) | cr4_pinned_bits;
484 			goto set_register;
485 		}
486 		/* Warn after we've corrected the changed bits. */
487 		WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n",
488 			  bits_changed);
489 	}
490 }
491 #if IS_MODULE(CONFIG_LKDTM)
492 EXPORT_SYMBOL_GPL(native_write_cr4);
493 #endif
494 
495 void cr4_update_irqsoff(unsigned long set, unsigned long clear)
496 {
497 	unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);
498 
499 	lockdep_assert_irqs_disabled();
500 
501 	newval = (cr4 & ~clear) | set;
502 	if (newval != cr4) {
503 		this_cpu_write(cpu_tlbstate.cr4, newval);
504 		__write_cr4(newval);
505 	}
506 }
507 EXPORT_SYMBOL_FOR_KVM(cr4_update_irqsoff);
508 
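/*
 * Minimal usage sketch (illustrative; the real callers live elsewhere):
 * the cr4_set_bits()/cr4_clear_bits() helpers in <asm/tlbflush.h> disable
 * interrupts and funnel into this function, keeping the cpu_tlbstate.cr4
 * shadow and the real register in sync:
 *
 *	local_irq_save(flags);
 *	cr4_update_irqsoff(X86_CR4_SMEP, 0);	// set SMEP, clear nothing
 *	local_irq_restore(flags);
 */
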
509 /* Read the CR4 shadow. */
510 unsigned long cr4_read_shadow(void)
511 {
512 	return this_cpu_read(cpu_tlbstate.cr4);
513 }
514 EXPORT_SYMBOL_FOR_KVM(cr4_read_shadow);
515 
516 void cr4_init(void)
517 {
518 	unsigned long cr4 = __read_cr4();
519 
520 	if (boot_cpu_has(X86_FEATURE_PCID))
521 		cr4 |= X86_CR4_PCIDE;
522 	if (static_branch_likely(&cr_pinning))
523 		cr4 = (cr4 & ~cr4_pinned_mask) | cr4_pinned_bits;
524 
525 	__write_cr4(cr4);
526 
527 	/* Initialize cr4 shadow for this CPU. */
528 	this_cpu_write(cpu_tlbstate.cr4, cr4);
529 }
530 
531 /*
532  * Once CPU feature detection is finished (and boot params have been
533  * parsed), record any of the sensitive CR bits that are set, and
534  * enable CR pinning.
535  */
536 static void __init setup_cr_pinning(void)
537 {
538 	cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & cr4_pinned_mask;
539 	static_key_enable(&cr_pinning.key);
540 }
541 
542 static __init int x86_nofsgsbase_setup(char *arg)
543 {
544 	/* Require an exact match without trailing characters. */
545 	if (strlen(arg))
546 		return 0;
547 
548 	/* Do not emit a message if the feature is not present. */
549 	if (!boot_cpu_has(X86_FEATURE_FSGSBASE))
550 		return 1;
551 
552 	setup_clear_cpu_cap(X86_FEATURE_FSGSBASE);
553 	pr_info("FSGSBASE disabled via kernel command line\n");
554 	return 1;
555 }
556 __setup("nofsgsbase", x86_nofsgsbase_setup);
557 
558 /*
559  * Protection Keys are not available in 32-bit mode.
560  */
561 static bool pku_disabled;
562 
563 static __always_inline void setup_pku(struct cpuinfo_x86 *c)
564 {
565 	if (c == &boot_cpu_data) {
566 		if (pku_disabled || !cpu_feature_enabled(X86_FEATURE_PKU))
567 			return;
568 		/*
569 		 * Setting CR4.PKE will cause the X86_FEATURE_OSPKE cpuid
570 		 * bit to be set.  Enforce it.
571 		 */
572 		setup_force_cpu_cap(X86_FEATURE_OSPKE);
573 
574 	} else if (!cpu_feature_enabled(X86_FEATURE_OSPKE)) {
575 		return;
576 	}
577 
578 	cr4_set_bits(X86_CR4_PKE);
579 	/* Load the default PKRU value */
580 	pkru_write_default();
581 }
582 
583 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
584 static __init int setup_disable_pku(char *arg)
585 {
586 	/*
587 	 * Do not clear the X86_FEATURE_PKU bit.  All of the
588 	 * runtime checks are against OSPKE so clearing the
589 	 * bit does nothing.
590 	 *
591 	 * This way, we will see "pku" in cpuinfo, but not
592 	 * "ospke", which is exactly what we want.  It shows
593 	 * that the CPU has PKU, but the OS has not enabled it.
594 	 * This happens to be exactly how a system would look
595 	 * if we disabled the config option.
596 	 */
597 	pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n");
598 	pku_disabled = true;
599 	return 1;
600 }
601 __setup("nopku", setup_disable_pku);
602 #endif
603 
604 #ifdef CONFIG_X86_KERNEL_IBT
605 
606 __noendbr u64 ibt_save(bool disable)
607 {
608 	u64 msr = 0;
609 
610 	if (cpu_feature_enabled(X86_FEATURE_IBT)) {
611 		rdmsrq(MSR_IA32_S_CET, msr);
612 		if (disable)
613 			wrmsrq(MSR_IA32_S_CET, msr & ~CET_ENDBR_EN);
614 	}
615 
616 	return msr;
617 }
618 
619 __noendbr void ibt_restore(u64 save)
620 {
621 	u64 msr;
622 
623 	if (cpu_feature_enabled(X86_FEATURE_IBT)) {
624 		rdmsrq(MSR_IA32_S_CET, msr);
625 		msr &= ~CET_ENDBR_EN;
626 		msr |= (save & CET_ENDBR_EN);
627 		wrmsrq(MSR_IA32_S_CET, msr);
628 	}
629 }
630 
631 #endif
632 
633 static __always_inline void setup_cet(struct cpuinfo_x86 *c)
634 {
635 	bool user_shstk, kernel_ibt;
636 
637 	if (!IS_ENABLED(CONFIG_X86_CET))
638 		return;
639 
640 	kernel_ibt = HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT);
641 	user_shstk = cpu_feature_enabled(X86_FEATURE_SHSTK) &&
642 		     IS_ENABLED(CONFIG_X86_USER_SHADOW_STACK);
643 
644 	if (!kernel_ibt && !user_shstk)
645 		return;
646 
647 	if (user_shstk)
648 		set_cpu_cap(c, X86_FEATURE_USER_SHSTK);
649 
650 	if (kernel_ibt)
651 		wrmsrq(MSR_IA32_S_CET, CET_ENDBR_EN);
652 	else
653 		wrmsrq(MSR_IA32_S_CET, 0);
654 
655 	cr4_set_bits(X86_CR4_CET);
656 
657 	if (kernel_ibt && ibt_selftest()) {
658 		pr_err("IBT selftest: Failed!\n");
659 		wrmsrq(MSR_IA32_S_CET, 0);
660 		setup_clear_cpu_cap(X86_FEATURE_IBT);
661 	}
662 }
663 
664 __noendbr void cet_disable(void)
665 {
666 	if (!(cpu_feature_enabled(X86_FEATURE_IBT) ||
667 	      cpu_feature_enabled(X86_FEATURE_SHSTK)))
668 		return;
669 
670 	wrmsrq(MSR_IA32_S_CET, 0);
671 	wrmsrq(MSR_IA32_U_CET, 0);
672 }
673 
674 /*
675  * Some CPU features depend on higher CPUID levels, which may not always
676  * be available due to CPUID level capping or broken virtualization
677  * software.  Add those features to this table to auto-disable them.
678  */
679 struct cpuid_dependent_feature {
680 	u32 feature;
681 	u32 level;
682 };
683 
684 static const struct cpuid_dependent_feature
685 cpuid_dependent_features[] = {
686 	{ X86_FEATURE_MWAIT,		CPUID_LEAF_MWAIT },
687 	{ X86_FEATURE_DCA,		CPUID_LEAF_DCA },
688 	{ X86_FEATURE_XSAVE,		CPUID_LEAF_XSTATE },
689 	{ 0, 0 }
690 };
691 
692 static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
693 {
694 	const struct cpuid_dependent_feature *df;
695 
696 	for (df = cpuid_dependent_features; df->feature; df++) {
697 
698 		if (!cpu_has(c, df->feature))
699 			continue;
700 		/*
701 		 * Note: cpuid_level is set to -1 if unavailable, but
702 		 * extended_cpuid_level is set to 0 if unavailable
703 		 * and the legitimate extended levels are all negative
704 		 * when signed; hence the weird messing around with
705 		 * signs here...
706 		 */
707 		if (!((s32)df->level < 0 ?
708 		     (u32)df->level > (u32)c->extended_cpuid_level :
709 		     (s32)df->level > (s32)c->cpuid_level))
710 			continue;
711 
712 		clear_cpu_cap(c, df->feature);
713 		if (!warn)
714 			continue;
715 
716 		pr_warn("CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
717 			x86_cap_flags[df->feature], df->level);
718 	}
719 }
720 
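/*
 * Worked example of the signedness trick above: a basic leaf like 0xd
 * (CPUID_LEAF_XSTATE) is positive as s32 and is compared signed against
 * cpuid_level, which is -1 when CPUID is absent, so the check fails as
 * intended.  A hypothetical extended leaf like 0x80000008 is negative
 * as s32 and is therefore compared unsigned against extended_cpuid_level,
 * which is 0 when absent, so 0x80000008 > 0 correctly disables the
 * dependent feature.
 */
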
721 /*
722  * Naming convention should be: <Name> [(<Codename>)]
723  * This table is only used if init_<vendor>() below doesn't set it;
724  * in particular, if CPUID levels 0x80000002..4 are supported, this
725  * isn't used
726  */
727 
728 /* Look up CPU names by table lookup. */
729 static const char *table_lookup_model(struct cpuinfo_x86 *c)
730 {
731 #ifdef CONFIG_X86_32
732 	const struct legacy_cpu_model_info *info;
733 
734 	if (c->x86_model >= 16)
735 		return NULL;	/* Range check */
736 
737 	if (!this_cpu)
738 		return NULL;
739 
740 	info = this_cpu->legacy_models;
741 
742 	while (info->family) {
743 		if (info->family == c->x86)
744 			return info->model_names[c->x86_model];
745 		info++;
746 	}
747 #endif
748 	return NULL;		/* Not found */
749 }
750 
751 /* Aligned to unsigned long to avoid split lock in atomic bitmap ops */
752 __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
753 __u32 cpu_caps_set[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
754 
755 #ifdef CONFIG_X86_32
756 /* The 32-bit entry code needs to find cpu_entry_area. */
757 DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
758 #endif
759 
760 /* Load the original GDT from the per-cpu structure */
761 void load_direct_gdt(int cpu)
762 {
763 	struct desc_ptr gdt_descr;
764 
765 	gdt_descr.address = (long)get_cpu_gdt_rw(cpu);
766 	gdt_descr.size = GDT_SIZE - 1;
767 	load_gdt(&gdt_descr);
768 }
769 EXPORT_SYMBOL_FOR_KVM(load_direct_gdt);
770 
771 /* Load a fixmap remapping of the per-cpu GDT */
772 void load_fixmap_gdt(int cpu)
773 {
774 	struct desc_ptr gdt_descr;
775 
776 	gdt_descr.address = (long)get_cpu_gdt_ro(cpu);
777 	gdt_descr.size = GDT_SIZE - 1;
778 	load_gdt(&gdt_descr);
779 }
780 EXPORT_SYMBOL_GPL(load_fixmap_gdt);
781 
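/*
 * For reference: struct desc_ptr is the { u16 size; unsigned long address }
 * operand that LGDT expects, so 'size' holds an inclusive limit, hence
 * GDT_SIZE - 1 above.  Illustrative choice between the two loaders:
 *
 *	load_direct_gdt(cpu);	// writable mapping, e.g. when KVM needs it
 *	load_fixmap_gdt(cpu);	// read-only alias used during normal runtime
 */
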
782 /**
783  * switch_gdt_and_percpu_base - Switch to direct GDT and runtime per CPU base
784  * @cpu:	The CPU number for which this is invoked
785  *
786  * Invoked during early boot to switch from early GDT and early per CPU to
787  * the direct GDT and the runtime per CPU area. On 32-bit the per CPU
788  * base switch happens implicitly when the direct GDT is loaded. On
789  * 64-bit this requires updating GSBASE.
790  */
791 void __init switch_gdt_and_percpu_base(int cpu)
792 {
793 	load_direct_gdt(cpu);
794 
795 #ifdef CONFIG_X86_64
796 	/*
797 	 * No need to load %gs. It is already correct.
798 	 *
799 	 * Writing %gs on 64-bit would zero GSBASE, which would make any
800 	 * per CPU operation fault up to the point of the wrmsrq().
801 	 *
802 	 * Set GSBASE to the new offset. Until the wrmsrq() happens the
803 	 * early mapping is still valid. That means the GSBASE update will
804 	 * lose any prior per CPU data which was not copied over in
805 	 * setup_per_cpu_areas().
806 	 *
807 	 * This works even with stackprotector enabled because the
808 	 * per CPU stack canary is 0 in both per CPU areas.
809 	 */
810 	wrmsrq(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
811 #else
812 	/*
813 	 * %fs is already set to __KERNEL_PERCPU, but after switching the
814 	 * GDT, FS must be reloaded so that the 'hidden' part is
815 	 * updated from the new GDT. Up to this point the early per CPU
816 	 * translation is active. Any content of the early per CPU data
817 	 * which was not copied over in setup_per_cpu_areas() is lost.
818 	 */
819 	loadsegment(fs, __KERNEL_PERCPU);
820 #endif
821 }
822 
823 static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
824 
825 static void get_model_name(struct cpuinfo_x86 *c)
826 {
827 	unsigned int *v;
828 	char *p, *q, *s;
829 
830 	if (c->extended_cpuid_level < 0x80000004)
831 		return;
832 
833 	v = (unsigned int *)c->x86_model_id;
834 	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
835 	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
836 	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
837 	c->x86_model_id[48] = 0;
838 
839 	/* Trim whitespace */
840 	p = q = s = &c->x86_model_id[0];
841 
842 	while (*p == ' ')
843 		p++;
844 
845 	while (*p) {
846 		/* Note the last non-whitespace index */
847 		if (!isspace(*p))
848 			s = q;
849 
850 		*q++ = *p++;
851 	}
852 
853 	*(s + 1) = '\0';
854 }
855 
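/*
 * Before/after sketch of the trim loop above (string made up):
 *
 *	"  Intel(R) Xeon(R) CPU  E5-2690  "  ->  "Intel(R) Xeon(R) CPU  E5-2690"
 *
 * Only leading and trailing whitespace is dropped; interior spaces are
 * copied through unchanged.
 */
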
856 void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
857 {
858 	unsigned int n, dummy, ebx, ecx, edx, l2size;
859 
860 	n = c->extended_cpuid_level;
861 
862 	if (n >= 0x80000005) {
863 		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
864 		c->x86_cache_size = (ecx>>24) + (edx>>24);
865 #ifdef CONFIG_X86_64
866 		/* On K8 L1 TLB is inclusive, so don't count it */
867 		c->x86_tlbsize = 0;
868 #endif
869 	}
870 
871 	if (n < 0x80000006)	/* Some chips just have a large L1. */
872 		return;
873 
874 	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
875 	l2size = ecx >> 16;
876 
877 #ifdef CONFIG_X86_64
878 	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
879 #else
880 	/* do processor-specific cache resizing */
881 	if (this_cpu->legacy_cache_size)
882 		l2size = this_cpu->legacy_cache_size(c, l2size);
883 
884 	/* Allow user to override all this if necessary. */
885 	if (cachesize_override != -1)
886 		l2size = cachesize_override;
887 
888 	if (l2size == 0)
889 		return;		/* Again, no L2 cache is possible */
890 #endif
891 
892 	c->x86_cache_size = l2size;
893 }
894 
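/*
 * Decode sketch for the leaves used above (register values hypothetical):
 * for CPUID 0x80000005, ECX = 0x20020140 would mean a 0x20 = 32 KB L1D
 * (ECX[31:24]), summed with the L1I size from EDX[31:24]; for CPUID
 * 0x80000006, ECX = 0x02006140 would give l2size = ECX >> 16 = 0x0200
 * = 512 KB.
 */
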
895 u16 __read_mostly tlb_lli_4k;
896 u16 __read_mostly tlb_lli_2m;
897 u16 __read_mostly tlb_lli_4m;
898 u16 __read_mostly tlb_lld_4k;
899 u16 __read_mostly tlb_lld_2m;
900 u16 __read_mostly tlb_lld_4m;
901 u16 __read_mostly tlb_lld_1g;
902 
903 static void cpu_detect_tlb(struct cpuinfo_x86 *c)
904 {
905 	if (this_cpu->c_detect_tlb)
906 		this_cpu->c_detect_tlb(c);
907 
908 	pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
909 		tlb_lli_4k, tlb_lli_2m, tlb_lli_4m);
910 
911 	pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
912 		tlb_lld_4k, tlb_lld_2m, tlb_lld_4m, tlb_lld_1g);
913 }
914 
915 void get_cpu_vendor(struct cpuinfo_x86 *c)
916 {
917 	char *v = c->x86_vendor_id;
918 	int i;
919 
920 	for (i = 0; i < X86_VENDOR_NUM; i++) {
921 		if (!cpu_devs[i])
922 			break;
923 
924 		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
925 		    (cpu_devs[i]->c_ident[1] &&
926 		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
927 
928 			this_cpu = cpu_devs[i];
929 			c->x86_vendor = this_cpu->c_x86_vendor;
930 			return;
931 		}
932 	}
933 
934 	pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
935 		    "CPU: Your system may be unstable.\n", v);
936 
937 	c->x86_vendor = X86_VENDOR_UNKNOWN;
938 	this_cpu = &default_cpu;
939 }
940 
941 void cpu_detect(struct cpuinfo_x86 *c)
942 {
943 	/* Get vendor name */
944 	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
945 	      (unsigned int *)&c->x86_vendor_id[0],
946 	      (unsigned int *)&c->x86_vendor_id[8],
947 	      (unsigned int *)&c->x86_vendor_id[4]);
948 
949 	c->x86 = 4;
950 	/* Intel-defined flags: level 0x00000001 */
951 	if (c->cpuid_level >= 0x00000001) {
952 		u32 junk, tfms, cap0, misc;
953 
954 		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
955 		c->x86		= x86_family(tfms);
956 		c->x86_model	= x86_model(tfms);
957 		c->x86_stepping	= x86_stepping(tfms);
958 
959 		if (cap0 & (1<<19)) {
960 			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
961 			c->x86_cache_alignment = c->x86_clflush_size;
962 		}
963 	}
964 }
965 
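/*
 * Worked example for the decoding above, using a made-up part with
 * tfms = 0x000906ea: family = 6 (bits 11:8), model = 0x9e (extended
 * model 0x9 in bits 19:16 combined with model 0xe in bits 7:4) and
 * stepping = 0xa (bits 3:0).  The vendor string lives in EBX, EDX, ECX
 * order, which is why the pointers above fill offsets 0, 8 and 4 to
 * spell e.g. "GenuineIntel".
 */
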
966 static void apply_forced_caps(struct cpuinfo_x86 *c)
967 {
968 	int i;
969 
970 	for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
971 		c->x86_capability[i] &= ~cpu_caps_cleared[i];
972 		c->x86_capability[i] |= cpu_caps_set[i];
973 	}
974 }
975 
976 static void init_speculation_control(struct cpuinfo_x86 *c)
977 {
978 	/*
979 	 * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
980 	 * and they also have a different bit for STIBP support. Also,
981 	 * a hypervisor might have set the individual AMD bits even on
982 	 * Intel CPUs, for finer-grained selection of what's available.
983 	 */
984 	if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
985 		set_cpu_cap(c, X86_FEATURE_IBRS);
986 		set_cpu_cap(c, X86_FEATURE_IBPB);
987 		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
988 	}
989 
990 	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
991 		set_cpu_cap(c, X86_FEATURE_STIBP);
992 
993 	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
994 	    cpu_has(c, X86_FEATURE_VIRT_SSBD))
995 		set_cpu_cap(c, X86_FEATURE_SSBD);
996 
997 	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
998 		set_cpu_cap(c, X86_FEATURE_IBRS);
999 		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
1000 	}
1001 
1002 	if (cpu_has(c, X86_FEATURE_AMD_IBPB))
1003 		set_cpu_cap(c, X86_FEATURE_IBPB);
1004 
1005 	if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
1006 		set_cpu_cap(c, X86_FEATURE_STIBP);
1007 		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
1008 	}
1009 
1010 	if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
1011 		set_cpu_cap(c, X86_FEATURE_SSBD);
1012 		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
1013 		clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
1014 	}
1015 }
1016 
1017 void get_cpu_cap(struct cpuinfo_x86 *c)
1018 {
1019 	u32 eax, ebx, ecx, edx;
1020 
1021 	/* Intel-defined flags: level 0x00000001 */
1022 	if (c->cpuid_level >= 0x00000001) {
1023 		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
1024 
1025 		c->x86_capability[CPUID_1_ECX] = ecx;
1026 		c->x86_capability[CPUID_1_EDX] = edx;
1027 	}
1028 
1029 	/* Thermal and Power Management Leaf: level 0x00000006 (eax) */
1030 	if (c->cpuid_level >= 0x00000006)
1031 		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
1032 
1033 	/* Additional Intel-defined flags: level 0x00000007 */
1034 	if (c->cpuid_level >= 0x00000007) {
1035 		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
1036 		c->x86_capability[CPUID_7_0_EBX] = ebx;
1037 		c->x86_capability[CPUID_7_ECX] = ecx;
1038 		c->x86_capability[CPUID_7_EDX] = edx;
1039 
1040 		/* Check valid sub-leaf index before accessing it */
1041 		if (eax >= 1) {
1042 			cpuid_count(0x00000007, 1, &eax, &ebx, &ecx, &edx);
1043 			c->x86_capability[CPUID_7_1_EAX] = eax;
1044 		}
1045 	}
1046 
1047 	/* Extended state features: level 0x0000000d */
1048 	if (c->cpuid_level >= 0x0000000d) {
1049 		cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);
1050 
1051 		c->x86_capability[CPUID_D_1_EAX] = eax;
1052 	}
1053 
1054 	/*
1055 	 * Check if extended CPUID leaves are implemented: Max extended
1056 	 * CPUID leaf must be in the 0x80000001-0x8000ffff range.
1057 	 */
1058 	eax = cpuid_eax(0x80000000);
1059 	c->extended_cpuid_level = ((eax & 0xffff0000) == 0x80000000) ? eax : 0;
1060 
1061 	if (c->extended_cpuid_level >= 0x80000001) {
1062 		cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
1063 
1064 		c->x86_capability[CPUID_8000_0001_ECX] = ecx;
1065 		c->x86_capability[CPUID_8000_0001_EDX] = edx;
1066 	}
1067 
1068 	if (c->extended_cpuid_level >= 0x80000007)
1069 		c->x86_power = cpuid_edx(0x80000007);
1070 
1071 	if (c->extended_cpuid_level >= 0x80000008) {
1072 		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
1073 		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
1074 	}
1075 
1076 	if (c->extended_cpuid_level >= 0x8000000a)
1077 		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
1078 
1079 	if (c->extended_cpuid_level >= 0x8000001f)
1080 		c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f);
1081 
1082 	if (c->extended_cpuid_level >= 0x80000021)
1083 		c->x86_capability[CPUID_8000_0021_EAX] = cpuid_eax(0x80000021);
1084 
1085 	init_scattered_cpuid_features(c);
1086 	init_speculation_control(c);
1087 
1088 	if (IS_ENABLED(CONFIG_X86_64) || cpu_has(c, X86_FEATURE_SEP))
1089 		set_cpu_cap(c, X86_FEATURE_SYSFAST32);
1090 
1091 	/*
1092 	 * Clear/Set all flags overridden by options, after probe.
1093 	 * This needs to happen each time we re-probe, which may happen
1094 	 * several times during CPU initialization.
1095 	 */
1096 	apply_forced_caps(c);
1097 }
1098 
1099 void get_cpu_address_sizes(struct cpuinfo_x86 *c)
1100 {
1101 	u32 eax, ebx, ecx, edx;
1102 
1103 	if (!cpu_has(c, X86_FEATURE_CPUID) ||
1104 	    (c->extended_cpuid_level < 0x80000008)) {
1105 		if (IS_ENABLED(CONFIG_X86_64)) {
1106 			c->x86_clflush_size = 64;
1107 			c->x86_phys_bits = 36;
1108 			c->x86_virt_bits = 48;
1109 		} else {
1110 			c->x86_clflush_size = 32;
1111 			c->x86_virt_bits = 32;
1112 			c->x86_phys_bits = 32;
1113 
1114 			if (cpu_has(c, X86_FEATURE_PAE) ||
1115 			    cpu_has(c, X86_FEATURE_PSE36))
1116 				c->x86_phys_bits = 36;
1117 		}
1118 	} else {
1119 		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
1120 
1121 		c->x86_virt_bits = (eax >> 8) & 0xff;
1122 		c->x86_phys_bits = eax & 0xff;
1123 
1124 		/* Provide a sane default if not enumerated: */
1125 		if (!c->x86_clflush_size)
1126 			c->x86_clflush_size = 32;
1127 	}
1128 
1129 	c->x86_cache_bits = c->x86_phys_bits;
1130 	c->x86_cache_alignment = c->x86_clflush_size;
1131 }
1132 
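/*
 * Decode sketch for leaf 0x80000008 above (EAX value hypothetical):
 * EAX = 0x00003028 would mean 0x28 = 40 physical address bits
 * (EAX[7:0]) and 0x30 = 48 virtual address bits (EAX[15:8]); compare
 * the 36/48-bit fallback used above when the leaf is unavailable.
 */
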
1133 static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
1134 {
1135 	int i;
1136 
1137 	/*
1138 	 * First of all, decide if this is a 486 or higher.
1139 	 * It's a 486 if we can modify the AC flag.
1140 	 */
1141 	if (flag_is_changeable_p(X86_EFLAGS_AC))
1142 		c->x86 = 4;
1143 	else
1144 		c->x86 = 3;
1145 
1146 	for (i = 0; i < X86_VENDOR_NUM; i++)
1147 		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
1148 			c->x86_vendor_id[0] = 0;
1149 			cpu_devs[i]->c_identify(c);
1150 			if (c->x86_vendor_id[0]) {
1151 				get_cpu_vendor(c);
1152 				break;
1153 			}
1154 		}
1155 }
1156 
1157 #define NO_SPECULATION		BIT(0)
1158 #define NO_MELTDOWN		BIT(1)
1159 #define NO_SSB			BIT(2)
1160 #define NO_L1TF			BIT(3)
1161 #define NO_MDS			BIT(4)
1162 #define MSBDS_ONLY		BIT(5)
1163 #define NO_SWAPGS		BIT(6)
1164 #define NO_ITLB_MULTIHIT	BIT(7)
1165 #define NO_SPECTRE_V2		BIT(8)
1166 #define NO_MMIO			BIT(9)
1167 #define NO_EIBRS_PBRSB		BIT(10)
1168 #define NO_BHI			BIT(11)
1169 
1170 #define VULNWL(vendor, family, model, whitelist)	\
1171 	X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)
1172 
1173 #define VULNWL_INTEL(vfm, whitelist)		\
1174 	X86_MATCH_VFM(vfm, whitelist)
1175 
1176 #define VULNWL_AMD(family, whitelist)		\
1177 	VULNWL(AMD, family, X86_MODEL_ANY, whitelist)
1178 
1179 #define VULNWL_HYGON(family, whitelist)		\
1180 	VULNWL(HYGON, family, X86_MODEL_ANY, whitelist)
1181 
1182 static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
1183 	VULNWL(ANY,	4, X86_MODEL_ANY,	NO_SPECULATION),
1184 	VULNWL(CENTAUR,	5, X86_MODEL_ANY,	NO_SPECULATION),
1185 	VULNWL(INTEL,	5, X86_MODEL_ANY,	NO_SPECULATION),
1186 	VULNWL(NSC,	5, X86_MODEL_ANY,	NO_SPECULATION),
1187 	VULNWL(VORTEX,	5, X86_MODEL_ANY,	NO_SPECULATION),
1188 	VULNWL(VORTEX,	6, X86_MODEL_ANY,	NO_SPECULATION),
1189 
1190 	/* Intel Family 6 */
1191 	VULNWL_INTEL(INTEL_TIGERLAKE,		NO_MMIO),
1192 	VULNWL_INTEL(INTEL_TIGERLAKE_L,		NO_MMIO),
1193 	VULNWL_INTEL(INTEL_ALDERLAKE,		NO_MMIO),
1194 	VULNWL_INTEL(INTEL_ALDERLAKE_L,		NO_MMIO),
1195 
1196 	VULNWL_INTEL(INTEL_ATOM_SALTWELL,	NO_SPECULATION | NO_ITLB_MULTIHIT),
1197 	VULNWL_INTEL(INTEL_ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT),
1198 	VULNWL_INTEL(INTEL_ATOM_SALTWELL_MID,	NO_SPECULATION | NO_ITLB_MULTIHIT),
1199 	VULNWL_INTEL(INTEL_ATOM_BONNELL,	NO_SPECULATION | NO_ITLB_MULTIHIT),
1200 	VULNWL_INTEL(INTEL_ATOM_BONNELL_MID,	NO_SPECULATION | NO_ITLB_MULTIHIT),
1201 
1202 	VULNWL_INTEL(INTEL_ATOM_SILVERMONT,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1203 	VULNWL_INTEL(INTEL_ATOM_SILVERMONT_D,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1204 	VULNWL_INTEL(INTEL_ATOM_SILVERMONT_MID,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1205 	VULNWL_INTEL(INTEL_ATOM_AIRMONT,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1206 	VULNWL_INTEL(INTEL_XEON_PHI_KNL,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1207 	VULNWL_INTEL(INTEL_XEON_PHI_KNM,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1208 
1209 	VULNWL_INTEL(INTEL_CORE_YONAH,		NO_SSB),
1210 
1211 	VULNWL_INTEL(INTEL_ATOM_SILVERMONT_MID2,NO_SSB | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | MSBDS_ONLY),
1212 	VULNWL_INTEL(INTEL_ATOM_AIRMONT_NP,	NO_SSB | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
1213 
1214 	VULNWL_INTEL(INTEL_ATOM_GOLDMONT,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
1215 	VULNWL_INTEL(INTEL_ATOM_GOLDMONT_D,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
1216 	VULNWL_INTEL(INTEL_ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
1217 
1218 	/*
1219 	 * Technically, swapgs isn't serializing on AMD (despite it previously
1220 	 * being documented as such in the APM).  But according to AMD, %gs is
1221 	 * updated non-speculatively, and the issuing of %gs-relative memory
1222 	 * operands will be blocked until the %gs update completes, which is
1223 	 * good enough for our purposes.
1224 	 */
1225 
1226 	VULNWL_INTEL(INTEL_ATOM_TREMONT,	NO_EIBRS_PBRSB),
1227 	VULNWL_INTEL(INTEL_ATOM_TREMONT_L,	NO_EIBRS_PBRSB),
1228 	VULNWL_INTEL(INTEL_ATOM_TREMONT_D,	NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
1229 
1230 	/* AMD Family 0xf - 0x12 */
1231 	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
1232 	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
1233 	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
1234 	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
1235 
1236 	/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
1237 	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB | NO_BHI),
1238 	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB | NO_BHI),
1239 
1240 	/* Zhaoxin Family 7 */
1241 	VULNWL(CENTAUR,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO | NO_BHI),
1242 	VULNWL(ZHAOXIN,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO | NO_BHI),
1243 	{}
1244 };
1245 
1246 #define VULNBL(vendor, family, model, blacklist)	\
1247 	X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, blacklist)
1248 
1249 #define VULNBL_INTEL_STEPS(vfm, max_stepping, issues)		   \
1250 	X86_MATCH_VFM_STEPS(vfm, X86_STEP_MIN, max_stepping, issues)
1251 
1252 #define VULNBL_INTEL_TYPE(vfm, cpu_type, issues)	\
1253 	X86_MATCH_VFM_CPU_TYPE(vfm, INTEL_CPU_TYPE_##cpu_type, issues)
1254 
1255 #define VULNBL_AMD(family, blacklist)		\
1256 	VULNBL(AMD, family, X86_MODEL_ANY, blacklist)
1257 
1258 #define VULNBL_HYGON(family, blacklist)		\
1259 	VULNBL(HYGON, family, X86_MODEL_ANY, blacklist)
1260 
1261 #define SRBDS		BIT(0)
1262 /* CPU is affected by X86_BUG_MMIO_STALE_DATA */
1263 #define MMIO		BIT(1)
1264 /* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */
1265 #define MMIO_SBDS	BIT(2)
1266 /* CPU is affected by RETbleed, speculating where you would not expect it */
1267 #define RETBLEED	BIT(3)
1268 /* CPU is affected by SMT (cross-thread) return predictions */
1269 #define SMT_RSB		BIT(4)
1270 /* CPU is affected by SRSO */
1271 #define SRSO		BIT(5)
1272 /* CPU is affected by GDS */
1273 #define GDS		BIT(6)
1274 /* CPU is affected by Register File Data Sampling */
1275 #define RFDS		BIT(7)
1276 /* CPU is affected by Indirect Target Selection */
1277 #define ITS		BIT(8)
1278 /* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */
1279 #define ITS_NATIVE_ONLY	BIT(9)
1280 /* CPU is affected by Transient Scheduler Attacks */
1281 #define TSA		BIT(10)
1282 /* CPU is affected by VMSCAPE */
1283 #define VMSCAPE		BIT(11)
1284 
1285 static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
1286 	VULNBL_INTEL_STEPS(INTEL_SANDYBRIDGE_X,	     X86_STEP_MAX,	VMSCAPE),
1287 	VULNBL_INTEL_STEPS(INTEL_SANDYBRIDGE,	     X86_STEP_MAX,	VMSCAPE),
1288 	VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE_X,	     X86_STEP_MAX,	VMSCAPE),
1289 	VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE,	     X86_STEP_MAX,	SRBDS | VMSCAPE),
1290 	VULNBL_INTEL_STEPS(INTEL_HASWELL,	     X86_STEP_MAX,	SRBDS | VMSCAPE),
1291 	VULNBL_INTEL_STEPS(INTEL_HASWELL_L,	     X86_STEP_MAX,	SRBDS | VMSCAPE),
1292 	VULNBL_INTEL_STEPS(INTEL_HASWELL_G,	     X86_STEP_MAX,	SRBDS | VMSCAPE),
1293 	VULNBL_INTEL_STEPS(INTEL_HASWELL_X,	     X86_STEP_MAX,	MMIO | VMSCAPE),
1294 	VULNBL_INTEL_STEPS(INTEL_BROADWELL_D,	     X86_STEP_MAX,	MMIO | VMSCAPE),
1295 	VULNBL_INTEL_STEPS(INTEL_BROADWELL_X,	     X86_STEP_MAX,	MMIO | VMSCAPE),
1296 	VULNBL_INTEL_STEPS(INTEL_BROADWELL_G,	     X86_STEP_MAX,	SRBDS | VMSCAPE),
1297 	VULNBL_INTEL_STEPS(INTEL_BROADWELL,	     X86_STEP_MAX,	SRBDS | VMSCAPE),
1298 	VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X,		      0x5,	MMIO | RETBLEED | GDS | VMSCAPE),
1299 	VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X,	     X86_STEP_MAX,	MMIO | RETBLEED | GDS | ITS | VMSCAPE),
1300 	VULNBL_INTEL_STEPS(INTEL_SKYLAKE_L,	     X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
1301 	VULNBL_INTEL_STEPS(INTEL_SKYLAKE,	     X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
1302 	VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L,		      0xb,	MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
1303 	VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L,	     X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS | ITS | VMSCAPE),
1304 	VULNBL_INTEL_STEPS(INTEL_KABYLAKE,		      0xc,	MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
1305 	VULNBL_INTEL_STEPS(INTEL_KABYLAKE,	     X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS | ITS | VMSCAPE),
1306 	VULNBL_INTEL_STEPS(INTEL_CANNONLAKE_L,	     X86_STEP_MAX,	RETBLEED | VMSCAPE),
1307 	VULNBL_INTEL_STEPS(INTEL_ICELAKE_L,	     X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
1308 	VULNBL_INTEL_STEPS(INTEL_ICELAKE_D,	     X86_STEP_MAX,	MMIO | GDS | ITS | ITS_NATIVE_ONLY),
1309 	VULNBL_INTEL_STEPS(INTEL_ICELAKE_X,	     X86_STEP_MAX,	MMIO | GDS | ITS | ITS_NATIVE_ONLY),
1310 	VULNBL_INTEL_STEPS(INTEL_COMETLAKE,	     X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | VMSCAPE),
1311 	VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L,		      0x0,	MMIO | RETBLEED | ITS | VMSCAPE),
1312 	VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L,	     X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | VMSCAPE),
1313 	VULNBL_INTEL_STEPS(INTEL_TIGERLAKE_L,	     X86_STEP_MAX,	GDS | ITS | ITS_NATIVE_ONLY),
1314 	VULNBL_INTEL_STEPS(INTEL_TIGERLAKE,	     X86_STEP_MAX,	GDS | ITS | ITS_NATIVE_ONLY),
1315 	VULNBL_INTEL_STEPS(INTEL_LAKEFIELD,	     X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED),
1316 	VULNBL_INTEL_STEPS(INTEL_ROCKETLAKE,	     X86_STEP_MAX,	MMIO | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
1317 	VULNBL_INTEL_TYPE(INTEL_ALDERLAKE,		     ATOM,	RFDS | VMSCAPE),
1318 	VULNBL_INTEL_STEPS(INTEL_ALDERLAKE,	     X86_STEP_MAX,	VMSCAPE),
1319 	VULNBL_INTEL_STEPS(INTEL_ALDERLAKE_L,	     X86_STEP_MAX,	RFDS | VMSCAPE),
1320 	VULNBL_INTEL_TYPE(INTEL_RAPTORLAKE,		     ATOM,	RFDS | VMSCAPE),
1321 	VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE,	     X86_STEP_MAX,	VMSCAPE),
1322 	VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE_P,	     X86_STEP_MAX,	RFDS | VMSCAPE),
1323 	VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE_S,	     X86_STEP_MAX,	RFDS | VMSCAPE),
1324 	VULNBL_INTEL_STEPS(INTEL_METEORLAKE_L,	     X86_STEP_MAX,	VMSCAPE),
1325 	VULNBL_INTEL_STEPS(INTEL_ARROWLAKE_H,	     X86_STEP_MAX,	VMSCAPE),
1326 	VULNBL_INTEL_STEPS(INTEL_ARROWLAKE,	     X86_STEP_MAX,	VMSCAPE),
1327 	VULNBL_INTEL_STEPS(INTEL_ARROWLAKE_U,	     X86_STEP_MAX,	VMSCAPE),
1328 	VULNBL_INTEL_STEPS(INTEL_LUNARLAKE_M,	     X86_STEP_MAX,	VMSCAPE),
1329 	VULNBL_INTEL_STEPS(INTEL_SAPPHIRERAPIDS_X,   X86_STEP_MAX,	VMSCAPE),
1330 	VULNBL_INTEL_STEPS(INTEL_GRANITERAPIDS_X,    X86_STEP_MAX,	VMSCAPE),
1331 	VULNBL_INTEL_STEPS(INTEL_EMERALDRAPIDS_X,    X86_STEP_MAX,	VMSCAPE),
1332 	VULNBL_INTEL_STEPS(INTEL_ATOM_GRACEMONT,     X86_STEP_MAX,	RFDS | VMSCAPE),
1333 	VULNBL_INTEL_STEPS(INTEL_ATOM_TREMONT,	     X86_STEP_MAX,	MMIO | MMIO_SBDS | RFDS),
1334 	VULNBL_INTEL_STEPS(INTEL_ATOM_TREMONT_D,     X86_STEP_MAX,	MMIO | RFDS),
1335 	VULNBL_INTEL_STEPS(INTEL_ATOM_TREMONT_L,     X86_STEP_MAX,	MMIO | MMIO_SBDS | RFDS),
1336 	VULNBL_INTEL_STEPS(INTEL_ATOM_GOLDMONT,      X86_STEP_MAX,	RFDS),
1337 	VULNBL_INTEL_STEPS(INTEL_ATOM_GOLDMONT_D,    X86_STEP_MAX,	RFDS),
1338 	VULNBL_INTEL_STEPS(INTEL_ATOM_GOLDMONT_PLUS, X86_STEP_MAX,	RFDS),
1339 	VULNBL_INTEL_STEPS(INTEL_ATOM_CRESTMONT_X,   X86_STEP_MAX,	VMSCAPE),
1340 
1341 	VULNBL_AMD(0x15, RETBLEED),
1342 	VULNBL_AMD(0x16, RETBLEED),
1343 	VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO | VMSCAPE),
1344 	VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO | VMSCAPE),
1345 	VULNBL_AMD(0x19, SRSO | TSA | VMSCAPE),
1346 	VULNBL_AMD(0x1a, SRSO | VMSCAPE),
1347 	{}
1348 };
1349 
1350 static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which)
1351 {
1352 	const struct x86_cpu_id *m = x86_match_cpu(table);
1353 
1354 	return m && !!(m->driver_data & which);
1355 }
1356 
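/*
 * Usage sketch (mirrors the real call sites below): driver_data in each
 * table entry is an OR of the BIT() flags defined above, so e.g.
 *
 *	cpu_matches(cpu_vuln_whitelist, NO_SSB | NO_L1TF)
 *
 * is true when the matched entry carries either of those flags.
 */
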
1357 u64 x86_read_arch_cap_msr(void)
1358 {
1359 	u64 x86_arch_cap_msr = 0;
1360 
1361 	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
1362 		rdmsrq(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr);
1363 
1364 	return x86_arch_cap_msr;
1365 }
1366 
1367 static bool arch_cap_mmio_immune(u64 x86_arch_cap_msr)
1368 {
1369 	return (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO &&
1370 		x86_arch_cap_msr & ARCH_CAP_PSDP_NO &&
1371 		x86_arch_cap_msr & ARCH_CAP_SBDR_SSDP_NO);
1372 }
1373 
1374 static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
1375 {
1376 	/* The "immunity" bit trumps everything else: */
1377 	if (x86_arch_cap_msr & ARCH_CAP_RFDS_NO)
1378 		return false;
1379 
1380 	/*
1381 	 * VMMs set ARCH_CAP_RFDS_CLEAR for processors not in the blacklist to
1382 	 * indicate that the mitigation is needed because the guest is running
1383 	 * on vulnerable hardware or may migrate to such hardware:
1384 	 */
1385 	if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
1386 		return true;
1387 
1388 	/* Only consult the blacklist when there is no enumeration: */
1389 	return cpu_matches(cpu_vuln_blacklist, RFDS);
1390 }
1391 
1392 static bool __init vulnerable_to_its(u64 x86_arch_cap_msr)
1393 {
1394 	/* The "immunity" bit trumps everything else: */
1395 	if (x86_arch_cap_msr & ARCH_CAP_ITS_NO)
1396 		return false;
1397 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
1398 		return false;
1399 
1400 	/* None of the affected CPUs have BHI_CTRL */
1401 	if (boot_cpu_has(X86_FEATURE_BHI_CTRL))
1402 		return false;
1403 
1404 	/*
1405 	 * If a VMM did not expose ITS_NO, assume that a guest could
1406 	 * be running on vulnerable hardware or may migrate to such
1407 	 * hardware.
1408 	 */
1409 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
1410 		return true;
1411 
1412 	if (cpu_matches(cpu_vuln_blacklist, ITS))
1413 		return true;
1414 
1415 	return false;
1416 }
1417 
1418 static struct x86_cpu_id cpu_latest_microcode[] = {
1419 #include "microcode/intel-ucode-defs.h"
1420 	{}
1421 };
1422 
1423 static bool __init cpu_has_old_microcode(void)
1424 {
1425 	const struct x86_cpu_id *m = x86_match_cpu(cpu_latest_microcode);
1426 
1427 	/* Give unknown CPUs a pass: */
1428 	if (!m) {
1429 		/* Intel CPUs should be in the list. Warn if not: */
1430 		if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
1431 			pr_info("x86/CPU: Model not found in latest microcode list\n");
1432 		return false;
1433 	}
1434 
1435 	/*
1436 	 * Hosts usually lie to guests with a super high microcode
1437 	 * version. Just ignore what hosts tell guests:
1438 	 */
1439 	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
1440 		return false;
1441 
1442 	/* Consider all debug microcode to be old: */
1443 	if (boot_cpu_data.microcode & BIT(31))
1444 		return true;
1445 
1446 	/* Give new microcode a pass: */
1447 	if (boot_cpu_data.microcode >= m->driver_data)
1448 		return false;
1449 
1450 	/* Uh oh, too old: */
1451 	return true;
1452 }
1453 
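/*
 * Comparison sketch (revisions hypothetical): each entry pulled in from
 * intel-ucode-defs.h carries the latest known revision in driver_data.
 * A CPU reporting boot_cpu_data.microcode = 0x00f4 against a table value
 * of 0x0100 is flagged as old; 0x0100 or newer passes, and anything with
 * bit 31 set is treated as debug microcode and therefore old.
 */
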
1454 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
1455 {
1456 	u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
1457 
1458 	if (cpu_has_old_microcode()) {
1459 		pr_warn("x86/CPU: Running old microcode\n");
1460 		setup_force_cpu_bug(X86_BUG_OLD_MICROCODE);
1461 		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
1462 	}
1463 
1464 	/* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
1465 	if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
1466 	    !(x86_arch_cap_msr & ARCH_CAP_PSCHANGE_MC_NO))
1467 		setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
1468 
1469 	if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
1470 		return;
1471 
1472 	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
1473 
1474 	if (!cpu_matches(cpu_vuln_whitelist, NO_SPECTRE_V2)) {
1475 		setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
1476 		setup_force_cpu_bug(X86_BUG_SPECTRE_V2_USER);
1477 	}
1478 
1479 	if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
1480 	    !(x86_arch_cap_msr & ARCH_CAP_SSB_NO) &&
1481 	   !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
1482 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
1483 
1484 	/*
1485 	 * AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature
1486 	 * flag and protect from vendor-specific bugs via the whitelist.
1487 	 *
1488 	 * Don't use AutoIBRS when SNP is enabled because it degrades host
1489 	 * userspace indirect branch performance.
1490 	 */
1491 	if ((x86_arch_cap_msr & ARCH_CAP_IBRS_ALL) ||
1492 	    (cpu_has(c, X86_FEATURE_AUTOIBRS) &&
1493 	     !cpu_feature_enabled(X86_FEATURE_SEV_SNP))) {
1494 		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
1495 		if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
1496 		    !(x86_arch_cap_msr & ARCH_CAP_PBRSB_NO))
1497 			setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
1498 	}
1499 
1500 	if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
1501 	    !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)) {
1502 		setup_force_cpu_bug(X86_BUG_MDS);
1503 		if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
1504 			setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
1505 	}
1506 
1507 	if (!cpu_matches(cpu_vuln_whitelist, NO_SWAPGS))
1508 		setup_force_cpu_bug(X86_BUG_SWAPGS);
1509 
1510 	/*
1511 	 * When the CPU is not mitigated for TAA (TAA_NO=0) set TAA bug when:
1512 	 *	- TSX is supported or
1513 	 *	- TSX_CTRL is present
1514 	 *
1515 	 * TSX_CTRL check is needed for cases when TSX could be disabled before
1516 	 * the kernel boots, e.g. via kexec.
1517 	 * TSX_CTRL check alone is not sufficient for cases when the microcode
1518 	 * update is not present or when running as a guest that doesn't get TSX_CTRL.
1519 	 */
1520 	if (!(x86_arch_cap_msr & ARCH_CAP_TAA_NO) &&
1521 	    (cpu_has(c, X86_FEATURE_RTM) ||
1522 	     (x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR)))
1523 		setup_force_cpu_bug(X86_BUG_TAA);
1524 
1525 	/*
1526 	 * SRBDS affects CPUs which support RDRAND or RDSEED and are listed
1527 	 * in the vulnerability blacklist.
1528 	 *
1529 	 * Some of the implications and mitigation of Shared Buffers Data
1530 	 * Sampling (SBDS) are similar to SRBDS. Give SBDS same treatment as
1531 	 * SRBDS.
1532 	 */
1533 	if ((cpu_has(c, X86_FEATURE_RDRAND) ||
1534 	     cpu_has(c, X86_FEATURE_RDSEED)) &&
1535 	    cpu_matches(cpu_vuln_blacklist, SRBDS | MMIO_SBDS))
1536 		    setup_force_cpu_bug(X86_BUG_SRBDS);
1537 
1538 	/*
1539 	 * Processor MMIO Stale Data bug enumeration
1540 	 *
1541 	 * Affected CPU list is generally enough to enumerate the vulnerability,
1542 	 * but for virtualization case check for ARCH_CAP MSR bits also, VMM may
1543 	 * not want the guest to enumerate the bug.
1544 	 */
1545 	if (!arch_cap_mmio_immune(x86_arch_cap_msr)) {
1546 		if (cpu_matches(cpu_vuln_blacklist, MMIO))
1547 			setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
1548 	}
1549 
1550 	if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
1551 		if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (x86_arch_cap_msr & ARCH_CAP_RSBA))
1552 			setup_force_cpu_bug(X86_BUG_RETBLEED);
1553 	}
1554 
1555 	if (cpu_matches(cpu_vuln_blacklist, SMT_RSB))
1556 		setup_force_cpu_bug(X86_BUG_SMT_RSB);
1557 
1558 	if (!cpu_has(c, X86_FEATURE_SRSO_NO)) {
1559 		if (cpu_matches(cpu_vuln_blacklist, SRSO))
1560 			setup_force_cpu_bug(X86_BUG_SRSO);
1561 	}
1562 
1563 	/*
1564 	 * Check if CPU is vulnerable to GDS. If running in a virtual machine on
1565 	 * an affected processor, the VMM may have disabled the use of GATHER by
1566 	 * disabling AVX2. The only way to do this in HW is to clear XCR0[2],
1567 	 * which means that AVX will be disabled.
1568 	 */
1569 	if (cpu_matches(cpu_vuln_blacklist, GDS) && !(x86_arch_cap_msr & ARCH_CAP_GDS_NO) &&
1570 	    boot_cpu_has(X86_FEATURE_AVX))
1571 		setup_force_cpu_bug(X86_BUG_GDS);
1572 
1573 	if (vulnerable_to_rfds(x86_arch_cap_msr))
1574 		setup_force_cpu_bug(X86_BUG_RFDS);
1575 
1576 	/*
1577 	 * Intel parts with eIBRS are vulnerable to BHI attacks. Parts with
1578 	 * BHI_NO still need to use the BHI mitigation to prevent Intra-mode
1579 	 * attacks.  When virtualized, eIBRS could be hidden, assume vulnerable.
1580 	 */
1581 	if (!cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
1582 	    (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
1583 	     boot_cpu_has(X86_FEATURE_HYPERVISOR)))
1584 		setup_force_cpu_bug(X86_BUG_BHI);
1585 
1586 	if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET))
1587 		setup_force_cpu_bug(X86_BUG_IBPB_NO_RET);
1588 
1589 	if (vulnerable_to_its(x86_arch_cap_msr)) {
1590 		setup_force_cpu_bug(X86_BUG_ITS);
1591 		if (cpu_matches(cpu_vuln_blacklist, ITS_NATIVE_ONLY))
1592 			setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY);
1593 	}
1594 
1595 	if (c->x86_vendor == X86_VENDOR_AMD) {
1596 		if (!cpu_has(c, X86_FEATURE_TSA_SQ_NO) ||
1597 		    !cpu_has(c, X86_FEATURE_TSA_L1_NO)) {
1598 			if (cpu_matches(cpu_vuln_blacklist, TSA) ||
1599 			    /* Enable bug on Zen guests to allow for live migration. */
1600 			    (cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_ZEN)))
1601 				setup_force_cpu_bug(X86_BUG_TSA);
1602 		}
1603 	}
1604 
1605 	/*
1606 	 * Set the bug only on bare-metal. A nested hypervisor should already be
1607 	 * deploying IBPB to isolate itself from nested guests.
1608 	 */
1609 	if (cpu_matches(cpu_vuln_blacklist, VMSCAPE) &&
1610 	    !boot_cpu_has(X86_FEATURE_HYPERVISOR))
1611 		setup_force_cpu_bug(X86_BUG_VMSCAPE);
1612 
1613 	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
1614 		return;
1615 
1616 	/* Rogue Data Cache Load? No! */
1617 	if (x86_arch_cap_msr & ARCH_CAP_RDCL_NO)
1618 		return;
1619 
1620 	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
1621 
1622 	if (cpu_matches(cpu_vuln_whitelist, NO_L1TF))
1623 		return;
1624 
1625 	setup_force_cpu_bug(X86_BUG_L1TF);
1626 }
1627 
1628 /*
1629  * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
1630  * unfortunately, that's not true in practice because of early VIA
1631  * chips and (more importantly) broken virtualizers that are not easy
1632  * to detect. In the latter case it doesn't even *fail* reliably, so
1633  * probing for it doesn't even work. Disable it completely on 32-bit
1634  * unless we can find a reliable way to detect all the broken cases.
1635  * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
1636  */
1637 static void detect_nopl(void)
1638 {
1639 #ifdef CONFIG_X86_32
1640 	setup_clear_cpu_cap(X86_FEATURE_NOPL);
1641 #else
1642 	setup_force_cpu_cap(X86_FEATURE_NOPL);
1643 #endif
1644 }
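
/*
 * For reference, not relied upon above: NOPL is the multi-byte NOP with a
 * ModRM byte (opcode 0F 1F /0), e.g. "0f 1f 40 00" for nopl 0x0(%rax),
 * which allows efficient single-instruction padding of varying lengths.
 */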
1645 
1646 static inline bool parse_set_clear_cpuid(char *arg, bool set)
1647 {
1648 	char *opt;
1649 	int taint = 0;
1650 
1651 	while (arg) {
1652 		bool found __maybe_unused = false;
1653 		unsigned int bit;
1654 
1655 		opt = strsep(&arg, ",");
1656 
1657 		/*
1658 		 * Handle naked numbers first for feature flags which don't
1659 		 * have names. It doesn't make sense for a bug not to have a
1660 		 * name, so don't handle bug flags here.
1661 		 */
1662 		if (!kstrtouint(opt, 10, &bit)) {
1663 			if (bit < NCAPINTS * 32) {
1664 
1665 				if (set) {
1666 					pr_warn("setcpuid: force-enabling CPU feature flag:");
1667 					setup_force_cpu_cap(bit);
1668 				} else {
1669 					pr_warn("clearcpuid: force-disabling CPU feature flag:");
1670 					setup_clear_cpu_cap(bit);
1671 				}
1672 				/* empty-string, i.e., ""-defined feature flags */
1673 				if (!x86_cap_flags[bit])
1674 					pr_cont(" %d:%d\n", bit >> 5, bit & 31);
1675 				else
1676 					pr_cont(" %s\n", x86_cap_flags[bit]);
1677 
1678 				taint++;
1679 			}
1680 			/*
1681 			 * The assumption is that there are no feature names consisting
1682 			 * only of digits, so go on to the next argument.
1683 			 */
1684 			continue;
1685 		}
1686 
1687 		for (bit = 0; bit < 32 * (NCAPINTS + NBUGINTS); bit++) {
1688 			const char *flag;
1689 			const char *kind;
1690 
1691 			if (bit < 32 * NCAPINTS) {
1692 				flag = x86_cap_flags[bit];
1693 				kind = "feature";
1694 			} else {
1695 				kind = "bug";
1696 				flag = x86_bug_flags[bit - (32 * NCAPINTS)];
1697 			}
1698 
1699 			if (!flag)
1700 				continue;
1701 
1702 			if (strcmp(flag, opt))
1703 				continue;
1704 
1705 			if (set) {
1706 				pr_warn("setcpuid: force-enabling CPU %s flag: %s\n",
1707 					kind, flag);
1708 				setup_force_cpu_cap(bit);
1709 			} else {
1710 				pr_warn("clearcpuid: force-disabling CPU %s flag: %s\n",
1711 					kind, flag);
1712 				setup_clear_cpu_cap(bit);
1713 			}
1714 			taint++;
1715 			found = true;
1716 			break;
1717 		}
1718 
1719 		if (!found)
1720 			pr_warn("%s: unknown CPU flag: %s\n", set ? "setcpuid" : "clearcpuid", opt);
1721 	}
1722 
1723 	return taint;
1724 }
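
/*
 * Illustrative usage, flag names are examples only: flags can be given by
 * name or, for nameless feature bits, by raw bit number (printed above in
 * the leaf:bit form, i.e. bit = leaf * 32 + bit-in-leaf), e.g. on the
 * kernel command line:
 *
 *	clearcpuid=smep,440
 *	setcpuid=fred
 */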
1725 
1726 
1727 /*
1728  * We parse cpu parameters early because fpu__init_system() is executed
1729  * before parse_early_param().
1730  */
1731 static void __init cpu_parse_early_param(void)
1732 {
1733 	bool cpuid_taint = false;
1734 	char arg[128];
1735 	int arglen;
1736 
1737 #ifdef CONFIG_X86_32
1738 	if (cmdline_find_option_bool(boot_command_line, "no387"))
1739 #ifdef CONFIG_MATH_EMULATION
1740 		setup_clear_cpu_cap(X86_FEATURE_FPU);
1741 #else
1742 		pr_err("Option 'no387' requires CONFIG_MATH_EMULATION to be enabled.\n");
1743 #endif
1744 
1745 	if (cmdline_find_option_bool(boot_command_line, "nofxsr"))
1746 		setup_clear_cpu_cap(X86_FEATURE_FXSR);
1747 #endif
1748 
1749 	if (cmdline_find_option_bool(boot_command_line, "noxsave"))
1750 		setup_clear_cpu_cap(X86_FEATURE_XSAVE);
1751 
1752 	if (cmdline_find_option_bool(boot_command_line, "noxsaveopt"))
1753 		setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
1754 
1755 	if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
1756 		setup_clear_cpu_cap(X86_FEATURE_XSAVES);
1757 
1758 	if (cmdline_find_option_bool(boot_command_line, "nousershstk"))
1759 		setup_clear_cpu_cap(X86_FEATURE_USER_SHSTK);
1760 
1761 	/* Minimize the window between FRED being available and it being disabled. */
1762 	arglen = cmdline_find_option(boot_command_line, "fred", arg, sizeof(arg));
1763 	if (arglen != 2 || strncmp(arg, "on", 2))
1764 		setup_clear_cpu_cap(X86_FEATURE_FRED);
1765 
1766 	arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg));
1767 	if (arglen > 0)
1768 		cpuid_taint |= parse_set_clear_cpuid(arg, false);
1769 
1770 	arglen = cmdline_find_option(boot_command_line, "setcpuid", arg, sizeof(arg));
1771 	if (arglen > 0)
1772 		cpuid_taint |= parse_set_clear_cpuid(arg, true);
1773 
1774 	if (cpuid_taint) {
1775 		pr_warn("!!! setcpuid=/clearcpuid= in use, this is for TESTING ONLY, may break things horribly. Tainting kernel.\n");
1776 		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
1777 	}
1778 }
1779 
1780 /*
1781  * Do minimum CPU detection early.
1782  * Fields really needed: vendor, cpuid_level, family, model, mask,
1783  * cache alignment.
1784  * The others are not touched to avoid unwanted side effects.
1785  *
1786  * WARNING: this function is only called on the boot CPU.  Don't add code
1787  * here that is supposed to run on all CPUs.
1788  */
1789 static void __init early_identify_cpu(struct cpuinfo_x86 *c)
1790 {
1791 	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
1792 	c->extended_cpuid_level = 0;
1793 
1794 	if (!cpuid_feature())
1795 		identify_cpu_without_cpuid(c);
1796 
1797 	/* Cyrix could have CPUID enabled via c_identify() */
1798 	if (cpuid_feature()) {
1799 		cpu_detect(c);
1800 		get_cpu_vendor(c);
1801 		intel_unlock_cpuid_leafs(c);
1802 		get_cpu_cap(c);
1803 		setup_force_cpu_cap(X86_FEATURE_CPUID);
1804 		get_cpu_address_sizes(c);
1805 		cpu_parse_early_param();
1806 
1807 		cpu_init_topology(c);
1808 
1809 		if (this_cpu->c_early_init)
1810 			this_cpu->c_early_init(c);
1811 
1812 		c->cpu_index = 0;
1813 		filter_cpuid_features(c, false);
1814 		check_cpufeature_deps(c);
1815 
1816 		if (this_cpu->c_bsp_init)
1817 			this_cpu->c_bsp_init(c);
1818 	} else {
1819 		setup_clear_cpu_cap(X86_FEATURE_CPUID);
1820 		get_cpu_address_sizes(c);
1821 		cpu_init_topology(c);
1822 	}
1823 
1824 	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
1825 
1826 	cpu_set_bug_bits(c);
1827 
1828 	sld_setup(c);
1829 
1830 #ifdef CONFIG_X86_32
1831 	/*
1832 	 * Regardless of whether PCID is enumerated, the SDM says
1833 	 * that it can't be enabled in 32-bit mode.
1834 	 */
1835 	setup_clear_cpu_cap(X86_FEATURE_PCID);
1836 
1837 	/*
1838 	 * Never use SYSCALL on a 32-bit kernel
1839 	 */
1840 	setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
1841 #endif
1842 
1843 	/*
1844 	 * Later in the boot process pgtable_l5_enabled() relies on
1845 	 * cpu_feature_enabled(X86_FEATURE_LA57). If 5-level paging is not
1846 	 * enabled by this point we need to clear the feature bit to avoid
1847 	 * false positives at a later stage.
1848 	 *
1849 	 * pgtable_l5_enabled() can be false here for several reasons:
1850 	 *  - 5-level paging is disabled at compile time;
1851 	 *  - it's a 32-bit kernel;
1852 	 *  - machine doesn't support 5-level paging;
1853 	 *  - user specified 'no5lvl' in kernel command line.
1854 	 */
1855 	if (!pgtable_l5_enabled())
1856 		setup_clear_cpu_cap(X86_FEATURE_LA57);
1857 
1858 	detect_nopl();
1859 	mca_bsp_init(c);
1860 }
1861 
1862 void __init init_cpu_devs(void)
1863 {
1864 	const struct cpu_dev *const *cdev;
1865 	int count = 0;
1866 
1867 	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
1868 		const struct cpu_dev *cpudev = *cdev;
1869 
1870 		if (count >= X86_VENDOR_NUM)
1871 			break;
1872 		cpu_devs[count] = cpudev;
1873 		count++;
1874 	}
1875 }
1876 
1877 void __init early_cpu_init(void)
1878 {
1879 #ifdef CONFIG_PROCESSOR_SELECT
1880 	unsigned int i, j;
1881 
1882 	pr_info("KERNEL supported cpus:\n");
1883 #endif
1884 
1885 	init_cpu_devs();
1886 
1887 #ifdef CONFIG_PROCESSOR_SELECT
1888 	for (i = 0; i < X86_VENDOR_NUM && cpu_devs[i]; i++) {
1889 		for (j = 0; j < 2; j++) {
1890 			if (!cpu_devs[i]->c_ident[j])
1891 				continue;
1892 			pr_info("  %s %s\n", cpu_devs[i]->c_vendor,
1893 				cpu_devs[i]->c_ident[j]);
1894 		}
1895 	}
1896 #endif
1897 
1898 	early_identify_cpu(&boot_cpu_data);
1899 }
1900 
1901 static bool detect_null_seg_behavior(void)
1902 {
1903 	/*
1904 	 * Empirically, writing zero to a segment selector on AMD does
1905 	 * not clear the base, whereas writing zero to a segment
1906 	 * selector on Intel does clear the base.  Intel's behavior
1907 	 * allows slightly faster context switches in the common case
1908 	 * where GS is unused by the prev and next threads.
1909 	 *
1910 	 * Since neither vendor documents this anywhere that I can see,
1911 	 * detect it directly instead of hard-coding the choice by
1912 	 * vendor.
1913 	 *
1914 	 * I've designated AMD's behavior as the "bug" because it's
1915 	 * counterintuitive and less friendly.
1916 	 */
1917 
1918 	unsigned long old_base, tmp;
1919 	rdmsrq(MSR_FS_BASE, old_base);		/* stash the current FS base */
1920 	wrmsrq(MSR_FS_BASE, 1);			/* plant a known non-zero base */
1921 	loadsegment(fs, 0);			/* write a NULL selector to FS */
1922 	rdmsrq(MSR_FS_BASE, tmp);		/* did the write clear the base? */
1923 	wrmsrq(MSR_FS_BASE, old_base);		/* restore the original base */
1924 	return tmp == 0;
1925 }
1926 
1927 void check_null_seg_clears_base(struct cpuinfo_x86 *c)
1928 {
1929 	/* BUG_NULL_SEG is only relevant with 64-bit userspace */
1930 	if (!IS_ENABLED(CONFIG_X86_64))
1931 		return;
1932 
1933 	if (cpu_has(c, X86_FEATURE_NULL_SEL_CLR_BASE))
1934 		return;
1935 
1936 	/*
1937 	 * The CPUID bit above wasn't set. If this kernel is running
1938 	 * as a HV guest, then the HV has decided not to advertise
1939 	 * that CPUID bit for whatever reason.  For example, one
1940 	 * member of the migration pool might be vulnerable.  That
1941 	 * means the bug is present: set the BUG flag and return.
1942 	 */
1943 	if (cpu_has(c, X86_FEATURE_HYPERVISOR)) {
1944 		set_cpu_bug(c, X86_BUG_NULL_SEG);
1945 		return;
1946 	}
1947 
1948 	/*
1949 	 * Zen2 CPUs also have this behaviour, but no CPUID bit.
1950 	 * 0x18 is the respective family for Hygon.
1951 	 */
1952 	if ((c->x86 == 0x17 || c->x86 == 0x18) &&
1953 	    detect_null_seg_behavior())
1954 		return;
1955 
1956 	/* All the remaining ones are affected */
1957 	set_cpu_bug(c, X86_BUG_NULL_SEG);
1958 }
1959 
1960 static void generic_identify(struct cpuinfo_x86 *c)
1961 {
1962 	c->extended_cpuid_level = 0;
1963 
1964 	if (!cpuid_feature())
1965 		identify_cpu_without_cpuid(c);
1966 
1967 	/* Cyrix could have CPUID enabled via c_identify() */
1968 	if (!cpuid_feature())
1969 		return;
1970 
1971 	cpu_detect(c);
1972 
1973 	get_cpu_vendor(c);
1974 	intel_unlock_cpuid_leafs(c);
1975 	get_cpu_cap(c);
1976 
1977 	get_cpu_address_sizes(c);
1978 
1979 	get_model_name(c); /* Default name */
1980 
1981 	/*
1982 	 * ESPFIX is a strange bug.  All real CPUs have it.  Paravirt
1983 	 * systems that run Linux at CPL > 0 may or may not have the
1984 	 * issue, but, even if they have the issue, there's absolutely
1985 	 * nothing we can do about it because we can't use the real IRET
1986 	 * instruction.
1987 	 *
1988 	 * NB: For the time being, only 32-bit kernels support
1989 	 * X86_BUG_ESPFIX as such.  64-bit kernels directly choose
1990 	 * whether to apply espfix using paravirt hooks.  If any
1991 	 * non-paravirt system ever shows up that does *not* have the
1992 	 * ESPFIX issue, we can change this.
1993 	 */
1994 #ifdef CONFIG_X86_32
1995 	set_cpu_bug(c, X86_BUG_ESPFIX);
1996 #endif
1997 }
1998 
1999 /*
2000  * This does the hard work of actually picking apart the CPU stuff...
2001  */
2002 static void identify_cpu(struct cpuinfo_x86 *c)
2003 {
2004 	int i;
2005 
2006 	c->loops_per_jiffy = loops_per_jiffy;
2007 	c->x86_cache_size = 0;
2008 	c->x86_vendor = X86_VENDOR_UNKNOWN;
2009 	c->x86_model = c->x86_stepping = 0;	/* So far unknown... */
2010 	c->x86_vendor_id[0] = '\0'; /* Unset */
2011 	c->x86_model_id[0] = '\0';  /* Unset */
2012 #ifdef CONFIG_X86_64
2013 	c->x86_clflush_size = 64;
2014 	c->x86_phys_bits = 36;
2015 	c->x86_virt_bits = 48;
2016 #else
2017 	c->cpuid_level = -1;	/* CPUID not detected */
2018 	c->x86_clflush_size = 32;
2019 	c->x86_phys_bits = 32;
2020 	c->x86_virt_bits = 32;
2021 #endif
2022 	c->x86_cache_alignment = c->x86_clflush_size;
2023 	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
2024 #ifdef CONFIG_X86_VMX_FEATURE_NAMES
2025 	memset(&c->vmx_capability, 0, sizeof(c->vmx_capability));
2026 #endif
2027 
2028 	generic_identify(c);
2029 
2030 	cpu_parse_topology(c);
2031 
2032 	if (this_cpu->c_identify)
2033 		this_cpu->c_identify(c);
2034 
2035 	/* Clear/Set all flags overridden by options, after probe */
2036 	apply_forced_caps(c);
2037 
2038 	/*
2039 	 * Set default APIC and TSC_DEADLINE MSR fencing flag. AMD and
2040 	 * Hygon will clear it in ->c_init() below.
2041 	 */
2042 	set_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
2043 
2044 	/*
2045 	 * Vendor-specific initialization.  In this section we
2046 	 * canonicalize the feature flags, meaning if there are
2047 	 * features a certain CPU supports which CPUID doesn't
2048 	 * tell us, CPUID claiming incorrect flags, or other bugs,
2049 	 * we handle them here.
2050 	 *
2051 	 * At the end of this section, c->x86_capability better
2052 	 * indicate the features this CPU genuinely supports!
2053 	 */
2054 	if (this_cpu->c_init)
2055 		this_cpu->c_init(c);
2056 
2057 	bus_lock_init();
2058 
2059 	/* Disable the PN if appropriate */
2060 	squash_the_stupid_serial_number(c);
2061 
2062 	setup_smep(c);
2063 	setup_smap(c);
2064 	setup_umip(c);
2065 	setup_lass(c);
2066 
2067 	/*
2068 	 * The vendor-specific functions might have changed features.
2069 	 * Now we do "generic changes."
2070 	 */
2071 
2072 	/* Filter out anything that depends on CPUID levels we don't have */
2073 	filter_cpuid_features(c, true);
2074 
2075 	/* Check for unmet dependencies based on the CPUID dependency table */
2076 	check_cpufeature_deps(c);
2077 
2078 	/* If the model name is still unset, do table lookup. */
2079 	if (!c->x86_model_id[0]) {
2080 		const char *p;
2081 		p = table_lookup_model(c);
2082 		if (p)
2083 			strcpy(c->x86_model_id, p);
2084 		else
2085 			/* Last resort... */
2086 			sprintf(c->x86_model_id, "%02x/%02x",
2087 				c->x86, c->x86_model);
2088 	}
2089 
2090 	x86_init_rdrand(c);
2091 	setup_pku(c);
2092 	setup_cet(c);
2093 
2094 	/*
2095 	 * Clear/Set all flags overridden by options. This needs to happen
2096 	 * before the SMP all-CPUs capability AND below.
2097 	 */
2098 	apply_forced_caps(c);
2099 
2100 	/*
2101 	 * On SMP, boot_cpu_data holds the common feature set between
2102 	 * all CPUs; so make sure that we indicate which features are
2103 	 * common between the CPUs.  The first time this routine gets
2104 	 * executed, c == &boot_cpu_data.
2105 	 */
2106 	if (c != &boot_cpu_data) {
2107 		/* AND the already accumulated flags with these */
2108 		for (i = 0; i < NCAPINTS; i++)
2109 			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
2110 
2111 		/* OR, i.e. replicate the bug flags */
2112 		for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
2113 			c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
2114 	}
2115 
2116 	ppin_init(c);
2117 
2118 	/* Init Machine Check Exception if available. */
2119 	mcheck_cpu_init(c);
2120 
2121 	numa_add_cpu(smp_processor_id());
2122 }
2123 
2124 /*
2125  * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
2126  * on 32-bit kernels:
2127  */
2128 #ifdef CONFIG_X86_32
2129 void enable_sep_cpu(void)
2130 {
2131 	struct tss_struct *tss;
2132 	int cpu;
2133 
2134 	if (!boot_cpu_has(X86_FEATURE_SEP))
2135 		return;
2136 
2137 	cpu = get_cpu();
2138 	tss = &per_cpu(cpu_tss_rw, cpu);
2139 
2140 	/*
2141 	 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
2142 	 * see the big comment in struct x86_hw_tss's definition.
2143 	 */
2144 
2145 	tss->x86_tss.ss1 = __KERNEL_CS;
2146 	wrmsrq(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1);
2147 	wrmsrq(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1));
2148 	wrmsrq(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32);
2149 
2150 	put_cpu();
2151 }
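
/*
 * For reference: with the MSRs programmed above, a SYSENTER from user space
 * loads CS from MSR_IA32_SYSENTER_CS (SS is derived from it), ESP from
 * MSR_IA32_SYSENTER_ESP and EIP from MSR_IA32_SYSENTER_EIP, landing in
 * entry_SYSENTER_32 on this CPU's entry stack.
 */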
2152 #endif
2153 
2154 static __init void identify_boot_cpu(void)
2155 {
2156 	identify_cpu(&boot_cpu_data);
2157 	if (HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT))
2158 		pr_info("CET detected: Indirect Branch Tracking enabled\n");
2159 #ifdef CONFIG_X86_32
2160 	enable_sep_cpu();
2161 #endif
2162 	cpu_detect_tlb(&boot_cpu_data);
2163 	setup_cr_pinning();
2164 
2165 	x86_virt_init();
2166 	tsx_init();
2167 	tdx_init();
2168 	lkgs_init();
2169 }
2170 
2171 void identify_secondary_cpu(unsigned int cpu)
2172 {
2173 	struct cpuinfo_x86 *c = &cpu_data(cpu);
2174 
2175 	/* Copy boot_cpu_data only on the first bringup */
2176 	if (!c->initialized)
2177 		*c = boot_cpu_data;
2178 	c->cpu_index = cpu;
2179 
2180 	identify_cpu(c);
2181 #ifdef CONFIG_X86_32
2182 	enable_sep_cpu();
2183 #endif
2184 	x86_spec_ctrl_setup_ap();
2185 	update_srbds_msr();
2186 	if (boot_cpu_has_bug(X86_BUG_GDS))
2187 		update_gds_msr();
2188 
2189 	tsx_ap_init();
2190 	c->initialized = true;
2191 }
2192 
2193 void print_cpu_info(struct cpuinfo_x86 *c)
2194 {
2195 	const char *vendor = NULL;
2196 
2197 	if (c->x86_vendor < X86_VENDOR_NUM) {
2198 		vendor = this_cpu->c_vendor;
2199 	} else {
2200 		if (c->cpuid_level >= 0)
2201 			vendor = c->x86_vendor_id;
2202 	}
2203 
2204 	if (vendor && !strstr(c->x86_model_id, vendor))
2205 		pr_cont("%s ", vendor);
2206 
2207 	if (c->x86_model_id[0])
2208 		pr_cont("%s", c->x86_model_id);
2209 	else
2210 		pr_cont("%d86", c->x86);
2211 
2212 	pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
2213 
2214 	if (c->x86_stepping || c->cpuid_level >= 0)
2215 		pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
2216 	else
2217 		pr_cont(")\n");
2218 }
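
/*
 * Illustrative only, the model string is made up: the function above emits
 * a boot line shaped like
 *
 *	GenuineVendor Some CPU Model 9000 (family: 0x6, model: 0x8e, stepping: 0x9)
 *
 * prepending the vendor only when the model string doesn't already
 * contain it.
 */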
2219 
2220 /*
2221  * clearcpuid= and setcpuid= were already parsed in cpu_parse_early_param().
2222  * These dummy functions prevent them from being passed on to init as
2223  * environment variables.
2224  */
2225 
2226 static __init int setup_clearcpuid(char *arg)
2227 {
2228 	return 1;
2229 }
2230 __setup("clearcpuid=", setup_clearcpuid);
2231 
2232 static __init int setup_setcpuid(char *arg)
2233 {
2234 	return 1;
2235 }
2236 __setup("setcpuid=", setup_setcpuid);
2237 
2238 DEFINE_PER_CPU_CACHE_HOT(struct task_struct *, current_task) = &init_task;
2239 EXPORT_PER_CPU_SYMBOL(current_task);
2240 EXPORT_PER_CPU_SYMBOL(const_current_task);
2241 
2242 DEFINE_PER_CPU_CACHE_HOT(int, __preempt_count) = INIT_PREEMPT_COUNT;
2243 EXPORT_PER_CPU_SYMBOL(__preempt_count);
2244 
2245 DEFINE_PER_CPU_CACHE_HOT(unsigned long, cpu_current_top_of_stack) = TOP_OF_INIT_STACK;
2246 
2247 #ifdef CONFIG_X86_64
2248 /*
2249  * Note: Do not make this dependent on CONFIG_MITIGATION_CALL_DEPTH_TRACKING
2250  * so that this space is reserved in the hot cache section even when the
2251  * mitigation is disabled.
2252  */
2253 DEFINE_PER_CPU_CACHE_HOT(u64, __x86_call_depth);
2254 EXPORT_PER_CPU_SYMBOL(__x86_call_depth);
2255 
2256 static void wrmsrq_cstar(unsigned long val)
2257 {
2258 	/*
2259 	 * Intel CPUs do not support 32-bit SYSCALL. Writing to MSR_CSTAR
2260 	 * is so far ignored by the CPU, but raises a #VE trap in a TDX
2261 	 * guest. Avoid the pointless write on all Intel CPUs.
2262 	 */
2263 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
2264 		wrmsrq(MSR_CSTAR, val);
2265 }
2266 
2267 static inline void idt_syscall_init(void)
2268 {
2269 	wrmsrq(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
2270 
2271 	if (ia32_enabled()) {
2272 		wrmsrq_cstar((unsigned long)entry_SYSCALL_compat);
2273 		/*
2274 		 * This only works on Intel CPUs.
2275 		 * On AMD CPUs these MSRs are 32-bit, so the CPU truncates MSR_IA32_SYSENTER_EIP.
2276 		 * This does not cause SYSENTER to jump to the wrong location, because
2277 		 * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
2278 		 */
2279 		wrmsrq_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
2280 		wrmsrq_safe(MSR_IA32_SYSENTER_ESP,
2281 			    (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1));
2282 		wrmsrq_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
2283 	} else {
2284 		wrmsrq_cstar((unsigned long)entry_SYSCALL32_ignore);
2285 		wrmsrq_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
2286 		wrmsrq_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
2287 		wrmsrq_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
2288 	}
2289 
2290 	/*
2291 	 * Flags to clear on syscall; clear as much as possible
2292 	 * to minimize user space-kernel interference.
2293 	 */
2294 	wrmsrq(MSR_SYSCALL_MASK,
2295 	       X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF|
2296 	       X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_TF|
2297 	       X86_EFLAGS_IF|X86_EFLAGS_DF|X86_EFLAGS_OF|
2298 	       X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_RF|
2299 	       X86_EFLAGS_AC|X86_EFLAGS_ID);
2300 }
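
/*
 * For reference: on SYSCALL the CPU loads RIP from MSR_LSTAR (or the CSTAR
 * value for 32-bit SYSCALL), saves RFLAGS in R11 and then clears the RFLAGS
 * bits named in MSR_SYSCALL_MASK; X86_EFLAGS_IF being in the mask above
 * means the entry code starts with interrupts disabled.
 */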
2301 
2302 /* May not be marked __init: used by software suspend */
2303 void syscall_init(void)
2304 {
2305 	/* The default user (SYSRET base, bits 63:48) and kernel (SYSCALL, bits 47:32) segments */
2306 	wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
2307 
2308 	/*
2309 	 * Except the IA32_STAR MSR, there is NO need to setup SYSCALL and
2310 	 * Other than the IA32_STAR MSR, there is NO need to set up SYSCALL and
2311 	 * entrypoint for SYSCALL and SYSENTER, and ERETU is the only legit
2312 	 * instruction to return to ring 3 (both sysexit and sysret cause
2313 	 * #UD when FRED is enabled).
2314 	 */
2315 	if (!cpu_feature_enabled(X86_FEATURE_FRED))
2316 		idt_syscall_init();
2317 }
2318 #endif /* CONFIG_X86_64 */
2319 
2320 #ifdef CONFIG_STACKPROTECTOR
2321 DEFINE_PER_CPU_CACHE_HOT(unsigned long, __stack_chk_guard);
2322 #ifndef CONFIG_SMP
2323 EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
2324 #endif
2325 #endif
2326 
2327 static void initialize_debug_regs(void)
2328 {
2329 	/* Control register first -- to make sure everything is disabled. */
2330 	set_debugreg(DR7_FIXED_1, 7);
2331 	set_debugreg(DR6_RESERVED, 6);
2332 	/* dr5 and dr4 don't exist */
2333 	set_debugreg(0, 3);
2334 	set_debugreg(0, 2);
2335 	set_debugreg(0, 1);
2336 	set_debugreg(0, 0);
2337 }
2338 
2339 #ifdef CONFIG_KGDB
2340 /*
2341  * Restore debug regs if kgdbwait is in use and a kernel debugger
2342  * connection is established.
2343  */
2344 static void dbg_restore_debug_regs(void)
2345 {
2346 	if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
2347 		arch_kgdb_ops.correct_hw_break();
2348 }
2349 #else /* ! CONFIG_KGDB */
2350 #define dbg_restore_debug_regs()
2351 #endif /* ! CONFIG_KGDB */
2352 
2353 static inline void setup_getcpu(int cpu)
2354 {
2355 	unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
2356 	struct desc_struct d = { };
2357 
2358 	if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID))
2359 		wrmsrq(MSR_TSC_AUX, cpudata);
2360 
2361 	/* Store CPU and node number in limit. */
2362 	d.limit0 = cpudata;
2363 	d.limit1 = cpudata >> 16;
2364 
2365 	d.type = 5;		/* RO data, expand down, accessed */
2366 	d.dpl = 3;		/* Visible to user code */
2367 	d.s = 1;		/* Not a system segment */
2368 	d.p = 1;		/* Present */
2369 	d.d = 1;		/* 32-bit */
2370 
2371 	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S);
2372 }
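
/*
 * Illustrative only, the real reader lives in the vDSO: user space can
 * recover the value stored above with an unprivileged LSL on the
 * user-visible CPUNODE segment, roughly:
 *
 *	unsigned int p;
 *	asm("lsl %1, %0" : "=r" (p) : "r" ((GDT_ENTRY_CPUNODE << 3) | 3));
 *
 * and then unpack CPU and node as the inverse of vdso_encode_cpunode().
 */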
2373 
2374 #ifdef CONFIG_X86_64
2375 static inline void tss_setup_ist(struct tss_struct *tss)
2376 {
2377 	/* Set up the per-CPU TSS IST stacks */
2378 	tss->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF);
2379 	tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI);
2380 	tss->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB);
2381 	tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);
2382 	/* Only mapped when SEV-ES is active */
2383 	tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC);
2384 }
2385 #else /* CONFIG_X86_64 */
2386 static inline void tss_setup_ist(struct tss_struct *tss) { }
2387 #endif /* !CONFIG_X86_64 */
2388 
2389 static inline void tss_setup_io_bitmap(struct tss_struct *tss)
2390 {
2391 	tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET_INVALID;
2392 
2393 #ifdef CONFIG_X86_IOPL_IOPERM
2394 	tss->io_bitmap.prev_max = 0;
2395 	tss->io_bitmap.prev_sequence = 0;
2396 	memset(tss->io_bitmap.bitmap, 0xff, sizeof(tss->io_bitmap.bitmap));
2397 	/*
2398 	 * Invalidate the extra array entry past the end of the
2399 	 * all-permission bitmap, as required by the hardware.
2400 	 */
2401 	tss->io_bitmap.mapall[IO_BITMAP_LONGS] = ~0UL;
2402 #endif
2403 }
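
/*
 * For reference: IO_BITMAP_OFFSET_INVALID points past the TSS segment
 * limit, so while it is installed any userspace I/O port access at
 * IOPL < 3 raises #GP; ioperm()/iopl() install a valid offset on demand.
 */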
2404 
2405 /*
2406  * Setup everything needed to handle exceptions from the IDT, including the IST
2407  * exceptions which use paranoid_entry().
2408  */
2409 void cpu_init_exception_handling(bool boot_cpu)
2410 {
2411 	struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
2412 	int cpu = raw_smp_processor_id();
2413 
2414 	/* paranoid_entry() gets the CPU number from the GDT */
2415 	setup_getcpu(cpu);
2416 
2417 	/* For IDT mode, IST vectors need to be set in TSS. */
2418 	if (!cpu_feature_enabled(X86_FEATURE_FRED))
2419 		tss_setup_ist(tss);
2420 	tss_setup_io_bitmap(tss);
2421 	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
2422 
2423 	load_TR_desc();
2424 
2425 	/* GHCB needs to be setup to handle #VC. */
2426 	setup_ghcb();
2427 
2428 	/*
2429 	 * On CPUs with FSGSBASE support, paranoid_entry() uses
2430 	 * ALTERNATIVE-patched RDGSBASE/WRGSBASE instructions. Secondary CPUs
2431 	 * boot after alternatives are patched globally, so early exceptions
2432 	 * execute patched code that depends on FSGSBASE. Enable the feature
2433 	 * before any exceptions occur.
2434 	 */
2435 	if (cpu_feature_enabled(X86_FEATURE_FSGSBASE)) {
2436 		cr4_set_bits(X86_CR4_FSGSBASE);
2437 		elf_hwcap2 |= HWCAP2_FSGSBASE;
2438 	}
2439 
2440 	if (cpu_feature_enabled(X86_FEATURE_FRED)) {
2441 		/* The boot CPU has enabled FRED during early boot */
2442 		if (!boot_cpu)
2443 			cpu_init_fred_exceptions();
2444 
2445 		cpu_init_fred_rsps();
2446 	} else {
2447 		load_current_idt();
2448 	}
2449 }
2450 
2451 void __init cpu_init_replace_early_idt(void)
2452 {
2453 	if (cpu_feature_enabled(X86_FEATURE_FRED))
2454 		cpu_init_fred_exceptions();
2455 	else
2456 		idt_setup_early_pf();
2457 }
2458 
2459 /*
2460  * cpu_init() initializes state that is per-CPU. Some data is already
2461  * initialized (naturally) in the bootstrap process, such as the GDT.  We
2462  * reload it nevertheless; this function acts as a 'CPU state barrier':
2463  * nothing should get across.
2464  */
2465 void cpu_init(void)
2466 {
2467 	struct task_struct *cur = current;
2468 	int cpu = raw_smp_processor_id();
2469 
2470 #ifdef CONFIG_NUMA
2471 	if (this_cpu_read(numa_node) == 0 &&
2472 	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
2473 		set_numa_node(early_cpu_to_node(cpu));
2474 #endif
2475 	pr_debug("Initializing CPU#%d\n", cpu);
2476 
2477 	if (IS_ENABLED(CONFIG_X86_64) || cpu_feature_enabled(X86_FEATURE_VME) ||
2478 	    boot_cpu_has(X86_FEATURE_TSC) || boot_cpu_has(X86_FEATURE_DE))
2479 		cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
2480 
2481 	if (IS_ENABLED(CONFIG_X86_64)) {
2482 		loadsegment(fs, 0);
2483 		memset(cur->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
2484 		syscall_init();
2485 
2486 		wrmsrq(MSR_FS_BASE, 0);
2487 		wrmsrq(MSR_KERNEL_GS_BASE, 0);
2488 		barrier();
2489 
2490 		x2apic_setup();
2491 
2492 		intel_posted_msi_init();
2493 	}
2494 
2495 	mmgrab(&init_mm);
2496 	cur->active_mm = &init_mm;
2497 	BUG_ON(cur->mm);
2498 	initialize_tlbstate_and_flush();
2499 	enter_lazy_tlb(&init_mm, cur);
2500 
2501 	/*
2502 	 * sp0 points to the entry trampoline stack regardless of what task
2503 	 * is running.
2504 	 */
2505 	load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
2506 
2507 	load_mm_ldt(&init_mm);
2508 
2509 	initialize_debug_regs();
2510 	dbg_restore_debug_regs();
2511 
2512 	doublefault_init_cpu_tss();
2513 
2514 	if (is_uv_system())
2515 		uv_cpu_init();
2516 
2517 	load_fixmap_gdt(cpu);
2518 }
2519 
2520 #ifdef CONFIG_MICROCODE_LATE_LOADING
2521 /**
2522  * store_cpu_caps() - Store a snapshot of CPU capabilities
2523  * @curr_info: Pointer to where to store the snapshot
2524  *
2525  * Returns: None
2526  */
2527 void store_cpu_caps(struct cpuinfo_x86 *curr_info)
2528 {
2529 	/* Reload CPUID max function as it might've changed. */
2530 	curr_info->cpuid_level = cpuid_eax(0);
2531 
2532 	/* Copy all capability leafs and pick up the synthetic ones. */
2533 	memcpy(&curr_info->x86_capability, &boot_cpu_data.x86_capability,
2534 	       sizeof(curr_info->x86_capability));
2535 
2536 	/* Get the hardware CPUID leafs */
2537 	get_cpu_cap(curr_info);
2538 }
2539 
2540 /**
2541  * microcode_check() - Check if any CPU capabilities changed after an update.
2542  * @prev_info:	CPU capabilities stored before an update.
2543  *
2544  * The microcode loader calls this upon late microcode load to recheck features,
2545  * only when microcode has been updated. The caller holds the CPU hotplug lock.
2546  *
2547  * Return: None
2548  */
2549 void microcode_check(struct cpuinfo_x86 *prev_info)
2550 {
2551 	struct cpuinfo_x86 curr_info;
2552 
2553 	perf_check_microcode();
2554 
2555 	amd_check_microcode();
2556 
2557 	store_cpu_caps(&curr_info);
2558 
2559 	if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability,
2560 		    sizeof(prev_info->x86_capability)))
2561 		return;
2562 
2563 	pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
2564 	pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
2565 }
2566 #endif
2567 
2568 /*
2569  * Invoked from core CPU hotplug code after hotplug operations
2570  */
2571 void arch_smt_update(void)
2572 {
2573 	/* Handle the speculative execution misfeatures */
2574 	cpu_bugs_smt_update();
2575 	/* Check whether IPI broadcasting can be enabled */
2576 	apic_smt_update();
2577 }
2578 
2579 void __init arch_cpu_finalize_init(void)
2580 {
2581 	struct cpuinfo_x86 *c = this_cpu_ptr(&cpu_info);
2582 
2583 	identify_boot_cpu();
2584 
2585 	select_idle_routine();
2586 
2587 	/*
2588 	 * identify_boot_cpu() initialized SMT support information, let the
2589 	 * core code know.
2590 	 */
2591 	cpu_smt_set_num_threads(__max_threads_per_core, __max_threads_per_core);
2592 
2593 	if (!IS_ENABLED(CONFIG_SMP)) {
2594 		pr_info("CPU: ");
2595 		print_cpu_info(&boot_cpu_data);
2596 	}
2597 
2598 	cpu_select_mitigations();
2599 
2600 	arch_smt_update();
2601 
2602 	if (IS_ENABLED(CONFIG_X86_32)) {
2603 		/*
2604 		 * Check whether this is a real i386, which is no longer
2605 		 * supported, and fix up the utsname.
2606 		 */
2607 		if (boot_cpu_data.x86 < 4)
2608 			panic("Kernel requires i486+ for 'invlpg' and other features");
2609 
2610 		init_utsname()->machine[1] =
2611 			'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
2612 	}
2613 
2614 	/*
2615 	 * Must be before alternatives because it might set or clear
2616 	 * feature bits.
2617 	 */
2618 	fpu__init_system();
2619 	fpu__init_cpu();
2620 
2621 	/*
2622 	 * This needs to follow the FPU initialization, since EFI depends on it.
2623 	 */
2624 	if (efi_enabled(EFI_RUNTIME_SERVICES))
2625 		efi_enter_virtual_mode();
2626 
2627 	/*
2628 	 * Ensure that access to the per CPU representation has the initial
2629 	 * boot CPU configuration.
2630 	 */
2631 	*c = boot_cpu_data;
2632 	c->initialized = true;
2633 
2634 	alternative_instructions();
2635 
2636 	if (IS_ENABLED(CONFIG_X86_64)) {
2637 		USER_PTR_MAX = TASK_SIZE_MAX;
2638 
2639 		/*
2640 		 * Enable this when LAM is gated on LASS support
2641 		if (cpu_feature_enabled(X86_FEATURE_LAM))
2642 			USER_PTR_MAX = (1ul << 63) - PAGE_SIZE;
2643 		 */
2644 		runtime_const_init(ptr, USER_PTR_MAX);
2645 
2646 		/*
2647 		 * Make sure the first 2MB area is not mapped by huge pages
2648 		 * There are typically fixed-size MTRRs in there and overlapping
2649 		 * MTRRs into large pages causes slowdowns.
2650 		 *
2651 		 * Right now we don't do that with gbpages because there seems
2652 		 * very little benefit for that case.
2653 		 */
2654 		if (!direct_gbpages)
2655 			set_memory_4k((unsigned long)__va(0), 1);
2656 	} else {
2657 		fpu__init_check_bugs();
2658 	}
2659 
2660 	/*
2661 	 * This needs to be called before any devices perform DMA
2662 	 * operations that might use the SWIOTLB bounce buffers. It will
2663 	 * mark the bounce buffers as decrypted so that their usage will
2664 	 * not cause "plain-text" data to be decrypted when accessed. It
2665 	 * must be called after late_time_init() so that Hyper-V x86/x64
2666 	 * hypercalls work when the SWIOTLB bounce buffers are decrypted.
2667 	 */
2668 	mem_encrypt_init();
2669 }
2670