xref: /linux/arch/x86/kernel/cpu/common.c (revision e724e7aaf9ca794670a4d4931af7a7e24e37fec3)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* cpu_feature_enabled() cannot be used this early */
3 #define USE_EARLY_PGTABLE_L5
4 
5 #include <linux/memblock.h>
6 #include <linux/linkage.h>
7 #include <linux/bitops.h>
8 #include <linux/kernel.h>
9 #include <linux/export.h>
10 #include <linux/percpu.h>
11 #include <linux/string.h>
12 #include <linux/ctype.h>
13 #include <linux/delay.h>
14 #include <linux/sched/mm.h>
15 #include <linux/sched/clock.h>
16 #include <linux/sched/task.h>
17 #include <linux/sched/smt.h>
18 #include <linux/init.h>
19 #include <linux/kprobes.h>
20 #include <linux/kgdb.h>
21 #include <linux/mem_encrypt.h>
22 #include <linux/smp.h>
23 #include <linux/cpu.h>
24 #include <linux/io.h>
25 #include <linux/syscore_ops.h>
26 #include <linux/pgtable.h>
27 #include <linux/stackprotector.h>
28 #include <linux/utsname.h>
29 
30 #include <asm/alternative.h>
31 #include <asm/cmdline.h>
32 #include <asm/perf_event.h>
33 #include <asm/mmu_context.h>
34 #include <asm/doublefault.h>
35 #include <asm/archrandom.h>
36 #include <asm/hypervisor.h>
37 #include <asm/processor.h>
38 #include <asm/tlbflush.h>
39 #include <asm/debugreg.h>
40 #include <asm/sections.h>
41 #include <asm/vsyscall.h>
42 #include <linux/topology.h>
43 #include <linux/cpumask.h>
44 #include <linux/atomic.h>
45 #include <asm/proto.h>
46 #include <asm/setup.h>
47 #include <asm/apic.h>
48 #include <asm/desc.h>
49 #include <asm/fpu/api.h>
50 #include <asm/mtrr.h>
51 #include <asm/hwcap2.h>
52 #include <linux/numa.h>
53 #include <asm/numa.h>
54 #include <asm/asm.h>
55 #include <asm/bugs.h>
56 #include <asm/cpu.h>
57 #include <asm/mce.h>
58 #include <asm/msr.h>
59 #include <asm/cacheinfo.h>
60 #include <asm/memtype.h>
61 #include <asm/microcode.h>
62 #include <asm/microcode_intel.h>
63 #include <asm/intel-family.h>
64 #include <asm/cpu_device_id.h>
65 #include <asm/uv/uv.h>
66 #include <asm/set_memory.h>
67 #include <asm/traps.h>
68 #include <asm/sev.h>
69 
70 #include "cpu.h"
71 
72 u32 elf_hwcap2 __read_mostly;
73 
74 /* Number of siblings per CPU package */
75 int smp_num_siblings = 1;
76 EXPORT_SYMBOL(smp_num_siblings);
77 
78 /* Last level cache ID of each logical CPU */
79 DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;
80 
81 u16 get_llc_id(unsigned int cpu)
82 {
83 	return per_cpu(cpu_llc_id, cpu);
84 }
85 EXPORT_SYMBOL_GPL(get_llc_id);
86 
87 /* L2 cache ID of each logical CPU */
88 DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_l2c_id) = BAD_APICID;
89 
90 static struct ppin_info {
91 	int	feature;
92 	int	msr_ppin_ctl;
93 	int	msr_ppin;
94 } ppin_info[] = {
95 	[X86_VENDOR_INTEL] = {
96 		.feature = X86_FEATURE_INTEL_PPIN,
97 		.msr_ppin_ctl = MSR_PPIN_CTL,
98 		.msr_ppin = MSR_PPIN
99 	},
100 	[X86_VENDOR_AMD] = {
101 		.feature = X86_FEATURE_AMD_PPIN,
102 		.msr_ppin_ctl = MSR_AMD_PPIN_CTL,
103 		.msr_ppin = MSR_AMD_PPIN
104 	},
105 };
106 
107 static const struct x86_cpu_id ppin_cpuids[] = {
108 	X86_MATCH_FEATURE(X86_FEATURE_AMD_PPIN, &ppin_info[X86_VENDOR_AMD]),
109 	X86_MATCH_FEATURE(X86_FEATURE_INTEL_PPIN, &ppin_info[X86_VENDOR_INTEL]),
110 
111 	/* Legacy models without CPUID enumeration */
112 	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &ppin_info[X86_VENDOR_INTEL]),
113 	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &ppin_info[X86_VENDOR_INTEL]),
114 	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &ppin_info[X86_VENDOR_INTEL]),
115 	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &ppin_info[X86_VENDOR_INTEL]),
116 	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &ppin_info[X86_VENDOR_INTEL]),
117 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &ppin_info[X86_VENDOR_INTEL]),
118 	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &ppin_info[X86_VENDOR_INTEL]),
119 	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &ppin_info[X86_VENDOR_INTEL]),
120 	X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &ppin_info[X86_VENDOR_INTEL]),
121 	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &ppin_info[X86_VENDOR_INTEL]),
122 	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &ppin_info[X86_VENDOR_INTEL]),
123 
124 	{}
125 };
126 
127 static void ppin_init(struct cpuinfo_x86 *c)
128 {
129 	const struct x86_cpu_id *id;
130 	unsigned long long val;
131 	struct ppin_info *info;
132 
133 	id = x86_match_cpu(ppin_cpuids);
134 	if (!id)
135 		return;
136 
137 	/*
138 	 * Testing the presence of the MSR is not enough. We also need to
139 	 * check that PPIN_CTL allows reading of the PPIN.
140 	 */
141 	info = (struct ppin_info *)id->driver_data;
142 
143 	if (rdmsrl_safe(info->msr_ppin_ctl, &val))
144 		goto clear_ppin;
145 
146 	if ((val & 3UL) == 1UL) {
147 		/* PPIN locked in disabled mode */
148 		goto clear_ppin;
149 	}
150 
151 	/* If PPIN is disabled, try to enable */
152 	if (!(val & 2UL)) {
153 		wrmsrl_safe(info->msr_ppin_ctl, val | 2UL);
154 		rdmsrl_safe(info->msr_ppin_ctl, &val);
155 	}
156 
157 	/* Is the enable bit set? */
158 	if (val & 2UL) {
159 		c->ppin = __rdmsr(info->msr_ppin);
160 		set_cpu_cap(c, info->feature);
161 		return;
162 	}
163 
164 clear_ppin:
165 	clear_cpu_cap(c, info->feature);
166 }
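
/*
 * Illustrative sketch (not built, hence the #if 0 guard): how the PPIN_CTL
 * bit tests above decode. Bit 0 is the lock bit and bit 1 the enable bit;
 * "locked with enable clear" (val == 1) is the only unrecoverable state.
 * The helper name is made up for illustration only.
 */
#if 0
static bool ppin_ctl_usable(u64 val)
{
	if ((val & 3UL) == 1UL)		/* locked in disabled mode */
		return false;
	return val & 2UL;		/* usable only once enabled */
}
#endif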
167 
168 static void default_init(struct cpuinfo_x86 *c)
169 {
170 #ifdef CONFIG_X86_64
171 	cpu_detect_cache_sizes(c);
172 #else
173 	/* Not much we can do here... */
174 	/* Check if at least it has cpuid */
175 	if (c->cpuid_level == -1) {
176 		/* No cpuid. It must be an ancient CPU */
177 		if (c->x86 == 4)
178 			strcpy(c->x86_model_id, "486");
179 		else if (c->x86 == 3)
180 			strcpy(c->x86_model_id, "386");
181 	}
182 #endif
183 }
184 
185 static const struct cpu_dev default_cpu = {
186 	.c_init		= default_init,
187 	.c_vendor	= "Unknown",
188 	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
189 };
190 
191 static const struct cpu_dev *this_cpu = &default_cpu;
192 
193 DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
194 #ifdef CONFIG_X86_64
195 	/*
196 	 * We need valid kernel segments for data and code in long mode too;
197 	 * IRET will check the segment types.  kkeil 2000/10/28
198 	 * Also, SYSRET mandates a special GDT layout.
199 	 *
200 	 * TLS descriptors are currently at a different place compared to i386.
201 	 * Hopefully nobody expects them at a fixed place (Wine?)
202 	 */
203 	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
204 	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
205 	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
206 	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
207 	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
208 	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
209 #else
210 	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
211 	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
212 	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
213 	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
214 	/*
215 	 * Segments used for calling PnP BIOS have byte granularity.
216 	 * The code and data segments have fixed 64k limits, while
217 	 * the transfer segment sizes are set at run time.
218 	 */
219 	/* 32-bit code */
220 	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
221 	/* 16-bit code */
222 	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
223 	/* 16-bit data */
224 	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(0x0092, 0, 0xffff),
225 	/* 16-bit data */
226 	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(0x0092, 0, 0),
227 	/* 16-bit data */
228 	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(0x0092, 0, 0),
229 	/*
230 	 * The APM segments have byte granularity and their bases
231 	 * are set at run time.  All have 64k limits.
232 	 */
233 	/* 32-bit code */
234 	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
235 	/* 16-bit code */
236 	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
237 	/* data */
238 	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(0x4092, 0, 0xffff),
239 
240 	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
241 	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
242 #endif
243 } };
244 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
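
/*
 * Illustrative sketch (not built): how the GDT_ENTRY_INIT() flag words
 * above decode. The low byte is the access byte (type, S, DPL, P) and
 * bits 12-15 carry AVL/L/DB/G. E.g. 0xa09b is a present, DPL-0,
 * read/execute, accessed code segment with G=1 and L=1 (64-bit), while
 * 0xc0f3 is a present, DPL-3, read/write data segment with G=1 and DB=1.
 * The helper name is made up for illustration only.
 */
#if 0
static void decode_gdt_flags(u16 flags)
{
	pr_info("type=%#x S=%u DPL=%u P=%u AVL=%u L=%u DB=%u G=%u\n",
		flags & 0xf, (flags >> 4) & 1, (flags >> 5) & 3,
		(flags >> 7) & 1, (flags >> 12) & 1, (flags >> 13) & 1,
		(flags >> 14) & 1, (flags >> 15) & 1);
}
#endif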
245 
246 #ifdef CONFIG_X86_64
247 static int __init x86_nopcid_setup(char *s)
248 {
249 	/* nopcid doesn't accept parameters */
250 	if (s)
251 		return -EINVAL;
252 
253 	/* do not emit a message if the feature is not present */
254 	if (!boot_cpu_has(X86_FEATURE_PCID))
255 		return 0;
256 
257 	setup_clear_cpu_cap(X86_FEATURE_PCID);
258 	pr_info("nopcid: PCID feature disabled\n");
259 	return 0;
260 }
261 early_param("nopcid", x86_nopcid_setup);
262 #endif
263 
264 static int __init x86_noinvpcid_setup(char *s)
265 {
266 	/* noinvpcid doesn't accept parameters */
267 	if (s)
268 		return -EINVAL;
269 
270 	/* do not emit a message if the feature is not present */
271 	if (!boot_cpu_has(X86_FEATURE_INVPCID))
272 		return 0;
273 
274 	setup_clear_cpu_cap(X86_FEATURE_INVPCID);
275 	pr_info("noinvpcid: INVPCID feature disabled\n");
276 	return 0;
277 }
278 early_param("noinvpcid", x86_noinvpcid_setup);
279 
280 #ifdef CONFIG_X86_32
281 static int cachesize_override = -1;
282 static int disable_x86_serial_nr = 1;
283 
284 static int __init cachesize_setup(char *str)
285 {
286 	get_option(&str, &cachesize_override);
287 	return 1;
288 }
289 __setup("cachesize=", cachesize_setup);
290 
291 /* Standard macro to see if a specific flag is changeable */
292 static inline int flag_is_changeable_p(u32 flag)
293 {
294 	u32 f1, f2;
295 
296 	/*
297 	 * Cyrix and IDT cpus allow disabling of CPUID
298 	 * so the code below may return different results
299 	 * when it is executed before and after enabling
300 	 * the CPUID. Add "volatile" so that gcc does not
301 	 * optimize away the subsequent calls to this function.
302 	 */
303 	asm volatile ("pushfl		\n\t"
304 		      "pushfl		\n\t"
305 		      "popl %0		\n\t"
306 		      "movl %0, %1	\n\t"
307 		      "xorl %2, %0	\n\t"
308 		      "pushl %0		\n\t"
309 		      "popfl		\n\t"
310 		      "pushfl		\n\t"
311 		      "popl %0		\n\t"
312 		      "popfl		\n\t"
313 
314 		      : "=&r" (f1), "=&r" (f2)
315 		      : "ir" (flag));
316 
317 	return ((f1^f2) & flag) != 0;
318 }
319 
320 /* Probe for the CPUID instruction */
321 int have_cpuid_p(void)
322 {
323 	return flag_is_changeable_p(X86_EFLAGS_ID);
324 }
325 
326 static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
327 {
328 	unsigned long lo, hi;
329 
330 	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
331 		return;
332 
333 	/* Disable processor serial number: */
334 
335 	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
336 	lo |= 0x200000;
337 	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
338 
339 	pr_notice("CPU serial number disabled.\n");
340 	clear_cpu_cap(c, X86_FEATURE_PN);
341 
342 	/* Disabling the serial number may affect the cpuid level */
343 	c->cpuid_level = cpuid_eax(0);
344 }
345 
346 static int __init x86_serial_nr_setup(char *s)
347 {
348 	disable_x86_serial_nr = 0;
349 	return 1;
350 }
351 __setup("serialnumber", x86_serial_nr_setup);
352 #else
353 static inline int flag_is_changeable_p(u32 flag)
354 {
355 	return 1;
356 }
357 static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
358 {
359 }
360 #endif
361 
362 static __always_inline void setup_smep(struct cpuinfo_x86 *c)
363 {
364 	if (cpu_has(c, X86_FEATURE_SMEP))
365 		cr4_set_bits(X86_CR4_SMEP);
366 }
367 
368 static __always_inline void setup_smap(struct cpuinfo_x86 *c)
369 {
370 	unsigned long eflags = native_save_fl();
371 
372 	/* This should have been cleared long ago */
373 	BUG_ON(eflags & X86_EFLAGS_AC);
374 
375 	if (cpu_has(c, X86_FEATURE_SMAP))
376 		cr4_set_bits(X86_CR4_SMAP);
377 }
378 
379 static __always_inline void setup_umip(struct cpuinfo_x86 *c)
380 {
381 	/* Check the boot processor, plus build option for UMIP. */
382 	if (!cpu_feature_enabled(X86_FEATURE_UMIP))
383 		goto out;
384 
385 	/* Check the current processor's cpuid bits. */
386 	if (!cpu_has(c, X86_FEATURE_UMIP))
387 		goto out;
388 
389 	cr4_set_bits(X86_CR4_UMIP);
390 
391 	pr_info_once("x86/cpu: User Mode Instruction Prevention (UMIP) activated\n");
392 
393 	return;
394 
395 out:
396 	/*
397 	 * Make sure UMIP is disabled in case it was enabled in a
398 	 * previous boot (e.g., via kexec).
399 	 */
400 	cr4_clear_bits(X86_CR4_UMIP);
401 }
402 
403 /* These bits should not change their value after CPU init is finished. */
404 static const unsigned long cr4_pinned_mask =
405 	X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP |
406 	X86_CR4_FSGSBASE | X86_CR4_CET;
407 static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
408 static unsigned long cr4_pinned_bits __ro_after_init;
409 
410 void native_write_cr0(unsigned long val)
411 {
412 	unsigned long bits_missing = 0;
413 
414 set_register:
415 	asm volatile("mov %0,%%cr0": "+r" (val) : : "memory");
416 
417 	if (static_branch_likely(&cr_pinning)) {
418 		if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) {
419 			bits_missing = X86_CR0_WP;
420 			val |= bits_missing;
421 			goto set_register;
422 		}
423 		/* Warn after we've set the missing bits. */
424 		WARN_ONCE(bits_missing, "CR0 WP bit went missing!?\n");
425 	}
426 }
427 EXPORT_SYMBOL(native_write_cr0);
428 
429 void __no_profile native_write_cr4(unsigned long val)
430 {
431 	unsigned long bits_changed = 0;
432 
433 set_register:
434 	asm volatile("mov %0,%%cr4": "+r" (val) : : "memory");
435 
436 	if (static_branch_likely(&cr_pinning)) {
437 		if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
438 			bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits;
439 			val = (val & ~cr4_pinned_mask) | cr4_pinned_bits;
440 			goto set_register;
441 		}
442 		/* Warn after we've corrected the changed bits. */
443 		WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n",
444 			  bits_changed);
445 	}
446 }
447 #if IS_MODULE(CONFIG_LKDTM)
448 EXPORT_SYMBOL_GPL(native_write_cr4);
449 #endif
450 
451 void cr4_update_irqsoff(unsigned long set, unsigned long clear)
452 {
453 	unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);
454 
455 	lockdep_assert_irqs_disabled();
456 
457 	newval = (cr4 & ~clear) | set;
458 	if (newval != cr4) {
459 		this_cpu_write(cpu_tlbstate.cr4, newval);
460 		__write_cr4(newval);
461 	}
462 }
463 EXPORT_SYMBOL(cr4_update_irqsoff);
464 
465 /* Read the CR4 shadow. */
466 unsigned long cr4_read_shadow(void)
467 {
468 	return this_cpu_read(cpu_tlbstate.cr4);
469 }
470 EXPORT_SYMBOL_GPL(cr4_read_shadow);
471 
472 void cr4_init(void)
473 {
474 	unsigned long cr4 = __read_cr4();
475 
476 	if (boot_cpu_has(X86_FEATURE_PCID))
477 		cr4 |= X86_CR4_PCIDE;
478 	if (static_branch_likely(&cr_pinning))
479 		cr4 = (cr4 & ~cr4_pinned_mask) | cr4_pinned_bits;
480 
481 	__write_cr4(cr4);
482 
483 	/* Initialize cr4 shadow for this CPU. */
484 	this_cpu_write(cpu_tlbstate.cr4, cr4);
485 }
486 
487 /*
488  * Once CPU feature detection is finished (and boot params have been
489  * parsed), record any of the sensitive CR bits that are set, and
490  * enable CR pinning.
491  */
492 static void __init setup_cr_pinning(void)
493 {
494 	cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & cr4_pinned_mask;
495 	static_key_enable(&cr_pinning.key);
496 }
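
/*
 * Illustrative sketch (not built): what the pinning above buys. Once
 * cr_pinning is enabled, an attempt to clear a pinned bit is undone by
 * native_write_cr4() and warned about, similar in spirit to the LKDTM
 * CR pinning tests. The function name is made up for illustration only.
 */
#if 0
static void cr4_pinning_demo(void)
{
	/* Try to clear SMEP; native_write_cr4() puts it right back. */
	native_write_cr4(native_read_cr4() & ~X86_CR4_SMEP);
	WARN_ON(!(native_read_cr4() & X86_CR4_SMEP));
}
#endif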
497 
498 static __init int x86_nofsgsbase_setup(char *arg)
499 {
500 	/* Require an exact match without trailing characters. */
501 	if (strlen(arg))
502 		return 0;
503 
504 	/* Do not emit a message if the feature is not present. */
505 	if (!boot_cpu_has(X86_FEATURE_FSGSBASE))
506 		return 1;
507 
508 	setup_clear_cpu_cap(X86_FEATURE_FSGSBASE);
509 	pr_info("FSGSBASE disabled via kernel command line\n");
510 	return 1;
511 }
512 __setup("nofsgsbase", x86_nofsgsbase_setup);
513 
514 /*
515  * Protection Keys are not available in 32-bit mode.
516  */
517 static bool pku_disabled;
518 
519 static __always_inline void setup_pku(struct cpuinfo_x86 *c)
520 {
521 	if (c == &boot_cpu_data) {
522 		if (pku_disabled || !cpu_feature_enabled(X86_FEATURE_PKU))
523 			return;
524 		/*
525 		 * Setting CR4.PKE will cause the X86_FEATURE_OSPKE cpuid
526 		 * bit to be set.  Enforce it.
527 		 */
528 		setup_force_cpu_cap(X86_FEATURE_OSPKE);
529 
530 	} else if (!cpu_feature_enabled(X86_FEATURE_OSPKE)) {
531 		return;
532 	}
533 
534 	cr4_set_bits(X86_CR4_PKE);
535 	/* Load the default PKRU value */
536 	pkru_write_default();
537 }
538 
539 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
540 static __init int setup_disable_pku(char *arg)
541 {
542 	/*
543 	 * Do not clear the X86_FEATURE_PKU bit.  All of the
544 	 * runtime checks are against OSPKE so clearing the
545 	 * bit does nothing.
546 	 *
547 	 * This way, we will see "pku" in cpuinfo, but not
548 	 * "ospke", which is exactly what we want.  It shows
549 	 * that the CPU has PKU, but the OS has not enabled it.
550 	 * This happens to be exactly how a system would look
551 	 * if we disabled the config option.
552 	 */
553 	pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n");
554 	pku_disabled = true;
555 	return 1;
556 }
557 __setup("nopku", setup_disable_pku);
558 #endif
559 
560 #ifdef CONFIG_X86_KERNEL_IBT
561 
562 __noendbr u64 ibt_save(bool disable)
563 {
564 	u64 msr = 0;
565 
566 	if (cpu_feature_enabled(X86_FEATURE_IBT)) {
567 		rdmsrl(MSR_IA32_S_CET, msr);
568 		if (disable)
569 			wrmsrl(MSR_IA32_S_CET, msr & ~CET_ENDBR_EN);
570 	}
571 
572 	return msr;
573 }
574 
575 __noendbr void ibt_restore(u64 save)
576 {
577 	u64 msr;
578 
579 	if (cpu_feature_enabled(X86_FEATURE_IBT)) {
580 		rdmsrl(MSR_IA32_S_CET, msr);
581 		msr &= ~CET_ENDBR_EN;
582 		msr |= (save & CET_ENDBR_EN);
583 		wrmsrl(MSR_IA32_S_CET, msr);
584 	}
585 }
586 
587 #endif
588 
589 static __always_inline void setup_cet(struct cpuinfo_x86 *c)
590 {
591 	u64 msr = CET_ENDBR_EN;
592 
593 	if (!HAS_KERNEL_IBT ||
594 	    !cpu_feature_enabled(X86_FEATURE_IBT))
595 		return;
596 
597 	wrmsrl(MSR_IA32_S_CET, msr);
598 	cr4_set_bits(X86_CR4_CET);
599 
600 	if (!ibt_selftest()) {
601 		pr_err("IBT selftest: Failed!\n");
602 		wrmsrl(MSR_IA32_S_CET, 0);
603 		setup_clear_cpu_cap(X86_FEATURE_IBT);
604 		return;
605 	}
606 }
607 
608 __noendbr void cet_disable(void)
609 {
610 	if (cpu_feature_enabled(X86_FEATURE_IBT))
611 		wrmsrl(MSR_IA32_S_CET, 0);
612 }
613 
614 /*
615  * Some CPU features depend on higher CPUID levels, which may not always
616  * be available due to CPUID level capping or broken virtualization
617  * software.  Add those features to this table to auto-disable them.
618  */
619 struct cpuid_dependent_feature {
620 	u32 feature;
621 	u32 level;
622 };
623 
624 static const struct cpuid_dependent_feature
625 cpuid_dependent_features[] = {
626 	{ X86_FEATURE_MWAIT,		0x00000005 },
627 	{ X86_FEATURE_DCA,		0x00000009 },
628 	{ X86_FEATURE_XSAVE,		0x0000000d },
629 	{ 0, 0 }
630 };
631 
632 static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
633 {
634 	const struct cpuid_dependent_feature *df;
635 
636 	for (df = cpuid_dependent_features; df->feature; df++) {
637 
638 		if (!cpu_has(c, df->feature))
639 			continue;
640 		/*
641 		 * Note: cpuid_level is set to -1 if unavailable, but
642 	 * extended_cpuid_level is set to 0 if unavailable
643 		 * and the legitimate extended levels are all negative
644 		 * when signed; hence the weird messing around with
645 		 * signs here...
646 		 */
647 		if (!((s32)df->level < 0 ?
648 		     (u32)df->level > (u32)c->extended_cpuid_level :
649 		     (s32)df->level > (s32)c->cpuid_level))
650 			continue;
651 
652 		clear_cpu_cap(c, df->feature);
653 		if (!warn)
654 			continue;
655 
656 		pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
657 			x86_cap_flag(df->feature), df->level);
658 	}
659 }
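
/*
 * Worked example (illustrative) of the sign trick above: a level such as
 * 0x8000000a is negative as s32, so it is compared unsigned against
 * extended_cpuid_level; a basic level such as 0x0000000d stays a signed
 * compare, so a cpuid_level of -1 (no CPUID at all) correctly disables
 * the dependent feature.
 */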
660 
661 /*
662  * Naming convention should be: <Name> [(<Codename>)]
663  * This table is only used if init_<vendor>() below doesn't set the name;
664  * in particular, if CPUID levels 0x80000002..4 are supported, this
665  * isn't used.
666  */
667 
668 /* Look up CPU names by table lookup. */
669 static const char *table_lookup_model(struct cpuinfo_x86 *c)
670 {
671 #ifdef CONFIG_X86_32
672 	const struct legacy_cpu_model_info *info;
673 
674 	if (c->x86_model >= 16)
675 		return NULL;	/* Range check */
676 
677 	if (!this_cpu)
678 		return NULL;
679 
680 	info = this_cpu->legacy_models;
681 
682 	while (info->family) {
683 		if (info->family == c->x86)
684 			return info->model_names[c->x86_model];
685 		info++;
686 	}
687 #endif
688 	return NULL;		/* Not found */
689 }
690 
691 /* Aligned to unsigned long to avoid split lock in atomic bitmap ops */
692 __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
693 __u32 cpu_caps_set[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
694 
695 #ifdef CONFIG_X86_32
696 /* The 32-bit entry code needs to find cpu_entry_area. */
697 DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
698 #endif
699 
700 /* Load the original GDT from the per-cpu structure */
701 void load_direct_gdt(int cpu)
702 {
703 	struct desc_ptr gdt_descr;
704 
705 	gdt_descr.address = (long)get_cpu_gdt_rw(cpu);
706 	gdt_descr.size = GDT_SIZE - 1;
707 	load_gdt(&gdt_descr);
708 }
709 EXPORT_SYMBOL_GPL(load_direct_gdt);
710 
711 /* Load a fixmap remapping of the per-cpu GDT */
712 void load_fixmap_gdt(int cpu)
713 {
714 	struct desc_ptr gdt_descr;
715 
716 	gdt_descr.address = (long)get_cpu_gdt_ro(cpu);
717 	gdt_descr.size = GDT_SIZE - 1;
718 	load_gdt(&gdt_descr);
719 }
720 EXPORT_SYMBOL_GPL(load_fixmap_gdt);
721 
722 /**
723  * switch_gdt_and_percpu_base - Switch to direct GDT and runtime per CPU base
724  * @cpu:	The CPU number for which this is invoked
725  *
726  * Invoked during early boot to switch from early GDT and early per CPU to
727  * the direct GDT and the runtime per CPU area. On 32-bit the percpu base
728  * switch is implicit by loading the direct GDT. On 64bit this requires
729  * switch is implicit by loading the direct GDT. On 64-bit this requires
730  * updating GSBASE.
731 void __init switch_gdt_and_percpu_base(int cpu)
732 {
733 	load_direct_gdt(cpu);
734 
735 #ifdef CONFIG_X86_64
736 	/*
737 	 * No need to load %gs. It is already correct.
738 	 *
739 	 * Writing %gs on 64-bit would zero GSBASE, which would make any
740 	 * per CPU operation fault up to the point of the wrmsrl() below.
741 	 *
742 	 * Set GSBASE to the new offset. Until the wrmsrl() happens the
743 	 * early mapping is still valid. That means the GSBASE update will
744 	 * lose any prior per CPU data which was not copied over in
745 	 * setup_per_cpu_areas().
746 	 *
747 	 * This works even with stackprotector enabled because the
748 	 * per CPU stack canary is 0 in both per CPU areas.
749 	 */
750 	wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
751 #else
752 	/*
753 	 * %fs is already set to __KERNEL_PERCPU, but after switching GDT
754 	 * it is required to load FS again so that the 'hidden' part is
755 	 * updated from the new GDT. Up to this point the early per CPU
756 	 * translation is active. Any content of the early per CPU data
757 	 * which was not copied over in setup_per_cpu_areas() is lost.
758 	 */
759 	loadsegment(fs, __KERNEL_PERCPU);
760 #endif
761 }
762 
763 static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
764 
765 static void get_model_name(struct cpuinfo_x86 *c)
766 {
767 	unsigned int *v;
768 	char *p, *q, *s;
769 
770 	if (c->extended_cpuid_level < 0x80000004)
771 		return;
772 
773 	v = (unsigned int *)c->x86_model_id;
774 	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
775 	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
776 	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
777 	c->x86_model_id[48] = 0;
778 
779 	/* Trim whitespace */
780 	p = q = s = &c->x86_model_id[0];
781 
782 	while (*p == ' ')
783 		p++;
784 
785 	while (*p) {
786 		/* Note the last non-whitespace index */
787 		if (!isspace(*p))
788 			s = q;
789 
790 		*q++ = *p++;
791 	}
792 
793 	*(s + 1) = '\0';
794 }
795 
796 void detect_num_cpu_cores(struct cpuinfo_x86 *c)
797 {
798 	unsigned int eax, ebx, ecx, edx;
799 
800 	c->x86_max_cores = 1;
801 	if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
802 		return;
803 
804 	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
805 	if (eax & 0x1f)
806 		c->x86_max_cores = (eax >> 26) + 1;
807 }
808 
809 void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
810 {
811 	unsigned int n, dummy, ebx, ecx, edx, l2size;
812 
813 	n = c->extended_cpuid_level;
814 
815 	if (n >= 0x80000005) {
816 		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
817 		c->x86_cache_size = (ecx>>24) + (edx>>24);
818 #ifdef CONFIG_X86_64
819 		/* On K8 L1 TLB is inclusive, so don't count it */
820 		c->x86_tlbsize = 0;
821 #endif
822 	}
823 
824 	if (n < 0x80000006)	/* Some chips just have a large L1. */
825 		return;
826 
827 	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
828 	l2size = ecx >> 16;
829 
830 #ifdef CONFIG_X86_64
831 	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
832 #else
833 	/* do processor-specific cache resizing */
834 	if (this_cpu->legacy_cache_size)
835 		l2size = this_cpu->legacy_cache_size(c, l2size);
836 
837 	/* Allow user to override all this if necessary. */
838 	if (cachesize_override != -1)
839 		l2size = cachesize_override;
840 
841 	if (l2size == 0)
842 		return;		/* Again, no L2 cache is possible */
843 #endif
844 
845 	c->x86_cache_size = l2size;
846 }
847 
848 u16 __read_mostly tlb_lli_4k[NR_INFO];
849 u16 __read_mostly tlb_lli_2m[NR_INFO];
850 u16 __read_mostly tlb_lli_4m[NR_INFO];
851 u16 __read_mostly tlb_lld_4k[NR_INFO];
852 u16 __read_mostly tlb_lld_2m[NR_INFO];
853 u16 __read_mostly tlb_lld_4m[NR_INFO];
854 u16 __read_mostly tlb_lld_1g[NR_INFO];
855 
856 static void cpu_detect_tlb(struct cpuinfo_x86 *c)
857 {
858 	if (this_cpu->c_detect_tlb)
859 		this_cpu->c_detect_tlb(c);
860 
861 	pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
862 		tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
863 		tlb_lli_4m[ENTRIES]);
864 
865 	pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
866 		tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
867 		tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
868 }
869 
870 int detect_ht_early(struct cpuinfo_x86 *c)
871 {
872 #ifdef CONFIG_SMP
873 	u32 eax, ebx, ecx, edx;
874 
875 	if (!cpu_has(c, X86_FEATURE_HT))
876 		return -1;
877 
878 	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
879 		return -1;
880 
881 	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
882 		return -1;
883 
884 	cpuid(1, &eax, &ebx, &ecx, &edx);
885 
886 	smp_num_siblings = (ebx & 0xff0000) >> 16;
887 	if (smp_num_siblings == 1)
888 		pr_info_once("CPU0: Hyper-Threading is disabled\n");
889 #endif
890 	return 0;
891 }
892 
893 void detect_ht(struct cpuinfo_x86 *c)
894 {
895 #ifdef CONFIG_SMP
896 	int index_msb, core_bits;
897 
898 	if (detect_ht_early(c) < 0)
899 		return;
900 
901 	index_msb = get_count_order(smp_num_siblings);
902 	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
903 
904 	smp_num_siblings = smp_num_siblings / c->x86_max_cores;
905 
906 	index_msb = get_count_order(smp_num_siblings);
907 
908 	core_bits = get_count_order(c->x86_max_cores);
909 
910 	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
911 				       ((1 << core_bits) - 1);
912 #endif
913 }
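
/*
 * Worked example (illustrative) of the topology math above: on a
 * 4-core/8-thread package, CPUID leaf 1 reports smp_num_siblings = 8,
 * so index_msb = 3 strips the low three APIC id bits to form
 * phys_proc_id. After dividing by x86_max_cores = 4 there are 2 threads
 * per core, index_msb becomes 1, core_bits = 2, and cpu_core_id ends up
 * as APIC id bits [2:1].
 */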
914 
915 static void get_cpu_vendor(struct cpuinfo_x86 *c)
916 {
917 	char *v = c->x86_vendor_id;
918 	int i;
919 
920 	for (i = 0; i < X86_VENDOR_NUM; i++) {
921 		if (!cpu_devs[i])
922 			break;
923 
924 		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
925 		    (cpu_devs[i]->c_ident[1] &&
926 		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
927 
928 			this_cpu = cpu_devs[i];
929 			c->x86_vendor = this_cpu->c_x86_vendor;
930 			return;
931 		}
932 	}
933 
934 	pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
935 		    "CPU: Your system may be unstable.\n", v);
936 
937 	c->x86_vendor = X86_VENDOR_UNKNOWN;
938 	this_cpu = &default_cpu;
939 }
940 
941 void cpu_detect(struct cpuinfo_x86 *c)
942 {
943 	/* Get vendor name */
944 	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
945 	      (unsigned int *)&c->x86_vendor_id[0],
946 	      (unsigned int *)&c->x86_vendor_id[8],
947 	      (unsigned int *)&c->x86_vendor_id[4]);
948 
949 	c->x86 = 4;
950 	/* Intel-defined flags: level 0x00000001 */
951 	if (c->cpuid_level >= 0x00000001) {
952 		u32 junk, tfms, cap0, misc;
953 
954 		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
955 		c->x86		= x86_family(tfms);
956 		c->x86_model	= x86_model(tfms);
957 		c->x86_stepping	= x86_stepping(tfms);
958 
959 		if (cap0 & (1<<19)) {
960 			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
961 			c->x86_cache_alignment = c->x86_clflush_size;
962 		}
963 	}
964 }
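
/*
 * Worked example (illustrative): tfms = 0x000806ea decodes to family 6,
 * model 0x8e (extended model 0x8, base model 0xe) and stepping 0xa, a
 * Kaby Lake Refresh part. cap0 bit 19 is CLFSH, and misc[15:8] then
 * gives the CLFLUSH line size in units of 8 bytes.
 */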
965 
966 static void apply_forced_caps(struct cpuinfo_x86 *c)
967 {
968 	int i;
969 
970 	for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
971 		c->x86_capability[i] &= ~cpu_caps_cleared[i];
972 		c->x86_capability[i] |= cpu_caps_set[i];
973 	}
974 }
975 
976 static void init_speculation_control(struct cpuinfo_x86 *c)
977 {
978 	/*
979 	 * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
980 	 * and they also have a different bit for STIBP support. Also,
981 	 * a hypervisor might have set the individual AMD bits even on
982 	 * Intel CPUs, for finer-grained selection of what's available.
983 	 */
984 	if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
985 		set_cpu_cap(c, X86_FEATURE_IBRS);
986 		set_cpu_cap(c, X86_FEATURE_IBPB);
987 		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
988 	}
989 
990 	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
991 		set_cpu_cap(c, X86_FEATURE_STIBP);
992 
993 	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
994 	    cpu_has(c, X86_FEATURE_VIRT_SSBD))
995 		set_cpu_cap(c, X86_FEATURE_SSBD);
996 
997 	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
998 		set_cpu_cap(c, X86_FEATURE_IBRS);
999 		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
1000 	}
1001 
1002 	if (cpu_has(c, X86_FEATURE_AMD_IBPB))
1003 		set_cpu_cap(c, X86_FEATURE_IBPB);
1004 
1005 	if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
1006 		set_cpu_cap(c, X86_FEATURE_STIBP);
1007 		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
1008 	}
1009 
1010 	if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
1011 		set_cpu_cap(c, X86_FEATURE_SSBD);
1012 		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
1013 		clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
1014 	}
1015 }
1016 
1017 void get_cpu_cap(struct cpuinfo_x86 *c)
1018 {
1019 	u32 eax, ebx, ecx, edx;
1020 
1021 	/* Intel-defined flags: level 0x00000001 */
1022 	if (c->cpuid_level >= 0x00000001) {
1023 		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
1024 
1025 		c->x86_capability[CPUID_1_ECX] = ecx;
1026 		c->x86_capability[CPUID_1_EDX] = edx;
1027 	}
1028 
1029 	/* Thermal and Power Management Leaf: level 0x00000006 (eax) */
1030 	if (c->cpuid_level >= 0x00000006)
1031 		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
1032 
1033 	/* Additional Intel-defined flags: level 0x00000007 */
1034 	if (c->cpuid_level >= 0x00000007) {
1035 		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
1036 		c->x86_capability[CPUID_7_0_EBX] = ebx;
1037 		c->x86_capability[CPUID_7_ECX] = ecx;
1038 		c->x86_capability[CPUID_7_EDX] = edx;
1039 
1040 		/* Check valid sub-leaf index before accessing it */
1041 		if (eax >= 1) {
1042 			cpuid_count(0x00000007, 1, &eax, &ebx, &ecx, &edx);
1043 			c->x86_capability[CPUID_7_1_EAX] = eax;
1044 		}
1045 	}
1046 
1047 	/* Extended state features: level 0x0000000d */
1048 	if (c->cpuid_level >= 0x0000000d) {
1049 		cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);
1050 
1051 		c->x86_capability[CPUID_D_1_EAX] = eax;
1052 	}
1053 
1054 	/* AMD-defined flags: level 0x80000001 */
1055 	eax = cpuid_eax(0x80000000);
1056 	c->extended_cpuid_level = eax;
1057 
1058 	if ((eax & 0xffff0000) == 0x80000000) {
1059 		if (eax >= 0x80000001) {
1060 			cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
1061 
1062 			c->x86_capability[CPUID_8000_0001_ECX] = ecx;
1063 			c->x86_capability[CPUID_8000_0001_EDX] = edx;
1064 		}
1065 	}
1066 
1067 	if (c->extended_cpuid_level >= 0x80000007) {
1068 		cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
1069 
1070 		c->x86_capability[CPUID_8000_0007_EBX] = ebx;
1071 		c->x86_power = edx;
1072 	}
1073 
1074 	if (c->extended_cpuid_level >= 0x80000008) {
1075 		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
1076 		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
1077 	}
1078 
1079 	if (c->extended_cpuid_level >= 0x8000000a)
1080 		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
1081 
1082 	if (c->extended_cpuid_level >= 0x8000001f)
1083 		c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f);
1084 
1085 	if (c->extended_cpuid_level >= 0x80000021)
1086 		c->x86_capability[CPUID_8000_0021_EAX] = cpuid_eax(0x80000021);
1087 
1088 	init_scattered_cpuid_features(c);
1089 	init_speculation_control(c);
1090 
1091 	/*
1092 	 * Clear/Set all flags overridden by options, after probe.
1093 	 * This needs to happen each time we re-probe, which may happen
1094 	 * several times during CPU initialization.
1095 	 */
1096 	apply_forced_caps(c);
1097 }
1098 
1099 void get_cpu_address_sizes(struct cpuinfo_x86 *c)
1100 {
1101 	u32 eax, ebx, ecx, edx;
1102 
1103 	if (c->extended_cpuid_level >= 0x80000008) {
1104 		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
1105 
1106 		c->x86_virt_bits = (eax >> 8) & 0xff;
1107 		c->x86_phys_bits = eax & 0xff;
1108 	}
1109 #ifdef CONFIG_X86_32
1110 	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
1111 		c->x86_phys_bits = 36;
1112 #endif
1113 	c->x86_cache_bits = c->x86_phys_bits;
1114 }
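
/*
 * Worked example (illustrative): a typical client CPU returns
 * CPUID 0x80000008 EAX = 0x3027, i.e. 39 physical and 48 virtual
 * address bits.
 */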
1115 
1116 static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
1117 {
1118 #ifdef CONFIG_X86_32
1119 	int i;
1120 
1121 	/*
1122 	 * First of all, decide if this is a 486 or higher;
1123 	 * it's a 486 if we can modify the AC flag.
1124 	 */
1125 	if (flag_is_changeable_p(X86_EFLAGS_AC))
1126 		c->x86 = 4;
1127 	else
1128 		c->x86 = 3;
1129 
1130 	for (i = 0; i < X86_VENDOR_NUM; i++)
1131 		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
1132 			c->x86_vendor_id[0] = 0;
1133 			cpu_devs[i]->c_identify(c);
1134 			if (c->x86_vendor_id[0]) {
1135 				get_cpu_vendor(c);
1136 				break;
1137 			}
1138 		}
1139 #endif
1140 }
1141 
1142 #define NO_SPECULATION		BIT(0)
1143 #define NO_MELTDOWN		BIT(1)
1144 #define NO_SSB			BIT(2)
1145 #define NO_L1TF			BIT(3)
1146 #define NO_MDS			BIT(4)
1147 #define MSBDS_ONLY		BIT(5)
1148 #define NO_SWAPGS		BIT(6)
1149 #define NO_ITLB_MULTIHIT	BIT(7)
1150 #define NO_SPECTRE_V2		BIT(8)
1151 #define NO_MMIO			BIT(9)
1152 #define NO_EIBRS_PBRSB		BIT(10)
1153 
1154 #define VULNWL(vendor, family, model, whitelist)	\
1155 	X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)
1156 
1157 #define VULNWL_INTEL(model, whitelist)		\
1158 	VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)
1159 
1160 #define VULNWL_AMD(family, whitelist)		\
1161 	VULNWL(AMD, family, X86_MODEL_ANY, whitelist)
1162 
1163 #define VULNWL_HYGON(family, whitelist)		\
1164 	VULNWL(HYGON, family, X86_MODEL_ANY, whitelist)
1165 
1166 static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
1167 	VULNWL(ANY,	4, X86_MODEL_ANY,	NO_SPECULATION),
1168 	VULNWL(CENTAUR,	5, X86_MODEL_ANY,	NO_SPECULATION),
1169 	VULNWL(INTEL,	5, X86_MODEL_ANY,	NO_SPECULATION),
1170 	VULNWL(NSC,	5, X86_MODEL_ANY,	NO_SPECULATION),
1171 	VULNWL(VORTEX,	5, X86_MODEL_ANY,	NO_SPECULATION),
1172 	VULNWL(VORTEX,	6, X86_MODEL_ANY,	NO_SPECULATION),
1173 
1174 	/* Intel Family 6 */
1175 	VULNWL_INTEL(TIGERLAKE,			NO_MMIO),
1176 	VULNWL_INTEL(TIGERLAKE_L,		NO_MMIO),
1177 	VULNWL_INTEL(ALDERLAKE,			NO_MMIO),
1178 	VULNWL_INTEL(ALDERLAKE_L,		NO_MMIO),
1179 
1180 	VULNWL_INTEL(ATOM_SALTWELL,		NO_SPECULATION | NO_ITLB_MULTIHIT),
1181 	VULNWL_INTEL(ATOM_SALTWELL_TABLET,	NO_SPECULATION | NO_ITLB_MULTIHIT),
1182 	VULNWL_INTEL(ATOM_SALTWELL_MID,		NO_SPECULATION | NO_ITLB_MULTIHIT),
1183 	VULNWL_INTEL(ATOM_BONNELL,		NO_SPECULATION | NO_ITLB_MULTIHIT),
1184 	VULNWL_INTEL(ATOM_BONNELL_MID,		NO_SPECULATION | NO_ITLB_MULTIHIT),
1185 
1186 	VULNWL_INTEL(ATOM_SILVERMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1187 	VULNWL_INTEL(ATOM_SILVERMONT_D,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1188 	VULNWL_INTEL(ATOM_SILVERMONT_MID,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1189 	VULNWL_INTEL(ATOM_AIRMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1190 	VULNWL_INTEL(XEON_PHI_KNL,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1191 	VULNWL_INTEL(XEON_PHI_KNM,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1192 
1193 	VULNWL_INTEL(CORE_YONAH,		NO_SSB),
1194 
1195 	VULNWL_INTEL(ATOM_AIRMONT_MID,		NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1196 	VULNWL_INTEL(ATOM_AIRMONT_NP,		NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
1197 
1198 	VULNWL_INTEL(ATOM_GOLDMONT,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
1199 	VULNWL_INTEL(ATOM_GOLDMONT_D,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
1200 	VULNWL_INTEL(ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
1201 
1202 	/*
1203 	 * Technically, swapgs isn't serializing on AMD (despite it previously
1204 	 * being documented as such in the APM).  But according to AMD, %gs is
1205 	 * updated non-speculatively, and the issuing of %gs-relative memory
1206 	 * operands will be blocked until the %gs update completes, which is
1207 	 * good enough for our purposes.
1208 	 */
1209 
1210 	VULNWL_INTEL(ATOM_TREMONT,		NO_EIBRS_PBRSB),
1211 	VULNWL_INTEL(ATOM_TREMONT_L,		NO_EIBRS_PBRSB),
1212 	VULNWL_INTEL(ATOM_TREMONT_D,		NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
1213 
1214 	/* AMD Family 0xf - 0x12 */
1215 	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
1216 	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
1217 	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
1218 	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
1219 
1220 	/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
1221 	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
1222 	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
1223 
1224 	/* Zhaoxin Family 7 */
1225 	VULNWL(CENTAUR,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
1226 	VULNWL(ZHAOXIN,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
1227 	{}
1228 };
1229 
1230 #define VULNBL(vendor, family, model, blacklist)	\
1231 	X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, blacklist)
1232 
1233 #define VULNBL_INTEL_STEPPINGS(model, steppings, issues)		   \
1234 	X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6,		   \
1235 					    INTEL_FAM6_##model, steppings, \
1236 					    X86_FEATURE_ANY, issues)
1237 
1238 #define VULNBL_AMD(family, blacklist)		\
1239 	VULNBL(AMD, family, X86_MODEL_ANY, blacklist)
1240 
1241 #define VULNBL_HYGON(family, blacklist)		\
1242 	VULNBL(HYGON, family, X86_MODEL_ANY, blacklist)
1243 
1244 #define SRBDS		BIT(0)
1245 /* CPU is affected by X86_BUG_MMIO_STALE_DATA */
1246 #define MMIO		BIT(1)
1247 /* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */
1248 #define MMIO_SBDS	BIT(2)
1249 /* CPU is affected by RETbleed, speculating where you would not expect it */
1250 #define RETBLEED	BIT(3)
1251 /* CPU is affected by SMT (cross-thread) return predictions */
1252 #define SMT_RSB		BIT(4)
1253 
1254 static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
1255 	VULNBL_INTEL_STEPPINGS(IVYBRIDGE,	X86_STEPPING_ANY,		SRBDS),
1256 	VULNBL_INTEL_STEPPINGS(HASWELL,		X86_STEPPING_ANY,		SRBDS),
1257 	VULNBL_INTEL_STEPPINGS(HASWELL_L,	X86_STEPPING_ANY,		SRBDS),
1258 	VULNBL_INTEL_STEPPINGS(HASWELL_G,	X86_STEPPING_ANY,		SRBDS),
1259 	VULNBL_INTEL_STEPPINGS(HASWELL_X,	X86_STEPPING_ANY,		MMIO),
1260 	VULNBL_INTEL_STEPPINGS(BROADWELL_D,	X86_STEPPING_ANY,		MMIO),
1261 	VULNBL_INTEL_STEPPINGS(BROADWELL_G,	X86_STEPPING_ANY,		SRBDS),
1262 	VULNBL_INTEL_STEPPINGS(BROADWELL_X,	X86_STEPPING_ANY,		MMIO),
1263 	VULNBL_INTEL_STEPPINGS(BROADWELL,	X86_STEPPING_ANY,		SRBDS),
1264 	VULNBL_INTEL_STEPPINGS(SKYLAKE_L,	X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED),
1265 	VULNBL_INTEL_STEPPINGS(SKYLAKE_X,	X86_STEPPING_ANY,		MMIO | RETBLEED),
1266 	VULNBL_INTEL_STEPPINGS(SKYLAKE,		X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED),
1267 	VULNBL_INTEL_STEPPINGS(KABYLAKE_L,	X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED),
1268 	VULNBL_INTEL_STEPPINGS(KABYLAKE,	X86_STEPPING_ANY,		SRBDS | MMIO | RETBLEED),
1269 	VULNBL_INTEL_STEPPINGS(CANNONLAKE_L,	X86_STEPPING_ANY,		RETBLEED),
1270 	VULNBL_INTEL_STEPPINGS(ICELAKE_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
1271 	VULNBL_INTEL_STEPPINGS(ICELAKE_D,	X86_STEPPING_ANY,		MMIO),
1272 	VULNBL_INTEL_STEPPINGS(ICELAKE_X,	X86_STEPPING_ANY,		MMIO),
1273 	VULNBL_INTEL_STEPPINGS(COMETLAKE,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
1274 	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPINGS(0x0, 0x0),	MMIO | RETBLEED),
1275 	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
1276 	VULNBL_INTEL_STEPPINGS(LAKEFIELD,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
1277 	VULNBL_INTEL_STEPPINGS(ROCKETLAKE,	X86_STEPPING_ANY,		MMIO | RETBLEED),
1278 	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS),
1279 	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D,	X86_STEPPING_ANY,		MMIO),
1280 	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS),
1281 
1282 	VULNBL_AMD(0x15, RETBLEED),
1283 	VULNBL_AMD(0x16, RETBLEED),
1284 	VULNBL_AMD(0x17, RETBLEED | SMT_RSB),
1285 	VULNBL_HYGON(0x18, RETBLEED | SMT_RSB),
1286 	{}
1287 };
1288 
1289 static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which)
1290 {
1291 	const struct x86_cpu_id *m = x86_match_cpu(table);
1292 
1293 	return m && !!(m->driver_data & which);
1294 }
1295 
1296 u64 x86_read_arch_cap_msr(void)
1297 {
1298 	u64 ia32_cap = 0;
1299 
1300 	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
1301 		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
1302 
1303 	return ia32_cap;
1304 }
1305 
1306 static bool arch_cap_mmio_immune(u64 ia32_cap)
1307 {
1308 	return (ia32_cap & ARCH_CAP_FBSDP_NO &&
1309 		ia32_cap & ARCH_CAP_PSDP_NO &&
1310 		ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
1311 }
1312 
1313 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
1314 {
1315 	u64 ia32_cap = x86_read_arch_cap_msr();
1316 
1317 	/* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
1318 	if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
1319 	    !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
1320 		setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
1321 
1322 	if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
1323 		return;
1324 
1325 	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
1326 
1327 	if (!cpu_matches(cpu_vuln_whitelist, NO_SPECTRE_V2))
1328 		setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
1329 
1330 	if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
1331 	    !(ia32_cap & ARCH_CAP_SSB_NO) &&
1332 	   !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
1333 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
1334 
1335 	/*
1336 	 * AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature
1337 	 * flag and protect from vendor-specific bugs via the whitelist.
1338 	 */
1339 	if ((ia32_cap & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
1340 		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
1341 		if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
1342 		    !(ia32_cap & ARCH_CAP_PBRSB_NO))
1343 			setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
1344 	}
1345 
1346 	if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
1347 	    !(ia32_cap & ARCH_CAP_MDS_NO)) {
1348 		setup_force_cpu_bug(X86_BUG_MDS);
1349 		if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
1350 			setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
1351 	}
1352 
1353 	if (!cpu_matches(cpu_vuln_whitelist, NO_SWAPGS))
1354 		setup_force_cpu_bug(X86_BUG_SWAPGS);
1355 
1356 	/*
1357 	 * When the CPU is not mitigated for TAA (TAA_NO=0) set TAA bug when:
1358 	 *	- TSX is supported or
1359 	 *	- TSX_CTRL is present
1360 	 *
1361 	 * The TSX_CTRL check is needed for cases when TSX could be disabled
1362 	 * before the kernel boots, e.g. by kexec.
1363 	 * The TSX_CTRL check alone is not sufficient when the microcode update
1364 	 * is not present, or when running as a guest that doesn't get TSX_CTRL.
1365 	 */
1366 	if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
1367 	    (cpu_has(c, X86_FEATURE_RTM) ||
1368 	     (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
1369 		setup_force_cpu_bug(X86_BUG_TAA);
1370 
1371 	/*
1372 	 * SRBDS affects CPUs which support RDRAND or RDSEED and are listed
1373 	 * in the vulnerability blacklist.
1374 	 *
1375 	 * Some of the implications and mitigation of Shared Buffers Data
1376 	 * Sampling (SBDS) are similar to SRBDS. Give SBDS same treatment as
1377 	 * SRBDS.
1378 	 */
1379 	if ((cpu_has(c, X86_FEATURE_RDRAND) ||
1380 	     cpu_has(c, X86_FEATURE_RDSEED)) &&
1381 	    cpu_matches(cpu_vuln_blacklist, SRBDS | MMIO_SBDS))
1382 		    setup_force_cpu_bug(X86_BUG_SRBDS);
1383 
1384 	/*
1385 	 * Processor MMIO Stale Data bug enumeration
1386 	 *
1387 	 * The affected CPU list is generally enough to enumerate the
1388 	 * vulnerability, but in the virtualization case also check the ARCH_CAP
1389 	 * MSR bits; the VMM may not want the guest to enumerate the bug.
1390 	 *
1391 	 * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
1392 	 * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
1393 	 */
1394 	if (!arch_cap_mmio_immune(ia32_cap)) {
1395 		if (cpu_matches(cpu_vuln_blacklist, MMIO))
1396 			setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
1397 		else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
1398 			setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN);
1399 	}
1400 
1401 	if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
1402 		if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
1403 			setup_force_cpu_bug(X86_BUG_RETBLEED);
1404 	}
1405 
1406 	if (cpu_matches(cpu_vuln_blacklist, SMT_RSB))
1407 		setup_force_cpu_bug(X86_BUG_SMT_RSB);
1408 
1409 	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
1410 		return;
1411 
1412 	/* Rogue Data Cache Load? No! */
1413 	if (ia32_cap & ARCH_CAP_RDCL_NO)
1414 		return;
1415 
1416 	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
1417 
1418 	if (cpu_matches(cpu_vuln_whitelist, NO_L1TF))
1419 		return;
1420 
1421 	setup_force_cpu_bug(X86_BUG_L1TF);
1422 }
1423 
1424 /*
1425  * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
1426  * unfortunately, that's not true in practice because of early VIA
1427  * chips and (more importantly) broken virtualizers that are not easy
1428  * to detect. In the latter case it doesn't even *fail* reliably, so
1429  * probing for it doesn't even work. Disable it completely on 32-bit
1430  * unless we can find a reliable way to detect all the broken cases.
1431  * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
1432  */
1433 static void detect_nopl(void)
1434 {
1435 #ifdef CONFIG_X86_32
1436 	setup_clear_cpu_cap(X86_FEATURE_NOPL);
1437 #else
1438 	setup_force_cpu_cap(X86_FEATURE_NOPL);
1439 #endif
1440 }
1441 
1442 /*
1443  * We parse cpu parameters early because fpu__init_system() is executed
1444  * before parse_early_param().
1445  */
1446 static void __init cpu_parse_early_param(void)
1447 {
1448 	char arg[128];
1449 	char *argptr = arg, *opt;
1450 	int arglen, taint = 0;
1451 
1452 #ifdef CONFIG_X86_32
1453 	if (cmdline_find_option_bool(boot_command_line, "no387"))
1454 #ifdef CONFIG_MATH_EMULATION
1455 		setup_clear_cpu_cap(X86_FEATURE_FPU);
1456 #else
1457 		pr_err("Option 'no387' requires CONFIG_MATH_EMULATION to be enabled.\n");
1458 #endif
1459 
1460 	if (cmdline_find_option_bool(boot_command_line, "nofxsr"))
1461 		setup_clear_cpu_cap(X86_FEATURE_FXSR);
1462 #endif
1463 
1464 	if (cmdline_find_option_bool(boot_command_line, "noxsave"))
1465 		setup_clear_cpu_cap(X86_FEATURE_XSAVE);
1466 
1467 	if (cmdline_find_option_bool(boot_command_line, "noxsaveopt"))
1468 		setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
1469 
1470 	if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
1471 		setup_clear_cpu_cap(X86_FEATURE_XSAVES);
1472 
1473 	arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg));
1474 	if (arglen <= 0)
1475 		return;
1476 
1477 	pr_info("Clearing CPUID bits:");
1478 
1479 	while (argptr) {
1480 		bool found __maybe_unused = false;
1481 		unsigned int bit;
1482 
1483 		opt = strsep(&argptr, ",");
1484 
1485 		/*
1486 		 * Handle naked numbers first for feature flags which don't
1487 		 * have names.
1488 		 */
1489 		if (!kstrtouint(opt, 10, &bit)) {
1490 			if (bit < NCAPINTS * 32) {
1491 
1492 				/* empty-string, i.e., ""-defined feature flags */
1493 				if (!x86_cap_flags[bit])
1494 					pr_cont(" " X86_CAP_FMT_NUM, x86_cap_flag_num(bit));
1495 				else
1496 					pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit));
1497 
1498 				setup_clear_cpu_cap(bit);
1499 				taint++;
1500 			}
1501 			/*
1502 			 * The assumption is that there are no feature names with only
1503 			 * numbers in the name, thus go to the next argument.
1504 			 */
1505 			continue;
1506 		}
1507 
1508 		for (bit = 0; bit < 32 * NCAPINTS; bit++) {
1509 			if (!x86_cap_flag(bit))
1510 				continue;
1511 
1512 			if (strcmp(x86_cap_flag(bit), opt))
1513 				continue;
1514 
1515 			pr_cont(" %s", opt);
1516 			setup_clear_cpu_cap(bit);
1517 			taint++;
1518 			found = true;
1519 			break;
1520 		}
1521 
1522 		if (!found)
1523 			pr_cont(" (unknown: %s)", opt);
1524 	}
1525 	pr_cont("\n");
1526 
1527 	if (taint)
1528 		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
1529 }
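
/*
 * Example (illustrative) command lines accepted by the parser above:
 *
 *   clearcpuid=avx2,avx512f    - clear features by name
 *   clearcpuid=154             - clear a feature by raw bit number
 *
 * Either form taints the kernel with TAINT_CPU_OUT_OF_SPEC.
 */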
1530 
1531 /*
1532  * Do minimum CPU detection early.
1533  * Fields really needed: vendor, cpuid_level, family, model, stepping,
1534  * cache alignment.
1535  * The others are not touched to avoid unwanted side effects.
1536  *
1537  * WARNING: this function is only called on the boot CPU.  Don't add code
1538  * here that is supposed to run on all CPUs.
1539  */
1540 static void __init early_identify_cpu(struct cpuinfo_x86 *c)
1541 {
1542 #ifdef CONFIG_X86_64
1543 	c->x86_clflush_size = 64;
1544 	c->x86_phys_bits = 36;
1545 	c->x86_virt_bits = 48;
1546 #else
1547 	c->x86_clflush_size = 32;
1548 	c->x86_phys_bits = 32;
1549 	c->x86_virt_bits = 32;
1550 #endif
1551 	c->x86_cache_alignment = c->x86_clflush_size;
1552 
1553 	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
1554 	c->extended_cpuid_level = 0;
1555 
1556 	if (!have_cpuid_p())
1557 		identify_cpu_without_cpuid(c);
1558 
1559 	/* Cyrix could have CPUID enabled via c_identify() */
1560 	if (have_cpuid_p()) {
1561 		cpu_detect(c);
1562 		get_cpu_vendor(c);
1563 		get_cpu_cap(c);
1564 		get_cpu_address_sizes(c);
1565 		setup_force_cpu_cap(X86_FEATURE_CPUID);
1566 		cpu_parse_early_param();
1567 
1568 		if (this_cpu->c_early_init)
1569 			this_cpu->c_early_init(c);
1570 
1571 		c->cpu_index = 0;
1572 		filter_cpuid_features(c, false);
1573 
1574 		if (this_cpu->c_bsp_init)
1575 			this_cpu->c_bsp_init(c);
1576 	} else {
1577 		setup_clear_cpu_cap(X86_FEATURE_CPUID);
1578 	}
1579 
1580 	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
1581 
1582 	cpu_set_bug_bits(c);
1583 
1584 	sld_setup(c);
1585 
1586 #ifdef CONFIG_X86_32
1587 	/*
1588 	 * Regardless of whether PCID is enumerated, the SDM says
1589 	 * that it can't be enabled in 32-bit mode.
1590 	 */
1591 	setup_clear_cpu_cap(X86_FEATURE_PCID);
1592 #endif
1593 
1594 	/*
1595 	 * Later in the boot process pgtable_l5_enabled() relies on
1596 	 * cpu_feature_enabled(X86_FEATURE_LA57). If 5-level paging is not
1597 	 * enabled by this point we need to clear the feature bit to avoid
1598 	 * false-positives at the later stage.
1599 	 *
1600 	 * pgtable_l5_enabled() can be false here for several reasons:
1601 	 *  - 5-level paging is disabled compile-time;
1602 	 *  - it's 32-bit kernel;
1603 	 *  - machine doesn't support 5-level paging;
1604 	 *  - user specified 'no5lvl' in kernel command line.
1605 	 */
1606 	if (!pgtable_l5_enabled())
1607 		setup_clear_cpu_cap(X86_FEATURE_LA57);
1608 
1609 	detect_nopl();
1610 }
1611 
1612 void __init early_cpu_init(void)
1613 {
1614 	const struct cpu_dev *const *cdev;
1615 	int count = 0;
1616 
1617 #ifdef CONFIG_PROCESSOR_SELECT
1618 	pr_info("KERNEL supported cpus:\n");
1619 #endif
1620 
1621 	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
1622 		const struct cpu_dev *cpudev = *cdev;
1623 
1624 		if (count >= X86_VENDOR_NUM)
1625 			break;
1626 		cpu_devs[count] = cpudev;
1627 		count++;
1628 
1629 #ifdef CONFIG_PROCESSOR_SELECT
1630 		{
1631 			unsigned int j;
1632 
1633 			for (j = 0; j < 2; j++) {
1634 				if (!cpudev->c_ident[j])
1635 					continue;
1636 				pr_info("  %s %s\n", cpudev->c_vendor,
1637 					cpudev->c_ident[j]);
1638 			}
1639 		}
1640 #endif
1641 	}
1642 	early_identify_cpu(&boot_cpu_data);
1643 }
1644 
1645 static bool detect_null_seg_behavior(void)
1646 {
1647 	/*
1648 	 * Empirically, writing zero to a segment selector on AMD does
1649 	 * not clear the base, whereas writing zero to a segment
1650 	 * selector on Intel does clear the base.  Intel's behavior
1651 	 * allows slightly faster context switches in the common case
1652 	 * where GS is unused by the prev and next threads.
1653 	 *
1654 	 * Since neither vendor documents this anywhere that I can see,
1655 	 * detect it directly instead of hard-coding the choice by
1656 	 * vendor.
1657 	 *
1658 	 * I've designated AMD's behavior as the "bug" because it's
1659 	 * counterintuitive and less friendly.
1660 	 */
1661 
1662 	unsigned long old_base, tmp;
1663 	rdmsrl(MSR_FS_BASE, old_base);
1664 	wrmsrl(MSR_FS_BASE, 1);
1665 	loadsegment(fs, 0);
1666 	rdmsrl(MSR_FS_BASE, tmp);
1667 	wrmsrl(MSR_FS_BASE, old_base);
1668 	return tmp == 0;
1669 }
1670 
1671 void check_null_seg_clears_base(struct cpuinfo_x86 *c)
1672 {
1673 	/* BUG_NULL_SEG is only relevant with 64bit userspace */
1674 	/* BUG_NULL_SEG is only relevant with 64-bit userspace */
1675 		return;
1676 
1677 	if (cpu_has(c, X86_FEATURE_NULL_SEL_CLR_BASE))
1678 		return;
1679 
1680 	/*
1681 	 * CPUID bit above wasn't set. If this kernel is still running
1682 	 * as an HV guest, then the HV has decided not to advertise
1683 	 * that CPUID bit for whatever reason.  For example, one
1684 	 * member of the migration pool might be vulnerable.  Which
1685 	 * means, the bug is present: set the BUG flag and return.
1686 	 */
1687 	if (cpu_has(c, X86_FEATURE_HYPERVISOR)) {
1688 		set_cpu_bug(c, X86_BUG_NULL_SEG);
1689 		return;
1690 	}
1691 
1692 	/*
1693 	 * Zen2 CPUs also have this behaviour, but no CPUID bit.
1694 	 * 0x18 is the respective family for Hygon.
1695 	 */
1696 	if ((c->x86 == 0x17 || c->x86 == 0x18) &&
1697 	    detect_null_seg_behavior())
1698 		return;
1699 
1700 	/* All the remaining ones are affected */
1701 	set_cpu_bug(c, X86_BUG_NULL_SEG);
1702 }
1703 
1704 static void generic_identify(struct cpuinfo_x86 *c)
1705 {
1706 	c->extended_cpuid_level = 0;
1707 
1708 	if (!have_cpuid_p())
1709 		identify_cpu_without_cpuid(c);
1710 
1711 	/* Cyrix could have CPUID enabled via c_identify() */
1712 	if (!have_cpuid_p())
1713 		return;
1714 
1715 	cpu_detect(c);
1716 
1717 	get_cpu_vendor(c);
1718 
1719 	get_cpu_cap(c);
1720 
1721 	get_cpu_address_sizes(c);
1722 
1723 	if (c->cpuid_level >= 0x00000001) {
1724 		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
1725 #ifdef CONFIG_X86_32
1726 # ifdef CONFIG_SMP
1727 		c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
1728 # else
1729 		c->apicid = c->initial_apicid;
1730 # endif
1731 #endif
1732 		c->phys_proc_id = c->initial_apicid;
1733 	}
1734 
1735 	get_model_name(c); /* Default name */
1736 
1737 	/*
1738 	 * ESPFIX is a strange bug.  All real CPUs have it.  Paravirt
1739 	 * systems that run Linux at CPL > 0 may or may not have the
1740 	 * issue, but, even if they have the issue, there's absolutely
1741 	 * nothing we can do about it because we can't use the real IRET
1742 	 * instruction.
1743 	 *
1744 	 * NB: For the time being, only 32-bit kernels support
1745 	 * X86_BUG_ESPFIX as such.  64-bit kernels directly choose
1746 	 * whether to apply espfix using paravirt hooks.  If any
1747 	 * non-paravirt system ever shows up that does *not* have the
1748 	 * ESPFIX issue, we can change this.
1749 	 */
1750 #ifdef CONFIG_X86_32
1751 	set_cpu_bug(c, X86_BUG_ESPFIX);
1752 #endif
1753 }
1754 
1755 /*
1756  * Validate that ACPI/mptables have the same information about the
1757  * effective APIC id and update the package map.
1758  */
1759 static void validate_apic_and_package_id(struct cpuinfo_x86 *c)
1760 {
1761 #ifdef CONFIG_SMP
1762 	unsigned int apicid, cpu = smp_processor_id();
1763 
1764 	apicid = apic->cpu_present_to_apicid(cpu);
1765 
1766 	if (apicid != c->apicid) {
1767 		pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n",
1768 		       cpu, apicid, c->apicid);
1769 	}
1770 	BUG_ON(topology_update_package_map(c->phys_proc_id, cpu));
1771 	BUG_ON(topology_update_die_map(c->cpu_die_id, cpu));
1772 #else
1773 	c->logical_proc_id = 0;
1774 #endif
1775 }
1776 
1777 /*
1778  * This does the hard work of actually picking apart the CPU stuff...
1779  */
1780 static void identify_cpu(struct cpuinfo_x86 *c)
1781 {
1782 	int i;
1783 
1784 	c->loops_per_jiffy = loops_per_jiffy;
1785 	c->x86_cache_size = 0;
1786 	c->x86_vendor = X86_VENDOR_UNKNOWN;
1787 	c->x86_model = c->x86_stepping = 0;	/* So far unknown... */
1788 	c->x86_vendor_id[0] = '\0'; /* Unset */
1789 	c->x86_model_id[0] = '\0';  /* Unset */
1790 	c->x86_max_cores = 1;
1791 	c->x86_coreid_bits = 0;
1792 	c->cu_id = 0xff;
1793 #ifdef CONFIG_X86_64
1794 	c->x86_clflush_size = 64;
1795 	c->x86_phys_bits = 36;
1796 	c->x86_virt_bits = 48;
1797 #else
1798 	c->cpuid_level = -1;	/* CPUID not detected */
1799 	c->x86_clflush_size = 32;
1800 	c->x86_phys_bits = 32;
1801 	c->x86_virt_bits = 32;
1802 #endif
1803 	c->x86_cache_alignment = c->x86_clflush_size;
1804 	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
1805 #ifdef CONFIG_X86_VMX_FEATURE_NAMES
1806 	memset(&c->vmx_capability, 0, sizeof(c->vmx_capability));
1807 #endif
1808 
1809 	generic_identify(c);
1810 
1811 	if (this_cpu->c_identify)
1812 		this_cpu->c_identify(c);
1813 
1814 	/* Clear/Set all flags overridden by options, after probe */
1815 	apply_forced_caps(c);
1816 
1817 #ifdef CONFIG_X86_64
1818 	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
1819 #endif
1820 
1821 	/*
1822 	 * Vendor-specific initialization.  In this section we
1823 	 * canonicalize the feature flags, meaning that features a
1824 	 * certain CPU supports which CPUID doesn't report, CPUID
1825 	 * claiming incorrect flags, and other bugs are all handled
1826 	 * here.
1827 	 *
1828 	 * At the end of this section, c->x86_capability better
1829 	 * indicate the features this CPU genuinely supports!
1830 	 */
1831 	if (this_cpu->c_init)
1832 		this_cpu->c_init(c);
1833 
1834 	/* Disable the PN if appropriate */
1835 	squash_the_stupid_serial_number(c);
1836 
1837 	/* Set up SMEP/SMAP/UMIP */
1838 	setup_smep(c);
1839 	setup_smap(c);
1840 	setup_umip(c);
1841 
1842 	/* Enable FSGSBASE instructions if available. */
1843 	if (cpu_has(c, X86_FEATURE_FSGSBASE)) {
1844 		cr4_set_bits(X86_CR4_FSGSBASE);
1845 		elf_hwcap2 |= HWCAP2_FSGSBASE;
1846 	}
1847 
1848 	/*
1849 	 * The vendor-specific functions might have changed features.
1850 	 * Now we do "generic changes."
1851 	 */
1852 
1853 	/* Filter out anything that depends on CPUID levels we don't have */
1854 	filter_cpuid_features(c, true);
1855 
1856 	/* If the model name is still unset, do table lookup. */
1857 	if (!c->x86_model_id[0]) {
1858 		const char *p;
1859 		p = table_lookup_model(c);
1860 		if (p)
1861 			strcpy(c->x86_model_id, p);
1862 		else
1863 			/* Last resort... */
1864 			sprintf(c->x86_model_id, "%02x/%02x",
1865 				c->x86, c->x86_model);
1866 	}
1867 
1868 #ifdef CONFIG_X86_64
1869 	detect_ht(c);
1870 #endif
1871 
1872 	x86_init_rdrand(c);
1873 	setup_pku(c);
1874 	setup_cet(c);
1875 
1876 	/*
1877 	 * Clear/Set all flags overridden by options; this needs to
1878 	 * happen before the SMP all-CPUs capability AND below.
1879 	 */
1880 	apply_forced_caps(c);
1881 
1882 	/*
1883 	 * On SMP, boot_cpu_data holds the common feature set between
1884 	 * all CPUs; so make sure that we indicate which features are
1885 	 * common between the CPUs.  The first time this routine gets
1886 	 * executed, c == &boot_cpu_data.
1887 	 */
1888 	if (c != &boot_cpu_data) {
1889 		/* AND the already accumulated flags with these */
1890 		for (i = 0; i < NCAPINTS; i++)
1891 			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
1892 
1893 		/* OR, i.e. replicate the bug flags */
1894 		for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
1895 			c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
1896 	}
1897 
1898 	ppin_init(c);
1899 
1900 	/* Init Machine Check Exception if available. */
1901 	mcheck_cpu_init(c);
1902 
1903 	select_idle_routine(c);
1904 
1905 #ifdef CONFIG_NUMA
1906 	numa_add_cpu(smp_processor_id());
1907 #endif
1908 }
1909 
1910 /*
1911  * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
1912  * on 32-bit kernels:
1913  */
1914 #ifdef CONFIG_X86_32
1915 void enable_sep_cpu(void)
1916 {
1917 	struct tss_struct *tss;
1918 	int cpu;
1919 
1920 	if (!boot_cpu_has(X86_FEATURE_SEP))
1921 		return;
1922 
1923 	cpu = get_cpu();
1924 	tss = &per_cpu(cpu_tss_rw, cpu);
1925 
1926 	/*
1927 	 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
1928 	 * see the big comment in struct x86_hw_tss's definition.
1929 	 */
1930 
1931 	tss->x86_tss.ss1 = __KERNEL_CS;
1932 	wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
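	/* Point SYSENTER_ESP at the top of the per-CPU entry stack */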
1933 	wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0);
1934 	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
1935 
1936 	put_cpu();
1937 }
1938 #endif
1939 
1940 void __init identify_boot_cpu(void)
1941 {
1942 	identify_cpu(&boot_cpu_data);
1943 	if (HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT))
1944 		pr_info("CET detected: Indirect Branch Tracking enabled\n");
1945 #ifdef CONFIG_X86_32
1946 	enable_sep_cpu();
1947 #endif
1948 	cpu_detect_tlb(&boot_cpu_data);
1949 	setup_cr_pinning();
1950 
1951 	tsx_init();
1952 	lkgs_init();
1953 }
1954 
1955 void identify_secondary_cpu(struct cpuinfo_x86 *c)
1956 {
1957 	BUG_ON(c == &boot_cpu_data);
1958 	identify_cpu(c);
1959 #ifdef CONFIG_X86_32
1960 	enable_sep_cpu();
1961 #endif
1962 	validate_apic_and_package_id(c);
1963 	x86_spec_ctrl_setup_ap();
1964 	update_srbds_msr();
1965 
1966 	tsx_ap_init();
1967 }
1968 
1969 void print_cpu_info(struct cpuinfo_x86 *c)
1970 {
1971 	const char *vendor = NULL;
1972 
1973 	if (c->x86_vendor < X86_VENDOR_NUM) {
1974 		vendor = this_cpu->c_vendor;
1975 	} else {
1976 		if (c->cpuid_level >= 0)
1977 			vendor = c->x86_vendor_id;
1978 	}
1979 
1980 	if (vendor && !strstr(c->x86_model_id, vendor))
1981 		pr_cont("%s ", vendor);
1982 
1983 	if (c->x86_model_id[0])
1984 		pr_cont("%s", c->x86_model_id);
1985 	else
1986 		pr_cont("%d86", c->x86);
1987 
1988 	pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
1989 
1990 	if (c->x86_stepping || c->cpuid_level >= 0)
1991 		pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
1992 	else
1993 		pr_cont(")\n");
1994 }
1995 
1996 /*
1997  * clearcpuid= was already parsed in cpu_parse_early_param().  This dummy
1998  * function prevents it from becoming an environment variable for init.
1999  */
2000 static __init int setup_clearcpuid(char *arg)
2001 {
2002 	return 1;
2003 }
2004 __setup("clearcpuid=", setup_clearcpuid);
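/*
 * Illustrative usage (see cpu_parse_early_param() for the authoritative
 * parsing): booting with e.g. "clearcpuid=smap" masks the corresponding
 * feature bit before it is used anywhere.
 */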
2005 
2006 DEFINE_PER_CPU_ALIGNED(struct pcpu_hot, pcpu_hot) = {
2007 	.current_task	= &init_task,
2008 	.preempt_count	= INIT_PREEMPT_COUNT,
2009 	.top_of_stack	= TOP_OF_INIT_STACK,
2010 };
2011 EXPORT_PER_CPU_SYMBOL(pcpu_hot);
2012 
2013 #ifdef CONFIG_X86_64
2014 DEFINE_PER_CPU_FIRST(struct fixed_percpu_data,
2015 		     fixed_percpu_data) __aligned(PAGE_SIZE) __visible;
2016 EXPORT_PER_CPU_SYMBOL_GPL(fixed_percpu_data);
2017 
2018 static void wrmsrl_cstar(unsigned long val)
2019 {
2020 	/*
2021 	 * Intel CPUs do not support 32-bit SYSCALL. Writing to MSR_CSTAR
2022 	 * is so far ignored by the CPU, but raises a #VE trap in a TDX
2023 	 * guest. Avoid the pointless write on all Intel CPUs.
2024 	 */
2025 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
2026 		wrmsrl(MSR_CSTAR, val);
2027 }
2028 
2029 /* May not be marked __init: used by software suspend */
2030 void syscall_init(void)
2031 {
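	/*
	 * MSR_STAR's high half holds the selector bases: bits 63:48 the
	 * user CS/SS base consumed by SYSRET, bits 47:32 the kernel
	 * CS/SS base consumed by SYSCALL. The low half (the legacy
	 * 32-bit SYSCALL EIP) is unused in long mode and stays zero.
	 */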
2032 	wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
2033 	wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
2034 
2035 #ifdef CONFIG_IA32_EMULATION
2036 	wrmsrl_cstar((unsigned long)entry_SYSCALL_compat);
2037 	/*
2038 	 * This only works on Intel CPUs.
2039 	 * On AMD CPUs these MSRs are 32-bit, so the CPU truncates MSR_IA32_SYSENTER_EIP.
2040 	 * This does not cause SYSENTER to jump to the wrong location, because
2041 	 * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
2042 	 */
2043 	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
2044 	wrmsrl_safe(MSR_IA32_SYSENTER_ESP,
2045 		    (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1));
2046 	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
2047 #else
2048 	wrmsrl_cstar((unsigned long)ignore_sysret);
2049 	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
2050 	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
2051 	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
2052 #endif
2053 
2054 	/*
2055 	 * Flags to clear on syscall; clear as much as possible
2056 	 * to minimize user space-kernel interference.
2057 	 */
2058 	wrmsrl(MSR_SYSCALL_MASK,
2059 	       X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF|
2060 	       X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_TF|
2061 	       X86_EFLAGS_IF|X86_EFLAGS_DF|X86_EFLAGS_OF|
2062 	       X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_RF|
2063 	       X86_EFLAGS_AC|X86_EFLAGS_ID);
2064 }
2065 
2066 #else	/* CONFIG_X86_64 */
2067 
2068 #ifdef CONFIG_STACKPROTECTOR
2069 DEFINE_PER_CPU(unsigned long, __stack_chk_guard);
2070 EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
2071 #endif
2072 
2073 #endif	/* CONFIG_X86_64 */
2074 
2075 /*
2076  * Clear all 6 debug registers:
2077  */
2078 static void clear_all_debug_regs(void)
2079 {
2080 	int i;
2081 
2082 	for (i = 0; i < 8; i++) {
2083 		/* Skip DR4 and DR5; they are reserved */
2084 		if ((i == 4) || (i == 5))
2085 			continue;
2086 
2087 		set_debugreg(0, i);
2088 	}
2089 }
2090 
2091 #ifdef CONFIG_KGDB
2092 /*
2093  * Restore debug regs if kgdbwait was used and a kernel debugger
2094  * connection has been established.
2095  */
2096 static void dbg_restore_debug_regs(void)
2097 {
2098 	if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
2099 		arch_kgdb_ops.correct_hw_break();
2100 }
2101 #else /* ! CONFIG_KGDB */
2102 #define dbg_restore_debug_regs()
2103 #endif /* ! CONFIG_KGDB */
2104 
2105 static inline void setup_getcpu(int cpu)
2106 {
2107 	unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
2108 	struct desc_struct d = { };
2109 
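	/* RDTSCP and RDPID read the value back from MSR_TSC_AUX */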
2110 	if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID))
2111 		wrmsr(MSR_TSC_AUX, cpudata, 0);
2112 
2113 	/* Store CPU and node number in limit. */
2114 	d.limit0 = cpudata;
2115 	d.limit1 = cpudata >> 16;
2116 
2117 	d.type = 5;		/* RO data, expand down, accessed */
2118 	d.dpl = 3;		/* Visible to user code */
2119 	d.s = 1;		/* Not a system segment */
2120 	d.p = 1;		/* Present */
2121 	d.d = 1;		/* 32-bit */
2122 
2123 	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S);
2124 }
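/*
 * A minimal sketch of the consumer side, mirroring vdso_read_cpunode()
 * (names are from <asm/segment.h>): RDPID reads the value back from
 * MSR_TSC_AUX when available; otherwise LSL on the segment set up above
 * recovers the packed limit:
 *
 *	unsigned long p;
 *	unsigned int cpu, node;
 *
 *	asm ("lsl %1, %0" : "=r" (p) : "r" (__CPUNODE_SEG));
 *	cpu  = p & VDSO_CPUNODE_MASK;
 *	node = p >> VDSO_CPUNODE_BITS;
 */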
2125 
2126 #ifdef CONFIG_X86_64
2127 static inline void ucode_cpu_init(int cpu) { }
2128 
2129 static inline void tss_setup_ist(struct tss_struct *tss)
2130 {
2131 	/* Set up the per-CPU TSS IST stacks */
2132 	tss->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF);
2133 	tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI);
2134 	tss->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB);
2135 	tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);
2136 	/* Only mapped when SEV-ES is active */
2137 	tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC);
2138 }
2139 
2140 #else /* CONFIG_X86_64 */
2141 
2142 static inline void ucode_cpu_init(int cpu)
2143 {
2144 	show_ucode_info_early();
2145 }
2146 
2147 static inline void tss_setup_ist(struct tss_struct *tss) { }
2148 
2149 #endif /* !CONFIG_X86_64 */
2150 
2151 static inline void tss_setup_io_bitmap(struct tss_struct *tss)
2152 {
2153 	tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET_INVALID;
2154 
2155 #ifdef CONFIG_X86_IOPL_IOPERM
2156 	tss->io_bitmap.prev_max = 0;
2157 	tss->io_bitmap.prev_sequence = 0;
2158 	memset(tss->io_bitmap.bitmap, 0xff, sizeof(tss->io_bitmap.bitmap));
2159 	/*
2160 	 * Invalidate the extra array entry past the end of the
2161 	 * all-permission bitmap, as required by the hardware.
2162 	 */
2163 	tss->io_bitmap.mapall[IO_BITMAP_LONGS] = ~0UL;
2164 #endif
2165 }
2166 
2167 /*
2168  * Setup everything needed to handle exceptions from the IDT, including the IST
2169  * exceptions which use paranoid_entry().
2170  */
2171 void cpu_init_exception_handling(void)
2172 {
2173 	struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
2174 	int cpu = raw_smp_processor_id();
2175 
2176 	/* paranoid_entry() gets the CPU number from the GDT */
2177 	setup_getcpu(cpu);
2178 
2179 	/* IST vectors need TSS to be set up. */
2180 	tss_setup_ist(tss);
2181 	tss_setup_io_bitmap(tss);
2182 	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
2183 
2184 	load_TR_desc();
2185 
2186 	/* GHCB needs to be setup to handle #VC. */
2187 	setup_ghcb();
2188 
2189 	/* Finally load the IDT */
2190 	load_current_idt();
2191 }
2192 
2193 /*
2194  * cpu_init() initializes state that is per-CPU. Some data is already
2195  * initialized (naturally) in the bootstrap process, such as the GDT.  We
2196  * reload it nevertheless: this function acts as a 'CPU state barrier',
2197  * and nothing should get across it.
2198  */
2199 void cpu_init(void)
2200 {
2201 	struct task_struct *cur = current;
2202 	int cpu = raw_smp_processor_id();
2203 
2204 	ucode_cpu_init(cpu);
2205 
2206 #ifdef CONFIG_NUMA
2207 	if (this_cpu_read(numa_node) == 0 &&
2208 	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
2209 		set_numa_node(early_cpu_to_node(cpu));
2210 #endif
2211 	pr_debug("Initializing CPU#%d\n", cpu);
2212 
2213 	if (IS_ENABLED(CONFIG_X86_64) || cpu_feature_enabled(X86_FEATURE_VME) ||
2214 	    boot_cpu_has(X86_FEATURE_TSC) || boot_cpu_has(X86_FEATURE_DE))
2215 		cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
2216 
2217 	if (IS_ENABLED(CONFIG_X86_64)) {
2218 		loadsegment(fs, 0);
2219 		memset(cur->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
2220 		syscall_init();
2221 
2222 		wrmsrl(MSR_FS_BASE, 0);
2223 		wrmsrl(MSR_KERNEL_GS_BASE, 0);
2224 		barrier();
2225 
2226 		x2apic_setup();
2227 	}
2228 
2229 	mmgrab(&init_mm);
2230 	cur->active_mm = &init_mm;
2231 	BUG_ON(cur->mm);
2232 	initialize_tlbstate_and_flush();
2233 	enter_lazy_tlb(&init_mm, cur);
2234 
2235 	/*
2236 	 * sp0 points to the entry trampoline stack regardless of what task
2237 	 * is running.
2238 	 */
2239 	load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
2240 
2241 	load_mm_ldt(&init_mm);
2242 
2243 	clear_all_debug_regs();
2244 	dbg_restore_debug_regs();
2245 
2246 	doublefault_init_cpu_tss();
2247 
2248 	if (is_uv_system())
2249 		uv_cpu_init();
2250 
2251 	load_fixmap_gdt(cpu);
2252 }
2253 
2254 #ifdef CONFIG_MICROCODE_LATE_LOADING
2255 /**
2256  * store_cpu_caps() - Store a snapshot of CPU capabilities
2257  * @curr_info: Pointer to the cpuinfo_x86 structure the snapshot is stored in
2258  *
2259  * Return: None
2260  */
2261 void store_cpu_caps(struct cpuinfo_x86 *curr_info)
2262 {
2263 	/* Reload CPUID max function as it might've changed. */
2264 	curr_info->cpuid_level = cpuid_eax(0);
2265 
2266 	/* Copy all capability leaves and pick up the synthetic ones. */
2267 	memcpy(&curr_info->x86_capability, &boot_cpu_data.x86_capability,
2268 	       sizeof(curr_info->x86_capability));
2269 
2270 	/* Get the hardware CPUID leaves */
2271 	get_cpu_cap(curr_info);
2272 }
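/*
 * Sketch of the intended pairing in the late loader (the reload step in
 * the middle is illustrative):
 *
 *	struct cpuinfo_x86 prev_info;
 *
 *	store_cpu_caps(&prev_info);
 *	... load the new microcode ...
 *	microcode_check(&prev_info);
 */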
2273 
2274 /**
2275  * microcode_check() - Check if any CPU capabilities changed after an update.
2276  * @prev_info:	CPU capabilities stored before an update.
2277  *
2278  * The microcode loader calls this upon late microcode load to recheck features,
2279  * but only when the microcode has actually been updated. The caller holds
2280  * the microcode_mutex and the CPU hotplug lock.
2281  *
2282  * Return: None
2283  */
2284 void microcode_check(struct cpuinfo_x86 *prev_info)
2285 {
2286 	struct cpuinfo_x86 curr_info;
2287 
2288 	perf_check_microcode();
2289 
2290 	amd_check_microcode();
2291 
2292 	store_cpu_caps(&curr_info);
2293 
2294 	if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability,
2295 		    sizeof(prev_info->x86_capability)))
2296 		return;
2297 
2298 	pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
2299 	pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
2300 }
2301 #endif
2302 
2303 /*
2304  * Invoked from core CPU hotplug code after hotplug operations
2305  */
2306 void arch_smt_update(void)
2307 {
2308 	/* Handle the speculative execution misfeatures */
2309 	cpu_bugs_smt_update();
2310 	/* Check whether IPI broadcasting can be enabled */
2311 	apic_smt_update();
2312 }
2313 
2314 void __init arch_cpu_finalize_init(void)
2315 {
2316 	identify_boot_cpu();
2317 
2318 	/*
2319 	 * identify_boot_cpu() initialized SMT support information, let the
2320 	 * core code know.
2321 	 */
2322 	cpu_smt_check_topology();
2323 
2324 	if (!IS_ENABLED(CONFIG_SMP)) {
2325 		pr_info("CPU: ");
2326 		print_cpu_info(&boot_cpu_data);
2327 	}
2328 
2329 	cpu_select_mitigations();
2330 
2331 	arch_smt_update();
2332 
2333 	if (IS_ENABLED(CONFIG_X86_32)) {
2334 		/*
2335 		 * Check whether this is a real i386, which is no longer
2336 		 * supported, and fix up the utsname.
2337 		 */
2338 		if (boot_cpu_data.x86 < 4)
2339 			panic("Kernel requires i486+ for 'invlpg' and other features");
2340 
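		/* e.g. family 6 and anything newer is reported as "i686" */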
2341 		init_utsname()->machine[1] =
2342 			'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
2343 	}
2344 
2345 	/*
2346 	 * Must be before alternatives because it might set or clear
2347 	 * feature bits.
2348 	 */
2349 	fpu__init_system();
2350 	fpu__init_cpu();
2351 
2352 	alternative_instructions();
2353 
2354 	if (IS_ENABLED(CONFIG_X86_64)) {
2355 		/*
2356 		 * Make sure the first 2MB area is not mapped by huge pages.
2357 		 * There are typically fixed-size MTRRs in there and overlapping
2358 		 * MTRRs into large pages causes slowdowns.
2359 		 *
2360 		 * Right now we don't do that with gbpages because there seems
2361 		 * very little benefit for that case.
2362 		 */
2363 		if (!direct_gbpages)
2364 			set_memory_4k((unsigned long)__va(0), 1);
2365 	} else {
2366 		fpu__init_check_bugs();
2367 	}
2368 
2369 	/*
2370 	 * This needs to be called before any devices perform DMA
2371 	 * operations that might use the SWIOTLB bounce buffers. It will
2372 	 * mark the bounce buffers as decrypted so that their usage will
2373 	 * not cause "plain-text" data to be decrypted when accessed. It
2374 	 * must be called after late_time_init() so that Hyper-V x86/x64
2375 	 * hypercalls work when the SWIOTLB bounce buffers are decrypted.
2376 	 */
2377 	mem_encrypt_init();
2378 }
2379