// SPDX-License-Identifier: GPL-2.0-only
/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/memblock.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/sched/mm.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/kgdb.h>
#include <linux/mem_encrypt.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>
#include <linux/pgtable.h>
#include <linux/stackprotector.h>
#include <linux/utsname.h>

#include <asm/alternative.h>
#include <asm/cmdline.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/doublefault.h>
#include <asm/archrandom.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>
#include <asm/sections.h>
#include <asm/vsyscall.h>
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/fpu/api.h>
#include <asm/mtrr.h>
#include <asm/hwcap2.h>
#include <linux/numa.h>
#include <asm/numa.h>
#include <asm/asm.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/cacheinfo.h>
#include <asm/memtype.h>
#include <asm/microcode.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#include <asm/uv/uv.h>
#include <asm/ia32.h>
#include <asm/set_memory.h>
#include <asm/traps.h>
#include <asm/sev.h>
#include <asm/tdx.h>

#include "cpu.h"

u32 elf_hwcap2 __read_mostly;

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

static struct ppin_info {
	int	feature;
	int	msr_ppin_ctl;
	int	msr_ppin;
} ppin_info[] = {
	[X86_VENDOR_INTEL] = {
		.feature = X86_FEATURE_INTEL_PPIN,
		.msr_ppin_ctl = MSR_PPIN_CTL,
		.msr_ppin = MSR_PPIN
	},
	[X86_VENDOR_AMD] = {
		.feature = X86_FEATURE_AMD_PPIN,
		.msr_ppin_ctl = MSR_AMD_PPIN_CTL,
		.msr_ppin = MSR_AMD_PPIN
	},
};

static const struct x86_cpu_id ppin_cpuids[] = {
	X86_MATCH_FEATURE(X86_FEATURE_AMD_PPIN, &ppin_info[X86_VENDOR_AMD]),
	X86_MATCH_FEATURE(X86_FEATURE_INTEL_PPIN, &ppin_info[X86_VENDOR_INTEL]),

	/* Legacy models without CPUID enumeration */
	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &ppin_info[X86_VENDOR_INTEL]),

	{}
};

static void ppin_init(struct cpuinfo_x86 *c)
{
	const struct x86_cpu_id *id;
	unsigned long long val;
	struct ppin_info *info;

	id = x86_match_cpu(ppin_cpuids);
	if (!id)
		return;

	/*
	 * Testing the presence of the MSR is not enough. Need to check
	 * that the PPIN_CTL allows reading of the PPIN.
	 */
	info = (struct ppin_info *)id->driver_data;

	if (rdmsrl_safe(info->msr_ppin_ctl, &val))
		goto clear_ppin;

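	/*
	 * PPIN_CTL bit 0 is LockOut and bit 1 is Enable_PPIN, so a raw
	 * value of 1 means the control is locked with reads disabled.
	 */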
	if ((val & 3UL) == 1UL) {
		/* PPIN locked in disabled mode */
		goto clear_ppin;
	}

	/* If PPIN is disabled, try to enable */
	if (!(val & 2UL)) {
		wrmsrl_safe(info->msr_ppin_ctl, val | 2UL);
		rdmsrl_safe(info->msr_ppin_ctl, &val);
	}

	/* Is the enable bit set? */
	if (val & 2UL) {
		c->ppin = __rdmsr(info->msr_ppin);
		set_cpu_cap(c, info->feature);
		return;
	}

clear_ppin:
	clear_cpu_cap(c, info->feature);
}

static void default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	cpu_detect_cache_sizes(c);
#else
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
#endif
}

static const struct cpu_dev default_cpu = {
	.c_init		= default_init,
	.c_vendor	= "Unknown",
	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
};

static const struct cpu_dev *this_cpu = &default_cpu;

DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
	/*
	 * We need valid kernel segments for data and code in long mode too.
	 * IRET will check the segment types (kkeil 2000/10/28).
	 * Also, SYSRET mandates a special GDT layout.
	 *
	 * TLS descriptors are currently at a different place compared to i386.
	 * Hopefully nobody expects them at a fixed place (Wine?)
	 */
	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(DESC_CODE32, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(DESC_CODE64, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(DESC_DATA64, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(DESC_CODE32 | DESC_USER, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(DESC_DATA64 | DESC_USER, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(DESC_CODE64 | DESC_USER, 0, 0xfffff),
#else
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(DESC_CODE32, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(DESC_DATA32, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(DESC_CODE32 | DESC_USER, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(DESC_DATA32 | DESC_USER, 0, 0xfffff),
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * The code and data segments have fixed 64k limits,
	 * the transfer segment sizes are set at run time.
	 */
	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(DESC_CODE32_BIOS, 0, 0xffff),
	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(DESC_CODE16, 0, 0xffff),
	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(DESC_DATA16, 0, 0xffff),
	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(DESC_DATA16, 0, 0),
	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(DESC_DATA16, 0, 0),
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(DESC_CODE32_BIOS, 0, 0xffff),
	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(DESC_CODE16, 0, 0xffff),
	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(DESC_DATA32_BIOS, 0, 0xffff),

	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(DESC_DATA32, 0, 0xfffff),
	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(DESC_DATA32, 0, 0xfffff),
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

#ifdef CONFIG_X86_64
static int __init x86_nopcid_setup(char *s)
{
	/* nopcid doesn't accept parameters */
	if (s)
		return -EINVAL;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_PCID))
		return 0;

	setup_clear_cpu_cap(X86_FEATURE_PCID);
	pr_info("nopcid: PCID feature disabled\n");
	return 0;
}
early_param("nopcid", x86_nopcid_setup);
#endif

static int __init x86_noinvpcid_setup(char *s)
{
	/* noinvpcid doesn't accept parameters */
	if (s)
		return -EINVAL;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_INVPCID))
		return 0;

	setup_clear_cpu_cap(X86_FEATURE_INVPCID);
	pr_info("noinvpcid: INVPCID feature disabled\n");
	return 0;
}
early_param("noinvpcid", x86_noinvpcid_setup);

#ifdef CONFIG_X86_32
static int cachesize_override = -1;
static int disable_x86_serial_nr = 1;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);

/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	/*
	 * Cyrix and IDT cpus allow disabling of CPUID
	 * so the code below may return different results
	 * when it is executed before and after enabling
	 * the CPUID. Add "volatile" to not allow gcc to
	 * optimize the subsequent calls to this function.
	 */
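	/* Toggle the flag in EFLAGS and read it back: it is changeable iff the toggle sticks. */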
	asm volatile ("pushfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "movl %0, %1	\n\t"
		      "xorl %2, %0	\n\t"
		      "pushl %0		\n\t"
		      "popfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "popfl		\n\t"

		      : "=&r" (f1), "=&r" (f2)
		      : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
int have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
		return;

	/* Disable processor serial number: */

	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
	lo |= 0x200000;
	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);

	pr_notice("CPU serial number disabled.\n");
	clear_cpu_cap(c, X86_FEATURE_PN);

	/* Disabling the serial number may affect the cpuid level */
	c->cpuid_level = cpuid_eax(0);
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
static inline int flag_is_changeable_p(u32 flag)
{
	return 1;
}
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
#endif

static __always_inline void setup_smep(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_SMEP))
		cr4_set_bits(X86_CR4_SMEP);
}

static __always_inline void setup_smap(struct cpuinfo_x86 *c)
{
	unsigned long eflags = native_save_fl();

	/* This should have been cleared long ago */
	BUG_ON(eflags & X86_EFLAGS_AC);

	if (cpu_has(c, X86_FEATURE_SMAP))
		cr4_set_bits(X86_CR4_SMAP);
}

static __always_inline void setup_umip(struct cpuinfo_x86 *c)
{
	/* Check the boot processor, plus build option for UMIP. */
	if (!cpu_feature_enabled(X86_FEATURE_UMIP))
		goto out;

	/* Check the current processor's cpuid bits. */
	if (!cpu_has(c, X86_FEATURE_UMIP))
		goto out;

	cr4_set_bits(X86_CR4_UMIP);

	pr_info_once("x86/cpu: User Mode Instruction Prevention (UMIP) activated\n");

	return;

out:
	/*
	 * Make sure UMIP is disabled in case it was enabled in a
	 * previous boot (e.g., via kexec).
	 */
	cr4_clear_bits(X86_CR4_UMIP);
}

/* These bits should not change their value after CPU init is finished. */
static const unsigned long cr4_pinned_mask =
	X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP |
	X86_CR4_FSGSBASE | X86_CR4_CET;
static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
static unsigned long cr4_pinned_bits __ro_after_init;

void native_write_cr0(unsigned long val)
{
	unsigned long bits_missing = 0;

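	/*
	 * Write CR0; if pinning is active and WP came back cleared,
	 * re-set it and retry before warning.
	 */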
set_register:
	asm volatile("mov %0,%%cr0": "+r" (val) : : "memory");

	if (static_branch_likely(&cr_pinning)) {
		if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) {
			bits_missing = X86_CR0_WP;
			val |= bits_missing;
			goto set_register;
		}
		/* Warn after we've set the missing bits. */
		WARN_ONCE(bits_missing, "CR0 WP bit went missing!?\n");
	}
}
EXPORT_SYMBOL(native_write_cr0);

void __no_profile native_write_cr4(unsigned long val)
{
	unsigned long bits_changed = 0;

set_register:
	asm volatile("mov %0,%%cr4": "+r" (val) : : "memory");

	if (static_branch_likely(&cr_pinning)) {
		if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
			bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits;
			val = (val & ~cr4_pinned_mask) | cr4_pinned_bits;
			goto set_register;
		}
		/* Warn after we've corrected the changed bits. */
		WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n",
			  bits_changed);
	}
}
#if IS_MODULE(CONFIG_LKDTM)
EXPORT_SYMBOL_GPL(native_write_cr4);
#endif

void cr4_update_irqsoff(unsigned long set, unsigned long clear)
{
	unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);

	lockdep_assert_irqs_disabled();

	newval = (cr4 & ~clear) | set;
	if (newval != cr4) {
		this_cpu_write(cpu_tlbstate.cr4, newval);
		__write_cr4(newval);
	}
}
EXPORT_SYMBOL(cr4_update_irqsoff);

/* Read the CR4 shadow. */
unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}
EXPORT_SYMBOL_GPL(cr4_read_shadow);

void cr4_init(void)
{
	unsigned long cr4 = __read_cr4();

	if (boot_cpu_has(X86_FEATURE_PCID))
		cr4 |= X86_CR4_PCIDE;
	if (static_branch_likely(&cr_pinning))
		cr4 = (cr4 & ~cr4_pinned_mask) | cr4_pinned_bits;

	__write_cr4(cr4);

	/* Initialize cr4 shadow for this CPU. */
	this_cpu_write(cpu_tlbstate.cr4, cr4);
}

/*
 * Once CPU feature detection is finished (and boot params have been
 * parsed), record any of the sensitive CR bits that are set, and
 * enable CR pinning.
 */
static void __init setup_cr_pinning(void)
{
	cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & cr4_pinned_mask;
	static_key_enable(&cr_pinning.key);
}

static __init int x86_nofsgsbase_setup(char *arg)
{
	/* Require an exact match without trailing characters. */
	if (strlen(arg))
		return 0;

	/* Do not emit a message if the feature is not present. */
	if (!boot_cpu_has(X86_FEATURE_FSGSBASE))
		return 1;

	setup_clear_cpu_cap(X86_FEATURE_FSGSBASE);
	pr_info("FSGSBASE disabled via kernel command line\n");
	return 1;
}
__setup("nofsgsbase", x86_nofsgsbase_setup);

/*
 * Protection Keys are not available in 32-bit mode.
 */
static bool pku_disabled;

static __always_inline void setup_pku(struct cpuinfo_x86 *c)
{
	if (c == &boot_cpu_data) {
		if (pku_disabled || !cpu_feature_enabled(X86_FEATURE_PKU))
			return;
		/*
		 * Setting CR4.PKE will cause the X86_FEATURE_OSPKE cpuid
		 * bit to be set.  Enforce it.
		 */
		setup_force_cpu_cap(X86_FEATURE_OSPKE);

	} else if (!cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		return;
	}

	cr4_set_bits(X86_CR4_PKE);
	/* Load the default PKRU value */
	pkru_write_default();
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static __init int setup_disable_pku(char *arg)
{
	/*
	 * Do not clear the X86_FEATURE_PKU bit.  All of the
	 * runtime checks are against OSPKE so clearing the
	 * bit does nothing.
	 *
	 * This way, we will see "pku" in cpuinfo, but not
	 * "ospke", which is exactly what we want.  It shows
	 * that the CPU has PKU, but the OS has not enabled it.
	 * This happens to be exactly how a system would look
	 * if we disabled the config option.
	 */
	pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n");
	pku_disabled = true;
	return 1;
}
__setup("nopku", setup_disable_pku);
#endif

#ifdef CONFIG_X86_KERNEL_IBT

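/*
 * Snapshot the kernel CET state (MSR_IA32_S_CET) and, if @disable is set,
 * clear ENDBR enforcement for the caller; ibt_restore() puts the saved
 * ENDBR_EN state back.
 */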
__noendbr u64 ibt_save(bool disable)
{
	u64 msr = 0;

	if (cpu_feature_enabled(X86_FEATURE_IBT)) {
		rdmsrl(MSR_IA32_S_CET, msr);
		if (disable)
			wrmsrl(MSR_IA32_S_CET, msr & ~CET_ENDBR_EN);
	}

	return msr;
}

__noendbr void ibt_restore(u64 save)
{
	u64 msr;

	if (cpu_feature_enabled(X86_FEATURE_IBT)) {
		rdmsrl(MSR_IA32_S_CET, msr);
		msr &= ~CET_ENDBR_EN;
		msr |= (save & CET_ENDBR_EN);
		wrmsrl(MSR_IA32_S_CET, msr);
	}
}

#endif

static __always_inline void setup_cet(struct cpuinfo_x86 *c)
{
	bool user_shstk, kernel_ibt;

	if (!IS_ENABLED(CONFIG_X86_CET))
		return;

	kernel_ibt = HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT);
	user_shstk = cpu_feature_enabled(X86_FEATURE_SHSTK) &&
		     IS_ENABLED(CONFIG_X86_USER_SHADOW_STACK);

	if (!kernel_ibt && !user_shstk)
		return;

	if (user_shstk)
		set_cpu_cap(c, X86_FEATURE_USER_SHSTK);

	if (kernel_ibt)
		wrmsrl(MSR_IA32_S_CET, CET_ENDBR_EN);
	else
		wrmsrl(MSR_IA32_S_CET, 0);

	cr4_set_bits(X86_CR4_CET);

	if (kernel_ibt && ibt_selftest()) {
		pr_err("IBT selftest: Failed!\n");
		wrmsrl(MSR_IA32_S_CET, 0);
		setup_clear_cpu_cap(X86_FEATURE_IBT);
	}
}

__noendbr void cet_disable(void)
{
	if (!(cpu_feature_enabled(X86_FEATURE_IBT) ||
	      cpu_feature_enabled(X86_FEATURE_SHSTK)))
		return;

	wrmsrl(MSR_IA32_S_CET, 0);
	wrmsrl(MSR_IA32_U_CET, 0);
}

/*
 * Some CPU features depend on higher CPUID levels, which may not always
 * be available due to CPUID level capping or broken virtualization
 * software.  Add those features to this table to auto-disable them.
 */
struct cpuid_dependent_feature {
	u32 feature;
	u32 level;
};

static const struct cpuid_dependent_feature
cpuid_dependent_features[] = {
	{ X86_FEATURE_MWAIT,		0x00000005 },
	{ X86_FEATURE_DCA,		0x00000009 },
	{ X86_FEATURE_XSAVE,		0x0000000d },
	{ 0, 0 }
};

static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{
	const struct cpuid_dependent_feature *df;

	for (df = cpuid_dependent_features; df->feature; df++) {

		if (!cpu_has(c, df->feature))
			continue;
		/*
		 * Note: cpuid_level is set to -1 if unavailable, but
		 * extended_cpuid_level is set to 0 if unavailable
		 * and the legitimate extended levels are all negative
		 * when signed; hence the weird messing around with
		 * signs here...
		 */
		if (!((s32)df->level < 0 ?
		     (u32)df->level > (u32)c->extended_cpuid_level :
		     (s32)df->level > (s32)c->cpuid_level))
			continue;

		clear_cpu_cap(c, df->feature);
		if (!warn)
			continue;

		pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
			x86_cap_flag(df->feature), df->level);
	}
}

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set it;
 * in particular, if CPUID levels 0x80000002..4 are supported, this
 * isn't used
 */

/* Look up CPU names by table lookup. */
static const char *table_lookup_model(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	const struct legacy_cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->legacy_models;

	while (info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
#endif
	return NULL;		/* Not found */
}

/* Aligned to unsigned long to avoid split lock in atomic bitmap ops */
__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
__u32 cpu_caps_set[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));

#ifdef CONFIG_X86_32
/* The 32-bit entry code needs to find cpu_entry_area. */
DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
#endif

/* Load the original GDT from the per-cpu structure */
void load_direct_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_rw(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
EXPORT_SYMBOL_GPL(load_direct_gdt);

/* Load a fixmap remapping of the per-cpu GDT */
void load_fixmap_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_ro(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
EXPORT_SYMBOL_GPL(load_fixmap_gdt);

/**
 * switch_gdt_and_percpu_base - Switch to direct GDT and runtime per CPU base
 * @cpu:	The CPU number for which this is invoked
 *
 * Invoked during early boot to switch from early GDT and early per CPU to
 * the direct GDT and the runtime per CPU area. On 32-bit the percpu base
 * switch is implicit by loading the direct GDT. On 64-bit this requires
 * an explicit GSBASE update.
 */
void __init switch_gdt_and_percpu_base(int cpu)
{
	load_direct_gdt(cpu);

#ifdef CONFIG_X86_64
	/*
	 * No need to load %gs. It is already correct.
	 *
	 * Writing %gs on 64-bit would zero GSBASE, which would make any
	 * per-CPU operation fault until the wrmsrl() below.
	 *
	 * Set GSBASE to the new offset. Until the wrmsrl() happens the
	 * early mapping is still valid. That means the GSBASE update will
	 * lose any prior per CPU data which was not copied over in
	 * setup_per_cpu_areas().
	 *
	 * This works even with stackprotector enabled because the
	 * per CPU stack canary is 0 in both per CPU areas.
	 */
	wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
#else
	/*
	 * %fs is already set to __KERNEL_PERCPU, but after switching GDT
	 * it is required to load FS again so that the 'hidden' part is
	 * updated from the new GDT. Up to this point the early per CPU
	 * translation is active. Any content of the early per CPU data
	 * which was not copied over in setup_per_cpu_areas() is lost.
	 */
	loadsegment(fs, __KERNEL_PERCPU);
#endif
}

static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q, *s;

	if (c->extended_cpuid_level < 0x80000004)
		return;

	v = (unsigned int *)c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Trim whitespace */
	p = q = s = &c->x86_model_id[0];

	while (*p == ' ')
		p++;

	while (*p) {
		/* Note the last non-whitespace index */
		if (!isspace(*p))
			s = q;

		*q++ = *p++;
	}

	*(s + 1) = '\0';
}

void detect_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	c->x86_max_cores = 1;
	if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
		return;

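	/* CPUID.(4,0):EAX[31:26] is the maximum number of addressable core IDs per package, minus one. */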
	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
	if (eax & 0x1f)
		c->x86_max_cores = (eax >> 26) + 1;
}

void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ebx, ecx, edx, l2size;

	n = c->extended_cpuid_level;

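	/* CPUID 0x80000005: ECX[31:24]/EDX[31:24] report the L1 D/I cache sizes in KB. */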
	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
#endif
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
	l2size = ecx >> 16;

#ifdef CONFIG_X86_64
	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
	/* do processor-specific cache resizing */
	if (this_cpu->legacy_cache_size)
		l2size = this_cpu->legacy_cache_size(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */
#endif

	c->x86_cache_size = l2size;
}

u16 __read_mostly tlb_lli_4k[NR_INFO];
u16 __read_mostly tlb_lli_2m[NR_INFO];
u16 __read_mostly tlb_lli_4m[NR_INFO];
u16 __read_mostly tlb_lld_4k[NR_INFO];
u16 __read_mostly tlb_lld_2m[NR_INFO];
u16 __read_mostly tlb_lld_4m[NR_INFO];
u16 __read_mostly tlb_lld_1g[NR_INFO];

static void cpu_detect_tlb(struct cpuinfo_x86 *c)
{
	if (this_cpu->c_detect_tlb)
		this_cpu->c_detect_tlb(c);

	pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
		tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
		tlb_lli_4m[ENTRIES]);

	pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
		tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
		tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
}

int detect_ht_early(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32 eax, ebx, ecx, edx;

	if (!cpu_has(c, X86_FEATURE_HT))
		return -1;

	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		return -1;

	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
		return -1;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	smp_num_siblings = (ebx & 0xff0000) >> 16;
	if (smp_num_siblings == 1)
		pr_info_once("CPU0: Hyper-Threading is disabled\n");
#endif
	return 0;
}

void detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	int index_msb, core_bits;

	if (detect_ht_early(c) < 0)
		return;

	index_msb = get_count_order(smp_num_siblings);
	c->topo.pkg_id = apic->phys_pkg_id(c->topo.initial_apicid, index_msb);

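	/* smp_num_siblings is threads per package here; convert it to threads per core. */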
	smp_num_siblings = smp_num_siblings / c->x86_max_cores;

	index_msb = get_count_order(smp_num_siblings);

	core_bits = get_count_order(c->x86_max_cores);

	c->topo.core_id = apic->phys_pkg_id(c->topo.initial_apicid, index_msb) &
		((1 << core_bits) - 1);
#endif
}

static void get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;
	int i;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (!cpu_devs[i])
			break;

		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {

			this_cpu = cpu_devs[i];
			c->x86_vendor = this_cpu->c_x86_vendor;
			return;
		}
	}

	pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
		    "CPU: Your system may be unstable.\n", v);

	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}

void cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	c->x86 = 4;
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;

		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86		= x86_family(tfms);
		c->x86_model	= x86_model(tfms);
		c->x86_stepping	= x86_stepping(tfms);

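		/* CPUID.1:EDX bit 19 = CLFLUSH; CPUID.1:EBX[15:8] is the line size in 8-byte units. */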
		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
		}
	}
}

static void apply_forced_caps(struct cpuinfo_x86 *c)
{
	int i;

	for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
		c->x86_capability[i] &= ~cpu_caps_cleared[i];
		c->x86_capability[i] |= cpu_caps_set[i];
	}
}

static void init_speculation_control(struct cpuinfo_x86 *c)
{
	/*
	 * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
	 * and they also have a different bit for STIBP support. Also,
	 * a hypervisor might have set the individual AMD bits even on
	 * Intel CPUs, for finer-grained selection of what's available.
	 */
	if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
		set_cpu_cap(c, X86_FEATURE_IBRS);
		set_cpu_cap(c, X86_FEATURE_IBPB);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}

	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
		set_cpu_cap(c, X86_FEATURE_STIBP);

	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
	    cpu_has(c, X86_FEATURE_VIRT_SSBD))
		set_cpu_cap(c, X86_FEATURE_SSBD);

	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
		set_cpu_cap(c, X86_FEATURE_IBRS);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}

	if (cpu_has(c, X86_FEATURE_AMD_IBPB))
		set_cpu_cap(c, X86_FEATURE_IBPB);

	if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
		set_cpu_cap(c, X86_FEATURE_STIBP);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}

	if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
		set_cpu_cap(c, X86_FEATURE_SSBD);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
		clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
	}
}

void get_cpu_cap(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_1_ECX] = ecx;
		c->x86_capability[CPUID_1_EDX] = edx;
	}

	/* Thermal and Power Management Leaf: level 0x00000006 (eax) */
	if (c->cpuid_level >= 0x00000006)
		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);

	/* Additional Intel-defined flags: level 0x00000007 */
	if (c->cpuid_level >= 0x00000007) {
		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_7_0_EBX] = ebx;
		c->x86_capability[CPUID_7_ECX] = ecx;
		c->x86_capability[CPUID_7_EDX] = edx;

		/* Check valid sub-leaf index before accessing it */
		if (eax >= 1) {
			cpuid_count(0x00000007, 1, &eax, &ebx, &ecx, &edx);
			c->x86_capability[CPUID_7_1_EAX] = eax;
		}
	}

	/* Extended state features: level 0x0000000d */
	if (c->cpuid_level >= 0x0000000d) {
		cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_D_1_EAX] = eax;
	}

	/* AMD-defined flags: level 0x80000001 */
	eax = cpuid_eax(0x80000000);
	c->extended_cpuid_level = eax;

	if ((eax & 0xffff0000) == 0x80000000) {
		if (eax >= 0x80000001) {
			cpuid(0x80000001, &eax, &ebx, &ecx, &edx);

			c->x86_capability[CPUID_8000_0001_ECX] = ecx;
			c->x86_capability[CPUID_8000_0001_EDX] = edx;
		}
	}

	if (c->extended_cpuid_level >= 0x80000007) {
		cpuid(0x80000007, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_8000_0007_EBX] = ebx;
		c->x86_power = edx;
	}

	if (c->extended_cpuid_level >= 0x80000008) {
		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
	}

	if (c->extended_cpuid_level >= 0x8000000a)
		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);

	if (c->extended_cpuid_level >= 0x8000001f)
		c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f);

	if (c->extended_cpuid_level >= 0x80000021)
		c->x86_capability[CPUID_8000_0021_EAX] = cpuid_eax(0x80000021);

	init_scattered_cpuid_features(c);
	init_speculation_control(c);

	/*
	 * Clear/Set all flags overridden by options, after probe.
	 * This needs to happen each time we re-probe, which may happen
	 * several times during CPU initialization.
	 */
	apply_forced_caps(c);
}

void get_cpu_address_sizes(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;
	bool vp_bits_from_cpuid = true;

	if (!cpu_has(c, X86_FEATURE_CPUID) ||
	    (c->extended_cpuid_level < 0x80000008))
		vp_bits_from_cpuid = false;

	if (vp_bits_from_cpuid) {
		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);

		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	} else {
1111fbf6449fSAdam Dunlap 		if (IS_ENABLED(CONFIG_X86_64)) {
1112fbf6449fSAdam Dunlap 			c->x86_clflush_size = 64;
1113d94a155cSKirill A. Shutemov 			c->x86_phys_bits = 36;
1114fbf6449fSAdam Dunlap 			c->x86_virt_bits = 48;
1115fbf6449fSAdam Dunlap 		} else {
1116fbf6449fSAdam Dunlap 			c->x86_clflush_size = 32;
1117fbf6449fSAdam Dunlap 			c->x86_virt_bits = 32;
1118fbf6449fSAdam Dunlap 			c->x86_phys_bits = 32;
1119fbf6449fSAdam Dunlap 
1120fbf6449fSAdam Dunlap 			if (cpu_has(c, X86_FEATURE_PAE) ||
1121fbf6449fSAdam Dunlap 			    cpu_has(c, X86_FEATURE_PSE36))
1122fbf6449fSAdam Dunlap 				c->x86_phys_bits = 36;
1123fbf6449fSAdam Dunlap 		}
1124fbf6449fSAdam Dunlap 	}
1125cc51e542SAndi Kleen 	c->x86_cache_bits = c->x86_phys_bits;
11263e325526SDave Hansen 	c->x86_cache_alignment = c->x86_clflush_size;
1127d94a155cSKirill A. Shutemov }
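
/*
 * Worked example of the leaf 0x80000008 decode above (values are
 * illustrative): EAX = 0x00003028 would yield x86_phys_bits = 0x28 (40)
 * from bits 7:0 and x86_virt_bits = 0x30 (48) from bits 15:8.
 */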
1128d94a155cSKirill A. Shutemov 
1129148f9bb8SPaul Gortmaker static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
1130aef93c8bSYinghai Lu {
1131aef93c8bSYinghai Lu #ifdef CONFIG_X86_32
1132aef93c8bSYinghai Lu 	int i;
1133aef93c8bSYinghai Lu 
1134aef93c8bSYinghai Lu 	/*
1135aef93c8bSYinghai Lu 	 * First of all, decide if this is a 486 or higher:
1136aef93c8bSYinghai Lu 	 * it's a 486 if we can modify the AC flag.
1137aef93c8bSYinghai Lu 	 */
1138aef93c8bSYinghai Lu 	if (flag_is_changeable_p(X86_EFLAGS_AC))
1139aef93c8bSYinghai Lu 		c->x86 = 4;
1140aef93c8bSYinghai Lu 	else
1141aef93c8bSYinghai Lu 		c->x86 = 3;
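
	/*
	 * A sketch of the probe above, assuming the usual EFLAGS-toggle
	 * implementation of flag_is_changeable_p() (a 386 hardwires AC to 0):
	 *
	 *	unsigned long f1, f2;
	 *
	 *	asm volatile("pushfl; popl %0" : "=r" (f1));
	 *	asm volatile("pushl %0; popfl" : : "r" (f1 ^ X86_EFLAGS_AC));
	 *	asm volatile("pushfl; popl %0" : "=r" (f2));
	 *	asm volatile("pushl %0; popfl" : : "r" (f1));	 /* restore */
	 *
	 *	return (f1 ^ f2) & X86_EFLAGS_AC;  /* non-zero on a 486 or later */
	 */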
1142aef93c8bSYinghai Lu 
1143aef93c8bSYinghai Lu 	for (i = 0; i < X86_VENDOR_NUM; i++)
1144aef93c8bSYinghai Lu 		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
1145aef93c8bSYinghai Lu 			c->x86_vendor_id[0] = 0;
1146aef93c8bSYinghai Lu 			cpu_devs[i]->c_identify(c);
1147aef93c8bSYinghai Lu 			if (c->x86_vendor_id[0]) {
1148aef93c8bSYinghai Lu 				get_cpu_vendor(c);
1149aef93c8bSYinghai Lu 				break;
1150aef93c8bSYinghai Lu 			}
1151aef93c8bSYinghai Lu 		}
1152aef93c8bSYinghai Lu #endif
1153093af8d7SYinghai Lu }
1154f7627e25SThomas Gleixner 
115536ad3513SThomas Gleixner #define NO_SPECULATION		BIT(0)
115636ad3513SThomas Gleixner #define NO_MELTDOWN		BIT(1)
115736ad3513SThomas Gleixner #define NO_SSB			BIT(2)
115836ad3513SThomas Gleixner #define NO_L1TF			BIT(3)
1159ed5194c2SAndi Kleen #define NO_MDS			BIT(4)
1160e261f209SThomas Gleixner #define MSBDS_ONLY		BIT(5)
1161f36cf386SThomas Gleixner #define NO_SWAPGS		BIT(6)
1162db4d30fbSVineela Tummalapalli #define NO_ITLB_MULTIHIT	BIT(7)
11631e41a766STony W Wang-oc #define NO_SPECTRE_V2		BIT(8)
11647df54884SPawan Gupta #define NO_MMIO			BIT(9)
11657df54884SPawan Gupta #define NO_EIBRS_PBRSB		BIT(10)
116636ad3513SThomas Gleixner 
1167f6d502fcSThomas Gleixner #define VULNWL(vendor, family, model, whitelist)	\
1168f6d502fcSThomas Gleixner 	X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)
116936ad3513SThomas Gleixner 
117036ad3513SThomas Gleixner #define VULNWL_INTEL(model, whitelist)		\
117136ad3513SThomas Gleixner 	VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)
117236ad3513SThomas Gleixner 
117336ad3513SThomas Gleixner #define VULNWL_AMD(family, whitelist)		\
117436ad3513SThomas Gleixner 	VULNWL(AMD, family, X86_MODEL_ANY, whitelist)
117536ad3513SThomas Gleixner 
117636ad3513SThomas Gleixner #define VULNWL_HYGON(family, whitelist)		\
117736ad3513SThomas Gleixner 	VULNWL(HYGON, family, X86_MODEL_ANY, whitelist)
117836ad3513SThomas Gleixner 
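/*
 * For illustration: an entry like VULNWL_INTEL(CORE_YONAH, NO_SSB) below
 * expands through the helpers above to roughly
 *
 *	X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, INTEL_FAM6_CORE_YONAH, NO_SSB)
 *
 * i.e. a struct x86_cpu_id entry whose driver_data carries the NO_* bits
 * that cpu_matches() tests later.
 */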
117936ad3513SThomas Gleixner static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
118036ad3513SThomas Gleixner 	VULNWL(ANY,	4, X86_MODEL_ANY,	NO_SPECULATION),
118136ad3513SThomas Gleixner 	VULNWL(CENTAUR,	5, X86_MODEL_ANY,	NO_SPECULATION),
118236ad3513SThomas Gleixner 	VULNWL(INTEL,	5, X86_MODEL_ANY,	NO_SPECULATION),
118336ad3513SThomas Gleixner 	VULNWL(NSC,	5, X86_MODEL_ANY,	NO_SPECULATION),
1184639475d4SMarcos Del Sol Vives 	VULNWL(VORTEX,	5, X86_MODEL_ANY,	NO_SPECULATION),
1185639475d4SMarcos Del Sol Vives 	VULNWL(VORTEX,	6, X86_MODEL_ANY,	NO_SPECULATION),
118636ad3513SThomas Gleixner 
1187ed5194c2SAndi Kleen 	/* Intel Family 6 */
11887df54884SPawan Gupta 	VULNWL_INTEL(TIGERLAKE,			NO_MMIO),
11897df54884SPawan Gupta 	VULNWL_INTEL(TIGERLAKE_L,		NO_MMIO),
11907df54884SPawan Gupta 	VULNWL_INTEL(ALDERLAKE,			NO_MMIO),
11917df54884SPawan Gupta 	VULNWL_INTEL(ALDERLAKE_L,		NO_MMIO),
11927df54884SPawan Gupta 
1193db4d30fbSVineela Tummalapalli 	VULNWL_INTEL(ATOM_SALTWELL,		NO_SPECULATION | NO_ITLB_MULTIHIT),
1194db4d30fbSVineela Tummalapalli 	VULNWL_INTEL(ATOM_SALTWELL_TABLET,	NO_SPECULATION | NO_ITLB_MULTIHIT),
1195db4d30fbSVineela Tummalapalli 	VULNWL_INTEL(ATOM_SALTWELL_MID,		NO_SPECULATION | NO_ITLB_MULTIHIT),
1196db4d30fbSVineela Tummalapalli 	VULNWL_INTEL(ATOM_BONNELL,		NO_SPECULATION | NO_ITLB_MULTIHIT),
1197db4d30fbSVineela Tummalapalli 	VULNWL_INTEL(ATOM_BONNELL_MID,		NO_SPECULATION | NO_ITLB_MULTIHIT),
119836ad3513SThomas Gleixner 
1199db4d30fbSVineela Tummalapalli 	VULNWL_INTEL(ATOM_SILVERMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1200db4d30fbSVineela Tummalapalli 	VULNWL_INTEL(ATOM_SILVERMONT_D,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1201db4d30fbSVineela Tummalapalli 	VULNWL_INTEL(ATOM_SILVERMONT_MID,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1202db4d30fbSVineela Tummalapalli 	VULNWL_INTEL(ATOM_AIRMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1203db4d30fbSVineela Tummalapalli 	VULNWL_INTEL(XEON_PHI_KNL,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1204db4d30fbSVineela Tummalapalli 	VULNWL_INTEL(XEON_PHI_KNM,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
120536ad3513SThomas Gleixner 
120636ad3513SThomas Gleixner 	VULNWL_INTEL(CORE_YONAH,		NO_SSB),
120736ad3513SThomas Gleixner 
1208db4d30fbSVineela Tummalapalli 	VULNWL_INTEL(ATOM_AIRMONT_MID,		NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
1209db4d30fbSVineela Tummalapalli 	VULNWL_INTEL(ATOM_AIRMONT_NP,		NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
121036ad3513SThomas Gleixner 
12117df54884SPawan Gupta 	VULNWL_INTEL(ATOM_GOLDMONT,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
12127df54884SPawan Gupta 	VULNWL_INTEL(ATOM_GOLDMONT_D,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
12137df54884SPawan Gupta 	VULNWL_INTEL(ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
1214f36cf386SThomas Gleixner 
1215f36cf386SThomas Gleixner 	/*
1216f36cf386SThomas Gleixner 	 * Technically, swapgs isn't serializing on AMD (despite it previously
1217f36cf386SThomas Gleixner 	 * being documented as such in the APM).  But according to AMD, %gs is
1218f36cf386SThomas Gleixner 	 * updated non-speculatively, and the issuing of %gs-relative memory
1219f36cf386SThomas Gleixner 	 * operands will be blocked until the %gs update completes, which is
1220f36cf386SThomas Gleixner 	 * good enough for our purposes.
1221f36cf386SThomas Gleixner 	 */
1222ed5194c2SAndi Kleen 
12232b129932SDaniel Sneddon 	VULNWL_INTEL(ATOM_TREMONT,		NO_EIBRS_PBRSB),
12242b129932SDaniel Sneddon 	VULNWL_INTEL(ATOM_TREMONT_L,		NO_EIBRS_PBRSB),
12252b129932SDaniel Sneddon 	VULNWL_INTEL(ATOM_TREMONT_D,		NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
1226cad14885SPawan Gupta 
1227ed5194c2SAndi Kleen 	/* AMD Family 0xf - 0x12 */
12287df54884SPawan Gupta 	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
12297df54884SPawan Gupta 	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
12307df54884SPawan Gupta 	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
12317df54884SPawan Gupta 	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
123236ad3513SThomas Gleixner 
123336ad3513SThomas Gleixner 	/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
1234e7862edaSKim Phillips 	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
1235e7862edaSKim Phillips 	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
12361e41a766STony W Wang-oc 
12371e41a766STony W Wang-oc 	/* Zhaoxin Family 7 */
12387df54884SPawan Gupta 	VULNWL(CENTAUR,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
12397df54884SPawan Gupta 	VULNWL(ZHAOXIN,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
1240fec9434aSDavid Woodhouse 	{}
1241fec9434aSDavid Woodhouse };
1242fec9434aSDavid Woodhouse 
12436b80b59bSAlexandre Chartre #define VULNBL(vendor, family, model, blacklist)	\
12446b80b59bSAlexandre Chartre 	X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, blacklist)
12456b80b59bSAlexandre Chartre 
12467e5b3c26SMark Gross #define VULNBL_INTEL_STEPPINGS(model, steppings, issues)		   \
12477e5b3c26SMark Gross 	X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6,		   \
12487e5b3c26SMark Gross 					    INTEL_FAM6_##model, steppings, \
12497e5b3c26SMark Gross 					    X86_FEATURE_ANY, issues)
12507e5b3c26SMark Gross 
12516b80b59bSAlexandre Chartre #define VULNBL_AMD(family, blacklist)		\
12526b80b59bSAlexandre Chartre 	VULNBL(AMD, family, X86_MODEL_ANY, blacklist)
12536b80b59bSAlexandre Chartre 
12546b80b59bSAlexandre Chartre #define VULNBL_HYGON(family, blacklist)		\
12556b80b59bSAlexandre Chartre 	VULNBL(HYGON, family, X86_MODEL_ANY, blacklist)
12566b80b59bSAlexandre Chartre 
12577e5b3c26SMark Gross #define SRBDS		BIT(0)
125851802186SPawan Gupta /* CPU is affected by X86_BUG_MMIO_STALE_DATA */
125951802186SPawan Gupta #define MMIO		BIT(1)
1260a992b8a4SPawan Gupta /* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */
1261a992b8a4SPawan Gupta #define MMIO_SBDS	BIT(2)
12626b80b59bSAlexandre Chartre /* CPU is affected by RETbleed, speculating where you would not expect it */
12636b80b59bSAlexandre Chartre #define RETBLEED	BIT(3)
1264be8de49bSTom Lendacky /* CPU is affected by SMT (cross-thread) return predictions */
1265be8de49bSTom Lendacky #define SMT_RSB		BIT(4)
1266fb3bd914SBorislav Petkov (AMD) /* CPU is affected by SRSO */
1267fb3bd914SBorislav Petkov (AMD) #define SRSO		BIT(5)
12688974eb58SDaniel Sneddon /* CPU is affected by GDS */
126964094e7eSLinus Torvalds #define GDS		BIT(6)
12707e5b3c26SMark Gross 
12717e5b3c26SMark Gross static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
12727e5b3c26SMark Gross 	VULNBL_INTEL_STEPPINGS(IVYBRIDGE,	X86_STEPPING_ANY,		SRBDS),
12737e5b3c26SMark Gross 	VULNBL_INTEL_STEPPINGS(HASWELL,		X86_STEPPING_ANY,		SRBDS),
12747e5b3c26SMark Gross 	VULNBL_INTEL_STEPPINGS(HASWELL_L,	X86_STEPPING_ANY,		SRBDS),
12757e5b3c26SMark Gross 	VULNBL_INTEL_STEPPINGS(HASWELL_G,	X86_STEPPING_ANY,		SRBDS),
12767a05bc95SPeter Zijlstra 	VULNBL_INTEL_STEPPINGS(HASWELL_X,	X86_STEPPING_ANY,		MMIO),
12777a05bc95SPeter Zijlstra 	VULNBL_INTEL_STEPPINGS(BROADWELL_D,	X86_STEPPING_ANY,		MMIO),
12787e5b3c26SMark Gross 	VULNBL_INTEL_STEPPINGS(BROADWELL_G,	X86_STEPPING_ANY,		SRBDS),
127951802186SPawan Gupta 	VULNBL_INTEL_STEPPINGS(BROADWELL_X,	X86_STEPPING_ANY,		MMIO),
12807e5b3c26SMark Gross 	VULNBL_INTEL_STEPPINGS(BROADWELL,	X86_STEPPING_ANY,		SRBDS),
12818974eb58SDaniel Sneddon 	VULNBL_INTEL_STEPPINGS(SKYLAKE_X,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS),
1282c9f4c45cSDave Hansen 	VULNBL_INTEL_STEPPINGS(SKYLAKE_L,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS),
1283c9f4c45cSDave Hansen 	VULNBL_INTEL_STEPPINGS(SKYLAKE,		X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS),
1284c9f4c45cSDave Hansen 	VULNBL_INTEL_STEPPINGS(KABYLAKE_L,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS),
1285c9f4c45cSDave Hansen 	VULNBL_INTEL_STEPPINGS(KABYLAKE,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS),
1286f54d4537SPawan Gupta 	VULNBL_INTEL_STEPPINGS(CANNONLAKE_L,	X86_STEPPING_ANY,		RETBLEED),
12878974eb58SDaniel Sneddon 	VULNBL_INTEL_STEPPINGS(ICELAKE_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED | GDS),
12888974eb58SDaniel Sneddon 	VULNBL_INTEL_STEPPINGS(ICELAKE_D,	X86_STEPPING_ANY,		MMIO | GDS),
12898974eb58SDaniel Sneddon 	VULNBL_INTEL_STEPPINGS(ICELAKE_X,	X86_STEPPING_ANY,		MMIO | GDS),
12908974eb58SDaniel Sneddon 	VULNBL_INTEL_STEPPINGS(COMETLAKE,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED | GDS),
12916ad0ad2bSPeter Zijlstra 	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPINGS(0x0, 0x0),	MMIO | RETBLEED),
12928974eb58SDaniel Sneddon 	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED | GDS),
12938974eb58SDaniel Sneddon 	VULNBL_INTEL_STEPPINGS(TIGERLAKE_L,	X86_STEPPING_ANY,		GDS),
12948974eb58SDaniel Sneddon 	VULNBL_INTEL_STEPPINGS(TIGERLAKE,	X86_STEPPING_ANY,		GDS),
12957a05bc95SPeter Zijlstra 	VULNBL_INTEL_STEPPINGS(LAKEFIELD,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
12968974eb58SDaniel Sneddon 	VULNBL_INTEL_STEPPINGS(ROCKETLAKE,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS),
12977a05bc95SPeter Zijlstra 	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS),
129851802186SPawan Gupta 	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D,	X86_STEPPING_ANY,		MMIO),
12997a05bc95SPeter Zijlstra 	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS),
13006b80b59bSAlexandre Chartre 
13016b80b59bSAlexandre Chartre 	VULNBL_AMD(0x15, RETBLEED),
13026b80b59bSAlexandre Chartre 	VULNBL_AMD(0x16, RETBLEED),
1303fb3bd914SBorislav Petkov (AMD) 	VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
1304a5ef7d68SPu Wen 	VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO),
1305fb3bd914SBorislav Petkov (AMD) 	VULNBL_AMD(0x19, SRSO),
13067e5b3c26SMark Gross 	{}
13077e5b3c26SMark Gross };
13087e5b3c26SMark Gross 
130993920f61SMark Gross static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which)
131036ad3513SThomas Gleixner {
131193920f61SMark Gross 	const struct x86_cpu_id *m = x86_match_cpu(table);
1312fec9434aSDavid Woodhouse 
131336ad3513SThomas Gleixner 	return m && !!(m->driver_data & which);
131436ad3513SThomas Gleixner }
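
/*
 * Example drawn from the callers below: cpu_matches(cpu_vuln_whitelist,
 * NO_SSB) is true iff x86_match_cpu() finds an entry covering the running
 * CPU whose driver_data has the NO_SSB bit set.
 */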
131517dbca11SAndi Kleen 
1316286836a7SPawan Gupta u64 x86_read_arch_cap_msr(void)
1317fec9434aSDavid Woodhouse {
1318fec9434aSDavid Woodhouse 	u64 ia32_cap = 0;
1319fec9434aSDavid Woodhouse 
1320286836a7SPawan Gupta 	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
1321286836a7SPawan Gupta 		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
1322286836a7SPawan Gupta 
1323286836a7SPawan Gupta 	return ia32_cap;
1324286836a7SPawan Gupta }
1325286836a7SPawan Gupta 
132651802186SPawan Gupta static bool arch_cap_mmio_immune(u64 ia32_cap)
132751802186SPawan Gupta {
132851802186SPawan Gupta 	return (ia32_cap & ARCH_CAP_FBSDP_NO &&
132951802186SPawan Gupta 		ia32_cap & ARCH_CAP_PSDP_NO &&
133051802186SPawan Gupta 		ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
133151802186SPawan Gupta }
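
/*
 * Going by Intel's Processor MMIO Stale Data naming, the three bits tested
 * above cover the Fill Buffer (FBSDP), Primary (PSDP) and Shared
 * Buffers/Sideband (SBDR/SSDP) stale-data propagator variants; the CPU is
 * treated as MMIO-immune only when all three are reported as not affected.
 */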
133251802186SPawan Gupta 
1333286836a7SPawan Gupta static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
1334286836a7SPawan Gupta {
1335286836a7SPawan Gupta 	u64 ia32_cap = x86_read_arch_cap_msr();
1336286836a7SPawan Gupta 
1337db4d30fbSVineela Tummalapalli 	/* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
133893920f61SMark Gross 	if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
133993920f61SMark Gross 	    !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
1340db4d30fbSVineela Tummalapalli 		setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
1341db4d30fbSVineela Tummalapalli 
134293920f61SMark Gross 	if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
13438ecc4979SDominik Brodowski 		return;
13448ecc4979SDominik Brodowski 
13458ecc4979SDominik Brodowski 	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
13461e41a766STony W Wang-oc 
134793920f61SMark Gross 	if (!cpu_matches(cpu_vuln_whitelist, NO_SPECTRE_V2))
13488ecc4979SDominik Brodowski 		setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
13498ecc4979SDominik Brodowski 
135093920f61SMark Gross 	if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
135193920f61SMark Gross 	    !(ia32_cap & ARCH_CAP_SSB_NO) &&
135224809860SKonrad Rzeszutek Wilk 	    !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
1353c456442cSKonrad Rzeszutek Wilk 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
1354c456442cSKonrad Rzeszutek Wilk 
1355e7862edaSKim Phillips 	/*
1356e7862edaSKim Phillips 	 * AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature
1357e7862edaSKim Phillips 	 * flag and protect from vendor-specific bugs via the whitelist.
1358*acaa4b5cSKim Phillips 	 *
1359*acaa4b5cSKim Phillips 	 * Don't use AutoIBRS when SNP is enabled because it degrades host
1360*acaa4b5cSKim Phillips 	 * userspace indirect branch performance.
1361e7862edaSKim Phillips 	 */
1362*acaa4b5cSKim Phillips 	if ((ia32_cap & ARCH_CAP_IBRS_ALL) ||
1363*acaa4b5cSKim Phillips 	    (cpu_has(c, X86_FEATURE_AUTOIBRS) &&
1364*acaa4b5cSKim Phillips 	     !cpu_feature_enabled(X86_FEATURE_SEV_SNP))) {
1365706d5168SSai Praneeth 		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
1366e7862edaSKim Phillips 		if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
1367e7862edaSKim Phillips 		    !(ia32_cap & ARCH_CAP_PBRSB_NO))
1368e7862edaSKim Phillips 			setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
1369e7862edaSKim Phillips 	}
1370706d5168SSai Praneeth 
137193920f61SMark Gross 	if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
137293920f61SMark Gross 	    !(ia32_cap & ARCH_CAP_MDS_NO)) {
1373ed5194c2SAndi Kleen 		setup_force_cpu_bug(X86_BUG_MDS);
137493920f61SMark Gross 		if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
1375e261f209SThomas Gleixner 			setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
1376e261f209SThomas Gleixner 	}
1377ed5194c2SAndi Kleen 
137893920f61SMark Gross 	if (!cpu_matches(cpu_vuln_whitelist, NO_SWAPGS))
1379f36cf386SThomas Gleixner 		setup_force_cpu_bug(X86_BUG_SWAPGS);
1380f36cf386SThomas Gleixner 
13811b42f017SPawan Gupta 	/*
13821b42f017SPawan Gupta 	 * When the CPU is not mitigated for TAA (TAA_NO=0), set the TAA bug when:
13831b42f017SPawan Gupta 	 *	- TSX is supported or
13841b42f017SPawan Gupta 	 *	- TSX_CTRL is present
13851b42f017SPawan Gupta 	 *
13861b42f017SPawan Gupta 	 * The TSX_CTRL check is needed for cases where TSX could be disabled
13871b42f017SPawan Gupta 	 * before the kernel boots, e.g. by kexec.
13881b42f017SPawan Gupta 	 * The TSX_CTRL check alone is not sufficient when the microcode update
13891b42f017SPawan Gupta 	 * is not present or when running as a guest that doesn't get TSX_CTRL.
13901b42f017SPawan Gupta 	 */
13911b42f017SPawan Gupta 	if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
13921b42f017SPawan Gupta 	    (cpu_has(c, X86_FEATURE_RTM) ||
13931b42f017SPawan Gupta 	     (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
13941b42f017SPawan Gupta 		setup_force_cpu_bug(X86_BUG_TAA);
13951b42f017SPawan Gupta 
13967e5b3c26SMark Gross 	/*
13977e5b3c26SMark Gross 	 * SRBDS affects CPUs which support RDRAND or RDSEED and are listed
13987e5b3c26SMark Gross 	 * in the vulnerability blacklist.
1399a992b8a4SPawan Gupta 	 *
1400a992b8a4SPawan Gupta 	 * Some of the implications and mitigation of Shared Buffers Data
1401a992b8a4SPawan Gupta 	 * Sampling (SBDS) are similar to SRBDS. Give SBDS the same treatment
1402a992b8a4SPawan Gupta 	 * as SRBDS.
14037e5b3c26SMark Gross 	 */
14047e5b3c26SMark Gross 	if ((cpu_has(c, X86_FEATURE_RDRAND) ||
14057e5b3c26SMark Gross 	     cpu_has(c, X86_FEATURE_RDSEED)) &&
1406a992b8a4SPawan Gupta 	    cpu_matches(cpu_vuln_blacklist, SRBDS | MMIO_SBDS))
14077e5b3c26SMark Gross 		setup_force_cpu_bug(X86_BUG_SRBDS);
14087e5b3c26SMark Gross 
140951802186SPawan Gupta 	/*
141051802186SPawan Gupta 	 * Processor MMIO Stale Data bug enumeration
141151802186SPawan Gupta 	 *
141251802186SPawan Gupta 	 * The affected CPU list is generally enough to enumerate the
141351802186SPawan Gupta 	 * vulnerability, but in the virtualization case also check the
141451802186SPawan Gupta 	 * ARCH_CAP MSR bits: the VMM may not want the guest to enumerate the bug.
14157df54884SPawan Gupta 	 *
14167df54884SPawan Gupta 	 * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist
14177df54884SPawan Gupta 	 * nor in the whitelist and that also don't enumerate the ARCH_CAP MMIO bits.
141851802186SPawan Gupta 	 */
14197df54884SPawan Gupta 	if (!arch_cap_mmio_immune(ia32_cap)) {
14207df54884SPawan Gupta 		if (cpu_matches(cpu_vuln_blacklist, MMIO))
142151802186SPawan Gupta 			setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
14227df54884SPawan Gupta 		else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
14237df54884SPawan Gupta 			setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN);
14247df54884SPawan Gupta 	}
142551802186SPawan Gupta 
142626aae8ccSAndrew Cooper 	if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
142726aae8ccSAndrew Cooper 		if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
14286b80b59bSAlexandre Chartre 			setup_force_cpu_bug(X86_BUG_RETBLEED);
142926aae8ccSAndrew Cooper 	}
14306b80b59bSAlexandre Chartre 
1431be8de49bSTom Lendacky 	if (cpu_matches(cpu_vuln_blacklist, SMT_RSB))
1432be8de49bSTom Lendacky 		setup_force_cpu_bug(X86_BUG_SMT_RSB);
1433be8de49bSTom Lendacky 
14341b5277c0SBorislav Petkov (AMD) 	if (!cpu_has(c, X86_FEATURE_SRSO_NO)) {
1435fb3bd914SBorislav Petkov (AMD) 		if (cpu_matches(cpu_vuln_blacklist, SRSO))
1436fb3bd914SBorislav Petkov (AMD) 			setup_force_cpu_bug(X86_BUG_SRSO);
14371b5277c0SBorislav Petkov (AMD) 	}
1438fb3bd914SBorislav Petkov (AMD) 
14398974eb58SDaniel Sneddon 	/*
14408974eb58SDaniel Sneddon 	 * Check if CPU is vulnerable to GDS. If running in a virtual machine on
14418974eb58SDaniel Sneddon 	 * an affected processor, the VMM may have disabled the use of GATHER by
14428974eb58SDaniel Sneddon 	 * disabling AVX2. The only way to do this in HW is to clear XCR0[2],
14438974eb58SDaniel Sneddon 	 * which means that AVX will be disabled.
14448974eb58SDaniel Sneddon 	 */
14458974eb58SDaniel Sneddon 	if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) &&
14468974eb58SDaniel Sneddon 	    boot_cpu_has(X86_FEATURE_AVX))
14478974eb58SDaniel Sneddon 		setup_force_cpu_bug(X86_BUG_GDS);
14488974eb58SDaniel Sneddon 
144993920f61SMark Gross 	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
14504a28bfe3SKonrad Rzeszutek Wilk 		return;
1451fec9434aSDavid Woodhouse 
1452fec9434aSDavid Woodhouse 	/* Rogue Data Cache Load? No! */
1453fec9434aSDavid Woodhouse 	if (ia32_cap & ARCH_CAP_RDCL_NO)
14544a28bfe3SKonrad Rzeszutek Wilk 		return;
1455fec9434aSDavid Woodhouse 
14564a28bfe3SKonrad Rzeszutek Wilk 	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
145717dbca11SAndi Kleen 
145893920f61SMark Gross 	if (cpu_matches(cpu_vuln_whitelist, NO_L1TF))
145917dbca11SAndi Kleen 		return;
146017dbca11SAndi Kleen 
146117dbca11SAndi Kleen 	setup_force_cpu_bug(X86_BUG_L1TF);
1462fec9434aSDavid Woodhouse }
1463fec9434aSDavid Woodhouse 
146434048c9eSPaolo Ciarrocchi /*
14658990cac6SPavel Tatashin  * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
14668990cac6SPavel Tatashin  * unfortunately, that's not true in practice because of early VIA
14678990cac6SPavel Tatashin  * chips and (more importantly) broken virtualizers that are not easy
14688990cac6SPavel Tatashin  * to detect. In the latter case it doesn't even *fail* reliably, so
14698990cac6SPavel Tatashin  * probing for it doesn't even work. Disable it completely on 32-bit
14708990cac6SPavel Tatashin  * unless we can find a reliable way to detect all the broken cases.
14718990cac6SPavel Tatashin  * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
14728990cac6SPavel Tatashin  */
14739b3661cdSBorislav Petkov static void detect_nopl(void)
14748990cac6SPavel Tatashin {
14758990cac6SPavel Tatashin #ifdef CONFIG_X86_32
14769b3661cdSBorislav Petkov 	setup_clear_cpu_cap(X86_FEATURE_NOPL);
14778990cac6SPavel Tatashin #else
14789b3661cdSBorislav Petkov 	setup_force_cpu_cap(X86_FEATURE_NOPL);
14798990cac6SPavel Tatashin #endif
14808990cac6SPavel Tatashin }
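
/*
 * For reference: NOPL is the multi-byte NOP with a ModRM memory operand,
 * opcode 0f 1f /0; e.g. "nopl (%eax)" encodes as 0f 1f 00, the 3-byte
 * form. The feature flag gates the use of these NOPs for instruction
 * padding elsewhere in the kernel.
 */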
14818990cac6SPavel Tatashin 
14828990cac6SPavel Tatashin /*
14831ef5423aSMike Hommey  * We parse cpu parameters early because fpu__init_system() is executed
14841ef5423aSMike Hommey  * before parse_early_param().
14851ef5423aSMike Hommey  */
14861ef5423aSMike Hommey static void __init cpu_parse_early_param(void)
14871ef5423aSMike Hommey {
14881ef5423aSMike Hommey 	char arg[128];
14891625c833SBorislav Petkov 	char *argptr = arg, *opt;
14901625c833SBorislav Petkov 	int arglen, taint = 0;
14911ef5423aSMike Hommey 
14921ef5423aSMike Hommey #ifdef CONFIG_X86_32
14931ef5423aSMike Hommey 	if (cmdline_find_option_bool(boot_command_line, "no387"))
14941ef5423aSMike Hommey #ifdef CONFIG_MATH_EMULATION
14951ef5423aSMike Hommey 		setup_clear_cpu_cap(X86_FEATURE_FPU);
14961ef5423aSMike Hommey #else
14971ef5423aSMike Hommey 		pr_err("Option 'no387' requires CONFIG_MATH_EMULATION to be enabled.\n");
14981ef5423aSMike Hommey #endif
14991ef5423aSMike Hommey 
15001ef5423aSMike Hommey 	if (cmdline_find_option_bool(boot_command_line, "nofxsr"))
15011ef5423aSMike Hommey 		setup_clear_cpu_cap(X86_FEATURE_FXSR);
15021ef5423aSMike Hommey #endif
15031ef5423aSMike Hommey 
15041ef5423aSMike Hommey 	if (cmdline_find_option_bool(boot_command_line, "noxsave"))
15051ef5423aSMike Hommey 		setup_clear_cpu_cap(X86_FEATURE_XSAVE);
15061ef5423aSMike Hommey 
15071ef5423aSMike Hommey 	if (cmdline_find_option_bool(boot_command_line, "noxsaveopt"))
15081ef5423aSMike Hommey 		setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
15091ef5423aSMike Hommey 
15101ef5423aSMike Hommey 	if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
15111ef5423aSMike Hommey 		setup_clear_cpu_cap(X86_FEATURE_XSAVES);
15121ef5423aSMike Hommey 
15130dc2a760SRick Edgecombe 	if (cmdline_find_option_bool(boot_command_line, "nousershstk"))
15140dc2a760SRick Edgecombe 		setup_clear_cpu_cap(X86_FEATURE_USER_SHSTK);
15150dc2a760SRick Edgecombe 
15161ef5423aSMike Hommey 	arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg));
15171ef5423aSMike Hommey 	if (arglen <= 0)
15181ef5423aSMike Hommey 		return;
15191ef5423aSMike Hommey 
15201ef5423aSMike Hommey 	pr_info("Clearing CPUID bits:");
15211ef5423aSMike Hommey 
15221625c833SBorislav Petkov 	while (argptr) {
15231625c833SBorislav Petkov 		bool found __maybe_unused = false;
15241625c833SBorislav Petkov 		unsigned int bit;
15251ef5423aSMike Hommey 
15261625c833SBorislav Petkov 		opt = strsep(&argptr, ",");
15271625c833SBorislav Petkov 
15281625c833SBorislav Petkov 		/*
15291625c833SBorislav Petkov 		 * Handle naked numbers first for feature flags which don't
15301625c833SBorislav Petkov 		 * have names.
15311625c833SBorislav Petkov 		 */
15321625c833SBorislav Petkov 		if (!kstrtouint(opt, 10, &bit)) {
15331625c833SBorislav Petkov 			if (bit < NCAPINTS * 32) {
15341625c833SBorislav Petkov 
15351625c833SBorislav Petkov 				/* empty-string, i.e., ""-defined feature flags */
15361625c833SBorislav Petkov 				if (!x86_cap_flags[bit])
15371625c833SBorislav Petkov 					pr_cont(" " X86_CAP_FMT_NUM, x86_cap_flag_num(bit));
15381625c833SBorislav Petkov 				else
15391ef5423aSMike Hommey 					pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit));
15401625c833SBorislav Petkov 
15411ef5423aSMike Hommey 				setup_clear_cpu_cap(bit);
15421625c833SBorislav Petkov 				taint++;
15431ef5423aSMike Hommey 			}
15441625c833SBorislav Petkov 			/*
15451625c833SBorislav Petkov 			 * The assumption is that there are no feature names consisting
15461625c833SBorislav Petkov 			 * only of numbers, so go to the next argument.
15471625c833SBorislav Petkov 			 */
15481625c833SBorislav Petkov 			continue;
15491625c833SBorislav Petkov 		}
15501625c833SBorislav Petkov 
15511625c833SBorislav Petkov 		for (bit = 0; bit < 32 * NCAPINTS; bit++) {
15521625c833SBorislav Petkov 			if (!x86_cap_flag(bit))
15531625c833SBorislav Petkov 				continue;
15541625c833SBorislav Petkov 
15551625c833SBorislav Petkov 			if (strcmp(x86_cap_flag(bit), opt))
15561625c833SBorislav Petkov 				continue;
15571625c833SBorislav Petkov 
15581625c833SBorislav Petkov 			pr_cont(" %s", opt);
15591625c833SBorislav Petkov 			setup_clear_cpu_cap(bit);
15601625c833SBorislav Petkov 			taint++;
15611625c833SBorislav Petkov 			found = true;
15621625c833SBorislav Petkov 			break;
15631625c833SBorislav Petkov 		}
15641625c833SBorislav Petkov 
15651625c833SBorislav Petkov 		if (!found)
15661625c833SBorislav Petkov 			pr_cont(" (unknown: %s)", opt);
15671625c833SBorislav Petkov 	}
15681ef5423aSMike Hommey 	pr_cont("\n");
15691625c833SBorislav Petkov 
15701625c833SBorislav Petkov 	if (taint)
15711625c833SBorislav Petkov 		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
15721ef5423aSMike Hommey }
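
/*
 * Illustrative usage (the bit number is hypothetical): booting with
 *
 *	clearcpuid=xsave,440
 *
 * clears the XSAVE feature bit by name and bit 440 by number, prints the
 * cleared bits, and taints the kernel with TAINT_CPU_OUT_OF_SPEC.
 */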
15731ef5423aSMike Hommey 
15741ef5423aSMike Hommey /*
157534048c9eSPaolo Ciarrocchi  * Do minimum CPU detection early.
157634048c9eSPaolo Ciarrocchi  * Fields really needed: vendor, cpuid_level, family, model, stepping,
157734048c9eSPaolo Ciarrocchi  * cache alignment.
157834048c9eSPaolo Ciarrocchi  * The others are not touched to avoid unwanted side effects.
157934048c9eSPaolo Ciarrocchi  *
1580a1652bb8SJean Delvare  * WARNING: this function is only called on the boot CPU.  Don't add code
1581a1652bb8SJean Delvare  * here that is supposed to run on all CPUs.
158234048c9eSPaolo Ciarrocchi  */
15833da99c97SYinghai Lu static void __init early_identify_cpu(struct cpuinfo_x86 *c)
1584f7627e25SThomas Gleixner {
15850e96f31eSJordan Borgner 	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
15860a488a53SYinghai Lu 	c->extended_cpuid_level = 0;
15870a488a53SYinghai Lu 
15882893cc8fSMatthew Whitehead 	if (!have_cpuid_p())
15892893cc8fSMatthew Whitehead 		identify_cpu_without_cpuid(c);
15902893cc8fSMatthew Whitehead 
1591aef93c8bSYinghai Lu 	/* Cyrix could have CPUID enabled via c_identify() */
159205fb3c19SAndy Lutomirski 	if (have_cpuid_p()) {
1593f7627e25SThomas Gleixner 		cpu_detect(c);
15943da99c97SYinghai Lu 		get_cpu_vendor(c);
15953da99c97SYinghai Lu 		get_cpu_cap(c);
159678d1b296SBorislav Petkov 		setup_force_cpu_cap(X86_FEATURE_CPUID);
15971ef5423aSMike Hommey 		cpu_parse_early_param();
159812cf105cSKrzysztof Helt 
159910a434fcSYinghai Lu 		if (this_cpu->c_early_init)
160010a434fcSYinghai Lu 			this_cpu->c_early_init(c);
16013da99c97SYinghai Lu 
1602f6e9456cSRobert Richter 		c->cpu_index = 0;
1603b38b0665SH. Peter Anvin 		filter_cpuid_features(c, false);
1604de5397adSFenghua Yu 
1605a110b5ecSBorislav Petkov 		if (this_cpu->c_bsp_init)
1606a110b5ecSBorislav Petkov 			this_cpu->c_bsp_init(c);
160778d1b296SBorislav Petkov 	} else {
160878d1b296SBorislav Petkov 		setup_clear_cpu_cap(X86_FEATURE_CPUID);
160905fb3c19SAndy Lutomirski 	}
1610c3b83598SBorislav Petkov 
1611fbf6449fSAdam Dunlap 	get_cpu_address_sizes(c);
1612fbf6449fSAdam Dunlap 
1613c3b83598SBorislav Petkov 	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
1614a89f040fSThomas Gleixner 
16154a28bfe3SKonrad Rzeszutek Wilk 	cpu_set_bug_bits(c);
161699c6fa25SDavid Woodhouse 
1617ebb1064eSFenghua Yu 	sld_setup(c);
16186650cdd9SPeter Zijlstra (Intel) 
1619b8b7abaeSAndy Lutomirski #ifdef CONFIG_X86_32
1620b8b7abaeSAndy Lutomirski 	/*
1621b8b7abaeSAndy Lutomirski 	 * Regardless of whether PCID is enumerated, the SDM says
1622b8b7abaeSAndy Lutomirski 	 * that it can't be enabled in 32-bit mode.
1623b8b7abaeSAndy Lutomirski 	 */
1624b8b7abaeSAndy Lutomirski 	setup_clear_cpu_cap(X86_FEATURE_PCID);
1625b8b7abaeSAndy Lutomirski #endif
1626372fddf7SKirill A. Shutemov 
1627372fddf7SKirill A. Shutemov 	/*
1628372fddf7SKirill A. Shutemov 	 * Later in the boot process pgtable_l5_enabled() relies on
1629372fddf7SKirill A. Shutemov 	 * cpu_feature_enabled(X86_FEATURE_LA57). If 5-level paging is not
1630372fddf7SKirill A. Shutemov 	 * enabled by this point, we need to clear the feature bit to avoid
1631372fddf7SKirill A. Shutemov 	 * false positives at a later stage.
1632372fddf7SKirill A. Shutemov 	 *
1633372fddf7SKirill A. Shutemov 	 * pgtable_l5_enabled() can be false here for several reasons:
1634372fddf7SKirill A. Shutemov 	 *  - 5-level paging is disabled compile-time;
1635372fddf7SKirill A. Shutemov 	 *  - it's 32-bit kernel;
1636372fddf7SKirill A. Shutemov 	 *  - machine doesn't support 5-level paging;
1637372fddf7SKirill A. Shutemov 	 *  - user specified 'no5lvl' in kernel command line.
1638372fddf7SKirill A. Shutemov 	 */
1639372fddf7SKirill A. Shutemov 	if (!pgtable_l5_enabled())
1640372fddf7SKirill A. Shutemov 		setup_clear_cpu_cap(X86_FEATURE_LA57);
16418990cac6SPavel Tatashin 
16429b3661cdSBorislav Petkov 	detect_nopl();
1643f7627e25SThomas Gleixner }
1644f7627e25SThomas Gleixner 
16459d31d35bSYinghai Lu void __init early_cpu_init(void)
16469d31d35bSYinghai Lu {
164702dde8b4SJan Beulich 	const struct cpu_dev *const *cdev;
164810a434fcSYinghai Lu 	int count = 0;
16499d31d35bSYinghai Lu 
1650ac23f253SJan Beulich #ifdef CONFIG_PROCESSOR_SELECT
16511b74dde7SChen Yucong 	pr_info("KERNEL supported cpus:\n");
165231c997caSIngo Molnar #endif
165331c997caSIngo Molnar 
165410a434fcSYinghai Lu 	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
165502dde8b4SJan Beulich 		const struct cpu_dev *cpudev = *cdev;
16569d31d35bSYinghai Lu 
165710a434fcSYinghai Lu 		if (count >= X86_VENDOR_NUM)
165810a434fcSYinghai Lu 			break;
165910a434fcSYinghai Lu 		cpu_devs[count] = cpudev;
166010a434fcSYinghai Lu 		count++;
166110a434fcSYinghai Lu 
1662ac23f253SJan Beulich #ifdef CONFIG_PROCESSOR_SELECT
166331c997caSIngo Molnar 		{
166431c997caSIngo Molnar 			unsigned int j;
166531c997caSIngo Molnar 
166610a434fcSYinghai Lu 			for (j = 0; j < 2; j++) {
166710a434fcSYinghai Lu 				if (!cpudev->c_ident[j])
166810a434fcSYinghai Lu 					continue;
16691b74dde7SChen Yucong 				pr_info("  %s %s\n", cpudev->c_vendor,
167010a434fcSYinghai Lu 					cpudev->c_ident[j]);
167110a434fcSYinghai Lu 			}
167210a434fcSYinghai Lu 		}
16730388423dSDave Jones #endif
167431c997caSIngo Molnar 	}
16759d31d35bSYinghai Lu 	early_identify_cpu(&boot_cpu_data);
1676f7627e25SThomas Gleixner }
1677f7627e25SThomas Gleixner 
1678415de440SJane Malalane static bool detect_null_seg_behavior(void)
16797a5d6704SAndy Lutomirski {
168058a5aac5SAndy Lutomirski 	/*
16817a5d6704SAndy Lutomirski 	 * Empirically, writing zero to a segment selector on AMD does
16827a5d6704SAndy Lutomirski 	 * not clear the base, whereas writing zero to a segment
16837a5d6704SAndy Lutomirski 	 * selector on Intel does clear the base.  Intel's behavior
16847a5d6704SAndy Lutomirski 	 * allows slightly faster context switches in the common case
16857a5d6704SAndy Lutomirski 	 * where GS is unused by the prev and next threads.
168658a5aac5SAndy Lutomirski 	 *
16877a5d6704SAndy Lutomirski 	 * Since neither vendor documents this anywhere that I can see,
1688d9f6e12fSIngo Molnar 	 * detect it directly instead of hard-coding the choice by
16897a5d6704SAndy Lutomirski 	 * vendor.
16907a5d6704SAndy Lutomirski 	 *
16917a5d6704SAndy Lutomirski 	 * I've designated AMD's behavior as the "bug" because it's
16927a5d6704SAndy Lutomirski 	 * counterintuitive and less friendly.
169358a5aac5SAndy Lutomirski 	 */
16947a5d6704SAndy Lutomirski 
16957a5d6704SAndy Lutomirski 	unsigned long old_base, tmp;
16967a5d6704SAndy Lutomirski 	rdmsrl(MSR_FS_BASE, old_base);
16977a5d6704SAndy Lutomirski 	wrmsrl(MSR_FS_BASE, 1);
16987a5d6704SAndy Lutomirski 	loadsegment(fs, 0);
16997a5d6704SAndy Lutomirski 	rdmsrl(MSR_FS_BASE, tmp);
17007a5d6704SAndy Lutomirski 	wrmsrl(MSR_FS_BASE, old_base);
1701415de440SJane Malalane 	return tmp == 0;
1702415de440SJane Malalane }
1703415de440SJane Malalane 
1704415de440SJane Malalane void check_null_seg_clears_base(struct cpuinfo_x86 *c)
1705415de440SJane Malalane {
1706415de440SJane Malalane 	/* BUG_NULL_SEG is only relevant with 64bit userspace */
1707415de440SJane Malalane 	if (!IS_ENABLED(CONFIG_X86_64))
1708415de440SJane Malalane 		return;
1709415de440SJane Malalane 
17105b909d4aSKim Phillips 	if (cpu_has(c, X86_FEATURE_NULL_SEL_CLR_BASE))
1711415de440SJane Malalane 		return;
1712415de440SJane Malalane 
1713415de440SJane Malalane 	/*
1714415de440SJane Malalane 	 * The CPUID bit above wasn't set. If this kernel is still running
1715415de440SJane Malalane 	 * as an HV guest, then the HV has decided not to advertise
1716415de440SJane Malalane 	 * that CPUID bit for whatever reason.	For example, one
1717415de440SJane Malalane 	 * member of the migration pool might be vulnerable.  Which
1718415de440SJane Malalane 	 * means the bug is present: set the BUG flag and return.
1719415de440SJane Malalane 	 */
1720415de440SJane Malalane 	if (cpu_has(c, X86_FEATURE_HYPERVISOR)) {
1721415de440SJane Malalane 		set_cpu_bug(c, X86_BUG_NULL_SEG);
1722415de440SJane Malalane 		return;
1723415de440SJane Malalane 	}
1724415de440SJane Malalane 
1725415de440SJane Malalane 	/*
1726415de440SJane Malalane 	 * Zen2 CPUs also have this behaviour, but no CPUID bit.
1727415de440SJane Malalane 	 * 0x18 is the respective family for Hygon.
1728415de440SJane Malalane 	 */
1729415de440SJane Malalane 	if ((c->x86 == 0x17 || c->x86 == 0x18) &&
1730415de440SJane Malalane 	    detect_null_seg_behavior())
1731415de440SJane Malalane 		return;
1732415de440SJane Malalane 
1733415de440SJane Malalane 	/* All the remaining ones are affected */
1734415de440SJane Malalane 	set_cpu_bug(c, X86_BUG_NULL_SEG);
1735f7627e25SThomas Gleixner }
1736f7627e25SThomas Gleixner 
1737148f9bb8SPaul Gortmaker static void generic_identify(struct cpuinfo_x86 *c)
1738f7627e25SThomas Gleixner {
17393da99c97SYinghai Lu 	c->extended_cpuid_level = 0;
1740f7627e25SThomas Gleixner 
1741aef93c8bSYinghai Lu 	if (!have_cpuid_p())
1742aef93c8bSYinghai Lu 		identify_cpu_without_cpuid(c);
1743f7627e25SThomas Gleixner 
1744aef93c8bSYinghai Lu 	/* Cyrix could have CPUID enabled via c_identify() */
1745a9853dd6SIngo Molnar 	if (!have_cpuid_p())
1746aef93c8bSYinghai Lu 		return;
1747aef93c8bSYinghai Lu 
17483da99c97SYinghai Lu 	cpu_detect(c);
17493da99c97SYinghai Lu 
17503da99c97SYinghai Lu 	get_cpu_vendor(c);
17513da99c97SYinghai Lu 
17523da99c97SYinghai Lu 	get_cpu_cap(c);
17533da99c97SYinghai Lu 
1754d94a155cSKirill A. Shutemov 	get_cpu_address_sizes(c);
1755d94a155cSKirill A. Shutemov 
1756f7627e25SThomas Gleixner 	if (c->cpuid_level >= 0x00000001) {
1757b9655e70SThomas Gleixner 		c->topo.initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
1758b89d3b3eSYinghai Lu #ifdef CONFIG_X86_32
1759c8e56d20SBorislav Petkov # ifdef CONFIG_SMP
1760b9655e70SThomas Gleixner 		c->topo.apicid = apic->phys_pkg_id(c->topo.initial_apicid, 0);
1761f7627e25SThomas Gleixner # else
1762b9655e70SThomas Gleixner 		c->topo.apicid = c->topo.initial_apicid;
1763f7627e25SThomas Gleixner # endif
1764b89d3b3eSYinghai Lu #endif
176502fb601dSThomas Gleixner 		c->topo.pkg_id = c->topo.initial_apicid;
1766f7627e25SThomas Gleixner 	}
1767f7627e25SThomas Gleixner 
1768f7627e25SThomas Gleixner 	get_model_name(c); /* Default name */
1769f7627e25SThomas Gleixner 
17700230bb03SAndy Lutomirski 	/*
17710230bb03SAndy Lutomirski 	 * ESPFIX is a strange bug.  All real CPUs have it.  Paravirt
17720230bb03SAndy Lutomirski 	 * systems that run Linux at CPL > 0 may or may not have the
17730230bb03SAndy Lutomirski 	 * issue, but, even if they have the issue, there's absolutely
17740230bb03SAndy Lutomirski 	 * nothing we can do about it because we can't use the real IRET
17750230bb03SAndy Lutomirski 	 * instruction.
17760230bb03SAndy Lutomirski 	 *
17770230bb03SAndy Lutomirski 	 * NB: For the time being, only 32-bit kernels support
17780230bb03SAndy Lutomirski 	 * X86_BUG_ESPFIX as such.  64-bit kernels directly choose
17790230bb03SAndy Lutomirski 	 * whether to apply espfix using paravirt hooks.  If any
17800230bb03SAndy Lutomirski 	 * non-paravirt system ever shows up that does *not* have the
17810230bb03SAndy Lutomirski 	 * ESPFIX issue, we can change this.
17820230bb03SAndy Lutomirski 	 */
17830230bb03SAndy Lutomirski #ifdef CONFIG_X86_32
17840230bb03SAndy Lutomirski 	set_cpu_bug(c, X86_BUG_ESPFIX);
17850230bb03SAndy Lutomirski #endif
1786f7627e25SThomas Gleixner }
1787f7627e25SThomas Gleixner 
1788f7627e25SThomas Gleixner /*
17899d85eb91SThomas Gleixner  * Validate that ACPI/mptables have the same information about the
17909d85eb91SThomas Gleixner  * effective APIC id and update the package map.
1791d49597fdSThomas Gleixner  */
17929d85eb91SThomas Gleixner static void validate_apic_and_package_id(struct cpuinfo_x86 *c)
1793d49597fdSThomas Gleixner {
1794d49597fdSThomas Gleixner #ifdef CONFIG_SMP
17958aa2a417SThomas Gleixner 	unsigned int cpu = smp_processor_id();
17968aa2a417SThomas Gleixner 	u32 apicid;
1797d49597fdSThomas Gleixner 
1798d49597fdSThomas Gleixner 	apicid = apic->cpu_present_to_apicid(cpu);
1799d49597fdSThomas Gleixner 
1800b9655e70SThomas Gleixner 	if (apicid != c->topo.apicid) {
18019d85eb91SThomas Gleixner 		pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n",
1802b9655e70SThomas Gleixner 		       cpu, apicid, c->topo.initial_apicid);
1803d49597fdSThomas Gleixner 	}
180402fb601dSThomas Gleixner 	BUG_ON(topology_update_package_map(c->topo.pkg_id, cpu));
18058a169ed4SThomas Gleixner 	BUG_ON(topology_update_die_map(c->topo.die_id, cpu));
1806d49597fdSThomas Gleixner #else
180722dc9631SThomas Gleixner 	c->topo.logical_pkg_id = 0;
1808d49597fdSThomas Gleixner #endif
1809d49597fdSThomas Gleixner }
1810d49597fdSThomas Gleixner 
1811d49597fdSThomas Gleixner /*
1812f7627e25SThomas Gleixner  * This does the hard work of actually picking apart the CPU stuff...
1813f7627e25SThomas Gleixner  */
1814148f9bb8SPaul Gortmaker static void identify_cpu(struct cpuinfo_x86 *c)
1815f7627e25SThomas Gleixner {
1816f7627e25SThomas Gleixner 	int i;
1817f7627e25SThomas Gleixner 
1818f7627e25SThomas Gleixner 	c->loops_per_jiffy = loops_per_jiffy;
181924dbc600SGustavo A. R. Silva 	c->x86_cache_size = 0;
1820f7627e25SThomas Gleixner 	c->x86_vendor = X86_VENDOR_UNKNOWN;
1821b399151cSJia Zhang 	c->x86_model = c->x86_stepping = 0;	/* So far unknown... */
1822f7627e25SThomas Gleixner 	c->x86_vendor_id[0] = '\0'; /* Unset */
1823f7627e25SThomas Gleixner 	c->x86_model_id[0] = '\0';  /* Unset */
1824f7627e25SThomas Gleixner 	c->x86_max_cores = 1;
1825102bbe3aSYinghai Lu 	c->x86_coreid_bits = 0;
1826e3c0c5d5SThomas Gleixner 	c->topo.cu_id = 0xff;
18276e290323SThomas Gleixner 	c->topo.llc_id = BAD_APICID;
18286e290323SThomas Gleixner 	c->topo.l2c_id = BAD_APICID;
182911fdd252SYinghai Lu #ifdef CONFIG_X86_64
1830102bbe3aSYinghai Lu 	c->x86_clflush_size = 64;
183113c6c532SJan Beulich 	c->x86_phys_bits = 36;
183213c6c532SJan Beulich 	c->x86_virt_bits = 48;
1833102bbe3aSYinghai Lu #else
1834102bbe3aSYinghai Lu 	c->cpuid_level = -1;	/* CPUID not detected */
1835f7627e25SThomas Gleixner 	c->x86_clflush_size = 32;
183613c6c532SJan Beulich 	c->x86_phys_bits = 32;
183713c6c532SJan Beulich 	c->x86_virt_bits = 32;
1838102bbe3aSYinghai Lu #endif
1839102bbe3aSYinghai Lu 	c->x86_cache_alignment = c->x86_clflush_size;
18400e96f31eSJordan Borgner 	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
1841b47ce1feSSean Christopherson #ifdef CONFIG_X86_VMX_FEATURE_NAMES
1842b47ce1feSSean Christopherson 	memset(&c->vmx_capability, 0, sizeof(c->vmx_capability));
1843b47ce1feSSean Christopherson #endif
1844f7627e25SThomas Gleixner 
1845f7627e25SThomas Gleixner 	generic_identify(c);
1846f7627e25SThomas Gleixner 
18473898534dSAndi Kleen 	if (this_cpu->c_identify)
1848f7627e25SThomas Gleixner 		this_cpu->c_identify(c);
1849f7627e25SThomas Gleixner 
18506a6256f9SAdam Buchbinder 	/* Clear/Set all flags overridden by options, after probe */
18518bf1ebcaSAndy Lutomirski 	apply_forced_caps(c);
18522759c328SYinghai Lu 
1853102bbe3aSYinghai Lu #ifdef CONFIG_X86_64
1854b9655e70SThomas Gleixner 	c->topo.apicid = apic->phys_pkg_id(c->topo.initial_apicid, 0);
1855102bbe3aSYinghai Lu #endif
1856102bbe3aSYinghai Lu 
185804c30245SBorislav Petkov (AMD) 	/*
185904c30245SBorislav Petkov (AMD) 	 * Set default APIC and TSC_DEADLINE MSR fencing flag. AMD and
186004c30245SBorislav Petkov (AMD) 	 * Hygon will clear it in ->c_init() below.
186104c30245SBorislav Petkov (AMD) 	 */
186204c30245SBorislav Petkov (AMD) 	set_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
186304c30245SBorislav Petkov (AMD) 
1864f7627e25SThomas Gleixner 	/*
1865f7627e25SThomas Gleixner 	 * Vendor-specific initialization.  In this section we
1866f7627e25SThomas Gleixner 	 * canonicalize the feature flags, meaning if there are
1867f7627e25SThomas Gleixner 	 * features a certain CPU supports which CPUID doesn't
1868f7627e25SThomas Gleixner 	 * tell us, CPUID claiming incorrect flags, or other bugs,
1869f7627e25SThomas Gleixner 	 * we handle them here.
1870f7627e25SThomas Gleixner 	 *
1871f7627e25SThomas Gleixner 	 * At the end of this section, c->x86_capability better
1872f7627e25SThomas Gleixner 	 * indicate the features this CPU genuinely supports!
1873f7627e25SThomas Gleixner 	 */
1874f7627e25SThomas Gleixner 	if (this_cpu->c_init)
1875f7627e25SThomas Gleixner 		this_cpu->c_init(c);
1876f7627e25SThomas Gleixner 
1877f7627e25SThomas Gleixner 	/* Disable the PN if appropriate */
1878f7627e25SThomas Gleixner 	squash_the_stupid_serial_number(c);
1879f7627e25SThomas Gleixner 
1880aa35f896SRicardo Neri 	/* Set up SMEP/SMAP/UMIP */
1881b2cc2a07SH. Peter Anvin 	setup_smep(c);
1882b2cc2a07SH. Peter Anvin 	setup_smap(c);
1883aa35f896SRicardo Neri 	setup_umip(c);
1884b2cc2a07SH. Peter Anvin 
1885dd649bd0SAndy Lutomirski 	/* Enable FSGSBASE instructions if available. */
1886742c45c3SAndi Kleen 	if (cpu_has(c, X86_FEATURE_FSGSBASE)) {
1887dd649bd0SAndy Lutomirski 		cr4_set_bits(X86_CR4_FSGSBASE);
1888742c45c3SAndi Kleen 		elf_hwcap2 |= HWCAP2_FSGSBASE;
1889742c45c3SAndi Kleen 	}
1890dd649bd0SAndy Lutomirski 
1891f7627e25SThomas Gleixner 	/*
18920f3fa48aSIngo Molnar 	 * The vendor-specific functions might have changed features.
18930f3fa48aSIngo Molnar 	 * Now we do "generic changes."
1894f7627e25SThomas Gleixner 	 */
1895f7627e25SThomas Gleixner 
1896b38b0665SH. Peter Anvin 	/* Filter out anything that depends on CPUID levels we don't have */
1897b38b0665SH. Peter Anvin 	filter_cpuid_features(c, true);
1898b38b0665SH. Peter Anvin 
1899f7627e25SThomas Gleixner 	/* If the model name is still unset, do table lookup. */
1900f7627e25SThomas Gleixner 	if (!c->x86_model_id[0]) {
190102dde8b4SJan Beulich 		const char *p;
1902f7627e25SThomas Gleixner 		p = table_lookup_model(c);
1903f7627e25SThomas Gleixner 		if (p)
1904f7627e25SThomas Gleixner 			strcpy(c->x86_model_id, p);
1905f7627e25SThomas Gleixner 		else
1906f7627e25SThomas Gleixner 			/* Last resort... */
1907f7627e25SThomas Gleixner 			sprintf(c->x86_model_id, "%02x/%02x",
1908f7627e25SThomas Gleixner 				c->x86, c->x86_model);
1909f7627e25SThomas Gleixner 	}
1910f7627e25SThomas Gleixner 
1911102bbe3aSYinghai Lu #ifdef CONFIG_X86_64
1912102bbe3aSYinghai Lu 	detect_ht(c);
1913102bbe3aSYinghai Lu #endif
1914102bbe3aSYinghai Lu 
191549d859d7SH. Peter Anvin 	x86_init_rdrand(c);
191606976945SDave Hansen 	setup_pku(c);
1917991625f3SPeter Zijlstra 	setup_cet(c);
19183e0c3737SYinghai Lu 
19193e0c3737SYinghai Lu 	/*
19206a6256f9SAdam Buchbinder 	 * Clear/Set all flags overridden by options; this needs to happen
19213e0c3737SYinghai Lu 	 * before the SMP all-CPUs capability AND below.
19223e0c3737SYinghai Lu 	 */
19238bf1ebcaSAndy Lutomirski 	apply_forced_caps(c);
19243e0c3737SYinghai Lu 
1925f7627e25SThomas Gleixner 	/*
1926f7627e25SThomas Gleixner 	 * On SMP, boot_cpu_data holds the common feature set between
1927f7627e25SThomas Gleixner 	 * all CPUs; so make sure that we indicate which features are
1928f7627e25SThomas Gleixner 	 * common between the CPUs.  The first time this routine gets
1929f7627e25SThomas Gleixner 	 * executed, c == &boot_cpu_data.
1930f7627e25SThomas Gleixner 	 */
1931f7627e25SThomas Gleixner 	if (c != &boot_cpu_data) {
1932f7627e25SThomas Gleixner 		/* AND the already accumulated flags with these */
1933f7627e25SThomas Gleixner 		for (i = 0; i < NCAPINTS; i++)
1934f7627e25SThomas Gleixner 			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
193565fc985bSBorislav Petkov 
193665fc985bSBorislav Petkov 		/* OR, i.e. replicate the bug flags */
193765fc985bSBorislav Petkov 		for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
193865fc985bSBorislav Petkov 			c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
1939f7627e25SThomas Gleixner 	}
1940f7627e25SThomas Gleixner 
19410dcab41dSTony Luck 	ppin_init(c);
19420dcab41dSTony Luck 
1943f7627e25SThomas Gleixner 	/* Init Machine Check Exception if available. */
19445e09954aSBorislav Petkov 	mcheck_cpu_init(c);
194530d432dfSAndi Kleen 
194630d432dfSAndi Kleen 	select_idle_routine(c);
1947102bbe3aSYinghai Lu 
1948de2d9445STejun Heo #ifdef CONFIG_NUMA
1949102bbe3aSYinghai Lu 	numa_add_cpu(smp_processor_id());
1950102bbe3aSYinghai Lu #endif
1951f7627e25SThomas Gleixner }
1952f7627e25SThomas Gleixner 
19538b6c0ab1SIngo Molnar /*
19548b6c0ab1SIngo Molnar  * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
19558b6c0ab1SIngo Molnar  * on 32-bit kernels:
19568b6c0ab1SIngo Molnar  */
1957cfda7bb9SAndy Lutomirski #ifdef CONFIG_X86_32
1958cfda7bb9SAndy Lutomirski void enable_sep_cpu(void)
1959cfda7bb9SAndy Lutomirski {
19608b6c0ab1SIngo Molnar 	struct tss_struct *tss;
19618b6c0ab1SIngo Molnar 	int cpu;
1962cfda7bb9SAndy Lutomirski 
1963b3edfda4SBorislav Petkov 	if (!boot_cpu_has(X86_FEATURE_SEP))
1964b3edfda4SBorislav Petkov 		return;
1965b3edfda4SBorislav Petkov 
19668b6c0ab1SIngo Molnar 	cpu = get_cpu();
1967c482feefSAndy Lutomirski 	tss = &per_cpu(cpu_tss_rw, cpu);
19688b6c0ab1SIngo Molnar 
19698b6c0ab1SIngo Molnar 	/*
1970cf9328ccSAndy Lutomirski 	 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
1971cf9328ccSAndy Lutomirski 	 * see the big comment in struct x86_hw_tss's definition.
19728b6c0ab1SIngo Molnar 	 */
1973cfda7bb9SAndy Lutomirski 
1974cfda7bb9SAndy Lutomirski 	tss->x86_tss.ss1 = __KERNEL_CS;
19758b6c0ab1SIngo Molnar 	wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
19764fe2d8b1SDave Hansen 	wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0);
19774c8cd0c5SIngo Molnar 	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
19788b6c0ab1SIngo Molnar 
1979cfda7bb9SAndy Lutomirski 	put_cpu();
1980cfda7bb9SAndy Lutomirski }
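
/*
 * On SYSENTER the CPU loads CS from MSR_IA32_SYSENTER_CS (with SS derived
 * from it), ESP from MSR_IA32_SYSENTER_ESP and EIP from
 * MSR_IA32_SYSENTER_EIP, so the three writes above fully describe the
 * 32-bit kernel entry point for this CPU.
 */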
1981e04d645fSGlauber Costa #endif
1982e04d645fSGlauber Costa 
19833ba3fdfeSThomas Gleixner static __init void identify_boot_cpu(void)
1984f7627e25SThomas Gleixner {
1985f7627e25SThomas Gleixner 	identify_cpu(&boot_cpu_data);
1986991625f3SPeter Zijlstra 	if (HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT))
1987991625f3SPeter Zijlstra 		pr_info("CET detected: Indirect Branch Tracking enabled\n");
1988102bbe3aSYinghai Lu #ifdef CONFIG_X86_32
1989f7627e25SThomas Gleixner 	enable_sep_cpu();
1990102bbe3aSYinghai Lu #endif
1991e0ba94f1SAlex Shi 	cpu_detect_tlb(&boot_cpu_data);
1992873d50d5SKees Cook 	setup_cr_pinning();
199395c5824fSPawan Gupta 
199495c5824fSPawan Gupta 	tsx_init();
1995765a0542SKai Huang 	tdx_init();
199692cbbadfSH. Peter Anvin (Intel) 	lkgs_init();
1997f7627e25SThomas Gleixner }
1998f7627e25SThomas Gleixner 
1999148f9bb8SPaul Gortmaker void identify_secondary_cpu(struct cpuinfo_x86 *c)
2000f7627e25SThomas Gleixner {
2001f7627e25SThomas Gleixner 	BUG_ON(c == &boot_cpu_data);
2002f7627e25SThomas Gleixner 	identify_cpu(c);
2003102bbe3aSYinghai Lu #ifdef CONFIG_X86_32
2004f7627e25SThomas Gleixner 	enable_sep_cpu();
2005102bbe3aSYinghai Lu #endif
20069d85eb91SThomas Gleixner 	validate_apic_and_package_id(c);
200777243971SKonrad Rzeszutek Wilk 	x86_spec_ctrl_setup_ap();
20087e5b3c26SMark Gross 	update_srbds_msr();
20098974eb58SDaniel Sneddon 	if (boot_cpu_has_bug(X86_BUG_GDS))
20108974eb58SDaniel Sneddon 		update_gds_msr();
2011400331f8SPawan Gupta 
2012400331f8SPawan Gupta 	tsx_ap_init();
2013f7627e25SThomas Gleixner }
2014f7627e25SThomas Gleixner 
2015148f9bb8SPaul Gortmaker void print_cpu_info(struct cpuinfo_x86 *c)
2016f7627e25SThomas Gleixner {
201702dde8b4SJan Beulich 	const char *vendor = NULL;
2018f7627e25SThomas Gleixner 
20190f3fa48aSIngo Molnar 	if (c->x86_vendor < X86_VENDOR_NUM) {
2020f7627e25SThomas Gleixner 		vendor = this_cpu->c_vendor;
20210f3fa48aSIngo Molnar 	} else {
20220f3fa48aSIngo Molnar 		if (c->cpuid_level >= 0)
2023f7627e25SThomas Gleixner 			vendor = c->x86_vendor_id;
20240f3fa48aSIngo Molnar 	}
2025f7627e25SThomas Gleixner 
2026bd32a8cfSYinghai Lu 	if (vendor && !strstr(c->x86_model_id, vendor))
20271b74dde7SChen Yucong 		pr_cont("%s ", vendor);
2028f7627e25SThomas Gleixner 
20299d31d35bSYinghai Lu 	if (c->x86_model_id[0])
20301b74dde7SChen Yucong 		pr_cont("%s", c->x86_model_id);
2031f7627e25SThomas Gleixner 	else
20321b74dde7SChen Yucong 		pr_cont("%d86", c->x86);
2033f7627e25SThomas Gleixner 
20341b74dde7SChen Yucong 	pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
2035924e101aSBorislav Petkov 
2036b399151cSJia Zhang 	if (c->x86_stepping || c->cpuid_level >= 0)
2037b399151cSJia Zhang 		pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
2038f7627e25SThomas Gleixner 	else
20391b74dde7SChen Yucong 		pr_cont(")\n");
2040f7627e25SThomas Gleixner }
2041f7627e25SThomas Gleixner 
20420c2a3913SAndi Kleen /*
2043ce38f038SThomas Gleixner  * clearcpuid= was already parsed in cpu_parse_early_param().  This dummy
2044ce38f038SThomas Gleixner  * function prevents it from becoming an environment variable for init.
20450c2a3913SAndi Kleen  */
20460c2a3913SAndi Kleen static __init int setup_clearcpuid(char *arg)
2047ac72e788SAndi Kleen {
2048ac72e788SAndi Kleen 	return 1;
2049ac72e788SAndi Kleen }
20500c2a3913SAndi Kleen __setup("clearcpuid=", setup_clearcpuid);
2051ac72e788SAndi Kleen 
2052e57ef2edSThomas Gleixner DEFINE_PER_CPU_ALIGNED(struct pcpu_hot, pcpu_hot) = {
2053e57ef2edSThomas Gleixner 	.current_task	= &init_task,
205464701838SThomas Gleixner 	.preempt_count	= INIT_PREEMPT_COUNT,
2055c063a217SThomas Gleixner 	.top_of_stack	= TOP_OF_INIT_STACK,
2056e57ef2edSThomas Gleixner };
2057e57ef2edSThomas Gleixner EXPORT_PER_CPU_SYMBOL(pcpu_hot);
2058e57ef2edSThomas Gleixner 
2059d5494d4fSYinghai Lu #ifdef CONFIG_X86_64
2060e6401c13SAndy Lutomirski DEFINE_PER_CPU_FIRST(struct fixed_percpu_data,
2061e6401c13SAndy Lutomirski 		     fixed_percpu_data) __aligned(PAGE_SIZE) __visible;
2062e6401c13SAndy Lutomirski EXPORT_PER_CPU_SYMBOL_GPL(fixed_percpu_data);
20630f3fa48aSIngo Molnar 
20649c7e2634SAndi Kleen static void wrmsrl_cstar(unsigned long val)
20659c7e2634SAndi Kleen {
20669c7e2634SAndi Kleen 	/*
20679c7e2634SAndi Kleen 	 * Intel CPUs do not support 32-bit SYSCALL. Writing to MSR_CSTAR
20689c7e2634SAndi Kleen 	 * is so far ignored by the CPU, but raises a #VE trap in a TDX
20699c7e2634SAndi Kleen 	 * guest. Avoid the pointless write on all Intel CPUs.
20709c7e2634SAndi Kleen 	 */
20719c7e2634SAndi Kleen 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
20729c7e2634SAndi Kleen 		wrmsrl(MSR_CSTAR, val);
20739c7e2634SAndi Kleen }
20749c7e2634SAndi Kleen 
2075d5494d4fSYinghai Lu /* May not be marked __init: used by software suspend */
2076d5494d4fSYinghai Lu void syscall_init(void)
2077d5494d4fSYinghai Lu {
207831ac34caSBorislav Petkov 	wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
20798d4b0678SThomas Gleixner 	wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
2080d56fe4bfSIngo Molnar 
208161382281SNikolay Borisov 	if (ia32_enabled()) {
20829c7e2634SAndi Kleen 		wrmsrl_cstar((unsigned long)entry_SYSCALL_compat);
2083a76c7f46SDenys Vlasenko 		/*
2084487d1edbSDenys Vlasenko 		 * This only works on Intel CPUs.
2085487d1edbSDenys Vlasenko 		 * On AMD CPUs these MSRs are 32-bit, so the CPU truncates MSR_IA32_SYSENTER_EIP.
2086487d1edbSDenys Vlasenko 		 * This does not cause SYSENTER to jump to the wrong location, because
2087487d1edbSDenys Vlasenko 		 * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
2088a76c7f46SDenys Vlasenko 		 */
2089a76c7f46SDenys Vlasenko 		wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
20908e6b65a1Szhong jiang 		wrmsrl_safe(MSR_IA32_SYSENTER_ESP,
20918e6b65a1Szhong jiang 			    (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1));
20924c8cd0c5SIngo Molnar 		wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
209361382281SNikolay Borisov 	} else {
2094f71e1d2fSNikolay Borisov 		wrmsrl_cstar((unsigned long)entry_SYSCALL32_ignore);
20956b51311cSBorislav Petkov 		wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
2096d56fe4bfSIngo Molnar 		wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
2097d56fe4bfSIngo Molnar 		wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
209861382281SNikolay Borisov 	}
2099d5494d4fSYinghai Lu 
21006de4ac1dSH. Peter Anvin (Intel) 	/*
21016de4ac1dSH. Peter Anvin (Intel) 	 * Flags to clear on syscall; clear as much as possible
21026de4ac1dSH. Peter Anvin (Intel) 	 * to minimize user space/kernel interference.
21036de4ac1dSH. Peter Anvin (Intel) 	 */
2104d5494d4fSYinghai Lu 	wrmsrl(MSR_SYSCALL_MASK,
21056de4ac1dSH. Peter Anvin (Intel) 	       X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF|
21066de4ac1dSH. Peter Anvin (Intel) 	       X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_TF|
21076de4ac1dSH. Peter Anvin (Intel) 	       X86_EFLAGS_IF|X86_EFLAGS_DF|X86_EFLAGS_OF|
21086de4ac1dSH. Peter Anvin (Intel) 	       X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_RF|
21096de4ac1dSH. Peter Anvin (Intel) 	       X86_EFLAGS_AC|X86_EFLAGS_ID);
2110d5494d4fSYinghai Lu }
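
/*
 * A sketch of the MSR_STAR layout programmed at the top of syscall_init()
 * (architectural layout; see the SDM/APM for the authoritative wording):
 *
 *	STAR[31:0]  = 0			legacy 32-bit SYSCALL EIP, unused here
 *	STAR[47:32] = __KERNEL_CS	SYSCALL loads CS from here (SS = CS + 8)
 *	STAR[63:48] = __USER32_CS	SYSRET derives the user CS/SS from this base
 *
 * which is exactly what wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS)
 * packs: low dword zero, both selectors in the high dword.
 */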
2111d5494d4fSYinghai Lu 
21120f3fa48aSIngo Molnar #else	/* CONFIG_X86_64 */
2113d5494d4fSYinghai Lu 
2114050e9baaSLinus Torvalds #ifdef CONFIG_STACKPROTECTOR
21153fb0fdb3SAndy Lutomirski DEFINE_PER_CPU(unsigned long, __stack_chk_guard);
21163fb0fdb3SAndy Lutomirski EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
211760a5317fSTejun Heo #endif
211860a5317fSTejun Heo 
21190f3fa48aSIngo Molnar #endif	/* CONFIG_X86_64 */
2120f7627e25SThomas Gleixner 
2121f7627e25SThomas Gleixner /*
21229766cdbcSJaswinder Singh Rajput  * Clear all 6 debug registers (DR0-DR3, DR6 and DR7):
21239766cdbcSJaswinder Singh Rajput  */
21249766cdbcSJaswinder Singh Rajput static void clear_all_debug_regs(void)
21259766cdbcSJaswinder Singh Rajput {
21269766cdbcSJaswinder Singh Rajput 	int i;
21279766cdbcSJaswinder Singh Rajput 
21289766cdbcSJaswinder Singh Rajput 	for (i = 0; i < 8; i++) {
21299766cdbcSJaswinder Singh Rajput 		/* Skip DR4 and DR5: they alias DR6/DR7 (or #UD if CR4.DE is set) */
21309766cdbcSJaswinder Singh Rajput 		if ((i == 4) || (i == 5))
21319766cdbcSJaswinder Singh Rajput 			continue;
21329766cdbcSJaswinder Singh Rajput 
21339766cdbcSJaswinder Singh Rajput 		set_debugreg(0, i);
21349766cdbcSJaswinder Singh Rajput 	}
21359766cdbcSJaswinder Singh Rajput }
2136f7627e25SThomas Gleixner 
21370bb9fef9SJason Wessel #ifdef CONFIG_KGDB
21380bb9fef9SJason Wessel /*
21390bb9fef9SJason Wessel  * Restore debug registers if using kgdbwait and a kernel debugger
21400bb9fef9SJason Wessel  * connection is established.
21410bb9fef9SJason Wessel  */
21420bb9fef9SJason Wessel static void dbg_restore_debug_regs(void)
21430bb9fef9SJason Wessel {
21440bb9fef9SJason Wessel 	if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
21450bb9fef9SJason Wessel 		arch_kgdb_ops.correct_hw_break();
21460bb9fef9SJason Wessel }
21470bb9fef9SJason Wessel #else /* ! CONFIG_KGDB */
21480bb9fef9SJason Wessel #define dbg_restore_debug_regs()
21490bb9fef9SJason Wessel #endif /* ! CONFIG_KGDB */
21500bb9fef9SJason Wessel 
2151505b7899SThomas Gleixner static inline void setup_getcpu(int cpu)
2152b2e2ba57SChang S. Bae {
215322245bdfSIngo Molnar 	unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
2154b2e2ba57SChang S. Bae 	struct desc_struct d = { };
2155b2e2ba57SChang S. Bae 
2156b6b4fbd9SSean Christopherson 	if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID))
2157fc48a6d1SSean Christopherson 		wrmsr(MSR_TSC_AUX, cpudata, 0);
2158b2e2ba57SChang S. Bae 
2159b2e2ba57SChang S. Bae 	/* Store CPU and node number in limit. */
2160b2e2ba57SChang S. Bae 	d.limit0 = cpudata;
2161b2e2ba57SChang S. Bae 	d.limit1 = cpudata >> 16;
2162b2e2ba57SChang S. Bae 
2163b2e2ba57SChang S. Bae 	d.type = 5;		/* RO data, expand down, accessed */
2164b2e2ba57SChang S. Bae 	d.dpl = 3;		/* Visible to user code */
2165b2e2ba57SChang S. Bae 	d.s = 1;		/* Not a system segment */
2166b2e2ba57SChang S. Bae 	d.p = 1;		/* Present */
2167b2e2ba57SChang S. Bae 	d.d = 1;		/* 32-bit */
2168b2e2ba57SChang S. Bae 
216922245bdfSIngo Molnar 	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S);
2170b2e2ba57SChang S. Bae }
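
/*
 * The consumer side, sketched under the assumption that the 12-bit split of
 * vdso_encode_cpunode() (see <asm/segment.h>) applies: user space, e.g. the
 * vDSO getcpu(), reads the value back via RDPID (MSR_TSC_AUX) or via LSL on
 * the GDT_ENTRY_CPUNODE segment limit written above, then decodes roughly
 *
 *	unsigned int cpu  = cpudata & ((1U << 12) - 1);	// low 12 bits
 *	unsigned int node = cpudata >> 12;		// remaining bits
 *
 * vdso_read_cpunode() is the authoritative decoder.
 */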
2171505b7899SThomas Gleixner 
2172717cce3bSSebastian Andrzej Siewior #ifdef CONFIG_X86_64
2173505b7899SThomas Gleixner static inline void tss_setup_ist(struct tss_struct *tss)
2174505b7899SThomas Gleixner {
2175505b7899SThomas Gleixner 	/* Set up the per-CPU TSS IST stacks */
2176505b7899SThomas Gleixner 	tss->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF);
2177505b7899SThomas Gleixner 	tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI);
2178505b7899SThomas Gleixner 	tss->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB);
2179505b7899SThomas Gleixner 	tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);
218002772fb9SJoerg Roedel 	/* Only mapped when SEV-ES is active */
218102772fb9SJoerg Roedel 	tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC);
2182505b7899SThomas Gleixner }
2183505b7899SThomas Gleixner #else /* CONFIG_X86_64 */
2184505b7899SThomas Gleixner static inline void tss_setup_ist(struct tss_struct *tss) { }
2185505b7899SThomas Gleixner #endif /* !CONFIG_X86_64 */
2186b2e2ba57SChang S. Bae 
2187111e7b15SThomas Gleixner static inline void tss_setup_io_bitmap(struct tss_struct *tss)
2188111e7b15SThomas Gleixner {
2189111e7b15SThomas Gleixner 	tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET_INVALID;
2190111e7b15SThomas Gleixner 
2191111e7b15SThomas Gleixner #ifdef CONFIG_X86_IOPL_IOPERM
2192111e7b15SThomas Gleixner 	tss->io_bitmap.prev_max = 0;
2193111e7b15SThomas Gleixner 	tss->io_bitmap.prev_sequence = 0;
2194111e7b15SThomas Gleixner 	memset(tss->io_bitmap.bitmap, 0xff, sizeof(tss->io_bitmap.bitmap));
2195111e7b15SThomas Gleixner 	/*
2196111e7b15SThomas Gleixner 	 * Invalidate the extra array entry past the end of the all-permission
2197111e7b15SThomas Gleixner 	 * bitmap, as required by the hardware.
2198111e7b15SThomas Gleixner 	 */
2199111e7b15SThomas Gleixner 	tss->io_bitmap.mapall[IO_BITMAP_LONGS] = ~0UL;
2200111e7b15SThomas Gleixner #endif
2201111e7b15SThomas Gleixner }
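
/*
 * Effect of the invalid offset above, with a hedged user-space illustration
 * (hypothetical snippet, not kernel code): while io_bitmap_base points past
 * the TSS limit, any IN/OUT from CPL 3 raises #GP. Port access only works
 * once the ioperm()/iopl() path installs a real bitmap, e.g.:
 *
 *	#include <sys/io.h>
 *
 *	if (ioperm(0x378, 3, 1) == 0)	// request ports 0x378..0x37a
 *		outb(0xff, 0x378);	// now permitted by the TSS io bitmap
 */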
2202ce4b1b16SIgor Mammedov 
2203f7627e25SThomas Gleixner /*
2204520d0308SJoerg Roedel  * Set up everything needed to handle exceptions from the IDT, including the IST
2205520d0308SJoerg Roedel  * exceptions which use paranoid_entry().
2206520d0308SJoerg Roedel  */
2207520d0308SJoerg Roedel void cpu_init_exception_handling(void)
2208520d0308SJoerg Roedel {
2209520d0308SJoerg Roedel 	struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
2210520d0308SJoerg Roedel 	int cpu = raw_smp_processor_id();
2211520d0308SJoerg Roedel 
2212520d0308SJoerg Roedel 	/* paranoid_entry() gets the CPU number from the GDT */
2213520d0308SJoerg Roedel 	setup_getcpu(cpu);
2214520d0308SJoerg Roedel 
2215520d0308SJoerg Roedel 	/* IST vectors need TSS to be set up. */
2216520d0308SJoerg Roedel 	tss_setup_ist(tss);
2217520d0308SJoerg Roedel 	tss_setup_io_bitmap(tss);
2218520d0308SJoerg Roedel 	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
2219520d0308SJoerg Roedel 
2220520d0308SJoerg Roedel 	load_TR_desc();
2221520d0308SJoerg Roedel 
222295d33bfaSBrijesh Singh 	/* The GHCB needs to be set up to handle #VC. */
222395d33bfaSBrijesh Singh 	setup_ghcb();
222495d33bfaSBrijesh Singh 
2225520d0308SJoerg Roedel 	/* Finally load the IDT */
2226520d0308SJoerg Roedel 	load_current_idt();
2227520d0308SJoerg Roedel }
2228520d0308SJoerg Roedel 
2229520d0308SJoerg Roedel /*
2230f7627e25SThomas Gleixner  * cpu_init() initializes state that is per-CPU. Some data is already
2231b1efd0ffSBorislav Petkov  * initialized (naturally) in the bootstrap process, such as the GDT.  We
2232b1efd0ffSBorislav Petkov  * reload it nevertheless: this function acts as a 'CPU state barrier',
2233b1efd0ffSBorislav Petkov  * and nothing should get across.
2234f7627e25SThomas Gleixner  */
2235148f9bb8SPaul Gortmaker void cpu_init(void)
22361ba76586SYinghai Lu {
2237505b7899SThomas Gleixner 	struct task_struct *cur = current;
2238f6ef7322SThomas Gleixner 	int cpu = raw_smp_processor_id();
22391ba76586SYinghai Lu 
2240e7a22c1eSBrian Gerst #ifdef CONFIG_NUMA
224127fd185fSFenghua Yu 	if (this_cpu_read(numa_node) == 0 &&
2242e534c7c5SLee Schermerhorn 	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
2243e534c7c5SLee Schermerhorn 		set_numa_node(early_cpu_to_node(cpu));
2244e7a22c1eSBrian Gerst #endif
22452eaad1fdSMike Travis 	pr_debug("Initializing CPU#%d\n", cpu);
22461ba76586SYinghai Lu 
2247505b7899SThomas Gleixner 	if (IS_ENABLED(CONFIG_X86_64) || cpu_feature_enabled(X86_FEATURE_VME) ||
2248505b7899SThomas Gleixner 	    boot_cpu_has(X86_FEATURE_TSC) || boot_cpu_has(X86_FEATURE_DE))
2249375074ccSAndy Lutomirski 		cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
22501ba76586SYinghai Lu 
2251505b7899SThomas Gleixner 	if (IS_ENABLED(CONFIG_X86_64)) {
2252505b7899SThomas Gleixner 		loadsegment(fs, 0);
2253505b7899SThomas Gleixner 		memset(cur->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
22541ba76586SYinghai Lu 		syscall_init();
22551ba76586SYinghai Lu 
22561ba76586SYinghai Lu 		wrmsrl(MSR_FS_BASE, 0);
22571ba76586SYinghai Lu 		wrmsrl(MSR_KERNEL_GS_BASE, 0);
22581ba76586SYinghai Lu 		barrier();
22591ba76586SYinghai Lu 
2260659006bfSThomas Gleixner 		x2apic_setup();
22611ba76586SYinghai Lu 	}
22621ba76586SYinghai Lu 
2263f1f10076SVegard Nossum 	mmgrab(&init_mm);
2264505b7899SThomas Gleixner 	cur->active_mm = &init_mm;
2265505b7899SThomas Gleixner 	BUG_ON(cur->mm);
226672c0098dSAndy Lutomirski 	initialize_tlbstate_and_flush();
2267505b7899SThomas Gleixner 	enter_lazy_tlb(&init_mm, cur);
22681ba76586SYinghai Lu 
2269505b7899SThomas Gleixner 	/*
2270505b7899SThomas Gleixner 	 * sp0 points to the entry trampoline stack regardless of what task
2271505b7899SThomas Gleixner 	 * is running.
2272505b7899SThomas Gleixner 	 */
22734fe2d8b1SDave Hansen 	load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
227420bb8344SAndy Lutomirski 
227537868fe1SAndy Lutomirski 	load_mm_ldt(&init_mm);
22761ba76586SYinghai Lu 
22779766cdbcSJaswinder Singh Rajput 	clear_all_debug_regs();
22780bb9fef9SJason Wessel 	dbg_restore_debug_regs();
22791ba76586SYinghai Lu 
2280dc4e0021SAndy Lutomirski 	doublefault_init_cpu_tss();
2281505b7899SThomas Gleixner 
22821ba76586SYinghai Lu 	if (is_uv_system())
22831ba76586SYinghai Lu 		uv_cpu_init();
228469218e47SThomas Garnier 
228569218e47SThomas Garnier 	load_fixmap_gdt(cpu);
22861ba76586SYinghai Lu }
22871ba76586SYinghai Lu 
2288a77a94f8SBorislav Petkov #ifdef CONFIG_MICROCODE_LATE_LOADING
2289ab31c744SAshok Raj /**
2290c0dd9245SAshok Raj  * store_cpu_caps() - Store a snapshot of CPU capabilities
2291c0dd9245SAshok Raj  * @curr_info: Pointer where to store it
2292c0dd9245SAshok Raj  *
2293c0dd9245SAshok Raj  * Return: None
2294c0dd9245SAshok Raj  */
2295c0dd9245SAshok Raj void store_cpu_caps(struct cpuinfo_x86 *curr_info)
2296c0dd9245SAshok Raj {
2297c0dd9245SAshok Raj 	/* Reload CPUID max function as it might've changed. */
2298c0dd9245SAshok Raj 	curr_info->cpuid_level = cpuid_eax(0);
2299c0dd9245SAshok Raj 
2300c0dd9245SAshok Raj 	/* Copy all capability leaves and pick up the synthetic ones. */
2301c0dd9245SAshok Raj 	memcpy(&curr_info->x86_capability, &boot_cpu_data.x86_capability,
2302c0dd9245SAshok Raj 	       sizeof(curr_info->x86_capability));
2303c0dd9245SAshok Raj 
2304c0dd9245SAshok Raj 	/* Get the hardware CPUID leaves */
2305c0dd9245SAshok Raj 	get_cpu_cap(curr_info);
2306c0dd9245SAshok Raj }
2307c0dd9245SAshok Raj 
2308c0dd9245SAshok Raj /**
2309ab31c744SAshok Raj  * microcode_check() - Check if any CPU capabilities changed after an update.
2310ab31c744SAshok Raj  * @prev_info:	CPU capabilities stored before an update.
2311ab31c744SAshok Raj  *
23121008c52cSBorislav Petkov  * The microcode loader calls this upon late microcode load to recheck features,
231380347cd5SSebastian Andrzej Siewior  * only when microcode has been updated. Caller holds the CPU hotplug lock.
2314ab31c744SAshok Raj  *
2315ab31c744SAshok Raj  * Return: None
23161008c52cSBorislav Petkov  */
2317ab31c744SAshok Raj void microcode_check(struct cpuinfo_x86 *prev_info)
23181008c52cSBorislav Petkov {
2319c0dd9245SAshok Raj 	struct cpuinfo_x86 curr_info;
232042ca8082SBorislav Petkov 
23211008c52cSBorislav Petkov 	perf_check_microcode();
232242ca8082SBorislav Petkov 
2323522b1d69SBorislav Petkov (AMD) 	amd_check_microcode();
2324522b1d69SBorislav Petkov (AMD) 
2325c0dd9245SAshok Raj 	store_cpu_caps(&curr_info);
232642ca8082SBorislav Petkov 
2327c0dd9245SAshok Raj 	if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability,
2328ab31c744SAshok Raj 		    sizeof(prev_info->x86_capability)))
232942ca8082SBorislav Petkov 		return;
233042ca8082SBorislav Petkov 
233142ca8082SBorislav Petkov 	pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
233242ca8082SBorislav Petkov 	pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
23331008c52cSBorislav Petkov }
2334a77a94f8SBorislav Petkov #endif
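
/*
 * A minimal sketch of how the two helpers above pair up around a late
 * microcode update (assumed flow; the real call sites live in the microcode
 * loader, and the update step itself is elided):
 *
 *	struct cpuinfo_x86 prev_info;
 *
 *	store_cpu_caps(&prev_info);	// snapshot capabilities beforehand
 *	...				// apply the new microcode
 *	microcode_check(&prev_info);	// warn if feature bits changed
 */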
23359c92374bSThomas Gleixner 
23369c92374bSThomas Gleixner /*
23379c92374bSThomas Gleixner  * Invoked from core CPU hotplug code after hotplug operations
23389c92374bSThomas Gleixner  */
23399c92374bSThomas Gleixner void arch_smt_update(void)
23409c92374bSThomas Gleixner {
23419c92374bSThomas Gleixner 	/* Handle the speculative execution misfeatures */
23429c92374bSThomas Gleixner 	cpu_bugs_smt_update();
23436a1cb5f5SThomas Gleixner 	/* Check whether IPI broadcasting can be enabled */
23446a1cb5f5SThomas Gleixner 	apic_smt_update();
23459c92374bSThomas Gleixner }
23467c7077a7SThomas Gleixner 
23477c7077a7SThomas Gleixner void __init arch_cpu_finalize_init(void)
23487c7077a7SThomas Gleixner {
23497c7077a7SThomas Gleixner 	identify_boot_cpu();
23507c7077a7SThomas Gleixner 
23517c7077a7SThomas Gleixner 	/*
23527c7077a7SThomas Gleixner 	 * identify_boot_cpu() initialized SMT support information; let the
23537c7077a7SThomas Gleixner 	 * core code know.
23547c7077a7SThomas Gleixner 	 */
2355447ae4acSMichael Ellerman 	cpu_smt_set_num_threads(smp_num_siblings, smp_num_siblings);
23567c7077a7SThomas Gleixner 
23577c7077a7SThomas Gleixner 	if (!IS_ENABLED(CONFIG_SMP)) {
23587c7077a7SThomas Gleixner 		pr_info("CPU: ");
23597c7077a7SThomas Gleixner 		print_cpu_info(&boot_cpu_data);
23607c7077a7SThomas Gleixner 	}
23617c7077a7SThomas Gleixner 
23627c7077a7SThomas Gleixner 	cpu_select_mitigations();
23637c7077a7SThomas Gleixner 
23647c7077a7SThomas Gleixner 	arch_smt_update();
23657c7077a7SThomas Gleixner 
23667c7077a7SThomas Gleixner 	if (IS_ENABLED(CONFIG_X86_32)) {
23677c7077a7SThomas Gleixner 		/*
23687c7077a7SThomas Gleixner 		 * Check whether this is a real i386, which is no longer
23697c7077a7SThomas Gleixner 		 * supported, and fix up the utsname.
23707c7077a7SThomas Gleixner 		 */
23717c7077a7SThomas Gleixner 		if (boot_cpu_data.x86 < 4)
23727c7077a7SThomas Gleixner 			panic("Kernel requires i486+ for 'invlpg' and other features");
23737c7077a7SThomas Gleixner 
23747c7077a7SThomas Gleixner 		init_utsname()->machine[1] =
23757c7077a7SThomas Gleixner 			'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
23767c7077a7SThomas Gleixner 	}
23777c7077a7SThomas Gleixner 
2378b81fac90SThomas Gleixner 	/*
2379b81fac90SThomas Gleixner 	 * Must be before alternatives because it might set or clear
2380b81fac90SThomas Gleixner 	 * feature bits.
2381b81fac90SThomas Gleixner 	 */
2382b81fac90SThomas Gleixner 	fpu__init_system();
2383b81fac90SThomas Gleixner 	fpu__init_cpu();
2384b81fac90SThomas Gleixner 
23857c7077a7SThomas Gleixner 	alternative_instructions();
23867c7077a7SThomas Gleixner 
23877c7077a7SThomas Gleixner 	if (IS_ENABLED(CONFIG_X86_64)) {
23887c7077a7SThomas Gleixner 		/*
23897c7077a7SThomas Gleixner 		 * Make sure the first 2MB area is not mapped by huge pages.
23907c7077a7SThomas Gleixner 		 * There are typically fixed-size MTRRs in there, and MTRRs
23917c7077a7SThomas Gleixner 		 * overlapping into large pages cause slowdowns.
23927c7077a7SThomas Gleixner 		 *
23937c7077a7SThomas Gleixner 		 * Right now we don't do that with gbpages because there seems
23947c7077a7SThomas Gleixner 		 * very little benefit for that case.
23957c7077a7SThomas Gleixner 		 */
23967c7077a7SThomas Gleixner 		if (!direct_gbpages)
23977c7077a7SThomas Gleixner 			set_memory_4k((unsigned long)__va(0), 1);
23987c7077a7SThomas Gleixner 	} else {
23997c7077a7SThomas Gleixner 		fpu__init_check_bugs();
24007c7077a7SThomas Gleixner 	}
2401439e1757SThomas Gleixner 
2402439e1757SThomas Gleixner 	/*
2403439e1757SThomas Gleixner 	 * This needs to be called before any devices perform DMA
2404439e1757SThomas Gleixner 	 * operations that might use the SWIOTLB bounce buffers. It will
2405439e1757SThomas Gleixner 	 * mark the bounce buffers as decrypted so that their usage will
2406439e1757SThomas Gleixner 	 * not cause "plain-text" data to be decrypted when accessed. It
2407439e1757SThomas Gleixner 	 * must be called after late_time_init() so that Hyper-V x86/x64
2408439e1757SThomas Gleixner 	 * hypercalls work when the SWIOTLB bounce buffers are decrypted.
2409439e1757SThomas Gleixner 	 */
2410439e1757SThomas Gleixner 	mem_encrypt_init();
24117c7077a7SThomas Gleixner }
2412