xref: /linux/arch/x86/kernel/cpu/common.c (revision b2e2ba578e016a091eb31565849990fe68c7c599)
12458e53fSKirill A. Shutemov /* cpu_feature_enabled() cannot be used this early */
22458e53fSKirill A. Shutemov #define USE_EARLY_PGTABLE_L5
32458e53fSKirill A. Shutemov 
4f0fc4affSYinghai Lu #include <linux/bootmem.h>
59766cdbcSJaswinder Singh Rajput #include <linux/linkage.h>
6f0fc4affSYinghai Lu #include <linux/bitops.h>
79766cdbcSJaswinder Singh Rajput #include <linux/kernel.h>
8186f4360SPaul Gortmaker #include <linux/export.h>
9f7627e25SThomas Gleixner #include <linux/percpu.h>
109766cdbcSJaswinder Singh Rajput #include <linux/string.h>
11ee098e1aSBorislav Petkov #include <linux/ctype.h>
129766cdbcSJaswinder Singh Rajput #include <linux/delay.h>
1368e21be2SIngo Molnar #include <linux/sched/mm.h>
14e6017571SIngo Molnar #include <linux/sched/clock.h>
159164bb4aSIngo Molnar #include <linux/sched/task.h>
169766cdbcSJaswinder Singh Rajput #include <linux/init.h>
170f46efebSMasami Hiramatsu #include <linux/kprobes.h>
189766cdbcSJaswinder Singh Rajput #include <linux/kgdb.h>
199766cdbcSJaswinder Singh Rajput #include <linux/smp.h>
209766cdbcSJaswinder Singh Rajput #include <linux/io.h>
21b51ef52dSLaura Abbott #include <linux/syscore_ops.h>
229766cdbcSJaswinder Singh Rajput 
239766cdbcSJaswinder Singh Rajput #include <asm/stackprotector.h>
24cdd6c482SIngo Molnar #include <asm/perf_event.h>
25f7627e25SThomas Gleixner #include <asm/mmu_context.h>
2649d859d7SH. Peter Anvin #include <asm/archrandom.h>
279766cdbcSJaswinder Singh Rajput #include <asm/hypervisor.h>
289766cdbcSJaswinder Singh Rajput #include <asm/processor.h>
291e02ce4cSAndy Lutomirski #include <asm/tlbflush.h>
30f649e938SPaul Gortmaker #include <asm/debugreg.h>
319766cdbcSJaswinder Singh Rajput #include <asm/sections.h>
32f40c3300SAndy Lutomirski #include <asm/vsyscall.h>
338bdbd962SAlan Cox #include <linux/topology.h>
348bdbd962SAlan Cox #include <linux/cpumask.h>
359766cdbcSJaswinder Singh Rajput #include <asm/pgtable.h>
3660063497SArun Sharma #include <linux/atomic.h>
379766cdbcSJaswinder Singh Rajput #include <asm/proto.h>
389766cdbcSJaswinder Singh Rajput #include <asm/setup.h>
39f7627e25SThomas Gleixner #include <asm/apic.h>
409766cdbcSJaswinder Singh Rajput #include <asm/desc.h>
4178f7f1e5SIngo Molnar #include <asm/fpu/internal.h>
429766cdbcSJaswinder Singh Rajput #include <asm/mtrr.h>
430274f955SGrzegorz Andrejczuk #include <asm/hwcap2.h>
448bdbd962SAlan Cox #include <linux/numa.h>
459766cdbcSJaswinder Singh Rajput #include <asm/asm.h>
460f6ff2bcSDave Hansen #include <asm/bugs.h>
479766cdbcSJaswinder Singh Rajput #include <asm/cpu.h>
489766cdbcSJaswinder Singh Rajput #include <asm/mce.h>
499766cdbcSJaswinder Singh Rajput #include <asm/msr.h>
509766cdbcSJaswinder Singh Rajput #include <asm/pat.h>
51d288e1cfSFenghua Yu #include <asm/microcode.h>
52d288e1cfSFenghua Yu #include <asm/microcode_intel.h>
53fec9434aSDavid Woodhouse #include <asm/intel-family.h>
54fec9434aSDavid Woodhouse #include <asm/cpu_device_id.h>
55e641f5f5SIngo Molnar 
56f7627e25SThomas Gleixner #ifdef CONFIG_X86_LOCAL_APIC
57bdbcdd48STejun Heo #include <asm/uv/uv.h>
58f7627e25SThomas Gleixner #endif
59f7627e25SThomas Gleixner 
60f7627e25SThomas Gleixner #include "cpu.h"
61f7627e25SThomas Gleixner 
620274f955SGrzegorz Andrejczuk u32 elf_hwcap2 __read_mostly;
630274f955SGrzegorz Andrejczuk 
64c2d1cec1SMike Travis /* all of these masks are initialized in setup_cpu_local_masks() */
65c2d1cec1SMike Travis cpumask_var_t cpu_initialized_mask;
669766cdbcSJaswinder Singh Rajput cpumask_var_t cpu_callout_mask;
679766cdbcSJaswinder Singh Rajput cpumask_var_t cpu_callin_mask;
68c2d1cec1SMike Travis 
69c2d1cec1SMike Travis /* representing cpus for which sibling maps can be computed */
70c2d1cec1SMike Travis cpumask_var_t cpu_sibling_setup_mask;
71c2d1cec1SMike Travis 
72f8b64d08SBorislav Petkov /* Number of siblings per CPU package */
73f8b64d08SBorislav Petkov int smp_num_siblings = 1;
74f8b64d08SBorislav Petkov EXPORT_SYMBOL(smp_num_siblings);
75f8b64d08SBorislav Petkov 
76f8b64d08SBorislav Petkov /* Last level cache ID of each logical CPU */
77f8b64d08SBorislav Petkov DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;
78f8b64d08SBorislav Petkov 
792f2f52baSBrian Gerst /* correctly size the local cpu masks */
804369f1fbSIngo Molnar void __init setup_cpu_local_masks(void)
812f2f52baSBrian Gerst {
822f2f52baSBrian Gerst 	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
832f2f52baSBrian Gerst 	alloc_bootmem_cpumask_var(&cpu_callin_mask);
842f2f52baSBrian Gerst 	alloc_bootmem_cpumask_var(&cpu_callout_mask);
852f2f52baSBrian Gerst 	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
862f2f52baSBrian Gerst }
872f2f52baSBrian Gerst 
88148f9bb8SPaul Gortmaker static void default_init(struct cpuinfo_x86 *c)
89e8055139SOndrej Zary {
90e8055139SOndrej Zary #ifdef CONFIG_X86_64
9127c13eceSBorislav Petkov 	cpu_detect_cache_sizes(c);
92e8055139SOndrej Zary #else
93e8055139SOndrej Zary 	/* Not much we can do here... */
94e8055139SOndrej Zary 	/* Check if it at least has cpuid */
95e8055139SOndrej Zary 	if (c->cpuid_level == -1) {
96e8055139SOndrej Zary 		/* No cpuid. It must be an ancient CPU */
97e8055139SOndrej Zary 		if (c->x86 == 4)
98e8055139SOndrej Zary 			strcpy(c->x86_model_id, "486");
99e8055139SOndrej Zary 		else if (c->x86 == 3)
100e8055139SOndrej Zary 			strcpy(c->x86_model_id, "386");
101e8055139SOndrej Zary 	}
102e8055139SOndrej Zary #endif
103e8055139SOndrej Zary }
104e8055139SOndrej Zary 
105148f9bb8SPaul Gortmaker static const struct cpu_dev default_cpu = {
106e8055139SOndrej Zary 	.c_init		= default_init,
107e8055139SOndrej Zary 	.c_vendor	= "Unknown",
108e8055139SOndrej Zary 	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
109e8055139SOndrej Zary };
110e8055139SOndrej Zary 
111148f9bb8SPaul Gortmaker static const struct cpu_dev *this_cpu = &default_cpu;
1120a488a53SYinghai Lu 
11306deef89SBrian Gerst DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
114950ad7ffSYinghai Lu #ifdef CONFIG_X86_64
11506deef89SBrian Gerst 	/*
11606deef89SBrian Gerst 	 * We need valid kernel segments for data and code in long mode too
117950ad7ffSYinghai Lu 	 * IRET will check the segment types  kkeil 2000/10/28
118950ad7ffSYinghai Lu 	 * Also sysret mandates a special GDT layout
11906deef89SBrian Gerst 	 *
1209766cdbcSJaswinder Singh Rajput 	 * TLS descriptors are currently at a different place compared to i386.
12106deef89SBrian Gerst 	 * Hopefully nobody expects them at a fixed place (Wine?)
122950ad7ffSYinghai Lu 	 */
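	/*
	 * GDT_ENTRY_INIT(flags, base, limit): the low byte of 'flags' is the
	 * descriptor access byte (0x9b = present, DPL 0, execute/read code;
	 * 0x93 = present, DPL 0, read/write data; 0xfb/0xf3 are the DPL 3
	 * variants), while bits 12-15 carry AVL, L, D/B and G
	 * (0xc0xx = 4K granularity, 32-bit; 0xa0xx = 4K granularity, long mode).
	 */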
1231e5de182SAkinobu Mita 	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
1241e5de182SAkinobu Mita 	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
1251e5de182SAkinobu Mita 	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
1261e5de182SAkinobu Mita 	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
1271e5de182SAkinobu Mita 	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
1281e5de182SAkinobu Mita 	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
129950ad7ffSYinghai Lu #else
1301e5de182SAkinobu Mita 	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
1311e5de182SAkinobu Mita 	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
1321e5de182SAkinobu Mita 	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
1331e5de182SAkinobu Mita 	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
134f7627e25SThomas Gleixner 	/*
135f7627e25SThomas Gleixner 	 * Segments used for calling PnP BIOS have byte granularity.
136f7627e25SThomas Gleixner 	 * The code segments and data segments have fixed 64k limits,
137f7627e25SThomas Gleixner 	 * the transfer segment sizes are set at run time.
138f7627e25SThomas Gleixner 	 */
1396842ef0eSGlauber de Oliveira Costa 	/* 32-bit code */
1401e5de182SAkinobu Mita 	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
1416842ef0eSGlauber de Oliveira Costa 	/* 16-bit code */
1421e5de182SAkinobu Mita 	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
1436842ef0eSGlauber de Oliveira Costa 	/* 16-bit data */
1441e5de182SAkinobu Mita 	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(0x0092, 0, 0xffff),
1456842ef0eSGlauber de Oliveira Costa 	/* 16-bit data */
1461e5de182SAkinobu Mita 	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(0x0092, 0, 0),
1476842ef0eSGlauber de Oliveira Costa 	/* 16-bit data */
1481e5de182SAkinobu Mita 	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(0x0092, 0, 0),
149f7627e25SThomas Gleixner 	/*
150f7627e25SThomas Gleixner 	 * The APM segments have byte granularity and their bases
151f7627e25SThomas Gleixner 	 * are set at run time.  All have 64k limits.
152f7627e25SThomas Gleixner 	 */
1536842ef0eSGlauber de Oliveira Costa 	/* 32-bit code */
1541e5de182SAkinobu Mita 	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
155f7627e25SThomas Gleixner 	/* 16-bit code */
1561e5de182SAkinobu Mita 	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
1576842ef0eSGlauber de Oliveira Costa 	/* data */
15872c4d853SIngo Molnar 	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(0x4092, 0, 0xffff),
159f7627e25SThomas Gleixner 
1601e5de182SAkinobu Mita 	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
1611e5de182SAkinobu Mita 	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
16260a5317fSTejun Heo 	GDT_STACK_CANARY_INIT
163950ad7ffSYinghai Lu #endif
16406deef89SBrian Gerst } };
165f7627e25SThomas Gleixner EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
166f7627e25SThomas Gleixner 
1678c3641e9SDave Hansen static int __init x86_mpx_setup(char *s)
1680c752a93SSuresh Siddha {
1698c3641e9SDave Hansen 	/* require an exact match without trailing characters */
1702cd3949fSDave Hansen 	if (strlen(s))
1712cd3949fSDave Hansen 		return 0;
1720c752a93SSuresh Siddha 
1738c3641e9SDave Hansen 	/* do not emit a message if the feature is not present */
1748c3641e9SDave Hansen 	if (!boot_cpu_has(X86_FEATURE_MPX))
1756bad06b7SSuresh Siddha 		return 1;
1766bad06b7SSuresh Siddha 
1778c3641e9SDave Hansen 	setup_clear_cpu_cap(X86_FEATURE_MPX);
1788c3641e9SDave Hansen 	pr_info("nompx: Intel Memory Protection Extensions (MPX) disabled\n");
179b6f42a4aSFenghua Yu 	return 1;
180b6f42a4aSFenghua Yu }
1818c3641e9SDave Hansen __setup("nompx", x86_mpx_setup);
182b6f42a4aSFenghua Yu 
1830790c9aaSAndy Lutomirski #ifdef CONFIG_X86_64
184c7ad5ad2SAndy Lutomirski static int __init x86_nopcid_setup(char *s)
1850790c9aaSAndy Lutomirski {
186c7ad5ad2SAndy Lutomirski 	/* nopcid doesn't accept parameters */
187c7ad5ad2SAndy Lutomirski 	if (s)
188c7ad5ad2SAndy Lutomirski 		return -EINVAL;
1890790c9aaSAndy Lutomirski 
1900790c9aaSAndy Lutomirski 	/* do not emit a message if the feature is not present */
1910790c9aaSAndy Lutomirski 	if (!boot_cpu_has(X86_FEATURE_PCID))
192c7ad5ad2SAndy Lutomirski 		return 0;
1930790c9aaSAndy Lutomirski 
1940790c9aaSAndy Lutomirski 	setup_clear_cpu_cap(X86_FEATURE_PCID);
1950790c9aaSAndy Lutomirski 	pr_info("nopcid: PCID feature disabled\n");
196c7ad5ad2SAndy Lutomirski 	return 0;
1970790c9aaSAndy Lutomirski }
198c7ad5ad2SAndy Lutomirski early_param("nopcid", x86_nopcid_setup);
1990790c9aaSAndy Lutomirski #endif
2000790c9aaSAndy Lutomirski 
201d12a72b8SAndy Lutomirski static int __init x86_noinvpcid_setup(char *s)
202d12a72b8SAndy Lutomirski {
203d12a72b8SAndy Lutomirski 	/* noinvpcid doesn't accept parameters */
204d12a72b8SAndy Lutomirski 	if (s)
205d12a72b8SAndy Lutomirski 		return -EINVAL;
206d12a72b8SAndy Lutomirski 
207d12a72b8SAndy Lutomirski 	/* do not emit a message if the feature is not present */
208d12a72b8SAndy Lutomirski 	if (!boot_cpu_has(X86_FEATURE_INVPCID))
209d12a72b8SAndy Lutomirski 		return 0;
210d12a72b8SAndy Lutomirski 
211d12a72b8SAndy Lutomirski 	setup_clear_cpu_cap(X86_FEATURE_INVPCID);
212d12a72b8SAndy Lutomirski 	pr_info("noinvpcid: INVPCID feature disabled\n");
213d12a72b8SAndy Lutomirski 	return 0;
214d12a72b8SAndy Lutomirski }
215d12a72b8SAndy Lutomirski early_param("noinvpcid", x86_noinvpcid_setup);
216d12a72b8SAndy Lutomirski 
217ba51dcedSYinghai Lu #ifdef CONFIG_X86_32
218148f9bb8SPaul Gortmaker static int cachesize_override = -1;
219148f9bb8SPaul Gortmaker static int disable_x86_serial_nr = 1;
220f7627e25SThomas Gleixner 
221f7627e25SThomas Gleixner static int __init cachesize_setup(char *str)
222f7627e25SThomas Gleixner {
223f7627e25SThomas Gleixner 	get_option(&str, &cachesize_override);
224f7627e25SThomas Gleixner 	return 1;
225f7627e25SThomas Gleixner }
226f7627e25SThomas Gleixner __setup("cachesize=", cachesize_setup);
227f7627e25SThomas Gleixner 
228f7627e25SThomas Gleixner static int __init x86_sep_setup(char *s)
229f7627e25SThomas Gleixner {
23013530257SAndi Kleen 	setup_clear_cpu_cap(X86_FEATURE_SEP);
231f7627e25SThomas Gleixner 	return 1;
232f7627e25SThomas Gleixner }
233f7627e25SThomas Gleixner __setup("nosep", x86_sep_setup);
234f7627e25SThomas Gleixner 
235f7627e25SThomas Gleixner /* Standard macro to see if a specific flag is changeable */
236f7627e25SThomas Gleixner static inline int flag_is_changeable_p(u32 flag)
237f7627e25SThomas Gleixner {
238f7627e25SThomas Gleixner 	u32 f1, f2;
239f7627e25SThomas Gleixner 
24094f6bac1SKrzysztof Helt 	/*
24194f6bac1SKrzysztof Helt 	 * Cyrix and IDT cpus allow disabling of CPUID
24294f6bac1SKrzysztof Helt 	 * so the code below may return different results
24394f6bac1SKrzysztof Helt 	 * when it is executed before and after enabling
24494f6bac1SKrzysztof Helt 	 * the CPUID. Add "volatile" to not allow gcc to
24594f6bac1SKrzysztof Helt 	 * optimize the subsequent calls to this function.
24694f6bac1SKrzysztof Helt 	 */
24794f6bac1SKrzysztof Helt 	asm volatile ("pushfl		\n\t"
248f7627e25SThomas Gleixner 		      "pushfl		\n\t"
249f7627e25SThomas Gleixner 		      "popl %0		\n\t"
250f7627e25SThomas Gleixner 		      "movl %0, %1	\n\t"
251f7627e25SThomas Gleixner 		      "xorl %2, %0	\n\t"
252f7627e25SThomas Gleixner 		      "pushl %0		\n\t"
253f7627e25SThomas Gleixner 		      "popfl		\n\t"
254f7627e25SThomas Gleixner 		      "pushfl		\n\t"
255f7627e25SThomas Gleixner 		      "popl %0		\n\t"
256f7627e25SThomas Gleixner 		      "popfl		\n\t"
2570f3fa48aSIngo Molnar 
258f7627e25SThomas Gleixner 		      : "=&r" (f1), "=&r" (f2)
259f7627e25SThomas Gleixner 		      : "ir" (flag));
260f7627e25SThomas Gleixner 
261f7627e25SThomas Gleixner 	return ((f1^f2) & flag) != 0;
262f7627e25SThomas Gleixner }
263f7627e25SThomas Gleixner 
264f7627e25SThomas Gleixner /* Probe for the CPUID instruction */
265148f9bb8SPaul Gortmaker int have_cpuid_p(void)
266f7627e25SThomas Gleixner {
267f7627e25SThomas Gleixner 	return flag_is_changeable_p(X86_EFLAGS_ID);
268f7627e25SThomas Gleixner }
269f7627e25SThomas Gleixner 
270148f9bb8SPaul Gortmaker static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
2710a488a53SYinghai Lu {
2720a488a53SYinghai Lu 	unsigned long lo, hi;
2730f3fa48aSIngo Molnar 
2740f3fa48aSIngo Molnar 	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
2750f3fa48aSIngo Molnar 		return;
2760f3fa48aSIngo Molnar 
2770f3fa48aSIngo Molnar 	/* Disable processor serial number: */
2780f3fa48aSIngo Molnar 
2790a488a53SYinghai Lu 	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
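	/* 0x200000 is bit 21 of MSR_IA32_BBL_CR_CTL; setting it disables PSN reporting */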
2800a488a53SYinghai Lu 	lo |= 0x200000;
2810a488a53SYinghai Lu 	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
2820f3fa48aSIngo Molnar 
2831b74dde7SChen Yucong 	pr_notice("CPU serial number disabled.\n");
2840a488a53SYinghai Lu 	clear_cpu_cap(c, X86_FEATURE_PN);
2850a488a53SYinghai Lu 
2860a488a53SYinghai Lu 	/* Disabling the serial number may affect the cpuid level */
2870a488a53SYinghai Lu 	c->cpuid_level = cpuid_eax(0);
2880a488a53SYinghai Lu }
2890a488a53SYinghai Lu 
2900a488a53SYinghai Lu static int __init x86_serial_nr_setup(char *s)
2910a488a53SYinghai Lu {
2920a488a53SYinghai Lu 	disable_x86_serial_nr = 0;
2930a488a53SYinghai Lu 	return 1;
2940a488a53SYinghai Lu }
2950a488a53SYinghai Lu __setup("serialnumber", x86_serial_nr_setup);
296ba51dcedSYinghai Lu #else
297102bbe3aSYinghai Lu static inline int flag_is_changeable_p(u32 flag)
298102bbe3aSYinghai Lu {
299102bbe3aSYinghai Lu 	return 1;
300102bbe3aSYinghai Lu }
301102bbe3aSYinghai Lu static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
302102bbe3aSYinghai Lu {
303102bbe3aSYinghai Lu }
304ba51dcedSYinghai Lu #endif
3050a488a53SYinghai Lu 
306de5397adSFenghua Yu static __init int setup_disable_smep(char *arg)
307de5397adSFenghua Yu {
308b2cc2a07SH. Peter Anvin 	setup_clear_cpu_cap(X86_FEATURE_SMEP);
3090f6ff2bcSDave Hansen 	/* Check for things that depend on SMEP being enabled: */
3100f6ff2bcSDave Hansen 	check_mpx_erratum(&boot_cpu_data);
311de5397adSFenghua Yu 	return 1;
312de5397adSFenghua Yu }
313de5397adSFenghua Yu __setup("nosmep", setup_disable_smep);
314de5397adSFenghua Yu 
315b2cc2a07SH. Peter Anvin static __always_inline void setup_smep(struct cpuinfo_x86 *c)
316de5397adSFenghua Yu {
317b2cc2a07SH. Peter Anvin 	if (cpu_has(c, X86_FEATURE_SMEP))
318375074ccSAndy Lutomirski 		cr4_set_bits(X86_CR4_SMEP);
319de5397adSFenghua Yu }
320de5397adSFenghua Yu 
32152b6179aSH. Peter Anvin static __init int setup_disable_smap(char *arg)
32252b6179aSH. Peter Anvin {
323b2cc2a07SH. Peter Anvin 	setup_clear_cpu_cap(X86_FEATURE_SMAP);
32452b6179aSH. Peter Anvin 	return 1;
32552b6179aSH. Peter Anvin }
32652b6179aSH. Peter Anvin __setup("nosmap", setup_disable_smap);
32752b6179aSH. Peter Anvin 
328b2cc2a07SH. Peter Anvin static __always_inline void setup_smap(struct cpuinfo_x86 *c)
32952b6179aSH. Peter Anvin {
330581b7f15SAndrew Cooper 	unsigned long eflags = native_save_fl();
331b2cc2a07SH. Peter Anvin 
332b2cc2a07SH. Peter Anvin 	/* This should have been cleared long ago */
333b2cc2a07SH. Peter Anvin 	BUG_ON(eflags & X86_EFLAGS_AC);
334b2cc2a07SH. Peter Anvin 
33503bbd596SH. Peter Anvin 	if (cpu_has(c, X86_FEATURE_SMAP)) {
33603bbd596SH. Peter Anvin #ifdef CONFIG_X86_SMAP
337375074ccSAndy Lutomirski 		cr4_set_bits(X86_CR4_SMAP);
33803bbd596SH. Peter Anvin #else
339375074ccSAndy Lutomirski 		cr4_clear_bits(X86_CR4_SMAP);
34003bbd596SH. Peter Anvin #endif
34103bbd596SH. Peter Anvin 	}
342f7627e25SThomas Gleixner }
343f7627e25SThomas Gleixner 
344aa35f896SRicardo Neri static __always_inline void setup_umip(struct cpuinfo_x86 *c)
345aa35f896SRicardo Neri {
346aa35f896SRicardo Neri 	/* Check the boot processor, plus build option for UMIP. */
347aa35f896SRicardo Neri 	if (!cpu_feature_enabled(X86_FEATURE_UMIP))
348aa35f896SRicardo Neri 		goto out;
349aa35f896SRicardo Neri 
350aa35f896SRicardo Neri 	/* Check the current processor's cpuid bits. */
351aa35f896SRicardo Neri 	if (!cpu_has(c, X86_FEATURE_UMIP))
352aa35f896SRicardo Neri 		goto out;
353aa35f896SRicardo Neri 
354aa35f896SRicardo Neri 	cr4_set_bits(X86_CR4_UMIP);
355aa35f896SRicardo Neri 
356770c7755SRicardo Neri 	pr_info("x86/cpu: Activated the Intel User Mode Instruction Prevention (UMIP) CPU feature\n");
357770c7755SRicardo Neri 
358aa35f896SRicardo Neri 	return;
359aa35f896SRicardo Neri 
360aa35f896SRicardo Neri out:
361aa35f896SRicardo Neri 	/*
362aa35f896SRicardo Neri 	 * Make sure UMIP is disabled in case it was enabled in a
363aa35f896SRicardo Neri 	 * previous boot (e.g., via kexec).
364aa35f896SRicardo Neri 	 */
365aa35f896SRicardo Neri 	cr4_clear_bits(X86_CR4_UMIP);
366aa35f896SRicardo Neri }
367aa35f896SRicardo Neri 
368f7627e25SThomas Gleixner /*
36906976945SDave Hansen  * Protection Keys are not available in 32-bit mode.
37006976945SDave Hansen  */
37106976945SDave Hansen static bool pku_disabled;
37206976945SDave Hansen 
37306976945SDave Hansen static __always_inline void setup_pku(struct cpuinfo_x86 *c)
37406976945SDave Hansen {
375e8df1a95SDave Hansen 	/* check the boot processor, plus compile options for PKU: */
376e8df1a95SDave Hansen 	if (!cpu_feature_enabled(X86_FEATURE_PKU))
377e8df1a95SDave Hansen 		return;
378e8df1a95SDave Hansen 	/* checks the actual processor's cpuid bits: */
37906976945SDave Hansen 	if (!cpu_has(c, X86_FEATURE_PKU))
38006976945SDave Hansen 		return;
38106976945SDave Hansen 	if (pku_disabled)
38206976945SDave Hansen 		return;
38306976945SDave Hansen 
38406976945SDave Hansen 	cr4_set_bits(X86_CR4_PKE);
38506976945SDave Hansen 	/*
38606976945SDave Hansen 	 * Setting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
38706976945SDave Hansen 	 * cpuid bit to be set.  We need to ensure that we
38806976945SDave Hansen 	 * update that bit in this CPU's "cpu_info".
38906976945SDave Hansen 	 */
39006976945SDave Hansen 	get_cpu_cap(c);
39106976945SDave Hansen }
39206976945SDave Hansen 
39306976945SDave Hansen #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
39406976945SDave Hansen static __init int setup_disable_pku(char *arg)
39506976945SDave Hansen {
39606976945SDave Hansen 	/*
39706976945SDave Hansen 	 * Do not clear the X86_FEATURE_PKU bit.  All of the
39806976945SDave Hansen 	 * runtime checks are against OSPKE so clearing the
39906976945SDave Hansen 	 * bit does nothing.
40006976945SDave Hansen 	 *
40106976945SDave Hansen 	 * This way, we will see "pku" in cpuinfo, but not
40206976945SDave Hansen 	 * "ospke", which is exactly what we want.  It shows
40306976945SDave Hansen 	 * that the CPU has PKU, but the OS has not enabled it.
40406976945SDave Hansen 	 * This happens to be exactly how a system would look
40506976945SDave Hansen 	 * if we disabled the config option.
40606976945SDave Hansen 	 */
40706976945SDave Hansen 	pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n");
40806976945SDave Hansen 	pku_disabled = true;
40906976945SDave Hansen 	return 1;
41006976945SDave Hansen }
41106976945SDave Hansen __setup("nopku", setup_disable_pku);
41206976945SDave Hansen #endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
41306976945SDave Hansen 
41406976945SDave Hansen /*
415b38b0665SH. Peter Anvin  * Some CPU features depend on higher CPUID levels, which may not always
416b38b0665SH. Peter Anvin  * be available due to CPUID level capping or broken virtualization
417b38b0665SH. Peter Anvin  * software.  Add those features to this table to auto-disable them.
418b38b0665SH. Peter Anvin  */
419b38b0665SH. Peter Anvin struct cpuid_dependent_feature {
420b38b0665SH. Peter Anvin 	u32 feature;
421b38b0665SH. Peter Anvin 	u32 level;
422b38b0665SH. Peter Anvin };
4230f3fa48aSIngo Molnar 
424148f9bb8SPaul Gortmaker static const struct cpuid_dependent_feature
425b38b0665SH. Peter Anvin cpuid_dependent_features[] = {
426b38b0665SH. Peter Anvin 	{ X86_FEATURE_MWAIT,		0x00000005 },
427b38b0665SH. Peter Anvin 	{ X86_FEATURE_DCA,		0x00000009 },
428b38b0665SH. Peter Anvin 	{ X86_FEATURE_XSAVE,		0x0000000d },
429b38b0665SH. Peter Anvin 	{ 0, 0 }
430b38b0665SH. Peter Anvin };
431b38b0665SH. Peter Anvin 
432148f9bb8SPaul Gortmaker static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
433b38b0665SH. Peter Anvin {
434b38b0665SH. Peter Anvin 	const struct cpuid_dependent_feature *df;
4359766cdbcSJaswinder Singh Rajput 
436b38b0665SH. Peter Anvin 	for (df = cpuid_dependent_features; df->feature; df++) {
4370f3fa48aSIngo Molnar 
4380f3fa48aSIngo Molnar 		if (!cpu_has(c, df->feature))
4390f3fa48aSIngo Molnar 			continue;
440b38b0665SH. Peter Anvin 		/*
441b38b0665SH. Peter Anvin 		 * Note: cpuid_level is set to -1 if unavailable, but
442b38b0665SH. Peter Anvin 		 * extended_cpuid_level is set to 0 if unavailable
443b38b0665SH. Peter Anvin 		 * and the legitimate extended levels are all negative
444b38b0665SH. Peter Anvin 		 * when signed; hence the weird messing around with
445b38b0665SH. Peter Anvin 		 * signs here...
446b38b0665SH. Peter Anvin 		 */
4470f3fa48aSIngo Molnar 		if (!((s32)df->level < 0 ?
448f6db44dfSYinghai Lu 		     (u32)df->level > (u32)c->extended_cpuid_level :
4490f3fa48aSIngo Molnar 		     (s32)df->level > (s32)c->cpuid_level))
4500f3fa48aSIngo Molnar 			continue;
4510f3fa48aSIngo Molnar 
452b38b0665SH. Peter Anvin 		clear_cpu_cap(c, df->feature);
4530f3fa48aSIngo Molnar 		if (!warn)
4540f3fa48aSIngo Molnar 			continue;
4550f3fa48aSIngo Molnar 
4561b74dde7SChen Yucong 		pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
4579def39beSJosh Triplett 			x86_cap_flag(df->feature), df->level);
458b38b0665SH. Peter Anvin 	}
459b38b0665SH. Peter Anvin }
460b38b0665SH. Peter Anvin 
461b38b0665SH. Peter Anvin /*
462f7627e25SThomas Gleixner  * Naming convention should be: <Name> [(<Codename>)]
463f7627e25SThomas Gleixner  * This table is only used if init_<vendor>() below doesn't set it;
4640f3fa48aSIngo Molnar  * in particular, if CPUID levels 0x80000002..4 are supported, this
4650f3fa48aSIngo Molnar  * isn't used
466f7627e25SThomas Gleixner  */
467f7627e25SThomas Gleixner 
468f7627e25SThomas Gleixner /* Look up CPU names by table lookup. */
469148f9bb8SPaul Gortmaker static const char *table_lookup_model(struct cpuinfo_x86 *c)
470f7627e25SThomas Gleixner {
47109dc68d9SJan Beulich #ifdef CONFIG_X86_32
47209dc68d9SJan Beulich 	const struct legacy_cpu_model_info *info;
473f7627e25SThomas Gleixner 
474f7627e25SThomas Gleixner 	if (c->x86_model >= 16)
475f7627e25SThomas Gleixner 		return NULL;	/* Range check */
476f7627e25SThomas Gleixner 
477f7627e25SThomas Gleixner 	if (!this_cpu)
478f7627e25SThomas Gleixner 		return NULL;
479f7627e25SThomas Gleixner 
48009dc68d9SJan Beulich 	info = this_cpu->legacy_models;
481f7627e25SThomas Gleixner 
48209dc68d9SJan Beulich 	while (info->family) {
483f7627e25SThomas Gleixner 		if (info->family == c->x86)
484f7627e25SThomas Gleixner 			return info->model_names[c->x86_model];
485f7627e25SThomas Gleixner 		info++;
486f7627e25SThomas Gleixner 	}
48709dc68d9SJan Beulich #endif
488f7627e25SThomas Gleixner 	return NULL;		/* Not found */
489f7627e25SThomas Gleixner }
490f7627e25SThomas Gleixner 
4916cbd2171SThomas Gleixner __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
4926cbd2171SThomas Gleixner __u32 cpu_caps_set[NCAPINTS + NBUGINTS];
493f7627e25SThomas Gleixner 
49411e3a840SJeremy Fitzhardinge void load_percpu_segment(int cpu)
4959d31d35bSYinghai Lu {
496fab334c1SYinghai Lu #ifdef CONFIG_X86_32
4972697fbd5SBrian Gerst 	loadsegment(fs, __KERNEL_PERCPU);
4982697fbd5SBrian Gerst #else
49945e876f7SAndy Lutomirski 	__loadsegment_simple(gs, 0);
50035060ed6SVitaly Kuznetsov 	wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
501fab334c1SYinghai Lu #endif
50260a5317fSTejun Heo 	load_stack_canary_segment();
5039d31d35bSYinghai Lu }
5049d31d35bSYinghai Lu 
50572f5e08dSAndy Lutomirski #ifdef CONFIG_X86_32
50672f5e08dSAndy Lutomirski /* The 32-bit entry code needs to find cpu_entry_area. */
50772f5e08dSAndy Lutomirski DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
50872f5e08dSAndy Lutomirski #endif
50972f5e08dSAndy Lutomirski 
51040e7f949SAndy Lutomirski #ifdef CONFIG_X86_64
51140e7f949SAndy Lutomirski /*
51240e7f949SAndy Lutomirski  * Special IST stacks which the CPU switches to when it calls
51340e7f949SAndy Lutomirski  * an IST-marked descriptor entry. Up to 7 stacks (hardware
51440e7f949SAndy Lutomirski  * limit), all of them are 4K, except the debug stack which
51540e7f949SAndy Lutomirski  * is 8K.
51640e7f949SAndy Lutomirski  */
51740e7f949SAndy Lutomirski static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
51840e7f949SAndy Lutomirski 	  [0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
51940e7f949SAndy Lutomirski 	  [DEBUG_STACK - 1]			= DEBUG_STKSZ
52040e7f949SAndy Lutomirski };
52140e7f949SAndy Lutomirski #endif
52240e7f949SAndy Lutomirski 
52345fc8757SThomas Garnier /* Load the original GDT from the per-cpu structure */
52445fc8757SThomas Garnier void load_direct_gdt(int cpu)
52545fc8757SThomas Garnier {
52645fc8757SThomas Garnier 	struct desc_ptr gdt_descr;
52745fc8757SThomas Garnier 
52845fc8757SThomas Garnier 	gdt_descr.address = (long)get_cpu_gdt_rw(cpu);
52945fc8757SThomas Garnier 	gdt_descr.size = GDT_SIZE - 1;
53045fc8757SThomas Garnier 	load_gdt(&gdt_descr);
53145fc8757SThomas Garnier }
53245fc8757SThomas Garnier EXPORT_SYMBOL_GPL(load_direct_gdt);
53345fc8757SThomas Garnier 
53469218e47SThomas Garnier /* Load a fixmap remapping of the per-cpu GDT */
53569218e47SThomas Garnier void load_fixmap_gdt(int cpu)
53669218e47SThomas Garnier {
53769218e47SThomas Garnier 	struct desc_ptr gdt_descr;
53869218e47SThomas Garnier 
53969218e47SThomas Garnier 	gdt_descr.address = (long)get_cpu_gdt_ro(cpu);
54069218e47SThomas Garnier 	gdt_descr.size = GDT_SIZE - 1;
54169218e47SThomas Garnier 	load_gdt(&gdt_descr);
54269218e47SThomas Garnier }
54345fc8757SThomas Garnier EXPORT_SYMBOL_GPL(load_fixmap_gdt);
54469218e47SThomas Garnier 
5450f3fa48aSIngo Molnar /*
5460f3fa48aSIngo Molnar  * Current gdt points %fs at the "master" per-cpu area: after this,
5470f3fa48aSIngo Molnar  * it's on the real one.
5480f3fa48aSIngo Molnar  */
549552be871SBrian Gerst void switch_to_new_gdt(int cpu)
550f7627e25SThomas Gleixner {
55145fc8757SThomas Garnier 	/* Load the original GDT */
55245fc8757SThomas Garnier 	load_direct_gdt(cpu);
553f7627e25SThomas Gleixner 	/* Reload the per-cpu base */
55411e3a840SJeremy Fitzhardinge 	load_percpu_segment(cpu);
555f7627e25SThomas Gleixner }
556f7627e25SThomas Gleixner 
557148f9bb8SPaul Gortmaker static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
558f7627e25SThomas Gleixner 
559148f9bb8SPaul Gortmaker static void get_model_name(struct cpuinfo_x86 *c)
560f7627e25SThomas Gleixner {
561f7627e25SThomas Gleixner 	unsigned int *v;
562ee098e1aSBorislav Petkov 	char *p, *q, *s;
563f7627e25SThomas Gleixner 
5643da99c97SYinghai Lu 	if (c->extended_cpuid_level < 0x80000004)
5651b05d60dSYinghai Lu 		return;
566f7627e25SThomas Gleixner 
567f7627e25SThomas Gleixner 	v = (unsigned int *)c->x86_model_id;
568f7627e25SThomas Gleixner 	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
569f7627e25SThomas Gleixner 	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
570f7627e25SThomas Gleixner 	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
571f7627e25SThomas Gleixner 	c->x86_model_id[48] = 0;
572f7627e25SThomas Gleixner 
573ee098e1aSBorislav Petkov 	/* Trim whitespace */
574ee098e1aSBorislav Petkov 	p = q = s = &c->x86_model_id[0];
575ee098e1aSBorislav Petkov 
576ee098e1aSBorislav Petkov 	while (*p == ' ')
577ee098e1aSBorislav Petkov 		p++;
578ee098e1aSBorislav Petkov 
579ee098e1aSBorislav Petkov 	while (*p) {
580ee098e1aSBorislav Petkov 		/* Note the last non-whitespace index */
581ee098e1aSBorislav Petkov 		if (!isspace(*p))
582ee098e1aSBorislav Petkov 			s = q;
583ee098e1aSBorislav Petkov 
584ee098e1aSBorislav Petkov 		*q++ = *p++;
585ee098e1aSBorislav Petkov 	}
586ee098e1aSBorislav Petkov 
587ee098e1aSBorislav Petkov 	*(s + 1) = '\0';
588f7627e25SThomas Gleixner }
589f7627e25SThomas Gleixner 
5909305bd6cSThomas Gleixner void detect_num_cpu_cores(struct cpuinfo_x86 *c)
5912cc61be6SDavid Wang {
5922cc61be6SDavid Wang 	unsigned int eax, ebx, ecx, edx;
5932cc61be6SDavid Wang 
5949305bd6cSThomas Gleixner 	c->x86_max_cores = 1;
5952cc61be6SDavid Wang 	if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
5969305bd6cSThomas Gleixner 		return;
5972cc61be6SDavid Wang 
5982cc61be6SDavid Wang 	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
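	/*
	 * CPUID leaf 4, subleaf 0: EAX[4:0] is the cache type (0 means the
	 * leaf is not implemented), EAX[31:26] is the maximum number of
	 * addressable core IDs in the package, minus one.
	 */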
5992cc61be6SDavid Wang 	if (eax & 0x1f)
6009305bd6cSThomas Gleixner 		c->x86_max_cores = (eax >> 26) + 1;
6012cc61be6SDavid Wang }
6022cc61be6SDavid Wang 
603148f9bb8SPaul Gortmaker void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
604f7627e25SThomas Gleixner {
6059d31d35bSYinghai Lu 	unsigned int n, dummy, ebx, ecx, edx, l2size;
606f7627e25SThomas Gleixner 
6073da99c97SYinghai Lu 	n = c->extended_cpuid_level;
608f7627e25SThomas Gleixner 
609f7627e25SThomas Gleixner 	if (n >= 0x80000005) {
6109d31d35bSYinghai Lu 		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
611f7627e25SThomas Gleixner 		c->x86_cache_size = (ecx>>24) + (edx>>24);
612140fc727SYinghai Lu #ifdef CONFIG_X86_64
613140fc727SYinghai Lu 		/* On K8 L1 TLB is inclusive, so don't count it */
614140fc727SYinghai Lu 		c->x86_tlbsize = 0;
615140fc727SYinghai Lu #endif
616f7627e25SThomas Gleixner 	}
617f7627e25SThomas Gleixner 
618f7627e25SThomas Gleixner 	if (n < 0x80000006)	/* Some chips just have a large L1. */
619f7627e25SThomas Gleixner 		return;
620f7627e25SThomas Gleixner 
6210a488a53SYinghai Lu 	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
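	/* CPUID 0x80000006: ECX[31:16] = L2 cache size in KB */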
622f7627e25SThomas Gleixner 	l2size = ecx >> 16;
623f7627e25SThomas Gleixner 
624140fc727SYinghai Lu #ifdef CONFIG_X86_64
625140fc727SYinghai Lu 	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
626140fc727SYinghai Lu #else
627f7627e25SThomas Gleixner 	/* do processor-specific cache resizing */
62809dc68d9SJan Beulich 	if (this_cpu->legacy_cache_size)
62909dc68d9SJan Beulich 		l2size = this_cpu->legacy_cache_size(c, l2size);
630f7627e25SThomas Gleixner 
631f7627e25SThomas Gleixner 	/* Allow user to override all this if necessary. */
632f7627e25SThomas Gleixner 	if (cachesize_override != -1)
633f7627e25SThomas Gleixner 		l2size = cachesize_override;
634f7627e25SThomas Gleixner 
635f7627e25SThomas Gleixner 	if (l2size == 0)
636f7627e25SThomas Gleixner 		return;		/* Again, no L2 cache is possible */
637140fc727SYinghai Lu #endif
638f7627e25SThomas Gleixner 
639f7627e25SThomas Gleixner 	c->x86_cache_size = l2size;
640f7627e25SThomas Gleixner }
641f7627e25SThomas Gleixner 
642e0ba94f1SAlex Shi u16 __read_mostly tlb_lli_4k[NR_INFO];
643e0ba94f1SAlex Shi u16 __read_mostly tlb_lli_2m[NR_INFO];
644e0ba94f1SAlex Shi u16 __read_mostly tlb_lli_4m[NR_INFO];
645e0ba94f1SAlex Shi u16 __read_mostly tlb_lld_4k[NR_INFO];
646e0ba94f1SAlex Shi u16 __read_mostly tlb_lld_2m[NR_INFO];
647e0ba94f1SAlex Shi u16 __read_mostly tlb_lld_4m[NR_INFO];
648dd360393SKirill A. Shutemov u16 __read_mostly tlb_lld_1g[NR_INFO];
649e0ba94f1SAlex Shi 
650f94fe119SSteven Honeyman static void cpu_detect_tlb(struct cpuinfo_x86 *c)
651e0ba94f1SAlex Shi {
652e0ba94f1SAlex Shi 	if (this_cpu->c_detect_tlb)
653e0ba94f1SAlex Shi 		this_cpu->c_detect_tlb(c);
654e0ba94f1SAlex Shi 
655f94fe119SSteven Honeyman 	pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
656e0ba94f1SAlex Shi 		tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
657f94fe119SSteven Honeyman 		tlb_lli_4m[ENTRIES]);
658f94fe119SSteven Honeyman 
659f94fe119SSteven Honeyman 	pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
660f94fe119SSteven Honeyman 		tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
661f94fe119SSteven Honeyman 		tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
662e0ba94f1SAlex Shi }
663e0ba94f1SAlex Shi 
664545401f4SThomas Gleixner int detect_ht_early(struct cpuinfo_x86 *c)
6659d31d35bSYinghai Lu {
666c8e56d20SBorislav Petkov #ifdef CONFIG_SMP
6679d31d35bSYinghai Lu 	u32 eax, ebx, ecx, edx;
6689d31d35bSYinghai Lu 
6690a488a53SYinghai Lu 	if (!cpu_has(c, X86_FEATURE_HT))
670545401f4SThomas Gleixner 		return -1;
6719d31d35bSYinghai Lu 
6720a488a53SYinghai Lu 	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
673545401f4SThomas Gleixner 		return -1;
6740a488a53SYinghai Lu 
6751cd78776SYinghai Lu 	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
676545401f4SThomas Gleixner 		return -1;
6771cd78776SYinghai Lu 
6780a488a53SYinghai Lu 	cpuid(1, &eax, &ebx, &ecx, &edx);
6790a488a53SYinghai Lu 
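	/* CPUID.01H:EBX[23:16] = maximum number of addressable logical CPU IDs in this package */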
6809d31d35bSYinghai Lu 	smp_num_siblings = (ebx & 0xff0000) >> 16;
681545401f4SThomas Gleixner 	if (smp_num_siblings == 1)
6821b74dde7SChen Yucong 		pr_info_once("CPU0: Hyper-Threading is disabled\n");
683545401f4SThomas Gleixner #endif
684545401f4SThomas Gleixner 	return 0;
6850f3fa48aSIngo Molnar }
6860f3fa48aSIngo Molnar 
687545401f4SThomas Gleixner void detect_ht(struct cpuinfo_x86 *c)
688545401f4SThomas Gleixner {
689545401f4SThomas Gleixner #ifdef CONFIG_SMP
690545401f4SThomas Gleixner 	int index_msb, core_bits;
691545401f4SThomas Gleixner 
692545401f4SThomas Gleixner 	if (detect_ht_early(c) < 0)
693545401f4SThomas Gleixner 		return;
6949d31d35bSYinghai Lu 
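	/*
	 * The initial APIC ID packs thread, core and package IDs into
	 * consecutive bit fields: shifting out the sibling bits yields the
	 * package ID, and the core ID comes from the bits between the
	 * thread field and the package field.
	 */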
6959d31d35bSYinghai Lu 	index_msb = get_count_order(smp_num_siblings);
696cb8cc442SIngo Molnar 	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
6979d31d35bSYinghai Lu 
6989d31d35bSYinghai Lu 	smp_num_siblings = smp_num_siblings / c->x86_max_cores;
6999d31d35bSYinghai Lu 
7009d31d35bSYinghai Lu 	index_msb = get_count_order(smp_num_siblings);
7019d31d35bSYinghai Lu 
7029d31d35bSYinghai Lu 	core_bits = get_count_order(c->x86_max_cores);
7039d31d35bSYinghai Lu 
704cb8cc442SIngo Molnar 	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
7051cd78776SYinghai Lu 				       ((1 << core_bits) - 1);
7069d31d35bSYinghai Lu #endif
70797e4db7cSYinghai Lu }
708f7627e25SThomas Gleixner 
709148f9bb8SPaul Gortmaker static void get_cpu_vendor(struct cpuinfo_x86 *c)
710f7627e25SThomas Gleixner {
711f7627e25SThomas Gleixner 	char *v = c->x86_vendor_id;
7120f3fa48aSIngo Molnar 	int i;
713f7627e25SThomas Gleixner 
714f7627e25SThomas Gleixner 	for (i = 0; i < X86_VENDOR_NUM; i++) {
71510a434fcSYinghai Lu 		if (!cpu_devs[i])
71610a434fcSYinghai Lu 			break;
71710a434fcSYinghai Lu 
718f7627e25SThomas Gleixner 		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
719f7627e25SThomas Gleixner 		    (cpu_devs[i]->c_ident[1] &&
720f7627e25SThomas Gleixner 		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
7210f3fa48aSIngo Molnar 
722f7627e25SThomas Gleixner 			this_cpu = cpu_devs[i];
72310a434fcSYinghai Lu 			c->x86_vendor = this_cpu->c_x86_vendor;
724f7627e25SThomas Gleixner 			return;
725f7627e25SThomas Gleixner 		}
726f7627e25SThomas Gleixner 	}
72710a434fcSYinghai Lu 
7281b74dde7SChen Yucong 	pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
729a9c56953SMinchan Kim 		    "CPU: Your system may be unstable.\n", v);
73010a434fcSYinghai Lu 
731f7627e25SThomas Gleixner 	c->x86_vendor = X86_VENDOR_UNKNOWN;
732f7627e25SThomas Gleixner 	this_cpu = &default_cpu;
733f7627e25SThomas Gleixner }
734f7627e25SThomas Gleixner 
735148f9bb8SPaul Gortmaker void cpu_detect(struct cpuinfo_x86 *c)
736f7627e25SThomas Gleixner {
737f7627e25SThomas Gleixner 	/* Get vendor name */
7384a148513SHarvey Harrison 	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
7394a148513SHarvey Harrison 	      (unsigned int *)&c->x86_vendor_id[0],
7404a148513SHarvey Harrison 	      (unsigned int *)&c->x86_vendor_id[8],
7414a148513SHarvey Harrison 	      (unsigned int *)&c->x86_vendor_id[4]);
742f7627e25SThomas Gleixner 
743f7627e25SThomas Gleixner 	c->x86 = 4;
7449d31d35bSYinghai Lu 	/* Intel-defined flags: level 0x00000001 */
745f7627e25SThomas Gleixner 	if (c->cpuid_level >= 0x00000001) {
746f7627e25SThomas Gleixner 		u32 junk, tfms, cap0, misc;
7470f3fa48aSIngo Molnar 
748f7627e25SThomas Gleixner 		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
74999f925ceSBorislav Petkov 		c->x86		= x86_family(tfms);
75099f925ceSBorislav Petkov 		c->x86_model	= x86_model(tfms);
751b399151cSJia Zhang 		c->x86_stepping	= x86_stepping(tfms);
7520f3fa48aSIngo Molnar 
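		/* EDX bit 19 (CLFSH): CLFLUSH supported; EBX[15:8] is the CLFLUSH line size in 8-byte units */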
753d4387bd3SHuang, Ying 		if (cap0 & (1<<19)) {
754d4387bd3SHuang, Ying 			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
7559d31d35bSYinghai Lu 			c->x86_cache_alignment = c->x86_clflush_size;
756d4387bd3SHuang, Ying 		}
757f7627e25SThomas Gleixner 	}
758f7627e25SThomas Gleixner }
7593da99c97SYinghai Lu 
7608bf1ebcaSAndy Lutomirski static void apply_forced_caps(struct cpuinfo_x86 *c)
7618bf1ebcaSAndy Lutomirski {
7628bf1ebcaSAndy Lutomirski 	int i;
7638bf1ebcaSAndy Lutomirski 
7646cbd2171SThomas Gleixner 	for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
7658bf1ebcaSAndy Lutomirski 		c->x86_capability[i] &= ~cpu_caps_cleared[i];
7668bf1ebcaSAndy Lutomirski 		c->x86_capability[i] |= cpu_caps_set[i];
7678bf1ebcaSAndy Lutomirski 	}
7688bf1ebcaSAndy Lutomirski }
7698bf1ebcaSAndy Lutomirski 
7707fcae111SDavid Woodhouse static void init_speculation_control(struct cpuinfo_x86 *c)
7717fcae111SDavid Woodhouse {
7727fcae111SDavid Woodhouse 	/*
7737fcae111SDavid Woodhouse 	 * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
7747fcae111SDavid Woodhouse 	 * and they also have a different bit for STIBP support. Also,
7757fcae111SDavid Woodhouse 	 * a hypervisor might have set the individual AMD bits even on
7767fcae111SDavid Woodhouse 	 * Intel CPUs, for finer-grained selection of what's available.
7777fcae111SDavid Woodhouse 	 */
7787fcae111SDavid Woodhouse 	if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
7797fcae111SDavid Woodhouse 		set_cpu_cap(c, X86_FEATURE_IBRS);
7807fcae111SDavid Woodhouse 		set_cpu_cap(c, X86_FEATURE_IBPB);
7817eb8956aSThomas Gleixner 		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
7827fcae111SDavid Woodhouse 	}
783e7c587daSBorislav Petkov 
7847fcae111SDavid Woodhouse 	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
7857fcae111SDavid Woodhouse 		set_cpu_cap(c, X86_FEATURE_STIBP);
786e7c587daSBorislav Petkov 
787bc226f07STom Lendacky 	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
788bc226f07STom Lendacky 	    cpu_has(c, X86_FEATURE_VIRT_SSBD))
78952817587SThomas Gleixner 		set_cpu_cap(c, X86_FEATURE_SSBD);
79052817587SThomas Gleixner 
7917eb8956aSThomas Gleixner 	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
792e7c587daSBorislav Petkov 		set_cpu_cap(c, X86_FEATURE_IBRS);
7937eb8956aSThomas Gleixner 		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
7947eb8956aSThomas Gleixner 	}
795e7c587daSBorislav Petkov 
796e7c587daSBorislav Petkov 	if (cpu_has(c, X86_FEATURE_AMD_IBPB))
797e7c587daSBorislav Petkov 		set_cpu_cap(c, X86_FEATURE_IBPB);
798e7c587daSBorislav Petkov 
7997eb8956aSThomas Gleixner 	if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
800e7c587daSBorislav Petkov 		set_cpu_cap(c, X86_FEATURE_STIBP);
8017eb8956aSThomas Gleixner 		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
8027eb8956aSThomas Gleixner 	}
8036ac2f49eSKonrad Rzeszutek Wilk 
8046ac2f49eSKonrad Rzeszutek Wilk 	if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
8056ac2f49eSKonrad Rzeszutek Wilk 		set_cpu_cap(c, X86_FEATURE_SSBD);
8066ac2f49eSKonrad Rzeszutek Wilk 		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
8076ac2f49eSKonrad Rzeszutek Wilk 		clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
8086ac2f49eSKonrad Rzeszutek Wilk 	}
8097fcae111SDavid Woodhouse }
8107fcae111SDavid Woodhouse 
811148f9bb8SPaul Gortmaker void get_cpu_cap(struct cpuinfo_x86 *c)
812093af8d7SYinghai Lu {
81339c06df4SBorislav Petkov 	u32 eax, ebx, ecx, edx;
814093af8d7SYinghai Lu 
815093af8d7SYinghai Lu 	/* Intel-defined flags: level 0x00000001 */
816093af8d7SYinghai Lu 	if (c->cpuid_level >= 0x00000001) {
81739c06df4SBorislav Petkov 		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
8180f3fa48aSIngo Molnar 
81939c06df4SBorislav Petkov 		c->x86_capability[CPUID_1_ECX] = ecx;
82039c06df4SBorislav Petkov 		c->x86_capability[CPUID_1_EDX] = edx;
821093af8d7SYinghai Lu 	}
822093af8d7SYinghai Lu 
8233df8d920SAndy Lutomirski 	/* Thermal and Power Management Leaf: level 0x00000006 (eax) */
8243df8d920SAndy Lutomirski 	if (c->cpuid_level >= 0x00000006)
8253df8d920SAndy Lutomirski 		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
8263df8d920SAndy Lutomirski 
827bdc802dcSH. Peter Anvin 	/* Additional Intel-defined flags: level 0x00000007 */
828bdc802dcSH. Peter Anvin 	if (c->cpuid_level >= 0x00000007) {
829bdc802dcSH. Peter Anvin 		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
83039c06df4SBorislav Petkov 		c->x86_capability[CPUID_7_0_EBX] = ebx;
831dfb4a70fSDave Hansen 		c->x86_capability[CPUID_7_ECX] = ecx;
83295ca0ee8SDavid Woodhouse 		c->x86_capability[CPUID_7_EDX] = edx;
833bdc802dcSH. Peter Anvin 	}
834bdc802dcSH. Peter Anvin 
8356229ad27SFenghua Yu 	/* Extended state features: level 0x0000000d */
8366229ad27SFenghua Yu 	if (c->cpuid_level >= 0x0000000d) {
8376229ad27SFenghua Yu 		cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);
8386229ad27SFenghua Yu 
83939c06df4SBorislav Petkov 		c->x86_capability[CPUID_D_1_EAX] = eax;
8406229ad27SFenghua Yu 	}
8416229ad27SFenghua Yu 
842cbc82b17SPeter P Waskiewicz Jr 	/* Additional Intel-defined flags: level 0x0000000F */
843cbc82b17SPeter P Waskiewicz Jr 	if (c->cpuid_level >= 0x0000000F) {
844cbc82b17SPeter P Waskiewicz Jr 
845cbc82b17SPeter P Waskiewicz Jr 		/* QoS sub-leaf, EAX=0Fh, ECX=0 */
846cbc82b17SPeter P Waskiewicz Jr 		cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
84739c06df4SBorislav Petkov 		c->x86_capability[CPUID_F_0_EDX] = edx;
84839c06df4SBorislav Petkov 
849cbc82b17SPeter P Waskiewicz Jr 		if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
850cbc82b17SPeter P Waskiewicz Jr 			/* will be overridden if occupancy monitoring exists */
851cbc82b17SPeter P Waskiewicz Jr 			c->x86_cache_max_rmid = ebx;
852cbc82b17SPeter P Waskiewicz Jr 
853cbc82b17SPeter P Waskiewicz Jr 			/* QoS sub-leaf, EAX=0Fh, ECX=1 */
854cbc82b17SPeter P Waskiewicz Jr 			cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
85539c06df4SBorislav Petkov 			c->x86_capability[CPUID_F_1_EDX] = edx;
85639c06df4SBorislav Petkov 
85733c3cc7aSVikas Shivappa 			if ((cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) ||
85833c3cc7aSVikas Shivappa 			      ((cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL)) ||
85933c3cc7aSVikas Shivappa 			       (cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)))) {
860cbc82b17SPeter P Waskiewicz Jr 				c->x86_cache_max_rmid = ecx;
861cbc82b17SPeter P Waskiewicz Jr 				c->x86_cache_occ_scale = ebx;
862cbc82b17SPeter P Waskiewicz Jr 			}
863cbc82b17SPeter P Waskiewicz Jr 		} else {
864cbc82b17SPeter P Waskiewicz Jr 			c->x86_cache_max_rmid = -1;
865cbc82b17SPeter P Waskiewicz Jr 			c->x86_cache_occ_scale = -1;
866cbc82b17SPeter P Waskiewicz Jr 		}
867cbc82b17SPeter P Waskiewicz Jr 	}
868cbc82b17SPeter P Waskiewicz Jr 
869093af8d7SYinghai Lu 	/* AMD-defined flags: level 0x80000001 */
87039c06df4SBorislav Petkov 	eax = cpuid_eax(0x80000000);
87139c06df4SBorislav Petkov 	c->extended_cpuid_level = eax;
8720f3fa48aSIngo Molnar 
87339c06df4SBorislav Petkov 	if ((eax & 0xffff0000) == 0x80000000) {
87439c06df4SBorislav Petkov 		if (eax >= 0x80000001) {
87539c06df4SBorislav Petkov 			cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
87639c06df4SBorislav Petkov 
87739c06df4SBorislav Petkov 			c->x86_capability[CPUID_8000_0001_ECX] = ecx;
87839c06df4SBorislav Petkov 			c->x86_capability[CPUID_8000_0001_EDX] = edx;
879093af8d7SYinghai Lu 		}
880093af8d7SYinghai Lu 	}
881093af8d7SYinghai Lu 
88271faad43SYazen Ghannam 	if (c->extended_cpuid_level >= 0x80000007) {
88371faad43SYazen Ghannam 		cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
88471faad43SYazen Ghannam 
88571faad43SYazen Ghannam 		c->x86_capability[CPUID_8000_0007_EBX] = ebx;
88671faad43SYazen Ghannam 		c->x86_power = edx;
88771faad43SYazen Ghannam 	}
88871faad43SYazen Ghannam 
889c65732e4SThomas Gleixner 	if (c->extended_cpuid_level >= 0x80000008) {
890c65732e4SThomas Gleixner 		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
891c65732e4SThomas Gleixner 		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
892c65732e4SThomas Gleixner 	}
893c65732e4SThomas Gleixner 
8942ccd71f1SBorislav Petkov 	if (c->extended_cpuid_level >= 0x8000000a)
89539c06df4SBorislav Petkov 		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
8962ccd71f1SBorislav Petkov 
8971dedefd1SJacob Pan 	init_scattered_cpuid_features(c);
8987fcae111SDavid Woodhouse 	init_speculation_control(c);
89960d34501SAndy Lutomirski 
90060d34501SAndy Lutomirski 	/*
90160d34501SAndy Lutomirski 	 * Clear/Set all flags overridden by options, after probe.
90260d34501SAndy Lutomirski 	 * This needs to happen each time we re-probe, which may happen
90360d34501SAndy Lutomirski 	 * several times during CPU initialization.
90460d34501SAndy Lutomirski 	 */
90560d34501SAndy Lutomirski 	apply_forced_caps(c);
906093af8d7SYinghai Lu }
907093af8d7SYinghai Lu 
908405c018aSM. Vefa Bicakci void get_cpu_address_sizes(struct cpuinfo_x86 *c)
909d94a155cSKirill A. Shutemov {
910d94a155cSKirill A. Shutemov 	u32 eax, ebx, ecx, edx;
911d94a155cSKirill A. Shutemov 
912d94a155cSKirill A. Shutemov 	if (c->extended_cpuid_level >= 0x80000008) {
913d94a155cSKirill A. Shutemov 		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
914d94a155cSKirill A. Shutemov 
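		/* EAX[7:0] = physical address bits, EAX[15:8] = virtual (linear) address bits */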
915d94a155cSKirill A. Shutemov 		c->x86_virt_bits = (eax >> 8) & 0xff;
916d94a155cSKirill A. Shutemov 		c->x86_phys_bits = eax & 0xff;
917d94a155cSKirill A. Shutemov 	}
918d94a155cSKirill A. Shutemov #ifdef CONFIG_X86_32
919d94a155cSKirill A. Shutemov 	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
920d94a155cSKirill A. Shutemov 		c->x86_phys_bits = 36;
921d94a155cSKirill A. Shutemov #endif
922cc51e542SAndi Kleen 	c->x86_cache_bits = c->x86_phys_bits;
923d94a155cSKirill A. Shutemov }
924d94a155cSKirill A. Shutemov 
925148f9bb8SPaul Gortmaker static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
926aef93c8bSYinghai Lu {
927aef93c8bSYinghai Lu #ifdef CONFIG_X86_32
928aef93c8bSYinghai Lu 	int i;
929aef93c8bSYinghai Lu 
930aef93c8bSYinghai Lu 	/*
931aef93c8bSYinghai Lu 	 * First of all, decide if this is a 486 or higher
932aef93c8bSYinghai Lu 	 * It's a 486 if we can modify the AC flag
933aef93c8bSYinghai Lu 	 */
934aef93c8bSYinghai Lu 	if (flag_is_changeable_p(X86_EFLAGS_AC))
935aef93c8bSYinghai Lu 		c->x86 = 4;
936aef93c8bSYinghai Lu 	else
937aef93c8bSYinghai Lu 		c->x86 = 3;
938aef93c8bSYinghai Lu 
939aef93c8bSYinghai Lu 	for (i = 0; i < X86_VENDOR_NUM; i++)
940aef93c8bSYinghai Lu 		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
941aef93c8bSYinghai Lu 			c->x86_vendor_id[0] = 0;
942aef93c8bSYinghai Lu 			cpu_devs[i]->c_identify(c);
943aef93c8bSYinghai Lu 			if (c->x86_vendor_id[0]) {
944aef93c8bSYinghai Lu 				get_cpu_vendor(c);
945aef93c8bSYinghai Lu 				break;
946aef93c8bSYinghai Lu 			}
947aef93c8bSYinghai Lu 		}
948aef93c8bSYinghai Lu #endif
949093af8d7SYinghai Lu }
950f7627e25SThomas Gleixner 
9514bf5d56dSArnd Bergmann static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
952fec9434aSDavid Woodhouse 	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_CEDARVIEW,	X86_FEATURE_ANY },
953fec9434aSDavid Woodhouse 	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_CLOVERVIEW,	X86_FEATURE_ANY },
954fec9434aSDavid Woodhouse 	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_LINCROFT,	X86_FEATURE_ANY },
955fec9434aSDavid Woodhouse 	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_PENWELL,	X86_FEATURE_ANY },
956fec9434aSDavid Woodhouse 	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_PINEVIEW,	X86_FEATURE_ANY },
957fec9434aSDavid Woodhouse 	{ X86_VENDOR_CENTAUR,	5 },
958fec9434aSDavid Woodhouse 	{ X86_VENDOR_INTEL,	5 },
959fec9434aSDavid Woodhouse 	{ X86_VENDOR_NSC,	5 },
960fec9434aSDavid Woodhouse 	{ X86_VENDOR_ANY,	4 },
961fec9434aSDavid Woodhouse 	{}
962fec9434aSDavid Woodhouse };
963fec9434aSDavid Woodhouse 
9644bf5d56dSArnd Bergmann static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
965fec9434aSDavid Woodhouse 	{ X86_VENDOR_AMD },
966fec9434aSDavid Woodhouse 	{}
967fec9434aSDavid Woodhouse };
968fec9434aSDavid Woodhouse 
9698ecc4979SDominik Brodowski /* Only list CPUs which speculate but are not susceptible to SSB */
970c456442cSKonrad Rzeszutek Wilk static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
971c456442cSKonrad Rzeszutek Wilk 	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT1	},
972c456442cSKonrad Rzeszutek Wilk 	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_AIRMONT		},
973c456442cSKonrad Rzeszutek Wilk 	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT2	},
974c456442cSKonrad Rzeszutek Wilk 	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_MERRIFIELD	},
975c456442cSKonrad Rzeszutek Wilk 	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_CORE_YONAH		},
976c456442cSKonrad Rzeszutek Wilk 	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNL		},
977c456442cSKonrad Rzeszutek Wilk 	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNM		},
978764f3c21SKonrad Rzeszutek Wilk 	{ X86_VENDOR_AMD,	0x12,					},
979764f3c21SKonrad Rzeszutek Wilk 	{ X86_VENDOR_AMD,	0x11,					},
980764f3c21SKonrad Rzeszutek Wilk 	{ X86_VENDOR_AMD,	0x10,					},
981764f3c21SKonrad Rzeszutek Wilk 	{ X86_VENDOR_AMD,	0xf,					},
982c456442cSKonrad Rzeszutek Wilk 	{}
983c456442cSKonrad Rzeszutek Wilk };
984c456442cSKonrad Rzeszutek Wilk 
98517dbca11SAndi Kleen static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
98617dbca11SAndi Kleen 	/* in addition to cpu_no_speculation */
98717dbca11SAndi Kleen 	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT1	},
98817dbca11SAndi Kleen 	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT2	},
98917dbca11SAndi Kleen 	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_AIRMONT		},
99017dbca11SAndi Kleen 	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_MERRIFIELD	},
99117dbca11SAndi Kleen 	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_MOOREFIELD	},
99217dbca11SAndi Kleen 	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_GOLDMONT	},
99317dbca11SAndi Kleen 	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_DENVERTON	},
99417dbca11SAndi Kleen 	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_GEMINI_LAKE	},
99517dbca11SAndi Kleen 	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNL		},
99617dbca11SAndi Kleen 	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNM		},
99717dbca11SAndi Kleen 	{}
99817dbca11SAndi Kleen };
99917dbca11SAndi Kleen 
10004a28bfe3SKonrad Rzeszutek Wilk static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
1001fec9434aSDavid Woodhouse {
1002fec9434aSDavid Woodhouse 	u64 ia32_cap = 0;
1003fec9434aSDavid Woodhouse 
10048ecc4979SDominik Brodowski 	if (x86_match_cpu(cpu_no_speculation))
10058ecc4979SDominik Brodowski 		return;
10068ecc4979SDominik Brodowski 
10078ecc4979SDominik Brodowski 	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
10088ecc4979SDominik Brodowski 	setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
10098ecc4979SDominik Brodowski 
101077243971SKonrad Rzeszutek Wilk 	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
101177243971SKonrad Rzeszutek Wilk 		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
101277243971SKonrad Rzeszutek Wilk 
101377243971SKonrad Rzeszutek Wilk 	if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
101424809860SKonrad Rzeszutek Wilk 	   !(ia32_cap & ARCH_CAP_SSB_NO) &&
101524809860SKonrad Rzeszutek Wilk 	   !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
1016c456442cSKonrad Rzeszutek Wilk 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
1017c456442cSKonrad Rzeszutek Wilk 
1018706d5168SSai Praneeth 	if (ia32_cap & ARCH_CAP_IBRS_ALL)
1019706d5168SSai Praneeth 		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
1020706d5168SSai Praneeth 
1021fec9434aSDavid Woodhouse 	if (x86_match_cpu(cpu_no_meltdown))
10224a28bfe3SKonrad Rzeszutek Wilk 		return;
1023fec9434aSDavid Woodhouse 
1024fec9434aSDavid Woodhouse 	/* Rogue Data Cache Load? No! */
1025fec9434aSDavid Woodhouse 	if (ia32_cap & ARCH_CAP_RDCL_NO)
10264a28bfe3SKonrad Rzeszutek Wilk 		return;
1027fec9434aSDavid Woodhouse 
10284a28bfe3SKonrad Rzeszutek Wilk 	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
102917dbca11SAndi Kleen 
103017dbca11SAndi Kleen 	if (x86_match_cpu(cpu_no_l1tf))
103117dbca11SAndi Kleen 		return;
103217dbca11SAndi Kleen 
103317dbca11SAndi Kleen 	setup_force_cpu_bug(X86_BUG_L1TF);
1034fec9434aSDavid Woodhouse }
1035fec9434aSDavid Woodhouse 
103634048c9eSPaolo Ciarrocchi /*
10378990cac6SPavel Tatashin  * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
10388990cac6SPavel Tatashin  * unfortunately, that's not true in practice because of early VIA
10398990cac6SPavel Tatashin  * chips and (more importantly) broken virtualizers that are not easy
10408990cac6SPavel Tatashin  * to detect. In the latter case it doesn't even *fail* reliably, so
10418990cac6SPavel Tatashin  * probing for it doesn't even work. Disable it completely on 32-bit
10428990cac6SPavel Tatashin  * unless we can find a reliable way to detect all the broken cases.
10438990cac6SPavel Tatashin  * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
10448990cac6SPavel Tatashin  */
10459b3661cdSBorislav Petkov static void detect_nopl(void)
10468990cac6SPavel Tatashin {
10478990cac6SPavel Tatashin #ifdef CONFIG_X86_32
10489b3661cdSBorislav Petkov 	setup_clear_cpu_cap(X86_FEATURE_NOPL);
10498990cac6SPavel Tatashin #else
10509b3661cdSBorislav Petkov 	setup_force_cpu_cap(X86_FEATURE_NOPL);
10518990cac6SPavel Tatashin #endif
10528990cac6SPavel Tatashin }
10538990cac6SPavel Tatashin 
10548990cac6SPavel Tatashin /*
105534048c9eSPaolo Ciarrocchi  * Do minimum CPU detection early.
105634048c9eSPaolo Ciarrocchi  * Fields really needed: vendor, cpuid_level, family, model, mask,
105734048c9eSPaolo Ciarrocchi  * cache alignment.
105834048c9eSPaolo Ciarrocchi  * The others are not touched to avoid unwanted side effects.
105934048c9eSPaolo Ciarrocchi  *
1060a1652bb8SJean Delvare  * WARNING: this function is only called on the boot CPU.  Don't add code
1061a1652bb8SJean Delvare  * here that is supposed to run on all CPUs.
106234048c9eSPaolo Ciarrocchi  */
10633da99c97SYinghai Lu static void __init early_identify_cpu(struct cpuinfo_x86 *c)
1064f7627e25SThomas Gleixner {
10656627d242SYinghai Lu #ifdef CONFIG_X86_64
10666627d242SYinghai Lu 	c->x86_clflush_size = 64;
106713c6c532SJan Beulich 	c->x86_phys_bits = 36;
106813c6c532SJan Beulich 	c->x86_virt_bits = 48;
10696627d242SYinghai Lu #else
1070d4387bd3SHuang, Ying 	c->x86_clflush_size = 32;
107113c6c532SJan Beulich 	c->x86_phys_bits = 32;
107213c6c532SJan Beulich 	c->x86_virt_bits = 32;
10736627d242SYinghai Lu #endif
10740a488a53SYinghai Lu 	c->x86_cache_alignment = c->x86_clflush_size;
1075f7627e25SThomas Gleixner 
10763da99c97SYinghai Lu 	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
10770a488a53SYinghai Lu 	c->extended_cpuid_level = 0;
10780a488a53SYinghai Lu 
1079aef93c8bSYinghai Lu 	/* Cyrix could have CPUID enabled via c_identify() */
108005fb3c19SAndy Lutomirski 	if (have_cpuid_p()) {
1081f7627e25SThomas Gleixner 		cpu_detect(c);
10823da99c97SYinghai Lu 		get_cpu_vendor(c);
10833da99c97SYinghai Lu 		get_cpu_cap(c);
1084d94a155cSKirill A. Shutemov 		get_cpu_address_sizes(c);
108578d1b296SBorislav Petkov 		setup_force_cpu_cap(X86_FEATURE_CPUID);
108612cf105cSKrzysztof Helt 
108710a434fcSYinghai Lu 		if (this_cpu->c_early_init)
108810a434fcSYinghai Lu 			this_cpu->c_early_init(c);
10893da99c97SYinghai Lu 
1090f6e9456cSRobert Richter 		c->cpu_index = 0;
1091b38b0665SH. Peter Anvin 		filter_cpuid_features(c, false);
1092de5397adSFenghua Yu 
1093a110b5ecSBorislav Petkov 		if (this_cpu->c_bsp_init)
1094a110b5ecSBorislav Petkov 			this_cpu->c_bsp_init(c);
109578d1b296SBorislav Petkov 	} else {
109678d1b296SBorislav Petkov 		identify_cpu_without_cpuid(c);
109778d1b296SBorislav Petkov 		setup_clear_cpu_cap(X86_FEATURE_CPUID);
109805fb3c19SAndy Lutomirski 	}
1099c3b83598SBorislav Petkov 
1100c3b83598SBorislav Petkov 	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
1101a89f040fSThomas Gleixner 
11024a28bfe3SKonrad Rzeszutek Wilk 	cpu_set_bug_bits(c);
110399c6fa25SDavid Woodhouse 
1104db52ef74SIngo Molnar 	fpu__init_system(c);
1105b8b7abaeSAndy Lutomirski 
1106b8b7abaeSAndy Lutomirski #ifdef CONFIG_X86_32
1107b8b7abaeSAndy Lutomirski 	/*
1108b8b7abaeSAndy Lutomirski 	 * Regardless of whether PCID is enumerated, the SDM says
1109b8b7abaeSAndy Lutomirski 	 * that it can't be enabled in 32-bit mode.
1110b8b7abaeSAndy Lutomirski 	 */
1111b8b7abaeSAndy Lutomirski 	setup_clear_cpu_cap(X86_FEATURE_PCID);
1112b8b7abaeSAndy Lutomirski #endif
1113372fddf7SKirill A. Shutemov 
1114372fddf7SKirill A. Shutemov 	/*
1115372fddf7SKirill A. Shutemov 	 * Later in the boot process pgtable_l5_enabled() relies on
1116372fddf7SKirill A. Shutemov 	 * cpu_feature_enabled(X86_FEATURE_LA57). If 5-level paging is not
1117372fddf7SKirill A. Shutemov 	 * enabled by this point we need to clear the feature bit to avoid
1118372fddf7SKirill A. Shutemov 	 * false-positives at the later stage.
1119372fddf7SKirill A. Shutemov 	 *
1120372fddf7SKirill A. Shutemov 	 * pgtable_l5_enabled() can be false here for several reasons:
1121372fddf7SKirill A. Shutemov 	 *  - 5-level paging is disabled at compile time;
1122372fddf7SKirill A. Shutemov 	 *  - it's a 32-bit kernel;
1123372fddf7SKirill A. Shutemov 	 *  - machine doesn't support 5-level paging;
1124372fddf7SKirill A. Shutemov 	 *  - user specified 'no5lvl' in kernel command line.
1125372fddf7SKirill A. Shutemov 	 */
1126372fddf7SKirill A. Shutemov 	if (!pgtable_l5_enabled())
1127372fddf7SKirill A. Shutemov 		setup_clear_cpu_cap(X86_FEATURE_LA57);
11288990cac6SPavel Tatashin 
11299b3661cdSBorislav Petkov 	detect_nopl();
1130f7627e25SThomas Gleixner }
1131f7627e25SThomas Gleixner 
11329d31d35bSYinghai Lu void __init early_cpu_init(void)
11339d31d35bSYinghai Lu {
113402dde8b4SJan Beulich 	const struct cpu_dev *const *cdev;
113510a434fcSYinghai Lu 	int count = 0;
11369d31d35bSYinghai Lu 
1137ac23f253SJan Beulich #ifdef CONFIG_PROCESSOR_SELECT
11381b74dde7SChen Yucong 	pr_info("KERNEL supported cpus:\n");
113931c997caSIngo Molnar #endif
114031c997caSIngo Molnar 
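	/*
	 * Walk the compiled-in cpu_dev descriptors (collected into a
	 * dedicated linker section) and register up to X86_VENDOR_NUM of
	 * them in cpu_devs[].
	 */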
114110a434fcSYinghai Lu 	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
114202dde8b4SJan Beulich 		const struct cpu_dev *cpudev = *cdev;
11439d31d35bSYinghai Lu 
114410a434fcSYinghai Lu 		if (count >= X86_VENDOR_NUM)
114510a434fcSYinghai Lu 			break;
114610a434fcSYinghai Lu 		cpu_devs[count] = cpudev;
114710a434fcSYinghai Lu 		count++;
114810a434fcSYinghai Lu 
1149ac23f253SJan Beulich #ifdef CONFIG_PROCESSOR_SELECT
115031c997caSIngo Molnar 		{
115131c997caSIngo Molnar 			unsigned int j;
115231c997caSIngo Molnar 
115310a434fcSYinghai Lu 			for (j = 0; j < 2; j++) {
115410a434fcSYinghai Lu 				if (!cpudev->c_ident[j])
115510a434fcSYinghai Lu 					continue;
11561b74dde7SChen Yucong 				pr_info("  %s %s\n", cpudev->c_vendor,
115710a434fcSYinghai Lu 					cpudev->c_ident[j]);
115810a434fcSYinghai Lu 			}
115910a434fcSYinghai Lu 		}
11600388423dSDave Jones #endif
116131c997caSIngo Molnar 	}
11629d31d35bSYinghai Lu 	early_identify_cpu(&boot_cpu_data);
1163f7627e25SThomas Gleixner }
1164f7627e25SThomas Gleixner 
11657a5d6704SAndy Lutomirski static void detect_null_seg_behavior(struct cpuinfo_x86 *c)
11667a5d6704SAndy Lutomirski {
11677a5d6704SAndy Lutomirski #ifdef CONFIG_X86_64
116858a5aac5SAndy Lutomirski 	/*
11697a5d6704SAndy Lutomirski 	 * Empirically, writing zero to a segment selector on AMD does
11707a5d6704SAndy Lutomirski 	 * not clear the base, whereas writing zero to a segment
11717a5d6704SAndy Lutomirski 	 * selector on Intel does clear the base.  Intel's behavior
11727a5d6704SAndy Lutomirski 	 * allows slightly faster context switches in the common case
11737a5d6704SAndy Lutomirski 	 * where GS is unused by the prev and next threads.
117458a5aac5SAndy Lutomirski 	 *
11757a5d6704SAndy Lutomirski 	 * Since neither vendor documents this anywhere that I can see,
11767a5d6704SAndy Lutomirski 	 * detect it directly instead of hardcoding the choice by
11777a5d6704SAndy Lutomirski 	 * vendor.
11787a5d6704SAndy Lutomirski 	 *
11797a5d6704SAndy Lutomirski 	 * I've designated AMD's behavior as the "bug" because it's
11807a5d6704SAndy Lutomirski 	 * counterintuitive and less friendly.
118158a5aac5SAndy Lutomirski 	 */
11827a5d6704SAndy Lutomirski 
11837a5d6704SAndy Lutomirski 	unsigned long old_base, tmp;
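
	/*
	 * Set a non-zero FS base via the MSR, load a null selector into FS
	 * and check whether the selector write cleared the base.
	 */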
11847a5d6704SAndy Lutomirski 	rdmsrl(MSR_FS_BASE, old_base);
11857a5d6704SAndy Lutomirski 	wrmsrl(MSR_FS_BASE, 1);
11867a5d6704SAndy Lutomirski 	loadsegment(fs, 0);
11877a5d6704SAndy Lutomirski 	rdmsrl(MSR_FS_BASE, tmp);
11887a5d6704SAndy Lutomirski 	if (tmp != 0)
11897a5d6704SAndy Lutomirski 		set_cpu_bug(c, X86_BUG_NULL_SEG);
11907a5d6704SAndy Lutomirski 	wrmsrl(MSR_FS_BASE, old_base);
119158a5aac5SAndy Lutomirski #endif
1192f7627e25SThomas Gleixner }
1193f7627e25SThomas Gleixner 
1194148f9bb8SPaul Gortmaker static void generic_identify(struct cpuinfo_x86 *c)
1195f7627e25SThomas Gleixner {
11963da99c97SYinghai Lu 	c->extended_cpuid_level = 0;
1197f7627e25SThomas Gleixner 
1198aef93c8bSYinghai Lu 	if (!have_cpuid_p())
1199aef93c8bSYinghai Lu 		identify_cpu_without_cpuid(c);
1200f7627e25SThomas Gleixner 
1201aef93c8bSYinghai Lu 	/* Cyrix could have CPUID enabled via c_identify() */
1202a9853dd6SIngo Molnar 	if (!have_cpuid_p())
1203aef93c8bSYinghai Lu 		return;
1204aef93c8bSYinghai Lu 
12053da99c97SYinghai Lu 	cpu_detect(c);
12063da99c97SYinghai Lu 
12073da99c97SYinghai Lu 	get_cpu_vendor(c);
12083da99c97SYinghai Lu 
12093da99c97SYinghai Lu 	get_cpu_cap(c);
12103da99c97SYinghai Lu 
1211d94a155cSKirill A. Shutemov 	get_cpu_address_sizes(c);
1212d94a155cSKirill A. Shutemov 
1213f7627e25SThomas Gleixner 	if (c->cpuid_level >= 0x00000001) {
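		/* CPUID leaf 1: EBX bits 31-24 hold the initial APIC ID. */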
12143da99c97SYinghai Lu 		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
1215b89d3b3eSYinghai Lu #ifdef CONFIG_X86_32
1216c8e56d20SBorislav Petkov # ifdef CONFIG_SMP
1217cb8cc442SIngo Molnar 		c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
1218f7627e25SThomas Gleixner # else
121901aaea1aSYinghai Lu 		c->apicid = c->initial_apicid;
1220f7627e25SThomas Gleixner # endif
1221b89d3b3eSYinghai Lu #endif
1222b89d3b3eSYinghai Lu 		c->phys_proc_id = c->initial_apicid;
1223f7627e25SThomas Gleixner 	}
1224f7627e25SThomas Gleixner 
1225f7627e25SThomas Gleixner 	get_model_name(c); /* Default name */
1226f7627e25SThomas Gleixner 
12277a5d6704SAndy Lutomirski 	detect_null_seg_behavior(c);
12280230bb03SAndy Lutomirski 
12290230bb03SAndy Lutomirski 	/*
12300230bb03SAndy Lutomirski 	 * ESPFIX is a strange bug.  All real CPUs have it.  Paravirt
12310230bb03SAndy Lutomirski 	 * systems that run Linux at CPL > 0 may or may not have the
12320230bb03SAndy Lutomirski 	 * issue, but, even if they have the issue, there's absolutely
12330230bb03SAndy Lutomirski 	 * nothing we can do about it because we can't use the real IRET
12340230bb03SAndy Lutomirski 	 * instruction.
12350230bb03SAndy Lutomirski 	 *
12360230bb03SAndy Lutomirski 	 * NB: For the time being, only 32-bit kernels support
12370230bb03SAndy Lutomirski 	 * X86_BUG_ESPFIX as such.  64-bit kernels directly choose
12380230bb03SAndy Lutomirski 	 * whether to apply espfix using paravirt hooks.  If any
12390230bb03SAndy Lutomirski 	 * non-paravirt system ever shows up that does *not* have the
12400230bb03SAndy Lutomirski 	 * ESPFIX issue, we can change this.
12410230bb03SAndy Lutomirski 	 */
12420230bb03SAndy Lutomirski #ifdef CONFIG_X86_32
12430230bb03SAndy Lutomirski # ifdef CONFIG_PARAVIRT
12440230bb03SAndy Lutomirski 	do {
12450230bb03SAndy Lutomirski 		extern void native_iret(void);
12460230bb03SAndy Lutomirski 		if (pv_cpu_ops.iret == native_iret)
12470230bb03SAndy Lutomirski 			set_cpu_bug(c, X86_BUG_ESPFIX);
12480230bb03SAndy Lutomirski 	} while (0);
12490230bb03SAndy Lutomirski # else
12500230bb03SAndy Lutomirski 	set_cpu_bug(c, X86_BUG_ESPFIX);
12510230bb03SAndy Lutomirski # endif
12520230bb03SAndy Lutomirski #endif
1253f7627e25SThomas Gleixner }
1254f7627e25SThomas Gleixner 
1255cbc82b17SPeter P Waskiewicz Jr static void x86_init_cache_qos(struct cpuinfo_x86 *c)
1256cbc82b17SPeter P Waskiewicz Jr {
1257cbc82b17SPeter P Waskiewicz Jr 	/*
1258cbc82b17SPeter P Waskiewicz Jr 	 * The heavy lifting of max_rmid and cache_occ_scale is handled
1259cbc82b17SPeter P Waskiewicz Jr 	 * in get_cpu_cap().  Here we just clamp boot_cpu_data's max_rmid to
1260cbc82b17SPeter P Waskiewicz Jr 	 * the minimum seen, in case the CQM bits aren't actually present on this CPU.
1261cbc82b17SPeter P Waskiewicz Jr 	 */
1262cbc82b17SPeter P Waskiewicz Jr 	if (c != &boot_cpu_data) {
1263cbc82b17SPeter P Waskiewicz Jr 		boot_cpu_data.x86_cache_max_rmid =
1264cbc82b17SPeter P Waskiewicz Jr 			min(boot_cpu_data.x86_cache_max_rmid,
1265cbc82b17SPeter P Waskiewicz Jr 			    c->x86_cache_max_rmid);
1266cbc82b17SPeter P Waskiewicz Jr 	}
1267cbc82b17SPeter P Waskiewicz Jr }
1268cbc82b17SPeter P Waskiewicz Jr 
1269f7627e25SThomas Gleixner /*
12709d85eb91SThomas Gleixner  * Validate that ACPI/mptables have the same information about the
12719d85eb91SThomas Gleixner  * effective APIC id and update the package map.
1272d49597fdSThomas Gleixner  */
12739d85eb91SThomas Gleixner static void validate_apic_and_package_id(struct cpuinfo_x86 *c)
1274d49597fdSThomas Gleixner {
1275d49597fdSThomas Gleixner #ifdef CONFIG_SMP
12769d85eb91SThomas Gleixner 	unsigned int apicid, cpu = smp_processor_id();
1277d49597fdSThomas Gleixner 
1278d49597fdSThomas Gleixner 	apicid = apic->cpu_present_to_apicid(cpu);
1279d49597fdSThomas Gleixner 
12809d85eb91SThomas Gleixner 	if (apicid != c->apicid) {
12819d85eb91SThomas Gleixner 		pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n",
1282d49597fdSThomas Gleixner 		       cpu, apicid, c->initial_apicid);
1283d49597fdSThomas Gleixner 	}
12849d85eb91SThomas Gleixner 	BUG_ON(topology_update_package_map(c->phys_proc_id, cpu));
1285d49597fdSThomas Gleixner #else
1286d49597fdSThomas Gleixner 	c->logical_proc_id = 0;
1287d49597fdSThomas Gleixner #endif
1288d49597fdSThomas Gleixner }
1289d49597fdSThomas Gleixner 
1290d49597fdSThomas Gleixner /*
1291f7627e25SThomas Gleixner  * This does the hard work of actually picking apart the CPU stuff...
1292f7627e25SThomas Gleixner  */
1293148f9bb8SPaul Gortmaker static void identify_cpu(struct cpuinfo_x86 *c)
1294f7627e25SThomas Gleixner {
1295f7627e25SThomas Gleixner 	int i;
1296f7627e25SThomas Gleixner 
1297f7627e25SThomas Gleixner 	c->loops_per_jiffy = loops_per_jiffy;
129824dbc600SGustavo A. R. Silva 	c->x86_cache_size = 0;
1299f7627e25SThomas Gleixner 	c->x86_vendor = X86_VENDOR_UNKNOWN;
1300b399151cSJia Zhang 	c->x86_model = c->x86_stepping = 0;	/* So far unknown... */
1301f7627e25SThomas Gleixner 	c->x86_vendor_id[0] = '\0'; /* Unset */
1302f7627e25SThomas Gleixner 	c->x86_model_id[0] = '\0';  /* Unset */
1303f7627e25SThomas Gleixner 	c->x86_max_cores = 1;
1304102bbe3aSYinghai Lu 	c->x86_coreid_bits = 0;
130579a8b9aaSBorislav Petkov 	c->cu_id = 0xff;
130611fdd252SYinghai Lu #ifdef CONFIG_X86_64
1307102bbe3aSYinghai Lu 	c->x86_clflush_size = 64;
130813c6c532SJan Beulich 	c->x86_phys_bits = 36;
130913c6c532SJan Beulich 	c->x86_virt_bits = 48;
1310102bbe3aSYinghai Lu #else
1311102bbe3aSYinghai Lu 	c->cpuid_level = -1;	/* CPUID not detected */
1312f7627e25SThomas Gleixner 	c->x86_clflush_size = 32;
131313c6c532SJan Beulich 	c->x86_phys_bits = 32;
131413c6c532SJan Beulich 	c->x86_virt_bits = 32;
1315102bbe3aSYinghai Lu #endif
1316102bbe3aSYinghai Lu 	c->x86_cache_alignment = c->x86_clflush_size;
1317f7627e25SThomas Gleixner 	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
1318f7627e25SThomas Gleixner 
1319f7627e25SThomas Gleixner 	generic_identify(c);
1320f7627e25SThomas Gleixner 
13213898534dSAndi Kleen 	if (this_cpu->c_identify)
1322f7627e25SThomas Gleixner 		this_cpu->c_identify(c);
1323f7627e25SThomas Gleixner 
13246a6256f9SAdam Buchbinder 	/* Clear/Set all flags overridden by options, after probe */
13258bf1ebcaSAndy Lutomirski 	apply_forced_caps(c);
13262759c328SYinghai Lu 
1327102bbe3aSYinghai Lu #ifdef CONFIG_X86_64
1328cb8cc442SIngo Molnar 	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
1329102bbe3aSYinghai Lu #endif
1330102bbe3aSYinghai Lu 
1331f7627e25SThomas Gleixner 	/*
1332f7627e25SThomas Gleixner 	 * Vendor-specific initialization.  In this section we
1333f7627e25SThomas Gleixner 	 * canonicalize the feature flags: features that a certain CPU
1334f7627e25SThomas Gleixner 	 * supports but CPUID doesn't report, CPUID claiming incorrect
1335f7627e25SThomas Gleixner 	 * flags, and other bugs are all handled here.
1337f7627e25SThomas Gleixner 	 *
1338f7627e25SThomas Gleixner 	 * At the end of this section, c->x86_capability better
1339f7627e25SThomas Gleixner 	 * indicate the features this CPU genuinely supports!
1340f7627e25SThomas Gleixner 	 */
1341f7627e25SThomas Gleixner 	if (this_cpu->c_init)
1342f7627e25SThomas Gleixner 		this_cpu->c_init(c);
1343f7627e25SThomas Gleixner 
1344f7627e25SThomas Gleixner 	/* Disable the PN if appropriate */
1345f7627e25SThomas Gleixner 	squash_the_stupid_serial_number(c);
1346f7627e25SThomas Gleixner 
1347aa35f896SRicardo Neri 	/* Set up SMEP/SMAP/UMIP */
1348b2cc2a07SH. Peter Anvin 	setup_smep(c);
1349b2cc2a07SH. Peter Anvin 	setup_smap(c);
1350aa35f896SRicardo Neri 	setup_umip(c);
1351b2cc2a07SH. Peter Anvin 
1352f7627e25SThomas Gleixner 	/*
13530f3fa48aSIngo Molnar 	 * The vendor-specific functions might have changed features.
13540f3fa48aSIngo Molnar 	 * Now we do "generic changes."
1355f7627e25SThomas Gleixner 	 */
1356f7627e25SThomas Gleixner 
1357b38b0665SH. Peter Anvin 	/* Filter out anything that depends on CPUID levels we don't have */
1358b38b0665SH. Peter Anvin 	filter_cpuid_features(c, true);
1359b38b0665SH. Peter Anvin 
1360f7627e25SThomas Gleixner 	/* If the model name is still unset, do table lookup. */
1361f7627e25SThomas Gleixner 	if (!c->x86_model_id[0]) {
136202dde8b4SJan Beulich 		const char *p;
1363f7627e25SThomas Gleixner 		p = table_lookup_model(c);
1364f7627e25SThomas Gleixner 		if (p)
1365f7627e25SThomas Gleixner 			strcpy(c->x86_model_id, p);
1366f7627e25SThomas Gleixner 		else
1367f7627e25SThomas Gleixner 			/* Last resort... */
1368f7627e25SThomas Gleixner 			sprintf(c->x86_model_id, "%02x/%02x",
1369f7627e25SThomas Gleixner 				c->x86, c->x86_model);
1370f7627e25SThomas Gleixner 	}
1371f7627e25SThomas Gleixner 
1372102bbe3aSYinghai Lu #ifdef CONFIG_X86_64
1373102bbe3aSYinghai Lu 	detect_ht(c);
1374102bbe3aSYinghai Lu #endif
1375102bbe3aSYinghai Lu 
137649d859d7SH. Peter Anvin 	x86_init_rdrand(c);
1377cbc82b17SPeter P Waskiewicz Jr 	x86_init_cache_qos(c);
137806976945SDave Hansen 	setup_pku(c);
13793e0c3737SYinghai Lu 
13803e0c3737SYinghai Lu 	/*
13816a6256f9SAdam Buchbinder 	 * Clear/Set all flags overridden by options; this needs to happen
13823e0c3737SYinghai Lu 	 * before the SMP capability AND across all CPUs below.
13833e0c3737SYinghai Lu 	 */
13848bf1ebcaSAndy Lutomirski 	apply_forced_caps(c);
13853e0c3737SYinghai Lu 
1386f7627e25SThomas Gleixner 	/*
1387f7627e25SThomas Gleixner 	 * On SMP, boot_cpu_data holds the common feature set between
1388f7627e25SThomas Gleixner 	 * all CPUs; so make sure that we indicate which features are
1389f7627e25SThomas Gleixner 	 * common between the CPUs.  The first time this routine gets
1390f7627e25SThomas Gleixner 	 * executed, c == &boot_cpu_data.
1391f7627e25SThomas Gleixner 	 */
1392f7627e25SThomas Gleixner 	if (c != &boot_cpu_data) {
1393f7627e25SThomas Gleixner 		/* AND the already accumulated flags with these */
1394f7627e25SThomas Gleixner 		for (i = 0; i < NCAPINTS; i++)
1395f7627e25SThomas Gleixner 			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
139665fc985bSBorislav Petkov 
139765fc985bSBorislav Petkov 		/* OR, i.e. replicate the bug flags */
139865fc985bSBorislav Petkov 		for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
139965fc985bSBorislav Petkov 			c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
1400f7627e25SThomas Gleixner 	}
1401f7627e25SThomas Gleixner 
1402f7627e25SThomas Gleixner 	/* Init Machine Check Exception if available. */
14035e09954aSBorislav Petkov 	mcheck_cpu_init(c);
140430d432dfSAndi Kleen 
140530d432dfSAndi Kleen 	select_idle_routine(c);
1406102bbe3aSYinghai Lu 
1407de2d9445STejun Heo #ifdef CONFIG_NUMA
1408102bbe3aSYinghai Lu 	numa_add_cpu(smp_processor_id());
1409102bbe3aSYinghai Lu #endif
1410f7627e25SThomas Gleixner }
1411f7627e25SThomas Gleixner 
14128b6c0ab1SIngo Molnar /*
14138b6c0ab1SIngo Molnar  * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
14148b6c0ab1SIngo Molnar  * on 32-bit kernels:
14158b6c0ab1SIngo Molnar  */
1416cfda7bb9SAndy Lutomirski #ifdef CONFIG_X86_32
1417cfda7bb9SAndy Lutomirski void enable_sep_cpu(void)
1418cfda7bb9SAndy Lutomirski {
14198b6c0ab1SIngo Molnar 	struct tss_struct *tss;
14208b6c0ab1SIngo Molnar 	int cpu;
1421cfda7bb9SAndy Lutomirski 
1422b3edfda4SBorislav Petkov 	if (!boot_cpu_has(X86_FEATURE_SEP))
1423b3edfda4SBorislav Petkov 		return;
1424b3edfda4SBorislav Petkov 
14258b6c0ab1SIngo Molnar 	cpu = get_cpu();
1426c482feefSAndy Lutomirski 	tss = &per_cpu(cpu_tss_rw, cpu);
14278b6c0ab1SIngo Molnar 
14288b6c0ab1SIngo Molnar 	/*
1429cf9328ccSAndy Lutomirski 	 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
1430cf9328ccSAndy Lutomirski 	 * see the big comment in struct x86_hw_tss's definition.
14318b6c0ab1SIngo Molnar 	 */
1432cfda7bb9SAndy Lutomirski 
1433cfda7bb9SAndy Lutomirski 	tss->x86_tss.ss1 = __KERNEL_CS;
14348b6c0ab1SIngo Molnar 	wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
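	/*
	 * SYSENTER_ESP points at the top of the per-CPU entry stack,
	 * SYSENTER_EIP at the 32-bit SYSENTER entry point.
	 */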
14354fe2d8b1SDave Hansen 	wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0);
14364c8cd0c5SIngo Molnar 	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
14378b6c0ab1SIngo Molnar 
1438cfda7bb9SAndy Lutomirski 	put_cpu();
1439cfda7bb9SAndy Lutomirski }
1440e04d645fSGlauber Costa #endif
1441e04d645fSGlauber Costa 
1442f7627e25SThomas Gleixner void __init identify_boot_cpu(void)
1443f7627e25SThomas Gleixner {
1444f7627e25SThomas Gleixner 	identify_cpu(&boot_cpu_data);
1445102bbe3aSYinghai Lu #ifdef CONFIG_X86_32
1446f7627e25SThomas Gleixner 	sysenter_setup();
1447f7627e25SThomas Gleixner 	enable_sep_cpu();
1448102bbe3aSYinghai Lu #endif
1449e0ba94f1SAlex Shi 	cpu_detect_tlb(&boot_cpu_data);
1450f7627e25SThomas Gleixner }
1451f7627e25SThomas Gleixner 
1452148f9bb8SPaul Gortmaker void identify_secondary_cpu(struct cpuinfo_x86 *c)
1453f7627e25SThomas Gleixner {
1454f7627e25SThomas Gleixner 	BUG_ON(c == &boot_cpu_data);
1455f7627e25SThomas Gleixner 	identify_cpu(c);
1456102bbe3aSYinghai Lu #ifdef CONFIG_X86_32
1457f7627e25SThomas Gleixner 	enable_sep_cpu();
1458102bbe3aSYinghai Lu #endif
1459f7627e25SThomas Gleixner 	mtrr_ap_init();
14609d85eb91SThomas Gleixner 	validate_apic_and_package_id(c);
146177243971SKonrad Rzeszutek Wilk 	x86_spec_ctrl_setup_ap();
1462f7627e25SThomas Gleixner }
1463f7627e25SThomas Gleixner 
1464191679fdSAndi Kleen static __init int setup_noclflush(char *arg)
1465191679fdSAndi Kleen {
1466840d2830SH. Peter Anvin 	setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
1467da4aaa7dSH. Peter Anvin 	setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT);
1468191679fdSAndi Kleen 	return 1;
1469191679fdSAndi Kleen }
1470191679fdSAndi Kleen __setup("noclflush", setup_noclflush);
1471191679fdSAndi Kleen 
1472148f9bb8SPaul Gortmaker void print_cpu_info(struct cpuinfo_x86 *c)
1473f7627e25SThomas Gleixner {
147402dde8b4SJan Beulich 	const char *vendor = NULL;
1475f7627e25SThomas Gleixner 
14760f3fa48aSIngo Molnar 	if (c->x86_vendor < X86_VENDOR_NUM) {
1477f7627e25SThomas Gleixner 		vendor = this_cpu->c_vendor;
14780f3fa48aSIngo Molnar 	} else {
14790f3fa48aSIngo Molnar 		if (c->cpuid_level >= 0)
1480f7627e25SThomas Gleixner 			vendor = c->x86_vendor_id;
14810f3fa48aSIngo Molnar 	}
1482f7627e25SThomas Gleixner 
1483bd32a8cfSYinghai Lu 	if (vendor && !strstr(c->x86_model_id, vendor))
14841b74dde7SChen Yucong 		pr_cont("%s ", vendor);
1485f7627e25SThomas Gleixner 
14869d31d35bSYinghai Lu 	if (c->x86_model_id[0])
14871b74dde7SChen Yucong 		pr_cont("%s", c->x86_model_id);
1488f7627e25SThomas Gleixner 	else
14891b74dde7SChen Yucong 		pr_cont("%d86", c->x86);
1490f7627e25SThomas Gleixner 
14911b74dde7SChen Yucong 	pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
1492924e101aSBorislav Petkov 
1493b399151cSJia Zhang 	if (c->x86_stepping || c->cpuid_level >= 0)
1494b399151cSJia Zhang 		pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
1495f7627e25SThomas Gleixner 	else
14961b74dde7SChen Yucong 		pr_cont(")\n");
1497f7627e25SThomas Gleixner }
1498f7627e25SThomas Gleixner 
14990c2a3913SAndi Kleen /*
15000c2a3913SAndi Kleen  * clearcpuid= was already parsed in fpu__init_parse_early_param.
15010c2a3913SAndi Kleen  * But we need to keep a dummy __setup around; otherwise it would
15020c2a3913SAndi Kleen  * show up as an environment variable for init.
15030c2a3913SAndi Kleen  */
15040c2a3913SAndi Kleen static __init int setup_clearcpuid(char *arg)
1505ac72e788SAndi Kleen {
1506ac72e788SAndi Kleen 	return 1;
1507ac72e788SAndi Kleen }
15080c2a3913SAndi Kleen __setup("clearcpuid=", setup_clearcpuid);
1509ac72e788SAndi Kleen 
1510d5494d4fSYinghai Lu #ifdef CONFIG_X86_64
1511947e76cdSBrian Gerst DEFINE_PER_CPU_FIRST(union irq_stack_union,
1512277d5b40SAndi Kleen 		     irq_stack_union) __aligned(PAGE_SIZE) __visible;
151335060ed6SVitaly Kuznetsov EXPORT_PER_CPU_SYMBOL_GPL(irq_stack_union);
15140f3fa48aSIngo Molnar 
1515bdf977b3STejun Heo /*
1516a7fcf28dSAndy Lutomirski  * The following percpu variables are hot.  Align current_task to
1517a7fcf28dSAndy Lutomirski  * cacheline size such that they fall in the same cacheline.
1518bdf977b3STejun Heo  */
1519bdf977b3STejun Heo DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
1520bdf977b3STejun Heo 	&init_task;
1521bdf977b3STejun Heo EXPORT_PER_CPU_SYMBOL(current_task);
1522d5494d4fSYinghai Lu 
1523bdf977b3STejun Heo DEFINE_PER_CPU(char *, irq_stack_ptr) =
15244950d6d4SJosh Poimboeuf 	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE;
1525bdf977b3STejun Heo 
1526277d5b40SAndi Kleen DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
1527d5494d4fSYinghai Lu 
1528c2daa3beSPeter Zijlstra DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
1529c2daa3beSPeter Zijlstra EXPORT_PER_CPU_SYMBOL(__preempt_count);
1530c2daa3beSPeter Zijlstra 
1531d5494d4fSYinghai Lu /* May not be marked __init: used by software suspend */
1532d5494d4fSYinghai Lu void syscall_init(void)
1533d5494d4fSYinghai Lu {
15343386bc8aSAndy Lutomirski 	extern char _entry_trampoline[];
15353386bc8aSAndy Lutomirski 	extern char entry_SYSCALL_64_trampoline[];
15363386bc8aSAndy Lutomirski 
153772f5e08dSAndy Lutomirski 	int cpu = smp_processor_id();
15383386bc8aSAndy Lutomirski 	unsigned long SYSCALL64_entry_trampoline =
15393386bc8aSAndy Lutomirski 		(unsigned long)get_cpu_entry_area(cpu)->entry_trampoline +
15403386bc8aSAndy Lutomirski 		(entry_SYSCALL_64_trampoline - _entry_trampoline);
154172f5e08dSAndy Lutomirski 
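	/*
	 * MSR_STAR layout: SYSCALL CS/SS base (__KERNEL_CS) in bits 47:32,
	 * SYSRET CS/SS base (__USER32_CS) in bits 63:48.
	 */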
154231ac34caSBorislav Petkov 	wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
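
	/*
	 * With page-table isolation enabled, LSTAR must point at the
	 * trampoline alias of the SYSCALL entry code in the cpu_entry_area,
	 * since that is the mapping visible in the user page tables.
	 */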
15438d4b0678SThomas Gleixner 	if (static_cpu_has(X86_FEATURE_PTI))
15443386bc8aSAndy Lutomirski 		wrmsrl(MSR_LSTAR, SYSCALL64_entry_trampoline);
15458d4b0678SThomas Gleixner 	else
15468d4b0678SThomas Gleixner 		wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
1547d56fe4bfSIngo Molnar 
1548d56fe4bfSIngo Molnar #ifdef CONFIG_IA32_EMULATION
154947edb651SAndy Lutomirski 	wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
1550a76c7f46SDenys Vlasenko 	/*
1551487d1edbSDenys Vlasenko 	 * This only works on Intel CPUs.
1552487d1edbSDenys Vlasenko 	 * On AMD CPUs these MSRs are 32-bit, so the CPU truncates MSR_IA32_SYSENTER_EIP.
1553487d1edbSDenys Vlasenko 	 * This does not cause SYSENTER to jump to the wrong location, because
1554487d1edbSDenys Vlasenko 	 * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
1555a76c7f46SDenys Vlasenko 	 */
1556a76c7f46SDenys Vlasenko 	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
15574fe2d8b1SDave Hansen 	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1));
15584c8cd0c5SIngo Molnar 	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
1559d56fe4bfSIngo Molnar #else
156047edb651SAndy Lutomirski 	wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
15616b51311cSBorislav Petkov 	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
1562d56fe4bfSIngo Molnar 	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
1563d56fe4bfSIngo Molnar 	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
1564d5494d4fSYinghai Lu #endif
1565d5494d4fSYinghai Lu 
1566d5494d4fSYinghai Lu 	/* Flags to clear on syscall */
1567d5494d4fSYinghai Lu 	wrmsrl(MSR_SYSCALL_MASK,
156863bcff2aSH. Peter Anvin 	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
15698c7aa698SAndy Lutomirski 	       X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
1570d5494d4fSYinghai Lu }
1571d5494d4fSYinghai Lu 
1572d5494d4fSYinghai Lu /*
1573d5494d4fSYinghai Lu  * Copies of the original IST values from the TSS are only accessed during
1574d5494d4fSYinghai Lu  * debugging; no special alignment is required.
1575d5494d4fSYinghai Lu  */
1576d5494d4fSYinghai Lu DEFINE_PER_CPU(struct orig_ist, orig_ist);
1577d5494d4fSYinghai Lu 
1578228bdaa9SSteven Rostedt static DEFINE_PER_CPU(unsigned long, debug_stack_addr);
157942181186SSteven Rostedt DEFINE_PER_CPU(int, debug_stack_usage);
1580228bdaa9SSteven Rostedt 
1581228bdaa9SSteven Rostedt int is_debug_stack(unsigned long addr)
1582228bdaa9SSteven Rostedt {
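	/*
	 * The debug stack counts as in use if the usage counter is non-zero
	 * or if @addr falls within this CPU's debug IST stack.
	 */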
158389cbc767SChristoph Lameter 	return __this_cpu_read(debug_stack_usage) ||
158489cbc767SChristoph Lameter 		(addr <= __this_cpu_read(debug_stack_addr) &&
158589cbc767SChristoph Lameter 		 addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ));
1586228bdaa9SSteven Rostedt }
15870f46efebSMasami Hiramatsu NOKPROBE_SYMBOL(is_debug_stack);
1588228bdaa9SSteven Rostedt 
1589629f4f9dSSeiji Aguchi DEFINE_PER_CPU(u32, debug_idt_ctr);
1590f8988175SSteven Rostedt 
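/*
 * Temporarily switch to the debug IDT, whose #DB/#BP gates do not use an
 * IST stack, so that a nested debug exception does not reset the stack
 * pointer to the top of the debug IST stack.  The counter makes the
 * switch nestable.
 */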
1591228bdaa9SSteven Rostedt void debug_stack_set_zero(void)
1592228bdaa9SSteven Rostedt {
1593629f4f9dSSeiji Aguchi 	this_cpu_inc(debug_idt_ctr);
1594629f4f9dSSeiji Aguchi 	load_current_idt();
1595228bdaa9SSteven Rostedt }
15960f46efebSMasami Hiramatsu NOKPROBE_SYMBOL(debug_stack_set_zero);
1597228bdaa9SSteven Rostedt 
1598228bdaa9SSteven Rostedt void debug_stack_reset(void)
1599228bdaa9SSteven Rostedt {
1600629f4f9dSSeiji Aguchi 	if (WARN_ON(!this_cpu_read(debug_idt_ctr)))
1601f8988175SSteven Rostedt 		return;
1602629f4f9dSSeiji Aguchi 	if (this_cpu_dec_return(debug_idt_ctr) == 0)
1603629f4f9dSSeiji Aguchi 		load_current_idt();
1604228bdaa9SSteven Rostedt }
16050f46efebSMasami Hiramatsu NOKPROBE_SYMBOL(debug_stack_reset);
1606228bdaa9SSteven Rostedt 
16070f3fa48aSIngo Molnar #else	/* CONFIG_X86_64 */
1608d5494d4fSYinghai Lu 
1609bdf977b3STejun Heo DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
1610bdf977b3STejun Heo EXPORT_PER_CPU_SYMBOL(current_task);
1611c2daa3beSPeter Zijlstra DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
1612c2daa3beSPeter Zijlstra EXPORT_PER_CPU_SYMBOL(__preempt_count);
1613bdf977b3STejun Heo 
1614a7fcf28dSAndy Lutomirski /*
1615a7fcf28dSAndy Lutomirski  * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find
1616a7fcf28dSAndy Lutomirski  * the top of the kernel stack.  Use an extra percpu variable to track the
1617a7fcf28dSAndy Lutomirski  * top of the kernel stack directly.
1618a7fcf28dSAndy Lutomirski  */
1619a7fcf28dSAndy Lutomirski DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
1620a7fcf28dSAndy Lutomirski 	(unsigned long)&init_thread_union + THREAD_SIZE;
1621a7fcf28dSAndy Lutomirski EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);
1622a7fcf28dSAndy Lutomirski 
1623050e9baaSLinus Torvalds #ifdef CONFIG_STACKPROTECTOR
162453f82452SJeremy Fitzhardinge DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
162560a5317fSTejun Heo #endif
162660a5317fSTejun Heo 
16270f3fa48aSIngo Molnar #endif	/* CONFIG_X86_64 */
1628f7627e25SThomas Gleixner 
1629f7627e25SThomas Gleixner /*
16309766cdbcSJaswinder Singh Rajput  * Clear all 6 debug registers:
16319766cdbcSJaswinder Singh Rajput  */
16329766cdbcSJaswinder Singh Rajput static void clear_all_debug_regs(void)
16339766cdbcSJaswinder Singh Rajput {
16349766cdbcSJaswinder Singh Rajput 	int i;
16359766cdbcSJaswinder Singh Rajput 
16369766cdbcSJaswinder Singh Rajput 	for (i = 0; i < 8; i++) {
16379766cdbcSJaswinder Singh Rajput 		/* Ignore db4, db5 */
16389766cdbcSJaswinder Singh Rajput 		if ((i == 4) || (i == 5))
16399766cdbcSJaswinder Singh Rajput 			continue;
16409766cdbcSJaswinder Singh Rajput 
16419766cdbcSJaswinder Singh Rajput 		set_debugreg(0, i);
16429766cdbcSJaswinder Singh Rajput 	}
16439766cdbcSJaswinder Singh Rajput }
1644f7627e25SThomas Gleixner 
16450bb9fef9SJason Wessel #ifdef CONFIG_KGDB
16460bb9fef9SJason Wessel /*
16470bb9fef9SJason Wessel  * Restore debug regs if using kgdbwait and you have a kernel debugger
16480bb9fef9SJason Wessel  * connection established.
16490bb9fef9SJason Wessel  */
16500bb9fef9SJason Wessel static void dbg_restore_debug_regs(void)
16510bb9fef9SJason Wessel {
16520bb9fef9SJason Wessel 	if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
16530bb9fef9SJason Wessel 		arch_kgdb_ops.correct_hw_break();
16540bb9fef9SJason Wessel }
16550bb9fef9SJason Wessel #else /* ! CONFIG_KGDB */
16560bb9fef9SJason Wessel #define dbg_restore_debug_regs()
16570bb9fef9SJason Wessel #endif /* ! CONFIG_KGDB */
16580bb9fef9SJason Wessel 
1659ce4b1b16SIgor Mammedov static void wait_for_master_cpu(int cpu)
1660ce4b1b16SIgor Mammedov {
1661ce4b1b16SIgor Mammedov #ifdef CONFIG_SMP
1662ce4b1b16SIgor Mammedov 	/*
1663ce4b1b16SIgor Mammedov 	 * wait for ACK from master CPU before continuing
1664ce4b1b16SIgor Mammedov 	 * with AP initialization
1665ce4b1b16SIgor Mammedov 	 */
1666ce4b1b16SIgor Mammedov 	WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
1667ce4b1b16SIgor Mammedov 	while (!cpumask_test_cpu(cpu, cpu_callout_mask))
1668ce4b1b16SIgor Mammedov 		cpu_relax();
1669ce4b1b16SIgor Mammedov #endif
1670ce4b1b16SIgor Mammedov }
1671ce4b1b16SIgor Mammedov 
1672*b2e2ba57SChang S. Bae #ifdef CONFIG_X86_64
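/*
 * Make the CPU and node number retrievable from user space: store them in
 * MSR_TSC_AUX for RDTSCP and in the limit of the GDT_ENTRY_CPU_NUMBER
 * segment for the LSL-based vDSO fallback.
 */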
1673*b2e2ba57SChang S. Bae static void setup_getcpu(int cpu)
1674*b2e2ba57SChang S. Bae {
1675*b2e2ba57SChang S. Bae 	unsigned long cpudata = vdso_encode_cpu_node(cpu, early_cpu_to_node(cpu));
1676*b2e2ba57SChang S. Bae 	struct desc_struct d = { };
1677*b2e2ba57SChang S. Bae 
1678*b2e2ba57SChang S. Bae 	if (static_cpu_has(X86_FEATURE_RDTSCP))
1679*b2e2ba57SChang S. Bae 		write_rdtscp_aux(cpudata);
1680*b2e2ba57SChang S. Bae 
1681*b2e2ba57SChang S. Bae 	/* Store CPU and node number in limit. */
1682*b2e2ba57SChang S. Bae 	d.limit0 = cpudata;
1683*b2e2ba57SChang S. Bae 	d.limit1 = cpudata >> 16;
1684*b2e2ba57SChang S. Bae 
1685*b2e2ba57SChang S. Bae 	d.type = 5;		/* RO data, expand down, accessed */
1686*b2e2ba57SChang S. Bae 	d.dpl = 3;		/* Visible to user code */
1687*b2e2ba57SChang S. Bae 	d.s = 1;		/* Not a system segment */
1688*b2e2ba57SChang S. Bae 	d.p = 1;		/* Present */
1689*b2e2ba57SChang S. Bae 	d.d = 1;		/* 32-bit */
1690*b2e2ba57SChang S. Bae 
1691*b2e2ba57SChang S. Bae 	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPU_NUMBER, &d, DESCTYPE_S);
1692*b2e2ba57SChang S. Bae }
1693*b2e2ba57SChang S. Bae #endif
1694*b2e2ba57SChang S. Bae 
1695f7627e25SThomas Gleixner /*
1696f7627e25SThomas Gleixner  * cpu_init() initializes state that is per-CPU. Some data is already
1697f7627e25SThomas Gleixner  * initialized (naturally) in the bootstrap process, such as the GDT
1698f7627e25SThomas Gleixner  * and IDT. We reload them nevertheless, this function acts as a
1699f7627e25SThomas Gleixner  * 'CPU state barrier': nothing should get across.
17001ba76586SYinghai Lu  * On 64-bit, a lot of state is already set up in PDA init.
1701f7627e25SThomas Gleixner  */
17021ba76586SYinghai Lu #ifdef CONFIG_X86_64
17030f3fa48aSIngo Molnar 
1704148f9bb8SPaul Gortmaker void cpu_init(void)
17051ba76586SYinghai Lu {
17060fe1e009STejun Heo 	struct orig_ist *oist;
17071ba76586SYinghai Lu 	struct task_struct *me;
17080f3fa48aSIngo Molnar 	struct tss_struct *t;
17090f3fa48aSIngo Molnar 	unsigned long v;
1710fb59831bSAndy Lutomirski 	int cpu = raw_smp_processor_id();
17111ba76586SYinghai Lu 	int i;
17121ba76586SYinghai Lu 
1713ce4b1b16SIgor Mammedov 	wait_for_master_cpu(cpu);
1714ce4b1b16SIgor Mammedov 
1715e6ebf5deSFenghua Yu 	/*
17161e02ce4cSAndy Lutomirski 	 * Initialize the CR4 shadow before doing anything that could
17171e02ce4cSAndy Lutomirski 	 * try to read it.
17181e02ce4cSAndy Lutomirski 	 */
17191e02ce4cSAndy Lutomirski 	cr4_init_shadow();
17201e02ce4cSAndy Lutomirski 
1721777284b6SBorislav Petkov 	if (cpu)
1722e6ebf5deSFenghua Yu 		load_ucode_ap();
1723e6ebf5deSFenghua Yu 
1724c482feefSAndy Lutomirski 	t = &per_cpu(cpu_tss_rw, cpu);
17250fe1e009STejun Heo 	oist = &per_cpu(orig_ist, cpu);
17260f3fa48aSIngo Molnar 
1727e7a22c1eSBrian Gerst #ifdef CONFIG_NUMA
172827fd185fSFenghua Yu 	if (this_cpu_read(numa_node) == 0 &&
1729e534c7c5SLee Schermerhorn 	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
1730e534c7c5SLee Schermerhorn 		set_numa_node(early_cpu_to_node(cpu));
1731e7a22c1eSBrian Gerst #endif
1732*b2e2ba57SChang S. Bae 	setup_getcpu(cpu);
17331ba76586SYinghai Lu 
17341ba76586SYinghai Lu 	me = current;
17351ba76586SYinghai Lu 
17362eaad1fdSMike Travis 	pr_debug("Initializing CPU#%d\n", cpu);
17371ba76586SYinghai Lu 
1738375074ccSAndy Lutomirski 	cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
17391ba76586SYinghai Lu 
17401ba76586SYinghai Lu 	/*
17411ba76586SYinghai Lu 	 * Initialize the per-CPU GDT with the boot GDT,
17421ba76586SYinghai Lu 	 * and set up the GDT descriptor:
17431ba76586SYinghai Lu 	 */
17441ba76586SYinghai Lu 
1745552be871SBrian Gerst 	switch_to_new_gdt(cpu);
17462697fbd5SBrian Gerst 	loadsegment(fs, 0);
17472697fbd5SBrian Gerst 
1748cf910e83SSeiji Aguchi 	load_current_idt();
17491ba76586SYinghai Lu 
17501ba76586SYinghai Lu 	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
17511ba76586SYinghai Lu 	syscall_init();
17521ba76586SYinghai Lu 
17531ba76586SYinghai Lu 	wrmsrl(MSR_FS_BASE, 0);
17541ba76586SYinghai Lu 	wrmsrl(MSR_KERNEL_GS_BASE, 0);
17551ba76586SYinghai Lu 	barrier();
17561ba76586SYinghai Lu 
17574763ed4dSH. Peter Anvin 	x86_configure_nx();
1758659006bfSThomas Gleixner 	x2apic_setup();
17591ba76586SYinghai Lu 
17601ba76586SYinghai Lu 	/*
17611ba76586SYinghai Lu 	 * set up and load the per-CPU TSS
17621ba76586SYinghai Lu 	 */
17630fe1e009STejun Heo 	if (!oist->ist[0]) {
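		/*
		 * Carve the per-vector exception stacks out of this CPU's
		 * entry area and record their tops as IST entries in both
		 * the TSS and orig_ist.
		 */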
176440e7f949SAndy Lutomirski 		char *estacks = get_cpu_entry_area(cpu)->exception_stacks;
17650f3fa48aSIngo Molnar 
17661ba76586SYinghai Lu 		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
17670f3fa48aSIngo Molnar 			estacks += exception_stack_sizes[v];
17680fe1e009STejun Heo 			oist->ist[v] = t->x86_tss.ist[v] =
17691ba76586SYinghai Lu 					(unsigned long)estacks;
1770228bdaa9SSteven Rostedt 			if (v == DEBUG_STACK-1)
1771228bdaa9SSteven Rostedt 				per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks;
17721ba76586SYinghai Lu 		}
17731ba76586SYinghai Lu 	}
17741ba76586SYinghai Lu 
17757fb983b4SAndy Lutomirski 	t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
17760f3fa48aSIngo Molnar 
17771ba76586SYinghai Lu 	/*
17781ba76586SYinghai Lu 	 * <= is required because the CPU will access up to
17791ba76586SYinghai Lu 	 * 8 bits beyond the end of the IO permission bitmap.
17801ba76586SYinghai Lu 	 */
17811ba76586SYinghai Lu 	for (i = 0; i <= IO_BITMAP_LONGS; i++)
17821ba76586SYinghai Lu 		t->io_bitmap[i] = ~0UL;
17831ba76586SYinghai Lu 
1784f1f10076SVegard Nossum 	mmgrab(&init_mm);
17851ba76586SYinghai Lu 	me->active_mm = &init_mm;
17868c5dfd25SStoyan Gaydarov 	BUG_ON(me->mm);
178772c0098dSAndy Lutomirski 	initialize_tlbstate_and_flush();
17881ba76586SYinghai Lu 	enter_lazy_tlb(&init_mm, me);
17891ba76586SYinghai Lu 
179020bb8344SAndy Lutomirski 	/*
17917f2590a1SAndy Lutomirski 	 * Initialize the TSS.  sp0 points to the entry trampoline stack
17927f2590a1SAndy Lutomirski 	 * regardless of what task is running.
179320bb8344SAndy Lutomirski 	 */
179472f5e08dSAndy Lutomirski 	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
17951ba76586SYinghai Lu 	load_TR_desc();
17964fe2d8b1SDave Hansen 	load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
179720bb8344SAndy Lutomirski 
179837868fe1SAndy Lutomirski 	load_mm_ldt(&init_mm);
17991ba76586SYinghai Lu 
18009766cdbcSJaswinder Singh Rajput 	clear_all_debug_regs();
18010bb9fef9SJason Wessel 	dbg_restore_debug_regs();
18021ba76586SYinghai Lu 
180321c4cd10SIngo Molnar 	fpu__init_cpu();
18041ba76586SYinghai Lu 
18051ba76586SYinghai Lu 	if (is_uv_system())
18061ba76586SYinghai Lu 		uv_cpu_init();
180769218e47SThomas Garnier 
180869218e47SThomas Garnier 	load_fixmap_gdt(cpu);
18091ba76586SYinghai Lu }
18101ba76586SYinghai Lu 
18111ba76586SYinghai Lu #else
18121ba76586SYinghai Lu 
1813148f9bb8SPaul Gortmaker void cpu_init(void)
1814f7627e25SThomas Gleixner {
1815f7627e25SThomas Gleixner 	int cpu = smp_processor_id();
1816f7627e25SThomas Gleixner 	struct task_struct *curr = current;
1817c482feefSAndy Lutomirski 	struct tss_struct *t = &per_cpu(cpu_tss_rw, cpu);
1818f7627e25SThomas Gleixner 
1819ce4b1b16SIgor Mammedov 	wait_for_master_cpu(cpu);
1820e6ebf5deSFenghua Yu 
18215b2bdbc8SSteven Rostedt 	/*
18225b2bdbc8SSteven Rostedt 	 * Initialize the CR4 shadow before doing anything that could
18235b2bdbc8SSteven Rostedt 	 * try to read it.
18245b2bdbc8SSteven Rostedt 	 */
18255b2bdbc8SSteven Rostedt 	cr4_init_shadow();
18265b2bdbc8SSteven Rostedt 
1827ce4b1b16SIgor Mammedov 	show_ucode_info_early();
1828f7627e25SThomas Gleixner 
18291b74dde7SChen Yucong 	pr_info("Initializing CPU#%d\n", cpu);
1830f7627e25SThomas Gleixner 
1831362f924bSBorislav Petkov 	if (cpu_feature_enabled(X86_FEATURE_VME) ||
183259e21e3dSBorislav Petkov 	    boot_cpu_has(X86_FEATURE_TSC) ||
1833362f924bSBorislav Petkov 	    boot_cpu_has(X86_FEATURE_DE))
1834375074ccSAndy Lutomirski 		cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
1835f7627e25SThomas Gleixner 
1836cf910e83SSeiji Aguchi 	load_current_idt();
1837552be871SBrian Gerst 	switch_to_new_gdt(cpu);
1838f7627e25SThomas Gleixner 
1839f7627e25SThomas Gleixner 	/*
1840f7627e25SThomas Gleixner 	 * Set up and load the per-CPU TSS and LDT
1841f7627e25SThomas Gleixner 	 */
1842f1f10076SVegard Nossum 	mmgrab(&init_mm);
1843f7627e25SThomas Gleixner 	curr->active_mm = &init_mm;
18448c5dfd25SStoyan Gaydarov 	BUG_ON(curr->mm);
184572c0098dSAndy Lutomirski 	initialize_tlbstate_and_flush();
1846f7627e25SThomas Gleixner 	enter_lazy_tlb(&init_mm, curr);
1847f7627e25SThomas Gleixner 
184820bb8344SAndy Lutomirski 	/*
184945d7b255SJoerg Roedel 	 * Initialize the TSS.  sp0 points to the entry trampoline stack
185045d7b255SJoerg Roedel 	 * regardless of what task is running.
185120bb8344SAndy Lutomirski 	 */
185272f5e08dSAndy Lutomirski 	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
1853f7627e25SThomas Gleixner 	load_TR_desc();
185445d7b255SJoerg Roedel 	load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
185520bb8344SAndy Lutomirski 
185637868fe1SAndy Lutomirski 	load_mm_ldt(&init_mm);
1857f7627e25SThomas Gleixner 
18587fb983b4SAndy Lutomirski 	t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
1859f9a196b8SThomas Gleixner 
1860f7627e25SThomas Gleixner #ifdef CONFIG_DOUBLEFAULT
1861f7627e25SThomas Gleixner 	/* Set up doublefault TSS pointer in the GDT */
1862f7627e25SThomas Gleixner 	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
1863f7627e25SThomas Gleixner #endif
1864f7627e25SThomas Gleixner 
18659766cdbcSJaswinder Singh Rajput 	clear_all_debug_regs();
18660bb9fef9SJason Wessel 	dbg_restore_debug_regs();
1867f7627e25SThomas Gleixner 
186821c4cd10SIngo Molnar 	fpu__init_cpu();
186969218e47SThomas Garnier 
187069218e47SThomas Garnier 	load_fixmap_gdt(cpu);
1871f7627e25SThomas Gleixner }
18721ba76586SYinghai Lu #endif
18735700f743SBorislav Petkov 
1874b51ef52dSLaura Abbott static void bsp_resume(void)
1875b51ef52dSLaura Abbott {
1876b51ef52dSLaura Abbott 	if (this_cpu->c_bsp_resume)
1877b51ef52dSLaura Abbott 		this_cpu->c_bsp_resume(&boot_cpu_data);
1878b51ef52dSLaura Abbott }
1879b51ef52dSLaura Abbott 
1880b51ef52dSLaura Abbott static struct syscore_ops cpu_syscore_ops = {
1881b51ef52dSLaura Abbott 	.resume		= bsp_resume,
1882b51ef52dSLaura Abbott };
1883b51ef52dSLaura Abbott 
1884b51ef52dSLaura Abbott static int __init init_cpu_syscore(void)
1885b51ef52dSLaura Abbott {
1886b51ef52dSLaura Abbott 	register_syscore_ops(&cpu_syscore_ops);
1887b51ef52dSLaura Abbott 	return 0;
1888b51ef52dSLaura Abbott }
1889b51ef52dSLaura Abbott core_initcall(init_cpu_syscore);
18901008c52cSBorislav Petkov 
18911008c52cSBorislav Petkov /*
18921008c52cSBorislav Petkov  * The microcode loader calls this upon late microcode load to recheck features,
18931008c52cSBorislav Petkov  * but only when the microcode has actually been updated. The caller holds
18941008c52cSBorislav Petkov  * microcode_mutex and the CPU hotplug lock.
18951008c52cSBorislav Petkov  */
18961008c52cSBorislav Petkov void microcode_check(void)
18971008c52cSBorislav Petkov {
189842ca8082SBorislav Petkov 	struct cpuinfo_x86 info;
189942ca8082SBorislav Petkov 
19001008c52cSBorislav Petkov 	perf_check_microcode();
190142ca8082SBorislav Petkov 
190242ca8082SBorislav Petkov 	/* Reload CPUID max function as it might've changed. */
190342ca8082SBorislav Petkov 	info.cpuid_level = cpuid_eax(0);
190442ca8082SBorislav Petkov 
190542ca8082SBorislav Petkov 	/*
190642ca8082SBorislav Petkov 	 * Copy all capability leaves to pick up the synthetic ones so that
190742ca8082SBorislav Petkov 	 * memcmp() below doesn't fail on that. The ones coming from CPUID will
190842ca8082SBorislav Petkov 	 * get overwritten in get_cpu_cap().
190942ca8082SBorislav Petkov 	 */
191042ca8082SBorislav Petkov 	memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability));
191142ca8082SBorislav Petkov 
191242ca8082SBorislav Petkov 	get_cpu_cap(&info);
191342ca8082SBorislav Petkov 
191442ca8082SBorislav Petkov 	if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)))
191542ca8082SBorislav Petkov 		return;
191642ca8082SBorislav Petkov 
191742ca8082SBorislav Petkov 	pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
191842ca8082SBorislav Petkov 	pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
19191008c52cSBorislav Petkov }
1920