// SPDX-License-Identifier: GPL-2.0-only
/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/memblock.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/sched/mm.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>
#include <linux/pgtable.h>

#include <asm/stackprotector.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/doublefault.h>
#include <asm/archrandom.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>
#include <asm/sections.h>
#include <asm/vsyscall.h>
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
#include <asm/mtrr.h>
#include <asm/hwcap2.h>
#include <linux/numa.h>
#include <asm/numa.h>
#include <asm/asm.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/memtype.h>
#include <asm/microcode.h>
#include <asm/microcode_intel.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#include <asm/uv/uv.h>

#include "cpu.h"

u32 elf_hwcap2 __read_mostly;

/* all of these masks are initialized in setup_cpu_local_masks() */
cpumask_var_t cpu_initialized_mask;
cpumask_var_t cpu_callout_mask;
cpumask_var_t cpu_callin_mask;

/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;

/* correctly size the local cpu masks */
void __init setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

static void default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	cpu_detect_cache_sizes(c);
#else
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
#endif
}

static const struct cpu_dev default_cpu = {
	.c_init		= default_init,
	.c_vendor	= "Unknown",
	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
};

static const struct cpu_dev *this_cpu = &default_cpu;
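
/*
 * A decoded example of the GDT_ENTRY_INIT(flags, base, limit) encoding
 * used below: the low byte of @flags is the access byte (0x9b = present,
 * DPL 0, code, accessed), the high nibble holds the granularity/size
 * bits, so 0xc09b is a 4k-granular 32-bit kernel code segment and 0xa09b
 * the same with the L bit instead of D/B, i.e. 64-bit code.
 */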
DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
	/*
	 * We need valid kernel segments for data and code in long mode too
	 * IRET will check the segment types  kkeil 2000/10/28
	 * Also sysret mandates a special GDT layout
	 *
	 * TLS descriptors are currently at a different place compared to i386.
	 * Hopefully nobody expects them at a fixed place (Wine?)
	 */
	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
#else
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * Their code and data segments have fixed 64k limits;
	 * the transfer segment sizes are set at run time.
	 */
	/* 32-bit code */
	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(0x0092, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	/* 32-bit code */
	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* data */
	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(0x4092, 0, 0xffff),

	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	GDT_STACK_CANARY_INIT
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

#ifdef CONFIG_X86_64
static int __init x86_nopcid_setup(char *s)
{
	/* nopcid doesn't accept parameters */
	if (s)
		return -EINVAL;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_PCID))
		return 0;

	setup_clear_cpu_cap(X86_FEATURE_PCID);
	pr_info("nopcid: PCID feature disabled\n");
	return 0;
}
early_param("nopcid", x86_nopcid_setup);
#endif

static int __init x86_noinvpcid_setup(char *s)
{
	/* noinvpcid doesn't accept parameters */
	if (s)
		return -EINVAL;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_INVPCID))
		return 0;

	setup_clear_cpu_cap(X86_FEATURE_INVPCID);
	pr_info("noinvpcid: INVPCID feature disabled\n");
	return 0;
}
early_param("noinvpcid", x86_noinvpcid_setup);

#ifdef CONFIG_X86_32
static int cachesize_override = -1;
static int disable_x86_serial_nr = 1;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);

static int __init x86_sep_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_SEP);
	return 1;
}
__setup("nosep", x86_sep_setup);

/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
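	/*
	 * Background: EFLAGS bit 21 (the ID flag) can only be toggled if
	 * the CPU supports the CPUID instruction, so flipping @flag in
	 * EFLAGS and checking whether the change sticks doubles as the
	 * CPUID probe used by have_cpuid_p() below.
	 */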
	u32 f1, f2;

	/*
	 * Cyrix and IDT cpus allow disabling of CPUID
	 * so the code below may return different results
	 * when it is executed before and after enabling
	 * the CPUID. Add "volatile" to not allow gcc to
	 * optimize the subsequent calls to this function.
	 */
	asm volatile ("pushfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "movl %0, %1	\n\t"
		      "xorl %2, %0	\n\t"
		      "pushl %0		\n\t"
		      "popfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "popfl		\n\t"

		      : "=&r" (f1), "=&r" (f2)
		      : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
int have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
		return;

	/* Disable processor serial number: */

	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
	lo |= 0x200000;
	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);

	pr_notice("CPU serial number disabled.\n");
	clear_cpu_cap(c, X86_FEATURE_PN);

	/* Disabling the serial number may affect the cpuid level */
	c->cpuid_level = cpuid_eax(0);
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
static inline int flag_is_changeable_p(u32 flag)
{
	return 1;
}
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
#endif

static __init int setup_disable_smep(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMEP);
	return 1;
}
__setup("nosmep", setup_disable_smep);

static __always_inline void setup_smep(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_SMEP))
		cr4_set_bits(X86_CR4_SMEP);
}

static __init int setup_disable_smap(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMAP);
	return 1;
}
__setup("nosmap", setup_disable_smap);

static __always_inline void setup_smap(struct cpuinfo_x86 *c)
{
	unsigned long eflags = native_save_fl();

	/* This should have been cleared long ago */
	BUG_ON(eflags & X86_EFLAGS_AC);

	if (cpu_has(c, X86_FEATURE_SMAP)) {
#ifdef CONFIG_X86_SMAP
		cr4_set_bits(X86_CR4_SMAP);
#else
		cr4_clear_bits(X86_CR4_SMAP);
#endif
	}
}

static __always_inline void setup_umip(struct cpuinfo_x86 *c)
{
	/* Check the boot processor, plus build option for UMIP. */
	if (!cpu_feature_enabled(X86_FEATURE_UMIP))
		goto out;

	/* Check the current processor's cpuid bits. */
	if (!cpu_has(c, X86_FEATURE_UMIP))
		goto out;

	cr4_set_bits(X86_CR4_UMIP);

	pr_info_once("x86/cpu: User Mode Instruction Prevention (UMIP) activated\n");

	return;

out:
	/*
	 * Make sure UMIP is disabled in case it was enabled in a
	 * previous boot (e.g., via kexec).
	 */
	cr4_clear_bits(X86_CR4_UMIP);
}

/* These bits should not change their value after CPU init is finished. */
static const unsigned long cr4_pinned_mask =
	X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP | X86_CR4_FSGSBASE;
static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
static unsigned long cr4_pinned_bits __ro_after_init;
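
/*
 * CR0/CR4 pinning: once cr_pinning is enabled, the write helpers below
 * notice when a caller (or an exploit) tries to clear a pinned bit,
 * immediately rewrite the register with the bit restored, and only then
 * warn. The retry via the set_register label keeps the window with the
 * protection bit cleared as short as possible.
 */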
void native_write_cr0(unsigned long val)
{
	unsigned long bits_missing = 0;

set_register:
	asm volatile("mov %0,%%cr0": "+r" (val), "+m" (__force_order));

	if (static_branch_likely(&cr_pinning)) {
		if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) {
			bits_missing = X86_CR0_WP;
			val |= bits_missing;
			goto set_register;
		}
		/* Warn after we've set the missing bits. */
		WARN_ONCE(bits_missing, "CR0 WP bit went missing!?\n");
	}
}
EXPORT_SYMBOL(native_write_cr0);

void native_write_cr4(unsigned long val)
{
	unsigned long bits_changed = 0;

set_register:
	asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits));

	if (static_branch_likely(&cr_pinning)) {
		if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
			bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits;
			val = (val & ~cr4_pinned_mask) | cr4_pinned_bits;
			goto set_register;
		}
		/* Warn after we've corrected the changed bits. */
		WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n",
			  bits_changed);
	}
}
#if IS_MODULE(CONFIG_LKDTM)
EXPORT_SYMBOL_GPL(native_write_cr4);
#endif

void cr4_update_irqsoff(unsigned long set, unsigned long clear)
{
	unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);

	lockdep_assert_irqs_disabled();

	newval = (cr4 & ~clear) | set;
	if (newval != cr4) {
		this_cpu_write(cpu_tlbstate.cr4, newval);
		__write_cr4(newval);
	}
}
EXPORT_SYMBOL(cr4_update_irqsoff);

/* Read the CR4 shadow. */
unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}
EXPORT_SYMBOL_GPL(cr4_read_shadow);

void cr4_init(void)
{
	unsigned long cr4 = __read_cr4();

	if (boot_cpu_has(X86_FEATURE_PCID))
		cr4 |= X86_CR4_PCIDE;
	if (static_branch_likely(&cr_pinning))
		cr4 = (cr4 & ~cr4_pinned_mask) | cr4_pinned_bits;

	__write_cr4(cr4);

	/* Initialize cr4 shadow for this CPU. */
	this_cpu_write(cpu_tlbstate.cr4, cr4);
}
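
/*
 * Note: cr4_init() runs early on each CPU bring-up, so it both seeds the
 * cpu_tlbstate.cr4 shadow and, once setup_cr_pinning() has run on the
 * boot CPU, reapplies the pinned bits on secondary CPUs.
 */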

/*
 * Once CPU feature detection is finished (and boot params have been
 * parsed), record any of the sensitive CR bits that are set, and
 * enable CR pinning.
 */
static void __init setup_cr_pinning(void)
{
	cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & cr4_pinned_mask;
	static_key_enable(&cr_pinning.key);
}

static __init int x86_nofsgsbase_setup(char *arg)
{
	/* Require an exact match without trailing characters. */
	if (strlen(arg))
		return 0;

	/* Do not emit a message if the feature is not present. */
	if (!boot_cpu_has(X86_FEATURE_FSGSBASE))
		return 1;

	setup_clear_cpu_cap(X86_FEATURE_FSGSBASE);
	pr_info("FSGSBASE disabled via kernel command line\n");
	return 1;
}
__setup("nofsgsbase", x86_nofsgsbase_setup);

/*
 * Protection Keys are not available in 32-bit mode.
 */
static bool pku_disabled;

static __always_inline void setup_pku(struct cpuinfo_x86 *c)
{
	struct pkru_state *pk;

	/* check the boot processor, plus compile options for PKU: */
	if (!cpu_feature_enabled(X86_FEATURE_PKU))
		return;
	/* checks the actual processor's cpuid bits: */
	if (!cpu_has(c, X86_FEATURE_PKU))
		return;
	if (pku_disabled)
		return;

	cr4_set_bits(X86_CR4_PKE);
	pk = get_xsave_addr(&init_fpstate.xsave, XFEATURE_PKRU);
	if (pk)
		pk->pkru = init_pkru_value;
	/*
	 * Setting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
	 * cpuid bit to be set.  We need to ensure that we
	 * update that bit in this CPU's "cpu_info".
	 */
	set_cpu_cap(c, X86_FEATURE_OSPKE);
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static __init int setup_disable_pku(char *arg)
{
	/*
	 * Do not clear the X86_FEATURE_PKU bit.  All of the
	 * runtime checks are against OSPKE so clearing the
	 * bit does nothing.
	 *
	 * This way, we will see "pku" in cpuinfo, but not
	 * "ospke", which is exactly what we want.  It shows
	 * that the CPU has PKU, but the OS has not enabled it.
	 * This happens to be exactly how a system would look
	 * if we disabled the config option.
	 */
	pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n");
	pku_disabled = true;
	return 1;
}
__setup("nopku", setup_disable_pku);
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */

/*
 * Some CPU features depend on higher CPUID levels, which may not always
 * be available due to CPUID level capping or broken virtualization
 * software.  Add those features to this table to auto-disable them.
 */
struct cpuid_dependent_feature {
	u32 feature;
	u32 level;
};

static const struct cpuid_dependent_feature
cpuid_dependent_features[] = {
	{ X86_FEATURE_MWAIT,		0x00000005 },
	{ X86_FEATURE_DCA,		0x00000009 },
	{ X86_FEATURE_XSAVE,		0x0000000d },
	{ 0, 0 }
};

static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{
	const struct cpuid_dependent_feature *df;

	for (df = cpuid_dependent_features; df->feature; df++) {

		if (!cpu_has(c, df->feature))
			continue;
		/*
		 * Note: cpuid_level is set to -1 if unavailable, but
		 * extended_cpuid_level is set to 0 if unavailable
		 * and the legitimate extended levels are all negative
		 * when signed; hence the weird messing around with
		 * signs here...
		 */
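		/*
		 * A worked example of the sign trick checked below: the
		 * basic level 0x0000000d (XSAVE) is positive as an s32 and
		 * is compared against cpuid_level, while a hypothetical
		 * extended level such as 0x80000001 is negative as an s32
		 * and is therefore compared as a u32 against
		 * extended_cpuid_level.
		 */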
		if (!((s32)df->level < 0 ?
		     (u32)df->level > (u32)c->extended_cpuid_level :
		     (s32)df->level > (s32)c->cpuid_level))
			continue;

		clear_cpu_cap(c, df->feature);
		if (!warn)
			continue;

		pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
			x86_cap_flag(df->feature), df->level);
	}
}

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the name
 * itself; in particular, if CPUID levels 0x80000002..4 are supported,
 * this isn't used.
 */

/* Look up CPU names by table lookup. */
static const char *table_lookup_model(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	const struct legacy_cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->legacy_models;

	while (info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
#endif
	return NULL;		/* Not found */
}

/* Aligned to unsigned long to avoid split lock in atomic bitmap ops */
__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
__u32 cpu_caps_set[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));

void load_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	loadsegment(fs, __KERNEL_PERCPU);
#else
	__loadsegment_simple(gs, 0);
	wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
#endif
	load_stack_canary_segment();
}

#ifdef CONFIG_X86_32
/* The 32-bit entry code needs to find cpu_entry_area. */
DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
#endif

/* Load the original GDT from the per-cpu structure */
void load_direct_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_rw(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
EXPORT_SYMBOL_GPL(load_direct_gdt);

/* Load a fixmap remapping of the per-cpu GDT */
void load_fixmap_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_ro(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
EXPORT_SYMBOL_GPL(load_fixmap_gdt);
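
/*
 * The two mappings above differ in protection: get_cpu_gdt_rw() is the
 * writable alias needed whenever the kernel must modify descriptors
 * (e.g. TLS entries), while the fixmap alias is read-only so a stray or
 * malicious write cannot retarget segment descriptors at runtime.
 */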

/*
 * Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one.
 */
void switch_to_new_gdt(int cpu)
{
	/* Load the original GDT */
	load_direct_gdt(cpu);
	/* Reload the per-cpu base */
	load_percpu_segment(cpu);
}

static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q, *s;

	if (c->extended_cpuid_level < 0x80000004)
		return;

	v = (unsigned int *)c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Trim whitespace */
	p = q = s = &c->x86_model_id[0];

	while (*p == ' ')
		p++;

	while (*p) {
		/* Note the last non-whitespace index */
		if (!isspace(*p))
			s = q;

		*q++ = *p++;
	}

	*(s + 1) = '\0';
}

void detect_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	c->x86_max_cores = 1;
	if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
		return;

	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
	if (eax & 0x1f)
		c->x86_max_cores = (eax >> 26) + 1;
}

void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ebx, ecx, edx, l2size;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
#endif
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;
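	/*
	 * CPUID 0x80000006: ECX[31:16] is the unified L2 size in KB;
	 * EBX[27:16] and EBX[11:0] hold the L2 TLB 4K entry counts that
	 * are summed into x86_tlbsize on 64-bit below.
	 */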
	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
	l2size = ecx >> 16;

#ifdef CONFIG_X86_64
	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
	/* do processor-specific cache resizing */
	if (this_cpu->legacy_cache_size)
		l2size = this_cpu->legacy_cache_size(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */
#endif

	c->x86_cache_size = l2size;
}

u16 __read_mostly tlb_lli_4k[NR_INFO];
u16 __read_mostly tlb_lli_2m[NR_INFO];
u16 __read_mostly tlb_lli_4m[NR_INFO];
u16 __read_mostly tlb_lld_4k[NR_INFO];
u16 __read_mostly tlb_lld_2m[NR_INFO];
u16 __read_mostly tlb_lld_4m[NR_INFO];
u16 __read_mostly tlb_lld_1g[NR_INFO];

static void cpu_detect_tlb(struct cpuinfo_x86 *c)
{
	if (this_cpu->c_detect_tlb)
		this_cpu->c_detect_tlb(c);

	pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
		tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
		tlb_lli_4m[ENTRIES]);

	pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
		tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
		tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
}

int detect_ht_early(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32 eax, ebx, ecx, edx;

	if (!cpu_has(c, X86_FEATURE_HT))
		return -1;

	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		return -1;

	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
		return -1;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	smp_num_siblings = (ebx & 0xff0000) >> 16;
	if (smp_num_siblings == 1)
		pr_info_once("CPU0: Hyper-Threading is disabled\n");
#endif
	return 0;
}

void detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	int index_msb, core_bits;

	if (detect_ht_early(c) < 0)
		return;

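	/*
	 * At this point smp_num_siblings counts all threads in the
	 * package; the divide below reduces it to threads per core, and
	 * the APIC ID is then split into package, core and thread fields
	 * using the bit widths computed from those counts.
	 */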
	index_msb = get_count_order(smp_num_siblings);
	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);

	smp_num_siblings = smp_num_siblings / c->x86_max_cores;

	index_msb = get_count_order(smp_num_siblings);

	core_bits = get_count_order(c->x86_max_cores);

	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
		((1 << core_bits) - 1);
#endif
}

static void get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;
	int i;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (!cpu_devs[i])
			break;

		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {

			this_cpu = cpu_devs[i];
			c->x86_vendor = this_cpu->c_x86_vendor;
			return;
		}
	}

	pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
		    "CPU: Your system may be unstable.\n", v);

	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}

void cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	c->x86 = 4;
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;

		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86		= x86_family(tfms);
		c->x86_model	= x86_model(tfms);
		c->x86_stepping	= x86_stepping(tfms);

		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
		}
	}
}
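
/*
 * Forced clears are applied before forced sets, so a bit present in
 * both cpu_caps_cleared and cpu_caps_set ends up set.
 */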
static void apply_forced_caps(struct cpuinfo_x86 *c)
{
	int i;

	for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
		c->x86_capability[i] &= ~cpu_caps_cleared[i];
		c->x86_capability[i] |= cpu_caps_set[i];
	}
}

static void init_speculation_control(struct cpuinfo_x86 *c)
{
	/*
	 * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
	 * and they also have a different bit for STIBP support. Also,
	 * a hypervisor might have set the individual AMD bits even on
	 * Intel CPUs, for finer-grained selection of what's available.
	 */
	if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
		set_cpu_cap(c, X86_FEATURE_IBRS);
		set_cpu_cap(c, X86_FEATURE_IBPB);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}

	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
		set_cpu_cap(c, X86_FEATURE_STIBP);

	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
	    cpu_has(c, X86_FEATURE_VIRT_SSBD))
		set_cpu_cap(c, X86_FEATURE_SSBD);

	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
		set_cpu_cap(c, X86_FEATURE_IBRS);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}

	if (cpu_has(c, X86_FEATURE_AMD_IBPB))
		set_cpu_cap(c, X86_FEATURE_IBPB);

	if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
		set_cpu_cap(c, X86_FEATURE_STIBP);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}

	if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
		set_cpu_cap(c, X86_FEATURE_SSBD);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
		clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
	}
}

void get_cpu_cap(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_1_ECX] = ecx;
		c->x86_capability[CPUID_1_EDX] = edx;
	}

	/* Thermal and Power Management Leaf: level 0x00000006 (eax) */
	if (c->cpuid_level >= 0x00000006)
		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);

	/* Additional Intel-defined flags: level 0x00000007 */
	if (c->cpuid_level >= 0x00000007) {
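		/*
		 * Leaf 0x7 is indexed by ECX; sub-leaf 0's EAX reports the
		 * highest supported sub-leaf, which is what the eax >= 1
		 * check below relies on.
		 */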
		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_7_0_EBX] = ebx;
		c->x86_capability[CPUID_7_ECX] = ecx;
		c->x86_capability[CPUID_7_EDX] = edx;

		/* Check valid sub-leaf index before accessing it */
		if (eax >= 1) {
			cpuid_count(0x00000007, 1, &eax, &ebx, &ecx, &edx);
			c->x86_capability[CPUID_7_1_EAX] = eax;
		}
	}

	/* Extended state features: level 0x0000000d */
	if (c->cpuid_level >= 0x0000000d) {
		cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_D_1_EAX] = eax;
	}

	/* AMD-defined flags: level 0x80000001 */
	eax = cpuid_eax(0x80000000);
	c->extended_cpuid_level = eax;

	if ((eax & 0xffff0000) == 0x80000000) {
		if (eax >= 0x80000001) {
			cpuid(0x80000001, &eax, &ebx, &ecx, &edx);

			c->x86_capability[CPUID_8000_0001_ECX] = ecx;
			c->x86_capability[CPUID_8000_0001_EDX] = edx;
		}
	}

	if (c->extended_cpuid_level >= 0x80000007) {
		cpuid(0x80000007, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_8000_0007_EBX] = ebx;
		c->x86_power = edx;
	}

	if (c->extended_cpuid_level >= 0x80000008) {
		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
	}

	if (c->extended_cpuid_level >= 0x8000000a)
		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);

	init_scattered_cpuid_features(c);
	init_speculation_control(c);

	/*
	 * Clear/Set all flags overridden by options, after probe.
	 * This needs to happen each time we re-probe, which may happen
	 * several times during CPU initialization.
	 */
	apply_forced_caps(c);
}
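
/*
 * CPUID 0x80000008 EAX[7:0] reports the physical and EAX[15:8] the
 * virtual address width; the 36-bit fallback below covers 32-bit CPUs
 * that support PAE/PSE36 but lack the extended leaf.
 */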
void get_cpu_address_sizes(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	if (c->extended_cpuid_level >= 0x80000008) {
		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);

		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
#ifdef CONFIG_X86_32
	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
		c->x86_phys_bits = 36;
#endif
	c->x86_cache_bits = c->x86_phys_bits;
}

static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	int i;

	/*
	 * First of all, decide if this is a 486 or higher
	 * It's a 486 if we can modify the AC flag
	 */
	if (flag_is_changeable_p(X86_EFLAGS_AC))
		c->x86 = 4;
	else
		c->x86 = 3;

	for (i = 0; i < X86_VENDOR_NUM; i++)
		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
			c->x86_vendor_id[0] = 0;
			cpu_devs[i]->c_identify(c);
			if (c->x86_vendor_id[0]) {
				get_cpu_vendor(c);
				break;
			}
		}
#endif
}

#define NO_SPECULATION		BIT(0)
#define NO_MELTDOWN		BIT(1)
#define NO_SSB			BIT(2)
#define NO_L1TF			BIT(3)
#define NO_MDS			BIT(4)
#define MSBDS_ONLY		BIT(5)
#define NO_SWAPGS		BIT(6)
#define NO_ITLB_MULTIHIT	BIT(7)
#define NO_SPECTRE_V2		BIT(8)

#define VULNWL(vendor, family, model, whitelist)	\
	X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)

#define VULNWL_INTEL(model, whitelist)		\
	VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)

#define VULNWL_AMD(family, whitelist)		\
	VULNWL(AMD, family, X86_MODEL_ANY, whitelist)

#define VULNWL_HYGON(family, whitelist)		\
	VULNWL(HYGON, family, X86_MODEL_ANY, whitelist)
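
/*
 * Each entry below whitelists a model for the listed issues, i.e. it
 * records bugs the part is known *not* to have; cpu_set_bug_bits()
 * combines this table with the ARCH_CAPABILITIES MSR bits.
 */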
static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
	VULNWL(ANY,	4, X86_MODEL_ANY,	NO_SPECULATION),
	VULNWL(CENTAUR,	5, X86_MODEL_ANY,	NO_SPECULATION),
	VULNWL(INTEL,	5, X86_MODEL_ANY,	NO_SPECULATION),
	VULNWL(NSC,	5, X86_MODEL_ANY,	NO_SPECULATION),

	/* Intel Family 6 */
	VULNWL_INTEL(ATOM_SALTWELL,		NO_SPECULATION | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_SALTWELL_TABLET,	NO_SPECULATION | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_SALTWELL_MID,		NO_SPECULATION | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_BONNELL,		NO_SPECULATION | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_BONNELL_MID,		NO_SPECULATION | NO_ITLB_MULTIHIT),

	VULNWL_INTEL(ATOM_SILVERMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_SILVERMONT_D,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_SILVERMONT_MID,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_AIRMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(XEON_PHI_KNL,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(XEON_PHI_KNM,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),

	VULNWL_INTEL(CORE_YONAH,		NO_SSB),

	VULNWL_INTEL(ATOM_AIRMONT_MID,		NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_AIRMONT_NP,		NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),

	VULNWL_INTEL(ATOM_GOLDMONT,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_GOLDMONT_D,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),

	/*
	 * Technically, swapgs isn't serializing on AMD (despite it previously
	 * being documented as such in the APM).  But according to AMD, %gs is
	 * updated non-speculatively, and the issuing of %gs-relative memory
	 * operands will be blocked until the %gs update completes, which is
	 * good enough for our purposes.
	 */

	VULNWL_INTEL(ATOM_TREMONT_D,		NO_ITLB_MULTIHIT),

	/* AMD Family 0xf - 0x12 */
	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),

	/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),

	/* Zhaoxin Family 7 */
	VULNWL(CENTAUR,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS),
	VULNWL(ZHAOXIN,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS),
	{}
};
1073f36cf386SThomas Gleixner */ 1074ed5194c2SAndi Kleen 1075cad14885SPawan Gupta VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT), 1076cad14885SPawan Gupta 1077ed5194c2SAndi Kleen /* AMD Family 0xf - 0x12 */ 1078db4d30fbSVineela Tummalapalli VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), 1079db4d30fbSVineela Tummalapalli VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), 1080db4d30fbSVineela Tummalapalli VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), 1081db4d30fbSVineela Tummalapalli VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), 108236ad3513SThomas Gleixner 108336ad3513SThomas Gleixner /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */ 1084db4d30fbSVineela Tummalapalli VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), 1085db4d30fbSVineela Tummalapalli VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), 10861e41a766STony W Wang-oc 10871e41a766STony W Wang-oc /* Zhaoxin Family 7 */ 1088a84de2faSTony W Wang-oc VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS), 1089a84de2faSTony W Wang-oc VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS), 1090fec9434aSDavid Woodhouse {} 1091fec9434aSDavid Woodhouse }; 1092fec9434aSDavid Woodhouse 10937e5b3c26SMark Gross #define VULNBL_INTEL_STEPPINGS(model, steppings, issues) \ 10947e5b3c26SMark Gross X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, \ 10957e5b3c26SMark Gross INTEL_FAM6_##model, steppings, \ 10967e5b3c26SMark Gross X86_FEATURE_ANY, issues) 10977e5b3c26SMark Gross 10987e5b3c26SMark Gross #define SRBDS BIT(0) 10997e5b3c26SMark Gross 11007e5b3c26SMark Gross static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { 11017e5b3c26SMark Gross VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), 11027e5b3c26SMark Gross VULNBL_INTEL_STEPPINGS(HASWELL, X86_STEPPING_ANY, SRBDS), 11037e5b3c26SMark Gross VULNBL_INTEL_STEPPINGS(HASWELL_L, X86_STEPPING_ANY, SRBDS), 11047e5b3c26SMark Gross VULNBL_INTEL_STEPPINGS(HASWELL_G, X86_STEPPING_ANY, SRBDS), 11057e5b3c26SMark Gross VULNBL_INTEL_STEPPINGS(BROADWELL_G, X86_STEPPING_ANY, SRBDS), 11067e5b3c26SMark Gross VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS), 11077e5b3c26SMark Gross VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, SRBDS), 11087e5b3c26SMark Gross VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, SRBDS), 11097e5b3c26SMark Gross VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x0, 0xC), SRBDS), 11107e5b3c26SMark Gross VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x0, 0xD), SRBDS), 11117e5b3c26SMark Gross {} 11127e5b3c26SMark Gross }; 11137e5b3c26SMark Gross 111493920f61SMark Gross static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which) 111536ad3513SThomas Gleixner { 111693920f61SMark Gross const struct x86_cpu_id *m = x86_match_cpu(table); 1117fec9434aSDavid Woodhouse 111836ad3513SThomas Gleixner return m && !!(m->driver_data & which); 111936ad3513SThomas Gleixner } 112017dbca11SAndi Kleen 1121286836a7SPawan Gupta u64 x86_read_arch_cap_msr(void) 1122fec9434aSDavid Woodhouse { 1123fec9434aSDavid Woodhouse u64 ia32_cap = 0; 1124fec9434aSDavid Woodhouse 1125286836a7SPawan Gupta if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) 1126286836a7SPawan Gupta rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap); 1127286836a7SPawan Gupta 1128286836a7SPawan Gupta return 
ia32_cap;
1129286836a7SPawan Gupta }
1130286836a7SPawan Gupta 
1131286836a7SPawan Gupta static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
1132286836a7SPawan Gupta {
1133286836a7SPawan Gupta 	u64 ia32_cap = x86_read_arch_cap_msr();
1134286836a7SPawan Gupta 
1135db4d30fbSVineela Tummalapalli 	/* Set the ITLB_MULTIHIT bug if the CPU is not in the whitelist and not mitigated */
113693920f61SMark Gross 	if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
113793920f61SMark Gross 	    !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
1138db4d30fbSVineela Tummalapalli 		setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
1139db4d30fbSVineela Tummalapalli 
114093920f61SMark Gross 	if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
11418ecc4979SDominik Brodowski 		return;
11428ecc4979SDominik Brodowski 
11438ecc4979SDominik Brodowski 	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
11441e41a766STony W Wang-oc 
114593920f61SMark Gross 	if (!cpu_matches(cpu_vuln_whitelist, NO_SPECTRE_V2))
11468ecc4979SDominik Brodowski 		setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
11478ecc4979SDominik Brodowski 
114893920f61SMark Gross 	if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
114993920f61SMark Gross 	    !(ia32_cap & ARCH_CAP_SSB_NO) &&
115024809860SKonrad Rzeszutek Wilk 	    !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
1151c456442cSKonrad Rzeszutek Wilk 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
1152c456442cSKonrad Rzeszutek Wilk 
1153706d5168SSai Praneeth 	if (ia32_cap & ARCH_CAP_IBRS_ALL)
1154706d5168SSai Praneeth 		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
1155706d5168SSai Praneeth 
115693920f61SMark Gross 	if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
115793920f61SMark Gross 	    !(ia32_cap & ARCH_CAP_MDS_NO)) {
1158ed5194c2SAndi Kleen 		setup_force_cpu_bug(X86_BUG_MDS);
115993920f61SMark Gross 		if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
1160e261f209SThomas Gleixner 			setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
1161e261f209SThomas Gleixner 	}
1162ed5194c2SAndi Kleen 
116393920f61SMark Gross 	if (!cpu_matches(cpu_vuln_whitelist, NO_SWAPGS))
1164f36cf386SThomas Gleixner 		setup_force_cpu_bug(X86_BUG_SWAPGS);
1165f36cf386SThomas Gleixner 
11661b42f017SPawan Gupta 	/*
11671b42f017SPawan Gupta 	 * When the CPU is not mitigated for TAA (TAA_NO=0), set the TAA bug when:
11681b42f017SPawan Gupta 	 *   - TSX is supported or
11691b42f017SPawan Gupta 	 *   - TSX_CTRL is present
11701b42f017SPawan Gupta 	 *
11711b42f017SPawan Gupta 	 * The TSX_CTRL check is needed for cases when TSX could be disabled
11721b42f017SPawan Gupta 	 * before the kernel boots, e.g. by kexec.
11731b42f017SPawan Gupta 	 * The TSX_CTRL check alone is not sufficient when the microcode update
11741b42f017SPawan Gupta 	 * is not present, or when running as a guest that doesn't get TSX_CTRL.
11751b42f017SPawan Gupta 	 */
11761b42f017SPawan Gupta 	if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
11771b42f017SPawan Gupta 	    (cpu_has(c, X86_FEATURE_RTM) ||
11781b42f017SPawan Gupta 	     (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
11791b42f017SPawan Gupta 		setup_force_cpu_bug(X86_BUG_TAA);
11801b42f017SPawan Gupta 
11817e5b3c26SMark Gross 	/*
11827e5b3c26SMark Gross 	 * SRBDS affects CPUs which support RDRAND or RDSEED and are listed
11837e5b3c26SMark Gross 	 * in the vulnerability blacklist.
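	 *
	 * Hedged illustration (it mirrors code elsewhere in this file rather
	 * than adding behavior): once the bug bit is forced here, later code
	 * tests the synthetic flag instead of re-deriving the condition, e.g.
	 *
	 *	if (boot_cpu_has_bug(X86_BUG_SRBDS))
	 *		update_srbds_msr();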
11847e5b3c26SMark Gross */ 11857e5b3c26SMark Gross if ((cpu_has(c, X86_FEATURE_RDRAND) || 11867e5b3c26SMark Gross cpu_has(c, X86_FEATURE_RDSEED)) && 11877e5b3c26SMark Gross cpu_matches(cpu_vuln_blacklist, SRBDS)) 11887e5b3c26SMark Gross setup_force_cpu_bug(X86_BUG_SRBDS); 11897e5b3c26SMark Gross 119093920f61SMark Gross if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) 11914a28bfe3SKonrad Rzeszutek Wilk return; 1192fec9434aSDavid Woodhouse 1193fec9434aSDavid Woodhouse /* Rogue Data Cache Load? No! */ 1194fec9434aSDavid Woodhouse if (ia32_cap & ARCH_CAP_RDCL_NO) 11954a28bfe3SKonrad Rzeszutek Wilk return; 1196fec9434aSDavid Woodhouse 11974a28bfe3SKonrad Rzeszutek Wilk setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); 119817dbca11SAndi Kleen 119993920f61SMark Gross if (cpu_matches(cpu_vuln_whitelist, NO_L1TF)) 120017dbca11SAndi Kleen return; 120117dbca11SAndi Kleen 120217dbca11SAndi Kleen setup_force_cpu_bug(X86_BUG_L1TF); 1203fec9434aSDavid Woodhouse } 1204fec9434aSDavid Woodhouse 120534048c9eSPaolo Ciarrocchi /* 12068990cac6SPavel Tatashin * The NOPL instruction is supposed to exist on all CPUs of family >= 6; 12078990cac6SPavel Tatashin * unfortunately, that's not true in practice because of early VIA 12088990cac6SPavel Tatashin * chips and (more importantly) broken virtualizers that are not easy 12098990cac6SPavel Tatashin * to detect. In the latter case it doesn't even *fail* reliably, so 12108990cac6SPavel Tatashin * probing for it doesn't even work. Disable it completely on 32-bit 12118990cac6SPavel Tatashin * unless we can find a reliable way to detect all the broken cases. 12128990cac6SPavel Tatashin * Enable it explicitly on 64-bit for non-constant inputs of cpu_has(). 12138990cac6SPavel Tatashin */ 12149b3661cdSBorislav Petkov static void detect_nopl(void) 12158990cac6SPavel Tatashin { 12168990cac6SPavel Tatashin #ifdef CONFIG_X86_32 12179b3661cdSBorislav Petkov setup_clear_cpu_cap(X86_FEATURE_NOPL); 12188990cac6SPavel Tatashin #else 12199b3661cdSBorislav Petkov setup_force_cpu_cap(X86_FEATURE_NOPL); 12208990cac6SPavel Tatashin #endif 12218990cac6SPavel Tatashin } 12228990cac6SPavel Tatashin 12238990cac6SPavel Tatashin /* 122434048c9eSPaolo Ciarrocchi * Do minimum CPU detection early. 122534048c9eSPaolo Ciarrocchi * Fields really needed: vendor, cpuid_level, family, model, mask, 122634048c9eSPaolo Ciarrocchi * cache alignment. 122734048c9eSPaolo Ciarrocchi * The others are not touched to avoid unwanted side effects. 122834048c9eSPaolo Ciarrocchi * 1229a1652bb8SJean Delvare * WARNING: this function is only called on the boot CPU. Don't add code 1230a1652bb8SJean Delvare * here that is supposed to run on all CPUs. 
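 *
 * Hedged example of the boundary: a quirk that must run on every CPU
 * belongs in the vendor's c_init() callback (invoked from identify_cpu()
 * for each CPU), while this path should only touch boot-CPU-wide state,
 * e.g. boot_cpu_data and the setup_force_cpu_cap()/setup_clear_cpu_cap()
 * masks.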
123134048c9eSPaolo Ciarrocchi  */
12323da99c97SYinghai Lu static void __init early_identify_cpu(struct cpuinfo_x86 *c)
1233f7627e25SThomas Gleixner {
12346627d242SYinghai Lu #ifdef CONFIG_X86_64
12356627d242SYinghai Lu 	c->x86_clflush_size = 64;
123613c6c532SJan Beulich 	c->x86_phys_bits = 36;
123713c6c532SJan Beulich 	c->x86_virt_bits = 48;
12386627d242SYinghai Lu #else
1239d4387bd3SHuang, Ying 	c->x86_clflush_size = 32;
124013c6c532SJan Beulich 	c->x86_phys_bits = 32;
124113c6c532SJan Beulich 	c->x86_virt_bits = 32;
12426627d242SYinghai Lu #endif
12430a488a53SYinghai Lu 	c->x86_cache_alignment = c->x86_clflush_size;
1244f7627e25SThomas Gleixner 
12450e96f31eSJordan Borgner 	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
12460a488a53SYinghai Lu 	c->extended_cpuid_level = 0;
12470a488a53SYinghai Lu 
12482893cc8fSMatthew Whitehead 	if (!have_cpuid_p())
12492893cc8fSMatthew Whitehead 		identify_cpu_without_cpuid(c);
12502893cc8fSMatthew Whitehead 
1251aef93c8bSYinghai Lu 	/* Cyrix could have CPUID enabled via c_identify() */
125205fb3c19SAndy Lutomirski 	if (have_cpuid_p()) {
1253f7627e25SThomas Gleixner 		cpu_detect(c);
12543da99c97SYinghai Lu 		get_cpu_vendor(c);
12553da99c97SYinghai Lu 		get_cpu_cap(c);
1256d94a155cSKirill A. Shutemov 		get_cpu_address_sizes(c);
125778d1b296SBorislav Petkov 		setup_force_cpu_cap(X86_FEATURE_CPUID);
125812cf105cSKrzysztof Helt 
125910a434fcSYinghai Lu 		if (this_cpu->c_early_init)
126010a434fcSYinghai Lu 			this_cpu->c_early_init(c);
12613da99c97SYinghai Lu 
1262f6e9456cSRobert Richter 		c->cpu_index = 0;
1263b38b0665SH. Peter Anvin 		filter_cpuid_features(c, false);
1264de5397adSFenghua Yu 
1265a110b5ecSBorislav Petkov 		if (this_cpu->c_bsp_init)
1266a110b5ecSBorislav Petkov 			this_cpu->c_bsp_init(c);
126778d1b296SBorislav Petkov 	} else {
126878d1b296SBorislav Petkov 		setup_clear_cpu_cap(X86_FEATURE_CPUID);
126905fb3c19SAndy Lutomirski 	}
1270c3b83598SBorislav Petkov 
1271c3b83598SBorislav Petkov 	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
1272a89f040fSThomas Gleixner 
12734a28bfe3SKonrad Rzeszutek Wilk 	cpu_set_bug_bits(c);
127499c6fa25SDavid Woodhouse 
12756650cdd9SPeter Zijlstra (Intel) 	cpu_set_core_cap_bits(c);
12766650cdd9SPeter Zijlstra (Intel) 
1277db52ef74SIngo Molnar 	fpu__init_system(c);
1278b8b7abaeSAndy Lutomirski 
1279b8b7abaeSAndy Lutomirski #ifdef CONFIG_X86_32
1280b8b7abaeSAndy Lutomirski 	/*
1281b8b7abaeSAndy Lutomirski 	 * Regardless of whether PCID is enumerated, the SDM says
1282b8b7abaeSAndy Lutomirski 	 * that it can't be enabled in 32-bit mode.
1283b8b7abaeSAndy Lutomirski 	 */
1284b8b7abaeSAndy Lutomirski 	setup_clear_cpu_cap(X86_FEATURE_PCID);
1285b8b7abaeSAndy Lutomirski #endif
1286372fddf7SKirill A. Shutemov 
1287372fddf7SKirill A. Shutemov 	/*
1288372fddf7SKirill A. Shutemov 	 * Later in the boot process pgtable_l5_enabled() relies on
1289372fddf7SKirill A. Shutemov 	 * cpu_feature_enabled(X86_FEATURE_LA57). If 5-level paging is not
1290372fddf7SKirill A. Shutemov 	 * enabled by this point, we need to clear the feature bit to avoid
1291372fddf7SKirill A. Shutemov 	 * false positives at a later stage.
1292372fddf7SKirill A. Shutemov 	 *
1293372fddf7SKirill A. Shutemov 	 * pgtable_l5_enabled() can be false here for several reasons:
1294372fddf7SKirill A. Shutemov 	 * - 5-level paging was disabled at compile time;
1295372fddf7SKirill A. Shutemov 	 * - it's a 32-bit kernel;
1296372fddf7SKirill A. Shutemov 	 * - the machine doesn't support 5-level paging;
1297372fddf7SKirill A. Shutemov 	 * - the user specified 'no5lvl' on the kernel command line.
1298372fddf7SKirill A. Shutemov 	 */
1299372fddf7SKirill A.
Shutemov if (!pgtable_l5_enabled()) 1300372fddf7SKirill A. Shutemov setup_clear_cpu_cap(X86_FEATURE_LA57); 13018990cac6SPavel Tatashin 13029b3661cdSBorislav Petkov detect_nopl(); 1303f7627e25SThomas Gleixner } 1304f7627e25SThomas Gleixner 13059d31d35bSYinghai Lu void __init early_cpu_init(void) 13069d31d35bSYinghai Lu { 130702dde8b4SJan Beulich const struct cpu_dev *const *cdev; 130810a434fcSYinghai Lu int count = 0; 13099d31d35bSYinghai Lu 1310ac23f253SJan Beulich #ifdef CONFIG_PROCESSOR_SELECT 13111b74dde7SChen Yucong pr_info("KERNEL supported cpus:\n"); 131231c997caSIngo Molnar #endif 131331c997caSIngo Molnar 131410a434fcSYinghai Lu for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) { 131502dde8b4SJan Beulich const struct cpu_dev *cpudev = *cdev; 13169d31d35bSYinghai Lu 131710a434fcSYinghai Lu if (count >= X86_VENDOR_NUM) 131810a434fcSYinghai Lu break; 131910a434fcSYinghai Lu cpu_devs[count] = cpudev; 132010a434fcSYinghai Lu count++; 132110a434fcSYinghai Lu 1322ac23f253SJan Beulich #ifdef CONFIG_PROCESSOR_SELECT 132331c997caSIngo Molnar { 132431c997caSIngo Molnar unsigned int j; 132531c997caSIngo Molnar 132610a434fcSYinghai Lu for (j = 0; j < 2; j++) { 132710a434fcSYinghai Lu if (!cpudev->c_ident[j]) 132810a434fcSYinghai Lu continue; 13291b74dde7SChen Yucong pr_info(" %s %s\n", cpudev->c_vendor, 133010a434fcSYinghai Lu cpudev->c_ident[j]); 133110a434fcSYinghai Lu } 133210a434fcSYinghai Lu } 13330388423dSDave Jones #endif 133431c997caSIngo Molnar } 13359d31d35bSYinghai Lu early_identify_cpu(&boot_cpu_data); 1336f7627e25SThomas Gleixner } 1337f7627e25SThomas Gleixner 13387a5d6704SAndy Lutomirski static void detect_null_seg_behavior(struct cpuinfo_x86 *c) 13397a5d6704SAndy Lutomirski { 13407a5d6704SAndy Lutomirski #ifdef CONFIG_X86_64 134158a5aac5SAndy Lutomirski /* 13427a5d6704SAndy Lutomirski * Empirically, writing zero to a segment selector on AMD does 13437a5d6704SAndy Lutomirski * not clear the base, whereas writing zero to a segment 13447a5d6704SAndy Lutomirski * selector on Intel does clear the base. Intel's behavior 13457a5d6704SAndy Lutomirski * allows slightly faster context switches in the common case 13467a5d6704SAndy Lutomirski * where GS is unused by the prev and next threads. 134758a5aac5SAndy Lutomirski * 13487a5d6704SAndy Lutomirski * Since neither vendor documents this anywhere that I can see, 13497a5d6704SAndy Lutomirski * detect it directly instead of hardcoding the choice by 13507a5d6704SAndy Lutomirski * vendor. 13517a5d6704SAndy Lutomirski * 13527a5d6704SAndy Lutomirski * I've designated AMD's behavior as the "bug" because it's 13537a5d6704SAndy Lutomirski * counterintuitive and less friendly. 
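 *
 * The probe below is deliberately direct: seed MSR_FS_BASE with a
 * known nonzero value, load a null selector into %fs, and read the
 * base back; a surviving nonzero base is the AMD-style behavior and
 * sets X86_BUG_NULL_SEG.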
135458a5aac5SAndy Lutomirski */ 13557a5d6704SAndy Lutomirski 13567a5d6704SAndy Lutomirski unsigned long old_base, tmp; 13577a5d6704SAndy Lutomirski rdmsrl(MSR_FS_BASE, old_base); 13587a5d6704SAndy Lutomirski wrmsrl(MSR_FS_BASE, 1); 13597a5d6704SAndy Lutomirski loadsegment(fs, 0); 13607a5d6704SAndy Lutomirski rdmsrl(MSR_FS_BASE, tmp); 13617a5d6704SAndy Lutomirski if (tmp != 0) 13627a5d6704SAndy Lutomirski set_cpu_bug(c, X86_BUG_NULL_SEG); 13637a5d6704SAndy Lutomirski wrmsrl(MSR_FS_BASE, old_base); 136458a5aac5SAndy Lutomirski #endif 1365f7627e25SThomas Gleixner } 1366f7627e25SThomas Gleixner 1367148f9bb8SPaul Gortmaker static void generic_identify(struct cpuinfo_x86 *c) 1368f7627e25SThomas Gleixner { 13693da99c97SYinghai Lu c->extended_cpuid_level = 0; 1370f7627e25SThomas Gleixner 1371aef93c8bSYinghai Lu if (!have_cpuid_p()) 1372aef93c8bSYinghai Lu identify_cpu_without_cpuid(c); 1373f7627e25SThomas Gleixner 1374aef93c8bSYinghai Lu /* cyrix could have cpuid enabled via c_identify()*/ 1375a9853dd6SIngo Molnar if (!have_cpuid_p()) 1376aef93c8bSYinghai Lu return; 1377aef93c8bSYinghai Lu 13783da99c97SYinghai Lu cpu_detect(c); 13793da99c97SYinghai Lu 13803da99c97SYinghai Lu get_cpu_vendor(c); 13813da99c97SYinghai Lu 13823da99c97SYinghai Lu get_cpu_cap(c); 13833da99c97SYinghai Lu 1384d94a155cSKirill A. Shutemov get_cpu_address_sizes(c); 1385d94a155cSKirill A. Shutemov 1386f7627e25SThomas Gleixner if (c->cpuid_level >= 0x00000001) { 13873da99c97SYinghai Lu c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF; 1388b89d3b3eSYinghai Lu #ifdef CONFIG_X86_32 1389c8e56d20SBorislav Petkov # ifdef CONFIG_SMP 1390cb8cc442SIngo Molnar c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); 1391f7627e25SThomas Gleixner # else 139201aaea1aSYinghai Lu c->apicid = c->initial_apicid; 1393f7627e25SThomas Gleixner # endif 1394b89d3b3eSYinghai Lu #endif 1395b89d3b3eSYinghai Lu c->phys_proc_id = c->initial_apicid; 1396f7627e25SThomas Gleixner } 1397f7627e25SThomas Gleixner 1398f7627e25SThomas Gleixner get_model_name(c); /* Default name */ 1399f7627e25SThomas Gleixner 14007a5d6704SAndy Lutomirski detect_null_seg_behavior(c); 14010230bb03SAndy Lutomirski 14020230bb03SAndy Lutomirski /* 14030230bb03SAndy Lutomirski * ESPFIX is a strange bug. All real CPUs have it. Paravirt 14040230bb03SAndy Lutomirski * systems that run Linux at CPL > 0 may or may not have the 14050230bb03SAndy Lutomirski * issue, but, even if they have the issue, there's absolutely 14060230bb03SAndy Lutomirski * nothing we can do about it because we can't use the real IRET 14070230bb03SAndy Lutomirski * instruction. 14080230bb03SAndy Lutomirski * 14090230bb03SAndy Lutomirski * NB: For the time being, only 32-bit kernels support 14100230bb03SAndy Lutomirski * X86_BUG_ESPFIX as such. 64-bit kernels directly choose 14110230bb03SAndy Lutomirski * whether to apply espfix using paravirt hooks. If any 14120230bb03SAndy Lutomirski * non-paravirt system ever shows up that does *not* have the 14130230bb03SAndy Lutomirski * ESPFIX issue, we can change this. 
14140230bb03SAndy Lutomirski */ 14150230bb03SAndy Lutomirski #ifdef CONFIG_X86_32 14169bad5658SJuergen Gross # ifdef CONFIG_PARAVIRT_XXL 14170230bb03SAndy Lutomirski do { 14180230bb03SAndy Lutomirski extern void native_iret(void); 14195c83511bSJuergen Gross if (pv_ops.cpu.iret == native_iret) 14200230bb03SAndy Lutomirski set_cpu_bug(c, X86_BUG_ESPFIX); 14210230bb03SAndy Lutomirski } while (0); 14220230bb03SAndy Lutomirski # else 14230230bb03SAndy Lutomirski set_cpu_bug(c, X86_BUG_ESPFIX); 14240230bb03SAndy Lutomirski # endif 14250230bb03SAndy Lutomirski #endif 1426f7627e25SThomas Gleixner } 1427f7627e25SThomas Gleixner 1428f7627e25SThomas Gleixner /* 14299d85eb91SThomas Gleixner * Validate that ACPI/mptables have the same information about the 14309d85eb91SThomas Gleixner * effective APIC id and update the package map. 1431d49597fdSThomas Gleixner */ 14329d85eb91SThomas Gleixner static void validate_apic_and_package_id(struct cpuinfo_x86 *c) 1433d49597fdSThomas Gleixner { 1434d49597fdSThomas Gleixner #ifdef CONFIG_SMP 14359d85eb91SThomas Gleixner unsigned int apicid, cpu = smp_processor_id(); 1436d49597fdSThomas Gleixner 1437d49597fdSThomas Gleixner apicid = apic->cpu_present_to_apicid(cpu); 1438d49597fdSThomas Gleixner 14399d85eb91SThomas Gleixner if (apicid != c->apicid) { 14409d85eb91SThomas Gleixner pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n", 1441d49597fdSThomas Gleixner cpu, apicid, c->initial_apicid); 1442d49597fdSThomas Gleixner } 14439d85eb91SThomas Gleixner BUG_ON(topology_update_package_map(c->phys_proc_id, cpu)); 1444212bf4fdSLen Brown BUG_ON(topology_update_die_map(c->cpu_die_id, cpu)); 1445d49597fdSThomas Gleixner #else 1446d49597fdSThomas Gleixner c->logical_proc_id = 0; 1447d49597fdSThomas Gleixner #endif 1448d49597fdSThomas Gleixner } 1449d49597fdSThomas Gleixner 1450d49597fdSThomas Gleixner /* 1451f7627e25SThomas Gleixner * This does the hard work of actually picking apart the CPU stuff... 1452f7627e25SThomas Gleixner */ 1453148f9bb8SPaul Gortmaker static void identify_cpu(struct cpuinfo_x86 *c) 1454f7627e25SThomas Gleixner { 1455f7627e25SThomas Gleixner int i; 1456f7627e25SThomas Gleixner 1457f7627e25SThomas Gleixner c->loops_per_jiffy = loops_per_jiffy; 145824dbc600SGustavo A. R. Silva c->x86_cache_size = 0; 1459f7627e25SThomas Gleixner c->x86_vendor = X86_VENDOR_UNKNOWN; 1460b399151cSJia Zhang c->x86_model = c->x86_stepping = 0; /* So far unknown... 
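 * (family, model and stepping are filled in by cpu_detect() once
 * generic_identify() runs below)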
*/ 1461f7627e25SThomas Gleixner c->x86_vendor_id[0] = '\0'; /* Unset */ 1462f7627e25SThomas Gleixner c->x86_model_id[0] = '\0'; /* Unset */ 1463f7627e25SThomas Gleixner c->x86_max_cores = 1; 1464102bbe3aSYinghai Lu c->x86_coreid_bits = 0; 146579a8b9aaSBorislav Petkov c->cu_id = 0xff; 146611fdd252SYinghai Lu #ifdef CONFIG_X86_64 1467102bbe3aSYinghai Lu c->x86_clflush_size = 64; 146813c6c532SJan Beulich c->x86_phys_bits = 36; 146913c6c532SJan Beulich c->x86_virt_bits = 48; 1470102bbe3aSYinghai Lu #else 1471102bbe3aSYinghai Lu c->cpuid_level = -1; /* CPUID not detected */ 1472f7627e25SThomas Gleixner c->x86_clflush_size = 32; 147313c6c532SJan Beulich c->x86_phys_bits = 32; 147413c6c532SJan Beulich c->x86_virt_bits = 32; 1475102bbe3aSYinghai Lu #endif 1476102bbe3aSYinghai Lu c->x86_cache_alignment = c->x86_clflush_size; 14770e96f31eSJordan Borgner memset(&c->x86_capability, 0, sizeof(c->x86_capability)); 1478b47ce1feSSean Christopherson #ifdef CONFIG_X86_VMX_FEATURE_NAMES 1479b47ce1feSSean Christopherson memset(&c->vmx_capability, 0, sizeof(c->vmx_capability)); 1480b47ce1feSSean Christopherson #endif 1481f7627e25SThomas Gleixner 1482f7627e25SThomas Gleixner generic_identify(c); 1483f7627e25SThomas Gleixner 14843898534dSAndi Kleen if (this_cpu->c_identify) 1485f7627e25SThomas Gleixner this_cpu->c_identify(c); 1486f7627e25SThomas Gleixner 14876a6256f9SAdam Buchbinder /* Clear/Set all flags overridden by options, after probe */ 14888bf1ebcaSAndy Lutomirski apply_forced_caps(c); 14892759c328SYinghai Lu 1490102bbe3aSYinghai Lu #ifdef CONFIG_X86_64 1491cb8cc442SIngo Molnar c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); 1492102bbe3aSYinghai Lu #endif 1493102bbe3aSYinghai Lu 1494f7627e25SThomas Gleixner /* 1495f7627e25SThomas Gleixner * Vendor-specific initialization. In this section we 1496f7627e25SThomas Gleixner * canonicalize the feature flags, meaning if there are 1497f7627e25SThomas Gleixner * features a certain CPU supports which CPUID doesn't 1498f7627e25SThomas Gleixner * tell us, CPUID claiming incorrect flags, or other bugs, 1499f7627e25SThomas Gleixner * we handle them here. 1500f7627e25SThomas Gleixner * 1501f7627e25SThomas Gleixner * At the end of this section, c->x86_capability better 1502f7627e25SThomas Gleixner * indicate the features this CPU genuinely supports! 1503f7627e25SThomas Gleixner */ 1504f7627e25SThomas Gleixner if (this_cpu->c_init) 1505f7627e25SThomas Gleixner this_cpu->c_init(c); 1506f7627e25SThomas Gleixner 1507f7627e25SThomas Gleixner /* Disable the PN if appropriate */ 1508f7627e25SThomas Gleixner squash_the_stupid_serial_number(c); 1509f7627e25SThomas Gleixner 1510aa35f896SRicardo Neri /* Set up SMEP/SMAP/UMIP */ 1511b2cc2a07SH. Peter Anvin setup_smep(c); 1512b2cc2a07SH. Peter Anvin setup_smap(c); 1513aa35f896SRicardo Neri setup_umip(c); 1514b2cc2a07SH. Peter Anvin 1515dd649bd0SAndy Lutomirski /* Enable FSGSBASE instructions if available. */ 1516742c45c3SAndi Kleen if (cpu_has(c, X86_FEATURE_FSGSBASE)) { 1517dd649bd0SAndy Lutomirski cr4_set_bits(X86_CR4_FSGSBASE); 1518742c45c3SAndi Kleen elf_hwcap2 |= HWCAP2_FSGSBASE; 1519742c45c3SAndi Kleen } 1520dd649bd0SAndy Lutomirski 1521f7627e25SThomas Gleixner /* 15220f3fa48aSIngo Molnar * The vendor-specific functions might have changed features. 15230f3fa48aSIngo Molnar * Now we do "generic changes." 1524f7627e25SThomas Gleixner */ 1525f7627e25SThomas Gleixner 1526b38b0665SH. Peter Anvin /* Filter out anything that depends on CPUID levels we don't have */ 1527b38b0665SH. 
Peter Anvin 	filter_cpuid_features(c, true);
1528b38b0665SH. Peter Anvin 
1529f7627e25SThomas Gleixner 	/* If the model name is still unset, do table lookup. */
1530f7627e25SThomas Gleixner 	if (!c->x86_model_id[0]) {
153102dde8b4SJan Beulich 		const char *p;
1532f7627e25SThomas Gleixner 		p = table_lookup_model(c);
1533f7627e25SThomas Gleixner 		if (p)
1534f7627e25SThomas Gleixner 			strcpy(c->x86_model_id, p);
1535f7627e25SThomas Gleixner 		else
1536f7627e25SThomas Gleixner 			/* Last resort... */
1537f7627e25SThomas Gleixner 			sprintf(c->x86_model_id, "%02x/%02x",
1538f7627e25SThomas Gleixner 				c->x86, c->x86_model);
1539f7627e25SThomas Gleixner 	}
1540f7627e25SThomas Gleixner 
1541102bbe3aSYinghai Lu #ifdef CONFIG_X86_64
1542102bbe3aSYinghai Lu 	detect_ht(c);
1543102bbe3aSYinghai Lu #endif
1544102bbe3aSYinghai Lu 
154549d859d7SH. Peter Anvin 	x86_init_rdrand(c);
154606976945SDave Hansen 	setup_pku(c);
15473e0c3737SYinghai Lu 
15483e0c3737SYinghai Lu 	/*
15496a6256f9SAdam Buchbinder 	 * Clear/Set all flags overridden by options; this needs to be done
15503e0c3737SYinghai Lu 	 * before the SMP capability AND across all CPUs below.
15513e0c3737SYinghai Lu 	 */
15528bf1ebcaSAndy Lutomirski 	apply_forced_caps(c);
15533e0c3737SYinghai Lu 
1554f7627e25SThomas Gleixner 	/*
1555f7627e25SThomas Gleixner 	 * On SMP, boot_cpu_data holds the common feature set between
1556f7627e25SThomas Gleixner 	 * all CPUs; so make sure that we indicate which features are
1557f7627e25SThomas Gleixner 	 * common between the CPUs. The first time this routine gets
1558f7627e25SThomas Gleixner 	 * executed, c == &boot_cpu_data.
1559f7627e25SThomas Gleixner 	 */
1560f7627e25SThomas Gleixner 	if (c != &boot_cpu_data) {
1561f7627e25SThomas Gleixner 		/* AND the already accumulated flags with these */
1562f7627e25SThomas Gleixner 		for (i = 0; i < NCAPINTS; i++)
1563f7627e25SThomas Gleixner 			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
156465fc985bSBorislav Petkov 
156565fc985bSBorislav Petkov 		/* OR, i.e. replicate the bug flags */
156665fc985bSBorislav Petkov 		for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
156765fc985bSBorislav Petkov 			c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
1568f7627e25SThomas Gleixner 	}
1569f7627e25SThomas Gleixner 
1570f7627e25SThomas Gleixner 	/* Init Machine Check Exception if available. */
15715e09954aSBorislav Petkov 	mcheck_cpu_init(c);
157230d432dfSAndi Kleen 
157330d432dfSAndi Kleen 	select_idle_routine(c);
1574102bbe3aSYinghai Lu 
1575de2d9445STejun Heo #ifdef CONFIG_NUMA
1576102bbe3aSYinghai Lu 	numa_add_cpu(smp_processor_id());
1577102bbe3aSYinghai Lu #endif
1578f7627e25SThomas Gleixner }
1579f7627e25SThomas Gleixner 
15808b6c0ab1SIngo Molnar /*
15818b6c0ab1SIngo Molnar  * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
15828b6c0ab1SIngo Molnar  * on 32-bit kernels:
15838b6c0ab1SIngo Molnar  */
1584cfda7bb9SAndy Lutomirski #ifdef CONFIG_X86_32
1585cfda7bb9SAndy Lutomirski void enable_sep_cpu(void)
1586cfda7bb9SAndy Lutomirski {
15878b6c0ab1SIngo Molnar 	struct tss_struct *tss;
15888b6c0ab1SIngo Molnar 	int cpu;
1589cfda7bb9SAndy Lutomirski 
1590b3edfda4SBorislav Petkov 	if (!boot_cpu_has(X86_FEATURE_SEP))
1591b3edfda4SBorislav Petkov 		return;
1592b3edfda4SBorislav Petkov 
15938b6c0ab1SIngo Molnar 	cpu = get_cpu();
1594c482feefSAndy Lutomirski 	tss = &per_cpu(cpu_tss_rw, cpu);
15958b6c0ab1SIngo Molnar 
15968b6c0ab1SIngo Molnar 	/*
1597cf9328ccSAndy Lutomirski 	 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
1598cf9328ccSAndy Lutomirski 	 * see the big comment in struct x86_hw_tss's definition.
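	 *
	 * Hedged summary of the programming below: SYSENTER_CS comes from
	 * that cached ss1 value, SYSENTER_ESP points at the top of this
	 * CPU's entry stack, and SYSENTER_EIP is entry_SYSENTER_32.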
15998b6c0ab1SIngo Molnar */ 1600cfda7bb9SAndy Lutomirski 1601cfda7bb9SAndy Lutomirski tss->x86_tss.ss1 = __KERNEL_CS; 16028b6c0ab1SIngo Molnar wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0); 16034fe2d8b1SDave Hansen wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0); 16044c8cd0c5SIngo Molnar wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0); 16058b6c0ab1SIngo Molnar 1606cfda7bb9SAndy Lutomirski put_cpu(); 1607cfda7bb9SAndy Lutomirski } 1608e04d645fSGlauber Costa #endif 1609e04d645fSGlauber Costa 1610f7627e25SThomas Gleixner void __init identify_boot_cpu(void) 1611f7627e25SThomas Gleixner { 1612f7627e25SThomas Gleixner identify_cpu(&boot_cpu_data); 1613102bbe3aSYinghai Lu #ifdef CONFIG_X86_32 1614f7627e25SThomas Gleixner sysenter_setup(); 1615f7627e25SThomas Gleixner enable_sep_cpu(); 1616102bbe3aSYinghai Lu #endif 1617e0ba94f1SAlex Shi cpu_detect_tlb(&boot_cpu_data); 1618873d50d5SKees Cook setup_cr_pinning(); 161995c5824fSPawan Gupta 162095c5824fSPawan Gupta tsx_init(); 1621f7627e25SThomas Gleixner } 1622f7627e25SThomas Gleixner 1623148f9bb8SPaul Gortmaker void identify_secondary_cpu(struct cpuinfo_x86 *c) 1624f7627e25SThomas Gleixner { 1625f7627e25SThomas Gleixner BUG_ON(c == &boot_cpu_data); 1626f7627e25SThomas Gleixner identify_cpu(c); 1627102bbe3aSYinghai Lu #ifdef CONFIG_X86_32 1628f7627e25SThomas Gleixner enable_sep_cpu(); 1629102bbe3aSYinghai Lu #endif 1630f7627e25SThomas Gleixner mtrr_ap_init(); 16319d85eb91SThomas Gleixner validate_apic_and_package_id(c); 163277243971SKonrad Rzeszutek Wilk x86_spec_ctrl_setup_ap(); 16337e5b3c26SMark Gross update_srbds_msr(); 1634f7627e25SThomas Gleixner } 1635f7627e25SThomas Gleixner 1636191679fdSAndi Kleen static __init int setup_noclflush(char *arg) 1637191679fdSAndi Kleen { 1638840d2830SH. Peter Anvin setup_clear_cpu_cap(X86_FEATURE_CLFLUSH); 1639da4aaa7dSH. Peter Anvin setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT); 1640191679fdSAndi Kleen return 1; 1641191679fdSAndi Kleen } 1642191679fdSAndi Kleen __setup("noclflush", setup_noclflush); 1643191679fdSAndi Kleen 1644148f9bb8SPaul Gortmaker void print_cpu_info(struct cpuinfo_x86 *c) 1645f7627e25SThomas Gleixner { 164602dde8b4SJan Beulich const char *vendor = NULL; 1647f7627e25SThomas Gleixner 16480f3fa48aSIngo Molnar if (c->x86_vendor < X86_VENDOR_NUM) { 1649f7627e25SThomas Gleixner vendor = this_cpu->c_vendor; 16500f3fa48aSIngo Molnar } else { 16510f3fa48aSIngo Molnar if (c->cpuid_level >= 0) 1652f7627e25SThomas Gleixner vendor = c->x86_vendor_id; 16530f3fa48aSIngo Molnar } 1654f7627e25SThomas Gleixner 1655bd32a8cfSYinghai Lu if (vendor && !strstr(c->x86_model_id, vendor)) 16561b74dde7SChen Yucong pr_cont("%s ", vendor); 1657f7627e25SThomas Gleixner 16589d31d35bSYinghai Lu if (c->x86_model_id[0]) 16591b74dde7SChen Yucong pr_cont("%s", c->x86_model_id); 1660f7627e25SThomas Gleixner else 16611b74dde7SChen Yucong pr_cont("%d86", c->x86); 1662f7627e25SThomas Gleixner 16631b74dde7SChen Yucong pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model); 1664924e101aSBorislav Petkov 1665b399151cSJia Zhang if (c->x86_stepping || c->cpuid_level >= 0) 1666b399151cSJia Zhang pr_cont(", stepping: 0x%x)\n", c->x86_stepping); 1667f7627e25SThomas Gleixner else 16681b74dde7SChen Yucong pr_cont(")\n"); 1669f7627e25SThomas Gleixner } 1670f7627e25SThomas Gleixner 16710c2a3913SAndi Kleen /* 16720c2a3913SAndi Kleen * clearcpuid= was already parsed in fpu__init_parse_early_param. 
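 * (That early parse happens during setup_arch(), via fpu__init_system()
 * in early_identify_cpu(), well before __setup() handlers are processed.)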
16730c2a3913SAndi Kleen * But we need to keep a dummy __setup around otherwise it would 16740c2a3913SAndi Kleen * show up as an environment variable for init. 16750c2a3913SAndi Kleen */ 16760c2a3913SAndi Kleen static __init int setup_clearcpuid(char *arg) 1677ac72e788SAndi Kleen { 1678ac72e788SAndi Kleen return 1; 1679ac72e788SAndi Kleen } 16800c2a3913SAndi Kleen __setup("clearcpuid=", setup_clearcpuid); 1681ac72e788SAndi Kleen 1682d5494d4fSYinghai Lu #ifdef CONFIG_X86_64 1683e6401c13SAndy Lutomirski DEFINE_PER_CPU_FIRST(struct fixed_percpu_data, 1684e6401c13SAndy Lutomirski fixed_percpu_data) __aligned(PAGE_SIZE) __visible; 1685e6401c13SAndy Lutomirski EXPORT_PER_CPU_SYMBOL_GPL(fixed_percpu_data); 16860f3fa48aSIngo Molnar 1687bdf977b3STejun Heo /* 1688a7fcf28dSAndy Lutomirski * The following percpu variables are hot. Align current_task to 1689a7fcf28dSAndy Lutomirski * cacheline size such that they fall in the same cacheline. 1690bdf977b3STejun Heo */ 1691bdf977b3STejun Heo DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned = 1692bdf977b3STejun Heo &init_task; 1693bdf977b3STejun Heo EXPORT_PER_CPU_SYMBOL(current_task); 1694d5494d4fSYinghai Lu 1695e6401c13SAndy Lutomirski DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr); 1696277d5b40SAndi Kleen DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1; 1697d5494d4fSYinghai Lu 1698c2daa3beSPeter Zijlstra DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT; 1699c2daa3beSPeter Zijlstra EXPORT_PER_CPU_SYMBOL(__preempt_count); 1700c2daa3beSPeter Zijlstra 1701d5494d4fSYinghai Lu /* May not be marked __init: used by software suspend */ 1702d5494d4fSYinghai Lu void syscall_init(void) 1703d5494d4fSYinghai Lu { 170431ac34caSBorislav Petkov wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS); 17058d4b0678SThomas Gleixner wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64); 1706d56fe4bfSIngo Molnar 1707d56fe4bfSIngo Molnar #ifdef CONFIG_IA32_EMULATION 170847edb651SAndy Lutomirski wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat); 1709a76c7f46SDenys Vlasenko /* 1710487d1edbSDenys Vlasenko * This only works on Intel CPUs. 1711487d1edbSDenys Vlasenko * On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP. 1712487d1edbSDenys Vlasenko * This does not cause SYSENTER to jump to the wrong location, because 1713487d1edbSDenys Vlasenko * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit). 1714a76c7f46SDenys Vlasenko */ 1715a76c7f46SDenys Vlasenko wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS); 17168e6b65a1Szhong jiang wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 17178e6b65a1Szhong jiang (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1)); 17184c8cd0c5SIngo Molnar wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat); 1719d56fe4bfSIngo Molnar #else 172047edb651SAndy Lutomirski wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret); 17216b51311cSBorislav Petkov wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG); 1722d56fe4bfSIngo Molnar wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); 1723d56fe4bfSIngo Molnar wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL); 1724d5494d4fSYinghai Lu #endif 1725d5494d4fSYinghai Lu 1726d5494d4fSYinghai Lu /* Flags to clear on syscall */ 1727d5494d4fSYinghai Lu wrmsrl(MSR_SYSCALL_MASK, 172863bcff2aSH. 
Peter Anvin X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF| 17298c7aa698SAndy Lutomirski X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT); 1730d5494d4fSYinghai Lu } 1731d5494d4fSYinghai Lu 17320f3fa48aSIngo Molnar #else /* CONFIG_X86_64 */ 1733d5494d4fSYinghai Lu 1734bdf977b3STejun Heo DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; 1735bdf977b3STejun Heo EXPORT_PER_CPU_SYMBOL(current_task); 1736c2daa3beSPeter Zijlstra DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT; 1737c2daa3beSPeter Zijlstra EXPORT_PER_CPU_SYMBOL(__preempt_count); 1738bdf977b3STejun Heo 1739a7fcf28dSAndy Lutomirski /* 1740a7fcf28dSAndy Lutomirski * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find 1741a7fcf28dSAndy Lutomirski * the top of the kernel stack. Use an extra percpu variable to track the 1742a7fcf28dSAndy Lutomirski * top of the kernel stack directly. 1743a7fcf28dSAndy Lutomirski */ 1744a7fcf28dSAndy Lutomirski DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) = 1745a7fcf28dSAndy Lutomirski (unsigned long)&init_thread_union + THREAD_SIZE; 1746a7fcf28dSAndy Lutomirski EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack); 1747a7fcf28dSAndy Lutomirski 1748050e9baaSLinus Torvalds #ifdef CONFIG_STACKPROTECTOR 174953f82452SJeremy Fitzhardinge DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); 175060a5317fSTejun Heo #endif 175160a5317fSTejun Heo 17520f3fa48aSIngo Molnar #endif /* CONFIG_X86_64 */ 1753f7627e25SThomas Gleixner 1754f7627e25SThomas Gleixner /* 17559766cdbcSJaswinder Singh Rajput * Clear all 6 debug registers: 17569766cdbcSJaswinder Singh Rajput */ 17579766cdbcSJaswinder Singh Rajput static void clear_all_debug_regs(void) 17589766cdbcSJaswinder Singh Rajput { 17599766cdbcSJaswinder Singh Rajput int i; 17609766cdbcSJaswinder Singh Rajput 17619766cdbcSJaswinder Singh Rajput for (i = 0; i < 8; i++) { 17629766cdbcSJaswinder Singh Rajput /* Ignore db4, db5 */ 17639766cdbcSJaswinder Singh Rajput if ((i == 4) || (i == 5)) 17649766cdbcSJaswinder Singh Rajput continue; 17659766cdbcSJaswinder Singh Rajput 17669766cdbcSJaswinder Singh Rajput set_debugreg(0, i); 17679766cdbcSJaswinder Singh Rajput } 17689766cdbcSJaswinder Singh Rajput } 1769f7627e25SThomas Gleixner 17700bb9fef9SJason Wessel #ifdef CONFIG_KGDB 17710bb9fef9SJason Wessel /* 17720bb9fef9SJason Wessel * Restore debug regs if using kgdbwait and you have a kernel debugger 17730bb9fef9SJason Wessel * connection established. 17740bb9fef9SJason Wessel */ 17750bb9fef9SJason Wessel static void dbg_restore_debug_regs(void) 17760bb9fef9SJason Wessel { 17770bb9fef9SJason Wessel if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break)) 17780bb9fef9SJason Wessel arch_kgdb_ops.correct_hw_break(); 17790bb9fef9SJason Wessel } 17800bb9fef9SJason Wessel #else /* ! CONFIG_KGDB */ 17810bb9fef9SJason Wessel #define dbg_restore_debug_regs() 17820bb9fef9SJason Wessel #endif /* ! 
CONFIG_KGDB */ 17830bb9fef9SJason Wessel 1784ce4b1b16SIgor Mammedov static void wait_for_master_cpu(int cpu) 1785ce4b1b16SIgor Mammedov { 1786ce4b1b16SIgor Mammedov #ifdef CONFIG_SMP 1787ce4b1b16SIgor Mammedov /* 1788ce4b1b16SIgor Mammedov * wait for ACK from master CPU before continuing 1789ce4b1b16SIgor Mammedov * with AP initialization 1790ce4b1b16SIgor Mammedov */ 1791ce4b1b16SIgor Mammedov WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)); 1792ce4b1b16SIgor Mammedov while (!cpumask_test_cpu(cpu, cpu_callout_mask)) 1793ce4b1b16SIgor Mammedov cpu_relax(); 1794ce4b1b16SIgor Mammedov #endif 1795ce4b1b16SIgor Mammedov } 1796ce4b1b16SIgor Mammedov 1797b2e2ba57SChang S. Bae #ifdef CONFIG_X86_64 1798505b7899SThomas Gleixner static inline void setup_getcpu(int cpu) 1799b2e2ba57SChang S. Bae { 180022245bdfSIngo Molnar unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu)); 1801b2e2ba57SChang S. Bae struct desc_struct d = { }; 1802b2e2ba57SChang S. Bae 180367e87d43SBorislav Petkov if (boot_cpu_has(X86_FEATURE_RDTSCP)) 1804b2e2ba57SChang S. Bae write_rdtscp_aux(cpudata); 1805b2e2ba57SChang S. Bae 1806b2e2ba57SChang S. Bae /* Store CPU and node number in limit. */ 1807b2e2ba57SChang S. Bae d.limit0 = cpudata; 1808b2e2ba57SChang S. Bae d.limit1 = cpudata >> 16; 1809b2e2ba57SChang S. Bae 1810b2e2ba57SChang S. Bae d.type = 5; /* RO data, expand down, accessed */ 1811b2e2ba57SChang S. Bae d.dpl = 3; /* Visible to user code */ 1812b2e2ba57SChang S. Bae d.s = 1; /* Not a system segment */ 1813b2e2ba57SChang S. Bae d.p = 1; /* Present */ 1814b2e2ba57SChang S. Bae d.d = 1; /* 32-bit */ 1815b2e2ba57SChang S. Bae 181622245bdfSIngo Molnar write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S); 1817b2e2ba57SChang S. Bae } 1818505b7899SThomas Gleixner 1819505b7899SThomas Gleixner static inline void ucode_cpu_init(int cpu) 1820505b7899SThomas Gleixner { 1821505b7899SThomas Gleixner if (cpu) 1822505b7899SThomas Gleixner load_ucode_ap(); 1823505b7899SThomas Gleixner } 1824505b7899SThomas Gleixner 1825505b7899SThomas Gleixner static inline void tss_setup_ist(struct tss_struct *tss) 1826505b7899SThomas Gleixner { 1827505b7899SThomas Gleixner /* Set up the per-CPU TSS IST stacks */ 1828505b7899SThomas Gleixner tss->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF); 1829505b7899SThomas Gleixner tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI); 1830505b7899SThomas Gleixner tss->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB); 1831505b7899SThomas Gleixner tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE); 183202772fb9SJoerg Roedel /* Only mapped when SEV-ES is active */ 183302772fb9SJoerg Roedel tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC); 1834505b7899SThomas Gleixner } 1835505b7899SThomas Gleixner 1836505b7899SThomas Gleixner #else /* CONFIG_X86_64 */ 1837505b7899SThomas Gleixner 1838505b7899SThomas Gleixner static inline void setup_getcpu(int cpu) { } 1839505b7899SThomas Gleixner 1840505b7899SThomas Gleixner static inline void ucode_cpu_init(int cpu) 1841505b7899SThomas Gleixner { 1842505b7899SThomas Gleixner show_ucode_info_early(); 1843505b7899SThomas Gleixner } 1844505b7899SThomas Gleixner 1845505b7899SThomas Gleixner static inline void tss_setup_ist(struct tss_struct *tss) { } 1846505b7899SThomas Gleixner 1847505b7899SThomas Gleixner #endif /* !CONFIG_X86_64 */ 1848b2e2ba57SChang S. 
Bae 1849111e7b15SThomas Gleixner static inline void tss_setup_io_bitmap(struct tss_struct *tss) 1850111e7b15SThomas Gleixner { 1851111e7b15SThomas Gleixner tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET_INVALID; 1852111e7b15SThomas Gleixner 1853111e7b15SThomas Gleixner #ifdef CONFIG_X86_IOPL_IOPERM 1854111e7b15SThomas Gleixner tss->io_bitmap.prev_max = 0; 1855111e7b15SThomas Gleixner tss->io_bitmap.prev_sequence = 0; 1856111e7b15SThomas Gleixner memset(tss->io_bitmap.bitmap, 0xff, sizeof(tss->io_bitmap.bitmap)); 1857111e7b15SThomas Gleixner /* 1858111e7b15SThomas Gleixner * Invalidate the extra array entry past the end of the all 1859111e7b15SThomas Gleixner * permission bitmap as required by the hardware. 1860111e7b15SThomas Gleixner */ 1861111e7b15SThomas Gleixner tss->io_bitmap.mapall[IO_BITMAP_LONGS] = ~0UL; 1862111e7b15SThomas Gleixner #endif 1863111e7b15SThomas Gleixner } 1864f7627e25SThomas Gleixner 1865f7627e25SThomas Gleixner /* 1866*520d0308SJoerg Roedel * Setup everything needed to handle exceptions from the IDT, including the IST 1867*520d0308SJoerg Roedel * exceptions which use paranoid_entry(). 1868*520d0308SJoerg Roedel */ 1869*520d0308SJoerg Roedel void cpu_init_exception_handling(void) 1870*520d0308SJoerg Roedel { 1871*520d0308SJoerg Roedel struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw); 1872*520d0308SJoerg Roedel int cpu = raw_smp_processor_id(); 1873*520d0308SJoerg Roedel 1874*520d0308SJoerg Roedel /* paranoid_entry() gets the CPU number from the GDT */ 1875*520d0308SJoerg Roedel setup_getcpu(cpu); 1876*520d0308SJoerg Roedel 1877*520d0308SJoerg Roedel /* IST vectors need TSS to be set up. */ 1878*520d0308SJoerg Roedel tss_setup_ist(tss); 1879*520d0308SJoerg Roedel tss_setup_io_bitmap(tss); 1880*520d0308SJoerg Roedel set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); 1881*520d0308SJoerg Roedel 1882*520d0308SJoerg Roedel load_TR_desc(); 1883*520d0308SJoerg Roedel 1884*520d0308SJoerg Roedel /* Finally load the IDT */ 1885*520d0308SJoerg Roedel load_current_idt(); 1886*520d0308SJoerg Roedel } 1887*520d0308SJoerg Roedel 1888*520d0308SJoerg Roedel /* 1889f7627e25SThomas Gleixner * cpu_init() initializes state that is per-CPU. Some data is already 1890f7627e25SThomas Gleixner * initialized (naturally) in the bootstrap process, such as the GDT 1891f7627e25SThomas Gleixner * and IDT. We reload them nevertheless, this function acts as a 1892f7627e25SThomas Gleixner * 'CPU state barrier', nothing should get across. 1893f7627e25SThomas Gleixner */ 1894148f9bb8SPaul Gortmaker void cpu_init(void) 18951ba76586SYinghai Lu { 1896505b7899SThomas Gleixner struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw); 1897505b7899SThomas Gleixner struct task_struct *cur = current; 1898f6ef7322SThomas Gleixner int cpu = raw_smp_processor_id(); 18991ba76586SYinghai Lu 1900ce4b1b16SIgor Mammedov wait_for_master_cpu(cpu); 1901ce4b1b16SIgor Mammedov 1902505b7899SThomas Gleixner ucode_cpu_init(cpu); 19030f3fa48aSIngo Molnar 1904e7a22c1eSBrian Gerst #ifdef CONFIG_NUMA 190527fd185fSFenghua Yu if (this_cpu_read(numa_node) == 0 && 1906e534c7c5SLee Schermerhorn early_cpu_to_node(cpu) != NUMA_NO_NODE) 1907e534c7c5SLee Schermerhorn set_numa_node(early_cpu_to_node(cpu)); 1908e7a22c1eSBrian Gerst #endif 1909b2e2ba57SChang S. 
Bae setup_getcpu(cpu); 19101ba76586SYinghai Lu 19112eaad1fdSMike Travis pr_debug("Initializing CPU#%d\n", cpu); 19121ba76586SYinghai Lu 1913505b7899SThomas Gleixner if (IS_ENABLED(CONFIG_X86_64) || cpu_feature_enabled(X86_FEATURE_VME) || 1914505b7899SThomas Gleixner boot_cpu_has(X86_FEATURE_TSC) || boot_cpu_has(X86_FEATURE_DE)) 1915375074ccSAndy Lutomirski cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); 19161ba76586SYinghai Lu 19171ba76586SYinghai Lu /* 19181ba76586SYinghai Lu * Initialize the per-CPU GDT with the boot GDT, 19191ba76586SYinghai Lu * and set up the GDT descriptor: 19201ba76586SYinghai Lu */ 1921552be871SBrian Gerst switch_to_new_gdt(cpu); 1922cf910e83SSeiji Aguchi load_current_idt(); 19231ba76586SYinghai Lu 1924505b7899SThomas Gleixner if (IS_ENABLED(CONFIG_X86_64)) { 1925505b7899SThomas Gleixner loadsegment(fs, 0); 1926505b7899SThomas Gleixner memset(cur->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); 19271ba76586SYinghai Lu syscall_init(); 19281ba76586SYinghai Lu 19291ba76586SYinghai Lu wrmsrl(MSR_FS_BASE, 0); 19301ba76586SYinghai Lu wrmsrl(MSR_KERNEL_GS_BASE, 0); 19311ba76586SYinghai Lu barrier(); 19321ba76586SYinghai Lu 1933659006bfSThomas Gleixner x2apic_setup(); 19341ba76586SYinghai Lu } 19351ba76586SYinghai Lu 1936f1f10076SVegard Nossum mmgrab(&init_mm); 1937505b7899SThomas Gleixner cur->active_mm = &init_mm; 1938505b7899SThomas Gleixner BUG_ON(cur->mm); 193972c0098dSAndy Lutomirski initialize_tlbstate_and_flush(); 1940505b7899SThomas Gleixner enter_lazy_tlb(&init_mm, cur); 19411ba76586SYinghai Lu 1942505b7899SThomas Gleixner /* Initialize the TSS. */ 1943505b7899SThomas Gleixner tss_setup_ist(tss); 1944111e7b15SThomas Gleixner tss_setup_io_bitmap(tss); 194572f5e08dSAndy Lutomirski set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); 1946505b7899SThomas Gleixner 19471ba76586SYinghai Lu load_TR_desc(); 1948505b7899SThomas Gleixner /* 1949505b7899SThomas Gleixner * sp0 points to the entry trampoline stack regardless of what task 1950505b7899SThomas Gleixner * is running. 1951505b7899SThomas Gleixner */ 19524fe2d8b1SDave Hansen load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1)); 195320bb8344SAndy Lutomirski 195437868fe1SAndy Lutomirski load_mm_ldt(&init_mm); 19551ba76586SYinghai Lu 19569766cdbcSJaswinder Singh Rajput clear_all_debug_regs(); 19570bb9fef9SJason Wessel dbg_restore_debug_regs(); 19581ba76586SYinghai Lu 1959dc4e0021SAndy Lutomirski doublefault_init_cpu_tss(); 1960505b7899SThomas Gleixner 196121c4cd10SIngo Molnar fpu__init_cpu(); 19621ba76586SYinghai Lu 19631ba76586SYinghai Lu if (is_uv_system()) 19641ba76586SYinghai Lu uv_cpu_init(); 196569218e47SThomas Garnier 196669218e47SThomas Garnier load_fixmap_gdt(cpu); 19671ba76586SYinghai Lu } 19681ba76586SYinghai Lu 19691008c52cSBorislav Petkov /* 19701008c52cSBorislav Petkov * The microcode loader calls this upon late microcode load to recheck features, 19711008c52cSBorislav Petkov * only when microcode has been updated. Caller holds microcode_mutex and CPU 19721008c52cSBorislav Petkov * hotplug lock. 19731008c52cSBorislav Petkov */ 19741008c52cSBorislav Petkov void microcode_check(void) 19751008c52cSBorislav Petkov { 197642ca8082SBorislav Petkov struct cpuinfo_x86 info; 197742ca8082SBorislav Petkov 19781008c52cSBorislav Petkov perf_check_microcode(); 197942ca8082SBorislav Petkov 198042ca8082SBorislav Petkov /* Reload CPUID max function as it might've changed. 
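 * A late microcode load can raise the maximum supported CPUID leaf, and
 * the get_cpu_cap() re-read below depends on an up-to-date cpuid_level.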
 */
198142ca8082SBorislav Petkov 	info.cpuid_level = cpuid_eax(0);
198242ca8082SBorislav Petkov 
198342ca8082SBorislav Petkov 	/*
198442ca8082SBorislav Petkov 	 * Copy all capability leaves to pick up the synthetic ones so that
198542ca8082SBorislav Petkov 	 * memcmp() below doesn't fail on that. The ones coming from CPUID will
198642ca8082SBorislav Petkov 	 * get overwritten in get_cpu_cap().
198742ca8082SBorislav Petkov 	 */
198842ca8082SBorislav Petkov 	memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability));
198942ca8082SBorislav Petkov 
199042ca8082SBorislav Petkov 	get_cpu_cap(&info);
199142ca8082SBorislav Petkov 
199242ca8082SBorislav Petkov 	if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)))
199342ca8082SBorislav Petkov 		return;
199442ca8082SBorislav Petkov 
199542ca8082SBorislav Petkov 	pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
199642ca8082SBorislav Petkov 	pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
19971008c52cSBorislav Petkov }
19989c92374bSThomas Gleixner 
19999c92374bSThomas Gleixner /*
20009c92374bSThomas Gleixner  * Invoked from core CPU hotplug code after hotplug operations.
20019c92374bSThomas Gleixner  */
20029c92374bSThomas Gleixner void arch_smt_update(void)
20039c92374bSThomas Gleixner {
20049c92374bSThomas Gleixner 	/* Handle the speculative execution misfeatures */
20059c92374bSThomas Gleixner 	cpu_bugs_smt_update();
20066a1cb5f5SThomas Gleixner 	/* Check whether IPI broadcasting can be enabled */
20076a1cb5f5SThomas Gleixner 	apic_smt_update();
20089c92374bSThomas Gleixner }
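/*
 * Hedged usage sketch (illustration only, not part of this file): how a
 * late-loading path would be expected to drive microcode_check() above.
 * apply_microcode_on_all_cpus() is a hypothetical stand-in for the real
 * loader work; the locking mirrors the requirement stated above
 * microcode_check() that microcode_mutex and the CPU hotplug lock be held.
 *
 *	cpus_read_lock();
 *	mutex_lock(&microcode_mutex);
 *	if (apply_microcode_on_all_cpus() == 0)	/* hypothetical helper */
 *		microcode_check();
 *	mutex_unlock(&microcode_mutex);
 *	cpus_read_unlock();
 */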