/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/memblock.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/sched/mm.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>

#include <asm/stackprotector.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/archrandom.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>
#include <asm/sections.h>
#include <asm/vsyscall.h>
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <asm/pgtable.h>
#include <linux/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
#include <asm/mtrr.h>
#include <asm/hwcap2.h>
#include <linux/numa.h>
#include <asm/asm.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/microcode.h>
#include <asm/microcode_intel.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
#endif

#include "cpu.h"

u32 elf_hwcap2 __read_mostly;

/* all of these masks are initialized in setup_cpu_local_masks() */
cpumask_var_t cpu_initialized_mask;
cpumask_var_t cpu_callout_mask;
cpumask_var_t cpu_callin_mask;

/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;

/* correctly size the local cpu masks */
void __init setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

static void default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	cpu_detect_cache_sizes(c);
#else
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
#endif
}

static const struct cpu_dev default_cpu = {
	.c_init		= default_init,
	.c_vendor	= "Unknown",
	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
};

static const struct cpu_dev *this_cpu = &default_cpu;

DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
	/*
	 * We need valid kernel segments for data and code in long mode too
	 * IRET will check the segment types  kkeil 2000/10/28
	 * Also sysret mandates a special GDT layout
	 *
	 * TLS descriptors are currently at a different place compared to i386.
	 * Hopefully nobody expects them at a fixed place (Wine?)
	 */
	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
#else
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * The code and data segments have fixed 64k limits; the
	 * transfer segment sizes are set at run time.
	 */
	/* 32-bit code */
	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(0x0092, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	/* 32-bit code */
	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* data */
	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(0x4092, 0, 0xffff),

	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	GDT_STACK_CANARY_INIT
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

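/*
 * Illustrative sketch, not part of the kernel build: how the 16-bit
 * "flags" argument of the GDT_ENTRY_INIT() initializers above is
 * commonly read.  Bits 0-7 are the descriptor access byte and bits
 * 12-15 are the granularity/size flags, so 0xc09b is a present, DPL-0,
 * 32-bit code segment with 4K granularity, while 0xa09b sets the L bit
 * for 64-bit code.  The helper name below is hypothetical.
 */
#if 0
static void gdt_flags_decode_example(unsigned short flags)
{
	unsigned char access = flags & 0xff;		/* P/DPL/S/type */
	unsigned char gran   = (flags >> 12) & 0xf;	/* G/DB/L/AVL   */

	pr_info("present=%u dpl=%u executable=%u\n",
		(access >> 7) & 1,			/* 0xc09b -> 1 */
		(access >> 5) & 3,			/* 0xc09b -> 0 */
		(access >> 3) & 1);			/* 0xc09b -> 1 */
	pr_info("4k-granularity=%u 32bit=%u 64bit=%u\n",
		(gran >> 3) & 1,			/* G:  0xc09b -> 1 */
		(gran >> 2) & 1,			/* DB: 0xc09b -> 1, 0xa09b -> 0 */
		(gran >> 1) & 1);			/* L:  0xa09b -> 1 */
}
#endif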
static int __init x86_mpx_setup(char *s)
{
	/* require an exact match without trailing characters */
	if (strlen(s))
		return 0;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_MPX))
		return 1;

	setup_clear_cpu_cap(X86_FEATURE_MPX);
	pr_info("nompx: Intel Memory Protection Extensions (MPX) disabled\n");
	return 1;
}
__setup("nompx", x86_mpx_setup);

#ifdef CONFIG_X86_64
static int __init x86_nopcid_setup(char *s)
{
	/* nopcid doesn't accept parameters */
	if (s)
		return -EINVAL;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_PCID))
		return 0;

	setup_clear_cpu_cap(X86_FEATURE_PCID);
	pr_info("nopcid: PCID feature disabled\n");
	return 0;
}
early_param("nopcid", x86_nopcid_setup);
#endif

static int __init x86_noinvpcid_setup(char *s)
{
	/* noinvpcid doesn't accept parameters */
	if (s)
		return -EINVAL;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_INVPCID))
		return 0;

	setup_clear_cpu_cap(X86_FEATURE_INVPCID);
	pr_info("noinvpcid: INVPCID feature disabled\n");
	return 0;
}
early_param("noinvpcid", x86_noinvpcid_setup);

#ifdef CONFIG_X86_32
static int cachesize_override = -1;
static int disable_x86_serial_nr = 1;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);

static int __init x86_sep_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_SEP);
	return 1;
}
__setup("nosep", x86_sep_setup);

/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	/*
	 * Cyrix and IDT cpus allow disabling of CPUID
	 * so the code below may return different results
	 * when it is executed before and after enabling
	 * the CPUID. Add "volatile" to not allow gcc to
	 * optimize the subsequent calls to this function.
	 */
	asm volatile ("pushfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "movl %0, %1	\n\t"
		      "xorl %2, %0	\n\t"
		      "pushl %0		\n\t"
		      "popfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "popfl		\n\t"

		      : "=&r" (f1), "=&r" (f2)
		      : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
int have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}
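/*
 * Illustrative sketch, not part of the kernel build: the probe above
 * works because EFLAGS bit 21 (X86_EFLAGS_ID, 0x00200000) can only be
 * toggled when the CPU implements CPUID.  With f1/f2 being the flags
 * values read back before and after the attempted toggle, the
 * expression below is non-zero exactly when the bit actually changed.
 * The values are made up for the example.
 */
#if 0
static int eflags_id_probe_example(void)
{
	unsigned long f1 = 0x00200246;	/* read back with ID set     */
	unsigned long f2 = 0x00000246;	/* read back with ID cleared */

	return ((f1 ^ f2) & X86_EFLAGS_ID) != 0;	/* -> 1: CPUID exists */
}
#endif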

static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
		return;

	/* Disable processor serial number: */

	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
	lo |= 0x200000;
	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);

	pr_notice("CPU serial number disabled.\n");
	clear_cpu_cap(c, X86_FEATURE_PN);

	/* Disabling the serial number may affect the cpuid level */
	c->cpuid_level = cpuid_eax(0);
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
static inline int flag_is_changeable_p(u32 flag)
{
	return 1;
}
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
#endif

static __init int setup_disable_smep(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMEP);
	/* Check for things that depend on SMEP being enabled: */
	check_mpx_erratum(&boot_cpu_data);
	return 1;
}
__setup("nosmep", setup_disable_smep);

static __always_inline void setup_smep(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_SMEP))
		cr4_set_bits(X86_CR4_SMEP);
}

static __init int setup_disable_smap(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMAP);
	return 1;
}
__setup("nosmap", setup_disable_smap);

static __always_inline void setup_smap(struct cpuinfo_x86 *c)
{
	unsigned long eflags = native_save_fl();

	/* This should have been cleared long ago */
	BUG_ON(eflags & X86_EFLAGS_AC);

	if (cpu_has(c, X86_FEATURE_SMAP)) {
#ifdef CONFIG_X86_SMAP
		cr4_set_bits(X86_CR4_SMAP);
#else
		cr4_clear_bits(X86_CR4_SMAP);
#endif
	}
}

static __always_inline void setup_umip(struct cpuinfo_x86 *c)
{
	/* Check the boot processor, plus build option for UMIP. */
	if (!cpu_feature_enabled(X86_FEATURE_UMIP))
		goto out;

	/* Check the current processor's cpuid bits. */
	if (!cpu_has(c, X86_FEATURE_UMIP))
		goto out;

	cr4_set_bits(X86_CR4_UMIP);

	pr_info_once("x86/cpu: User Mode Instruction Prevention (UMIP) activated\n");

	return;

out:
	/*
	 * Make sure UMIP is disabled in case it was enabled in a
	 * previous boot (e.g., via kexec).
	 */
	cr4_clear_bits(X86_CR4_UMIP);
}

/*
 * Protection Keys are not available in 32-bit mode.
 */
static bool pku_disabled;

static __always_inline void setup_pku(struct cpuinfo_x86 *c)
{
	/* check the boot processor, plus compile options for PKU: */
	if (!cpu_feature_enabled(X86_FEATURE_PKU))
		return;
	/* checks the actual processor's cpuid bits: */
	if (!cpu_has(c, X86_FEATURE_PKU))
		return;
	if (pku_disabled)
		return;

	cr4_set_bits(X86_CR4_PKE);
	/*
	 * Setting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
	 * cpuid bit to be set.  We need to ensure that we
	 * update that bit in this CPU's "cpu_info".
	 */
	get_cpu_cap(c);
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static __init int setup_disable_pku(char *arg)
{
	/*
	 * Do not clear the X86_FEATURE_PKU bit.  All of the
	 * runtime checks are against OSPKE so clearing the
	 * bit does nothing.
	 *
	 * This way, we will see "pku" in cpuinfo, but not
	 * "ospke", which is exactly what we want.  It shows
	 * that the CPU has PKU, but the OS has not enabled it.
	 * This happens to be exactly how a system would look
	 * if we disabled the config option.
	 */
	pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n");
	pku_disabled = true;
	return 1;
}
__setup("nopku", setup_disable_pku);
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */

/*
 * Some CPU features depend on higher CPUID levels, which may not always
 * be available due to CPUID level capping or broken virtualization
 * software.  Add those features to this table to auto-disable them.
 */
struct cpuid_dependent_feature {
	u32 feature;
	u32 level;
};

static const struct cpuid_dependent_feature
cpuid_dependent_features[] = {
	{ X86_FEATURE_MWAIT,		0x00000005 },
	{ X86_FEATURE_DCA,		0x00000009 },
	{ X86_FEATURE_XSAVE,		0x0000000d },
	{ 0, 0 }
};

static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{
	const struct cpuid_dependent_feature *df;

	for (df = cpuid_dependent_features; df->feature; df++) {

		if (!cpu_has(c, df->feature))
			continue;
		/*
		 * Note: cpuid_level is set to -1 if unavailable, but
		 * extended_cpuid_level is set to 0 if unavailable
		 * and the legitimate extended levels are all negative
		 * when signed; hence the weird messing around with
		 * signs here...
		 */
		if (!((s32)df->level < 0 ?
		     (u32)df->level > (u32)c->extended_cpuid_level :
		     (s32)df->level > (s32)c->cpuid_level))
			continue;

		clear_cpu_cap(c, df->feature);
		if (!warn)
			continue;

		pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
			x86_cap_flag(df->feature), df->level);
	}
}

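/*
 * Illustrative sketch, not part of the kernel build: what the signed/
 * unsigned comparison above does in practice.  Basic leaf numbers such
 * as 0x0000000d (the XSAVE dependency) are positive as s32 and are
 * checked against cpuid_level; extended leaves (0x80000005 and up) are
 * negative as s32 and are checked against extended_cpuid_level as u32.
 * The sample levels below are made up.
 */
#if 0
static bool cpuid_level_too_low_example(void)
{
	s32 level       = 0x0000000d;	/* dependency of X86_FEATURE_XSAVE */
	s32 cpuid_level = 0x0000000a;	/* e.g. a hypervisor capped leaf 0 */

	/* basic leaf: plain signed compare, 0xd > 0xa -> feature cleared */
	return level > cpuid_level;
}
#endif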
/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set it;
 * in particular, if CPUID levels 0x80000002..4 are supported, this
 * isn't used
 */

/* Look up CPU names by table lookup. */
static const char *table_lookup_model(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	const struct legacy_cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->legacy_models;

	while (info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
#endif
	return NULL;		/* Not found */
}

__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
__u32 cpu_caps_set[NCAPINTS + NBUGINTS];

void load_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	loadsegment(fs, __KERNEL_PERCPU);
#else
	__loadsegment_simple(gs, 0);
	wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
#endif
	load_stack_canary_segment();
}

#ifdef CONFIG_X86_32
/* The 32-bit entry code needs to find cpu_entry_area. */
DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
#endif

#ifdef CONFIG_X86_64
/*
 * Special IST stacks which the CPU switches to when it calls
 * an IST-marked descriptor entry.  Up to 7 stacks (hardware
 * limit), all of them are 4K, except the debug stack which
 * is 8K.
 */
static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
	  [0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
	  [DEBUG_STACK - 1]			= DEBUG_STKSZ
};
#endif

/* Load the original GDT from the per-cpu structure */
void load_direct_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_rw(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
EXPORT_SYMBOL_GPL(load_direct_gdt);

/* Load a fixmap remapping of the per-cpu GDT */
void load_fixmap_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_ro(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
EXPORT_SYMBOL_GPL(load_fixmap_gdt);

/*
 * Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one.
 */
void switch_to_new_gdt(int cpu)
{
	/* Load the original GDT */
	load_direct_gdt(cpu);
	/* Reload the per-cpu base */
	load_percpu_segment(cpu);
}

static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q, *s;

	if (c->extended_cpuid_level < 0x80000004)
		return;

	v = (unsigned int *)c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Trim whitespace */
	p = q = s = &c->x86_model_id[0];

	while (*p == ' ')
		p++;

	while (*p) {
		/* Note the last non-whitespace index */
		if (!isspace(*p))
			s = q;

		*q++ = *p++;
	}

	*(s + 1) = '\0';
}

void detect_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	c->x86_max_cores = 1;
	if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
		return;

	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
	if (eax & 0x1f)
		c->x86_max_cores = (eax >> 26) + 1;
}

void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ebx, ecx, edx, l2size;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
#endif
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
	l2size = ecx >> 16;

#ifdef CONFIG_X86_64
	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
	/* do processor-specific cache resizing */
	if (this_cpu->legacy_cache_size)
		l2size = this_cpu->legacy_cache_size(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */
#endif

	c->x86_cache_size = l2size;
}
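/*
 * Illustrative sketch, not part of the kernel build: decoding the
 * legacy cache leaves used above.  Leaf 0x80000005 reports the L1
 * sizes in KB in bits 31-24 of ECX (data) and EDX (instruction); leaf
 * 0x80000006 reports the L2 size in KB in bits 31-16 of ECX.  The
 * sample register values are made up.
 */
#if 0
static void cache_leaf_decode_example(void)
{
	u32 ecx5 = 0x20080140;		/* leaf 0x80000005, ECX */
	u32 edx5 = 0x20080140;		/* leaf 0x80000005, EDX */
	u32 ecx6 = 0x02006140;		/* leaf 0x80000006, ECX */

	u32 l1 = (ecx5 >> 24) + (edx5 >> 24);	/* 32 + 32 = 64 KB */
	u32 l2 = ecx6 >> 16;			/* 0x0200 = 512 KB */

	pr_info("L1: %u KB, L2: %u KB\n", l1, l2);
}
#endif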

u16 __read_mostly tlb_lli_4k[NR_INFO];
u16 __read_mostly tlb_lli_2m[NR_INFO];
u16 __read_mostly tlb_lli_4m[NR_INFO];
u16 __read_mostly tlb_lld_4k[NR_INFO];
u16 __read_mostly tlb_lld_2m[NR_INFO];
u16 __read_mostly tlb_lld_4m[NR_INFO];
u16 __read_mostly tlb_lld_1g[NR_INFO];

static void cpu_detect_tlb(struct cpuinfo_x86 *c)
{
	if (this_cpu->c_detect_tlb)
		this_cpu->c_detect_tlb(c);

	pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
		tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
		tlb_lli_4m[ENTRIES]);

	pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
		tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
		tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
}

int detect_ht_early(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32 eax, ebx, ecx, edx;

	if (!cpu_has(c, X86_FEATURE_HT))
		return -1;

	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		return -1;

	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
		return -1;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	smp_num_siblings = (ebx & 0xff0000) >> 16;
	if (smp_num_siblings == 1)
		pr_info_once("CPU0: Hyper-Threading is disabled\n");
#endif
	return 0;
}

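/*
 * Illustrative sketch, not part of the kernel build: the sibling count
 * read above comes from CPUID leaf 1, where EBX bits 23-16 report the
 * number of logical processors per physical package.  The sample EBX
 * value is made up.
 */
#if 0
static void ht_sibling_decode_example(void)
{
	u32 ebx = 0x00100800;			/* CPUID.1:EBX, example value */
	u32 siblings = (ebx & 0xff0000) >> 16;	/* -> 0x10 = 16 siblings      */

	pr_info("%u logical CPUs per package\n", siblings);
}
#endif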
void detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	int index_msb, core_bits;

	if (detect_ht_early(c) < 0)
		return;

	index_msb = get_count_order(smp_num_siblings);
	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);

	smp_num_siblings = smp_num_siblings / c->x86_max_cores;

	index_msb = get_count_order(smp_num_siblings);

	core_bits = get_count_order(c->x86_max_cores);

	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
				       ((1 << core_bits) - 1);
#endif
}

static void get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;
	int i;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (!cpu_devs[i])
			break;

		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {

			this_cpu = cpu_devs[i];
			c->x86_vendor = this_cpu->c_x86_vendor;
			return;
		}
	}

	pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
		    "CPU: Your system may be unstable.\n", v);

	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}

void cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	c->x86 = 4;
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;

		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86		= x86_family(tfms);
		c->x86_model	= x86_model(tfms);
		c->x86_stepping	= x86_stepping(tfms);

		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
		}
	}
}

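/*
 * Illustrative sketch, not part of the kernel build: how the CPUID
 * signature in EAX of leaf 1 maps to the x86/x86_model/x86_stepping
 * fields filled in above (this mirrors what the x86_family() and
 * x86_model() helpers do).  Example: 0x000306c3 -> family 6,
 * model 0x3c, stepping 3.
 */
#if 0
static void cpuid_signature_decode_example(void)
{
	u32 sig = 0x000306c3;			/* example signature */
	u32 family   = (sig >> 8) & 0xf;
	u32 model    = (sig >> 4) & 0xf;
	u32 stepping = sig & 0xf;

	if (family == 0xf)
		family += (sig >> 20) & 0xff;		/* extended family */
	if (family >= 6)
		model += ((sig >> 16) & 0xf) << 4;	/* extended model  */

	pr_info("family 0x%x model 0x%x stepping 0x%x\n",
		family, model, stepping);	/* -> 0x6, 0x3c, 0x3 */
}
#endif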
static void apply_forced_caps(struct cpuinfo_x86 *c)
{
	int i;

	for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
		c->x86_capability[i] &= ~cpu_caps_cleared[i];
		c->x86_capability[i] |= cpu_caps_set[i];
	}
}

static void init_speculation_control(struct cpuinfo_x86 *c)
{
	/*
	 * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
	 * and they also have a different bit for STIBP support. Also,
	 * a hypervisor might have set the individual AMD bits even on
	 * Intel CPUs, for finer-grained selection of what's available.
	 */
	if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
		set_cpu_cap(c, X86_FEATURE_IBRS);
		set_cpu_cap(c, X86_FEATURE_IBPB);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}

	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
		set_cpu_cap(c, X86_FEATURE_STIBP);

	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
	    cpu_has(c, X86_FEATURE_VIRT_SSBD))
		set_cpu_cap(c, X86_FEATURE_SSBD);

	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
		set_cpu_cap(c, X86_FEATURE_IBRS);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}

	if (cpu_has(c, X86_FEATURE_AMD_IBPB))
		set_cpu_cap(c, X86_FEATURE_IBPB);

	if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
		set_cpu_cap(c, X86_FEATURE_STIBP);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}

	if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
		set_cpu_cap(c, X86_FEATURE_SSBD);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
		clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
	}
}

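/*
 * Illustrative sketch, not part of the kernel build: the point of
 * funnelling the vendor-specific enumeration bits into the synthetic
 * X86_FEATURE_MSR_SPEC_CTRL/IBRS/IBPB/STIBP/SSBD flags above is that
 * later code can key off a single flag regardless of vendor.  This is
 * a simplified sketch, not how the mitigation code actually sequences
 * the MSR writes.
 */
#if 0
static void speculation_ctrl_usage_example(void)
{
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS);	/* enable IBRS   */

	if (boot_cpu_has(X86_FEATURE_IBPB))
		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);	/* branch barrier */
}
#endif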
void get_cpu_cap(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_1_ECX] = ecx;
		c->x86_capability[CPUID_1_EDX] = edx;
	}

	/* Thermal and Power Management Leaf: level 0x00000006 (eax) */
	if (c->cpuid_level >= 0x00000006)
		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);

	/* Additional Intel-defined flags: level 0x00000007 */
	if (c->cpuid_level >= 0x00000007) {
		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_7_0_EBX] = ebx;
		c->x86_capability[CPUID_7_ECX] = ecx;
		c->x86_capability[CPUID_7_EDX] = edx;
	}

	/* Extended state features: level 0x0000000d */
	if (c->cpuid_level >= 0x0000000d) {
		cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_D_1_EAX] = eax;
	}

	/* Additional Intel-defined flags: level 0x0000000F */
	if (c->cpuid_level >= 0x0000000F) {

		/* QoS sub-leaf, EAX=0Fh, ECX=0 */
		cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_F_0_EDX] = edx;

		if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
			/* will be overridden if occupancy monitoring exists */
			c->x86_cache_max_rmid = ebx;

			/* QoS sub-leaf, EAX=0Fh, ECX=1 */
			cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
			c->x86_capability[CPUID_F_1_EDX] = edx;

			if ((cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) ||
			      ((cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL)) ||
			       (cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)))) {
				c->x86_cache_max_rmid = ecx;
				c->x86_cache_occ_scale = ebx;
			}
		} else {
			c->x86_cache_max_rmid = -1;
			c->x86_cache_occ_scale = -1;
		}
	}

	/* AMD-defined flags: level 0x80000001 */
	eax = cpuid_eax(0x80000000);
	c->extended_cpuid_level = eax;

	if ((eax & 0xffff0000) == 0x80000000) {
		if (eax >= 0x80000001) {
			cpuid(0x80000001, &eax, &ebx, &ecx, &edx);

			c->x86_capability[CPUID_8000_0001_ECX] = ecx;
			c->x86_capability[CPUID_8000_0001_EDX] = edx;
		}
	}

	if (c->extended_cpuid_level >= 0x80000007) {
		cpuid(0x80000007, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_8000_0007_EBX] = ebx;
		c->x86_power = edx;
	}

	if (c->extended_cpuid_level >= 0x80000008) {
		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
	}

	if (c->extended_cpuid_level >= 0x8000000a)
		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);

	init_scattered_cpuid_features(c);
	init_speculation_control(c);

	/*
	 * Clear/Set all flags overridden by options, after probe.
	 * This needs to happen each time we re-probe, which may happen
	 * several times during CPU initialization.
	 */
	apply_forced_caps(c);
}

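/*
 * Illustrative sketch, not part of the kernel build: why the
 * (eax & 0xffff0000) == 0x80000000 test above is needed.  Leaf
 * 0x80000000 returns the highest supported extended leaf; very old
 * CPUs without extended leaves may echo unrelated data here, so the
 * 0x8000xxxx signature is checked before any extended leaf is trusted.
 * The helper name and sample value are hypothetical.
 */
#if 0
static bool has_extended_leaf_example(u32 wanted)
{
	u32 max_ext = cpuid_eax(0x80000000);	/* e.g. 0x8000001f    */

	if ((max_ext & 0xffff0000) != 0x80000000)
		return false;			/* no extended leaves */

	return max_ext >= wanted;		/* e.g. wanted = 0x80000008 */
}
#endif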
void get_cpu_address_sizes(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	if (c->extended_cpuid_level >= 0x80000008) {
		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);

		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
#ifdef CONFIG_X86_32
	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
		c->x86_phys_bits = 36;
#endif
	c->x86_cache_bits = c->x86_phys_bits;
}

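/*
 * Illustrative sketch, not part of the kernel build: decoding leaf
 * 0x80000008 as done above.  EAX bits 7-0 give the physical address
 * width and bits 15-8 the linear (virtual) width, so 0x3028 means
 * 48 virtual / 40 physical bits.  The sample value is made up.
 */
#if 0
static void address_size_decode_example(void)
{
	u32 eax = 0x00003028;			/* example leaf 0x80000008 EAX */

	pr_info("virt %u bits, phys %u bits\n",
		(eax >> 8) & 0xff, eax & 0xff);	/* -> 48, 40 */
}
#endif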
static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	int i;

	/*
	 * First of all, decide if this is a 486 or higher
	 * It's a 486 if we can modify the AC flag
	 */
	if (flag_is_changeable_p(X86_EFLAGS_AC))
		c->x86 = 4;
	else
		c->x86 = 3;

	for (i = 0; i < X86_VENDOR_NUM; i++)
		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
			c->x86_vendor_id[0] = 0;
			cpu_devs[i]->c_identify(c);
			if (c->x86_vendor_id[0]) {
				get_cpu_vendor(c);
				break;
			}
		}
#endif
}

#define NO_SPECULATION	BIT(0)
#define NO_MELTDOWN	BIT(1)
#define NO_SSB		BIT(2)
#define NO_L1TF		BIT(3)
#define NO_MDS		BIT(4)
#define MSBDS_ONLY	BIT(5)

#define VULNWL(_vendor, _family, _model, _whitelist)	\
	{ X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }

#define VULNWL_INTEL(model, whitelist)		\
	VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)

#define VULNWL_AMD(family, whitelist)		\
	VULNWL(AMD, family, X86_MODEL_ANY, whitelist)

#define VULNWL_HYGON(family, whitelist)		\
	VULNWL(HYGON, family, X86_MODEL_ANY, whitelist)

static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
	VULNWL(ANY,	4, X86_MODEL_ANY,	NO_SPECULATION),
	VULNWL(CENTAUR,	5, X86_MODEL_ANY,	NO_SPECULATION),
	VULNWL(INTEL,	5, X86_MODEL_ANY,	NO_SPECULATION),
	VULNWL(NSC,	5, X86_MODEL_ANY,	NO_SPECULATION),

	/* Intel Family 6 */
	VULNWL_INTEL(ATOM_SALTWELL,		NO_SPECULATION),
	VULNWL_INTEL(ATOM_SALTWELL_TABLET,	NO_SPECULATION),
	VULNWL_INTEL(ATOM_SALTWELL_MID,		NO_SPECULATION),
	VULNWL_INTEL(ATOM_BONNELL,		NO_SPECULATION),
	VULNWL_INTEL(ATOM_BONNELL_MID,		NO_SPECULATION),

	VULNWL_INTEL(ATOM_SILVERMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY),
	VULNWL_INTEL(ATOM_SILVERMONT_X,		NO_SSB | NO_L1TF | MSBDS_ONLY),
	VULNWL_INTEL(ATOM_SILVERMONT_MID,	NO_SSB | NO_L1TF | MSBDS_ONLY),
	VULNWL_INTEL(ATOM_AIRMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY),
	VULNWL_INTEL(XEON_PHI_KNL,		NO_SSB | NO_L1TF | MSBDS_ONLY),
	VULNWL_INTEL(XEON_PHI_KNM,		NO_SSB | NO_L1TF | MSBDS_ONLY),

	VULNWL_INTEL(CORE_YONAH,		NO_SSB),

	VULNWL_INTEL(ATOM_AIRMONT_MID,		NO_L1TF | MSBDS_ONLY),

	VULNWL_INTEL(ATOM_GOLDMONT,		NO_MDS | NO_L1TF),
	VULNWL_INTEL(ATOM_GOLDMONT_X,		NO_MDS | NO_L1TF),
	VULNWL_INTEL(ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF),

	/* AMD Family 0xf - 0x12 */
	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),

	/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS),
	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS),
	{}
};

static bool __init cpu_matches(unsigned long which)
{
	const struct x86_cpu_id *m = x86_match_cpu(cpu_vuln_whitelist);

	return m && !!(m->driver_data & which);
}

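/*
 * Illustrative sketch, not part of the kernel build: what one whitelist
 * entry above expands to and how cpu_matches() is then used.  E.g.
 * VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF) becomes an x86_cpu_id
 * entry { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT,
 * X86_FEATURE_ANY, NO_MDS | NO_L1TF }, so on such a CPU
 * cpu_matches(NO_MDS) is true and the MDS bug bit is not forced.
 */
#if 0
static void vulnwl_usage_example(void)
{
	if (!cpu_matches(NO_MDS))
		pr_info("MDS mitigation needed on this CPU\n");
}
#endif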
static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
	u64 ia32_cap = 0;

	if (cpu_matches(NO_SPECULATION))
		return;

	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
	setup_force_cpu_bug(X86_BUG_SPECTRE_V2);

	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

	if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
	   !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);

	if (ia32_cap & ARCH_CAP_IBRS_ALL)
		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);

	if (!cpu_matches(NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO)) {
		setup_force_cpu_bug(X86_BUG_MDS);
		if (cpu_matches(MSBDS_ONLY))
			setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
	}

	if (cpu_matches(NO_MELTDOWN))
		return;

	/* Rogue Data Cache Load? No! */
	if (ia32_cap & ARCH_CAP_RDCL_NO)
		return;

	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);

	if (cpu_matches(NO_L1TF))
		return;

	setup_force_cpu_bug(X86_BUG_L1TF);
}

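/*
 * Illustrative sketch, not part of the kernel build: reading the
 * IA32_ARCH_CAPABILITIES "not affected" bits the way cpu_set_bug_bits()
 * does above.  RDCL_NO, SSB_NO and MDS_NO tell the kernel the hardware
 * is not affected by Meltdown, Speculative Store Bypass and MDS
 * respectively; IBRS_ALL advertises enhanced IBRS.
 */
#if 0
static void arch_capabilities_example(struct cpuinfo_x86 *c)
{
	u64 ia32_cap = 0;

	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

	pr_info("RDCL_NO=%d SSB_NO=%d MDS_NO=%d IBRS_ALL=%d\n",
		!!(ia32_cap & ARCH_CAP_RDCL_NO),
		!!(ia32_cap & ARCH_CAP_SSB_NO),
		!!(ia32_cap & ARCH_CAP_MDS_NO),
		!!(ia32_cap & ARCH_CAP_IBRS_ALL));
}
#endif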
/*
 * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
 * unfortunately, that's not true in practice because of early VIA
 * chips and (more importantly) broken virtualizers that are not easy
 * to detect.  In the latter case it doesn't even *fail* reliably, so
 * probing for it doesn't even work.  Disable it completely on 32-bit
 * unless we can find a reliable way to detect all the broken cases.
 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
 */
static void detect_nopl(void)
{
#ifdef CONFIG_X86_32
	setup_clear_cpu_cap(X86_FEATURE_NOPL);
#else
	setup_force_cpu_cap(X86_FEATURE_NOPL);
#endif
}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the boot CPU.  Don't add code
 * here that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;

	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* cyrix could have cpuid enabled via c_identify() */
	if (have_cpuid_p()) {
		cpu_detect(c);
		get_cpu_vendor(c);
		get_cpu_cap(c);
		get_cpu_address_sizes(c);
		setup_force_cpu_cap(X86_FEATURE_CPUID);

		if (this_cpu->c_early_init)
			this_cpu->c_early_init(c);

		c->cpu_index = 0;
		filter_cpuid_features(c, false);

		if (this_cpu->c_bsp_init)
			this_cpu->c_bsp_init(c);
	} else {
		setup_clear_cpu_cap(X86_FEATURE_CPUID);
	}

	setup_force_cpu_cap(X86_FEATURE_ALWAYS);

	cpu_set_bug_bits(c);

	fpu__init_system(c);

#ifdef CONFIG_X86_32
	/*
	 * Regardless of whether PCID is enumerated, the SDM says
	 * that it can't be enabled in 32-bit mode.
	 */
	setup_clear_cpu_cap(X86_FEATURE_PCID);
#endif

	/*
	 * Later in the boot process pgtable_l5_enabled() relies on
	 * cpu_feature_enabled(X86_FEATURE_LA57).  If 5-level paging is not
	 * enabled by this point we need to clear the feature bit to avoid
	 * false positives at the later stage.
	 *
	 * pgtable_l5_enabled() can be false here for several reasons:
	 *  - 5-level paging is disabled at compile time;
	 *  - it's a 32-bit kernel;
	 *  - the machine doesn't support 5-level paging;
	 *  - the user specified 'no5lvl' on the kernel command line.
	 */
	if (!pgtable_l5_enabled())
Shutemov setup_clear_cpu_cap(X86_FEATURE_LA57); 11528990cac6SPavel Tatashin 11539b3661cdSBorislav Petkov detect_nopl(); 1154f7627e25SThomas Gleixner } 1155f7627e25SThomas Gleixner 11569d31d35bSYinghai Lu void __init early_cpu_init(void) 11579d31d35bSYinghai Lu { 115802dde8b4SJan Beulich const struct cpu_dev *const *cdev; 115910a434fcSYinghai Lu int count = 0; 11609d31d35bSYinghai Lu 1161ac23f253SJan Beulich #ifdef CONFIG_PROCESSOR_SELECT 11621b74dde7SChen Yucong pr_info("KERNEL supported cpus:\n"); 116331c997caSIngo Molnar #endif 116431c997caSIngo Molnar 116510a434fcSYinghai Lu for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) { 116602dde8b4SJan Beulich const struct cpu_dev *cpudev = *cdev; 11679d31d35bSYinghai Lu 116810a434fcSYinghai Lu if (count >= X86_VENDOR_NUM) 116910a434fcSYinghai Lu break; 117010a434fcSYinghai Lu cpu_devs[count] = cpudev; 117110a434fcSYinghai Lu count++; 117210a434fcSYinghai Lu 1173ac23f253SJan Beulich #ifdef CONFIG_PROCESSOR_SELECT 117431c997caSIngo Molnar { 117531c997caSIngo Molnar unsigned int j; 117631c997caSIngo Molnar 117710a434fcSYinghai Lu for (j = 0; j < 2; j++) { 117810a434fcSYinghai Lu if (!cpudev->c_ident[j]) 117910a434fcSYinghai Lu continue; 11801b74dde7SChen Yucong pr_info(" %s %s\n", cpudev->c_vendor, 118110a434fcSYinghai Lu cpudev->c_ident[j]); 118210a434fcSYinghai Lu } 118310a434fcSYinghai Lu } 11840388423dSDave Jones #endif 118531c997caSIngo Molnar } 11869d31d35bSYinghai Lu early_identify_cpu(&boot_cpu_data); 1187f7627e25SThomas Gleixner } 1188f7627e25SThomas Gleixner 11897a5d6704SAndy Lutomirski static void detect_null_seg_behavior(struct cpuinfo_x86 *c) 11907a5d6704SAndy Lutomirski { 11917a5d6704SAndy Lutomirski #ifdef CONFIG_X86_64 119258a5aac5SAndy Lutomirski /* 11937a5d6704SAndy Lutomirski * Empirically, writing zero to a segment selector on AMD does 11947a5d6704SAndy Lutomirski * not clear the base, whereas writing zero to a segment 11957a5d6704SAndy Lutomirski * selector on Intel does clear the base. Intel's behavior 11967a5d6704SAndy Lutomirski * allows slightly faster context switches in the common case 11977a5d6704SAndy Lutomirski * where GS is unused by the prev and next threads. 119858a5aac5SAndy Lutomirski * 11997a5d6704SAndy Lutomirski * Since neither vendor documents this anywhere that I can see, 12007a5d6704SAndy Lutomirski * detect it directly instead of hardcoding the choice by 12017a5d6704SAndy Lutomirski * vendor. 12027a5d6704SAndy Lutomirski * 12037a5d6704SAndy Lutomirski * I've designated AMD's behavior as the "bug" because it's 12047a5d6704SAndy Lutomirski * counterintuitive and less friendly. 
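 * The probe below saves MSR_FS_BASE, writes a non-zero sentinel to it,
 * loads a null selector into FS and reads the MSR back: if the sentinel
 * survives, writing zero to the selector did not clear the base and
 * X86_BUG_NULL_SEG is set. The original base is restored afterwards.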
120558a5aac5SAndy Lutomirski */ 12067a5d6704SAndy Lutomirski 12077a5d6704SAndy Lutomirski unsigned long old_base, tmp; 12087a5d6704SAndy Lutomirski rdmsrl(MSR_FS_BASE, old_base); 12097a5d6704SAndy Lutomirski wrmsrl(MSR_FS_BASE, 1); 12107a5d6704SAndy Lutomirski loadsegment(fs, 0); 12117a5d6704SAndy Lutomirski rdmsrl(MSR_FS_BASE, tmp); 12127a5d6704SAndy Lutomirski if (tmp != 0) 12137a5d6704SAndy Lutomirski set_cpu_bug(c, X86_BUG_NULL_SEG); 12147a5d6704SAndy Lutomirski wrmsrl(MSR_FS_BASE, old_base); 121558a5aac5SAndy Lutomirski #endif 1216f7627e25SThomas Gleixner } 1217f7627e25SThomas Gleixner 1218148f9bb8SPaul Gortmaker static void generic_identify(struct cpuinfo_x86 *c) 1219f7627e25SThomas Gleixner { 12203da99c97SYinghai Lu c->extended_cpuid_level = 0; 1221f7627e25SThomas Gleixner 1222aef93c8bSYinghai Lu if (!have_cpuid_p()) 1223aef93c8bSYinghai Lu identify_cpu_without_cpuid(c); 1224f7627e25SThomas Gleixner 1225aef93c8bSYinghai Lu /* cyrix could have cpuid enabled via c_identify()*/ 1226a9853dd6SIngo Molnar if (!have_cpuid_p()) 1227aef93c8bSYinghai Lu return; 1228aef93c8bSYinghai Lu 12293da99c97SYinghai Lu cpu_detect(c); 12303da99c97SYinghai Lu 12313da99c97SYinghai Lu get_cpu_vendor(c); 12323da99c97SYinghai Lu 12333da99c97SYinghai Lu get_cpu_cap(c); 12343da99c97SYinghai Lu 1235d94a155cSKirill A. Shutemov get_cpu_address_sizes(c); 1236d94a155cSKirill A. Shutemov 1237f7627e25SThomas Gleixner if (c->cpuid_level >= 0x00000001) { 12383da99c97SYinghai Lu c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF; 1239b89d3b3eSYinghai Lu #ifdef CONFIG_X86_32 1240c8e56d20SBorislav Petkov # ifdef CONFIG_SMP 1241cb8cc442SIngo Molnar c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); 1242f7627e25SThomas Gleixner # else 124301aaea1aSYinghai Lu c->apicid = c->initial_apicid; 1244f7627e25SThomas Gleixner # endif 1245b89d3b3eSYinghai Lu #endif 1246b89d3b3eSYinghai Lu c->phys_proc_id = c->initial_apicid; 1247f7627e25SThomas Gleixner } 1248f7627e25SThomas Gleixner 1249f7627e25SThomas Gleixner get_model_name(c); /* Default name */ 1250f7627e25SThomas Gleixner 12517a5d6704SAndy Lutomirski detect_null_seg_behavior(c); 12520230bb03SAndy Lutomirski 12530230bb03SAndy Lutomirski /* 12540230bb03SAndy Lutomirski * ESPFIX is a strange bug. All real CPUs have it. Paravirt 12550230bb03SAndy Lutomirski * systems that run Linux at CPL > 0 may or may not have the 12560230bb03SAndy Lutomirski * issue, but, even if they have the issue, there's absolutely 12570230bb03SAndy Lutomirski * nothing we can do about it because we can't use the real IRET 12580230bb03SAndy Lutomirski * instruction. 12590230bb03SAndy Lutomirski * 12600230bb03SAndy Lutomirski * NB: For the time being, only 32-bit kernels support 12610230bb03SAndy Lutomirski * X86_BUG_ESPFIX as such. 64-bit kernels directly choose 12620230bb03SAndy Lutomirski * whether to apply espfix using paravirt hooks. If any 12630230bb03SAndy Lutomirski * non-paravirt system ever shows up that does *not* have the 12640230bb03SAndy Lutomirski * ESPFIX issue, we can change this. 
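 * The check below sets the bug only when the active pv_ops.cpu.iret is
 * native_iret; with a non-native paravirt IRET the espfix fixup could
 * not be applied anyway.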
12650230bb03SAndy Lutomirski */ 12660230bb03SAndy Lutomirski #ifdef CONFIG_X86_32 12679bad5658SJuergen Gross # ifdef CONFIG_PARAVIRT_XXL 12680230bb03SAndy Lutomirski do { 12690230bb03SAndy Lutomirski extern void native_iret(void); 12705c83511bSJuergen Gross if (pv_ops.cpu.iret == native_iret) 12710230bb03SAndy Lutomirski set_cpu_bug(c, X86_BUG_ESPFIX); 12720230bb03SAndy Lutomirski } while (0); 12730230bb03SAndy Lutomirski # else 12740230bb03SAndy Lutomirski set_cpu_bug(c, X86_BUG_ESPFIX); 12750230bb03SAndy Lutomirski # endif 12760230bb03SAndy Lutomirski #endif 1277f7627e25SThomas Gleixner } 1278f7627e25SThomas Gleixner 1279cbc82b17SPeter P Waskiewicz Jr static void x86_init_cache_qos(struct cpuinfo_x86 *c) 1280cbc82b17SPeter P Waskiewicz Jr { 1281cbc82b17SPeter P Waskiewicz Jr /* 1282cbc82b17SPeter P Waskiewicz Jr * The heavy lifting of max_rmid and cache_occ_scale are handled 1283cbc82b17SPeter P Waskiewicz Jr * in get_cpu_cap(). Here we just set the max_rmid for the boot_cpu 1284cbc82b17SPeter P Waskiewicz Jr * in case CQM bits really aren't there in this CPU. 1285cbc82b17SPeter P Waskiewicz Jr */ 1286cbc82b17SPeter P Waskiewicz Jr if (c != &boot_cpu_data) { 1287cbc82b17SPeter P Waskiewicz Jr boot_cpu_data.x86_cache_max_rmid = 1288cbc82b17SPeter P Waskiewicz Jr min(boot_cpu_data.x86_cache_max_rmid, 1289cbc82b17SPeter P Waskiewicz Jr c->x86_cache_max_rmid); 1290cbc82b17SPeter P Waskiewicz Jr } 1291cbc82b17SPeter P Waskiewicz Jr } 1292cbc82b17SPeter P Waskiewicz Jr 1293f7627e25SThomas Gleixner /* 12949d85eb91SThomas Gleixner * Validate that ACPI/mptables have the same information about the 12959d85eb91SThomas Gleixner * effective APIC id and update the package map. 1296d49597fdSThomas Gleixner */ 12979d85eb91SThomas Gleixner static void validate_apic_and_package_id(struct cpuinfo_x86 *c) 1298d49597fdSThomas Gleixner { 1299d49597fdSThomas Gleixner #ifdef CONFIG_SMP 13009d85eb91SThomas Gleixner unsigned int apicid, cpu = smp_processor_id(); 1301d49597fdSThomas Gleixner 1302d49597fdSThomas Gleixner apicid = apic->cpu_present_to_apicid(cpu); 1303d49597fdSThomas Gleixner 13049d85eb91SThomas Gleixner if (apicid != c->apicid) { 13059d85eb91SThomas Gleixner pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n", 1306d49597fdSThomas Gleixner cpu, apicid, c->initial_apicid); 1307d49597fdSThomas Gleixner } 13089d85eb91SThomas Gleixner BUG_ON(topology_update_package_map(c->phys_proc_id, cpu)); 1309d49597fdSThomas Gleixner #else 1310d49597fdSThomas Gleixner c->logical_proc_id = 0; 1311d49597fdSThomas Gleixner #endif 1312d49597fdSThomas Gleixner } 1313d49597fdSThomas Gleixner 1314d49597fdSThomas Gleixner /* 1315f7627e25SThomas Gleixner * This does the hard work of actually picking apart the CPU stuff... 1316f7627e25SThomas Gleixner */ 1317148f9bb8SPaul Gortmaker static void identify_cpu(struct cpuinfo_x86 *c) 1318f7627e25SThomas Gleixner { 1319f7627e25SThomas Gleixner int i; 1320f7627e25SThomas Gleixner 1321f7627e25SThomas Gleixner c->loops_per_jiffy = loops_per_jiffy; 132224dbc600SGustavo A. R. Silva c->x86_cache_size = 0; 1323f7627e25SThomas Gleixner c->x86_vendor = X86_VENDOR_UNKNOWN; 1324b399151cSJia Zhang c->x86_model = c->x86_stepping = 0; /* So far unknown... 
*/ 1325f7627e25SThomas Gleixner c->x86_vendor_id[0] = '\0'; /* Unset */ 1326f7627e25SThomas Gleixner c->x86_model_id[0] = '\0'; /* Unset */ 1327f7627e25SThomas Gleixner c->x86_max_cores = 1; 1328102bbe3aSYinghai Lu c->x86_coreid_bits = 0; 132979a8b9aaSBorislav Petkov c->cu_id = 0xff; 133011fdd252SYinghai Lu #ifdef CONFIG_X86_64 1331102bbe3aSYinghai Lu c->x86_clflush_size = 64; 133213c6c532SJan Beulich c->x86_phys_bits = 36; 133313c6c532SJan Beulich c->x86_virt_bits = 48; 1334102bbe3aSYinghai Lu #else 1335102bbe3aSYinghai Lu c->cpuid_level = -1; /* CPUID not detected */ 1336f7627e25SThomas Gleixner c->x86_clflush_size = 32; 133713c6c532SJan Beulich c->x86_phys_bits = 32; 133813c6c532SJan Beulich c->x86_virt_bits = 32; 1339102bbe3aSYinghai Lu #endif 1340102bbe3aSYinghai Lu c->x86_cache_alignment = c->x86_clflush_size; 13410e96f31eSJordan Borgner memset(&c->x86_capability, 0, sizeof(c->x86_capability)); 1342f7627e25SThomas Gleixner 1343f7627e25SThomas Gleixner generic_identify(c); 1344f7627e25SThomas Gleixner 13453898534dSAndi Kleen if (this_cpu->c_identify) 1346f7627e25SThomas Gleixner this_cpu->c_identify(c); 1347f7627e25SThomas Gleixner 13486a6256f9SAdam Buchbinder /* Clear/Set all flags overridden by options, after probe */ 13498bf1ebcaSAndy Lutomirski apply_forced_caps(c); 13502759c328SYinghai Lu 1351102bbe3aSYinghai Lu #ifdef CONFIG_X86_64 1352cb8cc442SIngo Molnar c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); 1353102bbe3aSYinghai Lu #endif 1354102bbe3aSYinghai Lu 1355f7627e25SThomas Gleixner /* 1356f7627e25SThomas Gleixner * Vendor-specific initialization. In this section we 1357f7627e25SThomas Gleixner * canonicalize the feature flags, meaning if there are 1358f7627e25SThomas Gleixner * features a certain CPU supports which CPUID doesn't 1359f7627e25SThomas Gleixner * tell us, CPUID claiming incorrect flags, or other bugs, 1360f7627e25SThomas Gleixner * we handle them here. 1361f7627e25SThomas Gleixner * 1362f7627e25SThomas Gleixner * At the end of this section, c->x86_capability better 1363f7627e25SThomas Gleixner * indicate the features this CPU genuinely supports! 1364f7627e25SThomas Gleixner */ 1365f7627e25SThomas Gleixner if (this_cpu->c_init) 1366f7627e25SThomas Gleixner this_cpu->c_init(c); 1367f7627e25SThomas Gleixner 1368f7627e25SThomas Gleixner /* Disable the PN if appropriate */ 1369f7627e25SThomas Gleixner squash_the_stupid_serial_number(c); 1370f7627e25SThomas Gleixner 1371aa35f896SRicardo Neri /* Set up SMEP/SMAP/UMIP */ 1372b2cc2a07SH. Peter Anvin setup_smep(c); 1373b2cc2a07SH. Peter Anvin setup_smap(c); 1374aa35f896SRicardo Neri setup_umip(c); 1375b2cc2a07SH. Peter Anvin 1376f7627e25SThomas Gleixner /* 13770f3fa48aSIngo Molnar * The vendor-specific functions might have changed features. 13780f3fa48aSIngo Molnar * Now we do "generic changes." 1379f7627e25SThomas Gleixner */ 1380f7627e25SThomas Gleixner 1381b38b0665SH. Peter Anvin /* Filter out anything that depends on CPUID levels we don't have */ 1382b38b0665SH. Peter Anvin filter_cpuid_features(c, true); 1383b38b0665SH. Peter Anvin 1384f7627e25SThomas Gleixner /* If the model name is still unset, do table lookup. */ 1385f7627e25SThomas Gleixner if (!c->x86_model_id[0]) { 138602dde8b4SJan Beulich const char *p; 1387f7627e25SThomas Gleixner p = table_lookup_model(c); 1388f7627e25SThomas Gleixner if (p) 1389f7627e25SThomas Gleixner strcpy(c->x86_model_id, p); 1390f7627e25SThomas Gleixner else 1391f7627e25SThomas Gleixner /* Last resort... 
*/ 1392f7627e25SThomas Gleixner sprintf(c->x86_model_id, "%02x/%02x", 1393f7627e25SThomas Gleixner c->x86, c->x86_model); 1394f7627e25SThomas Gleixner } 1395f7627e25SThomas Gleixner 1396102bbe3aSYinghai Lu #ifdef CONFIG_X86_64 1397102bbe3aSYinghai Lu detect_ht(c); 1398102bbe3aSYinghai Lu #endif 1399102bbe3aSYinghai Lu 140049d859d7SH. Peter Anvin x86_init_rdrand(c); 1401cbc82b17SPeter P Waskiewicz Jr x86_init_cache_qos(c); 140206976945SDave Hansen setup_pku(c); 14033e0c3737SYinghai Lu 14043e0c3737SYinghai Lu /* 14056a6256f9SAdam Buchbinder * Clear/Set all flags overridden by options, need do it 14063e0c3737SYinghai Lu * before following smp all cpus cap AND. 14073e0c3737SYinghai Lu */ 14088bf1ebcaSAndy Lutomirski apply_forced_caps(c); 14093e0c3737SYinghai Lu 1410f7627e25SThomas Gleixner /* 1411f7627e25SThomas Gleixner * On SMP, boot_cpu_data holds the common feature set between 1412f7627e25SThomas Gleixner * all CPUs; so make sure that we indicate which features are 1413f7627e25SThomas Gleixner * common between the CPUs. The first time this routine gets 1414f7627e25SThomas Gleixner * executed, c == &boot_cpu_data. 1415f7627e25SThomas Gleixner */ 1416f7627e25SThomas Gleixner if (c != &boot_cpu_data) { 1417f7627e25SThomas Gleixner /* AND the already accumulated flags with these */ 1418f7627e25SThomas Gleixner for (i = 0; i < NCAPINTS; i++) 1419f7627e25SThomas Gleixner boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; 142065fc985bSBorislav Petkov 142165fc985bSBorislav Petkov /* OR, i.e. replicate the bug flags */ 142265fc985bSBorislav Petkov for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++) 142365fc985bSBorislav Petkov c->x86_capability[i] |= boot_cpu_data.x86_capability[i]; 1424f7627e25SThomas Gleixner } 1425f7627e25SThomas Gleixner 1426f7627e25SThomas Gleixner /* Init Machine Check Exception if available. */ 14275e09954aSBorislav Petkov mcheck_cpu_init(c); 142830d432dfSAndi Kleen 142930d432dfSAndi Kleen select_idle_routine(c); 1430102bbe3aSYinghai Lu 1431de2d9445STejun Heo #ifdef CONFIG_NUMA 1432102bbe3aSYinghai Lu numa_add_cpu(smp_processor_id()); 1433102bbe3aSYinghai Lu #endif 1434f7627e25SThomas Gleixner } 1435f7627e25SThomas Gleixner 14368b6c0ab1SIngo Molnar /* 14378b6c0ab1SIngo Molnar * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions 14388b6c0ab1SIngo Molnar * on 32-bit kernels: 14398b6c0ab1SIngo Molnar */ 1440cfda7bb9SAndy Lutomirski #ifdef CONFIG_X86_32 1441cfda7bb9SAndy Lutomirski void enable_sep_cpu(void) 1442cfda7bb9SAndy Lutomirski { 14438b6c0ab1SIngo Molnar struct tss_struct *tss; 14448b6c0ab1SIngo Molnar int cpu; 1445cfda7bb9SAndy Lutomirski 1446b3edfda4SBorislav Petkov if (!boot_cpu_has(X86_FEATURE_SEP)) 1447b3edfda4SBorislav Petkov return; 1448b3edfda4SBorislav Petkov 14498b6c0ab1SIngo Molnar cpu = get_cpu(); 1450c482feefSAndy Lutomirski tss = &per_cpu(cpu_tss_rw, cpu); 14518b6c0ab1SIngo Molnar 14528b6c0ab1SIngo Molnar /* 1453cf9328ccSAndy Lutomirski * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field -- 1454cf9328ccSAndy Lutomirski * see the big comment in struct x86_hw_tss's definition. 
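 * The three wrmsr() calls below program MSR_IA32_SYSENTER_CS with the
 * cached __KERNEL_CS, point MSR_IA32_SYSENTER_ESP at the top of this
 * CPU's entry stack and aim MSR_IA32_SYSENTER_EIP at entry_SYSENTER_32.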
14558b6c0ab1SIngo Molnar */ 1456cfda7bb9SAndy Lutomirski 1457cfda7bb9SAndy Lutomirski tss->x86_tss.ss1 = __KERNEL_CS; 14588b6c0ab1SIngo Molnar wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0); 14594fe2d8b1SDave Hansen wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0); 14604c8cd0c5SIngo Molnar wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0); 14618b6c0ab1SIngo Molnar 1462cfda7bb9SAndy Lutomirski put_cpu(); 1463cfda7bb9SAndy Lutomirski } 1464e04d645fSGlauber Costa #endif 1465e04d645fSGlauber Costa 1466f7627e25SThomas Gleixner void __init identify_boot_cpu(void) 1467f7627e25SThomas Gleixner { 1468f7627e25SThomas Gleixner identify_cpu(&boot_cpu_data); 1469102bbe3aSYinghai Lu #ifdef CONFIG_X86_32 1470f7627e25SThomas Gleixner sysenter_setup(); 1471f7627e25SThomas Gleixner enable_sep_cpu(); 1472102bbe3aSYinghai Lu #endif 1473e0ba94f1SAlex Shi cpu_detect_tlb(&boot_cpu_data); 1474f7627e25SThomas Gleixner } 1475f7627e25SThomas Gleixner 1476148f9bb8SPaul Gortmaker void identify_secondary_cpu(struct cpuinfo_x86 *c) 1477f7627e25SThomas Gleixner { 1478f7627e25SThomas Gleixner BUG_ON(c == &boot_cpu_data); 1479f7627e25SThomas Gleixner identify_cpu(c); 1480102bbe3aSYinghai Lu #ifdef CONFIG_X86_32 1481f7627e25SThomas Gleixner enable_sep_cpu(); 1482102bbe3aSYinghai Lu #endif 1483f7627e25SThomas Gleixner mtrr_ap_init(); 14849d85eb91SThomas Gleixner validate_apic_and_package_id(c); 148577243971SKonrad Rzeszutek Wilk x86_spec_ctrl_setup_ap(); 1486f7627e25SThomas Gleixner } 1487f7627e25SThomas Gleixner 1488191679fdSAndi Kleen static __init int setup_noclflush(char *arg) 1489191679fdSAndi Kleen { 1490840d2830SH. Peter Anvin setup_clear_cpu_cap(X86_FEATURE_CLFLUSH); 1491da4aaa7dSH. Peter Anvin setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT); 1492191679fdSAndi Kleen return 1; 1493191679fdSAndi Kleen } 1494191679fdSAndi Kleen __setup("noclflush", setup_noclflush); 1495191679fdSAndi Kleen 1496148f9bb8SPaul Gortmaker void print_cpu_info(struct cpuinfo_x86 *c) 1497f7627e25SThomas Gleixner { 149802dde8b4SJan Beulich const char *vendor = NULL; 1499f7627e25SThomas Gleixner 15000f3fa48aSIngo Molnar if (c->x86_vendor < X86_VENDOR_NUM) { 1501f7627e25SThomas Gleixner vendor = this_cpu->c_vendor; 15020f3fa48aSIngo Molnar } else { 15030f3fa48aSIngo Molnar if (c->cpuid_level >= 0) 1504f7627e25SThomas Gleixner vendor = c->x86_vendor_id; 15050f3fa48aSIngo Molnar } 1506f7627e25SThomas Gleixner 1507bd32a8cfSYinghai Lu if (vendor && !strstr(c->x86_model_id, vendor)) 15081b74dde7SChen Yucong pr_cont("%s ", vendor); 1509f7627e25SThomas Gleixner 15109d31d35bSYinghai Lu if (c->x86_model_id[0]) 15111b74dde7SChen Yucong pr_cont("%s", c->x86_model_id); 1512f7627e25SThomas Gleixner else 15131b74dde7SChen Yucong pr_cont("%d86", c->x86); 1514f7627e25SThomas Gleixner 15151b74dde7SChen Yucong pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model); 1516924e101aSBorislav Petkov 1517b399151cSJia Zhang if (c->x86_stepping || c->cpuid_level >= 0) 1518b399151cSJia Zhang pr_cont(", stepping: 0x%x)\n", c->x86_stepping); 1519f7627e25SThomas Gleixner else 15201b74dde7SChen Yucong pr_cont(")\n"); 1521f7627e25SThomas Gleixner } 1522f7627e25SThomas Gleixner 15230c2a3913SAndi Kleen /* 15240c2a3913SAndi Kleen * clearcpuid= was already parsed in fpu__init_parse_early_param. 15250c2a3913SAndi Kleen * But we need to keep a dummy __setup around otherwise it would 15260c2a3913SAndi Kleen * show up as an environment variable for init. 
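 * Returning 1 from the dummy handler below marks the option as consumed
 * so it is not forwarded to init.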
15270c2a3913SAndi Kleen */ 15280c2a3913SAndi Kleen static __init int setup_clearcpuid(char *arg) 1529ac72e788SAndi Kleen { 1530ac72e788SAndi Kleen return 1; 1531ac72e788SAndi Kleen } 15320c2a3913SAndi Kleen __setup("clearcpuid=", setup_clearcpuid); 1533ac72e788SAndi Kleen 1534d5494d4fSYinghai Lu #ifdef CONFIG_X86_64 1535947e76cdSBrian Gerst DEFINE_PER_CPU_FIRST(union irq_stack_union, 1536277d5b40SAndi Kleen irq_stack_union) __aligned(PAGE_SIZE) __visible; 153735060ed6SVitaly Kuznetsov EXPORT_PER_CPU_SYMBOL_GPL(irq_stack_union); 15380f3fa48aSIngo Molnar 1539bdf977b3STejun Heo /* 1540a7fcf28dSAndy Lutomirski * The following percpu variables are hot. Align current_task to 1541a7fcf28dSAndy Lutomirski * cacheline size such that they fall in the same cacheline. 1542bdf977b3STejun Heo */ 1543bdf977b3STejun Heo DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned = 1544bdf977b3STejun Heo &init_task; 1545bdf977b3STejun Heo EXPORT_PER_CPU_SYMBOL(current_task); 1546d5494d4fSYinghai Lu 1547bdf977b3STejun Heo DEFINE_PER_CPU(char *, irq_stack_ptr) = 15484950d6d4SJosh Poimboeuf init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE; 1549bdf977b3STejun Heo 1550277d5b40SAndi Kleen DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1; 1551d5494d4fSYinghai Lu 1552c2daa3beSPeter Zijlstra DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT; 1553c2daa3beSPeter Zijlstra EXPORT_PER_CPU_SYMBOL(__preempt_count); 1554c2daa3beSPeter Zijlstra 1555d5494d4fSYinghai Lu /* May not be marked __init: used by software suspend */ 1556d5494d4fSYinghai Lu void syscall_init(void) 1557d5494d4fSYinghai Lu { 155831ac34caSBorislav Petkov wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS); 15598d4b0678SThomas Gleixner wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64); 1560d56fe4bfSIngo Molnar 1561d56fe4bfSIngo Molnar #ifdef CONFIG_IA32_EMULATION 156247edb651SAndy Lutomirski wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat); 1563a76c7f46SDenys Vlasenko /* 1564487d1edbSDenys Vlasenko * This only works on Intel CPUs. 1565487d1edbSDenys Vlasenko * On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP. 1566487d1edbSDenys Vlasenko * This does not cause SYSENTER to jump to the wrong location, because 1567487d1edbSDenys Vlasenko * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit). 1568a76c7f46SDenys Vlasenko */ 1569a76c7f46SDenys Vlasenko wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS); 15708e6b65a1Szhong jiang wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 15718e6b65a1Szhong jiang (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1)); 15724c8cd0c5SIngo Molnar wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat); 1573d56fe4bfSIngo Molnar #else 157447edb651SAndy Lutomirski wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret); 15756b51311cSBorislav Petkov wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG); 1576d56fe4bfSIngo Molnar wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); 1577d56fe4bfSIngo Molnar wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL); 1578d5494d4fSYinghai Lu #endif 1579d5494d4fSYinghai Lu 1580d5494d4fSYinghai Lu /* Flags to clear on syscall */ 1581d5494d4fSYinghai Lu wrmsrl(MSR_SYSCALL_MASK, 158263bcff2aSH. 
Peter Anvin X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF| 15838c7aa698SAndy Lutomirski X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT); 1584d5494d4fSYinghai Lu } 1585d5494d4fSYinghai Lu 1586d5494d4fSYinghai Lu /* 1587d5494d4fSYinghai Lu * Copies of the original ist values from the tss are only accessed during 1588d5494d4fSYinghai Lu * debugging, no special alignment required. 1589d5494d4fSYinghai Lu */ 1590d5494d4fSYinghai Lu DEFINE_PER_CPU(struct orig_ist, orig_ist); 1591d5494d4fSYinghai Lu 1592228bdaa9SSteven Rostedt static DEFINE_PER_CPU(unsigned long, debug_stack_addr); 159342181186SSteven Rostedt DEFINE_PER_CPU(int, debug_stack_usage); 1594228bdaa9SSteven Rostedt 1595228bdaa9SSteven Rostedt int is_debug_stack(unsigned long addr) 1596228bdaa9SSteven Rostedt { 159789cbc767SChristoph Lameter return __this_cpu_read(debug_stack_usage) || 159889cbc767SChristoph Lameter (addr <= __this_cpu_read(debug_stack_addr) && 159989cbc767SChristoph Lameter addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ)); 1600228bdaa9SSteven Rostedt } 16010f46efebSMasami Hiramatsu NOKPROBE_SYMBOL(is_debug_stack); 1602228bdaa9SSteven Rostedt 1603629f4f9dSSeiji Aguchi DEFINE_PER_CPU(u32, debug_idt_ctr); 1604f8988175SSteven Rostedt 1605228bdaa9SSteven Rostedt void debug_stack_set_zero(void) 1606228bdaa9SSteven Rostedt { 1607629f4f9dSSeiji Aguchi this_cpu_inc(debug_idt_ctr); 1608629f4f9dSSeiji Aguchi load_current_idt(); 1609228bdaa9SSteven Rostedt } 16100f46efebSMasami Hiramatsu NOKPROBE_SYMBOL(debug_stack_set_zero); 1611228bdaa9SSteven Rostedt 1612228bdaa9SSteven Rostedt void debug_stack_reset(void) 1613228bdaa9SSteven Rostedt { 1614629f4f9dSSeiji Aguchi if (WARN_ON(!this_cpu_read(debug_idt_ctr))) 1615f8988175SSteven Rostedt return; 1616629f4f9dSSeiji Aguchi if (this_cpu_dec_return(debug_idt_ctr) == 0) 1617629f4f9dSSeiji Aguchi load_current_idt(); 1618228bdaa9SSteven Rostedt } 16190f46efebSMasami Hiramatsu NOKPROBE_SYMBOL(debug_stack_reset); 1620228bdaa9SSteven Rostedt 16210f3fa48aSIngo Molnar #else /* CONFIG_X86_64 */ 1622d5494d4fSYinghai Lu 1623bdf977b3STejun Heo DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; 1624bdf977b3STejun Heo EXPORT_PER_CPU_SYMBOL(current_task); 1625c2daa3beSPeter Zijlstra DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT; 1626c2daa3beSPeter Zijlstra EXPORT_PER_CPU_SYMBOL(__preempt_count); 1627bdf977b3STejun Heo 1628a7fcf28dSAndy Lutomirski /* 1629a7fcf28dSAndy Lutomirski * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find 1630a7fcf28dSAndy Lutomirski * the top of the kernel stack. Use an extra percpu variable to track the 1631a7fcf28dSAndy Lutomirski * top of the kernel stack directly. 
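 * The static initializer below seeds it to the top of the initial
 * thread's stack (init_thread_union + THREAD_SIZE); it is updated as
 * tasks are switched in.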
1632a7fcf28dSAndy Lutomirski */ 1633a7fcf28dSAndy Lutomirski DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) = 1634a7fcf28dSAndy Lutomirski (unsigned long)&init_thread_union + THREAD_SIZE; 1635a7fcf28dSAndy Lutomirski EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack); 1636a7fcf28dSAndy Lutomirski 1637050e9baaSLinus Torvalds #ifdef CONFIG_STACKPROTECTOR 163853f82452SJeremy Fitzhardinge DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); 163960a5317fSTejun Heo #endif 164060a5317fSTejun Heo 16410f3fa48aSIngo Molnar #endif /* CONFIG_X86_64 */ 1642f7627e25SThomas Gleixner 1643f7627e25SThomas Gleixner /* 16449766cdbcSJaswinder Singh Rajput * Clear all 6 debug registers: 16459766cdbcSJaswinder Singh Rajput */ 16469766cdbcSJaswinder Singh Rajput static void clear_all_debug_regs(void) 16479766cdbcSJaswinder Singh Rajput { 16489766cdbcSJaswinder Singh Rajput int i; 16499766cdbcSJaswinder Singh Rajput 16509766cdbcSJaswinder Singh Rajput for (i = 0; i < 8; i++) { 16519766cdbcSJaswinder Singh Rajput /* Ignore db4, db5 */ 16529766cdbcSJaswinder Singh Rajput if ((i == 4) || (i == 5)) 16539766cdbcSJaswinder Singh Rajput continue; 16549766cdbcSJaswinder Singh Rajput 16559766cdbcSJaswinder Singh Rajput set_debugreg(0, i); 16569766cdbcSJaswinder Singh Rajput } 16579766cdbcSJaswinder Singh Rajput } 1658f7627e25SThomas Gleixner 16590bb9fef9SJason Wessel #ifdef CONFIG_KGDB 16600bb9fef9SJason Wessel /* 16610bb9fef9SJason Wessel * Restore debug regs if using kgdbwait and you have a kernel debugger 16620bb9fef9SJason Wessel * connection established. 16630bb9fef9SJason Wessel */ 16640bb9fef9SJason Wessel static void dbg_restore_debug_regs(void) 16650bb9fef9SJason Wessel { 16660bb9fef9SJason Wessel if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break)) 16670bb9fef9SJason Wessel arch_kgdb_ops.correct_hw_break(); 16680bb9fef9SJason Wessel } 16690bb9fef9SJason Wessel #else /* ! CONFIG_KGDB */ 16700bb9fef9SJason Wessel #define dbg_restore_debug_regs() 16710bb9fef9SJason Wessel #endif /* ! CONFIG_KGDB */ 16720bb9fef9SJason Wessel 1673ce4b1b16SIgor Mammedov static void wait_for_master_cpu(int cpu) 1674ce4b1b16SIgor Mammedov { 1675ce4b1b16SIgor Mammedov #ifdef CONFIG_SMP 1676ce4b1b16SIgor Mammedov /* 1677ce4b1b16SIgor Mammedov * wait for ACK from master CPU before continuing 1678ce4b1b16SIgor Mammedov * with AP initialization 1679ce4b1b16SIgor Mammedov */ 1680ce4b1b16SIgor Mammedov WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)); 1681ce4b1b16SIgor Mammedov while (!cpumask_test_cpu(cpu, cpu_callout_mask)) 1682ce4b1b16SIgor Mammedov cpu_relax(); 1683ce4b1b16SIgor Mammedov #endif 1684ce4b1b16SIgor Mammedov } 1685ce4b1b16SIgor Mammedov 1686b2e2ba57SChang S. Bae #ifdef CONFIG_X86_64 1687b2e2ba57SChang S. Bae static void setup_getcpu(int cpu) 1688b2e2ba57SChang S. Bae { 168922245bdfSIngo Molnar unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu)); 1690b2e2ba57SChang S. Bae struct desc_struct d = { }; 1691b2e2ba57SChang S. Bae 1692b2e2ba57SChang S. Bae if (static_cpu_has(X86_FEATURE_RDTSCP)) 1693b2e2ba57SChang S. Bae write_rdtscp_aux(cpudata); 1694b2e2ba57SChang S. Bae 1695b2e2ba57SChang S. Bae /* Store CPU and node number in limit. */ 1696b2e2ba57SChang S. Bae d.limit0 = cpudata; 1697b2e2ba57SChang S. Bae d.limit1 = cpudata >> 16; 1698b2e2ba57SChang S. Bae 1699b2e2ba57SChang S. Bae d.type = 5; /* RO data, expand down, accessed */ 1700b2e2ba57SChang S. Bae d.dpl = 3; /* Visible to user code */ 1701b2e2ba57SChang S. Bae d.s = 1; /* Not a system segment */ 1702b2e2ba57SChang S. 
Bae d.p = 1; /* Present */ 1703b2e2ba57SChang S. Bae d.d = 1; /* 32-bit */ 1704b2e2ba57SChang S. Bae 170522245bdfSIngo Molnar write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S); 1706b2e2ba57SChang S. Bae } 1707b2e2ba57SChang S. Bae #endif 1708b2e2ba57SChang S. Bae 1709f7627e25SThomas Gleixner /* 1710f7627e25SThomas Gleixner * cpu_init() initializes state that is per-CPU. Some data is already 1711f7627e25SThomas Gleixner * initialized (naturally) in the bootstrap process, such as the GDT 1712f7627e25SThomas Gleixner * and IDT. We reload them nevertheless, this function acts as a 1713f7627e25SThomas Gleixner * 'CPU state barrier', nothing should get across. 17141ba76586SYinghai Lu * A lot of state is already set up in PDA init for 64 bit 1715f7627e25SThomas Gleixner */ 17161ba76586SYinghai Lu #ifdef CONFIG_X86_64 17170f3fa48aSIngo Molnar 1718148f9bb8SPaul Gortmaker void cpu_init(void) 17191ba76586SYinghai Lu { 17200fe1e009STejun Heo struct orig_ist *oist; 17211ba76586SYinghai Lu struct task_struct *me; 17220f3fa48aSIngo Molnar struct tss_struct *t; 17230f3fa48aSIngo Molnar unsigned long v; 1724fb59831bSAndy Lutomirski int cpu = raw_smp_processor_id(); 17251ba76586SYinghai Lu int i; 17261ba76586SYinghai Lu 1727ce4b1b16SIgor Mammedov wait_for_master_cpu(cpu); 1728ce4b1b16SIgor Mammedov 1729e6ebf5deSFenghua Yu /* 17301e02ce4cSAndy Lutomirski * Initialize the CR4 shadow before doing anything that could 17311e02ce4cSAndy Lutomirski * try to read it. 17321e02ce4cSAndy Lutomirski */ 17331e02ce4cSAndy Lutomirski cr4_init_shadow(); 17341e02ce4cSAndy Lutomirski 1735777284b6SBorislav Petkov if (cpu) 1736e6ebf5deSFenghua Yu load_ucode_ap(); 1737e6ebf5deSFenghua Yu 1738c482feefSAndy Lutomirski t = &per_cpu(cpu_tss_rw, cpu); 17390fe1e009STejun Heo oist = &per_cpu(orig_ist, cpu); 17400f3fa48aSIngo Molnar 1741e7a22c1eSBrian Gerst #ifdef CONFIG_NUMA 174227fd185fSFenghua Yu if (this_cpu_read(numa_node) == 0 && 1743e534c7c5SLee Schermerhorn early_cpu_to_node(cpu) != NUMA_NO_NODE) 1744e534c7c5SLee Schermerhorn set_numa_node(early_cpu_to_node(cpu)); 1745e7a22c1eSBrian Gerst #endif 1746b2e2ba57SChang S. Bae setup_getcpu(cpu); 17471ba76586SYinghai Lu 17481ba76586SYinghai Lu me = current; 17491ba76586SYinghai Lu 17502eaad1fdSMike Travis pr_debug("Initializing CPU#%d\n", cpu); 17511ba76586SYinghai Lu 1752375074ccSAndy Lutomirski cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); 17531ba76586SYinghai Lu 17541ba76586SYinghai Lu /* 17551ba76586SYinghai Lu * Initialize the per-CPU GDT with the boot GDT, 17561ba76586SYinghai Lu * and set up the GDT descriptor: 17571ba76586SYinghai Lu */ 17581ba76586SYinghai Lu 1759552be871SBrian Gerst switch_to_new_gdt(cpu); 17602697fbd5SBrian Gerst loadsegment(fs, 0); 17612697fbd5SBrian Gerst 1762cf910e83SSeiji Aguchi load_current_idt(); 17631ba76586SYinghai Lu 17641ba76586SYinghai Lu memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); 17651ba76586SYinghai Lu syscall_init(); 17661ba76586SYinghai Lu 17671ba76586SYinghai Lu wrmsrl(MSR_FS_BASE, 0); 17681ba76586SYinghai Lu wrmsrl(MSR_KERNEL_GS_BASE, 0); 17691ba76586SYinghai Lu barrier(); 17701ba76586SYinghai Lu 17714763ed4dSH. 
Peter Anvin x86_configure_nx(); 1772659006bfSThomas Gleixner x2apic_setup(); 17731ba76586SYinghai Lu 17741ba76586SYinghai Lu /* 17751ba76586SYinghai Lu * set up and load the per-CPU TSS 17761ba76586SYinghai Lu */ 17770fe1e009STejun Heo if (!oist->ist[0]) { 177840e7f949SAndy Lutomirski char *estacks = get_cpu_entry_area(cpu)->exception_stacks; 17790f3fa48aSIngo Molnar 17801ba76586SYinghai Lu for (v = 0; v < N_EXCEPTION_STACKS; v++) { 17810f3fa48aSIngo Molnar estacks += exception_stack_sizes[v]; 17820fe1e009STejun Heo oist->ist[v] = t->x86_tss.ist[v] = 17831ba76586SYinghai Lu (unsigned long)estacks; 1784228bdaa9SSteven Rostedt if (v == DEBUG_STACK-1) 1785228bdaa9SSteven Rostedt per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks; 17861ba76586SYinghai Lu } 17871ba76586SYinghai Lu } 17881ba76586SYinghai Lu 17897fb983b4SAndy Lutomirski t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET; 17900f3fa48aSIngo Molnar 17911ba76586SYinghai Lu /* 17921ba76586SYinghai Lu * <= is required because the CPU will access up to 17931ba76586SYinghai Lu * 8 bits beyond the end of the IO permission bitmap. 17941ba76586SYinghai Lu */ 17951ba76586SYinghai Lu for (i = 0; i <= IO_BITMAP_LONGS; i++) 17961ba76586SYinghai Lu t->io_bitmap[i] = ~0UL; 17971ba76586SYinghai Lu 1798f1f10076SVegard Nossum mmgrab(&init_mm); 17991ba76586SYinghai Lu me->active_mm = &init_mm; 18008c5dfd25SStoyan Gaydarov BUG_ON(me->mm); 180172c0098dSAndy Lutomirski initialize_tlbstate_and_flush(); 18021ba76586SYinghai Lu enter_lazy_tlb(&init_mm, me); 18031ba76586SYinghai Lu 180420bb8344SAndy Lutomirski /* 18057f2590a1SAndy Lutomirski * Initialize the TSS. sp0 points to the entry trampoline stack 18067f2590a1SAndy Lutomirski * regardless of what task is running. 180720bb8344SAndy Lutomirski */ 180872f5e08dSAndy Lutomirski set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); 18091ba76586SYinghai Lu load_TR_desc(); 18104fe2d8b1SDave Hansen load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1)); 181120bb8344SAndy Lutomirski 181237868fe1SAndy Lutomirski load_mm_ldt(&init_mm); 18131ba76586SYinghai Lu 18149766cdbcSJaswinder Singh Rajput clear_all_debug_regs(); 18150bb9fef9SJason Wessel dbg_restore_debug_regs(); 18161ba76586SYinghai Lu 181721c4cd10SIngo Molnar fpu__init_cpu(); 18181ba76586SYinghai Lu 18191ba76586SYinghai Lu if (is_uv_system()) 18201ba76586SYinghai Lu uv_cpu_init(); 182169218e47SThomas Garnier 182269218e47SThomas Garnier load_fixmap_gdt(cpu); 18231ba76586SYinghai Lu } 18241ba76586SYinghai Lu 18251ba76586SYinghai Lu #else 18261ba76586SYinghai Lu 1827148f9bb8SPaul Gortmaker void cpu_init(void) 1828f7627e25SThomas Gleixner { 1829f7627e25SThomas Gleixner int cpu = smp_processor_id(); 1830f7627e25SThomas Gleixner struct task_struct *curr = current; 1831c482feefSAndy Lutomirski struct tss_struct *t = &per_cpu(cpu_tss_rw, cpu); 1832f7627e25SThomas Gleixner 1833ce4b1b16SIgor Mammedov wait_for_master_cpu(cpu); 1834e6ebf5deSFenghua Yu 18355b2bdbc8SSteven Rostedt /* 18365b2bdbc8SSteven Rostedt * Initialize the CR4 shadow before doing anything that could 18375b2bdbc8SSteven Rostedt * try to read it. 
18385b2bdbc8SSteven Rostedt */ 18395b2bdbc8SSteven Rostedt cr4_init_shadow(); 18405b2bdbc8SSteven Rostedt 1841ce4b1b16SIgor Mammedov show_ucode_info_early(); 1842f7627e25SThomas Gleixner 18431b74dde7SChen Yucong pr_info("Initializing CPU#%d\n", cpu); 1844f7627e25SThomas Gleixner 1845362f924bSBorislav Petkov if (cpu_feature_enabled(X86_FEATURE_VME) || 184659e21e3dSBorislav Petkov boot_cpu_has(X86_FEATURE_TSC) || 1847362f924bSBorislav Petkov boot_cpu_has(X86_FEATURE_DE)) 1848375074ccSAndy Lutomirski cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); 1849f7627e25SThomas Gleixner 1850cf910e83SSeiji Aguchi load_current_idt(); 1851552be871SBrian Gerst switch_to_new_gdt(cpu); 1852f7627e25SThomas Gleixner 1853f7627e25SThomas Gleixner /* 1854f7627e25SThomas Gleixner * Set up and load the per-CPU TSS and LDT 1855f7627e25SThomas Gleixner */ 1856f1f10076SVegard Nossum mmgrab(&init_mm); 1857f7627e25SThomas Gleixner curr->active_mm = &init_mm; 18588c5dfd25SStoyan Gaydarov BUG_ON(curr->mm); 185972c0098dSAndy Lutomirski initialize_tlbstate_and_flush(); 1860f7627e25SThomas Gleixner enter_lazy_tlb(&init_mm, curr); 1861f7627e25SThomas Gleixner 186220bb8344SAndy Lutomirski /* 186345d7b255SJoerg Roedel * Initialize the TSS. sp0 points to the entry trampoline stack 186445d7b255SJoerg Roedel * regardless of what task is running. 186520bb8344SAndy Lutomirski */ 186672f5e08dSAndy Lutomirski set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); 1867f7627e25SThomas Gleixner load_TR_desc(); 186845d7b255SJoerg Roedel load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1)); 186920bb8344SAndy Lutomirski 187037868fe1SAndy Lutomirski load_mm_ldt(&init_mm); 1871f7627e25SThomas Gleixner 18727fb983b4SAndy Lutomirski t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET; 1873f9a196b8SThomas Gleixner 1874f7627e25SThomas Gleixner #ifdef CONFIG_DOUBLEFAULT 1875f7627e25SThomas Gleixner /* Set up doublefault TSS pointer in the GDT */ 1876f7627e25SThomas Gleixner __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss); 1877f7627e25SThomas Gleixner #endif 1878f7627e25SThomas Gleixner 18799766cdbcSJaswinder Singh Rajput clear_all_debug_regs(); 18800bb9fef9SJason Wessel dbg_restore_debug_regs(); 1881f7627e25SThomas Gleixner 188221c4cd10SIngo Molnar fpu__init_cpu(); 188369218e47SThomas Garnier 188469218e47SThomas Garnier load_fixmap_gdt(cpu); 1885f7627e25SThomas Gleixner } 18861ba76586SYinghai Lu #endif 18875700f743SBorislav Petkov 1888b51ef52dSLaura Abbott static void bsp_resume(void) 1889b51ef52dSLaura Abbott { 1890b51ef52dSLaura Abbott if (this_cpu->c_bsp_resume) 1891b51ef52dSLaura Abbott this_cpu->c_bsp_resume(&boot_cpu_data); 1892b51ef52dSLaura Abbott } 1893b51ef52dSLaura Abbott 1894b51ef52dSLaura Abbott static struct syscore_ops cpu_syscore_ops = { 1895b51ef52dSLaura Abbott .resume = bsp_resume, 1896b51ef52dSLaura Abbott }; 1897b51ef52dSLaura Abbott 1898b51ef52dSLaura Abbott static int __init init_cpu_syscore(void) 1899b51ef52dSLaura Abbott { 1900b51ef52dSLaura Abbott register_syscore_ops(&cpu_syscore_ops); 1901b51ef52dSLaura Abbott return 0; 1902b51ef52dSLaura Abbott } 1903b51ef52dSLaura Abbott core_initcall(init_cpu_syscore); 19041008c52cSBorislav Petkov 19051008c52cSBorislav Petkov /* 19061008c52cSBorislav Petkov * The microcode loader calls this upon late microcode load to recheck features, 19071008c52cSBorislav Petkov * only when microcode has been updated. Caller holds microcode_mutex and CPU 19081008c52cSBorislav Petkov * hotplug lock. 
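 * The body below re-reads the maximum CPUID level, re-runs get_cpu_cap()
 * on a scratch cpuinfo_x86 and warns if the resulting capability bits no
 * longer match boot_cpu_data.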
19091008c52cSBorislav Petkov */ 19101008c52cSBorislav Petkov void microcode_check(void) 19111008c52cSBorislav Petkov { 191242ca8082SBorislav Petkov struct cpuinfo_x86 info; 191342ca8082SBorislav Petkov 19141008c52cSBorislav Petkov perf_check_microcode(); 191542ca8082SBorislav Petkov 191642ca8082SBorislav Petkov /* Reload CPUID max function as it might've changed. */ 191742ca8082SBorislav Petkov info.cpuid_level = cpuid_eax(0); 191842ca8082SBorislav Petkov 191942ca8082SBorislav Petkov /* 192042ca8082SBorislav Petkov * Copy all capability leafs to pick up the synthetic ones so that 192142ca8082SBorislav Petkov * memcmp() below doesn't fail on that. The ones coming from CPUID will 192242ca8082SBorislav Petkov * get overwritten in get_cpu_cap(). 192342ca8082SBorislav Petkov */ 192442ca8082SBorislav Petkov memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)); 192542ca8082SBorislav Petkov 192642ca8082SBorislav Petkov get_cpu_cap(&info); 192742ca8082SBorislav Petkov 192842ca8082SBorislav Petkov if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability))) 192942ca8082SBorislav Petkov return; 193042ca8082SBorislav Petkov 193142ca8082SBorislav Petkov pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n"); 193242ca8082SBorislav Petkov pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n"); 19331008c52cSBorislav Petkov } 1934