#include <linux/bootmem.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/io.h>

#include <asm/stackprotector.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <asm/pgtable.h>
#include <asm/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/mtrr.h>
#include <linux/numa.h>
#include <asm/asm.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
#endif

#include "cpu.h"

/* all of these masks are initialized in setup_cpu_local_masks() */
cpumask_var_t cpu_initialized_mask;
cpumask_var_t cpu_callout_mask;
cpumask_var_t cpu_callin_mask;

/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;

/* correctly size the local cpu masks */
void __init setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

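/*
 * Fallback ->c_init, used when no vendor-specific cpu_dev matches:
 * on 64-bit only cache sizing is left to do here, while on 32-bit
 * the best we can do for pre-CPUID parts is name them.
 */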
static void __cpuinit default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	cpu_detect_cache_sizes(c);
#else
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
#endif
}

static const struct cpu_dev __cpuinitconst default_cpu = {
	.c_init		= default_init,
	.c_vendor	= "Unknown",
	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
};

static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;

DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
	/*
	 * We need valid kernel segments for data and code in long mode too
	 * IRET will check the segment types  kkeil 2000/10/28
	 * Also sysret mandates a special GDT layout
	 *
	 * TLS descriptors are currently at a different place compared to i386.
	 * Hopefully nobody expects them at a fixed place (Wine?)
	 */
	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
#else
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
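	/*
	 * Note: in GDT_ENTRY_INIT(flags, base, limit) the flags word
	 * combines the access byte and the granularity nibble: e.g.
	 * 0xc09a above is a present ring-0 code segment (0x9a) with 4k
	 * granularity and a 32-bit default operand size (0xc0).
	 */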
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * The code segments and data segments have fixed 64k limits,
	 * the transfer segment sizes are set at run time.
	 */
	/* 32-bit code */
	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(0x0092, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	/* 32-bit code */
	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* data */
	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(0x4092, 0, 0xffff),

	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	GDT_STACK_CANARY_INIT
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

static int __init x86_xsave_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_XSAVE);
	setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
	return 1;
}
__setup("noxsave", x86_xsave_setup);

static int __init x86_xsaveopt_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
	return 1;
}
__setup("noxsaveopt", x86_xsaveopt_setup);

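/*
 * The setup options below exist only on 32-bit builds; FXSR/SEP
 * masking, the L2 size override and the serial-number toggle mostly
 * matter for old 32-bit parts.
 */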
#ifdef CONFIG_X86_32
static int cachesize_override __cpuinitdata = -1;
static int disable_x86_serial_nr __cpuinitdata = 1;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);

static int __init x86_fxsr_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_FXSR);
	setup_clear_cpu_cap(X86_FEATURE_XMM);
	return 1;
}
__setup("nofxsr", x86_fxsr_setup);

static int __init x86_sep_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_SEP);
	return 1;
}
__setup("nosep", x86_sep_setup);

/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	/*
	 * Cyrix and IDT cpus allow disabling of CPUID
	 * so the code below may return different results
	 * when it is executed before and after enabling
	 * the CPUID. Add "volatile" to not allow gcc to
	 * optimize the subsequent calls to this function.
	 */
	asm volatile ("pushfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "movl %0, %1	\n\t"
		      "xorl %2, %0	\n\t"
		      "pushl %0		\n\t"
		      "popfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "popfl		\n\t"

		      : "=&r" (f1), "=&r" (f2)
		      : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
static int __cpuinit have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
		return;

	/* Disable processor serial number: */

	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
	lo |= 0x200000;
	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);

	printk(KERN_NOTICE "CPU serial number disabled.\n");
	clear_cpu_cap(c, X86_FEATURE_PN);

	/* Disabling the serial number may affect the cpuid level */
	c->cpuid_level = cpuid_eax(0);
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
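/*
 * On 64-bit, CPUID is architecturally guaranteed and the PSN quirk
 * does not apply, so the probes collapse to constant stubs.
 */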
static inline int flag_is_changeable_p(u32 flag)
{
	return 1;
}
/* Probe for the CPUID instruction */
static inline int have_cpuid_p(void)
{
	return 1;
}
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
#endif

static int disable_smep __cpuinitdata;
static __init int setup_disable_smep(char *arg)
{
	disable_smep = 1;
	return 1;
}
__setup("nosmep", setup_disable_smep);

static __cpuinit void setup_smep(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_SMEP)) {
		if (unlikely(disable_smep)) {
			setup_clear_cpu_cap(X86_FEATURE_SMEP);
			clear_in_cr4(X86_CR4_SMEP);
		} else
			set_in_cr4(X86_CR4_SMEP);
	}
}

/*
 * Some CPU features depend on higher CPUID levels, which may not always
 * be available due to CPUID level capping or broken virtualization
 * software.  Add those features to this table to auto-disable them.
 */
struct cpuid_dependent_feature {
	u32 feature;
	u32 level;
};

static const struct cpuid_dependent_feature __cpuinitconst
cpuid_dependent_features[] = {
	{ X86_FEATURE_MWAIT,		0x00000005 },
	{ X86_FEATURE_DCA,		0x00000009 },
	{ X86_FEATURE_XSAVE,		0x0000000d },
	{ 0, 0 }
};

static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{
	const struct cpuid_dependent_feature *df;

	for (df = cpuid_dependent_features; df->feature; df++) {

		if (!cpu_has(c, df->feature))
			continue;
		/*
		 * Note: cpuid_level is set to -1 if unavailable, but
		 * extended_cpuid_level is set to 0 if unavailable
		 * and the legitimate extended levels are all negative
		 * when signed; hence the weird messing around with
		 * signs here...
		 */
		if (!((s32)df->level < 0 ?
		     (u32)df->level > (u32)c->extended_cpuid_level :
		     (s32)df->level > (s32)c->cpuid_level))
			continue;

		clear_cpu_cap(c, df->feature);
		if (!warn)
			continue;

		printk(KERN_WARNING
		       "CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
		       x86_cap_flags[df->feature], df->level);
	}
}

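/*
 * filter_cpuid_features() runs twice: silently from
 * early_identify_cpu(), and again with warnings enabled from
 * identify_cpu() once the vendor setup has had its say.
 */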
/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the
 * model name; in particular, if CPUID levels 0x80000002..4 are
 * supported, this isn't used.
 */

/* Look up CPU names by table lookup. */
static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
{
	const struct cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->c_models;

	while (info && info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
	return NULL;		/* Not found */
}

__u32 cpu_caps_cleared[NCAPINTS] __cpuinitdata;
__u32 cpu_caps_set[NCAPINTS] __cpuinitdata;

void load_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	loadsegment(fs, __KERNEL_PERCPU);
#else
	loadsegment(gs, 0);
	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
#endif
	load_stack_canary_segment();
}

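/*
 * Note: on 64-bit the per-cpu base is carried in MSR_GS_BASE rather
 * than in a descriptor, which is why %gs itself is loaded with the
 * null selector above.
 */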
/*
 * Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one.
 */
void switch_to_new_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_table(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
	/* Reload the per-cpu base */

	load_percpu_segment(cpu);
}

static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};

static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (c->extended_cpuid_level < 0x80000004)
		return;

	v = (unsigned int *)c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/*
	 * Intel chips right-justify this string for some dumb reason;
	 * undo that brain damage:
	 */
	p = q = &c->x86_model_id[0];
	while (*p == ' ')
		p++;
	if (p != q) {
		while (*p)
			*q++ = *p++;
		while (q <= &c->x86_model_id[48])
			*q++ = '\0';	/* Zero-pad the rest */
	}
}

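/*
 * cpu_detect_cache_sizes() decodes the AMD-defined leaves:
 * 0x80000005 reports the L1 sizes in KB in the top bytes of ECX/EDX,
 * and 0x80000006 reports the L2 size in ECX[31:16].
 */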
void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ebx, ecx, edx, l2size;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
#endif
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
	l2size = ecx >> 16;

#ifdef CONFIG_X86_64
	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
	/* do processor-specific cache resizing */
	if (this_cpu->c_size_cache)
		l2size = this_cpu->c_size_cache(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */
#endif

	c->x86_cache_size = l2size;
}

void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;
	static bool printed;

	if (!cpu_has(c, X86_FEATURE_HT))
		return;

	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
		return;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n");
		goto out;
	}

	if (smp_num_siblings <= 1)
		goto out;

	if (smp_num_siblings > nr_cpu_ids) {
		pr_warning("CPU: Unsupported number of siblings %d",
			   smp_num_siblings);
		smp_num_siblings = 1;
		return;
	}

	index_msb = get_count_order(smp_num_siblings);
	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);

	smp_num_siblings = smp_num_siblings / c->x86_max_cores;

	index_msb = get_count_order(smp_num_siblings);

	core_bits = get_count_order(c->x86_max_cores);

	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
				       ((1 << core_bits) - 1);

out:
	if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);
		printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
		       c->cpu_core_id);
		printed = 1;
	}
#endif
}

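/*
 * Match the 12-byte vendor string from CPUID leaf 0 against the
 * registered cpu_devs[]; an unknown vendor falls back to default_cpu
 * rather than failing outright.
 */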
static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;
	int i;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (!cpu_devs[i])
			break;

		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {

			this_cpu = cpu_devs[i];
			c->x86_vendor = this_cpu->c_x86_vendor;
			return;
		}
	}

	printk_once(KERN_ERR
			"CPU: vendor_id '%s' unknown, using generic init.\n" \
			"CPU: Your system may be unstable.\n", v);

	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}

void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	c->x86 = 4;
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;

		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;

		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xf) << 4;

		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
		}
	}
}

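/*
 * Fill in the x86_capability[] words handled here: leaves 0x00000001
 * and 0x00000007, the extended 0x8000xxxx range, and finally the
 * scattered feature bits.
 */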
void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;
	u32 ebx;

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 capability, excap;

		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
		c->x86_capability[0] = capability;
		c->x86_capability[4] = excap;
	}

	/* Additional Intel-defined flags: level 0x00000007 */
	if (c->cpuid_level >= 0x00000007) {
		u32 eax, ebx, ecx, edx;

		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);

		c->x86_capability[9] = ebx;
	}

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;

	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
	}

	if (c->extended_cpuid_level >= 0x80000008) {
		u32 eax = cpuid_eax(0x80000008);

		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
#ifdef CONFIG_X86_32
	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
		c->x86_phys_bits = 36;
#endif

	if (c->extended_cpuid_level >= 0x80000007)
		c->x86_power = cpuid_edx(0x80000007);

	init_scattered_cpuid_features(c);
}

static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	int i;

	/*
	 * First of all, decide if this is a 486 or higher
	 * It's a 486 if we can modify the AC flag
	 */
	if (flag_is_changeable_p(X86_EFLAGS_AC))
		c->x86 = 4;
	else
		c->x86 = 3;

	for (i = 0; i < X86_VENDOR_NUM; i++)
		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
			c->x86_vendor_id[0] = 0;
			cpu_devs[i]->c_identify(c);
			if (c->x86_vendor_id[0]) {
				get_cpu_vendor(c);
				break;
			}
		}
#endif
}

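/*
 * Even without CPUID some identification is possible: the AC-flag
 * probe above tells a 386 from a 486, and a vendor c_identify() hook
 * may manage to re-enable CPUID (Cyrix-style), in which case the
 * callers retry the normal path.
 */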
/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the BP.  Don't add code here
 * that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;

	memset(&c->x86_capability, 0, sizeof c->x86_capability);
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* Cyrix could have CPUID enabled via c_identify() */
	if (!have_cpuid_p())
		return;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	if (this_cpu->c_early_init)
		this_cpu->c_early_init(c);

#ifdef CONFIG_SMP
	c->cpu_index = 0;
#endif
	filter_cpuid_features(c, false);

	setup_smep(c);
}

void __init early_cpu_init(void)
{
	const struct cpu_dev *const *cdev;
	int count = 0;

#ifdef CONFIG_PROCESSOR_SELECT
	printk(KERN_INFO "KERNEL supported cpus:\n");
#endif

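	/*
	 * Walk the vendor table the linker gathers between
	 * __x86_cpu_dev_start and __x86_cpu_dev_end; each vendor file
	 * registers its cpu_dev there.
	 */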
	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
		const struct cpu_dev *cpudev = *cdev;

		if (count >= X86_VENDOR_NUM)
			break;
		cpu_devs[count] = cpudev;
		count++;

#ifdef CONFIG_PROCESSOR_SELECT
		{
			unsigned int j;

			for (j = 0; j < 2; j++) {
				if (!cpudev->c_ident[j])
					continue;
				printk(KERN_INFO "  %s %s\n", cpudev->c_vendor,
					cpudev->c_ident[j]);
			}
		}
#endif
	}
	early_identify_cpu(&boot_cpu_data);
}

/*
 * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
 * unfortunately, that's not true in practice because of early VIA
 * chips and (more importantly) broken virtualizers that are not easy
 * to detect. In the latter case it doesn't even *fail* reliably, so
 * probing for it doesn't even work. Disable it completely on 32-bit
 * unless we can find a reliable way to detect all the broken cases.
 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
 */
static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	clear_cpu_cap(c, X86_FEATURE_NOPL);
#else
	set_cpu_cap(c, X86_FEATURE_NOPL);
#endif
}

static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
{
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* Cyrix could have CPUID enabled via c_identify() */
	if (!have_cpuid_p())
		return;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	if (c->cpuid_level >= 0x00000001) {
		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
#ifdef CONFIG_X86_32
# ifdef CONFIG_X86_HT
		c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
# else
		c->apicid = c->initial_apicid;
# endif
#endif

#ifdef CONFIG_X86_HT
		c->phys_proc_id = c->initial_apicid;
#endif
	}

	setup_smep(c);

	get_model_name(c); /* Default name */

	detect_nopl(c);
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	generic_identify(c);

	if (this_cpu->c_identify)
		this_cpu->c_identify(c);

	/* Clear/Set all flags overridden by options, after probe */
	for (i = 0; i < NCAPINTS; i++) {
		c->x86_capability[i] &= ~cpu_caps_cleared[i];
		c->x86_capability[i] |= cpu_caps_set[i];
	}

#ifdef CONFIG_X86_64
	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
#endif

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/*
	 * The vendor-specific functions might have changed features.
	 * Now we do "generic changes."
	 */

	/* Filter out anything that depends on CPUID levels we don't have */
	filter_cpuid_features(c, true);

	/* If the model name is still unset, do table lookup. */
	if (!c->x86_model_id[0]) {
		const char *p;
		p = table_lookup_model(c);
		if (p)
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86, c->x86_model);
	}

#ifdef CONFIG_X86_64
	detect_ht(c);
#endif

	init_hypervisor(c);

	/*
	 * Clear/Set all flags overridden by options; this needs to be
	 * done before the per-CPU capabilities are ANDed into
	 * boot_cpu_data below.
	 */
	for (i = 0; i < NCAPINTS; i++) {
		c->x86_capability[i] &= ~cpu_caps_cleared[i];
		c->x86_capability[i] |= cpu_caps_set[i];
	}

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

	/* Init Machine Check Exception if available. */
	mcheck_cpu_init(c);

	select_idle_routine(c);

#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif
}

#ifdef CONFIG_X86_64
static void vgetcpu_set_mode(void)
{
	if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP))
		vgetcpu_mode = VGETCPU_RDTSCP;
	else
		vgetcpu_mode = VGETCPU_LSL;
}
#endif

void __init identify_boot_cpu(void)
{
	identify_cpu(&boot_cpu_data);
	init_c1e_mask();
#ifdef CONFIG_X86_32
	sysenter_setup();
	enable_sep_cpu();
#else
	vgetcpu_set_mode();
#endif
}

void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
{
	BUG_ON(c == &boot_cpu_data);
	identify_cpu(c);
#ifdef CONFIG_X86_32
	enable_sep_cpu();
#endif
	mtrr_ap_init();
}

struct msr_range {
	unsigned	min;
	unsigned	max;
};

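/*
 * MSR ranges dumped by the "show_msr=" debug option below; reads go
 * through rdmsrl_amd_safe(), so MSRs that fault are simply skipped.
 */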
static const struct msr_range msr_range_array[] __cpuinitconst = {
	{ 0x00000000, 0x00000418},
	{ 0xc0000000, 0xc000040b},
	{ 0xc0010000, 0xc0010142},
	{ 0xc0011000, 0xc001103b},
};

static void __cpuinit print_cpu_msr(void)
{
	unsigned index_min, index_max;
	unsigned index;
	u64 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
		index_min = msr_range_array[i].min;
		index_max = msr_range_array[i].max;

		for (index = index_min; index < index_max; index++) {
			if (rdmsrl_amd_safe(index, &val))
				continue;
			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
		}
	}
}

static int show_msr __cpuinitdata;

static __init int setup_show_msr(char *arg)
{
	int num;

	get_option(&arg, &num);

	if (num > 0)
		show_msr = num;
	return 1;
}
__setup("show_msr=", setup_show_msr);

static __init int setup_noclflush(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
	return 1;
}
__setup("noclflush", setup_noclflush);

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
	const char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM) {
		vendor = this_cpu->c_vendor;
	} else {
		if (c->cpuid_level >= 0)
			vendor = c->x86_vendor_id;
	}

	if (vendor && !strstr(c->x86_model_id, vendor))
		printk(KERN_CONT "%s ", vendor);

	if (c->x86_model_id[0])
		printk(KERN_CONT "%s", c->x86_model_id);
	else
		printk(KERN_CONT "%d86", c->x86);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
	else
		printk(KERN_CONT "\n");

#ifdef CONFIG_SMP
	if (c->cpu_index < show_msr)
		print_cpu_msr();
#else
	if (show_msr)
		print_cpu_msr();
#endif
}

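/*
 * "clearcpuid=<bit>" takes a raw x86_capability bit number rather
 * than a feature name; anything outside 0..NCAPINTS*32-1 is rejected.
 */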
static __init int setup_disablecpuid(char *arg)
{
	int bit;

	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
		setup_clear_cpu_cap(bit);
	else
		return 0;

	return 1;
}
__setup("clearcpuid=", setup_disablecpuid);

#ifdef CONFIG_X86_64
struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };

DEFINE_PER_CPU_FIRST(union irq_stack_union,
		     irq_stack_union) __aligned(PAGE_SIZE);

/*
 * The following four percpu variables are hot.  Align current_task to
 * cacheline size such that all four fall in the same cacheline.
 */
DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
	&init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(unsigned long, kernel_stack) =
	(unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
EXPORT_PER_CPU_SYMBOL(kernel_stack);

DEFINE_PER_CPU(char *, irq_stack_ptr) =
	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;

DEFINE_PER_CPU(unsigned int, irq_count) = -1;

/*
 * Special IST stacks which the CPU switches to when it calls
 * an IST-marked descriptor entry. Up to 7 stacks (hardware
 * limit), all of them are 4K, except the debug stack which
 * is 8K.
 */
static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
	  [0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
	  [DEBUG_STACK - 1]			= DEBUG_STKSZ
};

static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);

/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
	/*
	 * LSTAR and STAR live in a bit strange symbiosis.
	 * They both write to the same internal register. STAR allows
	 * setting CS/DS, but only a 32-bit target. LSTAR sets the
	 * 64-bit rip.
	 */
	wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
	wrmsrl(MSR_LSTAR, system_call);
	wrmsrl(MSR_CSTAR, ignore_sysret);
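	/*
	 * CSTAR is the compat-mode SYSCALL entry; it stays pointed at
	 * ignore_sysret unless syscall32_cpu_init() re-targets it below.
	 */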

#ifdef CONFIG_IA32_EMULATION
	syscall32_cpu_init();
#endif

	/* Flags to clear on syscall */
	wrmsrl(MSR_SYSCALL_MASK,
	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
}

unsigned long kernel_eflags;

/*
 * Copies of the original ist values from the tss are only accessed during
 * debugging, no special alignment required.
 */
DEFINE_PER_CPU(struct orig_ist, orig_ist);

#else	/* CONFIG_X86_64 */

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

#ifdef CONFIG_CC_STACKPROTECTOR
DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif

/* Make sure %fs and %gs are initialized properly in idle threads */
struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(struct pt_regs));
	regs->fs = __KERNEL_PERCPU;
	regs->gs = __KERNEL_STACK_CANARY;

	return regs;
}
#endif	/* CONFIG_X86_64 */

/*
 * Clear all 6 debug registers:
 */
static void clear_all_debug_regs(void)
{
	int i;

	for (i = 0; i < 8; i++) {
		/* Ignore db4, db5 */
		if ((i == 4) || (i == 5))
			continue;

		set_debugreg(0, i);
	}
}

#ifdef CONFIG_KGDB
/*
 * Restore debug regs if using kgdbwait and you have a kernel debugger
 * connection established.
 */
static void dbg_restore_debug_regs(void)
{
	if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
		arch_kgdb_ops.correct_hw_break();
}
#else /* ! CONFIG_KGDB */
#define dbg_restore_debug_regs()
#endif /* ! CONFIG_KGDB */

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 * A lot of state is already set up in PDA init for 64 bit
 */
#ifdef CONFIG_X86_64

void __cpuinit cpu_init(void)
{
	struct orig_ist *oist;
	struct task_struct *me;
	struct tss_struct *t;
	unsigned long v;
	int cpu;
	int i;

	cpu = stack_smp_processor_id();
	t = &per_cpu(init_tss, cpu);
	oist = &per_cpu(orig_ist, cpu);

#ifdef CONFIG_NUMA
	if (cpu != 0 && percpu_read(numa_node) == 0 &&
	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
		set_numa_node(early_cpu_to_node(cpu));
#endif

	me = current;

	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask))
		panic("CPU#%d already initialized!\n", cpu);

	pr_debug("Initializing CPU#%d\n", cpu);

	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */

	switch_to_new_gdt(cpu);
	loadsegment(fs, 0);

	load_idt((const struct desc_ptr *)&idt_descr);

	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
	syscall_init();

	wrmsrl(MSR_FS_BASE, 0);
	wrmsrl(MSR_KERNEL_GS_BASE, 0);
	barrier();

	x86_configure_nx();
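	/*
	 * Assumption: the boot CPU has already gone through the x2apic
	 * enable path during APIC setup, so only secondaries do it here.
	 */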
	if (cpu != 0)
		enable_x2apic();

	/*
	 * set up and load the per-CPU TSS
	 */
	if (!oist->ist[0]) {
		char *estacks = per_cpu(exception_stacks, cpu);

		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
			estacks += exception_stack_sizes[v];
			oist->ist[v] = t->x86_tss.ist[v] =
					(unsigned long)estacks;
		}
	}

	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

	/*
	 * <= is required because the CPU will access up to
	 * 8 bits beyond the end of the IO permission bitmap.
	 */
	for (i = 0; i <= IO_BITMAP_LONGS; i++)
		t->io_bitmap[i] = ~0UL;

	atomic_inc(&init_mm.mm_count);
	me->active_mm = &init_mm;
	BUG_ON(me->mm);
	enter_lazy_tlb(&init_mm, me);

	load_sp0(t, &current->thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

	clear_all_debug_regs();
	dbg_restore_debug_regs();

	fpu_init();
	xsave_init();

	raw_local_save_flags(kernel_eflags);

	if (is_uv_system())
		uv_cpu_init();
}

#else

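/*
 * The 32-bit variant below implements the same "CPU state barrier"
 * idea, minus the IST and syscall MSR setup; a doubly-initialized
 * CPU only warns and then spins with interrupts enabled.
 */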
void __cpuinit cpu_init(void)
{
	int cpu = smp_processor_id();
	struct task_struct *curr = current;
	struct tss_struct *t = &per_cpu(init_tss, cpu);
	struct thread_struct *thread = &curr->thread;

	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
		for (;;)
			local_irq_enable();
	}

	printk(KERN_INFO "Initializing CPU#%d\n", cpu);

	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	load_idt(&idt_descr);
	switch_to_new_gdt(cpu);

	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	atomic_inc(&init_mm.mm_count);
	curr->active_mm = &init_mm;
	BUG_ON(curr->mm);
	enter_lazy_tlb(&init_mm, curr);

	load_sp0(t, thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

#ifdef CONFIG_DOUBLEFAULT
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

	clear_all_debug_regs();
	dbg_restore_debug_regs();

	fpu_init();
	xsave_init();
}
#endif