1 /* cpu_feature_enabled() cannot be used this early */ 2 #define USE_EARLY_PGTABLE_L5 3 4 #include <linux/memblock.h> 5 #include <linux/linkage.h> 6 #include <linux/bitops.h> 7 #include <linux/kernel.h> 8 #include <linux/export.h> 9 #include <linux/percpu.h> 10 #include <linux/string.h> 11 #include <linux/ctype.h> 12 #include <linux/delay.h> 13 #include <linux/sched/mm.h> 14 #include <linux/sched/clock.h> 15 #include <linux/sched/task.h> 16 #include <linux/init.h> 17 #include <linux/kprobes.h> 18 #include <linux/kgdb.h> 19 #include <linux/smp.h> 20 #include <linux/io.h> 21 #include <linux/syscore_ops.h> 22 23 #include <asm/stackprotector.h> 24 #include <asm/perf_event.h> 25 #include <asm/mmu_context.h> 26 #include <asm/archrandom.h> 27 #include <asm/hypervisor.h> 28 #include <asm/processor.h> 29 #include <asm/tlbflush.h> 30 #include <asm/debugreg.h> 31 #include <asm/sections.h> 32 #include <asm/vsyscall.h> 33 #include <linux/topology.h> 34 #include <linux/cpumask.h> 35 #include <asm/pgtable.h> 36 #include <linux/atomic.h> 37 #include <asm/proto.h> 38 #include <asm/setup.h> 39 #include <asm/apic.h> 40 #include <asm/desc.h> 41 #include <asm/fpu/internal.h> 42 #include <asm/mtrr.h> 43 #include <asm/hwcap2.h> 44 #include <linux/numa.h> 45 #include <asm/asm.h> 46 #include <asm/bugs.h> 47 #include <asm/cpu.h> 48 #include <asm/mce.h> 49 #include <asm/msr.h> 50 #include <asm/pat.h> 51 #include <asm/microcode.h> 52 #include <asm/microcode_intel.h> 53 #include <asm/intel-family.h> 54 #include <asm/cpu_device_id.h> 55 56 #ifdef CONFIG_X86_LOCAL_APIC 57 #include <asm/uv/uv.h> 58 #endif 59 60 #include "cpu.h" 61 62 u32 elf_hwcap2 __read_mostly; 63 64 /* all of these masks are initialized in setup_cpu_local_masks() */ 65 cpumask_var_t cpu_initialized_mask; 66 cpumask_var_t cpu_callout_mask; 67 cpumask_var_t cpu_callin_mask; 68 69 /* representing cpus for which sibling maps can be computed */ 70 cpumask_var_t cpu_sibling_setup_mask; 71 72 /* Number of siblings per CPU package */ 73 int smp_num_siblings = 1; 74 EXPORT_SYMBOL(smp_num_siblings); 75 76 /* Last level cache ID of each logical CPU */ 77 DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID; 78 79 /* correctly size the local cpu masks */ 80 void __init setup_cpu_local_masks(void) 81 { 82 alloc_bootmem_cpumask_var(&cpu_initialized_mask); 83 alloc_bootmem_cpumask_var(&cpu_callin_mask); 84 alloc_bootmem_cpumask_var(&cpu_callout_mask); 85 alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask); 86 } 87 88 static void default_init(struct cpuinfo_x86 *c) 89 { 90 #ifdef CONFIG_X86_64 91 cpu_detect_cache_sizes(c); 92 #else 93 /* Not much we can do here... */ 94 /* Check if at least it has cpuid */ 95 if (c->cpuid_level == -1) { 96 /* No cpuid. It must be an ancient CPU */ 97 if (c->x86 == 4) 98 strcpy(c->x86_model_id, "486"); 99 else if (c->x86 == 3) 100 strcpy(c->x86_model_id, "386"); 101 } 102 #endif 103 } 104 105 static const struct cpu_dev default_cpu = { 106 .c_init = default_init, 107 .c_vendor = "Unknown", 108 .c_x86_vendor = X86_VENDOR_UNKNOWN, 109 }; 110 111 static const struct cpu_dev *this_cpu = &default_cpu; 112 113 DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { 114 #ifdef CONFIG_X86_64 115 /* 116 * We need valid kernel segments for data and code in long mode too 117 * IRET will check the segment types kkeil 2000/10/28 118 * Also sysret mandates a special GDT layout 119 * 120 * TLS descriptors are currently at a different place compared to i386. 121 * Hopefully nobody expects them at a fixed place (Wine?) 122 */ 123 [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff), 124 [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff), 125 [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff), 126 [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff), 127 [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff), 128 [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff), 129 #else 130 [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff), 131 [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), 132 [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff), 133 [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff), 134 /* 135 * Segments used for calling PnP BIOS have byte granularity. 136 * They code segments and data segments have fixed 64k limits, 137 * the transfer segment sizes are set at run time. 138 */ 139 /* 32-bit code */ 140 [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), 141 /* 16-bit code */ 142 [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), 143 /* 16-bit data */ 144 [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff), 145 /* 16-bit data */ 146 [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0), 147 /* 16-bit data */ 148 [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0), 149 /* 150 * The APM segments have byte granularity and their bases 151 * are set at run time. All have 64k limits. 152 */ 153 /* 32-bit code */ 154 [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), 155 /* 16-bit code */ 156 [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), 157 /* data */ 158 [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff), 159 160 [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), 161 [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), 162 GDT_STACK_CANARY_INIT 163 #endif 164 } }; 165 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); 166 167 static int __init x86_mpx_setup(char *s) 168 { 169 /* require an exact match without trailing characters */ 170 if (strlen(s)) 171 return 0; 172 173 /* do not emit a message if the feature is not present */ 174 if (!boot_cpu_has(X86_FEATURE_MPX)) 175 return 1; 176 177 setup_clear_cpu_cap(X86_FEATURE_MPX); 178 pr_info("nompx: Intel Memory Protection Extensions (MPX) disabled\n"); 179 return 1; 180 } 181 __setup("nompx", x86_mpx_setup); 182 183 #ifdef CONFIG_X86_64 184 static int __init x86_nopcid_setup(char *s) 185 { 186 /* nopcid doesn't accept parameters */ 187 if (s) 188 return -EINVAL; 189 190 /* do not emit a message if the feature is not present */ 191 if (!boot_cpu_has(X86_FEATURE_PCID)) 192 return 0; 193 194 setup_clear_cpu_cap(X86_FEATURE_PCID); 195 pr_info("nopcid: PCID feature disabled\n"); 196 return 0; 197 } 198 early_param("nopcid", x86_nopcid_setup); 199 #endif 200 201 static int __init x86_noinvpcid_setup(char *s) 202 { 203 /* noinvpcid doesn't accept parameters */ 204 if (s) 205 return -EINVAL; 206 207 /* do not emit a message if the feature is not present */ 208 if (!boot_cpu_has(X86_FEATURE_INVPCID)) 209 return 0; 210 211 setup_clear_cpu_cap(X86_FEATURE_INVPCID); 212 pr_info("noinvpcid: INVPCID feature disabled\n"); 213 return 0; 214 } 215 early_param("noinvpcid", x86_noinvpcid_setup); 216 217 #ifdef CONFIG_X86_32 218 static int cachesize_override = -1; 219 static int disable_x86_serial_nr = 1; 220 221 static int __init cachesize_setup(char *str) 222 { 223 get_option(&str, &cachesize_override); 224 return 1; 225 } 226 __setup("cachesize=", cachesize_setup); 227 228 static int __init x86_sep_setup(char *s) 229 { 230 setup_clear_cpu_cap(X86_FEATURE_SEP); 231 return 1; 232 } 233 __setup("nosep", x86_sep_setup); 234 235 /* Standard macro to see if a specific flag is changeable */ 236 static inline int flag_is_changeable_p(u32 flag) 237 { 238 u32 f1, f2; 239 240 /* 241 * Cyrix and IDT cpus allow disabling of CPUID 242 * so the code below may return different results 243 * when it is executed before and after enabling 244 * the CPUID. Add "volatile" to not allow gcc to 245 * optimize the subsequent calls to this function. 246 */ 247 asm volatile ("pushfl \n\t" 248 "pushfl \n\t" 249 "popl %0 \n\t" 250 "movl %0, %1 \n\t" 251 "xorl %2, %0 \n\t" 252 "pushl %0 \n\t" 253 "popfl \n\t" 254 "pushfl \n\t" 255 "popl %0 \n\t" 256 "popfl \n\t" 257 258 : "=&r" (f1), "=&r" (f2) 259 : "ir" (flag)); 260 261 return ((f1^f2) & flag) != 0; 262 } 263 264 /* Probe for the CPUID instruction */ 265 int have_cpuid_p(void) 266 { 267 return flag_is_changeable_p(X86_EFLAGS_ID); 268 } 269 270 static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c) 271 { 272 unsigned long lo, hi; 273 274 if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr) 275 return; 276 277 /* Disable processor serial number: */ 278 279 rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi); 280 lo |= 0x200000; 281 wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi); 282 283 pr_notice("CPU serial number disabled.\n"); 284 clear_cpu_cap(c, X86_FEATURE_PN); 285 286 /* Disabling the serial number may affect the cpuid level */ 287 c->cpuid_level = cpuid_eax(0); 288 } 289 290 static int __init x86_serial_nr_setup(char *s) 291 { 292 disable_x86_serial_nr = 0; 293 return 1; 294 } 295 __setup("serialnumber", x86_serial_nr_setup); 296 #else 297 static inline int flag_is_changeable_p(u32 flag) 298 { 299 return 1; 300 } 301 static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c) 302 { 303 } 304 #endif 305 306 static __init int setup_disable_smep(char *arg) 307 { 308 setup_clear_cpu_cap(X86_FEATURE_SMEP); 309 /* Check for things that depend on SMEP being enabled: */ 310 check_mpx_erratum(&boot_cpu_data); 311 return 1; 312 } 313 __setup("nosmep", setup_disable_smep); 314 315 static __always_inline void setup_smep(struct cpuinfo_x86 *c) 316 { 317 if (cpu_has(c, X86_FEATURE_SMEP)) 318 cr4_set_bits(X86_CR4_SMEP); 319 } 320 321 static __init int setup_disable_smap(char *arg) 322 { 323 setup_clear_cpu_cap(X86_FEATURE_SMAP); 324 return 1; 325 } 326 __setup("nosmap", setup_disable_smap); 327 328 static __always_inline void setup_smap(struct cpuinfo_x86 *c) 329 { 330 unsigned long eflags = native_save_fl(); 331 332 /* This should have been cleared long ago */ 333 BUG_ON(eflags & X86_EFLAGS_AC); 334 335 if (cpu_has(c, X86_FEATURE_SMAP)) { 336 #ifdef CONFIG_X86_SMAP 337 cr4_set_bits(X86_CR4_SMAP); 338 #else 339 cr4_clear_bits(X86_CR4_SMAP); 340 #endif 341 } 342 } 343 344 static __always_inline void setup_umip(struct cpuinfo_x86 *c) 345 { 346 /* Check the boot processor, plus build option for UMIP. */ 347 if (!cpu_feature_enabled(X86_FEATURE_UMIP)) 348 goto out; 349 350 /* Check the current processor's cpuid bits. */ 351 if (!cpu_has(c, X86_FEATURE_UMIP)) 352 goto out; 353 354 cr4_set_bits(X86_CR4_UMIP); 355 356 pr_info_once("x86/cpu: User Mode Instruction Prevention (UMIP) activated\n"); 357 358 return; 359 360 out: 361 /* 362 * Make sure UMIP is disabled in case it was enabled in a 363 * previous boot (e.g., via kexec). 364 */ 365 cr4_clear_bits(X86_CR4_UMIP); 366 } 367 368 /* 369 * Protection Keys are not available in 32-bit mode. 370 */ 371 static bool pku_disabled; 372 373 static __always_inline void setup_pku(struct cpuinfo_x86 *c) 374 { 375 /* check the boot processor, plus compile options for PKU: */ 376 if (!cpu_feature_enabled(X86_FEATURE_PKU)) 377 return; 378 /* checks the actual processor's cpuid bits: */ 379 if (!cpu_has(c, X86_FEATURE_PKU)) 380 return; 381 if (pku_disabled) 382 return; 383 384 cr4_set_bits(X86_CR4_PKE); 385 /* 386 * Seting X86_CR4_PKE will cause the X86_FEATURE_OSPKE 387 * cpuid bit to be set. We need to ensure that we 388 * update that bit in this CPU's "cpu_info". 389 */ 390 get_cpu_cap(c); 391 } 392 393 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS 394 static __init int setup_disable_pku(char *arg) 395 { 396 /* 397 * Do not clear the X86_FEATURE_PKU bit. All of the 398 * runtime checks are against OSPKE so clearing the 399 * bit does nothing. 400 * 401 * This way, we will see "pku" in cpuinfo, but not 402 * "ospke", which is exactly what we want. It shows 403 * that the CPU has PKU, but the OS has not enabled it. 404 * This happens to be exactly how a system would look 405 * if we disabled the config option. 406 */ 407 pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n"); 408 pku_disabled = true; 409 return 1; 410 } 411 __setup("nopku", setup_disable_pku); 412 #endif /* CONFIG_X86_64 */ 413 414 /* 415 * Some CPU features depend on higher CPUID levels, which may not always 416 * be available due to CPUID level capping or broken virtualization 417 * software. Add those features to this table to auto-disable them. 418 */ 419 struct cpuid_dependent_feature { 420 u32 feature; 421 u32 level; 422 }; 423 424 static const struct cpuid_dependent_feature 425 cpuid_dependent_features[] = { 426 { X86_FEATURE_MWAIT, 0x00000005 }, 427 { X86_FEATURE_DCA, 0x00000009 }, 428 { X86_FEATURE_XSAVE, 0x0000000d }, 429 { 0, 0 } 430 }; 431 432 static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn) 433 { 434 const struct cpuid_dependent_feature *df; 435 436 for (df = cpuid_dependent_features; df->feature; df++) { 437 438 if (!cpu_has(c, df->feature)) 439 continue; 440 /* 441 * Note: cpuid_level is set to -1 if unavailable, but 442 * extended_extended_level is set to 0 if unavailable 443 * and the legitimate extended levels are all negative 444 * when signed; hence the weird messing around with 445 * signs here... 446 */ 447 if (!((s32)df->level < 0 ? 448 (u32)df->level > (u32)c->extended_cpuid_level : 449 (s32)df->level > (s32)c->cpuid_level)) 450 continue; 451 452 clear_cpu_cap(c, df->feature); 453 if (!warn) 454 continue; 455 456 pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n", 457 x86_cap_flag(df->feature), df->level); 458 } 459 } 460 461 /* 462 * Naming convention should be: <Name> [(<Codename>)] 463 * This table only is used unless init_<vendor>() below doesn't set it; 464 * in particular, if CPUID levels 0x80000002..4 are supported, this 465 * isn't used 466 */ 467 468 /* Look up CPU names by table lookup. */ 469 static const char *table_lookup_model(struct cpuinfo_x86 *c) 470 { 471 #ifdef CONFIG_X86_32 472 const struct legacy_cpu_model_info *info; 473 474 if (c->x86_model >= 16) 475 return NULL; /* Range check */ 476 477 if (!this_cpu) 478 return NULL; 479 480 info = this_cpu->legacy_models; 481 482 while (info->family) { 483 if (info->family == c->x86) 484 return info->model_names[c->x86_model]; 485 info++; 486 } 487 #endif 488 return NULL; /* Not found */ 489 } 490 491 __u32 cpu_caps_cleared[NCAPINTS + NBUGINTS]; 492 __u32 cpu_caps_set[NCAPINTS + NBUGINTS]; 493 494 void load_percpu_segment(int cpu) 495 { 496 #ifdef CONFIG_X86_32 497 loadsegment(fs, __KERNEL_PERCPU); 498 #else 499 __loadsegment_simple(gs, 0); 500 wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu)); 501 #endif 502 load_stack_canary_segment(); 503 } 504 505 #ifdef CONFIG_X86_32 506 /* The 32-bit entry code needs to find cpu_entry_area. */ 507 DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area); 508 #endif 509 510 /* Load the original GDT from the per-cpu structure */ 511 void load_direct_gdt(int cpu) 512 { 513 struct desc_ptr gdt_descr; 514 515 gdt_descr.address = (long)get_cpu_gdt_rw(cpu); 516 gdt_descr.size = GDT_SIZE - 1; 517 load_gdt(&gdt_descr); 518 } 519 EXPORT_SYMBOL_GPL(load_direct_gdt); 520 521 /* Load a fixmap remapping of the per-cpu GDT */ 522 void load_fixmap_gdt(int cpu) 523 { 524 struct desc_ptr gdt_descr; 525 526 gdt_descr.address = (long)get_cpu_gdt_ro(cpu); 527 gdt_descr.size = GDT_SIZE - 1; 528 load_gdt(&gdt_descr); 529 } 530 EXPORT_SYMBOL_GPL(load_fixmap_gdt); 531 532 /* 533 * Current gdt points %fs at the "master" per-cpu area: after this, 534 * it's on the real one. 535 */ 536 void switch_to_new_gdt(int cpu) 537 { 538 /* Load the original GDT */ 539 load_direct_gdt(cpu); 540 /* Reload the per-cpu base */ 541 load_percpu_segment(cpu); 542 } 543 544 static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; 545 546 static void get_model_name(struct cpuinfo_x86 *c) 547 { 548 unsigned int *v; 549 char *p, *q, *s; 550 551 if (c->extended_cpuid_level < 0x80000004) 552 return; 553 554 v = (unsigned int *)c->x86_model_id; 555 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]); 556 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]); 557 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]); 558 c->x86_model_id[48] = 0; 559 560 /* Trim whitespace */ 561 p = q = s = &c->x86_model_id[0]; 562 563 while (*p == ' ') 564 p++; 565 566 while (*p) { 567 /* Note the last non-whitespace index */ 568 if (!isspace(*p)) 569 s = q; 570 571 *q++ = *p++; 572 } 573 574 *(s + 1) = '\0'; 575 } 576 577 void detect_num_cpu_cores(struct cpuinfo_x86 *c) 578 { 579 unsigned int eax, ebx, ecx, edx; 580 581 c->x86_max_cores = 1; 582 if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4) 583 return; 584 585 cpuid_count(4, 0, &eax, &ebx, &ecx, &edx); 586 if (eax & 0x1f) 587 c->x86_max_cores = (eax >> 26) + 1; 588 } 589 590 void cpu_detect_cache_sizes(struct cpuinfo_x86 *c) 591 { 592 unsigned int n, dummy, ebx, ecx, edx, l2size; 593 594 n = c->extended_cpuid_level; 595 596 if (n >= 0x80000005) { 597 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx); 598 c->x86_cache_size = (ecx>>24) + (edx>>24); 599 #ifdef CONFIG_X86_64 600 /* On K8 L1 TLB is inclusive, so don't count it */ 601 c->x86_tlbsize = 0; 602 #endif 603 } 604 605 if (n < 0x80000006) /* Some chips just has a large L1. */ 606 return; 607 608 cpuid(0x80000006, &dummy, &ebx, &ecx, &edx); 609 l2size = ecx >> 16; 610 611 #ifdef CONFIG_X86_64 612 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff); 613 #else 614 /* do processor-specific cache resizing */ 615 if (this_cpu->legacy_cache_size) 616 l2size = this_cpu->legacy_cache_size(c, l2size); 617 618 /* Allow user to override all this if necessary. */ 619 if (cachesize_override != -1) 620 l2size = cachesize_override; 621 622 if (l2size == 0) 623 return; /* Again, no L2 cache is possible */ 624 #endif 625 626 c->x86_cache_size = l2size; 627 } 628 629 u16 __read_mostly tlb_lli_4k[NR_INFO]; 630 u16 __read_mostly tlb_lli_2m[NR_INFO]; 631 u16 __read_mostly tlb_lli_4m[NR_INFO]; 632 u16 __read_mostly tlb_lld_4k[NR_INFO]; 633 u16 __read_mostly tlb_lld_2m[NR_INFO]; 634 u16 __read_mostly tlb_lld_4m[NR_INFO]; 635 u16 __read_mostly tlb_lld_1g[NR_INFO]; 636 637 static void cpu_detect_tlb(struct cpuinfo_x86 *c) 638 { 639 if (this_cpu->c_detect_tlb) 640 this_cpu->c_detect_tlb(c); 641 642 pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n", 643 tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES], 644 tlb_lli_4m[ENTRIES]); 645 646 pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n", 647 tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES], 648 tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]); 649 } 650 651 int detect_ht_early(struct cpuinfo_x86 *c) 652 { 653 #ifdef CONFIG_SMP 654 u32 eax, ebx, ecx, edx; 655 656 if (!cpu_has(c, X86_FEATURE_HT)) 657 return -1; 658 659 if (cpu_has(c, X86_FEATURE_CMP_LEGACY)) 660 return -1; 661 662 if (cpu_has(c, X86_FEATURE_XTOPOLOGY)) 663 return -1; 664 665 cpuid(1, &eax, &ebx, &ecx, &edx); 666 667 smp_num_siblings = (ebx & 0xff0000) >> 16; 668 if (smp_num_siblings == 1) 669 pr_info_once("CPU0: Hyper-Threading is disabled\n"); 670 #endif 671 return 0; 672 } 673 674 void detect_ht(struct cpuinfo_x86 *c) 675 { 676 #ifdef CONFIG_SMP 677 int index_msb, core_bits; 678 679 if (detect_ht_early(c) < 0) 680 return; 681 682 index_msb = get_count_order(smp_num_siblings); 683 c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb); 684 685 smp_num_siblings = smp_num_siblings / c->x86_max_cores; 686 687 index_msb = get_count_order(smp_num_siblings); 688 689 core_bits = get_count_order(c->x86_max_cores); 690 691 c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) & 692 ((1 << core_bits) - 1); 693 #endif 694 } 695 696 static void get_cpu_vendor(struct cpuinfo_x86 *c) 697 { 698 char *v = c->x86_vendor_id; 699 int i; 700 701 for (i = 0; i < X86_VENDOR_NUM; i++) { 702 if (!cpu_devs[i]) 703 break; 704 705 if (!strcmp(v, cpu_devs[i]->c_ident[0]) || 706 (cpu_devs[i]->c_ident[1] && 707 !strcmp(v, cpu_devs[i]->c_ident[1]))) { 708 709 this_cpu = cpu_devs[i]; 710 c->x86_vendor = this_cpu->c_x86_vendor; 711 return; 712 } 713 } 714 715 pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \ 716 "CPU: Your system may be unstable.\n", v); 717 718 c->x86_vendor = X86_VENDOR_UNKNOWN; 719 this_cpu = &default_cpu; 720 } 721 722 void cpu_detect(struct cpuinfo_x86 *c) 723 { 724 /* Get vendor name */ 725 cpuid(0x00000000, (unsigned int *)&c->cpuid_level, 726 (unsigned int *)&c->x86_vendor_id[0], 727 (unsigned int *)&c->x86_vendor_id[8], 728 (unsigned int *)&c->x86_vendor_id[4]); 729 730 c->x86 = 4; 731 /* Intel-defined flags: level 0x00000001 */ 732 if (c->cpuid_level >= 0x00000001) { 733 u32 junk, tfms, cap0, misc; 734 735 cpuid(0x00000001, &tfms, &misc, &junk, &cap0); 736 c->x86 = x86_family(tfms); 737 c->x86_model = x86_model(tfms); 738 c->x86_stepping = x86_stepping(tfms); 739 740 if (cap0 & (1<<19)) { 741 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8; 742 c->x86_cache_alignment = c->x86_clflush_size; 743 } 744 } 745 } 746 747 static void apply_forced_caps(struct cpuinfo_x86 *c) 748 { 749 int i; 750 751 for (i = 0; i < NCAPINTS + NBUGINTS; i++) { 752 c->x86_capability[i] &= ~cpu_caps_cleared[i]; 753 c->x86_capability[i] |= cpu_caps_set[i]; 754 } 755 } 756 757 static void init_speculation_control(struct cpuinfo_x86 *c) 758 { 759 /* 760 * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support, 761 * and they also have a different bit for STIBP support. Also, 762 * a hypervisor might have set the individual AMD bits even on 763 * Intel CPUs, for finer-grained selection of what's available. 764 */ 765 if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) { 766 set_cpu_cap(c, X86_FEATURE_IBRS); 767 set_cpu_cap(c, X86_FEATURE_IBPB); 768 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); 769 } 770 771 if (cpu_has(c, X86_FEATURE_INTEL_STIBP)) 772 set_cpu_cap(c, X86_FEATURE_STIBP); 773 774 if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) || 775 cpu_has(c, X86_FEATURE_VIRT_SSBD)) 776 set_cpu_cap(c, X86_FEATURE_SSBD); 777 778 if (cpu_has(c, X86_FEATURE_AMD_IBRS)) { 779 set_cpu_cap(c, X86_FEATURE_IBRS); 780 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); 781 } 782 783 if (cpu_has(c, X86_FEATURE_AMD_IBPB)) 784 set_cpu_cap(c, X86_FEATURE_IBPB); 785 786 if (cpu_has(c, X86_FEATURE_AMD_STIBP)) { 787 set_cpu_cap(c, X86_FEATURE_STIBP); 788 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); 789 } 790 791 if (cpu_has(c, X86_FEATURE_AMD_SSBD)) { 792 set_cpu_cap(c, X86_FEATURE_SSBD); 793 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL); 794 clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD); 795 } 796 } 797 798 void get_cpu_cap(struct cpuinfo_x86 *c) 799 { 800 u32 eax, ebx, ecx, edx; 801 802 /* Intel-defined flags: level 0x00000001 */ 803 if (c->cpuid_level >= 0x00000001) { 804 cpuid(0x00000001, &eax, &ebx, &ecx, &edx); 805 806 c->x86_capability[CPUID_1_ECX] = ecx; 807 c->x86_capability[CPUID_1_EDX] = edx; 808 } 809 810 /* Thermal and Power Management Leaf: level 0x00000006 (eax) */ 811 if (c->cpuid_level >= 0x00000006) 812 c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006); 813 814 /* Additional Intel-defined flags: level 0x00000007 */ 815 if (c->cpuid_level >= 0x00000007) { 816 cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx); 817 c->x86_capability[CPUID_7_0_EBX] = ebx; 818 c->x86_capability[CPUID_7_ECX] = ecx; 819 c->x86_capability[CPUID_7_EDX] = edx; 820 } 821 822 /* Extended state features: level 0x0000000d */ 823 if (c->cpuid_level >= 0x0000000d) { 824 cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx); 825 826 c->x86_capability[CPUID_D_1_EAX] = eax; 827 } 828 829 /* Additional Intel-defined flags: level 0x0000000F */ 830 if (c->cpuid_level >= 0x0000000F) { 831 832 /* QoS sub-leaf, EAX=0Fh, ECX=0 */ 833 cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx); 834 c->x86_capability[CPUID_F_0_EDX] = edx; 835 836 if (cpu_has(c, X86_FEATURE_CQM_LLC)) { 837 /* will be overridden if occupancy monitoring exists */ 838 c->x86_cache_max_rmid = ebx; 839 840 /* QoS sub-leaf, EAX=0Fh, ECX=1 */ 841 cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx); 842 c->x86_capability[CPUID_F_1_EDX] = edx; 843 844 if ((cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) || 845 ((cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL)) || 846 (cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)))) { 847 c->x86_cache_max_rmid = ecx; 848 c->x86_cache_occ_scale = ebx; 849 } 850 } else { 851 c->x86_cache_max_rmid = -1; 852 c->x86_cache_occ_scale = -1; 853 } 854 } 855 856 /* AMD-defined flags: level 0x80000001 */ 857 eax = cpuid_eax(0x80000000); 858 c->extended_cpuid_level = eax; 859 860 if ((eax & 0xffff0000) == 0x80000000) { 861 if (eax >= 0x80000001) { 862 cpuid(0x80000001, &eax, &ebx, &ecx, &edx); 863 864 c->x86_capability[CPUID_8000_0001_ECX] = ecx; 865 c->x86_capability[CPUID_8000_0001_EDX] = edx; 866 } 867 } 868 869 if (c->extended_cpuid_level >= 0x80000007) { 870 cpuid(0x80000007, &eax, &ebx, &ecx, &edx); 871 872 c->x86_capability[CPUID_8000_0007_EBX] = ebx; 873 c->x86_power = edx; 874 } 875 876 if (c->extended_cpuid_level >= 0x80000008) { 877 cpuid(0x80000008, &eax, &ebx, &ecx, &edx); 878 c->x86_capability[CPUID_8000_0008_EBX] = ebx; 879 } 880 881 if (c->extended_cpuid_level >= 0x8000000a) 882 c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a); 883 884 init_scattered_cpuid_features(c); 885 init_speculation_control(c); 886 887 /* 888 * Clear/Set all flags overridden by options, after probe. 889 * This needs to happen each time we re-probe, which may happen 890 * several times during CPU initialization. 891 */ 892 apply_forced_caps(c); 893 } 894 895 void get_cpu_address_sizes(struct cpuinfo_x86 *c) 896 { 897 u32 eax, ebx, ecx, edx; 898 899 if (c->extended_cpuid_level >= 0x80000008) { 900 cpuid(0x80000008, &eax, &ebx, &ecx, &edx); 901 902 c->x86_virt_bits = (eax >> 8) & 0xff; 903 c->x86_phys_bits = eax & 0xff; 904 } 905 #ifdef CONFIG_X86_32 906 else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36)) 907 c->x86_phys_bits = 36; 908 #endif 909 c->x86_cache_bits = c->x86_phys_bits; 910 } 911 912 static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) 913 { 914 #ifdef CONFIG_X86_32 915 int i; 916 917 /* 918 * First of all, decide if this is a 486 or higher 919 * It's a 486 if we can modify the AC flag 920 */ 921 if (flag_is_changeable_p(X86_EFLAGS_AC)) 922 c->x86 = 4; 923 else 924 c->x86 = 3; 925 926 for (i = 0; i < X86_VENDOR_NUM; i++) 927 if (cpu_devs[i] && cpu_devs[i]->c_identify) { 928 c->x86_vendor_id[0] = 0; 929 cpu_devs[i]->c_identify(c); 930 if (c->x86_vendor_id[0]) { 931 get_cpu_vendor(c); 932 break; 933 } 934 } 935 #endif 936 } 937 938 static const __initconst struct x86_cpu_id cpu_no_speculation[] = { 939 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL, X86_FEATURE_ANY }, 940 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_TABLET, X86_FEATURE_ANY }, 941 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL_MID, X86_FEATURE_ANY }, 942 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SALTWELL_MID, X86_FEATURE_ANY }, 943 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_BONNELL, X86_FEATURE_ANY }, 944 { X86_VENDOR_CENTAUR, 5 }, 945 { X86_VENDOR_INTEL, 5 }, 946 { X86_VENDOR_NSC, 5 }, 947 { X86_VENDOR_ANY, 4 }, 948 {} 949 }; 950 951 static const __initconst struct x86_cpu_id cpu_no_meltdown[] = { 952 { X86_VENDOR_AMD }, 953 { X86_VENDOR_HYGON }, 954 {} 955 }; 956 957 /* Only list CPUs which speculate but are non susceptible to SSB */ 958 static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = { 959 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT }, 960 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT }, 961 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_X }, 962 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_MID }, 963 { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH }, 964 { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL }, 965 { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM }, 966 { X86_VENDOR_AMD, 0x12, }, 967 { X86_VENDOR_AMD, 0x11, }, 968 { X86_VENDOR_AMD, 0x10, }, 969 { X86_VENDOR_AMD, 0xf, }, 970 {} 971 }; 972 973 static const __initconst struct x86_cpu_id cpu_no_l1tf[] = { 974 /* in addition to cpu_no_speculation */ 975 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT }, 976 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_X }, 977 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT }, 978 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT_MID }, 979 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT_MID }, 980 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT }, 981 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_X }, 982 { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_PLUS }, 983 { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL }, 984 { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM }, 985 {} 986 }; 987 988 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) 989 { 990 u64 ia32_cap = 0; 991 992 if (x86_match_cpu(cpu_no_speculation)) 993 return; 994 995 setup_force_cpu_bug(X86_BUG_SPECTRE_V1); 996 setup_force_cpu_bug(X86_BUG_SPECTRE_V2); 997 998 if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES)) 999 rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap); 1000 1001 if (!x86_match_cpu(cpu_no_spec_store_bypass) && 1002 !(ia32_cap & ARCH_CAP_SSB_NO) && 1003 !cpu_has(c, X86_FEATURE_AMD_SSB_NO)) 1004 setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS); 1005 1006 if (ia32_cap & ARCH_CAP_IBRS_ALL) 1007 setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED); 1008 1009 if (x86_match_cpu(cpu_no_meltdown)) 1010 return; 1011 1012 /* Rogue Data Cache Load? No! */ 1013 if (ia32_cap & ARCH_CAP_RDCL_NO) 1014 return; 1015 1016 setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); 1017 1018 if (x86_match_cpu(cpu_no_l1tf)) 1019 return; 1020 1021 setup_force_cpu_bug(X86_BUG_L1TF); 1022 } 1023 1024 /* 1025 * The NOPL instruction is supposed to exist on all CPUs of family >= 6; 1026 * unfortunately, that's not true in practice because of early VIA 1027 * chips and (more importantly) broken virtualizers that are not easy 1028 * to detect. In the latter case it doesn't even *fail* reliably, so 1029 * probing for it doesn't even work. Disable it completely on 32-bit 1030 * unless we can find a reliable way to detect all the broken cases. 1031 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has(). 1032 */ 1033 static void detect_nopl(void) 1034 { 1035 #ifdef CONFIG_X86_32 1036 setup_clear_cpu_cap(X86_FEATURE_NOPL); 1037 #else 1038 setup_force_cpu_cap(X86_FEATURE_NOPL); 1039 #endif 1040 } 1041 1042 /* 1043 * Do minimum CPU detection early. 1044 * Fields really needed: vendor, cpuid_level, family, model, mask, 1045 * cache alignment. 1046 * The others are not touched to avoid unwanted side effects. 1047 * 1048 * WARNING: this function is only called on the boot CPU. Don't add code 1049 * here that is supposed to run on all CPUs. 1050 */ 1051 static void __init early_identify_cpu(struct cpuinfo_x86 *c) 1052 { 1053 #ifdef CONFIG_X86_64 1054 c->x86_clflush_size = 64; 1055 c->x86_phys_bits = 36; 1056 c->x86_virt_bits = 48; 1057 #else 1058 c->x86_clflush_size = 32; 1059 c->x86_phys_bits = 32; 1060 c->x86_virt_bits = 32; 1061 #endif 1062 c->x86_cache_alignment = c->x86_clflush_size; 1063 1064 memset(&c->x86_capability, 0, sizeof(c->x86_capability)); 1065 c->extended_cpuid_level = 0; 1066 1067 if (!have_cpuid_p()) 1068 identify_cpu_without_cpuid(c); 1069 1070 /* cyrix could have cpuid enabled via c_identify()*/ 1071 if (have_cpuid_p()) { 1072 cpu_detect(c); 1073 get_cpu_vendor(c); 1074 get_cpu_cap(c); 1075 get_cpu_address_sizes(c); 1076 setup_force_cpu_cap(X86_FEATURE_CPUID); 1077 1078 if (this_cpu->c_early_init) 1079 this_cpu->c_early_init(c); 1080 1081 c->cpu_index = 0; 1082 filter_cpuid_features(c, false); 1083 1084 if (this_cpu->c_bsp_init) 1085 this_cpu->c_bsp_init(c); 1086 } else { 1087 setup_clear_cpu_cap(X86_FEATURE_CPUID); 1088 } 1089 1090 setup_force_cpu_cap(X86_FEATURE_ALWAYS); 1091 1092 cpu_set_bug_bits(c); 1093 1094 fpu__init_system(c); 1095 1096 #ifdef CONFIG_X86_32 1097 /* 1098 * Regardless of whether PCID is enumerated, the SDM says 1099 * that it can't be enabled in 32-bit mode. 1100 */ 1101 setup_clear_cpu_cap(X86_FEATURE_PCID); 1102 #endif 1103 1104 /* 1105 * Later in the boot process pgtable_l5_enabled() relies on 1106 * cpu_feature_enabled(X86_FEATURE_LA57). If 5-level paging is not 1107 * enabled by this point we need to clear the feature bit to avoid 1108 * false-positives at the later stage. 1109 * 1110 * pgtable_l5_enabled() can be false here for several reasons: 1111 * - 5-level paging is disabled compile-time; 1112 * - it's 32-bit kernel; 1113 * - machine doesn't support 5-level paging; 1114 * - user specified 'no5lvl' in kernel command line. 1115 */ 1116 if (!pgtable_l5_enabled()) 1117 setup_clear_cpu_cap(X86_FEATURE_LA57); 1118 1119 detect_nopl(); 1120 } 1121 1122 void __init early_cpu_init(void) 1123 { 1124 const struct cpu_dev *const *cdev; 1125 int count = 0; 1126 1127 #ifdef CONFIG_PROCESSOR_SELECT 1128 pr_info("KERNEL supported cpus:\n"); 1129 #endif 1130 1131 for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) { 1132 const struct cpu_dev *cpudev = *cdev; 1133 1134 if (count >= X86_VENDOR_NUM) 1135 break; 1136 cpu_devs[count] = cpudev; 1137 count++; 1138 1139 #ifdef CONFIG_PROCESSOR_SELECT 1140 { 1141 unsigned int j; 1142 1143 for (j = 0; j < 2; j++) { 1144 if (!cpudev->c_ident[j]) 1145 continue; 1146 pr_info(" %s %s\n", cpudev->c_vendor, 1147 cpudev->c_ident[j]); 1148 } 1149 } 1150 #endif 1151 } 1152 early_identify_cpu(&boot_cpu_data); 1153 } 1154 1155 static void detect_null_seg_behavior(struct cpuinfo_x86 *c) 1156 { 1157 #ifdef CONFIG_X86_64 1158 /* 1159 * Empirically, writing zero to a segment selector on AMD does 1160 * not clear the base, whereas writing zero to a segment 1161 * selector on Intel does clear the base. Intel's behavior 1162 * allows slightly faster context switches in the common case 1163 * where GS is unused by the prev and next threads. 1164 * 1165 * Since neither vendor documents this anywhere that I can see, 1166 * detect it directly instead of hardcoding the choice by 1167 * vendor. 1168 * 1169 * I've designated AMD's behavior as the "bug" because it's 1170 * counterintuitive and less friendly. 1171 */ 1172 1173 unsigned long old_base, tmp; 1174 rdmsrl(MSR_FS_BASE, old_base); 1175 wrmsrl(MSR_FS_BASE, 1); 1176 loadsegment(fs, 0); 1177 rdmsrl(MSR_FS_BASE, tmp); 1178 if (tmp != 0) 1179 set_cpu_bug(c, X86_BUG_NULL_SEG); 1180 wrmsrl(MSR_FS_BASE, old_base); 1181 #endif 1182 } 1183 1184 static void generic_identify(struct cpuinfo_x86 *c) 1185 { 1186 c->extended_cpuid_level = 0; 1187 1188 if (!have_cpuid_p()) 1189 identify_cpu_without_cpuid(c); 1190 1191 /* cyrix could have cpuid enabled via c_identify()*/ 1192 if (!have_cpuid_p()) 1193 return; 1194 1195 cpu_detect(c); 1196 1197 get_cpu_vendor(c); 1198 1199 get_cpu_cap(c); 1200 1201 get_cpu_address_sizes(c); 1202 1203 if (c->cpuid_level >= 0x00000001) { 1204 c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF; 1205 #ifdef CONFIG_X86_32 1206 # ifdef CONFIG_SMP 1207 c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); 1208 # else 1209 c->apicid = c->initial_apicid; 1210 # endif 1211 #endif 1212 c->phys_proc_id = c->initial_apicid; 1213 } 1214 1215 get_model_name(c); /* Default name */ 1216 1217 detect_null_seg_behavior(c); 1218 1219 /* 1220 * ESPFIX is a strange bug. All real CPUs have it. Paravirt 1221 * systems that run Linux at CPL > 0 may or may not have the 1222 * issue, but, even if they have the issue, there's absolutely 1223 * nothing we can do about it because we can't use the real IRET 1224 * instruction. 1225 * 1226 * NB: For the time being, only 32-bit kernels support 1227 * X86_BUG_ESPFIX as such. 64-bit kernels directly choose 1228 * whether to apply espfix using paravirt hooks. If any 1229 * non-paravirt system ever shows up that does *not* have the 1230 * ESPFIX issue, we can change this. 1231 */ 1232 #ifdef CONFIG_X86_32 1233 # ifdef CONFIG_PARAVIRT_XXL 1234 do { 1235 extern void native_iret(void); 1236 if (pv_ops.cpu.iret == native_iret) 1237 set_cpu_bug(c, X86_BUG_ESPFIX); 1238 } while (0); 1239 # else 1240 set_cpu_bug(c, X86_BUG_ESPFIX); 1241 # endif 1242 #endif 1243 } 1244 1245 static void x86_init_cache_qos(struct cpuinfo_x86 *c) 1246 { 1247 /* 1248 * The heavy lifting of max_rmid and cache_occ_scale are handled 1249 * in get_cpu_cap(). Here we just set the max_rmid for the boot_cpu 1250 * in case CQM bits really aren't there in this CPU. 1251 */ 1252 if (c != &boot_cpu_data) { 1253 boot_cpu_data.x86_cache_max_rmid = 1254 min(boot_cpu_data.x86_cache_max_rmid, 1255 c->x86_cache_max_rmid); 1256 } 1257 } 1258 1259 /* 1260 * Validate that ACPI/mptables have the same information about the 1261 * effective APIC id and update the package map. 1262 */ 1263 static void validate_apic_and_package_id(struct cpuinfo_x86 *c) 1264 { 1265 #ifdef CONFIG_SMP 1266 unsigned int apicid, cpu = smp_processor_id(); 1267 1268 apicid = apic->cpu_present_to_apicid(cpu); 1269 1270 if (apicid != c->apicid) { 1271 pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n", 1272 cpu, apicid, c->initial_apicid); 1273 } 1274 BUG_ON(topology_update_package_map(c->phys_proc_id, cpu)); 1275 #else 1276 c->logical_proc_id = 0; 1277 #endif 1278 } 1279 1280 /* 1281 * This does the hard work of actually picking apart the CPU stuff... 1282 */ 1283 static void identify_cpu(struct cpuinfo_x86 *c) 1284 { 1285 int i; 1286 1287 c->loops_per_jiffy = loops_per_jiffy; 1288 c->x86_cache_size = 0; 1289 c->x86_vendor = X86_VENDOR_UNKNOWN; 1290 c->x86_model = c->x86_stepping = 0; /* So far unknown... */ 1291 c->x86_vendor_id[0] = '\0'; /* Unset */ 1292 c->x86_model_id[0] = '\0'; /* Unset */ 1293 c->x86_max_cores = 1; 1294 c->x86_coreid_bits = 0; 1295 c->cu_id = 0xff; 1296 #ifdef CONFIG_X86_64 1297 c->x86_clflush_size = 64; 1298 c->x86_phys_bits = 36; 1299 c->x86_virt_bits = 48; 1300 #else 1301 c->cpuid_level = -1; /* CPUID not detected */ 1302 c->x86_clflush_size = 32; 1303 c->x86_phys_bits = 32; 1304 c->x86_virt_bits = 32; 1305 #endif 1306 c->x86_cache_alignment = c->x86_clflush_size; 1307 memset(&c->x86_capability, 0, sizeof(c->x86_capability)); 1308 1309 generic_identify(c); 1310 1311 if (this_cpu->c_identify) 1312 this_cpu->c_identify(c); 1313 1314 /* Clear/Set all flags overridden by options, after probe */ 1315 apply_forced_caps(c); 1316 1317 #ifdef CONFIG_X86_64 1318 c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); 1319 #endif 1320 1321 /* 1322 * Vendor-specific initialization. In this section we 1323 * canonicalize the feature flags, meaning if there are 1324 * features a certain CPU supports which CPUID doesn't 1325 * tell us, CPUID claiming incorrect flags, or other bugs, 1326 * we handle them here. 1327 * 1328 * At the end of this section, c->x86_capability better 1329 * indicate the features this CPU genuinely supports! 1330 */ 1331 if (this_cpu->c_init) 1332 this_cpu->c_init(c); 1333 1334 /* Disable the PN if appropriate */ 1335 squash_the_stupid_serial_number(c); 1336 1337 /* Set up SMEP/SMAP/UMIP */ 1338 setup_smep(c); 1339 setup_smap(c); 1340 setup_umip(c); 1341 1342 /* 1343 * The vendor-specific functions might have changed features. 1344 * Now we do "generic changes." 1345 */ 1346 1347 /* Filter out anything that depends on CPUID levels we don't have */ 1348 filter_cpuid_features(c, true); 1349 1350 /* If the model name is still unset, do table lookup. */ 1351 if (!c->x86_model_id[0]) { 1352 const char *p; 1353 p = table_lookup_model(c); 1354 if (p) 1355 strcpy(c->x86_model_id, p); 1356 else 1357 /* Last resort... */ 1358 sprintf(c->x86_model_id, "%02x/%02x", 1359 c->x86, c->x86_model); 1360 } 1361 1362 #ifdef CONFIG_X86_64 1363 detect_ht(c); 1364 #endif 1365 1366 x86_init_rdrand(c); 1367 x86_init_cache_qos(c); 1368 setup_pku(c); 1369 1370 /* 1371 * Clear/Set all flags overridden by options, need do it 1372 * before following smp all cpus cap AND. 1373 */ 1374 apply_forced_caps(c); 1375 1376 /* 1377 * On SMP, boot_cpu_data holds the common feature set between 1378 * all CPUs; so make sure that we indicate which features are 1379 * common between the CPUs. The first time this routine gets 1380 * executed, c == &boot_cpu_data. 1381 */ 1382 if (c != &boot_cpu_data) { 1383 /* AND the already accumulated flags with these */ 1384 for (i = 0; i < NCAPINTS; i++) 1385 boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; 1386 1387 /* OR, i.e. replicate the bug flags */ 1388 for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++) 1389 c->x86_capability[i] |= boot_cpu_data.x86_capability[i]; 1390 } 1391 1392 /* Init Machine Check Exception if available. */ 1393 mcheck_cpu_init(c); 1394 1395 select_idle_routine(c); 1396 1397 #ifdef CONFIG_NUMA 1398 numa_add_cpu(smp_processor_id()); 1399 #endif 1400 } 1401 1402 /* 1403 * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions 1404 * on 32-bit kernels: 1405 */ 1406 #ifdef CONFIG_X86_32 1407 void enable_sep_cpu(void) 1408 { 1409 struct tss_struct *tss; 1410 int cpu; 1411 1412 if (!boot_cpu_has(X86_FEATURE_SEP)) 1413 return; 1414 1415 cpu = get_cpu(); 1416 tss = &per_cpu(cpu_tss_rw, cpu); 1417 1418 /* 1419 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field -- 1420 * see the big comment in struct x86_hw_tss's definition. 1421 */ 1422 1423 tss->x86_tss.ss1 = __KERNEL_CS; 1424 wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0); 1425 wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0); 1426 wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0); 1427 1428 put_cpu(); 1429 } 1430 #endif 1431 1432 void __init identify_boot_cpu(void) 1433 { 1434 identify_cpu(&boot_cpu_data); 1435 #ifdef CONFIG_X86_32 1436 sysenter_setup(); 1437 enable_sep_cpu(); 1438 #endif 1439 cpu_detect_tlb(&boot_cpu_data); 1440 } 1441 1442 void identify_secondary_cpu(struct cpuinfo_x86 *c) 1443 { 1444 BUG_ON(c == &boot_cpu_data); 1445 identify_cpu(c); 1446 #ifdef CONFIG_X86_32 1447 enable_sep_cpu(); 1448 #endif 1449 mtrr_ap_init(); 1450 validate_apic_and_package_id(c); 1451 x86_spec_ctrl_setup_ap(); 1452 } 1453 1454 static __init int setup_noclflush(char *arg) 1455 { 1456 setup_clear_cpu_cap(X86_FEATURE_CLFLUSH); 1457 setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT); 1458 return 1; 1459 } 1460 __setup("noclflush", setup_noclflush); 1461 1462 void print_cpu_info(struct cpuinfo_x86 *c) 1463 { 1464 const char *vendor = NULL; 1465 1466 if (c->x86_vendor < X86_VENDOR_NUM) { 1467 vendor = this_cpu->c_vendor; 1468 } else { 1469 if (c->cpuid_level >= 0) 1470 vendor = c->x86_vendor_id; 1471 } 1472 1473 if (vendor && !strstr(c->x86_model_id, vendor)) 1474 pr_cont("%s ", vendor); 1475 1476 if (c->x86_model_id[0]) 1477 pr_cont("%s", c->x86_model_id); 1478 else 1479 pr_cont("%d86", c->x86); 1480 1481 pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model); 1482 1483 if (c->x86_stepping || c->cpuid_level >= 0) 1484 pr_cont(", stepping: 0x%x)\n", c->x86_stepping); 1485 else 1486 pr_cont(")\n"); 1487 } 1488 1489 /* 1490 * clearcpuid= was already parsed in fpu__init_parse_early_param. 1491 * But we need to keep a dummy __setup around otherwise it would 1492 * show up as an environment variable for init. 1493 */ 1494 static __init int setup_clearcpuid(char *arg) 1495 { 1496 return 1; 1497 } 1498 __setup("clearcpuid=", setup_clearcpuid); 1499 1500 #ifdef CONFIG_X86_64 1501 DEFINE_PER_CPU_FIRST(struct fixed_percpu_data, 1502 fixed_percpu_data) __aligned(PAGE_SIZE) __visible; 1503 EXPORT_PER_CPU_SYMBOL_GPL(fixed_percpu_data); 1504 1505 /* 1506 * The following percpu variables are hot. Align current_task to 1507 * cacheline size such that they fall in the same cacheline. 1508 */ 1509 DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned = 1510 &init_task; 1511 EXPORT_PER_CPU_SYMBOL(current_task); 1512 1513 DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr); 1514 DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1; 1515 1516 DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT; 1517 EXPORT_PER_CPU_SYMBOL(__preempt_count); 1518 1519 /* May not be marked __init: used by software suspend */ 1520 void syscall_init(void) 1521 { 1522 wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS); 1523 wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64); 1524 1525 #ifdef CONFIG_IA32_EMULATION 1526 wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat); 1527 /* 1528 * This only works on Intel CPUs. 1529 * On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP. 1530 * This does not cause SYSENTER to jump to the wrong location, because 1531 * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit). 1532 */ 1533 wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS); 1534 wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 1535 (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1)); 1536 wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat); 1537 #else 1538 wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret); 1539 wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG); 1540 wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL); 1541 wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL); 1542 #endif 1543 1544 /* Flags to clear on syscall */ 1545 wrmsrl(MSR_SYSCALL_MASK, 1546 X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF| 1547 X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT); 1548 } 1549 1550 DEFINE_PER_CPU(int, debug_stack_usage); 1551 DEFINE_PER_CPU(u32, debug_idt_ctr); 1552 1553 void debug_stack_set_zero(void) 1554 { 1555 this_cpu_inc(debug_idt_ctr); 1556 load_current_idt(); 1557 } 1558 NOKPROBE_SYMBOL(debug_stack_set_zero); 1559 1560 void debug_stack_reset(void) 1561 { 1562 if (WARN_ON(!this_cpu_read(debug_idt_ctr))) 1563 return; 1564 if (this_cpu_dec_return(debug_idt_ctr) == 0) 1565 load_current_idt(); 1566 } 1567 NOKPROBE_SYMBOL(debug_stack_reset); 1568 1569 #else /* CONFIG_X86_64 */ 1570 1571 DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; 1572 EXPORT_PER_CPU_SYMBOL(current_task); 1573 DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT; 1574 EXPORT_PER_CPU_SYMBOL(__preempt_count); 1575 1576 /* 1577 * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find 1578 * the top of the kernel stack. Use an extra percpu variable to track the 1579 * top of the kernel stack directly. 1580 */ 1581 DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) = 1582 (unsigned long)&init_thread_union + THREAD_SIZE; 1583 EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack); 1584 1585 #ifdef CONFIG_STACKPROTECTOR 1586 DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary); 1587 #endif 1588 1589 #endif /* CONFIG_X86_64 */ 1590 1591 /* 1592 * Clear all 6 debug registers: 1593 */ 1594 static void clear_all_debug_regs(void) 1595 { 1596 int i; 1597 1598 for (i = 0; i < 8; i++) { 1599 /* Ignore db4, db5 */ 1600 if ((i == 4) || (i == 5)) 1601 continue; 1602 1603 set_debugreg(0, i); 1604 } 1605 } 1606 1607 #ifdef CONFIG_KGDB 1608 /* 1609 * Restore debug regs if using kgdbwait and you have a kernel debugger 1610 * connection established. 1611 */ 1612 static void dbg_restore_debug_regs(void) 1613 { 1614 if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break)) 1615 arch_kgdb_ops.correct_hw_break(); 1616 } 1617 #else /* ! CONFIG_KGDB */ 1618 #define dbg_restore_debug_regs() 1619 #endif /* ! CONFIG_KGDB */ 1620 1621 static void wait_for_master_cpu(int cpu) 1622 { 1623 #ifdef CONFIG_SMP 1624 /* 1625 * wait for ACK from master CPU before continuing 1626 * with AP initialization 1627 */ 1628 WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)); 1629 while (!cpumask_test_cpu(cpu, cpu_callout_mask)) 1630 cpu_relax(); 1631 #endif 1632 } 1633 1634 #ifdef CONFIG_X86_64 1635 static void setup_getcpu(int cpu) 1636 { 1637 unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu)); 1638 struct desc_struct d = { }; 1639 1640 if (boot_cpu_has(X86_FEATURE_RDTSCP)) 1641 write_rdtscp_aux(cpudata); 1642 1643 /* Store CPU and node number in limit. */ 1644 d.limit0 = cpudata; 1645 d.limit1 = cpudata >> 16; 1646 1647 d.type = 5; /* RO data, expand down, accessed */ 1648 d.dpl = 3; /* Visible to user code */ 1649 d.s = 1; /* Not a system segment */ 1650 d.p = 1; /* Present */ 1651 d.d = 1; /* 32-bit */ 1652 1653 write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S); 1654 } 1655 #endif 1656 1657 /* 1658 * cpu_init() initializes state that is per-CPU. Some data is already 1659 * initialized (naturally) in the bootstrap process, such as the GDT 1660 * and IDT. We reload them nevertheless, this function acts as a 1661 * 'CPU state barrier', nothing should get across. 1662 */ 1663 #ifdef CONFIG_X86_64 1664 1665 void cpu_init(void) 1666 { 1667 int cpu = raw_smp_processor_id(); 1668 struct task_struct *me; 1669 struct tss_struct *t; 1670 int i; 1671 1672 wait_for_master_cpu(cpu); 1673 1674 /* 1675 * Initialize the CR4 shadow before doing anything that could 1676 * try to read it. 1677 */ 1678 cr4_init_shadow(); 1679 1680 if (cpu) 1681 load_ucode_ap(); 1682 1683 t = &per_cpu(cpu_tss_rw, cpu); 1684 1685 #ifdef CONFIG_NUMA 1686 if (this_cpu_read(numa_node) == 0 && 1687 early_cpu_to_node(cpu) != NUMA_NO_NODE) 1688 set_numa_node(early_cpu_to_node(cpu)); 1689 #endif 1690 setup_getcpu(cpu); 1691 1692 me = current; 1693 1694 pr_debug("Initializing CPU#%d\n", cpu); 1695 1696 cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); 1697 1698 /* 1699 * Initialize the per-CPU GDT with the boot GDT, 1700 * and set up the GDT descriptor: 1701 */ 1702 1703 switch_to_new_gdt(cpu); 1704 loadsegment(fs, 0); 1705 1706 load_current_idt(); 1707 1708 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); 1709 syscall_init(); 1710 1711 wrmsrl(MSR_FS_BASE, 0); 1712 wrmsrl(MSR_KERNEL_GS_BASE, 0); 1713 barrier(); 1714 1715 x86_configure_nx(); 1716 x2apic_setup(); 1717 1718 /* 1719 * set up and load the per-CPU TSS 1720 */ 1721 if (!t->x86_tss.ist[0]) { 1722 t->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF); 1723 t->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI); 1724 t->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB); 1725 t->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE); 1726 } 1727 1728 t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET; 1729 1730 /* 1731 * <= is required because the CPU will access up to 1732 * 8 bits beyond the end of the IO permission bitmap. 1733 */ 1734 for (i = 0; i <= IO_BITMAP_LONGS; i++) 1735 t->io_bitmap[i] = ~0UL; 1736 1737 mmgrab(&init_mm); 1738 me->active_mm = &init_mm; 1739 BUG_ON(me->mm); 1740 initialize_tlbstate_and_flush(); 1741 enter_lazy_tlb(&init_mm, me); 1742 1743 /* 1744 * Initialize the TSS. sp0 points to the entry trampoline stack 1745 * regardless of what task is running. 1746 */ 1747 set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); 1748 load_TR_desc(); 1749 load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1)); 1750 1751 load_mm_ldt(&init_mm); 1752 1753 clear_all_debug_regs(); 1754 dbg_restore_debug_regs(); 1755 1756 fpu__init_cpu(); 1757 1758 if (is_uv_system()) 1759 uv_cpu_init(); 1760 1761 load_fixmap_gdt(cpu); 1762 } 1763 1764 #else 1765 1766 void cpu_init(void) 1767 { 1768 int cpu = smp_processor_id(); 1769 struct task_struct *curr = current; 1770 struct tss_struct *t = &per_cpu(cpu_tss_rw, cpu); 1771 1772 wait_for_master_cpu(cpu); 1773 1774 /* 1775 * Initialize the CR4 shadow before doing anything that could 1776 * try to read it. 1777 */ 1778 cr4_init_shadow(); 1779 1780 show_ucode_info_early(); 1781 1782 pr_info("Initializing CPU#%d\n", cpu); 1783 1784 if (cpu_feature_enabled(X86_FEATURE_VME) || 1785 boot_cpu_has(X86_FEATURE_TSC) || 1786 boot_cpu_has(X86_FEATURE_DE)) 1787 cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); 1788 1789 load_current_idt(); 1790 switch_to_new_gdt(cpu); 1791 1792 /* 1793 * Set up and load the per-CPU TSS and LDT 1794 */ 1795 mmgrab(&init_mm); 1796 curr->active_mm = &init_mm; 1797 BUG_ON(curr->mm); 1798 initialize_tlbstate_and_flush(); 1799 enter_lazy_tlb(&init_mm, curr); 1800 1801 /* 1802 * Initialize the TSS. sp0 points to the entry trampoline stack 1803 * regardless of what task is running. 1804 */ 1805 set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); 1806 load_TR_desc(); 1807 load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1)); 1808 1809 load_mm_ldt(&init_mm); 1810 1811 t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET; 1812 1813 #ifdef CONFIG_DOUBLEFAULT 1814 /* Set up doublefault TSS pointer in the GDT */ 1815 __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss); 1816 #endif 1817 1818 clear_all_debug_regs(); 1819 dbg_restore_debug_regs(); 1820 1821 fpu__init_cpu(); 1822 1823 load_fixmap_gdt(cpu); 1824 } 1825 #endif 1826 1827 /* 1828 * The microcode loader calls this upon late microcode load to recheck features, 1829 * only when microcode has been updated. Caller holds microcode_mutex and CPU 1830 * hotplug lock. 1831 */ 1832 void microcode_check(void) 1833 { 1834 struct cpuinfo_x86 info; 1835 1836 perf_check_microcode(); 1837 1838 /* Reload CPUID max function as it might've changed. */ 1839 info.cpuid_level = cpuid_eax(0); 1840 1841 /* 1842 * Copy all capability leafs to pick up the synthetic ones so that 1843 * memcmp() below doesn't fail on that. The ones coming from CPUID will 1844 * get overwritten in get_cpu_cap(). 1845 */ 1846 memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)); 1847 1848 get_cpu_cap(&info); 1849 1850 if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability))) 1851 return; 1852 1853 pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n"); 1854 pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n"); 1855 } 1856