/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/memblock.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/sched/mm.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>

#include <asm/stackprotector.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/archrandom.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>
#include <asm/sections.h>
#include <asm/vsyscall.h>
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <asm/pgtable.h>
#include <linux/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
#include <asm/mtrr.h>
#include <asm/hwcap2.h>
#include <linux/numa.h>
#include <asm/asm.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/microcode.h>
#include <asm/microcode_intel.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
#endif

#include "cpu.h"

u32 elf_hwcap2 __read_mostly;

/* all of these masks are initialized in setup_cpu_local_masks() */
cpumask_var_t cpu_initialized_mask;
cpumask_var_t cpu_callout_mask;
cpumask_var_t cpu_callin_mask;

/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;

/* correctly size the local cpu masks */
void __init setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

static void default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	cpu_detect_cache_sizes(c);
#else
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
#endif
}

static const struct cpu_dev default_cpu = {
	.c_init		= default_init,
	.c_vendor	= "Unknown",
	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
};

static const struct cpu_dev *this_cpu = &default_cpu;

DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
	/*
	 * We need valid kernel segments for data and code in long mode too.
	 * IRET will check the segment types (kkeil 2000/10/28).
	 * Also SYSRET mandates a special GDT layout.
	 *
	 * TLS descriptors are currently at a different place compared to i386.
	 * Hopefully nobody expects them at a fixed place (Wine?)
	 */
	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
#else
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * The code and data segments have fixed 64k limits; the transfer
	 * segment sizes are set at run time.
	 */
	/* 32-bit code */
	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(0x0092, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(0x0092, 0, 0),
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	/* 32-bit code */
	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* data */
	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(0x4092, 0, 0xffff),

	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	GDT_STACK_CANARY_INIT
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

static int __init x86_mpx_setup(char *s)
{
	/* require an exact match without trailing characters */
	if (strlen(s))
		return 0;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_MPX))
		return 1;

	setup_clear_cpu_cap(X86_FEATURE_MPX);
	pr_info("nompx: Intel Memory Protection Extensions (MPX) disabled\n");
	return 1;
}
__setup("nompx", x86_mpx_setup);

#ifdef CONFIG_X86_64
static int __init x86_nopcid_setup(char *s)
{
	/* nopcid doesn't accept parameters */
	if (s)
		return -EINVAL;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_PCID))
		return 0;

	setup_clear_cpu_cap(X86_FEATURE_PCID);
	pr_info("nopcid: PCID feature disabled\n");
	return 0;
}
early_param("nopcid", x86_nopcid_setup);
#endif

static int __init x86_noinvpcid_setup(char *s)
{
	/* noinvpcid doesn't accept parameters */
	if (s)
		return -EINVAL;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_INVPCID))
		return 0;

	setup_clear_cpu_cap(X86_FEATURE_INVPCID);
	pr_info("noinvpcid: INVPCID feature disabled\n");
	return 0;
}
early_param("noinvpcid", x86_noinvpcid_setup);

#ifdef CONFIG_X86_32
static int cachesize_override = -1;
static int disable_x86_serial_nr = 1;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);
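/*
 * Example (illustrative): booting with "cachesize=1024" makes get_option()
 * above store 1024 in cachesize_override, which cpu_detect_cache_sizes()
 * below then uses in place of the detected L2 size (both are in KB).
 */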
static int __init x86_sep_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_SEP);
	return 1;
}
__setup("nosep", x86_sep_setup);

/* Standard routine to test whether a specific EFLAGS flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	/*
	 * Cyrix and IDT cpus allow disabling of CPUID,
	 * so the code below may return different results
	 * when it is executed before and after enabling
	 * the CPUID.  Add "volatile" so gcc cannot
	 * optimize away the subsequent calls to this function.
	 */
	asm volatile ("pushfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "movl %0, %1	\n\t"
		      "xorl %2, %0	\n\t"
		      "pushl %0		\n\t"
		      "popfl		\n\t"
		      "pushfl		\n\t"
		      "popl %0		\n\t"
		      "popfl		\n\t"

		      : "=&r" (f1), "=&r" (f2)
		      : "ir" (flag));

	return ((f1 ^ f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
int have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
		return;

	/* Disable processor serial number: */

	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
	lo |= 0x200000;
	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);

	pr_notice("CPU serial number disabled.\n");
	clear_cpu_cap(c, X86_FEATURE_PN);

	/* Disabling the serial number may affect the cpuid level */
	c->cpuid_level = cpuid_eax(0);
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
static inline int flag_is_changeable_p(u32 flag)
{
	return 1;
}
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
#endif

static __init int setup_disable_smep(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMEP);
	/* Check for things that depend on SMEP being enabled: */
	check_mpx_erratum(&boot_cpu_data);
	return 1;
}
__setup("nosmep", setup_disable_smep);

static __always_inline void setup_smep(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_SMEP))
		cr4_set_bits(X86_CR4_SMEP);
}

static __init int setup_disable_smap(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMAP);
	return 1;
}
__setup("nosmap", setup_disable_smap);

static __always_inline void setup_smap(struct cpuinfo_x86 *c)
{
	unsigned long eflags = native_save_fl();

	/* This should have been cleared long ago */
	BUG_ON(eflags & X86_EFLAGS_AC);

	if (cpu_has(c, X86_FEATURE_SMAP)) {
#ifdef CONFIG_X86_SMAP
		cr4_set_bits(X86_CR4_SMAP);
#else
		cr4_clear_bits(X86_CR4_SMAP);
#endif
	}
}

static __always_inline void setup_umip(struct cpuinfo_x86 *c)
{
	/* Check the boot processor, plus build option for UMIP. */
	if (!cpu_feature_enabled(X86_FEATURE_UMIP))
		goto out;

	/* Check the current processor's cpuid bits. */
	if (!cpu_has(c, X86_FEATURE_UMIP))
		goto out;

	cr4_set_bits(X86_CR4_UMIP);

	pr_info_once("x86/cpu: User Mode Instruction Prevention (UMIP) activated\n");

	return;

out:
	/*
	 * Make sure UMIP is disabled in case it was enabled in a
	 * previous boot (e.g., via kexec).
	 */
	cr4_clear_bits(X86_CR4_UMIP);
}
/*
 * Protection Keys are not available in 32-bit mode.
 */
static bool pku_disabled;

static __always_inline void setup_pku(struct cpuinfo_x86 *c)
{
	struct pkru_state *pk;

	/* check the boot processor, plus compile options for PKU: */
	if (!cpu_feature_enabled(X86_FEATURE_PKU))
		return;
	/* checks the actual processor's cpuid bits: */
	if (!cpu_has(c, X86_FEATURE_PKU))
		return;
	if (pku_disabled)
		return;

	cr4_set_bits(X86_CR4_PKE);
	pk = get_xsave_addr(&init_fpstate.xsave, XFEATURE_PKRU);
	if (pk)
		pk->pkru = init_pkru_value;
	/*
	 * Setting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
	 * cpuid bit to be set.  We need to ensure that we
	 * update that bit in this CPU's "cpu_info".
	 */
	get_cpu_cap(c);
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static __init int setup_disable_pku(char *arg)
{
	/*
	 * Do not clear the X86_FEATURE_PKU bit.  All of the
	 * runtime checks are against OSPKE so clearing the
	 * bit does nothing.
	 *
	 * This way, we will see "pku" in cpuinfo, but not
	 * "ospke", which is exactly what we want.  It shows
	 * that the CPU has PKU, but the OS has not enabled it.
	 * This happens to be exactly how a system would look
	 * if we disabled the config option.
	 */
	pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n");
	pku_disabled = true;
	return 1;
}
__setup("nopku", setup_disable_pku);
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */

/*
 * Some CPU features depend on higher CPUID levels, which may not always
 * be available due to CPUID level capping or broken virtualization
 * software.  Add those features to this table to auto-disable them.
 */
struct cpuid_dependent_feature {
	u32 feature;
	u32 level;
};

static const struct cpuid_dependent_feature
cpuid_dependent_features[] = {
	{ X86_FEATURE_MWAIT,	0x00000005 },
	{ X86_FEATURE_DCA,	0x00000009 },
	{ X86_FEATURE_XSAVE,	0x0000000d },
	{ 0, 0 }
};

static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{
	const struct cpuid_dependent_feature *df;

	for (df = cpuid_dependent_features; df->feature; df++) {

		if (!cpu_has(c, df->feature))
			continue;
		/*
		 * Note: cpuid_level is set to -1 if unavailable, but
		 * extended_cpuid_level is set to 0 if unavailable
		 * and the legitimate extended levels are all negative
		 * when signed; hence the weird messing around with
		 * signs here...
		 */
		if (!((s32)df->level < 0 ?
		     (u32)df->level > (u32)c->extended_cpuid_level :
		     (s32)df->level > (s32)c->cpuid_level))
			continue;

		clear_cpu_cap(c, df->feature);
		if (!warn)
			continue;

		pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
			x86_cap_flag(df->feature), df->level);
	}
}
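/*
 * Worked example for the sign trick above: X86_FEATURE_XSAVE has
 * df->level = 0x0000000d, which is positive as an s32 and is therefore
 * compared against c->cpuid_level.  An extended level such as 0x80000001
 * is negative as an s32, so it would instead be compared, unsigned,
 * against c->extended_cpuid_level.
 */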
/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the
 * model name; in particular, if CPUID levels 0x80000002..4 are
 * supported, this isn't used.
 */

/* Look up CPU names by table lookup. */
static const char *table_lookup_model(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	const struct legacy_cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->legacy_models;

	while (info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
#endif
	return NULL;		/* Not found */
}

__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
__u32 cpu_caps_set[NCAPINTS + NBUGINTS];

void load_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	loadsegment(fs, __KERNEL_PERCPU);
#else
	__loadsegment_simple(gs, 0);
	wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
#endif
	load_stack_canary_segment();
}

#ifdef CONFIG_X86_32
/* The 32-bit entry code needs to find cpu_entry_area. */
DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
#endif

/* Load the original GDT from the per-cpu structure */
void load_direct_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_rw(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
EXPORT_SYMBOL_GPL(load_direct_gdt);

/* Load a fixmap remapping of the per-cpu GDT */
void load_fixmap_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_ro(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
EXPORT_SYMBOL_GPL(load_fixmap_gdt);

/*
 * Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one.
 */
void switch_to_new_gdt(int cpu)
{
	/* Load the original GDT */
	load_direct_gdt(cpu);
	/* Reload the per-cpu base */
	load_percpu_segment(cpu);
}

static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q, *s;

	if (c->extended_cpuid_level < 0x80000004)
		return;

	v = (unsigned int *)c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Trim whitespace */
	p = q = s = &c->x86_model_id[0];

	while (*p == ' ')
		p++;

	while (*p) {
		/* Note the last non-whitespace index */
		if (!isspace(*p))
			s = q;

		*q++ = *p++;
	}

	*(s + 1) = '\0';
}

void detect_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	c->x86_max_cores = 1;
	if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
		return;

	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
	if (eax & 0x1f)
		c->x86_max_cores = (eax >> 26) + 1;
}

void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ebx, ecx, edx, l2size;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		c->x86_cache_size = (ecx >> 24) + (edx >> 24);
#ifdef CONFIG_X86_64
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
#endif
	}
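	/*
	 * CPUID leaf 0x80000006: ECX[31:16] is the unified L2 size in KB;
	 * on 64-bit, EBX[27:16] and EBX[11:0] (the 4K-page L2 data and
	 * instruction TLB entry counts) are summed into x86_tlbsize below.
	 */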
	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
	l2size = ecx >> 16;

#ifdef CONFIG_X86_64
	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
	/* do processor-specific cache resizing */
	if (this_cpu->legacy_cache_size)
		l2size = this_cpu->legacy_cache_size(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */
#endif

	c->x86_cache_size = l2size;
}

u16 __read_mostly tlb_lli_4k[NR_INFO];
u16 __read_mostly tlb_lli_2m[NR_INFO];
u16 __read_mostly tlb_lli_4m[NR_INFO];
u16 __read_mostly tlb_lld_4k[NR_INFO];
u16 __read_mostly tlb_lld_2m[NR_INFO];
u16 __read_mostly tlb_lld_4m[NR_INFO];
u16 __read_mostly tlb_lld_1g[NR_INFO];

static void cpu_detect_tlb(struct cpuinfo_x86 *c)
{
	if (this_cpu->c_detect_tlb)
		this_cpu->c_detect_tlb(c);

	pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
		tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
		tlb_lli_4m[ENTRIES]);

	pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
		tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
		tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
}

int detect_ht_early(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32 eax, ebx, ecx, edx;

	if (!cpu_has(c, X86_FEATURE_HT))
		return -1;

	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		return -1;

	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
		return -1;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	smp_num_siblings = (ebx & 0xff0000) >> 16;
	if (smp_num_siblings == 1)
		pr_info_once("CPU0: Hyper-Threading is disabled\n");
#endif
	return 0;
}

void detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	int index_msb, core_bits;

	if (detect_ht_early(c) < 0)
		return;

	index_msb = get_count_order(smp_num_siblings);
	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);

	smp_num_siblings = smp_num_siblings / c->x86_max_cores;

	index_msb = get_count_order(smp_num_siblings);

	core_bits = get_count_order(c->x86_max_cores);

	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
		((1 << core_bits) - 1);
#endif
}

static void get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;
	int i;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (!cpu_devs[i])
			break;

		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {

			this_cpu = cpu_devs[i];
			c->x86_vendor = this_cpu->c_x86_vendor;
			return;
		}
	}

	pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
		    "CPU: Your system may be unstable.\n", v);

	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}
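/*
 * Note for cpu_detect() below: CPUID leaf 0 returns the vendor string in
 * EBX, EDX, ECX (in that order), which is why the output pointers target
 * bytes 0, 8 and 4 of x86_vendor_id -- e.g. "Genu" (EBX), "ineI" (EDX)
 * and "ntel" (ECX) reassemble to "GenuineIntel".
 */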
void cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	c->x86 = 4;
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;

		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86		= x86_family(tfms);
		c->x86_model	= x86_model(tfms);
		c->x86_stepping	= x86_stepping(tfms);

		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
		}
	}
}

static void apply_forced_caps(struct cpuinfo_x86 *c)
{
	int i;

	for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
		c->x86_capability[i] &= ~cpu_caps_cleared[i];
		c->x86_capability[i] |= cpu_caps_set[i];
	}
}

static void init_speculation_control(struct cpuinfo_x86 *c)
{
	/*
	 * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
	 * and they also have a different bit for STIBP support.  Also,
	 * a hypervisor might have set the individual AMD bits even on
	 * Intel CPUs, for finer-grained selection of what's available.
	 */
	if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
		set_cpu_cap(c, X86_FEATURE_IBRS);
		set_cpu_cap(c, X86_FEATURE_IBPB);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}

	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
		set_cpu_cap(c, X86_FEATURE_STIBP);

	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
	    cpu_has(c, X86_FEATURE_VIRT_SSBD))
		set_cpu_cap(c, X86_FEATURE_SSBD);

	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
		set_cpu_cap(c, X86_FEATURE_IBRS);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}

	if (cpu_has(c, X86_FEATURE_AMD_IBPB))
		set_cpu_cap(c, X86_FEATURE_IBPB);

	if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
		set_cpu_cap(c, X86_FEATURE_STIBP);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}

	if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
		set_cpu_cap(c, X86_FEATURE_SSBD);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
		clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
	}
}

void get_cpu_cap(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_1_ECX] = ecx;
		c->x86_capability[CPUID_1_EDX] = edx;
	}

	/* Thermal and Power Management Leaf: level 0x00000006 (eax) */
	if (c->cpuid_level >= 0x00000006)
		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);

	/* Additional Intel-defined flags: level 0x00000007 */
	if (c->cpuid_level >= 0x00000007) {
		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_7_0_EBX] = ebx;
		c->x86_capability[CPUID_7_ECX] = ecx;
		c->x86_capability[CPUID_7_EDX] = edx;
	}

	/* Extended state features: level 0x0000000d */
	if (c->cpuid_level >= 0x0000000d) {
		cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_D_1_EAX] = eax;
	}

	/* Additional Intel-defined flags: level 0x0000000F */
	if (c->cpuid_level >= 0x0000000F) {

		/* QoS sub-leaf, EAX=0Fh, ECX=0 */
		cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_F_0_EDX] = edx;

		if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
			/* will be overridden if occupancy monitoring exists */
			c->x86_cache_max_rmid = ebx;

			/* QoS sub-leaf, EAX=0Fh, ECX=1 */
			cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
			c->x86_capability[CPUID_F_1_EDX] = edx;

			if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) ||
			    cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) ||
			    cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) {
				c->x86_cache_max_rmid = ecx;
				c->x86_cache_occ_scale = ebx;
			}
		} else {
			c->x86_cache_max_rmid = -1;
			c->x86_cache_occ_scale = -1;
		}
	}
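	/*
	 * CPUID leaf 0x80000000 returns the highest supported extended
	 * leaf in EAX; the (eax & 0xffff0000) == 0x80000000 check below
	 * guards against CPUs that return garbage for unsupported
	 * extended leaves.
	 */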
	/* AMD-defined flags: level 0x80000001 */
	eax = cpuid_eax(0x80000000);
	c->extended_cpuid_level = eax;

	if ((eax & 0xffff0000) == 0x80000000) {
		if (eax >= 0x80000001) {
			cpuid(0x80000001, &eax, &ebx, &ecx, &edx);

			c->x86_capability[CPUID_8000_0001_ECX] = ecx;
			c->x86_capability[CPUID_8000_0001_EDX] = edx;
		}
	}

	if (c->extended_cpuid_level >= 0x80000007) {
		cpuid(0x80000007, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_8000_0007_EBX] = ebx;
		c->x86_power = edx;
	}

	if (c->extended_cpuid_level >= 0x80000008) {
		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
	}

	if (c->extended_cpuid_level >= 0x8000000a)
		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);

	init_scattered_cpuid_features(c);
	init_speculation_control(c);

	/*
	 * Clear/Set all flags overridden by options, after probe.
	 * This needs to happen each time we re-probe, which may happen
	 * several times during CPU initialization.
	 */
	apply_forced_caps(c);
}

void get_cpu_address_sizes(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	if (c->extended_cpuid_level >= 0x80000008) {
		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);

		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
#ifdef CONFIG_X86_32
	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
		c->x86_phys_bits = 36;
#endif
	c->x86_cache_bits = c->x86_phys_bits;
}

static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	int i;

	/*
	 * First of all, decide if this is a 486 or higher.
	 * It's a 486 if we can modify the AC flag.
	 */
	if (flag_is_changeable_p(X86_EFLAGS_AC))
		c->x86 = 4;
	else
		c->x86 = 3;

	for (i = 0; i < X86_VENDOR_NUM; i++)
		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
			c->x86_vendor_id[0] = 0;
			cpu_devs[i]->c_identify(c);
			if (c->x86_vendor_id[0]) {
				get_cpu_vendor(c);
				break;
			}
		}
#endif
}

static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_SALTWELL,		X86_FEATURE_ANY },
	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_SALTWELL_TABLET,	X86_FEATURE_ANY },
	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_BONNELL_MID,		X86_FEATURE_ANY },
	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_SALTWELL_MID,	X86_FEATURE_ANY },
	{ X86_VENDOR_INTEL,	6, INTEL_FAM6_ATOM_BONNELL,		X86_FEATURE_ANY },
	{ X86_VENDOR_CENTAUR,	5 },
	{ X86_VENDOR_INTEL,	5 },
	{ X86_VENDOR_NSC,	5 },
	{ X86_VENDOR_ANY,	4 },
	{}
};

static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
	{ X86_VENDOR_AMD },
	{ X86_VENDOR_HYGON },
	{}
};

/* Only list CPUs which speculate but are not susceptible to SSB */
static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT	},
	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_AIRMONT		},
	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT_X	},
	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT_MID	},
	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_CORE_YONAH		},
	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNL		},
	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNM		},
	{ X86_VENDOR_AMD,	0x12,					},
	{ X86_VENDOR_AMD,	0x11,					},
	{ X86_VENDOR_AMD,	0x10,					},
	{ X86_VENDOR_AMD,	0xf,					},
	{}
};
static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
	/* in addition to cpu_no_speculation */
	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT	},
	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT_X	},
	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_AIRMONT		},
	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_SILVERMONT_MID	},
	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_AIRMONT_MID	},
	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_GOLDMONT	},
	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_GOLDMONT_X	},
	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_ATOM_GOLDMONT_PLUS	},
	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNL		},
	{ X86_VENDOR_INTEL,	6,	INTEL_FAM6_XEON_PHI_KNM		},
	{}
};

static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
	u64 ia32_cap = 0;

	if (x86_match_cpu(cpu_no_speculation))
		return;

	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
	setup_force_cpu_bug(X86_BUG_SPECTRE_V2);

	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

	if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
	    !(ia32_cap & ARCH_CAP_SSB_NO) &&
	    !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);

	if (ia32_cap & ARCH_CAP_IBRS_ALL)
		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);

	if (x86_match_cpu(cpu_no_meltdown))
		return;

	/* Rogue Data Cache Load? No! */
	if (ia32_cap & ARCH_CAP_RDCL_NO)
		return;

	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);

	if (x86_match_cpu(cpu_no_l1tf))
		return;

	setup_force_cpu_bug(X86_BUG_L1TF);
}

/*
 * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
 * unfortunately, that's not true in practice because of early VIA
 * chips and (more importantly) broken virtualizers that are not easy
 * to detect.  In the latter case it doesn't even *fail* reliably, so
 * probing for it doesn't even work.  Disable it completely on 32-bit
 * unless we can find a reliable way to detect all the broken cases.
 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
 */
static void detect_nopl(void)
{
#ifdef CONFIG_X86_32
	setup_clear_cpu_cap(X86_FEATURE_NOPL);
#else
	setup_force_cpu_cap(X86_FEATURE_NOPL);
#endif
}
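/*
 * Note: setup_force_cpu_cap()/setup_clear_cpu_cap() record their bits in
 * cpu_caps_set/cpu_caps_cleared, so apply_forced_caps() can re-apply the
 * overrides every time the capability words are re-probed by
 * get_cpu_cap().
 */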
/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, stepping,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the boot CPU.  Don't add code
 * here that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;

	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* cyrix could have cpuid enabled via c_identify() */
	if (have_cpuid_p()) {
		cpu_detect(c);
		get_cpu_vendor(c);
		get_cpu_cap(c);
		get_cpu_address_sizes(c);
		setup_force_cpu_cap(X86_FEATURE_CPUID);

		if (this_cpu->c_early_init)
			this_cpu->c_early_init(c);

		c->cpu_index = 0;
		filter_cpuid_features(c, false);

		if (this_cpu->c_bsp_init)
			this_cpu->c_bsp_init(c);
	} else {
		setup_clear_cpu_cap(X86_FEATURE_CPUID);
	}

	setup_force_cpu_cap(X86_FEATURE_ALWAYS);

	cpu_set_bug_bits(c);

	fpu__init_system(c);

#ifdef CONFIG_X86_32
	/*
	 * Regardless of whether PCID is enumerated, the SDM says
	 * that it can't be enabled in 32-bit mode.
	 */
	setup_clear_cpu_cap(X86_FEATURE_PCID);
#endif

	/*
	 * Later in the boot process pgtable_l5_enabled() relies on
	 * cpu_feature_enabled(X86_FEATURE_LA57).  If 5-level paging is not
	 * enabled by this point we need to clear the feature bit to avoid
	 * false positives at the later stages.
	 *
	 * pgtable_l5_enabled() can be false here for several reasons:
	 *  - 5-level paging was disabled at compile time;
	 *  - this is a 32-bit kernel;
	 *  - the machine doesn't support 5-level paging;
	 *  - the user specified 'no5lvl' on the kernel command line.
	 */
	if (!pgtable_l5_enabled())
		setup_clear_cpu_cap(X86_FEATURE_LA57);

	detect_nopl();
}

void __init early_cpu_init(void)
{
	const struct cpu_dev *const *cdev;
	int count = 0;

#ifdef CONFIG_PROCESSOR_SELECT
	pr_info("KERNEL supported cpus:\n");
#endif

	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
		const struct cpu_dev *cpudev = *cdev;

		if (count >= X86_VENDOR_NUM)
			break;
		cpu_devs[count] = cpudev;
		count++;

#ifdef CONFIG_PROCESSOR_SELECT
		{
			unsigned int j;

			for (j = 0; j < 2; j++) {
				if (!cpudev->c_ident[j])
					continue;
				pr_info("  %s %s\n", cpudev->c_vendor,
					cpudev->c_ident[j]);
			}
		}
#endif
	}
	early_identify_cpu(&boot_cpu_data);
}
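/*
 * The __x86_cpu_dev_start/__x86_cpu_dev_end symbols walked above delimit
 * a linker section that each vendor driver drops its struct cpu_dev into
 * (via cpu_dev_register() in cpu.h).
 */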
static void detect_null_seg_behavior(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	/*
	 * Empirically, writing zero to a segment selector on AMD does
	 * not clear the base, whereas writing zero to a segment
	 * selector on Intel does clear the base.  Intel's behavior
	 * allows slightly faster context switches in the common case
	 * where GS is unused by the prev and next threads.
	 *
	 * Since neither vendor documents this anywhere that I can see,
	 * detect it directly instead of hardcoding the choice by
	 * vendor.
	 *
	 * I've designated AMD's behavior as the "bug" because it's
	 * counterintuitive and less friendly.
	 */

	unsigned long old_base, tmp;

	rdmsrl(MSR_FS_BASE, old_base);
	wrmsrl(MSR_FS_BASE, 1);
	loadsegment(fs, 0);
	rdmsrl(MSR_FS_BASE, tmp);
	if (tmp != 0)
		set_cpu_bug(c, X86_BUG_NULL_SEG);
	wrmsrl(MSR_FS_BASE, old_base);
#endif
}

static void generic_identify(struct cpuinfo_x86 *c)
{
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* cyrix could have cpuid enabled via c_identify() */
	if (!have_cpuid_p())
		return;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	get_cpu_address_sizes(c);

	if (c->cpuid_level >= 0x00000001) {
		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
#ifdef CONFIG_X86_32
# ifdef CONFIG_SMP
		c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
# else
		c->apicid = c->initial_apicid;
# endif
#endif
		c->phys_proc_id = c->initial_apicid;
	}

	get_model_name(c); /* Default name */

	detect_null_seg_behavior(c);

	/*
	 * ESPFIX is a strange bug.  All real CPUs have it.  Paravirt
	 * systems that run Linux at CPL > 0 may or may not have the
	 * issue, but, even if they have the issue, there's absolutely
	 * nothing we can do about it because we can't use the real IRET
	 * instruction.
	 *
	 * NB: For the time being, only 32-bit kernels support
	 * X86_BUG_ESPFIX as such.  64-bit kernels directly choose
	 * whether to apply espfix using paravirt hooks.  If any
	 * non-paravirt system ever shows up that does *not* have the
	 * ESPFIX issue, we can change this.
	 */
#ifdef CONFIG_X86_32
# ifdef CONFIG_PARAVIRT_XXL
	do {
		extern void native_iret(void);
		if (pv_ops.cpu.iret == native_iret)
			set_cpu_bug(c, X86_BUG_ESPFIX);
	} while (0);
# else
	set_cpu_bug(c, X86_BUG_ESPFIX);
# endif
#endif
}

static void x86_init_cache_qos(struct cpuinfo_x86 *c)
{
	/*
	 * The heavy lifting of max_rmid and cache_occ_scale are handled
	 * in get_cpu_cap().  Here we just set the max_rmid for the boot_cpu
	 * in case CQM bits really aren't there in this CPU.
	 */
	if (c != &boot_cpu_data) {
		boot_cpu_data.x86_cache_max_rmid =
			min(boot_cpu_data.x86_cache_max_rmid,
			    c->x86_cache_max_rmid);
	}
}

/*
 * Validate that ACPI/mptables have the same information about the
 * effective APIC id and update the package map.
 */
static void validate_apic_and_package_id(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned int apicid, cpu = smp_processor_id();

	apicid = apic->cpu_present_to_apicid(cpu);

	if (apicid != c->apicid) {
		pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n",
		       cpu, apicid, c->apicid);
	}
	BUG_ON(topology_update_package_map(c->phys_proc_id, cpu));
#else
	c->logical_proc_id = 0;
#endif
}
/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = 0;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_stepping = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0';		/* Unset */
	c->x86_model_id[0] = '\0';		/* Unset */
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
	c->cu_id = 0xff;
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;
	memset(&c->x86_capability, 0, sizeof(c->x86_capability));

	generic_identify(c);

	if (this_cpu->c_identify)
		this_cpu->c_identify(c);

	/* Clear/Set all flags overridden by options, after probe */
	apply_forced_caps(c);

#ifdef CONFIG_X86_64
	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
#endif

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning that if a CPU
	 * supports features which CPUID doesn't report, claims
	 * flags it doesn't implement, or has other bugs, we handle
	 * them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/* Set up SMEP/SMAP/UMIP */
	setup_smep(c);
	setup_smap(c);
	setup_umip(c);

	/*
	 * The vendor-specific functions might have changed features.
	 * Now we do "generic changes."
	 */

	/* Filter out anything that depends on CPUID levels we don't have */
	filter_cpuid_features(c, true);

	/* If the model name is still unset, do table lookup. */
	if (!c->x86_model_id[0]) {
		const char *p;
		p = table_lookup_model(c);
		if (p)
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86, c->x86_model);
	}

#ifdef CONFIG_X86_64
	detect_ht(c);
#endif

	x86_init_rdrand(c);
	x86_init_cache_qos(c);
	setup_pku(c);

	/*
	 * Clear/Set all flags overridden by options.  This needs to
	 * happen before the capability AND across all CPUs below.
	 */
	apply_forced_caps(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];

		/* OR, i.e. replicate the bug flags */
		for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
			c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
	}

	/* Init Machine Check Exception if available. */
	mcheck_cpu_init(c);

	select_idle_routine(c);

#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif
}

/*
 * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
 * on 32-bit kernels:
 */
#ifdef CONFIG_X86_32
void enable_sep_cpu(void)
{
	struct tss_struct *tss;
	int cpu;

	if (!boot_cpu_has(X86_FEATURE_SEP))
		return;

	cpu = get_cpu();
	tss = &per_cpu(cpu_tss_rw, cpu);

	/*
	 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
	 * see the big comment in struct x86_hw_tss's definition.
	 */

	tss->x86_tss.ss1 = __KERNEL_CS;
	wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
	wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0);
	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);

	put_cpu();
}
#endif

void __init identify_boot_cpu(void)
{
	identify_cpu(&boot_cpu_data);
#ifdef CONFIG_X86_32
	sysenter_setup();
	enable_sep_cpu();
#endif
	cpu_detect_tlb(&boot_cpu_data);
}

void identify_secondary_cpu(struct cpuinfo_x86 *c)
{
	BUG_ON(c == &boot_cpu_data);
	identify_cpu(c);
#ifdef CONFIG_X86_32
	enable_sep_cpu();
#endif
	mtrr_ap_init();
	validate_apic_and_package_id(c);
	x86_spec_ctrl_setup_ap();
}

static __init int setup_noclflush(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
	setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT);
	return 1;
}
__setup("noclflush", setup_noclflush);

void print_cpu_info(struct cpuinfo_x86 *c)
{
	const char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM) {
		vendor = this_cpu->c_vendor;
	} else {
		if (c->cpuid_level >= 0)
			vendor = c->x86_vendor_id;
	}

	if (vendor && !strstr(c->x86_model_id, vendor))
		pr_cont("%s ", vendor);

	if (c->x86_model_id[0])
		pr_cont("%s", c->x86_model_id);
	else
		pr_cont("%d86", c->x86);

	pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);

	if (c->x86_stepping || c->cpuid_level >= 0)
		pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
	else
		pr_cont(")\n");
}

/*
 * clearcpuid= was already parsed in fpu__init_parse_early_param.
 * But we need to keep a dummy __setup around otherwise it would
 * show up as an environment variable for init.
 */
static __init int setup_clearcpuid(char *arg)
{
	return 1;
}
__setup("clearcpuid=", setup_clearcpuid);

#ifdef CONFIG_X86_64
DEFINE_PER_CPU_FIRST(struct fixed_percpu_data,
		     fixed_percpu_data) __aligned(PAGE_SIZE) __visible;
EXPORT_PER_CPU_SYMBOL_GPL(fixed_percpu_data);
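/*
 * Note: fixed_percpu_data is placed first in the per-CPU area so that
 * the stack canary it contains sits at the fixed %gs:40 offset that the
 * compiler's stack-protector code expects on x86_64.
 */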
/*
 * The following percpu variables are hot.  Align current_task to
 * cacheline size such that they fall in the same cacheline.
 */
DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
	&init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;

DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);

/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
	wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
	wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);

#ifdef CONFIG_IA32_EMULATION
	wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
	/*
	 * This only works on Intel CPUs.
	 * On AMD CPUs these MSRs are 32-bit, and the CPU truncates
	 * MSR_IA32_SYSENTER_EIP.
	 * This does not cause SYSENTER to jump to the wrong location,
	 * because AMD doesn't allow SYSENTER in long mode (either 32-
	 * or 64-bit).
	 */
	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
	wrmsrl_safe(MSR_IA32_SYSENTER_ESP,
		    (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1));
	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
#else
	wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
#endif

	/* Flags to clear on syscall */
	wrmsrl(MSR_SYSCALL_MASK,
	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
	       X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
}

DEFINE_PER_CPU(int, debug_stack_usage);
DEFINE_PER_CPU(u32, debug_idt_ctr);

void debug_stack_set_zero(void)
{
	this_cpu_inc(debug_idt_ctr);
	load_current_idt();
}
NOKPROBE_SYMBOL(debug_stack_set_zero);

void debug_stack_reset(void)
{
	if (WARN_ON(!this_cpu_read(debug_idt_ctr)))
		return;
	if (this_cpu_dec_return(debug_idt_ctr) == 0)
		load_current_idt();
}
NOKPROBE_SYMBOL(debug_stack_reset);

#else	/* CONFIG_X86_64 */

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);
DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);

/*
 * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find
 * the top of the kernel stack.  Use an extra percpu variable to track the
 * top of the kernel stack directly.
 */
DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
	(unsigned long)&init_thread_union + THREAD_SIZE;
EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);

#ifdef CONFIG_STACKPROTECTOR
DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif

#endif	/* CONFIG_X86_64 */

/*
 * Clear all 6 debug registers:
 */
static void clear_all_debug_regs(void)
{
	int i;

	for (i = 0; i < 8; i++) {
		/* Ignore db4, db5 */
		if ((i == 4) || (i == 5))
			continue;

		set_debugreg(0, i);
	}
}

#ifdef CONFIG_KGDB
/*
 * Restore debug regs if using kgdbwait and you have a kernel debugger
 * connection established.
 */
static void dbg_restore_debug_regs(void)
{
	if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
		arch_kgdb_ops.correct_hw_break();
}
#else	/* ! CONFIG_KGDB */
#define dbg_restore_debug_regs()
#endif	/* ! CONFIG_KGDB */
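/*
 * Note on the handshake in wait_for_master_cpu() below: a booting AP
 * marks itself in cpu_initialized_mask and then spins until the BSP
 * acknowledges it by setting the AP's bit in cpu_callout_mask (done by
 * the CPU bringup code in smpboot.c).
 */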
static void wait_for_master_cpu(int cpu)
{
#ifdef CONFIG_SMP
	/*
	 * wait for ACK from master CPU before continuing
	 * with AP initialization
	 */
	WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
	while (!cpumask_test_cpu(cpu, cpu_callout_mask))
		cpu_relax();
#endif
}

#ifdef CONFIG_X86_64
static void setup_getcpu(int cpu)
{
	unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
	struct desc_struct d = { };

	if (boot_cpu_has(X86_FEATURE_RDTSCP))
		write_rdtscp_aux(cpudata);

	/* Store CPU and node number in limit. */
	d.limit0 = cpudata;
	d.limit1 = cpudata >> 16;

	d.type = 5;		/* RO data, expand down, accessed */
	d.dpl = 3;		/* Visible to user code */
	d.s = 1;		/* Not a system segment */
	d.p = 1;		/* Present */
	d.d = 1;		/* 32-bit */

	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S);
}
#endif

/*
 * cpu_init() initializes state that is per-CPU.  Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT.  We reload them nevertheless; this function acts as a
 * 'CPU state barrier', nothing should get across.
 */
#ifdef CONFIG_X86_64

void cpu_init(void)
{
	int cpu = raw_smp_processor_id();
	struct task_struct *me;
	struct tss_struct *t;
	int i;

	wait_for_master_cpu(cpu);

	/*
	 * Initialize the CR4 shadow before doing anything that could
	 * try to read it.
	 */
	cr4_init_shadow();

	if (cpu)
		load_ucode_ap();

	t = &per_cpu(cpu_tss_rw, cpu);

#ifdef CONFIG_NUMA
	if (this_cpu_read(numa_node) == 0 &&
	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
		set_numa_node(early_cpu_to_node(cpu));
#endif
	setup_getcpu(cpu);

	me = current;

	pr_debug("Initializing CPU#%d\n", cpu);

	cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */

	switch_to_new_gdt(cpu);
	loadsegment(fs, 0);

	load_current_idt();

	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
	syscall_init();

	wrmsrl(MSR_FS_BASE, 0);
	wrmsrl(MSR_KERNEL_GS_BASE, 0);
	barrier();

	x86_configure_nx();
	x2apic_setup();

	/*
	 * set up and load the per-CPU TSS
	 */
	if (!t->x86_tss.ist[0]) {
		t->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF);
		t->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI);
		t->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB);
		t->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);
	}

	t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;

	/*
	 * <= is required because the CPU will access up to
	 * 8 bits beyond the end of the IO permission bitmap.
	 */
	for (i = 0; i <= IO_BITMAP_LONGS; i++)
		t->io_bitmap[i] = ~0UL;

	mmgrab(&init_mm);
	me->active_mm = &init_mm;
	BUG_ON(me->mm);
	initialize_tlbstate_and_flush();
	enter_lazy_tlb(&init_mm, me);
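	/*
	 * Note: cpu_entry_stack(cpu) + 1 points one element past the end
	 * of the per-CPU entry stack, i.e. at its top, since stacks grow
	 * down.
	 */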
	/*
	 * Initialize the TSS.  sp0 points to the entry trampoline stack
	 * regardless of what task is running.
	 */
	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
	load_TR_desc();
	load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));

	load_mm_ldt(&init_mm);

	clear_all_debug_regs();
	dbg_restore_debug_regs();

	fpu__init_cpu();

	if (is_uv_system())
		uv_cpu_init();

	load_fixmap_gdt(cpu);
}

#else

void cpu_init(void)
{
	int cpu = smp_processor_id();
	struct task_struct *curr = current;
	struct tss_struct *t = &per_cpu(cpu_tss_rw, cpu);

	wait_for_master_cpu(cpu);

	/*
	 * Initialize the CR4 shadow before doing anything that could
	 * try to read it.
	 */
	cr4_init_shadow();

	show_ucode_info_early();

	pr_info("Initializing CPU#%d\n", cpu);

	if (cpu_feature_enabled(X86_FEATURE_VME) ||
	    boot_cpu_has(X86_FEATURE_TSC) ||
	    boot_cpu_has(X86_FEATURE_DE))
		cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	load_current_idt();
	switch_to_new_gdt(cpu);

	/*
	 * Set up and load the per-CPU TSS and LDT
	 */
	mmgrab(&init_mm);
	curr->active_mm = &init_mm;
	BUG_ON(curr->mm);
	initialize_tlbstate_and_flush();
	enter_lazy_tlb(&init_mm, curr);

	/*
	 * Initialize the TSS.  sp0 points to the entry trampoline stack
	 * regardless of what task is running.
	 */
	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
	load_TR_desc();
	load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));

	load_mm_ldt(&init_mm);

	t->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;

#ifdef CONFIG_DOUBLEFAULT
	/* Set up doublefault TSS pointer in the GDT */
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

	clear_all_debug_regs();
	dbg_restore_debug_regs();

	fpu__init_cpu();

	load_fixmap_gdt(cpu);
}
#endif

/*
 * The microcode loader calls this upon late microcode load to recheck
 * features, only when microcode has been updated.  The caller holds
 * microcode_mutex and the CPU hotplug lock.
 */
void microcode_check(void)
{
	struct cpuinfo_x86 info;

	perf_check_microcode();

	/* Reload CPUID max function as it might've changed. */
	info.cpuid_level = cpuid_eax(0);

	/*
	 * Copy all capability leaves to pick up the synthetic ones so that
	 * memcmp() below doesn't fail on that.  The ones coming from CPUID
	 * will get overwritten in get_cpu_cap().
	 */
	memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability));

	get_cpu_cap(&info);

	if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)))
		return;

	pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
	pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
}