#include <linux/bootmem.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/sched/mm.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>

#include <asm/stackprotector.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/archrandom.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>
#include <asm/sections.h>
#include <asm/vsyscall.h>
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <asm/pgtable.h>
#include <linux/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
#include <asm/mtrr.h>
#include <asm/hwcap2.h>
#include <linux/numa.h>
#include <asm/asm.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/pat.h>
#include <asm/microcode.h>
#include <asm/microcode_intel.h>

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
#endif

#include "cpu.h"

u32 elf_hwcap2 __read_mostly;

/* all of these masks are initialized in setup_cpu_local_masks() */
cpumask_var_t cpu_initialized_mask;
cpumask_var_t cpu_callout_mask;
cpumask_var_t cpu_callin_mask;

/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;

/* correctly size the local cpu masks */
void __init setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

static void default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	cpu_detect_cache_sizes(c);
#else
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
#endif
}

static const struct cpu_dev default_cpu = {
	.c_init = default_init,
	.c_vendor = "Unknown",
	.c_x86_vendor = X86_VENDOR_UNKNOWN,
};

static const struct cpu_dev *this_cpu = &default_cpu;

DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
	/*
	 * We need valid kernel segments for data and code in long mode too
	 * IRET will check the segment types  kkeil 2000/10/28
	 * Also sysret mandates a special GDT layout
	 *
	 * TLS descriptors are currently at a different place compared to i386.
	 * Hopefully nobody expects them at a fixed place (Wine?)
	 */
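	/*
	 * For reference (explanatory note, not part of the original source):
	 * GDT_ENTRY_INIT(flags, base, limit) packs the access byte in the low
	 * 8 bits of 'flags' and the G/D/L bits in the high nibble.  E.g.
	 * 0xc09b = present, DPL 0, execute/read code, 4K granularity, 32-bit
	 * (D=1); 0xa09b is the same but with L=1 for 64-bit code; 0xc093 is a
	 * present, DPL 0, read/write data segment.
	 */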
	[GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
#else
	[GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * The code and data segments have fixed 64k limits,
	 * the transfer segment sizes are set at run time.
	 */
	/* 32-bit code */
	[GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	/* 32-bit code */
	[GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
	/* 16-bit code */
	[GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
	/* data */
	[GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),

	[GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	[GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
	GDT_STACK_CANARY_INIT
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

static int __init x86_mpx_setup(char *s)
{
	/* require an exact match without trailing characters */
	if (strlen(s))
		return 0;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_MPX))
		return 1;

	setup_clear_cpu_cap(X86_FEATURE_MPX);
	pr_info("nompx: Intel Memory Protection Extensions (MPX) disabled\n");
	return 1;
}
__setup("nompx", x86_mpx_setup);

#ifdef CONFIG_X86_64
static int __init x86_pcid_setup(char *s)
{
	/* require an exact match without trailing characters */
	if (strlen(s))
		return 0;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_PCID))
		return 1;

	setup_clear_cpu_cap(X86_FEATURE_PCID);
	pr_info("nopcid: PCID feature disabled\n");
	return 1;
}
__setup("nopcid", x86_pcid_setup);
#endif

static int __init x86_noinvpcid_setup(char *s)
{
	/* noinvpcid doesn't accept parameters */
	if (s)
		return -EINVAL;

	/* do not emit a message if the feature is not present */
	if (!boot_cpu_has(X86_FEATURE_INVPCID))
		return 0;

	setup_clear_cpu_cap(X86_FEATURE_INVPCID);
	pr_info("noinvpcid: INVPCID feature disabled\n");
	return 0;
}
early_param("noinvpcid", x86_noinvpcid_setup);

#ifdef CONFIG_X86_32
static int cachesize_override = -1;
static int disable_x86_serial_nr = 1;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;
}
__setup("cachesize=", cachesize_setup);
static int __init x86_sep_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_SEP);
	return 1;
}
__setup("nosep", x86_sep_setup);

/* Standard test to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	/*
	 * Cyrix and IDT cpus allow disabling of CPUID
	 * so the code below may return different results
	 * when it is executed before and after enabling
	 * the CPUID. Add "volatile" to not allow gcc to
	 * optimize the subsequent calls to this function.
	 */
	asm volatile ("pushfl	\n\t"
		      "pushfl	\n\t"
		      "popl %0	\n\t"
		      "movl %0, %1	\n\t"
		      "xorl %2, %0	\n\t"
		      "pushl %0	\n\t"
		      "popfl	\n\t"
		      "pushfl	\n\t"
		      "popl %0	\n\t"
		      "popfl	\n\t"

		      : "=&r" (f1), "=&r" (f2)
		      : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
int have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
		return;

	/* Disable processor serial number: */

	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
	lo |= 0x200000;
	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);

	pr_notice("CPU serial number disabled.\n");
	clear_cpu_cap(c, X86_FEATURE_PN);

	/* Disabling the serial number may affect the cpuid level */
	c->cpuid_level = cpuid_eax(0);
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
static inline int flag_is_changeable_p(u32 flag)
{
	return 1;
}
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
#endif

static __init int setup_disable_smep(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMEP);
	/* Check for things that depend on SMEP being enabled: */
	check_mpx_erratum(&boot_cpu_data);
	return 1;
}
__setup("nosmep", setup_disable_smep);

static __always_inline void setup_smep(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_SMEP))
		cr4_set_bits(X86_CR4_SMEP);
}

static __init int setup_disable_smap(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_SMAP);
	return 1;
}
__setup("nosmap", setup_disable_smap);

static __always_inline void setup_smap(struct cpuinfo_x86 *c)
{
	unsigned long eflags = native_save_fl();

	/* This should have been cleared long ago */
	BUG_ON(eflags & X86_EFLAGS_AC);

	if (cpu_has(c, X86_FEATURE_SMAP)) {
#ifdef CONFIG_X86_SMAP
		cr4_set_bits(X86_CR4_SMAP);
#else
		cr4_clear_bits(X86_CR4_SMAP);
#endif
	}
}

static void setup_pcid(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_PCID)) {
		if (cpu_has(c, X86_FEATURE_PGE)) {
			/*
			 * We'd like to use cr4_set_bits_and_update_boot(),
			 * but we can't.  CR4.PCIDE is special and can only
			 * be set in long mode, and the early CPU init code
			 * doesn't know this and would try to restore CR4.PCIDE
			 * prior to entering long mode.
			 *
			 * Instead, we rely on the fact that hotplug, resume,
			 * etc all fully restore CR4 before they write anything
			 * that could have nonzero PCID bits to CR3.  CR4.PCIDE
			 * has no effect on the page tables themselves, so we
			 * don't need it to be restored early.
			 */
			cr4_set_bits(X86_CR4_PCIDE);
		} else {
			/*
			 * flush_tlb_all(), as currently implemented, won't
			 * work if PCID is on but PGE is not.  Since that
			 * combination doesn't exist on real hardware, there's
			 * no reason to try to fully support it, but it's
			 * polite to avoid corrupting data if we're on
			 * an improperly configured VM.
			 */
			clear_cpu_cap(c, X86_FEATURE_PCID);
		}
	}
}

/*
 * Protection Keys are not available in 32-bit mode.
 */
static bool pku_disabled;

static __always_inline void setup_pku(struct cpuinfo_x86 *c)
{
	/* check the boot processor, plus compile options for PKU: */
	if (!cpu_feature_enabled(X86_FEATURE_PKU))
		return;
	/* checks the actual processor's cpuid bits: */
	if (!cpu_has(c, X86_FEATURE_PKU))
		return;
	if (pku_disabled)
		return;

	cr4_set_bits(X86_CR4_PKE);
	/*
	 * Setting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
	 * cpuid bit to be set.  We need to ensure that we
	 * update that bit in this CPU's "cpu_info".
	 */
	get_cpu_cap(c);
}

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
static __init int setup_disable_pku(char *arg)
{
	/*
	 * Do not clear the X86_FEATURE_PKU bit.  All of the
	 * runtime checks are against OSPKE so clearing the
	 * bit does nothing.
	 *
	 * This way, we will see "pku" in cpuinfo, but not
	 * "ospke", which is exactly what we want.  It shows
	 * that the CPU has PKU, but the OS has not enabled it.
	 * This happens to be exactly how a system would look
	 * if we disabled the config option.
	 */
	pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n");
	pku_disabled = true;
	return 1;
}
__setup("nopku", setup_disable_pku);
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */

/*
 * Some CPU features depend on higher CPUID levels, which may not always
 * be available due to CPUID level capping or broken virtualization
 * software.  Add those features to this table to auto-disable them.
 */
struct cpuid_dependent_feature {
	u32 feature;
	u32 level;
};

static const struct cpuid_dependent_feature
cpuid_dependent_features[] = {
	{ X86_FEATURE_MWAIT, 0x00000005 },
	{ X86_FEATURE_DCA, 0x00000009 },
	{ X86_FEATURE_XSAVE, 0x0000000d },
	{ 0, 0 }
};

static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{
	const struct cpuid_dependent_feature *df;

	for (df = cpuid_dependent_features; df->feature; df++) {

		if (!cpu_has(c, df->feature))
			continue;
		/*
		 * Note: cpuid_level is set to -1 if unavailable, but
		 * extended_cpuid_level is set to 0 if unavailable
		 * and the legitimate extended levels are all negative
		 * when signed; hence the weird messing around with
		 * signs here...
		 */
		if (!((s32)df->level < 0 ?
		     (u32)df->level > (u32)c->extended_cpuid_level :
		     (s32)df->level > (s32)c->cpuid_level))
			continue;

		clear_cpu_cap(c, df->feature);
		if (!warn)
			continue;

		pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
			x86_cap_flag(df->feature), df->level);
	}
}
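
/*
 * Worked example for the check above (illustrative note, not part of the
 * original source): for the { X86_FEATURE_XSAVE, 0x0000000d } entry,
 * df->level is positive, so the signed compare applies and XSAVE is cleared
 * whenever c->cpuid_level < 0xd (including -1, i.e. no CPUID at all).  A
 * hypothetical extended entry such as 0x80000008 would be negative as an
 * s32, so the unsigned compare against extended_cpuid_level is used instead.
 */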

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set the name;
 * in particular, if CPUID levels 0x80000002..4 are supported, this
 * isn't used.
 */

/* Look up CPU names by table lookup. */
static const char *table_lookup_model(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	const struct legacy_cpu_model_info *info;

	if (c->x86_model >= 16)
		return NULL;	/* Range check */

	if (!this_cpu)
		return NULL;

	info = this_cpu->legacy_models;

	while (info->family) {
		if (info->family == c->x86)
			return info->model_names[c->x86_model];
		info++;
	}
#endif
	return NULL;		/* Not found */
}

__u32 cpu_caps_cleared[NCAPINTS];
__u32 cpu_caps_set[NCAPINTS];

void load_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	loadsegment(fs, __KERNEL_PERCPU);
#else
	__loadsegment_simple(gs, 0);
	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
#endif
	load_stack_canary_segment();
}

/* Set up the fixmap mapping only once per processor */
static inline void setup_fixmap_gdt(int cpu)
{
#ifdef CONFIG_X86_64
	/* On 64-bit systems, we use a read-only fixmap GDT. */
	pgprot_t prot = PAGE_KERNEL_RO;
#else
	/*
	 * On native 32-bit systems, the GDT cannot be read-only because
	 * our double fault handler uses a task gate, and entering through
	 * a task gate needs to change an available TSS to busy.  If the GDT
	 * is read-only, that will triple fault.
	 *
	 * On Xen PV, the GDT must be read-only because the hypervisor
	 * requires it.
	 */
	pgprot_t prot = boot_cpu_has(X86_FEATURE_XENPV) ?
		PAGE_KERNEL_RO : PAGE_KERNEL;
#endif

	__set_fixmap(get_cpu_gdt_ro_index(cpu), get_cpu_gdt_paddr(cpu), prot);
}

/* Load the original GDT from the per-cpu structure */
void load_direct_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_rw(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
EXPORT_SYMBOL_GPL(load_direct_gdt);

/* Load a fixmap remapping of the per-cpu GDT */
void load_fixmap_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_ro(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
EXPORT_SYMBOL_GPL(load_fixmap_gdt);

/*
 * Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one.
 */
void switch_to_new_gdt(int cpu)
{
	/* Load the original GDT */
	load_direct_gdt(cpu);
	/* Reload the per-cpu base */
	load_percpu_segment(cpu);
}

static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q, *s;

	if (c->extended_cpuid_level < 0x80000004)
		return;

	v = (unsigned int *)c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Trim whitespace */
	p = q = s = &c->x86_model_id[0];

	while (*p == ' ')
		p++;

	while (*p) {
		/* Note the last non-whitespace index */
		if (!isspace(*p))
			s = q;

		*q++ = *p++;
	}

	*(s + 1) = '\0';
}

void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ebx, ecx, edx, l2size;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
#endif
	}

	if (n < 0x80000006)	/* Some chips just have a large L1. */
		return;

	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
	l2size = ecx >> 16;

#ifdef CONFIG_X86_64
	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
	/* do processor-specific cache resizing */
	if (this_cpu->legacy_cache_size)
		l2size = this_cpu->legacy_cache_size(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */
#endif

	c->x86_cache_size = l2size;
}

u16 __read_mostly tlb_lli_4k[NR_INFO];
u16 __read_mostly tlb_lli_2m[NR_INFO];
u16 __read_mostly tlb_lli_4m[NR_INFO];
u16 __read_mostly tlb_lld_4k[NR_INFO];
u16 __read_mostly tlb_lld_2m[NR_INFO];
u16 __read_mostly tlb_lld_4m[NR_INFO];
u16 __read_mostly tlb_lld_1g[NR_INFO];

static void cpu_detect_tlb(struct cpuinfo_x86 *c)
{
	if (this_cpu->c_detect_tlb)
		this_cpu->c_detect_tlb(c);

	pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
		tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
		tlb_lli_4m[ENTRIES]);

	pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
		tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
		tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
}

void detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;
	static bool printed;

	if (!cpu_has(c, X86_FEATURE_HT))
		return;

	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
		return;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		pr_info_once("CPU0: Hyper-Threading is disabled\n");
		goto out;
	}

	if (smp_num_siblings <= 1)
		goto out;

	index_msb = get_count_order(smp_num_siblings);
	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);

	smp_num_siblings = smp_num_siblings / c->x86_max_cores;

	index_msb = get_count_order(smp_num_siblings);

	core_bits = get_count_order(c->x86_max_cores);

	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
		((1 << core_bits) - 1);

out:
	if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
		pr_info("CPU: Physical Processor ID: %d\n",
			c->phys_proc_id);
		pr_info("CPU: Processor Core ID: %d\n",
			c->cpu_core_id);
		printed = 1;
	}
#endif
}

static void get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;
	int i;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (!cpu_devs[i])
			break;

		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {

			this_cpu = cpu_devs[i];
			c->x86_vendor = this_cpu->c_x86_vendor;
			return;
		}
	}

	pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
		    "CPU: Your system may be unstable.\n", v);

	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}

void cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	c->x86 = 4;
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;

		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86 = x86_family(tfms);
		c->x86_model = x86_model(tfms);
		c->x86_mask = x86_stepping(tfms);

		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
		}
	}
}
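
/*
 * Example (illustrative note, not part of the original source): a leaf-1
 * EAX value of 0x000306c3 decodes via x86_family()/x86_model()/
 * x86_stepping() above to family 6, model 0x3c (extended model 3, base
 * model 0xc) and stepping 3.
 */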

static void apply_forced_caps(struct cpuinfo_x86 *c)
{
	int i;

	for (i = 0; i < NCAPINTS; i++) {
		c->x86_capability[i] &= ~cpu_caps_cleared[i];
		c->x86_capability[i] |= cpu_caps_set[i];
	}
}

void get_cpu_cap(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_1_ECX] = ecx;
		c->x86_capability[CPUID_1_EDX] = edx;
	}

	/* Thermal and Power Management Leaf: level 0x00000006 (eax) */
	if (c->cpuid_level >= 0x00000006)
		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);

	/* Additional Intel-defined flags: level 0x00000007 */
	if (c->cpuid_level >= 0x00000007) {
		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_7_0_EBX] = ebx;
		c->x86_capability[CPUID_7_ECX] = ecx;
	}

	/* Extended state features: level 0x0000000d */
	if (c->cpuid_level >= 0x0000000d) {
		cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_D_1_EAX] = eax;
	}

	/* Additional Intel-defined flags: level 0x0000000F */
	if (c->cpuid_level >= 0x0000000F) {

		/* QoS sub-leaf, EAX=0Fh, ECX=0 */
		cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_F_0_EDX] = edx;

		if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
			/* will be overridden if occupancy monitoring exists */
			c->x86_cache_max_rmid = ebx;

			/* QoS sub-leaf, EAX=0Fh, ECX=1 */
			cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
			c->x86_capability[CPUID_F_1_EDX] = edx;

			if ((cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) ||
			    ((cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL)) ||
			     (cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)))) {
				c->x86_cache_max_rmid = ecx;
				c->x86_cache_occ_scale = ebx;
			}
		} else {
			c->x86_cache_max_rmid = -1;
			c->x86_cache_occ_scale = -1;
		}
	}

	/* AMD-defined flags: level 0x80000001 */
	eax = cpuid_eax(0x80000000);
	c->extended_cpuid_level = eax;

	if ((eax & 0xffff0000) == 0x80000000) {
		if (eax >= 0x80000001) {
			cpuid(0x80000001, &eax, &ebx, &ecx, &edx);

			c->x86_capability[CPUID_8000_0001_ECX] = ecx;
			c->x86_capability[CPUID_8000_0001_EDX] = edx;
		}
	}

	if (c->extended_cpuid_level >= 0x80000007) {
		cpuid(0x80000007, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_8000_0007_EBX] = ebx;
		c->x86_power = edx;
	}

	if (c->extended_cpuid_level >= 0x80000008) {
		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);

		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
	}
#ifdef CONFIG_X86_32
	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
		c->x86_phys_bits = 36;
#endif

	if (c->extended_cpuid_level >= 0x8000000a)
		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);

	init_scattered_cpuid_features(c);

	/*
	 * Clear/Set all flags overridden by options, after probe.
	 * This needs to happen each time we re-probe, which may happen
	 * several times during CPU initialization.
	 */
	apply_forced_caps(c);
}

static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	int i;

	/*
	 * First of all, decide if this is a 486 or higher
	 * It's a 486 if we can modify the AC flag
	 */
	if (flag_is_changeable_p(X86_EFLAGS_AC))
		c->x86 = 4;
	else
		c->x86 = 3;

	for (i = 0; i < X86_VENDOR_NUM; i++)
		if (cpu_devs[i] && cpu_devs[i]->c_identify) {
			c->x86_vendor_id[0] = 0;
			cpu_devs[i]->c_identify(c);
			if (c->x86_vendor_id[0]) {
				get_cpu_vendor(c);
				break;
			}
		}
#endif
}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the BP.  Don't add code here
 * that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;

	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
	c->extended_cpuid_level = 0;

	/* cyrix could have cpuid enabled via c_identify() */
	if (have_cpuid_p()) {
		cpu_detect(c);
		get_cpu_vendor(c);
		get_cpu_cap(c);
		setup_force_cpu_cap(X86_FEATURE_CPUID);

		if (this_cpu->c_early_init)
			this_cpu->c_early_init(c);

		c->cpu_index = 0;
		filter_cpuid_features(c, false);

		if (this_cpu->c_bsp_init)
			this_cpu->c_bsp_init(c);
	} else {
		identify_cpu_without_cpuid(c);
		setup_clear_cpu_cap(X86_FEATURE_CPUID);
	}

	setup_force_cpu_cap(X86_FEATURE_ALWAYS);
	fpu__init_system(c);
}

void __init early_cpu_init(void)
{
	const struct cpu_dev *const *cdev;
	int count = 0;

#ifdef CONFIG_PROCESSOR_SELECT
	pr_info("KERNEL supported cpus:\n");
#endif

	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
		const struct cpu_dev *cpudev = *cdev;

		if (count >= X86_VENDOR_NUM)
			break;
		cpu_devs[count] = cpudev;
		count++;

#ifdef CONFIG_PROCESSOR_SELECT
		{
			unsigned int j;

			for (j = 0; j < 2; j++) {
				if (!cpudev->c_ident[j])
					continue;
				pr_info("  %s %s\n", cpudev->c_vendor,
					cpudev->c_ident[j]);
			}
		}
#endif
	}
	early_identify_cpu(&boot_cpu_data);
}

/*
 * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
 * unfortunately, that's not true in practice because of early VIA
 * chips and (more importantly) broken virtualizers that are not easy
 * to detect. In the latter case it doesn't even *fail* reliably, so
 * probing for it doesn't even work. Disable it completely on 32-bit
 * unless we can find a reliable way to detect all the broken cases.
 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
 */
static void detect_nopl(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	clear_cpu_cap(c, X86_FEATURE_NOPL);
#else
	set_cpu_cap(c, X86_FEATURE_NOPL);
#endif
}

static void detect_null_seg_behavior(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	/*
	 * Empirically, writing zero to a segment selector on AMD does
	 * not clear the base, whereas writing zero to a segment
	 * selector on Intel does clear the base.  Intel's behavior
	 * allows slightly faster context switches in the common case
	 * where GS is unused by the prev and next threads.
	 *
	 * Since neither vendor documents this anywhere that I can see,
	 * detect it directly instead of hardcoding the choice by
	 * vendor.
	 *
	 * I've designated AMD's behavior as the "bug" because it's
	 * counterintuitive and less friendly.
	 */

	unsigned long old_base, tmp;
	rdmsrl(MSR_FS_BASE, old_base);
	wrmsrl(MSR_FS_BASE, 1);
	loadsegment(fs, 0);
	rdmsrl(MSR_FS_BASE, tmp);
	if (tmp != 0)
		set_cpu_bug(c, X86_BUG_NULL_SEG);
	wrmsrl(MSR_FS_BASE, old_base);
#endif
}

static void generic_identify(struct cpuinfo_x86 *c)
{
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* cyrix could have cpuid enabled via c_identify() */
	if (!have_cpuid_p())
		return;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	if (c->cpuid_level >= 0x00000001) {
		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
#ifdef CONFIG_X86_32
# ifdef CONFIG_SMP
		c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
# else
		c->apicid = c->initial_apicid;
# endif
#endif
		c->phys_proc_id = c->initial_apicid;
	}

	get_model_name(c); /* Default name */

	detect_nopl(c);

	detect_null_seg_behavior(c);

	/*
	 * ESPFIX is a strange bug.  All real CPUs have it.  Paravirt
	 * systems that run Linux at CPL > 0 may or may not have the
	 * issue, but, even if they have the issue, there's absolutely
	 * nothing we can do about it because we can't use the real IRET
	 * instruction.
	 *
	 * NB: For the time being, only 32-bit kernels support
	 * X86_BUG_ESPFIX as such.  64-bit kernels directly choose
	 * whether to apply espfix using paravirt hooks.  If any
	 * non-paravirt system ever shows up that does *not* have the
	 * ESPFIX issue, we can change this.
	 */
#ifdef CONFIG_X86_32
# ifdef CONFIG_PARAVIRT
	do {
		extern void native_iret(void);
		if (pv_cpu_ops.iret == native_iret)
			set_cpu_bug(c, X86_BUG_ESPFIX);
	} while (0);
# else
	set_cpu_bug(c, X86_BUG_ESPFIX);
# endif
#endif
}

static void x86_init_cache_qos(struct cpuinfo_x86 *c)
{
	/*
	 * The heavy lifting of max_rmid and cache_occ_scale is handled
	 * in get_cpu_cap().  Here we just set the max_rmid for the boot_cpu
	 * in case CQM bits really aren't there in this CPU.
	 */
	if (c != &boot_cpu_data) {
		boot_cpu_data.x86_cache_max_rmid =
			min(boot_cpu_data.x86_cache_max_rmid,
			    c->x86_cache_max_rmid);
	}
}

/*
 * Validate that ACPI/mptables have the same information about the
 * effective APIC id and update the package map.
 */
static void validate_apic_and_package_id(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned int apicid, cpu = smp_processor_id();

	apicid = apic->cpu_present_to_apicid(cpu);

	if (apicid != c->apicid) {
		pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n",
		       cpu, apicid, c->initial_apicid);
	}
	BUG_ON(topology_update_package_map(c->phys_proc_id, cpu));
#else
	c->logical_proc_id = 0;
#endif
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
	c->cu_id = 0xff;
#ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
	c->x86_phys_bits = 36;
	c->x86_virt_bits = 48;
#else
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_clflush_size = 32;
	c->x86_phys_bits = 32;
	c->x86_virt_bits = 32;
#endif
	c->x86_cache_alignment = c->x86_clflush_size;
	memset(&c->x86_capability, 0, sizeof(c->x86_capability));

	generic_identify(c);

	if (this_cpu->c_identify)
		this_cpu->c_identify(c);

	/* Clear/Set all flags overridden by options, after probe */
	apply_forced_caps(c);

#ifdef CONFIG_X86_64
	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
#endif

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	if (this_cpu->c_init)
		this_cpu->c_init(c);

	/* Disable the PN if appropriate */
	squash_the_stupid_serial_number(c);

	/* Set up SMEP/SMAP */
	setup_smep(c);
	setup_smap(c);

	/* Set up PCID */
	setup_pcid(c);

	/*
	 * The vendor-specific functions might have changed features.
	 * Now we do "generic changes."
	 */

	/* Filter out anything that depends on CPUID levels we don't have */
	filter_cpuid_features(c, true);

	/* If the model name is still unset, do table lookup. */
	if (!c->x86_model_id[0]) {
		const char *p;
		p = table_lookup_model(c);
		if (p)
			strcpy(c->x86_model_id, p);
		else
			/* Last resort... */
			sprintf(c->x86_model_id, "%02x/%02x",
				c->x86, c->x86_model);
	}

#ifdef CONFIG_X86_64
	detect_ht(c);
#endif

	x86_init_rdrand(c);
	x86_init_cache_qos(c);
	setup_pku(c);

	/*
	 * Clear/Set all flags overridden by options; this needs to be done
	 * before the SMP feature-AND over all CPUs below.
	 */
	apply_forced_caps(c);

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];

		/* OR, i.e. replicate the bug flags */
		for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
			c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
	}

	/* Init Machine Check Exception if available. */
	mcheck_cpu_init(c);

	select_idle_routine(c);

#ifdef CONFIG_NUMA
	numa_add_cpu(smp_processor_id());
#endif
}

/*
 * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
 * on 32-bit kernels:
 */
#ifdef CONFIG_X86_32
void enable_sep_cpu(void)
{
	struct tss_struct *tss;
	int cpu;

	if (!boot_cpu_has(X86_FEATURE_SEP))
		return;

	cpu = get_cpu();
	tss = &per_cpu(cpu_tss, cpu);

	/*
	 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
	 * see the big comment in struct x86_hw_tss's definition.
	 */

	tss->x86_tss.ss1 = __KERNEL_CS;
	wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);

	wrmsr(MSR_IA32_SYSENTER_ESP,
	      (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack),
	      0);

	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);

	put_cpu();
}
#endif

void __init identify_boot_cpu(void)
{
	identify_cpu(&boot_cpu_data);
#ifdef CONFIG_X86_32
	sysenter_setup();
	enable_sep_cpu();
#endif
	cpu_detect_tlb(&boot_cpu_data);
}

void identify_secondary_cpu(struct cpuinfo_x86 *c)
{
	BUG_ON(c == &boot_cpu_data);
	identify_cpu(c);
#ifdef CONFIG_X86_32
	enable_sep_cpu();
#endif
	mtrr_ap_init();
	validate_apic_and_package_id(c);
}

static __init int setup_noclflush(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
	setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT);
	return 1;
}
__setup("noclflush", setup_noclflush);

void print_cpu_info(struct cpuinfo_x86 *c)
{
	const char *vendor = NULL;

	if (c->x86_vendor < X86_VENDOR_NUM) {
		vendor = this_cpu->c_vendor;
	} else {
		if (c->cpuid_level >= 0)
			vendor = c->x86_vendor_id;
	}

	if (vendor && !strstr(c->x86_model_id, vendor))
		pr_cont("%s ", vendor);

	if (c->x86_model_id[0])
		pr_cont("%s", c->x86_model_id);
	else
		pr_cont("%d86", c->x86);

	pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);

	if (c->x86_mask || c->cpuid_level >= 0)
		pr_cont(", stepping: 0x%x)\n", c->x86_mask);
	else
		pr_cont(")\n");
}

static __init int setup_disablecpuid(char *arg)
{
	int bit;

	if (get_option(&arg, &bit) && bit >= 0 && bit < NCAPINTS * 32)
		setup_clear_cpu_cap(bit);
	else
		return 0;

	return 1;
}
__setup("clearcpuid=", setup_disablecpuid);
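
/*
 * Usage note (illustrative, not part of the original source): the bit number
 * passed to "clearcpuid=" indexes the x86_capability words directly, i.e.
 * bit = 32 * capability_word + bit_within_word, so valid values are
 * 0 .. NCAPINTS * 32 - 1.
 */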

#ifdef CONFIG_X86_64
DEFINE_PER_CPU_FIRST(union irq_stack_union,
		     irq_stack_union) __aligned(PAGE_SIZE) __visible;

/*
 * The following percpu variables are hot.  Align current_task to
 * cacheline size such that they fall in the same cacheline.
 */
DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
	&init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(char *, irq_stack_ptr) =
	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE;

DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;

DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);

/*
 * Special IST stacks which the CPU switches to when it calls
 * an IST-marked descriptor entry.  Up to 7 stacks (hardware
 * limit), all of them are 4K, except the debug stack which
 * is 8K.
 */
static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
	[0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
	[DEBUG_STACK - 1]		= DEBUG_STKSZ
};

static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);

/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
	wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
	wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);

#ifdef CONFIG_IA32_EMULATION
	wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
	/*
	 * This only works on Intel CPUs.
	 * On AMD CPUs these MSRs are 32-bit, and the CPU truncates
	 * MSR_IA32_SYSENTER_EIP.
	 * This does not cause SYSENTER to jump to the wrong location, because
	 * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
	 */
	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
#else
	wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
#endif

	/* Flags to clear on syscall */
	wrmsrl(MSR_SYSCALL_MASK,
	       X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
	       X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
}

/*
 * Copies of the original ist values from the tss are only accessed during
 * debugging, no special alignment required.
 */
DEFINE_PER_CPU(struct orig_ist, orig_ist);

static DEFINE_PER_CPU(unsigned long, debug_stack_addr);
DEFINE_PER_CPU(int, debug_stack_usage);

int is_debug_stack(unsigned long addr)
{
	return __this_cpu_read(debug_stack_usage) ||
		(addr <= __this_cpu_read(debug_stack_addr) &&
		 addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ));
}
NOKPROBE_SYMBOL(is_debug_stack);

DEFINE_PER_CPU(u32, debug_idt_ctr);

void debug_stack_set_zero(void)
{
	this_cpu_inc(debug_idt_ctr);
	load_current_idt();
}
NOKPROBE_SYMBOL(debug_stack_set_zero);

void debug_stack_reset(void)
{
	if (WARN_ON(!this_cpu_read(debug_idt_ctr)))
		return;
	if (this_cpu_dec_return(debug_idt_ctr) == 0)
		load_current_idt();
}
NOKPROBE_SYMBOL(debug_stack_reset);

#else	/* CONFIG_X86_64 */

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);
DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);

/*
 * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find
 * the top of the kernel stack.  Use an extra percpu variable to track the
 * top of the kernel stack directly.
 */
DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
	(unsigned long)&init_thread_union + THREAD_SIZE;
EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);

#ifdef CONFIG_CC_STACKPROTECTOR
DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif

#endif	/* CONFIG_X86_64 */

/*
 * Clear all 6 debug registers:
 */
static void clear_all_debug_regs(void)
{
	int i;

	for (i = 0; i < 8; i++) {
		/* Ignore db4, db5 */
		if ((i == 4) || (i == 5))
			continue;

		set_debugreg(0, i);
	}
}

#ifdef CONFIG_KGDB
/*
 * Restore debug regs if using kgdbwait and you have a kernel debugger
 * connection established.
 */
static void dbg_restore_debug_regs(void)
{
	if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
		arch_kgdb_ops.correct_hw_break();
}
#else /* ! CONFIG_KGDB */
#define dbg_restore_debug_regs()
#endif /* ! CONFIG_KGDB */

static void wait_for_master_cpu(int cpu)
{
#ifdef CONFIG_SMP
	/*
	 * wait for ACK from master CPU before continuing
	 * with AP initialization
	 */
	WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
	while (!cpumask_test_cpu(cpu, cpu_callout_mask))
		cpu_relax();
#endif
}

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 * A lot of state is already set up in PDA init for 64 bit
 */
#ifdef CONFIG_X86_64

void cpu_init(void)
{
	struct orig_ist *oist;
	struct task_struct *me;
	struct tss_struct *t;
	unsigned long v;
	int cpu = raw_smp_processor_id();
	int i;

	wait_for_master_cpu(cpu);

	/*
	 * Initialize the CR4 shadow before doing anything that could
	 * try to read it.
	 */
	cr4_init_shadow();

	if (cpu)
		load_ucode_ap();

	t = &per_cpu(cpu_tss, cpu);
	oist = &per_cpu(orig_ist, cpu);

#ifdef CONFIG_NUMA
	if (this_cpu_read(numa_node) == 0 &&
	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
		set_numa_node(early_cpu_to_node(cpu));
#endif

	me = current;

	pr_debug("Initializing CPU#%d\n", cpu);

	cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */

	switch_to_new_gdt(cpu);
	loadsegment(fs, 0);

	load_current_idt();

	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
	syscall_init();

	wrmsrl(MSR_FS_BASE, 0);
	wrmsrl(MSR_KERNEL_GS_BASE, 0);
	barrier();

	x86_configure_nx();
	x2apic_setup();

	/*
	 * set up and load the per-CPU TSS
	 */
	if (!oist->ist[0]) {
		char *estacks = per_cpu(exception_stacks, cpu);

		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
			estacks += exception_stack_sizes[v];
			oist->ist[v] = t->x86_tss.ist[v] =
					(unsigned long)estacks;
			if (v == DEBUG_STACK-1)
				per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks;
		}
	}

	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

	/*
	 * <= is required because the CPU will access up to
	 * 8 bits beyond the end of the IO permission bitmap.
	 */
	for (i = 0; i <= IO_BITMAP_LONGS; i++)
		t->io_bitmap[i] = ~0UL;

	mmgrab(&init_mm);
	me->active_mm = &init_mm;
	BUG_ON(me->mm);
	initialize_tlbstate_and_flush();
	enter_lazy_tlb(&init_mm, me);

	load_sp0(t, &current->thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
	load_mm_ldt(&init_mm);

	clear_all_debug_regs();
	dbg_restore_debug_regs();

	fpu__init_cpu();

	if (is_uv_system())
		uv_cpu_init();

	setup_fixmap_gdt(cpu);
	load_fixmap_gdt(cpu);
}

#else

void cpu_init(void)
{
	int cpu = smp_processor_id();
	struct task_struct *curr = current;
	struct tss_struct *t = &per_cpu(cpu_tss, cpu);
	struct thread_struct *thread = &curr->thread;

	wait_for_master_cpu(cpu);

	/*
	 * Initialize the CR4 shadow before doing anything that could
	 * try to read it.
1633 */ 1634 cr4_init_shadow(); 1635 1636 show_ucode_info_early(); 1637 1638 pr_info("Initializing CPU#%d\n", cpu); 1639 1640 if (cpu_feature_enabled(X86_FEATURE_VME) || 1641 boot_cpu_has(X86_FEATURE_TSC) || 1642 boot_cpu_has(X86_FEATURE_DE)) 1643 cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); 1644 1645 load_current_idt(); 1646 switch_to_new_gdt(cpu); 1647 1648 /* 1649 * Set up and load the per-CPU TSS and LDT 1650 */ 1651 mmgrab(&init_mm); 1652 curr->active_mm = &init_mm; 1653 BUG_ON(curr->mm); 1654 initialize_tlbstate_and_flush(); 1655 enter_lazy_tlb(&init_mm, curr); 1656 1657 load_sp0(t, thread); 1658 set_tss_desc(cpu, t); 1659 load_TR_desc(); 1660 load_mm_ldt(&init_mm); 1661 1662 t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); 1663 1664 #ifdef CONFIG_DOUBLEFAULT 1665 /* Set up doublefault TSS pointer in the GDT */ 1666 __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss); 1667 #endif 1668 1669 clear_all_debug_regs(); 1670 dbg_restore_debug_regs(); 1671 1672 fpu__init_cpu(); 1673 1674 setup_fixmap_gdt(cpu); 1675 load_fixmap_gdt(cpu); 1676 } 1677 #endif 1678 1679 static void bsp_resume(void) 1680 { 1681 if (this_cpu->c_bsp_resume) 1682 this_cpu->c_bsp_resume(&boot_cpu_data); 1683 } 1684 1685 static struct syscore_ops cpu_syscore_ops = { 1686 .resume = bsp_resume, 1687 }; 1688 1689 static int __init init_cpu_syscore(void) 1690 { 1691 register_syscore_ops(&cpu_syscore_ops); 1692 return 0; 1693 } 1694 core_initcall(init_cpu_syscore); 1695