#include <linux/bootmem.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/io.h>

#include <asm/stackprotector.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <asm/pgtable.h>
#include <asm/atomic.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/mtrr.h>
#include <linux/numa.h>
#include <asm/asm.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
#endif

#include "cpu.h"

/* all of these masks are initialized in setup_cpu_local_masks() */
cpumask_var_t cpu_initialized_mask;
cpumask_var_t cpu_callout_mask;
cpumask_var_t cpu_callin_mask;

/* representing cpus for which sibling maps can be computed */
cpumask_var_t cpu_sibling_setup_mask;

/* correctly size the local cpu masks */
void __init setup_cpu_local_masks(void)
{
        alloc_bootmem_cpumask_var(&cpu_initialized_mask);
        alloc_bootmem_cpumask_var(&cpu_callin_mask);
        alloc_bootmem_cpumask_var(&cpu_callout_mask);
        alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

static void __cpuinit default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
        cpu_detect_cache_sizes(c);
#else
        /* Not much we can do here... */
        /* Check if at least it has cpuid */
        if (c->cpuid_level == -1) {
                /* No cpuid. It must be an ancient CPU */
                if (c->x86 == 4)
                        strcpy(c->x86_model_id, "486");
                else if (c->x86 == 3)
                        strcpy(c->x86_model_id, "386");
        }
#endif
}

static const struct cpu_dev __cpuinitconst default_cpu = {
        .c_init = default_init,
        .c_vendor = "Unknown",
        .c_x86_vendor = X86_VENDOR_UNKNOWN,
};

static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;

DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
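        /*
         * Illustrative note (not from the original source): each
         * GDT_ENTRY_INIT(flags, base, limit) below packs the access byte
         * in the low 8 bits of "flags" and the granularity/size bits in
         * the high bits.  For example, 0xc09a is access byte 0x9a
         * (present, DPL 0, readable code) with 0xc0 selecting 4K
         * granularity and a 32-bit default size, while the 64-bit kernel
         * code segment uses 0xa09b, where 0xa0 selects 4K granularity
         * plus the long-mode (L) bit instead of the 32-bit (D) bit.
         */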
#ifdef CONFIG_X86_64
        /*
         * We need valid kernel segments for data and code in long mode too
         * IRET will check the segment types  kkeil 2000/10/28
         * Also sysret mandates a special GDT layout
         *
         * TLS descriptors are currently at a different place compared to i386.
         * Hopefully nobody expects them at a fixed place (Wine?)
         */
        [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
        [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
        [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
        [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
        [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
        [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
#else
        [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
        [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
        [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
        [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
        /*
         * Segments used for calling PnP BIOS have byte granularity.
         * The code and data segments have fixed 64k limits,
         * the transfer segment sizes are set at run time.
         */
        /* 32-bit code */
        [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
        /* 16-bit code */
        [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
        /* 16-bit data */
        [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
        /* 16-bit data */
        [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
        /* 16-bit data */
        [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
        /*
         * The APM segments have byte granularity and their bases
         * are set at run time.  All have 64k limits.
         */
        /* 32-bit code */
        [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
        /* 16-bit code */
        [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
        /* data */
        [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),

        [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
        [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
        GDT_STACK_CANARY_INIT
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

static int __init x86_xsave_setup(char *s)
{
        setup_clear_cpu_cap(X86_FEATURE_XSAVE);
        return 1;
}
__setup("noxsave", x86_xsave_setup);

#ifdef CONFIG_X86_32
static int cachesize_override __cpuinitdata = -1;
static int disable_x86_serial_nr __cpuinitdata = 1;

static int __init cachesize_setup(char *str)
{
        get_option(&str, &cachesize_override);
        return 1;
}
__setup("cachesize=", cachesize_setup);

static int __init x86_fxsr_setup(char *s)
{
        setup_clear_cpu_cap(X86_FEATURE_FXSR);
        setup_clear_cpu_cap(X86_FEATURE_XMM);
        return 1;
}
__setup("nofxsr", x86_fxsr_setup);

static int __init x86_sep_setup(char *s)
{
        setup_clear_cpu_cap(X86_FEATURE_SEP);
        return 1;
}
__setup("nosep", x86_sep_setup);

/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
        u32 f1, f2;

        /*
         * Cyrix and IDT cpus allow disabling of CPUID
         * so the code below may return different results
         * when it is executed before and after enabling
         * the CPUID. Add "volatile" to not allow gcc to
         * optimize the subsequent calls to this function.
         */
        asm volatile ("pushfl \n\t"
                      "pushfl \n\t"
                      "popl %0 \n\t"
                      "movl %0, %1 \n\t"
                      "xorl %2, %0 \n\t"
                      "pushl %0 \n\t"
                      "popfl \n\t"
                      "pushfl \n\t"
                      "popl %0 \n\t"
                      "popfl \n\t"

                      : "=&r" (f1), "=&r" (f2)
                      : "ir" (flag));

        return ((f1^f2) & flag) != 0;
}

/* Probe for the CPUID instruction */
static int __cpuinit have_cpuid_p(void)
{
        return flag_is_changeable_p(X86_EFLAGS_ID);
}

static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
        unsigned long lo, hi;

        if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
                return;

        /* Disable processor serial number: */

        rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
        lo |= 0x200000;
        wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);

        printk(KERN_NOTICE "CPU serial number disabled.\n");
        clear_cpu_cap(c, X86_FEATURE_PN);

        /* Disabling the serial number may affect the cpuid level */
        c->cpuid_level = cpuid_eax(0);
}

static int __init x86_serial_nr_setup(char *s)
{
        disable_x86_serial_nr = 0;
        return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
#else
static inline int flag_is_changeable_p(u32 flag)
{
        return 1;
}
/* Probe for the CPUID instruction */
static inline int have_cpuid_p(void)
{
        return 1;
}
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
#endif

/*
 * Some CPU features depend on higher CPUID levels, which may not always
 * be available due to CPUID level capping or broken virtualization
 * software.  Add those features to this table to auto-disable them.
 */
struct cpuid_dependent_feature {
        u32 feature;
        u32 level;
};

static const struct cpuid_dependent_feature __cpuinitconst
cpuid_dependent_features[] = {
        { X86_FEATURE_MWAIT, 0x00000005 },
        { X86_FEATURE_DCA, 0x00000009 },
        { X86_FEATURE_XSAVE, 0x0000000d },
        { 0, 0 }
};

static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{
        const struct cpuid_dependent_feature *df;

        for (df = cpuid_dependent_features; df->feature; df++) {

                if (!cpu_has(c, df->feature))
                        continue;
                /*
                 * Note: cpuid_level is set to -1 if unavailable, but
                 * extended_cpuid_level is set to 0 if unavailable
                 * and the legitimate extended levels are all negative
                 * when signed; hence the weird messing around with
                 * signs here...
                 */
                if (!((s32)df->level < 0 ?
                     (u32)df->level > (u32)c->extended_cpuid_level :
                     (s32)df->level > (s32)c->cpuid_level))
                        continue;

                clear_cpu_cap(c, df->feature);
                if (!warn)
                        continue;

                printk(KERN_WARNING
                       "CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
                       x86_cap_flags[df->feature], df->level);
        }
}
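
/*
 * Illustrative example of the level check above (not from the original
 * source): X86_FEATURE_XSAVE needs CPUID leaf 0x0000000d, so on a
 * hypervisor that caps cpuid_level at, say, 0x0000000a the feature is
 * cleared.  Extended levels such as 0x80000008 are negative as s32,
 * which is why they are compared unsigned against extended_cpuid_level
 * (0 when no extended leaves exist) rather than signed against
 * cpuid_level (-1 when CPUID itself is absent).
 */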

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set it;
 * in particular, if CPUID levels 0x80000002..4 are supported, this
 * isn't used
 */

/* Look up CPU names by table lookup. */
static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
{
        const struct cpu_model_info *info;

        if (c->x86_model >= 16)
                return NULL;    /* Range check */

        if (!this_cpu)
                return NULL;

        info = this_cpu->c_models;

        while (info && info->family) {
                if (info->family == c->x86)
                        return info->model_names[c->x86_model];
                info++;
        }
        return NULL;            /* Not found */
}

__u32 cpu_caps_cleared[NCAPINTS] __cpuinitdata;
__u32 cpu_caps_set[NCAPINTS] __cpuinitdata;

void load_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
        loadsegment(fs, __KERNEL_PERCPU);
#else
        loadsegment(gs, 0);
        wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
#endif
        load_stack_canary_segment();
}

/*
 * Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one.
 */
void switch_to_new_gdt(int cpu)
{
        struct desc_ptr gdt_descr;

        gdt_descr.address = (long)get_cpu_gdt_table(cpu);
        gdt_descr.size = GDT_SIZE - 1;
        load_gdt(&gdt_descr);
        /* Reload the per-cpu base */

        load_percpu_segment(cpu);
}

static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};

static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
        unsigned int *v;
        char *p, *q;

        if (c->extended_cpuid_level < 0x80000004)
                return;

        v = (unsigned int *)c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;

        /*
         * Intel chips right-justify this string for some dumb reason;
         * undo that brain damage:
         */
        p = q = &c->x86_model_id[0];
        while (*p == ' ')
                p++;
        if (p != q) {
                while (*p)
                        *q++ = *p++;
                while (q <= &c->x86_model_id[48])
                        *q++ = '\0';    /* Zero-pad the rest */
        }
}

void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, ebx, ecx, edx, l2size;

        n = c->extended_cpuid_level;

        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
                c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
                /* On K8 L1 TLB is inclusive, so don't count it */
                c->x86_tlbsize = 0;
#endif
        }

        if (n < 0x80000006)     /* Some chips just have a large L1. */
                return;

        cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
        l2size = ecx >> 16;

#ifdef CONFIG_X86_64
        c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
        /* do processor-specific cache resizing */
        if (this_cpu->c_size_cache)
                l2size = this_cpu->c_size_cache(c, l2size);

        /* Allow user to override all this if necessary. */
        if (cachesize_override != -1)
                l2size = cachesize_override;

        if (l2size == 0)
                return;         /* Again, no L2 cache is possible */
#endif

        c->x86_cache_size = l2size;
}
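
/*
 * Illustrative example of the topology math in detect_ht() below (not
 * from the original source): with CPUID.1:EBX[23:16] reporting 8 logical
 * CPUs per package and x86_max_cores = 4, index_msb is first 3, so
 * phys_proc_id is the APIC ID shifted right by 3.  smp_num_siblings then
 * becomes 8/4 = 2, index_msb drops to 1 and core_bits to 2, so
 * cpu_core_id ends up as (apicid >> 1) & 3.
 */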

void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
        u32 eax, ebx, ecx, edx;
        int index_msb, core_bits;
        static bool printed;

        if (!cpu_has(c, X86_FEATURE_HT))
                return;

        if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
                goto out;

        if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
                return;

        cpuid(1, &eax, &ebx, &ecx, &edx);

        smp_num_siblings = (ebx & 0xff0000) >> 16;

        if (smp_num_siblings == 1) {
                printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n");
                goto out;
        }

        if (smp_num_siblings <= 1)
                goto out;

        if (smp_num_siblings > nr_cpu_ids) {
                pr_warning("CPU: Unsupported number of siblings %d",
                           smp_num_siblings);
                smp_num_siblings = 1;
                return;
        }

        index_msb = get_count_order(smp_num_siblings);
        c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);

        smp_num_siblings = smp_num_siblings / c->x86_max_cores;

        index_msb = get_count_order(smp_num_siblings);

        core_bits = get_count_order(c->x86_max_cores);

        c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
                                           ((1 << core_bits) - 1);

out:
        if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
                printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
                       c->phys_proc_id);
                printk(KERN_INFO "CPU: Processor Core ID: %d\n",
                       c->cpu_core_id);
                printed = 1;
        }
#endif
}

static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
        char *v = c->x86_vendor_id;
        int i;

        for (i = 0; i < X86_VENDOR_NUM; i++) {
                if (!cpu_devs[i])
                        break;

                if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
                    (cpu_devs[i]->c_ident[1] &&
                     !strcmp(v, cpu_devs[i]->c_ident[1]))) {

                        this_cpu = cpu_devs[i];
                        c->x86_vendor = this_cpu->c_x86_vendor;
                        return;
                }
        }

        printk_once(KERN_ERR
                    "CPU: vendor_id '%s' unknown, using generic init.\n" \
                    "CPU: Your system may be unstable.\n", v);

        c->x86_vendor = X86_VENDOR_UNKNOWN;
        this_cpu = &default_cpu;
}

void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
{
        /* Get vendor name */
        cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
              (unsigned int *)&c->x86_vendor_id[0],
              (unsigned int *)&c->x86_vendor_id[8],
              (unsigned int *)&c->x86_vendor_id[4]);

        c->x86 = 4;
        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                u32 junk, tfms, cap0, misc;

                cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
                c->x86 = (tfms >> 8) & 0xf;
                c->x86_model = (tfms >> 4) & 0xf;
                c->x86_mask = tfms & 0xf;

                if (c->x86 == 0xf)
                        c->x86 += (tfms >> 20) & 0xff;
                if (c->x86 >= 0x6)
                        c->x86_model += ((tfms >> 16) & 0xf) << 4;

                if (cap0 & (1<<19)) {
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
                        c->x86_cache_alignment = c->x86_clflush_size;
                }
        }
}
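
/*
 * Illustrative decode of the family/model math above (not from the
 * original source): with EAX = 0x000206a7 from CPUID leaf 1, the base
 * family is 6 and the base model is 0xa; since family >= 6, the extended
 * model field (0x2) is folded in to give model 0x2a, and the stepping is
 * 0x7.  The extended family field is only added when the base family
 * is 0xf.
 */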

static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
{
        u32 tfms, xlvl;
        u32 ebx;

        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                u32 capability, excap;

                cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
                c->x86_capability[0] = capability;
                c->x86_capability[4] = excap;
        }

        /* AMD-defined flags: level 0x80000001 */
        xlvl = cpuid_eax(0x80000000);
        c->extended_cpuid_level = xlvl;

        if ((xlvl & 0xffff0000) == 0x80000000) {
                if (xlvl >= 0x80000001) {
                        c->x86_capability[1] = cpuid_edx(0x80000001);
                        c->x86_capability[6] = cpuid_ecx(0x80000001);
                }
        }

        if (c->extended_cpuid_level >= 0x80000008) {
                u32 eax = cpuid_eax(0x80000008);

                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
        }
#ifdef CONFIG_X86_32
        else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
                c->x86_phys_bits = 36;
#endif

        if (c->extended_cpuid_level >= 0x80000007)
                c->x86_power = cpuid_edx(0x80000007);

}

static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
        int i;

        /*
         * First of all, decide if this is a 486 or higher
         * It's a 486 if we can modify the AC flag
         */
        if (flag_is_changeable_p(X86_EFLAGS_AC))
                c->x86 = 4;
        else
                c->x86 = 3;

        for (i = 0; i < X86_VENDOR_NUM; i++)
                if (cpu_devs[i] && cpu_devs[i]->c_identify) {
                        c->x86_vendor_id[0] = 0;
                        cpu_devs[i]->c_identify(c);
                        if (c->x86_vendor_id[0]) {
                                get_cpu_vendor(c);
                                break;
                        }
                }
#endif
}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the BP.  Don't add code here
 * that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
        c->x86_clflush_size = 64;
        c->x86_phys_bits = 36;
        c->x86_virt_bits = 48;
#else
        c->x86_clflush_size = 32;
        c->x86_phys_bits = 32;
        c->x86_virt_bits = 32;
#endif
        c->x86_cache_alignment = c->x86_clflush_size;

        memset(&c->x86_capability, 0, sizeof c->x86_capability);
        c->extended_cpuid_level = 0;

        if (!have_cpuid_p())
                identify_cpu_without_cpuid(c);

        /* cyrix could have cpuid enabled via c_identify()*/
        if (!have_cpuid_p())
                return;

        cpu_detect(c);

        get_cpu_vendor(c);

        get_cpu_cap(c);

        if (this_cpu->c_early_init)
                this_cpu->c_early_init(c);

#ifdef CONFIG_SMP
        c->cpu_index = boot_cpu_id;
#endif
        filter_cpuid_features(c, false);
}

void __init early_cpu_init(void)
{
        const struct cpu_dev *const *cdev;
        int count = 0;

#ifdef PROCESSOR_SELECT
        printk(KERN_INFO "KERNEL supported cpus:\n");
#endif

        for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
                const struct cpu_dev *cpudev = *cdev;

                if (count >= X86_VENDOR_NUM)
                        break;
                cpu_devs[count] = cpudev;
                count++;

#ifdef PROCESSOR_SELECT
                {
                        unsigned int j;

                        for (j = 0; j < 2; j++) {
                                if (!cpudev->c_ident[j])
                                        continue;
                                printk(KERN_INFO " %s %s\n", cpudev->c_vendor,
                                       cpudev->c_ident[j]);
                        }
                }
#endif
        }
        early_identify_cpu(&boot_cpu_data);
}

/*
 * The NOPL instruction is supposed to exist on all CPUs with
 * family >= 6; unfortunately, that's not true in practice because
 * of early VIA chips and (more importantly) broken virtualizers that
 * are not easy to detect.  In the latter case it doesn't even *fail*
 * reliably, so probing for it doesn't even work.  Disable it completely
 * unless we can find a reliable way to detect all the broken cases.
 */
static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
{
        clear_cpu_cap(c, X86_FEATURE_NOPL);
}

static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
{
        c->extended_cpuid_level = 0;

        if (!have_cpuid_p())
                identify_cpu_without_cpuid(c);

        /* cyrix could have cpuid enabled via c_identify()*/
        if (!have_cpuid_p())
                return;

        cpu_detect(c);

        get_cpu_vendor(c);

        get_cpu_cap(c);

        if (c->cpuid_level >= 0x00000001) {
                c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
#ifdef CONFIG_X86_32
# ifdef CONFIG_X86_HT
                c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
# else
                c->apicid = c->initial_apicid;
# endif
#endif

#ifdef CONFIG_X86_HT
                c->phys_proc_id = c->initial_apicid;
#endif
        }

        get_model_name(c); /* Default name */

        init_scattered_cpuid_features(c);
        detect_nopl(c);
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
        int i;

        c->loops_per_jiffy = loops_per_jiffy;
        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        c->x86_model = c->x86_mask = 0; /* So far unknown... */
        c->x86_vendor_id[0] = '\0';     /* Unset */
        c->x86_model_id[0] = '\0';      /* Unset */
        c->x86_max_cores = 1;
        c->x86_coreid_bits = 0;
#ifdef CONFIG_X86_64
        c->x86_clflush_size = 64;
        c->x86_phys_bits = 36;
        c->x86_virt_bits = 48;
#else
        c->cpuid_level = -1;            /* CPUID not detected */
        c->x86_clflush_size = 32;
        c->x86_phys_bits = 32;
        c->x86_virt_bits = 32;
#endif
        c->x86_cache_alignment = c->x86_clflush_size;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);

        generic_identify(c);

        if (this_cpu->c_identify)
                this_cpu->c_identify(c);

        /* Clear/Set all flags overridden by options, after probe */
        for (i = 0; i < NCAPINTS; i++) {
                c->x86_capability[i] &= ~cpu_caps_cleared[i];
                c->x86_capability[i] |= cpu_caps_set[i];
        }

#ifdef CONFIG_X86_64
        c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
#endif

        /*
         * Vendor-specific initialization.  In this section we
         * canonicalize the feature flags, meaning if there are
         * features a certain CPU supports which CPUID doesn't
         * tell us, CPUID claiming incorrect flags, or other bugs,
         * we handle them here.
         *
         * At the end of this section, c->x86_capability better
         * indicate the features this CPU genuinely supports!
         */
        if (this_cpu->c_init)
                this_cpu->c_init(c);

        /* Disable the PN if appropriate */
        squash_the_stupid_serial_number(c);

        /*
         * The vendor-specific functions might have changed features.
         * Now we do "generic changes."
         */

        /* Filter out anything that depends on CPUID levels we don't have */
        filter_cpuid_features(c, true);

        /* If the model name is still unset, do table lookup. */
        if (!c->x86_model_id[0]) {
                const char *p;
                p = table_lookup_model(c);
                if (p)
                        strcpy(c->x86_model_id, p);
                else
                        /* Last resort... */
                        sprintf(c->x86_model_id, "%02x/%02x",
                                c->x86, c->x86_model);
        }

#ifdef CONFIG_X86_64
        detect_ht(c);
#endif

        init_hypervisor(c);

        /*
         * Clear/Set all flags overridden by options; this must be done
         * before the SMP capability AND below.
         */
        for (i = 0; i < NCAPINTS; i++) {
                c->x86_capability[i] &= ~cpu_caps_cleared[i];
                c->x86_capability[i] |= cpu_caps_set[i];
        }

        /*
         * On SMP, boot_cpu_data holds the common feature set between
         * all CPUs; so make sure that we indicate which features are
         * common between the CPUs.  The first time this routine gets
         * executed, c == &boot_cpu_data.
         */
        if (c != &boot_cpu_data) {
                /* AND the already accumulated flags with these */
                for (i = 0; i < NCAPINTS; i++)
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
        }

        /* Init Machine Check Exception if available. */
        mcheck_cpu_init(c);

        select_idle_routine(c);

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
        numa_add_cpu(smp_processor_id());
#endif
}

#ifdef CONFIG_X86_64
static void vgetcpu_set_mode(void)
{
        if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP))
                vgetcpu_mode = VGETCPU_RDTSCP;
        else
                vgetcpu_mode = VGETCPU_LSL;
}
#endif

void __init identify_boot_cpu(void)
{
        identify_cpu(&boot_cpu_data);
        init_c1e_mask();
#ifdef CONFIG_X86_32
        sysenter_setup();
        enable_sep_cpu();
#else
        vgetcpu_set_mode();
#endif
        init_hw_perf_events();
}

void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
{
        BUG_ON(c == &boot_cpu_data);
        identify_cpu(c);
#ifdef CONFIG_X86_32
        enable_sep_cpu();
#endif
        mtrr_ap_init();
}

struct msr_range {
        unsigned min;
        unsigned max;
};

static const struct msr_range msr_range_array[] __cpuinitconst = {
        { 0x00000000, 0x00000418},
        { 0xc0000000, 0xc000040b},
        { 0xc0010000, 0xc0010142},
        { 0xc0011000, 0xc001103b},
};

static void __cpuinit print_cpu_msr(void)
{
        unsigned index_min, index_max;
        unsigned index;
        u64 val;
        int i;

        for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
                index_min = msr_range_array[i].min;
                index_max = msr_range_array[i].max;

                for (index = index_min; index < index_max; index++) {
                        if (rdmsrl_amd_safe(index, &val))
                                continue;
                        printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
                }
        }
}

static int show_msr __cpuinitdata;

static __init int setup_show_msr(char *arg)
{
        int num;

        get_option(&arg, &num);

        if (num > 0)
                show_msr = num;
        return 1;
}
__setup("show_msr=", setup_show_msr);

static __init int setup_noclflush(char *arg)
{
        setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
        return 1;
}
__setup("noclflush", setup_noclflush);

void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
        const char *vendor = NULL;

        if (c->x86_vendor < X86_VENDOR_NUM) {
                vendor = this_cpu->c_vendor;
        } else {
                if (c->cpuid_level >= 0)
                        vendor = c->x86_vendor_id;
        }

        if (vendor && !strstr(c->x86_model_id, vendor))
                printk(KERN_CONT "%s ", vendor);

        if (c->x86_model_id[0])
                printk(KERN_CONT "%s", c->x86_model_id);
        else
                printk(KERN_CONT "%d86", c->x86);

        if (c->x86_mask || c->cpuid_level >= 0)
                printk(KERN_CONT " stepping %02x\n", c->x86_mask);
        else
                printk(KERN_CONT "\n");

#ifdef CONFIG_SMP
        if (c->cpu_index < show_msr)
                print_cpu_msr();
#else
        if (show_msr)
                print_cpu_msr();
#endif
}

static __init int setup_disablecpuid(char *arg)
{
        int bit;

        if (get_option(&arg, &bit) && bit < NCAPINTS*32)
                setup_clear_cpu_cap(bit);
        else
                return 0;

        return 1;
}
__setup("clearcpuid=", setup_disablecpuid);

#ifdef CONFIG_X86_64
struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };

DEFINE_PER_CPU_FIRST(union irq_stack_union,
                     irq_stack_union) __aligned(PAGE_SIZE);

/*
 * The following four percpu variables are hot.  Align current_task to
 * cacheline size such that all four fall in the same cacheline.
 */
DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
        &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(unsigned long, kernel_stack) =
        (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
EXPORT_PER_CPU_SYMBOL(kernel_stack);

DEFINE_PER_CPU(char *, irq_stack_ptr) =
        init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;

DEFINE_PER_CPU(unsigned int, irq_count) = -1;

/*
 * Special IST stacks which the CPU switches to when it calls
 * an IST-marked descriptor entry.  Up to 7 stacks (hardware
 * limit), all of them are 4K, except the debug stack which
 * is 8K.
 */
static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
        [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
        [DEBUG_STACK - 1] = DEBUG_STKSZ
};

static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
        [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);

/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
        /*
         * LSTAR and STAR live in a bit strange symbiosis.
         * They both write to the same internal register.  STAR allows
         * setting CS/DS, but only a 32-bit target; LSTAR sets the 64-bit rip.
         */
        wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
        wrmsrl(MSR_LSTAR, system_call);
        wrmsrl(MSR_CSTAR, ignore_sysret);

#ifdef CONFIG_IA32_EMULATION
        syscall32_cpu_init();
#endif

        /* Flags to clear on syscall */
        wrmsrl(MSR_SYSCALL_MASK,
               X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL);
}
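
/*
 * Illustrative note on the MSR_STAR layout above (not from the original
 * source): bits 47:32 hold the SYSCALL selector base, so the CPU loads
 * CS = __KERNEL_CS and SS = __KERNEL_CS + 8 on syscall, while bits 63:48
 * hold the SYSRET selector base, from which a 64-bit sysret derives
 * CS = __USER32_CS + 16 and SS = __USER32_CS + 8.
 */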

unsigned long kernel_eflags;

/*
 * Copies of the original ist values from the tss are only accessed during
 * debugging, no special alignment required.
 */
DEFINE_PER_CPU(struct orig_ist, orig_ist);

#else   /* CONFIG_X86_64 */

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

#ifdef CONFIG_CC_STACKPROTECTOR
DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif

/* Make sure %fs and %gs are initialized properly in idle threads */
struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
{
        memset(regs, 0, sizeof(struct pt_regs));
        regs->fs = __KERNEL_PERCPU;
        regs->gs = __KERNEL_STACK_CANARY;

        return regs;
}
#endif  /* CONFIG_X86_64 */

/*
 * Clear all 6 debug registers:
 */
static void clear_all_debug_regs(void)
{
        int i;

        for (i = 0; i < 8; i++) {
                /* Ignore db4, db5 */
                if ((i == 4) || (i == 5))
                        continue;

                set_debugreg(0, i);
        }
}

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless, this function acts as a
 * 'CPU state barrier', nothing should get across.
 * A lot of state is already set up in PDA init for 64-bit.
 */
#ifdef CONFIG_X86_64

void __cpuinit cpu_init(void)
{
        struct orig_ist *oist;
        struct task_struct *me;
        struct tss_struct *t;
        unsigned long v;
        int cpu;
        int i;

        cpu = stack_smp_processor_id();
        t = &per_cpu(init_tss, cpu);
        oist = &per_cpu(orig_ist, cpu);

#ifdef CONFIG_NUMA
        if (cpu != 0 && percpu_read(node_number) == 0 &&
            cpu_to_node(cpu) != NUMA_NO_NODE)
                percpu_write(node_number, cpu_to_node(cpu));
#endif

        me = current;

        if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask))
                panic("CPU#%d already initialized!\n", cpu);

        pr_debug("Initializing CPU#%d\n", cpu);

        clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

        /*
         * Initialize the per-CPU GDT with the boot GDT,
         * and set up the GDT descriptor:
         */

        switch_to_new_gdt(cpu);
        loadsegment(fs, 0);

        load_idt((const struct desc_ptr *)&idt_descr);

        memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
        syscall_init();

        wrmsrl(MSR_FS_BASE, 0);
        wrmsrl(MSR_KERNEL_GS_BASE, 0);
        barrier();

        x86_configure_nx();
        if (cpu != 0)
                enable_x2apic();

        /*
         * set up and load the per-CPU TSS
         */
        if (!oist->ist[0]) {
                char *estacks = per_cpu(exception_stacks, cpu);

                for (v = 0; v < N_EXCEPTION_STACKS; v++) {
                        estacks += exception_stack_sizes[v];
                        oist->ist[v] = t->x86_tss.ist[v] =
                                        (unsigned long)estacks;
                }
        }

        t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

        /*
         * <= is required because the CPU will access up to
         * 8 bits beyond the end of the IO permission bitmap.
         */
        for (i = 0; i <= IO_BITMAP_LONGS; i++)
                t->io_bitmap[i] = ~0UL;

        atomic_inc(&init_mm.mm_count);
        me->active_mm = &init_mm;
        BUG_ON(me->mm);
        enter_lazy_tlb(&init_mm, me);

        load_sp0(t, &current->thread);
        set_tss_desc(cpu, t);
        load_TR_desc();
        load_LDT(&init_mm.context);

#ifdef CONFIG_KGDB
        /*
         * If kgdb is connected, no debug regs should be altered.  This
         * is only applicable when KGDB and a KGDB I/O module are built
         * into the kernel and you are using early debugging with
         * kgdbwait. KGDB will control the kernel HW breakpoint registers.
         */
        if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
                arch_kgdb_ops.correct_hw_break();
        else
#endif
                clear_all_debug_regs();

        fpu_init();

        raw_local_save_flags(kernel_eflags);

        if (is_uv_system())
                uv_cpu_init();
}

#else

void __cpuinit cpu_init(void)
{
        int cpu = smp_processor_id();
        struct task_struct *curr = current;
        struct tss_struct *t = &per_cpu(init_tss, cpu);
        struct thread_struct *thread = &curr->thread;

        if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
                printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
                for (;;)
                        local_irq_enable();
        }

        printk(KERN_INFO "Initializing CPU#%d\n", cpu);

        if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
                clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

        load_idt(&idt_descr);
        switch_to_new_gdt(cpu);

        /*
         * Set up and load the per-CPU TSS and LDT
         */
        atomic_inc(&init_mm.mm_count);
        curr->active_mm = &init_mm;
        BUG_ON(curr->mm);
        enter_lazy_tlb(&init_mm, curr);

        load_sp0(t, thread);
        set_tss_desc(cpu, t);
        load_TR_desc();
        load_LDT(&init_mm.context);

        t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

#ifdef CONFIG_DOUBLEFAULT
        /* Set up doublefault TSS pointer in the GDT */
        __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
#endif

        clear_all_debug_regs();

        /*
         * Force FPU initialization:
         */
        if (cpu_has_xsave)
                current_thread_info()->status = TS_XSAVE;
        else
                current_thread_info()->status = 0;
        clear_used_math();
        mxcsr_feature_mask_init();

        /*
         * Boot processor to set up the FP and extended state context info.
         */
        if (smp_processor_id() == boot_cpu_id)
                init_thread_xstate();

        xsave_init();
}
#endif