#include <linux/export.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/pci-direct.h>

#ifdef CONFIG_X86_64
# include <asm/numa_64.h>
# include <asm/mmconfig.h>
# include <asm/cacheflush.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_32
/*
 * B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 * misexecution of code under Linux. Owners of such processors should
 * contact AMD for precise details and a CPU swap.
 *
 * See	http://www.multimania.com/poulot/k6bug.html
 *	http://www.amd.com/K6/k6docs/revgd.html
 *
 * The following test is erm.. interesting. AMD neglected to up
 * the chip setting when fixing the bug but they also tweaked some
 * performance at the same time..
 */

extern void vide(void);
__asm__(".align 4\nvide: ret");

static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
{
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround: Remove the unneeded alias.
 */
#define CBAR		(0xfffc) /* Configuration Base Address (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0X000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
}


static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int mbytes = num_physpages >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_mask == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		unsigned long d, d2;

		printk(KERN_INFO "AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */

		n = K6_BUG_LOOP;
		f_vide = vide;
		rdtscl(d);
		while (n--)
			f_vide();
		rdtscl(d2);
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			printk(KERN_CONT
				"system stability may be impaired when more than 32 MB are used.\n");
		else
			printk(KERN_CONT "probably OK (after B9730xxxx).\n");
		printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	   (c->x86_model == 8 && c->x86_mask < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_mask > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
}

static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	/* called from identify_secondary_cpu()? */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
	    (c->x86_mask == 1)))
		goto valid_k7;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_mask == 0))
		goto valid_k7;

	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have the MP capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XPs have the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has_mp)
			goto valid_k7;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	if (!test_taint(TAINT_UNSAFE_SMP))
		add_taint(TAINT_UNSAFE_SMP);

valid_k7:
	;
#endif
}

static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
{
	u32 l, h;

	/*
	 * Bit 15 of Athlon-specific MSR 15 needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPUs.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
			rdmsr(MSR_K7_HWCR, l, h);
			l &= ~0x00008000;
			wrmsr(MSR_K7_HWCR, l, h);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
	 * as per AMD technical note 27212 0.2.
	 */
	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			printk(KERN_INFO
			    "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	set_cpu_cap(c, X86_FEATURE_K7);

	amd_k7_smp_check(c);
}
#endif

#ifdef CONFIG_NUMA
/*
 * To work around a broken NUMA config. Read the comment in
 * srat_detect_node().
 */
static int __cpuinit nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * Fixup core topology information for
 * (1) AMD multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) AMD processors supporting compute units
 */
#ifdef CONFIG_X86_HT
static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
{
	u32 nodes, cores_per_cu = 1;
	u8 node_id;
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (cpu_has(c, X86_FEATURE_TOPOEXT)) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		nodes = ((ecx >> 8) & 7) + 1;
		node_id = ecx & 7;

		/* get compute unit information */
		smp_num_siblings = ((ebx >> 8) & 3) + 1;
		c->compute_unit_id = ebx & 0xff;
		cores_per_cu += ((ebx >> 8) & 3);
	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes = ((value >> 3) & 7) + 1;
		node_id = value & 7;
	} else
		return;

	/* fixup multi-node processor information */
	if (nodes > 1) {
		u32 cores_per_node;
		u32 cus_per_node;

		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
		cores_per_node = c->x86_max_cores / nodes;
		cus_per_node = cores_per_node / cores_per_cu;

		/* store NodeID, use llc_shared_map to store sibling info */
		per_cpu(cpu_llc_id, cpu) = node_id;

		/* core id has to be in the [0 .. cores_per_node - 1] range */
		c->cpu_core_id %= cores_per_node;
		c->compute_unit_id %= cus_per_node;
	}
}
#endif

/*
 * On an AMD dual-core setup, the lower bits of the APIC ID distinguish the cores.
 * Assumes number of cores is a power of two.
 */
static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
	amd_get_topology(c);
#endif
}

int amd_get_nb_id(int cpu)
{
	int id = 0;
#ifdef CONFIG_SMP
	id = per_cpu(cpu_llc_id, cpu);
#endif
	return id;
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);

static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created. In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in. Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids. If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID, HT node ID and NUMA topology. As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (ht_nodeid >= 0 &&
		    __apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_HT
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}

static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				printk(KERN_WARNING FW_BUG "TSC doesn't count "
					"with P0 frequency!\n");
		}
	}

	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;

		cpuid	 = cpuid_edx(0x80000005);
		assoc	 = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;

		va_align.mask	= (upperbit - 1) & PAGE_MASK;
		va_align.flags	= ALIGN_VA_32 | ALIGN_VA_64;
	}
}

static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
	u32 dummy;

	early_init_amd_mc(c);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_mask >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/* check CPU config space for extended APIC ID */
	if (cpu_has_apic && c->x86 >= 0xf) {
		unsigned int val;
		val = read_pci_config(0, 24, 0, 0x68);
		if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
	}
#endif

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
}

static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned long long value;

	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 0xf) {
		rdmsrl(MSR_K7_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K7_HWCR, value);
	}
#endif

	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

#ifdef CONFIG_X86_64
	/* On C+ stepping K8 rep microcode works well for copy/memset */
	if (c->x86 == 0xf) {
		u32 level;

		level = cpuid_eax(1);
		if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
			set_cpu_cap(c, X86_FEATURE_REP_GOOD);

		/*
		 * Some BIOSes incorrectly force this feature, but only K8
		 * revision D (model = 0x14) and later actually support it.
		 * (AMD Erratum #110, docId: 25759).
		 */
		if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
			u64 val;

			clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
			if (!rdmsrl_amd_safe(0xc001100d, &val)) {
				val &= ~(1ULL << 32);
				wrmsrl_amd_safe(0xc001100d, val);
			}
		}

	}
	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();
#else

	/*
	 * FIXME: We should handle the K5 here. Set up the write
	 * range and also turn on MSR 83 bits 4 and 31 (write alloc,
	 * no bus pipeline)
	 */

	switch (c->x86) {
	case 4:
		init_amd_k5(c);
		break;
	case 5:
		init_amd_k6(c);
		break;
	case 6: /* An Athlon/Duron */
		init_amd_k7(c);
		break;
	}

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);
#endif

	/* Enable workaround for FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK);

	if (!c->x86_model_id[0]) {
		switch (c->x86) {
		case 0xf:
			/* Should distinguish models here, but this is only
			   a fallback anyway. */
			strcpy(c->x86_model_id, "Hammer");
			break;
		}
	}

	cpu_detect_cache_sizes(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008) {
		amd_detect_cmp(c);
		srat_detect_node(c);
	}

#ifdef CONFIG_X86_32
	detect_ht(c);
#endif

	if (c->extended_cpuid_level >= 0x80000006) {
		if (cpuid_edx(0x80000006) & 0xf000)
			num_cache_leaves = 4;
		else
			num_cache_leaves = 3;
	}

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	if (cpu_has_xmm2) {
		/* MFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
	}

#ifdef CONFIG_X86_64
	if (c->x86 == 0x10) {
		/* do this for boot cpu */
		if (c == &boot_cpu_data)
			check_enable_amd_mmconf_dmi();

		fam10h_check_enable_mmcfg();
	}

	if (c == &boot_cpu_data && c->x86 >= 0xf) {
		unsigned long long tseg;

		/*
		 * Split up direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems very little
		 * benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
			printk(KERN_DEBUG "tseg: %010llx\n", tseg);
			if ((tseg>>PMD_SHIFT) <
				(max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
				((tseg>>PMD_SHIFT) <
				(max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
				(tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
				set_memory_4k((unsigned long)__va(tseg), 1);
		}
	}
#endif

	/*
	 * Family 0x12 and above processors have the APIC timer
	 * running in deep C states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here
	 * because this is always needed when GART is enabled, even in a
	 * kernel which has no MCE support built in.
	 */
	if (c->x86 == 0x10) {
		/*
		 * The BIOS should disable GartTlbWlk errors itself. If it
		 * doesn't, do it here, as suggested by the BKDG.
		 *
		 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
		 */
		u64 mask;
		int err;

		err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
		if (err == 0) {
			mask |= (1 << 10);
			checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
		}
	}
}

#ifdef CONFIG_X86_32
static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
							unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if ((c->x86 == 6)) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_mask == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
			(c->x86_mask == 0 || c->x86_mask == 1))
			size = 256;
	}
	return size;
}
#endif

static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.c_models = {
		{ .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
		  {
			[3] = "486 DX/2",
			[7] = "486 DX/2-WB",
			[8] = "486 DX/4",
			[9] = "486 DX/4-WB",
			[14] = "Am5x86-WT",
			[15] = "Am5x86-WB"
		  }
		},
	},
	.c_size_cache	= amd_size_cache,
#endif
	.c_early_init   = early_init_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE(). Each erratum also has to be declared as extern const
 * int[] in arch/x86/include/asm/processor.h.
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
EXPORT_SYMBOL_GPL(amd_erratum_400);

const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
EXPORT_SYMBOL_GPL(amd_erratum_383);

bool cpu_has_amd_erratum(const int *erratum)
{
	struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
	int osvw_id = *erratum++;
	u32 range;
	u32 ms;

	/*
	 * If called early enough that current_cpu_data hasn't been initialized
	 * yet, fall back to boot_cpu_data.
	 */
	if (cpu->x86 == 0)
		cpu = &boot_cpu_data;

	if (cpu->x86_vendor != X86_VENDOR_AMD)
		return false;

	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			    osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}

	/* OSVW unavailable or ID unknown, match family-model-stepping range */
	ms = (cpu->x86_model << 4) | cpu->x86_mask;
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
}

EXPORT_SYMBOL_GPL(cpu_has_amd_erratum);
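
/*
 * Usage sketch (illustrative only, following the "Example" convention of the
 * errata comment above; the caller and placement are assumptions, not code
 * from this file): a workaround site elsewhere in the kernel tests whether
 * the running CPU is affected before acting, e.g.
 *
 *	if (cpu_has_amd_erratum(amd_erratum_383))
 *		... apply the erratum 383 workaround ...
 *
 * cpu_has_amd_erratum() prefers the OSVW MSRs when X86_FEATURE_OSVW is set
 * and the erratum carries an OSVW id; otherwise it falls back to matching
 * the family/model/stepping ranges encoded in the erratum array.
 */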