/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * "Generic AMD" model-specific support.  If no more-specific support can
 * be found, or such a module declines to initialize, then for AuthenticAMD
 * cpus this module can have a crack at providing some AMD model-specific
 * support that at least goes beyond common MCA architectural features
 * if not down to the nitty-gritty level for a particular model.  We
 * are layered on top of a cpu module, likely cpu.generic, so there is no
 * need for us to perform common architecturally-accessible functions.
 */

#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/modctl.h>
#include <sys/cpu_module.h>
#include <sys/mca_x86.h>
#include <sys/pci_cfgspace.h>
#include <sys/x86_archext.h>
#include <sys/mc_amd.h>
#include <sys/fm/protocol.h>
#include <sys/fm/cpu/GENAMD.h>
#include <sys/nvpair.h>
#include <sys/controlregs.h>
#include <sys/pghw.h>
#include <sys/sunddi.h>
#include <sys/sysmacros.h>
#include <sys/cpu_module_ms_impl.h>

#include "authamd.h"

int authamd_ms_support_disable = 0;

#define	AUTHAMD_F_REVS_BCDE \
	(X86_CHIPREV_AMD_F_REV_B | X86_CHIPREV_AMD_F_REV_C0 | \
	X86_CHIPREV_AMD_F_REV_CG | X86_CHIPREV_AMD_F_REV_D | \
	X86_CHIPREV_AMD_F_REV_E)

#define	AUTHAMD_F_REVS_FG \
	(X86_CHIPREV_AMD_F_REV_F | X86_CHIPREV_AMD_F_REV_G)

#define	AUTHAMD_10_REVS_AB \
	(X86_CHIPREV_AMD_10_REV_A | X86_CHIPREV_AMD_10_REV_B)

/*
 * Bitmasks of support for various features.  Try to enable features
 * via inclusion in one of these bitmasks and check that at the
 * feature implementation - that way new family support may often
 * simply need to update these bitmasks.
 */

/*
 * Families that this module will provide some model-specific
 * support for (if no more-specific module claims it first).
 * We try to support whole families rather than differentiate down
 * to revision.
 */
#define	AUTHAMD_SUPPORTED(fam) \
	((fam) == AUTHAMD_FAMILY_6 || (fam) == AUTHAMD_FAMILY_F || \
	(fam) == AUTHAMD_FAMILY_10)

/*
 * Models that include an on-chip NorthBridge.
 */
#define	AUTHAMD_NBONCHIP(rev) \
	(X86_CHIPREV_ATLEAST(rev, X86_CHIPREV_AMD_F_REV_B) || \
	X86_CHIPREV_ATLEAST(rev, X86_CHIPREV_AMD_10_REV_A))

/*
 * Families/revisions for which we can recognise main memory ECC errors.
 */
#define	AUTHAMD_MEMECC_RECOGNISED(rev) \
	(X86_CHIPREV_ATLEAST(rev, X86_CHIPREV_AMD_F_REV_B) || \
	X86_CHIPREV_ATLEAST(rev, X86_CHIPREV_AMD_10_REV_A))

/*
 * Families/revisions that have an Online Spare Control Register
 */
#define	AUTHAMD_HAS_ONLINESPARECTL(rev) \
	(X86_CHIPREV_ATLEAST(rev, X86_CHIPREV_AMD_F_REV_F) || \
	X86_CHIPREV_ATLEAST(rev, X86_CHIPREV_AMD_10_REV_A))

/*
 * Families/revisions for which we will perform NB MCA Config changes
 */
#define	AUTHAMD_DO_NBMCACFG(rev) \
	(X86_CHIPREV_ATLEAST(rev, X86_CHIPREV_AMD_F_REV_B) || \
	X86_CHIPREV_ATLEAST(rev, X86_CHIPREV_AMD_10_REV_A))

/*
 * Families/revisions that have chip cache scrubbers.
 */
#define	AUTHAMD_HAS_CHIPSCRUB(rev) \
	(X86_CHIPREV_ATLEAST(rev, X86_CHIPREV_AMD_F_REV_B) || \
	X86_CHIPREV_ATLEAST(rev, X86_CHIPREV_AMD_10_REV_A))

/*
 * Families/revisions that have a NB misc register or registers -
 * evaluates to 0 if no support, otherwise the number of MC4_MISCj.
 */
#define	AUTHAMD_NBMISC_NUM(rev) \
	(X86_CHIPREV_ATLEAST(rev, X86_CHIPREV_AMD_F_REV_F) ? 1 : \
	(X86_CHIPREV_ATLEAST(rev, X86_CHIPREV_AMD_10_REV_A) ? 3 : 0))

/*
 * Families/revisions for which we do not wish to machine check for GART
 * table walk errors - bit 10 of NB CTL.
 */
#define	AUTHAMD_NOGARTTBLWLK_MC(rev) \
	(X86_CHIPREV_ATLEAST(rev, X86_CHIPREV_AMD_F_REV_B) || \
	X86_CHIPREV_ATLEAST(rev, X86_CHIPREV_AMD_10_REV_A))

/*
 * Families/revisions that are potentially L3 capable
 */
#define	AUTHAMD_L3CAPABLE(rev) \
	(X86_CHIPREV_ATLEAST(rev, X86_CHIPREV_AMD_10_REV_A))

/*
 * We recognise main memory ECC errors for AUTHAMD_MEMECC_RECOGNISED
 * revisions as:
 *
 *	- being reported by the NB
 *	- being a compound bus/interconnect error (external to chip)
 *	- having LL of LG
 *	- having II of MEM (but could still be a master/target abort)
 *	- having CECC or UECC set
 *
 * We do not check the extended error code (first nibble of the
 * model-specific error code on AMD) since this has changed from
 * family 0xf to family 0x10 (ext code 0 now reserved on family 0x10).
 * Instead we use CECC/UECC to separate off the master/target
 * abort cases.
 *
 * We insist that the detector be the NorthBridge bank; although
 * IC/DC can report some main memory errors, they do not capture
 * an address at sufficient resolution to be useful and the NB will
 * report most errors.
 */
#define	AUTHAMD_IS_MEMECCERR(bank, status) \
	((bank) == AMD_MCA_BANK_NB && \
	MCAX86_ERRCODE_ISBUS_INTERCONNECT(MCAX86_ERRCODE(status)) && \
	MCAX86_ERRCODE_LL(MCAX86_ERRCODE(status)) == MCAX86_ERRCODE_LL_LG && \
	MCAX86_ERRCODE_II(MCAX86_ERRCODE(status)) == MCAX86_ERRCODE_II_MEM && \
	((status) & (AMD_BANK_STAT_CECC | AMD_BANK_STAT_UECC)))

static authamd_error_disp_t authamd_memce_disp = {
	FM_EREPORT_CPU_GENAMD,
	FM_EREPORT_CPU_GENAMD_MEM_CE,
	FM_EREPORT_GENAMD_PAYLOAD_FLAGS_MEM_CE
};

static authamd_error_disp_t authamd_memue_disp = {
	FM_EREPORT_CPU_GENAMD,
	FM_EREPORT_CPU_GENAMD_MEM_UE,
	FM_EREPORT_GENAMD_PAYLOAD_FLAGS_MEM_UE
};

static authamd_error_disp_t authamd_ckmemce_disp = {
	FM_EREPORT_CPU_GENAMD,
	FM_EREPORT_CPU_GENAMD_CKMEM_CE,
	FM_EREPORT_GENAMD_PAYLOAD_FLAGS_CKMEM_CE
};

static authamd_error_disp_t authamd_ckmemue_disp = {
	FM_EREPORT_CPU_GENAMD,
	FM_EREPORT_CPU_GENAMD_CKMEM_UE,
	FM_EREPORT_GENAMD_PAYLOAD_FLAGS_CKMEM_UE
};

/*
 * We recognise GART walk errors as:
 *
 *	- being reported by the NB
 *	- being a compound TLB error
 *	- having LL of LG and TT of GEN
 *	- having UC set
 *	- possibly having PCC set (if source CPU)
 */
#define	AUTHAMD_IS_GARTERR(bank, status) \
	((bank) == AMD_MCA_BANK_NB && \
	MCAX86_ERRCODE_ISTLB(MCAX86_ERRCODE(status)) && \
	MCAX86_ERRCODE_LL(MCAX86_ERRCODE(status)) == MCAX86_ERRCODE_LL_LG && \
	MCAX86_ERRCODE_TT(MCAX86_ERRCODE(status)) == MCAX86_ERRCODE_TT_GEN && \
	(status) & MSR_MC_STATUS_UC)

static authamd_error_disp_t authamd_gart_disp = {
	FM_EREPORT_CPU_GENAMD,			/* use generic subclass */
	FM_EREPORT_CPU_GENADM_GARTTBLWLK,	/* use generic leafclass */
	0					/* no additional payload */
};

static struct authamd_chipshared *authamd_shared[AUTHAMD_MAX_CHIPS];

static int
authamd_chip_once(authamd_data_t *authamd, enum authamd_cfgonce_bitnum what)
{
	return (atomic_set_long_excl(&authamd->amd_shared->acs_cfgonce,
	    what) == 0 ?
	    B_TRUE : B_FALSE);
}

/*
 * NB PCI config space accesses.  The on-chip NorthBridge for chip/node N
 * appears on bus 0 at device 0x18 + N, hence the chipid + 24 below.
 */
static void
authamd_pcicfg_write(uint_t chipid, uint_t func, uint_t reg, uint32_t val)
{
	ASSERT(chipid + 24 <= 31);
	ASSERT((func & 7) == func);
	ASSERT((reg & 3) == 0 && reg < 256);

	cmi_pci_putl(0, chipid + 24, func, reg, 0, val);
}

static uint32_t
authamd_pcicfg_read(uint_t chipid, uint_t func, uint_t reg)
{
	ASSERT(chipid + 24 <= 31);
	ASSERT((func & 7) == func);
	ASSERT((reg & 3) == 0 && reg < 256);

	return (cmi_pci_getl(0, chipid + 24, func, reg, 0, 0));
}

/*
 * Enable writes to the MCi_STATUS registers by setting the HWCR wren bit,
 * remembering the original HWCR value so that authamd_bankstatus_postwrite
 * can decide whether to clear the bit again.
 */
void
authamd_bankstatus_prewrite(cmi_hdl_t hdl, authamd_data_t *authamd)
{
	uint64_t hwcr;

	if (cmi_hdl_rdmsr(hdl, MSR_AMD_HWCR, &hwcr) != CMI_SUCCESS)
		return;

	authamd->amd_hwcr = hwcr;

	if (!(hwcr & AMD_HWCR_MCI_STATUS_WREN)) {
		hwcr |= AMD_HWCR_MCI_STATUS_WREN;
		(void) cmi_hdl_wrmsr(hdl, MSR_AMD_HWCR, hwcr);
	}
}

void
authamd_bankstatus_postwrite(cmi_hdl_t hdl, authamd_data_t *authamd)
{
	uint64_t hwcr = authamd->amd_hwcr;

	if (!(hwcr & AMD_HWCR_MCI_STATUS_WREN)) {
		hwcr &= ~AMD_HWCR_MCI_STATUS_WREN;
		(void) cmi_hdl_wrmsr(hdl, MSR_AMD_HWCR, hwcr);
	}
}

/*
 * Read EccCnt repeatedly for all possible channel/chip-select combos:
 *
 *	- read sparectl register
 *	- if EccErrCntWrEn is set, clear that bit in the just-read value
 *	  and write it back to sparectl; this *may* clobber the EccCnt
 *	  for the channel/chip-select combination currently selected, so
 *	  we leave this bit clear if we had to clear it
 *	- cycle through all channel/chip-select combinations writing each
 *	  combination to sparectl before reading the register back for
 *	  EccCnt for that combination; since EccErrCntWrEn is clear
 *	  the writes to select what count to read will not themselves
 *	  zero any counts
 */
static int
authamd_read_ecccnt(authamd_data_t *authamd, struct authamd_logout *msl)
{
	union mcreg_sparectl sparectl;
	uint_t chipid = authamd->amd_shared->acs_chipid;
	uint_t family = authamd->amd_shared->acs_family;
	uint32_t rev = authamd->amd_shared->acs_rev;
	int chan, cs;

	/*
	 * Check for feature support; this macro will test down to the
	 * family revision number, whereafter we'll switch on family
	 * assuming that future revisions will use the same register
	 * format.
	 */
	if (!AUTHAMD_HAS_ONLINESPARECTL(rev)) {
		bzero(&msl->aal_eccerrcnt, sizeof (msl->aal_eccerrcnt));
		return (0);
	}

	MCREG_VAL32(&sparectl) =
	    authamd_pcicfg_read(chipid, MC_FUNC_MISCCTL, MC_CTL_REG_SPARECTL);

	switch (family) {
	case AUTHAMD_FAMILY_F:
		MCREG_FIELD_F_revFG(&sparectl, EccErrCntWrEn) = 0;
		break;

	case AUTHAMD_FAMILY_10:
		MCREG_FIELD_10_revAB(&sparectl, EccErrCntWrEn) = 0;
		break;
	}

	for (chan = 0; chan < AUTHAMD_DRAM_NCHANNEL; chan++) {
		switch (family) {
		case AUTHAMD_FAMILY_F:
			MCREG_FIELD_F_revFG(&sparectl, EccErrCntDramChan) =
			    chan;
			break;

		case AUTHAMD_FAMILY_10:
			MCREG_FIELD_10_revAB(&sparectl, EccErrCntDramChan) =
			    chan;
			break;
		}

		for (cs = 0; cs < AUTHAMD_DRAM_NCS; cs++) {
			switch (family) {
			case AUTHAMD_FAMILY_F:
				MCREG_FIELD_F_revFG(&sparectl,
				    EccErrCntDramCs) = cs;
				break;

			case AUTHAMD_FAMILY_10:
				MCREG_FIELD_10_revAB(&sparectl,
				    EccErrCntDramCs) = cs;
				break;
			}

			authamd_pcicfg_write(chipid, MC_FUNC_MISCCTL,
			    MC_CTL_REG_SPARECTL, MCREG_VAL32(&sparectl));

			MCREG_VAL32(&sparectl) = authamd_pcicfg_read(chipid,
			    MC_FUNC_MISCCTL, MC_CTL_REG_SPARECTL);

			switch (family) {
			case AUTHAMD_FAMILY_F:
				msl->aal_eccerrcnt[chan][cs] =
				    MCREG_FIELD_F_revFG(&sparectl, EccErrCnt);
				break;

			case AUTHAMD_FAMILY_10:
				msl->aal_eccerrcnt[chan][cs] =
				    MCREG_FIELD_10_revAB(&sparectl, EccErrCnt);
				break;
			}
		}
	}

	return (1);
}

/*
 * Clear EccCnt for all possible channel/chip-select combos:
 *
 *	- set EccErrCntWrEn in sparectl, if necessary
 *	- write 0 to EccCnt for all channel/chip-select combinations
 *	- clear EccErrCntWrEn
 *
 * If requested also disable the interrupts taken on counter overflow
 * and on swap done.
 */
static void
authamd_clear_ecccnt(authamd_data_t *authamd, boolean_t clrint)
{
	union mcreg_sparectl sparectl;
	uint_t chipid = authamd->amd_shared->acs_chipid;
	uint_t family = authamd->amd_shared->acs_family;
	uint32_t rev = authamd->amd_shared->acs_rev;
	int chan, cs;

	if (!AUTHAMD_HAS_ONLINESPARECTL(rev))
		return;

	MCREG_VAL32(&sparectl) =
	    authamd_pcicfg_read(chipid, MC_FUNC_MISCCTL, MC_CTL_REG_SPARECTL);

	switch (family) {
	case AUTHAMD_FAMILY_F:
		MCREG_FIELD_F_revFG(&sparectl, EccErrCntWrEn) = 1;
		if (clrint) {
			MCREG_FIELD_F_revFG(&sparectl, EccErrInt) = 0;
			MCREG_FIELD_F_revFG(&sparectl, SwapDoneInt) = 0;
		}
		break;

	case AUTHAMD_FAMILY_10:
		MCREG_FIELD_10_revAB(&sparectl, EccErrCntWrEn) = 1;
		if (clrint) {
			MCREG_FIELD_10_revAB(&sparectl, EccErrInt) = 0;
			MCREG_FIELD_10_revAB(&sparectl, SwapDoneInt) = 0;
		}
		break;
	}

	authamd_pcicfg_write(chipid, MC_FUNC_MISCCTL,
	    MC_CTL_REG_SPARECTL, MCREG_VAL32(&sparectl));

	for (chan = 0; chan < AUTHAMD_DRAM_NCHANNEL; chan++) {
		switch (family) {
		case AUTHAMD_FAMILY_F:
			MCREG_FIELD_F_revFG(&sparectl, EccErrCntDramChan) =
			    chan;
			break;

		case AUTHAMD_FAMILY_10:
			MCREG_FIELD_10_revAB(&sparectl, EccErrCntDramChan) =
			    chan;
			break;
		}

		for (cs = 0; cs < AUTHAMD_DRAM_NCS; cs++) {
			switch (family) {
			case AUTHAMD_FAMILY_F:
				MCREG_FIELD_F_revFG(&sparectl,
				    EccErrCntDramCs) = cs;
				MCREG_FIELD_F_revFG(&sparectl,
				    EccErrCnt) = 0;
				break;

			case AUTHAMD_FAMILY_10:
				MCREG_FIELD_10_revAB(&sparectl,
				    EccErrCntDramCs) = cs;
				MCREG_FIELD_10_revAB(&sparectl,
				    EccErrCnt) = 0;
				break;
			}

			authamd_pcicfg_write(chipid, MC_FUNC_MISCCTL,
			    MC_CTL_REG_SPARECTL, MCREG_VAL32(&sparectl));
		}
	}
}

/*
 * cms_init entry point.
 *
 * This module provides broad model-specific support for AMD families
 * 0x6, 0xf and 0x10.  Future families will have to be evaluated once their
 * documentation is available.
 */
int
authamd_init(cmi_hdl_t hdl, void **datap)
{
	uint_t chipid = cmi_hdl_chipid(hdl);
	struct authamd_chipshared *sp, *osp;
	uint_t family = cmi_hdl_family(hdl);
	authamd_data_t *authamd;
	uint64_t cap;

	if (authamd_ms_support_disable || !AUTHAMD_SUPPORTED(family))
		return (ENOTSUP);

	if (!(x86_feature & X86_MCA))
		return (ENOTSUP);

	if (cmi_hdl_rdmsr(hdl, IA32_MSR_MCG_CAP, &cap) != CMI_SUCCESS)
		return (ENOTSUP);

	if (!(cap & MCG_CAP_CTL_P))
		return (ENOTSUP);

	authamd = *datap = kmem_zalloc(sizeof (authamd_data_t), KM_SLEEP);
	cmi_hdl_hold(hdl);	/* release in fini */
	authamd->amd_hdl = hdl;

	if ((sp = authamd_shared[chipid]) == NULL) {
		sp = kmem_zalloc(sizeof (struct authamd_chipshared), KM_SLEEP);
		osp = atomic_cas_ptr(&authamd_shared[chipid], NULL, sp);
		if (osp != NULL) {
			kmem_free(sp, sizeof (struct authamd_chipshared));
			sp = osp;
		} else {
			sp->acs_chipid = chipid;
			sp->acs_family = family;
			sp->acs_rev = cmi_hdl_chiprev(hdl);
		}
	}
	authamd->amd_shared = sp;

	return (0);
}

/*
 * cms_logout_size entry point.
 */
/*ARGSUSED*/
size_t
authamd_logout_size(cmi_hdl_t hdl)
{
	return (sizeof (struct authamd_logout));
}

/*
 * cms_mcgctl_val entry point
 *
 * Instead of setting all bits to 1 we can set just those for the
 * error detector banks known to exist.
 */
/*ARGSUSED*/
uint64_t
authamd_mcgctl_val(cmi_hdl_t hdl, int nbanks, uint64_t proposed)
{
	return (nbanks < 64 ? (1ULL << nbanks) - 1 : proposed);
}

/*
 * cms_bankctl_skipinit entry point
 *
 * On K6 we do not initialize MC0_CTL since, reportedly, this bank (for DC)
 * may produce spurious machine checks.
 *
 * Only allow a single core to setup the NorthBridge MCi_CTL register.
 */
/*ARGSUSED*/
boolean_t
authamd_bankctl_skipinit(cmi_hdl_t hdl, int bank)
{
	authamd_data_t *authamd = cms_hdl_getcmsdata(hdl);
	uint32_t rev = authamd->amd_shared->acs_rev;

	if (authamd->amd_shared->acs_family == AUTHAMD_FAMILY_6)
		return (bank == 0 ? B_TRUE : B_FALSE);

	if (AUTHAMD_NBONCHIP(rev) && bank == AMD_MCA_BANK_NB) {
		return (authamd_chip_once(authamd, AUTHAMD_CFGONCE_NBMCA) ==
		    B_TRUE ? B_FALSE : B_TRUE);
	}

	return (B_FALSE);
}

/*
 * cms_bankctl_val entry point
 */
uint64_t
authamd_bankctl_val(cmi_hdl_t hdl, int bank, uint64_t proposed)
{
	authamd_data_t *authamd = cms_hdl_getcmsdata(hdl);
	uint32_t rev = authamd->amd_shared->acs_rev;
	uint64_t val = proposed;

	/*
	 * The Intel MCA documentation says we can write all 1's to enable
	 * #MC for all errors, and AMD docs say much the same.  But,
	 * depending perhaps on other config registers, taking machine
	 * checks for some errors such as GART TLB errors and master/target
	 * aborts may be bad - they set UC and sometimes also PCC, but
	 * we should not always panic for these error types.
	 *
	 * Our cms_error_action entry point can suppress such panics,
	 * however we can also use the cms_bankctl_val entry point to
	 * veto enabling of some of the known villains in the first place.
	 */
	if (bank == AMD_MCA_BANK_NB && AUTHAMD_NOGARTTBLWLK_MC(rev))
		val &= ~AMD_NB_EN_GARTTBLWK;

	return (val);
}

/*
 * Bits to add to NB MCA config (after watchdog config).
 */
uint32_t authamd_nb_mcacfg_add = AMD_NB_CFG_ADD_CMN;

/*
 * Bits to remove from NB MCA config (after watchdog config)
 */
uint32_t authamd_nb_mcacfg_remove = AMD_NB_CFG_REMOVE_CMN;

/*
 * NB Watchdog policy, and rate we use if enabling.
 */
enum {
	AUTHAMD_NB_WDOG_LEAVEALONE,
	AUTHAMD_NB_WDOG_DISABLE,
	AUTHAMD_NB_WDOG_ENABLE_IF_DISABLED,
	AUTHAMD_NB_WDOG_ENABLE_FORCE_RATE
} authamd_nb_watchdog_policy = AUTHAMD_NB_WDOG_ENABLE_IF_DISABLED;

uint32_t authamd_nb_mcacfg_wdog = AMD_NB_CFG_WDOGTMRCNTSEL_4095 |
    AMD_NB_CFG_WDOGTMRBASESEL_1MS;

/*
 * Per-core cache scrubbing policy and rates.
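 *
 * A rate value of 0 leaves the corresponding scrubber disabled and, as
 * the AUTHAMD_SCRUB_MAX handling in authamd_scrubrate below notes,
 * smaller non-zero encodings scrub more frequently.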
 */
enum {
	AUTHAMD_SCRUB_BIOSDEFAULT,	/* leave as BIOS configured */
	AUTHAMD_SCRUB_FIXED,		/* assign our chosen rate */
	AUTHAMD_SCRUB_MAX		/* use higher of ours and BIOS rate */
} authamd_scrub_policy = AUTHAMD_SCRUB_MAX;

uint32_t authamd_scrub_rate_dcache = 0xf;	/* 64K per 0.67 seconds */
uint32_t authamd_scrub_rate_l2cache = 0xe;	/* 1MB per 5.3 seconds */
uint32_t authamd_scrub_rate_l3cache = 0xd;	/* 1MB per 2.7 seconds */

static uint32_t
authamd_scrubrate(uint32_t osrate, uint32_t biosrate, const char *varnm)
{
	uint32_t rate;

	if (osrate > AMD_NB_SCRUBCTL_RATE_MAX) {
		cmn_err(CE_WARN, "%s is too large, resetting to 0x%x\n",
		    varnm, AMD_NB_SCRUBCTL_RATE_MAX);
		osrate = AMD_NB_SCRUBCTL_RATE_MAX;
	}

	switch (authamd_scrub_policy) {
	case AUTHAMD_SCRUB_FIXED:
		rate = osrate;
		break;

	default:
		cmn_err(CE_WARN, "Unknown authamd_scrub_policy %d - "
		    "using default policy of AUTHAMD_SCRUB_MAX",
		    authamd_scrub_policy);
		/*FALLTHRU*/

	case AUTHAMD_SCRUB_MAX:
		if (osrate != 0 && biosrate != 0)
			rate = MIN(osrate, biosrate);	/* small is fast */
		else
			rate = osrate ? osrate : biosrate;
	}

	return (rate);
}

/*
 * cms_mca_init entry point.
 */
/*ARGSUSED*/
void
authamd_mca_init(cmi_hdl_t hdl, int nbanks)
{
	authamd_data_t *authamd = cms_hdl_getcmsdata(hdl);
	uint32_t rev = authamd->amd_shared->acs_rev;
	uint_t chipid = authamd->amd_shared->acs_chipid;

	/*
	 * On chips with a NB online spare control register take control
	 * and clear ECC counts.
	 */
	if (AUTHAMD_HAS_ONLINESPARECTL(rev) &&
	    authamd_chip_once(authamd, AUTHAMD_CFGONCE_ONLNSPRCFG)) {
		authamd_clear_ecccnt(authamd, B_TRUE);
	}

	/*
	 * And since we are claiming the telemetry stop the BIOS from
	 * receiving an SMI on NB threshold overflow.
	 */
	if (AUTHAMD_NBMISC_NUM(rev) &&
	    authamd_chip_once(authamd, AUTHAMD_CFGONCE_NBTHRESH)) {
		union mcmsr_nbmisc nbm;
		int i;

		authamd_bankstatus_prewrite(hdl, authamd);

		for (i = 0; i < AUTHAMD_NBMISC_NUM(rev); i++) {
			if (cmi_hdl_rdmsr(hdl, MC_MSR_NB_MISC(i),
			    (uint64_t *)&nbm) != CMI_SUCCESS)
				continue;

			if (X86_CHIPREV_ATLEAST(rev,
			    X86_CHIPREV_AMD_F_REV_F) &&
			    MCMSR_FIELD_F_revFG(&nbm, mcmisc_Valid) &&
			    MCMSR_FIELD_F_revFG(&nbm, mcmisc_CntP)) {
				MCMSR_FIELD_F_revFG(&nbm, mcmisc_IntType) = 0;
			} else if (X86_CHIPREV_ATLEAST(rev,
			    X86_CHIPREV_AMD_10_REV_A) &&
			    MCMSR_FIELD_10_revAB(&nbm, mcmisc_Valid) &&
			    MCMSR_FIELD_10_revAB(&nbm, mcmisc_CntP)) {
				MCMSR_FIELD_10_revAB(&nbm, mcmisc_IntType) = 0;
			}

			(void) cmi_hdl_wrmsr(hdl, MC_MSR_NB_MISC(i),
			    MCMSR_VAL(&nbm));
		}

		authamd_bankstatus_postwrite(hdl, authamd);
	}

	/*
	 * NB MCA Configuration Register.
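	 *
	 * One core per chip (via authamd_chip_once) applies the watchdog
	 * timer policy and then the authamd_nb_mcacfg_remove/_add bit
	 * adjustments to this shared register.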
	 */
	if (AUTHAMD_DO_NBMCACFG(rev) &&
	    authamd_chip_once(authamd, AUTHAMD_CFGONCE_NBMCACFG)) {
		uint32_t val = authamd_pcicfg_read(chipid, MC_FUNC_MISCCTL,
		    MC_CTL_REG_NBCFG);

		switch (authamd_nb_watchdog_policy) {
		case AUTHAMD_NB_WDOG_LEAVEALONE:
			break;

		case AUTHAMD_NB_WDOG_DISABLE:
			val &= ~(AMD_NB_CFG_WDOGTMRBASESEL_MASK |
			    AMD_NB_CFG_WDOGTMRCNTSEL_MASK);
			val |= AMD_NB_CFG_WDOGTMRDIS;
			break;

		default:
			cmn_err(CE_NOTE, "authamd_nb_watchdog_policy=%d "
			    "unrecognised, using default policy",
			    authamd_nb_watchdog_policy);
			/*FALLTHRU*/

		case AUTHAMD_NB_WDOG_ENABLE_IF_DISABLED:
			if (!(val & AMD_NB_CFG_WDOGTMRDIS))
				break;	/* if enabled leave rate intact */
			/*FALLTHRU*/

		case AUTHAMD_NB_WDOG_ENABLE_FORCE_RATE:
			val &= ~(AMD_NB_CFG_WDOGTMRBASESEL_MASK |
			    AMD_NB_CFG_WDOGTMRCNTSEL_MASK |
			    AMD_NB_CFG_WDOGTMRDIS);
			val |= authamd_nb_mcacfg_wdog;
			break;
		}

		/*
		 * Bit 0 of the NB MCA Config register is reserved on family
		 * 0x10.
		 */
		if (X86_CHIPREV_ATLEAST(rev, X86_CHIPREV_AMD_10_REV_A))
			authamd_nb_mcacfg_add &= ~AMD_NB_CFG_CPUECCERREN;

		val &= ~authamd_nb_mcacfg_remove;
		val |= authamd_nb_mcacfg_add;

		authamd_pcicfg_write(chipid, MC_FUNC_MISCCTL, MC_CTL_REG_NBCFG,
		    val);
	}

	/*
	 * Cache scrubbing.  We can't enable DRAM scrubbing since
	 * we don't know the DRAM base for this node.
	 */
	if (AUTHAMD_HAS_CHIPSCRUB(rev) &&
	    authamd_scrub_policy != AUTHAMD_SCRUB_BIOSDEFAULT &&
	    authamd_chip_once(authamd, AUTHAMD_CFGONCE_CACHESCRUB)) {
		uint32_t val = authamd_pcicfg_read(chipid, MC_FUNC_MISCCTL,
		    MC_CTL_REG_SCRUBCTL);
		int l3cap = 0;

		if (AUTHAMD_L3CAPABLE(rev)) {
			l3cap = (authamd_pcicfg_read(chipid, MC_FUNC_MISCCTL,
			    MC_CTL_REG_NBCAP) & MC_NBCAP_L3CAPABLE) != 0;
		}

		authamd_scrub_rate_dcache =
		    authamd_scrubrate(authamd_scrub_rate_dcache,
		    (val & AMD_NB_SCRUBCTL_DC_MASK) >>
		    AMD_NB_SCRUBCTL_DC_SHIFT,
		    "authamd_scrub_rate_dcache");

		authamd_scrub_rate_l2cache =
		    authamd_scrubrate(authamd_scrub_rate_l2cache,
		    (val & AMD_NB_SCRUBCTL_L2_MASK) >>
		    AMD_NB_SCRUBCTL_L2_SHIFT,
		    "authamd_scrub_rate_l2cache");

		authamd_scrub_rate_l3cache = l3cap ?
		    authamd_scrubrate(authamd_scrub_rate_l3cache,
		    (val & AMD_NB_SCRUBCTL_L3_MASK) >>
		    AMD_NB_SCRUBCTL_L3_SHIFT,
		    "authamd_scrub_rate_l3cache") : 0;

		val = AMD_NB_MKSCRUBCTL(authamd_scrub_rate_l3cache,
		    authamd_scrub_rate_dcache, authamd_scrub_rate_l2cache,
		    val & AMD_NB_SCRUBCTL_DRAM_MASK);

		authamd_pcicfg_write(chipid, MC_FUNC_MISCCTL,
		    MC_CTL_REG_SCRUBCTL, val);
	}
}

/*
 * cms_poll_ownermask entry point.
 *
 * The NorthBridge bank is shared by all cores of a chip, so only one
 * core - the current poll owner - polls it; the others mask it off.
 * Ownership lapses if the owner has not polled within twice the poll
 * interval.
 */
uint64_t
authamd_poll_ownermask(cmi_hdl_t hdl, hrtime_t pintvl)
{
	authamd_data_t *authamd = cms_hdl_getcmsdata(hdl);
	struct authamd_chipshared *acsp = authamd->amd_shared;
	hrtime_t now = gethrtime_waitfree();
	hrtime_t last = acsp->acs_poll_timestamp;
	int dopoll = 0;

	if (now - last > 2 * pintvl || last == 0) {
		acsp->acs_pollowner = hdl;
		dopoll = 1;
	} else if (acsp->acs_pollowner == hdl) {
		dopoll = 1;
	}

	if (dopoll)
		acsp->acs_poll_timestamp = now;

	return (dopoll ? -1ULL : ~(1 << AMD_MCA_BANK_NB));
}

/*
 * cms_bank_logout entry point.
 */
/*ARGSUSED*/
void
authamd_bank_logout(cmi_hdl_t hdl, int bank, uint64_t status,
    uint64_t addr, uint64_t misc, void *mslogout)
{
	authamd_data_t *authamd = cms_hdl_getcmsdata(hdl);
	struct authamd_logout *msl = mslogout;
	uint32_t rev = authamd->amd_shared->acs_rev;

	if (msl == NULL)
		return;

	/*
	 * For main memory ECC errors on revisions with an Online Spare
	 * Control Register grab the ECC counts by channel and chip-select
	 * and reset them to 0.
	 */
	if (AUTHAMD_MEMECC_RECOGNISED(rev) &&
	    AUTHAMD_IS_MEMECCERR(bank, status) &&
	    AUTHAMD_HAS_ONLINESPARECTL(rev)) {
		if (authamd_read_ecccnt(authamd, msl))
			authamd_clear_ecccnt(authamd, B_FALSE);
	}
}

/*
 * cms_error_action entry point
 */

int authamd_forgive_uc = 0;	/* For test/debug only */
int authamd_forgive_pcc = 0;	/* For test/debug only */
int authamd_fake_poison = 0;	/* For test/debug only */

/*ARGSUSED*/
uint32_t
authamd_error_action(cmi_hdl_t hdl, int ismc, int bank,
    uint64_t status, uint64_t addr, uint64_t misc, void *mslogout)
{
	authamd_error_disp_t *disp;
	uint32_t rv = 0;

	if (authamd_forgive_uc)
		rv |= CMS_ERRSCOPE_CLEARED_UC;

	if (authamd_forgive_pcc)
		rv |= CMS_ERRSCOPE_CURCONTEXT_OK;

	if (authamd_fake_poison && status & MSR_MC_STATUS_UC)
		rv |= CMS_ERRSCOPE_POISONED;

	if (rv)
		return (rv);

	disp = authamd_disp_match(hdl, bank, status, addr, misc, mslogout);

	if (disp == &authamd_gart_disp) {
		/*
		 * GART walk errors set UC and possibly PCC (if source CPU)
		 * but should not be regarded as terminal.
		 */
		return (CMS_ERRSCOPE_IGNORE_ERR);
	}

	/*
	 * May also want to consider master abort and target abort.  These
	 * also set UC and PCC (if src CPU) but the requester gets -1
	 * and I believe the IO stuff in Solaris will handle that.
	 */

	return (rv);
}

/*
 * cms_disp_match entry point
 */
/*ARGSUSED*/
cms_cookie_t
authamd_disp_match(cmi_hdl_t hdl, int bank, uint64_t status,
    uint64_t addr, uint64_t misc, void *mslogout)
{
	authamd_data_t *authamd = cms_hdl_getcmsdata(hdl);
	/* uint16_t errcode = MCAX86_ERRCODE(status); */
	uint16_t exterrcode = AMD_EXT_ERRCODE(status);
	uint32_t rev = authamd->amd_shared->acs_rev;

	/*
	 * Recognise main memory ECC errors
	 */
	if (AUTHAMD_MEMECC_RECOGNISED(rev) &&
	    AUTHAMD_IS_MEMECCERR(bank, status)) {
		if (status & AMD_BANK_STAT_CECC) {
			return (exterrcode == 0 ? &authamd_memce_disp :
			    &authamd_ckmemce_disp);
		} else if (status & AMD_BANK_STAT_UECC) {
			return (exterrcode == 0 ?
			    &authamd_memue_disp :
			    &authamd_ckmemue_disp);
		}
	}

	/*
	 * Recognise GART walk errors
	 */
	if (AUTHAMD_NOGARTTBLWLK_MC(rev) && AUTHAMD_IS_GARTERR(bank, status))
		return (&authamd_gart_disp);

	return (NULL);
}

/*
 * cms_ereport_class entry point
 */
/*ARGSUSED*/
void
authamd_ereport_class(cmi_hdl_t hdl, cms_cookie_t mscookie,
    const char **cpuclsp, const char **leafclsp)
{
	const authamd_error_disp_t *aed = mscookie;

	if (aed == NULL)
		return;

	if (aed->aad_subclass != NULL)
		*cpuclsp = aed->aad_subclass;
	if (aed->aad_leafclass != NULL)
		*leafclsp = aed->aad_leafclass;
}

/*ARGSUSED*/
static void
authamd_ereport_add_resource(cmi_hdl_t hdl, authamd_data_t *authamd,
    nvlist_t *ereport, nv_alloc_t *nva, void *mslogout)
{
	nvlist_t *elems[AUTHAMD_DRAM_NCHANNEL * AUTHAMD_DRAM_NCS];
	uint8_t counts[AUTHAMD_DRAM_NCHANNEL * AUTHAMD_DRAM_NCS];
	authamd_logout_t *msl;
	nvlist_t *nvl;
	int nelems = 0;
	int i, chan, cs;

	if ((msl = mslogout) == NULL)
		return;

	for (chan = 0; chan < AUTHAMD_DRAM_NCHANNEL; chan++) {
		for (cs = 0; cs < AUTHAMD_DRAM_NCS; cs++) {
			if (msl->aal_eccerrcnt[chan][cs] == 0)
				continue;

			if ((nvl = fm_nvlist_create(nva)) == NULL)
				continue;

			elems[nelems] = nvl;
			counts[nelems++] = msl->aal_eccerrcnt[chan][cs];

			fm_fmri_hc_set(nvl, FM_HC_SCHEME_VERSION, NULL, NULL,
			    5,
			    "motherboard", 0,
			    "chip", authamd->amd_shared->acs_chipid,
			    "memory-controller", 0,
			    "dram-channel", chan,
			    "chip-select", cs);
		}
	}

	if (nelems == 0)
		return;

	fm_payload_set(ereport, FM_EREPORT_GENAMD_PAYLOAD_NAME_RESOURCE,
	    DATA_TYPE_NVLIST_ARRAY, nelems, elems,
	    NULL);

	fm_payload_set(ereport, FM_EREPORT_GENAMD_PAYLOAD_NAME_RESOURCECNT,
	    DATA_TYPE_UINT8_ARRAY, nelems, &counts[0],
	    NULL);

	for (i = 0; i < nelems; i++)
		fm_nvlist_destroy(elems[i], nva ?
		    FM_NVA_RETAIN : FM_NVA_FREE);
}

/*
 * cms_ereport_add_logout entry point
 */
/*ARGSUSED*/
void
authamd_ereport_add_logout(cmi_hdl_t hdl, nvlist_t *ereport, nv_alloc_t *nva,
    int bank, uint64_t status, uint64_t addr, uint64_t misc,
    void *mslogout, cms_cookie_t mscookie)
{
	authamd_data_t *authamd = cms_hdl_getcmsdata(hdl);
	const authamd_error_disp_t *aed = mscookie;
	uint64_t members;

	if (aed == NULL)
		return;

	members = aed->aad_ereport_members;

	if (members & FM_EREPORT_GENAMD_PAYLOAD_FLAG_SYND) {
		fm_payload_set(ereport, FM_EREPORT_GENAMD_PAYLOAD_NAME_SYND,
		    DATA_TYPE_UINT16, (uint16_t)AMD_BANK_SYND(status),
		    NULL);

		if (members & FM_EREPORT_GENAMD_PAYLOAD_FLAG_SYNDTYPE) {
			fm_payload_set(ereport,
			    FM_EREPORT_GENAMD_PAYLOAD_NAME_SYNDTYPE,
			    DATA_TYPE_STRING, "E",
			    NULL);
		}
	}

	if (members & FM_EREPORT_GENAMD_PAYLOAD_FLAG_CKSYND) {
		fm_payload_set(ereport, FM_EREPORT_GENAMD_PAYLOAD_NAME_CKSYND,
		    DATA_TYPE_UINT16, (uint16_t)AMD_NB_STAT_CKSYND(status),
		    NULL);

		if (members & FM_EREPORT_GENAMD_PAYLOAD_FLAG_SYNDTYPE) {
			fm_payload_set(ereport,
			    FM_EREPORT_GENAMD_PAYLOAD_NAME_SYNDTYPE,
			    DATA_TYPE_STRING, "C",
			    NULL);
		}
	}

	if (members & FM_EREPORT_GENAMD_PAYLOAD_FLAG_RESOURCE &&
	    status & MSR_MC_STATUS_ADDRV) {
		authamd_ereport_add_resource(hdl, authamd, ereport, nva,
		    mslogout);
	}
}

/*
 * cms_msrinject entry point
 */
cms_errno_t
authamd_msrinject(cmi_hdl_t hdl, uint_t msr, uint64_t val)
{
	authamd_data_t *authamd = cms_hdl_getcmsdata(hdl);
	cms_errno_t rv = CMSERR_BADMSRWRITE;

	authamd_bankstatus_prewrite(hdl, authamd);
	if (cmi_hdl_wrmsr(hdl, msr, val) == CMI_SUCCESS)
		rv = CMS_SUCCESS;
	authamd_bankstatus_postwrite(hdl, authamd);

	return (rv);
}

cms_api_ver_t _cms_api_version = CMS_API_VERSION_0;

const cms_ops_t _cms_ops = {
	authamd_init,			/* cms_init */
	NULL,				/* cms_post_startup */
	NULL,				/* cms_post_mpstartup */
	authamd_logout_size,		/* cms_logout_size */
	authamd_mcgctl_val,		/* cms_mcgctl_val */
	authamd_bankctl_skipinit,	/* cms_bankctl_skipinit */
	authamd_bankctl_val,		/* cms_bankctl_val */
	NULL,				/* cms_bankstatus_skipinit */
	NULL,				/* cms_bankstatus_val */
	authamd_mca_init,		/* cms_mca_init */
	authamd_poll_ownermask,		/* cms_poll_ownermask */
	authamd_bank_logout,		/* cms_bank_logout */
	authamd_error_action,		/* cms_error_action */
	authamd_disp_match,		/* cms_disp_match */
	authamd_ereport_class,		/* cms_ereport_class */
	NULL,				/* cms_ereport_detector */
	NULL,				/* cms_ereport_includestack */
	authamd_ereport_add_logout,	/* cms_ereport_add_logout */
	authamd_msrinject,		/* cms_msrinject */
	NULL,				/* cms_fini */
};

static struct modlcpu modlcpu = {
	&mod_cpuops,
	"Generic AMD model-specific MCA"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlcpu,
	NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}