/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/pci_impl.h>
#include <sys/cpuvar.h>
#include <sys/x86_archext.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/pghw.h>
#include <sys/cyclic.h>
#include <sys/sysevent.h>
#include <sys/smbios.h>
#include <sys/mca_x86.h>
#include <sys/mca_amd.h>
#include <sys/mc.h>
#include <sys/mc_amd.h>
#include <sys/psw.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sdt.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/fm/cpu/AMD.h>
#include <sys/acpi/acpi.h>
#include <sys/acpi/acpi_pci.h>
#include <sys/acpica.h>
#include <sys/cpu_module.h>

#include "ao.h"
#include "ao_mca_disp.h"

/* Revision mask matching AMD family 0xf revisions F and G. */
#define	AO_F_REVS_FG (X86_CHIPREV_AMD_F_REV_F | X86_CHIPREV_AMD_F_REV_G)

int ao_mca_smi_disable = 1;		/* attempt to disable SMI polling */

struct ao_ctl_init {
	uint32_t ctl_revmask;	/* rev(s) to which this applies */
	uint64_t ctl_bits;	/* mca ctl reg bitmask to set */
};

/*
 * Additional NB MCA ctl initialization for revs F and G
 */
static const struct ao_ctl_init ao_nb_ctl_init[] = {
	{ AO_F_REVS_FG, AMD_NB_CTL_INIT_REV_FG },
	{ X86_CHIPREV_UNKNOWN, 0 }	/* list terminator */
};

/*
 * Per-bank configuration: the common MCi_CTL init value, optional
 * rev-specific extra ctl bits, an optional MCi_MISC init callback, and
 * the MSR address of the bank's ctl mask register.
 */
typedef struct ao_bank_cfg {
	uint64_t bank_ctl_init_cmn;	/* Common init value */
	const struct ao_ctl_init *bank_ctl_init_extra;	/* Extra for each rev */
	void (*bank_misc_initfunc)(cmi_hdl_t, ao_ms_data_t *, uint32_t);
	uint_t bank_ctl_mask;
} ao_bank_cfg_t;

static void nb_mcamisc_init(cmi_hdl_t, ao_ms_data_t *, uint32_t);

/*
 * Bank configuration, indexed by MCA bank number (DC, IC, BU, LS, NB).
 * Only the NorthBridge bank needs extra ctl bits and MISC initialization.
 */
static const ao_bank_cfg_t ao_bank_cfgs[] = {
	{ AMD_DC_CTL_INIT_CMN, NULL, NULL, AMD_MSR_DC_MASK },
	{ AMD_IC_CTL_INIT_CMN, NULL, NULL, AMD_MSR_IC_MASK },
	{ AMD_BU_CTL_INIT_CMN, NULL, NULL, AMD_MSR_BU_MASK },
	{ AMD_LS_CTL_INIT_CMN, NULL, NULL, AMD_MSR_LS_MASK },
	{ AMD_NB_CTL_INIT_CMN, &ao_nb_ctl_init[0], nb_mcamisc_init,
	    AMD_MSR_NB_MASK },
};

static int ao_nbanks = sizeof (ao_bank_cfgs) / sizeof (ao_bank_cfgs[0]);

/*
 * This is quite awful but necessary to work around x86 system vendor's view of
 * the world.  Other operating systems (you know who you are) don't understand
 * Opteron-specific error handling, so BIOS and system vendors often hide these
 * conditions from them by using SMI polling to copy out any errors from the
 * machine-check registers.  When Solaris runs on a system with this feature,
 * we want to disable the SMI polling so we can use FMA instead.  Sadly, there
 * isn't even a standard self-describing way to express the whole situation,
 * so we have to resort to hard-coded values.  This should all be changed to
 * be a self-describing vendor-specific SMBIOS structure in the future.
 */
static const struct ao_smi_disable {
	const char *asd_sys_vendor;	/* SMB_TYPE_SYSTEM vendor prefix */
	const char *asd_sys_product;	/* SMB_TYPE_SYSTEM product prefix */
	const char *asd_bios_vendor;	/* SMB_TYPE_BIOS vendor prefix */
	uint8_t asd_code;		/* output code for SMI disable */
} ao_smi_disable[] = {
	{ "Sun Microsystems", "Galaxy12",
	    "American Megatrends", 0x59 },
	{ "Sun Microsystems", "Sun Fire X4100 Server",
	    "American Megatrends", 0x59 },
	{ "Sun Microsystems", "Sun Fire X4200 Server",
	    "American Megatrends", 0x59 },
	{ NULL, NULL, NULL, 0 }		/* list terminator */
};

/*
 * Return nonzero if the disposition-table R4 bitmask 'ref' includes the
 * bit corresponding to the MCA errorcode RRRR field value 'r4'.
 */
static int
ao_disp_match_r4(uint16_t ref, uint8_t r4)
{
	static const uint16_t ao_r4_map[] = {
		AO_MCA_R4_BIT_ERR,	/* MCAX86_ERRCODE_RRRR_ERR */
		AO_MCA_R4_BIT_RD,	/* MCAX86_ERRCODE_RRRR_RD */
		AO_MCA_R4_BIT_WR,	/* MCAX86_ERRCODE_RRRR_WR */
		AO_MCA_R4_BIT_DRD,	/* MCAX86_ERRCODE_RRRR_DRD */
		AO_MCA_R4_BIT_DWR,	/* MCAX86_ERRCODE_RRRR_DWR */
		AO_MCA_R4_BIT_IRD,	/* MCAX86_ERRCODE_RRRR_IRD */
		AO_MCA_R4_BIT_PREFETCH,	/* MCAX86_ERRCODE_RRRR_PREFETCH */
		AO_MCA_R4_BIT_EVICT,	/* MCAX86_ERRCODE_RRRR_EVICT */
		AO_MCA_R4_BIT_SNOOP	/* MCAX86_ERRCODE_RRRR_SNOOP */
	};

	ASSERT(r4 < sizeof (ao_r4_map) / sizeof (uint16_t));

	return ((ref & ao_r4_map[r4]) != 0);
}

/*
 * As above, but for the PP (participation processor) errorcode field.
 */
static int
ao_disp_match_pp(uint8_t ref, uint8_t pp)
{
	static const uint8_t ao_pp_map[] = {
		AO_MCA_PP_BIT_SRC,	/* MCAX86_ERRCODE_PP_SRC */
		AO_MCA_PP_BIT_RES,	/* MCAX86_ERRCODE_PP_RES */
		AO_MCA_PP_BIT_OBS,	/* MCAX86_ERRCODE_PP_OBS */
		AO_MCA_PP_BIT_GEN	/* MCAX86_ERRCODE_PP_GEN */
	};

	ASSERT(pp < sizeof (ao_pp_map) / sizeof (uint8_t));

	return ((ref & ao_pp_map[pp]) != 0);
}

/*
 * As above, but for the II (memory-or-I/O) errorcode field.  Value 1 is
 * reserved in the errorcode encoding, hence the zero map entry.
 */
static int
ao_disp_match_ii(uint8_t ref, uint8_t ii)
{
	static const uint8_t ao_ii_map[] = {
		AO_MCA_II_BIT_MEM,	/* MCAX86_ERRCODE_II_MEM */
		0,
		AO_MCA_II_BIT_IO,	/* MCAX86_ERRCODE_II_IO */
		AO_MCA_II_BIT_GEN	/* MCAX86_ERRCODE_II_GEN */
	};

	ASSERT(ii < sizeof (ao_ii_map) / sizeof (uint8_t));

	return ((ref & ao_ii_map[ii]) != 0);
}

/*
 * Extract and clear the field selected by mask/shift from *codep,
 * returning the extracted value.  Clearing the field in *codep lets the
 * caller later compare the residue directly against a table code.
 */
static uint8_t
bit_strip(uint16_t *codep, uint16_t mask, uint16_t shift)
{
	uint8_t val = (*codep & mask) >> shift;
	*codep &= ~mask;
	return (val);
}

#define	BIT_STRIP(codep, name) \
	bit_strip(codep, MCAX86_ERRCODE_##name##_MASK, \
	MCAX86_ERRCODE_##name##_SHIFT)

/*
 * Decide whether one ao_error_disp_t entry matches the observed bank
 * status value for the given bank and chip revision.  Returns nonzero
 * on a match.
 */
/*ARGSUSED*/
static int
ao_disp_match_one(const ao_error_disp_t *aed, uint64_t status, uint32_t rev,
    int bankno)
{
	uint16_t code = MCAX86_ERRCODE(status);
	uint8_t extcode = AMD_EXT_ERRCODE(status);
	uint64_t stat_mask = aed->aed_stat_mask;
	uint64_t stat_mask_res = aed->aed_stat_mask_res;

	/*
	 * If the bank's status register indicates overflow, then we can no
	 * longer rely on the value of CECC: our experience with actual fault
	 * injection has shown that multiple CE's overwriting each other shows
	 * AMD_BANK_STAT_CECC and AMD_BANK_STAT_UECC both set to zero.  This
	 * should be clarified in a future BKDG or by the Revision Guide.
	 * This behaviour is fixed in revision F.
	 */
	if (bankno == AMD_MCA_BANK_NB &&
	    !X86_CHIPREV_ATLEAST(rev, X86_CHIPREV_AMD_F_REV_F) &&
	    status & MSR_MC_STATUS_OVER) {
		stat_mask &= ~AMD_BANK_STAT_CECC;
		stat_mask_res &= ~AMD_BANK_STAT_CECC;
	}

	if ((status & stat_mask) != stat_mask_res)
		return (0);

	/*
	 * r4 and pp bits are stored separately, so we mask off and compare them
	 * for the code types that use them.  Once we've taken the r4 and pp
	 * bits out of the equation, we can directly compare the resulting code
	 * with the one stored in the ao_error_disp_t.
	 */
	if (AMD_ERRCODE_ISMEM(code)) {
		uint8_t r4 = BIT_STRIP(&code, RRRR);

		if (!ao_disp_match_r4(aed->aed_stat_r4_bits, r4))
			return (0);

	} else if (AMD_ERRCODE_ISBUS(code)) {
		uint8_t r4 = BIT_STRIP(&code, RRRR);
		uint8_t pp = BIT_STRIP(&code, PP);
		uint8_t ii = BIT_STRIP(&code, II);

		if (!ao_disp_match_r4(aed->aed_stat_r4_bits, r4) ||
		    !ao_disp_match_pp(aed->aed_stat_pp_bits, pp) ||
		    !ao_disp_match_ii(aed->aed_stat_ii_bits, ii))
			return (0);
	}

	return (code == aed->aed_stat_code && extcode == aed->aed_stat_extcode);
}

/*
 * Look up the disposition entry for an observed error in the per-bank
 * disposition table; returns the entry as an opaque cookie, or NULL if
 * the error is not recognised.
 */
/*ARGSUSED*/
cms_cookie_t
ao_ms_disp_match(cmi_hdl_t hdl, int banknum, uint64_t status,
    uint64_t addr, uint64_t misc, void *mslogout)
{
	ao_ms_data_t *ao = cms_hdl_getcmsdata(hdl);
	uint32_t rev = ao->ao_ms_shared->aos_chiprev;
	const ao_error_disp_t *aed;

	for (aed = ao_error_disp[banknum]; aed->aed_stat_mask != 0; aed++) {
		if (ao_disp_match_one(aed, status, rev, banknum))
			return ((cms_cookie_t)aed);
	}

	return (NULL);
}

/*
 * Supply the cpu and leaf ereport class names for a previously-matched
 * disposition cookie; outputs are untouched if the cookie is NULL.
 */
/*ARGSUSED*/
void
ao_ms_ereport_class(cmi_hdl_t hdl, cms_cookie_t mscookie,
    const char **cpuclsp, const char **leafclsp)
{
	const ao_error_disp_t *aed = mscookie;

	if (aed != NULL) {
		*cpuclsp = FM_EREPORT_CPU_AMD;
		*leafclsp = aed->aed_class;
	}
}

/*
 * Atomically claim a once-per-chip configuration task; returns B_TRUE
 * to the first caller (who should perform the task) and B_FALSE to all
 * subsequent callers.
 */
static int
ao_chip_once(ao_ms_data_t *ao, enum ao_cfgonce_bitnum what)
{
	return (atomic_set_long_excl(&ao->ao_ms_shared->aos_cfgonce,
	    what) == 0 ? B_TRUE : B_FALSE);
}

/*
 * This knob exists in case any platform has a problem with our default
 * policy of disabling any interrupt registered in the NB MC4_MISC
 * register.  Setting this may cause Solaris and external entities
 * who also have an interest in this register to argue over available
 * telemetry (so setting it is generally not recommended).
 */
int ao_nb_cfg_mc4misc_noseize = 0;

/*
 * The BIOS may have setup to receive SMI on counter overflow.  It may also
 * have locked various fields or made them read-only.  We will clear any
 * SMI request and leave the register locked.  We will also clear the
 * counter and enable counting - while we don't use the counter it is nice
 * to have it enabled for verification and debug work.
 */
static void
nb_mcamisc_init(cmi_hdl_t hdl, ao_ms_data_t *ao, uint32_t rev)
{
	uint64_t val, nval;

	/* Only revs F and G define the fields we manipulate below. */
	if (!X86_CHIPREV_MATCH(rev, AO_F_REVS_FG))
		return;

	if (cmi_hdl_rdmsr(hdl, AMD_MSR_NB_MISC, &val) != CMI_SUCCESS)
		return;

	/* Stash the BIOS-programmed value for later ereport payloads. */
	ao->ao_ms_shared->aos_bcfg_nb_misc = val;

	if (ao_nb_cfg_mc4misc_noseize)
		return;	/* stash BIOS value, but no changes */

	/*
	 * The Valid bit tells us whether the CtrP bit is defined; if it
	 * is the CtrP bit tells us whether an ErrCount field is present.
	 * If not then there is nothing for us to do.
	 */
	if (!(val & AMD_NB_MISC_VALID) || !(val & AMD_NB_MISC_CTRP))
		return;

	nval = val;
	nval |= AMD_NB_MISC_CNTEN;		/* enable ECC error counting */
	nval &= ~AMD_NB_MISC_ERRCOUNT_MASK;	/* clear ErrCount */
	nval &= ~AMD_NB_MISC_OVRFLW;		/* clear Ovrflw */
	nval &= ~AMD_NB_MISC_INTTYPE_MASK;	/* no interrupt on overflow */
	nval |= AMD_NB_MISC_LOCKED;

	if (nval != val) {
		uint64_t locked = val & AMD_NB_MISC_LOCKED;

		/*
		 * If BIOS locked the register we must enable the HWCR
		 * status-write-enable around the update.
		 */
		if (locked)
			ao_bankstatus_prewrite(hdl, ao);

		(void) cmi_hdl_wrmsr(hdl, AMD_MSR_NB_MISC, nval);

		if (locked)
			ao_bankstatus_postwrite(hdl, ao);
	}
}

/*
 * NorthBridge (NB) MCA Configuration.
 *
 * We add and remove bits from the BIOS-configured value, rather than
 * writing an absolute value.  The variables ao_nb_cfg_{add,remove}_cmn and
 * ao_nb_cfg_{add,remove}_revFG are available for modification via kmdb
 * and /etc/system.
The revision-specific adds and removes are applied
 * after the common changes, and one write is made to the config register.
 * These are not intended for watchdog configuration via these variables -
 * use the watchdog policy below.
 */

/*
 * Bits to be added to the NB configuration register - all revs.
 */
uint32_t ao_nb_cfg_add_cmn = AMD_NB_CFG_ADD_CMN;

/*
 * Bits to be cleared from the NB configuration register - all revs.
 */
uint32_t ao_nb_cfg_remove_cmn = AMD_NB_CFG_REMOVE_CMN;

/*
 * Bits to be added to the NB configuration register - revs F and G.
 */
uint32_t ao_nb_cfg_add_revFG = AMD_NB_CFG_ADD_REV_FG;

/*
 * Bits to be cleared from the NB configuration register - revs F and G.
 */
uint32_t ao_nb_cfg_remove_revFG = AMD_NB_CFG_REMOVE_REV_FG;

/*
 * Rev-conditional add/remove pair; pointers allow runtime tuning of the
 * variables above via kmdb or /etc/system.
 */
struct ao_nb_cfg {
	uint32_t cfg_revmask;
	uint32_t *cfg_add_p;
	uint32_t *cfg_remove_p;
};

static const struct ao_nb_cfg ao_cfg_extra[] = {
	{ AO_F_REVS_FG, &ao_nb_cfg_add_revFG, &ao_nb_cfg_remove_revFG },
	{ X86_CHIPREV_UNKNOWN, NULL, NULL }	/* list terminator */
};

/*
 * Bits to be used if we configure the NorthBridge (NB) Watchdog.  The watchdog
 * triggers a machine check exception when no response to an NB system access
 * occurs within a specified time interval.
 */
uint32_t ao_nb_cfg_wdog =
    AMD_NB_CFG_WDOGTMRCNTSEL_4095 |
    AMD_NB_CFG_WDOGTMRBASESEL_1MS;

/*
 * The default watchdog policy is to enable it (at the above rate) if it
 * is disabled; if it is enabled then we leave it enabled at the rate
 * chosen by the BIOS.
 */
enum {
	AO_NB_WDOG_LEAVEALONE,		/* Don't touch watchdog config */
	AO_NB_WDOG_DISABLE,		/* Always disable watchdog */
	AO_NB_WDOG_ENABLE_IF_DISABLED,	/* If disabled, enable at our rate */
	AO_NB_WDOG_ENABLE_FORCE_RATE	/* Enable and set our rate */
} ao_nb_watchdog_policy = AO_NB_WDOG_ENABLE_IF_DISABLED;

/*
 * Apply the NB watchdog policy and the configured add/remove bits to the
 * NB configuration register, preserving the BIOS value for ereports.
 */
static void
ao_nb_cfg(ao_ms_data_t *ao, uint32_t rev)
{
	const struct ao_nb_cfg *nbcp = &ao_cfg_extra[0];
	uint_t chipid = pg_plat_hw_instance_id(CPU, PGHW_CHIP);
	uint32_t val;

	/*
	 * Read the NorthBridge (NB) configuration register in PCI space,
	 * modify the settings accordingly, and store the new value back.
	 * Note that the stashed BIOS config value aos_bcfg_nb_cfg is used
	 * in ereport payload population to determine ECC syndrome type for
	 * memory errors.
	 */
	ao->ao_ms_shared->aos_bcfg_nb_cfg = val =
	    ao_pcicfg_read(chipid, MC_FUNC_MISCCTL, MC_CTL_REG_NBCFG);

	switch (ao_nb_watchdog_policy) {
	case AO_NB_WDOG_LEAVEALONE:
		break;

	case AO_NB_WDOG_DISABLE:
		val &= ~AMD_NB_CFG_WDOGTMRBASESEL_MASK;
		val &= ~AMD_NB_CFG_WDOGTMRCNTSEL_MASK;
		val |= AMD_NB_CFG_WDOGTMRDIS;
		break;

	default:
		cmn_err(CE_NOTE, "ao_nb_watchdog_policy=%d unrecognised, "
		    "using default policy", ao_nb_watchdog_policy);
		/*FALLTHRU*/

	case AO_NB_WDOG_ENABLE_IF_DISABLED:
		if (!(val & AMD_NB_CFG_WDOGTMRDIS))
			break;	/* if enabled leave rate intact */
		/*FALLTHRU*/

	case AO_NB_WDOG_ENABLE_FORCE_RATE:
		val &= ~AMD_NB_CFG_WDOGTMRBASESEL_MASK;
		val &= ~AMD_NB_CFG_WDOGTMRCNTSEL_MASK;
		val &= ~AMD_NB_CFG_WDOGTMRDIS;
		val |= ao_nb_cfg_wdog;
		break;
	}

	/*
	 * Now apply bit adds and removes, first those common to all revs
	 * and then the revision-specific ones.
	 */
	val &= ~ao_nb_cfg_remove_cmn;
	val |= ao_nb_cfg_add_cmn;

	while (nbcp->cfg_revmask != X86_CHIPREV_UNKNOWN) {
		if (X86_CHIPREV_MATCH(rev, nbcp->cfg_revmask)) {
			val &= ~(*nbcp->cfg_remove_p);
			val |= *nbcp->cfg_add_p;
		}
		nbcp++;
	}

	ao_pcicfg_write(chipid, MC_FUNC_MISCCTL, MC_CTL_REG_NBCFG, val);
}

/*
 * Stash the BIOS DRAM configuration registers and, for erratum 172 on
 * revs F/G, disable DRAM parity (ParEn) if the BIOS left it enabled.
 */
static void
ao_dram_cfg(ao_ms_data_t *ao, uint32_t rev)
{
	uint_t chipid = pg_plat_hw_instance_id(CPU, PGHW_CHIP);
	union mcreg_dramcfg_lo dcfglo;

	ao->ao_ms_shared->aos_bcfg_dcfg_lo = MCREG_VAL32(&dcfglo) =
	    ao_pcicfg_read(chipid, MC_FUNC_DRAMCTL, MC_DC_REG_DRAMCFGLO);
	ao->ao_ms_shared->aos_bcfg_dcfg_hi =
	    ao_pcicfg_read(chipid, MC_FUNC_DRAMCTL, MC_DC_REG_DRAMCFGHI);

#ifdef OPTERON_ERRATUM_172
	if (X86_CHIPREV_MATCH(rev, AO_F_REVS_FG) &&
	    MCREG_FIELD_F_revFG(&dcfglo, ParEn)) {
		MCREG_FIELD_F_revFG(&dcfglo, ParEn) = 0;
		ao_pcicfg_write(chipid, MC_FUNC_DRAMCTL, MC_DC_REG_DRAMCFGLO,
		    MCREG_VAL32(&dcfglo));
	}
#endif
}

/*
 * This knob exists in case any platform has a problem with our default
 * policy of disabling any interrupt registered in the online spare
 * control register.  Setting this may cause Solaris and external entities
 * who also have an interest in this register to argue over available
 * telemetry (so setting it is generally not recommended).
 */
int ao_nb_cfg_sparectl_noseize = 0;

/*
 * Setup the online spare control register (revs F and G).  We disable
 * any interrupt registered by the BIOS and zero all error counts.
 */
static void
ao_sparectl_cfg(ao_ms_data_t *ao)
{
	uint_t chipid = pg_plat_hw_instance_id(CPU, PGHW_CHIP);
	union mcreg_sparectl sparectl;
	int chan, cs;

	/* Stash the BIOS-programmed value before making any changes. */
	ao->ao_ms_shared->aos_bcfg_nb_sparectl = MCREG_VAL32(&sparectl) =
	    ao_pcicfg_read(chipid, MC_FUNC_MISCCTL, MC_CTL_REG_SPARECTL);

	if (ao_nb_cfg_sparectl_noseize)
		return;	/* stash BIOS value, but no changes */

	/*
	 * If the BIOS has requested SMI interrupt type for ECC count
	 * overflow for a chip-select or channel force those off.
	 */
	MCREG_FIELD_F_revFG(&sparectl, EccErrInt) = 0;
	MCREG_FIELD_F_revFG(&sparectl, SwapDoneInt) = 0;

	/*
	 * Zero EccErrCnt and write this back to all chan/cs combinations.
	 */
	MCREG_FIELD_F_revFG(&sparectl, EccErrCntWrEn) = 1;
	MCREG_FIELD_F_revFG(&sparectl, EccErrCnt) = 0;
	for (chan = 0; chan < MC_CHIP_NDRAMCHAN; chan++) {
		MCREG_FIELD_F_revFG(&sparectl, EccErrCntDramChan) = chan;

		for (cs = 0; cs < MC_CHIP_NCS; cs++) {
			MCREG_FIELD_F_revFG(&sparectl, EccErrCntDramCs) = cs;
			ao_pcicfg_write(chipid, MC_FUNC_MISCCTL,
			    MC_CTL_REG_SPARECTL, MCREG_VAL32(&sparectl));
		}
	}
}

int ao_forgive_uc = 0;		/* For test/debug only */
int ao_forgive_pcc = 0;		/* For test/debug only */
int ao_fake_poison = 0;		/* For test/debug only */

/*
 * Decide what action the common code should take for an observed error:
 * returns a bitmask of CMS_ERRSCOPE_* flags, or 0 to let the cpu module
 * apply its generic criteria.
 */
uint32_t
ao_ms_error_action(cmi_hdl_t hdl, int ismc, int banknum,
    uint64_t status, uint64_t addr, uint64_t misc, void *mslogout)
{
	const ao_error_disp_t *aed;
	uint32_t retval = 0;
	uint8_t when;
	int en;

	/* Test/debug overrides short-circuit the normal evaluation. */
	if (ao_forgive_uc)
		retval |= CMS_ERRSCOPE_CLEARED_UC;

	if (ao_forgive_pcc)
		retval |= CMS_ERRSCOPE_CURCONTEXT_OK;

	if (ao_fake_poison && status & MSR_MC_STATUS_UC)
		retval |= CMS_ERRSCOPE_POISONED;

	if (retval)
		return (retval);

	aed = ao_ms_disp_match(hdl, banknum, status, addr, misc, mslogout);

	/*
	 * If we do not recognise the error let the cpu module apply
	 * the generic criteria to decide how to react.
	 */
	if (aed == NULL)
		return (0);

	en = (status & MSR_MC_STATUS_EN) != 0;

	if ((when = aed->aed_panic_when) == AO_AED_PANIC_NEVER)
		retval |= CMS_ERRSCOPE_IGNORE_ERR;

	if ((when & AO_AED_PANIC_ALWAYS) ||
	    ((when & AO_AED_PANIC_IFMCE) && (en || ismc)))
		retval |= CMS_ERRSCOPE_FORCE_FATAL;

	/*
	 * The original AMD implementation would panic on a machine check
	 * (not a poll) if the status overflow bit was set, with an
	 * exception for the case of rev F or later with an NB error
	 * indicating CECC.  This came from the perception that the
	 * overflow bit was not correctly managed on rev E and earlier, for
	 * example that repeated correctable memory errors did not set
	 * OVER but somehow clear CECC.
	 *
	 * We will leave the generic support to evaluate overflow errors
	 * and decide to panic on their individual merits, e.g., if PCC
	 * is set and so on.  The AMD docs do say (as Intel does) that
	 * the status information is *all* from the higher-priority
	 * error in the case of an overflow, so it is at least as serious
	 * as the original and we can decide panic etc based on it.
	 */

	return (retval);
}

/*
 * Will need to change for family 0x10
 */
static uint_t
ao_ereport_synd(ao_ms_data_t *ao, uint64_t status, uint_t *typep,
    int is_nb)
{
	/*
	 * For the NB bank with ChipKill ECC enabled in the stashed BIOS
	 * NB config we report the ChipKill syndrome; otherwise the
	 * ordinary ECC syndrome is used.
	 */
	if (is_nb) {
		if (ao->ao_ms_shared->aos_bcfg_nb_cfg &
		    AMD_NB_CFG_CHIPKILLECCEN) {
			*typep = AMD_SYNDTYPE_CHIPKILL;
			return (AMD_NB_STAT_CKSYND(status));
		} else {
			*typep = AMD_SYNDTYPE_ECC;
			return (AMD_BANK_SYND(status));
		}
	} else {
		*typep = AMD_SYNDTYPE_ECC;
		return (AMD_BANK_SYND(status));
	}
}

/*
 * Build one hc-scheme resource FMRI nvlist for the dimm at 'dimmnum' in
 * the unum; the returned nvlist is freed by the caller, NULL on
 * allocation failure.
 */
static nvlist_t *
ao_ereport_create_resource_elem(nv_alloc_t *nva, mc_unum_t *unump, int dimmnum)
{
	nvlist_t *nvl, *snvl;

	if ((nvl = fm_nvlist_create(nva)) == NULL)	/* freed by caller */
		return (NULL);

	if ((snvl = fm_nvlist_create(nva)) == NULL) {
		fm_nvlist_destroy(nvl, nva ? FM_NVA_RETAIN : FM_NVA_FREE);
		return (NULL);
	}

	(void) nvlist_add_uint64(snvl, FM_FMRI_HC_SPECIFIC_OFFSET,
	    unump->unum_offset);

	fm_fmri_hc_set(nvl, FM_HC_SCHEME_VERSION, NULL, snvl, 5,
	    "motherboard", unump->unum_board,
	    "chip", unump->unum_chip,
	    "memory-controller", unump->unum_mc,
	    "dimm", unump->unum_dimms[dimmnum],
	    "rank", unump->unum_rank);

	fm_nvlist_destroy(snvl, nva ? FM_NVA_RETAIN : FM_NVA_FREE);

	return (nvl);
}

/*
 * Add a resource nvlist-array payload member naming each valid dimm in
 * the unum; silently adds nothing if no element can be built.
 */
static void
ao_ereport_add_resource(nvlist_t *payload, nv_alloc_t *nva, mc_unum_t *unump)
{
	nvlist_t *elems[MC_UNUM_NDIMM];
	int nelems = 0;
	int i;

	for (i = 0; i < MC_UNUM_NDIMM; i++) {
		if (unump->unum_dimms[i] == MC_INVALNUM)
			break;

		if ((elems[nelems] = ao_ereport_create_resource_elem(nva,
		    unump, i)) == NULL)
			break;

		nelems++;
	}

	if (nelems == 0)
		return;

	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RESOURCE,
	    DATA_TYPE_NVLIST_ARRAY, nelems, elems, NULL);

	for (i = 0; i < nelems; i++)
		fm_nvlist_destroy(elems[i], nva ? FM_NVA_RETAIN : FM_NVA_FREE);
}

/*
 * Add model-specific payload members (syndrome, syndrome type, resource)
 * to an ereport, as selected by the disposition entry's member flags.
 */
/*ARGSUSED*/
void
ao_ms_ereport_add_logout(cmi_hdl_t hdl, nvlist_t *ereport,
    nv_alloc_t *nva, int banknum, uint64_t status, uint64_t addr,
    uint64_t misc, void *mslogout, cms_cookie_t mscookie)
{
	ao_ms_data_t *ao = cms_hdl_getcmsdata(hdl);
	const ao_error_disp_t *aed = mscookie;
	uint_t synd, syndtype;
	uint64_t members;

	if (aed == NULL)
		return;

	members = aed->aed_ereport_members;

	synd = ao_ereport_synd(ao, status, &syndtype,
	    banknum == AMD_MCA_BANK_NB);

	if (members & FM_EREPORT_PAYLOAD_FLAG_SYND) {
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_NAME_SYND,
		    DATA_TYPE_UINT16, synd, NULL);
	}

	if (members & FM_EREPORT_PAYLOAD_FLAG_SYND_TYPE) {
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_NAME_SYND_TYPE,
		    DATA_TYPE_STRING, (syndtype == AMD_SYNDTYPE_CHIPKILL ?
		    "C" : "E"), NULL);
	}

	if (members & FM_EREPORT_PAYLOAD_FLAG_RESOURCE) {
		mc_unum_t unum;

		/*
		 * Only physical addresses with a valid address bit can be
		 * translated to a unum for resource naming.
		 */
		if (((aed->aed_flags & AO_AED_FLAGS_ADDRTYPE) ==
		    AO_AED_F_PHYSICAL) && (status & MSR_MC_STATUS_ADDRV) &&
		    cmi_mc_patounum(addr, aed->aed_addrvalid_hi,
		    aed->aed_addrvalid_lo, synd, syndtype, &unum) ==
		    CMI_SUCCESS)
			ao_ereport_add_resource(ereport, nva, &unum);
	}
}

/*
 * Indicate whether the ereport for this disposition should include a
 * stack trace payload member.
 */
/*ARGSUSED*/
boolean_t
ao_ms_ereport_includestack(cmi_hdl_t hdl, cms_cookie_t mscookie)
{
	const ao_error_disp_t *aed = mscookie;

	if (aed == NULL)
		return (0);

	return ((aed->aed_ereport_members &
	    FM_EREPORT_PAYLOAD_FLAG_STACK) != 0);
}

/*
 * Write an arbitrary MSR on behalf of the error injector, temporarily
 * enabling MCi_STATUS writes around the operation.
 */
cms_errno_t
ao_ms_msrinject(cmi_hdl_t hdl, uint_t msr, uint64_t val)
{
	ao_ms_data_t *ao = cms_hdl_getcmsdata(hdl);
	cms_errno_t rv = CMSERR_BADMSRWRITE;

	ao_bankstatus_prewrite(hdl, ao);
	if (cmi_hdl_wrmsr(hdl, msr, val) == CMI_SUCCESS)
		rv = CMS_SUCCESS;
	ao_bankstatus_postwrite(hdl, ao);

	return (rv);
}

/*
 * MCG_CTL value: enable all implemented banks (one bit per bank).
 */
/*ARGSUSED*/
uint64_t
ao_ms_mcgctl_val(cmi_hdl_t hdl, int nbanks, uint64_t def)
{
	return ((1ULL << nbanks) - 1);
}

/*
 * The NB bank is shared by all cores of a chip, so only one core should
 * initialize its ctl register; all others skip it.
 */
boolean_t
ao_ms_bankctl_skipinit(cmi_hdl_t hdl, int banknum)
{
	ao_ms_data_t *ao = cms_hdl_getcmsdata(hdl);

	if (banknum != AMD_MCA_BANK_NB)
		return (B_FALSE);

	/*
	 * If we are the first to atomically set the "I'll do it" bit
	 * then return B_FALSE (do not skip), otherwise skip with B_TRUE.
	 */
	return (ao_chip_once(ao, AO_CFGONCE_NBMCA) == B_TRUE ?
	    B_FALSE : B_TRUE);
}

/*
 * Compute the MCi_CTL initialization value for a bank: the common bits
 * plus any revision-specific extras; banks beyond our table keep the
 * caller's default.
 */
uint64_t
ao_ms_bankctl_val(cmi_hdl_t hdl, int banknum, uint64_t def)
{
	ao_ms_data_t *ao = cms_hdl_getcmsdata(hdl);
	const struct ao_ctl_init *extrap;
	const ao_bank_cfg_t *bankcfg;
	uint64_t mcictl;
	uint32_t rev = ao->ao_ms_shared->aos_chiprev;

	if (banknum >= sizeof (ao_bank_cfgs) / sizeof (ao_bank_cfgs[0]))
		return (def);

	bankcfg = &ao_bank_cfgs[banknum];
	extrap = bankcfg->bank_ctl_init_extra;

	mcictl = bankcfg->bank_ctl_init_cmn;

	while (extrap != NULL && extrap->ctl_revmask != X86_CHIPREV_UNKNOWN) {
		if (X86_CHIPREV_MATCH(rev, extrap->ctl_revmask))
			mcictl |= extrap->ctl_bits;
		extrap++;
	}

	return (mcictl);
}

/*
 * Save the current HWCR value and, if necessary, set McStatusWrEn so
 * that MCi_STATUS registers may be written; paired with
 * ao_bankstatus_postwrite below.
 */
void
ao_bankstatus_prewrite(cmi_hdl_t hdl, ao_ms_data_t *ao)
{
	uint64_t hwcr;

	if (cmi_hdl_rdmsr(hdl, MSR_AMD_HWCR, &hwcr) != CMI_SUCCESS)
		return;

	ao->ao_ms_hwcr_val = hwcr;

	if (!(hwcr & AMD_HWCR_MCI_STATUS_WREN)) {
		hwcr |= AMD_HWCR_MCI_STATUS_WREN;
		(void) cmi_hdl_wrmsr(hdl, MSR_AMD_HWCR, hwcr);
	}
}

/*
 * Restore HWCR: if the saved value did not have McStatusWrEn set then
 * prewrite enabled it, so write the (cleared) saved value back.
 */
void
ao_bankstatus_postwrite(cmi_hdl_t hdl, ao_ms_data_t *ao)
{
	uint64_t hwcr = ao->ao_ms_hwcr_val;

	if (!(hwcr & AMD_HWCR_MCI_STATUS_WREN)) {
		hwcr &= ~AMD_HWCR_MCI_STATUS_WREN;
		(void) cmi_hdl_wrmsr(hdl, MSR_AMD_HWCR, hwcr);
	}
}

/*
 * Per-cpu MCA initialization: stash the bank ctl mask registers, run
 * per-bank MISC init, and perform once-per-chip NB/DRAM/sparectl and
 * scrubber configuration.
 */
void
ao_ms_mca_init(cmi_hdl_t hdl, int nbanks)
{
	ao_ms_data_t *ao = cms_hdl_getcmsdata(hdl);
	uint32_t rev = ao->ao_ms_shared->aos_chiprev;
	ao_ms_mca_t *mca = &ao->ao_ms_mca;
	uint64_t *maskp;
	int i;

	maskp = mca->ao_mca_bios_cfg.bcfg_bank_mask = kmem_zalloc(nbanks *
	    sizeof (uint64_t), KM_SLEEP);

	/*
	 * Read the bank ctl mask MSRs, but only as many as we know
	 * certainly exist - don't calculate the register address.
	 * Also initialize the MCi_MISC register where required.
	 */
	for (i = 0; i < MIN(nbanks, ao_nbanks); i++) {
		(void) cmi_hdl_rdmsr(hdl, ao_bank_cfgs[i].bank_ctl_mask,
		    maskp++);
		if (ao_bank_cfgs[i].bank_misc_initfunc != NULL)
			ao_bank_cfgs[i].bank_misc_initfunc(hdl, ao, rev);
	}

	if (ao_chip_once(ao, AO_CFGONCE_NBCFG) == B_TRUE) {
		ao_nb_cfg(ao, rev);

		if (X86_CHIPREV_MATCH(rev, AO_F_REVS_FG))
			ao_sparectl_cfg(ao);
	}

	if (ao_chip_once(ao, AO_CFGONCE_DRAMCFG) == B_TRUE)
		ao_dram_cfg(ao, rev);

	ao_chip_scrubber_enable(hdl, ao);
}

/*
 * Note that although this cpu module is loaded before the PSMs are
 * loaded (and hence before acpica is loaded), this function is
 * called from post_startup(), after PSMs are initialized and acpica
 * is loaded.
 */
static int
ao_acpi_find_smicmd(int *asd_port)
{
	FADT_DESCRIPTOR *fadt = NULL;

	/*
	 * AcpiGetFirmwareTable works even if ACPI is disabled, so a failure
	 * here means we weren't able to retrieve a pointer to the FADT.
	 */
	if (AcpiGetFirmwareTable(FADT_SIG, 1, ACPI_LOGICAL_ADDRESSING,
	    (ACPI_TABLE_HEADER **)&fadt) != AE_OK)
		return (-1);

	ASSERT(fadt != NULL);

	*asd_port = fadt->SmiCmd;
	return (0);
}

/*
 * Post-startup hook: if this platform is in our SMBIOS match table,
 * write the vendor-specific code to the ACPI SMI command port to stop
 * the BIOS's SMI-based machine-check polling so FMA sees the errors.
 */
/*ARGSUSED*/
void
ao_ms_post_startup(cmi_hdl_t hdl)
{
	const struct ao_smi_disable *asd;
	id_t id;
	int rv = -1, asd_port;

	smbios_system_t sy;
	smbios_bios_t sb;
	smbios_info_t si;

	/*
	 * Fetch the System and BIOS vendor strings from SMBIOS and see if they
	 * match a value in our table.  If so, disable SMI error polling.  This
	 * is grotesque and should be replaced by self-describing vendor-
	 * specific SMBIOS data or a specification enhancement instead.
	 */
	if (ao_mca_smi_disable && ksmbios != NULL &&
	    smbios_info_bios(ksmbios, &sb) != SMB_ERR &&
	    (id = smbios_info_system(ksmbios, &sy)) != SMB_ERR &&
	    smbios_info_common(ksmbios, id, &si) != SMB_ERR) {

		for (asd = ao_smi_disable; asd->asd_sys_vendor != NULL; asd++) {
			/* Prefix matches against all three SMBIOS strings. */
			if (strncmp(asd->asd_sys_vendor, si.smbi_manufacturer,
			    strlen(asd->asd_sys_vendor)) != 0 ||
			    strncmp(asd->asd_sys_product, si.smbi_product,
			    strlen(asd->asd_sys_product)) != 0 ||
			    strncmp(asd->asd_bios_vendor, sb.smbb_vendor,
			    strlen(asd->asd_bios_vendor)) != 0)
				continue;

			/*
			 * Look for the SMI_CMD port in the ACPI FADT,
			 * if the port is 0, this platform doesn't support
			 * SMM, so there is no SMI error polling to disable.
			 */
			if ((rv = ao_acpi_find_smicmd(&asd_port)) == 0 &&
			    asd_port != 0) {
				cmn_err(CE_CONT, "?SMI polling disabled in "
				    "favor of Solaris Fault Management for "
				    "AMD Processors\n");

				outb(asd_port, asd->asd_code);

			} else if (rv < 0) {
				cmn_err(CE_CONT, "?Solaris Fault Management "
				    "for AMD Processors could not disable SMI "
				    "polling because an error occurred while "
				    "trying to determine the SMI command port "
				    "from the ACPI FADT table\n");
			}
			break;
		}
	}
}