xref: /illumos-gate/usr/src/uts/i86pc/cpu/amd_opteron/ao_mca.c (revision 07a48826732249fcd3aa8dd53c8389595e9f1fbc)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/types.h>
28 #include <sys/regset.h>
29 #include <sys/privregs.h>
30 #include <sys/pci_impl.h>
31 #include <sys/cpuvar.h>
32 #include <sys/x86_archext.h>
33 #include <sys/cmn_err.h>
34 #include <sys/systm.h>
35 #include <sys/sysmacros.h>
36 #include <sys/pghw.h>
37 #include <sys/cyclic.h>
38 #include <sys/sysevent.h>
39 #include <sys/smbios.h>
40 #include <sys/mca_x86.h>
41 #include <sys/mca_amd.h>
42 #include <sys/mc.h>
43 #include <sys/mc_amd.h>
44 #include <sys/psw.h>
45 #include <sys/ddi.h>
46 #include <sys/sunddi.h>
47 #include <sys/sdt.h>
48 #include <sys/fm/util.h>
49 #include <sys/fm/protocol.h>
50 #include <sys/fm/cpu/AMD.h>
51 #include <sys/acpi/acpi.h>
52 #include <sys/acpi/acpi_pci.h>
53 #include <sys/acpica.h>
54 #include <sys/cpu_module.h>
55 
56 #include "ao.h"
57 #include "ao_mca_disp.h"
58 
59 #define	AO_F_REVS_FG (X86_CHIPREV_AMD_F_REV_F | X86_CHIPREV_AMD_F_REV_G)
60 
61 int ao_mca_smi_disable = 1;		/* attempt to disable SMI polling */
62 
struct ao_ctl_init {
	uint32_t ctl_revmask;	/* rev(s) to which this applies */
	uint64_t ctl_bits;	/* mca ctl reg bitmask to set */
};

/*
 * Additional NB MCA ctl initialization for revs F and G, applied on
 * top of the common init value; the list is terminated by an
 * X86_CHIPREV_UNKNOWN entry.
 */
static const struct ao_ctl_init ao_nb_ctl_init[] = {
	{ AO_F_REVS_FG, AMD_NB_CTL_INIT_REV_FG },
	{ X86_CHIPREV_UNKNOWN, 0 }
};
75 
/*
 * Per-bank MCA configuration: the common MCi_CTL init value, optional
 * revision-specific extra ctl bits, an optional MCi_MISC init callback,
 * and the MSR address of the bank's ctl mask register.  Entries are in
 * bank order (DC, IC, BU, LS, NB).
 */
typedef struct ao_bank_cfg {
	uint64_t bank_ctl_init_cmn;			/* Common init value */
	const struct ao_ctl_init *bank_ctl_init_extra;	/* Extra for each rev */
	void (*bank_misc_initfunc)(cmi_hdl_t, ao_ms_data_t *, uint32_t);
	uint_t bank_ctl_mask;
} ao_bank_cfg_t;

static void nb_mcamisc_init(cmi_hdl_t, ao_ms_data_t *, uint32_t);

static const ao_bank_cfg_t ao_bank_cfgs[] = {
	{ AMD_DC_CTL_INIT_CMN, NULL, NULL, AMD_MSR_DC_MASK },
	{ AMD_IC_CTL_INIT_CMN, NULL, NULL, AMD_MSR_IC_MASK },
	{ AMD_BU_CTL_INIT_CMN, NULL, NULL, AMD_MSR_BU_MASK },
	{ AMD_LS_CTL_INIT_CMN, NULL, NULL, AMD_MSR_LS_MASK },
	{ AMD_NB_CTL_INIT_CMN, &ao_nb_ctl_init[0], nb_mcamisc_init,
		AMD_MSR_NB_MASK },
};

/* Number of banks we know how to configure (see ao_ms_mca_init) */
static int ao_nbanks = sizeof (ao_bank_cfgs) / sizeof (ao_bank_cfgs[0]);
95 
96 /*
 * This is quite awful but necessary to work around x86 system vendors' view of
98  * the world.  Other operating systems (you know who you are) don't understand
99  * Opteron-specific error handling, so BIOS and system vendors often hide these
100  * conditions from them by using SMI polling to copy out any errors from the
101  * machine-check registers.  When Solaris runs on a system with this feature,
102  * we want to disable the SMI polling so we can use FMA instead.  Sadly, there
103  * isn't even a standard self-describing way to express the whole situation,
104  * so we have to resort to hard-coded values.  This should all be changed to
105  * be a self-describing vendor-specific SMBIOS structure in the future.
106  */
107 static const struct ao_smi_disable {
108 	const char *asd_sys_vendor;	/* SMB_TYPE_SYSTEM vendor prefix */
109 	const char *asd_sys_product;	/* SMB_TYPE_SYSTEM product prefix */
110 	const char *asd_bios_vendor;	/* SMB_TYPE_BIOS vendor prefix */
111 	uint8_t asd_code;		/* output code for SMI disable */
112 } ao_smi_disable[] = {
113 	{ "Sun Microsystems", "Galaxy12",
114 	    "American Megatrends", 0x59 },
115 	{ "Sun Microsystems", "Sun Fire X4100 Server",
116 	    "American Megatrends", 0x59 },
117 	{ "Sun Microsystems", "Sun Fire X4200 Server",
118 	    "American Megatrends", 0x59 },
119 	{ NULL, NULL, NULL, 0 }
120 };
121 
122 static int
123 ao_disp_match_r4(uint16_t ref, uint8_t r4)
124 {
125 	static const uint16_t ao_r4_map[] = {
126 		AO_MCA_R4_BIT_ERR,	/* MCAX86_ERRCODE_RRRR_ERR */
127 		AO_MCA_R4_BIT_RD,	/* MCAX86_ERRCODE_RRRR_RD */
128 		AO_MCA_R4_BIT_WR,	/* MCAX86_ERRCODE_RRRR_WR */
129 		AO_MCA_R4_BIT_DRD,	/* MCAX86_ERRCODE_RRRR_DRD */
130 		AO_MCA_R4_BIT_DWR,	/* MCAX86_ERRCODE_RRRR_DWR */
131 		AO_MCA_R4_BIT_IRD,	/* MCAX86_ERRCODE_RRRR_IRD */
132 		AO_MCA_R4_BIT_PREFETCH,	/* MCAX86_ERRCODE_RRRR_PREFETCH */
133 		AO_MCA_R4_BIT_EVICT,	/* MCAX86_ERRCODE_RRRR_EVICT */
134 		AO_MCA_R4_BIT_SNOOP	/* MCAX86_ERRCODE_RRRR_SNOOP */
135 	};
136 
137 	ASSERT(r4 < sizeof (ao_r4_map) / sizeof (uint16_t));
138 
139 	return ((ref & ao_r4_map[r4]) != 0);
140 }
141 
142 static int
143 ao_disp_match_pp(uint8_t ref, uint8_t pp)
144 {
145 	static const uint8_t ao_pp_map[] = {
146 		AO_MCA_PP_BIT_SRC,	/* MCAX86_ERRCODE_PP_SRC */
147 		AO_MCA_PP_BIT_RES,	/* MCAX86_ERRCODE_PP_RES */
148 		AO_MCA_PP_BIT_OBS,	/* MCAX86_ERRCODE_PP_OBS */
149 		AO_MCA_PP_BIT_GEN	/* MCAX86_ERRCODE_PP_GEN */
150 	};
151 
152 	ASSERT(pp < sizeof (ao_pp_map) / sizeof (uint8_t));
153 
154 	return ((ref & ao_pp_map[pp]) != 0);
155 }
156 
157 static int
158 ao_disp_match_ii(uint8_t ref, uint8_t ii)
159 {
160 	static const uint8_t ao_ii_map[] = {
161 		AO_MCA_II_BIT_MEM,	/* MCAX86_ERRCODE_II_MEM */
162 		0,
163 		AO_MCA_II_BIT_IO,	/* MCAX86_ERRCODE_II_IO */
164 		AO_MCA_II_BIT_GEN	/* MCAX86_ERRCODE_II_GEN */
165 	};
166 
167 	ASSERT(ii < sizeof (ao_ii_map) / sizeof (uint8_t));
168 
169 	return ((ref & ao_ii_map[ii]) != 0);
170 }
171 
/*
 * Extract a bit-field from *codep: return the field value (shifted down
 * to bit 0) and clear those bits in *codep, so the caller can compare
 * whatever remains against a stored code.
 */
static uint8_t
bit_strip(uint16_t *codep, uint16_t mask, uint16_t shift)
{
	uint16_t field = *codep & mask;

	*codep ^= field;	/* field bits are a subset, so xor clears them */
	return ((uint8_t)(field >> shift));
}

#define	BIT_STRIP(codep, name) \
	bit_strip(codep, MCAX86_ERRCODE_##name##_MASK, \
	MCAX86_ERRCODE_##name##_SHIFT)
183 
/*
 * Test whether the observed bank status matches a single entry of the
 * ao_error_disp_t disposition table.  Returns nonzero on a match.
 */
/*ARGSUSED*/
static int
ao_disp_match_one(const ao_error_disp_t *aed, uint64_t status, uint32_t rev,
    int bankno)
{
	uint16_t code = MCAX86_ERRCODE(status);
	uint8_t extcode = AMD_EXT_ERRCODE(status);
	uint64_t stat_mask = aed->aed_stat_mask;
	uint64_t stat_mask_res = aed->aed_stat_mask_res;

	/*
	 * If the bank's status register indicates overflow, then we can no
	 * longer rely on the value of CECC: our experience with actual fault
	 * injection has shown that multiple CE's overwriting each other shows
	 * AMD_BANK_STAT_CECC and AMD_BANK_STAT_UECC both set to zero.  This
	 * should be clarified in a future BKDG or by the Revision Guide.
	 * This behaviour is fixed in revision F.
	 */
	if (bankno == AMD_MCA_BANK_NB &&
	    !X86_CHIPREV_ATLEAST(rev, X86_CHIPREV_AMD_F_REV_F) &&
	    status & MSR_MC_STATUS_OVER) {
		stat_mask &= ~AMD_BANK_STAT_CECC;
		stat_mask_res &= ~AMD_BANK_STAT_CECC;
	}

	/* The masked status bits must equal the expected residual value */
	if ((status & stat_mask) != stat_mask_res)
		return (0);

	/*
	 * r4 and pp bits are stored separately, so we mask off and compare them
	 * for the code types that use them.  Once we've taken the r4 and pp
	 * bits out of the equation, we can directly compare the resulting code
	 * with the one stored in the ao_error_disp_t.
	 */
	if (AMD_ERRCODE_ISMEM(code)) {
		uint8_t r4 = BIT_STRIP(&code, RRRR);

		if (!ao_disp_match_r4(aed->aed_stat_r4_bits, r4))
			return (0);

	} else if (AMD_ERRCODE_ISBUS(code)) {
		uint8_t r4 = BIT_STRIP(&code, RRRR);
		uint8_t pp = BIT_STRIP(&code, PP);
		uint8_t ii = BIT_STRIP(&code, II);

		if (!ao_disp_match_r4(aed->aed_stat_r4_bits, r4) ||
		    !ao_disp_match_pp(aed->aed_stat_pp_bits, pp) ||
		    !ao_disp_match_ii(aed->aed_stat_ii_bits, ii))
			return (0);
	}

	return (code == aed->aed_stat_code && extcode == aed->aed_stat_extcode);
}
237 
238 /*ARGSUSED*/
239 cms_cookie_t
240 ao_ms_disp_match(cmi_hdl_t hdl, int banknum, uint64_t status,
241     uint64_t addr, uint64_t misc, void *mslogout)
242 {
243 	ao_ms_data_t *ao = cms_hdl_getcmsdata(hdl);
244 	uint32_t rev = ao->ao_ms_shared->aos_chiprev;
245 	const ao_error_disp_t *aed;
246 
247 	for (aed = ao_error_disp[banknum]; aed->aed_stat_mask != 0; aed++) {
248 		if (ao_disp_match_one(aed, status, rev, banknum))
249 			return ((cms_cookie_t)aed);
250 	}
251 
252 	return (NULL);
253 }
254 
255 /*ARGSUSED*/
256 void
257 ao_ms_ereport_class(cmi_hdl_t hdl, cms_cookie_t mscookie,
258     const char **cpuclsp, const char **leafclsp)
259 {
260 	const ao_error_disp_t *aed = mscookie;
261 
262 	if (aed != NULL) {
263 		*cpuclsp = FM_EREPORT_CPU_AMD;
264 		*leafclsp = aed->aed_class;
265 	}
266 }
267 
268 static int
269 ao_chip_once(ao_ms_data_t *ao, enum ao_cfgonce_bitnum what)
270 {
271 	return (atomic_set_long_excl(&ao->ao_ms_shared->aos_cfgonce,
272 	    what) == 0 ?  B_TRUE : B_FALSE);
273 }
274 
275 /*
276  * This knob exists in case any platform has a problem with our default
277  * policy of disabling any interrupt registered in the NB MC4_MISC
278  * register.  Setting this may cause Solaris and external entities
279  * who also have an interest in this register to argue over available
280  * telemetry (so setting it is generally not recommended).
281  */
282 int ao_nb_cfg_mc4misc_noseize = 0;
283 
284 /*
285  * The BIOS may have setup to receive SMI on counter overflow.  It may also
286  * have locked various fields or made them read-only.  We will clear any
287  * SMI request and leave the register locked.  We will also clear the
288  * counter and enable counting - while we don't use the counter it is nice
289  * to have it enabled for verification and debug work.
290  */
static void
nb_mcamisc_init(cmi_hdl_t hdl, ao_ms_data_t *ao, uint32_t rev)
{
	uint64_t val, nval;

	/* This MC4_MISC handling only applies to revs F and G */
	if (!X86_CHIPREV_MATCH(rev, AO_F_REVS_FG))
		return;

	if (cmi_hdl_rdmsr(hdl, AMD_MSR_NB_MISC, &val) != CMI_SUCCESS)
		return;

	/* Stash the BIOS-configured value before making any changes */
	ao->ao_ms_shared->aos_bcfg_nb_misc = val;

	if (ao_nb_cfg_mc4misc_noseize)
		return;		/* stash BIOS value, but no changes */


	/*
	 * The Valid bit tells us whether the CtrP bit is defined; if it
	 * is the CtrP bit tells us whether an ErrCount field is present.
	 * If not then there is nothing for us to do.
	 */
	if (!(val & AMD_NB_MISC_VALID) || !(val & AMD_NB_MISC_CTRP))
		return;


	nval = val;
	nval |= AMD_NB_MISC_CNTEN;		/* enable ECC error counting */
	nval &= ~AMD_NB_MISC_ERRCOUNT_MASK;	/* clear ErrCount */
	nval &= ~AMD_NB_MISC_OVRFLW;		/* clear Ovrflw */
	nval &= ~AMD_NB_MISC_INTTYPE_MASK;	/* no interrupt on overflow */
	nval |= AMD_NB_MISC_LOCKED;

	if (nval != val) {
		uint64_t locked = val & AMD_NB_MISC_LOCKED;

		/*
		 * If the BIOS locked the register, enable MCi status/misc
		 * writes via HWCR around the update (see
		 * ao_bankstatus_prewrite/postwrite).
		 */
		if (locked)
			ao_bankstatus_prewrite(hdl, ao);

		(void) cmi_hdl_wrmsr(hdl, AMD_MSR_NB_MISC, nval);

		if (locked)
			ao_bankstatus_postwrite(hdl, ao);
	}
}
336 
337 /*
338  * NorthBridge (NB) MCA Configuration.
339  *
340  * We add and remove bits from the BIOS-configured value, rather than
341  * writing an absolute value.  The variables ao_nb_cfg_{add,remove}_cmn and
 * ao_nb_cfg_{add,remove}_revFG are available for modification via kmdb
343  * and /etc/system.  The revision-specific adds and removes are applied
344  * after the common changes, and one write is made to the config register.
345  * These are not intended for watchdog configuration via these variables -
346  * use the watchdog policy below.
347  */
348 
349 /*
350  * Bits to be added to the NB configuration register - all revs.
351  */
352 uint32_t ao_nb_cfg_add_cmn = AMD_NB_CFG_ADD_CMN;
353 
354 /*
355  * Bits to be cleared from the NB configuration register - all revs.
356  */
357 uint32_t ao_nb_cfg_remove_cmn = AMD_NB_CFG_REMOVE_CMN;
358 
359 /*
360  * Bits to be added to the NB configuration register - revs F and G.
361  */
362 uint32_t ao_nb_cfg_add_revFG = AMD_NB_CFG_ADD_REV_FG;
363 
364 /*
365  * Bits to be cleared from the NB configuration register - revs F and G.
366  */
367 uint32_t ao_nb_cfg_remove_revFG = AMD_NB_CFG_REMOVE_REV_FG;
368 
/*
 * Pairing of a chip revision mask with pointers to the add/remove
 * tunables to apply for that revision.  The ao_cfg_extra table is
 * terminated by an X86_CHIPREV_UNKNOWN entry.
 */
struct ao_nb_cfg {
	uint32_t cfg_revmask;
	uint32_t *cfg_add_p;
	uint32_t *cfg_remove_p;
};

static const struct ao_nb_cfg ao_cfg_extra[] = {
	{ AO_F_REVS_FG, &ao_nb_cfg_add_revFG, &ao_nb_cfg_remove_revFG },
	{ X86_CHIPREV_UNKNOWN, NULL, NULL }
};
379 
380 /*
381  * Bits to be used if we configure the NorthBridge (NB) Watchdog.  The watchdog
382  * triggers a machine check exception when no response to an NB system access
383  * occurs within a specified time interval.
384  */
385 uint32_t ao_nb_cfg_wdog =
386     AMD_NB_CFG_WDOGTMRCNTSEL_4095 |
387     AMD_NB_CFG_WDOGTMRBASESEL_1MS;
388 
389 /*
390  * The default watchdog policy is to enable it (at the above rate) if it
391  * is disabled;  if it is enabled then we leave it enabled at the rate
392  * chosen by the BIOS.
393  */
394 enum {
395 	AO_NB_WDOG_LEAVEALONE,		/* Don't touch watchdog config */
396 	AO_NB_WDOG_DISABLE,		/* Always disable watchdog */
397 	AO_NB_WDOG_ENABLE_IF_DISABLED,	/* If disabled, enable at our rate */
398 	AO_NB_WDOG_ENABLE_FORCE_RATE	/* Enable and set our rate */
399 } ao_nb_watchdog_policy = AO_NB_WDOG_ENABLE_IF_DISABLED;
400 
static void
ao_nb_cfg(ao_ms_data_t *ao, uint32_t rev)
{
	const struct ao_nb_cfg *nbcp = &ao_cfg_extra[0];
	uint_t chipid = pg_plat_hw_instance_id(CPU, PGHW_CHIP);
	uint32_t val;

	/*
	 * Read the NorthBridge (NB) configuration register in PCI space,
	 * modify the settings accordingly, and store the new value back.
	 * Note that the stashed BIOS config value aos_bcfg_nb_cfg is used
	 * in ereport payload population to determine ECC syndrome type for
	 * memory errors.
	 */
	ao->ao_ms_shared->aos_bcfg_nb_cfg = val =
	    ao_pcicfg_read(chipid, MC_FUNC_MISCCTL, MC_CTL_REG_NBCFG);

	/*
	 * Apply the watchdog policy first; the generic add/remove pass
	 * below is not intended to manipulate watchdog configuration.
	 */
	switch (ao_nb_watchdog_policy) {
	case AO_NB_WDOG_LEAVEALONE:
		break;

	case AO_NB_WDOG_DISABLE:
		val &= ~AMD_NB_CFG_WDOGTMRBASESEL_MASK;
		val &= ~AMD_NB_CFG_WDOGTMRCNTSEL_MASK;
		val |= AMD_NB_CFG_WDOGTMRDIS;
		break;

	default:
		cmn_err(CE_NOTE, "ao_nb_watchdog_policy=%d unrecognised, "
		    "using default policy", ao_nb_watchdog_policy);
		/*FALLTHRU*/

	case AO_NB_WDOG_ENABLE_IF_DISABLED:
		if (!(val & AMD_NB_CFG_WDOGTMRDIS))
			break;	/* if enabled leave rate intact */
		/*FALLTHRU*/

	case AO_NB_WDOG_ENABLE_FORCE_RATE:
		val &= ~AMD_NB_CFG_WDOGTMRBASESEL_MASK;
		val &= ~AMD_NB_CFG_WDOGTMRCNTSEL_MASK;
		val &= ~AMD_NB_CFG_WDOGTMRDIS;
		val |= ao_nb_cfg_wdog;
		break;
	}

	/*
	 * Now apply bit adds and removes, first those common to all revs
	 * and then the revision-specific ones.
	 */
	val &= ~ao_nb_cfg_remove_cmn;
	val |= ao_nb_cfg_add_cmn;

	while (nbcp->cfg_revmask != X86_CHIPREV_UNKNOWN) {
		if (X86_CHIPREV_MATCH(rev, nbcp->cfg_revmask)) {
			val &= ~(*nbcp->cfg_remove_p);
			val |= *nbcp->cfg_add_p;
		}
		nbcp++;
	}

	ao_pcicfg_write(chipid, MC_FUNC_MISCCTL, MC_CTL_REG_NBCFG, val);
}
463 
/*
 * Stash the BIOS DRAM configuration registers (low and high) and, when
 * built with the OPTERON_ERRATUM_172 workaround, clear the ParEn bit
 * on revs F/G if the BIOS set it.
 */
static void
ao_dram_cfg(ao_ms_data_t *ao, uint32_t rev)
{
	uint_t chipid = pg_plat_hw_instance_id(CPU, PGHW_CHIP);
	union mcreg_dramcfg_lo dcfglo;

	ao->ao_ms_shared->aos_bcfg_dcfg_lo = MCREG_VAL32(&dcfglo) =
	    ao_pcicfg_read(chipid, MC_FUNC_DRAMCTL, MC_DC_REG_DRAMCFGLO);
	ao->ao_ms_shared->aos_bcfg_dcfg_hi =
	    ao_pcicfg_read(chipid, MC_FUNC_DRAMCTL, MC_DC_REG_DRAMCFGHI);

#ifdef OPTERON_ERRATUM_172
	/* Clear ParEn and write the modified low register back */
	if (X86_CHIPREV_MATCH(rev, AO_F_REVS_FG) &&
	    MCREG_FIELD_F_revFG(&dcfglo, ParEn)) {
		MCREG_FIELD_F_revFG(&dcfglo, ParEn) = 0;
		ao_pcicfg_write(chipid, MC_FUNC_DRAMCTL, MC_DC_REG_DRAMCFGLO,
		    MCREG_VAL32(&dcfglo));
	}
#endif
}
484 
485 /*
486  * This knob exists in case any platform has a problem with our default
487  * policy of disabling any interrupt registered in the online spare
488  * control register.  Setting this may cause Solaris and external entities
489  * who also have an interest in this register to argue over available
490  * telemetry (so setting it is generally not recommended).
491  */
492 int ao_nb_cfg_sparectl_noseize = 0;
493 
494 /*
495  * Setup the online spare control register (revs F and G).  We disable
496  * any interrupt registered by the BIOS and zero all error counts.
497  */
static void
ao_sparectl_cfg(ao_ms_data_t *ao)
{
	uint_t chipid = pg_plat_hw_instance_id(CPU, PGHW_CHIP);
	union mcreg_sparectl sparectl;
	int chan, cs;

	/* Stash the BIOS-configured value before making any changes */
	ao->ao_ms_shared->aos_bcfg_nb_sparectl = MCREG_VAL32(&sparectl) =
	    ao_pcicfg_read(chipid, MC_FUNC_MISCCTL, MC_CTL_REG_SPARECTL);

	if (ao_nb_cfg_sparectl_noseize)
		return;	/* stash BIOS value, but no changes */

	/*
	 * If the BIOS has requested SMI interrupt type for ECC count
	 * overflow for a chip-select or channel force those off.
	 */
	MCREG_FIELD_F_revFG(&sparectl, EccErrInt) = 0;
	MCREG_FIELD_F_revFG(&sparectl, SwapDoneInt) = 0;

	/*
	 * Zero EccErrCnt and write this back to all chan/cs combinations.
	 */
	MCREG_FIELD_F_revFG(&sparectl, EccErrCntWrEn) = 1;
	MCREG_FIELD_F_revFG(&sparectl, EccErrCnt) = 0;
	for (chan = 0; chan < MC_CHIP_NDRAMCHAN; chan++) {
		MCREG_FIELD_F_revFG(&sparectl, EccErrCntDramChan) = chan;

		for (cs = 0; cs < MC_CHIP_NCS; cs++) {
			MCREG_FIELD_F_revFG(&sparectl, EccErrCntDramCs) = cs;
			ao_pcicfg_write(chipid, MC_FUNC_MISCCTL,
			    MC_CTL_REG_SPARECTL, MCREG_VAL32(&sparectl));
		}
	}
}
533 
534 int ao_forgive_uc = 0;		/* For test/debug only */
535 int ao_forgive_pcc = 0;		/* For test/debug only */
536 int ao_fake_poison = 0;		/* For test/debug only */
537 
/*
 * Decide how the cpu module should react to a logged error: return a
 * bitmask of CMS_ERRSCOPE_* flags, or 0 to let the generic cpu module
 * criteria apply.
 */
uint32_t
ao_ms_error_action(cmi_hdl_t hdl, int ismc, int banknum,
    uint64_t status, uint64_t addr, uint64_t misc, void *mslogout)
{
	const ao_error_disp_t *aed;
	uint32_t retval = 0;
	uint8_t when;
	int en;

	/* Test/debug overrides short-circuit the normal evaluation */
	if (ao_forgive_uc)
		retval |= CMS_ERRSCOPE_CLEARED_UC;

	if (ao_forgive_pcc)
		retval |= CMS_ERRSCOPE_CURCONTEXT_OK;

	if (ao_fake_poison && status & MSR_MC_STATUS_UC)
		retval |= CMS_ERRSCOPE_POISONED;

	if (retval)
		return (retval);

	aed = ao_ms_disp_match(hdl, banknum, status, addr, misc, mslogout);

	/*
	 * If we do not recognise the error let the cpu module apply
	 * the generic criteria to decide how to react.
	 */
	if (aed == NULL)
		return (0);

	en = (status & MSR_MC_STATUS_EN) != 0;

	if ((when = aed->aed_panic_when) == AO_AED_PANIC_NEVER)
		retval |= CMS_ERRSCOPE_IGNORE_ERR;

	if ((when & AO_AED_PANIC_ALWAYS) ||
	    ((when & AO_AED_PANIC_IFMCE) && (en || ismc)))
		retval |= CMS_ERRSCOPE_FORCE_FATAL;

	/*
	 * The original AMD implementation would panic on a machine check
	 * (not a poll) if the status overflow bit was set, with an
	 * exception for the case of rev F or later with an NB error
	 * indicating CECC.  This came from the perception that the
	 * overflow bit was not correctly managed on rev E and earlier, for
	 * example that repeated correctable memory errors did not set
	 * OVER but somehow clear CECC.
	 *
	 * We will leave the generic support to evaluate overflow errors
	 * and decide to panic on their individual merits, e.g., if PCC
	 * is set and so on.  The AMD docs do say (as Intel does) that
	 * the status information is *all* from the higher-priority
	 * error in the case of an overflow, so it is at least as serious
	 * as the original and we can decide panic etc based on it.
	 */

	return (retval);
}
596 
597 /*
598  * Will need to change for family 0x10
599  */
600 static uint_t
601 ao_ereport_synd(ao_ms_data_t *ao, uint64_t status, uint_t *typep,
602     int is_nb)
603 {
604 	if (is_nb) {
605 		if (ao->ao_ms_shared->aos_bcfg_nb_cfg &
606 		    AMD_NB_CFG_CHIPKILLECCEN) {
607 			*typep = AMD_SYNDTYPE_CHIPKILL;
608 			return (AMD_NB_STAT_CKSYND(status));
609 		} else {
610 			*typep = AMD_SYNDTYPE_ECC;
611 			return (AMD_BANK_SYND(status));
612 		}
613 	} else {
614 		*typep = AMD_SYNDTYPE_ECC;
615 		return (AMD_BANK_SYND(status));
616 	}
617 }
618 
/*
 * Build one hc-scheme FMRI nvlist (motherboard/chip/memory-controller/
 * dimm/rank) for the dimmnum'th dimm recorded in the unum.  Returns
 * NULL on allocation failure; the returned nvlist is freed by the
 * caller.
 */
static nvlist_t *
ao_ereport_create_resource_elem(nv_alloc_t *nva, mc_unum_t *unump, int dimmnum)
{
	nvlist_t *nvl, *snvl;

	if ((nvl = fm_nvlist_create(nva)) == NULL)	/* freed by caller */
		return (NULL);

	if ((snvl = fm_nvlist_create(nva)) == NULL) {
		fm_nvlist_destroy(nvl, nva ? FM_NVA_RETAIN : FM_NVA_FREE);
		return (NULL);
	}

	/* hc-specific nvlist carries the offset within the dimm */
	(void) nvlist_add_uint64(snvl, FM_FMRI_HC_SPECIFIC_OFFSET,
	    unump->unum_offset);

	fm_fmri_hc_set(nvl, FM_HC_SCHEME_VERSION, NULL, snvl, 5,
	    "motherboard", unump->unum_board,
	    "chip", unump->unum_chip,
	    "memory-controller", unump->unum_mc,
	    "dimm", unump->unum_dimms[dimmnum],
	    "rank", unump->unum_rank);

	/* snvl contents were copied into nvl; release our copy */
	fm_nvlist_destroy(snvl, nva ? FM_NVA_RETAIN : FM_NVA_FREE);

	return (nvl);
}
646 
647 static void
648 ao_ereport_add_resource(nvlist_t *payload, nv_alloc_t *nva, mc_unum_t *unump)
649 {
650 
651 	nvlist_t *elems[MC_UNUM_NDIMM];
652 	int nelems = 0;
653 	int i;
654 
655 	for (i = 0; i < MC_UNUM_NDIMM; i++) {
656 		if (unump->unum_dimms[i] == MC_INVALNUM)
657 			break;
658 
659 		if ((elems[nelems] = ao_ereport_create_resource_elem(nva,
660 		    unump, i)) == NULL)
661 			break;
662 
663 		nelems++;
664 	}
665 
666 	if (nelems == 0)
667 		return;
668 
669 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RESOURCE,
670 	    DATA_TYPE_NVLIST_ARRAY, nelems, elems, NULL);
671 
672 	for (i = 0; i < nelems; i++)
673 		fm_nvlist_destroy(elems[i], nva ? FM_NVA_RETAIN : FM_NVA_FREE);
674 }
675 
/*
 * Add model-specific members to the ereport payload: the ECC syndrome
 * and syndrome type when the disposition calls for them, plus an
 * hc-scheme resource when a valid physical address translates to a
 * unum.
 */
/*ARGSUSED*/
void
ao_ms_ereport_add_logout(cmi_hdl_t hdl, nvlist_t *ereport,
    nv_alloc_t *nva, int banknum, uint64_t status, uint64_t addr,
    uint64_t misc, void *mslogout, cms_cookie_t mscookie)
{
	ao_ms_data_t *ao = cms_hdl_getcmsdata(hdl);
	const ao_error_disp_t *aed = mscookie;
	uint_t synd, syndtype;
	uint64_t members;

	/* Without a matched disposition there is nothing to add */
	if (aed == NULL)
		return;

	members = aed->aed_ereport_members;

	synd = ao_ereport_synd(ao, status, &syndtype,
	    banknum == AMD_MCA_BANK_NB);

	if (members & FM_EREPORT_PAYLOAD_FLAG_SYND) {
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_NAME_SYND,
		    DATA_TYPE_UINT16, synd, NULL);
	}

	if (members & FM_EREPORT_PAYLOAD_FLAG_SYND_TYPE) {
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_NAME_SYND_TYPE,
		    DATA_TYPE_STRING, (syndtype == AMD_SYNDTYPE_CHIPKILL ?
		    "C" : "E"), NULL);
	}

	if (members & FM_EREPORT_PAYLOAD_FLAG_RESOURCE) {
		mc_unum_t unum;

		/*
		 * Only physical-address errors with a valid address can
		 * be translated to a unum for resource construction.
		 */
		if (((aed->aed_flags & AO_AED_FLAGS_ADDRTYPE) ==
		    AO_AED_F_PHYSICAL) && (status & MSR_MC_STATUS_ADDRV) &&
		    cmi_mc_patounum(addr, aed->aed_addrvalid_hi,
		    aed->aed_addrvalid_lo, synd, syndtype, &unum) ==
		    CMI_SUCCESS)
			ao_ereport_add_resource(ereport, nva, &unum);
	}
}
717 
718 /*ARGSUSED*/
719 boolean_t
720 ao_ms_ereport_includestack(cmi_hdl_t hdl, cms_cookie_t mscookie)
721 {
722 	const ao_error_disp_t *aed = mscookie;
723 
724 	if (aed == NULL)
725 		return (0);
726 
727 	return ((aed->aed_ereport_members &
728 	    FM_EREPORT_PAYLOAD_FLAG_STACK) != 0);
729 }
730 
731 cms_errno_t
732 ao_ms_msrinject(cmi_hdl_t hdl, uint_t msr, uint64_t val)
733 {
734 	ao_ms_data_t *ao = cms_hdl_getcmsdata(hdl);
735 	cms_errno_t rv = CMSERR_BADMSRWRITE;
736 
737 	ao_bankstatus_prewrite(hdl, ao);
738 	if (cmi_hdl_wrmsr(hdl, msr, val) == CMI_SUCCESS)
739 		rv = CMS_SUCCESS;
740 	ao_bankstatus_postwrite(hdl, ao);
741 
742 	return (rv);
743 }
744 
745 /*ARGSUSED*/
746 uint64_t
747 ao_ms_mcgctl_val(cmi_hdl_t hdl, int nbanks, uint64_t def)
748 {
749 	return ((1ULL << nbanks) - 1);
750 }
751 
752 boolean_t
753 ao_ms_bankctl_skipinit(cmi_hdl_t hdl, int banknum)
754 {
755 	ao_ms_data_t *ao = cms_hdl_getcmsdata(hdl);
756 
757 	if (banknum != AMD_MCA_BANK_NB)
758 		return (B_FALSE);
759 
760 	/*
761 	 * If we are the first to atomically set the "I'll do it" bit
762 	 * then return B_FALSE (do not skip), otherwise skip with B_TRUE.
763 	 */
764 	return (ao_chip_once(ao, AO_CFGONCE_NBMCA) == B_TRUE ?
765 	    B_FALSE : B_TRUE);
766 }
767 
768 uint64_t
769 ao_ms_bankctl_val(cmi_hdl_t hdl, int banknum, uint64_t def)
770 {
771 	ao_ms_data_t *ao = cms_hdl_getcmsdata(hdl);
772 	const struct ao_ctl_init *extrap;
773 	const ao_bank_cfg_t *bankcfg;
774 	uint64_t mcictl;
775 	uint32_t rev = ao->ao_ms_shared->aos_chiprev;
776 
777 	if (banknum >= sizeof (ao_bank_cfgs) / sizeof (ao_bank_cfgs[0]))
778 		return (def);
779 
780 	bankcfg = &ao_bank_cfgs[banknum];
781 	extrap = bankcfg->bank_ctl_init_extra;
782 
783 	mcictl = bankcfg->bank_ctl_init_cmn;
784 
785 	while (extrap != NULL && extrap->ctl_revmask != X86_CHIPREV_UNKNOWN) {
786 		if (X86_CHIPREV_MATCH(rev, extrap->ctl_revmask))
787 			mcictl |= extrap->ctl_bits;
788 		extrap++;
789 	}
790 
791 	return (mcictl);
792 }
793 
794 /*ARGSUSED*/
795 void
796 ao_bankstatus_prewrite(cmi_hdl_t hdl, ao_ms_data_t *ao)
797 {
798 #ifndef __xpv
799 	uint64_t hwcr;
800 
801 	if (cmi_hdl_rdmsr(hdl, MSR_AMD_HWCR, &hwcr) != CMI_SUCCESS)
802 		return;
803 
804 	ao->ao_ms_hwcr_val = hwcr;
805 
806 	if (!(hwcr & AMD_HWCR_MCI_STATUS_WREN)) {
807 		hwcr |= AMD_HWCR_MCI_STATUS_WREN;
808 		(void) cmi_hdl_wrmsr(hdl, MSR_AMD_HWCR, hwcr);
809 	}
810 #endif
811 }
812 
/*ARGSUSED*/
void
ao_bankstatus_postwrite(cmi_hdl_t hdl, ao_ms_data_t *ao)
{
#ifndef __xpv
	/*
	 * Restore the HWCR state stashed by ao_bankstatus_prewrite: if
	 * the MCi_STATUS write-enable was clear before prewrite set it,
	 * write the saved value back (the bit is already clear in it)
	 * to turn write-enable off again; otherwise leave it as found.
	 */
	uint64_t hwcr = ao->ao_ms_hwcr_val;

	if (!(hwcr & AMD_HWCR_MCI_STATUS_WREN)) {
		hwcr &= ~AMD_HWCR_MCI_STATUS_WREN;
		(void) cmi_hdl_wrmsr(hdl, MSR_AMD_HWCR, hwcr);
	}
#endif
}
826 
/*
 * Model-specific MCA initialization: stash the BIOS bank ctl mask MSR
 * values, run any per-bank MCi_MISC init callback, then perform the
 * once-per-chip NB, online-spare and DRAM configuration before enabling
 * the chip scrubbers.
 */
void
ao_ms_mca_init(cmi_hdl_t hdl, int nbanks)
{
	ao_ms_data_t *ao = cms_hdl_getcmsdata(hdl);
	uint32_t rev = ao->ao_ms_shared->aos_chiprev;
	ao_ms_mca_t *mca = &ao->ao_ms_mca;
	uint64_t *maskp;
	int i;

	maskp = mca->ao_mca_bios_cfg.bcfg_bank_mask = kmem_zalloc(nbanks *
	    sizeof (uint64_t), KM_SLEEP);

	/*
	 * Read the bank ctl mask MSRs, but only as many as we know
	 * certainly exist - don't calculate the register address.
	 * Also initialize the MCi_MISC register where required.
	 */
	for (i = 0; i < MIN(nbanks, ao_nbanks); i++) {
		(void) cmi_hdl_rdmsr(hdl, ao_bank_cfgs[i].bank_ctl_mask,
		    maskp++);
		if (ao_bank_cfgs[i].bank_misc_initfunc != NULL)
			ao_bank_cfgs[i].bank_misc_initfunc(hdl, ao, rev);

	}

	/* Once-per-chip configuration, done only by the ao_chip_once winner */
	if (ao_chip_once(ao, AO_CFGONCE_NBCFG) == B_TRUE) {
		ao_nb_cfg(ao, rev);

		if (X86_CHIPREV_MATCH(rev, AO_F_REVS_FG))
			ao_sparectl_cfg(ao);
	}

	if (ao_chip_once(ao, AO_CFGONCE_DRAMCFG) == B_TRUE)
		ao_dram_cfg(ao, rev);

	ao_chip_scrubber_enable(hdl, ao);
}
864 
/*
 * Retrieve the SMI command port from the ACPI FADT into *asd_port.
 * Returns 0 on success, -1 if the FADT could not be obtained.
 *
 * Note that although this cpu module is loaded before the PSMs are
 * loaded (and hence before acpica is loaded), this function is
 * called from post_startup(), after PSMs are initialized and acpica
 * is loaded.
 */
static int
ao_acpi_find_smicmd(int *asd_port)
{
	ACPI_TABLE_FADT *fadt = NULL;

	/*
	 * AcpiGetTable works even if ACPI is disabled, so a failure
	 * here means we weren't able to retrieve a pointer to the FADT.
	 */
	if (AcpiGetTable(ACPI_SIG_FADT, 1, (ACPI_TABLE_HEADER **)&fadt) !=
	    AE_OK)
		return (-1);

	ASSERT(fadt != NULL);

	*asd_port = fadt->SmiCommand;
	return (0);
}
889 
/*
 * If this platform matches an ao_smi_disable entry (SMBIOS system
 * vendor/product and BIOS vendor prefixes), write that entry's code to
 * the ACPI SMI command port so the BIOS stops SMI error polling and FMA
 * owns the machine-check telemetry.
 */
/*ARGSUSED*/
void
ao_ms_post_startup(cmi_hdl_t hdl)
{
	const struct ao_smi_disable *asd;
	id_t id;
	int rv = -1, asd_port;

	smbios_system_t sy;
	smbios_bios_t sb;
	smbios_info_t si;

	/*
	 * Fetch the System and BIOS vendor strings from SMBIOS and see if they
	 * match a value in our table.  If so, disable SMI error polling.  This
	 * is grotesque and should be replaced by self-describing vendor-
	 * specific SMBIOS data or a specification enhancement instead.
	 */
	if (ao_mca_smi_disable && ksmbios != NULL &&
	    smbios_info_bios(ksmbios, &sb) != SMB_ERR &&
	    (id = smbios_info_system(ksmbios, &sy)) != SMB_ERR &&
	    smbios_info_common(ksmbios, id, &si) != SMB_ERR) {

		/* Prefix matches on all three strings are required */
		for (asd = ao_smi_disable; asd->asd_sys_vendor != NULL; asd++) {
			if (strncmp(asd->asd_sys_vendor, si.smbi_manufacturer,
			    strlen(asd->asd_sys_vendor)) != 0 ||
			    strncmp(asd->asd_sys_product, si.smbi_product,
			    strlen(asd->asd_sys_product)) != 0 ||
			    strncmp(asd->asd_bios_vendor, sb.smbb_vendor,
			    strlen(asd->asd_bios_vendor)) != 0)
				continue;

			/*
			 * Look for the SMI_CMD port in the ACPI FADT,
			 * if the port is 0, this platform doesn't support
			 * SMM, so there is no SMI error polling to disable.
			 */
			if ((rv = ao_acpi_find_smicmd(&asd_port)) == 0 &&
			    asd_port != 0) {
				cmn_err(CE_CONT, "?SMI polling disabled in "
				    "favor of Solaris Fault Management for "
				    "AMD Processors\n");

				outb(asd_port, asd->asd_code);

			} else if (rv < 0) {
				cmn_err(CE_CONT, "?Solaris Fault Management "
				    "for AMD Processors could not disable SMI "
				    "polling because an error occurred while "
				    "trying to determine the SMI command port "
				    "from the ACPI FADT table\n");
			}
			break;
		}
	}
}
946