xref: /illumos-gate/usr/src/uts/intel/io/intel_nb5000/intel_nb5000.c (revision b793cf1f804f52789df526036d96d1be7d3efc9d)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/types.h>
28 #include <sys/cmn_err.h>
29 #include <sys/errno.h>
30 #include <sys/log.h>
31 #include <sys/systm.h>
32 #include <sys/modctl.h>
33 #include <sys/errorq.h>
34 #include <sys/controlregs.h>
35 #include <sys/fm/util.h>
36 #include <sys/fm/protocol.h>
37 #include <sys/sysevent.h>
38 #include <sys/pghw.h>
39 #include <sys/cyclic.h>
40 #include <sys/pci_cfgspace.h>
41 #include <sys/mc_intel.h>
42 #include <sys/smbios.h>
43 #include "nb5000.h"
44 #include "nb_log.h"
45 #include "dimm_phys.h"
46 
47 static uint32_t uerrcnt[2];
48 static uint32_t cerrcnta[2][2];
49 static uint32_t cerrcntb[2][2];
50 static uint32_t cerrcntc[2][2];
51 static uint32_t cerrcntd[2][2];
52 static nb_logout_t nb_log;
53 
54 struct mch_error_code {
55 	int intel_error_list;	/* error number in Chipset Error List */
56 	uint32_t emask;		/* mask for machine check */
57 	uint32_t error_bit;	/* error bit in fault register */
58 };
59 
60 static struct mch_error_code fat_fbd_error_code[] = {
61 	{ 23, EMASK_FBD_M23, ERR_FAT_FBD_M23 },
62 	{ 3, EMASK_FBD_M3, ERR_FAT_FBD_M3 },
63 	{ 2, EMASK_FBD_M2, ERR_FAT_FBD_M2 },
64 	{ 1, EMASK_FBD_M1, ERR_FAT_FBD_M1 }
65 };
66 
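/*
 * Map the set bits of the fatal FB-DIMM error register onto the Intel
 * Chipset Error List ("M" numbers).  Matching EMASK bits are collected
 * and masked so the errors do not also surface as machine checks.
 * Returns the matching error number, or -1 when no bit or more than
 * one bit is set.
 */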
67 static int
68 intel_fat_fbd_err(uint32_t fat_fbd)
69 {
70 	int rt = -1;
71 	int nerr = 0;
72 	uint32_t emask_fbd = 0;
73 	int i;
74 	int sz;
75 
76 	sz = sizeof (fat_fbd_error_code) / sizeof (struct mch_error_code);
77 
78 	for (i = 0; i < sz; i++) {
79 		if (fat_fbd & fat_fbd_error_code[i].error_bit) {
80 			rt = fat_fbd_error_code[i].intel_error_list;
81 			emask_fbd |= fat_fbd_error_code[i].emask;
82 			nerr++;
83 		}
84 	}
85 
86 	if (emask_fbd)
87 		nb_fbd_mask_mc(emask_fbd);
88 	if (nerr > 1)
89 		rt = -1;
90 	return (rt);
91 }
92 
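/*
 * Decode the logged fatal FB-DIMM registers into the memory scratchpad:
 * branch, channel, rank, dimm, bank, ras, cas and, where they can be
 * derived, the physical address and DIMM offset.  Returns the ereport
 * class suffix for the error (e.g. "nb.fbd.alert", "nb.fbd.crc").
 */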
93 static char *
94 fat_memory_error(const nb_regs_t *rp, void *data)
95 {
96 	int channel;
97 	uint32_t ferr_fat_fbd, nrecmemb;
98 	uint32_t nrecmema;
99 	char *intr = "nb.unknown";
100 	nb_mem_scatchpad_t *sp = &((nb_scatchpad_t *)data)->ms;
101 
102 	ferr_fat_fbd = rp->nb.fat_fbd_regs.ferr_fat_fbd;
103 	if ((ferr_fat_fbd & ERR_FAT_FBD_MASK) == 0) {
104 		sp->intel_error_list =
105 		    intel_fat_fbd_err(rp->nb.fat_fbd_regs.nerr_fat_fbd);
106 		sp->branch = -1;
107 		sp->channel = -1;
108 		sp->rank = -1;
109 		sp->dimm = -1;
110 		sp->bank = -1;
111 		sp->cas = -1;
112 		sp->ras = -1;
113 		sp->pa = -1LL;
114 		sp->offset = -1;
115 		return (intr);
116 	}
117 	sp->intel_error_list = intel_fat_fbd_err(ferr_fat_fbd);
118 	channel = (ferr_fat_fbd >> 28) & 3;
119 	sp->branch = channel >> 1;
120 	sp->channel = channel;
121 	if ((ferr_fat_fbd & (ERR_FAT_FBD_M2|ERR_FAT_FBD_M1)) != 0) {
122 		if ((ferr_fat_fbd & ERR_FAT_FBD_M1) != 0)
123 			intr = "nb.fbd.alert";	/* Alert on FB-DIMM M1 */
124 		else
125 			intr = "nb.fbd.crc";	/* CRC error FB-DIMM M2 */
126 		nrecmema = rp->nb.fat_fbd_regs.nrecmema;
127 		nrecmemb = rp->nb.fat_fbd_regs.nrecmemb;
128 		sp->rank = (nrecmema >> 8) & RANK_MASK;
129 		sp->dimm = sp->rank >> 1;
130 		sp->bank = (nrecmema >> 12) & BANK_MASK;
131 		sp->cas = (nrecmemb >> 16) & CAS_MASK;
132 		sp->ras = nrecmemb & RAS_MASK;
133 		sp->pa = dimm_getphys(sp->branch, sp->rank, sp->bank, sp->ras,
134 		    sp->cas);
135 		sp->offset = dimm_getoffset(sp->branch, sp->rank, sp->bank,
136 		    sp->ras, sp->cas);
137 	} else {
138 		if ((ferr_fat_fbd & ERR_FAT_FBD_M3) != 0)
139 			intr = "nb.fbd.otf";	/* thermal temp > Tmid M3 */
140 		else if ((ferr_fat_fbd & ERR_FAT_FBD_M23) != 0) {
141 			intr = "nb.fbd.reset_timeout";
142 			sp->channel = -1;
143 		}
144 		sp->rank = -1;
145 		sp->dimm = -1;
146 		sp->bank = -1;
147 		sp->cas = -1;
148 		sp->ras = -1;
149 		sp->pa = -1LL;
150 		sp->offset = -1;
151 	}
152 	return (intr);
153 }
154 
155 
156 static struct mch_error_code nf_fbd_error_code[] = {
157 	{ 29, EMASK_FBD_M29, ERR_NF_FBD_M29 },
158 	{ 28, EMASK_FBD_M28, ERR_NF_FBD_M28 },
159 	{ 27, EMASK_FBD_M27, ERR_NF_FBD_M27 },
160 	{ 26, EMASK_FBD_M26, ERR_NF_FBD_M26 },
161 	{ 25, EMASK_FBD_M25, ERR_NF_FBD_M25 },
162 	{ 24, EMASK_FBD_M24, ERR_NF_FBD_M24 },
163 	{ 22, EMASK_FBD_M22, ERR_NF_FBD_M22 },
164 	{ 21, EMASK_FBD_M21, ERR_NF_FBD_M21 },
165 	{ 20, EMASK_FBD_M20, ERR_NF_FBD_M20 },
166 	{ 19, EMASK_FBD_M19, ERR_NF_FBD_M19 },
167 	{ 18, EMASK_FBD_M18, ERR_NF_FBD_M18 },
168 	{ 17, EMASK_FBD_M17, ERR_NF_FBD_M17 },
169 	{ 16, EMASK_FBD_M16, ERR_NF_FBD_M16 },
170 	{ 15, EMASK_FBD_M15, ERR_NF_FBD_M15 },
171 	{ 14, EMASK_FBD_M14, ERR_NF_FBD_M14 },
172 	{ 13, EMASK_FBD_M13, ERR_NF_FBD_M13 },
173 	{ 12, EMASK_FBD_M12, ERR_NF_FBD_M12 },
174 	{ 11, EMASK_FBD_M11, ERR_NF_FBD_M11 },
175 	{ 10, EMASK_FBD_M10, ERR_NF_FBD_M10 },
176 	{ 9, EMASK_FBD_M9, ERR_NF_FBD_M9 },
177 	{ 8, EMASK_FBD_M8, ERR_NF_FBD_M8 },
178 	{ 7, EMASK_FBD_M7, ERR_NF_FBD_M7 },
179 	{ 6, EMASK_FBD_M6, ERR_NF_FBD_M6 },
180 	{ 5, EMASK_FBD_M5, ERR_NF_FBD_M5 },
181 	{ 4, EMASK_FBD_M4, ERR_NF_FBD_M4 }
182 };
183 
184 static int
185 intel_nf_fbd_err(uint32_t nf_fbd)
186 {
187 	int rt = -1;
188 	int nerr = 0;
189 	uint32_t emask_fbd = 0;
190 	int i;
191 	int sz;
192 
193 	sz = sizeof (nf_fbd_error_code) / sizeof (struct mch_error_code);
194 
195 	for (i = 0; i < sz; i++) {
196 		if (nf_fbd & nf_fbd_error_code[i].error_bit) {
197 			rt = nf_fbd_error_code[i].intel_error_list;
198 			emask_fbd |= nf_fbd_error_code[i].emask;
199 			nerr++;
200 		}
201 	}
202 	if (emask_fbd)
203 		nb_fbd_mask_mc(emask_fbd);
204 	if (nerr > 1)
205 		rt = -1;
206 	return (rt);
207 }
208 
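/*
 * Decode the logged non-fatal FB-DIMM registers.  Correctable ECC
 * errors are isolated to a single channel using the locator bits in
 * REDMEMB; uncorrectable ECC and write errors can only be isolated to
 * a pair of dimms, so the channel is left unset for those.  Returns
 * the ereport class suffix (e.g. "nb.mem_ce", "nb.mem_ue", "nb.mem_ds").
 */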
209 static char *
210 nf_memory_error(const nb_regs_t *rp, void *data)
211 {
212 	uint32_t ferr_nf_fbd, recmemb, redmemb;
213 	uint32_t recmema;
214 	int branch, channel, ecc_locator;
215 	char *intr = "nb.unknown";
216 	nb_mem_scatchpad_t *sp = &((nb_scatchpad_t *)data)->ms;
217 
218 	sp->rank = -1;
219 	sp->dimm = -1;
220 	sp->bank = -1;
221 	sp->cas = -1;
222 	sp->ras = -1LL;
223 	sp->pa = -1LL;
224 	sp->offset = -1;
225 	ferr_nf_fbd = rp->nb.nf_fbd_regs.ferr_nf_fbd;
226 	if ((ferr_nf_fbd & ERR_NF_FBD_MASK) == 0) {
227 		sp->branch = -1;
228 		sp->channel = -1;
229 		sp->intel_error_list =
230 		    intel_nf_fbd_err(rp->nb.nf_fbd_regs.nerr_nf_fbd);
231 		return (intr);
232 	}
233 	sp->intel_error_list = intel_nf_fbd_err(ferr_nf_fbd);
234 	channel = (ferr_nf_fbd >> ERR_FBD_CH_SHIFT) & 3;
235 	branch = channel >> 1;
236 	sp->branch = branch;
237 	sp->channel = channel;
238 	if (ferr_nf_fbd & ERR_NF_FBD_MASK) {
239 		if (ferr_nf_fbd & ERR_NF_FBD_ECC_UE) {
240 			/*
241 			 * uncorrectable ECC M4 - M12
242 			 * we can only isolate to a pair of dimms;
243 			 * for a single-dimm configuration, let eversholt
244 			 * sort it out without needing a special rule
245 			 */
246 			sp->channel = -1;
247 			recmema = rp->nb.nf_fbd_regs.recmema;
248 			recmemb = rp->nb.nf_fbd_regs.recmemb;
249 			sp->rank = (recmema >> 8) & RANK_MASK;
250 			sp->bank = (recmema >> 12) & BANK_MASK;
251 			sp->cas = (recmemb >> 16) & CAS_MASK;
252 			sp->ras = recmemb & RAS_MASK;
253 			intr = "nb.mem_ue";
254 		} else if (ferr_nf_fbd & ERR_NF_FBD_M13) {
255 			/*
256 			 * we can only isolate to a pair of dimms
257 			 * we can only isolate to pair of dimms
258 			 */
259 			sp->channel = -1;
260 			if (nb_mode != NB_MEMORY_MIRROR) {
261 				recmema = rp->nb.nf_fbd_regs.recmema;
				recmemb = rp->nb.nf_fbd_regs.recmemb;
262 				sp->rank = (recmema >> 8) & RANK_MASK;
263 				sp->bank = (recmema >> 12) & BANK_MASK;
264 				sp->cas = (recmemb >> 16) & CAS_MASK;
265 				sp->ras = recmemb & RAS_MASK;
266 			}
267 			intr = "nb.fbd.ma"; /* memory alert */
268 		} else if (ferr_nf_fbd & ERR_NF_FBD_MA) { /* M14, M15 and M21 */
269 			intr = "nb.fbd.ch"; /* FBD on channel */
270 		} else if ((ferr_nf_fbd & ERR_NF_FBD_ECC_CE) != 0) {
271 			/* correctable ECC M17-M20 */
272 			recmema = rp->nb.nf_fbd_regs.recmema;
273 			recmemb = rp->nb.nf_fbd_regs.recmemb;
274 			sp->rank = (recmema >> 8) & RANK_MASK;
275 			redmemb = rp->nb.nf_fbd_regs.redmemb;
276 			ecc_locator = redmemb & 0x3ffff;
277 			if (ecc_locator & 0x1ff)
278 				sp->channel = branch << 1;
279 			else if (ecc_locator & 0x3fe00)
280 				sp->channel = (branch << 1) + 1;
281 			sp->dimm = sp->rank >> 1;
282 			sp->bank = (recmema >> 12) & BANK_MASK;
283 			sp->cas = (recmemb >> 16) & CAS_MASK;
284 			sp->ras = recmemb & RAS_MASK;
285 			intr = "nb.mem_ce";
286 		} else if ((ferr_nf_fbd & ERR_NF_FBD_SPARE) != 0) {
287 			/* spare dimm M27, M28 */
288 			intr = "nb.mem_ds";
289 			sp->channel = -1;
290 			if (rp->nb.nf_fbd_regs.spcps & SPCPS_SPARE_DEPLOYED) {
291 				sp->rank =
292 				    SPCPS_FAILED_RANK(rp->nb.nf_fbd_regs.spcps);
293 				nb_used_spare_rank(sp->branch, sp->rank);
294 				nb_config_gen++;
295 			}
296 		} else if ((ferr_nf_fbd & ERR_NF_FBD_M22) != 0) {
297 			intr = "nb.spd";	/* SPD protocol */
298 		}
299 	}
300 	if (sp->ras != -1) {
301 		sp->pa = dimm_getphys(sp->branch, sp->rank, sp->bank, sp->ras,
302 		    sp->cas);
303 		sp->offset = dimm_getoffset(sp->branch, sp->rank, sp->bank,
304 		    sp->ras, sp->cas);
305 	}
306 	return (intr);
307 }
308 
309 static struct mch_error_code fat_int_error_code[] = {
310 	{ 14, EMASK_INT_B14, ERR_FAT_INT_B14 },
311 	{ 12, EMASK_INT_B12, ERR_FAT_INT_B12 },
312 	{ 25, EMASK_INT_B25, ERR_FAT_INT_B25 },
313 	{ 23, EMASK_INT_B23, ERR_FAT_INT_B23 },
314 	{ 21, EMASK_INT_B21, ERR_FAT_INT_B21 },
315 	{ 7, EMASK_INT_B7, ERR_FAT_INT_B7 },
316 	{ 4, EMASK_INT_B4, ERR_FAT_INT_B4 },
317 	{ 3, EMASK_INT_B3, ERR_FAT_INT_B3 },
318 	{ 2, EMASK_INT_B2, ERR_FAT_INT_B2 },
319 	{ 1, EMASK_INT_B1, ERR_FAT_INT_B1 }
320 };
321 
322 static struct mch_error_code nf_int_error_code[] = {
323 	{ 27, 0, ERR_NF_INT_B27 },
324 	{ 24, 0, ERR_NF_INT_B24 },
325 	{ 22, EMASK_INT_B22, ERR_NF_INT_B22 },
326 	{ 20, EMASK_INT_B20, ERR_NF_INT_B20 },
327 	{ 19, EMASK_INT_B19, ERR_NF_INT_B19 },
328 	{ 18, 0, ERR_NF_INT_B18 },
329 	{ 17, 0, ERR_NF_INT_B17 },
330 	{ 16, 0, ERR_NF_INT_B16 },
331 	{ 11, EMASK_INT_B11, ERR_NF_INT_B11 },
332 	{ 10, EMASK_INT_B10, ERR_NF_INT_B10 },
333 	{ 9, EMASK_INT_B9, ERR_NF_INT_B9 },
334 	{ 8, EMASK_INT_B8, ERR_NF_INT_B8 },
335 	{ 6, EMASK_INT_B6, ERR_NF_INT_B6 },
336 	{ 5, EMASK_INT_B5, ERR_NF_INT_B5 }
337 };
338 
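/*
 * Combine the fatal and non-fatal internal (B) error bits into a single
 * Chipset Error List number.  The 5400-only B26 bit is handled
 * specially.  Matching errors are masked from machine check generation;
 * -1 is returned for multiple or unknown errors.
 */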
339 static int
340 intel_int_err(uint16_t err_fat_int, uint16_t err_nf_int)
341 {
342 	int rt = -1;
343 	int nerr = 0;
344 	uint32_t emask_int = 0;
345 	int i;
346 	int sz;
347 
348 	sz = sizeof (fat_int_error_code) / sizeof (struct mch_error_code);
349 
350 	for (i = 0; i < sz; i++) {
351 		if (err_fat_int & fat_int_error_code[i].error_bit) {
352 			rt = fat_int_error_code[i].intel_error_list;
353 			emask_int |= fat_int_error_code[i].emask;
354 			nerr++;
355 		}
356 	}
357 
358 	if (nb_chipset == INTEL_NB_5400 &&
359 	    (err_nf_int & NERR_NF_5400_INT_B26) != 0) {
360 		err_nf_int &= ~NERR_NF_5400_INT_B26;
361 		rt = 26;
362 		nerr++;
363 	}
364 
365 	if (rt)
366 		err_nf_int &= ~ERR_NF_INT_B18;
367 
368 	sz = sizeof (nf_int_error_code) / sizeof (struct mch_error_code);
369 
370 	for (i = 0; i < sz; i++) {
371 		if (err_nf_int & nf_int_error_code[i].error_bit) {
372 			rt = nf_int_error_code[i].intel_error_list;
373 			emask_int |= nf_int_error_code[i].emask;
374 			nerr++;
375 		}
376 	}
377 
378 	if (emask_int)
379 		nb_int_mask_mc(emask_int);
380 	if (nerr > 1)
381 		rt = -1;
382 	return (rt);
383 }
384 
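/*
 * Capture and clear the internal error registers, noting in *interpose
 * whether any value was injected.  Returns 1 if the only bit present is
 * the spurious B18 non-fatal error, so the caller can skip dispatching
 * an ereport.
 */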
385 static int
386 log_int_err(nb_regs_t *rp, int *interpose)
387 {
388 	int t = 0;
389 	int rt = 0;
390 
391 	rp->flag = NB_REG_LOG_INT;
392 	rp->nb.int_regs.ferr_fat_int = FERR_FAT_INT_RD(interpose);
393 	rp->nb.int_regs.ferr_nf_int = FERR_NF_INT_RD(&t);
394 	*interpose |= t;
395 	rp->nb.int_regs.nerr_fat_int = NERR_FAT_INT_RD(&t);
396 	*interpose |= t;
397 	rp->nb.int_regs.nerr_nf_int = NERR_NF_INT_RD(&t);
398 	*interpose |= t;
399 	rp->nb.int_regs.nrecint = NRECINT_RD();
400 	rp->nb.int_regs.recint = RECINT_RD();
401 	rp->nb.int_regs.nrecsf = NRECSF_RD();
402 	rp->nb.int_regs.recsf = RECSF_RD();
403 
404 	if (rp->nb.int_regs.ferr_fat_int || *interpose)
405 		FERR_FAT_INT_WR(rp->nb.int_regs.ferr_fat_int);
406 	if (rp->nb.int_regs.ferr_nf_int || *interpose)
407 		FERR_NF_INT_WR(rp->nb.int_regs.ferr_nf_int);
408 	if (rp->nb.int_regs.nerr_fat_int)
409 		NERR_FAT_INT_WR(rp->nb.int_regs.nerr_fat_int);
410 	if (rp->nb.int_regs.nerr_nf_int)
411 		NERR_NF_INT_WR(rp->nb.int_regs.nerr_nf_int);
412 	/* if interposing, write read-only registers to clear the interpose cache */
413 	if (*interpose) {
414 		NRECINT_WR();
415 		RECINT_WR();
416 		NRECSF_WR();
417 		RECSF_WR();
418 	}
419 	if (rp->nb.int_regs.ferr_fat_int == 0 &&
420 	    rp->nb.int_regs.nerr_fat_int == 0 &&
421 	    (rp->nb.int_regs.ferr_nf_int == ERR_NF_INT_B18 ||
422 	    (rp->nb.int_regs.ferr_nf_int == 0 &&
423 	    rp->nb.int_regs.nerr_nf_int == ERR_NF_INT_B18))) {
424 		rt = 1;
425 	}
426 	return (rt);
427 }
428 
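/*
 * Capture and clear the thermal error registers (called only for the
 * 5400 chipset), noting in *interpose whether any value was injected.
 */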
429 static void
430 log_thermal_err(nb_regs_t *rp, int *interpose)
431 {
432 	int t = 0;
433 
434 	rp->flag = NB_REG_LOG_THR;
435 	rp->nb.thr_regs.ferr_fat_thr = FERR_FAT_THR_RD(interpose);
436 	rp->nb.thr_regs.nerr_fat_thr = NERR_FAT_THR_RD(&t);
437 	*interpose |= t;
438 	rp->nb.thr_regs.ferr_nf_thr = FERR_NF_THR_RD(&t);
439 	*interpose |= t;
440 	rp->nb.thr_regs.nerr_nf_thr = NERR_NF_THR_RD(&t);
441 	*interpose |= t;
442 	rp->nb.thr_regs.ctsts = CTSTS_RD();
443 	rp->nb.thr_regs.thrtsts = THRTSTS_RD();
444 
445 	if (rp->nb.thr_regs.ferr_fat_thr || *interpose)
446 		FERR_FAT_THR_WR(rp->nb.thr_regs.ferr_fat_thr);
447 	if (rp->nb.thr_regs.nerr_fat_thr || *interpose)
448 		NERR_FAT_THR_WR(rp->nb.thr_regs.nerr_fat_thr);
449 	if (rp->nb.thr_regs.ferr_nf_thr || *interpose)
450 		FERR_NF_THR_WR(rp->nb.thr_regs.ferr_nf_thr);
451 	if (rp->nb.thr_regs.nerr_nf_thr || *interpose)
452 		NERR_NF_THR_WR(rp->nb.thr_regs.nerr_nf_thr);
453 
454 	if (*interpose) {
455 		CTSTS_WR(rp->nb.thr_regs.ctsts);
456 		THRTSTS_WR(rp->nb.thr_regs.thrtsts);
457 	}
458 }
459 
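/*
 * Capture the DMA engine status registers; only the PCISTS read goes
 * through the error-injection interpose check.
 */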
460 static void
461 log_dma_err(nb_regs_t *rp, int *interpose)
462 {
463 	rp->flag = NB_REG_LOG_DMA;
464 
465 	rp->nb.dma_regs.pcists = PCISTS_RD(interpose);
466 	rp->nb.dma_regs.pexdevsts = PCIDEVSTS_RD();
467 }
468 
469 static struct mch_error_code fat_fsb_error_code[] = {
470 	{ 9, EMASK_FSB_F9, ERR_FAT_FSB_F9 },
471 	{ 2, EMASK_FSB_F2, ERR_FAT_FSB_F2 },
472 	{ 1, EMASK_FSB_F1, ERR_FAT_FSB_F1 }
473 };
474 
475 static struct mch_error_code nf_fsb_error_code[] = {
476 	{ 8, EMASK_FSB_F8, ERR_NF_FSB_F8 },
477 	{ 7, EMASK_FSB_F7, ERR_NF_FSB_F7 },
478 	{ 6, EMASK_FSB_F6, ERR_NF_FSB_F6 }
479 };
480 
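/*
 * Map fatal and non-fatal FSB error bits onto the Chipset Error List
 * ("F" numbers) for the given front side bus, masking matching errors
 * from machine check generation.  Returns -1 for multiple or unknown
 * errors.
 */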
481 static int
482 intel_fsb_err(int fsb, uint8_t err_fat_fsb, uint8_t err_nf_fsb)
483 {
484 	int rt = -1;
485 	int nerr = 0;
486 	uint16_t emask_fsb = 0;
487 	int i;
488 	int sz;
489 
490 	sz = sizeof (fat_fsb_error_code) / sizeof (struct mch_error_code);
491 
492 	for (i = 0; i < sz; i++) {
493 		if (err_fat_fsb & fat_fsb_error_code[i].error_bit) {
494 			rt = fat_fsb_error_code[i].intel_error_list;
495 			emask_fsb |= fat_fsb_error_code[i].emask;
496 			nerr++;
497 		}
498 	}
499 
500 	sz = sizeof (nf_fsb_error_code) / sizeof (struct mch_error_code);
501 
502 	for (i = 0; i < sz; i++) {
503 		if (err_nf_fsb & nf_fsb_error_code[i].error_bit) {
504 			rt = nf_fsb_error_code[i].intel_error_list;
505 			emask_fsb |= nf_fsb_error_code[i].emask;
506 			nerr++;
507 		}
508 	}
509 
510 	if (emask_fsb)
511 		nb_fsb_mask_mc(fsb, emask_fsb);
512 	if (nerr > 1)
513 		rt = -1;
514 	return (rt);
515 }
516 
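/*
 * Capture and clear the error registers for the FSB identified in the
 * global FERR value, noting in *interpose whether any value was
 * injected.
 */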
517 static void
518 log_fsb_err(uint64_t ferr, nb_regs_t *rp, int *interpose)
519 {
520 	uint8_t fsb;
521 	int t = 0;
522 
523 	fsb = GE_FERR_FSB(ferr);
524 	rp->flag = NB_REG_LOG_FSB;
525 
526 	rp->nb.fsb_regs.fsb = fsb;
527 	rp->nb.fsb_regs.ferr_fat_fsb = FERR_FAT_FSB_RD(fsb, interpose);
528 	rp->nb.fsb_regs.ferr_nf_fsb = FERR_NF_FSB_RD(fsb, &t);
529 	*interpose |= t;
530 	rp->nb.fsb_regs.nerr_fat_fsb = NERR_FAT_FSB_RD(fsb, &t);
531 	*interpose |= t;
532 	rp->nb.fsb_regs.nerr_nf_fsb = NERR_NF_FSB_RD(fsb, &t);
533 	*interpose |= t;
534 	rp->nb.fsb_regs.nrecfsb = NRECFSB_RD(fsb);
535 	rp->nb.fsb_regs.nrecfsb_addr = NRECADDR_RD(fsb);
536 	rp->nb.fsb_regs.recfsb = RECFSB_RD(fsb);
537 	if (rp->nb.fsb_regs.ferr_fat_fsb || *interpose)
538 		FERR_FAT_FSB_WR(fsb, rp->nb.fsb_regs.ferr_fat_fsb);
539 	if (rp->nb.fsb_regs.ferr_nf_fsb || *interpose)
540 		FERR_NF_FSB_WR(fsb, rp->nb.fsb_regs.ferr_nf_fsb);
541 	/* if interposing, write read-only registers to clear the interpose cache */
542 	if (*interpose) {
543 		NRECFSB_WR(fsb);
544 		NRECADDR_WR(fsb);
545 		RECFSB_WR(fsb);
546 	}
547 }
548 
549 static struct mch_error_code fat_pex_error_code[] = {
550 	{ 19, EMASK_UNCOR_PEX_IO19, PEX_FAT_IO19 },
551 	{ 18, EMASK_UNCOR_PEX_IO18, PEX_FAT_IO18 },
552 	{ 10, EMASK_UNCOR_PEX_IO10, PEX_FAT_IO10 },
553 	{ 9, EMASK_UNCOR_PEX_IO9, PEX_FAT_IO9 },
554 	{ 8, EMASK_UNCOR_PEX_IO8, PEX_FAT_IO8 },
555 	{ 7, EMASK_UNCOR_PEX_IO7, PEX_FAT_IO7 },
556 	{ 6, EMASK_UNCOR_PEX_IO6, PEX_FAT_IO6 },
557 	{ 5, EMASK_UNCOR_PEX_IO5, PEX_FAT_IO5 },
558 	{ 4, EMASK_UNCOR_PEX_IO4, PEX_FAT_IO4 },
559 	{ 3, EMASK_UNCOR_PEX_IO3, PEX_FAT_IO3 },
560 	{ 2, EMASK_UNCOR_PEX_IO2, PEX_FAT_IO2 },
561 	{ 0, EMASK_UNCOR_PEX_IO0, PEX_FAT_IO0 }
562 };
563 
564 static struct mch_error_code fat_unit_pex_5400_error_code[] = {
565 	{ 32, EMASK_UNIT_PEX_IO32, PEX_5400_FAT_IO32 },
566 	{ 31, EMASK_UNIT_PEX_IO31, PEX_5400_FAT_IO31 },
567 	{ 30, EMASK_UNIT_PEX_IO30, PEX_5400_FAT_IO30 },
568 	{ 29, EMASK_UNIT_PEX_IO29, PEX_5400_FAT_IO29 },
569 	{ 27, EMASK_UNIT_PEX_IO27, PEX_5400_FAT_IO27 },
570 	{ 26, EMASK_UNIT_PEX_IO26, PEX_5400_FAT_IO26 },
571 	{ 25, EMASK_UNIT_PEX_IO25, PEX_5400_FAT_IO25 },
572 	{ 24, EMASK_UNIT_PEX_IO24, PEX_5400_FAT_IO24 },
573 	{ 23, EMASK_UNIT_PEX_IO23, PEX_5400_FAT_IO23 },
574 	{ 22, EMASK_UNIT_PEX_IO22, PEX_5400_FAT_IO22 },
575 };
576 
577 static struct mch_error_code fat_pex_5400_error_code[] = {
578 	{ 19, EMASK_UNCOR_PEX_IO19, PEX_5400_FAT_IO19 },
579 	{ 18, EMASK_UNCOR_PEX_IO18, PEX_5400_FAT_IO18 },
580 	{ 10, EMASK_UNCOR_PEX_IO10, PEX_5400_FAT_IO10 },
581 	{ 9, EMASK_UNCOR_PEX_IO9, PEX_5400_FAT_IO9 },
582 	{ 8, EMASK_UNCOR_PEX_IO8, PEX_5400_FAT_IO8 },
583 	{ 7, EMASK_UNCOR_PEX_IO7, PEX_5400_FAT_IO7 },
584 	{ 6, EMASK_UNCOR_PEX_IO6, PEX_5400_FAT_IO6 },
585 	{ 5, EMASK_UNCOR_PEX_IO5, PEX_5400_FAT_IO5 },
586 	{ 4, EMASK_UNCOR_PEX_IO4, PEX_5400_FAT_IO4 },
587 	{ 2, EMASK_UNCOR_PEX_IO2, PEX_5400_FAT_IO2 },
588 	{ 0, EMASK_UNCOR_PEX_IO0, PEX_5400_FAT_IO0 }
589 };
590 
591 static struct mch_error_code fat_rp_5400_error_code[] = {
592 	{ 1, EMASK_RP_PEX_IO1, PEX_5400_FAT_IO1 }
593 };
594 
595 static struct mch_error_code fat_rp_error_code[] = {
596 	{ 1, EMASK_RP_PEX_IO1, PEX_FAT_IO1 }
597 };
598 
599 static struct mch_error_code uncor_pex_error_code[] = {
600 	{ 19, EMASK_UNCOR_PEX_IO19, PEX_NF_IO19 },
601 	{ 9, EMASK_UNCOR_PEX_IO9, PEX_NF_IO9 },
602 	{ 8, EMASK_UNCOR_PEX_IO8, PEX_NF_IO8 },
603 	{ 7, EMASK_UNCOR_PEX_IO7, PEX_NF_IO7 },
604 	{ 6, EMASK_UNCOR_PEX_IO6, PEX_NF_IO6 },
605 	{ 5, EMASK_UNCOR_PEX_IO5, PEX_NF_IO5 },
606 	{ 4, EMASK_UNCOR_PEX_IO4, PEX_NF_IO4 },
607 	{ 3, EMASK_UNCOR_PEX_IO3, PEX_NF_IO3 },
608 	{ 0, EMASK_UNCOR_PEX_IO0, PEX_NF_IO0 }
609 };
610 
611 static struct mch_error_code uncor_pex_5400_error_code[] = {
612 	{ 33, EMASK_UNIT_PEX_IO33, PEX_5400_NF_IO33 },
613 	{ 32, EMASK_UNIT_PEX_IO32, PEX_5400_NF_IO32 },
614 	{ 31, EMASK_UNIT_PEX_IO31, PEX_5400_NF_IO31 },
615 	{ 30, EMASK_UNIT_PEX_IO30, PEX_5400_NF_IO30 },
616 	{ 29, EMASK_UNIT_PEX_IO29, PEX_5400_NF_IO29 },
617 	{ 28, EMASK_UNIT_PEX_IO28, PEX_5400_NF_IO28 },
618 	{ 27, EMASK_UNIT_PEX_IO27, PEX_5400_NF_IO27 },
619 	{ 26, EMASK_UNIT_PEX_IO26, PEX_5400_NF_IO26 },
620 	{ 25, EMASK_UNIT_PEX_IO25, PEX_5400_NF_IO25 },
621 	{ 24, EMASK_UNIT_PEX_IO24, PEX_5400_NF_IO24 },
622 	{ 23, EMASK_UNIT_PEX_IO23, PEX_5400_NF_IO23 },
623 };
624 
625 static struct mch_error_code cor_pex_error_code[] = {
626 	{ 20, EMASK_COR_PEX_IO20, PEX_5400_NF_IO20 },
627 	{ 16, EMASK_COR_PEX_IO16, PEX_NF_IO16 },
628 	{ 15, EMASK_COR_PEX_IO15, PEX_NF_IO15 },
629 	{ 14, EMASK_COR_PEX_IO14, PEX_NF_IO14 },
630 	{ 13, EMASK_COR_PEX_IO13, PEX_NF_IO13 },
631 	{ 12, EMASK_COR_PEX_IO12, PEX_NF_IO12 },
632 	{ 10, 0, PEX_NF_IO10 },
633 	{ 2, 0, PEX_NF_IO2 }
634 };
635 
636 static struct mch_error_code rp_pex_5400_error_code[] = {
637 	{ 17, EMASK_RP_PEX_IO17, PEX_5400_NF_IO17 },
638 	{ 11, EMASK_RP_PEX_IO11, PEX_5400_NF_IO11 }
639 };
640 
641 static struct mch_error_code cor_pex_5400_error_code1[] = {
642 	{ 19, EMASK_UNCOR_PEX_IO19, PEX_5400_NF_IO19 },
643 	{ 10, EMASK_UNCOR_PEX_IO10, PEX_5400_NF_IO10 },
644 	{ 9, EMASK_UNCOR_PEX_IO9, PEX_5400_NF_IO9 },
645 	{ 8, EMASK_UNCOR_PEX_IO8, PEX_5400_NF_IO8 },
646 	{ 7, EMASK_UNCOR_PEX_IO7, PEX_5400_NF_IO7 },
647 	{ 6, EMASK_UNCOR_PEX_IO6, PEX_5400_NF_IO6 },
648 	{ 5, EMASK_UNCOR_PEX_IO5, PEX_5400_NF_IO5 },
649 	{ 4, EMASK_UNCOR_PEX_IO4, PEX_5400_NF_IO4 },
650 	{ 2, EMASK_UNCOR_PEX_IO2, PEX_5400_NF_IO2 },
651 	{ 0, EMASK_UNCOR_PEX_IO0, PEX_5400_NF_IO0 }
652 };
653 
654 static struct mch_error_code cor_pex_5400_error_code2[] = {
655 	{ 20, EMASK_COR_PEX_IO20, PEX_5400_NF_IO20 },
656 	{ 16, EMASK_COR_PEX_IO16, PEX_5400_NF_IO16 },
657 	{ 15, EMASK_COR_PEX_IO15, PEX_5400_NF_IO15 },
658 	{ 14, EMASK_COR_PEX_IO14, PEX_5400_NF_IO14 },
659 	{ 13, EMASK_COR_PEX_IO13, PEX_5400_NF_IO13 },
660 	{ 12, EMASK_COR_PEX_IO12, PEX_5400_NF_IO12 }
661 };
662 
663 static struct mch_error_code cor_pex_5400_error_code3[] = {
664 	{ 33, EMASK_UNIT_PEX_IO33, PEX_5400_NF_IO33 },
665 	{ 32, EMASK_UNIT_PEX_IO32, PEX_5400_NF_IO32 },
666 	{ 31, EMASK_UNIT_PEX_IO31, PEX_5400_NF_IO31 },
667 	{ 30, EMASK_UNIT_PEX_IO30, PEX_5400_NF_IO30 },
668 	{ 29, EMASK_UNIT_PEX_IO29, PEX_5400_NF_IO29 },
669 	{ 28, EMASK_UNIT_PEX_IO28, PEX_5400_NF_IO28 },
670 	{ 27, EMASK_UNIT_PEX_IO27, PEX_5400_NF_IO27 },
671 	{ 26, EMASK_UNIT_PEX_IO26, PEX_5400_NF_IO26 },
672 	{ 25, EMASK_UNIT_PEX_IO25, PEX_5400_NF_IO25 },
673 	{ 24, EMASK_UNIT_PEX_IO24, PEX_5400_NF_IO24 },
674 	{ 23, EMASK_UNIT_PEX_IO23, PEX_5400_NF_IO23 }
675 };
676 
677 static struct mch_error_code rp_pex_error_code[] = {
678 	{ 17, EMASK_RP_PEX_IO17, PEX_NF_IO17 },
679 	{ 11, EMASK_RP_PEX_IO11, PEX_NF_IO11 },
680 };
681 
682 static int
683 intel_pex_err(uint32_t pex_fat, uint32_t pex_nf_cor)
684 {
685 	int rt = -1;
686 	int nerr = 0;
687 	int i;
688 	int sz;
689 
690 	sz = sizeof (fat_pex_error_code) / sizeof (struct mch_error_code);
691 
692 	for (i = 0; i < sz; i++) {
693 		if (pex_fat & fat_pex_error_code[i].error_bit) {
694 			rt = fat_pex_error_code[i].intel_error_list;
695 			nerr++;
696 		}
697 	}
698 	sz = sizeof (fat_rp_error_code) / sizeof (struct mch_error_code);
699 
700 	for (i = 0; i < sz; i++) {
701 		if (pex_fat & fat_rp_error_code[i].error_bit) {
702 			rt = fat_rp_error_code[i].intel_error_list;
703 			nerr++;
704 		}
705 	}
706 	sz = sizeof (uncor_pex_error_code) / sizeof (struct mch_error_code);
707 
708 	for (i = 0; i < sz; i++) {
709 		if (pex_nf_cor & uncor_pex_error_code[i].error_bit) {
710 			rt = uncor_pex_error_code[i].intel_error_list;
711 			nerr++;
712 		}
713 	}
714 
715 	sz = sizeof (cor_pex_error_code) / sizeof (struct mch_error_code);
716 
717 	for (i = 0; i < sz; i++) {
718 		if (pex_nf_cor & cor_pex_error_code[i].error_bit) {
719 			rt = cor_pex_error_code[i].intel_error_list;
720 			nerr++;
721 		}
722 	}
723 	sz = sizeof (rp_pex_error_code) / sizeof (struct mch_error_code);
724 
725 	for (i = 0; i < sz; i++) {
726 		if (pex_nf_cor & rp_pex_error_code[i].error_bit) {
727 			rt = rp_pex_error_code[i].intel_error_list;
728 			nerr++;
729 		}
730 	}
731 
732 	if (nerr > 1)
733 		rt = -1;
734 	return (rt);
735 }
736 
737 static struct mch_error_code fat_thr_error_code[] = {
738 	{ 2, EMASK_THR_F2, ERR_FAT_THR_F2 },
739 	{ 1, EMASK_THR_F1, ERR_FAT_THR_F1 }
740 };
741 
742 static struct mch_error_code nf_thr_error_code[] = {
743 	{ 5, EMASK_THR_F5, ERR_NF_THR_F5 },
744 	{ 4, EMASK_THR_F4, ERR_NF_THR_F4 },
745 	{ 3, EMASK_THR_F3, ERR_NF_THR_F3 }
746 };
747 
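/*
 * Map fatal and non-fatal thermal error bits onto the Chipset Error
 * List, masking matching errors from machine check generation.
 * Returns -1 for multiple or unknown errors.
 */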
748 static int
749 intel_thr_err(uint8_t err_fat_thr, uint8_t err_nf_thr)
750 {
751 	int rt = -1;
752 	int nerr = 0;
753 	uint16_t emask_thr = 0;
754 	int i;
755 	int sz;
756 
757 	sz = sizeof (fat_thr_error_code) / sizeof (struct mch_error_code);
758 
759 	for (i = 0; i < sz; i++) {
760 		if (err_fat_thr & fat_thr_error_code[i].error_bit) {
761 			rt = fat_thr_error_code[i].intel_error_list;
762 			emask_thr |= fat_thr_error_code[i].emask;
763 			nerr++;
764 		}
765 	}
766 
767 	sz = sizeof (nf_thr_error_code) / sizeof (struct mch_error_code);
768 
769 	for (i = 0; i < sz; i++) {
770 		if (err_nf_thr & nf_thr_error_code[i].error_bit) {
771 			rt = nf_thr_error_code[i].intel_error_list;
772 			emask_thr |= nf_thr_error_code[i].emask;
773 			nerr++;
774 		}
775 	}
776 
777 	if (emask_thr)
778 		nb_thr_mask_mc(emask_thr);
779 	if (nerr > 1)
780 		rt = -1;
781 	return (rt);
782 }
783 
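/*
 * 5400-specific counterpart of intel_pex_err(): decode the PCI Express
 * fatal and non-fatal/correctable error bits using the 5400 error
 * tables.  Returns -1 for multiple or unknown errors.
 */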
784 static int
785 intel_pex_5400_err(uint32_t pex_fat, uint32_t pex_nf_cor)
786 {
787 	int rt = -1;
788 	int nerr = 0;
789 	int i;
790 	int sz;
791 
792 	sz = sizeof (fat_pex_5400_error_code) / sizeof (struct mch_error_code);
793 
794 	for (i = 0; i < sz; i++) {
795 		if (pex_fat & fat_pex_5400_error_code[i].error_bit) {
796 			rt = fat_pex_5400_error_code[i].intel_error_list;
797 			nerr++;
798 		}
799 	}
800 	sz = sizeof (fat_rp_5400_error_code) / sizeof (struct mch_error_code);
801 
802 	for (i = 0; i < sz; i++) {
803 		if (pex_fat & fat_rp_5400_error_code[i].error_bit) {
804 			rt = fat_rp_5400_error_code[i].intel_error_list;
805 			nerr++;
806 		}
807 	}
808 	sz = sizeof (fat_unit_pex_5400_error_code) /
809 	    sizeof (struct mch_error_code);
810 
811 	for (i = 0; i < sz; i++) {
812 		if (pex_fat &
813 		    fat_unit_pex_5400_error_code[i].error_bit) {
814 			rt = fat_unit_pex_5400_error_code[i].intel_error_list;
815 			nerr++;
816 		}
817 	}
818 	sz = sizeof (uncor_pex_5400_error_code) /
819 	    sizeof (struct mch_error_code);
820 
821 	for (i = 0; i < sz; i++) {
822 		if (pex_fat & uncor_pex_5400_error_code[i].error_bit) {
823 			rt = uncor_pex_5400_error_code[i].intel_error_list;
824 			nerr++;
825 		}
826 	}
827 
828 	sz = sizeof (rp_pex_5400_error_code) / sizeof (struct mch_error_code);
829 
830 	for (i = 0; i < sz; i++) {
831 		if (pex_nf_cor & rp_pex_5400_error_code[i].error_bit) {
832 			rt = rp_pex_5400_error_code[i].intel_error_list;
833 			nerr++;
834 		}
835 	}
836 
837 	sz = sizeof (cor_pex_5400_error_code1) / sizeof (struct mch_error_code);
838 
839 	for (i = 0; i < sz; i++) {
840 		if (pex_nf_cor & cor_pex_5400_error_code1[i].error_bit) {
841 			rt = cor_pex_5400_error_code1[i].intel_error_list;
842 			nerr++;
843 		}
844 	}
845 
846 	sz = sizeof (cor_pex_5400_error_code2) / sizeof (struct mch_error_code);
847 
848 	for (i = 0; i < sz; i++) {
849 		if (pex_nf_cor & cor_pex_5400_error_code2[i].error_bit) {
850 			rt = cor_pex_5400_error_code2[i].intel_error_list;
851 			nerr++;
852 		}
853 	}
854 
855 	sz = sizeof (cor_pex_5400_error_code3) / sizeof (struct mch_error_code);
856 
857 	for (i = 0; i < sz; i++) {
858 		if (pex_nf_cor & cor_pex_5400_error_code3[i].error_bit) {
859 			rt = cor_pex_5400_error_code3[i].intel_error_list;
860 			nerr++;
861 		}
862 	}
863 
864 	if (nerr > 1)
865 		rt = -1;
866 	return (rt);
867 }
868 
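/*
 * Capture and clear the PCI Express error registers for the port
 * identified in the global FERR value, noting in *interpose whether
 * any value was injected.
 */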
869 static void
870 log_pex_err(uint64_t ferr, nb_regs_t *rp, int *interpose)
871 {
872 	uint8_t pex = (uint8_t)-1;
873 	int t = 0;
874 
875 	rp->flag = NB_REG_LOG_PEX;
876 	pex = GE_ERR_PEX(ferr);
877 
878 	rp->nb.pex_regs.pex = pex;
879 	rp->nb.pex_regs.pex_fat_ferr =  PEX_FAT_FERR_RD(pex, interpose);
880 	rp->nb.pex_regs.pex_fat_nerr = PEX_FAT_NERR_RD(pex, &t);
881 	*interpose |= t;
882 	rp->nb.pex_regs.pex_nf_corr_ferr = PEX_NF_FERR_RD(pex, &t);
883 	*interpose |= t;
884 	rp->nb.pex_regs.pex_nf_corr_nerr = PEX_NF_NERR_RD(pex, &t);
885 	*interpose |= t;
886 	rp->nb.pex_regs.uncerrsev = UNCERRSEV_RD(pex);
887 	rp->nb.pex_regs.rperrsts = RPERRSTS_RD(pex);
888 	rp->nb.pex_regs.rperrsid = RPERRSID_RD(pex);
889 	if (pex != (uint8_t)-1)
890 		rp->nb.pex_regs.uncerrsts = UNCERRSTS_RD(pex);
891 	else
892 		rp->nb.pex_regs.uncerrsts = 0;
893 	rp->nb.pex_regs.aerrcapctrl = AERRCAPCTRL_RD(pex);
894 	rp->nb.pex_regs.corerrsts = CORERRSTS_RD(pex);
895 	rp->nb.pex_regs.pexdevsts = PEXDEVSTS_RD(pex);
896 
897 	if (rp->nb.pex_regs.pex_fat_ferr || *interpose)
898 		PEX_FAT_FERR_WR(pex, rp->nb.pex_regs.pex_fat_ferr);
899 	if (rp->nb.pex_regs.pex_fat_nerr)
900 		PEX_FAT_NERR_WR(pex, rp->nb.pex_regs.pex_fat_nerr);
901 	if (rp->nb.pex_regs.pex_nf_corr_ferr || *interpose)
902 		PEX_NF_FERR_WR(pex, rp->nb.pex_regs.pex_nf_corr_ferr);
903 	if (rp->nb.pex_regs.pex_nf_corr_nerr)
904 		PEX_NF_NERR_WR(pex, rp->nb.pex_regs.pex_nf_corr_nerr);
905 	if (*interpose)
906 		UNCERRSTS_WR(pex, rp->nb.pex_regs.uncerrsts);
907 	if (*interpose)
908 		RPERRSTS_WR(pex, rp->nb.pex_regs.rperrsts);
909 	if (*interpose)
910 		PEXDEVSTS_WR(pex, 0);
911 }
912 
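/*
 * Capture and clear the fatal FB-DIMM error registers for the branch
 * identified by FERR_FAT_FBD, and keep a running copy of that branch's
 * uncorrectable error count.
 */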
913 static void
914 log_fat_fbd_err(nb_regs_t *rp, int *interpose)
915 {
916 	int channel, branch;
917 	int t = 0;
918 
919 	rp->flag = NB_REG_LOG_FAT_FBD;
920 	rp->nb.fat_fbd_regs.ferr_fat_fbd = FERR_FAT_FBD_RD(interpose);
921 	channel = (rp->nb.fat_fbd_regs.ferr_fat_fbd >> 28) & 3;
922 	branch = channel >> 1;
923 	rp->nb.fat_fbd_regs.nerr_fat_fbd = NERR_FAT_FBD_RD(&t);
924 	*interpose |= t;
925 	rp->nb.fat_fbd_regs.nrecmema = NRECMEMA_RD(branch);
926 	rp->nb.fat_fbd_regs.nrecmemb = NRECMEMB_RD(branch);
927 	rp->nb.fat_fbd_regs.nrecfglog = NRECFGLOG_RD(branch);
928 	rp->nb.fat_fbd_regs.nrecfbda = NRECFBDA_RD(branch);
929 	rp->nb.fat_fbd_regs.nrecfbdb = NRECFBDB_RD(branch);
930 	rp->nb.fat_fbd_regs.nrecfbdc = NRECFBDC_RD(branch);
931 	rp->nb.fat_fbd_regs.nrecfbdd = NRECFBDD_RD(branch);
932 	rp->nb.fat_fbd_regs.nrecfbde = NRECFBDE_RD(branch);
933 	rp->nb.fat_fbd_regs.nrecfbdf = NRECFBDF_RD(branch);
934 	rp->nb.fat_fbd_regs.spcps = SPCPS_RD(branch);
935 	rp->nb.fat_fbd_regs.spcpc = SPCPC_RD(branch);
936 	rp->nb.fat_fbd_regs.uerrcnt = UERRCNT_RD(branch);
937 	rp->nb.fat_fbd_regs.uerrcnt_last = uerrcnt[branch];
938 	uerrcnt[branch] = rp->nb.fat_fbd_regs.uerrcnt;
939 	rp->nb.fat_fbd_regs.badrama = BADRAMA_RD(branch);
940 	rp->nb.fat_fbd_regs.badramb = BADRAMB_RD(branch);
941 	rp->nb.fat_fbd_regs.badcnt = BADCNT_RD(branch);
942 	if (rp->nb.fat_fbd_regs.ferr_fat_fbd || *interpose)
943 		FERR_FAT_FBD_WR(rp->nb.fat_fbd_regs.ferr_fat_fbd);
944 	if (rp->nb.fat_fbd_regs.nerr_fat_fbd)
945 		NERR_FAT_FBD_WR(rp->nb.fat_fbd_regs.nerr_fat_fbd);
946 	/* if interposing, write read-only registers to clear the interpose cache */
947 	if (*interpose) {
948 		NRECMEMA_WR(branch);
949 		NRECMEMB_WR(branch);
950 		NRECFGLOG_WR(branch);
951 		NRECFBDA_WR(branch);
952 		NRECFBDB_WR(branch);
953 		NRECFBDC_WR(branch);
954 		NRECFBDD_WR(branch);
955 		NRECFBDE_WR(branch);
956 		NRECFBDF_WR(branch);
957 	}
958 }
959 
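/*
 * Capture and clear the non-fatal FB-DIMM error registers for the
 * branch identified by FERR_NF_FBD, and keep running copies of the
 * per-channel correctable error counters.
 */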
960 static void
961 log_nf_fbd_err(nb_regs_t *rp, int *interpose)
962 {
963 	int channel, branch;
964 	int t = 0;
965 
966 	rp->flag = NB_REG_LOG_NF_FBD;
967 	rp->nb.nf_fbd_regs.ferr_nf_fbd = FERR_NF_FBD_RD(interpose);
968 	channel = (rp->nb.nf_fbd_regs.ferr_nf_fbd >> 28) & 3;
969 	branch = channel >> 1;
970 	rp->nb.nf_fbd_regs.nerr_nf_fbd = NERR_NF_FBD_RD(&t);
971 	*interpose |= t;
972 	rp->nb.nf_fbd_regs.redmemb = REDMEMB_RD();
973 	rp->nb.nf_fbd_regs.recmema = RECMEMA_RD(branch);
974 	rp->nb.nf_fbd_regs.recmemb = RECMEMB_RD(branch);
975 	rp->nb.nf_fbd_regs.recfglog = RECFGLOG_RD(branch);
976 	rp->nb.nf_fbd_regs.recfbda = RECFBDA_RD(branch);
977 	rp->nb.nf_fbd_regs.recfbdb = RECFBDB_RD(branch);
978 	rp->nb.nf_fbd_regs.recfbdc = RECFBDC_RD(branch);
979 	rp->nb.nf_fbd_regs.recfbdd = RECFBDD_RD(branch);
980 	rp->nb.nf_fbd_regs.recfbde = RECFBDE_RD(branch);
981 	rp->nb.nf_fbd_regs.recfbdf = RECFBDF_RD(branch);
982 	rp->nb.nf_fbd_regs.spcps = SPCPS_RD(branch);
983 	rp->nb.nf_fbd_regs.spcpc = SPCPC_RD(branch);
984 	if (nb_chipset == INTEL_NB_7300 || nb_chipset == INTEL_NB_5400) {
985 		rp->nb.nf_fbd_regs.cerrcnta = CERRCNTA_RD(branch, channel);
986 		rp->nb.nf_fbd_regs.cerrcntb = CERRCNTB_RD(branch, channel);
987 		rp->nb.nf_fbd_regs.cerrcntc = CERRCNTC_RD(branch, channel);
988 		rp->nb.nf_fbd_regs.cerrcntd = CERRCNTD_RD(branch, channel);
989 	} else {
990 		rp->nb.nf_fbd_regs.cerrcnta = CERRCNT_RD(branch);
991 		rp->nb.nf_fbd_regs.cerrcntb = 0;
992 		rp->nb.nf_fbd_regs.cerrcntc = 0;
993 		rp->nb.nf_fbd_regs.cerrcntd = 0;
994 	}
995 	rp->nb.nf_fbd_regs.cerrcnta_last = cerrcnta[branch][channel & 1];
996 	rp->nb.nf_fbd_regs.cerrcntb_last = cerrcntb[branch][channel & 1];
997 	rp->nb.nf_fbd_regs.cerrcntc_last = cerrcntc[branch][channel & 1];
998 	rp->nb.nf_fbd_regs.cerrcntd_last = cerrcntd[branch][channel & 1];
999 	cerrcnta[branch][channel & 1] = rp->nb.nf_fbd_regs.cerrcnta;
1000 	cerrcntb[branch][channel & 1] = rp->nb.nf_fbd_regs.cerrcntb;
1001 	cerrcntc[branch][channel & 1] = rp->nb.nf_fbd_regs.cerrcntc;
1002 	cerrcntd[branch][channel & 1] = rp->nb.nf_fbd_regs.cerrcntd;
1003 	rp->nb.nf_fbd_regs.badrama = BADRAMA_RD(branch);
1004 	rp->nb.nf_fbd_regs.badramb = BADRAMB_RD(branch);
1005 	rp->nb.nf_fbd_regs.badcnt = BADCNT_RD(branch);
1006 	if (rp->nb.nf_fbd_regs.ferr_nf_fbd || *interpose)
1007 		FERR_NF_FBD_WR(rp->nb.nf_fbd_regs.ferr_nf_fbd);
1008 	if (rp->nb.nf_fbd_regs.nerr_nf_fbd)
1009 		NERR_NF_FBD_WR(rp->nb.nf_fbd_regs.nerr_nf_fbd);
1010 	/* if interposing, write read-only registers to clear the interpose cache */
1011 	if (*interpose) {
1012 		RECMEMA_WR(branch);
1013 		RECMEMB_WR(branch);
1014 		RECFGLOG_WR(branch);
1015 		RECFBDA_WR(branch);
1016 		RECFBDB_WR(branch);
1017 		RECFBDC_WR(branch);
1018 		RECFBDD_WR(branch);
1019 		RECFBDE_WR(branch);
1020 		RECFBDF_WR(branch);
1021 		SPCPS_WR(branch);
1022 	}
1023 }
1024 
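/*
 * Log the first error indicated by the global FERR value, clear the
 * corresponding bits from the caller's copy of NERR, and dispatch the
 * captured registers to the error queue unless the error was spurious.
 */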
1025 static void
1026 log_ferr(uint64_t ferr, uint32_t *nerrp, nb_logout_t *log, int willpanic)
1027 {
1028 	nb_regs_t *rp = &log->nb_regs;
1029 	uint32_t nerr = *nerrp;
1030 	int interpose = 0;
1031 	int spurious = 0;
1032 
1033 	log->acl_timestamp = gethrtime_waitfree();
1034 	if ((ferr & (GE_PCIEX_FATAL | GE_PCIEX_NF)) != 0) {
1035 		log_pex_err(ferr, rp, &interpose);
1036 		*nerrp = nerr & ~(GE_PCIEX_FATAL | GE_PCIEX_NF);
1037 	} else if ((ferr & GE_FBD_FATAL) != 0) {
1038 		log_fat_fbd_err(rp, &interpose);
1039 		*nerrp = nerr & ~GE_NERR_FBD_FATAL;
1040 	} else if ((ferr & GE_FBD_NF) != 0) {
1041 		log_nf_fbd_err(rp, &interpose);
1042 		*nerrp = nerr & ~GE_NERR_FBD_NF;
1043 	} else if ((ferr & (GE_FERR_FSB_FATAL | GE_FERR_FSB_NF)) != 0) {
1044 		log_fsb_err(ferr, rp, &interpose);
1045 		*nerrp = nerr & ~(GE_NERR_FSB_FATAL | GE_NERR_FSB_NF);
1046 	} else if ((ferr & (GE_DMA_FATAL | GE_DMA_NF)) != 0) {
1047 		log_dma_err(rp, &interpose);
1048 		*nerrp = nerr & ~(GE_DMA_FATAL | GE_DMA_NF);
1049 	} else if ((ferr & (GE_INT_FATAL | GE_INT_NF)) != 0) {
1050 		spurious = log_int_err(rp, &interpose);
1051 		*nerrp = nerr & ~(GE_INT_FATAL | GE_INT_NF);
1052 	} else if (nb_chipset == INTEL_NB_5400 &&
1053 	    (ferr & (GE_FERR_THERMAL_FATAL | GE_FERR_THERMAL_NF)) != 0) {
1054 		log_thermal_err(rp, &interpose);
1055 		*nerrp = nerr & ~(GE_FERR_THERMAL_FATAL | GE_FERR_THERMAL_NF);
1056 	}
1057 	if (interpose)
1058 		log->type = "inject";
1059 	else
1060 		log->type = "error";
1061 	if (!spurious) {
1062 		errorq_dispatch(nb_queue, log, sizeof (nb_logout_t),
1063 		    willpanic ? ERRORQ_SYNC : ERRORQ_ASYNC);
1064 	}
1065 }
1066 
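/*
 * Log an error indicated only by the global NERR value (FERR was
 * clear), removing the handled bits from *errp and dispatching the
 * captured registers to the error queue unless the error was spurious.
 */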
1067 static void
1068 log_nerr(uint32_t *errp, nb_logout_t *log, int willpanic)
1069 {
1070 	uint32_t err;
1071 	nb_regs_t *rp = &log->nb_regs;
1072 	int interpose = 0;
1073 	int spurious = 0;
1074 
1075 	err = *errp;
1076 	log->acl_timestamp = gethrtime_waitfree();
1077 	if ((err & (GE_PCIEX_FATAL | GE_PCIEX_NF)) != 0) {
1078 		log_pex_err(err, rp, &interpose);
1079 		*errp = err & ~(GE_PCIEX_FATAL | GE_PCIEX_NF);
1080 	} else if ((err & GE_NERR_FBD_FATAL) != 0) {
1081 		log_fat_fbd_err(rp, &interpose);
1082 		*errp = err & ~GE_NERR_FBD_FATAL;
1083 	} else if ((err & GE_NERR_FBD_NF) != 0) {
1084 		log_nf_fbd_err(rp, &interpose);
1085 		*errp = err & ~GE_NERR_FBD_NF;
1086 	} else if ((err & (GE_NERR_FSB_FATAL | GE_NERR_FSB_NF)) != 0) {
1087 		log_fsb_err(GE_NERR_TO_FERR_FSB(err), rp, &interpose);
1088 		*errp = err & ~(GE_NERR_FSB_FATAL | GE_NERR_FSB_NF);
1089 	} else if ((err & (GE_DMA_FATAL | GE_DMA_NF)) != 0) {
1090 		log_dma_err(rp, &interpose);
1091 		*errp = err & ~(GE_DMA_FATAL | GE_DMA_NF);
1092 	} else if ((err & (GE_INT_FATAL | GE_INT_NF)) != 0) {
1093 		spurious = log_int_err(rp, &interpose);
1094 		*errp = err & ~(GE_INT_FATAL | GE_INT_NF);
1095 	}
1096 	if (interpose)
1097 		log->type = "inject";
1098 	else
1099 		log->type = "error";
1100 	if (!spurious) {
1101 		errorq_dispatch(nb_queue, log, sizeof (nb_logout_t),
1102 		    willpanic ? ERRORQ_SYNC : ERRORQ_ASYNC);
1103 	}
1104 }
1105 
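/*
 * North bridge error handler entry point: drain the global FERR and
 * NERR registers, logging up to NB_MAX_ERRORS errors.  If nothing was
 * logged and machine check signalling had been masked, it is restored.
 */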
1106 /*ARGSUSED*/
1107 void
1108 nb_error_trap(cmi_hdl_t hdl, boolean_t ismc, boolean_t willpanic)
1109 {
1110 	uint64_t ferr;
1111 	uint32_t nerr, err;
1112 	int nmc = 0;
1113 	int i;
1114 
1115 	if (mutex_tryenter(&nb_mutex) == 0)
1116 		return;
1117 
1118 	nerr = NERR_GLOBAL_RD();
1119 	err = nerr;
1120 	for (i = 0; i < NB_MAX_ERRORS; i++) {
1121 		ferr = FERR_GLOBAL_RD();
1122 		nb_log.nb_regs.chipset = nb_chipset;
1123 		nb_log.nb_regs.ferr = ferr;
1124 		nb_log.nb_regs.nerr = nerr;
1125 		if (ferr) {
1126 			log_ferr(ferr, &err, &nb_log, willpanic);
1127 			FERR_GLOBAL_WR(ferr);
1128 			nmc++;
1129 		} else if (err) {
1130 			log_nerr(&err, &nb_log, willpanic);
1131 			nmc++;
1132 		}
1133 	}
1134 	if (nerr) {
1135 		NERR_GLOBAL_WR(nerr);
1136 	}
1137 	if (nmc == 0 && nb_mask_mc_set)
1138 		nb_mask_mc_reset();
1139 	mutex_exit(&nb_mutex);
1140 }
1141 
1142 static void
1143 nb_fsb_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
1144     nb_scatchpad_t *data)
1145 {
1146 	int intel_error_list;
1147 	char buf[32];
1148 
1149 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FSB,
1150 	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.fsb, NULL);
1151 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_FAT_FSB,
1152 	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.ferr_fat_fsb, NULL);
1153 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_FAT_FSB,
1154 	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.nerr_fat_fsb, NULL);
1155 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_FSB,
1156 	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.ferr_nf_fsb, NULL);
1157 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_FSB,
1158 	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.nerr_nf_fsb, NULL);
1159 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFSB,
1160 	    DATA_TYPE_UINT32, nb_regs->nb.fsb_regs.nrecfsb, NULL);
1161 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFSB_ADDR,
1162 	    DATA_TYPE_UINT64, nb_regs->nb.fsb_regs.nrecfsb_addr, NULL);
1163 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFSB,
1164 	    DATA_TYPE_UINT32, nb_regs->nb.fsb_regs.recfsb, NULL);
1165 	intel_error_list = data->intel_error_list;
1166 	if (intel_error_list >= 0)
1167 		(void) snprintf(buf, sizeof (buf), "F%d", intel_error_list);
1168 	else
1169 		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
1170 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
1171 	    DATA_TYPE_STRING, buf, NULL);
1172 }
1173 
1174 static void
1175 nb_pex_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
1176     nb_scatchpad_t *data)
1177 {
1178 	int intel_error_list;
1179 	char buf[32];
1180 
1181 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX,
1182 	    DATA_TYPE_UINT8, nb_regs->nb.pex_regs.pex, NULL);
1183 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX_FAT_FERR,
1184 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.pex_fat_ferr, NULL);
1185 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX_FAT_NERR,
1186 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.pex_fat_nerr, NULL);
1187 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX_NF_CORR_FERR,
1188 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.pex_nf_corr_ferr, NULL);
1189 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX_NF_CORR_NERR,
1190 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.pex_nf_corr_nerr, NULL);
1191 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UNCERRSEV,
1192 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.uncerrsev, NULL);
1193 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RPERRSTS,
1194 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.rperrsts, NULL);
1195 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RPERRSID,
1196 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.rperrsid, NULL);
1197 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UNCERRSTS,
1198 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.uncerrsts, NULL);
1199 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_AERRCAPCTRL,
1200 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.aerrcapctrl, NULL);
1201 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CORERRSTS,
1202 	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.corerrsts, NULL);
1203 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEXDEVSTS,
1204 	    DATA_TYPE_UINT16, nb_regs->nb.pex_regs.pexdevsts, NULL);
1205 	intel_error_list = data->intel_error_list;
1206 	if (intel_error_list >= 0)
1207 		(void) snprintf(buf, sizeof (buf), "IO%d", intel_error_list);
1208 	else
1209 		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
1210 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
1211 	    DATA_TYPE_STRING, buf, NULL);
1212 }
1213 
1214 static void
1215 nb_int_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
1216     nb_scatchpad_t *data)
1217 {
1218 	int intel_error_list;
1219 	char buf[32];
1220 
1221 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_FAT_INT,
1222 	    DATA_TYPE_UINT16, nb_regs->nb.int_regs.ferr_fat_int, NULL);
1223 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_INT,
1224 	    DATA_TYPE_UINT16, nb_regs->nb.int_regs.ferr_nf_int, NULL);
1225 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_FAT_INT,
1226 	    DATA_TYPE_UINT16, nb_regs->nb.int_regs.nerr_fat_int, NULL);
1227 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_INT,
1228 	    DATA_TYPE_UINT16, nb_regs->nb.int_regs.nerr_nf_int, NULL);
1229 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECINT,
1230 	    DATA_TYPE_UINT32, nb_regs->nb.int_regs.nrecint, NULL);
1231 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECINT,
1232 	    DATA_TYPE_UINT32, nb_regs->nb.int_regs.recint, NULL);
1233 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECSF,
1234 	    DATA_TYPE_UINT64, nb_regs->nb.int_regs.nrecsf, NULL);
1235 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECSF,
1236 	    DATA_TYPE_UINT64, nb_regs->nb.int_regs.recsf, NULL);
1237 	intel_error_list = data->intel_error_list;
1238 	if (intel_error_list >= 0)
1239 		(void) snprintf(buf, sizeof (buf), "B%d", intel_error_list);
1240 	else
1241 		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
1242 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
1243 	    DATA_TYPE_STRING, buf, NULL);
1244 }
1245 
1246 static void
1247 nb_fat_fbd_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
1248     nb_scatchpad_t *data)
1249 {
1250 	nb_mem_scatchpad_t *sp;
1251 	char buf[32];
1252 
1253 	sp = &((nb_scatchpad_t *)data)->ms;
1254 
1255 	if (sp->ras != -1) {
1256 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BANK,
1257 		    DATA_TYPE_INT32, sp->bank, NULL);
1258 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CAS,
1259 		    DATA_TYPE_INT32, sp->cas, NULL);
1260 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RAS,
1261 		    DATA_TYPE_INT32, sp->ras, NULL);
1262 		if (sp->offset != -1LL) {
1263 			fm_payload_set(payload, FM_FMRI_MEM_OFFSET,
1264 			    DATA_TYPE_UINT64, sp->offset, NULL);
1265 		}
1266 		if (sp->pa != -1LL) {
1267 			fm_payload_set(payload, FM_FMRI_MEM_PHYSADDR,
1268 			    DATA_TYPE_UINT64, sp->pa, NULL);
1269 		}
1270 	}
1271 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_FAT_FBD,
1272 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.ferr_fat_fbd, NULL);
1273 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_FAT_FBD,
1274 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nerr_fat_fbd, NULL);
1275 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECMEMA,
1276 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecmema, NULL);
1277 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECMEMB,
1278 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecmemb, NULL);
1279 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFGLOG,
1280 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfglog, NULL);
1281 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDA,
1282 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbda, NULL);
1283 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDB,
1284 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbdb, NULL);
1285 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDC,
1286 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbdc, NULL);
1287 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDD,
1288 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbdd, NULL);
1289 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDE,
1290 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbde, NULL);
1291 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDF,
1292 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbdf, NULL);
1293 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPS,
1294 	    DATA_TYPE_UINT8, nb_regs->nb.fat_fbd_regs.spcps, NULL);
1295 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPC,
1296 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.spcpc, NULL);
1297 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UERRCNT,
1298 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.uerrcnt, NULL);
1299 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UERRCNT_LAST,
1300 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.uerrcnt_last, NULL);
1301 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAMA,
1302 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.badrama, NULL);
1303 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAMB,
1304 	    DATA_TYPE_UINT16, nb_regs->nb.fat_fbd_regs.badramb, NULL);
1305 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADCNT,
1306 	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.badcnt, NULL);
1307 
1308 	if (sp->intel_error_list >= 0)
1309 		(void) snprintf(buf, sizeof (buf), "M%d", sp->intel_error_list);
1310 	else
1311 		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
1312 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
1313 	    DATA_TYPE_STRING, buf, NULL);
1314 }
1315 
1316 static void
1317 nb_nf_fbd_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
1318     nb_scatchpad_t *data)
1319 {
1320 	nb_mem_scatchpad_t *sp;
1321 	char buf[32];
1322 
1323 	sp = &((nb_scatchpad_t *)data)->ms;
1324 
1325 	if (sp->dimm == -1 && sp->rank != -1) {
1326 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RANK,
1327 		    DATA_TYPE_INT32, sp->rank, NULL);
1328 	}
1329 	if (sp->ras != -1) {
1330 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BANK,
1331 		    DATA_TYPE_INT32, sp->bank, NULL);
1332 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CAS,
1333 		    DATA_TYPE_INT32, sp->cas, NULL);
1334 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RAS,
1335 		    DATA_TYPE_INT32, sp->ras, NULL);
1336 		if (sp->offset != -1LL) {
1337 			fm_payload_set(payload, FM_FMRI_MEM_OFFSET,
1338 			    DATA_TYPE_UINT64, sp->offset, NULL);
1339 		}
1340 		if (sp->pa != -1LL) {
1341 			fm_payload_set(payload, FM_FMRI_MEM_PHYSADDR,
1342 			    DATA_TYPE_UINT64, sp->pa, NULL);
1343 		}
1344 	}
1345 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_FBD,
1346 	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.ferr_nf_fbd, NULL);
1347 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_FBD,
1348 	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.nerr_nf_fbd, NULL);
1349 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECMEMA,
1350 	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recmema, NULL);
1351 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECMEMB,
1352 	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recmemb, NULL);
1353 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFGLOG,
1354 	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfglog, NULL);
1355 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDA,
1356 	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbda, NULL);
1357 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDB,
1358 	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbdb, NULL);
1359 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDC,
1360 	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbdc, NULL);
1361 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDD,
1362 	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbdd, NULL);
1363 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDE,
1364 	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbde, NULL);
1365 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDF,
1366 	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbdf, NULL);
1367 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPS,
1368 	    DATA_TYPE_UINT8, nb_regs->nb.nf_fbd_regs.spcps, NULL);
1369 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPC,
1370 	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.spcpc, NULL);
1371 	if (nb_chipset == INTEL_NB_7300 || nb_chipset == INTEL_NB_5400) {
1372 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNTA,
1373 		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcnta, NULL);
1374 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNTB,
1375 		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcntb, NULL);
1376 		if (nb_chipset == INTEL_NB_7300) {
1377 			fm_payload_set(payload,
1378 			    FM_EREPORT_PAYLOAD_NAME_CERRCNTC,
1379 			    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcntc,
1380 			    NULL);
1381 			fm_payload_set(payload,
1382 			    FM_EREPORT_PAYLOAD_NAME_CERRCNTD,
1383 			    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcntd,
1384 			    NULL);
1385 		}
1386 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNTA_LAST,
1387 		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcnta_last,
1388 		    NULL);
1389 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNTB_LAST,
1390 		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcntb_last,
1391 		    NULL);
1392 		if (nb_chipset == INTEL_NB_7300) {
1393 			fm_payload_set(payload,
1394 			    FM_EREPORT_PAYLOAD_NAME_CERRCNTC_LAST,
1395 			    DATA_TYPE_UINT32,
1396 			    nb_regs->nb.nf_fbd_regs.cerrcntc_last, NULL);
1397 			fm_payload_set(payload,
1398 			    FM_EREPORT_PAYLOAD_NAME_CERRCNTD_LAST,
1399 			    DATA_TYPE_UINT32,
1400 			    nb_regs->nb.nf_fbd_regs.cerrcntd_last, NULL);
1401 		}
1402 	} else {
1403 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT,
1404 		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcnta, NULL);
1405 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT_LAST,
1406 		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcnta_last,
1407 		    NULL);
1408 	}
1409 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAMA,
1410 	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.badrama, NULL);
1411 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAMB,
1412 	    DATA_TYPE_UINT16, nb_regs->nb.nf_fbd_regs.badramb, NULL);
1413 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADCNT,
1414 	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.badcnt, NULL);
1415 
1416 	if (sp->intel_error_list >= 0)
1417 		(void) snprintf(buf, sizeof (buf), "M%d", sp->intel_error_list);
1418 	else
1419 		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
1420 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
1421 	    DATA_TYPE_STRING, buf, NULL);
1422 }
1423 
1424 static void
1425 nb_dma_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload)
1426 {
1427 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PCISTS,
1428 	    DATA_TYPE_UINT16, nb_regs->nb.dma_regs.pcists, NULL);
1429 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEXDEVSTS,
1430 	    DATA_TYPE_UINT16, nb_regs->nb.dma_regs.pexdevsts, NULL);
1431 }
1432 
1433 static void
1434 nb_thr_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
1435     nb_scatchpad_t *data)
1436 {
1437 	char buf[32];
1438 
1439 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_FAT_THR,
1440 	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.ferr_fat_thr, NULL);
1441 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_FAT_THR,
1442 	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.nerr_fat_thr, NULL);
1443 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_THR,
1444 	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.ferr_nf_thr, NULL);
1445 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_THR,
1446 	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.nerr_nf_thr, NULL);
1447 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CTSTS,
1448 	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.ctsts, NULL);
1449 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_THRTSTS,
1450 	    DATA_TYPE_UINT16, nb_regs->nb.thr_regs.thrtsts, NULL);
1451 	if (data->intel_error_list >= 0) {
1452 		(void) snprintf(buf, sizeof (buf), "TH%d",
1453 		    data->intel_error_list);
1454 	} else {
1455 		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
1456 	}
1457 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
1458 	    DATA_TYPE_STRING, buf, NULL);
1459 }
1460 
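/*
 * Add the captured register contents for the logged error type to the
 * ereport payload.
 */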
1461 static void
1462 nb_ereport_add_logout(nvlist_t *payload, const nb_logout_t *acl,
1463     nb_scatchpad_t *data)
1464 {
1465 	const nb_regs_t *nb_regs = &acl->nb_regs;
1466 
1467 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_MC_TYPE,
1468 	    DATA_TYPE_STRING, acl->type, NULL);
1469 	switch (nb_regs->flag) {
1470 	case NB_REG_LOG_FSB:
1471 		nb_fsb_err_payload(nb_regs, payload, data);
1472 		break;
1473 	case NB_REG_LOG_PEX:
1474 		nb_pex_err_payload(nb_regs, payload, data);
1475 		break;
1476 	case NB_REG_LOG_INT:
1477 		nb_int_err_payload(nb_regs, payload, data);
1478 		break;
1479 	case NB_REG_LOG_FAT_FBD:
1480 		nb_fat_fbd_err_payload(nb_regs, payload, data);
1481 		break;
1482 	case NB_REG_LOG_NF_FBD:
1483 		nb_nf_fbd_err_payload(nb_regs, payload, data);
1484 		break;
1485 	case NB_REG_LOG_DMA:
1486 		nb_dma_err_payload(nb_regs, payload);
1487 		break;
1488 	case NB_REG_LOG_THR:
1489 		nb_thr_err_payload(nb_regs, payload, data);
1490 		break;
1491 	default:
1492 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_GLOBAL,
1493 		    DATA_TYPE_UINT64, nb_regs->ferr, NULL);
1494 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_GLOBAL,
1495 		    DATA_TYPE_UINT32, nb_regs->nerr, NULL);
1496 		break;
1497 	}
1498 }
1499 
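/*
 * Build the detector FMRI and ereport class for an FSB error and record
 * the decoded Chipset Error List number in the scratchpad; the other
 * nb_*_report() functions below follow the same pattern for their
 * error sources.
 */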
1500 void
1501 nb_fsb_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
1502     nb_scatchpad_t *data)
1503 {
1504 	int chip;
1505 
1506 	if (nb_chipset == INTEL_NB_7300)
1507 		chip = nb_regs->nb.fsb_regs.fsb * 2;
1508 	else
1509 		chip = nb_regs->nb.fsb_regs.fsb;
1510 	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
1511 	    "motherboard", 0, "chip", chip);
1512 
1513 	if (nb_regs->nb.fsb_regs.ferr_fat_fsb == 0 &&
1514 	    nb_regs->nb.fsb_regs.ferr_nf_fsb == 0) {
1515 		data->intel_error_list = intel_fsb_err(nb_regs->nb.fsb_regs.fsb,
1516 		    nb_regs->nb.fsb_regs.nerr_fat_fsb,
1517 		    nb_regs->nb.fsb_regs.nerr_nf_fsb);
1518 	} else {
1519 		data->intel_error_list = intel_fsb_err(nb_regs->nb.fsb_regs.fsb,
1520 		    nb_regs->nb.fsb_regs.ferr_fat_fsb,
1521 		    nb_regs->nb.fsb_regs.ferr_nf_fsb);
1522 	}
1523 	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
1524 	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "fsb");
1525 }
1526 
1527 void
1528 nb_pex_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
1529     nb_scatchpad_t *data)
1530 {
1531 	int hostbridge;
1532 
1533 	if (nb_regs->nb.pex_regs.pex == 0) {
1534 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
1535 		    "motherboard", 0);
1536 	} else {
1537 		hostbridge = nb_regs->nb.pex_regs.pex - 1;
1538 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
1539 		    "motherboard", 0,
1540 		    "hostbridge", hostbridge);
1541 	}
1542 
1543 	if (nb_regs->nb.pex_regs.pex_fat_ferr == 0 &&
1544 	    nb_regs->nb.pex_regs.pex_nf_corr_ferr == 0) {
1545 		if (nb_chipset == INTEL_NB_5400) {
1546 			data->intel_error_list =
1547 			    intel_pex_5400_err(
1548 			    nb_regs->nb.pex_regs.pex_fat_nerr,
1549 			    nb_regs->nb.pex_regs.pex_nf_corr_nerr);
1550 		} else {
1551 			data->intel_error_list =
1552 			    intel_pex_err(nb_regs->nb.pex_regs.pex_fat_nerr,
1553 			    nb_regs->nb.pex_regs.pex_nf_corr_nerr);
1554 		}
1555 	} else {
1556 		if (nb_chipset == INTEL_NB_5400) {
1557 			data->intel_error_list =
1558 			    intel_pex_5400_err(
1559 			    nb_regs->nb.pex_regs.pex_fat_ferr,
1560 			    nb_regs->nb.pex_regs.pex_nf_corr_ferr);
1561 		} else {
1562 			data->intel_error_list =
1563 			    intel_pex_err(nb_regs->nb.pex_regs.pex_fat_ferr,
1564 			    nb_regs->nb.pex_regs.pex_nf_corr_ferr);
1565 		}
1566 	}
1567 
1568 	if (nb_regs->nb.pex_regs.pex == 0) {
1569 		(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
1570 		    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "esi");
1571 	} else {
1572 		(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
1573 		    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "pex");
1574 	}
1575 }
1576 
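/*
 * Report a north bridge internal error: the detector is the
 * motherboard and the class is nb.ie.  The error-list entry is decoded
 * from the FERR registers, or from the NERR registers when no first
 * error has been latched.
 */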
1577 void
1578 nb_int_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
1579     void *data)
1580 {
1581 	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
1582 	    "motherboard", 0);
1583 
1584 	if (nb_regs->nb.int_regs.ferr_fat_int == 0 &&
1585 	    nb_regs->nb.int_regs.ferr_nf_int == 0) {
1586 		((nb_scatchpad_t *)data)->intel_error_list =
1587 		    intel_int_err(nb_regs->nb.int_regs.nerr_fat_int,
1588 		    nb_regs->nb.int_regs.nerr_nf_int);
1589 	} else {
1590 		((nb_scatchpad_t *)data)->intel_error_list =
1591 		    intel_int_err(nb_regs->nb.int_regs.ferr_fat_int,
1592 		    nb_regs->nb.int_regs.ferr_nf_int);
1593 	}
1594 	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
1595 	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "ie");
1596 }
1597 
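/*
 * Report a fatal FB-DIMM error.  fat_memory_error() decodes the raw
 * registers into the memory scratchpad; the detector FMRI is then
 * built down to the most specific component identified (dimm/rank,
 * channel, branch, or just the motherboard) and the decoded error
 * name supplies the class suffix.
 */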
1598 void
1599 nb_fat_fbd_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
1600     void *data)
1601 {
1602 	char *intr;
1603 	nb_mem_scatchpad_t *sp;
1604 
1605 	intr = fat_memory_error(nb_regs, data);
1606 	sp = &((nb_scatchpad_t *)data)->ms;
1607 
1608 	if (sp->dimm != -1) {
1609 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 5,
1610 		    "motherboard", 0,
1611 		    "memory-controller", sp->branch,
1612 		    "dram-channel", sp->channel,
1613 		    "dimm", sp->dimm,
1614 		    "rank", sp->rank);
1615 	} else if (sp->channel != -1) {
1616 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 3,
1617 		    "motherboard", 0,
1618 		    "memory-controller", sp->branch,
1619 		    "dram-channel", sp->channel);
1620 	} else if (sp->branch != -1) {
1621 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
1622 		    "motherboard", 0,
1623 		    "memory-controller", sp->branch);
1624 	} else {
1625 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
1626 		    "motherboard", 0);
1627 	}
1628 
1629 	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s",
1630 	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, intr);
1631 }
1632 
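/*
 * Report a non-fatal FB-DIMM error.  Identical in structure to
 * nb_fat_fbd_report() except that nf_memory_error() performs the
 * decode.
 */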
1633 void
1634 nb_nf_fbd_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
1635     void *data)
1636 {
1637 	char *intr;
1638 	nb_mem_scatchpad_t *sp;
1639 
1640 	intr = nf_memory_error(nb_regs, data);
1641 	sp = &((nb_scatchpad_t *)data)->ms;
1642 
1643 	if (sp->dimm != -1) {
1644 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 5,
1645 		    "motherboard", 0,
1646 		    "memory-controller", sp->branch,
1647 		    "dram-channel", sp->channel,
1648 		    "dimm", sp->dimm,
1649 		    "rank", sp->rank);
1650 	} else if (sp->channel != -1) {
1651 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 3,
1652 		    "motherboard", 0,
1653 		    "memory-controller", sp->branch,
1654 		    "dram-channel", sp->channel);
1655 	} else if (sp->branch != -1) {
1656 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
1657 		    "motherboard", 0,
1658 		    "memory-controller", sp->branch);
1659 	} else {
1660 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
1661 		    "motherboard", 0);
1662 	}
1663 
1664 	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s",
1665 	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, intr);
1666 }
1667 
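/*
 * Report a DMA engine error against the motherboard with class nb.dma.
 */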
1668 void
1669 nb_dma_report(char *class, nvlist_t *detector)
1670 {
1671 	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
1672 	    "motherboard", 0);
1673 
1674 	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
1675 	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "dma");
1676 }
1677 
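/*
 * Report a thermal (THR) error: decode the error-list entry from the
 * fatal and non-fatal thermal FERR registers, set the detector to the
 * motherboard and the class to nb.otf.
 */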
1678 void
1679 nb_thr_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
1680     void *data)
1681 {
1682 	((nb_scatchpad_t *)data)->intel_error_list =
1683 	    intel_thr_err(nb_regs->nb.thr_regs.ferr_fat_thr,
1684 	    nb_regs->nb.thr_regs.ferr_nf_thr);
1685 	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
1686 	    "motherboard", 0);
1687 
1688 	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
1689 	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "otf");
1690 }
1691 
1692 
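/*
 * Construct the detector FMRI and ereport class for the error recorded
 * in the logout registers, dispatching on the log type to the
 * per-error report routine above; unknown types are reported against
 * the motherboard with class nb.unknown.  Returns the detector nvlist;
 * 'class' must point to a buffer of at least FM_MAX_CLASS bytes.
 */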
1693 nvlist_t *
1694 nb_report(const nb_regs_t *nb_regs, char *class, nv_alloc_t *nva, void *scratch)
1695 {
1696 	nvlist_t *detector = fm_nvlist_create(nva);
1697 
1698 	switch (nb_regs->flag) {
1699 	case NB_REG_LOG_FSB:
1700 		nb_fsb_report(nb_regs, class, detector, scratch);
1701 		break;
1702 	case NB_REG_LOG_PEX:
1703 		nb_pex_report(nb_regs, class, detector, scratch);
1704 		break;
1705 	case NB_REG_LOG_INT:
1706 		nb_int_report(nb_regs, class, detector, scratch);
1707 		break;
1708 	case NB_REG_LOG_FAT_FBD:
1709 		nb_fat_fbd_report(nb_regs, class, detector, scratch);
1710 		break;
1711 	case NB_REG_LOG_NF_FBD:
1712 		nb_nf_fbd_report(nb_regs, class, detector, scratch);
1713 		break;
1714 	case NB_REG_LOG_DMA:
1715 		nb_dma_report(class, detector);
1716 		break;
1717 	case NB_REG_LOG_THR:
1718 		nb_thr_report(nb_regs, class, detector, scratch);
1719 		break;
1720 	default:
1721 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
1722 		    "motherboard", 0);
1723 
1724 		(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
1725 		    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "unknown");
1726 	}
1727 	return (detector);
1728 }
1729 
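/*
 * Errorq drain callback: build and post an ereport for a single logged
 * error.  At panic time the ereport and any scratch nvlist space come
 * from reserved ereport_errorq elements rather than from normal
 * allocation, and the ereport is committed synchronously; otherwise
 * the nvlists are allocated with fm_nvlist_create(), and the ereport
 * is posted via fm_ereport_post() and then freed.
 */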
1730 /*ARGSUSED*/
1731 void
1732 nb_drain(void *ignored, const void *data, const errorq_elem_t *eqe)
1733 {
1734 	nb_logout_t *acl = (nb_logout_t *)data;
1735 	errorq_elem_t *eqep, *scr_eqep;
1736 	nvlist_t *ereport, *detector;
1737 	nv_alloc_t *nva = NULL;
1738 	char buf[FM_MAX_CLASS];
1739 	nb_scatchpad_t nb_scatchpad;
1740 
1741 	if (panicstr) {
1742 		if ((eqep = errorq_reserve(ereport_errorq)) == NULL)
1743 			return;
1744 		ereport = errorq_elem_nvl(ereport_errorq, eqep);
1745 		/*
1746 		 * Now try to reserve a second errorq element and use it as
1747 		 * scratch space (e.g. for constructing nvlists to add to
1748 		 * the main ereport).  If we can't reserve a scratch
1749 		 * element, just fall back to working within the element
1750 		 * we already have, and hope for the best.  All this is
1751 		 * necessary because the fixed-buffer nv allocator does not
1752 		 * reclaim freed space and nvlist construction is
1753 		 * expensive.
1754 		 */
1755 		if ((scr_eqep = errorq_reserve(ereport_errorq)) != NULL)
1756 			nva = errorq_elem_nva(ereport_errorq, scr_eqep);
1757 		else
1758 			nva = errorq_elem_nva(ereport_errorq, eqep);
1759 	} else {
1760 		ereport = fm_nvlist_create(NULL);
1761 	}
1762 	detector = nb_report(&acl->nb_regs, buf, nva, &nb_scatchpad);
1763 	if (detector == NULL)
1764 		return;
1765 	fm_ereport_set(ereport, FM_EREPORT_VERSION, buf,
1766 	    fm_ena_generate(acl->acl_timestamp, FM_ENA_FMT1), detector, NULL);
1767 	/*
1768 	 * We're done with 'detector', so reclaim or free its scratch space.
1769 	 */
1770 	if (panicstr) {
1771 		fm_nvlist_destroy(detector, FM_NVA_RETAIN);
1772 		nv_alloc_reset(nva);
1773 	} else {
1774 		fm_nvlist_destroy(detector, FM_NVA_FREE);
1775 	}
1776 
1777 	/*
1778 	 * Encode the error-specific data that was saved in the logout area.
1779 	 */
1780 	nb_ereport_add_logout(ereport, acl, &nb_scatchpad);
1781 
1782 	if (panicstr) {
1783 		errorq_commit(ereport_errorq, eqep, ERRORQ_SYNC);
1784 		if (scr_eqep)
1785 			errorq_cancel(ereport_errorq, scr_eqep);
1786 	} else {
1787 		(void) fm_ereport_post(ereport, EVCH_TRYHARD);
1788 		fm_nvlist_destroy(ereport, FM_NVA_FREE);
1789 	}
1790 }
1791