xref: /illumos-gate/usr/src/uts/intel/io/intel_nb5000/intel_nb5000.c (revision 46b592853d0f4f11781b6b0a7533f267c6aee132)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <sys/types.h>
28 #include <sys/cmn_err.h>
29 #include <sys/errno.h>
30 #include <sys/log.h>
31 #include <sys/systm.h>
32 #include <sys/modctl.h>
33 #include <sys/errorq.h>
34 #include <sys/controlregs.h>
35 #include <sys/fm/util.h>
36 #include <sys/fm/protocol.h>
37 #include <sys/sysevent.h>
38 #include <sys/pghw.h>
39 #include <sys/cyclic.h>
40 #include <sys/pci_cfgspace.h>
41 #include <sys/mc_intel.h>
42 #include <sys/smbios.h>
43 #include "nb5000.h"
44 #include "nb_log.h"
45 #include "dimm_phys.h"
46 
/*
 * When nonzero (the default), the recmem/nrecmem address logs are only
 * trusted when the hardware "validlog" bits mark them valid; set to 0
 * to use the logged rank/bank/ras/cas unconditionally.
 */
int nb_check_validlog = 1;

static uint32_t uerrcnt[2];	/* uncorrectable error counts */
static uint32_t cerrcnta[2][2];	/* correctable error counters a-d; */
static uint32_t cerrcntb[2][2];	/* NOTE(review): presumably indexed by */
static uint32_t cerrcntc[2][2];	/* branch/channel -- usage is not */
static uint32_t cerrcntd[2][2];	/* visible in this chunk, confirm */
static nb_logout_t nb_log;	/* scratch logout area for captured regs */
55 
/*
 * Maps one bit of a fault (FERR/NERR) register to the error number
 * assigned to it in the Intel Chipset Error List, together with the
 * corresponding bit in the machine-check error mask register.
 */
struct mch_error_code {
	int intel_error_list;	/* error number in Chipset Error List */
	uint32_t emask;		/* mask for machine check */
	uint32_t error_bit;	/* error bit in fault register */
};
61 
/* Fatal FB-DIMM (FERR_FAT_FBD) error bits and their error-list numbers */
static struct mch_error_code fat_fbd_error_code[] = {
	{ 23, EMASK_FBD_M23, ERR_FAT_FBD_M23 },
	{ 3, EMASK_FBD_M3, ERR_FAT_FBD_M3 },
	{ 2, EMASK_FBD_M2, ERR_FAT_FBD_M2 },
	{ 1, EMASK_FBD_M1, ERR_FAT_FBD_M1 }
};
68 
69 static int
70 intel_fat_fbd_err(uint32_t fat_fbd)
71 {
72 	int rt = -1;
73 	int nerr = 0;
74 	uint32_t emask_fbd = 0;
75 	int i;
76 	int sz;
77 
78 	sz = sizeof (fat_fbd_error_code) / sizeof (struct mch_error_code);
79 
80 	for (i = 0; i < sz; i++) {
81 		if (fat_fbd & fat_fbd_error_code[i].error_bit) {
82 			rt = fat_fbd_error_code[i].intel_error_list;
83 			emask_fbd |= fat_fbd_error_code[i].emask;
84 			nerr++;
85 		}
86 	}
87 
88 	if (emask_fbd)
89 		nb_fbd_mask_mc(emask_fbd);
90 	if (nerr > 1)
91 		rt = -1;
92 	return (rt);
93 }
94 
/*
 * Interpret the captured fatal FB-DIMM error registers in "rp" and fill
 * in the memory scratchpad ("data") with the decoded error location:
 * branch, channel, rank, dimm, bank, ras, cas, physical address and
 * unum offset.  Returns the ereport class suffix for the error, or
 * "nb.unknown" when no recognizable first-error bit is set.
 */
static char *
fat_memory_error(const nb_regs_t *rp, void *data)
{
	int channel;
	uint32_t ferr_fat_fbd, nrecmemb;
	uint32_t nrecmema;
	char *intr = "nb.unknown";
	nb_mem_scatchpad_t *sp = &((nb_scatchpad_t *)data)->ms;

	ferr_fat_fbd = rp->nb.fat_fbd_regs.ferr_fat_fbd;
	if ((ferr_fat_fbd & ERR_FAT_FBD_MASK) == 0) {
		/*
		 * No recognizable first error: decode the next-error
		 * register instead and invalidate all location fields.
		 */
		sp->intel_error_list =
		    intel_fat_fbd_err(rp->nb.fat_fbd_regs.nerr_fat_fbd);
		sp->branch = -1;
		sp->channel = -1;
		sp->rank = -1;
		sp->dimm = -1;
		sp->bank = -1;
		sp->cas = -1;
		sp->ras = -1;
		sp->pa = -1LL;
		sp->offset = -1;
		return (intr);
	}
	sp->intel_error_list = intel_fat_fbd_err(ferr_fat_fbd);
	/* bits 29:28 of FERR_FAT_FBD identify the failing channel */
	channel = (ferr_fat_fbd >> 28) & 3;
	sp->branch = channel >> 1;	/* two channels per branch */
	sp->channel = channel;
	if ((ferr_fat_fbd & (ERR_FAT_FBD_M2|ERR_FAT_FBD_M1)) != 0) {
		if ((ferr_fat_fbd & ERR_FAT_FBD_M1) != 0)
			intr = "nb.fbd.alert";	/* Alert on FB-DIMM M1 */
		else
			intr = "nb.fbd.crc";	/* CRC error FB_DIMM M2 */
		/* location comes from the non-recoverable memory logs */
		nrecmema = rp->nb.fat_fbd_regs.nrecmema;
		nrecmemb = rp->nb.fat_fbd_regs.nrecmemb;
		sp->rank = (nrecmema >> 8) & RANK_MASK;
		sp->dimm = sp->rank >> 1;	/* two ranks per dimm */
		sp->bank = (nrecmema >> 12) & BANK_MASK;
		sp->cas = (nrecmemb >> 16) & CAS_MASK;
		sp->ras = nrecmemb & RAS_MASK;
		/*
		 * If driver was built with closed tree present then we will
		 * have Intel proprietary code for finding physaddr
		 */
		if (&dimm_getphys) {
			sp->pa = dimm_getphys((uint16_t)sp->branch,
			    (uint16_t)sp->rank, (uint64_t)sp->bank,
			    (uint64_t)sp->ras, (uint64_t)sp->cas);
			if (sp->pa >= MAXPHYS_ADDR)
				sp->pa = -1ULL;
		} else {
			sp->pa = -1ULL;
		}
		/*
		 * If there is an offset decoder use it otherwise encode
		 * rank/bank/ras/cas
		 */
		if (&dimm_getoffset) {
			sp->offset = dimm_getoffset(sp->branch, sp->rank,
			    sp->bank, sp->ras, sp->cas);
		} else {
			sp->offset = TCODE_OFFSET(sp->rank, sp->bank, sp->ras,
			    sp->cas);
		}
	} else {
		/* no memory address logged for these error types */
		if ((ferr_fat_fbd & ERR_FAT_FBD_M3) != 0)
			intr = "nb.fbd.otf";	/* thermal temp > Tmid M3 */
		else if ((ferr_fat_fbd & ERR_FAT_FBD_M23) != 0) {
			intr = "nb.fbd.reset_timeout";
			sp->channel = -1;
		}
		sp->rank = -1;
		sp->dimm = -1;
		sp->bank = -1;
		sp->cas = -1;
		sp->ras = -1;
		sp->pa = -1LL;
		sp->offset = -1;
	}
	return (intr);
}
176 
177 
/* Non-fatal FB-DIMM (FERR_NF_FBD) error bits and their list numbers */
static struct mch_error_code nf_fbd_error_code[] = {
	{ 29, EMASK_FBD_M29, ERR_NF_FBD_M29 },
	{ 28, EMASK_FBD_M28, ERR_NF_FBD_M28 },
	{ 27, EMASK_FBD_M27, ERR_NF_FBD_M27 },
	{ 26, EMASK_FBD_M26, ERR_NF_FBD_M26 },
	{ 25, EMASK_FBD_M25, ERR_NF_FBD_M25 },
	{ 24, EMASK_FBD_M24, ERR_NF_FBD_M24 },
	{ 22, EMASK_FBD_M22, ERR_NF_FBD_M22 },
	{ 21, EMASK_FBD_M21, ERR_NF_FBD_M21 },
	{ 20, EMASK_FBD_M20, ERR_NF_FBD_M20 },
	{ 19, EMASK_FBD_M19, ERR_NF_FBD_M19 },
	{ 18, EMASK_FBD_M18, ERR_NF_FBD_M18 },
	{ 17, EMASK_FBD_M17, ERR_NF_FBD_M17 },
	{ 16, EMASK_FBD_M16, ERR_NF_FBD_M16 },
	{ 15, EMASK_FBD_M15, ERR_NF_FBD_M15 },
	{ 14, EMASK_FBD_M14, ERR_NF_FBD_M14 },
	{ 13, EMASK_FBD_M13, ERR_NF_FBD_M13 },
	{ 12, EMASK_FBD_M12, ERR_NF_FBD_M12 },
	{ 11, EMASK_FBD_M11, ERR_NF_FBD_M11 },
	{ 10, EMASK_FBD_M10, ERR_NF_FBD_M10 },
	{ 9, EMASK_FBD_M9, ERR_NF_FBD_M9 },
	{ 8, EMASK_FBD_M8, ERR_NF_FBD_M8 },
	{ 7, EMASK_FBD_M7, ERR_NF_FBD_M7 },
	{ 6, EMASK_FBD_M6, ERR_NF_FBD_M6 },
	{ 5, EMASK_FBD_M5, ERR_NF_FBD_M5 },
	{ 4, EMASK_FBD_M4, ERR_NF_FBD_M4 }
};
205 
206 static int
207 intel_nf_fbd_err(uint32_t nf_fbd)
208 {
209 	int rt = -1;
210 	int nerr = 0;
211 	uint32_t emask_fbd = 0;
212 	int i;
213 	int sz;
214 
215 	sz = sizeof (nf_fbd_error_code) / sizeof (struct mch_error_code);
216 
217 	for (i = 0; i < sz; i++) {
218 		if (nf_fbd & nf_fbd_error_code[i].error_bit) {
219 			rt = nf_fbd_error_code[i].intel_error_list;
220 			emask_fbd |= nf_fbd_error_code[i].emask;
221 			nerr++;
222 		}
223 	}
224 	if (emask_fbd)
225 		nb_fbd_mask_mc(emask_fbd);
226 	if (nerr > 1)
227 		rt = -1;
228 	return (rt);
229 }
230 
231 static char *
232 nf_memory_error(const nb_regs_t *rp, void *data)
233 {
234 	uint32_t ferr_nf_fbd, recmemb, redmemb;
235 	uint32_t recmema;
236 	int branch, channel, ecc_locator;
237 	char *intr = "nb.unknown";
238 	nb_mem_scatchpad_t *sp = &((nb_scatchpad_t *)data)->ms;
239 
240 	sp->rank = -1;
241 	sp->dimm = -1;
242 	sp->bank = -1;
243 	sp->cas = -1;
244 	sp->ras = -1LL;
245 	sp->pa = -1LL;
246 	sp->offset = -1;
247 	ferr_nf_fbd = rp->nb.nf_fbd_regs.ferr_nf_fbd;
248 	if ((ferr_nf_fbd & ERR_NF_FBD_MASK) == 0) {
249 		/* unknown ereport if a recognizable error was not found */
250 		sp->branch = -1;
251 		sp->channel = -1;
252 		sp->intel_error_list = -1;
253 		return (intr);
254 	}
255 	sp->intel_error_list = intel_nf_fbd_err(ferr_nf_fbd);
256 	channel = (ferr_nf_fbd >> ERR_FBD_CH_SHIFT) & 3;
257 	branch = channel >> 1;
258 	sp->branch = branch;
259 	sp->channel = channel;
260 	if (ferr_nf_fbd & ERR_NF_FBD_MASK) {
261 		if (ferr_nf_fbd & ERR_NF_FBD_ECC_UE) {
262 			/*
263 			 * uncorrectable ECC M4 - M12
264 			 * we can only isolate to pair of dimms
265 			 * for single dimm configuration let eversholt
266 			 * sort it out with out needing a special rule
267 			 */
268 			sp->channel = -1;
269 			recmema = rp->nb.nf_fbd_regs.recmema;
270 			recmemb = rp->nb.nf_fbd_regs.recmemb;
271 			sp->rank = (recmema >> 8) & RANK_MASK;
272 			sp->bank = (recmema >> 12) & BANK_MASK;
273 			sp->cas = (recmemb >> 16) & CAS_MASK;
274 			sp->ras = recmemb & RAS_MASK;
275 			intr = "nb.mem_ue";
276 		} else if (ferr_nf_fbd & ERR_NF_FBD_M13) {
277 			/*
278 			 * write error M13
279 			 * we can only isolate to pair of dimms
280 			 */
281 			sp->channel = -1;
282 			if (nb_mode != NB_MEMORY_MIRROR) {
283 				recmema = rp->nb.nf_fbd_regs.recmema;
284 				sp->rank = (recmema >> 8) & RANK_MASK;
285 				sp->bank = (recmema >> 12) & BANK_MASK;
286 				sp->cas = (recmemb >> 16) & CAS_MASK;
287 				sp->ras = recmemb & RAS_MASK;
288 			}
289 			intr = "nb.fbd.ma"; /* memory alert */
290 		} else if (ferr_nf_fbd & ERR_NF_FBD_MA) { /* M14, M15 and M21 */
291 			intr = "nb.fbd.ch"; /* FBD on channel */
292 		} else if ((ferr_nf_fbd & ERR_NF_FBD_ECC_CE) != 0) {
293 			/* correctable ECC M17-M20 */
294 			recmema = rp->nb.nf_fbd_regs.recmema;
295 			recmemb = rp->nb.nf_fbd_regs.recmemb;
296 			sp->rank = (recmema >> 8) & RANK_MASK;
297 			redmemb = rp->nb.nf_fbd_regs.redmemb;
298 			ecc_locator = redmemb & 0x3ffff;
299 			if (ecc_locator & 0x1ff)
300 				sp->channel = branch << 1;
301 			else if (ecc_locator & 0x3fe00)
302 				sp->channel = (branch << 1) + 1;
303 			sp->dimm = sp->rank >> 1;
304 			sp->bank = (recmema >> 12) & BANK_MASK;
305 			sp->cas = (recmemb >> 16) & CAS_MASK;
306 			sp->ras = recmemb & RAS_MASK;
307 			intr = "nb.mem_ce";
308 		} else if ((ferr_nf_fbd & ERR_NF_FBD_SPARE) != 0) {
309 			/* spare dimm M27, M28 */
310 			intr = "nb.mem_ds";
311 			sp->channel = -1;
312 			if (rp->nb.nf_fbd_regs.spcps & SPCPS_SPARE_DEPLOYED) {
313 				sp->rank =
314 				    SPCPS_FAILED_RANK(rp->nb.nf_fbd_regs.spcps);
315 				nb_used_spare_rank(sp->branch, sp->rank);
316 				nb_config_gen++;
317 			}
318 		} else if ((ferr_nf_fbd & ERR_NF_FBD_M22) != 0) {
319 			intr = "nb.spd";	/* SPD protocol */
320 		}
321 	}
322 	if (sp->ras != -1) {
323 		/*
324 		 * If driver was built with closed tree present then we will
325 		 * have Intel proprietary code for finding physaddr
326 		 */
327 		if (&dimm_getphys) {
328 			sp->pa = dimm_getphys((uint16_t)sp->branch,
329 			    (uint16_t)sp->rank, (uint64_t)sp->bank,
330 			    (uint64_t)sp->ras, (uint64_t)sp->cas);
331 			if (sp->pa >= MAXPHYS_ADDR)
332 				sp->pa = -1ULL;
333 		} else {
334 			sp->pa = -1ULL;
335 		}
336 		if (&dimm_getoffset) {
337 			sp->offset = dimm_getoffset(sp->branch, sp->rank,
338 			    sp->bank, sp->ras, sp->cas);
339 		} else {
340 			sp->offset = TCODE_OFFSET(sp->rank, sp->bank, sp->ras,
341 			    sp->cas);
342 		}
343 	}
344 	return (intr);
345 }
346 
/* Non-fatal DDR2 memory (FERR_NF_MEM, 5100) error bits and list numbers */
static struct mch_error_code nf_mem_error_code[] = {
	{ 21, EMASK_MEM_M21, ERR_NF_MEM_M21 },
	{ 20, EMASK_MEM_M20, ERR_NF_MEM_M20 },
	{ 18, EMASK_MEM_M18, ERR_NF_MEM_M18 },
	{ 16, EMASK_MEM_M16, ERR_NF_MEM_M16 },
	{ 15, EMASK_MEM_M15, ERR_NF_MEM_M15 },
	{ 14, EMASK_MEM_M14, ERR_NF_MEM_M14 },
	{ 12, EMASK_MEM_M12, ERR_NF_MEM_M12 },
	{ 11, EMASK_MEM_M11, ERR_NF_MEM_M11 },
	{ 10, EMASK_MEM_M10, ERR_NF_MEM_M10 },
	{ 6, EMASK_MEM_M6, ERR_NF_MEM_M6 },
	{ 5, EMASK_MEM_M5, ERR_NF_MEM_M5 },
	{ 4, EMASK_MEM_M4, ERR_NF_MEM_M4 },
	{ 1, EMASK_MEM_M1, ERR_NF_MEM_M1 }
};
362 
363 static int
364 intel_nf_mem_err(uint32_t nf_mem)
365 {
366 	int rt = -1;
367 	int nerr = 0;
368 	uint32_t emask_mem = 0;
369 	int i;
370 	int sz;
371 
372 	sz = sizeof (nf_mem_error_code) / sizeof (struct mch_error_code);
373 
374 	for (i = 0; i < sz; i++) {
375 		if (nf_mem & nf_mem_error_code[i].error_bit) {
376 			rt = nf_mem_error_code[i].intel_error_list;
377 			emask_mem |= nf_mem_error_code[i].emask;
378 			nerr++;
379 		}
380 	}
381 	if (emask_mem)
382 		nb_mem_mask_mc(emask_mem);
383 	if (nerr > 1)
384 		rt = -1;
385 	return (rt);
386 }
387 
/*
 * Interpret the captured non-fatal DDR2 memory error registers in "rp"
 * and fill in the memory scratchpad ("data") with the decoded error
 * location.  Address logs are only trusted when the corresponding
 * validlog bit is set (unless nb_check_validlog is cleared).  Returns
 * the ereport class suffix, or "nb.unknown" when no recognizable
 * first-error bit is set.
 */
static char *
nf_mem_error(const nb_regs_t *rp, void *data)
{
	uint32_t ferr_nf_mem, recmema, recmemb;
	uint32_t nrecmema, nrecmemb, validlog;
	int channel;
	char *intr = "nb.unknown";
	nb_mem_scatchpad_t *sp = &((nb_scatchpad_t *)data)->ms;

	/* invalidate all location fields; decode fills in what it can */
	sp->rank = -1;
	sp->dimm = -1;
	sp->bank = -1;
	sp->cas = -1;
	sp->ras = -1LL;
	sp->pa = -1LL;
	sp->offset = -1;
	ferr_nf_mem = rp->nb.nf_mem_regs.ferr_nf_mem;
	if ((ferr_nf_mem & ERR_NF_MEM_MASK) == 0) {
		/* no first error found */
		sp->branch = -1;
		sp->channel = -1;
		sp->intel_error_list =
		    intel_nf_mem_err(rp->nb.nf_mem_regs.nerr_nf_mem);
		return (intr);
	}
	sp->intel_error_list = intel_nf_mem_err(ferr_nf_mem);

	/* one channel per branch on this chipset */
	channel = (ferr_nf_mem >> ERR_MEM_CH_SHIFT) & 0x1;
	sp->branch = channel;
	sp->channel = -1;
	if (ferr_nf_mem & ERR_NF_MEM_MASK) {
		if (ferr_nf_mem & ERR_NF_MEM_ECC_UE) {
			/*
			 * uncorrectable ECC M1,M4-M6,M10-M12
			 * There is only channel per branch
			 * Invalidate the channel number so the mem ereport
			 * has the same detector with existing 5000 ereports.
			 * so we can leverage the existing Everhsolt rule.
			 */
			validlog = rp->nb.nf_mem_regs.validlog;
			if (ferr_nf_mem & ERR_NF_MEM_M1) {
				nrecmema = rp->nb.nf_mem_regs.nrecmema;
				nrecmemb = rp->nb.nf_mem_regs.nrecmemb;
				/* check if the nrecmem log is valid */
				if (validlog & 0x1 || nb_check_validlog == 0) {
					sp->rank = (nrecmema >> 8) & RANK_MASK;
					sp->bank = (nrecmema >> 12) & BANK_MASK;
					sp->cas = (nrecmemb >> 16) & CAS_MASK;
					sp->ras = nrecmemb & RAS_MASK;
				}
			} else {
				recmema = rp->nb.nf_mem_regs.recmema;
				recmemb = rp->nb.nf_mem_regs.recmemb;
				/* check if the recmem log is valid */
				if (validlog & 0x2 || nb_check_validlog == 0) {
					sp->rank = (recmema >> 8) & RANK_MASK;
					sp->bank = (recmema >> 12) & BANK_MASK;
					sp->cas = (recmemb >> 16) & CAS_MASK;
					sp->ras = recmemb & RAS_MASK;
				}
			}
			intr = "nb.ddr2_mem_ue";
		} else if ((ferr_nf_mem & ERR_NF_MEM_ECC_CE) != 0) {
			/* correctable ECC M14-M16 */
			recmema = rp->nb.nf_mem_regs.recmema;
			recmemb = rp->nb.nf_mem_regs.recmemb;
			validlog = rp->nb.nf_mem_regs.validlog;
			/* check if the recmem log is valid */
			if (validlog & 0x2 || nb_check_validlog == 0) {
				sp->channel = channel;
				sp->rank = (recmema >> 8) & RANK_MASK;
				sp->dimm = nb_rank2dimm(sp->channel, sp->rank);
				sp->bank = (recmema >> 12) & BANK_MASK;
				sp->cas = (recmemb >> 16) & CAS_MASK;
				sp->ras = recmemb & RAS_MASK;
			}
			intr = "nb.ddr2_mem_ce";
		} else if ((ferr_nf_mem & ERR_NF_MEM_SPARE) != 0) {
			/* spare dimm M20, M21 */
			intr = "nb.ddr2_mem_ds";

			/*
			 * The channel can be valid here.
			 * However, there is only one channel per branch and
			 * to leverage the eversolt rules of other chipsets,
			 * the channel is ignored and let the rule find it out
			 * from the topology.
			 */
			if (rp->nb.nf_mem_regs.spcps & SPCPS_SPARE_DEPLOYED) {
				sp->rank =
				    SPCPS_FAILED_RANK(rp->nb.nf_mem_regs.spcps);
				nb_used_spare_rank(sp->branch, sp->rank);
				nb_config_gen++;
			}
		} else if ((ferr_nf_mem & ERR_NF_MEM_M18) != 0) {
			sp->channel = channel;
			intr = "nb.ddr2_spd";	/* SPD protocol */

		}
	}
	if (sp->ras != -1) {
		/*
		 * If driver was built with closed tree present then we will
		 * have Intel proprietary code for finding physaddr
		 */
		if (&dimm_getphys) {
			sp->pa = dimm_getphys((uint16_t)sp->branch,
			    (uint16_t)sp->rank, (uint64_t)sp->bank,
			    (uint64_t)sp->ras, (uint64_t)sp->cas);
			if (sp->pa >= MAXPHYS_ADDR)
				sp->pa = -1ULL;
		} else {
			sp->pa = -1ULL;
		}
		if (&dimm_getoffset) {
			sp->offset = dimm_getoffset(sp->branch, sp->rank,
			    sp->bank, sp->ras, sp->cas);
		} else {
			sp->offset = TCODE_OFFSET(sp->rank, sp->bank, sp->ras,
			    sp->cas);
		}
	}
	return (intr);
}
512 
/* Fatal internal (FERR_FAT_INT) error bits and their list numbers */
static struct mch_error_code fat_int_error_code[] = {
	{ 14, EMASK_INT_B14, ERR_FAT_INT_B14 },
	{ 12, EMASK_INT_B12, ERR_FAT_INT_B12 },
	{ 25, EMASK_INT_B25, ERR_FAT_INT_B25 },
	{ 23, EMASK_INT_B23, ERR_FAT_INT_B23 },
	{ 21, EMASK_INT_B21, ERR_FAT_INT_B21 },
	{ 7, EMASK_INT_B7, ERR_FAT_INT_B7 },
	{ 4, EMASK_INT_B4, ERR_FAT_INT_B4 },
	{ 3, EMASK_INT_B3, ERR_FAT_INT_B3 },
	{ 2, EMASK_INT_B2, ERR_FAT_INT_B2 },
	{ 1, EMASK_INT_B1, ERR_FAT_INT_B1 }
};
525 
/*
 * Non-fatal internal (FERR_NF_INT) error bits and their list numbers.
 * Entries with a zero emask have no corresponding machine-check mask
 * bit to set.
 */
static struct mch_error_code nf_int_error_code[] = {
	{ 27, 0, ERR_NF_INT_B27 },
	{ 24, 0, ERR_NF_INT_B24 },
	{ 22, EMASK_INT_B22, ERR_NF_INT_B22 },
	{ 20, EMASK_INT_B20, ERR_NF_INT_B20 },
	{ 19, EMASK_INT_B19, ERR_NF_INT_B19 },
	{ 18, 0, ERR_NF_INT_B18 },
	{ 17, 0, ERR_NF_INT_B17 },
	{ 16, 0, ERR_NF_INT_B16 },
	{ 11, EMASK_INT_B11, ERR_NF_INT_B11 },
	{ 10, EMASK_INT_B10, ERR_NF_INT_B10 },
	{ 9, EMASK_INT_B9, ERR_NF_INT_B9 },
	{ 8, EMASK_INT_B8, ERR_NF_INT_B8 },
	{ 6, EMASK_INT_B6, ERR_NF_INT_B6 },
	{ 5, EMASK_INT_B5, ERR_NF_INT_B5 }
};
542 
/*
 * Decode the fatal and non-fatal internal fault bits into a single
 * Chipset Error List number.  Matched errors have their machine-check
 * mask bits applied via nb_int_mask_mc().  Returns the list number
 * when exactly one recognized bit is set, otherwise -1.
 */
static int
intel_int_err(uint16_t err_fat_int, uint16_t err_nf_int)
{
	int rt = -1;
	int nerr = 0;
	uint32_t emask_int = 0;
	int i;
	int sz;

	sz = sizeof (fat_int_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (err_fat_int & fat_int_error_code[i].error_bit) {
			rt = fat_int_error_code[i].intel_error_list;
			emask_int |= fat_int_error_code[i].emask;
			nerr++;
		}
	}

	/* B26 exists only on the 5400 and is not in the shared table */
	if (nb_chipset == INTEL_NB_5400 &&
	    (err_nf_int & NERR_NF_5400_INT_B26) != 0) {
		err_nf_int &= ~NERR_NF_5400_INT_B26;
		rt = 26;
		nerr++;
	}

	/*
	 * NOTE(review): rt starts at -1, so this condition is true even
	 * when no error matched above; B18 is therefore suppressed
	 * whenever rt != 0.  Confirm whether the intent was to drop B18
	 * only when another error was already identified.
	 */
	if (rt)
		err_nf_int &= ~ERR_NF_INT_B18;

	sz = sizeof (nf_int_error_code) / sizeof (struct mch_error_code);

	for (i = 0; i < sz; i++) {
		if (err_nf_int & nf_int_error_code[i].error_bit) {
			rt = nf_int_error_code[i].intel_error_list;
			emask_int |= nf_int_error_code[i].emask;
			nerr++;
		}
	}

	if (emask_int)
		nb_int_mask_mc(emask_int);
	/* more than one recognized error: cannot name a single one */
	if (nerr > 1)
		rt = -1;
	return (rt);
}
588 
/*
 * Capture the internal (INT) error registers into "rp" and, unless the
 * system is about to panic, write them back to clear the logged
 * errors.  *interpose is set if any of the reads hit an interposed
 * (injected) value.  Returns 1 when the only error recorded is the
 * non-fatal B18 bit, otherwise 0.
 */
static int
log_int_err(nb_regs_t *rp, int willpanic, int *interpose)
{
	int t = 0;
	int rt = 0;

	rp->flag = NB_REG_LOG_INT;
	rp->nb.int_regs.ferr_fat_int = FERR_FAT_INT_RD(interpose);
	rp->nb.int_regs.ferr_nf_int = FERR_NF_INT_RD(&t);
	*interpose |= t;
	rp->nb.int_regs.nerr_fat_int = NERR_FAT_INT_RD(&t);
	*interpose |= t;
	rp->nb.int_regs.nerr_nf_int = NERR_NF_INT_RD(&t);
	*interpose |= t;
	rp->nb.int_regs.nrecint = NRECINT_RD();
	rp->nb.int_regs.recint = RECINT_RD();
	rp->nb.int_regs.nrecsf = NRECSF_RD();
	rp->nb.int_regs.recsf = RECSF_RD();

	if (!willpanic) {
		/* write-one-to-clear the captured error bits */
		if (rp->nb.int_regs.ferr_fat_int || *interpose)
			FERR_FAT_INT_WR(rp->nb.int_regs.ferr_fat_int);
		if (rp->nb.int_regs.ferr_nf_int || *interpose)
			FERR_NF_INT_WR(rp->nb.int_regs.ferr_nf_int);
		if (rp->nb.int_regs.nerr_fat_int)
			NERR_FAT_INT_WR(rp->nb.int_regs.nerr_fat_int);
		if (rp->nb.int_regs.nerr_nf_int)
			NERR_NF_INT_WR(rp->nb.int_regs.nerr_nf_int);
		/*
		 * if interpose write read-only registers to clear from pcii
		 * cache
		 */
		if (*interpose) {
			NRECINT_WR();
			RECINT_WR();
			NRECSF_WR();
			RECSF_WR();
		}
	}
	/* only error present is non-fatal B18 */
	if (rp->nb.int_regs.ferr_fat_int == 0 &&
	    rp->nb.int_regs.nerr_fat_int == 0 &&
	    (rp->nb.int_regs.ferr_nf_int == ERR_NF_INT_B18 ||
	    (rp->nb.int_regs.ferr_nf_int == 0 &&
	    rp->nb.int_regs.nerr_nf_int == ERR_NF_INT_B18))) {
		rt = 1;
	}
	return (rt);
}
637 
/*
 * Capture the thermal (THR) error registers into "rp" and, unless the
 * system is about to panic, write them back to clear the logged
 * errors.  *interpose is set if any of the reads hit an interposed
 * (injected) value; in that case the status registers are also written
 * to flush the pcii cache.
 */
static void
log_thermal_err(nb_regs_t *rp, int willpanic, int *interpose)
{
	int t = 0;

	rp->flag = NB_REG_LOG_THR;
	rp->nb.thr_regs.ferr_fat_thr = FERR_FAT_THR_RD(interpose);
	rp->nb.thr_regs.nerr_fat_thr = NERR_FAT_THR_RD(&t);
	*interpose |= t;
	rp->nb.thr_regs.ferr_nf_thr = FERR_NF_THR_RD(&t);
	*interpose |= t;
	rp->nb.thr_regs.nerr_nf_thr = NERR_NF_THR_RD(&t);
	*interpose |= t;
	rp->nb.thr_regs.ctsts = CTSTS_RD();
	rp->nb.thr_regs.thrtsts = THRTSTS_RD();

	if (!willpanic) {
		/* write-one-to-clear the captured error bits */
		if (rp->nb.thr_regs.ferr_fat_thr || *interpose)
			FERR_FAT_THR_WR(rp->nb.thr_regs.ferr_fat_thr);
		if (rp->nb.thr_regs.nerr_fat_thr || *interpose)
			NERR_FAT_THR_WR(rp->nb.thr_regs.nerr_fat_thr);
		if (rp->nb.thr_regs.ferr_nf_thr || *interpose)
			FERR_NF_THR_WR(rp->nb.thr_regs.ferr_nf_thr);
		if (rp->nb.thr_regs.nerr_nf_thr || *interpose)
			NERR_NF_THR_WR(rp->nb.thr_regs.nerr_nf_thr);

		if (*interpose) {
			CTSTS_WR(rp->nb.thr_regs.ctsts);
			THRTSTS_WR(rp->nb.thr_regs.thrtsts);
		}
	}
}
670 
/*
 * Capture the DMA engine error state (PCI status and PCI Express
 * device status) into "rp".  *interpose is set if the PCISTS read hit
 * an interposed (injected) value.
 */
static void
log_dma_err(nb_regs_t *rp, int *interpose)
{
	rp->flag = NB_REG_LOG_DMA;

	rp->nb.dma_regs.pcists = PCISTS_RD(interpose);
	rp->nb.dma_regs.pexdevsts = PCIDEVSTS_RD();
}
679 
/* Fatal front-side bus (FERR_FAT_FSB) error bits and list numbers */
static struct mch_error_code fat_fsb_error_code[] = {
	{ 9, EMASK_FSB_F9, ERR_FAT_FSB_F9 },
	{ 2, EMASK_FSB_F2, ERR_FAT_FSB_F2 },
	{ 1, EMASK_FSB_F1, ERR_FAT_FSB_F1 }
};

/* Non-fatal front-side bus (FERR_NF_FSB) error bits and list numbers */
static struct mch_error_code nf_fsb_error_code[] = {
	{ 8, EMASK_FSB_F8, ERR_NF_FSB_F8 },
	{ 7, EMASK_FSB_F7, ERR_NF_FSB_F7 },
	{ 6, EMASK_FSB_F6, ERR_NF_FSB_F6 }
};
691 
692 static int
693 intel_fsb_err(int fsb, uint8_t err_fat_fsb, uint8_t err_nf_fsb)
694 {
695 	int rt = -1;
696 	int nerr = 0;
697 	uint16_t emask_fsb = 0;
698 	int i;
699 	int sz;
700 
701 	sz = sizeof (fat_fsb_error_code) / sizeof (struct mch_error_code);
702 
703 	for (i = 0; i < sz; i++) {
704 		if (err_fat_fsb & fat_fsb_error_code[i].error_bit) {
705 			rt = fat_fsb_error_code[i].intel_error_list;
706 			emask_fsb |= fat_fsb_error_code[i].emask;
707 			nerr++;
708 		}
709 	}
710 
711 	sz = sizeof (nf_fsb_error_code) / sizeof (struct mch_error_code);
712 
713 	for (i = 0; i < sz; i++) {
714 		if (err_nf_fsb & nf_fsb_error_code[i].error_bit) {
715 			rt = nf_fsb_error_code[i].intel_error_list;
716 			emask_fsb |= nf_fsb_error_code[i].emask;
717 			nerr++;
718 		}
719 	}
720 
721 	if (emask_fsb)
722 		nb_fsb_mask_mc(fsb, emask_fsb);
723 	if (nerr > 1)
724 		rt = -1;
725 	return (rt);
726 }
727 
/*
 * Capture the error registers of the front-side bus identified by the
 * global error status "ferr" into "rp" and, unless the system is about
 * to panic, write them back to clear the logged errors.  *interpose is
 * set if any read hit an interposed (injected) value; in that case the
 * read-only log registers are also written to flush the pcii cache.
 */
static void
log_fsb_err(uint64_t ferr, nb_regs_t *rp, int willpanic, int *interpose)
{
	uint8_t fsb;
	int t = 0;

	fsb = GE_FERR_FSB(ferr);
	rp->flag = NB_REG_LOG_FSB;

	rp->nb.fsb_regs.fsb = fsb;
	rp->nb.fsb_regs.ferr_fat_fsb = FERR_FAT_FSB_RD(fsb, interpose);
	rp->nb.fsb_regs.ferr_nf_fsb = FERR_NF_FSB_RD(fsb, &t);
	*interpose |= t;
	rp->nb.fsb_regs.nerr_fat_fsb = NERR_FAT_FSB_RD(fsb, &t);
	*interpose |= t;
	rp->nb.fsb_regs.nerr_nf_fsb = NERR_NF_FSB_RD(fsb, &t);
	*interpose |= t;
	rp->nb.fsb_regs.nrecfsb = NRECFSB_RD(fsb);
	rp->nb.fsb_regs.nrecfsb_addr = NRECADDR_RD(fsb);
	rp->nb.fsb_regs.recfsb = RECFSB_RD(fsb);
	if (!willpanic) {
		/* Clear the fatal/non-fatal first/next FSB errors */
		if (rp->nb.fsb_regs.ferr_fat_fsb || *interpose)
			FERR_FAT_FSB_WR(fsb, rp->nb.fsb_regs.ferr_fat_fsb);
		if (rp->nb.fsb_regs.ferr_nf_fsb || *interpose)
			FERR_NF_FSB_WR(fsb, rp->nb.fsb_regs.ferr_nf_fsb);
		if (rp->nb.fsb_regs.nerr_fat_fsb || *interpose)
			NERR_FAT_FSB_WR(fsb, rp->nb.fsb_regs.nerr_fat_fsb);
		if (rp->nb.fsb_regs.nerr_nf_fsb || *interpose)
			NERR_NF_FSB_WR(fsb, rp->nb.fsb_regs.nerr_nf_fsb);

		/*
		 * if interpose write read-only registers to clear from pcii
		 * cache
		 */
		if (*interpose) {
			NRECFSB_WR(fsb);
			NRECADDR_WR(fsb);
			RECFSB_WR(fsb);
		}
	}
}
770 
/*
 * Fatal PCI Express error tables.  Separate tables cover the generic
 * 5000-series bits, the 5400-only "unit" bits, the 5400 variants of
 * the generic bits, and the root-port bits.
 */
static struct mch_error_code fat_pex_error_code[] = {
	{ 19, EMASK_UNCOR_PEX_IO19, PEX_FAT_IO19 },
	{ 18, EMASK_UNCOR_PEX_IO18, PEX_FAT_IO18 },
	{ 10, EMASK_UNCOR_PEX_IO10, PEX_FAT_IO10 },
	{ 9, EMASK_UNCOR_PEX_IO9, PEX_FAT_IO9 },
	{ 8, EMASK_UNCOR_PEX_IO8, PEX_FAT_IO8 },
	{ 7, EMASK_UNCOR_PEX_IO7, PEX_FAT_IO7 },
	{ 6, EMASK_UNCOR_PEX_IO6, PEX_FAT_IO6 },
	{ 5, EMASK_UNCOR_PEX_IO5, PEX_FAT_IO5 },
	{ 4, EMASK_UNCOR_PEX_IO4, PEX_FAT_IO4 },
	{ 3, EMASK_UNCOR_PEX_IO3, PEX_FAT_IO3 },
	{ 2, EMASK_UNCOR_PEX_IO2, PEX_FAT_IO2 },
	{ 0, EMASK_UNCOR_PEX_IO0, PEX_FAT_IO0 }
};

/* 5400-only fatal "unit" error bits */
static struct mch_error_code fat_unit_pex_5400_error_code[] = {
	{ 32, EMASK_UNIT_PEX_IO32, PEX_5400_FAT_IO32 },
	{ 31, EMASK_UNIT_PEX_IO31, PEX_5400_FAT_IO31 },
	{ 30, EMASK_UNIT_PEX_IO30, PEX_5400_FAT_IO30 },
	{ 29, EMASK_UNIT_PEX_IO29, PEX_5400_FAT_IO29 },
	{ 27, EMASK_UNIT_PEX_IO27, PEX_5400_FAT_IO27 },
	{ 26, EMASK_UNIT_PEX_IO26, PEX_5400_FAT_IO26 },
	{ 25, EMASK_UNIT_PEX_IO25, PEX_5400_FAT_IO25 },
	{ 24, EMASK_UNIT_PEX_IO24, PEX_5400_FAT_IO24 },
	{ 23, EMASK_UNIT_PEX_IO23, PEX_5400_FAT_IO23 },
	{ 22, EMASK_UNIT_PEX_IO22, PEX_5400_FAT_IO22 },
};

/* 5400 variants of the generic fatal uncorrectable bits */
static struct mch_error_code fat_pex_5400_error_code[] = {
	{ 19, EMASK_UNCOR_PEX_IO19, PEX_5400_FAT_IO19 },
	{ 18, EMASK_UNCOR_PEX_IO18, PEX_5400_FAT_IO18 },
	{ 10, EMASK_UNCOR_PEX_IO10, PEX_5400_FAT_IO10 },
	{ 9, EMASK_UNCOR_PEX_IO9, PEX_5400_FAT_IO9 },
	{ 8, EMASK_UNCOR_PEX_IO8, PEX_5400_FAT_IO8 },
	{ 7, EMASK_UNCOR_PEX_IO7, PEX_5400_FAT_IO7 },
	{ 6, EMASK_UNCOR_PEX_IO6, PEX_5400_FAT_IO6 },
	{ 5, EMASK_UNCOR_PEX_IO5, PEX_5400_FAT_IO5 },
	{ 4, EMASK_UNCOR_PEX_IO4, PEX_5400_FAT_IO4 },
	{ 2, EMASK_UNCOR_PEX_IO2, PEX_5400_FAT_IO2 },
	{ 0, EMASK_UNCOR_PEX_IO0, PEX_5400_FAT_IO0 }
};

/* 5400 fatal root-port error bits */
static struct mch_error_code fat_rp_5400_error_code[] = {
	{ 1, EMASK_RP_PEX_IO1, PEX_5400_FAT_IO1 }
};

/* generic fatal root-port error bits */
static struct mch_error_code fat_rp_error_code[] = {
	{ 1, EMASK_RP_PEX_IO1, PEX_FAT_IO1 }
};
820 
/*
 * Non-fatal PCI Express error tables: uncorrectable, correctable and
 * root-port variants for the generic 5000-series chipsets and for the
 * 5400.  Entries with a zero emask have no mask bit to set.
 */
static struct mch_error_code uncor_pex_error_code[] = {
	{ 19, EMASK_UNCOR_PEX_IO19, PEX_NF_IO19 },
	{ 9, EMASK_UNCOR_PEX_IO9, PEX_NF_IO9 },
	{ 8, EMASK_UNCOR_PEX_IO8, PEX_NF_IO8 },
	{ 7, EMASK_UNCOR_PEX_IO7, PEX_NF_IO7 },
	{ 6, EMASK_UNCOR_PEX_IO6, PEX_NF_IO6 },
	{ 5, EMASK_UNCOR_PEX_IO5, PEX_NF_IO5 },
	{ 4, EMASK_UNCOR_PEX_IO4, PEX_NF_IO4 },
	{ 3, EMASK_UNCOR_PEX_IO3, PEX_NF_IO3 },
	{ 0, EMASK_UNCOR_PEX_IO0, PEX_NF_IO0 }
};

/* 5400 non-fatal "unit" error bits */
static struct mch_error_code uncor_pex_5400_error_code[] = {
	{ 33, EMASK_UNIT_PEX_IO33, PEX_5400_NF_IO33 },
	{ 32, EMASK_UNIT_PEX_IO32, PEX_5400_NF_IO32 },
	{ 31, EMASK_UNIT_PEX_IO31, PEX_5400_NF_IO31 },
	{ 30, EMASK_UNIT_PEX_IO30, PEX_5400_NF_IO30 },
	{ 29, EMASK_UNIT_PEX_IO29, PEX_5400_NF_IO29 },
	{ 28, EMASK_UNIT_PEX_IO28, PEX_5400_NF_IO28 },
	{ 27, EMASK_UNIT_PEX_IO27, PEX_5400_NF_IO27 },
	{ 26, EMASK_UNIT_PEX_IO26, PEX_5400_NF_IO26 },
	{ 25, EMASK_UNIT_PEX_IO25, PEX_5400_NF_IO25 },
	{ 24, EMASK_UNIT_PEX_IO24, PEX_5400_NF_IO24 },
	{ 23, EMASK_UNIT_PEX_IO23, PEX_5400_NF_IO23 },
};

/* correctable error bits (generic) */
static struct mch_error_code cor_pex_error_code[] = {
	{ 20, EMASK_COR_PEX_IO20, PEX_5400_NF_IO20 },
	{ 16, EMASK_COR_PEX_IO16, PEX_NF_IO16 },
	{ 15, EMASK_COR_PEX_IO15, PEX_NF_IO15 },
	{ 14, EMASK_COR_PEX_IO14, PEX_NF_IO14 },
	{ 13, EMASK_COR_PEX_IO13, PEX_NF_IO13 },
	{ 12, EMASK_COR_PEX_IO12, PEX_NF_IO12 },
	{ 10, 0, PEX_NF_IO10 },
	{ 2, 0, PEX_NF_IO2 }
};

/* 5400 non-fatal root-port error bits */
static struct mch_error_code rp_pex_5400_error_code[] = {
	{ 17, EMASK_RP_PEX_IO17, PEX_5400_NF_IO17 },
	{ 11, EMASK_RP_PEX_IO11, PEX_5400_NF_IO11 }
};

/* 5400 non-fatal variants of the uncorrectable bits */
static struct mch_error_code cor_pex_5400_error_code1[] = {
	{ 19, EMASK_UNCOR_PEX_IO19, PEX_5400_NF_IO19 },
	{ 10, EMASK_UNCOR_PEX_IO10, PEX_5400_NF_IO10 },
	{ 9, EMASK_UNCOR_PEX_IO9, PEX_5400_NF_IO9 },
	{ 8, EMASK_UNCOR_PEX_IO8, PEX_5400_NF_IO8 },
	{ 7, EMASK_UNCOR_PEX_IO7, PEX_5400_NF_IO7 },
	{ 6, EMASK_UNCOR_PEX_IO6, PEX_5400_NF_IO6 },
	{ 5, EMASK_UNCOR_PEX_IO5, PEX_5400_NF_IO5 },
	{ 4, EMASK_UNCOR_PEX_IO4, PEX_5400_NF_IO4 },
	{ 2, EMASK_UNCOR_PEX_IO2, PEX_5400_NF_IO2 },
	{ 0, EMASK_UNCOR_PEX_IO0, PEX_5400_NF_IO0 }
};

/* 5400 correctable error bits */
static struct mch_error_code cor_pex_5400_error_code2[] = {
	{ 20, EMASK_COR_PEX_IO20, PEX_5400_NF_IO20 },
	{ 16, EMASK_COR_PEX_IO16, PEX_5400_NF_IO16 },
	{ 15, EMASK_COR_PEX_IO15, PEX_5400_NF_IO15 },
	{ 14, EMASK_COR_PEX_IO14, PEX_5400_NF_IO14 },
	{ 13, EMASK_COR_PEX_IO13, PEX_5400_NF_IO13 },
	{ 12, EMASK_COR_PEX_IO12, PEX_5400_NF_IO12 }
};

/* 5400 correctable "unit" error bits */
static struct mch_error_code cor_pex_5400_error_code3[] = {
	{ 33, EMASK_UNIT_PEX_IO33, PEX_5400_NF_IO33 },
	{ 32, EMASK_UNIT_PEX_IO32, PEX_5400_NF_IO32 },
	{ 31, EMASK_UNIT_PEX_IO31, PEX_5400_NF_IO31 },
	{ 30, EMASK_UNIT_PEX_IO30, PEX_5400_NF_IO30 },
	{ 29, EMASK_UNIT_PEX_IO29, PEX_5400_NF_IO29 },
	{ 28, EMASK_UNIT_PEX_IO28, PEX_5400_NF_IO28 },
	{ 27, EMASK_UNIT_PEX_IO27, PEX_5400_NF_IO27 },
	{ 26, EMASK_UNIT_PEX_IO26, PEX_5400_NF_IO26 },
	{ 25, EMASK_UNIT_PEX_IO25, PEX_5400_NF_IO25 },
	{ 24, EMASK_UNIT_PEX_IO24, PEX_5400_NF_IO24 },
	{ 23, EMASK_UNIT_PEX_IO23, PEX_5400_NF_IO23 }
};

/* generic non-fatal root-port error bits */
static struct mch_error_code rp_pex_error_code[] = {
	{ 17, EMASK_RP_PEX_IO17, PEX_NF_IO17 },
	{ 11, EMASK_RP_PEX_IO11, PEX_NF_IO11 },
};
903 
904 static int
905 intel_pex_err(uint32_t pex_fat, uint32_t pex_nf_cor)
906 {
907 	int rt = -1;
908 	int nerr = 0;
909 	int i;
910 	int sz;
911 
912 	sz = sizeof (fat_pex_error_code) / sizeof (struct mch_error_code);
913 
914 	for (i = 0; i < sz; i++) {
915 		if (pex_fat & fat_pex_error_code[i].error_bit) {
916 			rt = fat_pex_error_code[i].intel_error_list;
917 			nerr++;
918 		}
919 	}
920 	sz = sizeof (fat_rp_error_code) / sizeof (struct mch_error_code);
921 
922 	for (i = 0; i < sz; i++) {
923 		if (pex_fat & fat_rp_error_code[i].error_bit) {
924 			rt = fat_rp_error_code[i].intel_error_list;
925 			nerr++;
926 		}
927 	}
928 	sz = sizeof (uncor_pex_error_code) / sizeof (struct mch_error_code);
929 
930 	for (i = 0; i < sz; i++) {
931 		if (pex_nf_cor & uncor_pex_error_code[i].error_bit) {
932 			rt = uncor_pex_error_code[i].intel_error_list;
933 			nerr++;
934 		}
935 	}
936 
937 	sz = sizeof (cor_pex_error_code) / sizeof (struct mch_error_code);
938 
939 	for (i = 0; i < sz; i++) {
940 		if (pex_nf_cor & cor_pex_error_code[i].error_bit) {
941 			rt = cor_pex_error_code[i].intel_error_list;
942 			nerr++;
943 		}
944 	}
945 	sz = sizeof (rp_pex_error_code) / sizeof (struct mch_error_code);
946 
947 	for (i = 0; i < sz; i++) {
948 		if (pex_nf_cor & rp_pex_error_code[i].error_bit) {
949 			rt = rp_pex_error_code[i].intel_error_list;
950 			nerr++;
951 		}
952 	}
953 
954 	if (nerr > 1)
955 		rt = -1;
956 	return (rt);
957 }
958 
/* Fatal thermal (FERR_FAT_THR) error bits and their list numbers */
static struct mch_error_code fat_thr_error_code[] = {
	{ 2, EMASK_THR_F2, ERR_FAT_THR_F2 },
	{ 1, EMASK_THR_F1, ERR_FAT_THR_F1 }
};

/* Non-fatal thermal (FERR_NF_THR) error bits and their list numbers */
static struct mch_error_code nf_thr_error_code[] = {
	{ 5, EMASK_THR_F5, ERR_NF_THR_F5 },
	{ 4, EMASK_THR_F4, ERR_NF_THR_F4 },
	{ 3, EMASK_THR_F3, ERR_NF_THR_F3 }
};
969 
970 static int
971 intel_thr_err(uint8_t err_fat_thr, uint8_t err_nf_thr)
972 {
973 	int rt = -1;
974 	int nerr = 0;
975 	uint16_t emask_thr = 0;
976 	int i;
977 	int sz;
978 
979 	sz = sizeof (fat_thr_error_code) / sizeof (struct mch_error_code);
980 
981 	for (i = 0; i < sz; i++) {
982 		if (err_fat_thr & fat_thr_error_code[i].error_bit) {
983 			rt = fat_thr_error_code[i].intel_error_list;
984 			emask_thr |= fat_thr_error_code[i].emask;
985 			nerr++;
986 		}
987 	}
988 
989 	sz = sizeof (nf_thr_error_code) / sizeof (struct mch_error_code);
990 
991 	for (i = 0; i < sz; i++) {
992 		if (err_nf_thr & nf_thr_error_code[i].error_bit) {
993 			rt = nf_thr_error_code[i].intel_error_list;
994 			emask_thr |= nf_thr_error_code[i].emask;
995 			nerr++;
996 		}
997 	}
998 
999 	if (emask_thr)
1000 		nb_thr_mask_mc(emask_thr);
1001 	if (nerr > 1)
1002 		rt = -1;
1003 	return (rt);
1004 }
1005 
1006 static int
1007 intel_pex_5400_err(uint32_t pex_fat, uint32_t pex_nf_cor)
1008 {
1009 	int rt = -1;
1010 	int nerr = 0;
1011 	int i;
1012 	int sz;
1013 
1014 	sz = sizeof (fat_pex_5400_error_code) / sizeof (struct mch_error_code);
1015 
1016 	for (i = 0; i < sz; i++) {
1017 		if (pex_fat & fat_pex_5400_error_code[i].error_bit) {
1018 			rt = fat_pex_5400_error_code[i].intel_error_list;
1019 			nerr++;
1020 		}
1021 	}
1022 	sz = sizeof (fat_rp_5400_error_code) / sizeof (struct mch_error_code);
1023 
1024 	for (i = 0; i < sz; i++) {
1025 		if (pex_fat & fat_rp_5400_error_code[i].error_bit) {
1026 			rt = fat_rp_5400_error_code[i].intel_error_list;
1027 			nerr++;
1028 		}
1029 	}
1030 	sz = sizeof (fat_unit_pex_5400_error_code) /
1031 	    sizeof (struct mch_error_code);
1032 
1033 	for (i = 0; i < sz; i++) {
1034 		if (pex_fat &
1035 		    fat_unit_pex_5400_error_code[i].error_bit) {
1036 			rt = fat_unit_pex_5400_error_code[i].intel_error_list;
1037 			nerr++;
1038 		}
1039 	}
1040 	sz = sizeof (uncor_pex_5400_error_code) /
1041 	    sizeof (struct mch_error_code);
1042 
1043 	for (i = 0; i < sz; i++) {
1044 		if (pex_fat & uncor_pex_5400_error_code[i].error_bit) {
1045 			rt = uncor_pex_5400_error_code[i].intel_error_list;
1046 			nerr++;
1047 		}
1048 	}
1049 
1050 	sz = sizeof (rp_pex_5400_error_code) / sizeof (struct mch_error_code);
1051 
1052 	for (i = 0; i < sz; i++) {
1053 		if (pex_nf_cor & rp_pex_5400_error_code[i].error_bit) {
1054 			rt = rp_pex_5400_error_code[i].intel_error_list;
1055 			nerr++;
1056 		}
1057 	}
1058 
1059 	sz = sizeof (cor_pex_5400_error_code1) / sizeof (struct mch_error_code);
1060 
1061 	for (i = 0; i < sz; i++) {
1062 		if (pex_nf_cor & cor_pex_5400_error_code1[i].error_bit) {
1063 			rt = cor_pex_5400_error_code1[i].intel_error_list;
1064 			nerr++;
1065 		}
1066 	}
1067 
1068 	sz = sizeof (cor_pex_5400_error_code2) / sizeof (struct mch_error_code);
1069 
1070 	for (i = 0; i < sz; i++) {
1071 		if (pex_nf_cor & cor_pex_5400_error_code2[i].error_bit) {
1072 			rt = cor_pex_5400_error_code2[i].intel_error_list;
1073 			nerr++;
1074 		}
1075 	}
1076 
1077 	sz = sizeof (cor_pex_5400_error_code3) / sizeof (struct mch_error_code);
1078 
1079 	for (i = 0; i < sz; i++) {
1080 		if (pex_nf_cor & cor_pex_5400_error_code3[i].error_bit) {
1081 			rt = cor_pex_5400_error_code3[i].intel_error_list;
1082 			nerr++;
1083 		}
1084 	}
1085 
1086 	if (nerr > 1)
1087 		rt = -1;
1088 	return (rt);
1089 }
1090 
/*
 * Capture the PCI Express error state for the PEX unit named in the
 * global FERR value (extracted with GE_ERR_PEX()) into *rp for ereport
 * generation.
 *
 * willpanic: non-zero if the system will panic; the error registers are
 *	then left unmodified so the raw state survives into the dump.
 * interpose: set non-zero if any register read detected an injected
 *	(interposed) error value.
 */
static void
log_pex_err(uint64_t ferr, nb_regs_t *rp, int willpanic, int *interpose)
{
	uint8_t pex = (uint8_t)-1;
	int t = 0;

	rp->flag = NB_REG_LOG_PEX;
	pex = GE_ERR_PEX(ferr);

	rp->nb.pex_regs.pex = pex;
	rp->nb.pex_regs.pex_fat_ferr =  PEX_FAT_FERR_RD(pex, interpose);
	rp->nb.pex_regs.pex_fat_nerr = PEX_FAT_NERR_RD(pex, &t);
	*interpose |= t;
	rp->nb.pex_regs.pex_nf_corr_ferr = PEX_NF_FERR_RD(pex, &t);
	*interpose |= t;
	rp->nb.pex_regs.pex_nf_corr_nerr = PEX_NF_NERR_RD(pex, &t);
	*interpose |= t;
	rp->nb.pex_regs.uncerrsev = UNCERRSEV_RD(pex);
	rp->nb.pex_regs.rperrsts = RPERRSTS_RD(pex);
	rp->nb.pex_regs.rperrsid = RPERRSID_RD(pex);
	/*
	 * NOTE(review): only the UNCERRSTS read is guarded against
	 * pex == (uint8_t)-1 even though the reads above use pex too;
	 * presumably GE_ERR_PEX() cannot actually return -1 here —
	 * confirm against the register macros.
	 */
	if (pex != (uint8_t)-1)
		rp->nb.pex_regs.uncerrsts = UNCERRSTS_RD(pex);
	else
		rp->nb.pex_regs.uncerrsts = 0;
	rp->nb.pex_regs.aerrcapctrl = AERRCAPCTRL_RD(pex);
	rp->nb.pex_regs.corerrsts = CORERRSTS_RD(pex);
	rp->nb.pex_regs.pexdevsts = PEXDEVSTS_RD(pex);

	/* clear the captured error state unless we are about to panic */
	if (!willpanic) {
		if (rp->nb.pex_regs.pex_fat_ferr || *interpose)
			PEX_FAT_FERR_WR(pex, rp->nb.pex_regs.pex_fat_ferr);
		if (rp->nb.pex_regs.pex_fat_nerr)
			PEX_FAT_NERR_WR(pex, rp->nb.pex_regs.pex_fat_nerr);
		if (rp->nb.pex_regs.pex_nf_corr_ferr || *interpose)
			PEX_NF_FERR_WR(pex, rp->nb.pex_regs.pex_nf_corr_ferr);
		if (rp->nb.pex_regs.pex_nf_corr_nerr)
			PEX_NF_NERR_WR(pex, rp->nb.pex_regs.pex_nf_corr_nerr);
		if (*interpose)
			UNCERRSTS_WR(pex, rp->nb.pex_regs.uncerrsts);
		if (*interpose)
			RPERRSTS_WR(pex, rp->nb.pex_regs.rperrsts);
		if (*interpose)
			PEXDEVSTS_WR(pex, 0);
	}
}
1136 
/*
 * Capture the fatal FB-DIMM error registers into *rp.  The failing
 * channel is taken from bits 28-29 of FERR_FAT_FBD and the branch is
 * channel >> 1.  A running copy of the uncorrectable error counter is
 * kept in uerrcnt[] so the previous value can be reported alongside
 * the new one.  Registers are cleared afterwards unless the system is
 * about to panic.
 */
static void
log_fat_fbd_err(nb_regs_t *rp, int willpanic, int *interpose)
{
	int channel, branch;
	int t = 0;

	rp->flag = NB_REG_LOG_FAT_FBD;
	rp->nb.fat_fbd_regs.ferr_fat_fbd = FERR_FAT_FBD_RD(interpose);
	channel = (rp->nb.fat_fbd_regs.ferr_fat_fbd >> 28) & 3;
	branch = channel >> 1;
	rp->nb.fat_fbd_regs.nerr_fat_fbd = NERR_FAT_FBD_RD(&t);
	*interpose |= t;
	rp->nb.fat_fbd_regs.nrecmema = NRECMEMA_RD(branch);
	rp->nb.fat_fbd_regs.nrecmemb = NRECMEMB_RD(branch);
	rp->nb.fat_fbd_regs.nrecfglog = NRECFGLOG_RD(branch);
	rp->nb.fat_fbd_regs.nrecfbda = NRECFBDA_RD(branch);
	rp->nb.fat_fbd_regs.nrecfbdb = NRECFBDB_RD(branch);
	rp->nb.fat_fbd_regs.nrecfbdc = NRECFBDC_RD(branch);
	rp->nb.fat_fbd_regs.nrecfbdd = NRECFBDD_RD(branch);
	rp->nb.fat_fbd_regs.nrecfbde = NRECFBDE_RD(branch);
	rp->nb.fat_fbd_regs.nrecfbdf = NRECFBDF_RD(branch);
	rp->nb.fat_fbd_regs.spcps = SPCPS_RD(branch);
	rp->nb.fat_fbd_regs.spcpc = SPCPC_RD(branch);
	/* report both the current and the previous uncorrectable count */
	rp->nb.fat_fbd_regs.uerrcnt = UERRCNT_RD(branch);
	rp->nb.fat_fbd_regs.uerrcnt_last = uerrcnt[branch];
	uerrcnt[branch] = rp->nb.fat_fbd_regs.uerrcnt;
	rp->nb.fat_fbd_regs.badrama = BADRAMA_RD(branch);
	rp->nb.fat_fbd_regs.badramb = BADRAMB_RD(branch);
	rp->nb.fat_fbd_regs.badcnt = BADCNT_RD(branch);
	if (!willpanic) {
		if (rp->nb.fat_fbd_regs.ferr_fat_fbd || *interpose)
			FERR_FAT_FBD_WR(rp->nb.fat_fbd_regs.ferr_fat_fbd);
		if (rp->nb.fat_fbd_regs.nerr_fat_fbd)
			NERR_FAT_FBD_WR(rp->nb.fat_fbd_regs.nerr_fat_fbd);
		/*
		 * if interpose write read-only registers to clear from pci
		 * cache
		 */
		if (*interpose) {
			NRECMEMA_WR(branch);
			NRECMEMB_WR(branch);
			NRECFGLOG_WR(branch);
			NRECFBDA_WR(branch);
			NRECFBDB_WR(branch);
			NRECFBDC_WR(branch);
			NRECFBDD_WR(branch);
			NRECFBDE_WR(branch);
			NRECFBDF_WR(branch);
		}
	}
}
1188 
/*
 * Capture the non-fatal FB-DIMM error registers into *rp.  The failing
 * channel is taken from bits 28-29 of FERR_NF_FBD and the branch is
 * channel >> 1.  The 7300 and 5400 chipsets provide per-channel
 * correctable error counters (CERRCNTA-D); other chipsets have a
 * single per-branch counter.  Running copies of the counters are kept
 * in the cerrcnt*[] arrays so the previous values can be reported.
 * Registers are cleared afterwards unless the system is about to panic.
 */
static void
log_nf_fbd_err(nb_regs_t *rp, int willpanic, int *interpose)
{
	int channel, branch;
	int t = 0;

	rp->flag = NB_REG_LOG_NF_FBD;
	rp->nb.nf_fbd_regs.ferr_nf_fbd = FERR_NF_FBD_RD(interpose);
	channel = (rp->nb.nf_fbd_regs.ferr_nf_fbd >> 28) & 3;
	branch = channel >> 1;
	rp->nb.nf_fbd_regs.nerr_nf_fbd = NERR_NF_FBD_RD(&t);
	*interpose |= t;
	rp->nb.nf_fbd_regs.redmemb = REDMEMB_RD();
	rp->nb.nf_fbd_regs.recmema = RECMEMA_RD(branch);
	rp->nb.nf_fbd_regs.recmemb = RECMEMB_RD(branch);
	rp->nb.nf_fbd_regs.recfglog = RECFGLOG_RD(branch);
	rp->nb.nf_fbd_regs.recfbda = RECFBDA_RD(branch);
	rp->nb.nf_fbd_regs.recfbdb = RECFBDB_RD(branch);
	rp->nb.nf_fbd_regs.recfbdc = RECFBDC_RD(branch);
	rp->nb.nf_fbd_regs.recfbdd = RECFBDD_RD(branch);
	rp->nb.nf_fbd_regs.recfbde = RECFBDE_RD(branch);
	rp->nb.nf_fbd_regs.recfbdf = RECFBDF_RD(branch);
	rp->nb.nf_fbd_regs.spcps = SPCPS_RD(branch);
	rp->nb.nf_fbd_regs.spcpc = SPCPC_RD(branch);
	/* per-channel counters on 7300/5400, one per-branch counter elsewhere */
	if (nb_chipset == INTEL_NB_7300 || nb_chipset == INTEL_NB_5400) {
		rp->nb.nf_fbd_regs.cerrcnta = CERRCNTA_RD(branch, channel);
		rp->nb.nf_fbd_regs.cerrcntb = CERRCNTB_RD(branch, channel);
		rp->nb.nf_fbd_regs.cerrcntc = CERRCNTC_RD(branch, channel);
		rp->nb.nf_fbd_regs.cerrcntd = CERRCNTD_RD(branch, channel);
	} else {
		rp->nb.nf_fbd_regs.cerrcnta = CERRCNT_RD(branch);
		rp->nb.nf_fbd_regs.cerrcntb = 0;
		rp->nb.nf_fbd_regs.cerrcntc = 0;
		rp->nb.nf_fbd_regs.cerrcntd = 0;
	}
	/* report both the current and the previous correctable counts */
	rp->nb.nf_fbd_regs.cerrcnta_last = cerrcnta[branch][channel & 1];
	rp->nb.nf_fbd_regs.cerrcntb_last = cerrcntb[branch][channel & 1];
	rp->nb.nf_fbd_regs.cerrcntc_last = cerrcntc[branch][channel & 1];
	rp->nb.nf_fbd_regs.cerrcntd_last = cerrcntd[branch][channel & 1];
	cerrcnta[branch][channel & 1] = rp->nb.nf_fbd_regs.cerrcnta;
	cerrcntb[branch][channel & 1] = rp->nb.nf_fbd_regs.cerrcntb;
	cerrcntc[branch][channel & 1] = rp->nb.nf_fbd_regs.cerrcntc;
	cerrcntd[branch][channel & 1] = rp->nb.nf_fbd_regs.cerrcntd;
	rp->nb.nf_fbd_regs.badrama = BADRAMA_RD(branch);
	rp->nb.nf_fbd_regs.badramb = BADRAMB_RD(branch);
	rp->nb.nf_fbd_regs.badcnt = BADCNT_RD(branch);
	if (!willpanic) {
		if (rp->nb.nf_fbd_regs.ferr_nf_fbd || *interpose)
			FERR_NF_FBD_WR(rp->nb.nf_fbd_regs.ferr_nf_fbd);
		if (rp->nb.nf_fbd_regs.nerr_nf_fbd)
			NERR_NF_FBD_WR(rp->nb.nf_fbd_regs.nerr_nf_fbd);
		/*
		 * if interpose write read-only registers to clear from pci
		 * cache
		 */
		if (*interpose) {
			RECMEMA_WR(branch);
			RECMEMB_WR(branch);
			RECFGLOG_WR(branch);
			RECFBDA_WR(branch);
			RECFBDB_WR(branch);
			RECFBDC_WR(branch);
			RECFBDD_WR(branch);
			RECFBDE_WR(branch);
			RECFBDF_WR(branch);
			SPCPS_WR(branch);
		}
	}
}
1258 
/*
 * Capture the non-fatal memory (non-FBD) error registers into *rp.
 * Channel is bit 28 of FERR_NF_MEM and maps 1:1 to the branch here.
 * Running copies of the correctable error counters are kept in
 * cerrcnta[]/cerrcntb[] so previous values can be reported.
 *
 * Returns 1 if the error should be discarded as spurious (single-channel
 * memory mode but a non-zero channel was reported), 0 otherwise.
 */
static int
log_nf_mem_err(nb_regs_t *rp, int willpanic, int *interpose)
{
	int channel, branch;
	int t = 0;
	int rt = 0;

	rp->flag = NB_REG_LOG_NF_MEM;

	/* Memory error registers */
	rp->nb.nf_mem_regs.ferr_nf_mem = FERR_NF_MEM_RD(interpose);
	channel = (rp->nb.nf_mem_regs.ferr_nf_mem >> 28) & 0x1;
	branch = channel;
	rp->nb.nf_mem_regs.nerr_nf_mem = NERR_NF_MEM_RD(&t);
	*interpose |= t;
	rp->nb.nf_mem_regs.redmema = MEM_REDMEMA_RD(branch);
	rp->nb.nf_mem_regs.redmemb = MEM_REDMEMB_RD(branch);
	rp->nb.nf_mem_regs.recmema = MEM_RECMEMA_RD(branch);
	rp->nb.nf_mem_regs.recmemb = MEM_RECMEMB_RD(branch);
	rp->nb.nf_mem_regs.nrecmema = MEM_NRECMEMA_RD(branch);
	rp->nb.nf_mem_regs.nrecmemb = MEM_NRECMEMB_RD(branch);

	/* spare rank */
	rp->nb.nf_mem_regs.spcps = SPCPS_RD(branch);
	rp->nb.nf_mem_regs.spcpc = SPCPC_RD(branch);

	/* RAS registers */
	rp->nb.nf_mem_regs.cerrcnt = MEM_CERRCNT_RD(branch);
	rp->nb.nf_mem_regs.cerrcnt_ext = (uint32_t)MEM_CERRCNT_EXT_RD(branch);
	rp->nb.nf_mem_regs.cerrcnt_last = cerrcnta[branch][channel & 1];
	rp->nb.nf_mem_regs.cerrcnt_ext_last = cerrcntb[branch][channel & 1];
	cerrcnta[branch][channel & 1] = rp->nb.nf_mem_regs.cerrcnt;
	cerrcntb[branch][channel & 1] = rp->nb.nf_mem_regs.cerrcnt_ext;
	rp->nb.nf_mem_regs.badram = BADRAMA_RD(branch);
	rp->nb.nf_mem_regs.badcnt = BADCNT_RD(branch);
	rp->nb.nf_mem_regs.validlog = VALIDLOG_RD(branch);

	/* clear the captured error state unless we are about to panic */
	if (!willpanic) {
		if (rp->nb.nf_mem_regs.ferr_nf_mem || *interpose)
			FERR_NF_MEM_WR(rp->nb.nf_mem_regs.ferr_nf_mem);
		if (rp->nb.nf_mem_regs.nerr_nf_mem)
			NERR_NF_MEM_WR(rp->nb.nf_mem_regs.nerr_nf_mem);
		/*
		 * if interpose, write read-only registers to clear from pci
		 * cache
		 */
		if (*interpose) {
			MEM_NRECMEMA_WR(branch);
			MEM_NRECMEMB_WR(branch);
			MEM_REDMEMA_WR(branch);
			MEM_REDMEMB_WR(branch);
			MEM_RECMEMA_WR(branch);
			MEM_RECMEMB_WR(branch);
			SPCPS_WR(branch);
		}
	}
	if (nb_mode == NB_MEMORY_SINGLE_CHANNEL && channel != 0) {
		/*
		 * In the single channel mode, all dimms are on the channel 0.
		 * Invalidate this error if the channel number is invalid.
		 */
		rt = 1;
	}
	return (rt);
}
1324 
/*
 * Log the first error indicated by the global FERR value: dispatch to
 * the matching per-subsystem logout routine, clear the corresponding
 * bits from the caller's NERR copy (*nerrp) so the same error is not
 * logged again from NERR, and queue the logout record unless the
 * subsystem routine flagged it as spurious.  Note that for FBD/memory
 * errors the FERR bit tested and the NERR bit cleared differ (GE_* vs
 * GE_NERR_*) — the two registers encode the subsystems differently.
 */
static void
log_ferr(uint64_t ferr, uint32_t *nerrp, nb_logout_t *log, int willpanic)
{
	nb_regs_t *rp = &log->nb_regs;
	uint32_t nerr = *nerrp;
	int interpose = 0;
	int spurious = 0;

	log->acl_timestamp = gethrtime_waitfree();
	if ((ferr & (GE_PCIEX_FATAL | GE_PCIEX_NF)) != 0) {
		log_pex_err(ferr, rp, willpanic, &interpose);
		*nerrp = nerr & ~(GE_PCIEX_FATAL | GE_PCIEX_NF);
	} else if ((ferr & GE_FBD_FATAL) != 0) {
		log_fat_fbd_err(rp, willpanic, &interpose);
		*nerrp = nerr & ~GE_NERR_FBD_FATAL;
	} else if ((ferr & GE_FBD_NF) != 0) {
		log_nf_fbd_err(rp, willpanic, &interpose);
		*nerrp = nerr & ~GE_NERR_FBD_NF;
	} else if ((ferr & GE_MEM_NF) != 0) {
		spurious = log_nf_mem_err(rp, willpanic, &interpose);
		*nerrp = nerr & ~GE_NERR_MEM_NF;
	} else if ((ferr & (GE_FERR_FSB_FATAL | GE_FERR_FSB_NF)) != 0) {
		log_fsb_err(ferr, rp, willpanic, &interpose);
		*nerrp = nerr & ~(GE_NERR_FSB_FATAL | GE_NERR_FSB_NF);
	} else if ((ferr & (GE_DMA_FATAL | GE_DMA_NF)) != 0) {
		log_dma_err(rp, &interpose);
		*nerrp = nerr & ~(GE_DMA_FATAL | GE_DMA_NF);
	} else if ((ferr & (GE_INT_FATAL | GE_INT_NF)) != 0) {
		spurious = log_int_err(rp, willpanic, &interpose);
		*nerrp = nerr & ~(GE_INT_FATAL | GE_INT_NF);
	} else if (nb_chipset == INTEL_NB_5400 &&
	    (ferr & (GE_FERR_THERMAL_FATAL | GE_FERR_THERMAL_NF)) != 0) {
		log_thermal_err(rp, willpanic, &interpose);
		*nerrp = nerr & ~(GE_FERR_THERMAL_FATAL | GE_FERR_THERMAL_NF);
	}
	/* an interposed value means this was an injected test error */
	if (interpose)
		log->type = "inject";
	else
		log->type = "error";
	if (!spurious) {
		errorq_dispatch(nb_queue, log, sizeof (nb_logout_t),
		    willpanic ? ERRORQ_SYNC : ERRORQ_ASYNC);
	}
}
1369 
/*
 * Log a "next" error recorded in the global NERR value (an error that
 * arrived while a first error was already latched): dispatch to the
 * matching per-subsystem logout routine, clear the handled bits from
 * *errp, and queue the logout record unless the subsystem routine
 * flagged it as spurious.  Unlike log_ferr(), thermal errors are not
 * handled here.
 */
static void
log_nerr(uint32_t *errp, nb_logout_t *log, int willpanic)
{
	uint32_t err;
	nb_regs_t *rp = &log->nb_regs;
	int interpose = 0;
	int spurious = 0;

	err = *errp;
	log->acl_timestamp = gethrtime_waitfree();
	if ((err & (GE_PCIEX_FATAL | GE_PCIEX_NF)) != 0) {
		log_pex_err(err, rp, willpanic, &interpose);
		*errp = err & ~(GE_PCIEX_FATAL | GE_PCIEX_NF);
	} else if ((err & GE_NERR_FBD_FATAL) != 0) {
		log_fat_fbd_err(rp, willpanic, &interpose);
		*errp = err & ~GE_NERR_FBD_FATAL;
	} else if ((err & GE_NERR_FBD_NF) != 0) {
		log_nf_fbd_err(rp, willpanic, &interpose);
		*errp = err & ~GE_NERR_FBD_NF;
	} else if ((err & GE_NERR_MEM_NF) != 0) {
		spurious = log_nf_mem_err(rp, willpanic, &interpose);
		*errp = err & ~GE_NERR_MEM_NF;
	} else if ((err & (GE_NERR_FSB_FATAL | GE_NERR_FSB_NF)) != 0) {
		/* translate the NERR-format FSB bits to FERR format */
		log_fsb_err(GE_NERR_TO_FERR_FSB(err), rp, willpanic,
		    &interpose);
		*errp = err & ~(GE_NERR_FSB_FATAL | GE_NERR_FSB_NF);
	} else if ((err & (GE_DMA_FATAL | GE_DMA_NF)) != 0) {
		log_dma_err(rp, &interpose);
		*errp = err & ~(GE_DMA_FATAL | GE_DMA_NF);
	} else if ((err & (GE_INT_FATAL | GE_INT_NF)) != 0) {
		spurious = log_int_err(rp, willpanic, &interpose);
		*errp = err & ~(GE_INT_FATAL | GE_INT_NF);
	}
	/* an interposed value means this was an injected test error */
	if (interpose)
		log->type = "inject";
	else
		log->type = "error";
	if (!spurious) {
		errorq_dispatch(nb_queue, log, sizeof (nb_logout_t),
		    willpanic ? ERRORQ_SYNC : ERRORQ_ASYNC);
	}
}
1412 
/*
 * Northbridge error trap handler: drain and log all errors currently
 * latched in the global FERR/NERR registers.  Up to NB_MAX_ERRORS
 * passes are made, logging the first error (FERR) on each pass and
 * falling back to any remaining "next" errors (NERR) once FERR reads
 * zero.  If no error was logged and a machine-check mask had been set,
 * it is reset.
 *
 * NOTE(review): mutex_tryenter() is used instead of mutex_enter() —
 * presumably so a trap arriving while another CPU holds nb_mutex is
 * simply dropped rather than blocked in trap context; confirm.
 */
/*ARGSUSED*/
void
nb_error_trap(cmi_hdl_t hdl, boolean_t ismc, boolean_t willpanic)
{
	uint64_t ferr;
	uint32_t nerr, err;
	int nmc = 0;	/* number of errors logged this invocation */
	int i;

	if (mutex_tryenter(&nb_mutex) == 0)
		return;

	nerr = NERR_GLOBAL_RD();
	err = nerr;
	for (i = 0; i < NB_MAX_ERRORS; i++) {
		ferr = FERR_GLOBAL_RD();
		nb_log.nb_regs.chipset = nb_chipset;
		nb_log.nb_regs.ferr = ferr;
		nb_log.nb_regs.nerr = nerr;
		if (ferr) {
			log_ferr(ferr, &err, &nb_log, willpanic);
			FERR_GLOBAL_WR(ferr);	/* ack the first error */
			nmc++;
		} else if (err) {
			log_nerr(&err, &nb_log, willpanic);
			nmc++;
		}
	}
	/* acknowledge all next errors we saw at entry */
	if (nerr) {
		NERR_GLOBAL_WR(nerr);
	}
	if (nmc == 0 && nb_mask_mc_set)
		nb_mask_mc_reset();
	mutex_exit(&nb_mutex);
}
1448 
/*
 * Populate an FSB (front-side bus) error ereport payload from the
 * captured register set.  The Intel error-list number previously
 * decoded into the scratchpad is rendered as "Fn", or as a
 * multiple/unknown-error string when it is negative.
 */
static void
nb_fsb_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
    nb_scatchpad_t *data)
{
	int intel_error_list;
	char buf[32];

	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FSB,
	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.fsb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_FAT_FSB,
	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.ferr_fat_fsb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_FAT_FSB,
	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.nerr_fat_fsb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_FSB,
	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.ferr_nf_fsb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_FSB,
	    DATA_TYPE_UINT8, nb_regs->nb.fsb_regs.nerr_nf_fsb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFSB,
	    DATA_TYPE_UINT32, nb_regs->nb.fsb_regs.nrecfsb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFSB_ADDR,
	    DATA_TYPE_UINT64, nb_regs->nb.fsb_regs.nrecfsb_addr, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFSB,
	    DATA_TYPE_UINT32, nb_regs->nb.fsb_regs.recfsb, NULL);
	intel_error_list = data->intel_error_list;
	if (intel_error_list >= 0)
		(void) snprintf(buf, sizeof (buf), "F%d", intel_error_list);
	else
		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
	    DATA_TYPE_STRING, buf, NULL);
}
1480 
/*
 * Populate a PCI Express error ereport payload from the captured
 * register set.  The Intel error-list number previously decoded into
 * the scratchpad is rendered as "IOn", or as a multiple/unknown-error
 * string when it is negative.
 */
static void
nb_pex_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
    nb_scatchpad_t *data)
{
	int intel_error_list;
	char buf[32];

	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX,
	    DATA_TYPE_UINT8, nb_regs->nb.pex_regs.pex, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX_FAT_FERR,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.pex_fat_ferr, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX_FAT_NERR,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.pex_fat_nerr, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX_NF_CORR_FERR,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.pex_nf_corr_ferr, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEX_NF_CORR_NERR,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.pex_nf_corr_nerr, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UNCERRSEV,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.uncerrsev, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RPERRSTS,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.rperrsts, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RPERRSID,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.rperrsid, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UNCERRSTS,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.uncerrsts, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_AERRCAPCTRL,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.aerrcapctrl, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CORERRSTS,
	    DATA_TYPE_UINT32, nb_regs->nb.pex_regs.corerrsts, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEXDEVSTS,
	    DATA_TYPE_UINT16, nb_regs->nb.pex_regs.pexdevsts, NULL);
	intel_error_list = data->intel_error_list;
	if (intel_error_list >= 0)
		(void) snprintf(buf, sizeof (buf), "IO%d", intel_error_list);
	else
		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
	    DATA_TYPE_STRING, buf, NULL);
}
1520 
/*
 * Populate an internal (INT) error ereport payload from the captured
 * register set.  The Intel error-list number previously decoded into
 * the scratchpad is rendered as "Bn", or as a multiple/unknown-error
 * string when it is negative.
 */
static void
nb_int_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
    nb_scatchpad_t *data)
{
	int intel_error_list;
	char buf[32];

	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_FAT_INT,
	    DATA_TYPE_UINT16, nb_regs->nb.int_regs.ferr_fat_int, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_INT,
	    DATA_TYPE_UINT16, nb_regs->nb.int_regs.ferr_nf_int, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_FAT_INT,
	    DATA_TYPE_UINT16, nb_regs->nb.int_regs.nerr_fat_int, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_INT,
	    DATA_TYPE_UINT16, nb_regs->nb.int_regs.nerr_nf_int, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECINT,
	    DATA_TYPE_UINT32, nb_regs->nb.int_regs.nrecint, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECINT,
	    DATA_TYPE_UINT32, nb_regs->nb.int_regs.recint, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECSF,
	    DATA_TYPE_UINT64, nb_regs->nb.int_regs.nrecsf, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECSF,
	    DATA_TYPE_UINT64, nb_regs->nb.int_regs.recsf, NULL);
	intel_error_list = data->intel_error_list;
	if (intel_error_list >= 0)
		(void) snprintf(buf, sizeof (buf), "B%d", intel_error_list);
	else
		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
	    DATA_TYPE_STRING, buf, NULL);
}
1552 
/*
 * Populate a fatal FB-DIMM error ereport payload from the captured
 * register set.  If the memory scratchpad holds a decoded address
 * (ras != -1), the bank/cas/ras members and, when available, the
 * memory offset and physical address are included.  The Intel
 * error-list number is rendered as "Mn", or as a
 * multiple/unknown-error string when negative.
 */
static void
nb_fat_fbd_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
    nb_scatchpad_t *data)
{
	nb_mem_scatchpad_t *sp;
	char buf[32];

	sp = &((nb_scatchpad_t *)data)->ms;

	/* address decode succeeded: include the DRAM coordinates */
	if (sp->ras != -1) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BANK,
		    DATA_TYPE_INT32, sp->bank, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CAS,
		    DATA_TYPE_INT32, sp->cas, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RAS,
		    DATA_TYPE_INT32, sp->ras, NULL);
		if (sp->offset != -1LL) {
			fm_payload_set(payload, FM_FMRI_MEM_OFFSET,
			    DATA_TYPE_UINT64, sp->offset, NULL);
		}
		if (sp->pa != -1LL) {
			fm_payload_set(payload, FM_FMRI_MEM_PHYSADDR,
			    DATA_TYPE_UINT64, sp->pa, NULL);
		}
	}
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_FAT_FBD,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.ferr_fat_fbd, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_FAT_FBD,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nerr_fat_fbd, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECMEMA,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecmema, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECMEMB,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecmemb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFGLOG,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfglog, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDA,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbda, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDB,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbdb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDC,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbdc, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDD,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbdd, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDE,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbde, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECFBDF,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.nrecfbdf, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPS,
	    DATA_TYPE_UINT8, nb_regs->nb.fat_fbd_regs.spcps, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPC,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.spcpc, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UERRCNT,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.uerrcnt, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_UERRCNT_LAST,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.uerrcnt_last, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAMA,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.badrama, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAMB,
	    DATA_TYPE_UINT16, nb_regs->nb.fat_fbd_regs.badramb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADCNT,
	    DATA_TYPE_UINT32, nb_regs->nb.fat_fbd_regs.badcnt, NULL);

	if (sp->intel_error_list >= 0)
		(void) snprintf(buf, sizeof (buf), "M%d", sp->intel_error_list);
	else
		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
	    DATA_TYPE_STRING, buf, NULL);
}
1622 
/*
 * Populate a non-fatal FB-DIMM error ereport payload from the captured
 * register set.  A rank is reported when the decode yielded one without
 * a specific DIMM; bank/cas/ras and address members are included when
 * the address decode succeeded (ras != -1).  The 7300 and 5400 chipsets
 * report the per-channel CERRCNTA/B (and on 7300 also C/D) counters;
 * other chipsets report a single CERRCNT.  The Intel error-list number
 * is rendered as "Mn", or as a multiple/unknown-error string when
 * negative.
 */
static void
nb_nf_fbd_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
    nb_scatchpad_t *data)
{
	nb_mem_scatchpad_t *sp;
	char buf[32];

	sp = &((nb_scatchpad_t *)data)->ms;

	/* rank known but DIMM not resolved */
	if (sp->dimm == -1 && sp->rank != -1) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RANK,
		    DATA_TYPE_INT32, sp->rank, NULL);
	}
	/* address decode succeeded: include the DRAM coordinates */
	if (sp->ras != -1) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BANK,
		    DATA_TYPE_INT32, sp->bank, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CAS,
		    DATA_TYPE_INT32, sp->cas, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RAS,
		    DATA_TYPE_INT32, sp->ras, NULL);
		if (sp->offset != -1LL) {
			fm_payload_set(payload, FM_FMRI_MEM_OFFSET,
			    DATA_TYPE_UINT64, sp->offset, NULL);
		}
		if (sp->pa != -1LL) {
			fm_payload_set(payload, FM_FMRI_MEM_PHYSADDR,
			    DATA_TYPE_UINT64, sp->pa, NULL);
		}
	}
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_FBD,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.ferr_nf_fbd, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_FBD,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.nerr_nf_fbd, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECMEMA,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recmema, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECMEMB,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recmemb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFGLOG,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfglog, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDA,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbda, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDB,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbdb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDC,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbdc, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDD,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbdd, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDE,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbde, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECFBDF,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.recfbdf, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPS,
	    DATA_TYPE_UINT8, nb_regs->nb.nf_fbd_regs.spcps, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPC,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.spcpc, NULL);
	if (nb_chipset == INTEL_NB_7300 || nb_chipset == INTEL_NB_5400) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNTA,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcnta, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNTB,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcntb, NULL);
		if (nb_chipset == INTEL_NB_7300) {
			fm_payload_set(payload,
			    FM_EREPORT_PAYLOAD_NAME_CERRCNTC,
			    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcntc,
			    NULL);
			fm_payload_set(payload,
			    FM_EREPORT_PAYLOAD_NAME_CERRCNTD,
			    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcntd,
			    NULL);
		}
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNTA_LAST,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcnta_last,
		    NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNTB_LAST,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcntb_last,
		    NULL);
		if (nb_chipset == INTEL_NB_7300) {
			fm_payload_set(payload,
			    FM_EREPORT_PAYLOAD_NAME_CERRCNTC_LAST,
			    DATA_TYPE_UINT32,
			    nb_regs->nb.nf_fbd_regs.cerrcntc_last, NULL);
			fm_payload_set(payload,
			    FM_EREPORT_PAYLOAD_NAME_CERRCNTD_LAST,
			    DATA_TYPE_UINT32,
			    nb_regs->nb.nf_fbd_regs.cerrcntd_last, NULL);
		}
	} else {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcnta, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT_LAST,
		    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.cerrcnta_last,
		    NULL);
	}
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAMA,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.badrama, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAMB,
	    DATA_TYPE_UINT16, nb_regs->nb.nf_fbd_regs.badramb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADCNT,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_fbd_regs.badcnt, NULL);

	if (sp->intel_error_list >= 0)
		(void) snprintf(buf, sizeof (buf), "M%d", sp->intel_error_list);
	else
		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
	    DATA_TYPE_STRING, buf, NULL);
}
1730 
/*
 * Populate the ereport payload for a non-fatal memory-controller error:
 * the raw error registers captured in the logout area, plus whatever
 * address decode (rank, bank, row/column, offset, physical address) the
 * earlier decode pass left in the scratchpad.
 */
static void
nb_nf_mem_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
    nb_scatchpad_t *data)
{
	nb_mem_scatchpad_t *sp;
	char buf[32];

	sp = &((nb_scatchpad_t *)data)->ms;

	/* Rank identified without a specific DIMM: report the rank alone. */
	if (sp->dimm == -1 && sp->rank != -1) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RANK,
		    DATA_TYPE_INT32, sp->rank, NULL);
	}
	/* ras != -1 indicates a full bank/row/column decode was possible. */
	if (sp->ras != -1) {
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BANK,
		    DATA_TYPE_INT32, sp->bank, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CAS,
		    DATA_TYPE_INT32, sp->cas, NULL);
		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RAS,
		    DATA_TYPE_INT32, sp->ras, NULL);
		/* -1LL sentinels mark failed offset/address translation. */
		if (sp->offset != -1LL) {
			fm_payload_set(payload, FM_FMRI_MEM_OFFSET,
			    DATA_TYPE_UINT64, sp->offset, NULL);
		}
		if (sp->pa != -1LL) {
			fm_payload_set(payload, FM_FMRI_MEM_PHYSADDR,
			    DATA_TYPE_UINT64, sp->pa, NULL);
		}
	}
	/* Raw register images from the logout area. */
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_MEM,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.ferr_nf_mem, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_MEM,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.nerr_nf_mem, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECMEMA,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.recmema, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RECMEMB,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.recmemb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_REDMEMA,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.redmema, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_REDMEMB,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.redmemb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECMEMA,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.nrecmema, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NRECMEMB,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.nrecmemb, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPS,
	    DATA_TYPE_UINT8, nb_regs->nb.nf_mem_regs.spcps, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_SPCPC,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.spcpc, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.cerrcnt, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT_LAST,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.cerrcnt_last, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT_EXT,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.cerrcnt_ext, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CERRCNT_EXT_LAST,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.cerrcnt_ext_last, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADRAM,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.badram, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_BADCNT,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.badcnt, NULL);
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_VALIDLOG,
	    DATA_TYPE_UINT32, nb_regs->nb.nf_mem_regs.validlog, NULL);

	/* Intel chipset error-list entry ("M<n>") when uniquely decoded. */
	if (sp->intel_error_list >= 0)
		(void) snprintf(buf, sizeof (buf), "M%d", sp->intel_error_list);
	else
		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
	    DATA_TYPE_STRING, buf, NULL);
}
1802 
1803 static void
1804 nb_dma_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload)
1805 {
1806 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PCISTS,
1807 	    DATA_TYPE_UINT16, nb_regs->nb.dma_regs.pcists, NULL);
1808 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_PEXDEVSTS,
1809 	    DATA_TYPE_UINT16, nb_regs->nb.dma_regs.pexdevsts, NULL);
1810 }
1811 
1812 static void
1813 nb_thr_err_payload(const nb_regs_t *nb_regs, nvlist_t *payload,
1814     nb_scatchpad_t *data)
1815 {
1816 	char buf[32];
1817 
1818 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_FAT_THR,
1819 	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.ferr_fat_thr, NULL);
1820 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_FAT_THR,
1821 	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.nerr_fat_thr, NULL);
1822 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_NF_THR,
1823 	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.ferr_nf_thr, NULL);
1824 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_NF_THR,
1825 	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.nerr_nf_thr, NULL);
1826 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_CTSTS,
1827 	    DATA_TYPE_UINT8, nb_regs->nb.thr_regs.ctsts, NULL);
1828 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_THRTSTS,
1829 	    DATA_TYPE_UINT16, nb_regs->nb.thr_regs.thrtsts, NULL);
1830 	if (data->intel_error_list >= 0) {
1831 		(void) snprintf(buf, sizeof (buf), "TH%d",
1832 		    data->intel_error_list);
1833 	} else {
1834 		(void) snprintf(buf, sizeof (buf), "Multiple or unknown error");
1835 	}
1836 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_ERROR_NO,
1837 	    DATA_TYPE_STRING, buf, NULL);
1838 }
1839 
1840 static void
1841 nb_ereport_add_logout(nvlist_t *payload, const nb_logout_t *acl,
1842     nb_scatchpad_t *data)
1843 {
1844 	const nb_regs_t *nb_regs = &acl->nb_regs;
1845 
1846 	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_MC_TYPE,
1847 	    DATA_TYPE_STRING, acl->type, NULL);
1848 	switch (nb_regs->flag) {
1849 	case NB_REG_LOG_FSB:
1850 		nb_fsb_err_payload(nb_regs, payload, data);
1851 		break;
1852 	case NB_REG_LOG_PEX:
1853 		nb_pex_err_payload(nb_regs, payload, data);
1854 		break;
1855 	case NB_REG_LOG_INT:
1856 		nb_int_err_payload(nb_regs, payload, data);
1857 		break;
1858 	case NB_REG_LOG_FAT_FBD:
1859 		nb_fat_fbd_err_payload(nb_regs, payload, data);
1860 		break;
1861 	case NB_REG_LOG_NF_FBD:
1862 		nb_nf_fbd_err_payload(nb_regs, payload, data);
1863 		break;
1864 	case NB_REG_LOG_DMA:
1865 		nb_dma_err_payload(nb_regs, payload);
1866 		break;
1867 	case NB_REG_LOG_THR:
1868 		nb_thr_err_payload(nb_regs, payload, data);
1869 		break;
1870 	case NB_REG_LOG_NF_MEM:
1871 		nb_nf_mem_err_payload(nb_regs, payload, data);
1872 		break;
1873 	default:
1874 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_FERR_GLOBAL,
1875 		    DATA_TYPE_UINT64, nb_regs->ferr, NULL);
1876 		fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_NERR_GLOBAL,
1877 		    DATA_TYPE_UINT32, nb_regs->nerr, NULL);
1878 		break;
1879 	}
1880 }
1881 
1882 void
1883 nb_fsb_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
1884     nb_scatchpad_t *data)
1885 {
1886 	int chip;
1887 
1888 	if (nb_chipset == INTEL_NB_7300)
1889 		chip = nb_regs->nb.fsb_regs.fsb * 2;
1890 	else
1891 		chip = nb_regs->nb.fsb_regs.fsb;
1892 	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
1893 	    "motherboard", 0, "chip", chip);
1894 
1895 	if (nb_regs->nb.fsb_regs.ferr_fat_fsb == 0 &&
1896 	    nb_regs->nb.fsb_regs.ferr_nf_fsb == 0) {
1897 		data->intel_error_list = intel_fsb_err(nb_regs->nb.fsb_regs.fsb,
1898 		    nb_regs->nb.fsb_regs.nerr_fat_fsb,
1899 		    nb_regs->nb.fsb_regs.nerr_nf_fsb);
1900 	} else {
1901 		data->intel_error_list = intel_fsb_err(nb_regs->nb.fsb_regs.fsb,
1902 		    nb_regs->nb.fsb_regs.ferr_fat_fsb,
1903 		    nb_regs->nb.fsb_regs.ferr_nf_fsb);
1904 	}
1905 	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
1906 	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "fsb");
1907 }
1908 
1909 void
1910 nb_pex_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
1911     nb_scatchpad_t *data)
1912 {
1913 	int hostbridge;
1914 
1915 	if (nb_regs->nb.pex_regs.pex == 0) {
1916 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
1917 		    "motherboard", 0);
1918 	} else {
1919 		hostbridge = nb_regs->nb.pex_regs.pex - 1;
1920 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
1921 		    "motherboard", 0,
1922 		    "hostbridge", hostbridge);
1923 	}
1924 
1925 	if (nb_regs->nb.pex_regs.pex_fat_ferr == 0 &&
1926 	    nb_regs->nb.pex_regs.pex_nf_corr_ferr == 0) {
1927 		if (nb_chipset == INTEL_NB_5400) {
1928 			data->intel_error_list =
1929 			    intel_pex_5400_err(
1930 			    nb_regs->nb.pex_regs.pex_fat_nerr,
1931 			    nb_regs->nb.pex_regs.pex_nf_corr_nerr);
1932 		} else {
1933 			data->intel_error_list =
1934 			    intel_pex_err(nb_regs->nb.pex_regs.pex_fat_nerr,
1935 			    nb_regs->nb.pex_regs.pex_nf_corr_nerr);
1936 		}
1937 	} else {
1938 		if (nb_chipset == INTEL_NB_5400) {
1939 			data->intel_error_list =
1940 			    intel_pex_5400_err(
1941 			    nb_regs->nb.pex_regs.pex_fat_ferr,
1942 			    nb_regs->nb.pex_regs.pex_nf_corr_ferr);
1943 		} else {
1944 			data->intel_error_list =
1945 			    intel_pex_err(nb_regs->nb.pex_regs.pex_fat_ferr,
1946 			    nb_regs->nb.pex_regs.pex_nf_corr_ferr);
1947 		}
1948 	}
1949 
1950 	if (nb_regs->nb.pex_regs.pex == 0) {
1951 		(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
1952 		    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "esi");
1953 	} else {
1954 		(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
1955 		    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "pex");
1956 	}
1957 }
1958 
1959 void
1960 nb_int_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
1961     void *data)
1962 {
1963 	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
1964 	    "motherboard", 0);
1965 
1966 	if (nb_regs->nb.int_regs.ferr_fat_int == 0 &&
1967 	    nb_regs->nb.int_regs.ferr_nf_int == 0) {
1968 		((nb_scatchpad_t *)data)->intel_error_list =
1969 		    intel_int_err(nb_regs->nb.int_regs.nerr_fat_int,
1970 		    nb_regs->nb.int_regs.nerr_nf_int);
1971 	} else {
1972 		((nb_scatchpad_t *)data)->intel_error_list =
1973 		    intel_int_err(nb_regs->nb.int_regs.ferr_fat_int,
1974 		    nb_regs->nb.int_regs.ferr_nf_int);
1975 	}
1976 	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
1977 	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "ie");
1978 }
1979 
1980 void
1981 nb_fat_fbd_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
1982     void *data)
1983 {
1984 	char *intr;
1985 	nb_mem_scatchpad_t *sp;
1986 
1987 	intr = fat_memory_error(nb_regs, data);
1988 	sp = &((nb_scatchpad_t *)data)->ms;
1989 
1990 	if (sp->dimm != -1) {
1991 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 5,
1992 		    "motherboard", 0,
1993 		    "memory-controller", sp->branch,
1994 		    "dram-channel", sp->channel,
1995 		    "dimm", sp->dimm,
1996 		    "rank", sp->rank);
1997 	} else if (sp->channel != -1) {
1998 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 3,
1999 		    "motherboard", 0,
2000 		    "memory-controller", sp->branch,
2001 		    "dram-channel", sp->channel);
2002 	} else if (sp->branch != -1) {
2003 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
2004 		    "motherboard", 0,
2005 		    "memory-controller", sp->branch);
2006 	} else {
2007 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
2008 		    "motherboard", 0);
2009 	}
2010 
2011 	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s",
2012 	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, intr);
2013 }
2014 
2015 void
2016 nb_nf_fbd_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
2017     void *data)
2018 {
2019 	char *intr;
2020 	nb_mem_scatchpad_t *sp;
2021 
2022 	intr = nf_memory_error(nb_regs, data);
2023 	sp = &((nb_scatchpad_t *)data)->ms;
2024 
2025 	if (sp->dimm != -1) {
2026 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 5,
2027 		    "motherboard", 0,
2028 		    "memory-controller", sp->branch,
2029 		    "dram-channel", sp->channel,
2030 		    "dimm", sp->dimm,
2031 		    "rank", sp->rank);
2032 	} else if (sp->channel != -1) {
2033 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 3,
2034 		    "motherboard", 0,
2035 		    "memory-controller", sp->branch,
2036 		    "dram-channel", sp->channel);
2037 	} else if (sp->branch != -1) {
2038 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
2039 		    "motherboard", 0,
2040 		    "memory-controller", sp->branch);
2041 	} else {
2042 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
2043 		    "motherboard", 0);
2044 	}
2045 
2046 	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s",
2047 	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, intr);
2048 }
2049 
2050 void
2051 nb_dma_report(char *class, nvlist_t *detector)
2052 {
2053 	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
2054 	    "motherboard", 0);
2055 
2056 	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
2057 	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "dma");
2058 }
2059 
2060 void
2061 nb_thr_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
2062     void *data)
2063 {
2064 	((nb_scatchpad_t *)data)->intel_error_list =
2065 	    intel_thr_err(nb_regs->nb.thr_regs.ferr_fat_thr,
2066 	    nb_regs->nb.thr_regs.ferr_nf_thr);
2067 	fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
2068 	    "motherboard", 0);
2069 
2070 	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
2071 	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "otf");
2072 }
2073 
2074 void
2075 nb_nf_mem_report(const nb_regs_t *nb_regs, char *class, nvlist_t *detector,
2076     void *data)
2077 {
2078 	char *intr;
2079 	nb_mem_scatchpad_t *sp;
2080 
2081 	intr = nf_mem_error(nb_regs, data);
2082 	sp = &((nb_scatchpad_t *)data)->ms;
2083 
2084 	if (sp->dimm != -1) {
2085 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 5,
2086 		    "motherboard", 0,
2087 		    "memory-controller", sp->branch,
2088 		    "dram-channel", sp->channel,
2089 		    "dimm", sp->dimm,
2090 		    "rank", sp->rank);
2091 	} else if (sp->channel != -1) {
2092 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 3,
2093 		    "motherboard", 0,
2094 		    "memory-controller", sp->branch,
2095 		    "dram-channel", sp->channel);
2096 	} else if (sp->branch != -1) {
2097 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 2,
2098 		    "motherboard", 0,
2099 		    "memory-controller", sp->branch);
2100 	} else {
2101 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
2102 		    "motherboard", 0);
2103 	}
2104 
2105 	(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s",
2106 	    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, intr);
2107 }
2108 
2109 
2110 nvlist_t *
2111 nb_report(const nb_regs_t *nb_regs, char *class, nv_alloc_t *nva, void *scratch)
2112 {
2113 	nvlist_t *detector = fm_nvlist_create(nva);
2114 
2115 	switch (nb_regs->flag) {
2116 	case NB_REG_LOG_FSB:
2117 		nb_fsb_report(nb_regs, class, detector, scratch);
2118 		break;
2119 	case NB_REG_LOG_PEX:
2120 		nb_pex_report(nb_regs, class, detector, scratch);
2121 		break;
2122 	case NB_REG_LOG_INT:
2123 		nb_int_report(nb_regs, class, detector, scratch);
2124 		break;
2125 	case NB_REG_LOG_FAT_FBD:
2126 		nb_fat_fbd_report(nb_regs, class, detector, scratch);
2127 		break;
2128 	case NB_REG_LOG_NF_FBD:
2129 		nb_nf_fbd_report(nb_regs, class, detector, scratch);
2130 		break;
2131 	case NB_REG_LOG_DMA:
2132 		nb_dma_report(class, detector);
2133 		break;
2134 	case NB_REG_LOG_THR:
2135 		nb_thr_report(nb_regs, class, detector, scratch);
2136 		break;
2137 	case NB_REG_LOG_NF_MEM:
2138 		nb_nf_mem_report(nb_regs, class, detector, scratch);
2139 		break;
2140 	default:
2141 		fm_fmri_hc_set(detector, FM_HC_SCHEME_VERSION, NULL, NULL, 1,
2142 		    "motherboard", 0);
2143 
2144 		(void) snprintf(class, FM_MAX_CLASS, "%s.%s.%s.%s",
2145 		    FM_ERROR_CPU, FM_EREPORT_CPU_INTEL, "nb", "unknown");
2146 	}
2147 	return (detector);
2148 }
2149 
2150 /*ARGSUSED*/
2151 void
2152 nb_drain(void *ignored, const void *data, const errorq_elem_t *eqe)
2153 {
2154 	nb_logout_t *acl = (nb_logout_t *)data;
2155 	errorq_elem_t *eqep, *scr_eqep;
2156 	nvlist_t *ereport, *detector;
2157 	nv_alloc_t *nva = NULL;
2158 	char buf[FM_MAX_CLASS];
2159 	nb_scatchpad_t nb_scatchpad;
2160 
2161 	if (panicstr) {
2162 		if ((eqep = errorq_reserve(ereport_errorq)) == NULL)
2163 			return;
2164 		ereport = errorq_elem_nvl(ereport_errorq, eqep);
2165 		/*
2166 		 * Now try to allocate another element for scratch space and
2167 		 * use that for further scratch space (eg for constructing
2168 		 * nvlists to add the main ereport).  If we can't reserve
2169 		 * a scratch element just fallback to working within the
2170 		 * element we already have, and hope for the best.  All this
2171 		 * is necessary because the fixed buffer nv allocator does
2172 		 * not reclaim freed space and nvlist construction is
2173 		 * expensive.
2174 		 */
2175 		if ((scr_eqep = errorq_reserve(ereport_errorq)) != NULL)
2176 			nva = errorq_elem_nva(ereport_errorq, scr_eqep);
2177 		else
2178 			nva = errorq_elem_nva(ereport_errorq, eqep);
2179 	} else {
2180 		ereport = fm_nvlist_create(NULL);
2181 	}
2182 	detector = nb_report(&acl->nb_regs, buf, nva, &nb_scatchpad);
2183 	if (detector == NULL)
2184 		return;
2185 	fm_ereport_set(ereport, FM_EREPORT_VERSION, buf,
2186 	    fm_ena_generate(acl->acl_timestamp, FM_ENA_FMT1), detector, NULL);
2187 	/*
2188 	 * We're done with 'detector' so reclaim the scratch space.
2189 	 */
2190 	if (panicstr) {
2191 		fm_nvlist_destroy(detector, FM_NVA_RETAIN);
2192 		nv_alloc_reset(nva);
2193 	} else {
2194 		fm_nvlist_destroy(detector, FM_NVA_FREE);
2195 	}
2196 
2197 	/*
2198 	 * Encode the error-specific data that was saved in the logout area.
2199 	 */
2200 	nb_ereport_add_logout(ereport, acl, &nb_scatchpad);
2201 
2202 	if (panicstr) {
2203 		errorq_commit(ereport_errorq, eqep, ERRORQ_SYNC);
2204 		if (scr_eqep)
2205 			errorq_cancel(ereport_errorq, scr_eqep);
2206 	} else {
2207 		(void) fm_ereport_post(ereport, EVCH_TRYHARD);
2208 		fm_nvlist_destroy(ereport, FM_NVA_FREE);
2209 	}
2210 }
2211