xref: /illumos-gate/usr/src/uts/common/os/pcifm.c (revision 0c64a9b435314788e185507d40ef9fae71507f5a)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/types.h>
30 #include <sys/sunndi.h>
31 #include <sys/sysmacros.h>
32 #include <sys/ddifm_impl.h>
33 #include <sys/fm/util.h>
34 #include <sys/fm/protocol.h>
35 #include <sys/fm/io/pci.h>
36 #include <sys/fm/io/ddi.h>
37 #include <sys/pci.h>
38 #include <sys/pcie.h>
39 #include <sys/pci_impl.h>
40 #include <sys/epm.h>
41 #include <sys/pcifm.h>
42 
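/*
 * PCI-X capability versions 1 and 2 implement the optional ECC registers;
 * this check gates all of the ECC register allocation and gathering below.
 */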
43 #define	PCIX_ECC_VER_CHECK(x)	(((x) == PCI_PCIX_VER_1) ||\
44 				((x) == PCI_PCIX_VER_2))
45 
46 /*
47  * Expected PCI Express error mask values
48  */
49 uint32_t pcie_expected_ce_mask = PCIE_AER_CE_AD_NFE;
50 uint32_t pcie_expected_ue_mask = 0x0;
51 uint32_t pcie_expected_sue_mask = 0x0;
52 #if defined(__sparc)
53 uint32_t pcie_aer_uce_log_bits = PCIE_AER_UCE_LOG_BITS;
54 uint32_t pcie_aer_suce_log_bits = PCIE_AER_SUCE_LOG_BITS;
55 #else
56 uint32_t pcie_aer_uce_log_bits = PCIE_AER_UCE_LOG_BITS & ~PCIE_AER_UCE_UR;
57 uint32_t pcie_aer_suce_log_bits =
58 	    PCIE_AER_SUCE_LOG_BITS & ~PCIE_AER_SUCE_RCVD_MA;
59 #endif
60 
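/*
 * Queue used to post deferred ereports that identify the target of a PCI
 * error (see the pci_target_enqueue() calls below); it is presumably created
 * elsewhere, so it simply starts out NULL here.
 */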
61 errorq_t *pci_target_queue = NULL;
62 
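/*
 * Generic PCI error tables.  Each entry maps a status register bit to an
 * ereport class suffix, an optional target ereport class (used when a
 * faulting address was captured) and a default FMA severity; each table is
 * terminated by an all-NULL entry.
 */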
63 pci_fm_err_t pci_err_tbl[] = {
64 	PCI_DET_PERR,	PCI_STAT_PERROR,	NULL,		DDI_FM_UNKNOWN,
65 	PCI_MDPE,	PCI_STAT_S_PERROR,	PCI_TARG_MDPE,	DDI_FM_UNKNOWN,
66 	PCI_SIG_SERR,	PCI_STAT_S_SYSERR,	NULL,		DDI_FM_FATAL,
67 	PCI_MA,		PCI_STAT_R_MAST_AB,	PCI_TARG_MA,	DDI_FM_UNKNOWN,
68 	PCI_REC_TA,	PCI_STAT_R_TARG_AB,	PCI_TARG_REC_TA, DDI_FM_UNKNOWN,
69 	PCI_SIG_TA,	PCI_STAT_S_TARG_AB,	NULL,		DDI_FM_UNKNOWN,
70 	NULL, NULL, NULL, NULL,
71 };
72 
73 pci_fm_err_t pci_bdg_err_tbl[] = {
74 	PCI_DET_PERR,	PCI_STAT_PERROR,	NULL,		DDI_FM_UNKNOWN,
75 	PCI_MDPE,	PCI_STAT_S_PERROR,	PCI_TARG_MDPE,	DDI_FM_UNKNOWN,
76 	PCI_REC_SERR,	PCI_STAT_S_SYSERR,	NULL,		DDI_FM_UNKNOWN,
77 #if !defined(__sparc)
78 	PCI_MA,		PCI_STAT_R_MAST_AB,	PCI_TARG_MA,	DDI_FM_OK,
79 #else
80 	PCI_MA,		PCI_STAT_R_MAST_AB,	PCI_TARG_MA,	DDI_FM_UNKNOWN,
81 #endif
82 	PCI_REC_TA,	PCI_STAT_R_TARG_AB,	PCI_TARG_REC_TA, DDI_FM_UNKNOWN,
83 	PCI_SIG_TA,	PCI_STAT_S_TARG_AB,	NULL,		DDI_FM_UNKNOWN,
84 	NULL, NULL, NULL, NULL,
85 };
86 
87 static pci_fm_err_t pciex_ce_err_tbl[] = {
88 	PCIEX_RE,	PCIE_AER_CE_RECEIVER_ERR,	NULL,	DDI_FM_OK,
89 	PCIEX_RNR,	PCIE_AER_CE_REPLAY_ROLLOVER,	NULL,	DDI_FM_OK,
90 	PCIEX_RTO,	PCIE_AER_CE_REPLAY_TO,		NULL,	DDI_FM_OK,
91 	PCIEX_BDP,	PCIE_AER_CE_BAD_DLLP,		NULL,	DDI_FM_OK,
92 	PCIEX_BTP,	PCIE_AER_CE_BAD_TLP,		NULL,	DDI_FM_OK,
93 	PCIEX_ANFE,	PCIE_AER_CE_AD_NFE,		NULL,	DDI_FM_OK,
94 	NULL, NULL, NULL, NULL,
95 };
96 
97 static pci_fm_err_t pciex_ue_err_tbl[] = {
98 	PCIEX_TE,	PCIE_AER_UCE_TRAINING,		NULL,	DDI_FM_FATAL,
99 	PCIEX_DLP,	PCIE_AER_UCE_DLP,		NULL,	DDI_FM_FATAL,
100 	PCIEX_SD,	PCIE_AER_UCE_SD,		NULL,   DDI_FM_FATAL,
101 	PCIEX_ROF,	PCIE_AER_UCE_RO,		NULL,	DDI_FM_FATAL,
102 	PCIEX_FCP,	PCIE_AER_UCE_FCP,		NULL,	DDI_FM_FATAL,
103 	PCIEX_MFP,	PCIE_AER_UCE_MTLP,		NULL,	DDI_FM_FATAL,
104 	PCIEX_CTO,	PCIE_AER_UCE_TO,		NULL,	DDI_FM_UNKNOWN,
105 	PCIEX_UC,	PCIE_AER_UCE_UC,		NULL,	DDI_FM_OK,
106 	PCIEX_ECRC,	PCIE_AER_UCE_ECRC,		NULL,	DDI_FM_UNKNOWN,
107 	PCIEX_CA,	PCIE_AER_UCE_CA,		NULL,	DDI_FM_UNKNOWN,
108 #if !defined(__sparc)
109 	PCIEX_UR,	PCIE_AER_UCE_UR,		NULL,	DDI_FM_OK,
110 #else
111 	PCIEX_UR,	PCIE_AER_UCE_UR,		NULL,	DDI_FM_UNKNOWN,
112 #endif
113 	PCIEX_POIS,	PCIE_AER_UCE_PTLP,		NULL,	DDI_FM_UNKNOWN,
114 	NULL, NULL, NULL, NULL,
115 };
116 
117 static pci_fm_err_t pcie_sue_err_tbl[] = {
118 	PCIEX_S_TA_SC,	PCIE_AER_SUCE_TA_ON_SC,		NULL,	DDI_FM_UNKNOWN,
119 	PCIEX_S_MA_SC,	PCIE_AER_SUCE_MA_ON_SC,		NULL,	DDI_FM_UNKNOWN,
120 	PCIEX_S_RTA,	PCIE_AER_SUCE_RCVD_TA,		NULL,	DDI_FM_UNKNOWN,
121 #if !defined(__sparc)
122 	PCIEX_S_RMA,	PCIE_AER_SUCE_RCVD_MA,		NULL,	DDI_FM_OK,
123 #else
124 	PCIEX_S_RMA,	PCIE_AER_SUCE_RCVD_MA,		NULL,	DDI_FM_UNKNOWN,
125 #endif
126 	PCIEX_S_USC,	PCIE_AER_SUCE_USC_ERR,		NULL,	DDI_FM_UNKNOWN,
127 	PCIEX_S_USCMD,	PCIE_AER_SUCE_USC_MSG_DATA_ERR,	NULL,	DDI_FM_FATAL,
128 	PCIEX_S_UDE,	PCIE_AER_SUCE_UC_DATA_ERR,	NULL,	DDI_FM_UNKNOWN,
129 	PCIEX_S_UAT,	PCIE_AER_SUCE_UC_ATTR_ERR,	NULL,	DDI_FM_FATAL,
130 	PCIEX_S_UADR,	PCIE_AER_SUCE_UC_ADDR_ERR,	NULL,	DDI_FM_FATAL,
131 	PCIEX_S_TEX,	PCIE_AER_SUCE_TIMER_EXPIRED,	NULL,	DDI_FM_FATAL,
132 	PCIEX_S_PERR,	PCIE_AER_SUCE_PERR_ASSERT,	NULL,	DDI_FM_UNKNOWN,
133 	PCIEX_S_SERR,	PCIE_AER_SUCE_SERR_ASSERT,	NULL,	DDI_FM_FATAL,
134 	PCIEX_INTERR,	PCIE_AER_SUCE_INTERNAL_ERR,	NULL,	DDI_FM_FATAL,
135 	NULL, NULL, NULL, NULL,
136 };
137 
138 static pci_fm_err_t pcix_err_tbl[] = {
139 	PCIX_SPL_DIS,		PCI_PCIX_SPL_DSCD,	NULL,	DDI_FM_UNKNOWN,
140 	PCIX_UNEX_SPL,		PCI_PCIX_UNEX_SPL,	NULL,	DDI_FM_UNKNOWN,
141 	PCIX_RX_SPL_MSG,	PCI_PCIX_RX_SPL_MSG,	NULL,   DDI_FM_UNKNOWN,
142 	NULL, NULL, NULL, NULL,
143 };
144 
145 static pci_fm_err_t pcix_sec_err_tbl[] = {
146 	PCIX_SPL_DIS,		PCI_PCIX_BSS_SPL_DSCD,	NULL,	DDI_FM_UNKNOWN,
147 	PCIX_UNEX_SPL,		PCI_PCIX_BSS_UNEX_SPL,	NULL,	DDI_FM_UNKNOWN,
148 	PCIX_BSS_SPL_OR,	PCI_PCIX_BSS_SPL_OR,	NULL,	DDI_FM_OK,
149 	PCIX_BSS_SPL_DLY,	PCI_PCIX_BSS_SPL_DLY,	NULL,	DDI_FM_OK,
150 	NULL, NULL, NULL, NULL,
151 };
152 
153 static pci_fm_err_t pciex_nadv_err_tbl[] = {
154 	PCIEX_UR,	PCIE_DEVSTS_UR_DETECTED,	NULL,	DDI_FM_UNKNOWN,
155 	PCIEX_FAT,	PCIE_DEVSTS_FE_DETECTED,	NULL,	DDI_FM_FATAL,
156 	PCIEX_NONFAT,	PCIE_DEVSTS_NFE_DETECTED,	NULL,	DDI_FM_UNKNOWN,
157 	PCIEX_CORR,	PCIE_DEVSTS_CE_DETECTED,	NULL,	DDI_FM_OK,
158 	NULL, NULL, NULL, NULL,
159 };
160 
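/*
 * Check the result of the most recent config space access made through the
 * given handle.  If the access failed, post a PCI_NR ereport, clear the
 * access error state and return the access status to the caller.
 */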
161 static int
162 pci_config_check(ddi_acc_handle_t handle)
163 {
164 	ddi_acc_hdl_t *hp = impl_acc_hdl_get(handle);
165 	ddi_fm_error_t de;
166 
167 	if (!(DDI_FM_ACC_ERR_CAP(ddi_fm_capable(hp->ah_dip))))
168 		return (DDI_FM_OK);
169 
170 	de.fme_version = DDI_FME_VERSION;
171 
172 	ddi_fm_acc_err_get(handle, &de, de.fme_version);
173 	if (de.fme_status != DDI_FM_OK) {
174 		char buf[FM_MAX_CLASS];
175 
176 		(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", PCI_ERROR_SUBCLASS,
177 		    PCI_NR);
178 		ddi_fm_ereport_post(hp->ah_dip, buf, de.fme_ena, DDI_NOSLEEP,
179 		    FM_VERSION, DATA_TYPE_UINT8, 0, NULL);
180 		ddi_fm_acc_err_clear(handle, de.fme_version);
181 	}
182 	return (de.fme_status);
183 }
184 
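/*
 * Capture the PCI-X ECC control/status, first/second address and attribute
 * registers, using the bridge or non-bridge register offsets as appropriate
 * for this device.
 */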
185 static void
186 pcix_ecc_regs_gather(pci_erpt_t *erpt_p, pcix_ecc_regs_t *pcix_ecc_regs,
187     uint8_t pcix_cap_ptr)
188 {
189 	int bdg = erpt_p->pe_dflags & PCI_BRIDGE_DEV;
190 
191 	pcix_ecc_regs->pcix_ecc_ctlstat = pci_config_get32(erpt_p->pe_hdl,
192 	    (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_STATUS :
193 	    PCI_PCIX_ECC_STATUS)));
194 	if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
195 		pcix_ecc_regs->pcix_ecc_vflags |= PCIX_ERR_ECC_STS_VALID;
196 	else
197 		return;
198 	pcix_ecc_regs->pcix_ecc_fstaddr = pci_config_get32(erpt_p->pe_hdl,
199 	    (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_FST_AD :
200 	    PCI_PCIX_ECC_FST_AD)));
201 	pcix_ecc_regs->pcix_ecc_secaddr = pci_config_get32(erpt_p->pe_hdl,
202 	    (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_SEC_AD :
203 	    PCI_PCIX_ECC_SEC_AD)));
204 	pcix_ecc_regs->pcix_ecc_attr = pci_config_get32((
205 	    ddi_acc_handle_t)erpt_p->pe_hdl,
206 	    (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_ATTR : PCI_PCIX_ECC_ATTR)));
207 }
208 
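/*
 * Capture the PCI-X error registers: secondary status and bridge status for
 * bridges, or command and status for non-bridge devices, plus the ECC
 * registers when the capability version implements them.
 */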
209 static void
210 pcix_regs_gather(pci_erpt_t *erpt_p, void *pe_regs)
211 {
212 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
213 		pcix_bdg_error_regs_t *pcix_bdg_regs =
214 		    (pcix_bdg_error_regs_t *)pe_regs;
215 		uint8_t pcix_bdg_cap_ptr;
216 		int i;
217 
218 		pcix_bdg_cap_ptr = pcix_bdg_regs->pcix_bdg_cap_ptr;
219 		pcix_bdg_regs->pcix_bdg_sec_stat = pci_config_get16(
220 		    erpt_p->pe_hdl, (pcix_bdg_cap_ptr + PCI_PCIX_SEC_STATUS));
221 		if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
222 			pcix_bdg_regs->pcix_bdg_vflags |=
223 			    PCIX_BDG_SEC_STATUS_VALID;
224 		else
225 			return;
226 		pcix_bdg_regs->pcix_bdg_stat = pci_config_get32(erpt_p->pe_hdl,
227 		    (pcix_bdg_cap_ptr + PCI_PCIX_BDG_STATUS));
228 		if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
229 			pcix_bdg_regs->pcix_bdg_vflags |= PCIX_BDG_STATUS_VALID;
230 		else
231 			return;
232 		if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) {
233 			pcix_ecc_regs_t *pcix_bdg_ecc_regs;
234 			/*
235 			 * PCI Express to PCI-X bridges only implement the
236 			 * secondary side of the PCI-X ECC registers; bit one is
237 			 * read-only, so we make sure we do not write to it.
238 			 */
239 			if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
240 				pcix_bdg_ecc_regs =
241 				    pcix_bdg_regs->pcix_bdg_ecc_regs[1];
242 				pcix_ecc_regs_gather(erpt_p, pcix_bdg_ecc_regs,
243 				    pcix_bdg_cap_ptr);
244 			} else {
245 				for (i = 0; i < 2; i++) {
246 					pcix_bdg_ecc_regs =
247 					    pcix_bdg_regs->pcix_bdg_ecc_regs[i];
248 					pci_config_put32(erpt_p->pe_hdl,
249 					    (pcix_bdg_cap_ptr +
250 					    PCI_PCIX_BDG_ECC_STATUS), i);
251 					pcix_ecc_regs_gather(erpt_p,
252 					    pcix_bdg_ecc_regs,
253 					    pcix_bdg_cap_ptr);
254 				}
255 			}
256 		}
257 	} else {
258 		pcix_error_regs_t *pcix_regs = (pcix_error_regs_t *)pe_regs;
259 		uint8_t pcix_cap_ptr;
260 
261 		pcix_cap_ptr = pcix_regs->pcix_cap_ptr;
262 
263 		pcix_regs->pcix_command = pci_config_get16(erpt_p->pe_hdl,
264 		    (pcix_cap_ptr + PCI_PCIX_COMMAND));
265 		pcix_regs->pcix_status = pci_config_get32(erpt_p->pe_hdl,
266 		    (pcix_cap_ptr + PCI_PCIX_STATUS));
267 		if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
268 			pcix_regs->pcix_vflags |= PCIX_ERR_STATUS_VALID;
269 		else
270 			return;
271 		if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) {
272 			pcix_ecc_regs_t *pcix_ecc_regs =
273 			    pcix_regs->pcix_ecc_regs;
274 
275 			pcix_ecc_regs_gather(erpt_p, pcix_ecc_regs,
276 			    pcix_cap_ptr);
277 		}
278 	}
279 }
280 
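/*
 * Capture the PCI Express device status/control registers, the root port
 * registers where applicable and, when advanced error reporting is present,
 * the AER status, mask, severity, header log and root complex registers.
 */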
281 static void
282 pcie_regs_gather(pci_erpt_t *erpt_p)
283 {
284 	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
285 	uint8_t pcie_cap_ptr;
286 	pcie_adv_error_regs_t *pcie_adv_regs;
287 	uint16_t pcie_ecap_ptr;
288 
289 	pcie_cap_ptr = pcie_regs->pcie_cap_ptr;
290 
291 	pcie_regs->pcie_err_status = pci_config_get16(erpt_p->pe_hdl,
292 	    pcie_cap_ptr + PCIE_DEVSTS);
293 	if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
294 		pcie_regs->pcie_vflags |= PCIE_ERR_STATUS_VALID;
295 	else
296 		return;
297 
298 	pcie_regs->pcie_err_ctl = pci_config_get16(erpt_p->pe_hdl,
299 	    (pcie_cap_ptr + PCIE_DEVCTL));
300 
301 	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) && (erpt_p->pe_dflags &
302 	    PCIX_DEV))
303 		pcix_regs_gather(erpt_p, pcie_regs->pcix_bdg_regs);
304 
305 	if (erpt_p->pe_dflags & PCIEX_RC_DEV) {
306 		pcie_rc_error_regs_t *pcie_rc_regs = pcie_regs->pcie_rc_regs;
307 
308 		pcie_rc_regs->pcie_rc_status = pci_config_get32(erpt_p->pe_hdl,
309 		    (pcie_cap_ptr + PCIE_ROOTSTS));
310 		pcie_rc_regs->pcie_rc_ctl = pci_config_get16(erpt_p->pe_hdl,
311 		    (pcie_cap_ptr + PCIE_ROOTCTL));
312 	}
313 
314 	if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV))
315 		return;
316 
317 	pcie_adv_regs = pcie_regs->pcie_adv_regs;
318 
319 	pcie_ecap_ptr = pcie_adv_regs->pcie_adv_cap_ptr;
320 
321 	pcie_adv_regs->pcie_ue_status = pci_config_get32(erpt_p->pe_hdl,
322 	    pcie_ecap_ptr + PCIE_AER_UCE_STS);
323 	if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
324 		pcie_adv_regs->pcie_adv_vflags |= PCIE_UE_STATUS_VALID;
325 
326 	pcie_adv_regs->pcie_ue_mask = pci_config_get32(erpt_p->pe_hdl,
327 	    pcie_ecap_ptr + PCIE_AER_UCE_MASK);
328 	pcie_adv_regs->pcie_ue_sev = pci_config_get32(erpt_p->pe_hdl,
329 	    pcie_ecap_ptr + PCIE_AER_UCE_SERV);
330 	pcie_adv_regs->pcie_adv_ctl = pci_config_get32(erpt_p->pe_hdl,
331 	    pcie_ecap_ptr + PCIE_AER_CTL);
332 	pcie_adv_regs->pcie_ue_hdr0 = pci_config_get32(erpt_p->pe_hdl,
333 	    pcie_ecap_ptr + PCIE_AER_HDR_LOG);
334 	if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK) {
335 		int i;
336 		pcie_adv_regs->pcie_adv_vflags |= PCIE_UE_HDR_VALID;
337 
338 		for (i = 0; i < 3; i++) {
339 			pcie_adv_regs->pcie_ue_hdr[i] = pci_config_get32(
340 			    erpt_p->pe_hdl, pcie_ecap_ptr + PCIE_AER_HDR_LOG +
341 			    (4 * (i + 1)));
342 		}
343 	}
344 
345 	pcie_adv_regs->pcie_ce_status = pci_config_get32(erpt_p->pe_hdl,
346 	    pcie_ecap_ptr + PCIE_AER_CE_STS);
347 	if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
348 		pcie_adv_regs->pcie_adv_vflags |= PCIE_CE_STATUS_VALID;
349 
350 	pcie_adv_regs->pcie_ce_mask = pci_config_get32(erpt_p->pe_hdl,
351 	    pcie_ecap_ptr + PCIE_AER_CE_MASK);
352 
353 	/*
354 	 * If this is a pci express to pci bridge, grab the bridge
355 	 * error registers.
356 	 */
357 	if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
358 		pcie_adv_bdg_error_regs_t *pcie_bdg_regs =
359 		    pcie_adv_regs->pcie_adv_bdg_regs;
360 
361 		pcie_bdg_regs->pcie_sue_status =
362 		    pci_config_get32(erpt_p->pe_hdl,
363 		    pcie_ecap_ptr + PCIE_AER_SUCE_STS);
364 		if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
365 			pcie_adv_regs->pcie_adv_vflags |= PCIE_SUE_STATUS_VALID;
366 		pcie_bdg_regs->pcie_sue_hdr0 = pci_config_get32(erpt_p->pe_hdl,
367 		    (pcie_ecap_ptr + PCIE_AER_SHDR_LOG));
368 
369 		if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK) {
370 			int i;
371 
372 			pcie_adv_regs->pcie_adv_vflags |= PCIE_SUE_HDR_VALID;
373 
374 			for (i = 0; i < 3; i++) {
375 				pcie_bdg_regs->pcie_sue_hdr[i] =
376 				    pci_config_get32(erpt_p->pe_hdl,
377 					pcie_ecap_ptr + PCIE_AER_SHDR_LOG +
378 					(4 * (i + 1)));
379 			}
380 		}
381 	}
382 	/*
383 	 * If this is a PCI Express root complex, grab the root complex
384 	 * error registers.
385 	 */
386 	if (erpt_p->pe_dflags & PCIEX_RC_DEV) {
387 		pcie_adv_rc_error_regs_t *pcie_rc_regs =
388 		    pcie_adv_regs->pcie_adv_rc_regs;
389 
390 		pcie_rc_regs->pcie_rc_err_cmd = pci_config_get32(erpt_p->pe_hdl,
391 		    (pcie_ecap_ptr + PCIE_AER_RE_CMD));
392 		pcie_rc_regs->pcie_rc_err_status =
393 		    pci_config_get32(erpt_p->pe_hdl,
394 			(pcie_ecap_ptr + PCIE_AER_RE_STS));
395 		if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
396 			pcie_adv_regs->pcie_adv_vflags |=
397 			    PCIE_RC_ERR_STATUS_VALID;
398 		pcie_rc_regs->pcie_rc_ce_src_id =
399 		    pci_config_get16(erpt_p->pe_hdl,
400 			(pcie_ecap_ptr + PCIE_AER_CE_SRC_ID));
401 		pcie_rc_regs->pcie_rc_ue_src_id =
402 		    pci_config_get16(erpt_p->pe_hdl,
403 			(pcie_ecap_ptr + PCIE_AER_ERR_SRC_ID));
404 		if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
405 			pcie_adv_regs->pcie_adv_vflags |= PCIE_SRC_ID_VALID;
406 	}
407 }
408 
409 /*ARGSUSED*/
410 static void
411 pci_regs_gather(dev_info_t *dip, pci_erpt_t *erpt_p)
412 {
413 	pci_error_regs_t *pci_regs = erpt_p->pe_pci_regs;
414 
415 	/*
416 	 * Start by reading all the error registers that are available for
417 	 * both PCI and PCI Express, on leaf devices and bridges/switches.
418 	 */
419 	pci_regs->pci_err_status = pci_config_get16(erpt_p->pe_hdl,
420 	    PCI_CONF_STAT);
421 	if (pci_config_check(erpt_p->pe_hdl) != DDI_FM_OK)
422 		return;
423 	pci_regs->pci_vflags |= PCI_ERR_STATUS_VALID;
424 	pci_regs->pci_cfg_comm = pci_config_get16(erpt_p->pe_hdl,
425 	    PCI_CONF_COMM);
426 	if (pci_config_check(erpt_p->pe_hdl) != DDI_FM_OK)
427 		return;
428 
429 	/*
430 	 * If pci-pci bridge, grab the PCI bridge specific error registers.
431 	 */
432 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
433 		pci_regs->pci_bdg_regs->pci_bdg_sec_stat =
434 		    pci_config_get16(erpt_p->pe_hdl, PCI_BCNF_SEC_STATUS);
435 		if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
436 			pci_regs->pci_bdg_regs->pci_bdg_vflags |=
437 			    PCI_BDG_SEC_STAT_VALID;
438 		pci_regs->pci_bdg_regs->pci_bdg_ctrl =
439 		    pci_config_get16(erpt_p->pe_hdl, PCI_BCNF_BCNTRL);
440 		if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
441 			pci_regs->pci_bdg_regs->pci_bdg_vflags |=
442 			    PCI_BDG_CTRL_VALID;
443 	}
444 
445 	/*
446 	 * If pci express device, grab the pci express error registers,
447 	 * check for advanced error reporting features and grab those
448 	 * registers too if available.
449 	 */
450 	if (erpt_p->pe_dflags & PCIEX_DEV)
451 		pcie_regs_gather(erpt_p);
452 	else if (erpt_p->pe_dflags & PCIX_DEV)
453 		pcix_regs_gather(erpt_p, erpt_p->pe_regs);
454 
455 }
456 
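/*
 * Clear the PCI-X error state captured by pcix_regs_gather() by writing the
 * saved status values back; the error bits are write-one-to-clear, so this
 * clears exactly the errors that were observed.
 */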
457 static void
458 pcix_regs_clear(pci_erpt_t *erpt_p, void *pe_regs)
459 {
460 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
461 		pcix_bdg_error_regs_t *pcix_bdg_regs =
462 		    (pcix_bdg_error_regs_t *)pe_regs;
463 		uint8_t pcix_bdg_cap_ptr;
464 		int i;
465 
466 		pcix_bdg_cap_ptr = pcix_bdg_regs->pcix_bdg_cap_ptr;
467 
468 		if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_SEC_STATUS_VALID)
469 			pci_config_put16(erpt_p->pe_hdl,
470 			    (pcix_bdg_cap_ptr + PCI_PCIX_SEC_STATUS),
471 			    pcix_bdg_regs->pcix_bdg_sec_stat);
472 
473 		if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_STATUS_VALID)
474 			pci_config_put32(erpt_p->pe_hdl,
475 			    (pcix_bdg_cap_ptr + PCI_PCIX_BDG_STATUS),
476 			    pcix_bdg_regs->pcix_bdg_stat);
477 
478 		pcix_bdg_regs->pcix_bdg_vflags = 0x0;
479 
480 		if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) {
481 			pcix_ecc_regs_t *pcix_bdg_ecc_regs;
482 			/*
483 			 * PCI Express to PCI-X bridges only implement the
484 			 * secondary side of the PCI-X ECC registers; bit one is
485 			 * read-only, so we make sure we do not write to it.
486 			 */
487 			if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
488 				pcix_bdg_ecc_regs =
489 				    pcix_bdg_regs->pcix_bdg_ecc_regs[1];
490 
491 				if (pcix_bdg_ecc_regs->pcix_ecc_vflags &
492 				    PCIX_ERR_ECC_STS_VALID) {
493 
494 					pci_config_put32(erpt_p->pe_hdl,
495 					    (pcix_bdg_cap_ptr +
496 					    PCI_PCIX_BDG_ECC_STATUS),
497 					    pcix_bdg_ecc_regs->
498 					    pcix_ecc_ctlstat);
499 				}
500 				pcix_bdg_ecc_regs->pcix_ecc_vflags = 0x0;
501 			} else {
502 				for (i = 0; i < 2; i++) {
503 					pcix_bdg_ecc_regs =
504 					    pcix_bdg_regs->pcix_bdg_ecc_regs[i];
505 
506 
507 					if (pcix_bdg_ecc_regs->pcix_ecc_vflags &
508 					    PCIX_ERR_ECC_STS_VALID) {
509 						pci_config_put32(erpt_p->pe_hdl,
510 						    (pcix_bdg_cap_ptr +
511 						    PCI_PCIX_BDG_ECC_STATUS),
512 						    i);
513 
514 						pci_config_put32(erpt_p->pe_hdl,
515 						    (pcix_bdg_cap_ptr +
516 						    PCI_PCIX_BDG_ECC_STATUS),
517 						    pcix_bdg_ecc_regs->
518 						    pcix_ecc_ctlstat);
519 					}
520 					pcix_bdg_ecc_regs->pcix_ecc_vflags =
521 					    0x0;
522 				}
523 			}
524 		}
525 	} else {
526 		pcix_error_regs_t *pcix_regs = (pcix_error_regs_t *)pe_regs;
527 		uint8_t pcix_cap_ptr;
528 
529 		pcix_cap_ptr = pcix_regs->pcix_cap_ptr;
530 
531 		if (pcix_regs->pcix_vflags & PCIX_ERR_STATUS_VALID)
532 			pci_config_put32(erpt_p->pe_hdl,
533 			    (pcix_cap_ptr + PCI_PCIX_STATUS),
534 			    pcix_regs->pcix_status);
535 
536 		pcix_regs->pcix_vflags = 0x0;
537 
538 		if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) {
539 			pcix_ecc_regs_t *pcix_ecc_regs =
540 			    pcix_regs->pcix_ecc_regs;
541 
542 			if (pcix_ecc_regs->pcix_ecc_vflags &
543 			    PCIX_ERR_ECC_STS_VALID)
544 				pci_config_put32(erpt_p->pe_hdl,
545 				    (pcix_cap_ptr + PCI_PCIX_ECC_STATUS),
546 				    pcix_ecc_regs->pcix_ecc_ctlstat);
547 
548 			pcix_ecc_regs->pcix_ecc_vflags = 0x0;
549 		}
550 	}
551 }
552 
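/*
 * Clear the captured PCI Express (and, if present, AER) error status by
 * writing the saved values back, then reset the valid flags so stale data
 * is not reported again.
 */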
553 static void
554 pcie_regs_clear(pci_erpt_t *erpt_p)
555 {
556 	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
557 	uint8_t pcie_cap_ptr;
558 	pcie_adv_error_regs_t *pcie_adv_regs;
559 	uint16_t pcie_ecap_ptr;
560 
561 	pcie_cap_ptr = pcie_regs->pcie_cap_ptr;
562 
563 	if (pcie_regs->pcie_vflags & PCIE_ERR_STATUS_VALID)
564 		pci_config_put16(erpt_p->pe_hdl, pcie_cap_ptr + PCIE_DEVSTS,
565 		    pcie_regs->pcie_err_status);
566 
567 	pcie_regs->pcie_vflags = 0x0;
568 
569 	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) &&
570 	    (erpt_p->pe_dflags & PCIX_DEV))
571 		pcix_regs_clear(erpt_p, pcie_regs->pcix_bdg_regs);
572 
573 	if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV))
574 		return;
575 
576 	pcie_adv_regs = pcie_regs->pcie_adv_regs;
577 
578 	pcie_ecap_ptr = pcie_adv_regs->pcie_adv_cap_ptr;
579 
580 	if (pcie_adv_regs->pcie_adv_vflags & PCIE_UE_STATUS_VALID)
581 		pci_config_put32(erpt_p->pe_hdl,
582 		    pcie_ecap_ptr + PCIE_AER_UCE_STS,
583 		    pcie_adv_regs->pcie_ue_status);
584 
585 	if (pcie_adv_regs->pcie_adv_vflags & PCIE_CE_STATUS_VALID)
586 		pci_config_put32(erpt_p->pe_hdl,
587 		    pcie_ecap_ptr + PCIE_AER_CE_STS,
588 		    pcie_adv_regs->pcie_ce_status);
589 
590 
591 	if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
592 		pcie_adv_bdg_error_regs_t *pcie_bdg_regs =
593 		    pcie_adv_regs->pcie_adv_bdg_regs;
594 
595 
596 		if (pcie_adv_regs->pcie_adv_vflags & PCIE_SUE_STATUS_VALID)
597 			pci_config_put32(erpt_p->pe_hdl,
598 			    pcie_ecap_ptr + PCIE_AER_SUCE_STS,
599 			    pcie_bdg_regs->pcie_sue_status);
600 	}
601 	/*
602 	 * If this is a PCI Express root complex, clear the root complex
603 	 * error registers.
604 	 */
605 	if (erpt_p->pe_dflags & PCIEX_RC_DEV) {
606 		pcie_adv_rc_error_regs_t *pcie_rc_regs =
607 		    pcie_adv_regs->pcie_adv_rc_regs;
608 
609 
610 		if (pcie_adv_regs->pcie_adv_vflags & PCIE_RC_ERR_STATUS_VALID)
611 			pci_config_put32(erpt_p->pe_hdl,
612 			    (pcie_ecap_ptr + PCIE_AER_RE_STS),
613 			    pcie_rc_regs->pcie_rc_err_status);
614 	}
615 	pcie_adv_regs->pcie_adv_vflags = 0x0;
616 }
617 
618 static void
619 pci_regs_clear(pci_erpt_t *erpt_p)
620 {
621 	/*
622 	 * Finally clear the error bits
623 	 */
624 	if (erpt_p->pe_dflags & PCIEX_DEV)
625 		pcie_regs_clear(erpt_p);
626 	else if (erpt_p->pe_dflags & PCIX_DEV)
627 		pcix_regs_clear(erpt_p, erpt_p->pe_regs);
628 
629 	if (erpt_p->pe_pci_regs->pci_vflags & PCI_ERR_STATUS_VALID)
630 		pci_config_put16(erpt_p->pe_hdl, PCI_CONF_STAT,
631 		    erpt_p->pe_pci_regs->pci_err_status);
632 
633 	erpt_p->pe_pci_regs->pci_vflags = 0x0;
634 
635 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
636 		if (erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_vflags &
637 		    PCI_BDG_SEC_STAT_VALID)
638 			pci_config_put16(erpt_p->pe_hdl, PCI_BCNF_SEC_STATUS,
639 			    erpt_p->pe_pci_regs->pci_bdg_regs->
640 			    pci_bdg_sec_stat);
641 		if (erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_vflags &
642 		    PCI_BDG_CTRL_VALID)
643 			pci_config_put16(erpt_p->pe_hdl, PCI_BCNF_BCNTRL,
644 			    erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_ctrl);
645 
646 		erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_vflags = 0x0;
647 	}
648 }
649 
650 /*
651  * pcix_ereport_setup: Allocate structures for PCI-X error handling and ereport
652  * generation.
653  */
654 /* ARGSUSED */
655 static void
656 pcix_ereport_setup(dev_info_t *dip, pci_erpt_t *erpt_p)
657 {
658 	uint8_t pcix_cap_ptr;
659 	int i;
660 
661 	pcix_cap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
662 	    "pcix-capid-pointer", PCI_CAP_NEXT_PTR_NULL);
663 
664 	if (pcix_cap_ptr != PCI_CAP_NEXT_PTR_NULL)
665 		erpt_p->pe_dflags |= PCIX_DEV;
666 	else
667 		return;
668 
669 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
670 		pcix_bdg_error_regs_t *pcix_bdg_regs;
671 
672 		erpt_p->pe_regs = kmem_zalloc(sizeof (pcix_bdg_error_regs_t),
673 		    KM_SLEEP);
674 		pcix_bdg_regs = (pcix_bdg_error_regs_t *)erpt_p->pe_regs;
675 		pcix_bdg_regs->pcix_bdg_cap_ptr = pcix_cap_ptr;
676 		pcix_bdg_regs->pcix_bdg_ver = pci_config_get16(erpt_p->pe_hdl,
677 		    pcix_cap_ptr + PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK;
678 		if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) {
679 			for (i = 0; i < 2; i++) {
680 				pcix_bdg_regs->pcix_bdg_ecc_regs[i] =
681 				    kmem_zalloc(sizeof (pcix_ecc_regs_t),
682 					KM_SLEEP);
683 			}
684 		}
685 	} else {
686 		pcix_error_regs_t *pcix_regs;
687 
688 		erpt_p->pe_regs = kmem_zalloc(sizeof (pcix_error_regs_t),
689 		    KM_SLEEP);
690 		pcix_regs = (pcix_error_regs_t *)erpt_p->pe_regs;
691 		pcix_regs->pcix_cap_ptr = pcix_cap_ptr;
692 		pcix_regs->pcix_ver = pci_config_get16(erpt_p->pe_hdl,
693 		    pcix_cap_ptr + PCI_PCIX_COMMAND) & PCI_PCIX_VER_MASK;
694 		if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) {
695 			pcix_regs->pcix_ecc_regs = kmem_zalloc(
696 			    sizeof (pcix_ecc_regs_t), KM_SLEEP);
697 		}
698 	}
699 }
700 
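/*
 * pcie_ereport_setup: Detect PCI Express (and PCI Express/PCI-X bridge)
 * capabilities, allocate the corresponding error register structures and
 * program the AER mask registers to the expected values.
 */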
701 static void
702 pcie_ereport_setup(dev_info_t *dip, pci_erpt_t *erpt_p)
703 {
704 	pcie_error_regs_t *pcie_regs;
705 	pcie_adv_error_regs_t *pcie_adv_regs;
706 	char buf[FM_MAX_CLASS];
707 	uint8_t pcix_cap_ptr;
708 	uint8_t pcie_cap_ptr;
709 	uint16_t pcie_ecap_ptr;
710 	uint16_t dev_type = 0;
711 	uint32_t mask = pcie_expected_ue_mask;
712 
713 	/*
714 	 * The following sparc specific code should be removed once the pci_cap
715 	 * interfaces create the necessary properties for us.
716 	 */
717 #if defined(__sparc)
718 	ushort_t status;
719 	uint32_t slot_cap;
720 	uint8_t cap_ptr = 0;
721 	uint8_t cap_id = 0;
722 	uint32_t hdr, hdr_next_ptr, hdr_cap_id;
723 	uint16_t offset = P2ALIGN(PCIE_EXT_CAP, 4);
724 	uint16_t aer_ptr = 0;
725 
726 	cap_ptr = pci_config_get8(erpt_p->pe_hdl, PCI_CONF_CAP_PTR);
727 	if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK) {
728 		while ((cap_id = pci_config_get8(erpt_p->pe_hdl, cap_ptr)) !=
729 		    0xff) {
730 			if (cap_id == PCI_CAP_ID_PCIX) {
731 				(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
732 				    "pcix-capid-pointer", cap_ptr);
733 			}
734 		if (cap_id == PCI_CAP_ID_PCI_E) {
735 			status = pci_config_get16(erpt_p->pe_hdl, cap_ptr + 2);
736 			if (status & PCIE_PCIECAP_SLOT_IMPL) {
737 				/* offset 14h is Slot Cap Register */
738 				slot_cap = pci_config_get32(erpt_p->pe_hdl,
739 				    cap_ptr + PCIE_SLOTCAP);
740 				(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
741 				    "pcie-slotcap-reg", slot_cap);
742 			}
743 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
744 			    "pcie-capid-reg", pci_config_get16(erpt_p->pe_hdl,
745 			    cap_ptr + PCIE_PCIECAP));
746 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
747 			    "pcie-capid-pointer", cap_ptr);
748 
749 		}
750 			if ((cap_ptr = pci_config_get8(erpt_p->pe_hdl,
751 			    cap_ptr + 1)) == 0xff || cap_ptr == 0 ||
752 			    (pci_config_check(erpt_p->pe_hdl) != DDI_FM_OK))
753 				break;
754 		}
755 	}
756 
757 #endif
758 
759 	pcix_cap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
760 	    "pcix-capid-pointer", PCI_CAP_NEXT_PTR_NULL);
761 
762 	if (pcix_cap_ptr != PCI_CAP_NEXT_PTR_NULL)
763 		erpt_p->pe_dflags |= PCIX_DEV;
764 
765 	pcie_cap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
766 	    DDI_PROP_DONTPASS, "pcie-capid-pointer", PCI_CAP_NEXT_PTR_NULL);
767 
768 	if (pcie_cap_ptr != PCI_CAP_NEXT_PTR_NULL) {
769 		erpt_p->pe_dflags |= PCIEX_DEV;
770 		erpt_p->pe_regs = kmem_zalloc(sizeof (pcie_error_regs_t),
771 		    KM_SLEEP);
772 		pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
773 		pcie_regs->pcie_cap_ptr = pcie_cap_ptr;
774 	}
775 
776 	if (!(erpt_p->pe_dflags & PCIEX_DEV))
777 		return;
778 
779 	/*
780 	 * We don't currently need to check the version here because we are
781 	 * compliant with PCIe 1.0a, which is version 0 and is guaranteed to
782 	 * be software compatible with future versions.  We will need to
783 	 * add errors for new detectors/features which are added in newer
784 	 * revisions [sec 7.8.2].
785 	 */
786 	pcie_regs->pcie_cap = pci_config_get16(erpt_p->pe_hdl,
787 	    pcie_regs->pcie_cap_ptr + PCIE_PCIECAP);
788 
789 	dev_type = pcie_regs->pcie_cap & PCIE_PCIECAP_DEV_TYPE_MASK;
790 
791 	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) &&
792 	    (erpt_p->pe_dflags & PCIX_DEV)) {
793 		int i;
794 
795 		pcie_regs->pcix_bdg_regs =
796 		    kmem_zalloc(sizeof (pcix_bdg_error_regs_t), KM_SLEEP);
797 
798 		pcie_regs->pcix_bdg_regs->pcix_bdg_cap_ptr = pcix_cap_ptr;
799 		pcie_regs->pcix_bdg_regs->pcix_bdg_ver =
800 		    pci_config_get16(erpt_p->pe_hdl,
801 			pcix_cap_ptr + PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK;
802 
803 		if (PCIX_ECC_VER_CHECK(pcie_regs->pcix_bdg_regs->pcix_bdg_ver))
804 			for (i = 0; i < 2; i++)
805 				pcie_regs->pcix_bdg_regs->pcix_bdg_ecc_regs[i] =
806 				    kmem_zalloc(sizeof (pcix_ecc_regs_t),
807 					KM_SLEEP);
808 	}
809 
810 	if (dev_type == PCIE_PCIECAP_DEV_TYPE_ROOT) {
811 		erpt_p->pe_dflags |= PCIEX_RC_DEV;
812 		pcie_regs->pcie_rc_regs = kmem_zalloc(
813 		    sizeof (pcie_rc_error_regs_t), KM_SLEEP);
814 	}
815 	/*
816 	 * The following sparc specific code should be removed once the pci_cap
817 	 * interfaces create the necessary properties for us.
818 	 */
819 #if defined(__sparc)
820 
821 	hdr = pci_config_get32(erpt_p->pe_hdl, offset);
822 	hdr_next_ptr = (hdr >> PCIE_EXT_CAP_NEXT_PTR_SHIFT) &
823 	    PCIE_EXT_CAP_NEXT_PTR_MASK;
824 	hdr_cap_id = (hdr >> PCIE_EXT_CAP_ID_SHIFT) & PCIE_EXT_CAP_ID_MASK;
825 
826 	while ((hdr_next_ptr != PCIE_EXT_CAP_NEXT_PTR_NULL) &&
827 	    (hdr_cap_id != PCIE_EXT_CAP_ID_AER)) {
828 		offset = P2ALIGN(hdr_next_ptr, 4);
829 		hdr = pci_config_get32(erpt_p->pe_hdl, offset);
830 		hdr_next_ptr = (hdr >> PCIE_EXT_CAP_NEXT_PTR_SHIFT) &
831 		    PCIE_EXT_CAP_NEXT_PTR_MASK;
832 		hdr_cap_id = (hdr >> PCIE_EXT_CAP_ID_SHIFT) &
833 		    PCIE_EXT_CAP_ID_MASK;
834 	}
835 
836 	if (hdr_cap_id == PCIE_EXT_CAP_ID_AER)
837 		aer_ptr = P2ALIGN(offset, 4);
838 	if (aer_ptr != PCI_CAP_NEXT_PTR_NULL)
839 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
840 		    "pcie-aer-pointer", aer_ptr);
841 #endif
842 
843 	/*
844 	 * Find and record whether this device is capable of pci express
845 	 * advanced errors; if not, report an error against the device.
846 	 */
847 	pcie_ecap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
848 	    "pcie-aer-pointer", PCI_CAP_NEXT_PTR_NULL);
849 	if (pcie_ecap_ptr != PCI_CAP_NEXT_PTR_NULL) {
850 		erpt_p->pe_dflags |= PCIEX_ADV_DEV;
851 		pcie_regs->pcie_adv_regs = kmem_zalloc(
852 		    sizeof (pcie_adv_error_regs_t), KM_SLEEP);
853 		pcie_regs->pcie_adv_regs->pcie_adv_cap_ptr = pcie_ecap_ptr;
854 	}
855 
856 	if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV)) {
857 		(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
858 		    PCIEX_ERROR_SUBCLASS, PCIEX_NADV);
859 		ddi_fm_ereport_post(dip, buf, NULL, DDI_NOSLEEP,
860 		    FM_VERSION, DATA_TYPE_UINT8, 0, NULL);
861 		return;
862 	}
863 
864 	pcie_adv_regs = pcie_regs->pcie_adv_regs;
865 
866 	if (pcie_adv_regs == NULL)
867 		return;
868 	/*
869 	 * Initialize structures for advanced PCI Express devices.
870 	 */
871 
872 	/*
873 	 * Advanced error registers exist for PCI Express to PCI(X) Bridges and
874 	 * may also exist for PCI(X) to PCI Express Bridges; the latter are not
875 	 * well explained in the PCI Express to PCI/PCI-X Bridge Specification
876 	 * 1.0 and will be left out of the current gathering of these registers.
877 	 */
878 	if (dev_type == PCIE_PCIECAP_DEV_TYPE_PCIE2PCI) {
879 		erpt_p->pe_dflags |= PCIEX_2PCI_DEV;
880 		pcie_adv_regs->pcie_adv_bdg_regs = kmem_zalloc(
881 		    sizeof (pcie_adv_bdg_error_regs_t), KM_SLEEP);
882 	}
883 
884 	if (erpt_p->pe_dflags & PCIEX_RC_DEV)
885 		pcie_adv_regs->pcie_adv_rc_regs = kmem_zalloc(
886 		    sizeof (pcie_adv_rc_error_regs_t), KM_SLEEP);
887 
888 	/*
889 	 * Check that the mask values are as expected; if not,
890 	 * change them to what we desire.
891 	 */
892 	pci_regs_gather(dip, erpt_p);
893 	pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
894 	if (pcie_regs->pcie_adv_regs->pcie_ce_mask != pcie_expected_ce_mask) {
895 		pci_config_put32(erpt_p->pe_hdl,
896 		    pcie_ecap_ptr + PCIE_AER_CE_MASK, pcie_expected_ce_mask);
897 	}
898 
899 	/* Disable PTLP/ECRC (or mask these two) for Switches */
900 	if (dev_type == PCIE_PCIECAP_DEV_TYPE_UP ||
901 	    dev_type == PCIE_PCIECAP_DEV_TYPE_DOWN)
902 		mask |= PCIE_AER_UCE_PTLP | PCIE_AER_UCE_ECRC;
903 
904 	if (pcie_regs->pcie_adv_regs->pcie_ue_mask != mask) {
905 		pci_config_put32(erpt_p->pe_hdl,
906 		    pcie_ecap_ptr + PCIE_AER_UCE_MASK, mask);
907 	}
908 	if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
909 		if (pcie_regs->pcie_adv_regs->pcie_adv_bdg_regs->pcie_sue_mask
910 		    != pcie_expected_sue_mask) {
911 			pci_config_put32(erpt_p->pe_hdl,
912 			    pcie_ecap_ptr + PCIE_AER_SUCE_MASK,
913 			    pcie_expected_sue_mask);
914 		}
915 	}
916 }
917 
918 /*
919  * pci_ereport_setup: Detect PCI device type and initialize structures to be
920  * used to generate ereports based on detected generic device errors.
921  */
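/*
 * A minimal usage sketch (assuming a pci-like nexus driver): the nexus would
 * typically call this from its attach(9E) path after initializing its FMA
 * capabilities, e.g.
 *
 *	ddi_fm_init(dip, &fmcap, &ibc);
 *	if (DDI_FM_EREPORT_CAP(fmcap))
 *		pci_ereport_setup(dip);
 *
 * and undo it with pci_ereport_teardown(dip) before calling ddi_fm_fini().
 */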
922 void
923 pci_ereport_setup(dev_info_t *dip)
924 {
925 	struct dev_info *devi = DEVI(dip);
926 	struct i_ddi_fmhdl *fmhdl = devi->devi_fmhdl;
927 	pci_erpt_t *erpt_p;
928 	uint8_t pci_hdr_type;
929 	uint16_t pci_status;
930 	pci_regspec_t *pci_rp;
931 	int32_t len;
932 	uint32_t phys_hi;
933 
934 	/*
935 	 * If device is not ereport capbable then report an error against the
936 	 * If the device is not ereport capable then report an error against
937 	 * the driver for using this interface.
938 	if (!DDI_FM_EREPORT_CAP(ddi_fm_capable(dip)) &&
939 	    !DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) {
940 		i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL, DDI_SLEEP);
941 		return;
942 	}
943 
944 	/*
945 	 * ASSERT fmhdl exists and fh_bus_specific is NULL.
946 	 */
947 	ASSERT(fmhdl && (fmhdl->fh_bus_specific == NULL));
948 
949 	erpt_p = kmem_zalloc(sizeof (pci_erpt_t), KM_SLEEP);
950 
951 	if (pci_config_setup(dip, &erpt_p->pe_hdl) != DDI_SUCCESS)
952 		goto error;
953 
954 	erpt_p->pe_pci_regs = kmem_zalloc(sizeof (pci_error_regs_t), KM_SLEEP);
955 
956 	pci_status = pci_config_get16(erpt_p->pe_hdl, PCI_CONF_STAT);
957 	if (pci_config_check(erpt_p->pe_hdl) != DDI_FM_OK)
958 		goto error;
959 
960 	/*
961 	 * Get header type and record if device is a bridge.
962 	 */
963 	pci_hdr_type = pci_config_get8(erpt_p->pe_hdl, PCI_CONF_HEADER);
964 	if (pci_config_check(erpt_p->pe_hdl) != DDI_FM_OK)
965 		goto error;
966 
967 	/*
968 	 * Check to see if the PCI device is a bridge; if so, allocate a pci
969 	 * bridge error register structure.
970 	 */
971 	if ((pci_hdr_type & PCI_HEADER_TYPE_M) == PCI_HEADER_PPB) {
972 		erpt_p->pe_dflags |= PCI_BRIDGE_DEV;
973 		erpt_p->pe_pci_regs->pci_bdg_regs = kmem_zalloc(
974 		    sizeof (pci_bdg_error_regs_t), KM_SLEEP);
975 	}
976 
977 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg",
978 	    (caddr_t)&pci_rp, &len) == DDI_SUCCESS) {
979 		phys_hi = pci_rp->pci_phys_hi;
980 		kmem_free(pci_rp, len);
981 
982 		erpt_p->pe_bdf = (uint16_t)(PCI_REG_BDFR_G(phys_hi) >>
983 		    PCI_REG_FUNC_SHIFT);
984 	}
985 
986 
987 	if (!(pci_status & PCI_STAT_CAP)) {
988 		goto done;
989 	}
990 
991 	/*
992 	 * Initialize structures for PCI Express and PCI-X devices.
993 	 * Order matters below: pcie_ereport_setup() must precede
994 	 * pcix_ereport_setup().
995 	 */
996 	pcie_ereport_setup(dip, erpt_p);
997 
998 	if (!(erpt_p->pe_dflags & PCIEX_DEV)) {
999 		pcix_ereport_setup(dip, erpt_p);
1000 	}
1001 
1002 done:
1003 	pci_regs_gather(dip, erpt_p);
1004 	pci_regs_clear(erpt_p);
1005 
1006 	/*
1007 	 * Before returning, set fh_bus_specific to the completed pci_erpt_t
1008 	 * structure.
1009 	 */
1010 	fmhdl->fh_bus_specific = (void *)erpt_p;
1011 
1012 	return;
1013 error:
1014 	if (erpt_p->pe_pci_regs)
1015 		kmem_free(erpt_p->pe_pci_regs, sizeof (pci_error_regs_t));
1016 	kmem_free(erpt_p, sizeof (pci_erpt_t));
1017 	erpt_p = NULL;
1018 }
1019 
1020 static void
1021 pcix_ereport_teardown(pci_erpt_t *erpt_p)
1022 {
1023 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
1024 		pcix_bdg_error_regs_t *pcix_bdg_regs;
1025 		uint16_t pcix_ver;
1026 
1027 		pcix_bdg_regs = (pcix_bdg_error_regs_t *)erpt_p->pe_regs;
1028 		pcix_ver = pcix_bdg_regs->pcix_bdg_ver;
1029 		if (PCIX_ECC_VER_CHECK(pcix_ver)) {
1030 			int i;
1031 			for (i = 0; i < 2; i++)
1032 				kmem_free(pcix_bdg_regs->pcix_bdg_ecc_regs[i],
1033 				    sizeof (pcix_ecc_regs_t));
1034 		}
1035 		kmem_free(erpt_p->pe_regs, sizeof (pcix_bdg_error_regs_t));
1036 	} else {
1037 		pcix_error_regs_t *pcix_regs;
1038 		uint16_t pcix_ver;
1039 
1040 		pcix_regs = (pcix_error_regs_t *)erpt_p->pe_regs;
1041 		pcix_ver = pcix_regs->pcix_ver;
1042 		if (PCIX_ECC_VER_CHECK(pcix_ver)) {
1043 			kmem_free(pcix_regs->pcix_ecc_regs,
1044 			    sizeof (pcix_ecc_regs_t));
1045 		}
1046 		kmem_free(erpt_p->pe_regs, sizeof (pcix_error_regs_t));
1047 	}
1048 }
1049 
1050 static void
1051 pcie_ereport_teardown(pci_erpt_t *erpt_p)
1052 {
1053 	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
1054 
1055 	if (erpt_p->pe_dflags & PCIEX_ADV_DEV) {
1056 		pcie_adv_error_regs_t *pcie_adv = pcie_regs->pcie_adv_regs;
1057 
1058 		if (erpt_p->pe_dflags & PCIEX_2PCI_DEV)
1059 			kmem_free(pcie_adv->pcie_adv_bdg_regs,
1060 			    sizeof (pcie_adv_bdg_error_regs_t));
1061 		if (erpt_p->pe_dflags & PCIEX_RC_DEV)
1062 			kmem_free(pcie_adv->pcie_adv_rc_regs,
1063 			    sizeof (pcie_adv_rc_error_regs_t));
1064 		kmem_free(pcie_adv, sizeof (pcie_adv_error_regs_t));
1065 	}
1066 
1067 	if (erpt_p->pe_dflags & PCIEX_RC_DEV)
1068 		kmem_free(pcie_regs->pcie_rc_regs,
1069 		    sizeof (pcie_rc_error_regs_t));
1070 
1071 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
1072 		if (erpt_p->pe_dflags & PCIX_DEV) {
1073 			uint16_t pcix_ver = pcie_regs->pcix_bdg_regs->
1074 			    pcix_bdg_ver;
1075 
1076 			if (PCIX_ECC_VER_CHECK(pcix_ver)) {
1077 				int i;
1078 				for (i = 0; i < 2; i++)
1079 					kmem_free(pcie_regs->pcix_bdg_regs->
1080 					    pcix_bdg_ecc_regs[i],
1081 					    sizeof (pcix_ecc_regs_t));
1082 			}
1083 			kmem_free(pcie_regs->pcix_bdg_regs,
1084 			    sizeof (pcix_bdg_error_regs_t));
1085 		}
1086 	}
1087 	kmem_free(erpt_p->pe_regs, sizeof (pcie_error_regs_t));
1088 }
1089 
1090 void
1091 pci_ereport_teardown(dev_info_t *dip)
1092 {
1093 	struct i_ddi_fmhdl *fmhdl = DEVI(dip)->devi_fmhdl;
1094 	pci_erpt_t *erpt_p;
1095 
1096 	if (!DDI_FM_EREPORT_CAP(ddi_fm_capable(dip)) &&
1097 	    !DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) {
1098 		i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL, DDI_SLEEP);
1099 	}
1100 
1101 	ASSERT(fmhdl);
1102 
1103 	erpt_p = (pci_erpt_t *)fmhdl->fh_bus_specific;
1104 	if (erpt_p == NULL)
1105 		return;
1106 
1107 	if (erpt_p->pe_dflags & PCIEX_DEV)
1108 		pcie_ereport_teardown(erpt_p);
1109 	else if (erpt_p->pe_dflags & PCIX_DEV)
1110 		pcix_ereport_teardown(erpt_p);
1111 	pci_config_teardown((ddi_acc_handle_t *)&erpt_p->pe_hdl);
1112 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV)
1113 		kmem_free(erpt_p->pe_pci_regs->pci_bdg_regs,
1114 		    sizeof (pci_bdg_error_regs_t));
1115 	kmem_free(erpt_p->pe_pci_regs, sizeof (pci_error_regs_t));
1116 	kmem_free(erpt_p, sizeof (pci_erpt_t));
1117 	fmhdl->fh_bus_specific = NULL;
1118 	/*
1119 	 * The following sparc specific code should be removed once the pci_cap
1120 	 * interfaces create the necessary properties for us.
1121 	 */
1122 #if defined(__sparc)
1123 	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcix-capid-pointer");
1124 	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-slotcap-reg");
1125 	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-capid-reg");
1126 	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-capid-pointer");
1127 	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-aer-pointer");
1128 #endif
1129 }
1130 
1131 /*
1132  * Function used by PCI device and nexus error handlers to check if a
1133  * captured address resides in their DMA or ACC handle caches or the caches of
1134  * their children devices, respectively.
1135  */
1136 static int
1137 pci_dev_hdl_lookup(dev_info_t *dip, int type, ddi_fm_error_t *derr,
1138     void *addr)
1139 {
1140 	struct i_ddi_fmhdl *fmhdl = DEVI(dip)->devi_fmhdl;
1141 	pci_erpt_t *erpt_p = (pci_erpt_t *)fmhdl->fh_bus_specific;
1142 
1143 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV)
1144 		return (ndi_fmc_error(dip, NULL, type, derr->fme_ena, addr));
1145 	else
1146 		return (ndi_fmc_entry_error(dip, type, derr, addr));
1147 }
1148 
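/*
 * Post a PCI Express ereport of the given class and type, attaching the
 * register payload appropriate to that type (correctable, uncorrectable,
 * generic device status, or root complex message collection).
 */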
1149 static void
1150 pcie_ereport_post(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
1151     char *buf, int errtype)
1152 {
1153 	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
1154 	pcie_adv_error_regs_t *pcie_adv_regs = pcie_regs->pcie_adv_regs;
1155 	pcie_adv_rc_error_regs_t *pcie_adv_rc_regs;
1156 
1157 	switch (errtype) {
1158 	    case PCIEX_TYPE_CE:
1159 		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
1160 		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
1161 		    PCIEX_DEVSTS_REG, DATA_TYPE_UINT16,
1162 		    pcie_regs->pcie_err_status,
1163 		    PCIEX_CE_STATUS_REG, DATA_TYPE_UINT32,
1164 		    pcie_adv_regs->pcie_ce_status, NULL);
1165 		break;
1166 	    case PCIEX_TYPE_UE:
1167 		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
1168 		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
1169 		    PCIEX_DEVSTS_REG, DATA_TYPE_UINT16,
1170 		    pcie_regs->pcie_err_status,
1171 		    PCIEX_UE_STATUS_REG, DATA_TYPE_UINT32,
1172 		    pcie_adv_regs->pcie_ue_status, PCIEX_UE_SEV_REG,
1173 		    DATA_TYPE_UINT32, pcie_adv_regs->pcie_ue_sev,
1174 		    PCIEX_ADV_CTL, DATA_TYPE_UINT32,
1175 		    pcie_adv_regs->pcie_adv_ctl,
1176 		    PCIEX_SRC_ID, DATA_TYPE_UINT16,
1177 		    pcie_adv_regs->pcie_adv_bdf,
1178 		    PCIEX_SRC_VALID, DATA_TYPE_BOOLEAN_VALUE,
1179 		    (pcie_adv_regs->pcie_adv_bdf != NULL) ?
1180 		    1 : NULL,
1181 #ifdef DEBUG
1182 		    PCIEX_UE_HDR0, DATA_TYPE_UINT32,
1183 		    pcie_adv_regs->pcie_ue_hdr0,
1184 		    PCIEX_UE_HDR1, DATA_TYPE_UINT32,
1185 		    pcie_adv_regs->pcie_ue_hdr[0],
1186 		    PCIEX_UE_HDR2, DATA_TYPE_UINT32,
1187 		    pcie_adv_regs->pcie_ue_hdr[1],
1188 		    PCIEX_UE_HDR3, DATA_TYPE_UINT32,
1189 		    pcie_adv_regs->pcie_ue_hdr[2],
1190 #endif
1191 		    NULL);
1192 		break;
1193 	    case PCIEX_TYPE_GEN:
1194 		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
1195 		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8,
1196 		    0, PCIEX_DEVSTS_REG, DATA_TYPE_UINT16,
1197 		    pcie_regs->pcie_err_status, NULL);
1198 		break;
1199 	    case PCIEX_TYPE_RC_UE_MSG:
1200 	    case PCIEX_TYPE_RC_CE_MSG:
1201 		pcie_adv_rc_regs = pcie_adv_regs->pcie_adv_rc_regs;
1202 
1203 		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
1204 		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
1205 		    PCIEX_ROOT_ERRSTS_REG, DATA_TYPE_UINT32,
1206 		    pcie_adv_rc_regs->pcie_rc_err_status,
1207 		    PCIEX_SRC_ID, DATA_TYPE_UINT16,
1208 		    (errtype == PCIEX_TYPE_RC_UE_MSG) ?
1209 		    pcie_adv_rc_regs->pcie_rc_ue_src_id :
1210 		    pcie_adv_rc_regs->pcie_rc_ce_src_id,
1211 		    PCIEX_SRC_VALID, DATA_TYPE_BOOLEAN_VALUE,
1212 		    (errtype == PCIEX_TYPE_RC_UE_MSG) ?
1213 		    (pcie_adv_regs->pcie_adv_vflags & PCIE_SRC_ID_VALID &&
1214 		    pcie_adv_rc_regs->pcie_rc_ue_src_id != 0) :
1215 		    (pcie_adv_regs->pcie_adv_vflags & PCIE_SRC_ID_VALID &&
1216 		    pcie_adv_rc_regs->pcie_rc_ce_src_id != 0), NULL);
1217 		break;
1218 	    case PCIEX_TYPE_RC_MULT_MSG:
1219 		pcie_adv_rc_regs = pcie_adv_regs->pcie_adv_rc_regs;
1220 
1221 		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
1222 		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
1223 		    PCIEX_ROOT_ERRSTS_REG, DATA_TYPE_UINT32,
1224 		    pcie_adv_rc_regs->pcie_rc_err_status, NULL);
1225 		break;
1226 	    default:
1227 		break;
1228 	}
1229 }
1230 
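/*
 * Decode the logged TLP header of an uncorrectable error to recover the
 * transaction address and requester id, then look the address up in this
 * device's DMA and access handle caches to judge the error's impact.
 */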
1231 static void
1232 pcie_check_addr(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *eprt_p)
1233 {
1234 	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)eprt_p->pe_regs;
1235 	pcie_adv_error_regs_t *pcie_adv_regs = pcie_regs->pcie_adv_regs;
1236 	pcie_tlp_hdr_t *ue_hdr0;
1237 	uint32_t *ue_hdr;
1238 	uint64_t addr = NULL;
1239 
1240 	if (!(pcie_adv_regs->pcie_adv_vflags & PCIE_UE_HDR_VALID)) {
1241 		derr->fme_status = DDI_FM_UNKNOWN;
1242 		return;
1243 	}
1244 	ue_hdr0 = (pcie_tlp_hdr_t *)&pcie_adv_regs->pcie_ue_hdr0;
1245 	ue_hdr = pcie_adv_regs->pcie_ue_hdr;
1246 
1247 	switch (ue_hdr0->type) {
1248 	    case PCIE_TLP_TYPE_MEM:
1249 	    case PCIE_TLP_TYPE_MEMLK:
1250 		if ((ue_hdr0->fmt & 0x1) == 0x1) {
1251 			pcie_mem64_t *mem64_tlp = (pcie_mem64_t *)ue_hdr;
1252 
1253 			addr = (uint64_t)mem64_tlp->addr1 << 32 |
1254 			    (uint32_t)mem64_tlp->addr0 << 2;
1255 			pcie_adv_regs->pcie_adv_bdf = mem64_tlp->rid;
1256 		} else {
1257 			pcie_memio32_t *memio32_tlp = (pcie_memio32_t *)ue_hdr;
1258 
1259 			addr = (uint32_t)memio32_tlp->addr0 << 2;
1260 			pcie_adv_regs->pcie_adv_bdf = memio32_tlp->rid;
1261 		}
1262 
1263 		derr->fme_status = pci_dev_hdl_lookup(dip, DMA_HANDLE, derr,
1264 		    (void *) &addr);
1265 		/*
1266 		 * If no DMA handle is found, the error could have been at a
1267 		 * memory mapped IO address, so check in the access cache.
1268 		 */
1269 		if (derr->fme_status == DDI_FM_UNKNOWN)
1270 			derr->fme_status = pci_dev_hdl_lookup(dip, ACC_HANDLE,
1271 			    derr, (void *) &addr);
1272 		break;
1273 
1274 	    case PCIE_TLP_TYPE_IO:
1275 		{
1276 			pcie_memio32_t *memio32_tlp = (pcie_memio32_t *)ue_hdr;
1277 
1278 			addr = (uint32_t)memio32_tlp->addr0 << 2;
1279 			pcie_adv_regs->pcie_adv_bdf = memio32_tlp->rid;
1280 			derr->fme_status = pci_dev_hdl_lookup(dip, ACC_HANDLE,
1281 			    derr, (void *) &addr);
1282 			break;
1283 		}
1284 	    case PCIE_TLP_TYPE_CFG0:
1285 	    case PCIE_TLP_TYPE_CFG1:
1286 		{
1287 			pcie_cfg_t *cfg_tlp = (pcie_cfg_t *)ue_hdr;
1288 
1289 			pcie_adv_regs->pcie_adv_bdf = cfg_tlp->rid;
1290 			derr->fme_status = DDI_FM_UNKNOWN;
1291 			break;
1292 		}
1293 	    case PCIE_TLP_TYPE_MSG:
1294 		{
1295 			pcie_msg_t *msg_tlp = (pcie_msg_t *)ue_hdr;
1296 
1297 			pcie_adv_regs->pcie_adv_bdf = msg_tlp->rid;
1298 			derr->fme_status = DDI_FM_UNKNOWN;
1299 			break;
1300 		}
1301 	    case PCIE_TLP_TYPE_CPL:
1302 	    case PCIE_TLP_TYPE_CPLLK:
1303 		{
1304 			pcie_cpl_t *cpl_tlp = (pcie_cpl_t *)ue_hdr;
1305 
1306 			pcie_adv_regs->pcie_adv_bdf = cpl_tlp->cid;
1307 			derr->fme_status = DDI_FM_UNKNOWN;
1308 			break;
1309 		}
1310 	    case PCIE_TLP_TYPE_MSI:
1311 	    default:
1312 		derr->fme_status = DDI_FM_UNKNOWN;
1313 	}
1314 
1315 	/*
1316 	 * If no handle was found in the children's caches, there is no
1317 	 * address information already stored, and we have a captured address,
1318 	 * then we need to store it away so that intermediate bridges can
1319 	 * check if the address exists in their handle caches.
1320 	 */
1321 	if (derr->fme_status == DDI_FM_UNKNOWN &&
1322 	    derr->fme_bus_specific == NULL &&
1323 	    addr != NULL)
1324 		derr->fme_bus_specific = (void *)(uintptr_t)addr;
1325 }
1326 
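/*
 * As above, but for the secondary (PCI/PCI-X) header logged by a PCI Express
 * to PCI bridge: decode the PCI-X attribute and command fields to find the
 * address and requester id, then consult the handle caches.
 */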
1327 static void
1328 pcie_pci_check_addr(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *eprt_p)
1329 {
1330 	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)eprt_p->pe_regs;
1331 	pcie_adv_error_regs_t *pcie_adv_regs = pcie_regs->pcie_adv_regs;
1332 	pcie_adv_bdg_error_regs_t *pcie_bdg_regs =
1333 	    pcie_adv_regs->pcie_adv_bdg_regs;
1334 	uint64_t addr = NULL;
1335 	pcix_attr_t *pcie_pci_sue_attr;
1336 	int cmd;
1337 	int dual_addr = 0;
1338 
1339 	if (!(pcie_adv_regs->pcie_adv_vflags & PCIE_SUE_HDR_VALID)) {
1340 		derr->fme_status = DDI_FM_UNKNOWN;
1341 		return;
1342 	}
1343 
1344 	pcie_pci_sue_attr = (pcix_attr_t *)&pcie_bdg_regs->pcie_sue_hdr0;
1345 	cmd = (pcie_bdg_regs->pcie_sue_hdr[0] >>
1346 	    PCIE_AER_SUCE_HDR_CMD_LWR_SHIFT) & PCIE_AER_SUCE_HDR_CMD_LWR_MASK;
1347 cmd_switch:
1348 	switch (cmd) {
1349 	    case PCI_PCIX_CMD_IORD:
1350 	    case PCI_PCIX_CMD_IOWR:
1351 		pcie_adv_regs->pcie_adv_bdf = pcie_pci_sue_attr->rid;
1352 
1353 		addr = pcie_bdg_regs->pcie_sue_hdr[2];
1354 		addr = (addr << PCIE_AER_SUCE_HDR_ADDR_SHIFT) |
1355 		    pcie_bdg_regs->pcie_sue_hdr[1];
1356 
1357 		derr->fme_status = pci_dev_hdl_lookup(dip, ACC_HANDLE,
1358 		    derr, (void *) &addr);
1359 		break;
1360 	    case PCI_PCIX_CMD_MEMRD_DW:
1361 	    case PCI_PCIX_CMD_MEMWR:
1362 	    case PCI_PCIX_CMD_MEMRD_BL:
1363 	    case PCI_PCIX_CMD_MEMWR_BL:
1364 	    case PCI_PCIX_CMD_MEMRDBL:
1365 	    case PCI_PCIX_CMD_MEMWRBL:
1366 		pcie_adv_regs->pcie_adv_bdf = pcie_pci_sue_attr->rid;
1367 
1368 		addr = pcie_bdg_regs->pcie_sue_hdr[2];
1369 		addr = (addr << PCIE_AER_SUCE_HDR_ADDR_SHIFT) |
1370 		    pcie_bdg_regs->pcie_sue_hdr[1];
1371 
1372 		derr->fme_status = pci_dev_hdl_lookup(dip, DMA_HANDLE,
1373 		    derr, (void *) &addr);
1374 		if (derr->fme_status == DDI_FM_UNKNOWN)
1375 			derr->fme_status = pci_dev_hdl_lookup(dip, ACC_HANDLE,
1376 			    derr, (void *) &addr);
1377 		break;
1378 	    case PCI_PCIX_CMD_CFRD:
1379 	    case PCI_PCIX_CMD_CFWR:
1380 		pcie_adv_regs->pcie_adv_bdf = pcie_pci_sue_attr->rid;
1381 
1382 		derr->fme_status = DDI_FM_UNKNOWN;
1383 		break;
1384 	    case PCI_PCIX_CMD_DADR:
1385 		cmd = (pcie_bdg_regs->pcie_sue_hdr[0] >>
1386 		    PCIE_AER_SUCE_HDR_CMD_UP_SHIFT) &
1387 		    PCIE_AER_SUCE_HDR_CMD_UP_MASK;
1388 		if (dual_addr)
1389 			break;
1390 		++dual_addr;
1391 		goto cmd_switch;
1392 	    default:
1393 		derr->fme_status = DDI_FM_UNKNOWN;
1394 	}
1395 
1396 	/*
1397 	 * If no handle was found in the children's caches, there is no
1398 	 * address information already stored, and we have a captured address,
1399 	 * then we need to store it away so that intermediate bridges can
1400 	 * check if the address exists in their handle caches.
1401 	 */
1402 	if (derr->fme_status == DDI_FM_UNKNOWN &&
1403 	    derr->fme_bus_specific == NULL &&
1404 	    addr != NULL)
1405 		derr->fme_bus_specific = (void *)(uintptr_t)addr;
1406 }
1407 
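/*
 * Classify a PCI-X ECC data phase error by the logged bus command: memory
 * and I/O addresses are looked up in the DMA or access handle caches, while
 * commands with no usable address are treated as fatal.
 */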
1408 static int
1409 pcix_check_addr(dev_info_t *dip, ddi_fm_error_t *derr,
1410     pcix_ecc_regs_t *pcix_ecc_regs)
1411 {
1412 	int cmd = (pcix_ecc_regs->pcix_ecc_ctlstat >> 16) & 0xf;
1413 	uint64_t addr;
1414 
1415 	addr = pcix_ecc_regs->pcix_ecc_secaddr;
1416 	addr = addr << 32;
1417 	addr |= pcix_ecc_regs->pcix_ecc_fstaddr;
1418 
1419 	switch (cmd) {
1420 	    case PCI_PCIX_CMD_INTR:
1421 	    case PCI_PCIX_CMD_SPEC:
1422 		return (DDI_FM_FATAL);
1423 	    case PCI_PCIX_CMD_IORD:
1424 	    case PCI_PCIX_CMD_IOWR:
1425 		return (pci_dev_hdl_lookup(dip, ACC_HANDLE, derr,
1426 		    (void *) &addr));
1427 	    case PCI_PCIX_CMD_DEVID:
1428 		return (DDI_FM_FATAL);
1429 	    case PCI_PCIX_CMD_MEMRD_DW:
1430 	    case PCI_PCIX_CMD_MEMWR:
1431 	    case PCI_PCIX_CMD_MEMRD_BL:
1432 	    case PCI_PCIX_CMD_MEMWR_BL:
1433 		return (pci_dev_hdl_lookup(dip, DMA_HANDLE, derr,
1434 		    (void *) &addr));
1435 	    case PCI_PCIX_CMD_CFRD:
1436 	    case PCI_PCIX_CMD_CFWR:
1437 		return (pci_dev_hdl_lookup(dip, ACC_HANDLE, derr,
1438 		    (void *) &addr));
1439 	    case PCI_PCIX_CMD_SPL:
1440 	    case PCI_PCIX_CMD_DADR:
1441 		return (DDI_FM_FATAL);
1442 	    case PCI_PCIX_CMD_MEMRDBL:
1443 	    case PCI_PCIX_CMD_MEMWRBL:
1444 		return (pci_dev_hdl_lookup(dip, DMA_HANDLE, derr,
1445 		    (void *) &addr));
1446 	    default:
1447 		return (DDI_FM_FATAL);
1448 	}
1449 }
1450 
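/*
 * Scan the captured bridge secondary status and post an ereport for each
 * error bit found, then derive an overall severity for this bridge from the
 * per-error severities, any matching handle cache entries and the results
 * returned by its children.
 */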
1451 /*ARGSUSED*/
1452 static int
1453 pci_bdg_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
1454 {
1455 	pci_bdg_error_regs_t *pci_bdg_regs = erpt_p->pe_pci_regs->pci_bdg_regs;
1456 	int fatal = 0;
1457 	int nonfatal = 0;
1458 	int unknown = 0;
1459 	int ok = 0;
1460 	int ret = DDI_FM_OK;
1461 	char buf[FM_MAX_CLASS];
1462 	int i;
1463 
1464 	if (derr->fme_flag != DDI_FM_ERR_UNEXPECTED)
1465 		goto done;
1466 
1467 	if ((pci_bdg_regs->pci_bdg_vflags & PCI_BDG_CTRL_VALID) &&
1468 	    (pci_bdg_regs->pci_bdg_ctrl & PCI_BCNF_BCNTRL_DTO_STAT)) {
1469 		(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
1470 		    PCI_ERROR_SUBCLASS, PCI_DTO);
1471 		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
1472 		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
1473 		    PCI_SEC_CONFIG_STATUS, DATA_TYPE_UINT16,
1474 		    pci_bdg_regs->pci_bdg_sec_stat, PCI_BCNTRL,
1475 		    DATA_TYPE_UINT16, pci_bdg_regs->pci_bdg_ctrl, NULL);
1476 		unknown++;
1477 	}
1478 
1479 	if (pci_bdg_regs->pci_bdg_vflags & PCI_BDG_SEC_STAT_VALID) {
1480 		for (i = 0; pci_bdg_err_tbl[i].err_class != NULL; i++) {
1481 			if (pci_bdg_regs->pci_bdg_sec_stat &
1482 			    pci_bdg_err_tbl[i].reg_bit) {
1483 				(void) snprintf(buf, FM_MAX_CLASS, "%s.%s-%s",
1484 				    PCI_ERROR_SUBCLASS, PCI_SEC_ERROR_SUBCLASS,
1485 				    pci_bdg_err_tbl[i].err_class);
1486 				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
1487 				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
1488 				    PCI_SEC_CONFIG_STATUS, DATA_TYPE_UINT16,
1489 				    pci_bdg_regs->pci_bdg_sec_stat, PCI_BCNTRL,
1490 				    DATA_TYPE_UINT16,
1491 				    pci_bdg_regs->pci_bdg_ctrl, NULL);
1492 				PCI_FM_SEV_INC(pci_bdg_err_tbl[i].flags);
1493 				if (derr->fme_bus_specific &&
1494 				    pci_bdg_err_tbl[i].terr_class)
1495 					pci_target_enqueue(derr->fme_ena,
1496 					    pci_bdg_err_tbl[i].terr_class,
1497 					    PCI_ERROR_SUBCLASS,
1498 					    (uintptr_t)derr->fme_bus_specific);
1499 			}
1500 		}
1501 #if !defined(__sparc)
1502 		/*
1503 		 * For x86, many drivers and even user-level code currently get
1504 		 * away with accessing bad addresses, getting a UR and getting
1505 		 * -1 returned. Unfortunately, we have no control over this, so
1506 		 * we will have to treat all URs as nonfatal. Moreover, if the
1507 		 * leaf driver is non-hardened, then we don't actually see the
1508 		 * UR directly. All we see is a secondary bus master abort at
1509 		 * the root complex - so it's this condition that we actually
1510 		 * need to treat as nonfatal (providing no other unrelated nfe
1511 		 * conditions have also been seen by the root complex).
1512 		 */
1513 		if ((erpt_p->pe_dflags & PCIEX_RC_DEV) &&
1514 		    (pci_bdg_regs->pci_bdg_sec_stat & PCI_STAT_R_MAST_AB) &&
1515 		    !(pci_bdg_regs->pci_bdg_sec_stat & PCI_STAT_S_PERROR)) {
1516 			pcie_error_regs_t *pcie_regs =
1517 			    (pcie_error_regs_t *)erpt_p->pe_regs;
1518 			if ((pcie_regs->pcie_vflags & PCIE_ERR_STATUS_VALID) &&
1519 			    !(pcie_regs->pcie_err_status &
1520 			    PCIE_DEVSTS_NFE_DETECTED))
1521 				nonfatal++;
1522 		}
1523 #endif
1524 	}
1525 
1526 done:
1527 
1528 	/*
1529 	 * Need to check for poke and cautious put. We already know peek
1530 	 * and cautious get errors occurred (as we got a trap) and we know
1531 	 * they are nonfatal.
1532 	 */
1533 	if (derr->fme_flag == DDI_FM_ERR_EXPECTED) {
1534 		/*
1535 		 * For cautious puts we treat all errors as nonfatal.  Actually
1536 		 * we set nonfatal for cautious gets as well; it doesn't do any
1537 		 * harm.
1538 		 */
1539 		if (pci_bdg_regs->pci_bdg_sec_stat & (PCI_STAT_R_TARG_AB |
1540 		    PCI_STAT_R_MAST_AB | PCI_STAT_S_PERROR | PCI_STAT_S_SYSERR))
1541 			nonfatal++;
1542 
1543 		/*
1544 		 * For cautious accesses we already have the acc_handle.  We just
1545 		 * need to call the children to clear their error bits.
1546 		 */
1547 		ret = ndi_fm_handler_dispatch(dip, NULL, derr);
1548 		PCI_FM_SEV_INC(ret);
1549 		return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
1550 		    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
1551 	}
1552 	if (derr->fme_flag == DDI_FM_ERR_POKE) {
1553 		/*
1554 		 * Special case for pokes: we only consider master abort
1555 		 * and target abort as nonfatal.  SERR with no master abort is
1556 		 * fatal, but the master/target abort can come in on a separate
1557 		 * instance, so return unknown and the parent will determine
1558 		 * whether it is nonfatal (if another child returned nonfatal,
1559 		 * i.e. master or target abort) or fatal otherwise.
1560 		 */
1561 		if (pci_bdg_regs->pci_bdg_sec_stat & (PCI_STAT_R_TARG_AB |
1562 		    PCI_STAT_R_MAST_AB))
1563 			nonfatal++;
1564 		if (erpt_p->pe_pci_regs->pci_err_status & PCI_STAT_S_SYSERR)
1565 			unknown++;
1566 	}
1567 
1568 	/*
1569 	 * If errant address is passed in then attempt to find
1570 	 * ACC/DMA handle in caches.
1571 	 */
1572 	if (derr->fme_bus_specific) {
1573 		int i;
1574 
1575 		for (i = 0; i < 2; i++) {
1576 			ret = ndi_fmc_error(dip, NULL, i ? ACC_HANDLE :
1577 			    DMA_HANDLE, derr->fme_ena,
1578 			    (void *)&derr->fme_bus_specific);
1579 			PCI_FM_SEV_INC(ret);
1580 		}
1581 	}
1582 
1583 	/*
1584 	 * Now check the children below the bridge, but only if the errant
1585 	 * handle was not found.
1586 	 */
1587 	if (!derr->fme_acc_handle && !derr->fme_dma_handle) {
1588 		ret = ndi_fm_handler_dispatch(dip, NULL, derr);
1589 		PCI_FM_SEV_INC(ret);
1590 	}
1591 
1592 	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
1593 	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
1594 }
1595 
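/*
 * Report PCI-X ECC errors: for each ECC register set (two on bridges, one
 * otherwise) determine the phase and correctability of the error, post the
 * matching ereport and fold its severity into the result.
 */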
1596 static int
1597 pcix_ecc_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
1598     void *pe_regs)
1599 {
1600 	pcix_error_regs_t *pcix_regs;
1601 	pcix_bdg_error_regs_t *pcix_bdg_regs;
1602 	pcix_ecc_regs_t *pcix_ecc_regs;
1603 	int bridge;
1604 	int i;
1605 	int ecc_phase;
1606 	int ecc_corr;
1607 	int sec_ue;
1608 	int sec_ce;
1609 	int fatal = 0;
1610 	int nonfatal = 0;
1611 	int unknown = 0;
1612 	int ok = 0;
1613 	char buf[FM_MAX_CLASS];
1614 
1615 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
1616 		pcix_bdg_regs = (pcix_bdg_error_regs_t *)pe_regs;
1617 		bridge = 1;
1618 	} else {
1619 		pcix_regs = (pcix_error_regs_t *)pe_regs;
1620 		bridge = 0;
1621 	}
1622 
1623 	for (i = 0; i < (bridge ? 2 : 1); i++) {
1624 		int ret = DDI_FM_OK;
1625 		pcix_ecc_regs = bridge ? pcix_bdg_regs->pcix_bdg_ecc_regs[i] :
1626 		    pcix_regs->pcix_ecc_regs;
1627 		if (pcix_ecc_regs->pcix_ecc_vflags & PCIX_ERR_ECC_STS_VALID) {
1628 			ecc_phase = (pcix_ecc_regs->pcix_ecc_ctlstat &
1629 			    PCI_PCIX_ECC_PHASE) >> 0x4;
1630 			ecc_corr = (pcix_ecc_regs->pcix_ecc_ctlstat &
1631 			    PCI_PCIX_ECC_CORR);
1632 			sec_ue = (pcix_ecc_regs->pcix_ecc_ctlstat &
1633 			    PCI_PCIX_ECC_S_UE);
1634 			sec_ce = (pcix_ecc_regs->pcix_ecc_ctlstat &
1635 			    PCI_PCIX_ECC_S_CE);
1636 
1637 			switch (ecc_phase) {
1638 			    case PCI_PCIX_ECC_PHASE_NOERR:
1639 				break;
1640 			    case PCI_PCIX_ECC_PHASE_FADDR:
1641 			    case PCI_PCIX_ECC_PHASE_SADDR:
1642 				PCI_FM_SEV_INC(ecc_corr ?  DDI_FM_OK :
1643 				    DDI_FM_FATAL);
1644 				(void) snprintf(buf, FM_MAX_CLASS,
1645 				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
1646 				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
1647 				    ecc_corr ? PCIX_ECC_CE_ADDR :
1648 				    PCIX_ECC_UE_ADDR);
1649 				break;
1650 			    case PCI_PCIX_ECC_PHASE_ATTR:
1651 				PCI_FM_SEV_INC(ecc_corr ?
1652 				    DDI_FM_OK : DDI_FM_FATAL);
1653 				(void) snprintf(buf, FM_MAX_CLASS,
1654 				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
1655 				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
1656 				    ecc_corr ? PCIX_ECC_CE_ATTR :
1657 				    PCIX_ECC_UE_ATTR);
1658 				break;
1659 			    case PCI_PCIX_ECC_PHASE_DATA32:
1660 			    case PCI_PCIX_ECC_PHASE_DATA64:
1661 				if (ecc_corr)
1662 					ret = DDI_FM_OK;
1663 				else
1664 					ret = pcix_check_addr(dip, derr,
1665 					    pcix_ecc_regs);
1666 				PCI_FM_SEV_INC(ret);
1667 
1668 				(void) snprintf(buf, FM_MAX_CLASS,
1669 				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
1670 				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
1671 				    ecc_corr ? PCIX_ECC_CE_DATA :
1672 				    PCIX_ECC_UE_DATA);
1673 				break;
1674 			}
1675 			if (ecc_phase)
1676 				if (bridge)
1677 					ddi_fm_ereport_post(dip, buf,
1678 					    derr->fme_ena,
1679 					    DDI_NOSLEEP, FM_VERSION,
1680 					    DATA_TYPE_UINT8, 0,
1681 					    PCIX_SEC_STATUS, DATA_TYPE_UINT16,
1682 					    pcix_bdg_regs->pcix_bdg_sec_stat,
1683 					    PCIX_BDG_STAT, DATA_TYPE_UINT32,
1684 					    pcix_bdg_regs->pcix_bdg_stat,
1685 					    PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32,
1686 					    pcix_ecc_regs->pcix_ecc_ctlstat,
1687 					    PCIX_ECC_ATTR, DATA_TYPE_UINT32,
1688 					    pcix_ecc_regs->pcix_ecc_attr, NULL);
1689 				else
1690 					ddi_fm_ereport_post(dip, buf,
1691 					    derr->fme_ena,
1692 					    DDI_NOSLEEP, FM_VERSION,
1693 					    DATA_TYPE_UINT8, 0,
1694 					    PCIX_COMMAND, DATA_TYPE_UINT16,
1695 					    pcix_regs->pcix_command,
1696 					    PCIX_STATUS, DATA_TYPE_UINT32,
1697 					    pcix_regs->pcix_status,
1698 					    PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32,
1699 					    pcix_ecc_regs->pcix_ecc_ctlstat,
1700 					    PCIX_ECC_ATTR, DATA_TYPE_UINT32,
1701 					    pcix_ecc_regs->pcix_ecc_attr, NULL);
1702 			if (sec_ce || sec_ue) {
1703 				(void) snprintf(buf, FM_MAX_CLASS,
1704 				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
1705 				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
1706 				    sec_ce ? PCIX_ECC_S_CE : PCIX_ECC_S_UE);
1707 				if (bridge)
1708 					ddi_fm_ereport_post(dip, buf,
1709 					    derr->fme_ena,
1710 					    DDI_NOSLEEP, FM_VERSION,
1711 					    DATA_TYPE_UINT8, 0,
1712 					    PCIX_SEC_STATUS, DATA_TYPE_UINT16,
1713 					    pcix_bdg_regs->pcix_bdg_sec_stat,
1714 					    PCIX_BDG_STAT, DATA_TYPE_UINT32,
1715 					    pcix_bdg_regs->pcix_bdg_stat,
1716 					    PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32,
1717 					    pcix_ecc_regs->pcix_ecc_ctlstat,
1718 					    PCIX_ECC_ATTR, DATA_TYPE_UINT32,
1719 					    pcix_ecc_regs->pcix_ecc_attr, NULL);
1720 				else
1721 					ddi_fm_ereport_post(dip, buf,
1722 					    derr->fme_ena,
1723 					    DDI_NOSLEEP, FM_VERSION,
1724 					    DATA_TYPE_UINT8, 0,
1725 					    PCIX_COMMAND, DATA_TYPE_UINT16,
1726 					    pcix_regs->pcix_command,
1727 					    PCIX_STATUS, DATA_TYPE_UINT32,
1728 					    pcix_regs->pcix_status,
1729 					    PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32,
1730 					    pcix_ecc_regs->pcix_ecc_ctlstat,
1731 					    PCIX_ECC_ATTR, DATA_TYPE_UINT32,
1732 					    pcix_ecc_regs->pcix_ecc_attr, NULL);
1733 				PCI_FM_SEV_INC(sec_ue ? DDI_FM_FATAL :
1734 				    DDI_FM_OK);
1735 			}
1736 		}
1737 	}
1738 	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
1739 	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
1740 }
1741 
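/*
 * Report PCI-X bridge errors: scan the captured bridge status and
 * secondary status registers against pcix_err_tbl and pcix_sec_err_tbl,
 * post an ereport for each error bit found, handle any ECC errors and
 * return the aggregate severity.
 */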
1742 static int
1743 pcix_bdg_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
1744     void *pe_regs)
1745 {
1746 	pcix_bdg_error_regs_t *pcix_bdg_regs = (pcix_bdg_error_regs_t *)pe_regs;
1747 	int fatal = 0;
1748 	int nonfatal = 0;
1749 	int unknown = 0;
1750 	int ok = 0;
1751 	char buf[FM_MAX_CLASS];
1752 	int i;
1753 
1754 	if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_STATUS_VALID) {
1755 		for (i = 0; pcix_err_tbl[i].err_class != NULL; i++) {
1756 			if ((pcix_bdg_regs->pcix_bdg_stat &
1757 			    pcix_err_tbl[i].reg_bit)) {
1758 				(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
1759 				    PCIX_ERROR_SUBCLASS,
1760 				    pcix_err_tbl[i].err_class);
1761 				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
1762 				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
1763 				    PCIX_SEC_STATUS, DATA_TYPE_UINT16,
1764 				    pcix_bdg_regs->pcix_bdg_sec_stat,
1765 				    PCIX_BDG_STAT, DATA_TYPE_UINT32,
1766 				    pcix_bdg_regs->pcix_bdg_stat, NULL);
1767 				PCI_FM_SEV_INC(pcix_err_tbl[i].flags);
1768 			}
1769 		}
1770 	}
1771 
1772 	if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_SEC_STATUS_VALID) {
1773 		for (i = 0; pcix_sec_err_tbl[i].err_class != NULL; i++) {
1774 			if ((pcix_bdg_regs->pcix_bdg_sec_stat &
1775 			    pcix_sec_err_tbl[i].reg_bit)) {
1776 				(void) snprintf(buf, FM_MAX_CLASS, "%s.%s%s",
1777 				    PCIX_ERROR_SUBCLASS,
1778 				    PCIX_SEC_ERROR_SUBCLASS,
1779 				    pcix_sec_err_tbl[i].err_class);
1780 				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
1781 				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
1782 				    PCIX_SEC_STATUS, DATA_TYPE_UINT16,
1783 				    pcix_bdg_regs->pcix_bdg_sec_stat,
1784 				    PCIX_BDG_STAT, DATA_TYPE_UINT32,
1785 				    pcix_bdg_regs->pcix_bdg_stat, NULL);
1786 				PCI_FM_SEV_INC(pcix_sec_err_tbl[i].flags);
1787 			}
1788 		}
1789 	}
1790 
1791 	/* Log/Handle ECC errors */
1792 	if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) {
1793 		int ret;
1794 
1795 		ret = pcix_ecc_error_report(dip, derr, erpt_p,
1796 		    (void *)pcix_bdg_regs);
1797 		PCI_FM_SEV_INC(ret);
1798 	}
1799 	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
1800 	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
1801 }
1802 
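/*
 * Report PCI-X errors for a non-bridge device: scan the captured PCI-X
 * status register against pcix_err_tbl, post an ereport for each error
 * bit found, handle any ECC errors and return the aggregate severity.
 */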
1803 static int
1804 pcix_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
1805 {
1806 	pcix_error_regs_t *pcix_regs = (pcix_error_regs_t *)erpt_p->pe_regs;
1807 	int fatal = 0;
1808 	int nonfatal = 0;
1809 	int unknown = 0;
1810 	int ok = 0;
1811 	char buf[FM_MAX_CLASS];
1812 	int i;
1813 
1814 	if (pcix_regs->pcix_vflags & PCIX_ERR_STATUS_VALID) {
1815 		for (i = 0; pcix_err_tbl[i].err_class != NULL; i++) {
1816 			if (!(pcix_regs->pcix_status & pcix_err_tbl[i].reg_bit))
1817 				continue;
1818 
1819 			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
1820 			    PCIX_ERROR_SUBCLASS, pcix_err_tbl[i].err_class);
1821 			ddi_fm_ereport_post(dip, buf, derr->fme_ena,
1822 			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
1823 			    PCIX_COMMAND, DATA_TYPE_UINT16,
1824 			    pcix_regs->pcix_command, PCIX_STATUS,
1825 			    DATA_TYPE_UINT32, pcix_regs->pcix_status,
1826 			    NULL);
1827 			PCI_FM_SEV_INC(pcix_err_tbl[i].flags);
1828 		}
1829 	}
1830 	/* Log/Handle ECC errors */
1831 	if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) {
1832 		int ret = pcix_ecc_error_report(dip, derr, erpt_p,
1833 		    (void *)pcix_regs);
1834 		PCI_FM_SEV_INC(ret);
1835 	}
1836 
1837 	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
1838 	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
1839 }
1840 
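/*
 * Report root complex errors from the captured AER root error status
 * register: post ereports for the fatal, nonfatal and correctable error
 * messages received (and for any multiple-message indications) and
 * return the aggregate severity.
 */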
1841 static int
1842 pcie_rc_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
1843     void *pe_regs)
1844 {
1845 	pcie_adv_error_regs_t *pcie_adv_regs = (pcie_adv_error_regs_t *)pe_regs;
1846 	int fatal = 0;
1847 	int nonfatal = 0;
1848 	int unknown = 0;
1849 	char buf[FM_MAX_CLASS];
1850 
1851 	if (pcie_adv_regs->pcie_adv_vflags & PCIE_RC_ERR_STATUS_VALID) {
1852 		pcie_adv_rc_error_regs_t *pcie_rc_regs =
1853 		    pcie_adv_regs->pcie_adv_rc_regs;
1854 		int ce, ue, mult_ce, mult_ue, first_ue_fatal, nfe, fe;
1855 
1856 		ce = pcie_rc_regs->pcie_rc_err_status &
1857 		    PCIE_AER_RE_STS_CE_RCVD;
1858 		ue = pcie_rc_regs->pcie_rc_err_status &
1859 		    PCIE_AER_RE_STS_FE_NFE_RCVD;
1860 		mult_ce = pcie_rc_regs->pcie_rc_err_status &
1861 		    PCIE_AER_RE_STS_MUL_CE_RCVD;
1862 		mult_ue = pcie_rc_regs->pcie_rc_err_status &
1863 		    PCIE_AER_RE_STS_MUL_FE_NFE_RCVD;
1864 		first_ue_fatal = pcie_rc_regs->pcie_rc_err_status &
1865 		    PCIE_AER_RE_STS_FIRST_UC_FATAL;
1866 		nfe = pcie_rc_regs->pcie_rc_err_status &
1867 		    PCIE_AER_RE_STS_NFE_MSGS_RCVD;
1868 		fe = pcie_rc_regs->pcie_rc_err_status &
1869 		    PCIE_AER_RE_STS_FE_MSGS_RCVD;
1870 		/*
1871 		 * log fatal/nonfatal/corrected messages
1872 		 * received by the root complex
1873 		 */
1874 		if (ue && fe)
1875 			fatal++;
1876 
1877 		if (fe && first_ue_fatal) {
1878 			(void) snprintf(buf, FM_MAX_CLASS,
1879 			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_FE_MSG);
1880 			pcie_ereport_post(dip, derr, erpt_p, buf,
1881 			    PCIEX_TYPE_RC_UE_MSG);
1882 		}
1883 		if (nfe && !first_ue_fatal) {
1884 			(void) snprintf(buf, FM_MAX_CLASS,
1885 			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_NFE_MSG);
1886 			pcie_ereport_post(dip, derr, erpt_p, buf,
1887 			    PCIEX_TYPE_RC_UE_MSG);
1888 		}
1889 		if (ce) {
1890 			(void) snprintf(buf, FM_MAX_CLASS,
1891 			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_CE_MSG);
1892 			pcie_ereport_post(dip, derr, erpt_p, buf,
1893 			    PCIEX_TYPE_RC_CE_MSG);
1894 		}
1895 		if (mult_ce) {
1896 			(void) snprintf(buf, FM_MAX_CLASS,
1897 			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_MCE_MSG);
1898 			pcie_ereport_post(dip, derr, erpt_p, buf,
1899 			    PCIEX_TYPE_RC_MULT_MSG);
1900 		}
1901 		if (mult_ue) {
1902 			(void) snprintf(buf, FM_MAX_CLASS,
1903 			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_MUE_MSG);
1904 			pcie_ereport_post(dip, derr, erpt_p, buf,
1905 			    PCIEX_TYPE_RC_MULT_MSG);
1906 		}
1907 	}
1908 	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
1909 	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
1910 }
1911 
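/*
 * Report PCI Express errors. PCI-X bridge registers are handled first
 * for a bridge that is also PCI-X capable. If the device lacks the
 * advanced error capability only the device status register is scanned;
 * otherwise the AER uncorrectable, correctable, root complex and (for
 * PCIe-to-PCI bridges) secondary uncorrectable status registers are
 * scanned and ereports posted. Returns the aggregate severity.
 */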
1912 static int
1913 pcie_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
1914 {
1915 	int fatal = 0;
1916 	int nonfatal = 0;
1917 	int unknown = 0;
1918 	int ok = 0;
1919 	char buf[FM_MAX_CLASS];
1920 	int i;
1921 	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
1922 	pcie_adv_error_regs_t *pcie_adv_regs;
1923 	pcie_adv_bdg_error_regs_t *pcie_bdg_regs;
1924 
1925 	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) &&
1926 	    (erpt_p->pe_dflags & PCIX_DEV)) {
1927 		int ret = pcix_bdg_error_report(dip, derr, erpt_p,
1928 		    (void *)pcie_regs->pcix_bdg_regs);
1929 		PCI_FM_SEV_INC(ret);
1930 	}
1931 
1932 	if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV)) {
1933 		if (!(pcie_regs->pcie_vflags & PCIE_ERR_STATUS_VALID))
1934 			goto done;
1935 		for (i = 0; pciex_nadv_err_tbl[i].err_class != NULL; i++) {
1936 			if (!(pcie_regs->pcie_err_status &
1937 			    pciex_nadv_err_tbl[i].reg_bit))
1938 				continue;
1939 
1940 			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
1941 			    PCIEX_ERROR_SUBCLASS,
1942 			    pciex_nadv_err_tbl[i].err_class);
1943 			pcie_ereport_post(dip, derr, erpt_p, buf,
1944 			    PCIEX_TYPE_GEN);
1945 			PCI_FM_SEV_INC(pciex_nadv_err_tbl[i].flags);
1946 		}
1947 		goto done;
1948 	}
1949 
1950 	pcie_adv_regs = pcie_regs->pcie_adv_regs;
1951 
1952 	/*
1953 	 * Log PCI Express uncorrectable errors
1954 	 */
1955 	if (pcie_adv_regs->pcie_adv_vflags & PCIE_UE_STATUS_VALID) {
1956 		for (i = 0; pciex_ue_err_tbl[i].err_class != NULL; i++) {
1957 			if (!(pcie_adv_regs->pcie_ue_status &
1958 			    pciex_ue_err_tbl[i].reg_bit))
1959 				continue;
1960 
1961 			(void) snprintf(buf, FM_MAX_CLASS,
1962 			    "%s.%s", PCIEX_ERROR_SUBCLASS,
1963 			    pciex_ue_err_tbl[i].err_class);
1964 
1965 			pcie_adv_regs->pcie_adv_bdf = 0;
1966 			if ((pcie_adv_regs->pcie_ue_status &
1967 			    pcie_aer_uce_log_bits) !=
1968 			    pciex_ue_err_tbl[i].reg_bit) {
1969 				PCI_FM_SEV_INC(pciex_ue_err_tbl[i].flags);
1970 				pcie_ereport_post(dip, derr, erpt_p, buf,
1971 				    PCIEX_TYPE_UE);
1972 			} else {
1973 				pcie_check_addr(dip, derr, erpt_p);
1974 				/*
1975 				 * fatal/ok errors are fatal/ok
1976 				 * regardless of whether we find a handle
1977 				 */
1978 				if (pciex_ue_err_tbl[i].flags == DDI_FM_FATAL)
1979 					derr->fme_status = DDI_FM_FATAL;
1980 				else if (pciex_ue_err_tbl[i].flags == DDI_FM_OK)
1981 					derr->fme_status = DDI_FM_OK;
1982 				pcie_ereport_post(dip, derr, erpt_p, buf,
1983 				    PCIEX_TYPE_UE);
1984 				PCI_FM_SEV_INC(derr->fme_status);
1985 			}
1986 		}
1987 	}
1988 
1989 	/*
1990 	 * Log PCI Express correctable errors
1991 	 */
1992 	if (pcie_adv_regs->pcie_adv_vflags & PCIE_CE_STATUS_VALID) {
1993 		for (i = 0; pciex_ce_err_tbl[i].err_class != NULL; i++) {
1994 			if (!(pcie_adv_regs->pcie_ce_status &
1995 			    pciex_ce_err_tbl[i].reg_bit))
1996 				continue;
1997 
1998 			(void) snprintf(buf, FM_MAX_CLASS,
1999 			    "%s.%s", PCIEX_ERROR_SUBCLASS,
2000 			    pciex_ce_err_tbl[i].err_class);
2001 			pcie_ereport_post(dip, derr, erpt_p, buf,
2002 			    PCIEX_TYPE_CE);
2003 		}
2004 	}
2005 
2006 	if (!(erpt_p->pe_dflags & PCI_BRIDGE_DEV))
2007 		goto done;
2008 
2009 	if (erpt_p->pe_dflags & PCIEX_RC_DEV) {
2010 		int ret = pcie_rc_error_report(dip, derr, erpt_p,
2011 		    (void *)pcie_adv_regs);
2012 		PCI_FM_SEV_INC(ret);
2013 	}
2014 
2015 	if (!((erpt_p->pe_dflags & PCIEX_2PCI_DEV) &&
2016 	    (pcie_adv_regs->pcie_adv_vflags & PCIE_SUE_STATUS_VALID)))
2017 		goto done;
2018 
2019 	pcie_bdg_regs = pcie_adv_regs->pcie_adv_bdg_regs;
2020 
2021 	for (i = 0; pcie_sue_err_tbl[i].err_class != NULL; i++) {
2022 		if ((pcie_bdg_regs->pcie_sue_status &
2023 		    pcie_sue_err_tbl[i].reg_bit)) {
2024 			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
2025 			    PCIEX_ERROR_SUBCLASS,
2026 			    pcie_sue_err_tbl[i].err_class);
2027 
2028 			if ((pcie_bdg_regs->pcie_sue_status &
2029 			    pcie_aer_suce_log_bits) !=
2030 			    pcie_sue_err_tbl[i].reg_bit) {
2031 				PCI_FM_SEV_INC(pcie_sue_err_tbl[i].flags);
2032 				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
2033 				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
2034 				    PCIEX_SEC_UE_STATUS, DATA_TYPE_UINT32,
2035 				    pcie_bdg_regs->pcie_sue_status,
2036 #ifdef DEBUG
2037 				    PCIEX_SUE_HDR0, DATA_TYPE_UINT32,
2038 				    pcie_bdg_regs->pcie_sue_hdr0,
2039 				    PCIEX_SUE_HDR1, DATA_TYPE_UINT32,
2040 				    pcie_bdg_regs->pcie_sue_hdr[0],
2041 				    PCIEX_SUE_HDR2, DATA_TYPE_UINT32,
2042 				    pcie_bdg_regs->pcie_sue_hdr[1],
2043 				    PCIEX_SUE_HDR3, DATA_TYPE_UINT32,
2044 				    pcie_bdg_regs->pcie_sue_hdr[2],
2045 #endif
2046 				    NULL);
2047 			} else {
2048 				pcie_adv_regs->pcie_adv_bdf = 0;
2049 				pcie_pci_check_addr(dip, derr, erpt_p);
2050 				/*
2051 				 * fatal/nonfatal errors are fatal/nonfatal
2052 				 * regardless of whether we find a handle
2053 				 */
2054 				if (pcie_sue_err_tbl[i].flags == DDI_FM_FATAL)
2055 					derr->fme_status = DDI_FM_FATAL;
2056 				else if (pcie_sue_err_tbl[i].flags ==
2057 				    DDI_FM_NONFATAL)
2058 					derr->fme_status = DDI_FM_NONFATAL;
2059 
2060 				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
2061 				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
2062 				    PCIEX_SEC_UE_STATUS, DATA_TYPE_UINT32,
2063 				    pcie_bdg_regs->pcie_sue_status,
2064 				    PCIEX_SRC_ID, DATA_TYPE_UINT16,
2065 				    pcie_adv_regs->pcie_adv_bdf,
2066 				    PCIEX_SRC_VALID, DATA_TYPE_BOOLEAN_VALUE,
2067 				    (pcie_adv_regs->pcie_adv_bdf != 0) ?
2068 				    1 : 0,
2069 #ifdef DEBUG
2070 				    PCIEX_SUE_HDR0, DATA_TYPE_UINT32,
2071 				    pcie_bdg_regs->pcie_sue_hdr0,
2072 				    PCIEX_SUE_HDR1, DATA_TYPE_UINT32,
2073 				    pcie_bdg_regs->pcie_sue_hdr[0],
2074 				    PCIEX_SUE_HDR2, DATA_TYPE_UINT32,
2075 				    pcie_bdg_regs->pcie_sue_hdr[1],
2076 				    PCIEX_SUE_HDR3, DATA_TYPE_UINT32,
2077 				    pcie_bdg_regs->pcie_sue_hdr[2],
2078 #endif
2079 				    NULL);
2080 				PCI_FM_SEV_INC(derr->fme_status);
2081 			}
2082 		}
2083 	}
2084 done:
2085 	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
2086 	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
2087 }
2088 
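/*
 * Top-level error reporting routine. Scan the generic PCI status
 * register, dispatch to the PCI Express, PCI-X and bridge specific
 * reporting routines as appropriate, and record the aggregate severity
 * in derr->fme_status.
 */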
2089 static void
2090 pci_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
2091 {
2092 	int fatal = 0;
2093 	int nonfatal = 0;
2094 	int unknown = 0;
2095 	int ok = 0;
2096 	char buf[FM_MAX_CLASS];
2097 	int i;
2098 
2099 	if (derr->fme_flag == DDI_FM_ERR_UNEXPECTED) {
2100 		/*
2101 		 * Log generic PCI errors.
2102 		 */
2103 		for (i = 0; pci_err_tbl[i].err_class != NULL; i++) {
2104 			if (!(erpt_p->pe_pci_regs->pci_err_status &
2105 			    pci_err_tbl[i].reg_bit) ||
2106 			    !(erpt_p->pe_pci_regs->pci_vflags &
2107 			    PCI_ERR_STATUS_VALID))
2108 				continue;
2109 			/*
2110 			 * Generate an ereport for this error bit.
2111 			 */
2112 			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
2113 			    PCI_ERROR_SUBCLASS, pci_err_tbl[i].err_class);
2114 			ddi_fm_ereport_post(dip, buf, derr->fme_ena,
2115 			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
2116 			    PCI_CONFIG_STATUS, DATA_TYPE_UINT16,
2117 			    erpt_p->pe_pci_regs->pci_err_status,
2118 			    PCI_CONFIG_COMMAND, DATA_TYPE_UINT16,
2119 			    erpt_p->pe_pci_regs->pci_cfg_comm, NULL);
2120 
2121 			/*
2122 			 * The meaning of SERR is different for PCIEX (just
2123 			 * implies a message has been sent) so we don't want to
2124 			 * treat that one as fatal.
2125 			 */
2126 			if ((erpt_p->pe_dflags & PCIEX_DEV) &&
2127 			    pci_err_tbl[i].reg_bit == PCI_STAT_S_SYSERR) {
2128 				unknown++;
2129 			} else {
2130 				PCI_FM_SEV_INC(pci_err_tbl[i].flags);
2131 			}
2132 		}
2133 		if (erpt_p->pe_dflags & PCIEX_DEV) {
2134 			int ret = pcie_error_report(dip, derr, erpt_p);
2135 			PCI_FM_SEV_INC(ret);
2136 		} else if (erpt_p->pe_dflags & PCIX_DEV) {
2137 			if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
2138 				int ret = pcix_bdg_error_report(dip, derr,
2139 				    erpt_p, erpt_p->pe_regs);
2140 				PCI_FM_SEV_INC(ret);
2141 			} else {
2142 				int ret = pcix_error_report(dip, derr, erpt_p);
2143 				PCI_FM_SEV_INC(ret);
2144 			}
2145 		}
2146 	}
2147 
2148 	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV)) {
2149 		int ret = pci_bdg_error_report(dip, derr, erpt_p);
2150 		PCI_FM_SEV_INC(ret);
2151 	}
2152 
2153 	derr->fme_status = (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
2154 	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
2155 }
2156 
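/*
 * Gather this device's error registers, post ereports for any errors
 * found and then clear the error bits. If xx_status is non-NULL the
 * raw PCI status register value is passed back to the caller.
 */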
2157 void
2158 pci_ereport_post(dev_info_t *dip, ddi_fm_error_t *derr, uint16_t *xx_status)
2159 {
2160 	struct i_ddi_fmhdl *fmhdl;
2161 	pci_erpt_t *erpt_p;
2162 
2163 	fmhdl = DEVI(dip)->devi_fmhdl;
2164 	if (!DDI_FM_EREPORT_CAP(ddi_fm_capable(dip)) &&
2165 	    !DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) {
2166 		i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL, DDI_NOSLEEP);
2167 		return;
2168 	}
2169 
2170 	ASSERT(fmhdl);
2171 
2172 	if (derr->fme_ena == 0)
2173 		derr->fme_ena = fm_ena_generate(0, FM_ENA_FMT1);
2174 
2175 	erpt_p = (pci_erpt_t *)fmhdl->fh_bus_specific;
2176 	if (erpt_p == NULL)
2177 		return;
2178 
2179 	pci_regs_gather(dip, erpt_p);
2180 	pci_error_report(dip, derr, erpt_p);
2181 	pci_regs_clear(erpt_p);
2182 
2183 	if (xx_status != NULL)
2184 		*xx_status = erpt_p->pe_pci_regs->pci_err_status;
2185 }
2186 
2187 /*
2188  * private version of walk_devs() that can be used during panic. No
2189  * sleeping or locking required.
2190  */
2191 static int
2192 pci_fm_walk_devs(dev_info_t *dip, int (*f)(dev_info_t *, void *), void *arg)
2193 {
2194 	while (dip) {
2195 		switch ((*f)(dip, arg)) {
2196 		case DDI_WALK_TERMINATE:
2197 			return (DDI_WALK_TERMINATE);
2198 		case DDI_WALK_CONTINUE:
2199 			if (pci_fm_walk_devs(ddi_get_child(dip), f,
2200 			    arg) == DDI_WALK_TERMINATE)
2201 				return (DDI_WALK_TERMINATE);
2202 			break;
2203 		case DDI_WALK_PRUNECHILD:
2204 			break;
2205 		}
2206 		dip = ddi_get_next_sibling(dip);
2207 	}
2208 	return (DDI_WALK_CONTINUE);
2209 }
2210 
2211 /*
2212  * need a special version of ddi_fm_ereport_post() as the leaf driver may
2213  * not be hardened.
2214  */
2215 static void
2216 pci_fm_ereport_post(dev_info_t *dip, const char *error_class, uint64_t ena,
2217     uint8_t version, ...)
2218 {
2219 	char *name;
2220 	char device_path[MAXPATHLEN];
2221 	char ddi_error_class[FM_MAX_CLASS];
2222 	nvlist_t *ereport, *detector;
2223 	nv_alloc_t *nva;
2224 	errorq_elem_t *eqep;
2225 	va_list ap;
2226 
2227 	if (panicstr) {
2228 		eqep = errorq_reserve(ereport_errorq);
2229 		if (eqep == NULL)
2230 			return;
2231 		ereport = errorq_elem_nvl(ereport_errorq, eqep);
2232 		nva = errorq_elem_nva(ereport_errorq, eqep);
2233 		detector = fm_nvlist_create(nva);
2234 	} else {
2235 		ereport = fm_nvlist_create(NULL);
2236 		detector = fm_nvlist_create(NULL);
2237 	}
2238 
2239 	(void) ddi_pathname(dip, device_path);
2240 	fm_fmri_dev_set(detector, FM_DEV_SCHEME_VERSION, NULL,
2241 	    device_path, NULL);
2242 	(void) snprintf(ddi_error_class, FM_MAX_CLASS, "%s.%s",
2243 	    DDI_IO_CLASS, error_class);
2244 	fm_ereport_set(ereport, version, ddi_error_class, ena, detector, NULL);
2245 
2246 	va_start(ap, version);
2247 	name = va_arg(ap, char *);
2248 	(void) i_fm_payload_set(ereport, name, ap);
2249 	va_end(ap);
2250 
2251 	if (panicstr) {
2252 		errorq_commit(ereport_errorq, eqep, ERRORQ_SYNC);
2253 	} else {
2254 		(void) fm_ereport_post(ereport, EVCH_TRYHARD);
2255 		fm_nvlist_destroy(ereport, FM_NVA_FREE);
2256 		fm_nvlist_destroy(detector, FM_NVA_FREE);
2257 	}
2258 }
2259 
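/*
 * Tree-walk callback: check whether the pci address captured in the
 * pci_target_err_t lies within one of this device's "reg" (or, for
 * non-config space, "assigned-addresses") entries. If so, record the
 * dip and terminate the walk.
 */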
2260 static int
2261 pci_check_regs(dev_info_t *dip, void *arg)
2262 {
2263 	int reglen;
2264 	int rn;
2265 	int totreg;
2266 	pci_regspec_t *drv_regp;
2267 	pci_target_err_t *tgt_err = (pci_target_err_t *)arg;
2268 
2269 	if (tgt_err->tgt_pci_space == PCI_REG_ADDR_G(PCI_ADDR_CONFIG)) {
2270 		/*
2271 		 * for config space, we need to check if the given address
2272 		 * is a valid config space address for this device - based
2273 		 * on pci_phys_hi of the config space entry in reg property.
2274 		 */
2275 		if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
2276 		    "reg", (caddr_t)&drv_regp, &reglen) != DDI_SUCCESS)
2277 			return (DDI_WALK_CONTINUE);
2278 
2279 		totreg = reglen / sizeof (pci_regspec_t);
2280 		for (rn = 0; rn < totreg; rn++) {
2281 			if (tgt_err->tgt_pci_space ==
2282 			    PCI_REG_ADDR_G(drv_regp[rn].pci_phys_hi) &&
2283 			    (tgt_err->tgt_pci_addr & (PCI_REG_BUS_M |
2284 			    PCI_REG_DEV_M | PCI_REG_FUNC_M)) ==
2285 			    (drv_regp[rn].pci_phys_hi & (PCI_REG_BUS_M |
2286 			    PCI_REG_DEV_M | PCI_REG_FUNC_M))) {
2287 				tgt_err->tgt_dip = dip;
2288 				kmem_free(drv_regp, reglen);
2289 				return (DDI_WALK_TERMINATE);
2290 			}
2291 		}
2292 		kmem_free(drv_regp, reglen);
2293 	} else {
2294 		/*
2295 		 * for non-config space, we need to check reg to look
2296 		 * for any non-relocatable mapping, otherwise check
2297 		 * assigned-addresses.
2298 		 */
2299 		if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
2300 		    "reg", (caddr_t)&drv_regp, &reglen) != DDI_SUCCESS)
2301 			return (DDI_WALK_CONTINUE);
2302 
2303 		totreg = reglen / sizeof (pci_regspec_t);
2304 		for (rn = 0; rn < totreg; rn++) {
2305 			if ((drv_regp[rn].pci_phys_hi & PCI_RELOCAT_B) &&
2306 			    (tgt_err->tgt_pci_space == TGT_PCI_SPACE_UNKNOWN ||
2307 			    tgt_err->tgt_pci_space ==
2308 			    PCI_REG_ADDR_G(drv_regp[rn].pci_phys_hi)) &&
2309 			    (tgt_err->tgt_pci_addr >=
2310 			    (uint64_t)drv_regp[rn].pci_phys_low +
2311 			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32)) &&
2312 			    (tgt_err->tgt_pci_addr <
2313 			    (uint64_t)drv_regp[rn].pci_phys_low +
2314 			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32) +
2315 			    (uint64_t)drv_regp[rn].pci_size_low +
2316 			    ((uint64_t)drv_regp[rn].pci_size_hi << 32))) {
2317 				tgt_err->tgt_dip = dip;
2318 				kmem_free(drv_regp, reglen);
2319 				return (DDI_WALK_TERMINATE);
2320 			}
2321 		}
2322 		kmem_free(drv_regp, reglen);
2323 
2324 		if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
2325 		    "assigned-addresses", (caddr_t)&drv_regp, &reglen) !=
2326 		    DDI_SUCCESS)
2327 			return (DDI_WALK_CONTINUE);
2328 
2329 		totreg = reglen / sizeof (pci_regspec_t);
2330 		for (rn = 0; rn < totreg; rn++) {
2331 			if ((tgt_err->tgt_pci_space == TGT_PCI_SPACE_UNKNOWN ||
2332 			    tgt_err->tgt_pci_space ==
2333 			    PCI_REG_ADDR_G(drv_regp[rn].pci_phys_hi)) &&
2334 			    (tgt_err->tgt_pci_addr >=
2335 			    (uint64_t)drv_regp[rn].pci_phys_low +
2336 			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32)) &&
2337 			    (tgt_err->tgt_pci_addr <
2338 			    (uint64_t)drv_regp[rn].pci_phys_low +
2339 			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32) +
2340 			    (uint64_t)drv_regp[rn].pci_size_low +
2341 			    ((uint64_t)drv_regp[rn].pci_size_hi << 32))) {
2342 				tgt_err->tgt_dip = dip;
2343 				kmem_free(drv_regp, reglen);
2344 				return (DDI_WALK_TERMINATE);
2345 			}
2346 		}
2347 		kmem_free(drv_regp, reglen);
2348 	}
2349 	return (DDI_WALK_CONTINUE);
2350 }
2351 
2352 /*
2353  * pci_fix_ranges - fixes the config space entry of the "ranges"
2354  * property on psycho+ platforms.  (if changing this function please make sure
2355  * to change the pci_fix_ranges function in pcipsy.c)
2356  */
2357 /*ARGSUSED*/
2358 static void
2359 pci_fix_ranges(dev_info_t *dip, pci_ranges_t *pci_ranges, int nrange)
2360 {
2361 #if defined(__sparc)
2362 	char *name = ddi_binding_name(dip);
2363 
2364 	if ((strcmp(name, "pci108e,8000") == 0) ||
2365 	    (strcmp(name, "pci108e,a000") == 0) ||
2366 	    (strcmp(name, "pci108e,a001") == 0)) {
2367 		int i;
2368 		for (i = 0; i < nrange; i++, pci_ranges++)
2369 			if ((pci_ranges->child_high & PCI_REG_ADDR_M) ==
2370 			    PCI_ADDR_CONFIG)
2371 				pci_ranges->parent_low |=
2372 				    pci_ranges->child_high;
2373 	}
2374 #endif
2375 }
2376 
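/*
 * Tree-walk callback applied to each top-level pci/pciex nexus: use its
 * "ranges" property to translate the captured system address to a pci
 * address and space type, then walk its children with pci_check_regs()
 * looking for a matching target device.
 */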
2377 static int
2378 pci_check_ranges(dev_info_t *dip, void *arg)
2379 {
2380 	uint64_t range_parent_begin;
2381 	uint64_t range_parent_size;
2382 	uint64_t range_parent_end;
2383 	uint32_t space_type;
2384 	uint32_t bus_num;
2385 	uint32_t range_offset;
2386 	pci_ranges_t *pci_ranges, *rangep;
2387 	pci_bus_range_t *pci_bus_rangep;
2388 	int pci_ranges_length;
2389 	int nrange;
2390 	pci_target_err_t *tgt_err = (pci_target_err_t *)arg;
2391 	int i, size;
2392 	if (strcmp(ddi_node_name(dip), "pci") != 0 &&
2393 	    strcmp(ddi_node_name(dip), "pciex") != 0)
2394 		return (DDI_WALK_CONTINUE);
2395 
2396 	/*
2397 	 * Get the ranges property. Note we only look at the top level pci
2398 	 * node (hostbridge), which has a ranges property of type
2399 	 * pci_ranges_t, not at pci-pci bridges.
2400 	 */
2401 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "ranges",
2402 	    (caddr_t)&pci_ranges, &pci_ranges_length) != DDI_SUCCESS) {
2403 		/*
2404 		 * no ranges property - no translation needed
2405 		 */
2406 		tgt_err->tgt_pci_addr = tgt_err->tgt_err_addr;
2407 		tgt_err->tgt_pci_space = TGT_PCI_SPACE_UNKNOWN;
2408 		if (panicstr)
2409 			(void) pci_fm_walk_devs(ddi_get_child(dip),
2410 			    pci_check_regs, (void *)tgt_err);
2411 		else {
2412 			int circ = 0;
2413 			ndi_devi_enter(dip, &circ);
2414 			ddi_walk_devs(ddi_get_child(dip), pci_check_regs,
2415 			    (void *)tgt_err);
2416 			ndi_devi_exit(dip, circ);
2417 		}
2418 		if (tgt_err->tgt_dip != NULL)
2419 			return (DDI_WALK_TERMINATE);
2420 		return (DDI_WALK_PRUNECHILD);
2421 	}
2422 	nrange = pci_ranges_length / sizeof (pci_ranges_t);
2423 	rangep = pci_ranges;
2424 
2425 	/* Need to fix the pci ranges property for psycho based systems */
2426 	pci_fix_ranges(dip, pci_ranges, nrange);
2427 
2428 	for (i = 0; i < nrange; i++, rangep++) {
2429 		range_parent_begin = ((uint64_t)rangep->parent_high << 32) +
2430 		    rangep->parent_low;
2431 		range_parent_size = ((uint64_t)rangep->size_high << 32) +
2432 		    rangep->size_low;
2433 		range_parent_end = range_parent_begin + range_parent_size - 1;
2434 
2435 		if ((tgt_err->tgt_err_addr < range_parent_begin) ||
2436 		    (tgt_err->tgt_err_addr > range_parent_end)) {
2437 			/* Not in range */
2438 			continue;
2439 		}
2440 		space_type = PCI_REG_ADDR_G(rangep->child_high);
2441 		if (space_type == PCI_REG_ADDR_G(PCI_ADDR_CONFIG)) {
2442 			/* Config space address - check bus range */
2443 			range_offset = tgt_err->tgt_err_addr -
2444 			    range_parent_begin;
2445 			bus_num = PCI_REG_BUS_G(range_offset);
2446 			if (ddi_getlongprop(DDI_DEV_T_ANY, dip,
2447 			    DDI_PROP_DONTPASS, "bus-range",
2448 			    (caddr_t)&pci_bus_rangep, &size) != DDI_SUCCESS) {
2449 				continue;
2450 			}
2451 			if ((bus_num < pci_bus_rangep->lo) ||
2452 			    (bus_num > pci_bus_rangep->hi)) {
2453 				/*
2454 				 * Bus number not appropriate for this
2455 				 * pci nexus.
2456 				 */
2457 				kmem_free(pci_bus_rangep, size);
2458 				continue;
2459 			}
2460 			kmem_free(pci_bus_rangep, size);
2461 		}
2462 
2463 		/* We have a match if we get here - compute pci address */
2464 		tgt_err->tgt_pci_addr = tgt_err->tgt_err_addr -
2465 		    range_parent_begin;
2466 		tgt_err->tgt_pci_addr += (((uint64_t)rangep->child_mid << 32) +
2467 		    rangep->child_low);
2468 		tgt_err->tgt_pci_space = space_type;
2469 		if (panicstr)
2470 			(void) pci_fm_walk_devs(ddi_get_child(dip),
2471 			    pci_check_regs, (void *)tgt_err);
2472 		else {
2473 			int circ = 0;
2474 			ndi_devi_enter(dip, &circ);
2475 			ddi_walk_devs(ddi_get_child(dip), pci_check_regs,
2476 			    (void *)tgt_err);
2477 			ndi_devi_exit(dip, circ);
2478 		}
2479 		if (tgt_err->tgt_dip != NULL) {
2480 			kmem_free(pci_ranges, pci_ranges_length);
2481 			return (DDI_WALK_TERMINATE);
2482 		}
2483 	}
2484 	kmem_free(pci_ranges, pci_ranges_length);
2485 	return (DDI_WALK_PRUNECHILD);
2486 }
2487 
2488 /*
2489  * Function used to drain pci_target_queue, either during panic or when
2490  * the softint fires, to generate target device ereports based on the
2491  * captured physical addresses.
2492  */
2493 /*ARGSUSED*/
2494 static void
2495 pci_target_drain(void *private_p, pci_target_err_t *tgt_err)
2496 {
2497 	char buf[FM_MAX_CLASS];
2498 
2499 	/*
2500 	 * The following assumes that all pci_pci bridge devices
2501 	 * are configured as transparent. Find the top-level pci
2502 	 * nexus which has tgt_err_addr in one of its ranges, converting this
2503 	 * to a pci address in the process. Then starting at this node do
2504 	 * another tree walk to find a device with the pci address we've
2505 	 * found within range of one of its assigned-addresses properties.
2506 	 */
2507 	tgt_err->tgt_dip = NULL;
2508 	if (panicstr)
2509 		(void) pci_fm_walk_devs(ddi_root_node(), pci_check_ranges,
2510 		    (void *)tgt_err);
2511 	else
2512 		ddi_walk_devs(ddi_root_node(), pci_check_ranges,
2513 		    (void *)tgt_err);
2514 	if (tgt_err->tgt_dip == NULL)
2515 		return;
2516 
2517 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", tgt_err->tgt_bridge_type,
2518 	    tgt_err->tgt_err_class);
2519 	pci_fm_ereport_post(tgt_err->tgt_dip, buf, tgt_err->tgt_err_ena, 0,
2520 	    PCI_PA, DATA_TYPE_UINT64, tgt_err->tgt_err_addr, NULL);
2521 }
2522 
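/*
 * Queue a captured target error (ena, error class, bridge type and
 * physical address) on pci_target_queue for asynchronous processing by
 * pci_target_drain().
 */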
2523 void
2524 pci_target_enqueue(uint64_t ena, char *class, char *bridge_type, uint64_t addr)
2525 {
2526 	pci_target_err_t tgt_err;
2527 
2528 	tgt_err.tgt_err_ena = ena;
2529 	tgt_err.tgt_err_class = class;
2530 	tgt_err.tgt_bridge_type = bridge_type;
2531 	tgt_err.tgt_err_addr = addr;
2532 	errorq_dispatch(pci_target_queue, (void *)&tgt_err,
2533 	    sizeof (pci_target_err_t), ERRORQ_ASYNC);
2534 }
2535 
2536 void
2537 pci_targetq_init(void)
2538 {
2539 	/*
2540 	 * PCI target errorq, to schedule asynchronous generation of target
2541 	 * device ereports based on the captured physical address.
2542 	 * The errorq is created here but destroyed when _fini is called
2543 	 * for the pci module.
2544 	 */
2545 	if (pci_target_queue == NULL) {
2546 		pci_target_queue = errorq_create("pci_target_queue",
2547 		    (errorq_func_t)pci_target_drain, (void *)NULL,
2548 		    TARGET_MAX_ERRS, sizeof (pci_target_err_t), FM_ERR_PIL,
2549 		    ERRORQ_VITAL);
2550 		if (pci_target_queue == NULL)
2551 			panic("failed to create required system error queue");
2552 	}
2553 }
2554