xref: /titanic_51/usr/src/uts/common/os/pcifm.c (revision 024b0a258461f282a92b1b1283c3b8b083f9f33f)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/types.h>
30 #include <sys/sunndi.h>
31 #include <sys/sysmacros.h>
32 #include <sys/ddifm_impl.h>
33 #include <sys/fm/util.h>
34 #include <sys/fm/protocol.h>
35 #include <sys/fm/io/pci.h>
36 #include <sys/fm/io/ddi.h>
37 #include <sys/pci.h>
38 #include <sys/pcie.h>
39 #include <sys/pci_impl.h>
40 #include <sys/epm.h>
41 #include <sys/pcifm.h>
42 
43 #define	PCIX_ECC_VER_CHECK(x)	(((x) == PCI_PCIX_VER_1) ||\
44 				((x) == PCI_PCIX_VER_2))
45 
46 /*
47  * Expected PCI Express error mask values
48  */
49 uint32_t pcie_expected_ce_mask = 0x0;
50 uint32_t pcie_expected_ue_mask = PCIE_AER_UCE_UC;
51 #if defined(__sparc)
52 uint32_t pcie_expected_sue_mask = 0x0;
53 #else
54 uint32_t pcie_expected_sue_mask = PCIE_AER_SUCE_RCVD_MA;
55 #endif
56 uint32_t pcie_aer_uce_log_bits = PCIE_AER_UCE_LOG_BITS;
57 #if defined(__sparc)
58 uint32_t pcie_aer_suce_log_bits = PCIE_AER_SUCE_LOG_BITS;
59 #else
60 uint32_t pcie_aer_suce_log_bits = \
61 	    PCIE_AER_SUCE_LOG_BITS & ~PCIE_AER_SUCE_RCVD_MA;
62 #endif
63 
64 errorq_t *pci_target_queue = NULL;
65 
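/*
 * The tables below map error status register bits to ereport class names.
 * Each entry gives the ereport class suffix, the status bit it corresponds
 * to, an optional target ereport class, and the default fault severity.
 */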
66 pci_fm_err_t pci_err_tbl[] = {
67 	PCI_DET_PERR,	PCI_STAT_PERROR,	NULL,		DDI_FM_UNKNOWN,
68 	PCI_MDPE,	PCI_STAT_S_PERROR,	PCI_TARG_MDPE,	DDI_FM_UNKNOWN,
69 	PCI_SIG_SERR,	PCI_STAT_S_SYSERR,	NULL,		DDI_FM_FATAL,
70 	PCI_MA,		PCI_STAT_R_MAST_AB,	PCI_TARG_MA,	DDI_FM_UNKNOWN,
71 	PCI_REC_TA,	PCI_STAT_R_TARG_AB,	PCI_TARG_REC_TA, DDI_FM_UNKNOWN,
72 	PCI_SIG_TA,	PCI_STAT_S_TARG_AB,	NULL,		DDI_FM_UNKNOWN,
73 	NULL, NULL, NULL, NULL,
74 };
75 
76 pci_fm_err_t pci_bdg_err_tbl[] = {
77 	PCI_DET_PERR,	PCI_STAT_PERROR,	NULL,		DDI_FM_UNKNOWN,
78 	PCI_MDPE,	PCI_STAT_S_PERROR,	PCI_TARG_MDPE,	DDI_FM_UNKNOWN,
79 	PCI_REC_SERR,	PCI_STAT_S_SYSERR,	NULL,		DDI_FM_UNKNOWN,
80 #if defined(__sparc)
81 	PCI_MA,		PCI_STAT_R_MAST_AB,	PCI_TARG_MA,	DDI_FM_UNKNOWN,
82 #endif
83 	PCI_REC_TA,	PCI_STAT_R_TARG_AB,	PCI_TARG_REC_TA, DDI_FM_UNKNOWN,
84 	PCI_SIG_TA,	PCI_STAT_S_TARG_AB,	NULL,		DDI_FM_UNKNOWN,
85 	NULL, NULL, NULL, NULL,
86 };
87 
88 static pci_fm_err_t pciex_ce_err_tbl[] = {
89 	PCIEX_RE,	PCIE_AER_CE_RECEIVER_ERR,	NULL,	DDI_FM_OK,
90 	PCIEX_RNR,	PCIE_AER_CE_REPLAY_ROLLOVER,	NULL,	DDI_FM_OK,
91 	PCIEX_RTO,	PCIE_AER_CE_REPLAY_TO,		NULL,	DDI_FM_OK,
92 	PCIEX_BDP,	PCIE_AER_CE_BAD_DLLP,		NULL,	DDI_FM_OK,
93 	PCIEX_BTP,	PCIE_AER_CE_BAD_TLP,		NULL,	DDI_FM_OK,
94 	PCIEX_ANFE,	PCIE_AER_CE_AD_NFE,		NULL,	DDI_FM_OK,
95 	NULL, NULL, NULL, NULL,
96 };
97 
98 static pci_fm_err_t pciex_ue_err_tbl[] = {
99 	PCIEX_TE,	PCIE_AER_UCE_TRAINING,		NULL,	DDI_FM_FATAL,
100 	PCIEX_DLP,	PCIE_AER_UCE_DLP,		NULL,	DDI_FM_FATAL,
101 	PCIEX_SD,	PCIE_AER_UCE_SD,		NULL,   DDI_FM_FATAL,
102 	PCIEX_ROF,	PCIE_AER_UCE_RO,		NULL,	DDI_FM_FATAL,
103 	PCIEX_FCP,	PCIE_AER_UCE_FCP,		NULL,	DDI_FM_FATAL,
104 	PCIEX_MFP,	PCIE_AER_UCE_MTLP,		NULL,	DDI_FM_FATAL,
105 	PCIEX_CTO,	PCIE_AER_UCE_TO,		NULL,	DDI_FM_UNKNOWN,
106 	PCIEX_UC,	PCIE_AER_UCE_UC,		NULL,	DDI_FM_OK,
107 	PCIEX_ECRC,	PCIE_AER_UCE_ECRC,		NULL,	DDI_FM_UNKNOWN,
108 	PCIEX_CA,	PCIE_AER_UCE_CA,		NULL,	DDI_FM_UNKNOWN,
109 	PCIEX_UR,	PCIE_AER_UCE_UR,		NULL,	DDI_FM_UNKNOWN,
110 	PCIEX_POIS,	PCIE_AER_UCE_PTLP,		NULL,	DDI_FM_UNKNOWN,
111 	NULL, NULL, NULL, NULL,
112 };
113 
114 static pci_fm_err_t pcie_sue_err_tbl[] = {
115 	PCIEX_S_TA_SC,	PCIE_AER_SUCE_TA_ON_SC,		NULL,	DDI_FM_UNKNOWN,
116 	PCIEX_S_MA_SC,	PCIE_AER_SUCE_MA_ON_SC,		NULL,	DDI_FM_UNKNOWN,
117 	PCIEX_S_RTA,	PCIE_AER_SUCE_RCVD_TA,		NULL,	DDI_FM_UNKNOWN,
118 #if defined(__sparc)
119 	PCIEX_S_RMA,	PCIE_AER_SUCE_RCVD_MA,		NULL,	DDI_FM_UNKNOWN,
120 #endif
121 	PCIEX_S_USC,	PCIE_AER_SUCE_USC_ERR,		NULL,	DDI_FM_UNKNOWN,
122 	PCIEX_S_USCMD,	PCIE_AER_SUCE_USC_MSG_DATA_ERR,	NULL,	DDI_FM_FATAL,
123 	PCIEX_S_UDE,	PCIE_AER_SUCE_UC_DATA_ERR,	NULL,	DDI_FM_UNKNOWN,
124 	PCIEX_S_UAT,	PCIE_AER_SUCE_UC_ATTR_ERR,	NULL,	DDI_FM_FATAL,
125 	PCIEX_S_UADR,	PCIE_AER_SUCE_UC_ADDR_ERR,	NULL,	DDI_FM_FATAL,
126 	PCIEX_S_TEX,	PCIE_AER_SUCE_TIMER_EXPIRED,	NULL,	DDI_FM_FATAL,
127 	PCIEX_S_PERR,	PCIE_AER_SUCE_PERR_ASSERT,	NULL,	DDI_FM_UNKNOWN,
128 	PCIEX_S_SERR,	PCIE_AER_SUCE_SERR_ASSERT,	NULL,	DDI_FM_FATAL,
129 	PCIEX_INTERR,	PCIE_AER_SUCE_INTERNAL_ERR,	NULL,	DDI_FM_FATAL,
130 	NULL, NULL, NULL, NULL,
131 };
132 
133 static pci_fm_err_t pcix_err_tbl[] = {
134 	PCIX_SPL_DIS,		PCI_PCIX_SPL_DSCD,	NULL,	DDI_FM_UNKNOWN,
135 	PCIX_UNEX_SPL,		PCI_PCIX_UNEX_SPL,	NULL,	DDI_FM_UNKNOWN,
136 	PCIX_RX_SPL_MSG,	PCI_PCIX_RX_SPL_MSG,	NULL,   DDI_FM_UNKNOWN,
137 	NULL, NULL, NULL, NULL,
138 };
139 
140 static pci_fm_err_t pcix_sec_err_tbl[] = {
141 	PCIX_SPL_DIS,		PCI_PCIX_BSS_SPL_DSCD,	NULL,	DDI_FM_UNKNOWN,
142 	PCIX_UNEX_SPL,		PCI_PCIX_BSS_UNEX_SPL,	NULL,	DDI_FM_UNKNOWN,
143 	PCIX_BSS_SPL_OR,	PCI_PCIX_BSS_SPL_OR,	NULL,	DDI_FM_OK,
144 	PCIX_BSS_SPL_DLY,	PCI_PCIX_BSS_SPL_DLY,	NULL,	DDI_FM_OK,
145 	NULL, NULL, NULL, NULL,
146 };
147 
148 static pci_fm_err_t pciex_nadv_err_tbl[] = {
149 	PCIEX_UR,	PCIE_DEVSTS_UR_DETECTED,	NULL,	DDI_FM_UNKNOWN,
150 	PCIEX_FAT,	PCIE_DEVSTS_FE_DETECTED,	NULL,	DDI_FM_FATAL,
151 	PCIEX_NONFAT,	PCIE_DEVSTS_NFE_DETECTED,	NULL,	DDI_FM_UNKNOWN,
152 	PCIEX_CORR,	PCIE_DEVSTS_CE_DETECTED,	NULL,	DDI_FM_OK,
153 	NULL, NULL, NULL, NULL,
154 };
155 
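/*
 * pci_config_check: Check the given config space access handle for a faulted
 * access.  On an unexpected fault, post a PCI_NR ereport, clear the access
 * error and return the access fme_status to the caller.
 */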
156 static int
157 pci_config_check(ddi_acc_handle_t handle, int fme_flag)
158 {
159 	ddi_acc_hdl_t *hp = impl_acc_hdl_get(handle);
160 	ddi_fm_error_t de;
161 
162 	if (!(DDI_FM_ACC_ERR_CAP(ddi_fm_capable(hp->ah_dip))))
163 		return (DDI_FM_OK);
164 
165 	de.fme_version = DDI_FME_VERSION;
166 
167 	ddi_fm_acc_err_get(handle, &de, de.fme_version);
168 	if (de.fme_status != DDI_FM_OK) {
169 		if (fme_flag == DDI_FM_ERR_UNEXPECTED) {
170 			char buf[FM_MAX_CLASS];
171 
172 			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
173 			    PCI_ERROR_SUBCLASS, PCI_NR);
174 			ddi_fm_ereport_post(hp->ah_dip, buf, de.fme_ena,
175 			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, NULL);
176 		}
177 		ddi_fm_acc_err_clear(handle, de.fme_version);
178 	}
179 	return (de.fme_status);
180 }
181 
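/*
 * pcix_ecc_regs_gather: Save the PCI-X ECC control/status, first/second
 * address and attribute registers, using the bridge-relative offsets for
 * bridge devices and validating each config space access as we go.
 */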
182 static void
183 pcix_ecc_regs_gather(pci_erpt_t *erpt_p, pcix_ecc_regs_t *pcix_ecc_regs,
184     uint8_t pcix_cap_ptr, int fme_flag)
185 {
186 	int bdg = erpt_p->pe_dflags & PCI_BRIDGE_DEV;
187 
188 	pcix_ecc_regs->pcix_ecc_ctlstat = pci_config_get32(erpt_p->pe_hdl,
189 	    (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_STATUS :
190 	    PCI_PCIX_ECC_STATUS)));
191 	if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
192 		pcix_ecc_regs->pcix_ecc_vflags |= PCIX_ERR_ECC_STS_VALID;
193 	else
194 		return;
195 	pcix_ecc_regs->pcix_ecc_fstaddr = pci_config_get32(erpt_p->pe_hdl,
196 	    (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_FST_AD :
197 	    PCI_PCIX_ECC_FST_AD)));
198 	pcix_ecc_regs->pcix_ecc_secaddr = pci_config_get32(erpt_p->pe_hdl,
199 	    (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_SEC_AD :
200 	    PCI_PCIX_ECC_SEC_AD)));
201 	pcix_ecc_regs->pcix_ecc_attr = pci_config_get32((
202 	    ddi_acc_handle_t)erpt_p->pe_hdl,
203 	    (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_ATTR : PCI_PCIX_ECC_ATTR)));
204 }
205 
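/*
 * pcix_regs_gather: Save the PCI-X error registers.  For bridges this is the
 * secondary status and bridge status registers plus the ECC register sets;
 * for non-bridge functions it is the PCI-X command, status and ECC registers.
 */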
206 static void
207 pcix_regs_gather(pci_erpt_t *erpt_p, void *pe_regs, int fme_flag)
208 {
209 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
210 		pcix_bdg_error_regs_t *pcix_bdg_regs =
211 		    (pcix_bdg_error_regs_t *)pe_regs;
212 		uint8_t pcix_bdg_cap_ptr;
213 		int i;
214 
215 		pcix_bdg_cap_ptr = pcix_bdg_regs->pcix_bdg_cap_ptr;
216 		pcix_bdg_regs->pcix_bdg_sec_stat = pci_config_get16(
217 		    erpt_p->pe_hdl, (pcix_bdg_cap_ptr + PCI_PCIX_SEC_STATUS));
218 		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
219 			pcix_bdg_regs->pcix_bdg_vflags |=
220 			    PCIX_BDG_SEC_STATUS_VALID;
221 		else
222 			return;
223 		pcix_bdg_regs->pcix_bdg_stat = pci_config_get32(erpt_p->pe_hdl,
224 		    (pcix_bdg_cap_ptr + PCI_PCIX_BDG_STATUS));
225 		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
226 			pcix_bdg_regs->pcix_bdg_vflags |= PCIX_BDG_STATUS_VALID;
227 		else
228 			return;
229 		if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) {
230 			pcix_ecc_regs_t *pcix_bdg_ecc_regs;
231 			/*
232 			 * PCI Express to PCI-X bridges only implement the
233 			 * secondary side of the PCI-X ECC registers; bit one is
234 			 * read-only, so we make sure we do not write to it.
235 			 */
236 			if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
237 				pcix_bdg_ecc_regs =
238 				    pcix_bdg_regs->pcix_bdg_ecc_regs[1];
239 				pcix_ecc_regs_gather(erpt_p, pcix_bdg_ecc_regs,
240 				    pcix_bdg_cap_ptr, fme_flag);
241 			} else {
242 				for (i = 0; i < 2; i++) {
243 					pcix_bdg_ecc_regs =
244 					    pcix_bdg_regs->pcix_bdg_ecc_regs[i];
245 					pci_config_put32(erpt_p->pe_hdl,
246 					    (pcix_bdg_cap_ptr +
247 					    PCI_PCIX_BDG_ECC_STATUS), i);
248 					pcix_ecc_regs_gather(erpt_p,
249 					    pcix_bdg_ecc_regs,
250 					    pcix_bdg_cap_ptr, fme_flag);
251 				}
252 			}
253 		}
254 	} else {
255 		pcix_error_regs_t *pcix_regs = (pcix_error_regs_t *)pe_regs;
256 		uint8_t pcix_cap_ptr;
257 
258 		pcix_cap_ptr = pcix_regs->pcix_cap_ptr;
259 
260 		pcix_regs->pcix_command = pci_config_get16(erpt_p->pe_hdl,
261 		    (pcix_cap_ptr + PCI_PCIX_COMMAND));
262 		pcix_regs->pcix_status = pci_config_get32(erpt_p->pe_hdl,
263 		    (pcix_cap_ptr + PCI_PCIX_STATUS));
264 		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
265 			pcix_regs->pcix_vflags |= PCIX_ERR_STATUS_VALID;
266 		else
267 			return;
268 		if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) {
269 			pcix_ecc_regs_t *pcix_ecc_regs =
270 			    pcix_regs->pcix_ecc_regs;
271 
272 			pcix_ecc_regs_gather(erpt_p, pcix_ecc_regs,
273 			    pcix_cap_ptr, fme_flag);
274 		}
275 	}
276 }
277 
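/*
 * pcie_regs_gather: Save the PCI Express device status, control and
 * capability registers, the root complex registers where present, and, for
 * devices with advanced error reporting, the AER uncorrectable/correctable
 * status, masks and header logs (including the secondary registers of a
 * PCI Express to PCI bridge and the root error registers of a root complex).
 */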
278 static void
279 pcie_regs_gather(pci_erpt_t *erpt_p, int fme_flag)
280 {
281 	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
282 	uint8_t pcie_cap_ptr;
283 	pcie_adv_error_regs_t *pcie_adv_regs;
284 	uint16_t pcie_ecap_ptr;
285 
286 	pcie_cap_ptr = pcie_regs->pcie_cap_ptr;
287 
288 	pcie_regs->pcie_err_status = pci_config_get16(erpt_p->pe_hdl,
289 	    pcie_cap_ptr + PCIE_DEVSTS);
290 	if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
291 		pcie_regs->pcie_vflags |= PCIE_ERR_STATUS_VALID;
292 	else
293 		return;
294 
295 	pcie_regs->pcie_err_ctl = pci_config_get16(erpt_p->pe_hdl,
296 	    (pcie_cap_ptr + PCIE_DEVCTL));
297 	pcie_regs->pcie_dev_cap = pci_config_get16(erpt_p->pe_hdl,
298 	    (pcie_cap_ptr + PCIE_DEVCAP));
299 
300 	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) && (erpt_p->pe_dflags &
301 	    PCIX_DEV))
302 		pcix_regs_gather(erpt_p, pcie_regs->pcix_bdg_regs, fme_flag);
303 
304 	if (erpt_p->pe_dflags & PCIEX_RC_DEV) {
305 		pcie_rc_error_regs_t *pcie_rc_regs = pcie_regs->pcie_rc_regs;
306 
307 		pcie_rc_regs->pcie_rc_status = pci_config_get32(erpt_p->pe_hdl,
308 		    (pcie_cap_ptr + PCIE_ROOTSTS));
309 		pcie_rc_regs->pcie_rc_ctl = pci_config_get16(erpt_p->pe_hdl,
310 		    (pcie_cap_ptr + PCIE_ROOTCTL));
311 	}
312 
313 	if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV))
314 		return;
315 
316 	pcie_adv_regs = pcie_regs->pcie_adv_regs;
317 
318 	pcie_ecap_ptr = pcie_adv_regs->pcie_adv_cap_ptr;
319 
320 	pcie_adv_regs->pcie_ue_status = pci_config_get32(erpt_p->pe_hdl,
321 	    pcie_ecap_ptr + PCIE_AER_UCE_STS);
322 	if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
323 		pcie_adv_regs->pcie_adv_vflags |= PCIE_UE_STATUS_VALID;
324 
325 	pcie_adv_regs->pcie_ue_mask = pci_config_get32(erpt_p->pe_hdl,
326 	    pcie_ecap_ptr + PCIE_AER_UCE_MASK);
327 	pcie_adv_regs->pcie_ue_sev = pci_config_get32(erpt_p->pe_hdl,
328 	    pcie_ecap_ptr + PCIE_AER_UCE_SERV);
329 	pcie_adv_regs->pcie_adv_ctl = pci_config_get32(erpt_p->pe_hdl,
330 	    pcie_ecap_ptr + PCIE_AER_CTL);
331 	pcie_adv_regs->pcie_ue_hdr0 = pci_config_get32(erpt_p->pe_hdl,
332 	    pcie_ecap_ptr + PCIE_AER_HDR_LOG);
333 	if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK) {
334 		int i;
335 		pcie_adv_regs->pcie_adv_vflags |= PCIE_UE_HDR_VALID;
336 
337 		for (i = 0; i < 3; i++) {
338 			pcie_adv_regs->pcie_ue_hdr[i] = pci_config_get32(
339 			    erpt_p->pe_hdl, pcie_ecap_ptr + PCIE_AER_HDR_LOG +
340 			    (4 * (i + 1)));
341 		}
342 	}
343 
344 	pcie_adv_regs->pcie_ce_status = pci_config_get32(erpt_p->pe_hdl,
345 	    pcie_ecap_ptr + PCIE_AER_CE_STS);
346 	if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
347 		pcie_adv_regs->pcie_adv_vflags |= PCIE_CE_STATUS_VALID;
348 
349 	pcie_adv_regs->pcie_ce_mask = pci_config_get32(erpt_p->pe_hdl,
350 	    pcie_ecap_ptr + PCIE_AER_CE_MASK);
351 
352 	/*
353 	 * If this is a pci express to pci bridge, grab the bridge
354 	 * error registers.
355 	 */
356 	if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
357 		pcie_adv_bdg_error_regs_t *pcie_bdg_regs =
358 		    pcie_adv_regs->pcie_adv_bdg_regs;
359 
360 		pcie_bdg_regs->pcie_sue_status =
361 		    pci_config_get32(erpt_p->pe_hdl,
362 		    pcie_ecap_ptr + PCIE_AER_SUCE_STS);
363 		pcie_bdg_regs->pcie_sue_mask =
364 		    pci_config_get32(erpt_p->pe_hdl,
365 		    pcie_ecap_ptr + PCIE_AER_SUCE_MASK);
366 		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
367 			pcie_adv_regs->pcie_adv_vflags |= PCIE_SUE_STATUS_VALID;
368 		pcie_bdg_regs->pcie_sue_hdr0 = pci_config_get32(erpt_p->pe_hdl,
369 		    (pcie_ecap_ptr + PCIE_AER_SHDR_LOG));
370 
371 		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK) {
372 			int i;
373 
374 			pcie_adv_regs->pcie_adv_vflags |= PCIE_SUE_HDR_VALID;
375 
376 			for (i = 0; i < 3; i++) {
377 				pcie_bdg_regs->pcie_sue_hdr[i] =
378 				    pci_config_get32(erpt_p->pe_hdl,
379 				    pcie_ecap_ptr + PCIE_AER_SHDR_LOG +
380 				    (4 * (i + 1)));
381 			}
382 		}
383 	}
384 	/*
385 	 * If this is a PCI Express root complex, grab the root complex
386 	 * error registers.
387 	 */
388 	if (erpt_p->pe_dflags & PCIEX_RC_DEV) {
389 		pcie_adv_rc_error_regs_t *pcie_rc_regs =
390 		    pcie_adv_regs->pcie_adv_rc_regs;
391 
392 		pcie_rc_regs->pcie_rc_err_cmd = pci_config_get32(erpt_p->pe_hdl,
393 		    (pcie_ecap_ptr + PCIE_AER_RE_CMD));
394 		pcie_rc_regs->pcie_rc_err_status =
395 		    pci_config_get32(erpt_p->pe_hdl,
396 		    (pcie_ecap_ptr + PCIE_AER_RE_STS));
397 		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
398 			pcie_adv_regs->pcie_adv_vflags |=
399 			    PCIE_RC_ERR_STATUS_VALID;
400 		pcie_rc_regs->pcie_rc_ce_src_id =
401 		    pci_config_get16(erpt_p->pe_hdl,
402 		    (pcie_ecap_ptr + PCIE_AER_CE_SRC_ID));
403 		pcie_rc_regs->pcie_rc_ue_src_id =
404 		    pci_config_get16(erpt_p->pe_hdl,
405 		    (pcie_ecap_ptr + PCIE_AER_ERR_SRC_ID));
406 		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
407 			pcie_adv_regs->pcie_adv_vflags |= PCIE_SRC_ID_VALID;
408 	}
409 }
410 
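/*
 * pci_regs_gather: Save the generic PCI error registers (and the bridge
 * registers for a PCI-PCI bridge), then hand off to the PCI Express or
 * PCI-X routines for any additional registers the device implements.
 */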
411 /*ARGSUSED*/
412 static void
413 pci_regs_gather(dev_info_t *dip, pci_erpt_t *erpt_p, int fme_flag)
414 {
415 	pci_error_regs_t *pci_regs = erpt_p->pe_pci_regs;
416 
417 	/*
418 	 * Start by reading all the error registers that are available for
419 	 * pci and pci express, on both leaf devices and bridges/switches.
420 	 */
421 	pci_regs->pci_err_status = pci_config_get16(erpt_p->pe_hdl,
422 	    PCI_CONF_STAT);
423 	if (pci_config_check(erpt_p->pe_hdl, fme_flag) != DDI_FM_OK)
424 		return;
425 	pci_regs->pci_vflags |= PCI_ERR_STATUS_VALID;
426 	pci_regs->pci_cfg_comm = pci_config_get16(erpt_p->pe_hdl,
427 	    PCI_CONF_COMM);
428 	if (pci_config_check(erpt_p->pe_hdl, fme_flag) != DDI_FM_OK)
429 		return;
430 
431 	/*
432 	 * For a pci-pci bridge, grab the bridge-specific error registers.
433 	 */
434 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
435 		pci_regs->pci_bdg_regs->pci_bdg_sec_stat =
436 		    pci_config_get16(erpt_p->pe_hdl, PCI_BCNF_SEC_STATUS);
437 		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
438 			pci_regs->pci_bdg_regs->pci_bdg_vflags |=
439 			    PCI_BDG_SEC_STAT_VALID;
440 		pci_regs->pci_bdg_regs->pci_bdg_ctrl =
441 		    pci_config_get16(erpt_p->pe_hdl, PCI_BCNF_BCNTRL);
442 		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
443 			pci_regs->pci_bdg_regs->pci_bdg_vflags |=
444 			    PCI_BDG_CTRL_VALID;
445 	}
446 
447 	/*
448 	 * For a pci express device, grab the pci express error registers
449 	 * and check for advanced error reporting features, grabbing those
450 	 * registers if available.
451 	 */
452 	if (erpt_p->pe_dflags & PCIEX_DEV)
453 		pcie_regs_gather(erpt_p, fme_flag);
454 	else if (erpt_p->pe_dflags & PCIX_DEV)
455 		pcix_regs_gather(erpt_p, erpt_p->pe_regs, fme_flag);
456 
457 }
458 
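/*
 * pcix_regs_clear: Write the saved PCI-X status and ECC status values back
 * to the device to clear the error bits captured by pcix_regs_gather(), then
 * reset the valid flags.
 */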
459 static void
460 pcix_regs_clear(pci_erpt_t *erpt_p, void *pe_regs)
461 {
462 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
463 		pcix_bdg_error_regs_t *pcix_bdg_regs =
464 		    (pcix_bdg_error_regs_t *)pe_regs;
465 		uint8_t pcix_bdg_cap_ptr;
466 		int i;
467 
468 		pcix_bdg_cap_ptr = pcix_bdg_regs->pcix_bdg_cap_ptr;
469 
470 		if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_SEC_STATUS_VALID)
471 			pci_config_put16(erpt_p->pe_hdl,
472 			    (pcix_bdg_cap_ptr + PCI_PCIX_SEC_STATUS),
473 			    pcix_bdg_regs->pcix_bdg_sec_stat);
474 
475 		if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_STATUS_VALID)
476 			pci_config_put32(erpt_p->pe_hdl,
477 			    (pcix_bdg_cap_ptr + PCI_PCIX_BDG_STATUS),
478 			    pcix_bdg_regs->pcix_bdg_stat);
479 
480 		pcix_bdg_regs->pcix_bdg_vflags = 0x0;
481 
482 		if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) {
483 			pcix_ecc_regs_t *pcix_bdg_ecc_regs;
484 			/*
485 			 * PCI Express to PCI-X bridges only implement the
486 			 * secondary side of the PCI-X ECC registers; bit one is
487 			 * read-only, so we make sure we do not write to it.
488 			 */
489 			if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
490 				pcix_bdg_ecc_regs =
491 				    pcix_bdg_regs->pcix_bdg_ecc_regs[1];
492 
493 				if (pcix_bdg_ecc_regs->pcix_ecc_vflags &
494 				    PCIX_ERR_ECC_STS_VALID) {
495 
496 					pci_config_put32(erpt_p->pe_hdl,
497 					    (pcix_bdg_cap_ptr +
498 					    PCI_PCIX_BDG_ECC_STATUS),
499 					    pcix_bdg_ecc_regs->
500 					    pcix_ecc_ctlstat);
501 				}
502 				pcix_bdg_ecc_regs->pcix_ecc_vflags = 0x0;
503 			} else {
504 				for (i = 0; i < 2; i++) {
505 					pcix_bdg_ecc_regs =
506 					    pcix_bdg_regs->pcix_bdg_ecc_regs[i];
507 
508 
509 					if (pcix_bdg_ecc_regs->pcix_ecc_vflags &
510 					    PCIX_ERR_ECC_STS_VALID) {
511 						pci_config_put32(erpt_p->pe_hdl,
512 						    (pcix_bdg_cap_ptr +
513 						    PCI_PCIX_BDG_ECC_STATUS),
514 						    i);
515 
516 						pci_config_put32(erpt_p->pe_hdl,
517 						    (pcix_bdg_cap_ptr +
518 						    PCI_PCIX_BDG_ECC_STATUS),
519 						    pcix_bdg_ecc_regs->
520 						    pcix_ecc_ctlstat);
521 					}
522 					pcix_bdg_ecc_regs->pcix_ecc_vflags =
523 					    0x0;
524 				}
525 			}
526 		}
527 	} else {
528 		pcix_error_regs_t *pcix_regs = (pcix_error_regs_t *)pe_regs;
529 		uint8_t pcix_cap_ptr;
530 
531 		pcix_cap_ptr = pcix_regs->pcix_cap_ptr;
532 
533 		if (pcix_regs->pcix_vflags & PCIX_ERR_STATUS_VALID)
534 			pci_config_put32(erpt_p->pe_hdl,
535 			    (pcix_cap_ptr + PCI_PCIX_STATUS),
536 			    pcix_regs->pcix_status);
537 
538 		pcix_regs->pcix_vflags = 0x0;
539 
540 		if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) {
541 			pcix_ecc_regs_t *pcix_ecc_regs =
542 			    pcix_regs->pcix_ecc_regs;
543 
544 			if (pcix_ecc_regs->pcix_ecc_vflags &
545 			    PCIX_ERR_ECC_STS_VALID)
546 				pci_config_put32(erpt_p->pe_hdl,
547 				    (pcix_cap_ptr + PCI_PCIX_ECC_STATUS),
548 				    pcix_ecc_regs->pcix_ecc_ctlstat);
549 
550 			pcix_ecc_regs->pcix_ecc_vflags = 0x0;
551 		}
552 	}
553 }
554 
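/*
 * pcie_regs_clear: Write the saved PCI Express device status and AER status
 * values back to the device to clear the error bits captured by
 * pcie_regs_gather(), then reset the valid flags.
 */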
555 static void
556 pcie_regs_clear(pci_erpt_t *erpt_p)
557 {
558 	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
559 	uint8_t pcie_cap_ptr;
560 	pcie_adv_error_regs_t *pcie_adv_regs;
561 	uint16_t pcie_ecap_ptr;
562 
563 	pcie_cap_ptr = pcie_regs->pcie_cap_ptr;
564 
565 	if (pcie_regs->pcie_vflags & PCIE_ERR_STATUS_VALID)
566 		pci_config_put16(erpt_p->pe_hdl, pcie_cap_ptr + PCIE_DEVSTS,
567 		    pcie_regs->pcie_err_status);
568 
569 	pcie_regs->pcie_vflags = 0x0;
570 
571 	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) &&
572 	    (erpt_p->pe_dflags & PCIX_DEV))
573 		pcix_regs_clear(erpt_p, pcie_regs->pcix_bdg_regs);
574 
575 	if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV))
576 		return;
577 
578 	pcie_adv_regs = pcie_regs->pcie_adv_regs;
579 
580 	pcie_ecap_ptr = pcie_adv_regs->pcie_adv_cap_ptr;
581 
582 	if (pcie_adv_regs->pcie_adv_vflags & PCIE_UE_STATUS_VALID)
583 		pci_config_put32(erpt_p->pe_hdl,
584 		    pcie_ecap_ptr + PCIE_AER_UCE_STS,
585 		    pcie_adv_regs->pcie_ue_status);
586 
587 	if (pcie_adv_regs->pcie_adv_vflags & PCIE_CE_STATUS_VALID)
588 		pci_config_put32(erpt_p->pe_hdl,
589 		    pcie_ecap_ptr + PCIE_AER_CE_STS,
590 		    pcie_adv_regs->pcie_ce_status);
591 
592 
593 	if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
594 		pcie_adv_bdg_error_regs_t *pcie_bdg_regs =
595 		    pcie_adv_regs->pcie_adv_bdg_regs;
596 
597 
598 		if (pcie_adv_regs->pcie_adv_vflags & PCIE_SUE_STATUS_VALID)
599 			pci_config_put32(erpt_p->pe_hdl,
600 			    pcie_ecap_ptr + PCIE_AER_SUCE_STS,
601 			    pcie_bdg_regs->pcie_sue_status);
602 	}
603 	/*
604 	 * If this is a PCI Express root complex, clear the root complex
605 	 * error registers.
606 	 */
607 	if (erpt_p->pe_dflags & PCIEX_RC_DEV) {
608 		pcie_adv_rc_error_regs_t *pcie_rc_regs =
609 		    pcie_adv_regs->pcie_adv_rc_regs;
610 
611 
612 		if (pcie_adv_regs->pcie_adv_vflags & PCIE_RC_ERR_STATUS_VALID)
613 			pci_config_put32(erpt_p->pe_hdl,
614 			    (pcie_ecap_ptr + PCIE_AER_RE_STS),
615 			    pcie_rc_regs->pcie_rc_err_status);
616 	}
617 	pcie_adv_regs->pcie_adv_vflags = 0x0;
618 }
619 
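/*
 * pci_regs_clear: Clear the error bits captured by pci_regs_gather() by
 * writing the saved status values back, dispatching to the PCI Express or
 * PCI-X clear routines as appropriate.
 */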
620 static void
621 pci_regs_clear(pci_erpt_t *erpt_p)
622 {
623 	/*
624 	 * Finally clear the error bits
625 	 */
626 	if (erpt_p->pe_dflags & PCIEX_DEV)
627 		pcie_regs_clear(erpt_p);
628 	else if (erpt_p->pe_dflags & PCIX_DEV)
629 		pcix_regs_clear(erpt_p, erpt_p->pe_regs);
630 
631 	if (erpt_p->pe_pci_regs->pci_vflags & PCI_ERR_STATUS_VALID)
632 		pci_config_put16(erpt_p->pe_hdl, PCI_CONF_STAT,
633 		    erpt_p->pe_pci_regs->pci_err_status);
634 
635 	erpt_p->pe_pci_regs->pci_vflags = 0x0;
636 
637 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
638 		if (erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_vflags &
639 		    PCI_BDG_SEC_STAT_VALID)
640 			pci_config_put16(erpt_p->pe_hdl, PCI_BCNF_SEC_STATUS,
641 			    erpt_p->pe_pci_regs->pci_bdg_regs->
642 			    pci_bdg_sec_stat);
643 		if (erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_vflags &
644 		    PCI_BDG_CTRL_VALID)
645 			pci_config_put16(erpt_p->pe_hdl, PCI_BCNF_BCNTRL,
646 			    erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_ctrl);
647 
648 		erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_vflags = 0x0;
649 	}
650 }
651 
652 /*
653  * pcix_ereport_setup: Allocate structures for PCI-X error handling and ereport
654  * generation.
655  */
656 /* ARGSUSED */
657 static void
658 pcix_ereport_setup(dev_info_t *dip, pci_erpt_t *erpt_p)
659 {
660 	uint8_t pcix_cap_ptr;
661 	int i;
662 
663 	pcix_cap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
664 	    "pcix-capid-pointer", PCI_CAP_NEXT_PTR_NULL);
665 
666 	if (pcix_cap_ptr != PCI_CAP_NEXT_PTR_NULL)
667 		erpt_p->pe_dflags |= PCIX_DEV;
668 	else
669 		return;
670 
671 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
672 		pcix_bdg_error_regs_t *pcix_bdg_regs;
673 
674 		erpt_p->pe_regs = kmem_zalloc(sizeof (pcix_bdg_error_regs_t),
675 		    KM_SLEEP);
676 		pcix_bdg_regs = (pcix_bdg_error_regs_t *)erpt_p->pe_regs;
677 		pcix_bdg_regs->pcix_bdg_cap_ptr = pcix_cap_ptr;
678 		pcix_bdg_regs->pcix_bdg_ver = pci_config_get16(erpt_p->pe_hdl,
679 		    pcix_cap_ptr + PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK;
680 		if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) {
681 			for (i = 0; i < 2; i++) {
682 				pcix_bdg_regs->pcix_bdg_ecc_regs[i] =
683 				    kmem_zalloc(sizeof (pcix_ecc_regs_t),
684 				    KM_SLEEP);
685 			}
686 		}
687 	} else {
688 		pcix_error_regs_t *pcix_regs;
689 
690 		erpt_p->pe_regs = kmem_zalloc(sizeof (pcix_error_regs_t),
691 		    KM_SLEEP);
692 		pcix_regs = (pcix_error_regs_t *)erpt_p->pe_regs;
693 		pcix_regs->pcix_cap_ptr = pcix_cap_ptr;
694 		pcix_regs->pcix_ver = pci_config_get16(erpt_p->pe_hdl,
695 		    pcix_cap_ptr + PCI_PCIX_COMMAND) & PCI_PCIX_VER_MASK;
696 		if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) {
697 			pcix_regs->pcix_ecc_regs = kmem_zalloc(
698 			    sizeof (pcix_ecc_regs_t), KM_SLEEP);
699 		}
700 	}
701 }
702 
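/*
 * pcie_ereport_setup: Allocate structures for PCI Express error handling and
 * ereport generation, record whether the device is a root complex, switch
 * port or PCI Express to PCI bridge, and program the AER mask registers to
 * the expected values.
 */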
703 static void
704 pcie_ereport_setup(dev_info_t *dip, pci_erpt_t *erpt_p)
705 {
706 	pcie_error_regs_t *pcie_regs;
707 	pcie_adv_error_regs_t *pcie_adv_regs;
708 	uint8_t pcix_cap_ptr;
709 	uint8_t pcie_cap_ptr;
710 	uint16_t pcie_ecap_ptr;
711 	uint16_t dev_type = 0;
712 	uint32_t mask = pcie_expected_ue_mask;
713 
714 	/*
715 	 * The following sparc specific code should be removed once the pci_cap
716 	 * interfaces create the necessary properties for us.
717 	 */
718 #if defined(__sparc)
719 	ushort_t status;
720 	uint32_t slot_cap;
721 	uint8_t cap_ptr = 0;
722 	uint8_t cap_id = 0;
723 	uint32_t hdr, hdr_next_ptr, hdr_cap_id;
724 	uint16_t offset = P2ALIGN(PCIE_EXT_CAP, 4);
725 	uint16_t aer_ptr = 0;
726 
727 	cap_ptr = pci_config_get8(erpt_p->pe_hdl, PCI_CONF_CAP_PTR);
728 	if (pci_config_check(erpt_p->pe_hdl, DDI_FM_ERR_UNEXPECTED) ==
729 	    DDI_FM_OK) {
730 		while ((cap_id = pci_config_get8(erpt_p->pe_hdl, cap_ptr)) !=
731 		    0xff) {
732 			if (cap_id == PCI_CAP_ID_PCIX) {
733 				(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
734 				    "pcix-capid-pointer", cap_ptr);
735 			}
736 		if (cap_id == PCI_CAP_ID_PCI_E) {
737 			status = pci_config_get16(erpt_p->pe_hdl, cap_ptr + 2);
738 			if (status & PCIE_PCIECAP_SLOT_IMPL) {
739 				/* offset 14h is Slot Cap Register */
740 				slot_cap = pci_config_get32(erpt_p->pe_hdl,
741 				    cap_ptr + PCIE_SLOTCAP);
742 				(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
743 				    "pcie-slotcap-reg", slot_cap);
744 			}
745 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
746 			    "pcie-capid-reg", pci_config_get16(erpt_p->pe_hdl,
747 			    cap_ptr + PCIE_PCIECAP));
748 			(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
749 			    "pcie-capid-pointer", cap_ptr);
750 
751 		}
752 			if ((cap_ptr = pci_config_get8(erpt_p->pe_hdl,
753 			    cap_ptr + 1)) == 0xff || cap_ptr == 0 ||
754 			    (pci_config_check(erpt_p->pe_hdl,
755 			    DDI_FM_ERR_UNEXPECTED) != DDI_FM_OK))
756 				break;
757 		}
758 	}
759 
760 #endif
761 
762 	pcix_cap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
763 	    "pcix-capid-pointer", PCI_CAP_NEXT_PTR_NULL);
764 
765 	if (pcix_cap_ptr != PCI_CAP_NEXT_PTR_NULL)
766 		erpt_p->pe_dflags |= PCIX_DEV;
767 
768 	pcie_cap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
769 	    DDI_PROP_DONTPASS, "pcie-capid-pointer", PCI_CAP_NEXT_PTR_NULL);
770 
771 	if (pcie_cap_ptr != PCI_CAP_NEXT_PTR_NULL) {
772 		erpt_p->pe_dflags |= PCIEX_DEV;
773 		erpt_p->pe_regs = kmem_zalloc(sizeof (pcie_error_regs_t),
774 		    KM_SLEEP);
775 		pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
776 		pcie_regs->pcie_cap_ptr = pcie_cap_ptr;
777 	}
778 
779 	if (!(erpt_p->pe_dflags & PCIEX_DEV))
780 		return;
781 
782 	/*
783 	 * We don't currently need to check the version here because we are
784 	 * compliant with PCIE 1.0a, which is version 0 and is guaranteed to
785 	 * be software compatible with future versions.  We will need to add
786 	 * errors for new detectors/features that are added in newer
787 	 * revisions [sec 7.8.2].
788 	 */
789 	pcie_regs->pcie_cap = pci_config_get16(erpt_p->pe_hdl,
790 	    pcie_regs->pcie_cap_ptr + PCIE_PCIECAP);
791 
792 	dev_type = pcie_regs->pcie_cap & PCIE_PCIECAP_DEV_TYPE_MASK;
793 
794 	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) &&
795 	    (erpt_p->pe_dflags & PCIX_DEV)) {
796 		int i;
797 
798 		pcie_regs->pcix_bdg_regs =
799 		    kmem_zalloc(sizeof (pcix_bdg_error_regs_t), KM_SLEEP);
800 
801 		pcie_regs->pcix_bdg_regs->pcix_bdg_cap_ptr = pcix_cap_ptr;
802 		pcie_regs->pcix_bdg_regs->pcix_bdg_ver =
803 		    pci_config_get16(erpt_p->pe_hdl,
804 		    pcix_cap_ptr + PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK;
805 
806 		if (PCIX_ECC_VER_CHECK(pcie_regs->pcix_bdg_regs->pcix_bdg_ver))
807 			for (i = 0; i < 2; i++)
808 				pcie_regs->pcix_bdg_regs->pcix_bdg_ecc_regs[i] =
809 				    kmem_zalloc(sizeof (pcix_ecc_regs_t),
810 				    KM_SLEEP);
811 	}
812 
813 	if (dev_type == PCIE_PCIECAP_DEV_TYPE_ROOT) {
814 		erpt_p->pe_dflags |= PCIEX_RC_DEV;
815 		pcie_regs->pcie_rc_regs = kmem_zalloc(
816 		    sizeof (pcie_rc_error_regs_t), KM_SLEEP);
817 	}
818 	/*
819 	 * The following sparc specific code should be removed once the pci_cap
820 	 * interfaces create the necessary properties for us.
821 	 */
822 #if defined(__sparc)
823 
824 	hdr = pci_config_get32(erpt_p->pe_hdl, offset);
825 	hdr_next_ptr = (hdr >> PCIE_EXT_CAP_NEXT_PTR_SHIFT) &
826 	    PCIE_EXT_CAP_NEXT_PTR_MASK;
827 	hdr_cap_id = (hdr >> PCIE_EXT_CAP_ID_SHIFT) & PCIE_EXT_CAP_ID_MASK;
828 
829 	while ((hdr_next_ptr != PCIE_EXT_CAP_NEXT_PTR_NULL) &&
830 	    (hdr_cap_id != PCIE_EXT_CAP_ID_AER)) {
831 		offset = P2ALIGN(hdr_next_ptr, 4);
832 		hdr = pci_config_get32(erpt_p->pe_hdl, offset);
833 		hdr_next_ptr = (hdr >> PCIE_EXT_CAP_NEXT_PTR_SHIFT) &
834 		    PCIE_EXT_CAP_NEXT_PTR_MASK;
835 		hdr_cap_id = (hdr >> PCIE_EXT_CAP_ID_SHIFT) &
836 		    PCIE_EXT_CAP_ID_MASK;
837 	}
838 
839 	if (hdr_cap_id == PCIE_EXT_CAP_ID_AER)
840 		aer_ptr = P2ALIGN(offset, 4);
841 	if (aer_ptr != PCI_CAP_NEXT_PTR_NULL)
842 		(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
843 		    "pcie-aer-pointer", aer_ptr);
844 #endif
845 
846 	/*
847 	 * Find out and record whether this device is capable of pci express
848 	 * advanced errors; if not, report an error against the device.
849 	 */
850 	pcie_ecap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
851 	    "pcie-aer-pointer", PCI_CAP_NEXT_PTR_NULL);
852 	if (pcie_ecap_ptr != PCI_CAP_NEXT_PTR_NULL) {
853 		erpt_p->pe_dflags |= PCIEX_ADV_DEV;
854 		pcie_regs->pcie_adv_regs = kmem_zalloc(
855 		    sizeof (pcie_adv_error_regs_t), KM_SLEEP);
856 		pcie_regs->pcie_adv_regs->pcie_adv_cap_ptr = pcie_ecap_ptr;
857 	}
858 
859 	if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV)) {
860 		return;
861 	}
862 
863 	pcie_adv_regs = pcie_regs->pcie_adv_regs;
864 
865 	if (pcie_adv_regs == NULL)
866 		return;
867 	/*
868 	 * Initialize structures for advanced PCI Express devices.
869 	 */
870 
871 	/*
872 	 * Advanced error registers exist for PCI Express to PCI(X) Bridges and
873 	 * may also exist for PCI(X) to PCI Express Bridges; the latter is not
874 	 * well explained in the PCI Express to PCI/PCI-X Bridge Specification
875 	 * 1.0 and will be left out of the current gathering of these registers.
876 	 */
877 	if (dev_type == PCIE_PCIECAP_DEV_TYPE_PCIE2PCI) {
878 		erpt_p->pe_dflags |= PCIEX_2PCI_DEV;
879 		pcie_adv_regs->pcie_adv_bdg_regs = kmem_zalloc(
880 		    sizeof (pcie_adv_bdg_error_regs_t), KM_SLEEP);
881 	}
882 
883 	if (erpt_p->pe_dflags & PCIEX_RC_DEV)
884 		pcie_adv_regs->pcie_adv_rc_regs = kmem_zalloc(
885 		    sizeof (pcie_adv_rc_error_regs_t), KM_SLEEP);
886 
887 	/*
888 	 * Check that the mask values are as expected; if not,
889 	 * change them to what we desire.
890 	 */
891 	pci_regs_gather(dip, erpt_p, DDI_FM_ERR_UNEXPECTED);
892 	pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
893 	if (pcie_regs->pcie_adv_regs->pcie_ce_mask != pcie_expected_ce_mask) {
894 		pci_config_put32(erpt_p->pe_hdl,
895 		    pcie_ecap_ptr + PCIE_AER_CE_MASK, pcie_expected_ce_mask);
896 	}
897 
898 	/* Disable PTLP/ECRC (or mask these two) for Switches */
899 	if (dev_type == PCIE_PCIECAP_DEV_TYPE_UP ||
900 	    dev_type == PCIE_PCIECAP_DEV_TYPE_DOWN) {
901 		erpt_p->pe_dflags |= PCIEX_SWITCH_DEV;
902 		mask |= PCIE_AER_UCE_PTLP | PCIE_AER_UCE_ECRC;
903 	}
904 
905 	if (pcie_regs->pcie_adv_regs->pcie_ue_mask != mask) {
906 		pci_config_put32(erpt_p->pe_hdl,
907 		    pcie_ecap_ptr + PCIE_AER_UCE_MASK, mask);
908 	}
909 	if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
910 		if (pcie_regs->pcie_adv_regs->pcie_adv_bdg_regs->pcie_sue_mask
911 		    != pcie_expected_sue_mask) {
912 			pci_config_put32(erpt_p->pe_hdl,
913 			    pcie_ecap_ptr + PCIE_AER_SUCE_MASK,
914 			    pcie_expected_sue_mask);
915 		}
916 	}
917 }
918 
919 /*
920  * pci_ereport_setup: Detect PCI device type and initialize structures to be
921  * used to generate ereports based on detected generic device errors.
922  */
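/*
 * A minimal usage sketch (the variable names are illustrative): an
 * FM-capable PCI nexus driver typically calls this from its attach(9E)
 * FM-init path after ddi_fm_init(), and undoes it at detach time:
 *
 *	ddi_fm_init(dip, &fmcap, &ibc);
 *	if (DDI_FM_EREPORT_CAP(fmcap) || DDI_FM_ERRCB_CAP(fmcap))
 *		pci_ereport_setup(dip);
 *	...
 *	if (DDI_FM_EREPORT_CAP(fmcap) || DDI_FM_ERRCB_CAP(fmcap))
 *		pci_ereport_teardown(dip);
 *	ddi_fm_fini(dip);
 */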
923 void
924 pci_ereport_setup(dev_info_t *dip)
925 {
926 	struct dev_info *devi = DEVI(dip);
927 	struct i_ddi_fmhdl *fmhdl = devi->devi_fmhdl;
928 	pci_erpt_t *erpt_p;
929 	uint8_t pci_hdr_type;
930 	uint16_t pci_status;
931 	pci_regspec_t *pci_rp;
932 	int32_t len;
933 	uint32_t phys_hi;
934 
935 	/*
936 	 * If the device is not ereport capable, report an error against the
937 	 * driver for using this interface.
938 	 */
939 	if (!DDI_FM_EREPORT_CAP(ddi_fm_capable(dip)) &&
940 	    !DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) {
941 		i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL, DDI_SLEEP);
942 		return;
943 	}
944 
945 	/*
946 	 * ASSERT fmhdl exists and fh_bus_specific is NULL.
947 	 */
948 	ASSERT(fmhdl && (fmhdl->fh_bus_specific == NULL));
949 
950 	erpt_p = kmem_zalloc(sizeof (pci_erpt_t), KM_SLEEP);
951 
952 	if (pci_config_setup(dip, &erpt_p->pe_hdl) != DDI_SUCCESS)
953 		goto error;
954 
955 	erpt_p->pe_pci_regs = kmem_zalloc(sizeof (pci_error_regs_t), KM_SLEEP);
956 
957 	pci_status = pci_config_get16(erpt_p->pe_hdl, PCI_CONF_STAT);
958 	if (pci_config_check(erpt_p->pe_hdl, DDI_FM_ERR_UNEXPECTED) !=
959 	    DDI_FM_OK)
960 		goto error;
961 
962 	/*
963 	 * Get header type and record if device is a bridge.
964 	 */
965 	pci_hdr_type = pci_config_get8(erpt_p->pe_hdl, PCI_CONF_HEADER);
966 	if (pci_config_check(erpt_p->pe_hdl, DDI_FM_ERR_UNEXPECTED) !=
967 	    DDI_FM_OK)
968 		goto error;
969 
970 	/*
971 	 * Check to see if the PCI device is a bridge; if so, allocate a pci
972 	 * bridge error register structure.
973 	 */
974 	if ((pci_hdr_type & PCI_HEADER_TYPE_M) == PCI_HEADER_PPB) {
975 		erpt_p->pe_dflags |= PCI_BRIDGE_DEV;
976 		erpt_p->pe_pci_regs->pci_bdg_regs = kmem_zalloc(
977 		    sizeof (pci_bdg_error_regs_t), KM_SLEEP);
978 	}
979 
980 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg",
981 	    (caddr_t)&pci_rp, &len) == DDI_SUCCESS) {
982 		phys_hi = pci_rp->pci_phys_hi;
983 		kmem_free(pci_rp, len);
984 
985 		erpt_p->pe_bdf = (uint16_t)(PCI_REG_BDFR_G(phys_hi) >>
986 		    PCI_REG_FUNC_SHIFT);
987 	}
988 
989 
990 	if (!(pci_status & PCI_STAT_CAP)) {
991 		goto done;
992 	}
993 
994 	/*
995 	 * Initialize structures for PCI Express and PCI-X devices.
996 	 * Order matters below: pcie_ereport_setup() must precede
997 	 * pcix_ereport_setup().
998 	 */
999 	pcie_ereport_setup(dip, erpt_p);
1000 
1001 	if (!(erpt_p->pe_dflags & PCIEX_DEV)) {
1002 		pcix_ereport_setup(dip, erpt_p);
1003 	}
1004 
1005 done:
1006 	pci_regs_gather(dip, erpt_p, DDI_FM_ERR_UNEXPECTED);
1007 	pci_regs_clear(erpt_p);
1008 
1009 	/*
1010 	 * Before returning, set fh_bus_specific to the completed pci_erpt_t
1011 	 * structure.
1012 	 */
1013 	fmhdl->fh_bus_specific = (void *)erpt_p;
1014 
1015 	return;
1016 error:
1017 	if (erpt_p->pe_pci_regs)
1018 		kmem_free(erpt_p->pe_pci_regs, sizeof (pci_error_regs_t));
1019 	kmem_free(erpt_p, sizeof (pci_erpt_t));
1020 	erpt_p = NULL;
1021 }
1022 
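/*
 * pcix_ereport_teardown: Free the PCI-X error register structures allocated
 * by pcix_ereport_setup().
 */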
1023 static void
1024 pcix_ereport_teardown(pci_erpt_t *erpt_p)
1025 {
1026 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
1027 		pcix_bdg_error_regs_t *pcix_bdg_regs;
1028 		uint16_t pcix_ver;
1029 
1030 		pcix_bdg_regs = (pcix_bdg_error_regs_t *)erpt_p->pe_regs;
1031 		pcix_ver = pcix_bdg_regs->pcix_bdg_ver;
1032 		if (PCIX_ECC_VER_CHECK(pcix_ver)) {
1033 			int i;
1034 			for (i = 0; i < 2; i++)
1035 				kmem_free(pcix_bdg_regs->pcix_bdg_ecc_regs[i],
1036 				    sizeof (pcix_ecc_regs_t));
1037 		}
1038 		kmem_free(erpt_p->pe_regs, sizeof (pcix_bdg_error_regs_t));
1039 	} else {
1040 		pcix_error_regs_t *pcix_regs;
1041 		uint16_t pcix_ver;
1042 
1043 		pcix_regs = (pcix_error_regs_t *)erpt_p->pe_regs;
1044 		pcix_ver = pcix_regs->pcix_ver;
1045 		if (PCIX_ECC_VER_CHECK(pcix_ver)) {
1046 			kmem_free(pcix_regs->pcix_ecc_regs,
1047 			    sizeof (pcix_ecc_regs_t));
1048 		}
1049 		kmem_free(erpt_p->pe_regs, sizeof (pcix_error_regs_t));
1050 	}
1051 }
1052 
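/*
 * pcie_ereport_teardown: Free the PCI Express (and associated PCI-X bridge)
 * error register structures allocated by pcie_ereport_setup().
 */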
1053 static void
1054 pcie_ereport_teardown(pci_erpt_t *erpt_p)
1055 {
1056 	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
1057 
1058 	if (erpt_p->pe_dflags & PCIEX_ADV_DEV) {
1059 		pcie_adv_error_regs_t *pcie_adv = pcie_regs->pcie_adv_regs;
1060 
1061 		if (erpt_p->pe_dflags & PCIEX_2PCI_DEV)
1062 			kmem_free(pcie_adv->pcie_adv_bdg_regs,
1063 			    sizeof (pcie_adv_bdg_error_regs_t));
1064 		if (erpt_p->pe_dflags & PCIEX_RC_DEV)
1065 			kmem_free(pcie_adv->pcie_adv_rc_regs,
1066 			    sizeof (pcie_adv_rc_error_regs_t));
1067 		kmem_free(pcie_adv, sizeof (pcie_adv_error_regs_t));
1068 	}
1069 
1070 	if (erpt_p->pe_dflags & PCIEX_RC_DEV)
1071 		kmem_free(pcie_regs->pcie_rc_regs,
1072 		    sizeof (pcie_rc_error_regs_t));
1073 
1074 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
1075 		if (erpt_p->pe_dflags & PCIX_DEV) {
1076 			uint16_t pcix_ver = pcie_regs->pcix_bdg_regs->
1077 			    pcix_bdg_ver;
1078 
1079 			if (PCIX_ECC_VER_CHECK(pcix_ver)) {
1080 				int i;
1081 				for (i = 0; i < 2; i++)
1082 					kmem_free(pcie_regs->pcix_bdg_regs->
1083 					    pcix_bdg_ecc_regs[i],
1084 					    sizeof (pcix_ecc_regs_t));
1085 			}
1086 			kmem_free(pcie_regs->pcix_bdg_regs,
1087 			    sizeof (pcix_bdg_error_regs_t));
1088 		}
1089 	}
1090 	kmem_free(erpt_p->pe_regs, sizeof (pcie_error_regs_t));
1091 }
1092 
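/*
 * pci_ereport_teardown: Undo pci_ereport_setup(); free the error register
 * structures, tear down the config space access handle and, on sparc,
 * remove the capability properties created at setup time.
 */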
1093 void
1094 pci_ereport_teardown(dev_info_t *dip)
1095 {
1096 	struct i_ddi_fmhdl *fmhdl = DEVI(dip)->devi_fmhdl;
1097 	pci_erpt_t *erpt_p;
1098 
1099 	if (!DDI_FM_EREPORT_CAP(ddi_fm_capable(dip)) &&
1100 	    !DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) {
1101 		i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL, DDI_SLEEP);
1102 	}
1103 
1104 	ASSERT(fmhdl);
1105 
1106 	erpt_p = (pci_erpt_t *)fmhdl->fh_bus_specific;
1107 	if (erpt_p == NULL)
1108 		return;
1109 
1110 	if (erpt_p->pe_dflags & PCIEX_DEV)
1111 		pcie_ereport_teardown(erpt_p);
1112 	else if (erpt_p->pe_dflags & PCIX_DEV)
1113 		pcix_ereport_teardown(erpt_p);
1114 	pci_config_teardown((ddi_acc_handle_t *)&erpt_p->pe_hdl);
1115 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV)
1116 		kmem_free(erpt_p->pe_pci_regs->pci_bdg_regs,
1117 		    sizeof (pci_bdg_error_regs_t));
1118 	kmem_free(erpt_p->pe_pci_regs, sizeof (pci_error_regs_t));
1119 	kmem_free(erpt_p, sizeof (pci_erpt_t));
1120 	fmhdl->fh_bus_specific = NULL;
1121 	/*
1122 	 * The following sparc specific code should be removed once the pci_cap
1123 	 * interfaces create the necessary properties for us.
1124 	 */
1125 #if defined(__sparc)
1126 	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcix-capid-pointer");
1127 	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-slotcap-reg");
1128 	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-capid-reg");
1129 	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-capid-pointer");
1130 	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-aer-pointer");
1131 #endif
1132 }
1133 
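/*
 * pcie_ereport_post: Post a PCI Express ereport of the given class and error
 * type, with the relevant saved device and AER status registers as payload.
 */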
1134 static void
1135 pcie_ereport_post(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
1136     char *buf, int errtype)
1137 {
1138 	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
1139 	pcie_adv_error_regs_t *pcie_adv_regs = pcie_regs->pcie_adv_regs;
1140 	pcie_adv_rc_error_regs_t *pcie_adv_rc_regs;
1141 
1142 	switch (errtype) {
1143 	case PCIEX_TYPE_CE:
1144 		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
1145 		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
1146 		    PCIEX_DEVSTS_REG, DATA_TYPE_UINT16,
1147 		    pcie_regs->pcie_err_status,
1148 		    PCIEX_CE_STATUS_REG, DATA_TYPE_UINT32,
1149 		    pcie_adv_regs->pcie_ce_status, NULL);
1150 		break;
1151 	case PCIEX_TYPE_UE:
1152 		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
1153 		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
1154 		    PCIEX_DEVSTS_REG, DATA_TYPE_UINT16,
1155 		    pcie_regs->pcie_err_status,
1156 		    PCIEX_UE_STATUS_REG, DATA_TYPE_UINT32,
1157 		    pcie_adv_regs->pcie_ue_status, PCIEX_UE_SEV_REG,
1158 		    DATA_TYPE_UINT32, pcie_adv_regs->pcie_ue_sev,
1159 		    PCIEX_ADV_CTL, DATA_TYPE_UINT32,
1160 		    pcie_adv_regs->pcie_adv_ctl,
1161 		    PCIEX_SRC_ID, DATA_TYPE_UINT16,
1162 		    pcie_adv_regs->pcie_adv_bdf,
1163 		    PCIEX_SRC_VALID, DATA_TYPE_BOOLEAN_VALUE,
1164 		    (pcie_adv_regs->pcie_adv_bdf != NULL) ?
1165 		    1 : NULL,
1166 #ifdef DEBUG
1167 		    PCIEX_UE_HDR0, DATA_TYPE_UINT32,
1168 		    pcie_adv_regs->pcie_ue_hdr0,
1169 		    PCIEX_UE_HDR1, DATA_TYPE_UINT32,
1170 		    pcie_adv_regs->pcie_ue_hdr[0],
1171 		    PCIEX_UE_HDR2, DATA_TYPE_UINT32,
1172 		    pcie_adv_regs->pcie_ue_hdr[1],
1173 		    PCIEX_UE_HDR3, DATA_TYPE_UINT32,
1174 		    pcie_adv_regs->pcie_ue_hdr[2],
1175 #endif
1176 		    NULL);
1177 		break;
1178 	case PCIEX_TYPE_GEN:
1179 		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
1180 		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8,
1181 		    0, PCIEX_DEVSTS_REG, DATA_TYPE_UINT16,
1182 		    pcie_regs->pcie_err_status, NULL);
1183 		break;
1184 	case PCIEX_TYPE_RC_UE_MSG:
1185 	case PCIEX_TYPE_RC_CE_MSG:
1186 		pcie_adv_rc_regs = pcie_adv_regs->pcie_adv_rc_regs;
1187 
1188 		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
1189 		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
1190 		    PCIEX_ROOT_ERRSTS_REG, DATA_TYPE_UINT32,
1191 		    pcie_adv_rc_regs->pcie_rc_err_status,
1192 		    PCIEX_SRC_ID, DATA_TYPE_UINT16,
1193 		    (errtype == PCIEX_TYPE_RC_UE_MSG) ?
1194 		    pcie_adv_rc_regs->pcie_rc_ue_src_id :
1195 		    pcie_adv_rc_regs->pcie_rc_ce_src_id,
1196 		    PCIEX_SRC_VALID, DATA_TYPE_BOOLEAN_VALUE,
1197 		    (errtype == PCIEX_TYPE_RC_UE_MSG) ?
1198 		    (pcie_adv_regs->pcie_adv_vflags & PCIE_SRC_ID_VALID &&
1199 		    pcie_adv_rc_regs->pcie_rc_ue_src_id != 0) :
1200 		    (pcie_adv_regs->pcie_adv_vflags & PCIE_SRC_ID_VALID &&
1201 		    pcie_adv_rc_regs->pcie_rc_ce_src_id != 0), NULL);
1202 		break;
1203 	case PCIEX_TYPE_RC_MULT_MSG:
1204 		pcie_adv_rc_regs = pcie_adv_regs->pcie_adv_rc_regs;
1205 
1206 		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
1207 		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
1208 		    PCIEX_ROOT_ERRSTS_REG, DATA_TYPE_UINT32,
1209 		    pcie_adv_rc_regs->pcie_rc_err_status, NULL);
1210 		break;
1211 	default:
1212 		break;
1213 	}
1214 }
1215 
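/*
 * pcie_check_addr: Decode the TLP captured in the AER header log to recover
 * the address and requester/completer id associated with the fault, and fill
 * in the bus specific data used later to match error handles.
 */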
1216 /*ARGSUSED*/
1217 static void
1218 pcie_check_addr(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
1219 {
1220 	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
1221 	pcie_adv_error_regs_t *pcie_adv_regs = pcie_regs->pcie_adv_regs;
1222 	pcie_tlp_hdr_t *ue_hdr0;
1223 	uint32_t *ue_hdr;
1224 	uint64_t addr = NULL;
1225 	int upstream = 0;
1226 	pci_fme_bus_specific_t *pci_fme_bsp =
1227 	    (pci_fme_bus_specific_t *)derr->fme_bus_specific;
1228 
1229 	if (!(pcie_adv_regs->pcie_adv_vflags & PCIE_UE_HDR_VALID))
1230 		return;
1231 
1232 	ue_hdr0 = (pcie_tlp_hdr_t *)&pcie_adv_regs->pcie_ue_hdr0;
1233 	ue_hdr = pcie_adv_regs->pcie_ue_hdr;
1234 
1235 	if ((pcie_regs->pcie_cap & PCIE_PCIECAP_DEV_TYPE_MASK) ==
1236 	    PCIE_PCIECAP_DEV_TYPE_ROOT ||
1237 	    (pcie_regs->pcie_cap & PCIE_PCIECAP_DEV_TYPE_MASK) ==
1238 	    PCIE_PCIECAP_DEV_TYPE_DOWN)
1239 		upstream = 1;
1240 
1241 	switch (ue_hdr0->type) {
1242 	case PCIE_TLP_TYPE_MEM:
1243 	case PCIE_TLP_TYPE_MEMLK:
1244 		if ((ue_hdr0->fmt & 0x1) == 0x1) {
1245 			pcie_mem64_t *mem64_tlp = (pcie_mem64_t *)ue_hdr;
1246 
1247 			addr = (uint64_t)mem64_tlp->addr1 << 32 |
1248 			    (uint32_t)mem64_tlp->addr0 << 2;
1249 			pcie_adv_regs->pcie_adv_bdf = mem64_tlp->rid;
1250 		} else {
1251 			pcie_memio32_t *memio32_tlp = (pcie_memio32_t *)ue_hdr;
1252 
1253 			addr = (uint32_t)memio32_tlp->addr0 << 2;
1254 			pcie_adv_regs->pcie_adv_bdf = memio32_tlp->rid;
1255 		}
1256 		if (upstream) {
1257 			pci_fme_bsp->pci_bs_bdf = pcie_adv_regs->pcie_adv_bdf;
1258 			pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID;
1259 		} else if ((pcie_regs->pcie_cap & PCIE_PCIECAP_DEV_TYPE_MASK) ==
1260 		    PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) {
1261 			pci_fme_bsp->pci_bs_bdf = erpt_p->pe_bdf;
1262 			pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID;
1263 		}
1264 		pci_fme_bsp->pci_bs_addr = addr;
1265 		pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID;
1266 		pci_fme_bsp->pci_bs_type = upstream ? DMA_HANDLE : ACC_HANDLE;
1267 		break;
1268 
1269 	case PCIE_TLP_TYPE_IO:
1270 		{
1271 			pcie_memio32_t *memio32_tlp = (pcie_memio32_t *)ue_hdr;
1272 
1273 			addr = (uint32_t)memio32_tlp->addr0 << 2;
1274 			pcie_adv_regs->pcie_adv_bdf = memio32_tlp->rid;
1275 			if ((pcie_regs->pcie_cap &
1276 			    PCIE_PCIECAP_DEV_TYPE_MASK) ==
1277 			    PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) {
1278 				pci_fme_bsp->pci_bs_bdf = erpt_p->pe_bdf;
1279 				pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID;
1280 			}
1281 			pci_fme_bsp->pci_bs_addr = addr;
1282 			pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID;
1283 			pci_fme_bsp->pci_bs_type = ACC_HANDLE;
1284 			break;
1285 		}
1286 	case PCIE_TLP_TYPE_CFG0:
1287 	case PCIE_TLP_TYPE_CFG1:
1288 		{
1289 			pcie_cfg_t *cfg_tlp = (pcie_cfg_t *)ue_hdr;
1290 
1291 			pcie_adv_regs->pcie_adv_bdf = cfg_tlp->rid;
1292 			pci_fme_bsp->pci_bs_bdf = (uint16_t)cfg_tlp->bus << 8 |
1293 			    (uint16_t)cfg_tlp->dev << 3 | cfg_tlp->func;
1294 			pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID;
1295 			pci_fme_bsp->pci_bs_type = ACC_HANDLE;
1296 			break;
1297 		}
1298 	case PCIE_TLP_TYPE_MSG:
1299 		{
1300 			pcie_msg_t *msg_tlp = (pcie_msg_t *)ue_hdr;
1301 
1302 			pcie_adv_regs->pcie_adv_bdf = msg_tlp->rid;
1303 			break;
1304 		}
1305 	case PCIE_TLP_TYPE_CPL:
1306 	case PCIE_TLP_TYPE_CPLLK:
1307 		{
1308 			pcie_cpl_t *cpl_tlp = (pcie_cpl_t *)ue_hdr;
1309 
1310 			pcie_adv_regs->pcie_adv_bdf = cpl_tlp->cid;
1311 			pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID;
1312 			if (upstream) {
1313 				pci_fme_bsp->pci_bs_bdf = cpl_tlp->cid;
1314 				pci_fme_bsp->pci_bs_type = ACC_HANDLE;
1315 			} else {
1316 				pci_fme_bsp->pci_bs_bdf = cpl_tlp->rid;
1317 				pci_fme_bsp->pci_bs_type = DMA_HANDLE;
1318 			}
1319 			break;
1320 		}
1321 	case PCIE_TLP_TYPE_MSI:
1322 	default:
1323 		break;
1324 	}
1325 }
1326 
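/*
 * pcie_pci_check_addr: Decode the PCI-X transaction captured in the AER
 * secondary header log of a PCI Express to PCI bridge to recover the
 * command, address and requester id associated with the fault.
 */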
1327 /*ARGSUSED*/
1328 static void
1329 pcie_pci_check_addr(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
1330     int type)
1331 {
1332 	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
1333 	pcie_adv_error_regs_t *pcie_adv_regs = pcie_regs->pcie_adv_regs;
1334 	pcie_adv_bdg_error_regs_t *pcie_bdg_regs =
1335 	    pcie_adv_regs->pcie_adv_bdg_regs;
1336 	uint64_t addr = NULL;
1337 	pcix_attr_t *pcie_pci_sue_attr;
1338 	int cmd;
1339 	int dual_addr = 0;
1340 	pci_fme_bus_specific_t *pci_fme_bsp =
1341 	    (pci_fme_bus_specific_t *)derr->fme_bus_specific;
1342 
1343 	if (!(pcie_adv_regs->pcie_adv_vflags & PCIE_SUE_HDR_VALID))
1344 		return;
1345 
1346 	pcie_pci_sue_attr = (pcix_attr_t *)&pcie_bdg_regs->pcie_sue_hdr0;
1347 	cmd = (pcie_bdg_regs->pcie_sue_hdr[0] >>
1348 	    PCIE_AER_SUCE_HDR_CMD_LWR_SHIFT) & PCIE_AER_SUCE_HDR_CMD_LWR_MASK;
1349 
1350 cmd_switch:
1351 	addr = pcie_bdg_regs->pcie_sue_hdr[2];
1352 	addr = (addr << PCIE_AER_SUCE_HDR_ADDR_SHIFT) |
1353 	    pcie_bdg_regs->pcie_sue_hdr[1];
1354 	switch (cmd) {
1355 	case PCI_PCIX_CMD_IORD:
1356 	case PCI_PCIX_CMD_IOWR:
1357 		pcie_adv_regs->pcie_adv_bdf = pcie_pci_sue_attr->rid;
1358 		if (addr) {
1359 			pci_fme_bsp->pci_bs_addr = addr;
1360 			pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID;
1361 			pci_fme_bsp->pci_bs_type = ACC_HANDLE;
1362 		}
1363 		break;
1364 	case PCI_PCIX_CMD_MEMRD_DW:
1365 	case PCI_PCIX_CMD_MEMWR:
1366 	case PCI_PCIX_CMD_MEMRD_BL:
1367 	case PCI_PCIX_CMD_MEMWR_BL:
1368 	case PCI_PCIX_CMD_MEMRDBL:
1369 	case PCI_PCIX_CMD_MEMWRBL:
1370 		pcie_adv_regs->pcie_adv_bdf = pcie_pci_sue_attr->rid;
1371 		if (addr) {
1372 			pci_fme_bsp->pci_bs_addr = addr;
1373 			pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID;
1374 			pci_fme_bsp->pci_bs_type = type;
1375 		}
1376 		break;
1377 	case PCI_PCIX_CMD_CFRD:
1378 	case PCI_PCIX_CMD_CFWR:
1379 		pcie_adv_regs->pcie_adv_bdf = pcie_pci_sue_attr->rid;
1380 		/*
1381 		 * for type 1 config transaction we can find bdf from address
1382 		 */
1383 		if ((addr & 3) == 1) {
1384 			pci_fme_bsp->pci_bs_bdf = (addr >> 8) & 0xffffffff;
1385 			pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID;
1386 			pci_fme_bsp->pci_bs_type = ACC_HANDLE;
1387 		}
1388 		break;
1389 	case PCI_PCIX_CMD_SPL:
1390 		pcie_adv_regs->pcie_adv_bdf = pcie_pci_sue_attr->rid;
1391 		if (type == ACC_HANDLE) {
1392 			pci_fme_bsp->pci_bs_bdf = pcie_adv_regs->pcie_adv_bdf;
1393 			pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID;
1394 			pci_fme_bsp->pci_bs_type = type;
1395 		}
1396 		break;
1397 	case PCI_PCIX_CMD_DADR:
1398 		cmd = (pcie_bdg_regs->pcie_sue_hdr[0] >>
1399 		    PCIE_AER_SUCE_HDR_CMD_UP_SHIFT) &
1400 		    PCIE_AER_SUCE_HDR_CMD_UP_MASK;
1401 		if (dual_addr)
1402 			break;
1403 		++dual_addr;
1404 		goto cmd_switch;
1405 	default:
1406 		break;
1407 	}
1408 }
1409 
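/*
 * pcix_check_addr: Decode the command and address captured in the PCI-X ECC
 * registers, fill in the bus specific data where an address is available and
 * return the severity of the error.
 */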
1410 /*ARGSUSED*/
1411 static int
1412 pcix_check_addr(dev_info_t *dip, ddi_fm_error_t *derr,
1413     pcix_ecc_regs_t *pcix_ecc_regs, int type)
1414 {
1415 	int cmd = (pcix_ecc_regs->pcix_ecc_ctlstat >> 16) & 0xf;
1416 	uint64_t addr;
1417 	pci_fme_bus_specific_t *pci_fme_bsp =
1418 	    (pci_fme_bus_specific_t *)derr->fme_bus_specific;
1419 
1420 	addr = pcix_ecc_regs->pcix_ecc_secaddr;
1421 	addr = addr << 32;
1422 	addr |= pcix_ecc_regs->pcix_ecc_fstaddr;
1423 
1424 	switch (cmd) {
1425 	case PCI_PCIX_CMD_INTR:
1426 	case PCI_PCIX_CMD_SPEC:
1427 		return (DDI_FM_FATAL);
1428 	case PCI_PCIX_CMD_IORD:
1429 	case PCI_PCIX_CMD_IOWR:
1430 		pci_fme_bsp->pci_bs_addr = addr;
1431 		pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID;
1432 		pci_fme_bsp->pci_bs_type = type;
1433 		return (DDI_FM_UNKNOWN);
1434 	case PCI_PCIX_CMD_DEVID:
1435 		return (DDI_FM_FATAL);
1436 	case PCI_PCIX_CMD_MEMRD_DW:
1437 	case PCI_PCIX_CMD_MEMWR:
1438 	case PCI_PCIX_CMD_MEMRD_BL:
1439 	case PCI_PCIX_CMD_MEMWR_BL:
1440 		pci_fme_bsp->pci_bs_addr = addr;
1441 		pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID;
1442 		pci_fme_bsp->pci_bs_type = type;
1443 		return (DDI_FM_UNKNOWN);
1444 	case PCI_PCIX_CMD_CFRD:
1445 	case PCI_PCIX_CMD_CFWR:
1446 		/*
1447 		 * for type 1 config transaction we can find bdf from address
1448 		 */
1449 		if ((addr & 3) == 1) {
1450 			pci_fme_bsp->pci_bs_bdf = (addr >> 8) & 0xffffffff;
1451 			pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID;
1452 			pci_fme_bsp->pci_bs_type = type;
1453 		}
1454 		return (DDI_FM_UNKNOWN);
1455 	case PCI_PCIX_CMD_SPL:
1456 	case PCI_PCIX_CMD_DADR:
1457 		return (DDI_FM_UNKNOWN);
1458 	case PCI_PCIX_CMD_MEMRDBL:
1459 	case PCI_PCIX_CMD_MEMWRBL:
1460 		pci_fme_bsp->pci_bs_addr = addr;
1461 		pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID;
1462 		pci_fme_bsp->pci_bs_type = type;
1463 		return (DDI_FM_UNKNOWN);
1464 	default:
1465 		return (DDI_FM_FATAL);
1466 	}
1467 }
1468 
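/*
 * pci_bdg_error_report: Scan the saved PCI bridge secondary status and
 * bridge control registers, post an ereport for each error found, dispatch
 * the error to the children below the bridge and return the combined
 * severity.
 */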
1469 /*ARGSUSED*/
1470 static int
1471 pci_bdg_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
1472 {
1473 	pci_bdg_error_regs_t *pci_bdg_regs = erpt_p->pe_pci_regs->pci_bdg_regs;
1474 	int fatal = 0;
1475 	int nonfatal = 0;
1476 	int unknown = 0;
1477 	int ok = 0;
1478 	int ret = DDI_FM_OK;
1479 	char buf[FM_MAX_CLASS];
1480 	int i;
1481 	pci_fme_bus_specific_t *pci_fme_bsp =
1482 	    (pci_fme_bus_specific_t *)derr->fme_bus_specific;
1483 
1484 	if (derr->fme_flag != DDI_FM_ERR_UNEXPECTED)
1485 		goto done;
1486 
1487 	if ((pci_bdg_regs->pci_bdg_vflags & PCI_BDG_CTRL_VALID) &&
1488 	    (pci_bdg_regs->pci_bdg_ctrl & PCI_BCNF_BCNTRL_DTO_STAT)) {
1489 		(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
1490 		    PCI_ERROR_SUBCLASS, PCI_DTO);
1491 		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
1492 		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
1493 		    PCI_SEC_CONFIG_STATUS, DATA_TYPE_UINT16,
1494 		    pci_bdg_regs->pci_bdg_sec_stat, PCI_BCNTRL,
1495 		    DATA_TYPE_UINT16, pci_bdg_regs->pci_bdg_ctrl, NULL);
1496 		unknown++;
1497 	}
1498 
1499 	if (pci_bdg_regs->pci_bdg_vflags & PCI_BDG_SEC_STAT_VALID) {
1500 		for (i = 0; pci_bdg_err_tbl[i].err_class != NULL; i++) {
1501 			if (pci_bdg_regs->pci_bdg_sec_stat &
1502 			    pci_bdg_err_tbl[i].reg_bit) {
1503 				(void) snprintf(buf, FM_MAX_CLASS, "%s.%s-%s",
1504 				    PCI_ERROR_SUBCLASS, PCI_SEC_ERROR_SUBCLASS,
1505 				    pci_bdg_err_tbl[i].err_class);
1506 				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
1507 				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
1508 				    PCI_SEC_CONFIG_STATUS, DATA_TYPE_UINT16,
1509 				    pci_bdg_regs->pci_bdg_sec_stat, PCI_BCNTRL,
1510 				    DATA_TYPE_UINT16,
1511 				    pci_bdg_regs->pci_bdg_ctrl, NULL);
1512 				PCI_FM_SEV_INC(pci_bdg_err_tbl[i].flags);
1513 				if (pci_fme_bsp && (pci_fme_bsp->pci_bs_flags &
1514 				    PCI_BS_ADDR_VALID) &&
1515 				    pci_fme_bsp->pci_bs_type == ACC_HANDLE &&
1516 				    pci_bdg_err_tbl[i].terr_class)
1517 					pci_target_enqueue(derr->fme_ena,
1518 					    pci_bdg_err_tbl[i].terr_class,
1519 					    PCI_ERROR_SUBCLASS,
1520 					    pci_fme_bsp->pci_bs_addr);
1521 			}
1522 		}
1523 #if !defined(__sparc)
1524 		/*
1525 		 * For x86, many drivers and even user-level code currently get
1526 		 * away with accessing bad addresses, getting a UR and getting
1527 		 * -1 returned. Unfortunately, we have no control over this, so
1528 		 * we will have to treat all URs as nonfatal. Moreover, if the
1529 		 * leaf driver is non-hardened, then we don't actually see the
1530 		 * UR directly. All we see is a secondary bus master abort at
1531 		 * the root complex - so it's this condition that we actually
1532 		 * need to treat as nonfatal (providing no other unrelated nfe
1533 		 * conditions have also been seen by the root complex).
1534 		 */
1535 		if ((erpt_p->pe_dflags & PCIEX_RC_DEV) &&
1536 		    (pci_bdg_regs->pci_bdg_sec_stat & PCI_STAT_R_MAST_AB) &&
1537 		    !(pci_bdg_regs->pci_bdg_sec_stat & PCI_STAT_S_PERROR)) {
1538 			pcie_error_regs_t *pcie_regs =
1539 			    (pcie_error_regs_t *)erpt_p->pe_regs;
1540 			if ((pcie_regs->pcie_vflags & PCIE_ERR_STATUS_VALID) &&
1541 			    !(pcie_regs->pcie_err_status &
1542 			    PCIE_DEVSTS_NFE_DETECTED))
1543 				nonfatal++;
1544 			if (erpt_p->pe_dflags & PCIEX_ADV_DEV) {
1545 				pcie_adv_error_regs_t *pcie_adv_regs =
1546 				    pcie_regs->pcie_adv_regs;
1547 				pcie_adv_rc_error_regs_t *pcie_rc_regs =
1548 				    pcie_adv_regs->pcie_adv_rc_regs;
1549 				if ((pcie_adv_regs->pcie_adv_vflags &
1550 				    PCIE_RC_ERR_STATUS_VALID) &&
1551 				    (pcie_rc_regs->pcie_rc_err_status &
1552 				    PCIE_AER_RE_STS_NFE_MSGS_RCVD)) {
1553 					(void) snprintf(buf, FM_MAX_CLASS,
1554 					    "%s.%s-%s", PCI_ERROR_SUBCLASS,
1555 					    PCI_SEC_ERROR_SUBCLASS, PCI_MA);
1556 					ddi_fm_ereport_post(dip, buf,
1557 					    derr->fme_ena, DDI_NOSLEEP,
1558 					    FM_VERSION, DATA_TYPE_UINT8, 0,
1559 					    PCI_SEC_CONFIG_STATUS,
1560 					    DATA_TYPE_UINT16,
1561 					    pci_bdg_regs->pci_bdg_sec_stat,
1562 					    PCI_BCNTRL, DATA_TYPE_UINT16,
1563 					    pci_bdg_regs->pci_bdg_ctrl, NULL);
1564 				}
1565 			}
1566 		}
1567 #endif
1568 	}
1569 
1570 done:
1571 	/*
1572 	 * Need to check for poke and cautious put. We already know peek
1573 	 * and cautious get errors occurred (as we got a trap) and we know
1574 	 * they are nonfatal.
1575 	 */
1576 	if (derr->fme_flag == DDI_FM_ERR_EXPECTED) {
1577 		/*
1578 		 * for cautious puts we treat all errors as nonfatal. Actually
1579 		 * For cautious puts we treat all errors as nonfatal. Actually
1580 		 * we set nonfatal for cautious gets as well; it doesn't do any
1581 		 * harm.
1582 		if (pci_bdg_regs->pci_bdg_sec_stat & (PCI_STAT_R_TARG_AB |
1583 		    PCI_STAT_R_MAST_AB | PCI_STAT_S_PERROR | PCI_STAT_S_SYSERR))
1584 			nonfatal++;
1585 	}
1586 	if (derr->fme_flag == DDI_FM_ERR_POKE) {
1587 		/*
1588 		 * Special case for pokes - we only consider master abort
1589 		 * and target abort as nonfatal. SERR with no master abort is
1590 		 * fatal, but a master/target abort can come in on a separate
1591 		 * instance, so return unknown and the parent will determine
1592 		 * whether it is nonfatal (if another child returned nonfatal,
1593 		 * i.e. a master or target abort) or fatal otherwise.
1594 		 */
1595 		if (pci_bdg_regs->pci_bdg_sec_stat & (PCI_STAT_R_TARG_AB |
1596 		    PCI_STAT_R_MAST_AB))
1597 			nonfatal++;
1598 		if (erpt_p->pe_pci_regs->pci_err_status & PCI_STAT_S_SYSERR)
1599 			unknown++;
1600 	}
1601 
1602 	/*
1603 	 * now check children below the bridge
1604 	 */
1605 	ret = ndi_fm_handler_dispatch(dip, NULL, derr);
1606 	PCI_FM_SEV_INC(ret);
1607 	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
1608 	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
1609 }
1610 
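/*
 * Report PCI-X ECC errors for either a bridge (both ECC register sets) or a
 * non-bridge device.  Severity is derived from the ECC phase and whether the
 * error was corrected; uncorrected data-phase errors are matched against an
 * access or DMA handle via pcix_check_addr().
 */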
1611 static int
1612 pcix_ecc_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
1613     void *pe_regs)
1614 {
1615 	pcix_error_regs_t *pcix_regs;
1616 	pcix_bdg_error_regs_t *pcix_bdg_regs;
1617 	pcix_ecc_regs_t *pcix_ecc_regs;
1618 	int bridge;
1619 	int i;
1620 	int ecc_phase;
1621 	int ecc_corr;
1622 	int sec_ue;
1623 	int sec_ce;
1624 	int fatal = 0;
1625 	int nonfatal = 0;
1626 	int unknown = 0;
1627 	int ok = 0;
1628 	char buf[FM_MAX_CLASS];
1629 
1630 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
1631 		pcix_bdg_regs = (pcix_bdg_error_regs_t *)pe_regs;
1632 		bridge = 1;
1633 	} else {
1634 		pcix_regs = (pcix_error_regs_t *)pe_regs;
1635 		bridge = 0;
1636 	}
1637 
1638 	for (i = 0; i < (bridge ? 2 : 1); i++) {
1639 		int ret = DDI_FM_OK;
1640 		pcix_ecc_regs = bridge ? pcix_bdg_regs->pcix_bdg_ecc_regs[i] :
1641 		    pcix_regs->pcix_ecc_regs;
1642 		if (pcix_ecc_regs->pcix_ecc_vflags & PCIX_ERR_ECC_STS_VALID) {
1643 			ecc_phase = (pcix_ecc_regs->pcix_ecc_ctlstat &
1644 			    PCI_PCIX_ECC_PHASE) >> 0x4;
1645 			ecc_corr = (pcix_ecc_regs->pcix_ecc_ctlstat &
1646 			    PCI_PCIX_ECC_CORR);
1647 			sec_ue = (pcix_ecc_regs->pcix_ecc_ctlstat &
1648 			    PCI_PCIX_ECC_S_UE);
1649 			sec_ce = (pcix_ecc_regs->pcix_ecc_ctlstat &
1650 			    PCI_PCIX_ECC_S_CE);
1651 
1652 			switch (ecc_phase) {
1653 			case PCI_PCIX_ECC_PHASE_NOERR:
1654 				break;
1655 			case PCI_PCIX_ECC_PHASE_FADDR:
1656 			case PCI_PCIX_ECC_PHASE_SADDR:
1657 				PCI_FM_SEV_INC(ecc_corr ?  DDI_FM_OK :
1658 				    DDI_FM_FATAL);
1659 				(void) snprintf(buf, FM_MAX_CLASS,
1660 				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
1661 				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
1662 				    ecc_corr ? PCIX_ECC_CE_ADDR :
1663 				    PCIX_ECC_UE_ADDR);
1664 				break;
1665 			case PCI_PCIX_ECC_PHASE_ATTR:
1666 				PCI_FM_SEV_INC(ecc_corr ?
1667 				    DDI_FM_OK : DDI_FM_FATAL);
1668 				(void) snprintf(buf, FM_MAX_CLASS,
1669 				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
1670 				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
1671 				    ecc_corr ? PCIX_ECC_CE_ATTR :
1672 				    PCIX_ECC_UE_ATTR);
1673 				break;
1674 			case PCI_PCIX_ECC_PHASE_DATA32:
1675 			case PCI_PCIX_ECC_PHASE_DATA64:
1676 				if (ecc_corr)
1677 					ret = DDI_FM_OK;
1678 				else {
1679 					int type;
1680 					pci_error_regs_t *pci_regs =
1681 					    erpt_p->pe_pci_regs;
1682 
1683 					if (i) {
1684 						if (pci_regs->pci_bdg_regs->
1685 						    pci_bdg_sec_stat &
1686 						    PCI_STAT_S_PERROR)
1687 							type = ACC_HANDLE;
1688 						else
1689 							type = DMA_HANDLE;
1690 					} else {
1691 						if (pci_regs->pci_err_status &
1692 						    PCI_STAT_S_PERROR)
1693 							type = DMA_HANDLE;
1694 						else
1695 							type = ACC_HANDLE;
1696 					}
1697 					ret = pcix_check_addr(dip, derr,
1698 					    pcix_ecc_regs, type);
1699 				}
1700 				PCI_FM_SEV_INC(ret);
1701 
1702 				(void) snprintf(buf, FM_MAX_CLASS,
1703 				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
1704 				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
1705 				    ecc_corr ? PCIX_ECC_CE_DATA :
1706 				    PCIX_ECC_UE_DATA);
1707 				break;
1708 			}
1709 			if (ecc_phase)
1710 				if (bridge)
1711 					ddi_fm_ereport_post(dip, buf,
1712 					    derr->fme_ena,
1713 					    DDI_NOSLEEP, FM_VERSION,
1714 					    DATA_TYPE_UINT8, 0,
1715 					    PCIX_SEC_STATUS, DATA_TYPE_UINT16,
1716 					    pcix_bdg_regs->pcix_bdg_sec_stat,
1717 					    PCIX_BDG_STAT, DATA_TYPE_UINT32,
1718 					    pcix_bdg_regs->pcix_bdg_stat,
1719 					    PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32,
1720 					    pcix_ecc_regs->pcix_ecc_ctlstat,
1721 					    PCIX_ECC_ATTR, DATA_TYPE_UINT32,
1722 					    pcix_ecc_regs->pcix_ecc_attr, NULL);
1723 				else
1724 					ddi_fm_ereport_post(dip, buf,
1725 					    derr->fme_ena,
1726 					    DDI_NOSLEEP, FM_VERSION,
1727 					    DATA_TYPE_UINT8, 0,
1728 					    PCIX_COMMAND, DATA_TYPE_UINT16,
1729 					    pcix_regs->pcix_command,
1730 					    PCIX_STATUS, DATA_TYPE_UINT32,
1731 					    pcix_regs->pcix_status,
1732 					    PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32,
1733 					    pcix_ecc_regs->pcix_ecc_ctlstat,
1734 					    PCIX_ECC_ATTR, DATA_TYPE_UINT32,
1735 					    pcix_ecc_regs->pcix_ecc_attr, NULL);
1736 			if (sec_ce || sec_ue) {
1737 				(void) snprintf(buf, FM_MAX_CLASS,
1738 				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
1739 				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
1740 				    sec_ce ? PCIX_ECC_S_CE : PCIX_ECC_S_UE);
1741 				if (bridge)
1742 					ddi_fm_ereport_post(dip, buf,
1743 					    derr->fme_ena,
1744 					    DDI_NOSLEEP, FM_VERSION,
1745 					    DATA_TYPE_UINT8, 0,
1746 					    PCIX_SEC_STATUS, DATA_TYPE_UINT16,
1747 					    pcix_bdg_regs->pcix_bdg_sec_stat,
1748 					    PCIX_BDG_STAT, DATA_TYPE_UINT32,
1749 					    pcix_bdg_regs->pcix_bdg_stat,
1750 					    PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32,
1751 					    pcix_ecc_regs->pcix_ecc_ctlstat,
1752 					    PCIX_ECC_ATTR, DATA_TYPE_UINT32,
1753 					    pcix_ecc_regs->pcix_ecc_attr, NULL);
1754 				else
1755 					ddi_fm_ereport_post(dip, buf,
1756 					    derr->fme_ena,
1757 					    DDI_NOSLEEP, FM_VERSION,
1758 					    DATA_TYPE_UINT8, 0,
1759 					    PCIX_COMMAND, DATA_TYPE_UINT16,
1760 					    pcix_regs->pcix_command,
1761 					    PCIX_STATUS, DATA_TYPE_UINT32,
1762 					    pcix_regs->pcix_status,
1763 					    PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32,
1764 					    pcix_ecc_regs->pcix_ecc_ctlstat,
1765 					    PCIX_ECC_ATTR, DATA_TYPE_UINT32,
1766 					    pcix_ecc_regs->pcix_ecc_attr, NULL);
1767 				PCI_FM_SEV_INC(sec_ue ? DDI_FM_FATAL :
1768 				    DDI_FM_OK);
1769 			}
1770 		}
1771 	}
1772 	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
1773 	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
1774 }
1775 
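/*
 * Report errors logged in the PCI-X bridge status and secondary status
 * registers, then handle ECC errors if the PCI-X capability version
 * supports them.
 */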
1776 static int
1777 pcix_bdg_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
1778     void *pe_regs)
1779 {
1780 	pcix_bdg_error_regs_t *pcix_bdg_regs = (pcix_bdg_error_regs_t *)pe_regs;
1781 	int fatal = 0;
1782 	int nonfatal = 0;
1783 	int unknown = 0;
1784 	int ok = 0;
1785 	char buf[FM_MAX_CLASS];
1786 	int i;
1787 
1788 	if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_STATUS_VALID) {
1789 		for (i = 0; pcix_err_tbl[i].err_class != NULL; i++) {
1790 			if ((pcix_bdg_regs->pcix_bdg_stat &
1791 			    pcix_err_tbl[i].reg_bit)) {
1792 				(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
1793 				    PCIX_ERROR_SUBCLASS,
1794 				    pcix_err_tbl[i].err_class);
1795 				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
1796 				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
1797 				    PCIX_SEC_STATUS, DATA_TYPE_UINT16,
1798 				    pcix_bdg_regs->pcix_bdg_sec_stat,
1799 				    PCIX_BDG_STAT, DATA_TYPE_UINT32,
1800 				    pcix_bdg_regs->pcix_bdg_stat, NULL);
1801 				PCI_FM_SEV_INC(pcix_err_tbl[i].flags);
1802 			}
1803 		}
1804 	}
1805 
1806 	if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_SEC_STATUS_VALID) {
1807 		for (i = 0; pcix_sec_err_tbl[i].err_class != NULL; i++) {
1808 			if ((pcix_bdg_regs->pcix_bdg_sec_stat &
1809 			    pcix_sec_err_tbl[i].reg_bit)) {
1810 				(void) snprintf(buf, FM_MAX_CLASS, "%s.%s%s",
1811 				    PCIX_ERROR_SUBCLASS,
1812 				    PCIX_SEC_ERROR_SUBCLASS,
1813 				    pcix_sec_err_tbl[i].err_class);
1814 				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
1815 				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
1816 				    PCIX_SEC_STATUS, DATA_TYPE_UINT16,
1817 				    pcix_bdg_regs->pcix_bdg_sec_stat,
1818 				    PCIX_BDG_STAT, DATA_TYPE_UINT32,
1819 				    pcix_bdg_regs->pcix_bdg_stat, NULL);
1820 				PCI_FM_SEV_INC(pcix_sec_err_tbl[i].flags);
1821 			}
1822 		}
1823 	}
1824 
1825 	/* Log/Handle ECC errors */
1826 	if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) {
1827 		int ret;
1828 
1829 		ret = pcix_ecc_error_report(dip, derr, erpt_p,
1830 		    (void *)pcix_bdg_regs);
1831 		PCI_FM_SEV_INC(ret);
1832 	}
1833 	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
1834 	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
1835 }
1836 
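/*
 * Report errors logged in the PCI-X status register of a non-bridge
 * function, then handle ECC errors if the PCI-X capability version
 * supports them.
 */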
1837 static int
1838 pcix_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
1839 {
1840 	pcix_error_regs_t *pcix_regs = (pcix_error_regs_t *)erpt_p->pe_regs;
1841 	int fatal = 0;
1842 	int nonfatal = 0;
1843 	int unknown = 0;
1844 	int ok = 0;
1845 	char buf[FM_MAX_CLASS];
1846 	int i;
1847 
1848 	if (pcix_regs->pcix_vflags & PCIX_ERR_STATUS_VALID) {
1849 		for (i = 0; pcix_err_tbl[i].err_class != NULL; i++) {
1850 			if (!(pcix_regs->pcix_status & pcix_err_tbl[i].reg_bit))
1851 				continue;
1852 
1853 			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
1854 			    PCIX_ERROR_SUBCLASS, pcix_err_tbl[i].err_class);
1855 			ddi_fm_ereport_post(dip, buf, derr->fme_ena,
1856 			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
1857 			    PCIX_COMMAND, DATA_TYPE_UINT16,
1858 			    pcix_regs->pcix_command, PCIX_STATUS,
1859 			    DATA_TYPE_UINT32, pcix_regs->pcix_status,
1860 			    NULL);
1861 			PCI_FM_SEV_INC(pcix_err_tbl[i].flags);
1862 		}
1863 	}
1864 	/* Log/Handle ECC errors */
1865 	if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) {
1866 		int ret = pcix_ecc_error_report(dip, derr, erpt_p,
1867 		    (void *)pcix_regs);
1868 		PCI_FM_SEV_INC(ret);
1869 	}
1870 
1871 	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
1872 	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
1873 }
1874 
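/*
 * Report fatal, nonfatal and correctable error messages logged in the
 * root complex's AER root error status register.
 */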
1875 static int
1876 pcie_rc_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
1877     void *pe_regs)
1878 {
1879 	pcie_adv_error_regs_t *pcie_adv_regs = (pcie_adv_error_regs_t *)pe_regs;
1880 	int fatal = 0;
1881 	int nonfatal = 0;
1882 	int unknown = 0;
1883 	char buf[FM_MAX_CLASS];
1884 
1885 	if (pcie_adv_regs->pcie_adv_vflags & PCIE_RC_ERR_STATUS_VALID) {
1886 		pcie_adv_rc_error_regs_t *pcie_rc_regs =
1887 		    pcie_adv_regs->pcie_adv_rc_regs;
1888 		int ce, ue, mult_ce, mult_ue, first_ue_fatal, nfe, fe;
1889 
1890 		ce = pcie_rc_regs->pcie_rc_err_status &
1891 		    PCIE_AER_RE_STS_CE_RCVD;
1892 		ue = pcie_rc_regs->pcie_rc_err_status &
1893 		    PCIE_AER_RE_STS_FE_NFE_RCVD;
1894 		mult_ce = pcie_rc_regs->pcie_rc_err_status &
1895 		    PCIE_AER_RE_STS_MUL_CE_RCVD;
1896 		mult_ue = pcie_rc_regs->pcie_rc_err_status &
1897 		    PCIE_AER_RE_STS_MUL_FE_NFE_RCVD;
1898 		first_ue_fatal = pcie_rc_regs->pcie_rc_err_status &
1899 		    PCIE_AER_RE_STS_FIRST_UC_FATAL;
1900 		nfe = pcie_rc_regs->pcie_rc_err_status &
1901 		    PCIE_AER_RE_STS_NFE_MSGS_RCVD;
1902 		fe = pcie_rc_regs->pcie_rc_err_status &
1903 		    PCIE_AER_RE_STS_FE_MSGS_RCVD;
1904 		/*
1905 		 * log fatal/nonfatal/corrected messages
1906 		 * received by the root complex.
1907 		 */
1908 		if (ue && fe)
1909 			fatal++;
1910 
1911 		if (fe && first_ue_fatal) {
1912 			(void) snprintf(buf, FM_MAX_CLASS,
1913 			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_FE_MSG);
1914 			pcie_ereport_post(dip, derr, erpt_p, buf,
1915 			    PCIEX_TYPE_RC_UE_MSG);
1916 		}
1917 		if (nfe && !first_ue_fatal) {
1918 			(void) snprintf(buf, FM_MAX_CLASS,
1919 			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_NFE_MSG);
1920 			pcie_ereport_post(dip, derr, erpt_p, buf,
1921 			    PCIEX_TYPE_RC_UE_MSG);
1922 		}
1923 		if (ce) {
1924 			(void) snprintf(buf, FM_MAX_CLASS,
1925 			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_CE_MSG);
1926 			pcie_ereport_post(dip, derr, erpt_p, buf,
1927 			    PCIEX_TYPE_RC_CE_MSG);
1928 		}
1929 		if (mult_ce) {
1930 			(void) snprintf(buf, FM_MAX_CLASS,
1931 			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_MCE_MSG);
1932 			pcie_ereport_post(dip, derr, erpt_p, buf,
1933 			    PCIEX_TYPE_RC_MULT_MSG);
1934 		}
1935 		if (mult_ue) {
1936 			(void) snprintf(buf, FM_MAX_CLASS,
1937 			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_MUE_MSG);
1938 			pcie_ereport_post(dip, derr, erpt_p, buf,
1939 			    PCIEX_TYPE_RC_MULT_MSG);
1940 		}
1941 	}
1942 	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
1943 	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
1944 }
1945 
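/*
 * Report PCI Express errors: PCI-X bridge registers where present, device
 * status register errors for non-AER devices, AER uncorrectable and
 * correctable errors, root complex error messages, and secondary
 * uncorrectable errors for pciex-pci bridges.
 */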
1946 static int
1947 pcie_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
1948 {
1949 	int fatal = 0;
1950 	int nonfatal = 0;
1951 	int unknown = 0;
1952 	int ok = 0;
1953 	int type;
1954 	char buf[FM_MAX_CLASS];
1955 	int i;
1956 	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
1957 	pcie_adv_error_regs_t *pcie_adv_regs;
1958 	pcie_adv_bdg_error_regs_t *pcie_bdg_regs;
1959 
1960 	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) &&
1961 	    (erpt_p->pe_dflags & PCIX_DEV)) {
1962 		int ret = pcix_bdg_error_report(dip, derr, erpt_p,
1963 		    (void *)pcie_regs->pcix_bdg_regs);
1964 		PCI_FM_SEV_INC(ret);
1965 	}
1966 
1967 	if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV)) {
1968 		if (!(pcie_regs->pcie_vflags & PCIE_ERR_STATUS_VALID))
1969 			goto done;
1970 #if !defined(__sparc)
1971 		/*
1972 		 * On x86 ignore UR on non-RBER leaf devices, pciex-pci
1973 		 * bridges and switches.
1974 		 */
1975 		if ((pcie_regs->pcie_err_status & PCIE_DEVSTS_UR_DETECTED) &&
1976 		    !(pcie_regs->pcie_err_status & PCIE_DEVSTS_FE_DETECTED) &&
1977 		    ((erpt_p->pe_dflags & (PCIEX_2PCI_DEV|PCIEX_SWITCH_DEV)) ||
1978 		    !(erpt_p->pe_dflags & PCI_BRIDGE_DEV)) &&
1979 		    !(pcie_regs->pcie_dev_cap & PCIE_DEVCAP_ROLE_BASED_ERR_REP))
1980 			goto done;
1981 #endif
1982 		for (i = 0; pciex_nadv_err_tbl[i].err_class != NULL; i++) {
1983 			if (!(pcie_regs->pcie_err_status &
1984 			    pciex_nadv_err_tbl[i].reg_bit))
1985 				continue;
1986 
1987 			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
1988 			    PCIEX_ERROR_SUBCLASS,
1989 			    pciex_nadv_err_tbl[i].err_class);
1990 			pcie_ereport_post(dip, derr, erpt_p, buf,
1991 			    PCIEX_TYPE_GEN);
1992 			PCI_FM_SEV_INC(pciex_nadv_err_tbl[i].flags);
1993 		}
1994 		goto done;
1995 	}
1996 
1997 	pcie_adv_regs = pcie_regs->pcie_adv_regs;
1998 
1999 	/*
2000 	 * Log PCI Express uncorrectable errors
2001 	 */
2002 	if (pcie_adv_regs->pcie_adv_vflags & PCIE_UE_STATUS_VALID) {
2003 		for (i = 0; pciex_ue_err_tbl[i].err_class != NULL; i++) {
2004 			if (!(pcie_adv_regs->pcie_ue_status &
2005 			    pciex_ue_err_tbl[i].reg_bit))
2006 				continue;
2007 
2008 			(void) snprintf(buf, FM_MAX_CLASS,
2009 			    "%s.%s", PCIEX_ERROR_SUBCLASS,
2010 			    pciex_ue_err_tbl[i].err_class);
2011 
2012 			/*
2013 			 * First check for advisory nonfatal conditions:
2014 			 * - hardware endpoint successfully retrying a cto
2015 			 * - hardware endpoint receiving a poisoned tlp and
2016 			 *   dealing with it itself (but not if a root complex)
2017 			 * If the device has declared these as correctable
2018 			 * errors then treat them as such.
2019 			 */
2020 			if ((pciex_ue_err_tbl[i].reg_bit == PCIE_AER_UCE_TO ||
2021 			    (pciex_ue_err_tbl[i].reg_bit == PCIE_AER_UCE_PTLP &&
2022 			    !(erpt_p->pe_dflags & PCIEX_RC_DEV))) &&
2023 			    (pcie_regs->pcie_err_status &
2024 			    PCIE_DEVSTS_CE_DETECTED) &&
2025 			    !(pcie_regs->pcie_err_status &
2026 			    PCIE_DEVSTS_NFE_DETECTED)) {
2027 				pcie_ereport_post(dip, derr, erpt_p, buf,
2028 				    PCIEX_TYPE_UE);
2029 				continue;
2030 			}
2031 
2032 #if !defined(__sparc)
2033 			/*
2034 			 * On x86 for leaf devices and pciex-pci bridges,
2035 			 * ignore UR on non-RBER devices or on RBER devices when
2036 			 * advisory nonfatal.
2037 			 */
2038 			if (pciex_ue_err_tbl[i].reg_bit == PCIE_AER_UCE_UR &&
2039 			    ((erpt_p->pe_dflags &
2040 			    (PCIEX_2PCI_DEV|PCIEX_SWITCH_DEV)) ||
2041 			    !(erpt_p->pe_dflags & PCI_BRIDGE_DEV))) {
2042 				if (!(pcie_regs->pcie_dev_cap &
2043 				    PCIE_DEVCAP_ROLE_BASED_ERR_REP))
2044 					continue;
2045 				if (!(pcie_regs->pcie_err_status &
2046 				    PCIE_DEVSTS_NFE_DETECTED))
2047 					continue;
2048 			}
2049 #endif
2050 			pcie_adv_regs->pcie_adv_bdf = 0;
2051 			/*
2052 			 * Now try to look up the handle if
2053 			 * - error bit is among PCIE_AER_UCE_LOG_BITS, and
2054 			 * - no other PCIE_AER_UCE_LOG_BITS are set, and
2055 			 * - error bit is not masked, and
2056 			 * - flag is DDI_FM_UNKNOWN
2057 			 */
2058 			if ((pcie_adv_regs->pcie_ue_status &
2059 			    pcie_aer_uce_log_bits) ==
2060 			    pciex_ue_err_tbl[i].reg_bit &&
2061 			    !(pciex_ue_err_tbl[i].reg_bit &
2062 			    pcie_adv_regs->pcie_ue_mask) &&
2063 			    pciex_ue_err_tbl[i].flags == DDI_FM_UNKNOWN)
2064 				pcie_check_addr(dip, derr, erpt_p);
2065 
2066 			PCI_FM_SEV_INC(pciex_ue_err_tbl[i].flags);
2067 			pcie_ereport_post(dip, derr, erpt_p, buf,
2068 			    PCIEX_TYPE_UE);
2069 		}
2070 	}
2071 
2072 	/*
2073 	 * Log PCI Express correctable errors
2074 	 */
2075 	if (pcie_adv_regs->pcie_adv_vflags & PCIE_CE_STATUS_VALID) {
2076 		for (i = 0; pciex_ce_err_tbl[i].err_class != NULL; i++) {
2077 			if (!(pcie_adv_regs->pcie_ce_status &
2078 			    pciex_ce_err_tbl[i].reg_bit))
2079 				continue;
2080 
2081 			(void) snprintf(buf, FM_MAX_CLASS,
2082 			    "%s.%s", PCIEX_ERROR_SUBCLASS,
2083 			    pciex_ce_err_tbl[i].err_class);
2084 			pcie_ereport_post(dip, derr, erpt_p, buf,
2085 			    PCIEX_TYPE_CE);
2086 		}
2087 	}
2088 
2089 	if (!(erpt_p->pe_dflags & PCI_BRIDGE_DEV))
2090 		goto done;
2091 
2092 	if (erpt_p->pe_dflags & PCIEX_RC_DEV) {
2093 		int ret = pcie_rc_error_report(dip, derr, erpt_p,
2094 		    (void *)pcie_adv_regs);
2095 		PCI_FM_SEV_INC(ret);
2096 	}
2097 
2098 	if (!((erpt_p->pe_dflags & PCIEX_2PCI_DEV) &&
2099 	    (pcie_adv_regs->pcie_adv_vflags & PCIE_SUE_STATUS_VALID)))
2100 		goto done;
2101 
2102 	pcie_bdg_regs = pcie_adv_regs->pcie_adv_bdg_regs;
2103 
2104 	for (i = 0; pcie_sue_err_tbl[i].err_class != NULL; i++) {
2105 		if ((pcie_bdg_regs->pcie_sue_status &
2106 		    pcie_sue_err_tbl[i].reg_bit)) {
2107 			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
2108 			    PCIEX_ERROR_SUBCLASS,
2109 			    pcie_sue_err_tbl[i].err_class);
2110 
2111 			if ((pcie_bdg_regs->pcie_sue_status &
2112 			    pcie_aer_suce_log_bits) !=
2113 			    pcie_sue_err_tbl[i].reg_bit ||
2114 			    pcie_sue_err_tbl[i].flags != DDI_FM_UNKNOWN) {
2115 				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
2116 				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
2117 				    PCIEX_SEC_UE_STATUS, DATA_TYPE_UINT32,
2118 				    pcie_bdg_regs->pcie_sue_status,
2119 #ifdef DEBUG
2120 				    PCIEX_SUE_HDR0, DATA_TYPE_UINT32,
2121 				    pcie_bdg_regs->pcie_sue_hdr0,
2122 				    PCIEX_SUE_HDR1, DATA_TYPE_UINT32,
2123 				    pcie_bdg_regs->pcie_sue_hdr[0],
2124 				    PCIEX_SUE_HDR2, DATA_TYPE_UINT32,
2125 				    pcie_bdg_regs->pcie_sue_hdr[1],
2126 				    PCIEX_SUE_HDR3, DATA_TYPE_UINT32,
2127 				    pcie_bdg_regs->pcie_sue_hdr[2],
2128 #endif
2129 				    NULL);
2130 			} else {
2131 				pcie_adv_regs->pcie_adv_bdf = 0;
2132 				switch (pcie_sue_err_tbl[i].reg_bit) {
2133 				case PCIE_AER_SUCE_RCVD_TA:
2134 				case PCIE_AER_SUCE_RCVD_MA:
2135 				case PCIE_AER_SUCE_USC_ERR:
2136 					type = ACC_HANDLE;
2137 					break;
2138 				case PCIE_AER_SUCE_TA_ON_SC:
2139 				case PCIE_AER_SUCE_MA_ON_SC:
2140 					type = DMA_HANDLE;
2141 					break;
2142 				case PCIE_AER_SUCE_UC_DATA_ERR:
2143 				case PCIE_AER_SUCE_PERR_ASSERT:
2144 					if (erpt_p->pe_pci_regs->pci_bdg_regs->
2145 					    pci_bdg_sec_stat &
2146 					    PCI_STAT_S_PERROR)
2147 						type = ACC_HANDLE;
2148 					else
2149 						type = DMA_HANDLE;
2150 					break;
2151 				}
2152 				pcie_pci_check_addr(dip, derr, erpt_p, type);
2153 				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
2154 				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
2155 				    PCIEX_SEC_UE_STATUS, DATA_TYPE_UINT32,
2156 				    pcie_bdg_regs->pcie_sue_status,
2157 				    PCIEX_SRC_ID, DATA_TYPE_UINT16,
2158 				    pcie_adv_regs->pcie_adv_bdf,
2159 				    PCIEX_SRC_VALID, DATA_TYPE_BOOLEAN_VALUE,
2160 				    (pcie_adv_regs->pcie_adv_bdf != 0) ?
2161 				    1 : 0,
2162 #ifdef DEBUG
2163 				    PCIEX_SUE_HDR0, DATA_TYPE_UINT32,
2164 				    pcie_bdg_regs->pcie_sue_hdr0,
2165 				    PCIEX_SUE_HDR1, DATA_TYPE_UINT32,
2166 				    pcie_bdg_regs->pcie_sue_hdr[0],
2167 				    PCIEX_SUE_HDR2, DATA_TYPE_UINT32,
2168 				    pcie_bdg_regs->pcie_sue_hdr[1],
2169 				    PCIEX_SUE_HDR3, DATA_TYPE_UINT32,
2170 				    pcie_bdg_regs->pcie_sue_hdr[2],
2171 #endif
2172 				    NULL);
2173 			}
2174 			PCI_FM_SEV_INC(pcie_sue_err_tbl[i].flags);
2175 		}
2176 	}
2177 done:
2178 	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
2179 	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
2180 }
2181 
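/*
 * Top-level error reporting: post generic PCI status ereports, dispatch to
 * the PCI Express or PCI-X reporting routines, handle bridge errors, then
 * try to associate the error with an access or DMA handle and record the
 * aggregate severity in derr->fme_status.
 */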
2182 static void
2183 pci_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
2184 {
2185 	int fatal = 0;
2186 	int nonfatal = 0;
2187 	int unknown = 0;
2188 	int ok = 0;
2189 	char buf[FM_MAX_CLASS];
2190 	int i;
2191 
2192 	if (derr->fme_flag == DDI_FM_ERR_UNEXPECTED) {
2193 		/*
2194 		 * Log generic PCI errors.
2195 		 */
2196 		for (i = 0; pci_err_tbl[i].err_class != NULL; i++) {
2197 			if (!(erpt_p->pe_pci_regs->pci_err_status &
2198 			    pci_err_tbl[i].reg_bit) ||
2199 			    !(erpt_p->pe_pci_regs->pci_vflags &
2200 			    PCI_ERR_STATUS_VALID))
2201 				continue;
2202 			/*
2203 			 * Generate an ereport for this error bit.
2204 			 */
2205 			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
2206 			    PCI_ERROR_SUBCLASS, pci_err_tbl[i].err_class);
2207 			ddi_fm_ereport_post(dip, buf, derr->fme_ena,
2208 			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
2209 			    PCI_CONFIG_STATUS, DATA_TYPE_UINT16,
2210 			    erpt_p->pe_pci_regs->pci_err_status,
2211 			    PCI_CONFIG_COMMAND, DATA_TYPE_UINT16,
2212 			    erpt_p->pe_pci_regs->pci_cfg_comm, NULL);
2213 
2214 			/*
2215 			 * The meaning of SERR is different for PCIEX (just
2216 			 * implies a message has been sent) so we don't want to
2217 			 * treat that one as fatal.
2218 			 */
2219 			if ((erpt_p->pe_dflags & PCIEX_DEV) &&
2220 			    pci_err_tbl[i].reg_bit == PCI_STAT_S_SYSERR) {
2221 				unknown++;
2222 			} else {
2223 				PCI_FM_SEV_INC(pci_err_tbl[i].flags);
2224 			}
2225 		}
2226 		if (erpt_p->pe_dflags & PCIEX_DEV) {
2227 			int ret = pcie_error_report(dip, derr, erpt_p);
2228 			PCI_FM_SEV_INC(ret);
2229 		} else if (erpt_p->pe_dflags & PCIX_DEV) {
2230 			if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
2231 				int ret = pcix_bdg_error_report(dip, derr,
2232 				    erpt_p, erpt_p->pe_regs);
2233 				PCI_FM_SEV_INC(ret);
2234 			} else {
2235 				int ret = pcix_error_report(dip, derr, erpt_p);
2236 				PCI_FM_SEV_INC(ret);
2237 			}
2238 		}
2239 	}
2240 
2241 	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV)) {
2242 		int ret = pci_bdg_error_report(dip, derr, erpt_p);
2243 		PCI_FM_SEV_INC(ret);
2244 	}
2245 
2246 	if (derr->fme_flag == DDI_FM_ERR_UNEXPECTED) {
2247 		pci_fme_bus_specific_t *pci_fme_bsp;
2248 		int ret = DDI_FM_UNKNOWN;
2249 
2250 		pci_fme_bsp = (pci_fme_bus_specific_t *)derr->fme_bus_specific;
2251 		if (pci_fme_bsp->pci_bs_flags & PCI_BS_ADDR_VALID) {
2252 			ret = ndi_fmc_entry_error(dip,
2253 			    pci_fme_bsp->pci_bs_type, derr,
2254 			    (void *)&pci_fme_bsp->pci_bs_addr);
2255 			PCI_FM_SEV_INC(ret);
2256 		}
2257 		/*
2258 		 * If we didn't find the handle using an addr, try using bdf.
2259 		 * Note we don't do this where the bdf is for a
2260 		 * device behind a pciex/pci bridge as the bridge may have
2261 		 * fabricated the bdf.
2262 		 */
2263 		if (ret == DDI_FM_UNKNOWN &&
2264 		    (pci_fme_bsp->pci_bs_flags & PCI_BS_BDF_VALID) &&
2265 		    pci_fme_bsp->pci_bs_bdf == erpt_p->pe_bdf &&
2266 		    (erpt_p->pe_dflags & PCIEX_DEV) &&
2267 		    !(erpt_p->pe_dflags & PCIEX_2PCI_DEV)) {
2268 			ret = ndi_fmc_entry_error_all(dip,
2269 			    pci_fme_bsp->pci_bs_type, derr);
2270 			PCI_FM_SEV_INC(ret);
2271 		}
2272 	}
2273 
2274 	derr->fme_status = (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
2275 	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
2276 }
2277 
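/*
 * Public entry point: gathers the PCI/PCI-X/PCI Express error registers,
 * posts the appropriate ereports and clears the error status registers.
 * The caller's ddi_fm_error_t is copied in case it is an older version,
 * and fme_bus_specific is converted to PCI form if necessary.
 */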
2278 void
2279 pci_ereport_post(dev_info_t *dip, ddi_fm_error_t *derr, uint16_t *xx_status)
2280 {
2281 	struct i_ddi_fmhdl *fmhdl;
2282 	pci_erpt_t *erpt_p;
2283 	ddi_fm_error_t de;
2284 	pci_fme_bus_specific_t pci_fme_bs;
2285 
2286 	fmhdl = DEVI(dip)->devi_fmhdl;
2287 	if (!DDI_FM_EREPORT_CAP(ddi_fm_capable(dip)) &&
2288 	    !DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) {
2289 		i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL, DDI_NOSLEEP);
2290 		return;
2291 	}
2292 
2293 	/*
2294 	 * copy in the ddi_fm_error_t structure in case it's VER0
2295 	 */
2296 	de.fme_version = derr->fme_version;
2297 	de.fme_status = derr->fme_status;
2298 	de.fme_flag = derr->fme_flag;
2299 	de.fme_ena = derr->fme_ena;
2300 	de.fme_acc_handle = derr->fme_acc_handle;
2301 	de.fme_dma_handle = derr->fme_dma_handle;
2302 	de.fme_bus_specific = derr->fme_bus_specific;
2303 	if (derr->fme_version >= DDI_FME_VER1)
2304 		de.fme_bus_type = derr->fme_bus_type;
2305 	else
2306 		de.fme_bus_type = DDI_FME_BUS_TYPE_DFLT;
2307 	if (de.fme_bus_type == DDI_FME_BUS_TYPE_DFLT) {
2308 		/*
2309 		 * If this is the first pci device we've found, convert
2310 		 * fme_bus_specific to DDI_FME_BUS_TYPE_PCI.
2311 		 */
2312 		bzero(&pci_fme_bs, sizeof (pci_fme_bs));
2313 		if (de.fme_bus_specific) {
2314 			/*
2315 			 * the cpu passed us an addr - this can be used to look
2316 			 * up an access handle
2317 			 */
2318 			pci_fme_bs.pci_bs_addr = (uintptr_t)de.fme_bus_specific;
2319 			pci_fme_bs.pci_bs_type = ACC_HANDLE;
2320 			pci_fme_bs.pci_bs_flags |= PCI_BS_ADDR_VALID;
2321 		}
2322 		de.fme_bus_specific = (void *)&pci_fme_bs;
2323 		de.fme_bus_type = DDI_FME_BUS_TYPE_PCI;
2324 	}
2325 
2326 	ASSERT(fmhdl);
2327 
2328 	if (de.fme_ena == 0)
2329 		de.fme_ena = fm_ena_generate(0, FM_ENA_FMT1);
2330 
2331 	erpt_p = (pci_erpt_t *)fmhdl->fh_bus_specific;
2332 	if (erpt_p == NULL)
2333 		return;
2334 
2335 	pci_regs_gather(dip, erpt_p, de.fme_flag);
2336 	pci_error_report(dip, &de, erpt_p);
2337 	pci_regs_clear(erpt_p);
2338 
2339 	derr->fme_status = de.fme_status;
2340 	derr->fme_ena = de.fme_ena;
2341 	derr->fme_acc_handle = de.fme_acc_handle;
2342 	derr->fme_dma_handle = de.fme_dma_handle;
2343 	if (xx_status != NULL)
2344 		*xx_status = erpt_p->pe_pci_regs->pci_err_status;
2345 }
2346 
2347 /*
2348  * private version of walk_devs() that can be used during panic. No
2349  * sleeping or locking required.
2350  */
2351 static int
2352 pci_fm_walk_devs(dev_info_t *dip, int (*f)(dev_info_t *, void *), void *arg)
2353 {
2354 	while (dip) {
2355 		switch ((*f)(dip, arg)) {
2356 		case DDI_WALK_TERMINATE:
2357 			return (DDI_WALK_TERMINATE);
2358 		case DDI_WALK_CONTINUE:
2359 			if (pci_fm_walk_devs(ddi_get_child(dip), f,
2360 			    arg) == DDI_WALK_TERMINATE)
2361 				return (DDI_WALK_TERMINATE);
2362 			break;
2363 		case DDI_WALK_PRUNECHILD:
2364 			break;
2365 		}
2366 		dip = ddi_get_next_sibling(dip);
2367 	}
2368 	return (DDI_WALK_CONTINUE);
2369 }
2370 
2371 /*
2372  * We need a special version of ddi_fm_ereport_post() as the leaf driver
2373  * may not be hardened.
2374  */
2375 static void
2376 pci_fm_ereport_post(dev_info_t *dip, const char *error_class, uint64_t ena,
2377     uint8_t version, ...)
2378 {
2379 	char *name;
2380 	char device_path[MAXPATHLEN];
2381 	char ddi_error_class[FM_MAX_CLASS];
2382 	nvlist_t *ereport, *detector;
2383 	nv_alloc_t *nva;
2384 	errorq_elem_t *eqep;
2385 	va_list ap;
2386 
2387 	if (panicstr) {
2388 		eqep = errorq_reserve(ereport_errorq);
2389 		if (eqep == NULL)
2390 			return;
2391 		ereport = errorq_elem_nvl(ereport_errorq, eqep);
2392 		nva = errorq_elem_nva(ereport_errorq, eqep);
2393 		detector = fm_nvlist_create(nva);
2394 	} else {
2395 		ereport = fm_nvlist_create(NULL);
2396 		detector = fm_nvlist_create(NULL);
2397 	}
2398 
2399 	(void) ddi_pathname(dip, device_path);
2400 	fm_fmri_dev_set(detector, FM_DEV_SCHEME_VERSION, NULL,
2401 	    device_path, NULL);
2402 	(void) snprintf(ddi_error_class, FM_MAX_CLASS, "%s.%s",
2403 	    DDI_IO_CLASS, error_class);
2404 	fm_ereport_set(ereport, version, ddi_error_class, ena, detector, NULL);
2405 
2406 	va_start(ap, version);
2407 	name = va_arg(ap, char *);
2408 	(void) i_fm_payload_set(ereport, name, ap);
2409 	va_end(ap);
2410 
2411 	if (panicstr) {
2412 		errorq_commit(ereport_errorq, eqep, ERRORQ_SYNC);
2413 	} else {
2414 		(void) fm_ereport_post(ereport, EVCH_TRYHARD);
2415 		fm_nvlist_destroy(ereport, FM_NVA_FREE);
2416 		fm_nvlist_destroy(detector, FM_NVA_FREE);
2417 	}
2418 }
2419 
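/*
 * Tree-walk callback: check whether the pci address captured in tgt_err
 * falls within one of this device's "reg" or "assigned-addresses" entries.
 * If so, record the dip and terminate the walk.
 */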
2420 static int
2421 pci_check_regs(dev_info_t *dip, void *arg)
2422 {
2423 	int reglen;
2424 	int rn;
2425 	int totreg;
2426 	pci_regspec_t *drv_regp;
2427 	pci_target_err_t *tgt_err = (pci_target_err_t *)arg;
2428 
2429 	if (tgt_err->tgt_pci_space == PCI_REG_ADDR_G(PCI_ADDR_CONFIG)) {
2430 		/*
2431 		 * For config space, we need to check if the given address
2432 		 * is a valid config space address for this device - based
2433 		 * on pci_phys_hi of the config space entry in the "reg"
2434 		 * property.
2434 		 */
2435 		if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
2436 		    "reg", (caddr_t)&drv_regp, &reglen) != DDI_SUCCESS)
2437 			return (DDI_WALK_CONTINUE);
2438 
2439 		totreg = reglen / sizeof (pci_regspec_t);
2440 		for (rn = 0; rn < totreg; rn++) {
2441 			if (tgt_err->tgt_pci_space ==
2442 			    PCI_REG_ADDR_G(drv_regp[rn].pci_phys_hi) &&
2443 			    (tgt_err->tgt_pci_addr & (PCI_REG_BUS_M |
2444 			    PCI_REG_DEV_M | PCI_REG_FUNC_M)) ==
2445 			    (drv_regp[rn].pci_phys_hi & (PCI_REG_BUS_M |
2446 			    PCI_REG_DEV_M | PCI_REG_FUNC_M))) {
2447 				tgt_err->tgt_dip = dip;
2448 				kmem_free(drv_regp, reglen);
2449 				return (DDI_WALK_TERMINATE);
2450 			}
2451 		}
2452 		kmem_free(drv_regp, reglen);
2453 	} else {
2454 		/*
2455 		 * For non-config space, we need to check "reg" for any
2456 		 * non-relocatable mapping, otherwise check
2457 		 * "assigned-addresses".
2458 		 */
2459 		if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
2460 		    "reg", (caddr_t)&drv_regp, &reglen) != DDI_SUCCESS)
2461 			return (DDI_WALK_CONTINUE);
2462 
2463 		totreg = reglen / sizeof (pci_regspec_t);
2464 		for (rn = 0; rn < totreg; rn++) {
2465 			if ((drv_regp[rn].pci_phys_hi & PCI_RELOCAT_B) &&
2466 			    (tgt_err->tgt_pci_space == TGT_PCI_SPACE_UNKNOWN ||
2467 			    tgt_err->tgt_pci_space ==
2468 			    PCI_REG_ADDR_G(drv_regp[rn].pci_phys_hi)) &&
2469 			    (tgt_err->tgt_pci_addr >=
2470 			    (uint64_t)drv_regp[rn].pci_phys_low +
2471 			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32)) &&
2472 			    (tgt_err->tgt_pci_addr <
2473 			    (uint64_t)drv_regp[rn].pci_phys_low +
2474 			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32) +
2475 			    (uint64_t)drv_regp[rn].pci_size_low +
2476 			    ((uint64_t)drv_regp[rn].pci_size_hi << 32))) {
2477 				tgt_err->tgt_dip = dip;
2478 				kmem_free(drv_regp, reglen);
2479 				return (DDI_WALK_TERMINATE);
2480 			}
2481 		}
2482 		kmem_free(drv_regp, reglen);
2483 
2484 		if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
2485 		    "assigned-addresses", (caddr_t)&drv_regp, &reglen) !=
2486 		    DDI_SUCCESS)
2487 			return (DDI_WALK_CONTINUE);
2488 
2489 		totreg = reglen / sizeof (pci_regspec_t);
2490 		for (rn = 0; rn < totreg; rn++) {
2491 			if ((tgt_err->tgt_pci_space == TGT_PCI_SPACE_UNKNOWN ||
2492 			    tgt_err->tgt_pci_space ==
2493 			    PCI_REG_ADDR_G(drv_regp[rn].pci_phys_hi)) &&
2494 			    (tgt_err->tgt_pci_addr >=
2495 			    (uint64_t)drv_regp[rn].pci_phys_low +
2496 			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32)) &&
2497 			    (tgt_err->tgt_pci_addr <
2498 			    (uint64_t)drv_regp[rn].pci_phys_low +
2499 			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32) +
2500 			    (uint64_t)drv_regp[rn].pci_size_low +
2501 			    ((uint64_t)drv_regp[rn].pci_size_hi << 32))) {
2502 				tgt_err->tgt_dip = dip;
2503 				kmem_free(drv_regp, reglen);
2504 				return (DDI_WALK_TERMINATE);
2505 			}
2506 		}
2507 		kmem_free(drv_regp, reglen);
2508 	}
2509 	return (DDI_WALK_CONTINUE);
2510 }
2511 
2512 /*
2513  * pci_fix_ranges - fixes the config space entry of the "ranges"
2514  * property on psycho+ platforms.  (If changing this function, please make
2515  * sure to also change the pci_fix_ranges function in pcipsy.c.)
2516  */
2517 /*ARGSUSED*/
2518 static void
2519 pci_fix_ranges(dev_info_t *dip, pci_ranges_t *pci_ranges, int nrange)
2520 {
2521 #if defined(__sparc)
2522 	char *name = ddi_binding_name(dip);
2523 
2524 	if ((strcmp(name, "pci108e,8000") == 0) ||
2525 	    (strcmp(name, "pci108e,a000") == 0) ||
2526 	    (strcmp(name, "pci108e,a001") == 0)) {
2527 		int i;
2528 		for (i = 0; i < nrange; i++, pci_ranges++)
2529 			if ((pci_ranges->child_high & PCI_REG_ADDR_M) ==
2530 			    PCI_ADDR_CONFIG)
2531 				pci_ranges->parent_low |=
2532 				    pci_ranges->child_high;
2533 	}
2534 #endif
2535 }
2536 
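/*
 * Tree-walk callback applied to top-level pci/pciex nexus nodes: translate
 * the captured error address to a pci address using the "ranges" property
 * (checking "bus-range" for config space), then walk the children with
 * pci_check_regs() to find the target device.
 */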
2537 static int
2538 pci_check_ranges(dev_info_t *dip, void *arg)
2539 {
2540 	uint64_t range_parent_begin;
2541 	uint64_t range_parent_size;
2542 	uint64_t range_parent_end;
2543 	uint32_t space_type;
2544 	uint32_t bus_num;
2545 	uint32_t range_offset;
2546 	pci_ranges_t *pci_ranges, *rangep;
2547 	pci_bus_range_t *pci_bus_rangep;
2548 	int pci_ranges_length;
2549 	int nrange;
2550 	pci_target_err_t *tgt_err = (pci_target_err_t *)arg;
2551 	int i, size;
2552 	if (strcmp(ddi_node_name(dip), "pci") != 0 &&
2553 	    strcmp(ddi_node_name(dip), "pciex") != 0)
2554 		return (DDI_WALK_CONTINUE);
2555 
2556 	/*
2557 	 * Get the ranges property. Note we only look at the top level pci
2558 	 * node (hostbridge), which has a ranges property of type
2559 	 * pci_ranges_t, not at pci-pci bridges.
2560 	 */
2561 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "ranges",
2562 	    (caddr_t)&pci_ranges, &pci_ranges_length) != DDI_SUCCESS) {
2563 		/*
2564 		 * no ranges property - no translation needed
2565 		 */
2566 		tgt_err->tgt_pci_addr = tgt_err->tgt_err_addr;
2567 		tgt_err->tgt_pci_space = TGT_PCI_SPACE_UNKNOWN;
2568 		if (panicstr)
2569 			(void) pci_fm_walk_devs(ddi_get_child(dip),
2570 			    pci_check_regs, (void *)tgt_err);
2571 		else {
2572 			int circ = 0;
2573 			ndi_devi_enter(dip, &circ);
2574 			ddi_walk_devs(ddi_get_child(dip), pci_check_regs,
2575 			    (void *)tgt_err);
2576 			ndi_devi_exit(dip, circ);
2577 		}
2578 		if (tgt_err->tgt_dip != NULL)
2579 			return (DDI_WALK_TERMINATE);
2580 		return (DDI_WALK_PRUNECHILD);
2581 	}
2582 	nrange = pci_ranges_length / sizeof (pci_ranges_t);
2583 	rangep = pci_ranges;
2584 
2585 	/* Need to fix the pci ranges property for psycho based systems */
2586 	pci_fix_ranges(dip, pci_ranges, nrange);
2587 
2588 	for (i = 0; i < nrange; i++, rangep++) {
2589 		range_parent_begin = ((uint64_t)rangep->parent_high << 32) +
2590 		    rangep->parent_low;
2591 		range_parent_size = ((uint64_t)rangep->size_high << 32) +
2592 		    rangep->size_low;
2593 		range_parent_end = range_parent_begin + range_parent_size - 1;
2594 
2595 		if ((tgt_err->tgt_err_addr < range_parent_begin) ||
2596 		    (tgt_err->tgt_err_addr > range_parent_end)) {
2597 			/* Not in range */
2598 			continue;
2599 		}
2600 		space_type = PCI_REG_ADDR_G(rangep->child_high);
2601 		if (space_type == PCI_REG_ADDR_G(PCI_ADDR_CONFIG)) {
2602 			/* Config space address - check bus range */
2603 			range_offset = tgt_err->tgt_err_addr -
2604 			    range_parent_begin;
2605 			bus_num = PCI_REG_BUS_G(range_offset);
2606 			if (ddi_getlongprop(DDI_DEV_T_ANY, dip,
2607 			    DDI_PROP_DONTPASS, "bus-range",
2608 			    (caddr_t)&pci_bus_rangep, &size) != DDI_SUCCESS) {
2609 				continue;
2610 			}
2611 			if ((bus_num < pci_bus_rangep->lo) ||
2612 			    (bus_num > pci_bus_rangep->hi)) {
2613 				/*
2614 				 * Bus number not appropriate for this
2615 				 * pci nexus.
2616 				 */
2617 				kmem_free(pci_bus_rangep, size);
2618 				continue;
2619 			}
2620 			kmem_free(pci_bus_rangep, size);
2621 		}
2622 
2623 		/* We have a match if we get here - compute pci address */
2624 		tgt_err->tgt_pci_addr = tgt_err->tgt_err_addr -
2625 		    range_parent_begin;
2626 		tgt_err->tgt_pci_addr += (((uint64_t)rangep->child_mid << 32) +
2627 		    rangep->child_low);
2628 		tgt_err->tgt_pci_space = space_type;
2629 		if (panicstr)
2630 			(void) pci_fm_walk_devs(ddi_get_child(dip),
2631 			    pci_check_regs, (void *)tgt_err);
2632 		else {
2633 			int circ = 0;
2634 			ndi_devi_enter(dip, &circ);
2635 			ddi_walk_devs(ddi_get_child(dip), pci_check_regs,
2636 			    (void *)tgt_err);
2637 			ndi_devi_exit(dip, circ);
2638 		}
2639 		if (tgt_err->tgt_dip != NULL) {
2640 			kmem_free(pci_ranges, pci_ranges_length);
2641 			return (DDI_WALK_TERMINATE);
2642 		}
2643 	}
2644 	kmem_free(pci_ranges, pci_ranges_length);
2645 	return (DDI_WALK_PRUNECHILD);
2646 }
2647 
2648 /*
2649  * Function used to drain pci_target_queue, either during panic or after a
2650  * softint is generated, to generate target device ereports based on
2651  * captured physical addresses.
2652  */
2653 /*ARGSUSED*/
2654 static void
2655 pci_target_drain(void *private_p, pci_target_err_t *tgt_err)
2656 {
2657 	char buf[FM_MAX_CLASS];
2658 
2659 	/*
2660 	 * The following assumes that all pci-pci bridge devices
2661 	 * are configured as transparent. Find the top-level pci
2662 	 * nexus which has tgt_err_addr in one of its ranges, converting this
2663 	 * to a pci address in the process. Then, starting at this node, do
2664 	 * another tree walk to find a device with the pci address we've
2665 	 * found within range of one of its assigned-addresses properties.
2666 	 */
2667 	tgt_err->tgt_dip = NULL;
2668 	if (panicstr)
2669 		(void) pci_fm_walk_devs(ddi_root_node(), pci_check_ranges,
2670 		    (void *)tgt_err);
2671 	else
2672 		ddi_walk_devs(ddi_root_node(), pci_check_ranges,
2673 		    (void *)tgt_err);
2674 	if (tgt_err->tgt_dip == NULL)
2675 		return;
2676 
2677 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", tgt_err->tgt_bridge_type,
2678 	    tgt_err->tgt_err_class);
2679 	pci_fm_ereport_post(tgt_err->tgt_dip, buf, tgt_err->tgt_err_ena, 0,
2680 	    PCI_PA, DATA_TYPE_UINT64, tgt_err->tgt_err_addr, NULL);
2681 }
2682 
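/*
 * Queue a target error (ENA, error class, bridge type and address) on
 * pci_target_queue for asynchronous ereport generation by pci_target_drain().
 */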
2683 void
2684 pci_target_enqueue(uint64_t ena, char *class, char *bridge_type, uint64_t addr)
2685 {
2686 	pci_target_err_t tgt_err;
2687 
2688 	tgt_err.tgt_err_ena = ena;
2689 	tgt_err.tgt_err_class = class;
2690 	tgt_err.tgt_bridge_type = bridge_type;
2691 	tgt_err.tgt_err_addr = addr;
2692 	errorq_dispatch(pci_target_queue, (void *)&tgt_err,
2693 	    sizeof (pci_target_err_t), ERRORQ_ASYNC);
2694 }
2695 
2696 void
2697 pci_targetq_init(void)
2698 {
2699 	/*
2700 	 * PCI target errorq, used to schedule asynchronous generation of
2701 	 * target device ereports based on captured physical addresses.
2702 	 * The errorq is created here but destroyed when _fini is called
2703 	 * for the pci module.
2704 	 */
2705 	if (pci_target_queue == NULL) {
2706 		pci_target_queue = errorq_create("pci_target_queue",
2707 		    (errorq_func_t)pci_target_drain, (void *)NULL,
2708 		    TARGET_MAX_ERRS, sizeof (pci_target_err_t), FM_ERR_PIL,
2709 		    ERRORQ_VITAL);
2710 		if (pci_target_queue == NULL)
2711 			panic("failed to create required system error queue");
2712 	}
2713 }
2714