xref: /illumos-gate/usr/src/uts/common/os/pcifm.c (revision 5c1d0199d69216ebefa9ed86940778f6d215a97f)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/types.h>
30 #include <sys/sunndi.h>
31 #include <sys/sysmacros.h>
32 #include <sys/ddifm_impl.h>
33 #include <sys/fm/util.h>
34 #include <sys/fm/protocol.h>
35 #include <sys/fm/io/pci.h>
36 #include <sys/fm/io/ddi.h>
37 #include <sys/pci.h>
38 #include <sys/pcie.h>
39 #include <sys/pci_impl.h>
40 #include <sys/epm.h>
41 #include <sys/pcifm.h>
42 
/*
 * True for the PCI-X capability versions (1 and 2) that are treated below
 * as implementing the PCI-X ECC registers.
 */
#define	PCIX_ECC_VER_CHECK(x)	(((x) == PCI_PCIX_VER_1) ||\
				((x) == PCI_PCIX_VER_2))
45 
/*
 * Expected PCI Express error mask values
 *
 * pcie_ereport_setup() compares the device's AER correctable, uncorrectable
 * and (for PCIe-to-PCI bridges) secondary uncorrectable masks against these
 * values and reprograms the hardware when they differ.  On non-sparc the
 * Received Master Abort bit is left masked and excluded from the
 * header-logging bits (presumably because MAs occur routinely there, e.g.
 * during config probing — confirm against platform nexus code).
 */
uint32_t pcie_expected_ce_mask = 0x0;
uint32_t pcie_expected_ue_mask = PCIE_AER_UCE_UC;
#if defined(__sparc)
uint32_t pcie_expected_sue_mask = 0x0;
#else
uint32_t pcie_expected_sue_mask = PCIE_AER_SUCE_RCVD_MA;
#endif
/* UE/SUE status bits for which the AER header-log registers are reported */
uint32_t pcie_aer_uce_log_bits = PCIE_AER_UCE_LOG_BITS;
#if defined(__sparc)
uint32_t pcie_aer_suce_log_bits = PCIE_AER_SUCE_LOG_BITS;
#else
uint32_t pcie_aer_suce_log_bits = \
	    PCIE_AER_SUCE_LOG_BITS & ~PCIE_AER_SUCE_RCVD_MA;
#endif

/* Error queue for deferred PCI target events (allocated elsewhere) */
errorq_t *pci_target_queue = NULL;
65 
/*
 * Map of PCI status register (PCI_CONF_STAT) error bits for leaf devices.
 * Each row: ereport class suffix, status-register bit, associated target
 * ereport class (NULL if none), DDI_FM_* severity.  All-NULL row terminates.
 */
pci_fm_err_t pci_err_tbl[] = {
	PCI_DET_PERR,	PCI_STAT_PERROR,	NULL,		DDI_FM_UNKNOWN,
	PCI_MDPE,	PCI_STAT_S_PERROR,	PCI_TARG_MDPE,	DDI_FM_UNKNOWN,
	PCI_SIG_SERR,	PCI_STAT_S_SYSERR,	NULL,		DDI_FM_FATAL,
	PCI_MA,		PCI_STAT_R_MAST_AB,	PCI_TARG_MA,	DDI_FM_UNKNOWN,
	PCI_REC_TA,	PCI_STAT_R_TARG_AB,	PCI_TARG_REC_TA, DDI_FM_UNKNOWN,
	PCI_SIG_TA,	PCI_STAT_S_TARG_AB,	NULL,		DDI_FM_UNKNOWN,
	NULL, NULL, NULL, NULL,
};
75 
/*
 * Map of PCI secondary status register error bits for bridges.  Same row
 * layout as pci_err_tbl.  On non-sparc a received Master Abort on the
 * secondary bus is deliberately omitted.
 */
pci_fm_err_t pci_bdg_err_tbl[] = {
	PCI_DET_PERR,	PCI_STAT_PERROR,	NULL,		DDI_FM_UNKNOWN,
	PCI_MDPE,	PCI_STAT_S_PERROR,	PCI_TARG_MDPE,	DDI_FM_UNKNOWN,
	PCI_REC_SERR,	PCI_STAT_S_SYSERR,	NULL,		DDI_FM_UNKNOWN,
#if defined(__sparc)
	PCI_MA,		PCI_STAT_R_MAST_AB,	PCI_TARG_MA,	DDI_FM_UNKNOWN,
#endif
	PCI_REC_TA,	PCI_STAT_R_TARG_AB,	PCI_TARG_REC_TA, DDI_FM_UNKNOWN,
	PCI_SIG_TA,	PCI_STAT_S_TARG_AB,	NULL,		DDI_FM_UNKNOWN,
	NULL, NULL, NULL, NULL,
};
87 
/*
 * PCI Express AER correctable error status bits.  All are DDI_FM_OK:
 * correctable errors are reported but never fault the device.
 */
static pci_fm_err_t pciex_ce_err_tbl[] = {
	PCIEX_RE,	PCIE_AER_CE_RECEIVER_ERR,	NULL,	DDI_FM_OK,
	PCIEX_RNR,	PCIE_AER_CE_REPLAY_ROLLOVER,	NULL,	DDI_FM_OK,
	PCIEX_RTO,	PCIE_AER_CE_REPLAY_TO,		NULL,	DDI_FM_OK,
	PCIEX_BDP,	PCIE_AER_CE_BAD_DLLP,		NULL,	DDI_FM_OK,
	PCIEX_BTP,	PCIE_AER_CE_BAD_TLP,		NULL,	DDI_FM_OK,
	PCIEX_ANFE,	PCIE_AER_CE_AD_NFE,		NULL,	DDI_FM_OK,
	NULL, NULL, NULL, NULL,
};
97 
/*
 * PCI Express AER uncorrectable error status bits and their severities.
 * Link-level errors are fatal; transaction-level errors are left as
 * DDI_FM_UNKNOWN for further analysis; Unsupported Completion is benign.
 */
static pci_fm_err_t pciex_ue_err_tbl[] = {
	PCIEX_TE,	PCIE_AER_UCE_TRAINING,		NULL,	DDI_FM_FATAL,
	PCIEX_DLP,	PCIE_AER_UCE_DLP,		NULL,	DDI_FM_FATAL,
	PCIEX_SD,	PCIE_AER_UCE_SD,		NULL,   DDI_FM_FATAL,
	PCIEX_ROF,	PCIE_AER_UCE_RO,		NULL,	DDI_FM_FATAL,
	PCIEX_FCP,	PCIE_AER_UCE_FCP,		NULL,	DDI_FM_FATAL,
	PCIEX_MFP,	PCIE_AER_UCE_MTLP,		NULL,	DDI_FM_FATAL,
	PCIEX_CTO,	PCIE_AER_UCE_TO,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_UC,	PCIE_AER_UCE_UC,		NULL,	DDI_FM_OK,
	PCIEX_ECRC,	PCIE_AER_UCE_ECRC,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_CA,	PCIE_AER_UCE_CA,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_UR,	PCIE_AER_UCE_UR,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_POIS,	PCIE_AER_UCE_PTLP,		NULL,	DDI_FM_UNKNOWN,
	NULL, NULL, NULL, NULL,
};
113 
/*
 * PCI Express AER secondary uncorrectable error bits for PCIe-to-PCI(X)
 * bridges.  Received Master Abort is handled only on sparc (see
 * pcie_expected_sue_mask above).
 */
static pci_fm_err_t pcie_sue_err_tbl[] = {
	PCIEX_S_TA_SC,	PCIE_AER_SUCE_TA_ON_SC,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_S_MA_SC,	PCIE_AER_SUCE_MA_ON_SC,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_S_RTA,	PCIE_AER_SUCE_RCVD_TA,		NULL,	DDI_FM_UNKNOWN,
#if defined(__sparc)
	PCIEX_S_RMA,	PCIE_AER_SUCE_RCVD_MA,		NULL,	DDI_FM_UNKNOWN,
#endif
	PCIEX_S_USC,	PCIE_AER_SUCE_USC_ERR,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_S_USCMD,	PCIE_AER_SUCE_USC_MSG_DATA_ERR,	NULL,	DDI_FM_FATAL,
	PCIEX_S_UDE,	PCIE_AER_SUCE_UC_DATA_ERR,	NULL,	DDI_FM_UNKNOWN,
	PCIEX_S_UAT,	PCIE_AER_SUCE_UC_ATTR_ERR,	NULL,	DDI_FM_FATAL,
	PCIEX_S_UADR,	PCIE_AER_SUCE_UC_ADDR_ERR,	NULL,	DDI_FM_FATAL,
	PCIEX_S_TEX,	PCIE_AER_SUCE_TIMER_EXPIRED,	NULL,	DDI_FM_FATAL,
	PCIEX_S_PERR,	PCIE_AER_SUCE_PERR_ASSERT,	NULL,	DDI_FM_UNKNOWN,
	PCIEX_S_SERR,	PCIE_AER_SUCE_SERR_ASSERT,	NULL,	DDI_FM_FATAL,
	PCIEX_INTERR,	PCIE_AER_SUCE_INTERNAL_ERR,	NULL,	DDI_FM_FATAL,
	NULL, NULL, NULL, NULL,
};
132 
/* PCI-X status register error bits for non-bridge functions. */
static pci_fm_err_t pcix_err_tbl[] = {
	PCIX_SPL_DIS,		PCI_PCIX_SPL_DSCD,	NULL,	DDI_FM_UNKNOWN,
	PCIX_UNEX_SPL,		PCI_PCIX_UNEX_SPL,	NULL,	DDI_FM_UNKNOWN,
	PCIX_RX_SPL_MSG,	PCI_PCIX_RX_SPL_MSG,	NULL,   DDI_FM_UNKNOWN,
	NULL, NULL, NULL, NULL,
};
139 
/* PCI-X bridge secondary status register error bits. */
static pci_fm_err_t pcix_sec_err_tbl[] = {
	PCIX_SPL_DIS,		PCI_PCIX_BSS_SPL_DSCD,	NULL,	DDI_FM_UNKNOWN,
	PCIX_UNEX_SPL,		PCI_PCIX_BSS_UNEX_SPL,	NULL,	DDI_FM_UNKNOWN,
	PCIX_BSS_SPL_OR,	PCI_PCIX_BSS_SPL_OR,	NULL,	DDI_FM_OK,
	PCIX_BSS_SPL_DLY,	PCI_PCIX_BSS_SPL_DLY,	NULL,	DDI_FM_OK,
	NULL, NULL, NULL, NULL,
};
147 
/*
 * PCI Express device status register bits, used for devices that do not
 * implement the Advanced Error Reporting capability.
 */
static pci_fm_err_t pciex_nadv_err_tbl[] = {
	PCIEX_UR,	PCIE_DEVSTS_UR_DETECTED,	NULL,	DDI_FM_UNKNOWN,
	PCIEX_FAT,	PCIE_DEVSTS_FE_DETECTED,	NULL,	DDI_FM_FATAL,
	PCIEX_NONFAT,	PCIE_DEVSTS_NFE_DETECTED,	NULL,	DDI_FM_UNKNOWN,
	PCIEX_CORR,	PCIE_DEVSTS_CE_DETECTED,	NULL,	DDI_FM_OK,
	NULL, NULL, NULL, NULL,
};
155 
156 static int
157 pci_config_check(ddi_acc_handle_t handle, int fme_flag)
158 {
159 	ddi_acc_hdl_t *hp = impl_acc_hdl_get(handle);
160 	ddi_fm_error_t de;
161 
162 	if (!(DDI_FM_ACC_ERR_CAP(ddi_fm_capable(hp->ah_dip))))
163 		return (DDI_FM_OK);
164 
165 	de.fme_version = DDI_FME_VERSION;
166 
167 	ddi_fm_acc_err_get(handle, &de, de.fme_version);
168 	if (de.fme_status != DDI_FM_OK) {
169 		if (fme_flag == DDI_FM_ERR_UNEXPECTED) {
170 			char buf[FM_MAX_CLASS];
171 
172 			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
173 			    PCI_ERROR_SUBCLASS, PCI_NR);
174 			ddi_fm_ereport_post(hp->ah_dip, buf, de.fme_ena,
175 			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, NULL);
176 		}
177 		ddi_fm_acc_err_clear(handle, de.fme_version);
178 	}
179 	return (de.fme_status);
180 }
181 
/*
 * Capture the PCI-X ECC register set (control/status, first and second
 * address, attributes) into pcix_ecc_regs.  Bridges use the bridge-specific
 * register offsets.  The control/status read is verified with
 * pci_config_check(); if it faulted we bail out without setting
 * PCIX_ERR_ECC_STS_VALID, so callers must honor that flag.  The remaining
 * reads are not individually verified.
 */
static void
pcix_ecc_regs_gather(pci_erpt_t *erpt_p, pcix_ecc_regs_t *pcix_ecc_regs,
    uint8_t pcix_cap_ptr, int fme_flag)
{
	int bdg = erpt_p->pe_dflags & PCI_BRIDGE_DEV;

	pcix_ecc_regs->pcix_ecc_ctlstat = pci_config_get32(erpt_p->pe_hdl,
	    (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_STATUS :
	    PCI_PCIX_ECC_STATUS)));
	if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
		pcix_ecc_regs->pcix_ecc_vflags |= PCIX_ERR_ECC_STS_VALID;
	else
		return;
	pcix_ecc_regs->pcix_ecc_fstaddr = pci_config_get32(erpt_p->pe_hdl,
	    (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_FST_AD :
	    PCI_PCIX_ECC_FST_AD)));
	pcix_ecc_regs->pcix_ecc_secaddr = pci_config_get32(erpt_p->pe_hdl,
	    (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_SEC_AD :
	    PCI_PCIX_ECC_SEC_AD)));
	pcix_ecc_regs->pcix_ecc_attr = pci_config_get32((
	    ddi_acc_handle_t)erpt_p->pe_hdl,
	    (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_ATTR : PCI_PCIX_ECC_ATTR)));
}
205 
/*
 * Gather PCI-X error state.  For bridges (pe_regs is a
 * pcix_bdg_error_regs_t) capture the secondary status and bridge status
 * registers and, when the PCI-X version implements them, both banks of ECC
 * registers.  For non-bridge functions (pe_regs is a pcix_error_regs_t)
 * capture command/status and the single ECC register set.  Each verified
 * read sets the matching *_VALID flag; a faulted read aborts the gather.
 */
static void
pcix_regs_gather(pci_erpt_t *erpt_p, void *pe_regs, int fme_flag)
{
	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
		pcix_bdg_error_regs_t *pcix_bdg_regs =
		    (pcix_bdg_error_regs_t *)pe_regs;
		uint8_t pcix_bdg_cap_ptr;
		int i;

		pcix_bdg_cap_ptr = pcix_bdg_regs->pcix_bdg_cap_ptr;
		pcix_bdg_regs->pcix_bdg_sec_stat = pci_config_get16(
		    erpt_p->pe_hdl, (pcix_bdg_cap_ptr + PCI_PCIX_SEC_STATUS));
		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
			pcix_bdg_regs->pcix_bdg_vflags |=
			    PCIX_BDG_SEC_STATUS_VALID;
		else
			return;
		pcix_bdg_regs->pcix_bdg_stat = pci_config_get32(erpt_p->pe_hdl,
		    (pcix_bdg_cap_ptr + PCI_PCIX_BDG_STATUS));
		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
			pcix_bdg_regs->pcix_bdg_vflags |= PCIX_BDG_STATUS_VALID;
		else
			return;
		if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) {
			pcix_ecc_regs_t *pcix_bdg_ecc_regs;
			/*
			 * PCI Express to PCI-X bridges only implement the
			 * secondary side of the PCI-X ECC registers, bit one is
			 * read-only so we make sure we do not write to it.
			 */
			if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
				pcix_bdg_ecc_regs =
				    pcix_bdg_regs->pcix_bdg_ecc_regs[1];
				pcix_ecc_regs_gather(erpt_p, pcix_bdg_ecc_regs,
				    pcix_bdg_cap_ptr, fme_flag);
			} else {
				for (i = 0; i < 2; i++) {
					pcix_bdg_ecc_regs =
					    pcix_bdg_regs->pcix_bdg_ecc_regs[i];
					/*
					 * Select ECC bank i via the status
					 * register before gathering it.
					 */
					pci_config_put32(erpt_p->pe_hdl,
					    (pcix_bdg_cap_ptr +
					    PCI_PCIX_BDG_ECC_STATUS), i);
					pcix_ecc_regs_gather(erpt_p,
					    pcix_bdg_ecc_regs,
					    pcix_bdg_cap_ptr, fme_flag);
				}
			}
		}
	} else {
		pcix_error_regs_t *pcix_regs = (pcix_error_regs_t *)pe_regs;
		uint8_t pcix_cap_ptr;

		pcix_cap_ptr = pcix_regs->pcix_cap_ptr;

		pcix_regs->pcix_command = pci_config_get16(erpt_p->pe_hdl,
		    (pcix_cap_ptr + PCI_PCIX_COMMAND));
		pcix_regs->pcix_status = pci_config_get32(erpt_p->pe_hdl,
		    (pcix_cap_ptr + PCI_PCIX_STATUS));
		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
			pcix_regs->pcix_vflags |= PCIX_ERR_STATUS_VALID;
		else
			return;
		if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) {
			pcix_ecc_regs_t *pcix_ecc_regs =
			    pcix_regs->pcix_ecc_regs;

			pcix_ecc_regs_gather(erpt_p, pcix_ecc_regs,
			    pcix_cap_ptr, fme_flag);
		}
	}
}
277 
/*
 * Gather PCI Express error state into the pcie_error_regs_t hung off
 * erpt_p->pe_regs: device status/control/capabilities, bridge PCI-X state,
 * root-port registers, and — for AER-capable devices — the advanced error
 * status, mask and header-log registers.  Verified reads set the matching
 * PCIE_*_VALID flag; a faulted read of the device status register aborts
 * the whole gather.
 */
static void
pcie_regs_gather(pci_erpt_t *erpt_p, int fme_flag)
{
	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
	uint8_t pcie_cap_ptr;
	pcie_adv_error_regs_t *pcie_adv_regs;
	uint16_t pcie_ecap_ptr;

	pcie_cap_ptr = pcie_regs->pcie_cap_ptr;

	pcie_regs->pcie_err_status = pci_config_get16(erpt_p->pe_hdl,
	    pcie_cap_ptr + PCIE_DEVSTS);
	if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
		pcie_regs->pcie_vflags |= PCIE_ERR_STATUS_VALID;
	else
		return;

	pcie_regs->pcie_err_ctl = pci_config_get16(erpt_p->pe_hdl,
	    (pcie_cap_ptr + PCIE_DEVCTL));
	pcie_regs->pcie_dev_cap = pci_config_get16(erpt_p->pe_hdl,
	    (pcie_cap_ptr + PCIE_DEVCAP));

	/* PCIe bridges that are also PCI-X carry PCI-X bridge state too. */
	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) && (erpt_p->pe_dflags &
	    PCIX_DEV))
		pcix_regs_gather(erpt_p, pcie_regs->pcix_bdg_regs, fme_flag);

	if (erpt_p->pe_dflags & PCIEX_RC_DEV) {
		pcie_rc_error_regs_t *pcie_rc_regs = pcie_regs->pcie_rc_regs;

		pcie_rc_regs->pcie_rc_status = pci_config_get32(erpt_p->pe_hdl,
		    (pcie_cap_ptr + PCIE_ROOTSTS));
		pcie_rc_regs->pcie_rc_ctl = pci_config_get16(erpt_p->pe_hdl,
		    (pcie_cap_ptr + PCIE_ROOTCTL));
	}

	/* Everything below requires the Advanced Error Reporting capability */
	if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV))
		return;

	pcie_adv_regs = pcie_regs->pcie_adv_regs;

	pcie_ecap_ptr = pcie_adv_regs->pcie_adv_cap_ptr;

	pcie_adv_regs->pcie_ue_status = pci_config_get32(erpt_p->pe_hdl,
	    pcie_ecap_ptr + PCIE_AER_UCE_STS);
	if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
		pcie_adv_regs->pcie_adv_vflags |= PCIE_UE_STATUS_VALID;

	pcie_adv_regs->pcie_ue_mask = pci_config_get32(erpt_p->pe_hdl,
	    pcie_ecap_ptr + PCIE_AER_UCE_MASK);
	pcie_adv_regs->pcie_ue_sev = pci_config_get32(erpt_p->pe_hdl,
	    pcie_ecap_ptr + PCIE_AER_UCE_SERV);
	pcie_adv_regs->pcie_adv_ctl = pci_config_get32(erpt_p->pe_hdl,
	    pcie_ecap_ptr + PCIE_AER_CTL);
	pcie_adv_regs->pcie_ue_hdr0 = pci_config_get32(erpt_p->pe_hdl,
	    pcie_ecap_ptr + PCIE_AER_HDR_LOG);
	if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK) {
		int i;
		pcie_adv_regs->pcie_adv_vflags |= PCIE_UE_HDR_VALID;

		/* Header log is 4 dwords: hdr0 above plus 3 more here. */
		for (i = 0; i < 3; i++) {
			pcie_adv_regs->pcie_ue_hdr[i] = pci_config_get32(
			    erpt_p->pe_hdl, pcie_ecap_ptr + PCIE_AER_HDR_LOG +
			    (4 * (i + 1)));
		}
	}

	pcie_adv_regs->pcie_ce_status = pci_config_get32(erpt_p->pe_hdl,
	    pcie_ecap_ptr + PCIE_AER_CE_STS);
	if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
		pcie_adv_regs->pcie_adv_vflags |= PCIE_CE_STATUS_VALID;

	pcie_adv_regs->pcie_ce_mask = pci_config_get32(erpt_p->pe_hdl,
	    pcie_ecap_ptr + PCIE_AER_CE_MASK);

	/*
	 * If pci express to pci bridge then grab the bridge
	 * error registers.
	 */
	if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
		pcie_adv_bdg_error_regs_t *pcie_bdg_regs =
		    pcie_adv_regs->pcie_adv_bdg_regs;

		pcie_bdg_regs->pcie_sue_status =
		    pci_config_get32(erpt_p->pe_hdl,
		    pcie_ecap_ptr + PCIE_AER_SUCE_STS);
		pcie_bdg_regs->pcie_sue_mask =
		    pci_config_get32(erpt_p->pe_hdl,
		    pcie_ecap_ptr + PCIE_AER_SUCE_MASK);
		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
			pcie_adv_regs->pcie_adv_vflags |= PCIE_SUE_STATUS_VALID;
		pcie_bdg_regs->pcie_sue_hdr0 = pci_config_get32(erpt_p->pe_hdl,
		    (pcie_ecap_ptr + PCIE_AER_SHDR_LOG));

		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK) {
			int i;

			pcie_adv_regs->pcie_adv_vflags |= PCIE_SUE_HDR_VALID;

			for (i = 0; i < 3; i++) {
				pcie_bdg_regs->pcie_sue_hdr[i] =
				    pci_config_get32(erpt_p->pe_hdl,
					pcie_ecap_ptr + PCIE_AER_SHDR_LOG +
					(4 * (i + 1)));
			}
		}
	}
	/*
	 * If PCI Express root complex then grab the root complex
	 * error registers.
	 */
	if (erpt_p->pe_dflags & PCIEX_RC_DEV) {
		pcie_adv_rc_error_regs_t *pcie_rc_regs =
		    pcie_adv_regs->pcie_adv_rc_regs;

		pcie_rc_regs->pcie_rc_err_cmd = pci_config_get32(erpt_p->pe_hdl,
		    (pcie_ecap_ptr + PCIE_AER_RE_CMD));
		pcie_rc_regs->pcie_rc_err_status =
		    pci_config_get32(erpt_p->pe_hdl,
			(pcie_ecap_ptr + PCIE_AER_RE_STS));
		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
			pcie_adv_regs->pcie_adv_vflags |=
			    PCIE_RC_ERR_STATUS_VALID;
		pcie_rc_regs->pcie_rc_ce_src_id =
		    pci_config_get16(erpt_p->pe_hdl,
			(pcie_ecap_ptr + PCIE_AER_CE_SRC_ID));
		pcie_rc_regs->pcie_rc_ue_src_id =
		    pci_config_get16(erpt_p->pe_hdl,
			(pcie_ecap_ptr + PCIE_AER_ERR_SRC_ID));
		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
			pcie_adv_regs->pcie_adv_vflags |= PCIE_SRC_ID_VALID;
	}
}
410 
/*
 * Top-level register gather: capture the generic PCI status and command
 * registers, bridge secondary status/control when applicable, then hand
 * off to the PCIe or PCI-X specific gather routines based on the device
 * flags established at setup time.  "dip" is currently unused.
 */
/*ARGSUSED*/
static void
pci_regs_gather(dev_info_t *dip, pci_erpt_t *erpt_p, int fme_flag)
{
	pci_error_regs_t *pci_regs = erpt_p->pe_pci_regs;

	/*
	 * Start by reading all the error registers that are available for
	 * pci and pci express and for leaf devices and bridges/switches
	 */
	pci_regs->pci_err_status = pci_config_get16(erpt_p->pe_hdl,
	    PCI_CONF_STAT);
	if (pci_config_check(erpt_p->pe_hdl, fme_flag) != DDI_FM_OK)
		return;
	pci_regs->pci_vflags |= PCI_ERR_STATUS_VALID;
	pci_regs->pci_cfg_comm = pci_config_get16(erpt_p->pe_hdl,
	    PCI_CONF_COMM);
	if (pci_config_check(erpt_p->pe_hdl, fme_flag) != DDI_FM_OK)
		return;

	/*
	 * If pci-pci bridge grab PCI bridge specific error registers.
	 */
	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
		pci_regs->pci_bdg_regs->pci_bdg_sec_stat =
		    pci_config_get16(erpt_p->pe_hdl, PCI_BCNF_SEC_STATUS);
		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
			pci_regs->pci_bdg_regs->pci_bdg_vflags |=
			    PCI_BDG_SEC_STAT_VALID;
		pci_regs->pci_bdg_regs->pci_bdg_ctrl =
		    pci_config_get16(erpt_p->pe_hdl, PCI_BCNF_BCNTRL);
		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
			pci_regs->pci_bdg_regs->pci_bdg_vflags |=
			    PCI_BDG_CTRL_VALID;
	}

	/*
	 * If pci express device grab pci express error registers and
	 * check for advanced error reporting features and grab them if
	 * available.
	 */
	if (erpt_p->pe_dflags & PCIEX_DEV)
		pcie_regs_gather(erpt_p, fme_flag);
	else if (erpt_p->pe_dflags & PCIX_DEV)
		pcix_regs_gather(erpt_p, erpt_p->pe_regs, fme_flag);

}
458 
/*
 * Clear the PCI-X error state previously captured by pcix_regs_gather():
 * write the captured values back to the status registers (the error bits
 * appear to be write-1-to-clear — writing back what was read clears only
 * the bits that were seen set) and reset all validity flags.  Only
 * registers marked valid are written.
 */
static void
pcix_regs_clear(pci_erpt_t *erpt_p, void *pe_regs)
{
	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
		pcix_bdg_error_regs_t *pcix_bdg_regs =
		    (pcix_bdg_error_regs_t *)pe_regs;
		uint8_t pcix_bdg_cap_ptr;
		int i;

		pcix_bdg_cap_ptr = pcix_bdg_regs->pcix_bdg_cap_ptr;

		if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_SEC_STATUS_VALID)
			pci_config_put16(erpt_p->pe_hdl,
			    (pcix_bdg_cap_ptr + PCI_PCIX_SEC_STATUS),
			    pcix_bdg_regs->pcix_bdg_sec_stat);

		if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_STATUS_VALID)
			pci_config_put32(erpt_p->pe_hdl,
			    (pcix_bdg_cap_ptr + PCI_PCIX_BDG_STATUS),
			    pcix_bdg_regs->pcix_bdg_stat);

		pcix_bdg_regs->pcix_bdg_vflags = 0x0;

		if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) {
			pcix_ecc_regs_t *pcix_bdg_ecc_regs;
			/*
			 * PCI Express to PCI-X bridges only implement the
			 * secondary side of the PCI-X ECC registers, bit one is
			 * read-only so we make sure we do not write to it.
			 */
			if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
				pcix_bdg_ecc_regs =
				    pcix_bdg_regs->pcix_bdg_ecc_regs[1];

				if (pcix_bdg_ecc_regs->pcix_ecc_vflags &
				    PCIX_ERR_ECC_STS_VALID) {

					pci_config_put32(erpt_p->pe_hdl,
					    (pcix_bdg_cap_ptr +
					    PCI_PCIX_BDG_ECC_STATUS),
					    pcix_bdg_ecc_regs->
					    pcix_ecc_ctlstat);
				}
				pcix_bdg_ecc_regs->pcix_ecc_vflags = 0x0;
			} else {
				for (i = 0; i < 2; i++) {
					pcix_bdg_ecc_regs =
					    pcix_bdg_regs->pcix_bdg_ecc_regs[i];


					if (pcix_bdg_ecc_regs->pcix_ecc_vflags &
					    PCIX_ERR_ECC_STS_VALID) {
						/*
						 * Select ECC bank i, then
						 * write back its captured
						 * control/status value.
						 */
						pci_config_put32(erpt_p->pe_hdl,
						    (pcix_bdg_cap_ptr +
						    PCI_PCIX_BDG_ECC_STATUS),
						    i);

						pci_config_put32(erpt_p->pe_hdl,
						    (pcix_bdg_cap_ptr +
						    PCI_PCIX_BDG_ECC_STATUS),
						    pcix_bdg_ecc_regs->
						    pcix_ecc_ctlstat);
					}
					pcix_bdg_ecc_regs->pcix_ecc_vflags =
					    0x0;
				}
			}
		}
	} else {
		pcix_error_regs_t *pcix_regs = (pcix_error_regs_t *)pe_regs;
		uint8_t pcix_cap_ptr;

		pcix_cap_ptr = pcix_regs->pcix_cap_ptr;

		if (pcix_regs->pcix_vflags & PCIX_ERR_STATUS_VALID)
			pci_config_put32(erpt_p->pe_hdl,
			    (pcix_cap_ptr + PCI_PCIX_STATUS),
			    pcix_regs->pcix_status);

		pcix_regs->pcix_vflags = 0x0;

		if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) {
			pcix_ecc_regs_t *pcix_ecc_regs =
			    pcix_regs->pcix_ecc_regs;

			if (pcix_ecc_regs->pcix_ecc_vflags &
			    PCIX_ERR_ECC_STS_VALID)
				pci_config_put32(erpt_p->pe_hdl,
				    (pcix_cap_ptr + PCI_PCIX_ECC_STATUS),
				    pcix_ecc_regs->pcix_ecc_ctlstat);

			pcix_ecc_regs->pcix_ecc_vflags = 0x0;
		}
	}
}
554 
/*
 * Clear the PCI Express error state previously captured by
 * pcie_regs_gather(): write the captured status values back to the device,
 * AER UE/CE, secondary UE and root-complex status registers (only those
 * marked valid) and reset the validity flags.
 */
static void
pcie_regs_clear(pci_erpt_t *erpt_p)
{
	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
	uint8_t pcie_cap_ptr;
	pcie_adv_error_regs_t *pcie_adv_regs;
	uint16_t pcie_ecap_ptr;

	pcie_cap_ptr = pcie_regs->pcie_cap_ptr;

	if (pcie_regs->pcie_vflags & PCIE_ERR_STATUS_VALID)
		pci_config_put16(erpt_p->pe_hdl, pcie_cap_ptr + PCIE_DEVSTS,
		    pcie_regs->pcie_err_status);

	pcie_regs->pcie_vflags = 0x0;

	/* PCIe bridges that are also PCI-X clear their PCI-X state too. */
	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) &&
	    (erpt_p->pe_dflags & PCIX_DEV))
		pcix_regs_clear(erpt_p, pcie_regs->pcix_bdg_regs);

	if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV))
		return;

	pcie_adv_regs = pcie_regs->pcie_adv_regs;

	pcie_ecap_ptr = pcie_adv_regs->pcie_adv_cap_ptr;

	if (pcie_adv_regs->pcie_adv_vflags & PCIE_UE_STATUS_VALID)
		pci_config_put32(erpt_p->pe_hdl,
		    pcie_ecap_ptr + PCIE_AER_UCE_STS,
		    pcie_adv_regs->pcie_ue_status);

	if (pcie_adv_regs->pcie_adv_vflags & PCIE_CE_STATUS_VALID)
		pci_config_put32(erpt_p->pe_hdl,
		    pcie_ecap_ptr + PCIE_AER_CE_STS,
		    pcie_adv_regs->pcie_ce_status);


	if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
		pcie_adv_bdg_error_regs_t *pcie_bdg_regs =
		    pcie_adv_regs->pcie_adv_bdg_regs;


		if (pcie_adv_regs->pcie_adv_vflags & PCIE_SUE_STATUS_VALID)
			pci_config_put32(erpt_p->pe_hdl,
			    pcie_ecap_ptr + PCIE_AER_SUCE_STS,
			    pcie_bdg_regs->pcie_sue_status);
	}
	/*
	 * If PCI Express root complex then clear the root complex
	 * error registers.
	 */
	if (erpt_p->pe_dflags & PCIEX_RC_DEV) {
		pcie_adv_rc_error_regs_t *pcie_rc_regs =
		    pcie_adv_regs->pcie_adv_rc_regs;


		if (pcie_adv_regs->pcie_adv_vflags & PCIE_RC_ERR_STATUS_VALID)
			pci_config_put32(erpt_p->pe_hdl,
			    (pcie_ecap_ptr + PCIE_AER_RE_STS),
			    pcie_rc_regs->pcie_rc_err_status);
	}
	pcie_adv_regs->pcie_adv_vflags = 0x0;
}
619 
/*
 * Top-level register clear, the counterpart of pci_regs_gather(): clear
 * the PCIe or PCI-X specific state first, then write back the captured
 * generic PCI status and bridge secondary status/control values and reset
 * all validity flags.
 */
static void
pci_regs_clear(pci_erpt_t *erpt_p)
{
	/*
	 * Finally clear the error bits
	 */
	if (erpt_p->pe_dflags & PCIEX_DEV)
		pcie_regs_clear(erpt_p);
	else if (erpt_p->pe_dflags & PCIX_DEV)
		pcix_regs_clear(erpt_p, erpt_p->pe_regs);

	if (erpt_p->pe_pci_regs->pci_vflags & PCI_ERR_STATUS_VALID)
		pci_config_put16(erpt_p->pe_hdl, PCI_CONF_STAT,
		    erpt_p->pe_pci_regs->pci_err_status);

	erpt_p->pe_pci_regs->pci_vflags = 0x0;

	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
		if (erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_vflags &
		    PCI_BDG_SEC_STAT_VALID)
			pci_config_put16(erpt_p->pe_hdl, PCI_BCNF_SEC_STATUS,
			    erpt_p->pe_pci_regs->pci_bdg_regs->
			    pci_bdg_sec_stat);
		if (erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_vflags &
		    PCI_BDG_CTRL_VALID)
			pci_config_put16(erpt_p->pe_hdl, PCI_BCNF_BCNTRL,
			    erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_ctrl);

		erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_vflags = 0x0;
	}
}
651 
/*
 * pcix_ereport_setup: Allocate structures for PCI-X error handling and ereport
 * generation.
 *
 * Looks up the "pcix-capid-pointer" property; if absent, the device is not
 * PCI-X and nothing is done.  Otherwise sets PCIX_DEV in pe_dflags and
 * allocates the bridge or non-bridge register-capture structure, reading
 * the PCI-X version from the capability to decide whether ECC register
 * structures are also needed.  "dip" is used only for the property lookup.
 */
/* ARGSUSED */
static void
pcix_ereport_setup(dev_info_t *dip, pci_erpt_t *erpt_p)
{
	uint8_t pcix_cap_ptr;
	int i;

	pcix_cap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "pcix-capid-pointer", PCI_CAP_NEXT_PTR_NULL);

	if (pcix_cap_ptr != PCI_CAP_NEXT_PTR_NULL)
		erpt_p->pe_dflags |= PCIX_DEV;
	else
		return;

	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
		pcix_bdg_error_regs_t *pcix_bdg_regs;

		erpt_p->pe_regs = kmem_zalloc(sizeof (pcix_bdg_error_regs_t),
		    KM_SLEEP);
		pcix_bdg_regs = (pcix_bdg_error_regs_t *)erpt_p->pe_regs;
		pcix_bdg_regs->pcix_bdg_cap_ptr = pcix_cap_ptr;
		/* Bridges report the PCI-X version in the secondary status */
		pcix_bdg_regs->pcix_bdg_ver = pci_config_get16(erpt_p->pe_hdl,
		    pcix_cap_ptr + PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK;
		if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) {
			/* One ECC register bank per bus side */
			for (i = 0; i < 2; i++) {
				pcix_bdg_regs->pcix_bdg_ecc_regs[i] =
				    kmem_zalloc(sizeof (pcix_ecc_regs_t),
					KM_SLEEP);
			}
		}
	} else {
		pcix_error_regs_t *pcix_regs;

		erpt_p->pe_regs = kmem_zalloc(sizeof (pcix_error_regs_t),
		    KM_SLEEP);
		pcix_regs = (pcix_error_regs_t *)erpt_p->pe_regs;
		pcix_regs->pcix_cap_ptr = pcix_cap_ptr;
		/* Non-bridge functions report the version in the command reg */
		pcix_regs->pcix_ver = pci_config_get16(erpt_p->pe_hdl,
		    pcix_cap_ptr + PCI_PCIX_COMMAND) & PCI_PCIX_VER_MASK;
		if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) {
			pcix_regs->pcix_ecc_regs = kmem_zalloc(
			    sizeof (pcix_ecc_regs_t), KM_SLEEP);
		}
	}
}
702 
/*
 * pcie_ereport_setup: Detect the PCI Express (and combined PCI-X)
 * capabilities of the device via the "pcix-capid-pointer",
 * "pcie-capid-pointer" and "pcie-aer-pointer" properties, set the
 * corresponding pe_dflags bits, allocate the register-capture structures,
 * and program the AER CE/UE/SUE masks to the expected values when the
 * hardware disagrees.  On sparc the capability lists are walked here and
 * the properties created directly, pending pci_cap interface support.
 */
static void
pcie_ereport_setup(dev_info_t *dip, pci_erpt_t *erpt_p)
{
	pcie_error_regs_t *pcie_regs;
	pcie_adv_error_regs_t *pcie_adv_regs;
	uint8_t pcix_cap_ptr;
	uint8_t pcie_cap_ptr;
	uint16_t pcie_ecap_ptr;
	uint16_t dev_type = 0;
	uint32_t mask = pcie_expected_ue_mask;

	/*
	 * The following sparc specific code should be removed once the pci_cap
	 * interfaces create the necessary properties for us.
	 */
#if defined(__sparc)
	ushort_t status;
	uint32_t slot_cap;
	uint8_t cap_ptr = 0;
	uint8_t cap_id = 0;
	uint32_t hdr, hdr_next_ptr, hdr_cap_id;
	uint16_t offset = P2ALIGN(PCIE_EXT_CAP, 4);
	uint16_t aer_ptr = 0;

	cap_ptr = pci_config_get8(erpt_p->pe_hdl, PCI_CONF_CAP_PTR);
	if (pci_config_check(erpt_p->pe_hdl, DDI_FM_ERR_UNEXPECTED) ==
	    DDI_FM_OK) {
		/* Walk the standard capability list publishing properties */
		while ((cap_id = pci_config_get8(erpt_p->pe_hdl, cap_ptr)) !=
		    0xff) {
			if (cap_id == PCI_CAP_ID_PCIX) {
				(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
				    "pcix-capid-pointer", cap_ptr);
			}
		/*
		 * NOTE(review): the block below is under-indented but is
		 * still inside the while loop above.
		 */
		if (cap_id == PCI_CAP_ID_PCI_E) {
			status = pci_config_get16(erpt_p->pe_hdl, cap_ptr + 2);
			if (status & PCIE_PCIECAP_SLOT_IMPL) {
				/* offset 14h is Slot Cap Register */
				slot_cap = pci_config_get32(erpt_p->pe_hdl,
				    cap_ptr + PCIE_SLOTCAP);
				(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
				    "pcie-slotcap-reg", slot_cap);
			}
			(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
			    "pcie-capid-reg", pci_config_get16(erpt_p->pe_hdl,
			    cap_ptr + PCIE_PCIECAP));
			(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
			    "pcie-capid-pointer", cap_ptr);

		}
			/* Follow the next pointer; stop on end or bad read */
			if ((cap_ptr = pci_config_get8(erpt_p->pe_hdl,
			    cap_ptr + 1)) == 0xff || cap_ptr == 0 ||
			    (pci_config_check(erpt_p->pe_hdl,
			    DDI_FM_ERR_UNEXPECTED) != DDI_FM_OK))
				break;
		}
	}

#endif

	pcix_cap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "pcix-capid-pointer", PCI_CAP_NEXT_PTR_NULL);

	if (pcix_cap_ptr != PCI_CAP_NEXT_PTR_NULL)
		erpt_p->pe_dflags |= PCIX_DEV;

	pcie_cap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "pcie-capid-pointer", PCI_CAP_NEXT_PTR_NULL);

	if (pcie_cap_ptr != PCI_CAP_NEXT_PTR_NULL) {
		erpt_p->pe_dflags |= PCIEX_DEV;
		erpt_p->pe_regs = kmem_zalloc(sizeof (pcie_error_regs_t),
		    KM_SLEEP);
		pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
		pcie_regs->pcie_cap_ptr = pcie_cap_ptr;
	}

	if (!(erpt_p->pe_dflags & PCIEX_DEV))
		return;

	/*
	 * Don't currently need to check for version here because we are
	 * compliant with PCIE 1.0a which is version 0 and is guaranteed
	 * software compatibility with future versions.  We will need to
	 * add errors for new detectors/features which are added in newer
	 * revisions [sec 7.8.2].
	 */
	pcie_regs->pcie_cap = pci_config_get16(erpt_p->pe_hdl,
	    pcie_regs->pcie_cap_ptr + PCIE_PCIECAP);

	dev_type = pcie_regs->pcie_cap & PCIE_PCIECAP_DEV_TYPE_MASK;

	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) &&
	    (erpt_p->pe_dflags & PCIX_DEV)) {
		int i;

		pcie_regs->pcix_bdg_regs =
		    kmem_zalloc(sizeof (pcix_bdg_error_regs_t), KM_SLEEP);

		pcie_regs->pcix_bdg_regs->pcix_bdg_cap_ptr = pcix_cap_ptr;
		pcie_regs->pcix_bdg_regs->pcix_bdg_ver =
		    pci_config_get16(erpt_p->pe_hdl,
			pcix_cap_ptr + PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK;

		if (PCIX_ECC_VER_CHECK(pcie_regs->pcix_bdg_regs->pcix_bdg_ver))
			for (i = 0; i < 2; i++)
				pcie_regs->pcix_bdg_regs->pcix_bdg_ecc_regs[i] =
				    kmem_zalloc(sizeof (pcix_ecc_regs_t),
					KM_SLEEP);
	}

	if (dev_type == PCIE_PCIECAP_DEV_TYPE_ROOT) {
		erpt_p->pe_dflags |= PCIEX_RC_DEV;
		pcie_regs->pcie_rc_regs = kmem_zalloc(
		    sizeof (pcie_rc_error_regs_t), KM_SLEEP);
	}
	/*
	 * The following sparc specific code should be removed once the pci_cap
	 * interfaces create the necessary properties for us.
	 */
#if defined(__sparc)

	/* Walk the extended capability list looking for the AER capability */
	hdr = pci_config_get32(erpt_p->pe_hdl, offset);
	hdr_next_ptr = (hdr >> PCIE_EXT_CAP_NEXT_PTR_SHIFT) &
	    PCIE_EXT_CAP_NEXT_PTR_MASK;
	hdr_cap_id = (hdr >> PCIE_EXT_CAP_ID_SHIFT) & PCIE_EXT_CAP_ID_MASK;

	while ((hdr_next_ptr != PCIE_EXT_CAP_NEXT_PTR_NULL) &&
	    (hdr_cap_id != PCIE_EXT_CAP_ID_AER)) {
		offset = P2ALIGN(hdr_next_ptr, 4);
		hdr = pci_config_get32(erpt_p->pe_hdl, offset);
		hdr_next_ptr = (hdr >> PCIE_EXT_CAP_NEXT_PTR_SHIFT) &
		    PCIE_EXT_CAP_NEXT_PTR_MASK;
		hdr_cap_id = (hdr >> PCIE_EXT_CAP_ID_SHIFT) &
		    PCIE_EXT_CAP_ID_MASK;
	}

	if (hdr_cap_id == PCIE_EXT_CAP_ID_AER)
		aer_ptr = P2ALIGN(offset, 4);
	if (aer_ptr != PCI_CAP_NEXT_PTR_NULL)
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
		    "pcie-aer-pointer", aer_ptr);
#endif

	/*
	 * Find and store if this device is capable of pci express
	 * advanced errors, if not report an error against the device.
	 */
	pcie_ecap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "pcie-aer-pointer", PCI_CAP_NEXT_PTR_NULL);
	if (pcie_ecap_ptr != PCI_CAP_NEXT_PTR_NULL) {
		erpt_p->pe_dflags |= PCIEX_ADV_DEV;
		pcie_regs->pcie_adv_regs = kmem_zalloc(
		    sizeof (pcie_adv_error_regs_t), KM_SLEEP);
		pcie_regs->pcie_adv_regs->pcie_adv_cap_ptr = pcie_ecap_ptr;
	}

	if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV)) {
		return;
	}

	pcie_adv_regs = pcie_regs->pcie_adv_regs;

	if (pcie_adv_regs == NULL)
		return;
	/*
	 * Initialize structures for advanced PCI Express devices.
	 */

	/*
	 * Advanced error registers exist for PCI Express to PCI(X) Bridges and
	 * may also exist for PCI(X) to PCI Express Bridges, the latter is not
	 * well explained in the PCI Express to PCI/PCI-X Bridge Specification
	 * 1.0 and will be left out of the current gathering of these registers.
	 */
	if (dev_type == PCIE_PCIECAP_DEV_TYPE_PCIE2PCI) {
		erpt_p->pe_dflags |= PCIEX_2PCI_DEV;
		pcie_adv_regs->pcie_adv_bdg_regs = kmem_zalloc(
		    sizeof (pcie_adv_bdg_error_regs_t), KM_SLEEP);
	}

	if (erpt_p->pe_dflags & PCIEX_RC_DEV)
		pcie_adv_regs->pcie_adv_rc_regs = kmem_zalloc(
		    sizeof (pcie_adv_rc_error_regs_t), KM_SLEEP);

	/*
	 * Check that mask values are as expected, if not
	 * change them to what we desire.
	 */
	pci_regs_gather(dip, erpt_p, DDI_FM_ERR_UNEXPECTED);
	pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
	if (pcie_regs->pcie_adv_regs->pcie_ce_mask != pcie_expected_ce_mask) {
		pci_config_put32(erpt_p->pe_hdl,
		    pcie_ecap_ptr + PCIE_AER_CE_MASK, pcie_expected_ce_mask);
	}

	/* Disable PTLP/ECRC (or mask these two) for Switches */
	if (dev_type == PCIE_PCIECAP_DEV_TYPE_UP ||
	    dev_type == PCIE_PCIECAP_DEV_TYPE_DOWN) {
		erpt_p->pe_dflags |= PCIEX_SWITCH_DEV;
		mask |= PCIE_AER_UCE_PTLP | PCIE_AER_UCE_ECRC;
	}

	if (pcie_regs->pcie_adv_regs->pcie_ue_mask != mask) {
		pci_config_put32(erpt_p->pe_hdl,
		    pcie_ecap_ptr + PCIE_AER_UCE_MASK, mask);
	}
	if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
		if (pcie_regs->pcie_adv_regs->pcie_adv_bdg_regs->pcie_sue_mask
		    != pcie_expected_sue_mask) {
			pci_config_put32(erpt_p->pe_hdl,
			    pcie_ecap_ptr + PCIE_AER_SUCE_MASK,
			    pcie_expected_sue_mask);
		}
	}
}
918 
919 /*
920  * pci_ereport_setup: Detect PCI device type and initialize structures to be
921  * used to generate ereports based on detected generic device errors.
922  */
923 void
924 pci_ereport_setup(dev_info_t *dip)
925 {
926 	struct dev_info *devi = DEVI(dip);
927 	struct i_ddi_fmhdl *fmhdl = devi->devi_fmhdl;
928 	pci_erpt_t *erpt_p;
929 	uint8_t pci_hdr_type;
930 	uint16_t pci_status;
931 	pci_regspec_t *pci_rp;
932 	int32_t len;
933 	uint32_t phys_hi;
934 
935 	/*
936 	 * If device is not ereport capbable then report an error against the
937 	 * driver for using this interface,
938 	 */
939 	if (!DDI_FM_EREPORT_CAP(ddi_fm_capable(dip)) &&
940 	    !DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) {
941 		i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL, DDI_SLEEP);
942 		return;
943 	}
944 
945 	/*
946 	 * ASSERT fmhdl exists and fh_bus_specific is NULL.
947 	 */
948 	ASSERT(fmhdl && (fmhdl->fh_bus_specific == NULL));
949 
950 	erpt_p = kmem_zalloc(sizeof (pci_erpt_t), KM_SLEEP);
951 
952 	if (pci_config_setup(dip, &erpt_p->pe_hdl) != DDI_SUCCESS)
953 		goto error;
954 
955 	erpt_p->pe_pci_regs = kmem_zalloc(sizeof (pci_error_regs_t), KM_SLEEP);
956 
957 	pci_status = pci_config_get16(erpt_p->pe_hdl, PCI_CONF_STAT);
958 	if (pci_config_check(erpt_p->pe_hdl, DDI_FM_ERR_UNEXPECTED) !=
959 	    DDI_FM_OK)
960 		goto error;
961 
962 	/*
963 	 * Get header type and record if device is a bridge.
964 	 */
965 	pci_hdr_type = pci_config_get8(erpt_p->pe_hdl, PCI_CONF_HEADER);
966 	if (pci_config_check(erpt_p->pe_hdl, DDI_FM_ERR_UNEXPECTED) !=
967 	    DDI_FM_OK)
968 		goto error;
969 
970 	/*
971 	 * Check to see if PCI device is a bridge, if so allocate pci bridge
972 	 * error register structure.
973 	 */
974 	if ((pci_hdr_type & PCI_HEADER_TYPE_M) == PCI_HEADER_PPB) {
975 		erpt_p->pe_dflags |= PCI_BRIDGE_DEV;
976 		erpt_p->pe_pci_regs->pci_bdg_regs = kmem_zalloc(
977 		    sizeof (pci_bdg_error_regs_t), KM_SLEEP);
978 	}
979 
980 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg",
981 	    (caddr_t)&pci_rp, &len) == DDI_SUCCESS) {
982 		phys_hi = pci_rp->pci_phys_hi;
983 		kmem_free(pci_rp, len);
984 
985 		erpt_p->pe_bdf = (uint16_t)(PCI_REG_BDFR_G(phys_hi) >>
986 		    PCI_REG_FUNC_SHIFT);
987 	}
988 
989 
990 	if (!(pci_status & PCI_STAT_CAP)) {
991 		goto done;
992 	}
993 
994 	/*
995 	 * Initialize structures for PCI Express and PCI-X devices.
996 	 * Order matters below and pcie_ereport_setup should preceed
997 	 * pcix_ereport_setup.
998 	 */
999 	pcie_ereport_setup(dip, erpt_p);
1000 
1001 	if (!(erpt_p->pe_dflags & PCIEX_DEV)) {
1002 		pcix_ereport_setup(dip, erpt_p);
1003 	}
1004 
1005 done:
1006 	pci_regs_gather(dip, erpt_p, DDI_FM_ERR_UNEXPECTED);
1007 	pci_regs_clear(erpt_p);
1008 
1009 	/*
1010 	 * Before returning set fh_bus_specific to completed pci_erpt_t
1011 	 * structure
1012 	 */
1013 	fmhdl->fh_bus_specific = (void *)erpt_p;
1014 
1015 	return;
1016 error:
1017 	if (erpt_p->pe_pci_regs)
1018 		kmem_free(erpt_p->pe_pci_regs, sizeof (pci_error_regs_t));
1019 	kmem_free(erpt_p, sizeof (pci_erpt_t));
1020 	erpt_p = NULL;
1021 }
1022 
1023 static void
1024 pcix_ereport_teardown(pci_erpt_t *erpt_p)
1025 {
1026 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
1027 		pcix_bdg_error_regs_t *pcix_bdg_regs;
1028 		uint16_t pcix_ver;
1029 
1030 		pcix_bdg_regs = (pcix_bdg_error_regs_t *)erpt_p->pe_regs;
1031 		pcix_ver = pcix_bdg_regs->pcix_bdg_ver;
1032 		if (PCIX_ECC_VER_CHECK(pcix_ver)) {
1033 			int i;
1034 			for (i = 0; i < 2; i++)
1035 				kmem_free(pcix_bdg_regs->pcix_bdg_ecc_regs[i],
1036 				    sizeof (pcix_ecc_regs_t));
1037 		}
1038 		kmem_free(erpt_p->pe_regs, sizeof (pcix_bdg_error_regs_t));
1039 	} else {
1040 		pcix_error_regs_t *pcix_regs;
1041 		uint16_t pcix_ver;
1042 
1043 		pcix_regs = (pcix_error_regs_t *)erpt_p->pe_regs;
1044 		pcix_ver = pcix_regs->pcix_ver;
1045 		if (PCIX_ECC_VER_CHECK(pcix_ver)) {
1046 			kmem_free(pcix_regs->pcix_ecc_regs,
1047 			    sizeof (pcix_ecc_regs_t));
1048 		}
1049 		kmem_free(erpt_p->pe_regs, sizeof (pcix_error_regs_t));
1050 	}
1051 }
1052 
1053 static void
1054 pcie_ereport_teardown(pci_erpt_t *erpt_p)
1055 {
1056 	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
1057 
1058 	if (erpt_p->pe_dflags & PCIEX_ADV_DEV) {
1059 		pcie_adv_error_regs_t *pcie_adv = pcie_regs->pcie_adv_regs;
1060 
1061 		if (erpt_p->pe_dflags & PCIEX_2PCI_DEV)
1062 			kmem_free(pcie_adv->pcie_adv_bdg_regs,
1063 			    sizeof (pcie_adv_bdg_error_regs_t));
1064 		if (erpt_p->pe_dflags & PCIEX_RC_DEV)
1065 			kmem_free(pcie_adv->pcie_adv_rc_regs,
1066 			    sizeof (pcie_adv_rc_error_regs_t));
1067 		kmem_free(pcie_adv, sizeof (pcie_adv_error_regs_t));
1068 	}
1069 
1070 	if (erpt_p->pe_dflags & PCIEX_RC_DEV)
1071 		kmem_free(pcie_regs->pcie_rc_regs,
1072 		    sizeof (pcie_rc_error_regs_t));
1073 
1074 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
1075 		if (erpt_p->pe_dflags & PCIX_DEV) {
1076 			uint16_t pcix_ver = pcie_regs->pcix_bdg_regs->
1077 			    pcix_bdg_ver;
1078 
1079 			if (PCIX_ECC_VER_CHECK(pcix_ver)) {
1080 				int i;
1081 				for (i = 0; i < 2; i++)
1082 					kmem_free(pcie_regs->pcix_bdg_regs->
1083 					    pcix_bdg_ecc_regs[i],
1084 					    sizeof (pcix_ecc_regs_t));
1085 			}
1086 			kmem_free(pcie_regs->pcix_bdg_regs,
1087 			    sizeof (pcix_bdg_error_regs_t));
1088 		}
1089 	}
1090 	kmem_free(erpt_p->pe_regs, sizeof (pcie_error_regs_t));
1091 }
1092 
/*
 * pci_ereport_teardown: Undo pci_ereport_setup() - free the bus-specific
 * error report state hung off the device's FM handle and release the
 * config space access handle.
 */
void
pci_ereport_teardown(dev_info_t *dip)
{
	struct i_ddi_fmhdl *fmhdl = DEVI(dip)->devi_fmhdl;
	pci_erpt_t *erpt_p;

	/*
	 * As in setup, flag misuse of this interface by drivers that are
	 * not ereport/error-callback capable; teardown still proceeds.
	 */
	if (!DDI_FM_EREPORT_CAP(ddi_fm_capable(dip)) &&
	    !DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) {
		i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL, DDI_SLEEP);
	}

	ASSERT(fmhdl);

	/* Nothing to do if setup never completed (or was already undone) */
	erpt_p = (pci_erpt_t *)fmhdl->fh_bus_specific;
	if (erpt_p == NULL)
		return;

	/* Free the bus-technology-specific register state first */
	if (erpt_p->pe_dflags & PCIEX_DEV)
		pcie_ereport_teardown(erpt_p);
	else if (erpt_p->pe_dflags & PCIX_DEV)
		pcix_ereport_teardown(erpt_p);
	pci_config_teardown((ddi_acc_handle_t *)&erpt_p->pe_hdl);
	/* pci_bdg_regs is only allocated for bridge devices - see setup */
	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV)
		kmem_free(erpt_p->pe_pci_regs->pci_bdg_regs,
		    sizeof (pci_bdg_error_regs_t));
	kmem_free(erpt_p->pe_pci_regs, sizeof (pci_error_regs_t));
	kmem_free(erpt_p, sizeof (pci_erpt_t));
	fmhdl->fh_bus_specific = NULL;
	/*
	 * The following sparc specific code should be removed once the pci_cap
	 * interfaces create the necessary properties for us.
	 */
#if defined(__sparc)
	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcix-capid-pointer");
	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-slotcap-reg");
	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-capid-reg");
	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-capid-pointer");
	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-aer-pointer");
#endif
}
1133 
1134 static void
1135 pcie_ereport_post(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
1136     char *buf, int errtype)
1137 {
1138 	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
1139 	pcie_adv_error_regs_t *pcie_adv_regs = pcie_regs->pcie_adv_regs;
1140 	pcie_adv_rc_error_regs_t *pcie_adv_rc_regs;
1141 
1142 	switch (errtype) {
1143 	    case PCIEX_TYPE_CE:
1144 		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
1145 		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
1146 		    PCIEX_DEVSTS_REG, DATA_TYPE_UINT16,
1147 		    pcie_regs->pcie_err_status,
1148 		    PCIEX_CE_STATUS_REG, DATA_TYPE_UINT32,
1149 		    pcie_adv_regs->pcie_ce_status, NULL);
1150 		break;
1151 	    case PCIEX_TYPE_UE:
1152 		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
1153 		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
1154 		    PCIEX_DEVSTS_REG, DATA_TYPE_UINT16,
1155 		    pcie_regs->pcie_err_status,
1156 		    PCIEX_UE_STATUS_REG, DATA_TYPE_UINT32,
1157 		    pcie_adv_regs->pcie_ue_status, PCIEX_UE_SEV_REG,
1158 		    DATA_TYPE_UINT32, pcie_adv_regs->pcie_ue_sev,
1159 		    PCIEX_ADV_CTL, DATA_TYPE_UINT32,
1160 		    pcie_adv_regs->pcie_adv_ctl,
1161 		    PCIEX_SRC_ID, DATA_TYPE_UINT16,
1162 		    pcie_adv_regs->pcie_adv_bdf,
1163 		    PCIEX_SRC_VALID, DATA_TYPE_BOOLEAN_VALUE,
1164 		    (pcie_adv_regs->pcie_adv_bdf != NULL) ?
1165 		    1 : NULL,
1166 #ifdef DEBUG
1167 		    PCIEX_UE_HDR0, DATA_TYPE_UINT32,
1168 		    pcie_adv_regs->pcie_ue_hdr0,
1169 		    PCIEX_UE_HDR1, DATA_TYPE_UINT32,
1170 		    pcie_adv_regs->pcie_ue_hdr[0],
1171 		    PCIEX_UE_HDR2, DATA_TYPE_UINT32,
1172 		    pcie_adv_regs->pcie_ue_hdr[1],
1173 		    PCIEX_UE_HDR3, DATA_TYPE_UINT32,
1174 		    pcie_adv_regs->pcie_ue_hdr[2],
1175 #endif
1176 		    NULL);
1177 		break;
1178 	    case PCIEX_TYPE_GEN:
1179 		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
1180 		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8,
1181 		    0, PCIEX_DEVSTS_REG, DATA_TYPE_UINT16,
1182 		    pcie_regs->pcie_err_status, NULL);
1183 		break;
1184 	    case PCIEX_TYPE_RC_UE_MSG:
1185 	    case PCIEX_TYPE_RC_CE_MSG:
1186 		pcie_adv_rc_regs = pcie_adv_regs->pcie_adv_rc_regs;
1187 
1188 		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
1189 		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
1190 		    PCIEX_ROOT_ERRSTS_REG, DATA_TYPE_UINT32,
1191 		    pcie_adv_rc_regs->pcie_rc_err_status,
1192 		    PCIEX_SRC_ID, DATA_TYPE_UINT16,
1193 		    (errtype == PCIEX_TYPE_RC_UE_MSG) ?
1194 		    pcie_adv_rc_regs->pcie_rc_ue_src_id :
1195 		    pcie_adv_rc_regs->pcie_rc_ce_src_id,
1196 		    PCIEX_SRC_VALID, DATA_TYPE_BOOLEAN_VALUE,
1197 		    (errtype == PCIEX_TYPE_RC_UE_MSG) ?
1198 		    (pcie_adv_regs->pcie_adv_vflags & PCIE_SRC_ID_VALID &&
1199 		    pcie_adv_rc_regs->pcie_rc_ue_src_id != 0) :
1200 		    (pcie_adv_regs->pcie_adv_vflags & PCIE_SRC_ID_VALID &&
1201 		    pcie_adv_rc_regs->pcie_rc_ce_src_id != 0), NULL);
1202 		break;
1203 	    case PCIEX_TYPE_RC_MULT_MSG:
1204 		pcie_adv_rc_regs = pcie_adv_regs->pcie_adv_rc_regs;
1205 
1206 		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
1207 		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
1208 		    PCIEX_ROOT_ERRSTS_REG, DATA_TYPE_UINT32,
1209 		    pcie_adv_rc_regs->pcie_rc_err_status, NULL);
1210 		break;
1211 	    default:
1212 		break;
1213 	}
1214 }
1215 
1216 /*ARGSUSED*/
1217 static void
1218 pcie_check_addr(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
1219 {
1220 	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
1221 	pcie_adv_error_regs_t *pcie_adv_regs = pcie_regs->pcie_adv_regs;
1222 	pcie_tlp_hdr_t *ue_hdr0;
1223 	uint32_t *ue_hdr;
1224 	uint64_t addr = NULL;
1225 	int upstream = 0;
1226 	pci_fme_bus_specific_t *pci_fme_bsp =
1227 	    (pci_fme_bus_specific_t *)derr->fme_bus_specific;
1228 
1229 	if (!(pcie_adv_regs->pcie_adv_vflags & PCIE_UE_HDR_VALID))
1230 		return;
1231 
1232 	ue_hdr0 = (pcie_tlp_hdr_t *)&pcie_adv_regs->pcie_ue_hdr0;
1233 	ue_hdr = pcie_adv_regs->pcie_ue_hdr;
1234 
1235 	if ((pcie_regs->pcie_cap & PCIE_PCIECAP_DEV_TYPE_MASK) ==
1236 	    PCIE_PCIECAP_DEV_TYPE_ROOT ||
1237 	    (pcie_regs->pcie_cap & PCIE_PCIECAP_DEV_TYPE_MASK) ==
1238 	    PCIE_PCIECAP_DEV_TYPE_DOWN)
1239 		upstream = 1;
1240 
1241 	switch (ue_hdr0->type) {
1242 	    case PCIE_TLP_TYPE_MEM:
1243 	    case PCIE_TLP_TYPE_MEMLK:
1244 		if ((ue_hdr0->fmt & 0x1) == 0x1) {
1245 			pcie_mem64_t *mem64_tlp = (pcie_mem64_t *)ue_hdr;
1246 
1247 			addr = (uint64_t)mem64_tlp->addr1 << 32 |
1248 			    (uint32_t)mem64_tlp->addr0 << 2;
1249 			pcie_adv_regs->pcie_adv_bdf = mem64_tlp->rid;
1250 		} else {
1251 			pcie_memio32_t *memio32_tlp = (pcie_memio32_t *)ue_hdr;
1252 
1253 			addr = (uint32_t)memio32_tlp->addr0 << 2;
1254 			pcie_adv_regs->pcie_adv_bdf = memio32_tlp->rid;
1255 		}
1256 		if (upstream) {
1257 			pci_fme_bsp->pci_bs_bdf = pcie_adv_regs->pcie_adv_bdf;
1258 			pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID;
1259 		} else if ((pcie_regs->pcie_cap & PCIE_PCIECAP_DEV_TYPE_MASK) ==
1260 		    PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) {
1261 			pci_fme_bsp->pci_bs_bdf = erpt_p->pe_bdf;
1262 			pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID;
1263 		}
1264 		pci_fme_bsp->pci_bs_addr = addr;
1265 		pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID;
1266 		pci_fme_bsp->pci_bs_type = upstream ? DMA_HANDLE : ACC_HANDLE;
1267 		break;
1268 
1269 	    case PCIE_TLP_TYPE_IO:
1270 		{
1271 			pcie_memio32_t *memio32_tlp = (pcie_memio32_t *)ue_hdr;
1272 
1273 			addr = (uint32_t)memio32_tlp->addr0 << 2;
1274 			pcie_adv_regs->pcie_adv_bdf = memio32_tlp->rid;
1275 			if ((pcie_regs->pcie_cap &
1276 			    PCIE_PCIECAP_DEV_TYPE_MASK) ==
1277 			    PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) {
1278 				pci_fme_bsp->pci_bs_bdf = erpt_p->pe_bdf;
1279 				pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID;
1280 			}
1281 			pci_fme_bsp->pci_bs_addr = addr;
1282 			pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID;
1283 			pci_fme_bsp->pci_bs_type = ACC_HANDLE;
1284 			break;
1285 		}
1286 	    case PCIE_TLP_TYPE_CFG0:
1287 	    case PCIE_TLP_TYPE_CFG1:
1288 		{
1289 			pcie_cfg_t *cfg_tlp = (pcie_cfg_t *)ue_hdr;
1290 
1291 			pcie_adv_regs->pcie_adv_bdf = cfg_tlp->rid;
1292 			pci_fme_bsp->pci_bs_bdf = (uint16_t)cfg_tlp->bus << 8 |
1293 			    (uint16_t)cfg_tlp->dev << 3 | cfg_tlp->func;
1294 			pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID;
1295 			pci_fme_bsp->pci_bs_type = ACC_HANDLE;
1296 			break;
1297 		}
1298 	    case PCIE_TLP_TYPE_MSG:
1299 		{
1300 			pcie_msg_t *msg_tlp = (pcie_msg_t *)ue_hdr;
1301 
1302 			pcie_adv_regs->pcie_adv_bdf = msg_tlp->rid;
1303 			break;
1304 		}
1305 	    case PCIE_TLP_TYPE_CPL:
1306 	    case PCIE_TLP_TYPE_CPLLK:
1307 		{
1308 			pcie_cpl_t *cpl_tlp = (pcie_cpl_t *)ue_hdr;
1309 
1310 			pcie_adv_regs->pcie_adv_bdf = cpl_tlp->cid;
1311 			pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID;
1312 			if (upstream) {
1313 				pci_fme_bsp->pci_bs_bdf = cpl_tlp->cid;
1314 				pci_fme_bsp->pci_bs_type = ACC_HANDLE;
1315 			} else {
1316 				pci_fme_bsp->pci_bs_bdf = cpl_tlp->rid;
1317 				pci_fme_bsp->pci_bs_type = DMA_HANDLE;
1318 			}
1319 			break;
1320 		}
1321 	    case PCIE_TLP_TYPE_MSI:
1322 	    default:
1323 		break;
1324 	}
1325 }
1326 
1327 /*ARGSUSED*/
1328 static void
1329 pcie_pci_check_addr(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
1330     int type)
1331 {
1332 	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
1333 	pcie_adv_error_regs_t *pcie_adv_regs = pcie_regs->pcie_adv_regs;
1334 	pcie_adv_bdg_error_regs_t *pcie_bdg_regs =
1335 	    pcie_adv_regs->pcie_adv_bdg_regs;
1336 	uint64_t addr = NULL;
1337 	pcix_attr_t *pcie_pci_sue_attr;
1338 	int cmd;
1339 	int dual_addr = 0;
1340 	pci_fme_bus_specific_t *pci_fme_bsp =
1341 	    (pci_fme_bus_specific_t *)derr->fme_bus_specific;
1342 
1343 	if (!(pcie_adv_regs->pcie_adv_vflags & PCIE_SUE_HDR_VALID))
1344 		return;
1345 
1346 	pcie_pci_sue_attr = (pcix_attr_t *)&pcie_bdg_regs->pcie_sue_hdr0;
1347 	cmd = (pcie_bdg_regs->pcie_sue_hdr[0] >>
1348 	    PCIE_AER_SUCE_HDR_CMD_LWR_SHIFT) & PCIE_AER_SUCE_HDR_CMD_LWR_MASK;
1349 
1350 cmd_switch:
1351 	addr = pcie_bdg_regs->pcie_sue_hdr[2];
1352 	addr = (addr << PCIE_AER_SUCE_HDR_ADDR_SHIFT) |
1353 	    pcie_bdg_regs->pcie_sue_hdr[1];
1354 	switch (cmd) {
1355 	    case PCI_PCIX_CMD_IORD:
1356 	    case PCI_PCIX_CMD_IOWR:
1357 		pcie_adv_regs->pcie_adv_bdf = pcie_pci_sue_attr->rid;
1358 		if (addr) {
1359 			pci_fme_bsp->pci_bs_addr = addr;
1360 			pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID;
1361 			pci_fme_bsp->pci_bs_type = ACC_HANDLE;
1362 		}
1363 		break;
1364 	    case PCI_PCIX_CMD_MEMRD_DW:
1365 	    case PCI_PCIX_CMD_MEMWR:
1366 	    case PCI_PCIX_CMD_MEMRD_BL:
1367 	    case PCI_PCIX_CMD_MEMWR_BL:
1368 	    case PCI_PCIX_CMD_MEMRDBL:
1369 	    case PCI_PCIX_CMD_MEMWRBL:
1370 		pcie_adv_regs->pcie_adv_bdf = pcie_pci_sue_attr->rid;
1371 		if (addr) {
1372 			pci_fme_bsp->pci_bs_addr = addr;
1373 			pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID;
1374 			pci_fme_bsp->pci_bs_type = type;
1375 		}
1376 		break;
1377 	    case PCI_PCIX_CMD_CFRD:
1378 	    case PCI_PCIX_CMD_CFWR:
1379 		pcie_adv_regs->pcie_adv_bdf = pcie_pci_sue_attr->rid;
1380 		/*
1381 		 * for type 1 config transaction we can find bdf from address
1382 		 */
1383 		if ((addr & 3) == 1) {
1384 			pci_fme_bsp->pci_bs_bdf = (addr >> 8) & 0xffffffff;
1385 			pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID;
1386 			pci_fme_bsp->pci_bs_type = ACC_HANDLE;
1387 		}
1388 		break;
1389 	    case PCI_PCIX_CMD_SPL:
1390 		pcie_adv_regs->pcie_adv_bdf = pcie_pci_sue_attr->rid;
1391 		if (type == ACC_HANDLE) {
1392 			pci_fme_bsp->pci_bs_bdf = pcie_adv_regs->pcie_adv_bdf;
1393 			pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID;
1394 			pci_fme_bsp->pci_bs_type = type;
1395 		}
1396 		break;
1397 	    case PCI_PCIX_CMD_DADR:
1398 		cmd = (pcie_bdg_regs->pcie_sue_hdr[0] >>
1399 		    PCIE_AER_SUCE_HDR_CMD_UP_SHIFT) &
1400 		    PCIE_AER_SUCE_HDR_CMD_UP_MASK;
1401 		if (dual_addr)
1402 			break;
1403 		++dual_addr;
1404 		goto cmd_switch;
1405 	    default:
1406 		break;
1407 	}
1408 }
1409 
1410 /*ARGSUSED*/
1411 static int
1412 pcix_check_addr(dev_info_t *dip, ddi_fm_error_t *derr,
1413     pcix_ecc_regs_t *pcix_ecc_regs, int type)
1414 {
1415 	int cmd = (pcix_ecc_regs->pcix_ecc_ctlstat >> 16) & 0xf;
1416 	uint64_t addr;
1417 	pci_fme_bus_specific_t *pci_fme_bsp =
1418 	    (pci_fme_bus_specific_t *)derr->fme_bus_specific;
1419 
1420 	addr = pcix_ecc_regs->pcix_ecc_secaddr;
1421 	addr = addr << 32;
1422 	addr |= pcix_ecc_regs->pcix_ecc_fstaddr;
1423 
1424 	switch (cmd) {
1425 	    case PCI_PCIX_CMD_INTR:
1426 	    case PCI_PCIX_CMD_SPEC:
1427 		return (DDI_FM_FATAL);
1428 	    case PCI_PCIX_CMD_IORD:
1429 	    case PCI_PCIX_CMD_IOWR:
1430 		pci_fme_bsp->pci_bs_addr = addr;
1431 		pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID;
1432 		pci_fme_bsp->pci_bs_type = type;
1433 		return (DDI_FM_UNKNOWN);
1434 	    case PCI_PCIX_CMD_DEVID:
1435 		return (DDI_FM_FATAL);
1436 	    case PCI_PCIX_CMD_MEMRD_DW:
1437 	    case PCI_PCIX_CMD_MEMWR:
1438 	    case PCI_PCIX_CMD_MEMRD_BL:
1439 	    case PCI_PCIX_CMD_MEMWR_BL:
1440 		pci_fme_bsp->pci_bs_addr = addr;
1441 		pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID;
1442 		pci_fme_bsp->pci_bs_type = type;
1443 		return (DDI_FM_UNKNOWN);
1444 	    case PCI_PCIX_CMD_CFRD:
1445 	    case PCI_PCIX_CMD_CFWR:
1446 		/*
1447 		 * for type 1 config transaction we can find bdf from address
1448 		 */
1449 		if ((addr & 3) == 1) {
1450 			pci_fme_bsp->pci_bs_bdf = (addr >> 8) & 0xffffffff;
1451 			pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID;
1452 			pci_fme_bsp->pci_bs_type = type;
1453 		}
1454 		return (DDI_FM_UNKNOWN);
1455 	    case PCI_PCIX_CMD_SPL:
1456 	    case PCI_PCIX_CMD_DADR:
1457 		return (DDI_FM_UNKNOWN);
1458 	    case PCI_PCIX_CMD_MEMRDBL:
1459 	    case PCI_PCIX_CMD_MEMWRBL:
1460 		pci_fme_bsp->pci_bs_addr = addr;
1461 		pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID;
1462 		pci_fme_bsp->pci_bs_type = type;
1463 		return (DDI_FM_UNKNOWN);
1464 	    default:
1465 		return (DDI_FM_FATAL);
1466 	}
1467 }
1468 
/*
 * pci_bdg_error_report: Post ereports for generic PCI bridge errors
 * (discard-timer status and secondary status bits), dispatch the error
 * to the children below the bridge, and fold everything into a single
 * DDI_FM_FATAL/NONFATAL/UNKNOWN/OK severity.
 */
/*ARGSUSED*/
static int
pci_bdg_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
{
	pci_bdg_error_regs_t *pci_bdg_regs = erpt_p->pe_pci_regs->pci_bdg_regs;
	/* severity tallies; PCI_FM_SEV_INC() presumably bumps these */
	int fatal = 0;
	int nonfatal = 0;
	int unknown = 0;
	int ok = 0;
	int ret = DDI_FM_OK;
	char buf[FM_MAX_CLASS];
	int i;
	pci_fme_bus_specific_t *pci_fme_bsp =
	    (pci_fme_bus_specific_t *)derr->fme_bus_specific;

	/* register-driven reporting only applies to unexpected errors */
	if (derr->fme_flag != DDI_FM_ERR_UNEXPECTED)
		goto done;

	/* bridge control discard timer status -> pci.dto ereport */
	if ((pci_bdg_regs->pci_bdg_vflags & PCI_BDG_CTRL_VALID) &&
	    (pci_bdg_regs->pci_bdg_ctrl & PCI_BCNF_BCNTRL_DTO_STAT)) {
		(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
		    PCI_ERROR_SUBCLASS, PCI_DTO);
		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
		    PCI_SEC_CONFIG_STATUS, DATA_TYPE_UINT16,
		    pci_bdg_regs->pci_bdg_sec_stat, PCI_BCNTRL,
		    DATA_TYPE_UINT16, pci_bdg_regs->pci_bdg_ctrl, NULL);
		unknown++;
	}

	/* walk the secondary status bits, posting one ereport per match */
	if (pci_bdg_regs->pci_bdg_vflags & PCI_BDG_SEC_STAT_VALID) {
		for (i = 0; pci_bdg_err_tbl[i].err_class != NULL; i++) {
			if (pci_bdg_regs->pci_bdg_sec_stat &
			    pci_bdg_err_tbl[i].reg_bit) {
				(void) snprintf(buf, FM_MAX_CLASS, "%s.%s-%s",
				    PCI_ERROR_SUBCLASS, PCI_SEC_ERROR_SUBCLASS,
				    pci_bdg_err_tbl[i].err_class);
				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
				    PCI_SEC_CONFIG_STATUS, DATA_TYPE_UINT16,
				    pci_bdg_regs->pci_bdg_sec_stat, PCI_BCNTRL,
				    DATA_TYPE_UINT16,
				    pci_bdg_regs->pci_bdg_ctrl, NULL);
				PCI_FM_SEV_INC(pci_bdg_err_tbl[i].flags);
				/*
				 * If a valid target address was captured
				 * for this access-side error, queue it for
				 * handle matching.
				 */
				if (pci_fme_bsp && (pci_fme_bsp->pci_bs_flags &
				    PCI_BS_ADDR_VALID) &&
				    pci_fme_bsp->pci_bs_type == ACC_HANDLE &&
				    pci_bdg_err_tbl[i].terr_class)
					pci_target_enqueue(derr->fme_ena,
					    pci_bdg_err_tbl[i].terr_class,
					    PCI_ERROR_SUBCLASS,
					    pci_fme_bsp->pci_bs_addr);
			}
		}
#if !defined(__sparc)
		/*
		 * For x86, many drivers and even user-level code currently get
		 * away with accessing bad addresses, getting a UR and getting
		 * -1 returned. Unfortunately, we have no control over this, so
		 * we will have to treat all URs as nonfatal. Moreover, if the
		 * leaf driver is non-hardened, then we don't actually see the
		 * UR directly. All we see is a secondary bus master abort at
		 * the root complex - so it's this condition that we actually
		 * need to treat as nonfatal (providing no other unrelated nfe
		 * conditions have also been seen by the root complex).
		 */
		if ((erpt_p->pe_dflags & PCIEX_RC_DEV) &&
		    (pci_bdg_regs->pci_bdg_sec_stat & PCI_STAT_R_MAST_AB) &&
		    !(pci_bdg_regs->pci_bdg_sec_stat & PCI_STAT_S_PERROR)) {
			pcie_error_regs_t *pcie_regs =
			    (pcie_error_regs_t *)erpt_p->pe_regs;
			if ((pcie_regs->pcie_vflags & PCIE_ERR_STATUS_VALID) &&
			    !(pcie_regs->pcie_err_status &
			    PCIE_DEVSTS_NFE_DETECTED))
				nonfatal++;
			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s-%s",
			    PCI_ERROR_SUBCLASS, PCI_SEC_ERROR_SUBCLASS, PCI_MA);
			ddi_fm_ereport_post(dip, buf, derr->fme_ena,
			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
			    PCI_SEC_CONFIG_STATUS, DATA_TYPE_UINT16,
			    pci_bdg_regs->pci_bdg_sec_stat, PCI_BCNTRL,
			    DATA_TYPE_UINT16, pci_bdg_regs->pci_bdg_ctrl, NULL);
		}
#endif
	}

done:
	/*
	 * Need to check for poke and cautious put. We already know peek
	 * and cautious get errors occurred (as we got a trap) and we know
	 * they are nonfatal.
	 */
	if (derr->fme_flag == DDI_FM_ERR_EXPECTED) {
		/*
		 * for cautious puts we treat all errors as nonfatal. Actually
		 * we set nonfatal for cautious gets as well - doesn't do any
		 * harm
		 */
		if (pci_bdg_regs->pci_bdg_sec_stat & (PCI_STAT_R_TARG_AB |
		    PCI_STAT_R_MAST_AB | PCI_STAT_S_PERROR | PCI_STAT_S_SYSERR))
			nonfatal++;
	}
	if (derr->fme_flag == DDI_FM_ERR_POKE) {
		/*
		 * special case for pokes - we only consider master abort
		 * and target abort as nonfatal. Sserr with no master abort is
		 * fatal, but master/target abort can come in on separate
		 * instance, so return unknown and parent will determine if
		 * nonfatal (if another child returned nonfatal - ie master
		 * or target abort) or fatal otherwise
		 */
		if (pci_bdg_regs->pci_bdg_sec_stat & (PCI_STAT_R_TARG_AB |
		    PCI_STAT_R_MAST_AB))
			nonfatal++;
		if (erpt_p->pe_pci_regs->pci_err_status & PCI_STAT_S_SYSERR)
			unknown++;
	}

	/*
	 * now check children below the bridge
	 */
	ret = ndi_fm_handler_dispatch(dip, NULL, derr);
	PCI_FM_SEV_INC(ret);
	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}
1595 
/*
 * pcix_ecc_error_report: Post ereports for PCI-X ECC errors recorded in
 * the ECC control/status register(s) and compute the resulting severity.
 * Bridges carry two ECC register sets (index 1 is reported with the
 * secondary-error subclass); plain PCI-X devices carry one.  Returns an
 * aggregate DDI_FM_FATAL/NONFATAL/UNKNOWN/OK severity.
 */
static int
pcix_ecc_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
    void *pe_regs)
{
	pcix_error_regs_t *pcix_regs;
	pcix_bdg_error_regs_t *pcix_bdg_regs;
	pcix_ecc_regs_t *pcix_ecc_regs;
	int bridge;
	int i;
	int ecc_phase;
	int ecc_corr;
	int sec_ue;
	int sec_ce;
	/* severity tallies; PCI_FM_SEV_INC() presumably bumps these */
	int fatal = 0;
	int nonfatal = 0;
	int unknown = 0;
	int ok = 0;
	char buf[FM_MAX_CLASS];

	/* pe_regs points at a bridge or device structure per pe_dflags */
	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
		pcix_bdg_regs = (pcix_bdg_error_regs_t *)pe_regs;
		bridge = 1;
	} else {
		pcix_regs = (pcix_error_regs_t *)pe_regs;
		bridge = 0;
	}

	for (i = 0; i < (bridge ? 2 : 1); i++) {
		int ret = DDI_FM_OK;
		pcix_ecc_regs = bridge ? pcix_bdg_regs->pcix_bdg_ecc_regs[i] :
		    pcix_regs->pcix_ecc_regs;
		if (pcix_ecc_regs->pcix_ecc_vflags & PCIX_ERR_ECC_STS_VALID) {
			/* decode the ECC control/status fields */
			ecc_phase = (pcix_ecc_regs->pcix_ecc_ctlstat &
			    PCI_PCIX_ECC_PHASE) >> 0x4;
			ecc_corr = (pcix_ecc_regs->pcix_ecc_ctlstat &
			    PCI_PCIX_ECC_CORR);
			sec_ue = (pcix_ecc_regs->pcix_ecc_ctlstat &
			    PCI_PCIX_ECC_S_UE);
			sec_ce = (pcix_ecc_regs->pcix_ecc_ctlstat &
			    PCI_PCIX_ECC_S_CE);

			/*
			 * The phase identifies which part of the transaction
			 * the ECC error hit; correctable errors are OK,
			 * uncorrectable address/attribute errors are fatal,
			 * and uncorrectable data errors may be recoverable
			 * if the failing address can be matched to a handle.
			 */
			switch (ecc_phase) {
			    case PCI_PCIX_ECC_PHASE_NOERR:
				break;
			    case PCI_PCIX_ECC_PHASE_FADDR:
			    case PCI_PCIX_ECC_PHASE_SADDR:
				PCI_FM_SEV_INC(ecc_corr ?  DDI_FM_OK :
				    DDI_FM_FATAL);
				(void) snprintf(buf, FM_MAX_CLASS,
				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
				    ecc_corr ? PCIX_ECC_CE_ADDR :
				    PCIX_ECC_UE_ADDR);
				break;
			    case PCI_PCIX_ECC_PHASE_ATTR:
				PCI_FM_SEV_INC(ecc_corr ?
				    DDI_FM_OK : DDI_FM_FATAL);
				(void) snprintf(buf, FM_MAX_CLASS,
				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
				    ecc_corr ? PCIX_ECC_CE_ATTR :
				    PCIX_ECC_UE_ATTR);
				break;
			    case PCI_PCIX_ECC_PHASE_DATA32:
			    case PCI_PCIX_ECC_PHASE_DATA64:
				if (ecc_corr)
					ret = DDI_FM_OK;
				else {
					int type;
					pci_error_regs_t *pci_regs =
					    erpt_p->pe_pci_regs;

					/*
					 * Choose the handle type from which
					 * side of the bridge signalled the
					 * parity error.
					 */
					if (i) {
						if (pci_regs->pci_bdg_regs->
						    pci_bdg_sec_stat &
						    PCI_STAT_S_PERROR)
							type = ACC_HANDLE;
						else
							type = DMA_HANDLE;
					} else {
						if (pci_regs->pci_err_status &
						    PCI_STAT_S_PERROR)
							type = DMA_HANDLE;
						else
							type = ACC_HANDLE;
					}
					ret = pcix_check_addr(dip, derr,
					    pcix_ecc_regs, type);
				}
				PCI_FM_SEV_INC(ret);

				(void) snprintf(buf, FM_MAX_CLASS,
				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
				    ecc_corr ? PCIX_ECC_CE_DATA :
				    PCIX_ECC_UE_DATA);
				break;
			}
			/* post the phase-specific ereport built above */
			if (ecc_phase)
				if (bridge)
					ddi_fm_ereport_post(dip, buf,
					    derr->fme_ena,
					    DDI_NOSLEEP, FM_VERSION,
					    DATA_TYPE_UINT8, 0,
					    PCIX_SEC_STATUS, DATA_TYPE_UINT16,
					    pcix_bdg_regs->pcix_bdg_sec_stat,
					    PCIX_BDG_STAT, DATA_TYPE_UINT32,
					    pcix_bdg_regs->pcix_bdg_stat,
					    PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_ctlstat,
					    PCIX_ECC_ATTR, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_attr, NULL);
				else
					ddi_fm_ereport_post(dip, buf,
					    derr->fme_ena,
					    DDI_NOSLEEP, FM_VERSION,
					    DATA_TYPE_UINT8, 0,
					    PCIX_COMMAND, DATA_TYPE_UINT16,
					    pcix_regs->pcix_command,
					    PCIX_STATUS, DATA_TYPE_UINT32,
					    pcix_regs->pcix_status,
					    PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_ctlstat,
					    PCIX_ECC_ATTR, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_attr, NULL);
			/* separately report secondary CE/UE indications */
			if (sec_ce || sec_ue) {
				(void) snprintf(buf, FM_MAX_CLASS,
				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
				    sec_ce ? PCIX_ECC_S_CE : PCIX_ECC_S_UE);
				if (bridge)
					ddi_fm_ereport_post(dip, buf,
					    derr->fme_ena,
					    DDI_NOSLEEP, FM_VERSION,
					    DATA_TYPE_UINT8, 0,
					    PCIX_SEC_STATUS, DATA_TYPE_UINT16,
					    pcix_bdg_regs->pcix_bdg_sec_stat,
					    PCIX_BDG_STAT, DATA_TYPE_UINT32,
					    pcix_bdg_regs->pcix_bdg_stat,
					    PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_ctlstat,
					    PCIX_ECC_ATTR, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_attr, NULL);
				else
					ddi_fm_ereport_post(dip, buf,
					    derr->fme_ena,
					    DDI_NOSLEEP, FM_VERSION,
					    DATA_TYPE_UINT8, 0,
					    PCIX_COMMAND, DATA_TYPE_UINT16,
					    pcix_regs->pcix_command,
					    PCIX_STATUS, DATA_TYPE_UINT32,
					    pcix_regs->pcix_status,
					    PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_ctlstat,
					    PCIX_ECC_ATTR, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_attr, NULL);
				PCI_FM_SEV_INC(sec_ue ? DDI_FM_FATAL :
				    DDI_FM_OK);
			}
		}
	}
	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}
1760 
/*
 * Scan the previously-captured PCI-X bridge error registers (passed via
 * pe_regs) and post an ereport for every error bit found in the bridge
 * status and secondary status registers.  If the bridge implements a
 * PCI-X version with ECC support, also report ECC errors.
 *
 * Returns the aggregate severity for the caller to fold into its own:
 * DDI_FM_FATAL, DDI_FM_NONFATAL, DDI_FM_UNKNOWN or DDI_FM_OK.
 */
static int
pcix_bdg_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
    void *pe_regs)
{
	pcix_bdg_error_regs_t *pcix_bdg_regs = (pcix_bdg_error_regs_t *)pe_regs;
	/* PCI_FM_SEV_INC() below updates these four severity counters */
	int fatal = 0;
	int nonfatal = 0;
	int unknown = 0;
	int ok = 0;
	char buf[FM_MAX_CLASS];
	int i;

	/* Errors recorded in the (primary) PCI-X bridge status register */
	if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_STATUS_VALID) {
		for (i = 0; pcix_err_tbl[i].err_class != NULL; i++) {
			if ((pcix_bdg_regs->pcix_bdg_stat &
			    pcix_err_tbl[i].reg_bit)) {
				(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
				    PCIX_ERROR_SUBCLASS,
				    pcix_err_tbl[i].err_class);
				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
				    PCIX_SEC_STATUS, DATA_TYPE_UINT16,
				    pcix_bdg_regs->pcix_bdg_sec_stat,
				    PCIX_BDG_STAT, DATA_TYPE_UINT32,
				    pcix_bdg_regs->pcix_bdg_stat, NULL);
				PCI_FM_SEV_INC(pcix_err_tbl[i].flags);
			}
		}
	}

	/* Errors recorded in the PCI-X secondary status register */
	if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_SEC_STATUS_VALID) {
		for (i = 0; pcix_sec_err_tbl[i].err_class != NULL; i++) {
			if ((pcix_bdg_regs->pcix_bdg_sec_stat &
			    pcix_sec_err_tbl[i].reg_bit)) {
				(void) snprintf(buf, FM_MAX_CLASS, "%s.%s%s",
				    PCIX_ERROR_SUBCLASS,
				    PCIX_SEC_ERROR_SUBCLASS,
				    pcix_sec_err_tbl[i].err_class);
				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
				    PCIX_SEC_STATUS, DATA_TYPE_UINT16,
				    pcix_bdg_regs->pcix_bdg_sec_stat,
				    PCIX_BDG_STAT, DATA_TYPE_UINT32,
				    pcix_bdg_regs->pcix_bdg_stat, NULL);
				PCI_FM_SEV_INC(pcix_sec_err_tbl[i].flags);
			}
		}
	}

	/* Log/Handle ECC errors */
	if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) {
		int ret;

		ret = pcix_ecc_error_report(dip, derr, erpt_p,
		    (void *)pcix_bdg_regs);
		PCI_FM_SEV_INC(ret);
	}
	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}
1821 
/*
 * Scan the captured PCI-X (non-bridge) error registers hanging off
 * erpt_p->pe_regs and post an ereport for every error bit set in the
 * PCI-X status register; also report ECC errors when the device's PCI-X
 * capability version supports ECC.
 *
 * Returns the aggregate severity: DDI_FM_FATAL, DDI_FM_NONFATAL,
 * DDI_FM_UNKNOWN or DDI_FM_OK.
 */
static int
pcix_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
{
	pcix_error_regs_t *pcix_regs = (pcix_error_regs_t *)erpt_p->pe_regs;
	/* PCI_FM_SEV_INC() below updates these four severity counters */
	int fatal = 0;
	int nonfatal = 0;
	int unknown = 0;
	int ok = 0;
	char buf[FM_MAX_CLASS];
	int i;

	if (pcix_regs->pcix_vflags & PCIX_ERR_STATUS_VALID) {
		for (i = 0; pcix_err_tbl[i].err_class != NULL; i++) {
			if (!(pcix_regs->pcix_status & pcix_err_tbl[i].reg_bit))
				continue;

			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
			    PCIX_ERROR_SUBCLASS, pcix_err_tbl[i].err_class);
			ddi_fm_ereport_post(dip, buf, derr->fme_ena,
			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
			    PCIX_COMMAND, DATA_TYPE_UINT16,
			    pcix_regs->pcix_command, PCIX_STATUS,
			    DATA_TYPE_UINT32, pcix_regs->pcix_status,
			    NULL);
			PCI_FM_SEV_INC(pcix_err_tbl[i].flags);
		}
	}
	/* Log/Handle ECC errors */
	if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) {
		int ret = pcix_ecc_error_report(dip, derr, erpt_p,
		    (void *)pcix_regs);
		PCI_FM_SEV_INC(ret);
	}

	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}
1859 
/*
 * Report errors recorded in a PCI Express root complex's AER root error
 * status register (passed via pe_regs): fatal/non-fatal/correctable error
 * messages received from downstream devices, including the "multiple
 * message" variants.
 *
 * Note only the fatal counter is ever incremented here (a received fatal
 * message with a UE implies a fatal condition); nonfatal/unknown exist
 * solely for the common severity-return idiom used throughout this file.
 *
 * Returns DDI_FM_FATAL, DDI_FM_NONFATAL, DDI_FM_UNKNOWN or DDI_FM_OK.
 */
static int
pcie_rc_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
    void *pe_regs)
{
	pcie_adv_error_regs_t *pcie_adv_regs = (pcie_adv_error_regs_t *)pe_regs;
	int fatal = 0;
	int nonfatal = 0;
	int unknown = 0;
	char buf[FM_MAX_CLASS];

	if (pcie_adv_regs->pcie_adv_vflags & PCIE_RC_ERR_STATUS_VALID) {
		pcie_adv_rc_error_regs_t *pcie_rc_regs =
		    pcie_adv_regs->pcie_adv_rc_regs;
		int ce, ue, mult_ce, mult_ue, first_ue_fatal, nfe, fe;

		/* Decompose the root error status register into flags */
		ce = pcie_rc_regs->pcie_rc_err_status &
		    PCIE_AER_RE_STS_CE_RCVD;
		ue = pcie_rc_regs->pcie_rc_err_status &
		    PCIE_AER_RE_STS_FE_NFE_RCVD;
		mult_ce = pcie_rc_regs->pcie_rc_err_status &
		    PCIE_AER_RE_STS_MUL_CE_RCVD;
		mult_ue = pcie_rc_regs->pcie_rc_err_status &
		    PCIE_AER_RE_STS_MUL_FE_NFE_RCVD;
		first_ue_fatal = pcie_rc_regs->pcie_rc_err_status &
		    PCIE_AER_RE_STS_FIRST_UC_FATAL;
		nfe = pcie_rc_regs->pcie_rc_err_status &
		    PCIE_AER_RE_STS_NFE_MSGS_RCVD;
		fe = pcie_rc_regs->pcie_rc_err_status &
		    PCIE_AER_RE_STS_FE_MSGS_RCVD;
		/*
		 * log fatal/nonfatal/corrected messages
		 * received by root complex
		 */
		if (ue && fe)
			fatal++;

		if (fe && first_ue_fatal) {
			(void) snprintf(buf, FM_MAX_CLASS,
			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_FE_MSG);
			pcie_ereport_post(dip, derr, erpt_p, buf,
			    PCIEX_TYPE_RC_UE_MSG);
		}
		if (nfe && !first_ue_fatal) {
			(void) snprintf(buf, FM_MAX_CLASS,
			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_NFE_MSG);
			pcie_ereport_post(dip, derr, erpt_p, buf,
			    PCIEX_TYPE_RC_UE_MSG);
		}
		if (ce) {
			(void) snprintf(buf, FM_MAX_CLASS,
			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_CE_MSG);
			pcie_ereport_post(dip, derr, erpt_p, buf,
			    PCIEX_TYPE_RC_CE_MSG);
		}
		if (mult_ce) {
			(void) snprintf(buf, FM_MAX_CLASS,
			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_MCE_MSG);
			pcie_ereport_post(dip, derr, erpt_p, buf,
			    PCIEX_TYPE_RC_MULT_MSG);
		}
		if (mult_ue) {
			(void) snprintf(buf, FM_MAX_CLASS,
			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_MUE_MSG);
			pcie_ereport_post(dip, derr, erpt_p, buf,
			    PCIEX_TYPE_RC_MULT_MSG);
		}
	}
	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}
1930 
1931 static int
1932 pcie_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
1933 {
1934 	int fatal = 0;
1935 	int nonfatal = 0;
1936 	int unknown = 0;
1937 	int ok = 0;
1938 	int type;
1939 	char buf[FM_MAX_CLASS];
1940 	int i;
1941 	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
1942 	pcie_adv_error_regs_t *pcie_adv_regs;
1943 	pcie_adv_bdg_error_regs_t *pcie_bdg_regs;
1944 
1945 	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) &&
1946 	    (erpt_p->pe_dflags & PCIX_DEV)) {
1947 		int ret = pcix_bdg_error_report(dip, derr, erpt_p,
1948 		    (void *)pcie_regs->pcix_bdg_regs);
1949 		PCI_FM_SEV_INC(ret);
1950 	}
1951 
1952 	if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV)) {
1953 		if (!(pcie_regs->pcie_vflags & PCIE_ERR_STATUS_VALID))
1954 			goto done;
1955 #if !defined(__sparc)
1956 		/*
1957 		 * On x86 ignore UR on non-RBER leaf devices, pciex-pci
1958 		 * bridges and switches.
1959 		 */
1960 		if ((pcie_regs->pcie_err_status & PCIE_DEVSTS_UR_DETECTED) &&
1961 		    !(pcie_regs->pcie_err_status & PCIE_DEVSTS_FE_DETECTED) &&
1962 		    ((erpt_p->pe_dflags & (PCIEX_2PCI_DEV|PCIEX_SWITCH_DEV)) ||
1963 		    !(erpt_p->pe_dflags & PCI_BRIDGE_DEV)) &&
1964 		    !(pcie_regs->pcie_dev_cap & PCIE_DEVCAP_ROLE_BASED_ERR_REP))
1965 			goto done;
1966 #endif
1967 		for (i = 0; pciex_nadv_err_tbl[i].err_class != NULL; i++) {
1968 			if (!(pcie_regs->pcie_err_status &
1969 			    pciex_nadv_err_tbl[i].reg_bit))
1970 				continue;
1971 
1972 			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
1973 			    PCIEX_ERROR_SUBCLASS,
1974 			    pciex_nadv_err_tbl[i].err_class);
1975 			pcie_ereport_post(dip, derr, erpt_p, buf,
1976 			    PCIEX_TYPE_GEN);
1977 			PCI_FM_SEV_INC(pciex_nadv_err_tbl[i].flags);
1978 		}
1979 		goto done;
1980 	}
1981 
1982 	pcie_adv_regs = pcie_regs->pcie_adv_regs;
1983 
1984 	/*
1985 	 * Log PCI Express uncorrectable errors
1986 	 */
1987 	if (pcie_adv_regs->pcie_adv_vflags & PCIE_UE_STATUS_VALID) {
1988 		for (i = 0; pciex_ue_err_tbl[i].err_class != NULL; i++) {
1989 			if (!(pcie_adv_regs->pcie_ue_status &
1990 			    pciex_ue_err_tbl[i].reg_bit))
1991 				continue;
1992 
1993 			(void) snprintf(buf, FM_MAX_CLASS,
1994 			    "%s.%s", PCIEX_ERROR_SUBCLASS,
1995 			    pciex_ue_err_tbl[i].err_class);
1996 
1997 			/*
1998 			 * First check for advisary nonfatal conditions
1999 			 * - hardware endpoint successfully retrying a cto
2000 			 * - hardware endpoint receiving poisoned tlp and
2001 			 *   dealing with it itself (but not if root complex)
2002 			 * If the device has declared these as correctable
2003 			 * errors then treat them as such.
2004 			 */
2005 			if ((pciex_ue_err_tbl[i].reg_bit == PCIE_AER_UCE_TO ||
2006 			    (pciex_ue_err_tbl[i].reg_bit == PCIE_AER_UCE_PTLP &&
2007 			    !(erpt_p->pe_dflags & PCIEX_RC_DEV))) &&
2008 			    (pcie_regs->pcie_err_status &
2009 			    PCIE_DEVSTS_CE_DETECTED) &&
2010 			    !(pcie_regs->pcie_err_status &
2011 			    PCIE_DEVSTS_NFE_DETECTED)) {
2012 				pcie_ereport_post(dip, derr, erpt_p, buf,
2013 				    PCIEX_TYPE_UE);
2014 				continue;
2015 			}
2016 
2017 #if !defined(__sparc)
2018 			/*
2019 			 * On x86 for leaf devices and pciex-pci bridges,
2020 			 * ignore UR on non-RBER devices or on RBER devices when
2021 			 * advisory nonfatal.
2022 			 */
2023 			if (pciex_ue_err_tbl[i].reg_bit == PCIE_AER_UCE_UR &&
2024 			    ((erpt_p->pe_dflags &
2025 			    (PCIEX_2PCI_DEV|PCIEX_SWITCH_DEV)) ||
2026 			    !(erpt_p->pe_dflags & PCI_BRIDGE_DEV))) {
2027 				if (!(pcie_regs->pcie_dev_cap &
2028 				    PCIE_DEVCAP_ROLE_BASED_ERR_REP))
2029 					continue;
2030 				if ((pcie_regs->pcie_err_status &
2031 				    PCIE_DEVSTS_CE_DETECTED) &&
2032 				    !(pcie_regs->pcie_err_status &
2033 				    PCIE_DEVSTS_NFE_DETECTED))
2034 					continue;
2035 			}
2036 #endif
2037 			pcie_adv_regs->pcie_adv_bdf = 0;
2038 			/*
2039 			 * Now try and look up handle if
2040 			 * - error bit is among PCIE_AER_UCE_LOG_BITS, and
2041 			 * - no other PCIE_AER_UCE_LOG_BITS are set, and
2042 			 * - error bit is not masked, and
2043 			 * - flag is DDI_FM_UNKNOWN
2044 			 */
2045 			if ((pcie_adv_regs->pcie_ue_status &
2046 			    pcie_aer_uce_log_bits) ==
2047 			    pciex_ue_err_tbl[i].reg_bit &&
2048 			    !(pciex_ue_err_tbl[i].reg_bit &
2049 			    pcie_adv_regs->pcie_ue_mask) &&
2050 			    pciex_ue_err_tbl[i].flags == DDI_FM_UNKNOWN)
2051 				pcie_check_addr(dip, derr, erpt_p);
2052 
2053 			PCI_FM_SEV_INC(pciex_ue_err_tbl[i].flags);
2054 			pcie_ereport_post(dip, derr, erpt_p, buf,
2055 			    PCIEX_TYPE_UE);
2056 		}
2057 	}
2058 
2059 	/*
2060 	 * Log PCI Express correctable errors
2061 	 */
2062 	if (pcie_adv_regs->pcie_adv_vflags & PCIE_CE_STATUS_VALID) {
2063 		for (i = 0; pciex_ce_err_tbl[i].err_class != NULL; i++) {
2064 			if (!(pcie_adv_regs->pcie_ce_status &
2065 			    pciex_ce_err_tbl[i].reg_bit))
2066 				continue;
2067 
2068 			(void) snprintf(buf, FM_MAX_CLASS,
2069 			    "%s.%s", PCIEX_ERROR_SUBCLASS,
2070 			    pciex_ce_err_tbl[i].err_class);
2071 			pcie_ereport_post(dip, derr, erpt_p, buf,
2072 			    PCIEX_TYPE_CE);
2073 		}
2074 	}
2075 
2076 	if (!(erpt_p->pe_dflags & PCI_BRIDGE_DEV))
2077 		goto done;
2078 
2079 	if (erpt_p->pe_dflags & PCIEX_RC_DEV) {
2080 		int ret = pcie_rc_error_report(dip, derr, erpt_p,
2081 		    (void *)pcie_adv_regs);
2082 		PCI_FM_SEV_INC(ret);
2083 	}
2084 
2085 	if (!((erpt_p->pe_dflags & PCIEX_2PCI_DEV) &&
2086 	    (pcie_adv_regs->pcie_adv_vflags & PCIE_SUE_STATUS_VALID)))
2087 		goto done;
2088 
2089 	pcie_bdg_regs = pcie_adv_regs->pcie_adv_bdg_regs;
2090 
2091 	for (i = 0; pcie_sue_err_tbl[i].err_class != NULL; i++) {
2092 		if ((pcie_bdg_regs->pcie_sue_status &
2093 		    pcie_sue_err_tbl[i].reg_bit)) {
2094 			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
2095 			    PCIEX_ERROR_SUBCLASS,
2096 			    pcie_sue_err_tbl[i].err_class);
2097 
2098 			if ((pcie_bdg_regs->pcie_sue_status &
2099 			    pcie_aer_suce_log_bits) !=
2100 			    pcie_sue_err_tbl[i].reg_bit ||
2101 			    pcie_sue_err_tbl[i].flags != DDI_FM_UNKNOWN) {
2102 				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
2103 				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
2104 				    PCIEX_SEC_UE_STATUS, DATA_TYPE_UINT32,
2105 				    pcie_bdg_regs->pcie_sue_status,
2106 #ifdef DEBUG
2107 				    PCIEX_SUE_HDR0, DATA_TYPE_UINT32,
2108 				    pcie_bdg_regs->pcie_sue_hdr0,
2109 				    PCIEX_SUE_HDR1, DATA_TYPE_UINT32,
2110 				    pcie_bdg_regs->pcie_sue_hdr[0],
2111 				    PCIEX_SUE_HDR2, DATA_TYPE_UINT32,
2112 				    pcie_bdg_regs->pcie_sue_hdr[1],
2113 				    PCIEX_SUE_HDR3, DATA_TYPE_UINT32,
2114 				    pcie_bdg_regs->pcie_sue_hdr[2],
2115 #endif
2116 				    NULL);
2117 			} else {
2118 				pcie_adv_regs->pcie_adv_bdf = 0;
2119 				switch (pcie_sue_err_tbl[i].reg_bit) {
2120 				case PCIE_AER_SUCE_RCVD_TA:
2121 				case PCIE_AER_SUCE_RCVD_MA:
2122 				case PCIE_AER_SUCE_USC_ERR:
2123 					type = ACC_HANDLE;
2124 					break;
2125 				case PCIE_AER_SUCE_TA_ON_SC:
2126 				case PCIE_AER_SUCE_MA_ON_SC:
2127 					type = DMA_HANDLE;
2128 					break;
2129 				case PCIE_AER_SUCE_UC_DATA_ERR:
2130 				case PCIE_AER_SUCE_PERR_ASSERT:
2131 					if (erpt_p->pe_pci_regs->pci_bdg_regs->
2132 					    pci_bdg_sec_stat &
2133 					    PCI_STAT_S_PERROR)
2134 						type = ACC_HANDLE;
2135 					else
2136 						type = DMA_HANDLE;
2137 					break;
2138 				}
2139 				pcie_pci_check_addr(dip, derr, erpt_p, type);
2140 				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
2141 				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
2142 				    PCIEX_SEC_UE_STATUS, DATA_TYPE_UINT32,
2143 				    pcie_bdg_regs->pcie_sue_status,
2144 				    PCIEX_SRC_ID, DATA_TYPE_UINT16,
2145 				    pcie_adv_regs->pcie_adv_bdf,
2146 				    PCIEX_SRC_VALID, DATA_TYPE_BOOLEAN_VALUE,
2147 				    (pcie_adv_regs->pcie_adv_bdf != NULL) ?
2148 				    1 : NULL,
2149 #ifdef DEBUG
2150 				    PCIEX_SUE_HDR0, DATA_TYPE_UINT32,
2151 				    pcie_bdg_regs->pcie_sue_hdr0,
2152 				    PCIEX_SUE_HDR1, DATA_TYPE_UINT32,
2153 				    pcie_bdg_regs->pcie_sue_hdr[0],
2154 				    PCIEX_SUE_HDR2, DATA_TYPE_UINT32,
2155 				    pcie_bdg_regs->pcie_sue_hdr[1],
2156 				    PCIEX_SUE_HDR3, DATA_TYPE_UINT32,
2157 				    pcie_bdg_regs->pcie_sue_hdr[2],
2158 #endif
2159 				    NULL);
2160 			}
2161 			PCI_FM_SEV_INC(pcie_sue_err_tbl[i].flags);
2162 		}
2163 	}
2164 done:
2165 	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
2166 	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
2167 }
2168 
/*
 * Top-level error-report dispatcher for a single PCI/PCI-X/PCIe device.
 * Posts generic PCI ereports, delegates to the bus-specific reporting
 * routines, and attempts to associate the error with a driver access or
 * DMA handle.  The aggregate severity is written to derr->fme_status.
 */
static void
pci_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
{
	/* PCI_FM_SEV_INC() updates these four severity counters */
	int fatal = 0;
	int nonfatal = 0;
	int unknown = 0;
	int ok = 0;
	char buf[FM_MAX_CLASS];
	int i;

	if (derr->fme_flag == DDI_FM_ERR_UNEXPECTED) {
		/*
		 * Log generic PCI errors.
		 */
		for (i = 0; pci_err_tbl[i].err_class != NULL; i++) {
			if (!(erpt_p->pe_pci_regs->pci_err_status &
			    pci_err_tbl[i].reg_bit) ||
			    !(erpt_p->pe_pci_regs->pci_vflags &
			    PCI_ERR_STATUS_VALID))
				continue;
			/*
			 * Generate an ereport for this error bit.
			 */
			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
			    PCI_ERROR_SUBCLASS, pci_err_tbl[i].err_class);
			ddi_fm_ereport_post(dip, buf, derr->fme_ena,
			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
			    PCI_CONFIG_STATUS, DATA_TYPE_UINT16,
			    erpt_p->pe_pci_regs->pci_err_status,
			    PCI_CONFIG_COMMAND, DATA_TYPE_UINT16,
			    erpt_p->pe_pci_regs->pci_cfg_comm, NULL);

			/*
			 * The meaning of SERR is different for PCIEX (just
			 * implies a message has been sent) so we don't want to
			 * treat that one as fatal.
			 */
			if ((erpt_p->pe_dflags & PCIEX_DEV) &&
			    pci_err_tbl[i].reg_bit == PCI_STAT_S_SYSERR) {
				unknown++;
			} else {
				PCI_FM_SEV_INC(pci_err_tbl[i].flags);
			}
		}
		/* Dispatch to the appropriate bus-specific reporter */
		if (erpt_p->pe_dflags & PCIEX_DEV) {
			int ret = pcie_error_report(dip, derr, erpt_p);
			PCI_FM_SEV_INC(ret);
		} else if (erpt_p->pe_dflags & PCIX_DEV) {
			if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
				int ret = pcix_bdg_error_report(dip, derr,
				    erpt_p, erpt_p->pe_regs);
				PCI_FM_SEV_INC(ret);
			} else {
				int ret = pcix_error_report(dip, derr, erpt_p);
				PCI_FM_SEV_INC(ret);
			}
		}
	}

	/* Bridges get secondary-side reporting regardless of fme_flag */
	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV)) {
		int ret = pci_bdg_error_report(dip, derr, erpt_p);
		PCI_FM_SEV_INC(ret);
	}

	if (derr->fme_flag == DDI_FM_ERR_UNEXPECTED) {
		pci_fme_bus_specific_t *pci_fme_bsp;
		int ret = DDI_FM_UNKNOWN;

		/*
		 * NOTE(review): fme_bus_specific is dereferenced without a
		 * NULL check; pci_ereport_post() below always points it at a
		 * pci_fme_bus_specific_t before calling here — confirm no
		 * other caller exists.
		 */
		pci_fme_bsp = (pci_fme_bus_specific_t *)derr->fme_bus_specific;
		if (pci_fme_bsp->pci_bs_flags & PCI_BS_ADDR_VALID) {
			ret = ndi_fmc_entry_error(dip,
			    pci_fme_bsp->pci_bs_type, derr,
			    (void *)&pci_fme_bsp->pci_bs_addr);
			PCI_FM_SEV_INC(ret);
		}
		/*
		 * If we didn't find the handle using an addr, try using bdf.
		 * Note we don't do this where the bdf is for a
		 * device behind a pciex/pci bridge as the bridge may have
		 * fabricated the bdf.
		 */
		if (ret == DDI_FM_UNKNOWN &&
		    (pci_fme_bsp->pci_bs_flags & PCI_BS_BDF_VALID) &&
		    pci_fme_bsp->pci_bs_bdf == erpt_p->pe_bdf &&
		    (erpt_p->pe_dflags & PCIEX_DEV) &&
		    !(erpt_p->pe_dflags & PCIEX_2PCI_DEV)) {
			ret = ndi_fmc_entry_error_all(dip,
			    pci_fme_bsp->pci_bs_type, derr);
			PCI_FM_SEV_INC(ret);
		}
	}

	derr->fme_status = (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}
2264 
2265 void
2266 pci_ereport_post(dev_info_t *dip, ddi_fm_error_t *derr, uint16_t *xx_status)
2267 {
2268 	struct i_ddi_fmhdl *fmhdl;
2269 	pci_erpt_t *erpt_p;
2270 	ddi_fm_error_t de;
2271 	pci_fme_bus_specific_t pci_fme_bs;
2272 
2273 	fmhdl = DEVI(dip)->devi_fmhdl;
2274 	if (!DDI_FM_EREPORT_CAP(ddi_fm_capable(dip)) &&
2275 	    !DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) {
2276 		i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL, DDI_NOSLEEP);
2277 		return;
2278 	}
2279 
2280 	/*
2281 	 * copy in the ddi_fm_error_t structure in case it's VER0
2282 	 */
2283 	de.fme_version = derr->fme_version;
2284 	de.fme_status = derr->fme_status;
2285 	de.fme_flag = derr->fme_flag;
2286 	de.fme_ena = derr->fme_ena;
2287 	de.fme_acc_handle = derr->fme_acc_handle;
2288 	de.fme_dma_handle = derr->fme_dma_handle;
2289 	de.fme_bus_specific = derr->fme_bus_specific;
2290 	if (derr->fme_version >= DDI_FME_VER1)
2291 		de.fme_bus_type = derr->fme_bus_type;
2292 	else
2293 		de.fme_bus_type = DDI_FME_BUS_TYPE_DFLT;
2294 	if (de.fme_bus_type == DDI_FME_BUS_TYPE_DFLT) {
2295 		/*
2296 		 * if this is the first pci device we've found convert
2297 		 * fme_bus_specific to DDI_FME_BUS_TYPE_PCI
2298 		 */
2299 		bzero(&pci_fme_bs, sizeof (pci_fme_bs));
2300 		if (de.fme_bus_specific) {
2301 			/*
2302 			 * the cpu passed us an addr - this can be used to look
2303 			 * up an access handle
2304 			 */
2305 			pci_fme_bs.pci_bs_addr = (uintptr_t)de.fme_bus_specific;
2306 			pci_fme_bs.pci_bs_type = ACC_HANDLE;
2307 			pci_fme_bs.pci_bs_flags |= PCI_BS_ADDR_VALID;
2308 		}
2309 		de.fme_bus_specific = (void *)&pci_fme_bs;
2310 		de.fme_bus_type = DDI_FME_BUS_TYPE_PCI;
2311 	}
2312 
2313 	ASSERT(fmhdl);
2314 
2315 	if (de.fme_ena == NULL)
2316 		de.fme_ena = fm_ena_generate(0, FM_ENA_FMT1);
2317 
2318 	erpt_p = (pci_erpt_t *)fmhdl->fh_bus_specific;
2319 	if (erpt_p == NULL)
2320 		return;
2321 
2322 	pci_regs_gather(dip, erpt_p, de.fme_flag);
2323 	pci_error_report(dip, &de, erpt_p);
2324 	pci_regs_clear(erpt_p);
2325 
2326 	derr->fme_status = de.fme_status;
2327 	derr->fme_ena = de.fme_ena;
2328 	derr->fme_acc_handle = de.fme_acc_handle;
2329 	derr->fme_dma_handle = de.fme_dma_handle;
2330 	if (xx_status != NULL)
2331 		*xx_status = erpt_p->pe_pci_regs->pci_err_status;
2332 }
2333 
2334 /*
2335  * private version of walk_devs() that can be used during panic. No
2336  * sleeping or locking required.
2337  */
2338 static int
2339 pci_fm_walk_devs(dev_info_t *dip, int (*f)(dev_info_t *, void *), void *arg)
2340 {
2341 	while (dip) {
2342 		switch ((*f)(dip, arg)) {
2343 		case DDI_WALK_TERMINATE:
2344 			return (DDI_WALK_TERMINATE);
2345 		case DDI_WALK_CONTINUE:
2346 			if (pci_fm_walk_devs(ddi_get_child(dip), f,
2347 			    arg) == DDI_WALK_TERMINATE)
2348 				return (DDI_WALK_TERMINATE);
2349 			break;
2350 		case DDI_WALK_PRUNECHILD:
2351 			break;
2352 		}
2353 		dip = ddi_get_next_sibling(dip);
2354 	}
2355 	return (DDI_WALK_CONTINUE);
2356 }
2357 
2358 /*
2359  * need special version of ddi_fm_ereport_post() as the leaf driver may
2360  * not be hardened.
2361  */
/*
 * Post an ereport against dip using the dev scheme, without going through
 * the driver's FM handle (the leaf driver may not be hardened).  During
 * panic the nvlists are built from a pre-reserved errorq element and
 * committed synchronously; otherwise they are heap-allocated, posted via
 * the event channel, and freed here.
 *
 * The variadic tail is a name/type/value payload list terminated by NULL,
 * as consumed by i_fm_payload_set().
 */
static void
pci_fm_ereport_post(dev_info_t *dip, const char *error_class, uint64_t ena,
    uint8_t version, ...)
{
	char *name;
	char device_path[MAXPATHLEN];
	char ddi_error_class[FM_MAX_CLASS];
	nvlist_t *ereport, *detector;
	nv_alloc_t *nva;
	errorq_elem_t *eqep;
	va_list ap;

	if (panicstr) {
		/* no sleeping allocations at panic time - use the errorq */
		eqep = errorq_reserve(ereport_errorq);
		if (eqep == NULL)
			return;
		ereport = errorq_elem_nvl(ereport_errorq, eqep);
		nva = errorq_elem_nva(ereport_errorq, eqep);
		detector = fm_nvlist_create(nva);
	} else {
		ereport = fm_nvlist_create(NULL);
		detector = fm_nvlist_create(NULL);
	}

	/* detector FMRI is the device path of dip */
	(void) ddi_pathname(dip, device_path);
	fm_fmri_dev_set(detector, FM_DEV_SCHEME_VERSION, NULL,
	    device_path, NULL);
	(void) snprintf(ddi_error_class, FM_MAX_CLASS, "%s.%s",
	    DDI_IO_CLASS, error_class);
	fm_ereport_set(ereport, version, ddi_error_class, ena, detector, NULL);

	/* attach the caller-supplied payload name/value pairs */
	va_start(ap, version);
	name = va_arg(ap, char *);
	(void) i_fm_payload_set(ereport, name, ap);
	va_end(ap);

	if (panicstr) {
		errorq_commit(ereport_errorq, eqep, ERRORQ_SYNC);
	} else {
		(void) fm_ereport_post(ereport, EVCH_TRYHARD);
		fm_nvlist_destroy(ereport, FM_NVA_FREE);
		fm_nvlist_destroy(detector, FM_NVA_FREE);
	}
}
2406 
2407 static int
2408 pci_check_regs(dev_info_t *dip, void *arg)
2409 {
2410 	int reglen;
2411 	int rn;
2412 	int totreg;
2413 	pci_regspec_t *drv_regp;
2414 	pci_target_err_t *tgt_err = (pci_target_err_t *)arg;
2415 
2416 	if (tgt_err->tgt_pci_space == PCI_REG_ADDR_G(PCI_ADDR_CONFIG)) {
2417 		/*
2418 		 * for config space, we need to check if the given address
2419 		 * is a valid config space address for this device - based
2420 		 * on pci_phys_hi of the config space entry in reg property.
2421 		 */
2422 		if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
2423 		    "reg", (caddr_t)&drv_regp, &reglen) != DDI_SUCCESS)
2424 			return (DDI_WALK_CONTINUE);
2425 
2426 		totreg = reglen / sizeof (pci_regspec_t);
2427 		for (rn = 0; rn < totreg; rn++) {
2428 			if (tgt_err->tgt_pci_space ==
2429 			    PCI_REG_ADDR_G(drv_regp[rn].pci_phys_hi) &&
2430 			    (tgt_err->tgt_pci_addr & (PCI_REG_BUS_M |
2431 			    PCI_REG_DEV_M | PCI_REG_FUNC_M)) ==
2432 			    (drv_regp[rn].pci_phys_hi & (PCI_REG_BUS_M |
2433 			    PCI_REG_DEV_M | PCI_REG_FUNC_M))) {
2434 				tgt_err->tgt_dip = dip;
2435 				kmem_free(drv_regp, reglen);
2436 				return (DDI_WALK_TERMINATE);
2437 			}
2438 		}
2439 		kmem_free(drv_regp, reglen);
2440 	} else {
2441 		/*
2442 		 * for non config space, need to check reg to look
2443 		 * for any non-relocable mapping, otherwise check
2444 		 * assigned-addresses.
2445 		 */
2446 		if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
2447 		    "reg", (caddr_t)&drv_regp, &reglen) != DDI_SUCCESS)
2448 			return (DDI_WALK_CONTINUE);
2449 
2450 		totreg = reglen / sizeof (pci_regspec_t);
2451 		for (rn = 0; rn < totreg; rn++) {
2452 			if ((drv_regp[rn].pci_phys_hi & PCI_RELOCAT_B) &&
2453 			    (tgt_err->tgt_pci_space == TGT_PCI_SPACE_UNKNOWN ||
2454 			    tgt_err->tgt_pci_space ==
2455 			    PCI_REG_ADDR_G(drv_regp[rn].pci_phys_hi)) &&
2456 			    (tgt_err->tgt_pci_addr >=
2457 			    (uint64_t)drv_regp[rn].pci_phys_low +
2458 			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32)) &&
2459 			    (tgt_err->tgt_pci_addr <
2460 			    (uint64_t)drv_regp[rn].pci_phys_low +
2461 			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32) +
2462 			    (uint64_t)drv_regp[rn].pci_size_low +
2463 			    ((uint64_t)drv_regp[rn].pci_size_hi << 32))) {
2464 				tgt_err->tgt_dip = dip;
2465 				kmem_free(drv_regp, reglen);
2466 				return (DDI_WALK_TERMINATE);
2467 			}
2468 		}
2469 		kmem_free(drv_regp, reglen);
2470 
2471 		if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
2472 		    "assigned-addresses", (caddr_t)&drv_regp, &reglen) !=
2473 		    DDI_SUCCESS)
2474 			return (DDI_WALK_CONTINUE);
2475 
2476 		totreg = reglen / sizeof (pci_regspec_t);
2477 		for (rn = 0; rn < totreg; rn++) {
2478 			if ((tgt_err->tgt_pci_space == TGT_PCI_SPACE_UNKNOWN ||
2479 			    tgt_err->tgt_pci_space ==
2480 			    PCI_REG_ADDR_G(drv_regp[rn].pci_phys_hi)) &&
2481 			    (tgt_err->tgt_pci_addr >=
2482 			    (uint64_t)drv_regp[rn].pci_phys_low +
2483 			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32)) &&
2484 			    (tgt_err->tgt_pci_addr <
2485 			    (uint64_t)drv_regp[rn].pci_phys_low +
2486 			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32) +
2487 			    (uint64_t)drv_regp[rn].pci_size_low +
2488 			    ((uint64_t)drv_regp[rn].pci_size_hi << 32))) {
2489 				tgt_err->tgt_dip = dip;
2490 				kmem_free(drv_regp, reglen);
2491 				return (DDI_WALK_TERMINATE);
2492 			}
2493 		}
2494 		kmem_free(drv_regp, reglen);
2495 	}
2496 	return (DDI_WALK_CONTINUE);
2497 }
2498 
2499 /*
2500  * impl_fix_ranges - fixes the config space entry of the "ranges"
2501  * property on psycho+ platforms.  (if changing this function please make sure
2502  * to change the pci_fix_ranges function in pcipsy.c)
2503  */
2504 /*ARGSUSED*/
2505 static void
2506 pci_fix_ranges(dev_info_t *dip, pci_ranges_t *pci_ranges, int nrange)
2507 {
2508 #if defined(__sparc)
2509 	char *name = ddi_binding_name(dip);
2510 
2511 	if ((strcmp(name, "pci108e,8000") == 0) ||
2512 	    (strcmp(name, "pci108e,a000") == 0) ||
2513 	    (strcmp(name, "pci108e,a001") == 0)) {
2514 		int i;
2515 		for (i = 0; i < nrange; i++, pci_ranges++)
2516 			if ((pci_ranges->child_high & PCI_REG_ADDR_M) ==
2517 			    PCI_ADDR_CONFIG)
2518 				pci_ranges->parent_low |=
2519 				    pci_ranges->child_high;
2520 	}
2521 #endif
2522 }
2523 
/*
 * Tree-walk callback applied to top-level pci/pciex nexus nodes only:
 * translate the system address in tgt_err->tgt_err_addr into a pci
 * address via the node's "ranges" property, then walk this nexus's
 * children with pci_check_regs() to find the target device.  On success
 * tgt_err->tgt_dip is set and the walk terminates; otherwise children
 * are pruned and the walk continues with other nexus nodes.
 */
static int
pci_check_ranges(dev_info_t *dip, void *arg)
{
	uint64_t range_parent_begin;
	uint64_t range_parent_size;
	uint64_t range_parent_end;
	uint32_t space_type;
	uint32_t bus_num;
	uint32_t range_offset;
	pci_ranges_t *pci_ranges, *rangep;
	pci_bus_range_t *pci_bus_rangep;
	int pci_ranges_length;
	int nrange;
	pci_target_err_t *tgt_err = (pci_target_err_t *)arg;
	int i, size;
	/* only interested in pci/pciex nexus nodes */
	if (strcmp(ddi_node_name(dip), "pci") != 0 &&
	    strcmp(ddi_node_name(dip), "pciex") != 0)
		return (DDI_WALK_CONTINUE);

	/*
	 * Get the ranges property. Note we only look at the top level pci
	 * node (hostbridge) which has a ranges property of type pci_ranges_t
	 * not at pci-pci bridges.
	 */
	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "ranges",
	    (caddr_t)&pci_ranges, &pci_ranges_length) != DDI_SUCCESS) {
		/*
		 * no ranges property - no translation needed
		 */
		tgt_err->tgt_pci_addr = tgt_err->tgt_err_addr;
		tgt_err->tgt_pci_space = TGT_PCI_SPACE_UNKNOWN;
		if (panicstr)
			/* no locking allowed during panic */
			(void) pci_fm_walk_devs(ddi_get_child(dip),
			    pci_check_regs, (void *)tgt_err);
		else {
			int circ = 0;
			ndi_devi_enter(dip, &circ);
			ddi_walk_devs(ddi_get_child(dip), pci_check_regs,
			    (void *)tgt_err);
			ndi_devi_exit(dip, circ);
		}
		if (tgt_err->tgt_dip != NULL)
			return (DDI_WALK_TERMINATE);
		return (DDI_WALK_PRUNECHILD);
	}
	nrange = pci_ranges_length / sizeof (pci_ranges_t);
	rangep = pci_ranges;

	/* Need to fix the pci ranges property for psycho based systems */
	pci_fix_ranges(dip, pci_ranges, nrange);

	for (i = 0; i < nrange; i++, rangep++) {
		/* parent (system) side of this ranges entry */
		range_parent_begin = ((uint64_t)rangep->parent_high << 32) +
		    rangep->parent_low;
		range_parent_size = ((uint64_t)rangep->size_high << 32) +
		    rangep->size_low;
		range_parent_end = range_parent_begin + range_parent_size - 1;

		if ((tgt_err->tgt_err_addr < range_parent_begin) ||
		    (tgt_err->tgt_err_addr > range_parent_end)) {
			/* Not in range */
			continue;
		}
		space_type = PCI_REG_ADDR_G(rangep->child_high);
		if (space_type == PCI_REG_ADDR_G(PCI_ADDR_CONFIG)) {
			/* Config space address - check bus range */
			range_offset = tgt_err->tgt_err_addr -
			    range_parent_begin;
			bus_num = PCI_REG_BUS_G(range_offset);
			if (ddi_getlongprop(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, "bus-range",
			    (caddr_t)&pci_bus_rangep, &size) != DDI_SUCCESS) {
				continue;
			}
			if ((bus_num < pci_bus_rangep->lo) ||
			    (bus_num > pci_bus_rangep->hi)) {
				/*
				 * Bus number not appropriate for this
				 * pci nexus.
				 */
				kmem_free(pci_bus_rangep, size);
				continue;
			}
			kmem_free(pci_bus_rangep, size);
		}

		/* We have a match if we get here - compute pci address */
		tgt_err->tgt_pci_addr = tgt_err->tgt_err_addr -
		    range_parent_begin;
		tgt_err->tgt_pci_addr += (((uint64_t)rangep->child_mid << 32) +
		    rangep->child_low);
		tgt_err->tgt_pci_space = space_type;
		if (panicstr)
			/* no locking allowed during panic */
			(void) pci_fm_walk_devs(ddi_get_child(dip),
			    pci_check_regs, (void *)tgt_err);
		else {
			int circ = 0;
			ndi_devi_enter(dip, &circ);
			ddi_walk_devs(ddi_get_child(dip), pci_check_regs,
			    (void *)tgt_err);
			ndi_devi_exit(dip, circ);
		}
		if (tgt_err->tgt_dip != NULL) {
			kmem_free(pci_ranges, pci_ranges_length);
			return (DDI_WALK_TERMINATE);
		}
	}
	kmem_free(pci_ranges, pci_ranges_length);
	return (DDI_WALK_PRUNECHILD);
}
2634 
2635 /*
2636  * Function used to drain pci_target_queue, either during panic or after softint
2637  * is generated, to generate target device ereports based on captured physical
2638  * addresses
2639  */
2640 /*ARGSUSED*/
2641 static void
2642 pci_target_drain(void *private_p, pci_target_err_t *tgt_err)
2643 {
2644 	char buf[FM_MAX_CLASS];
2645 
2646 	/*
2647 	 * The following assumes that all pci_pci bridge devices
2648 	 * are configured as transparant. Find the top-level pci
2649 	 * nexus which has tgt_err_addr in one of its ranges, converting this
2650 	 * to a pci address in the process. Then starting at this node do
2651 	 * another tree walk to find a device with the pci address we've
2652 	 * found within range of one of it's assigned-addresses properties.
2653 	 */
2654 	tgt_err->tgt_dip = NULL;
2655 	if (panicstr)
2656 		(void) pci_fm_walk_devs(ddi_root_node(), pci_check_ranges,
2657 		    (void *)tgt_err);
2658 	else
2659 		ddi_walk_devs(ddi_root_node(), pci_check_ranges,
2660 		    (void *)tgt_err);
2661 	if (tgt_err->tgt_dip == NULL)
2662 		return;
2663 
2664 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", tgt_err->tgt_bridge_type,
2665 	    tgt_err->tgt_err_class);
2666 	pci_fm_ereport_post(tgt_err->tgt_dip, buf, tgt_err->tgt_err_ena, 0,
2667 	    PCI_PA, DATA_TYPE_UINT64, tgt_err->tgt_err_addr, NULL);
2668 }
2669 
2670 void
2671 pci_target_enqueue(uint64_t ena, char *class, char *bridge_type, uint64_t addr)
2672 {
2673 	pci_target_err_t tgt_err;
2674 
2675 	tgt_err.tgt_err_ena = ena;
2676 	tgt_err.tgt_err_class = class;
2677 	tgt_err.tgt_bridge_type = bridge_type;
2678 	tgt_err.tgt_err_addr = addr;
2679 	errorq_dispatch(pci_target_queue, (void *)&tgt_err,
2680 	    sizeof (pci_target_err_t), ERRORQ_ASYNC);
2681 }
2682 
2683 void
2684 pci_targetq_init(void)
2685 {
2686 	/*
2687 	 * PCI target errorq, to schedule async handling of generation of
2688 	 * target device ereports based on captured physical address.
2689 	 * The errorq is created here but destroyed when _fini is called
2690 	 * for the pci module.
2691 	 */
2692 	if (pci_target_queue == NULL) {
2693 		pci_target_queue = errorq_create("pci_target_queue",
2694 		    (errorq_func_t)pci_target_drain, (void *)NULL,
2695 		    TARGET_MAX_ERRS, sizeof (pci_target_err_t), FM_ERR_PIL,
2696 		    ERRORQ_VITAL);
2697 		if (pci_target_queue == NULL)
2698 			panic("failed to create required system error queue");
2699 	}
2700 }
2701