xref: /titanic_51/usr/src/uts/common/os/pcifm.c (revision 14ea4bb737263733ad80a36b4f73f681c30a6b45)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/types.h>
30 #include <sys/sunndi.h>
31 #include <sys/sysmacros.h>
32 #include <sys/ddifm_impl.h>
33 #include <sys/fm/util.h>
34 #include <sys/fm/protocol.h>
35 #include <sys/fm/io/pci.h>
36 #include <sys/fm/io/ddi.h>
37 #include <sys/pci.h>
38 #include <sys/pcie.h>
39 #include <sys/pci_impl.h>
40 #include <sys/epm.h>
41 #include <sys/pcifm.h>
42 
/*
 * True when the PCI-X capability version implements the optional ECC
 * registers (PCI-X spec versions 1 and 2).
 */
#define	PCIX_ECC_VER_CHECK(x)	(((x) == PCI_PCIX_VER_1) ||\
				((x) == PCI_PCIX_VER_2))

/*
 * Expected PCI Express error mask values
 *
 * Tunables holding the AER mask register contents we expect devices to
 * have; pcie_ereport_setup() rewrites the hardware masks to these values
 * when they differ.  On non-sparc platforms, Received Master Abort is
 * masked on the secondary (PCI-X) side and excluded from the logged
 * bits — presumably because it occurs benignly there (e.g. during
 * probing); TODO confirm the rationale against platform errata.
 */
uint32_t pcie_expected_ce_mask = 0x0;
uint32_t pcie_expected_ue_mask = PCIE_AER_UCE_UC;
#if defined(__sparc)
uint32_t pcie_expected_sue_mask = 0x0;
#else
uint32_t pcie_expected_sue_mask = PCIE_AER_SUCE_RCVD_MA;
#endif
uint32_t pcie_aer_uce_log_bits = PCIE_AER_UCE_LOG_BITS;
#if defined(__sparc)
uint32_t pcie_aer_suce_log_bits = PCIE_AER_SUCE_LOG_BITS;
#else
uint32_t pcie_aer_suce_log_bits = \
	    PCIE_AER_SUCE_LOG_BITS & ~PCIE_AER_SUCE_RCVD_MA;
#endif

/* Deferred target-error queue; created elsewhere (not visible in this file). */
errorq_t *pci_target_queue = NULL;
65 
/*
 * Conventional PCI (leaf device) error table.  Each row appears to be:
 * ereport class suffix, status-register bit, optional target-ereport
 * class, default FM severity (field layout per pci_fm_err_t in
 * sys/pcifm.h — confirm there).  An all-NULL row terminates the table.
 */
pci_fm_err_t pci_err_tbl[] = {
	PCI_DET_PERR,	PCI_STAT_PERROR,	NULL,		DDI_FM_UNKNOWN,
	PCI_MDPE,	PCI_STAT_S_PERROR,	PCI_TARG_MDPE,	DDI_FM_UNKNOWN,
	PCI_SIG_SERR,	PCI_STAT_S_SYSERR,	NULL,		DDI_FM_FATAL,
	PCI_MA,		PCI_STAT_R_MAST_AB,	PCI_TARG_MA,	DDI_FM_UNKNOWN,
	PCI_REC_TA,	PCI_STAT_R_TARG_AB,	PCI_TARG_REC_TA, DDI_FM_UNKNOWN,
	PCI_SIG_TA,	PCI_STAT_S_TARG_AB,	NULL,		DDI_FM_UNKNOWN,
	NULL, NULL, NULL, NULL,
};
75 
/*
 * PCI-to-PCI bridge (secondary status) error table; same row layout as
 * pci_err_tbl.  Received Master Abort is only reported on sparc.
 * An all-NULL row terminates the table.
 */
pci_fm_err_t pci_bdg_err_tbl[] = {
	PCI_DET_PERR,	PCI_STAT_PERROR,	NULL,		DDI_FM_UNKNOWN,
	PCI_MDPE,	PCI_STAT_S_PERROR,	PCI_TARG_MDPE,	DDI_FM_UNKNOWN,
	PCI_REC_SERR,	PCI_STAT_S_SYSERR,	NULL,		DDI_FM_UNKNOWN,
#if defined(__sparc)
	PCI_MA,		PCI_STAT_R_MAST_AB,	PCI_TARG_MA,	DDI_FM_UNKNOWN,
#endif
	PCI_REC_TA,	PCI_STAT_R_TARG_AB,	PCI_TARG_REC_TA, DDI_FM_UNKNOWN,
	PCI_SIG_TA,	PCI_STAT_S_TARG_AB,	NULL,		DDI_FM_UNKNOWN,
	NULL, NULL, NULL, NULL,
};
87 
/*
 * PCI Express AER correctable-error table (bits of the AER CE status
 * register).  Correctable errors are recovered by hardware, hence the
 * DDI_FM_OK severity throughout.  An all-NULL row terminates the table.
 */
static pci_fm_err_t pciex_ce_err_tbl[] = {
	PCIEX_RE,	PCIE_AER_CE_RECEIVER_ERR,	NULL,	DDI_FM_OK,
	PCIEX_RNR,	PCIE_AER_CE_REPLAY_ROLLOVER,	NULL,	DDI_FM_OK,
	PCIEX_RTO,	PCIE_AER_CE_REPLAY_TO,		NULL,	DDI_FM_OK,
	PCIEX_BDP,	PCIE_AER_CE_BAD_DLLP,		NULL,	DDI_FM_OK,
	PCIEX_BTP,	PCIE_AER_CE_BAD_TLP,		NULL,	DDI_FM_OK,
	PCIEX_ANFE,	PCIE_AER_CE_AD_NFE,		NULL,	DDI_FM_OK,
	NULL, NULL, NULL, NULL,
};
97 
/*
 * PCI Express AER uncorrectable-error table (bits of the AER UE status
 * register).  Link-level failures are fatal; transaction-level errors
 * are DDI_FM_UNKNOWN pending further analysis; Unsupported Completion
 * is expected/benign (DDI_FM_OK).  An all-NULL row terminates the table.
 */
static pci_fm_err_t pciex_ue_err_tbl[] = {
	PCIEX_TE,	PCIE_AER_UCE_TRAINING,		NULL,	DDI_FM_FATAL,
	PCIEX_DLP,	PCIE_AER_UCE_DLP,		NULL,	DDI_FM_FATAL,
	PCIEX_SD,	PCIE_AER_UCE_SD,		NULL,   DDI_FM_FATAL,
	PCIEX_ROF,	PCIE_AER_UCE_RO,		NULL,	DDI_FM_FATAL,
	PCIEX_FCP,	PCIE_AER_UCE_FCP,		NULL,	DDI_FM_FATAL,
	PCIEX_MFP,	PCIE_AER_UCE_MTLP,		NULL,	DDI_FM_FATAL,
	PCIEX_CTO,	PCIE_AER_UCE_TO,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_UC,	PCIE_AER_UCE_UC,		NULL,	DDI_FM_OK,
	PCIEX_ECRC,	PCIE_AER_UCE_ECRC,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_CA,	PCIE_AER_UCE_CA,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_UR,	PCIE_AER_UCE_UR,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_POIS,	PCIE_AER_UCE_PTLP,		NULL,	DDI_FM_UNKNOWN,
	NULL, NULL, NULL, NULL,
};
113 
/*
 * PCIe-to-PCI/PCI-X bridge secondary uncorrectable-error table (bits of
 * the AER secondary UE status register).  Received Master Abort is only
 * reported on sparc, matching pcie_expected_sue_mask above.  An all-NULL
 * row terminates the table.
 */
static pci_fm_err_t pcie_sue_err_tbl[] = {
	PCIEX_S_TA_SC,	PCIE_AER_SUCE_TA_ON_SC,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_S_MA_SC,	PCIE_AER_SUCE_MA_ON_SC,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_S_RTA,	PCIE_AER_SUCE_RCVD_TA,		NULL,	DDI_FM_UNKNOWN,
#if defined(__sparc)
	PCIEX_S_RMA,	PCIE_AER_SUCE_RCVD_MA,		NULL,	DDI_FM_UNKNOWN,
#endif
	PCIEX_S_USC,	PCIE_AER_SUCE_USC_ERR,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_S_USCMD,	PCIE_AER_SUCE_USC_MSG_DATA_ERR,	NULL,	DDI_FM_FATAL,
	PCIEX_S_UDE,	PCIE_AER_SUCE_UC_DATA_ERR,	NULL,	DDI_FM_UNKNOWN,
	PCIEX_S_UAT,	PCIE_AER_SUCE_UC_ATTR_ERR,	NULL,	DDI_FM_FATAL,
	PCIEX_S_UADR,	PCIE_AER_SUCE_UC_ADDR_ERR,	NULL,	DDI_FM_FATAL,
	PCIEX_S_TEX,	PCIE_AER_SUCE_TIMER_EXPIRED,	NULL,	DDI_FM_FATAL,
	PCIEX_S_PERR,	PCIE_AER_SUCE_PERR_ASSERT,	NULL,	DDI_FM_UNKNOWN,
	PCIEX_S_SERR,	PCIE_AER_SUCE_SERR_ASSERT,	NULL,	DDI_FM_FATAL,
	PCIEX_INTERR,	PCIE_AER_SUCE_INTERNAL_ERR,	NULL,	DDI_FM_FATAL,
	NULL, NULL, NULL, NULL,
};
132 
/*
 * PCI-X (non-bridge) status-register error table.  An all-NULL row
 * terminates the table.
 */
static pci_fm_err_t pcix_err_tbl[] = {
	PCIX_SPL_DIS,		PCI_PCIX_SPL_DSCD,	NULL,	DDI_FM_UNKNOWN,
	PCIX_UNEX_SPL,		PCI_PCIX_UNEX_SPL,	NULL,	DDI_FM_UNKNOWN,
	PCIX_RX_SPL_MSG,	PCI_PCIX_RX_SPL_MSG,	NULL,   DDI_FM_UNKNOWN,
	NULL, NULL, NULL, NULL,
};
139 
/*
 * PCI-X bridge secondary-status error table.  Split-completion overrun
 * and delay conditions are informational (DDI_FM_OK).  An all-NULL row
 * terminates the table.
 */
static pci_fm_err_t pcix_sec_err_tbl[] = {
	PCIX_SPL_DIS,		PCI_PCIX_BSS_SPL_DSCD,	NULL,	DDI_FM_UNKNOWN,
	PCIX_UNEX_SPL,		PCI_PCIX_BSS_UNEX_SPL,	NULL,	DDI_FM_UNKNOWN,
	PCIX_BSS_SPL_OR,	PCI_PCIX_BSS_SPL_OR,	NULL,	DDI_FM_OK,
	PCIX_BSS_SPL_DLY,	PCI_PCIX_BSS_SPL_DLY,	NULL,	DDI_FM_OK,
	NULL, NULL, NULL, NULL,
};
147 
/*
 * PCI Express device-status error table for devices WITHOUT the AER
 * (advanced error reporting) capability; keyed off the baseline device
 * status register bits.  An all-NULL row terminates the table.
 */
static pci_fm_err_t pciex_nadv_err_tbl[] = {
	PCIEX_UR,	PCIE_DEVSTS_UR_DETECTED,	NULL,	DDI_FM_UNKNOWN,
	PCIEX_FAT,	PCIE_DEVSTS_FE_DETECTED,	NULL,	DDI_FM_FATAL,
	PCIEX_NONFAT,	PCIE_DEVSTS_NFE_DETECTED,	NULL,	DDI_FM_UNKNOWN,
	PCIEX_CORR,	PCIE_DEVSTS_CE_DETECTED,	NULL,	DDI_FM_OK,
	NULL, NULL, NULL, NULL,
};
155 
156 static int
157 pci_config_check(ddi_acc_handle_t handle, int fme_flag)
158 {
159 	ddi_acc_hdl_t *hp = impl_acc_hdl_get(handle);
160 	ddi_fm_error_t de;
161 
162 	if (!(DDI_FM_ACC_ERR_CAP(ddi_fm_capable(hp->ah_dip))))
163 		return (DDI_FM_OK);
164 
165 	de.fme_version = DDI_FME_VERSION;
166 
167 	ddi_fm_acc_err_get(handle, &de, de.fme_version);
168 	if (de.fme_status != DDI_FM_OK) {
169 		if (fme_flag == DDI_FM_ERR_UNEXPECTED) {
170 			char buf[FM_MAX_CLASS];
171 
172 			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
173 			    PCI_ERROR_SUBCLASS, PCI_NR);
174 			ddi_fm_ereport_post(hp->ah_dip, buf, de.fme_ena,
175 			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, NULL);
176 		}
177 		ddi_fm_acc_err_clear(handle, de.fme_version);
178 	}
179 	return (de.fme_status);
180 }
181 
/*
 * Read the PCI-X ECC register set (control/status, first and second
 * address, attributes) into *pcix_ecc_regs.  Bridges use the
 * bridge-specific register offsets within the PCI-X capability.  The
 * data is only marked valid (PCIX_ERR_ECC_STS_VALID) when the initial
 * status access passes pci_config_check(); a failed access aborts the
 * remaining reads.
 */
static void
pcix_ecc_regs_gather(pci_erpt_t *erpt_p, pcix_ecc_regs_t *pcix_ecc_regs,
    uint8_t pcix_cap_ptr, int fme_flag)
{
	int bdg = erpt_p->pe_dflags & PCI_BRIDGE_DEV;

	pcix_ecc_regs->pcix_ecc_ctlstat = pci_config_get32(erpt_p->pe_hdl,
	    (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_STATUS :
	    PCI_PCIX_ECC_STATUS)));
	/* Only trust the register contents if the access itself was clean. */
	if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
		pcix_ecc_regs->pcix_ecc_vflags |= PCIX_ERR_ECC_STS_VALID;
	else
		return;
	pcix_ecc_regs->pcix_ecc_fstaddr = pci_config_get32(erpt_p->pe_hdl,
	    (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_FST_AD :
	    PCI_PCIX_ECC_FST_AD)));
	pcix_ecc_regs->pcix_ecc_secaddr = pci_config_get32(erpt_p->pe_hdl,
	    (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_SEC_AD :
	    PCI_PCIX_ECC_SEC_AD)));
	pcix_ecc_regs->pcix_ecc_attr = pci_config_get32((
	    ddi_acc_handle_t)erpt_p->pe_hdl,
	    (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_ATTR : PCI_PCIX_ECC_ATTR)));
}
205 
/*
 * Gather PCI-X error registers into the error-report state.  `pe_regs`
 * is either a pcix_bdg_error_regs_t (bridge) or a pcix_error_regs_t
 * (leaf), selected by PCI_BRIDGE_DEV in pe_dflags.  Each register is
 * marked valid in the corresponding vflags only if its config access
 * passes pci_config_check(); the first failed access aborts gathering.
 * When the capability version supports ECC, the ECC register set(s) are
 * gathered as well.
 */
static void
pcix_regs_gather(pci_erpt_t *erpt_p, void *pe_regs, int fme_flag)
{
	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
		pcix_bdg_error_regs_t *pcix_bdg_regs =
		    (pcix_bdg_error_regs_t *)pe_regs;
		uint8_t pcix_bdg_cap_ptr;
		int i;

		pcix_bdg_cap_ptr = pcix_bdg_regs->pcix_bdg_cap_ptr;
		pcix_bdg_regs->pcix_bdg_sec_stat = pci_config_get16(
		    erpt_p->pe_hdl, (pcix_bdg_cap_ptr + PCI_PCIX_SEC_STATUS));
		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
			pcix_bdg_regs->pcix_bdg_vflags |=
			    PCIX_BDG_SEC_STATUS_VALID;
		else
			return;
		pcix_bdg_regs->pcix_bdg_stat = pci_config_get32(erpt_p->pe_hdl,
		    (pcix_bdg_cap_ptr + PCI_PCIX_BDG_STATUS));
		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
			pcix_bdg_regs->pcix_bdg_vflags |= PCIX_BDG_STATUS_VALID;
		else
			return;
		if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) {
			pcix_ecc_regs_t *pcix_bdg_ecc_regs;
			/*
			 * PCI Express to PCI-X bridges only implement the
			 * secondary side of the PCI-X ECC registers, bit one is
			 * read-only so we make sure we do not write to it.
			 */
			if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
				pcix_bdg_ecc_regs =
				    pcix_bdg_regs->pcix_bdg_ecc_regs[1];
				pcix_ecc_regs_gather(erpt_p, pcix_bdg_ecc_regs,
				    pcix_bdg_cap_ptr, fme_flag);
			} else {
				for (i = 0; i < 2; i++) {
					pcix_bdg_ecc_regs =
					    pcix_bdg_regs->pcix_bdg_ecc_regs[i];
					/*
					 * Select ECC register set i before
					 * reading it; the status register's
					 * select bit chooses the visible set.
					 */
					pci_config_put32(erpt_p->pe_hdl,
					    (pcix_bdg_cap_ptr +
					    PCI_PCIX_BDG_ECC_STATUS), i);
					pcix_ecc_regs_gather(erpt_p,
					    pcix_bdg_ecc_regs,
					    pcix_bdg_cap_ptr, fme_flag);
				}
			}
		}
	} else {
		pcix_error_regs_t *pcix_regs = (pcix_error_regs_t *)pe_regs;
		uint8_t pcix_cap_ptr;

		pcix_cap_ptr = pcix_regs->pcix_cap_ptr;

		pcix_regs->pcix_command = pci_config_get16(erpt_p->pe_hdl,
		    (pcix_cap_ptr + PCI_PCIX_COMMAND));
		pcix_regs->pcix_status = pci_config_get32(erpt_p->pe_hdl,
		    (pcix_cap_ptr + PCI_PCIX_STATUS));
		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
			pcix_regs->pcix_vflags |= PCIX_ERR_STATUS_VALID;
		else
			return;
		if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) {
			pcix_ecc_regs_t *pcix_ecc_regs =
			    pcix_regs->pcix_ecc_regs;

			pcix_ecc_regs_gather(erpt_p, pcix_ecc_regs,
			    pcix_cap_ptr, fme_flag);
		}
	}
}
277 
/*
 * Gather PCI Express error registers: baseline device status/control,
 * then (as applicable) the bridge's PCI-X registers, root-port
 * registers, and the AER capability's UE/CE status, header logs,
 * secondary-bridge registers, and root-complex error registers.
 * Registers are only marked valid in the appropriate vflags when their
 * config accesses pass pci_config_check().
 */
static void
pcie_regs_gather(pci_erpt_t *erpt_p, int fme_flag)
{
	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
	uint8_t pcie_cap_ptr;
	pcie_adv_error_regs_t *pcie_adv_regs;
	uint16_t pcie_ecap_ptr;

	pcie_cap_ptr = pcie_regs->pcie_cap_ptr;

	pcie_regs->pcie_err_status = pci_config_get16(erpt_p->pe_hdl,
	    pcie_cap_ptr + PCIE_DEVSTS);
	/* If even the device status read fails, give up entirely. */
	if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
		pcie_regs->pcie_vflags |= PCIE_ERR_STATUS_VALID;
	else
		return;

	pcie_regs->pcie_err_ctl = pci_config_get16(erpt_p->pe_hdl,
	    (pcie_cap_ptr + PCIE_DEVCTL));
	pcie_regs->pcie_dev_cap = pci_config_get16(erpt_p->pe_hdl,
	    (pcie_cap_ptr + PCIE_DEVCAP));

	/* PCIe bridge with a PCI-X secondary side: gather those too. */
	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) && (erpt_p->pe_dflags &
	    PCIX_DEV))
		pcix_regs_gather(erpt_p, pcie_regs->pcix_bdg_regs, fme_flag);

	if (erpt_p->pe_dflags & PCIEX_RC_DEV) {
		pcie_rc_error_regs_t *pcie_rc_regs = pcie_regs->pcie_rc_regs;

		pcie_rc_regs->pcie_rc_status = pci_config_get32(erpt_p->pe_hdl,
		    (pcie_cap_ptr + PCIE_ROOTSTS));
		pcie_rc_regs->pcie_rc_ctl = pci_config_get16(erpt_p->pe_hdl,
		    (pcie_cap_ptr + PCIE_ROOTCTL));
	}

	/* Remaining registers all live in the AER extended capability. */
	if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV))
		return;

	pcie_adv_regs = pcie_regs->pcie_adv_regs;

	pcie_ecap_ptr = pcie_adv_regs->pcie_adv_cap_ptr;

	pcie_adv_regs->pcie_ue_status = pci_config_get32(erpt_p->pe_hdl,
	    pcie_ecap_ptr + PCIE_AER_UCE_STS);
	if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
		pcie_adv_regs->pcie_adv_vflags |= PCIE_UE_STATUS_VALID;

	pcie_adv_regs->pcie_ue_mask = pci_config_get32(erpt_p->pe_hdl,
	    pcie_ecap_ptr + PCIE_AER_UCE_MASK);
	pcie_adv_regs->pcie_ue_sev = pci_config_get32(erpt_p->pe_hdl,
	    pcie_ecap_ptr + PCIE_AER_UCE_SERV);
	pcie_adv_regs->pcie_adv_ctl = pci_config_get32(erpt_p->pe_hdl,
	    pcie_ecap_ptr + PCIE_AER_CTL);
	pcie_adv_regs->pcie_ue_hdr0 = pci_config_get32(erpt_p->pe_hdl,
	    pcie_ecap_ptr + PCIE_AER_HDR_LOG);
	if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK) {
		int i;
		pcie_adv_regs->pcie_adv_vflags |= PCIE_UE_HDR_VALID;

		/* Header log is 4 dwords; hdr0 above, hdr[0..2] here. */
		for (i = 0; i < 3; i++) {
			pcie_adv_regs->pcie_ue_hdr[i] = pci_config_get32(
			    erpt_p->pe_hdl, pcie_ecap_ptr + PCIE_AER_HDR_LOG +
			    (4 * (i + 1)));
		}
	}

	pcie_adv_regs->pcie_ce_status = pci_config_get32(erpt_p->pe_hdl,
	    pcie_ecap_ptr + PCIE_AER_CE_STS);
	if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
		pcie_adv_regs->pcie_adv_vflags |= PCIE_CE_STATUS_VALID;

	pcie_adv_regs->pcie_ce_mask = pci_config_get32(erpt_p->pe_hdl,
	    pcie_ecap_ptr + PCIE_AER_CE_MASK);

	/*
	 * If pci express to pci bridge then grab the bridge
	 * error registers.
	 */
	if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
		pcie_adv_bdg_error_regs_t *pcie_bdg_regs =
		    pcie_adv_regs->pcie_adv_bdg_regs;

		pcie_bdg_regs->pcie_sue_status =
		    pci_config_get32(erpt_p->pe_hdl,
		    pcie_ecap_ptr + PCIE_AER_SUCE_STS);
		pcie_bdg_regs->pcie_sue_mask =
		    pci_config_get32(erpt_p->pe_hdl,
		    pcie_ecap_ptr + PCIE_AER_SUCE_MASK);
		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
			pcie_adv_regs->pcie_adv_vflags |= PCIE_SUE_STATUS_VALID;
		pcie_bdg_regs->pcie_sue_hdr0 = pci_config_get32(erpt_p->pe_hdl,
		    (pcie_ecap_ptr + PCIE_AER_SHDR_LOG));

		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK) {
			int i;

			pcie_adv_regs->pcie_adv_vflags |= PCIE_SUE_HDR_VALID;

			/* Secondary header log: same 4-dword layout. */
			for (i = 0; i < 3; i++) {
				pcie_bdg_regs->pcie_sue_hdr[i] =
				    pci_config_get32(erpt_p->pe_hdl,
					pcie_ecap_ptr + PCIE_AER_SHDR_LOG +
					(4 * (i + 1)));
			}
		}
	}
	/*
	 * If PCI Express root complex then grab the root complex
	 * error registers.
	 */
	if (erpt_p->pe_dflags & PCIEX_RC_DEV) {
		pcie_adv_rc_error_regs_t *pcie_rc_regs =
		    pcie_adv_regs->pcie_adv_rc_regs;

		pcie_rc_regs->pcie_rc_err_cmd = pci_config_get32(erpt_p->pe_hdl,
		    (pcie_ecap_ptr + PCIE_AER_RE_CMD));
		pcie_rc_regs->pcie_rc_err_status =
		    pci_config_get32(erpt_p->pe_hdl,
			(pcie_ecap_ptr + PCIE_AER_RE_STS));
		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
			pcie_adv_regs->pcie_adv_vflags |=
			    PCIE_RC_ERR_STATUS_VALID;
		pcie_rc_regs->pcie_rc_ce_src_id =
		    pci_config_get16(erpt_p->pe_hdl,
			(pcie_ecap_ptr + PCIE_AER_CE_SRC_ID));
		pcie_rc_regs->pcie_rc_ue_src_id =
		    pci_config_get16(erpt_p->pe_hdl,
			(pcie_ecap_ptr + PCIE_AER_ERR_SRC_ID));
		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
			pcie_adv_regs->pcie_adv_vflags |= PCIE_SRC_ID_VALID;
	}
}
410 
/*
 * Top-level register gathering entry point: read the generic PCI status
 * and command registers, then dispatch to the bridge-, PCIe-, or
 * PCI-X-specific gather routines based on pe_dflags.  A failed access
 * check on the generic registers aborts the whole gather.  `dip` is
 * currently unused (ARGSUSED).
 */
/*ARGSUSED*/
static void
pci_regs_gather(dev_info_t *dip, pci_erpt_t *erpt_p, int fme_flag)
{
	pci_error_regs_t *pci_regs = erpt_p->pe_pci_regs;

	/*
	 * Start by reading all the error registers that are available for
	 * pci and pci express and for leaf devices and bridges/switches
	 */
	pci_regs->pci_err_status = pci_config_get16(erpt_p->pe_hdl,
	    PCI_CONF_STAT);
	if (pci_config_check(erpt_p->pe_hdl, fme_flag) != DDI_FM_OK)
		return;
	pci_regs->pci_vflags |= PCI_ERR_STATUS_VALID;
	pci_regs->pci_cfg_comm = pci_config_get16(erpt_p->pe_hdl,
	    PCI_CONF_COMM);
	if (pci_config_check(erpt_p->pe_hdl, fme_flag) != DDI_FM_OK)
		return;

	/*
	 * If pci-pci bridge grab PCI bridge specific error registers.
	 */
	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
		pci_regs->pci_bdg_regs->pci_bdg_sec_stat =
		    pci_config_get16(erpt_p->pe_hdl, PCI_BCNF_SEC_STATUS);
		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
			pci_regs->pci_bdg_regs->pci_bdg_vflags |=
			    PCI_BDG_SEC_STAT_VALID;
		pci_regs->pci_bdg_regs->pci_bdg_ctrl =
		    pci_config_get16(erpt_p->pe_hdl, PCI_BCNF_BCNTRL);
		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
			pci_regs->pci_bdg_regs->pci_bdg_vflags |=
			    PCI_BDG_CTRL_VALID;
	}

	/*
	 * If pci express device grab pci express error registers and
	 * check for advanced error reporting features and grab them if
	 * available.
	 */
	if (erpt_p->pe_dflags & PCIEX_DEV)
		pcie_regs_gather(erpt_p, fme_flag);
	else if (erpt_p->pe_dflags & PCIX_DEV)
		pcix_regs_gather(erpt_p, erpt_p->pe_regs, fme_flag);

}
458 
/*
 * Clear the PCI-X error registers previously gathered by
 * pcix_regs_gather(): write back the gathered values (these registers
 * appear to be write-one-to-clear — confirm against the PCI-X spec),
 * but only for registers whose vflags marked them valid.  All vflags
 * are reset to 0 afterward.
 */
static void
pcix_regs_clear(pci_erpt_t *erpt_p, void *pe_regs)
{
	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
		pcix_bdg_error_regs_t *pcix_bdg_regs =
		    (pcix_bdg_error_regs_t *)pe_regs;
		uint8_t pcix_bdg_cap_ptr;
		int i;

		pcix_bdg_cap_ptr = pcix_bdg_regs->pcix_bdg_cap_ptr;

		if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_SEC_STATUS_VALID)
			pci_config_put16(erpt_p->pe_hdl,
			    (pcix_bdg_cap_ptr + PCI_PCIX_SEC_STATUS),
			    pcix_bdg_regs->pcix_bdg_sec_stat);

		if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_STATUS_VALID)
			pci_config_put32(erpt_p->pe_hdl,
			    (pcix_bdg_cap_ptr + PCI_PCIX_BDG_STATUS),
			    pcix_bdg_regs->pcix_bdg_stat);

		pcix_bdg_regs->pcix_bdg_vflags = 0x0;

		if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) {
			pcix_ecc_regs_t *pcix_bdg_ecc_regs;
			/*
			 * PCI Express to PCI-X bridges only implement the
			 * secondary side of the PCI-X ECC registers, bit one is
			 * read-only so we make sure we do not write to it.
			 */
			if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
				pcix_bdg_ecc_regs =
				    pcix_bdg_regs->pcix_bdg_ecc_regs[1];

				if (pcix_bdg_ecc_regs->pcix_ecc_vflags &
				    PCIX_ERR_ECC_STS_VALID) {

					pci_config_put32(erpt_p->pe_hdl,
					    (pcix_bdg_cap_ptr +
					    PCI_PCIX_BDG_ECC_STATUS),
					    pcix_bdg_ecc_regs->
					    pcix_ecc_ctlstat);
				}
				pcix_bdg_ecc_regs->pcix_ecc_vflags = 0x0;
			} else {
				for (i = 0; i < 2; i++) {
					pcix_bdg_ecc_regs =
					    pcix_bdg_regs->pcix_bdg_ecc_regs[i];


					if (pcix_bdg_ecc_regs->pcix_ecc_vflags &
					    PCIX_ERR_ECC_STS_VALID) {
						/*
						 * Select register set i, then
						 * write back its gathered
						 * control/status value.
						 */
						pci_config_put32(erpt_p->pe_hdl,
						    (pcix_bdg_cap_ptr +
						    PCI_PCIX_BDG_ECC_STATUS),
						    i);

						pci_config_put32(erpt_p->pe_hdl,
						    (pcix_bdg_cap_ptr +
						    PCI_PCIX_BDG_ECC_STATUS),
						    pcix_bdg_ecc_regs->
						    pcix_ecc_ctlstat);
					}
					pcix_bdg_ecc_regs->pcix_ecc_vflags =
					    0x0;
				}
			}
		}
	} else {
		pcix_error_regs_t *pcix_regs = (pcix_error_regs_t *)pe_regs;
		uint8_t pcix_cap_ptr;

		pcix_cap_ptr = pcix_regs->pcix_cap_ptr;

		if (pcix_regs->pcix_vflags & PCIX_ERR_STATUS_VALID)
			pci_config_put32(erpt_p->pe_hdl,
			    (pcix_cap_ptr + PCI_PCIX_STATUS),
			    pcix_regs->pcix_status);

		pcix_regs->pcix_vflags = 0x0;

		if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) {
			pcix_ecc_regs_t *pcix_ecc_regs =
			    pcix_regs->pcix_ecc_regs;

			if (pcix_ecc_regs->pcix_ecc_vflags &
			    PCIX_ERR_ECC_STS_VALID)
				pci_config_put32(erpt_p->pe_hdl,
				    (pcix_cap_ptr + PCI_PCIX_ECC_STATUS),
				    pcix_ecc_regs->pcix_ecc_ctlstat);

			pcix_ecc_regs->pcix_ecc_vflags = 0x0;
		}
	}
}
554 
/*
 * Clear the PCI Express error registers previously gathered by
 * pcie_regs_gather(): write the gathered values back to the device
 * status and AER status registers (write-back clears the latched error
 * bits), guarded by the same validity flags set during gathering.  The
 * advanced vflags are reset at the end; the base vflags right after the
 * device status write.
 */
static void
pcie_regs_clear(pci_erpt_t *erpt_p)
{
	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
	uint8_t pcie_cap_ptr;
	pcie_adv_error_regs_t *pcie_adv_regs;
	uint16_t pcie_ecap_ptr;

	pcie_cap_ptr = pcie_regs->pcie_cap_ptr;

	if (pcie_regs->pcie_vflags & PCIE_ERR_STATUS_VALID)
		pci_config_put16(erpt_p->pe_hdl, pcie_cap_ptr + PCIE_DEVSTS,
		    pcie_regs->pcie_err_status);

	pcie_regs->pcie_vflags = 0x0;

	/* Bridges with a PCI-X secondary side clear those registers too. */
	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) &&
	    (erpt_p->pe_dflags & PCIX_DEV))
		pcix_regs_clear(erpt_p, pcie_regs->pcix_bdg_regs);

	if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV))
		return;

	pcie_adv_regs = pcie_regs->pcie_adv_regs;

	pcie_ecap_ptr = pcie_adv_regs->pcie_adv_cap_ptr;

	if (pcie_adv_regs->pcie_adv_vflags & PCIE_UE_STATUS_VALID)
		pci_config_put32(erpt_p->pe_hdl,
		    pcie_ecap_ptr + PCIE_AER_UCE_STS,
		    pcie_adv_regs->pcie_ue_status);

	if (pcie_adv_regs->pcie_adv_vflags & PCIE_CE_STATUS_VALID)
		pci_config_put32(erpt_p->pe_hdl,
		    pcie_ecap_ptr + PCIE_AER_CE_STS,
		    pcie_adv_regs->pcie_ce_status);


	if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
		pcie_adv_bdg_error_regs_t *pcie_bdg_regs =
		    pcie_adv_regs->pcie_adv_bdg_regs;


		if (pcie_adv_regs->pcie_adv_vflags & PCIE_SUE_STATUS_VALID)
			pci_config_put32(erpt_p->pe_hdl,
			    pcie_ecap_ptr + PCIE_AER_SUCE_STS,
			    pcie_bdg_regs->pcie_sue_status);
	}
	/*
	 * If PCI Express root complex then clear the root complex
	 * error registers.
	 */
	if (erpt_p->pe_dflags & PCIEX_RC_DEV) {
		pcie_adv_rc_error_regs_t *pcie_rc_regs =
		    pcie_adv_regs->pcie_adv_rc_regs;


		if (pcie_adv_regs->pcie_adv_vflags & PCIE_RC_ERR_STATUS_VALID)
			pci_config_put32(erpt_p->pe_hdl,
			    (pcie_ecap_ptr + PCIE_AER_RE_STS),
			    pcie_rc_regs->pcie_rc_err_status);
	}
	pcie_adv_regs->pcie_adv_vflags = 0x0;
}
619 
/*
 * Top-level register clearing entry point, mirroring pci_regs_gather():
 * dispatch to the PCIe or PCI-X clear routine, then write back the
 * generic PCI status and (for bridges) secondary status / bridge
 * control registers, guarded by the validity flags set at gather time.
 * All vflags are reset to 0.
 */
static void
pci_regs_clear(pci_erpt_t *erpt_p)
{
	/*
	 * Finally clear the error bits
	 */
	if (erpt_p->pe_dflags & PCIEX_DEV)
		pcie_regs_clear(erpt_p);
	else if (erpt_p->pe_dflags & PCIX_DEV)
		pcix_regs_clear(erpt_p, erpt_p->pe_regs);

	if (erpt_p->pe_pci_regs->pci_vflags & PCI_ERR_STATUS_VALID)
		pci_config_put16(erpt_p->pe_hdl, PCI_CONF_STAT,
		    erpt_p->pe_pci_regs->pci_err_status);

	erpt_p->pe_pci_regs->pci_vflags = 0x0;

	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
		if (erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_vflags &
		    PCI_BDG_SEC_STAT_VALID)
			pci_config_put16(erpt_p->pe_hdl, PCI_BCNF_SEC_STATUS,
			    erpt_p->pe_pci_regs->pci_bdg_regs->
			    pci_bdg_sec_stat);
		if (erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_vflags &
		    PCI_BDG_CTRL_VALID)
			pci_config_put16(erpt_p->pe_hdl, PCI_BCNF_BCNTRL,
			    erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_ctrl);

		erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_vflags = 0x0;
	}
}
651 
652 /*
653  * pcix_ereport_setup: Allocate structures for PCI-X error handling and ereport
654  * generation.
655  */
656 /* ARGSUSED */
657 static void
658 pcix_ereport_setup(dev_info_t *dip, pci_erpt_t *erpt_p)
659 {
660 	uint8_t pcix_cap_ptr;
661 	int i;
662 
663 	pcix_cap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
664 	    "pcix-capid-pointer", PCI_CAP_NEXT_PTR_NULL);
665 
666 	if (pcix_cap_ptr != PCI_CAP_NEXT_PTR_NULL)
667 		erpt_p->pe_dflags |= PCIX_DEV;
668 	else
669 		return;
670 
671 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
672 		pcix_bdg_error_regs_t *pcix_bdg_regs;
673 
674 		erpt_p->pe_regs = kmem_zalloc(sizeof (pcix_bdg_error_regs_t),
675 		    KM_SLEEP);
676 		pcix_bdg_regs = (pcix_bdg_error_regs_t *)erpt_p->pe_regs;
677 		pcix_bdg_regs->pcix_bdg_cap_ptr = pcix_cap_ptr;
678 		pcix_bdg_regs->pcix_bdg_ver = pci_config_get16(erpt_p->pe_hdl,
679 		    pcix_cap_ptr + PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK;
680 		if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) {
681 			for (i = 0; i < 2; i++) {
682 				pcix_bdg_regs->pcix_bdg_ecc_regs[i] =
683 				    kmem_zalloc(sizeof (pcix_ecc_regs_t),
684 					KM_SLEEP);
685 			}
686 		}
687 	} else {
688 		pcix_error_regs_t *pcix_regs;
689 
690 		erpt_p->pe_regs = kmem_zalloc(sizeof (pcix_error_regs_t),
691 		    KM_SLEEP);
692 		pcix_regs = (pcix_error_regs_t *)erpt_p->pe_regs;
693 		pcix_regs->pcix_cap_ptr = pcix_cap_ptr;
694 		pcix_regs->pcix_ver = pci_config_get16(erpt_p->pe_hdl,
695 		    pcix_cap_ptr + PCI_PCIX_COMMAND) & PCI_PCIX_VER_MASK;
696 		if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) {
697 			pcix_regs->pcix_ecc_regs = kmem_zalloc(
698 			    sizeof (pcix_ecc_regs_t), KM_SLEEP);
699 		}
700 	}
701 }
702 
/*
 * Allocate and initialize structures for PCI Express error handling.
 *
 * On sparc this first walks the standard capability list and the
 * extended capability list itself, publishing "pcix-capid-pointer",
 * "pcie-capid-pointer", "pcie-capid-reg", "pcie-slotcap-reg" and
 * "pcie-aer-pointer" properties (elsewhere these are expected to come
 * from the pci_cap interfaces).  It then reads those properties to set
 * PCIX_DEV / PCIEX_DEV / PCIEX_RC_DEV / PCIEX_ADV_DEV / PCIEX_2PCI_DEV
 * in pe_dflags, allocates the matching register structures, and finally
 * gathers the current registers to verify the AER CE/UE/SUE mask values
 * — rewriting any mask that differs from the expected tunables at the
 * top of this file.
 */
static void
pcie_ereport_setup(dev_info_t *dip, pci_erpt_t *erpt_p)
{
	pcie_error_regs_t *pcie_regs;
	pcie_adv_error_regs_t *pcie_adv_regs;
	uint8_t pcix_cap_ptr;
	uint8_t pcie_cap_ptr;
	uint16_t pcie_ecap_ptr;
	uint16_t dev_type = 0;
	uint32_t mask = pcie_expected_ue_mask;

	/*
	 * The following sparc specific code should be removed once the pci_cap
	 * interfaces create the necessary properties for us.
	 */
#if defined(__sparc)
	ushort_t status;
	uint32_t slot_cap;
	uint8_t cap_ptr = 0;
	uint8_t cap_id = 0;
	uint32_t hdr, hdr_next_ptr, hdr_cap_id;
	uint16_t offset = P2ALIGN(PCIE_EXT_CAP, 4);
	uint16_t aer_ptr = 0;

	cap_ptr = pci_config_get8(erpt_p->pe_hdl, PCI_CONF_CAP_PTR);
	if (pci_config_check(erpt_p->pe_hdl, DDI_FM_ERR_UNEXPECTED) ==
	    DDI_FM_OK) {
		/*
		 * Walk the standard capability list, publishing properties
		 * for the PCI-X and PCI Express capabilities we find.
		 */
		while ((cap_id = pci_config_get8(erpt_p->pe_hdl, cap_ptr)) !=
		    0xff) {
			if (cap_id == PCI_CAP_ID_PCIX) {
				(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
				    "pcix-capid-pointer", cap_ptr);
			}
		if (cap_id == PCI_CAP_ID_PCI_E) {
			status = pci_config_get16(erpt_p->pe_hdl, cap_ptr + 2);
			if (status & PCIE_PCIECAP_SLOT_IMPL) {
				/* offset 14h is Slot Cap Register */
				slot_cap = pci_config_get32(erpt_p->pe_hdl,
				    cap_ptr + PCIE_SLOTCAP);
				(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
				    "pcie-slotcap-reg", slot_cap);
			}
			(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
			    "pcie-capid-reg", pci_config_get16(erpt_p->pe_hdl,
			    cap_ptr + PCIE_PCIECAP));
			(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
			    "pcie-capid-pointer", cap_ptr);

		}
			/* Stop on end-of-list, null pointer or bad access. */
			if ((cap_ptr = pci_config_get8(erpt_p->pe_hdl,
			    cap_ptr + 1)) == 0xff || cap_ptr == 0 ||
			    (pci_config_check(erpt_p->pe_hdl,
			    DDI_FM_ERR_UNEXPECTED) != DDI_FM_OK))
				break;
		}
	}

#endif

	pcix_cap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "pcix-capid-pointer", PCI_CAP_NEXT_PTR_NULL);

	if (pcix_cap_ptr != PCI_CAP_NEXT_PTR_NULL)
		erpt_p->pe_dflags |= PCIX_DEV;

	pcie_cap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "pcie-capid-pointer", PCI_CAP_NEXT_PTR_NULL);

	if (pcie_cap_ptr != PCI_CAP_NEXT_PTR_NULL) {
		erpt_p->pe_dflags |= PCIEX_DEV;
		erpt_p->pe_regs = kmem_zalloc(sizeof (pcie_error_regs_t),
		    KM_SLEEP);
		pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
		pcie_regs->pcie_cap_ptr = pcie_cap_ptr;
	}

	if (!(erpt_p->pe_dflags & PCIEX_DEV))
		return;

	/*
	 * Don't currently need to check for version here because we are
	 * compliant with PCIE 1.0a which is version 0 and is guaranteed
	 * software compatibility with future versions.  We will need to
	 * add errors for new detectors/features which are added in newer
	 * revisions [sec 7.8.2].
	 */
	pcie_regs->pcie_cap = pci_config_get16(erpt_p->pe_hdl,
	    pcie_regs->pcie_cap_ptr + PCIE_PCIECAP);

	dev_type = pcie_regs->pcie_cap & PCIE_PCIECAP_DEV_TYPE_MASK;

	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) &&
	    (erpt_p->pe_dflags & PCIX_DEV)) {
		int i;

		pcie_regs->pcix_bdg_regs =
		    kmem_zalloc(sizeof (pcix_bdg_error_regs_t), KM_SLEEP);

		pcie_regs->pcix_bdg_regs->pcix_bdg_cap_ptr = pcix_cap_ptr;
		pcie_regs->pcix_bdg_regs->pcix_bdg_ver =
		    pci_config_get16(erpt_p->pe_hdl,
			pcix_cap_ptr + PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK;

		if (PCIX_ECC_VER_CHECK(pcie_regs->pcix_bdg_regs->pcix_bdg_ver))
			for (i = 0; i < 2; i++)
				pcie_regs->pcix_bdg_regs->pcix_bdg_ecc_regs[i] =
				    kmem_zalloc(sizeof (pcix_ecc_regs_t),
					KM_SLEEP);
	}

	if (dev_type == PCIE_PCIECAP_DEV_TYPE_ROOT) {
		erpt_p->pe_dflags |= PCIEX_RC_DEV;
		pcie_regs->pcie_rc_regs = kmem_zalloc(
		    sizeof (pcie_rc_error_regs_t), KM_SLEEP);
	}
	/*
	 * The following sparc specific code should be removed once the pci_cap
	 * interfaces create the necessary properties for us.
	 */
#if defined(__sparc)

	/* Walk the extended capability list looking for the AER capability. */
	hdr = pci_config_get32(erpt_p->pe_hdl, offset);
	hdr_next_ptr = (hdr >> PCIE_EXT_CAP_NEXT_PTR_SHIFT) &
	    PCIE_EXT_CAP_NEXT_PTR_MASK;
	hdr_cap_id = (hdr >> PCIE_EXT_CAP_ID_SHIFT) & PCIE_EXT_CAP_ID_MASK;

	while ((hdr_next_ptr != PCIE_EXT_CAP_NEXT_PTR_NULL) &&
	    (hdr_cap_id != PCIE_EXT_CAP_ID_AER)) {
		offset = P2ALIGN(hdr_next_ptr, 4);
		hdr = pci_config_get32(erpt_p->pe_hdl, offset);
		hdr_next_ptr = (hdr >> PCIE_EXT_CAP_NEXT_PTR_SHIFT) &
		    PCIE_EXT_CAP_NEXT_PTR_MASK;
		hdr_cap_id = (hdr >> PCIE_EXT_CAP_ID_SHIFT) &
		    PCIE_EXT_CAP_ID_MASK;
	}

	if (hdr_cap_id == PCIE_EXT_CAP_ID_AER)
		aer_ptr = P2ALIGN(offset, 4);
	if (aer_ptr != PCI_CAP_NEXT_PTR_NULL)
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
		    "pcie-aer-pointer", aer_ptr);
#endif

	/*
	 * Find and store if this device is capable of pci express
	 * advanced errors, if not report an error against the device.
	 */
	pcie_ecap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "pcie-aer-pointer", PCI_CAP_NEXT_PTR_NULL);
	if (pcie_ecap_ptr != PCI_CAP_NEXT_PTR_NULL) {
		erpt_p->pe_dflags |= PCIEX_ADV_DEV;
		pcie_regs->pcie_adv_regs = kmem_zalloc(
		    sizeof (pcie_adv_error_regs_t), KM_SLEEP);
		pcie_regs->pcie_adv_regs->pcie_adv_cap_ptr = pcie_ecap_ptr;
	}

	if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV)) {
		return;
	}

	pcie_adv_regs = pcie_regs->pcie_adv_regs;

	/* Defensive: PCIEX_ADV_DEV implies this was just allocated. */
	if (pcie_adv_regs == NULL)
		return;
	/*
	 * Initialize structures for advanced PCI Express devices.
	 */

	/*
	 * Advanced error registers exist for PCI Express to PCI(X) Bridges and
	 * may also exist for PCI(X) to PCI Express Bridges, the latter is not
	 * well explained in the PCI Express to PCI/PCI-X Bridge Specification
	 * 1.0 and will be left out of the current gathering of these registers.
	 */
	if (dev_type == PCIE_PCIECAP_DEV_TYPE_PCIE2PCI) {
		erpt_p->pe_dflags |= PCIEX_2PCI_DEV;
		pcie_adv_regs->pcie_adv_bdg_regs = kmem_zalloc(
		    sizeof (pcie_adv_bdg_error_regs_t), KM_SLEEP);
	}

	if (erpt_p->pe_dflags & PCIEX_RC_DEV)
		pcie_adv_regs->pcie_adv_rc_regs = kmem_zalloc(
		    sizeof (pcie_adv_rc_error_regs_t), KM_SLEEP);

	/*
	 * Check that mask values are as expected, if not
	 * change them to what we desire.
	 */
	pci_regs_gather(dip, erpt_p, DDI_FM_ERR_UNEXPECTED);
	pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
	if (pcie_regs->pcie_adv_regs->pcie_ce_mask != pcie_expected_ce_mask) {
		pci_config_put32(erpt_p->pe_hdl,
		    pcie_ecap_ptr + PCIE_AER_CE_MASK, pcie_expected_ce_mask);
	}

	/* Disable PTLP/ECRC (or mask these two) for Switches */
	if (dev_type == PCIE_PCIECAP_DEV_TYPE_UP ||
	    dev_type == PCIE_PCIECAP_DEV_TYPE_DOWN)
		mask |= PCIE_AER_UCE_PTLP | PCIE_AER_UCE_ECRC;

	if (pcie_regs->pcie_adv_regs->pcie_ue_mask != mask) {
		pci_config_put32(erpt_p->pe_hdl,
		    pcie_ecap_ptr + PCIE_AER_UCE_MASK, mask);
	}
	if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
		if (pcie_regs->pcie_adv_regs->pcie_adv_bdg_regs->pcie_sue_mask
		    != pcie_expected_sue_mask) {
			pci_config_put32(erpt_p->pe_hdl,
			    pcie_ecap_ptr + PCIE_AER_SUCE_MASK,
			    pcie_expected_sue_mask);
		}
	}
}
916 
917 /*
918  * pci_ereport_setup: Detect PCI device type and initialize structures to be
919  * used to generate ereports based on detected generic device errors.
920  */
921 void
922 pci_ereport_setup(dev_info_t *dip)
923 {
924 	struct dev_info *devi = DEVI(dip);
925 	struct i_ddi_fmhdl *fmhdl = devi->devi_fmhdl;
926 	pci_erpt_t *erpt_p;
927 	uint8_t pci_hdr_type;
928 	uint16_t pci_status;
929 	pci_regspec_t *pci_rp;
930 	int32_t len;
931 	uint32_t phys_hi;
932 
933 	/*
934 	 * If device is not ereport capbable then report an error against the
935 	 * driver for using this interface,
936 	 */
937 	if (!DDI_FM_EREPORT_CAP(ddi_fm_capable(dip)) &&
938 	    !DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) {
939 		i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL, DDI_SLEEP);
940 		return;
941 	}
942 
943 	/*
944 	 * ASSERT fmhdl exists and fh_bus_specific is NULL.
945 	 */
946 	ASSERT(fmhdl && (fmhdl->fh_bus_specific == NULL));
947 
948 	erpt_p = kmem_zalloc(sizeof (pci_erpt_t), KM_SLEEP);
949 
950 	if (pci_config_setup(dip, &erpt_p->pe_hdl) != DDI_SUCCESS)
951 		goto error;
952 
953 	erpt_p->pe_pci_regs = kmem_zalloc(sizeof (pci_error_regs_t), KM_SLEEP);
954 
955 	pci_status = pci_config_get16(erpt_p->pe_hdl, PCI_CONF_STAT);
956 	if (pci_config_check(erpt_p->pe_hdl, DDI_FM_ERR_UNEXPECTED) !=
957 	    DDI_FM_OK)
958 		goto error;
959 
960 	/*
961 	 * Get header type and record if device is a bridge.
962 	 */
963 	pci_hdr_type = pci_config_get8(erpt_p->pe_hdl, PCI_CONF_HEADER);
964 	if (pci_config_check(erpt_p->pe_hdl, DDI_FM_ERR_UNEXPECTED) !=
965 	    DDI_FM_OK)
966 		goto error;
967 
968 	/*
969 	 * Check to see if PCI device is a bridge, if so allocate pci bridge
970 	 * error register structure.
971 	 */
972 	if ((pci_hdr_type & PCI_HEADER_TYPE_M) == PCI_HEADER_PPB) {
973 		erpt_p->pe_dflags |= PCI_BRIDGE_DEV;
974 		erpt_p->pe_pci_regs->pci_bdg_regs = kmem_zalloc(
975 		    sizeof (pci_bdg_error_regs_t), KM_SLEEP);
976 	}
977 
978 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg",
979 	    (caddr_t)&pci_rp, &len) == DDI_SUCCESS) {
980 		phys_hi = pci_rp->pci_phys_hi;
981 		kmem_free(pci_rp, len);
982 
983 		erpt_p->pe_bdf = (uint16_t)(PCI_REG_BDFR_G(phys_hi) >>
984 		    PCI_REG_FUNC_SHIFT);
985 	}
986 
987 
988 	if (!(pci_status & PCI_STAT_CAP)) {
989 		goto done;
990 	}
991 
992 	/*
993 	 * Initialize structures for PCI Express and PCI-X devices.
994 	 * Order matters below and pcie_ereport_setup should preceed
995 	 * pcix_ereport_setup.
996 	 */
997 	pcie_ereport_setup(dip, erpt_p);
998 
999 	if (!(erpt_p->pe_dflags & PCIEX_DEV)) {
1000 		pcix_ereport_setup(dip, erpt_p);
1001 	}
1002 
1003 done:
1004 	pci_regs_gather(dip, erpt_p, DDI_FM_ERR_UNEXPECTED);
1005 	pci_regs_clear(erpt_p);
1006 
1007 	/*
1008 	 * Before returning set fh_bus_specific to completed pci_erpt_t
1009 	 * structure
1010 	 */
1011 	fmhdl->fh_bus_specific = (void *)erpt_p;
1012 
1013 	return;
1014 error:
1015 	if (erpt_p->pe_pci_regs)
1016 		kmem_free(erpt_p->pe_pci_regs, sizeof (pci_error_regs_t));
1017 	kmem_free(erpt_p, sizeof (pci_erpt_t));
1018 	erpt_p = NULL;
1019 }
1020 
1021 static void
1022 pcix_ereport_teardown(pci_erpt_t *erpt_p)
1023 {
1024 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
1025 		pcix_bdg_error_regs_t *pcix_bdg_regs;
1026 		uint16_t pcix_ver;
1027 
1028 		pcix_bdg_regs = (pcix_bdg_error_regs_t *)erpt_p->pe_regs;
1029 		pcix_ver = pcix_bdg_regs->pcix_bdg_ver;
1030 		if (PCIX_ECC_VER_CHECK(pcix_ver)) {
1031 			int i;
1032 			for (i = 0; i < 2; i++)
1033 				kmem_free(pcix_bdg_regs->pcix_bdg_ecc_regs[i],
1034 				    sizeof (pcix_ecc_regs_t));
1035 		}
1036 		kmem_free(erpt_p->pe_regs, sizeof (pcix_bdg_error_regs_t));
1037 	} else {
1038 		pcix_error_regs_t *pcix_regs;
1039 		uint16_t pcix_ver;
1040 
1041 		pcix_regs = (pcix_error_regs_t *)erpt_p->pe_regs;
1042 		pcix_ver = pcix_regs->pcix_ver;
1043 		if (PCIX_ECC_VER_CHECK(pcix_ver)) {
1044 			kmem_free(pcix_regs->pcix_ecc_regs,
1045 			    sizeof (pcix_ecc_regs_t));
1046 		}
1047 		kmem_free(erpt_p->pe_regs, sizeof (pcix_error_regs_t));
1048 	}
1049 }
1050 
1051 static void
1052 pcie_ereport_teardown(pci_erpt_t *erpt_p)
1053 {
1054 	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
1055 
1056 	if (erpt_p->pe_dflags & PCIEX_ADV_DEV) {
1057 		pcie_adv_error_regs_t *pcie_adv = pcie_regs->pcie_adv_regs;
1058 
1059 		if (erpt_p->pe_dflags & PCIEX_2PCI_DEV)
1060 			kmem_free(pcie_adv->pcie_adv_bdg_regs,
1061 			    sizeof (pcie_adv_bdg_error_regs_t));
1062 		if (erpt_p->pe_dflags & PCIEX_RC_DEV)
1063 			kmem_free(pcie_adv->pcie_adv_rc_regs,
1064 			    sizeof (pcie_adv_rc_error_regs_t));
1065 		kmem_free(pcie_adv, sizeof (pcie_adv_error_regs_t));
1066 	}
1067 
1068 	if (erpt_p->pe_dflags & PCIEX_RC_DEV)
1069 		kmem_free(pcie_regs->pcie_rc_regs,
1070 		    sizeof (pcie_rc_error_regs_t));
1071 
1072 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
1073 		if (erpt_p->pe_dflags & PCIX_DEV) {
1074 			uint16_t pcix_ver = pcie_regs->pcix_bdg_regs->
1075 			    pcix_bdg_ver;
1076 
1077 			if (PCIX_ECC_VER_CHECK(pcix_ver)) {
1078 				int i;
1079 				for (i = 0; i < 2; i++)
1080 					kmem_free(pcie_regs->pcix_bdg_regs->
1081 					    pcix_bdg_ecc_regs[i],
1082 					    sizeof (pcix_ecc_regs_t));
1083 			}
1084 			kmem_free(pcie_regs->pcix_bdg_regs,
1085 			    sizeof (pcix_bdg_error_regs_t));
1086 		}
1087 	}
1088 	kmem_free(erpt_p->pe_regs, sizeof (pcie_error_regs_t));
1089 }
1090 
/*
 * pci_ereport_teardown: Release all state allocated by pci_ereport_setup()
 * and detach it from the device's FM handle.
 */
void
pci_ereport_teardown(dev_info_t *dip)
{
	struct i_ddi_fmhdl *fmhdl = DEVI(dip)->devi_fmhdl;
	pci_erpt_t *erpt_p;

	/*
	 * Complain (but continue, unlike setup) if the driver uses this
	 * interface without being ereport/error-callback capable.
	 */
	if (!DDI_FM_EREPORT_CAP(ddi_fm_capable(dip)) &&
	    !DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) {
		i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL, DDI_SLEEP);
	}

	ASSERT(fmhdl);

	/* nothing to do if setup never completed */
	erpt_p = (pci_erpt_t *)fmhdl->fh_bus_specific;
	if (erpt_p == NULL)
		return;

	/* free bus-specific (PCIe or PCI-X) register state first */
	if (erpt_p->pe_dflags & PCIEX_DEV)
		pcie_ereport_teardown(erpt_p);
	else if (erpt_p->pe_dflags & PCIX_DEV)
		pcix_ereport_teardown(erpt_p);
	pci_config_teardown((ddi_acc_handle_t *)&erpt_p->pe_hdl);
	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV)
		kmem_free(erpt_p->pe_pci_regs->pci_bdg_regs,
		    sizeof (pci_bdg_error_regs_t));
	kmem_free(erpt_p->pe_pci_regs, sizeof (pci_error_regs_t));
	kmem_free(erpt_p, sizeof (pci_erpt_t));
	fmhdl->fh_bus_specific = NULL;
	/*
	 * The following sparc specific code should be removed once the pci_cap
	 * interfaces create the necessary properties for us.
	 */
#if defined(__sparc)
	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcix-capid-pointer");
	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-slotcap-reg");
	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-capid-reg");
	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-capid-pointer");
	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-aer-pointer");
#endif
}
1131 
/*
 * Post a PCI Express ereport of class 'buf' against 'dip', attaching
 * the register payload appropriate to the given error type.
 */
static void
pcie_ereport_post(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
    char *buf, int errtype)
{
	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
	pcie_adv_error_regs_t *pcie_adv_regs = pcie_regs->pcie_adv_regs;
	pcie_adv_rc_error_regs_t *pcie_adv_rc_regs;

	switch (errtype) {
	    case PCIEX_TYPE_CE:
		/* correctable error: device status plus AER CE status */
		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
		    PCIEX_DEVSTS_REG, DATA_TYPE_UINT16,
		    pcie_regs->pcie_err_status,
		    PCIEX_CE_STATUS_REG, DATA_TYPE_UINT32,
		    pcie_adv_regs->pcie_ce_status, NULL);
		break;
	    case PCIEX_TYPE_UE:
		/*
		 * Uncorrectable error: UE status/severity/control and the
		 * source id.  The (pcie_adv_bdf != NULL) test just yields
		 * 1 or 0 for the boolean src-valid member; the captured
		 * TLP header words are included on DEBUG kernels only.
		 */
		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
		    PCIEX_DEVSTS_REG, DATA_TYPE_UINT16,
		    pcie_regs->pcie_err_status,
		    PCIEX_UE_STATUS_REG, DATA_TYPE_UINT32,
		    pcie_adv_regs->pcie_ue_status, PCIEX_UE_SEV_REG,
		    DATA_TYPE_UINT32, pcie_adv_regs->pcie_ue_sev,
		    PCIEX_ADV_CTL, DATA_TYPE_UINT32,
		    pcie_adv_regs->pcie_adv_ctl,
		    PCIEX_SRC_ID, DATA_TYPE_UINT16,
		    pcie_adv_regs->pcie_adv_bdf,
		    PCIEX_SRC_VALID, DATA_TYPE_BOOLEAN_VALUE,
		    (pcie_adv_regs->pcie_adv_bdf != NULL) ?
		    1 : NULL,
#ifdef DEBUG
		    PCIEX_UE_HDR0, DATA_TYPE_UINT32,
		    pcie_adv_regs->pcie_ue_hdr0,
		    PCIEX_UE_HDR1, DATA_TYPE_UINT32,
		    pcie_adv_regs->pcie_ue_hdr[0],
		    PCIEX_UE_HDR2, DATA_TYPE_UINT32,
		    pcie_adv_regs->pcie_ue_hdr[1],
		    PCIEX_UE_HDR3, DATA_TYPE_UINT32,
		    pcie_adv_regs->pcie_ue_hdr[2],
#endif
		    NULL);
		break;
	    case PCIEX_TYPE_GEN:
		/* generic report: device status only */
		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8,
		    0, PCIEX_DEVSTS_REG, DATA_TYPE_UINT16,
		    pcie_regs->pcie_err_status, NULL);
		break;
	    case PCIEX_TYPE_RC_UE_MSG:
	    case PCIEX_TYPE_RC_CE_MSG:
		/*
		 * Root-complex message: root error status plus the UE or
		 * CE source id; validity is gated on both the
		 * PCIE_SRC_ID_VALID flag and a non-zero id.
		 */
		pcie_adv_rc_regs = pcie_adv_regs->pcie_adv_rc_regs;

		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
		    PCIEX_ROOT_ERRSTS_REG, DATA_TYPE_UINT32,
		    pcie_adv_rc_regs->pcie_rc_err_status,
		    PCIEX_SRC_ID, DATA_TYPE_UINT16,
		    (errtype == PCIEX_TYPE_RC_UE_MSG) ?
		    pcie_adv_rc_regs->pcie_rc_ue_src_id :
		    pcie_adv_rc_regs->pcie_rc_ce_src_id,
		    PCIEX_SRC_VALID, DATA_TYPE_BOOLEAN_VALUE,
		    (errtype == PCIEX_TYPE_RC_UE_MSG) ?
		    (pcie_adv_regs->pcie_adv_vflags & PCIE_SRC_ID_VALID &&
		    pcie_adv_rc_regs->pcie_rc_ue_src_id != 0) :
		    (pcie_adv_regs->pcie_adv_vflags & PCIE_SRC_ID_VALID &&
		    pcie_adv_rc_regs->pcie_rc_ce_src_id != 0), NULL);
		break;
	    case PCIEX_TYPE_RC_MULT_MSG:
		/* multiple root-complex messages: status register only */
		pcie_adv_rc_regs = pcie_adv_regs->pcie_adv_rc_regs;

		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
		    PCIEX_ROOT_ERRSTS_REG, DATA_TYPE_UINT32,
		    pcie_adv_rc_regs->pcie_rc_err_status, NULL);
		break;
	    default:
		break;
	}
}
1213 
/*
 * pcie_check_addr: decode the TLP header captured in the AER registers
 * for an uncorrectable error, recovering the requester/completer id and,
 * where the TLP carries one, the faulting address; results go into
 * pcie_adv_bdf and the FM bus-specific area.
 * NOTE(review): derr->fme_bus_specific is dereferenced unchecked here --
 * presumably guaranteed non-NULL by the caller; confirm.
 */
/*ARGSUSED*/
static void
pcie_check_addr(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
{
	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
	pcie_adv_error_regs_t *pcie_adv_regs = pcie_regs->pcie_adv_regs;
	pcie_tlp_hdr_t *ue_hdr0;
	uint32_t *ue_hdr;
	uint64_t addr = NULL;
	int upstream = 0;
	pci_fme_bus_specific_t *pci_fme_bsp =
	    (pci_fme_bus_specific_t *)derr->fme_bus_specific;

	/* nothing to decode unless the UE header capture is valid */
	if (!(pcie_adv_regs->pcie_adv_vflags & PCIE_UE_HDR_VALID))
		return;

	ue_hdr0 = (pcie_tlp_hdr_t *)&pcie_adv_regs->pcie_ue_hdr0;
	ue_hdr = pcie_adv_regs->pcie_ue_hdr;

	/* root ports and downstream switch ports see traffic from below */
	if ((pcie_regs->pcie_cap & PCIE_PCIECAP_DEV_TYPE_MASK) ==
	    PCIE_PCIECAP_DEV_TYPE_ROOT ||
	    (pcie_regs->pcie_cap & PCIE_PCIECAP_DEV_TYPE_MASK) ==
	    PCIE_PCIECAP_DEV_TYPE_DOWN)
		upstream = 1;

	switch (ue_hdr0->type) {
	    case PCIE_TLP_TYPE_MEM:
	    case PCIE_TLP_TYPE_MEMLK:
		/* fmt bit set selects the 64-bit address header layout */
		if ((ue_hdr0->fmt & 0x1) == 0x1) {
			pcie_mem64_t *mem64_tlp = (pcie_mem64_t *)ue_hdr;

			addr = (uint64_t)mem64_tlp->addr1 << 32 |
			    (uint32_t)mem64_tlp->addr0 << 2;
			pcie_adv_regs->pcie_adv_bdf = mem64_tlp->rid;
		} else {
			pcie_memio32_t *memio32_tlp = (pcie_memio32_t *)ue_hdr;

			addr = (uint32_t)memio32_tlp->addr0 << 2;
			pcie_adv_regs->pcie_adv_bdf = memio32_tlp->rid;
		}
		if (upstream) {
			/* request came from below: record the requester id */
			pci_fme_bsp->pci_bs_bdf = pcie_adv_regs->pcie_adv_bdf;
			pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID;
		} else if ((pcie_regs->pcie_cap & PCIE_PCIECAP_DEV_TYPE_MASK) ==
		    PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) {
			/* leaf device: record our own bdf */
			pci_fme_bsp->pci_bs_bdf = erpt_p->pe_bdf;
			pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID;
		}
		pci_fme_bsp->pci_bs_addr = addr;
		pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID;
		pci_fme_bsp->pci_bs_type = upstream ? DMA_HANDLE : ACC_HANDLE;
		break;

	    case PCIE_TLP_TYPE_IO:
		{
			pcie_memio32_t *memio32_tlp = (pcie_memio32_t *)ue_hdr;

			/* IO TLPs always use the 32-bit header layout */
			addr = (uint32_t)memio32_tlp->addr0 << 2;
			pcie_adv_regs->pcie_adv_bdf = memio32_tlp->rid;
			if ((pcie_regs->pcie_cap &
			    PCIE_PCIECAP_DEV_TYPE_MASK) ==
			    PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) {
				pci_fme_bsp->pci_bs_bdf = erpt_p->pe_bdf;
				pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID;
			}
			pci_fme_bsp->pci_bs_addr = addr;
			pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID;
			pci_fme_bsp->pci_bs_type = ACC_HANDLE;
			break;
		}
	    case PCIE_TLP_TYPE_CFG0:
	    case PCIE_TLP_TYPE_CFG1:
		{
			pcie_cfg_t *cfg_tlp = (pcie_cfg_t *)ue_hdr;

			/* target bdf comes straight from the config TLP */
			pcie_adv_regs->pcie_adv_bdf = cfg_tlp->rid;
			pci_fme_bsp->pci_bs_bdf = (uint16_t)cfg_tlp->bus << 8 |
			    (uint16_t)cfg_tlp->dev << 3 | cfg_tlp->func;
			pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID;
			pci_fme_bsp->pci_bs_type = ACC_HANDLE;
			break;
		}
	    case PCIE_TLP_TYPE_MSG:
		{
			pcie_msg_t *msg_tlp = (pcie_msg_t *)ue_hdr;

			/* messages carry no address; just note the sender */
			pcie_adv_regs->pcie_adv_bdf = msg_tlp->rid;
			break;
		}
	    case PCIE_TLP_TYPE_CPL:
	    case PCIE_TLP_TYPE_CPLLK:
		{
			pcie_cpl_t *cpl_tlp = (pcie_cpl_t *)ue_hdr;

			/*
			 * For completions, blame the completer on the
			 * access path and the requester on the DMA path.
			 */
			pcie_adv_regs->pcie_adv_bdf = cpl_tlp->cid;
			pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID;
			if (upstream) {
				pci_fme_bsp->pci_bs_bdf = cpl_tlp->cid;
				pci_fme_bsp->pci_bs_type = ACC_HANDLE;
			} else {
				pci_fme_bsp->pci_bs_bdf = cpl_tlp->rid;
				pci_fme_bsp->pci_bs_type = DMA_HANDLE;
			}
			break;
		}
	    case PCIE_TLP_TYPE_MSI:
	    default:
		break;
	}
}
1324 
/*
 * pcie_pci_check_addr: decode the PCI-X transaction captured in the
 * secondary UE header of a PCIe-to-PCI(-X) bridge.  The requester id
 * comes from the attribute word; for commands that carry an address the
 * faulting address is recorded in the FM bus-specific area.  A
 * dual-address cycle (DADR) re-dispatches exactly once on the command
 * held in the upper header word.
 */
/*ARGSUSED*/
static void
pcie_pci_check_addr(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
    int type)
{
	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
	pcie_adv_error_regs_t *pcie_adv_regs = pcie_regs->pcie_adv_regs;
	pcie_adv_bdg_error_regs_t *pcie_bdg_regs =
	    pcie_adv_regs->pcie_adv_bdg_regs;
	uint64_t addr = NULL;
	pcix_attr_t *pcie_pci_sue_attr;
	int cmd;
	int dual_addr = 0;
	pci_fme_bus_specific_t *pci_fme_bsp =
	    (pci_fme_bus_specific_t *)derr->fme_bus_specific;

	/* nothing to decode unless the secondary UE header is valid */
	if (!(pcie_adv_regs->pcie_adv_vflags & PCIE_SUE_HDR_VALID))
		return;

	pcie_pci_sue_attr = (pcix_attr_t *)&pcie_bdg_regs->pcie_sue_hdr0;
	cmd = (pcie_bdg_regs->pcie_sue_hdr[0] >>
	    PCIE_AER_SUCE_HDR_CMD_LWR_SHIFT) & PCIE_AER_SUCE_HDR_CMD_LWR_MASK;

cmd_switch:
	/* the 64-bit address is split across header words 1 and 2 */
	addr = pcie_bdg_regs->pcie_sue_hdr[2];
	addr = (addr << PCIE_AER_SUCE_HDR_ADDR_SHIFT) |
	    pcie_bdg_regs->pcie_sue_hdr[1];
	switch (cmd) {
	    case PCI_PCIX_CMD_IORD:
	    case PCI_PCIX_CMD_IOWR:
		pcie_adv_regs->pcie_adv_bdf = pcie_pci_sue_attr->rid;
		if (addr) {
			pci_fme_bsp->pci_bs_addr = addr;
			pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID;
			pci_fme_bsp->pci_bs_type = ACC_HANDLE;
		}
		break;
	    case PCI_PCIX_CMD_MEMRD_DW:
	    case PCI_PCIX_CMD_MEMWR:
	    case PCI_PCIX_CMD_MEMRD_BL:
	    case PCI_PCIX_CMD_MEMWR_BL:
	    case PCI_PCIX_CMD_MEMRDBL:
	    case PCI_PCIX_CMD_MEMWRBL:
		pcie_adv_regs->pcie_adv_bdf = pcie_pci_sue_attr->rid;
		if (addr) {
			pci_fme_bsp->pci_bs_addr = addr;
			pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID;
			/* caller supplies access vs. DMA classification */
			pci_fme_bsp->pci_bs_type = type;
		}
		break;
	    case PCI_PCIX_CMD_CFRD:
	    case PCI_PCIX_CMD_CFWR:
		pcie_adv_regs->pcie_adv_bdf = pcie_pci_sue_attr->rid;
		/*
		 * for type 1 config transaction we can find bdf from address
		 */
		if ((addr & 3) == 1) {
			pci_fme_bsp->pci_bs_bdf = (addr >> 8) & 0xffffffff;
			pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID;
			pci_fme_bsp->pci_bs_type = ACC_HANDLE;
		}
		break;
	    case PCI_PCIX_CMD_SPL:
		pcie_adv_regs->pcie_adv_bdf = pcie_pci_sue_attr->rid;
		if (type == ACC_HANDLE) {
			pci_fme_bsp->pci_bs_bdf = pcie_adv_regs->pcie_adv_bdf;
			pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID;
			pci_fme_bsp->pci_bs_type = type;
		}
		break;
	    case PCI_PCIX_CMD_DADR:
		/* dual-address cycle: retry once with the upper command */
		cmd = (pcie_bdg_regs->pcie_sue_hdr[0] >>
		    PCIE_AER_SUCE_HDR_CMD_UP_SHIFT) &
		    PCIE_AER_SUCE_HDR_CMD_UP_MASK;
		if (dual_addr)
			break;
		++dual_addr;
		goto cmd_switch;
	    default:
		break;
	}
}
1407 
1408 /*ARGSUSED*/
1409 static int
1410 pcix_check_addr(dev_info_t *dip, ddi_fm_error_t *derr,
1411     pcix_ecc_regs_t *pcix_ecc_regs, int type)
1412 {
1413 	int cmd = (pcix_ecc_regs->pcix_ecc_ctlstat >> 16) & 0xf;
1414 	uint64_t addr;
1415 	pci_fme_bus_specific_t *pci_fme_bsp =
1416 	    (pci_fme_bus_specific_t *)derr->fme_bus_specific;
1417 
1418 	addr = pcix_ecc_regs->pcix_ecc_secaddr;
1419 	addr = addr << 32;
1420 	addr |= pcix_ecc_regs->pcix_ecc_fstaddr;
1421 
1422 	switch (cmd) {
1423 	    case PCI_PCIX_CMD_INTR:
1424 	    case PCI_PCIX_CMD_SPEC:
1425 		return (DDI_FM_FATAL);
1426 	    case PCI_PCIX_CMD_IORD:
1427 	    case PCI_PCIX_CMD_IOWR:
1428 		pci_fme_bsp->pci_bs_addr = addr;
1429 		pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID;
1430 		pci_fme_bsp->pci_bs_type = type;
1431 		return (DDI_FM_UNKNOWN);
1432 	    case PCI_PCIX_CMD_DEVID:
1433 		return (DDI_FM_FATAL);
1434 	    case PCI_PCIX_CMD_MEMRD_DW:
1435 	    case PCI_PCIX_CMD_MEMWR:
1436 	    case PCI_PCIX_CMD_MEMRD_BL:
1437 	    case PCI_PCIX_CMD_MEMWR_BL:
1438 		pci_fme_bsp->pci_bs_addr = addr;
1439 		pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID;
1440 		pci_fme_bsp->pci_bs_type = type;
1441 		return (DDI_FM_UNKNOWN);
1442 	    case PCI_PCIX_CMD_CFRD:
1443 	    case PCI_PCIX_CMD_CFWR:
1444 		/*
1445 		 * for type 1 config transaction we can find bdf from address
1446 		 */
1447 		if ((addr & 3) == 1) {
1448 			pci_fme_bsp->pci_bs_bdf = (addr >> 8) & 0xffffffff;
1449 			pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID;
1450 			pci_fme_bsp->pci_bs_type = type;
1451 		}
1452 		return (DDI_FM_UNKNOWN);
1453 	    case PCI_PCIX_CMD_SPL:
1454 	    case PCI_PCIX_CMD_DADR:
1455 		return (DDI_FM_UNKNOWN);
1456 	    case PCI_PCIX_CMD_MEMRDBL:
1457 	    case PCI_PCIX_CMD_MEMWRBL:
1458 		pci_fme_bsp->pci_bs_addr = addr;
1459 		pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID;
1460 		pci_fme_bsp->pci_bs_type = type;
1461 		return (DDI_FM_UNKNOWN);
1462 	    default:
1463 		return (DDI_FM_FATAL);
1464 	}
1465 }
1466 
/*
 * pci_bdg_error_report: examine PCI bridge secondary status/control,
 * post ereports for each detected condition, then dispatch the error to
 * children below the bridge and return the combined severity.
 * NOTE(review): PCI_FM_SEV_INC appears to bump one of the local
 * fatal/nonfatal/unknown/ok counters -- macro defined elsewhere; confirm.
 */
/*ARGSUSED*/
static int
pci_bdg_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
{
	pci_bdg_error_regs_t *pci_bdg_regs = erpt_p->pe_pci_regs->pci_bdg_regs;
	int fatal = 0;
	int nonfatal = 0;
	int unknown = 0;
	int ok = 0;
	int ret = DDI_FM_OK;
	char buf[FM_MAX_CLASS];
	int i;
	pci_fme_bus_specific_t *pci_fme_bsp =
	    (pci_fme_bus_specific_t *)derr->fme_bus_specific;

	/* register analysis below only applies to unexpected errors */
	if (derr->fme_flag != DDI_FM_ERR_UNEXPECTED)
		goto done;

	/* discard-timer timeout in the bridge control register */
	if ((pci_bdg_regs->pci_bdg_vflags & PCI_BDG_CTRL_VALID) &&
	    (pci_bdg_regs->pci_bdg_ctrl & PCI_BCNF_BCNTRL_DTO_STAT)) {
		(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
		    PCI_ERROR_SUBCLASS, PCI_DTO);
		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
		    PCI_SEC_CONFIG_STATUS, DATA_TYPE_UINT16,
		    pci_bdg_regs->pci_bdg_sec_stat, PCI_BCNTRL,
		    DATA_TYPE_UINT16, pci_bdg_regs->pci_bdg_ctrl, NULL);
		unknown++;
	}

	/* walk the secondary-status error table, one ereport per set bit */
	if (pci_bdg_regs->pci_bdg_vflags & PCI_BDG_SEC_STAT_VALID) {
		for (i = 0; pci_bdg_err_tbl[i].err_class != NULL; i++) {
			if (pci_bdg_regs->pci_bdg_sec_stat &
			    pci_bdg_err_tbl[i].reg_bit) {
				(void) snprintf(buf, FM_MAX_CLASS, "%s.%s-%s",
				    PCI_ERROR_SUBCLASS, PCI_SEC_ERROR_SUBCLASS,
				    pci_bdg_err_tbl[i].err_class);
				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
				    PCI_SEC_CONFIG_STATUS, DATA_TYPE_UINT16,
				    pci_bdg_regs->pci_bdg_sec_stat, PCI_BCNTRL,
				    DATA_TYPE_UINT16,
				    pci_bdg_regs->pci_bdg_ctrl, NULL);
				PCI_FM_SEV_INC(pci_bdg_err_tbl[i].flags);
				/*
				 * Queue a target ereport when a valid
				 * access-path address was captured.
				 */
				if (pci_fme_bsp && (pci_fme_bsp->pci_bs_flags &
				    PCI_BS_ADDR_VALID) &&
				    pci_fme_bsp->pci_bs_type == ACC_HANDLE &&
				    pci_bdg_err_tbl[i].terr_class)
					pci_target_enqueue(derr->fme_ena,
					    pci_bdg_err_tbl[i].terr_class,
					    PCI_ERROR_SUBCLASS,
					    pci_fme_bsp->pci_bs_addr);
			}
		}
#if !defined(__sparc)
		/*
		 * For x86, many drivers and even user-level code currently get
		 * away with accessing bad addresses, getting a UR and getting
		 * -1 returned. Unfortunately, we have no control over this, so
		 * we will have to treat all URs as nonfatal. Moreover, if the
		 * leaf driver is non-hardened, then we don't actually see the
		 * UR directly. All we see is a secondary bus master abort at
		 * the root complex - so it's this condition that we actually
		 * need to treat as nonfatal (providing no other unrelated nfe
		 * conditions have also been seen by the root complex).
		 */
		if ((erpt_p->pe_dflags & PCIEX_RC_DEV) &&
		    (pci_bdg_regs->pci_bdg_sec_stat & PCI_STAT_R_MAST_AB) &&
		    !(pci_bdg_regs->pci_bdg_sec_stat & PCI_STAT_S_PERROR)) {
			pcie_error_regs_t *pcie_regs =
			    (pcie_error_regs_t *)erpt_p->pe_regs;
			if ((pcie_regs->pcie_vflags & PCIE_ERR_STATUS_VALID) &&
			    !(pcie_regs->pcie_err_status &
			    PCIE_DEVSTS_NFE_DETECTED))
				nonfatal++;
			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s-%s",
			    PCI_ERROR_SUBCLASS, PCI_SEC_ERROR_SUBCLASS, PCI_MA);
			ddi_fm_ereport_post(dip, buf, derr->fme_ena,
			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
			    PCI_SEC_CONFIG_STATUS, DATA_TYPE_UINT16,
			    pci_bdg_regs->pci_bdg_sec_stat, PCI_BCNTRL,
			    DATA_TYPE_UINT16, pci_bdg_regs->pci_bdg_ctrl, NULL);
		}
#endif
	}

done:
	/*
	 * Need to check for poke and cautious put. We already know peek
	 * and cautious get errors occurred (as we got a trap) and we know
	 * they are nonfatal.
	 */
	if (derr->fme_flag == DDI_FM_ERR_EXPECTED) {
		/*
		 * for cautious puts we treat all errors as nonfatal. Actually
		 * we set nonfatal for cautious gets as well - doesn't do any
		 * harm
		 */
		if (pci_bdg_regs->pci_bdg_sec_stat & (PCI_STAT_R_TARG_AB |
		    PCI_STAT_R_MAST_AB | PCI_STAT_S_PERROR | PCI_STAT_S_SYSERR))
			nonfatal++;
	}
	if (derr->fme_flag == DDI_FM_ERR_POKE) {
		/*
		 * special case for pokes - we only consider master abort
		 * and target abort as nonfatal. Sserr with no master abort is
		 * fatal, but master/target abort can come in on separate
		 * instance, so return unknown and parent will determine if
		 * nonfatal (if another child returned nonfatal - ie master
		 * or target abort) or fatal otherwise
		 */
		if (pci_bdg_regs->pci_bdg_sec_stat & (PCI_STAT_R_TARG_AB |
		    PCI_STAT_R_MAST_AB))
			nonfatal++;
		if (erpt_p->pe_pci_regs->pci_err_status & PCI_STAT_S_SYSERR)
			unknown++;
	}

	/*
	 * now check children below the bridge
	 */
	ret = ndi_fm_handler_dispatch(dip, NULL, derr);
	PCI_FM_SEV_INC(ret);
	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}
1593 
/*
 * Report PCI-X ECC errors captured in the ECC control/status register(s).
 * A bridge carries two ECC register sets (indexed by i, where i != 0
 * denotes the secondary side); a non-bridge carries one.  Severity is
 * accumulated via PCI_FM_SEV_INC -- which presumably bumps one of the
 * local fatal/nonfatal/unknown/ok counters (macro defined elsewhere;
 * confirm) -- and folded into the returned DDI_FM_* value.
 */
static int
pcix_ecc_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
    void *pe_regs)
{
	pcix_error_regs_t *pcix_regs;
	pcix_bdg_error_regs_t *pcix_bdg_regs;
	pcix_ecc_regs_t *pcix_ecc_regs;
	int bridge;
	int i;
	int ecc_phase;
	int ecc_corr;
	int sec_ue;
	int sec_ce;
	int fatal = 0;
	int nonfatal = 0;
	int unknown = 0;
	int ok = 0;
	char buf[FM_MAX_CLASS];

	/* pe_regs is a bridge or plain PCI-X structure per pe_dflags */
	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
		pcix_bdg_regs = (pcix_bdg_error_regs_t *)pe_regs;
		bridge = 1;
	} else {
		pcix_regs = (pcix_error_regs_t *)pe_regs;
		bridge = 0;
	}

	for (i = 0; i < (bridge ? 2 : 1); i++) {
		int ret = DDI_FM_OK;
		pcix_ecc_regs = bridge ? pcix_bdg_regs->pcix_bdg_ecc_regs[i] :
		    pcix_regs->pcix_ecc_regs;
		if (pcix_ecc_regs->pcix_ecc_vflags & PCIX_ERR_ECC_STS_VALID) {
			/* decode phase, correctability and secondary bits */
			ecc_phase = (pcix_ecc_regs->pcix_ecc_ctlstat &
			    PCI_PCIX_ECC_PHASE) >> 0x4;
			ecc_corr = (pcix_ecc_regs->pcix_ecc_ctlstat &
			    PCI_PCIX_ECC_CORR);
			sec_ue = (pcix_ecc_regs->pcix_ecc_ctlstat &
			    PCI_PCIX_ECC_S_UE);
			sec_ce = (pcix_ecc_regs->pcix_ecc_ctlstat &
			    PCI_PCIX_ECC_S_CE);

			switch (ecc_phase) {
			    case PCI_PCIX_ECC_PHASE_NOERR:
				break;
			    case PCI_PCIX_ECC_PHASE_FADDR:
			    case PCI_PCIX_ECC_PHASE_SADDR:
				/* address-phase ECC: fatal unless corrected */
				PCI_FM_SEV_INC(ecc_corr ?  DDI_FM_OK :
				    DDI_FM_FATAL);
				(void) snprintf(buf, FM_MAX_CLASS,
				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
				    ecc_corr ? PCIX_ECC_CE_ADDR :
				    PCIX_ECC_UE_ADDR);
				break;
			    case PCI_PCIX_ECC_PHASE_ATTR:
				/* attribute-phase ECC: fatal unless corrected */
				PCI_FM_SEV_INC(ecc_corr ?
				    DDI_FM_OK : DDI_FM_FATAL);
				(void) snprintf(buf, FM_MAX_CLASS,
				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
				    ecc_corr ? PCIX_ECC_CE_ATTR :
				    PCIX_ECC_UE_ATTR);
				break;
			    case PCI_PCIX_ECC_PHASE_DATA32:
			    case PCI_PCIX_ECC_PHASE_DATA64:
				if (ecc_corr)
					ret = DDI_FM_OK;
				else {
					int type;
					pci_error_regs_t *pci_regs =
					    erpt_p->pe_pci_regs;

					/*
					 * Classify as access vs. DMA by
					 * which side signalled the parity
					 * error.
					 */
					if (i) {
						if (pci_regs->pci_bdg_regs->
						    pci_bdg_sec_stat &
						    PCI_STAT_S_PERROR)
							type = ACC_HANDLE;
						else
							type = DMA_HANDLE;
					} else {
						if (pci_regs->pci_err_status &
						    PCI_STAT_S_PERROR)
							type = DMA_HANDLE;
						else
							type = ACC_HANDLE;
					}
					ret = pcix_check_addr(dip, derr,
					    pcix_ecc_regs, type);
				}
				PCI_FM_SEV_INC(ret);

				(void) snprintf(buf, FM_MAX_CLASS,
				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
				    ecc_corr ? PCIX_ECC_CE_DATA :
				    PCIX_ECC_UE_DATA);
				break;
			}
			/* buf is only valid if a phase was decoded above */
			if (ecc_phase)
				if (bridge)
					ddi_fm_ereport_post(dip, buf,
					    derr->fme_ena,
					    DDI_NOSLEEP, FM_VERSION,
					    DATA_TYPE_UINT8, 0,
					    PCIX_SEC_STATUS, DATA_TYPE_UINT16,
					    pcix_bdg_regs->pcix_bdg_sec_stat,
					    PCIX_BDG_STAT, DATA_TYPE_UINT32,
					    pcix_bdg_regs->pcix_bdg_stat,
					    PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_ctlstat,
					    PCIX_ECC_ATTR, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_attr, NULL);
				else
					ddi_fm_ereport_post(dip, buf,
					    derr->fme_ena,
					    DDI_NOSLEEP, FM_VERSION,
					    DATA_TYPE_UINT8, 0,
					    PCIX_COMMAND, DATA_TYPE_UINT16,
					    pcix_regs->pcix_command,
					    PCIX_STATUS, DATA_TYPE_UINT32,
					    pcix_regs->pcix_status,
					    PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_ctlstat,
					    PCIX_ECC_ATTR, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_attr, NULL);
			/* separately report secondary CE/UE indications */
			if (sec_ce || sec_ue) {
				(void) snprintf(buf, FM_MAX_CLASS,
				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
				    sec_ce ? PCIX_ECC_S_CE : PCIX_ECC_S_UE);
				if (bridge)
					ddi_fm_ereport_post(dip, buf,
					    derr->fme_ena,
					    DDI_NOSLEEP, FM_VERSION,
					    DATA_TYPE_UINT8, 0,
					    PCIX_SEC_STATUS, DATA_TYPE_UINT16,
					    pcix_bdg_regs->pcix_bdg_sec_stat,
					    PCIX_BDG_STAT, DATA_TYPE_UINT32,
					    pcix_bdg_regs->pcix_bdg_stat,
					    PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_ctlstat,
					    PCIX_ECC_ATTR, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_attr, NULL);
				else
					ddi_fm_ereport_post(dip, buf,
					    derr->fme_ena,
					    DDI_NOSLEEP, FM_VERSION,
					    DATA_TYPE_UINT8, 0,
					    PCIX_COMMAND, DATA_TYPE_UINT16,
					    pcix_regs->pcix_command,
					    PCIX_STATUS, DATA_TYPE_UINT32,
					    pcix_regs->pcix_status,
					    PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_ctlstat,
					    PCIX_ECC_ATTR, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_attr, NULL);
				PCI_FM_SEV_INC(sec_ue ? DDI_FM_FATAL :
				    DDI_FM_OK);
			}
		}
	}
	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}
1758 
/*
 * Report PCI-X bridge errors: walk the bridge status and secondary
 * status error tables posting an ereport per set bit, then fold in ECC
 * errors for ECC-capable PCI-X versions.  Returns the combined
 * DDI_FM_* severity (PCI_FM_SEV_INC presumably bumps one of the local
 * fatal/nonfatal/unknown/ok counters -- macro defined elsewhere;
 * confirm).
 */
static int
pcix_bdg_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
    void *pe_regs)
{
	pcix_bdg_error_regs_t *pcix_bdg_regs = (pcix_bdg_error_regs_t *)pe_regs;
	int fatal = 0;
	int nonfatal = 0;
	int unknown = 0;
	int ok = 0;
	char buf[FM_MAX_CLASS];
	int i;

	/* primary-side (bridge) status bits */
	if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_STATUS_VALID) {
		for (i = 0; pcix_err_tbl[i].err_class != NULL; i++) {
			if ((pcix_bdg_regs->pcix_bdg_stat &
			    pcix_err_tbl[i].reg_bit)) {
				(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
				    PCIX_ERROR_SUBCLASS,
				    pcix_err_tbl[i].err_class);
				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
				    PCIX_SEC_STATUS, DATA_TYPE_UINT16,
				    pcix_bdg_regs->pcix_bdg_sec_stat,
				    PCIX_BDG_STAT, DATA_TYPE_UINT32,
				    pcix_bdg_regs->pcix_bdg_stat, NULL);
				PCI_FM_SEV_INC(pcix_err_tbl[i].flags);
			}
		}
	}

	/* secondary-side status bits */
	if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_SEC_STATUS_VALID) {
		for (i = 0; pcix_sec_err_tbl[i].err_class != NULL; i++) {
			if ((pcix_bdg_regs->pcix_bdg_sec_stat &
			    pcix_sec_err_tbl[i].reg_bit)) {
				(void) snprintf(buf, FM_MAX_CLASS, "%s.%s%s",
				    PCIX_ERROR_SUBCLASS,
				    PCIX_SEC_ERROR_SUBCLASS,
				    pcix_sec_err_tbl[i].err_class);
				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
				    PCIX_SEC_STATUS, DATA_TYPE_UINT16,
				    pcix_bdg_regs->pcix_bdg_sec_stat,
				    PCIX_BDG_STAT, DATA_TYPE_UINT32,
				    pcix_bdg_regs->pcix_bdg_stat, NULL);
				PCI_FM_SEV_INC(pcix_sec_err_tbl[i].flags);
			}
		}
	}

	/* Log/Handle ECC errors */
	if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) {
		int ret;

		ret = pcix_ecc_error_report(dip, derr, erpt_p,
		    (void *)pcix_bdg_regs);
		PCI_FM_SEV_INC(ret);
	}
	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}
1819 
/*
 * Generate ereports for error bits set in a non-bridge PCI-X device's
 * captured status register, then report PCI-X ECC errors when the
 * capability version includes ECC registers.  Returns the worst
 * severity accumulated by PCI_FM_SEV_INC().
 */
static int
pcix_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
{
	pcix_error_regs_t *pcix_regs = (pcix_error_regs_t *)erpt_p->pe_regs;
	int fatal = 0;
	int nonfatal = 0;
	int unknown = 0;
	int ok = 0;	/* all four counters are updated by PCI_FM_SEV_INC() */
	char buf[FM_MAX_CLASS];
	int i;

	/* Errors latched in the device's PCI-X status register */
	if (pcix_regs->pcix_vflags & PCIX_ERR_STATUS_VALID) {
		for (i = 0; pcix_err_tbl[i].err_class != NULL; i++) {
			if (!(pcix_regs->pcix_status & pcix_err_tbl[i].reg_bit))
				continue;

			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
			    PCIX_ERROR_SUBCLASS, pcix_err_tbl[i].err_class);
			ddi_fm_ereport_post(dip, buf, derr->fme_ena,
			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
			    PCIX_COMMAND, DATA_TYPE_UINT16,
			    pcix_regs->pcix_command, PCIX_STATUS,
			    DATA_TYPE_UINT32, pcix_regs->pcix_status,
			    NULL);
			PCI_FM_SEV_INC(pcix_err_tbl[i].flags);
		}
	}
	/* Log/Handle ECC errors (only PCI-X v1/v2 carry ECC registers) */
	if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) {
		int ret = pcix_ecc_error_report(dip, derr, erpt_p,
		    (void *)pcix_regs);
		PCI_FM_SEV_INC(ret);
	}

	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}
1857 
/*
 * Generate ereports for the error messages a PCI Express root complex
 * has received from downstream devices, based on the captured root
 * error status register.  Note that only the fatal counter can be
 * raised here (FE/NFE received together with the fatal-message bit);
 * nonfatal/unknown exist solely to keep the common severity-return
 * idiom used throughout this file.
 */
static int
pcie_rc_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
    void *pe_regs)
{
	pcie_adv_error_regs_t *pcie_adv_regs = (pcie_adv_error_regs_t *)pe_regs;
	int fatal = 0;
	int nonfatal = 0;
	int unknown = 0;
	char buf[FM_MAX_CLASS];

	if (pcie_adv_regs->pcie_adv_vflags & PCIE_RC_ERR_STATUS_VALID) {
		pcie_adv_rc_error_regs_t *pcie_rc_regs =
		    pcie_adv_regs->pcie_adv_rc_regs;
		/* decode the individual root error status bits */
		int ce, ue, mult_ce, mult_ue, first_ue_fatal, nfe, fe;

		ce = pcie_rc_regs->pcie_rc_err_status &
		    PCIE_AER_RE_STS_CE_RCVD;
		ue = pcie_rc_regs->pcie_rc_err_status &
		    PCIE_AER_RE_STS_FE_NFE_RCVD;
		mult_ce = pcie_rc_regs->pcie_rc_err_status &
		    PCIE_AER_RE_STS_MUL_CE_RCVD;
		mult_ue = pcie_rc_regs->pcie_rc_err_status &
		    PCIE_AER_RE_STS_MUL_FE_NFE_RCVD;
		first_ue_fatal = pcie_rc_regs->pcie_rc_err_status &
		    PCIE_AER_RE_STS_FIRST_UC_FATAL;
		nfe = pcie_rc_regs->pcie_rc_err_status &
		    PCIE_AER_RE_STS_NFE_MSGS_RCVD;
		fe = pcie_rc_regs->pcie_rc_err_status &
		    PCIE_AER_RE_STS_FE_MSGS_RCVD;
		/*
		 * log fatal/nonfatal/corrected messages
		 * received by root complex
		 */
		if (ue && fe)
			fatal++;

		if (fe && first_ue_fatal) {
			(void) snprintf(buf, FM_MAX_CLASS,
			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_FE_MSG);
			pcie_ereport_post(dip, derr, erpt_p, buf,
			    PCIEX_TYPE_RC_UE_MSG);
		}
		if (nfe && !first_ue_fatal) {
			(void) snprintf(buf, FM_MAX_CLASS,
			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_NFE_MSG);
			pcie_ereport_post(dip, derr, erpt_p, buf,
			    PCIEX_TYPE_RC_UE_MSG);
		}
		if (ce) {
			(void) snprintf(buf, FM_MAX_CLASS,
			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_CE_MSG);
			pcie_ereport_post(dip, derr, erpt_p, buf,
			    PCIEX_TYPE_RC_CE_MSG);
		}
		if (mult_ce) {
			(void) snprintf(buf, FM_MAX_CLASS,
			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_MCE_MSG);
			pcie_ereport_post(dip, derr, erpt_p, buf,
			    PCIEX_TYPE_RC_MULT_MSG);
		}
		if (mult_ue) {
			(void) snprintf(buf, FM_MAX_CLASS,
			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_MUE_MSG);
			pcie_ereport_post(dip, derr, erpt_p, buf,
			    PCIEX_TYPE_RC_MULT_MSG);
		}
	}
	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}
1928 
/*
 * Generate ereports for PCI Express errors captured in erpt_p->pe_regs.
 * Handles, in order: PCI-X bridge state on a PCIE-PCIX bridge, the
 * generic device status errors when no AER capability is present, AER
 * uncorrectable and correctable errors, root-complex received messages,
 * and secondary uncorrectable errors on pcie-to-pci bridges.  Returns
 * the worst severity accumulated by PCI_FM_SEV_INC().
 */
static int
pcie_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
{
	int fatal = 0;
	int nonfatal = 0;
	int unknown = 0;
	int ok = 0;	/* all four counters are updated by PCI_FM_SEV_INC() */
	int type;
	char buf[FM_MAX_CLASS];
	int i;
	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
	pcie_adv_error_regs_t *pcie_adv_regs;
	pcie_adv_bdg_error_regs_t *pcie_bdg_regs;

	/* A PCIE-PCIX bridge also carries PCI-X bridge error state */
	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) &&
	    (erpt_p->pe_dflags & PCIX_DEV)) {
		int ret = pcix_bdg_error_report(dip, derr, erpt_p,
		    (void *)pcie_regs->pcix_bdg_regs);
		PCI_FM_SEV_INC(ret);
	}

	/* Without AER, report from the generic PCIE device status register */
	if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV)) {
		if (!(pcie_regs->pcie_vflags & PCIE_ERR_STATUS_VALID))
			goto done;
#if !defined(__sparc)
		/*
		 * On x86 ignore UR on non-RBER leaf devices and pciex-pci
		 * bridges.
		 */
		if ((pcie_regs->pcie_err_status & PCIE_DEVSTS_UR_DETECTED) &&
		    !(pcie_regs->pcie_err_status & PCIE_DEVSTS_FE_DETECTED) &&
		    ((erpt_p->pe_dflags & PCIEX_2PCI_DEV) ||
		    !(erpt_p->pe_dflags & PCI_BRIDGE_DEV)) &&
		    !(pcie_regs->pcie_dev_cap & PCIE_DEVCAP_ROLE_BASED_ERR_REP))
			goto done;
#endif
		for (i = 0; pciex_nadv_err_tbl[i].err_class != NULL; i++) {
			if (!(pcie_regs->pcie_err_status &
			    pciex_nadv_err_tbl[i].reg_bit))
				continue;

			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
			    PCIEX_ERROR_SUBCLASS,
			    pciex_nadv_err_tbl[i].err_class);
			pcie_ereport_post(dip, derr, erpt_p, buf,
			    PCIEX_TYPE_GEN);
			PCI_FM_SEV_INC(pciex_nadv_err_tbl[i].flags);
		}
		goto done;
	}

	pcie_adv_regs = pcie_regs->pcie_adv_regs;

	/*
	 * Log PCI Express uncorrectable errors
	 */
	if (pcie_adv_regs->pcie_adv_vflags & PCIE_UE_STATUS_VALID) {
		for (i = 0; pciex_ue_err_tbl[i].err_class != NULL; i++) {
			if (!(pcie_adv_regs->pcie_ue_status &
			    pciex_ue_err_tbl[i].reg_bit))
				continue;

			(void) snprintf(buf, FM_MAX_CLASS,
			    "%s.%s", PCIEX_ERROR_SUBCLASS,
			    pciex_ue_err_tbl[i].err_class);

			/*
			 * First check for advisory nonfatal conditions
			 * - hardware endpoint successfully retrying a cto
			 * - hardware endpoint receiving poisoned tlp and
			 *   dealing with it itself (but not if root complex)
			 * If the device has declared these as correctable
			 * errors then treat them as such.
			 */
			if ((pciex_ue_err_tbl[i].reg_bit == PCIE_AER_UCE_TO ||
			    (pciex_ue_err_tbl[i].reg_bit == PCIE_AER_UCE_PTLP &&
			    !(erpt_p->pe_dflags & PCIEX_RC_DEV))) &&
			    (pcie_regs->pcie_err_status &
			    PCIE_DEVSTS_CE_DETECTED) &&
			    !(pcie_regs->pcie_err_status &
			    PCIE_DEVSTS_NFE_DETECTED)) {
				/* log it but don't bump severity */
				pcie_ereport_post(dip, derr, erpt_p, buf,
				    PCIEX_TYPE_UE);
				continue;
			}

#if !defined(__sparc)
			/*
			 * On x86 for leaf devices and pciex-pci bridges,
			 * ignore UR on non-RBER devices or on RBER devices when
			 * advisory nonfatal.
			 */
			if (pciex_ue_err_tbl[i].reg_bit == PCIE_AER_UCE_UR &&
			    ((erpt_p->pe_dflags & PCIEX_2PCI_DEV) ||
			    !(erpt_p->pe_dflags & PCI_BRIDGE_DEV))) {
				if (!(pcie_regs->pcie_dev_cap &
				    PCIE_DEVCAP_ROLE_BASED_ERR_REP))
					continue;
				if ((pcie_regs->pcie_err_status &
				    PCIE_DEVSTS_CE_DETECTED) &&
				    !(pcie_regs->pcie_err_status &
				    PCIE_DEVSTS_NFE_DETECTED))
					continue;
			}
#endif
			pcie_adv_regs->pcie_adv_bdf = 0;
			/*
			 * Now try and look up handle if
			 * - error bit is among PCIE_AER_UCE_LOG_BITS, and
			 * - no other PCIE_AER_UCE_LOG_BITS are set, and
			 * - error bit is not masked, and
			 * - flag is DDI_FM_UNKNOWN
			 */
			if ((pcie_adv_regs->pcie_ue_status &
			    pcie_aer_uce_log_bits) ==
			    pciex_ue_err_tbl[i].reg_bit &&
			    !(pciex_ue_err_tbl[i].reg_bit &
			    pcie_adv_regs->pcie_ue_mask) &&
			    pciex_ue_err_tbl[i].flags == DDI_FM_UNKNOWN)
				pcie_check_addr(dip, derr, erpt_p);

			PCI_FM_SEV_INC(pciex_ue_err_tbl[i].flags);
			pcie_ereport_post(dip, derr, erpt_p, buf,
			    PCIEX_TYPE_UE);
		}
	}

	/*
	 * Log PCI Express correctable errors
	 */
	if (pcie_adv_regs->pcie_adv_vflags & PCIE_CE_STATUS_VALID) {
		for (i = 0; pciex_ce_err_tbl[i].err_class != NULL; i++) {
			if (!(pcie_adv_regs->pcie_ce_status &
			    pciex_ce_err_tbl[i].reg_bit))
				continue;

			(void) snprintf(buf, FM_MAX_CLASS,
			    "%s.%s", PCIEX_ERROR_SUBCLASS,
			    pciex_ce_err_tbl[i].err_class);
			pcie_ereport_post(dip, derr, erpt_p, buf,
			    PCIEX_TYPE_CE);
		}
	}

	if (!(erpt_p->pe_dflags & PCI_BRIDGE_DEV))
		goto done;

	/* Root complexes also log the error messages they received */
	if (erpt_p->pe_dflags & PCIEX_RC_DEV) {
		int ret = pcie_rc_error_report(dip, derr, erpt_p,
		    (void *)pcie_adv_regs);
		PCI_FM_SEV_INC(ret);
	}

	/* Secondary UE status applies only to pcie-to-pci bridges */
	if (!((erpt_p->pe_dflags & PCIEX_2PCI_DEV) &&
	    (pcie_adv_regs->pcie_adv_vflags & PCIE_SUE_STATUS_VALID)))
		goto done;

	pcie_bdg_regs = pcie_adv_regs->pcie_adv_bdg_regs;

	for (i = 0; pcie_sue_err_tbl[i].err_class != NULL; i++) {
		if ((pcie_bdg_regs->pcie_sue_status &
		    pcie_sue_err_tbl[i].reg_bit)) {
			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
			    PCIEX_ERROR_SUBCLASS,
			    pcie_sue_err_tbl[i].err_class);

			/*
			 * Post directly unless this is the sole logged SUE
			 * bit with DDI_FM_UNKNOWN severity, in which case
			 * try a handle lookup first (below).
			 */
			if ((pcie_bdg_regs->pcie_sue_status &
			    pcie_aer_suce_log_bits) !=
			    pcie_sue_err_tbl[i].reg_bit ||
			    pcie_sue_err_tbl[i].flags != DDI_FM_UNKNOWN) {
				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
				    PCIEX_SEC_UE_STATUS, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_status,
#ifdef DEBUG
				    PCIEX_SUE_HDR0, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_hdr0,
				    PCIEX_SUE_HDR1, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_hdr[0],
				    PCIEX_SUE_HDR2, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_hdr[1],
				    PCIEX_SUE_HDR3, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_hdr[2],
#endif
				    NULL);
			} else {
				pcie_adv_regs->pcie_adv_bdf = 0;
				/*
				 * NOTE(review): no default case here - type
				 * relies on reg_bit always matching one of
				 * the cases below for bits that survive the
				 * pcie_aer_suce_log_bits filter; verify
				 * against PCIE_AER_SUCE_LOG_BITS.
				 */
				switch (pcie_sue_err_tbl[i].reg_bit) {
				case PCIE_AER_SUCE_RCVD_TA:
				case PCIE_AER_SUCE_RCVD_MA:
				case PCIE_AER_SUCE_USC_ERR:
					type = ACC_HANDLE;
					break;
				case PCIE_AER_SUCE_TA_ON_SC:
				case PCIE_AER_SUCE_MA_ON_SC:
					type = DMA_HANDLE;
					break;
				case PCIE_AER_SUCE_UC_DATA_ERR:
				case PCIE_AER_SUCE_PERR_ASSERT:
					if (erpt_p->pe_pci_regs->pci_bdg_regs->
					    pci_bdg_sec_stat &
					    PCI_STAT_S_PERROR)
						type = ACC_HANDLE;
					else
						type = DMA_HANDLE;
					break;
				}
				pcie_pci_check_addr(dip, derr, erpt_p, type);
				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
				    PCIEX_SEC_UE_STATUS, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_status,
				    PCIEX_SRC_ID, DATA_TYPE_UINT16,
				    pcie_adv_regs->pcie_adv_bdf,
				    PCIEX_SRC_VALID, DATA_TYPE_BOOLEAN_VALUE,
				    (pcie_adv_regs->pcie_adv_bdf != NULL) ?
				    1 : NULL,
#ifdef DEBUG
				    PCIEX_SUE_HDR0, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_hdr0,
				    PCIEX_SUE_HDR1, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_hdr[0],
				    PCIEX_SUE_HDR2, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_hdr[1],
				    PCIEX_SUE_HDR3, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_hdr[2],
#endif
				    NULL);
			}
			PCI_FM_SEV_INC(pcie_sue_err_tbl[i].flags);
		}
	}
done:
	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}
2165 
/*
 * Top-level per-device error reporting.  Posts generic PCI status
 * ereports, dispatches to the PCIE/PCI-X specific reporters, handles
 * pci-pci bridge errors, and finally tries to match any captured error
 * address/bdf against cached access/dma handles.  The aggregate
 * severity is stored in derr->fme_status.
 */
static void
pci_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
{
	int fatal = 0;
	int nonfatal = 0;
	int unknown = 0;
	int ok = 0;	/* all four counters are updated by PCI_FM_SEV_INC() */
	char buf[FM_MAX_CLASS];
	int i;

	if (derr->fme_flag == DDI_FM_ERR_UNEXPECTED) {
		/*
		 * Log generic PCI errors.
		 */
		for (i = 0; pci_err_tbl[i].err_class != NULL; i++) {
			if (!(erpt_p->pe_pci_regs->pci_err_status &
			    pci_err_tbl[i].reg_bit) ||
			    !(erpt_p->pe_pci_regs->pci_vflags &
			    PCI_ERR_STATUS_VALID))
				continue;
			/*
			 * Generate an ereport for this error bit.
			 */
			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
			    PCI_ERROR_SUBCLASS, pci_err_tbl[i].err_class);
			ddi_fm_ereport_post(dip, buf, derr->fme_ena,
			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
			    PCI_CONFIG_STATUS, DATA_TYPE_UINT16,
			    erpt_p->pe_pci_regs->pci_err_status,
			    PCI_CONFIG_COMMAND, DATA_TYPE_UINT16,
			    erpt_p->pe_pci_regs->pci_cfg_comm, NULL);

			/*
			 * The meaning of SERR is different for PCIEX (just
			 * implies a message has been sent) so we don't want to
			 * treat that one as fatal.
			 */
			if ((erpt_p->pe_dflags & PCIEX_DEV) &&
			    pci_err_tbl[i].reg_bit == PCI_STAT_S_SYSERR) {
				unknown++;
			} else {
				PCI_FM_SEV_INC(pci_err_tbl[i].flags);
			}
		}
		/* Dispatch to the bus-technology specific reporter */
		if (erpt_p->pe_dflags & PCIEX_DEV) {
			int ret = pcie_error_report(dip, derr, erpt_p);
			PCI_FM_SEV_INC(ret);
		} else if (erpt_p->pe_dflags & PCIX_DEV) {
			if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
				int ret = pcix_bdg_error_report(dip, derr,
				    erpt_p, erpt_p->pe_regs);
				PCI_FM_SEV_INC(ret);
			} else {
				int ret = pcix_error_report(dip, derr, erpt_p);
				PCI_FM_SEV_INC(ret);
			}
		}
	}

	/* Bridge errors are reported even for expected (protected) accesses */
	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV)) {
		int ret = pci_bdg_error_report(dip, derr, erpt_p);
		PCI_FM_SEV_INC(ret);
	}

	if (derr->fme_flag == DDI_FM_ERR_UNEXPECTED) {
		pci_fme_bus_specific_t *pci_fme_bsp;
		int ret = DDI_FM_UNKNOWN;

		/*
		 * fme_bus_specific is a pci_fme_bus_specific_t set up by
		 * pci_ereport_post() (or an upstream pci nexus).
		 */
		pci_fme_bsp = (pci_fme_bus_specific_t *)derr->fme_bus_specific;
		if (pci_fme_bsp->pci_bs_flags & PCI_BS_ADDR_VALID) {
			ret = ndi_fmc_entry_error(dip,
			    pci_fme_bsp->pci_bs_type, derr,
			    (void *)&pci_fme_bsp->pci_bs_addr);
			PCI_FM_SEV_INC(ret);
		}
		/*
		 * If we didn't find the handle using an addr, try using bdf.
		 * Note we don't do this where the bdf is for a
		 * device behind a pciex/pci bridge as the bridge may have
		 * fabricated the bdf.
		 */
		if (ret == DDI_FM_UNKNOWN &&
		    (pci_fme_bsp->pci_bs_flags & PCI_BS_BDF_VALID) &&
		    pci_fme_bsp->pci_bs_bdf == erpt_p->pe_bdf &&
		    (erpt_p->pe_dflags & PCIEX_DEV) &&
		    !(erpt_p->pe_dflags & PCIEX_2PCI_DEV)) {
			ret = ndi_fmc_entry_error_all(dip,
			    pci_fme_bsp->pci_bs_type, derr);
			PCI_FM_SEV_INC(ret);
		}
	}

	derr->fme_status = (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}
2261 
/*
 * Public entry point used by pci nexus drivers to report errors for a
 * device.  Copies the caller's ddi_fm_error_t into a local (for VER0
 * compatibility), normalizes fme_bus_specific into a
 * pci_fme_bus_specific_t, gathers and reports the device's error
 * registers, clears them, and copies the resulting status/ENA back to
 * the caller.  If xx_status is non-NULL the raw PCI status register
 * value is returned through it.
 */
void
pci_ereport_post(dev_info_t *dip, ddi_fm_error_t *derr, uint16_t *xx_status)
{
	struct i_ddi_fmhdl *fmhdl;
	pci_erpt_t *erpt_p;
	ddi_fm_error_t de;
	pci_fme_bus_specific_t pci_fme_bs;

	fmhdl = DEVI(dip)->devi_fmhdl;
	/* driver must be ereport- or errcb-capable to report through FMA */
	if (!DDI_FM_EREPORT_CAP(ddi_fm_capable(dip)) &&
	    !DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) {
		i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL, DDI_NOSLEEP);
		return;
	}

	/*
	 * copy in the ddi_fm_error_t structure in case it's VER0
	 */
	de.fme_version = derr->fme_version;
	de.fme_status = derr->fme_status;
	de.fme_flag = derr->fme_flag;
	de.fme_ena = derr->fme_ena;
	de.fme_acc_handle = derr->fme_acc_handle;
	de.fme_dma_handle = derr->fme_dma_handle;
	de.fme_bus_specific = derr->fme_bus_specific;
	if (derr->fme_version >= DDI_FME_VER1)
		de.fme_bus_type = derr->fme_bus_type;
	else
		de.fme_bus_type = DDI_FME_BUS_TYPE_DFLT;
	if (de.fme_bus_type == DDI_FME_BUS_TYPE_DFLT) {
		/*
		 * if this is the first pci device we've found convert
		 * fme_bus_specific to DDI_FME_BUS_TYPE_PCI
		 */
		bzero(&pci_fme_bs, sizeof (pci_fme_bs));
		if (de.fme_bus_specific) {
			/*
			 * the cpu passed us an addr - this can be used to look
			 * up an access handle
			 */
			pci_fme_bs.pci_bs_addr = (uintptr_t)de.fme_bus_specific;
			pci_fme_bs.pci_bs_type = ACC_HANDLE;
			pci_fme_bs.pci_bs_flags |= PCI_BS_ADDR_VALID;
		}
		de.fme_bus_specific = (void *)&pci_fme_bs;
		de.fme_bus_type = DDI_FME_BUS_TYPE_PCI;
	}

	ASSERT(fmhdl);

	/* generate an ENA if the caller didn't supply one */
	if (de.fme_ena == NULL)
		de.fme_ena = fm_ena_generate(0, FM_ENA_FMT1);

	erpt_p = (pci_erpt_t *)fmhdl->fh_bus_specific;
	if (erpt_p == NULL)
		return;

	/* snapshot the error registers, report, then clear them */
	pci_regs_gather(dip, erpt_p, de.fme_flag);
	pci_error_report(dip, &de, erpt_p);
	pci_regs_clear(erpt_p);

	derr->fme_status = de.fme_status;
	derr->fme_ena = de.fme_ena;
	derr->fme_acc_handle = de.fme_acc_handle;
	derr->fme_dma_handle = de.fme_dma_handle;
	if (xx_status != NULL)
		*xx_status = erpt_p->pe_pci_regs->pci_err_status;
}
2330 
2331 /*
2332  * private version of walk_devs() that can be used during panic. No
2333  * sleeping or locking required.
2334  */
2335 static int
2336 pci_fm_walk_devs(dev_info_t *dip, int (*f)(dev_info_t *, void *), void *arg)
2337 {
2338 	while (dip) {
2339 		switch ((*f)(dip, arg)) {
2340 		case DDI_WALK_TERMINATE:
2341 			return (DDI_WALK_TERMINATE);
2342 		case DDI_WALK_CONTINUE:
2343 			if (pci_fm_walk_devs(ddi_get_child(dip), f,
2344 			    arg) == DDI_WALK_TERMINATE)
2345 				return (DDI_WALK_TERMINATE);
2346 			break;
2347 		case DDI_WALK_PRUNECHILD:
2348 			break;
2349 		}
2350 		dip = ddi_get_next_sibling(dip);
2351 	}
2352 	return (DDI_WALK_CONTINUE);
2353 }
2354 
2355 /*
2356  * need special version of ddi_fm_ereport_post() as the leaf driver may
2357  * not be hardened.
2358  */
static void
pci_fm_ereport_post(dev_info_t *dip, const char *error_class, uint64_t ena,
    uint8_t version, ...)
{
	char *name;
	char device_path[MAXPATHLEN];
	char ddi_error_class[FM_MAX_CLASS];
	nvlist_t *ereport, *detector;
	nv_alloc_t *nva;
	errorq_elem_t *eqep;
	va_list ap;

	if (panicstr) {
		/*
		 * During panic we cannot allocate; reserve a preallocated
		 * errorq element and build the nvlists in its arena.
		 */
		eqep = errorq_reserve(ereport_errorq);
		if (eqep == NULL)
			return;
		ereport = errorq_elem_nvl(ereport_errorq, eqep);
		nva = errorq_elem_nva(ereport_errorq, eqep);
		detector = fm_nvlist_create(nva);
	} else {
		ereport = fm_nvlist_create(NULL);
		detector = fm_nvlist_create(NULL);
	}

	/* detector is the device path of dip in the "dev" FMRI scheme */
	(void) ddi_pathname(dip, device_path);
	fm_fmri_dev_set(detector, FM_DEV_SCHEME_VERSION, NULL,
	    device_path, NULL);
	(void) snprintf(ddi_error_class, FM_MAX_CLASS, "%s.%s",
	    DDI_IO_CLASS, error_class);
	fm_ereport_set(ereport, version, ddi_error_class, ena, detector, NULL);

	/* remaining varargs are (name, type, value, ...) payload pairs */
	va_start(ap, version);
	name = va_arg(ap, char *);
	(void) i_fm_payload_set(ereport, name, ap);
	va_end(ap);

	if (panicstr) {
		/* synchronous commit; errorq owns the nvlist storage */
		errorq_commit(ereport_errorq, eqep, ERRORQ_SYNC);
	} else {
		(void) fm_ereport_post(ereport, EVCH_TRYHARD);
		fm_nvlist_destroy(ereport, FM_NVA_FREE);
		fm_nvlist_destroy(detector, FM_NVA_FREE);
	}
}
2403 
2404 static int
2405 pci_check_regs(dev_info_t *dip, void *arg)
2406 {
2407 	int reglen;
2408 	int rn;
2409 	int totreg;
2410 	pci_regspec_t *drv_regp;
2411 	pci_target_err_t *tgt_err = (pci_target_err_t *)arg;
2412 
2413 	if (tgt_err->tgt_pci_space == PCI_REG_ADDR_G(PCI_ADDR_CONFIG)) {
2414 		/*
2415 		 * for config space, we need to check if the given address
2416 		 * is a valid config space address for this device - based
2417 		 * on pci_phys_hi of the config space entry in reg property.
2418 		 */
2419 		if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
2420 		    "reg", (caddr_t)&drv_regp, &reglen) != DDI_SUCCESS)
2421 			return (DDI_WALK_CONTINUE);
2422 
2423 		totreg = reglen / sizeof (pci_regspec_t);
2424 		for (rn = 0; rn < totreg; rn++) {
2425 			if (tgt_err->tgt_pci_space ==
2426 			    PCI_REG_ADDR_G(drv_regp[rn].pci_phys_hi) &&
2427 			    (tgt_err->tgt_pci_addr & (PCI_REG_BUS_M |
2428 			    PCI_REG_DEV_M | PCI_REG_FUNC_M)) ==
2429 			    (drv_regp[rn].pci_phys_hi & (PCI_REG_BUS_M |
2430 			    PCI_REG_DEV_M | PCI_REG_FUNC_M))) {
2431 				tgt_err->tgt_dip = dip;
2432 				kmem_free(drv_regp, reglen);
2433 				return (DDI_WALK_TERMINATE);
2434 			}
2435 		}
2436 		kmem_free(drv_regp, reglen);
2437 	} else {
2438 		/*
2439 		 * for non config space, need to check reg to look
2440 		 * for any non-relocable mapping, otherwise check
2441 		 * assigned-addresses.
2442 		 */
2443 		if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
2444 		    "reg", (caddr_t)&drv_regp, &reglen) != DDI_SUCCESS)
2445 			return (DDI_WALK_CONTINUE);
2446 
2447 		totreg = reglen / sizeof (pci_regspec_t);
2448 		for (rn = 0; rn < totreg; rn++) {
2449 			if ((drv_regp[rn].pci_phys_hi & PCI_RELOCAT_B) &&
2450 			    (tgt_err->tgt_pci_space == TGT_PCI_SPACE_UNKNOWN ||
2451 			    tgt_err->tgt_pci_space ==
2452 			    PCI_REG_ADDR_G(drv_regp[rn].pci_phys_hi)) &&
2453 			    (tgt_err->tgt_pci_addr >=
2454 			    (uint64_t)drv_regp[rn].pci_phys_low +
2455 			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32)) &&
2456 			    (tgt_err->tgt_pci_addr <
2457 			    (uint64_t)drv_regp[rn].pci_phys_low +
2458 			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32) +
2459 			    (uint64_t)drv_regp[rn].pci_size_low +
2460 			    ((uint64_t)drv_regp[rn].pci_size_hi << 32))) {
2461 				tgt_err->tgt_dip = dip;
2462 				kmem_free(drv_regp, reglen);
2463 				return (DDI_WALK_TERMINATE);
2464 			}
2465 		}
2466 		kmem_free(drv_regp, reglen);
2467 
2468 		if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
2469 		    "assigned-addresses", (caddr_t)&drv_regp, &reglen) !=
2470 		    DDI_SUCCESS)
2471 			return (DDI_WALK_CONTINUE);
2472 
2473 		totreg = reglen / sizeof (pci_regspec_t);
2474 		for (rn = 0; rn < totreg; rn++) {
2475 			if ((tgt_err->tgt_pci_space == TGT_PCI_SPACE_UNKNOWN ||
2476 			    tgt_err->tgt_pci_space ==
2477 			    PCI_REG_ADDR_G(drv_regp[rn].pci_phys_hi)) &&
2478 			    (tgt_err->tgt_pci_addr >=
2479 			    (uint64_t)drv_regp[rn].pci_phys_low +
2480 			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32)) &&
2481 			    (tgt_err->tgt_pci_addr <
2482 			    (uint64_t)drv_regp[rn].pci_phys_low +
2483 			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32) +
2484 			    (uint64_t)drv_regp[rn].pci_size_low +
2485 			    ((uint64_t)drv_regp[rn].pci_size_hi << 32))) {
2486 				tgt_err->tgt_dip = dip;
2487 				kmem_free(drv_regp, reglen);
2488 				return (DDI_WALK_TERMINATE);
2489 			}
2490 		}
2491 		kmem_free(drv_regp, reglen);
2492 	}
2493 	return (DDI_WALK_CONTINUE);
2494 }
2495 
2496 /*
2497  * impl_fix_ranges - fixes the config space entry of the "ranges"
2498  * property on psycho+ platforms.  (if changing this function please make sure
2499  * to change the pci_fix_ranges function in pcipsy.c)
2500  */
2501 /*ARGSUSED*/
2502 static void
2503 pci_fix_ranges(dev_info_t *dip, pci_ranges_t *pci_ranges, int nrange)
2504 {
2505 #if defined(__sparc)
2506 	char *name = ddi_binding_name(dip);
2507 
2508 	if ((strcmp(name, "pci108e,8000") == 0) ||
2509 	    (strcmp(name, "pci108e,a000") == 0) ||
2510 	    (strcmp(name, "pci108e,a001") == 0)) {
2511 		int i;
2512 		for (i = 0; i < nrange; i++, pci_ranges++)
2513 			if ((pci_ranges->child_high & PCI_REG_ADDR_M) ==
2514 			    PCI_ADDR_CONFIG)
2515 				pci_ranges->parent_low |=
2516 				    pci_ranges->child_high;
2517 	}
2518 #endif
2519 }
2520 
/*
 * Tree-walk callback: find the top-level pci/pciex nexus whose "ranges"
 * property covers tgt_err_addr, translate that address into pci space,
 * then walk the nexus's children with pci_check_regs() to identify the
 * target device.  Returns DDI_WALK_TERMINATE once a device is found,
 * DDI_WALK_PRUNECHILD otherwise (children of a pci nexus are handled by
 * the inner walk, not this one).
 */
static int
pci_check_ranges(dev_info_t *dip, void *arg)
{
	uint64_t range_parent_begin;
	uint64_t range_parent_size;
	uint64_t range_parent_end;
	uint32_t space_type;
	uint32_t bus_num;
	uint32_t range_offset;
	pci_ranges_t *pci_ranges, *rangep;
	pci_bus_range_t *pci_bus_rangep;
	int pci_ranges_length;
	int nrange;
	pci_target_err_t *tgt_err = (pci_target_err_t *)arg;
	int i, size;
	if (strcmp(ddi_node_name(dip), "pci") != 0 &&
	    strcmp(ddi_node_name(dip), "pciex") != 0)
		return (DDI_WALK_CONTINUE);

	/*
	 * Get the ranges property. Note we only look at the top level pci
	 * node (hostbridge) which has a ranges property of type pci_ranges_t
	 * not at pci-pci bridges.
	 */
	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "ranges",
	    (caddr_t)&pci_ranges, &pci_ranges_length) != DDI_SUCCESS) {
		/*
		 * no ranges property - no translation needed
		 */
		tgt_err->tgt_pci_addr = tgt_err->tgt_err_addr;
		tgt_err->tgt_pci_space = TGT_PCI_SPACE_UNKNOWN;
		if (panicstr)
			/* lock-free walk during panic */
			(void) pci_fm_walk_devs(ddi_get_child(dip),
			    pci_check_regs, (void *)tgt_err);
		else {
			int circ = 0;
			ndi_devi_enter(dip, &circ);
			ddi_walk_devs(ddi_get_child(dip), pci_check_regs,
			    (void *)tgt_err);
			ndi_devi_exit(dip, circ);
		}
		if (tgt_err->tgt_dip != NULL)
			return (DDI_WALK_TERMINATE);
		return (DDI_WALK_PRUNECHILD);
	}
	nrange = pci_ranges_length / sizeof (pci_ranges_t);
	rangep = pci_ranges;

	/* Need to fix the pci ranges property for psycho based systems */
	pci_fix_ranges(dip, pci_ranges, nrange);

	for (i = 0; i < nrange; i++, rangep++) {
		range_parent_begin = ((uint64_t)rangep->parent_high << 32) +
		    rangep->parent_low;
		range_parent_size = ((uint64_t)rangep->size_high << 32) +
		    rangep->size_low;
		range_parent_end = range_parent_begin + range_parent_size - 1;

		if ((tgt_err->tgt_err_addr < range_parent_begin) ||
		    (tgt_err->tgt_err_addr > range_parent_end)) {
			/* Not in range */
			continue;
		}
		space_type = PCI_REG_ADDR_G(rangep->child_high);
		if (space_type == PCI_REG_ADDR_G(PCI_ADDR_CONFIG)) {
			/* Config space address - check bus range */
			range_offset = tgt_err->tgt_err_addr -
			    range_parent_begin;
			bus_num = PCI_REG_BUS_G(range_offset);
			if (ddi_getlongprop(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, "bus-range",
			    (caddr_t)&pci_bus_rangep, &size) != DDI_SUCCESS) {
				continue;
			}
			if ((bus_num < pci_bus_rangep->lo) ||
			    (bus_num > pci_bus_rangep->hi)) {
				/*
				 * Bus number not appropriate for this
				 * pci nexus.
				 */
				kmem_free(pci_bus_rangep, size);
				continue;
			}
			kmem_free(pci_bus_rangep, size);
		}

		/* We have a match if we get here - compute pci address */
		tgt_err->tgt_pci_addr = tgt_err->tgt_err_addr -
		    range_parent_begin;
		tgt_err->tgt_pci_addr += (((uint64_t)rangep->child_mid << 32) +
		    rangep->child_low);
		tgt_err->tgt_pci_space = space_type;
		if (panicstr)
			/* lock-free walk during panic */
			(void) pci_fm_walk_devs(ddi_get_child(dip),
			    pci_check_regs, (void *)tgt_err);
		else {
			int circ = 0;
			ndi_devi_enter(dip, &circ);
			ddi_walk_devs(ddi_get_child(dip), pci_check_regs,
			    (void *)tgt_err);
			ndi_devi_exit(dip, circ);
		}
		if (tgt_err->tgt_dip != NULL) {
			kmem_free(pci_ranges, pci_ranges_length);
			return (DDI_WALK_TERMINATE);
		}
	}
	kmem_free(pci_ranges, pci_ranges_length);
	return (DDI_WALK_PRUNECHILD);
}
2631 
2632 /*
2633  * Function used to drain pci_target_queue, either during panic or after softint
2634  * is generated, to generate target device ereports based on captured physical
2635  * addresses
2636  */
2637 /*ARGSUSED*/
2638 static void
2639 pci_target_drain(void *private_p, pci_target_err_t *tgt_err)
2640 {
2641 	char buf[FM_MAX_CLASS];
2642 
2643 	/*
2644 	 * The following assumes that all pci_pci bridge devices
2645 	 * are configured as transparant. Find the top-level pci
2646 	 * nexus which has tgt_err_addr in one of its ranges, converting this
2647 	 * to a pci address in the process. Then starting at this node do
2648 	 * another tree walk to find a device with the pci address we've
2649 	 * found within range of one of it's assigned-addresses properties.
2650 	 */
2651 	tgt_err->tgt_dip = NULL;
2652 	if (panicstr)
2653 		(void) pci_fm_walk_devs(ddi_root_node(), pci_check_ranges,
2654 		    (void *)tgt_err);
2655 	else
2656 		ddi_walk_devs(ddi_root_node(), pci_check_ranges,
2657 		    (void *)tgt_err);
2658 	if (tgt_err->tgt_dip == NULL)
2659 		return;
2660 
2661 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", tgt_err->tgt_bridge_type,
2662 	    tgt_err->tgt_err_class);
2663 	pci_fm_ereport_post(tgt_err->tgt_dip, buf, tgt_err->tgt_err_ena, 0,
2664 	    PCI_PA, DATA_TYPE_UINT64, tgt_err->tgt_err_addr, NULL);
2665 }
2666 
2667 void
2668 pci_target_enqueue(uint64_t ena, char *class, char *bridge_type, uint64_t addr)
2669 {
2670 	pci_target_err_t tgt_err;
2671 
2672 	tgt_err.tgt_err_ena = ena;
2673 	tgt_err.tgt_err_class = class;
2674 	tgt_err.tgt_bridge_type = bridge_type;
2675 	tgt_err.tgt_err_addr = addr;
2676 	errorq_dispatch(pci_target_queue, (void *)&tgt_err,
2677 	    sizeof (pci_target_err_t), ERRORQ_ASYNC);
2678 }
2679 
2680 void
2681 pci_targetq_init(void)
2682 {
2683 	/*
2684 	 * PCI target errorq, to schedule async handling of generation of
2685 	 * target device ereports based on captured physical address.
2686 	 * The errorq is created here but destroyed when _fini is called
2687 	 * for the pci module.
2688 	 */
2689 	if (pci_target_queue == NULL) {
2690 		pci_target_queue = errorq_create("pci_target_queue",
2691 		    (errorq_func_t)pci_target_drain, (void *)NULL,
2692 		    TARGET_MAX_ERRS, sizeof (pci_target_err_t), FM_ERR_PIL,
2693 		    ERRORQ_VITAL);
2694 		if (pci_target_queue == NULL)
2695 			panic("failed to create required system error queue");
2696 	}
2697 }
2698