xref: /titanic_50/usr/src/uts/common/os/pcifm.c (revision 0e42dee69ed771bf604dd1789fca9d77b5bbe302)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/types.h>
30 #include <sys/sunndi.h>
31 #include <sys/sysmacros.h>
32 #include <sys/ddifm_impl.h>
33 #include <sys/fm/util.h>
34 #include <sys/fm/protocol.h>
35 #include <sys/fm/io/pci.h>
36 #include <sys/fm/io/ddi.h>
37 #include <sys/pci.h>
38 #include <sys/pcie.h>
39 #include <sys/pci_impl.h>
40 #include <sys/epm.h>
41 #include <sys/pcifm.h>
42 
/*
 * True when a PCI-X capability version supports the ECC registers
 * (versions 1 and 2 only).
 */
#define	PCIX_ECC_VER_CHECK(x)	(((x) == PCI_PCIX_VER_1) ||\
				((x) == PCI_PCIX_VER_2))

/*
 * Expected PCI Express error mask values
 */
uint32_t pcie_expected_ce_mask = PCIE_AER_CE_AD_NFE;
uint32_t pcie_expected_ue_mask = 0x0;
uint32_t pcie_expected_sue_mask = 0x0;

/* Deferred-target error event queue; NULL until initialized elsewhere. */
errorq_t *pci_target_queue = NULL;
54 
/*
 * Generic PCI error table.  Columns appear to be: ereport class, the
 * PCI_CONF_STAT status bit that signals it, an optional target-error class,
 * and the expected FM severity (layout per pci_fm_err_t in <sys/pcifm.h> —
 * confirm against that header).  Terminated by an all-NULL row.
 */
pci_fm_err_t pci_err_tbl[] = {
	PCI_DET_PERR,	PCI_STAT_PERROR,	NULL,		DDI_FM_UNKNOWN,
	PCI_MDPE,	PCI_STAT_S_PERROR,	PCI_TARG_MDPE,	DDI_FM_UNKNOWN,
	PCI_SIG_SERR,	PCI_STAT_S_SYSERR,	NULL,		DDI_FM_FATAL,
	PCI_MA,		PCI_STAT_R_MAST_AB,	PCI_TARG_MA,	DDI_FM_UNKNOWN,
	PCI_REC_TA,	PCI_STAT_R_TARG_AB,	PCI_TARG_REC_TA, DDI_FM_UNKNOWN,
	PCI_SIG_TA,	PCI_STAT_S_TARG_AB,	NULL,		DDI_FM_UNKNOWN,
	NULL, NULL, NULL, NULL,
};
64 
/*
 * PCI bridge variant of pci_err_tbl: same layout, but SERR is "received"
 * (PCI_REC_SERR) rather than "signaled", and is not automatically fatal.
 */
pci_fm_err_t pci_bdg_err_tbl[] = {
	PCI_DET_PERR,	PCI_STAT_PERROR,	NULL,		DDI_FM_UNKNOWN,
	PCI_MDPE,	PCI_STAT_S_PERROR,	PCI_TARG_MDPE,	DDI_FM_UNKNOWN,
	PCI_REC_SERR,	PCI_STAT_S_SYSERR,	NULL,		DDI_FM_UNKNOWN,
	PCI_MA,		PCI_STAT_R_MAST_AB,	PCI_TARG_MA,	DDI_FM_UNKNOWN,
	PCI_REC_TA,	PCI_STAT_R_TARG_AB,	PCI_TARG_REC_TA, DDI_FM_UNKNOWN,
	PCI_SIG_TA,	PCI_STAT_S_TARG_AB,	NULL,		DDI_FM_UNKNOWN,
	NULL, NULL, NULL, NULL,
};
74 
/*
 * PCI Express correctable (CE) errors — AER correctable status bits.
 * All correctable errors are, by definition, non-fatal.
 */
static pci_fm_err_t pciex_ce_err_tbl[] = {
	PCIEX_RE,	PCIE_AER_CE_RECEIVER_ERR,	NULL,	DDI_FM_NONFATAL,
	PCIEX_RNR,	PCIE_AER_CE_REPLAY_ROLLOVER,	NULL,	DDI_FM_NONFATAL,
	PCIEX_RTO,	PCIE_AER_CE_REPLAY_TO,		NULL,	DDI_FM_NONFATAL,
	PCIEX_BDP,	PCIE_AER_CE_BAD_DLLP,		NULL,	DDI_FM_NONFATAL,
	PCIEX_BTP,	PCIE_AER_CE_BAD_TLP,		NULL,	DDI_FM_NONFATAL,
	PCIEX_ANFE,	PCIE_AER_CE_AD_NFE,		NULL,	DDI_FM_NONFATAL,
	NULL, NULL, NULL, NULL,
};
84 
/*
 * PCI Express uncorrectable (UE) errors — AER uncorrectable status bits.
 * Link-level failures are fatal; transaction-level errors are non-fatal or
 * need further analysis (DDI_FM_UNKNOWN).
 */
static pci_fm_err_t pciex_ue_err_tbl[] = {
	PCIEX_TE,	PCIE_AER_UCE_TRAINING,		NULL,	DDI_FM_FATAL,
	PCIEX_DLP,	PCIE_AER_UCE_DLP,		NULL,	DDI_FM_FATAL,
	PCIEX_SD,	PCIE_AER_UCE_SD,		NULL,   DDI_FM_FATAL,
	PCIEX_ROF,	PCIE_AER_UCE_RO,		NULL,	DDI_FM_FATAL,
	PCIEX_FCP,	PCIE_AER_UCE_FCP,		NULL,	DDI_FM_FATAL,
	PCIEX_MFP,	PCIE_AER_UCE_MTLP,		NULL,	DDI_FM_FATAL,
	PCIEX_CTO,	PCIE_AER_UCE_TO,		NULL,	DDI_FM_NONFATAL,
	PCIEX_UC,	PCIE_AER_UCE_UC,		NULL,	DDI_FM_NONFATAL,
	PCIEX_ECRC,	PCIE_AER_UCE_ECRC,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_CA,	PCIE_AER_UCE_CA,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_UR,	PCIE_AER_UCE_UR,		NULL,	DDI_FM_NONFATAL,
	PCIEX_POIS,	PCIE_AER_UCE_PTLP,		NULL,	DDI_FM_UNKNOWN,
	NULL, NULL, NULL, NULL,
};
100 
/*
 * PCI Express secondary uncorrectable (SUE) errors, reported by
 * PCI Express to PCI/PCI-X bridges via the AER secondary status register.
 */
static pci_fm_err_t pcie_sue_err_tbl[] = {
	PCIEX_S_TA_SC,	PCIE_AER_SUCE_TA_ON_SC,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_S_MA_SC,	PCIE_AER_SUCE_MA_ON_SC,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_S_RTA,	PCIE_AER_SUCE_RCVD_TA,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_S_RMA,	PCIE_AER_SUCE_RCVD_MA,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_S_USC,	PCIE_AER_SUCE_USC_ERR,		NULL,	DDI_FM_UNKNOWN,
	PCIEX_S_USCMD,	PCIE_AER_SUCE_USC_MSG_DATA_ERR,	NULL,	DDI_FM_FATAL,
	PCIEX_S_UDE,	PCIE_AER_SUCE_UC_DATA_ERR,	NULL,	DDI_FM_UNKNOWN,
	PCIEX_S_UAT,	PCIE_AER_SUCE_UC_ATTR_ERR,	NULL,	DDI_FM_FATAL,
	PCIEX_S_UADR,	PCIE_AER_SUCE_UC_ADDR_ERR,	NULL,	DDI_FM_FATAL,
	PCIEX_S_TEX,	PCIE_AER_SUCE_TIMER_EXPIRED,	NULL,	DDI_FM_FATAL,
	PCIEX_S_PERR,	PCIE_AER_SUCE_PERR_ASSERT,	NULL,	DDI_FM_UNKNOWN,
	PCIEX_S_SERR,	PCIE_AER_SUCE_SERR_ASSERT,	NULL,	DDI_FM_FATAL,
	PCIEX_INTERR,	PCIE_AER_SUCE_INTERNAL_ERR,	NULL,	DDI_FM_FATAL,
	NULL, NULL, NULL, NULL,
};
117 
/*
 * PCI-X split-completion errors reported in the PCI-X status register.
 */
static pci_fm_err_t pcix_err_tbl[] = {
	PCIX_SPL_DIS,		PCI_PCIX_SPL_DSCD,	NULL,	DDI_FM_UNKNOWN,
	PCIX_UNEX_SPL,		PCI_PCIX_UNEX_SPL,	NULL,	DDI_FM_UNKNOWN,
	PCIX_RX_SPL_MSG,	PCI_PCIX_RX_SPL_MSG,	NULL,   DDI_FM_UNKNOWN,
	NULL, NULL, NULL, NULL,
};
124 
/*
 * PCI-X bridge secondary-status (PCI_PCIX_BSS_*) errors — the bridge-side
 * counterpart of pcix_err_tbl.
 */
static pci_fm_err_t pcix_sec_err_tbl[] = {
	PCIX_SPL_DIS,		PCI_PCIX_BSS_SPL_DSCD,	NULL,	DDI_FM_UNKNOWN,
	PCIX_UNEX_SPL,		PCI_PCIX_BSS_UNEX_SPL,	NULL,	DDI_FM_UNKNOWN,
	PCIX_BSS_SPL_OR,	PCI_PCIX_BSS_SPL_OR,	NULL,	DDI_FM_NONFATAL,
	PCIX_BSS_SPL_DLY,	PCI_PCIX_BSS_SPL_DLY,	NULL,	DDI_FM_NONFATAL,
	NULL, NULL, NULL, NULL,
};
132 
/*
 * PCI Express device-status errors for devices WITHOUT the Advanced Error
 * Reporting capability; driven off the PCIE_DEVSTS detection bits.
 */
static pci_fm_err_t pciex_nadv_err_tbl[] = {
	PCIEX_UR,	PCIE_DEVSTS_UR_DETECTED,	NULL,	DDI_FM_UNKNOWN,
	PCIEX_FAT,	PCIE_DEVSTS_FE_DETECTED,	NULL,	DDI_FM_FATAL,
	PCIEX_NONFAT,	PCIE_DEVSTS_NFE_DETECTED,	NULL,	DDI_FM_UNKNOWN,
	PCIEX_CORR,	PCIE_DEVSTS_CE_DETECTED,	NULL,	DDI_FM_NONFATAL,
	NULL, NULL, NULL, NULL,
};
140 
141 static int
142 pci_config_check(ddi_acc_handle_t handle)
143 {
144 	ddi_acc_hdl_t *hp = impl_acc_hdl_get(handle);
145 	ddi_fm_error_t de;
146 
147 	if (!(DDI_FM_ACC_ERR_CAP(ddi_fm_capable(hp->ah_dip))))
148 		return (DDI_FM_OK);
149 
150 	de.fme_version = DDI_FME_VERSION;
151 
152 	ddi_fm_acc_err_get(handle, &de, de.fme_version);
153 	if (de.fme_status != DDI_FM_OK) {
154 		char buf[FM_MAX_CLASS];
155 
156 		(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", PCI_ERROR_SUBCLASS,
157 		    PCI_NR);
158 		ddi_fm_ereport_post(hp->ah_dip, buf, de.fme_ena, DDI_NOSLEEP,
159 		    FM_VERSION, DATA_TYPE_UINT8, 0, NULL);
160 		ddi_fm_acc_err_clear(handle, de.fme_version);
161 	}
162 	return (de.fme_status);
163 }
164 
165 static void
166 pcix_ecc_regs_gather(pci_erpt_t *erpt_p, pcix_ecc_regs_t *pcix_ecc_regs,
167     uint8_t pcix_cap_ptr)
168 {
169 	int bdg = erpt_p->pe_dflags & PCI_BRIDGE_DEV;
170 
171 	pcix_ecc_regs->pcix_ecc_ctlstat = pci_config_get32(erpt_p->pe_hdl,
172 	    (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_STATUS :
173 	    PCI_PCIX_ECC_STATUS)));
174 	if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
175 		pcix_ecc_regs->pcix_ecc_vflags |= PCIX_ERR_ECC_STS_VALID;
176 	else
177 		return;
178 	pcix_ecc_regs->pcix_ecc_fstaddr = pci_config_get32(erpt_p->pe_hdl,
179 	    (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_FST_AD :
180 	    PCI_PCIX_ECC_FST_AD)));
181 	pcix_ecc_regs->pcix_ecc_secaddr = pci_config_get32(erpt_p->pe_hdl,
182 	    (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_SEC_AD :
183 	    PCI_PCIX_ECC_SEC_AD)));
184 	pcix_ecc_regs->pcix_ecc_attr = pci_config_get32((
185 	    ddi_acc_handle_t)erpt_p->pe_hdl,
186 	    (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_ATTR : PCI_PCIX_ECC_ATTR)));
187 }
188 
/*
 * Capture the PCI-X error registers for a bridge (pcix_bdg_error_regs_t)
 * or a leaf device (pcix_error_regs_t); pe_regs points at whichever
 * structure matches erpt_p->pe_dflags.  Each read is validated with
 * pci_config_check(); on the first failure we return, leaving later
 * *_VALID flags unset.
 */
static void
pcix_regs_gather(pci_erpt_t *erpt_p, void *pe_regs)
{
	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
		pcix_bdg_error_regs_t *pcix_bdg_regs =
		    (pcix_bdg_error_regs_t *)pe_regs;
		uint8_t pcix_bdg_cap_ptr;
		int i;

		pcix_bdg_cap_ptr = pcix_bdg_regs->pcix_bdg_cap_ptr;
		pcix_bdg_regs->pcix_bdg_sec_stat = pci_config_get16(
		    erpt_p->pe_hdl, (pcix_bdg_cap_ptr + PCI_PCIX_SEC_STATUS));
		if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
			pcix_bdg_regs->pcix_bdg_vflags |=
			    PCIX_BDG_SEC_STATUS_VALID;
		else
			return;
		pcix_bdg_regs->pcix_bdg_stat = pci_config_get32(erpt_p->pe_hdl,
		    (pcix_bdg_cap_ptr + PCI_PCIX_BDG_STATUS));
		if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
			pcix_bdg_regs->pcix_bdg_vflags |= PCIX_BDG_STATUS_VALID;
		else
			return;
		/* ECC registers exist only for PCI-X versions 1 and 2. */
		if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) {
			pcix_ecc_regs_t *pcix_bdg_ecc_regs;
			/*
			 * PCI Express to PCI-X bridges only implement the
			 * secondary side of the PCI-X ECC registers; the
			 * side-select bit is read-only there, so we must not
			 * write it and we gather only index 1.
			 */
			if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
				pcix_bdg_ecc_regs =
				    pcix_bdg_regs->pcix_bdg_ecc_regs[1];
				pcix_ecc_regs_gather(erpt_p, pcix_bdg_ecc_regs,
				    pcix_bdg_cap_ptr);
			} else {
				/*
				 * Plain PCI-X bridges: select each side (0/1)
				 * via the ECC status register, then gather
				 * that side's registers.
				 */
				for (i = 0; i < 2; i++) {
					pcix_bdg_ecc_regs =
					    pcix_bdg_regs->pcix_bdg_ecc_regs[i];
					pci_config_put32(erpt_p->pe_hdl,
					    (pcix_bdg_cap_ptr +
					    PCI_PCIX_BDG_ECC_STATUS), i);
					pcix_ecc_regs_gather(erpt_p,
					    pcix_bdg_ecc_regs,
					    pcix_bdg_cap_ptr);
				}
			}
		}
	} else {
		pcix_error_regs_t *pcix_regs = (pcix_error_regs_t *)pe_regs;
		uint8_t pcix_cap_ptr;

		pcix_cap_ptr = pcix_regs->pcix_cap_ptr;

		pcix_regs->pcix_command = pci_config_get16(erpt_p->pe_hdl,
		    (pcix_cap_ptr + PCI_PCIX_COMMAND));
		pcix_regs->pcix_status = pci_config_get32(erpt_p->pe_hdl,
		    (pcix_cap_ptr + PCI_PCIX_STATUS));
		if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
			pcix_regs->pcix_vflags |= PCIX_ERR_STATUS_VALID;
		else
			return;
		/* Leaf devices have a single ECC register set, if any. */
		if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) {
			pcix_ecc_regs_t *pcix_ecc_regs =
			    pcix_regs->pcix_ecc_regs;

			pcix_ecc_regs_gather(erpt_p, pcix_ecc_regs,
			    pcix_cap_ptr);
		}
	}
}
260 
/*
 * Capture the PCI Express error registers: device status/control always;
 * PCI-X bridge registers, root-complex registers, and the Advanced Error
 * Reporting (AER) register sets when the corresponding pe_dflags bits are
 * set.  Reads validated by pci_config_check() gate the *_VALID flags used
 * later by the clear/report paths.
 */
static void
pcie_regs_gather(pci_erpt_t *erpt_p)
{
	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
	uint8_t pcie_cap_ptr;
	pcie_adv_error_regs_t *pcie_adv_regs;
	uint16_t pcie_ecap_ptr;

	pcie_cap_ptr = pcie_regs->pcie_cap_ptr;

	/* Device status first; if even this fails, give up entirely. */
	pcie_regs->pcie_err_status = pci_config_get16(erpt_p->pe_hdl,
	    pcie_cap_ptr + PCIE_DEVSTS);
	if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
		pcie_regs->pcie_vflags |= PCIE_ERR_STATUS_VALID;
	else
		return;

	pcie_regs->pcie_err_ctl = pci_config_get16(erpt_p->pe_hdl,
	    (pcie_cap_ptr + PCIE_DEVCTL));

	/* PCI Express to PCI-X bridges also carry PCI-X bridge state. */
	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) && (erpt_p->pe_dflags &
	    PCIX_DEV))
		pcix_regs_gather(erpt_p, pcie_regs->pcix_bdg_regs);

	/* Root complexes: capture root status and control. */
	if (erpt_p->pe_dflags & PCIEX_RC_DEV) {
		pcie_rc_error_regs_t *pcie_rc_regs = pcie_regs->pcie_rc_regs;

		pcie_rc_regs->pcie_rc_status = pci_config_get32(erpt_p->pe_hdl,
		    (pcie_cap_ptr + PCIE_ROOTSTS));
		pcie_rc_regs->pcie_rc_ctl = pci_config_get16(erpt_p->pe_hdl,
		    (pcie_cap_ptr + PCIE_ROOTCTL));
	}

	/* Everything below requires the AER extended capability. */
	if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV))
		return;

	pcie_adv_regs = pcie_regs->pcie_adv_regs;

	pcie_ecap_ptr = pcie_adv_regs->pcie_adv_cap_ptr;

	/* Uncorrectable error status, mask, severity and control. */
	pcie_adv_regs->pcie_ue_status = pci_config_get32(erpt_p->pe_hdl,
	    pcie_ecap_ptr + PCIE_AER_UCE_STS);
	if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
		pcie_adv_regs->pcie_adv_vflags |= PCIE_UE_STATUS_VALID;

	pcie_adv_regs->pcie_ue_mask = pci_config_get32(erpt_p->pe_hdl,
	    pcie_ecap_ptr + PCIE_AER_UCE_MASK);
	pcie_adv_regs->pcie_ue_sev = pci_config_get32(erpt_p->pe_hdl,
	    pcie_ecap_ptr + PCIE_AER_UCE_SERV);
	pcie_adv_regs->pcie_adv_ctl = pci_config_get32(erpt_p->pe_hdl,
	    pcie_ecap_ptr + PCIE_AER_CTL);
	/*
	 * AER header log: first dword gates validity; the remaining three
	 * are read only once the first succeeds.
	 */
	pcie_adv_regs->pcie_ue_hdr0 = pci_config_get32(erpt_p->pe_hdl,
	    pcie_ecap_ptr + PCIE_AER_HDR_LOG);
	if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK) {
		int i;
		pcie_adv_regs->pcie_adv_vflags |= PCIE_UE_HDR_VALID;

		for (i = 0; i < 3; i++) {
			pcie_adv_regs->pcie_ue_hdr[i] = pci_config_get32(
			    erpt_p->pe_hdl, pcie_ecap_ptr + PCIE_AER_HDR_LOG +
			    (4 * (i + 1)));
		}
	}

	/* Correctable error status and mask. */
	pcie_adv_regs->pcie_ce_status = pci_config_get32(erpt_p->pe_hdl,
	    pcie_ecap_ptr + PCIE_AER_CE_STS);
	if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
		pcie_adv_regs->pcie_adv_vflags |= PCIE_CE_STATUS_VALID;

	pcie_adv_regs->pcie_ce_mask = pci_config_get32(erpt_p->pe_hdl,
	    pcie_ecap_ptr + PCIE_AER_CE_MASK);

	/*
	 * If pci express to pci bridge then grab the bridge
	 * error registers.
	 */
	if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
		pcie_adv_bdg_error_regs_t *pcie_bdg_regs =
		    pcie_adv_regs->pcie_adv_bdg_regs;

		pcie_bdg_regs->pcie_sue_status =
		    pci_config_get32(erpt_p->pe_hdl,
		    pcie_ecap_ptr + PCIE_AER_SUCE_STS);
		if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
			pcie_adv_regs->pcie_adv_vflags |= PCIE_SUE_STATUS_VALID;
		/* Secondary header log, same gating pattern as above. */
		pcie_bdg_regs->pcie_sue_hdr0 = pci_config_get32(erpt_p->pe_hdl,
		    (pcie_ecap_ptr + PCIE_AER_SHDR_LOG));

		if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK) {
			int i;

			pcie_adv_regs->pcie_adv_vflags |= PCIE_SUE_HDR_VALID;

			for (i = 0; i < 3; i++) {
				pcie_bdg_regs->pcie_sue_hdr[i] =
				    pci_config_get32(erpt_p->pe_hdl,
					pcie_ecap_ptr + PCIE_AER_SHDR_LOG +
					(4 * (i + 1)));
			}
		}
	}
	/*
	 * If PCI Express root complex then grab the root complex
	 * error registers.
	 */
	if (erpt_p->pe_dflags & PCIEX_RC_DEV) {
		pcie_adv_rc_error_regs_t *pcie_rc_regs =
		    pcie_adv_regs->pcie_adv_rc_regs;

		pcie_rc_regs->pcie_rc_err_cmd = pci_config_get32(erpt_p->pe_hdl,
		    (pcie_ecap_ptr + PCIE_AER_RE_CMD));
		pcie_rc_regs->pcie_rc_err_status =
		    pci_config_get32(erpt_p->pe_hdl,
			(pcie_ecap_ptr + PCIE_AER_RE_STS));
		if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
			pcie_adv_regs->pcie_adv_vflags |=
			    PCIE_RC_ERR_STATUS_VALID;
		pcie_rc_regs->pcie_rc_ce_src_id =
		    pci_config_get16(erpt_p->pe_hdl,
			(pcie_ecap_ptr + PCIE_AER_CE_SRC_ID));
		pcie_rc_regs->pcie_rc_ue_src_id =
		    pci_config_get16(erpt_p->pe_hdl,
			(pcie_ecap_ptr + PCIE_AER_ERR_SRC_ID));
		if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
			pcie_adv_regs->pcie_adv_vflags |= PCIE_SRC_ID_VALID;
	}
}
388 
/*
 * Top-level register-gathering entry point: capture the generic PCI status
 * and command registers, then bridge-specific registers, then dispatch to
 * the PCI Express or PCI-X gather routine per pe_dflags.  The dip argument
 * is unused here (ARGSUSED).
 */
/*ARGSUSED*/
static void
pci_regs_gather(dev_info_t *dip, pci_erpt_t *erpt_p)
{
	pci_error_regs_t *pci_regs = erpt_p->pe_pci_regs;

	/*
	 * Start by reading all the error registers that are available for
	 * pci and pci express and for leaf devices and bridges/switches
	 */
	pci_regs->pci_err_status = pci_config_get16(erpt_p->pe_hdl,
	    PCI_CONF_STAT);
	if (pci_config_check(erpt_p->pe_hdl) != DDI_FM_OK)
		return;
	pci_regs->pci_vflags |= PCI_ERR_STATUS_VALID;
	pci_regs->pci_cfg_comm = pci_config_get16(erpt_p->pe_hdl,
	    PCI_CONF_COMM);
	if (pci_config_check(erpt_p->pe_hdl) != DDI_FM_OK)
		return;

	/*
	 * If pci-pci bridge grab PCI bridge specific error registers.
	 */
	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
		pci_regs->pci_bdg_regs->pci_bdg_sec_stat =
		    pci_config_get16(erpt_p->pe_hdl, PCI_BCNF_SEC_STATUS);
		if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
			pci_regs->pci_bdg_regs->pci_bdg_vflags |=
			    PCI_BDG_SEC_STAT_VALID;
		pci_regs->pci_bdg_regs->pci_bdg_ctrl =
		    pci_config_get16(erpt_p->pe_hdl, PCI_BCNF_BCNTRL);
		if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK)
			pci_regs->pci_bdg_regs->pci_bdg_vflags |=
			    PCI_BDG_CTRL_VALID;
	}

	/*
	 * If pci express device grab pci express error registers and
	 * check for advanced error reporting features and grab them if
	 * available.
	 */
	if (erpt_p->pe_dflags & PCIEX_DEV)
		pcie_regs_gather(erpt_p);
	else if (erpt_p->pe_dflags & PCIX_DEV)
		pcix_regs_gather(erpt_p, erpt_p->pe_regs);

}
436 
/*
 * Clear the PCI-X error state captured by pcix_regs_gather(): for every
 * register marked valid, write the captured value back (clearing the
 * sticky error bits), then reset the validity flags for the next cycle.
 */
static void
pcix_regs_clear(pci_erpt_t *erpt_p, void *pe_regs)
{
	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
		pcix_bdg_error_regs_t *pcix_bdg_regs =
		    (pcix_bdg_error_regs_t *)pe_regs;
		uint8_t pcix_bdg_cap_ptr;
		int i;

		pcix_bdg_cap_ptr = pcix_bdg_regs->pcix_bdg_cap_ptr;

		if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_SEC_STATUS_VALID)
			pci_config_put16(erpt_p->pe_hdl,
			    (pcix_bdg_cap_ptr + PCI_PCIX_SEC_STATUS),
			    pcix_bdg_regs->pcix_bdg_sec_stat);

		if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_STATUS_VALID)
			pci_config_put32(erpt_p->pe_hdl,
			    (pcix_bdg_cap_ptr + PCI_PCIX_BDG_STATUS),
			    pcix_bdg_regs->pcix_bdg_stat);

		pcix_bdg_regs->pcix_bdg_vflags = 0x0;

		if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) {
			pcix_ecc_regs_t *pcix_bdg_ecc_regs;
			/*
			 * PCI Express to PCI-X bridges only implement the
			 * secondary side of the PCI-X ECC registers; the
			 * side-select bit is read-only there, so we must not
			 * write it and we clear only index 1.
			 */
			if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
				pcix_bdg_ecc_regs =
				    pcix_bdg_regs->pcix_bdg_ecc_regs[1];

				if (pcix_bdg_ecc_regs->pcix_ecc_vflags &
				    PCIX_ERR_ECC_STS_VALID) {

					pci_config_put32(erpt_p->pe_hdl,
					    (pcix_bdg_cap_ptr +
					    PCI_PCIX_BDG_ECC_STATUS),
					    pcix_bdg_ecc_regs->
					    pcix_ecc_ctlstat);
				}
				pcix_bdg_ecc_regs->pcix_ecc_vflags = 0x0;
			} else {
				/*
				 * Plain PCI-X bridges: select each side via
				 * the ECC status register, then write back
				 * that side's captured control/status.
				 */
				for (i = 0; i < 2; i++) {
					pcix_bdg_ecc_regs =
					    pcix_bdg_regs->pcix_bdg_ecc_regs[i];


					if (pcix_bdg_ecc_regs->pcix_ecc_vflags &
					    PCIX_ERR_ECC_STS_VALID) {
						pci_config_put32(erpt_p->pe_hdl,
						    (pcix_bdg_cap_ptr +
						    PCI_PCIX_BDG_ECC_STATUS),
						    i);

						pci_config_put32(erpt_p->pe_hdl,
						    (pcix_bdg_cap_ptr +
						    PCI_PCIX_BDG_ECC_STATUS),
						    pcix_bdg_ecc_regs->
						    pcix_ecc_ctlstat);
					}
					pcix_bdg_ecc_regs->pcix_ecc_vflags =
					    0x0;
				}
			}
		}
	} else {
		pcix_error_regs_t *pcix_regs = (pcix_error_regs_t *)pe_regs;
		uint8_t pcix_cap_ptr;

		pcix_cap_ptr = pcix_regs->pcix_cap_ptr;

		if (pcix_regs->pcix_vflags & PCIX_ERR_STATUS_VALID)
			pci_config_put32(erpt_p->pe_hdl,
			    (pcix_cap_ptr + PCI_PCIX_STATUS),
			    pcix_regs->pcix_status);

		pcix_regs->pcix_vflags = 0x0;

		if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) {
			pcix_ecc_regs_t *pcix_ecc_regs =
			    pcix_regs->pcix_ecc_regs;

			if (pcix_ecc_regs->pcix_ecc_vflags &
			    PCIX_ERR_ECC_STS_VALID)
				pci_config_put32(erpt_p->pe_hdl,
				    (pcix_cap_ptr + PCI_PCIX_ECC_STATUS),
				    pcix_ecc_regs->pcix_ecc_ctlstat);

			pcix_ecc_regs->pcix_ecc_vflags = 0x0;
		}
	}
}
532 
/*
 * Clear the PCI Express error state captured by pcie_regs_gather():
 * write back each status register marked valid, recurse into the PCI-X
 * bridge clear path when applicable, and reset the validity flags.
 */
static void
pcie_regs_clear(pci_erpt_t *erpt_p)
{
	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
	uint8_t pcie_cap_ptr;
	pcie_adv_error_regs_t *pcie_adv_regs;
	uint16_t pcie_ecap_ptr;

	pcie_cap_ptr = pcie_regs->pcie_cap_ptr;

	if (pcie_regs->pcie_vflags & PCIE_ERR_STATUS_VALID)
		pci_config_put16(erpt_p->pe_hdl, pcie_cap_ptr + PCIE_DEVSTS,
		    pcie_regs->pcie_err_status);

	pcie_regs->pcie_vflags = 0x0;

	/* PCI Express to PCI-X bridges also carry PCI-X bridge state. */
	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) &&
	    (erpt_p->pe_dflags & PCIX_DEV))
		pcix_regs_clear(erpt_p, pcie_regs->pcix_bdg_regs);

	/* Everything below requires the AER extended capability. */
	if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV))
		return;

	pcie_adv_regs = pcie_regs->pcie_adv_regs;

	pcie_ecap_ptr = pcie_adv_regs->pcie_adv_cap_ptr;

	if (pcie_adv_regs->pcie_adv_vflags & PCIE_UE_STATUS_VALID)
		pci_config_put32(erpt_p->pe_hdl,
		    pcie_ecap_ptr + PCIE_AER_UCE_STS,
		    pcie_adv_regs->pcie_ue_status);

	if (pcie_adv_regs->pcie_adv_vflags & PCIE_CE_STATUS_VALID)
		pci_config_put32(erpt_p->pe_hdl,
		    pcie_ecap_ptr + PCIE_AER_CE_STS,
		    pcie_adv_regs->pcie_ce_status);


	/* PCI Express to PCI bridges: clear the secondary UE status. */
	if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
		pcie_adv_bdg_error_regs_t *pcie_bdg_regs =
		    pcie_adv_regs->pcie_adv_bdg_regs;


		if (pcie_adv_regs->pcie_adv_vflags & PCIE_SUE_STATUS_VALID)
			pci_config_put32(erpt_p->pe_hdl,
			    pcie_ecap_ptr + PCIE_AER_SUCE_STS,
			    pcie_bdg_regs->pcie_sue_status);
	}
	/*
	 * If PCI Express root complex then clear the root complex
	 * error registers.
	 */
	if (erpt_p->pe_dflags & PCIEX_RC_DEV) {
		pcie_adv_rc_error_regs_t *pcie_rc_regs =
		    pcie_adv_regs->pcie_adv_rc_regs;


		if (pcie_adv_regs->pcie_adv_vflags & PCIE_RC_ERR_STATUS_VALID)
			pci_config_put32(erpt_p->pe_hdl,
			    (pcie_ecap_ptr + PCIE_AER_RE_STS),
			    pcie_rc_regs->pcie_rc_err_status);
	}
	pcie_adv_regs->pcie_adv_vflags = 0x0;
}
597 
598 static void
599 pci_regs_clear(pci_erpt_t *erpt_p)
600 {
601 	/*
602 	 * Finally clear the error bits
603 	 */
604 	if (erpt_p->pe_dflags & PCIEX_DEV)
605 		pcie_regs_clear(erpt_p);
606 	else if (erpt_p->pe_dflags & PCIX_DEV)
607 		pcix_regs_clear(erpt_p, erpt_p->pe_regs);
608 
609 	if (erpt_p->pe_pci_regs->pci_vflags & PCI_ERR_STATUS_VALID)
610 		pci_config_put16(erpt_p->pe_hdl, PCI_CONF_STAT,
611 		    erpt_p->pe_pci_regs->pci_err_status);
612 
613 	erpt_p->pe_pci_regs->pci_vflags = 0x0;
614 
615 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
616 		if (erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_vflags &
617 		    PCI_BDG_SEC_STAT_VALID)
618 			pci_config_put16(erpt_p->pe_hdl, PCI_BCNF_SEC_STATUS,
619 			    erpt_p->pe_pci_regs->pci_bdg_regs->
620 			    pci_bdg_sec_stat);
621 		if (erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_vflags &
622 		    PCI_BDG_CTRL_VALID)
623 			pci_config_put16(erpt_p->pe_hdl, PCI_BCNF_BCNTRL,
624 			    erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_ctrl);
625 
626 		erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_vflags = 0x0;
627 	}
628 }
629 
630 /*
631  * pcix_ereport_setup: Allocate structures for PCI-X error handling and ereport
632  * generation.
633  */
634 /* ARGSUSED */
635 static void
636 pcix_ereport_setup(dev_info_t *dip, pci_erpt_t *erpt_p)
637 {
638 	uint8_t pcix_cap_ptr;
639 	int i;
640 
641 	pcix_cap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
642 	    "pcix-capid-pointer", PCI_CAP_NEXT_PTR_NULL);
643 
644 	if (pcix_cap_ptr != PCI_CAP_NEXT_PTR_NULL)
645 		erpt_p->pe_dflags |= PCIX_DEV;
646 	else
647 		return;
648 
649 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
650 		pcix_bdg_error_regs_t *pcix_bdg_regs;
651 
652 		erpt_p->pe_regs = kmem_zalloc(sizeof (pcix_bdg_error_regs_t),
653 		    KM_SLEEP);
654 		pcix_bdg_regs = (pcix_bdg_error_regs_t *)erpt_p->pe_regs;
655 		pcix_bdg_regs->pcix_bdg_cap_ptr = pcix_cap_ptr;
656 		pcix_bdg_regs->pcix_bdg_ver = pci_config_get16(erpt_p->pe_hdl,
657 		    pcix_cap_ptr + PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK;
658 		if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) {
659 			for (i = 0; i < 2; i++) {
660 				pcix_bdg_regs->pcix_bdg_ecc_regs[i] =
661 				    kmem_zalloc(sizeof (pcix_ecc_regs_t),
662 					KM_SLEEP);
663 			}
664 		}
665 	} else {
666 		pcix_error_regs_t *pcix_regs;
667 
668 		erpt_p->pe_regs = kmem_zalloc(sizeof (pcix_error_regs_t),
669 		    KM_SLEEP);
670 		pcix_regs = (pcix_error_regs_t *)erpt_p->pe_regs;
671 		pcix_regs->pcix_cap_ptr = pcix_cap_ptr;
672 		pcix_regs->pcix_ver = pci_config_get16(erpt_p->pe_hdl,
673 		    pcix_cap_ptr + PCI_PCIX_COMMAND) & PCI_PCIX_VER_MASK;
674 		if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) {
675 			pcix_regs->pcix_ecc_regs = kmem_zalloc(
676 			    sizeof (pcix_ecc_regs_t), KM_SLEEP);
677 		}
678 	}
679 }
680 
/*
 * pcie_ereport_setup: Detect the PCI Express (and any co-resident PCI-X)
 * capabilities, allocate the matching error-register structures, and
 * normalize the AER mask registers to their expected values.  Posts a
 * PCIEX_NADV ereport if the device lacks Advanced Error Reporting.
 */
static void
pcie_ereport_setup(dev_info_t *dip, pci_erpt_t *erpt_p)
{
	pcie_error_regs_t *pcie_regs;
	pcie_adv_error_regs_t *pcie_adv_regs;
	char buf[FM_MAX_CLASS];
	uint8_t pcix_cap_ptr;
	uint8_t pcie_cap_ptr;
	uint16_t pcie_ecap_ptr;
	uint16_t dev_type = 0;
	uint32_t mask = pcie_expected_ue_mask;

	/*
	 * The following sparc specific code should be removed once the pci_cap
	 * interfaces create the necessary properties for us.
	 */
#if defined(__sparc)
	ushort_t status;
	uint32_t slot_cap;
	uint8_t cap_ptr = 0;
	uint8_t cap_id = 0;
	uint32_t hdr, hdr_next_ptr, hdr_cap_id;
	uint16_t offset = P2ALIGN(PCIE_EXT_CAP, 4);
	uint16_t aer_ptr = 0;

	/*
	 * Walk the standard capability list, publishing the PCI-X and
	 * PCI Express capability offsets (plus slot capabilities when a
	 * slot is implemented) as properties for the lookups below.
	 */
	cap_ptr = pci_config_get8(erpt_p->pe_hdl, PCI_CONF_CAP_PTR);
	if (pci_config_check(erpt_p->pe_hdl) == DDI_FM_OK) {
		while ((cap_id = pci_config_get8(erpt_p->pe_hdl, cap_ptr)) !=
		    0xff) {
			if (cap_id == PCI_CAP_ID_PCIX) {
				(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
				    "pcix-capid-pointer", cap_ptr);
			}
			if (cap_id == PCI_CAP_ID_PCI_E) {
				status = pci_config_get16(erpt_p->pe_hdl,
				    cap_ptr + 2);
				if (status & PCIE_PCIECAP_SLOT_IMPL) {
					/* offset 14h is Slot Cap Register */
					slot_cap = pci_config_get32(
					    erpt_p->pe_hdl,
					    cap_ptr + PCIE_SLOTCAP);
					(void) ndi_prop_update_int(
					    DDI_DEV_T_NONE, dip,
					    "pcie-slotcap-reg", slot_cap);
				}
				(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
				    "pcie-capid-reg",
				    pci_config_get16(erpt_p->pe_hdl,
				    cap_ptr + PCIE_PCIECAP));
				(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
				    "pcie-capid-pointer", cap_ptr);
			}
			if ((cap_ptr = pci_config_get8(erpt_p->pe_hdl,
			    cap_ptr + 1)) == 0xff || cap_ptr == 0 ||
			    (pci_config_check(erpt_p->pe_hdl) != DDI_FM_OK))
				break;
		}
	}

#endif

	pcix_cap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "pcix-capid-pointer", PCI_CAP_NEXT_PTR_NULL);

	if (pcix_cap_ptr != PCI_CAP_NEXT_PTR_NULL)
		erpt_p->pe_dflags |= PCIX_DEV;

	pcie_cap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "pcie-capid-pointer", PCI_CAP_NEXT_PTR_NULL);

	if (pcie_cap_ptr != PCI_CAP_NEXT_PTR_NULL) {
		erpt_p->pe_dflags |= PCIEX_DEV;
		erpt_p->pe_regs = kmem_zalloc(sizeof (pcie_error_regs_t),
		    KM_SLEEP);
		pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
		pcie_regs->pcie_cap_ptr = pcie_cap_ptr;
	}

	/* Not a PCI Express device: nothing further to do here. */
	if (!(erpt_p->pe_dflags & PCIEX_DEV))
		return;

	/*
	 * Don't currently need to check for version here because we are
	 * compliant with PCIE 1.0a which is version 0 and is guaranteed
	 * software compatibility with future versions.  We will need to
	 * add errors for new detectors/features which are added in newer
	 * revisions [sec 7.8.2].
	 */
	pcie_regs->pcie_cap = pci_config_get16(erpt_p->pe_hdl,
	    pcie_regs->pcie_cap_ptr + PCIE_PCIECAP);

	dev_type = pcie_regs->pcie_cap & PCIE_PCIECAP_DEV_TYPE_MASK;

	/*
	 * Bridges that are also PCI-X need storage for the PCI-X bridge
	 * registers, including per-side ECC state when supported.
	 */
	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) &&
	    (erpt_p->pe_dflags & PCIX_DEV)) {
		int i;

		pcie_regs->pcix_bdg_regs =
		    kmem_zalloc(sizeof (pcix_bdg_error_regs_t), KM_SLEEP);

		pcie_regs->pcix_bdg_regs->pcix_bdg_cap_ptr = pcix_cap_ptr;
		pcie_regs->pcix_bdg_regs->pcix_bdg_ver =
		    pci_config_get16(erpt_p->pe_hdl,
			pcix_cap_ptr + PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK;

		if (PCIX_ECC_VER_CHECK(pcie_regs->pcix_bdg_regs->pcix_bdg_ver))
			for (i = 0; i < 2; i++)
				pcie_regs->pcix_bdg_regs->pcix_bdg_ecc_regs[i] =
				    kmem_zalloc(sizeof (pcix_ecc_regs_t),
					KM_SLEEP);
	}

	if (dev_type == PCIE_PCIECAP_DEV_TYPE_ROOT) {
		erpt_p->pe_dflags |= PCIEX_RC_DEV;
		pcie_regs->pcie_rc_regs = kmem_zalloc(
		    sizeof (pcie_rc_error_regs_t), KM_SLEEP);
	}
	/*
	 * The following sparc specific code should be removed once the pci_cap
	 * interfaces create the necessary properties for us.
	 */
#if defined(__sparc)
	/* Walk the extended capability list looking for the AER capability. */
	hdr = pci_config_get32(erpt_p->pe_hdl, offset);
	hdr_next_ptr = (hdr >> PCIE_EXT_CAP_NEXT_PTR_SHIFT) &
	    PCIE_EXT_CAP_NEXT_PTR_MASK;
	hdr_cap_id = (hdr >> PCIE_EXT_CAP_ID_SHIFT) & PCIE_EXT_CAP_ID_MASK;

	while ((hdr_next_ptr != PCIE_EXT_CAP_NEXT_PTR_NULL) &&
	    (hdr_cap_id != PCIE_EXT_CAP_ID_AER)) {
		offset = P2ALIGN(hdr_next_ptr, 4);
		hdr = pci_config_get32(erpt_p->pe_hdl, offset);
		hdr_next_ptr = (hdr >> PCIE_EXT_CAP_NEXT_PTR_SHIFT) &
		    PCIE_EXT_CAP_NEXT_PTR_MASK;
		hdr_cap_id = (hdr >> PCIE_EXT_CAP_ID_SHIFT) &
		    PCIE_EXT_CAP_ID_MASK;
	}

	if (hdr_cap_id == PCIE_EXT_CAP_ID_AER)
		aer_ptr = P2ALIGN(offset, 4);
	if (aer_ptr != PCI_CAP_NEXT_PTR_NULL)
		(void) ndi_prop_update_int(DDI_DEV_T_NONE, dip,
		    "pcie-aer-pointer", aer_ptr);
#endif

	/*
	 * Find and store if this device is capable of pci express
	 * advanced errors, if not report an error against the device.
	 */
	pcie_ecap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "pcie-aer-pointer", PCI_CAP_NEXT_PTR_NULL);
	if (pcie_ecap_ptr != PCI_CAP_NEXT_PTR_NULL) {
		erpt_p->pe_dflags |= PCIEX_ADV_DEV;
		pcie_regs->pcie_adv_regs = kmem_zalloc(
		    sizeof (pcie_adv_error_regs_t), KM_SLEEP);
		pcie_regs->pcie_adv_regs->pcie_adv_cap_ptr = pcie_ecap_ptr;
	}

	if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV)) {
		/* No AER capability: note it with a PCIEX_NADV ereport. */
		(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
		    PCIEX_ERROR_SUBCLASS, PCIEX_NADV);
		ddi_fm_ereport_post(dip, buf, NULL, DDI_NOSLEEP,
		    FM_VERSION, DATA_TYPE_UINT8, 0, NULL);
		return;
	}

	pcie_adv_regs = pcie_regs->pcie_adv_regs;

	if (pcie_adv_regs == NULL)
		return;
	/*
	 * Initialize structures for advanced PCI Express devices.
	 */

	/*
	 * Advanced error registers exist for PCI Express to PCI(X) Bridges and
	 * may also exist for PCI(X) to PCI Express Bridges, the latter is not
	 * well explained in the PCI Express to PCI/PCI-X Bridge Specification
	 * 1.0 and will be left out of the current gathering of these registers.
	 */
	if (dev_type == PCIE_PCIECAP_DEV_TYPE_PCIE2PCI) {
		erpt_p->pe_dflags |= PCIEX_2PCI_DEV;
		pcie_adv_regs->pcie_adv_bdg_regs = kmem_zalloc(
		    sizeof (pcie_adv_bdg_error_regs_t), KM_SLEEP);
	}

	if (erpt_p->pe_dflags & PCIEX_RC_DEV)
		pcie_adv_regs->pcie_adv_rc_regs = kmem_zalloc(
		    sizeof (pcie_adv_rc_error_regs_t), KM_SLEEP);

	/*
	 * Check that mask values are as expected, if not
	 * change them to what we desire.
	 */
	pci_regs_gather(dip, erpt_p);
	pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
	if (pcie_regs->pcie_adv_regs->pcie_ce_mask != pcie_expected_ce_mask) {
		pci_config_put32(erpt_p->pe_hdl,
		    pcie_ecap_ptr + PCIE_AER_CE_MASK, pcie_expected_ce_mask);
	}

	/* Disable PTLP/ECRC (or mask these two) for Switches */
	if (dev_type == PCIE_PCIECAP_DEV_TYPE_UP ||
	    dev_type == PCIE_PCIECAP_DEV_TYPE_DOWN)
		mask |= PCIE_AER_UCE_PTLP | PCIE_AER_UCE_ECRC;

	if (pcie_regs->pcie_adv_regs->pcie_ue_mask != mask) {
		pci_config_put32(erpt_p->pe_hdl,
		    pcie_ecap_ptr + PCIE_AER_UCE_MASK, mask);
	}
	if (erpt_p->pe_dflags & PCIEX_2PCI_DEV) {
		if (pcie_regs->pcie_adv_regs->pcie_adv_bdg_regs->pcie_sue_mask
		    != pcie_expected_sue_mask) {
			pci_config_put32(erpt_p->pe_hdl,
			    pcie_ecap_ptr + PCIE_AER_SUCE_MASK,
			    pcie_expected_sue_mask);
		}
	}
}
897 
898 /*
899  * pci_ereport_setup: Detect PCI device type and initialize structures to be
900  * used to generate ereports based on detected generic device errors.
901  */
902 void
903 pci_ereport_setup(dev_info_t *dip)
904 {
905 	struct dev_info *devi = DEVI(dip);
906 	struct i_ddi_fmhdl *fmhdl = devi->devi_fmhdl;
907 	pci_erpt_t *erpt_p;
908 	uint8_t pci_hdr_type;
909 	uint16_t pci_status;
910 	pci_regspec_t *pci_rp;
911 	int32_t len;
912 	uint32_t phys_hi;
913 
914 	/*
915 	 * If device is not ereport capbable then report an error against the
916 	 * driver for using this interface,
917 	 */
918 	if (!DDI_FM_EREPORT_CAP(ddi_fm_capable(dip)) &&
919 	    !DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) {
920 		i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL, DDI_SLEEP);
921 		return;
922 	}
923 
924 	/*
925 	 * ASSERT fmhdl exists and fh_bus_specific is NULL.
926 	 */
927 	ASSERT(fmhdl && (fmhdl->fh_bus_specific == NULL));
928 
929 	erpt_p = kmem_zalloc(sizeof (pci_erpt_t), KM_SLEEP);
930 
931 	if (pci_config_setup(dip, &erpt_p->pe_hdl) != DDI_SUCCESS)
932 		goto error;
933 
934 	erpt_p->pe_pci_regs = kmem_zalloc(sizeof (pci_error_regs_t), KM_SLEEP);
935 
936 	pci_status = pci_config_get16(erpt_p->pe_hdl, PCI_CONF_STAT);
937 	if (pci_config_check(erpt_p->pe_hdl) != DDI_FM_OK)
938 		goto error;
939 
940 	/*
941 	 * Get header type and record if device is a bridge.
942 	 */
943 	pci_hdr_type = pci_config_get8(erpt_p->pe_hdl, PCI_CONF_HEADER);
944 	if (pci_config_check(erpt_p->pe_hdl) != DDI_FM_OK)
945 		goto error;
946 
947 	/*
948 	 * Check to see if PCI device is a bridge, if so allocate pci bridge
949 	 * error register structure.
950 	 */
951 	if ((pci_hdr_type & PCI_HEADER_TYPE_M) == PCI_HEADER_PPB) {
952 		erpt_p->pe_dflags |= PCI_BRIDGE_DEV;
953 		erpt_p->pe_pci_regs->pci_bdg_regs = kmem_zalloc(
954 		    sizeof (pci_bdg_error_regs_t), KM_SLEEP);
955 	}
956 
957 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg",
958 	    (caddr_t)&pci_rp, &len) == DDI_SUCCESS) {
959 		phys_hi = pci_rp->pci_phys_hi;
960 		kmem_free(pci_rp, len);
961 
962 		erpt_p->pe_bdf = (uint16_t)(PCI_REG_BDFR_G(phys_hi) >>
963 		    PCI_REG_FUNC_SHIFT);
964 	}
965 
966 
967 	if (!(pci_status & PCI_STAT_CAP)) {
968 		goto done;
969 	}
970 
971 	/*
972 	 * Initialize structures for PCI Express and PCI-X devices.
973 	 * Order matters below and pcie_ereport_setup should preceed
974 	 * pcix_ereport_setup.
975 	 */
976 	pcie_ereport_setup(dip, erpt_p);
977 
978 	if (!(erpt_p->pe_dflags & PCIEX_DEV)) {
979 		pcix_ereport_setup(dip, erpt_p);
980 	}
981 
982 done:
983 	pci_regs_gather(dip, erpt_p);
984 	pci_regs_clear(erpt_p);
985 
986 	/*
987 	 * Before returning set fh_bus_specific to completed pci_erpt_t
988 	 * structure
989 	 */
990 	fmhdl->fh_bus_specific = (void *)erpt_p;
991 
992 	return;
993 error:
994 	if (erpt_p->pe_pci_regs)
995 		kmem_free(erpt_p->pe_pci_regs, sizeof (pci_error_regs_t));
996 	kmem_free(erpt_p, sizeof (pci_erpt_t));
997 	erpt_p = NULL;
998 }
999 
1000 static void
1001 pcix_ereport_teardown(pci_erpt_t *erpt_p)
1002 {
1003 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
1004 		pcix_bdg_error_regs_t *pcix_bdg_regs;
1005 		uint16_t pcix_ver;
1006 
1007 		pcix_bdg_regs = (pcix_bdg_error_regs_t *)erpt_p->pe_regs;
1008 		pcix_ver = pcix_bdg_regs->pcix_bdg_ver;
1009 		if (PCIX_ECC_VER_CHECK(pcix_ver)) {
1010 			int i;
1011 			for (i = 0; i < 2; i++)
1012 				kmem_free(pcix_bdg_regs->pcix_bdg_ecc_regs[i],
1013 				    sizeof (pcix_ecc_regs_t));
1014 		}
1015 		kmem_free(erpt_p->pe_regs, sizeof (pcix_bdg_error_regs_t));
1016 	} else {
1017 		pcix_error_regs_t *pcix_regs;
1018 		uint16_t pcix_ver;
1019 
1020 		pcix_regs = (pcix_error_regs_t *)erpt_p->pe_regs;
1021 		pcix_ver = pcix_regs->pcix_ver;
1022 		if (PCIX_ECC_VER_CHECK(pcix_ver)) {
1023 			kmem_free(pcix_regs->pcix_ecc_regs,
1024 			    sizeof (pcix_ecc_regs_t));
1025 		}
1026 		kmem_free(erpt_p->pe_regs, sizeof (pcix_error_regs_t));
1027 	}
1028 }
1029 
1030 static void
1031 pcie_ereport_teardown(pci_erpt_t *erpt_p)
1032 {
1033 	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
1034 
1035 	if (erpt_p->pe_dflags & PCIEX_ADV_DEV) {
1036 		pcie_adv_error_regs_t *pcie_adv = pcie_regs->pcie_adv_regs;
1037 
1038 		if (erpt_p->pe_dflags & PCIEX_2PCI_DEV)
1039 			kmem_free(pcie_adv->pcie_adv_bdg_regs,
1040 			    sizeof (pcie_adv_bdg_error_regs_t));
1041 		if (erpt_p->pe_dflags & PCIEX_RC_DEV)
1042 			kmem_free(pcie_adv->pcie_adv_rc_regs,
1043 			    sizeof (pcie_adv_rc_error_regs_t));
1044 		kmem_free(pcie_adv, sizeof (pcie_adv_error_regs_t));
1045 	}
1046 
1047 	if (erpt_p->pe_dflags & PCIEX_RC_DEV)
1048 		kmem_free(pcie_regs->pcie_rc_regs,
1049 		    sizeof (pcie_rc_error_regs_t));
1050 
1051 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
1052 		if (erpt_p->pe_dflags & PCIX_DEV) {
1053 			uint16_t pcix_ver = pcie_regs->pcix_bdg_regs->
1054 			    pcix_bdg_ver;
1055 
1056 			if (PCIX_ECC_VER_CHECK(pcix_ver)) {
1057 				int i;
1058 				for (i = 0; i < 2; i++)
1059 					kmem_free(pcie_regs->pcix_bdg_regs->
1060 					    pcix_bdg_ecc_regs[i],
1061 					    sizeof (pcix_ecc_regs_t));
1062 			}
1063 			kmem_free(pcie_regs->pcix_bdg_regs,
1064 			    sizeof (pcix_bdg_error_regs_t));
1065 		}
1066 	}
1067 	kmem_free(erpt_p->pe_regs, sizeof (pcie_error_regs_t));
1068 }
1069 
/*
 * pci_ereport_teardown: Undo pci_ereport_setup() — release the error
 * register state, the config space mapping, and detach the pci_erpt_t
 * from the device's FM handle.  Safe to call when setup never completed
 * (fh_bus_specific == NULL).
 */
void
pci_ereport_teardown(dev_info_t *dip)
{
	struct i_ddi_fmhdl *fmhdl = DEVI(dip)->devi_fmhdl;
	pci_erpt_t *erpt_p;

	/*
	 * As in setup, flag misuse of this interface by drivers that are
	 * neither ereport nor error-callback capable (but still proceed
	 * with the teardown).
	 */
	if (!DDI_FM_EREPORT_CAP(ddi_fm_capable(dip)) &&
	    !DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) {
		i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL, DDI_SLEEP);
	}

	ASSERT(fmhdl);

	erpt_p = (pci_erpt_t *)fmhdl->fh_bus_specific;
	if (erpt_p == NULL)
		return;

	/* Free the technology-specific (PCIe or PCI-X) register state. */
	if (erpt_p->pe_dflags & PCIEX_DEV)
		pcie_ereport_teardown(erpt_p);
	else if (erpt_p->pe_dflags & PCIX_DEV)
		pcix_ereport_teardown(erpt_p);
	pci_config_teardown((ddi_acc_handle_t *)&erpt_p->pe_hdl);
	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV)
		kmem_free(erpt_p->pe_pci_regs->pci_bdg_regs,
		    sizeof (pci_bdg_error_regs_t));
	kmem_free(erpt_p->pe_pci_regs, sizeof (pci_error_regs_t));
	kmem_free(erpt_p, sizeof (pci_erpt_t));
	fmhdl->fh_bus_specific = NULL;
	/*
	 * The following sparc specific code should be removed once the pci_cap
	 * interfaces create the necessary properties for us.
	 */
#if defined(__sparc)
	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcix-capid-pointer");
	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-slotcap-reg");
	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-capid-reg");
	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-capid-pointer");
	(void) ndi_prop_remove(DDI_DEV_T_NONE, dip, "pcie-aer-pointer");
#endif
}
1110 
1111 /*
1112  * Function used by PCI device and nexus error handlers to check if a
1113  * captured address resides in their DMA or ACC handle caches or the caches of
1114  * their children devices, respectively.
1115  */
1116 static int
1117 pci_dev_hdl_lookup(dev_info_t *dip, int type, ddi_fm_error_t *derr,
1118     void *addr)
1119 {
1120 	struct i_ddi_fmhdl *fmhdl = DEVI(dip)->devi_fmhdl;
1121 	pci_erpt_t *erpt_p = (pci_erpt_t *)fmhdl->fh_bus_specific;
1122 
1123 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV)
1124 		return (ndi_fmc_error(dip, NULL, type, derr->fme_ena, addr));
1125 	else
1126 		return (ndi_fmc_entry_error(dip, type, derr, addr));
1127 }
1128 
/*
 * Post a PCI Express ereport of class 'buf' for the given errtype, using
 * the register values previously captured into erpt_p by pci_regs_gather().
 * The name/value payload posted depends on errtype: correctable (CE),
 * uncorrectable (UE), generic device status, or root-complex message types.
 */
static void
pcie_ereport_post(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
    char *buf, int errtype)
{
	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
	pcie_adv_error_regs_t *pcie_adv_regs = pcie_regs->pcie_adv_regs;
	pcie_adv_rc_error_regs_t *pcie_adv_rc_regs =
	    pcie_adv_regs->pcie_adv_rc_regs;

	switch (errtype) {
	    case PCIEX_TYPE_CE:
		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
		    PCIEX_DEVSTS_REG, DATA_TYPE_UINT16,
		    pcie_regs->pcie_err_status,
		    PCIEX_CE_STATUS_REG, DATA_TYPE_UINT32,
		    pcie_adv_regs->pcie_ce_status, NULL);
		break;
	    case PCIEX_TYPE_UE:
		/*
		 * NOTE(review): a captured bdf of 0 is treated as "source
		 * not valid" below — confirm 0 can never be a legitimate
		 * requester id here.
		 */
		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
		    PCIEX_DEVSTS_REG, DATA_TYPE_UINT16,
		    pcie_regs->pcie_err_status,
		    PCIEX_UE_STATUS_REG, DATA_TYPE_UINT32,
		    pcie_adv_regs->pcie_ue_status, PCIEX_UE_SEV_REG,
		    DATA_TYPE_UINT32, pcie_adv_regs->pcie_ue_sev,
		    PCIEX_ADV_CTL, DATA_TYPE_UINT32,
		    pcie_adv_regs->pcie_adv_ctl,
		    PCIEX_SRC_ID, DATA_TYPE_UINT16,
		    pcie_adv_regs->pcie_adv_bdf,
		    PCIEX_SRC_VALID, DATA_TYPE_BOOLEAN_VALUE,
		    (pcie_adv_regs->pcie_adv_bdf != NULL) ?
		    1 : NULL,
#ifdef DEBUG
		    /* Raw TLP header words, for debug builds only. */
		    PCIEX_UE_HDR0, DATA_TYPE_UINT32,
		    pcie_adv_regs->pcie_ue_hdr0,
		    PCIEX_UE_HDR1, DATA_TYPE_UINT32,
		    pcie_adv_regs->pcie_ue_hdr[0],
		    PCIEX_UE_HDR2, DATA_TYPE_UINT32,
		    pcie_adv_regs->pcie_ue_hdr[1],
		    PCIEX_UE_HDR3, DATA_TYPE_UINT32,
		    pcie_adv_regs->pcie_ue_hdr[2],
#endif
		    NULL);
		break;
	    case PCIEX_TYPE_GEN:
		/* Device status only — no advanced registers available. */
		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8,
		    0, PCIEX_DEVSTS_REG, DATA_TYPE_UINT16,
		    pcie_regs->pcie_err_status, NULL);
		break;
	    case PCIEX_TYPE_RC_UE_MSG:
	    case PCIEX_TYPE_RC_CE_MSG:
		/*
		 * Root-complex received error message; include the source id
		 * of whichever message type (UE vs CE) is being reported.
		 */
		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
		    PCIEX_ROOT_ERRSTS_REG, DATA_TYPE_UINT32,
		    pcie_adv_rc_regs->pcie_rc_err_status,
		    PCIEX_SRC_ID, DATA_TYPE_UINT16,
		    (errtype == PCIEX_TYPE_RC_UE_MSG) ?
		    pcie_adv_rc_regs->pcie_rc_ue_src_id :
		    pcie_adv_rc_regs->pcie_rc_ce_src_id,
		    PCIEX_SRC_VALID, DATA_TYPE_BOOLEAN_VALUE,
		    (errtype == PCIEX_TYPE_RC_UE_MSG) ?
		    (pcie_adv_regs->pcie_adv_vflags & PCIE_SRC_ID_VALID &&
		    pcie_adv_rc_regs->pcie_rc_ue_src_id != 0) :
		    (pcie_adv_regs->pcie_adv_vflags & PCIE_SRC_ID_VALID &&
		    pcie_adv_rc_regs->pcie_rc_ce_src_id != 0), NULL);
		break;
	    case PCIEX_TYPE_RC_MULT_MSG:
		/* Multiple messages received; only the status is useful. */
		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
		    PCIEX_ROOT_ERRSTS_REG, DATA_TYPE_UINT32,
		    pcie_adv_rc_regs->pcie_rc_err_status, NULL);
		break;
	    default:
		break;
	}
}
1207 
/*
 * Decode the captured uncorrectable-error TLP header and, where the TLP
 * carries an address (memory or I/O requests), look the address up in the
 * DMA/ACC handle caches to set derr->fme_status.  For all TLP types the
 * requester/completer id is recorded in pcie_adv_bdf when available.
 */
static void
pcie_check_addr(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *eprt_p)
{
	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)eprt_p->pe_regs;
	pcie_adv_error_regs_t *pcie_adv_regs = pcie_regs->pcie_adv_regs;
	pcie_tlp_hdr_t *ue_hdr0;
	uint32_t *ue_hdr;
	uint64_t addr = NULL;

	/* Without a valid captured header there is nothing to decode. */
	if (!(pcie_adv_regs->pcie_adv_vflags & PCIE_UE_HDR_VALID)) {
		derr->fme_status = DDI_FM_UNKNOWN;
		return;
	}
	/* First header dword is the TLP header proper; rest is payload. */
	ue_hdr0 = (pcie_tlp_hdr_t *)&pcie_adv_regs->pcie_ue_hdr0;
	ue_hdr = pcie_adv_regs->pcie_ue_hdr;

	switch (ue_hdr0->type) {
	    case PCIE_TLP_TYPE_MEM:
	    case PCIE_TLP_TYPE_MEMLK:
		/* fmt bit 0 distinguishes 64-bit from 32-bit addressing. */
		if ((ue_hdr0->fmt & 0x1) == 0x1) {
			pcie_mem64_t *mem64_tlp = (pcie_mem64_t *)ue_hdr;

			addr = (uint64_t)mem64_tlp->addr1 << 32 |
			    (uint32_t)mem64_tlp->addr0 << 2;
			pcie_adv_regs->pcie_adv_bdf = mem64_tlp->rid;
		} else {
			pcie_memio32_t *memio32_tlp = (pcie_memio32_t *)ue_hdr;

			addr = (uint32_t)memio32_tlp->addr0 << 2;
			pcie_adv_regs->pcie_adv_bdf = memio32_tlp->rid;
		}

		derr->fme_status = pci_dev_hdl_lookup(dip, DMA_HANDLE, derr,
		    (void *) &addr);
		/*
		 * If DMA handle is not found error could have been a memory
		 * mapped IO address so check in the access cache
		 */
		if (derr->fme_status == DDI_FM_UNKNOWN)
			derr->fme_status = pci_dev_hdl_lookup(dip, ACC_HANDLE,
			    derr, (void *) &addr);
		break;

	    case PCIE_TLP_TYPE_IO:
		{
			pcie_memio32_t *memio32_tlp = (pcie_memio32_t *)ue_hdr;

			addr = (uint32_t)memio32_tlp->addr0 << 2;
			pcie_adv_regs->pcie_adv_bdf = memio32_tlp->rid;
			/* I/O space can only be an access handle. */
			derr->fme_status = pci_dev_hdl_lookup(dip, ACC_HANDLE,
			    derr, (void *) &addr);
			break;
		}
	    case PCIE_TLP_TYPE_CFG0:
	    case PCIE_TLP_TYPE_CFG1:
		{
			pcie_cfg_t *cfg_tlp = (pcie_cfg_t *)ue_hdr;

			/* Target BDF of the config request, not an address. */
			pcie_adv_regs->pcie_adv_bdf =
			    (uint16_t)cfg_tlp->bus << 8 |
			    (uint16_t)cfg_tlp->dev << 3 | cfg_tlp->func;

			derr->fme_status = DDI_FM_UNKNOWN;
			break;
		}
	    case PCIE_TLP_TYPE_MSG:
		{
			pcie_msg_t *msg_tlp = (pcie_msg_t *)ue_hdr;

			pcie_adv_regs->pcie_adv_bdf = msg_tlp->rid;
			derr->fme_status = DDI_FM_UNKNOWN;
			break;
		}
	    case PCIE_TLP_TYPE_CPL:
	    case PCIE_TLP_TYPE_CPLLK:
		{
			pcie_cpl_t *cpl_tlp = (pcie_cpl_t *)ue_hdr;

			/* Completions carry the completer id only. */
			pcie_adv_regs->pcie_adv_bdf = cpl_tlp->cid;
			derr->fme_status = DDI_FM_UNKNOWN;
			break;
		}
	    case PCIE_TLP_TYPE_MSI:
	    default:
		derr->fme_status = DDI_FM_UNKNOWN;
	}

	/*
	 * If no handle was found in the children caches and there is no
	 * address information already stored and we have a captured address
	 * then we need to store it away so that intermediate bridges can
	 * check if the address exists in their handle caches.
	 */
	if (derr->fme_status == DDI_FM_UNKNOWN &&
	    derr->fme_bus_specific == NULL &&
	    addr != NULL)
		derr->fme_bus_specific = (void *)(uintptr_t)addr;
}
1306 
/*
 * Decode the captured secondary-uncorrectable (PCIe-to-PCI bridge) error
 * header: extract the PCI-X command and address, then look the address
 * up in the DMA/ACC handle caches to set derr->fme_status.  A DAC
 * (dual-address cycle) command re-dispatches once on the upper command
 * bits via the cmd_switch label.
 */
static void
pcie_pci_check_addr(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *eprt_p)
{
	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)eprt_p->pe_regs;
	pcie_adv_error_regs_t *pcie_adv_regs = pcie_regs->pcie_adv_regs;
	pcie_adv_bdg_error_regs_t *pcie_bdg_regs =
	    pcie_adv_regs->pcie_adv_bdg_regs;
	uint64_t addr = NULL;
	pcix_attr_t *pcie_pci_sue_attr;
	int cmd;
	int dual_addr = 0;	/* guards against a second DAC re-dispatch */

	if (!(pcie_adv_regs->pcie_adv_vflags & PCIE_SUE_HDR_VALID)) {
		derr->fme_status = DDI_FM_UNKNOWN;
		return;
	}

	/* Header word 0 holds the transaction attributes (requester id). */
	pcie_pci_sue_attr = (pcix_attr_t *)&pcie_bdg_regs->pcie_sue_hdr0;
	cmd = (pcie_bdg_regs->pcie_sue_hdr[0] >>
	    PCIE_AER_SUCE_HDR_CMD_LWR_SHIFT) & PCIE_AER_SUCE_HDR_CMD_LWR_MASK;
cmd_switch:
	switch (cmd) {
	    case PCI_PCIX_CMD_IORD:
	    case PCI_PCIX_CMD_IOWR:
		pcie_adv_regs->pcie_adv_bdf = pcie_pci_sue_attr->rid;

		/* Reassemble the 64-bit address from header words 2 and 1. */
		addr = pcie_bdg_regs->pcie_sue_hdr[2];
		addr = (addr << PCIE_AER_SUCE_HDR_ADDR_SHIFT) |
		    pcie_bdg_regs->pcie_sue_hdr[1];

		derr->fme_status = pci_dev_hdl_lookup(dip, ACC_HANDLE,
		    derr, (void *) &addr);
		break;
	    case PCI_PCIX_CMD_MEMRD_DW:
	    case PCI_PCIX_CMD_MEMWR:
	    case PCI_PCIX_CMD_MEMRD_BL:
	    case PCI_PCIX_CMD_MEMWR_BL:
	    case PCI_PCIX_CMD_MEMRDBL:
	    case PCI_PCIX_CMD_MEMWRBL:
		pcie_adv_regs->pcie_adv_bdf = pcie_pci_sue_attr->rid;

		addr = pcie_bdg_regs->pcie_sue_hdr[2];
		addr = (addr << PCIE_AER_SUCE_HDR_ADDR_SHIFT) |
		    pcie_bdg_regs->pcie_sue_hdr[1];

		/* Try DMA handles first, then fall back to access handles. */
		derr->fme_status = pci_dev_hdl_lookup(dip, DMA_HANDLE,
		    derr, (void *) &addr);
		if (derr->fme_status == DDI_FM_UNKNOWN)
			derr->fme_status = pci_dev_hdl_lookup(dip, ACC_HANDLE,
			    derr, (void *) &addr);
		break;
	    case PCI_PCIX_CMD_CFRD:
	    case PCI_PCIX_CMD_CFWR:
		/*
		 * If we want to store the bdf of the device being addressed we
		 * will need to do some surgery
		 */
		derr->fme_status = DDI_FM_UNKNOWN;
		break;
	    case PCI_PCIX_CMD_DADR:
		/* Dual-address cycle: the real command is in the upper bits. */
		cmd = (pcie_bdg_regs->pcie_sue_hdr[0] >>
		    PCIE_AER_SUCE_HDR_CMD_UP_SHIFT) &
		    PCIE_AER_SUCE_HDR_CMD_UP_MASK;
		if (dual_addr)
			break;
		++dual_addr;
		goto cmd_switch;
	    default:
		derr->fme_status = DDI_FM_UNKNOWN;
	}

	/*
	 * If no handle was found in the children caches and there is no
	 * address information already stored and we have a captured address
	 * then we need to store it away so that intermediate bridges can
	 * check if the address exists in their handle caches.
	 */
	if (derr->fme_status == DDI_FM_UNKNOWN &&
	    derr->fme_bus_specific == NULL &&
	    addr != NULL)
		derr->fme_bus_specific = (void *)(uintptr_t)addr;
}
1389 
1390 static int
1391 pcix_check_addr(dev_info_t *dip, ddi_fm_error_t *derr,
1392     pcix_ecc_regs_t *pcix_ecc_regs)
1393 {
1394 	int cmd = (pcix_ecc_regs->pcix_ecc_ctlstat >> 16) & 0xf;
1395 	uint64_t addr;
1396 
1397 	addr = pcix_ecc_regs->pcix_ecc_secaddr;
1398 	addr = addr << 32;
1399 	addr |= pcix_ecc_regs->pcix_ecc_fstaddr;
1400 
1401 	switch (cmd) {
1402 	    case PCI_PCIX_CMD_INTR:
1403 	    case PCI_PCIX_CMD_SPEC:
1404 		return (DDI_FM_FATAL);
1405 	    case PCI_PCIX_CMD_IORD:
1406 	    case PCI_PCIX_CMD_IOWR:
1407 		return (pci_dev_hdl_lookup(dip, ACC_HANDLE, derr,
1408 		    (void *) &addr));
1409 	    case PCI_PCIX_CMD_DEVID:
1410 		return (DDI_FM_FATAL);
1411 	    case PCI_PCIX_CMD_MEMRD_DW:
1412 	    case PCI_PCIX_CMD_MEMWR:
1413 	    case PCI_PCIX_CMD_MEMRD_BL:
1414 	    case PCI_PCIX_CMD_MEMWR_BL:
1415 		return (pci_dev_hdl_lookup(dip, DMA_HANDLE, derr,
1416 		    (void *) &addr));
1417 	    case PCI_PCIX_CMD_CFRD:
1418 	    case PCI_PCIX_CMD_CFWR:
1419 		return (pci_dev_hdl_lookup(dip, ACC_HANDLE, derr,
1420 		    (void *) &addr));
1421 	    case PCI_PCIX_CMD_SPL:
1422 	    case PCI_PCIX_CMD_DADR:
1423 		return (DDI_FM_FATAL);
1424 	    case PCI_PCIX_CMD_MEMRDBL:
1425 	    case PCI_PCIX_CMD_MEMWRBL:
1426 		return (pci_dev_hdl_lookup(dip, DMA_HANDLE, derr,
1427 		    (void *) &addr));
1428 	    default:
1429 		return (DDI_FM_FATAL);
1430 	}
1431 }
1432 
/*ARGSUSED*/
/*
 * Report and classify errors captured in a PCI bridge's secondary status
 * and bridge control registers, then dispatch to children to let them
 * clear/report their own errors.  The fatal/nonfatal/unknown/ok counters
 * are accumulated via PCI_FM_SEV_INC and folded into a single DDI_FM_*
 * severity on return.  Handling differs by access context: unexpected
 * errors, expected (cautious) accesses, and pokes.
 */
static int
pci_bdg_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
{
	pci_bdg_error_regs_t *pci_bdg_regs = erpt_p->pe_pci_regs->pci_bdg_regs;
	int fatal = 0;
	int nonfatal = 0;
	int unknown = 0;
	int ok = 0;
	int ret = DDI_FM_OK;
	char buf[FM_MAX_CLASS];
	int i;

	/* Only post ereports for genuinely unexpected errors. */
	if (derr->fme_flag != DDI_FM_ERR_UNEXPECTED)
		goto done;

	/* Discard-timer timeout: severity is indeterminate. */
	if ((pci_bdg_regs->pci_bdg_vflags & PCI_BDG_CTRL_VALID) &&
	    (pci_bdg_regs->pci_bdg_ctrl & PCI_BCNF_BCNTRL_DTO_STAT)) {
		(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
		    PCI_ERROR_SUBCLASS, PCI_DTO);
		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
		    PCI_SEC_CONFIG_STATUS, DATA_TYPE_UINT16,
		    pci_bdg_regs->pci_bdg_sec_stat, PCI_BCNTRL,
		    DATA_TYPE_UINT16, pci_bdg_regs->pci_bdg_ctrl, NULL);
		unknown++;
	}

	/* Walk the secondary-status error table and post one per set bit. */
	if (pci_bdg_regs->pci_bdg_vflags & PCI_BDG_SEC_STAT_VALID) {
		for (i = 0; pci_bdg_err_tbl[i].err_class != NULL; i++) {
			if (pci_bdg_regs->pci_bdg_sec_stat &
			    pci_bdg_err_tbl[i].reg_bit) {
				(void) snprintf(buf, FM_MAX_CLASS, "%s.%s-%s",
				    PCI_ERROR_SUBCLASS, PCI_SEC_ERROR_SUBCLASS,
				    pci_bdg_err_tbl[i].err_class);
				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
				    PCI_SEC_CONFIG_STATUS, DATA_TYPE_UINT16,
				    pci_bdg_regs->pci_bdg_sec_stat, PCI_BCNTRL,
				    DATA_TYPE_UINT16,
				    pci_bdg_regs->pci_bdg_ctrl, NULL);
				/*
				 * Increment severity based on flag if bridge
				 * is PCI or PCI-X, if PCI Express and this is a
				 * master abort then treat as nonfatal.
				 * XXFM May need to check if all other errors
				 * are related to MA?
				 */
				if (!(erpt_p->pe_dflags & PCIEX_DEV)) {
					PCI_FM_SEV_INC(
					    pci_bdg_err_tbl[i].flags);
				} else if (pci_bdg_err_tbl[i].reg_bit ==
				    PCI_STAT_R_MAST_AB) {
					nonfatal++;
				}

				/*
				 * Queue a deferred target ereport when we
				 * have both a captured address and a target
				 * error class for this bit.
				 */
				if (derr->fme_bus_specific &&
				    pci_bdg_err_tbl[i].terr_class)
					pci_target_enqueue(derr->fme_ena,
					    pci_bdg_err_tbl[i].terr_class,
					    PCI_ERROR_SUBCLASS,
					    (uintptr_t)derr->fme_bus_specific);
			}
		}
	}

done:

	/*
	 * Need to check for poke and cautious put. We already know peek
	 * and cautious get errors occurred (as we got a trap) and we know
	 * they are nonfatal.
	 */
	if (derr->fme_flag == DDI_FM_ERR_EXPECTED) {
		/*
		 * for cautious puts we treat all errors as nonfatal. Actually
		 * we set nonfatal for cautious gets as well - doesn't do any
		 * harm
		 */
		if (pci_bdg_regs->pci_bdg_sec_stat & (PCI_STAT_R_TARG_AB |
		    PCI_STAT_R_MAST_AB | PCI_STAT_S_PERROR | PCI_STAT_S_SYSERR))
			nonfatal++;

		/*
		 * for cautious accesses we already have the acc_handle. Just
		 * need to call children to clear their error bits
		 */
		ret = ndi_fm_handler_dispatch(dip, NULL, derr);
		PCI_FM_SEV_INC(ret);
		return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
		    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
	}
	if (derr->fme_flag == DDI_FM_ERR_POKE) {
		/*
		 * special case for pokes - we only consider master abort
		 * and target abort as nonfatal. Sserr with no master abort is
		 * fatal, but master/target abort can come in on separate
		 * instance, so return unknown and parent will determine if
		 * nonfatal (if another child returned nonfatal - ie master
		 * or target abort) or fatal otherwise
		 */
		if (pci_bdg_regs->pci_bdg_sec_stat & (PCI_STAT_R_TARG_AB |
		    PCI_STAT_R_MAST_AB))
			nonfatal++;
		if (erpt_p->pe_pci_regs->pci_err_status & PCI_STAT_S_SYSERR)
			unknown++;
	}

	/*
	 * If errant address is passed in then attempt to find
	 * ACC/DMA handle in caches.
	 */
	if (derr->fme_bus_specific) {
		int i;

		/* Pass 0 searches DMA handles, pass 1 access handles. */
		for (i = 0; i < 2; i++) {
			ret = ndi_fmc_error(dip, NULL, i ? ACC_HANDLE :
			    DMA_HANDLE, derr->fme_ena,
			    (void *)&derr->fme_bus_specific);
			PCI_FM_SEV_INC(ret);
		}
	}

	/*
	 * now check children below the bridge, only if errant handle was not
	 * found
	 */
	if (!derr->fme_acc_handle && !derr->fme_dma_handle) {
		ret = ndi_fm_handler_dispatch(dip, NULL, derr);
		PCI_FM_SEV_INC(ret);
	}

	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}
1568 
/*
 * Report PCI-X ECC errors from the captured ECC control/status register(s).
 * Bridges have two ECC register sets (primary and secondary bus, hence the
 * i < 2 loop); non-bridge devices have one.  For each set: decode the ECC
 * phase (address/attribute/data) and whether the error was corrected, post
 * an ereport, and accumulate severity.  Secondary CE/UE status bits get
 * their own ereport.  Returns a folded DDI_FM_* severity.
 */
static int
pcix_ecc_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
    void *pe_regs)
{
	pcix_error_regs_t *pcix_regs;
	pcix_bdg_error_regs_t *pcix_bdg_regs;
	pcix_ecc_regs_t *pcix_ecc_regs;
	int bridge;
	int i;
	int ecc_phase;
	int ecc_corr;
	int sec_ue;
	int sec_ce;
	int fatal = 0;
	int nonfatal = 0;
	int unknown = 0;
	int ok = 0;
	char buf[FM_MAX_CLASS];

	/* pe_regs is a bridge or non-bridge structure depending on dflags. */
	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
		pcix_bdg_regs = (pcix_bdg_error_regs_t *)pe_regs;
		bridge = 1;
	} else {
		pcix_regs = (pcix_error_regs_t *)pe_regs;
		bridge = 0;
	}

	for (i = 0; i < (bridge ? 2 : 1); i++) {
		int ret = DDI_FM_OK;
		pcix_ecc_regs = bridge ? pcix_bdg_regs->pcix_bdg_ecc_regs[i] :
		    pcix_regs->pcix_ecc_regs;
		if (pcix_ecc_regs->pcix_ecc_vflags & PCIX_ERR_ECC_STS_VALID) {
			/* Decode phase/corrected/secondary bits of ctlstat. */
			ecc_phase = (pcix_ecc_regs->pcix_ecc_ctlstat &
			    PCI_PCIX_ECC_PHASE) >> 0x4;
			ecc_corr = (pcix_ecc_regs->pcix_ecc_ctlstat &
			    PCI_PCIX_ECC_CORR);
			sec_ue = (pcix_ecc_regs->pcix_ecc_ctlstat &
			    PCI_PCIX_ECC_S_UE);
			sec_ce = (pcix_ecc_regs->pcix_ecc_ctlstat &
			    PCI_PCIX_ECC_S_CE);

			switch (ecc_phase) {
			    case PCI_PCIX_ECC_PHASE_NOERR:
				break;
			    case PCI_PCIX_ECC_PHASE_FADDR:
			    case PCI_PCIX_ECC_PHASE_SADDR:
				/* Address-phase error: corrected → nonfatal. */
				PCI_FM_SEV_INC(ecc_corr ?  DDI_FM_NONFATAL :
				    DDI_FM_FATAL);
				(void) snprintf(buf, FM_MAX_CLASS,
				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
				    ecc_corr ? PCIX_ECC_CE_ADDR :
				    PCIX_ECC_UE_ADDR);
				break;
			    case PCI_PCIX_ECC_PHASE_ATTR:
				/* Attribute-phase error. */
				PCI_FM_SEV_INC(ecc_corr ?
				    DDI_FM_NONFATAL : DDI_FM_FATAL);
				(void) snprintf(buf, FM_MAX_CLASS,
				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
				    ecc_corr ? PCIX_ECC_CE_ATTR :
				    PCIX_ECC_UE_ATTR);
				break;
			    case PCI_PCIX_ECC_PHASE_DATA32:
			    case PCI_PCIX_ECC_PHASE_DATA64:
				/*
				 * Data-phase UE severity depends on whether
				 * the failing address maps to a known handle.
				 */
				if (ecc_corr)
					ret = DDI_FM_NONFATAL;
				else
					ret = pcix_check_addr(dip, derr,
					    pcix_ecc_regs);
				PCI_FM_SEV_INC(ret);

				(void) snprintf(buf, FM_MAX_CLASS,
				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
				    ecc_corr ? PCIX_ECC_CE_DATA :
				    PCIX_ECC_UE_DATA);
				break;
			}
			/* Post the phase ereport (payload varies by shape). */
			if (ecc_phase)
				if (bridge)
					ddi_fm_ereport_post(dip, buf,
					    derr->fme_ena,
					    DDI_NOSLEEP, FM_VERSION,
					    DATA_TYPE_UINT8, 0,
					    PCIX_SEC_STATUS, DATA_TYPE_UINT16,
					    pcix_bdg_regs->pcix_bdg_sec_stat,
					    PCIX_BDG_STAT, DATA_TYPE_UINT32,
					    pcix_bdg_regs->pcix_bdg_stat,
					    PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_ctlstat,
					    PCIX_ECC_ATTR, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_attr, NULL);
				else
					ddi_fm_ereport_post(dip, buf,
					    derr->fme_ena,
					    DDI_NOSLEEP, FM_VERSION,
					    DATA_TYPE_UINT8, 0,
					    PCIX_COMMAND, DATA_TYPE_UINT16,
					    pcix_regs->pcix_command,
					    PCIX_STATUS, DATA_TYPE_UINT32,
					    pcix_regs->pcix_status,
					    PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_ctlstat,
					    PCIX_ECC_ATTR, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_attr, NULL);
			/* Separately report secondary CE/UE indications. */
			if (sec_ce || sec_ue) {
				(void) snprintf(buf, FM_MAX_CLASS,
				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
				    sec_ce ? PCIX_ECC_S_CE : PCIX_ECC_S_UE);
				if (bridge)
					ddi_fm_ereport_post(dip, buf,
					    derr->fme_ena,
					    DDI_NOSLEEP, FM_VERSION,
					    DATA_TYPE_UINT8, 0,
					    PCIX_SEC_STATUS, DATA_TYPE_UINT16,
					    pcix_bdg_regs->pcix_bdg_sec_stat,
					    PCIX_BDG_STAT, DATA_TYPE_UINT32,
					    pcix_bdg_regs->pcix_bdg_stat,
					    PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_ctlstat,
					    PCIX_ECC_ATTR, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_attr, NULL);
				else
					ddi_fm_ereport_post(dip, buf,
					    derr->fme_ena,
					    DDI_NOSLEEP, FM_VERSION,
					    DATA_TYPE_UINT8, 0,
					    PCIX_COMMAND, DATA_TYPE_UINT16,
					    pcix_regs->pcix_command,
					    PCIX_STATUS, DATA_TYPE_UINT32,
					    pcix_regs->pcix_status,
					    PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_ctlstat,
					    PCIX_ECC_ATTR, DATA_TYPE_UINT32,
					    pcix_ecc_regs->pcix_ecc_attr, NULL);
				PCI_FM_SEV_INC(sec_ue ? DDI_FM_FATAL :
				    DDI_FM_NONFATAL);
			}
		}
	}
	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}
1714 
/*
 * Report PCI-X bridge errors: walk the captured bridge status and
 * secondary status registers against their respective error tables,
 * posting an ereport and accumulating severity for each asserted bit,
 * then handle ECC errors when the PCI-X capability version supports
 * them.  Returns a folded DDI_FM_* severity.
 */
static int
pcix_bdg_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
    void *pe_regs)
{
	pcix_bdg_error_regs_t *pcix_bdg_regs = (pcix_bdg_error_regs_t *)pe_regs;
	int fatal = 0;
	int nonfatal = 0;
	int unknown = 0;
	int ok = 0;
	char buf[FM_MAX_CLASS];
	int i;

	/* Primary (bridge) status register errors. */
	if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_STATUS_VALID) {
		for (i = 0; pcix_err_tbl[i].err_class != NULL; i++) {
			if ((pcix_bdg_regs->pcix_bdg_stat &
			    pcix_err_tbl[i].reg_bit)) {
				(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
				    PCIX_ERROR_SUBCLASS,
				    pcix_err_tbl[i].err_class);
				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
				    PCIX_SEC_STATUS, DATA_TYPE_UINT16,
				    pcix_bdg_regs->pcix_bdg_sec_stat,
				    PCIX_BDG_STAT, DATA_TYPE_UINT32,
				    pcix_bdg_regs->pcix_bdg_stat, NULL);
				PCI_FM_SEV_INC(pcix_err_tbl[i].flags);
			}
		}
	}

	/* Secondary status register errors (separate table and subclass). */
	if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_SEC_STATUS_VALID) {
		for (i = 0; pcix_sec_err_tbl[i].err_class != NULL; i++) {
			if ((pcix_bdg_regs->pcix_bdg_sec_stat &
			    pcix_sec_err_tbl[i].reg_bit)) {
				(void) snprintf(buf, FM_MAX_CLASS, "%s.%s%s",
				    PCIX_ERROR_SUBCLASS,
				    PCIX_SEC_ERROR_SUBCLASS,
				    pcix_sec_err_tbl[i].err_class);
				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
				    PCIX_SEC_STATUS, DATA_TYPE_UINT16,
				    pcix_bdg_regs->pcix_bdg_sec_stat,
				    PCIX_BDG_STAT, DATA_TYPE_UINT32,
				    pcix_bdg_regs->pcix_bdg_stat, NULL);
				PCI_FM_SEV_INC(pcix_sec_err_tbl[i].flags);
			}
		}
	}

	/* Log/Handle ECC errors */
	if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) {
		int ret;

		ret = pcix_ecc_error_report(dip, derr, erpt_p,
		    (void *)pcix_bdg_regs);
		PCI_FM_SEV_INC(ret);
	}
	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}
1775 
/*
 * Scan the captured PCI-X status register of a non-bridge PCI-X device
 * and post one ereport per asserted error bit found in pcix_err_tbl,
 * then report any PCI-X ECC errors.  Returns the aggregate severity:
 * DDI_FM_FATAL, DDI_FM_NONFATAL, DDI_FM_UNKNOWN or DDI_FM_OK.
 */
static int
pcix_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
{
	pcix_error_regs_t *pcix_regs = (pcix_error_regs_t *)erpt_p->pe_regs;
	int fatal = 0;
	int nonfatal = 0;
	int unknown = 0;
	int ok = 0;	/* presumably consumed by PCI_FM_SEV_INC() -- confirm */
	char buf[FM_MAX_CLASS];
	int i;

	/* Only trust pcix_status if the register capture marked it valid */
	if (pcix_regs->pcix_vflags & PCIX_ERR_STATUS_VALID) {
		for (i = 0; pcix_err_tbl[i].err_class != NULL; i++) {
			if (!(pcix_regs->pcix_status & pcix_err_tbl[i].reg_bit))
				continue;

			/* One ereport per asserted error bit */
			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
			    PCIX_ERROR_SUBCLASS, pcix_err_tbl[i].err_class);
			ddi_fm_ereport_post(dip, buf, derr->fme_ena,
			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
			    PCIX_COMMAND, DATA_TYPE_UINT16,
			    pcix_regs->pcix_command, PCIX_STATUS,
			    DATA_TYPE_UINT32, pcix_regs->pcix_status,
			    NULL);
			PCI_FM_SEV_INC(pcix_err_tbl[i].flags);
		}
	}
	/* Log/Handle ECC errors */
	if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) {
		int ret = pcix_ecc_error_report(dip, derr, erpt_p,
		    (void *)pcix_regs);
		PCI_FM_SEV_INC(ret);
	}

	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}
1813 
/*
 * Decode the root complex error status captured in the PCI Express
 * advanced error registers and post ereports for the fatal, nonfatal
 * and correctable error messages received by the root complex.
 * Returns the aggregate severity (DDI_FM_FATAL/DDI_FM_NONFATAL/
 * DDI_FM_UNKNOWN/DDI_FM_OK).
 */
static int
pcie_rc_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
    void *pe_regs)
{
	pcie_adv_error_regs_t *pcie_adv_regs = (pcie_adv_error_regs_t *)pe_regs;
	int fatal = 0;
	int nonfatal = 0;
	int unknown = 0;	/* never set here; kept for the common return */
	char buf[FM_MAX_CLASS];

	if (pcie_adv_regs->pcie_adv_vflags & PCIE_RC_ERR_STATUS_VALID) {
		pcie_adv_rc_error_regs_t *pcie_rc_regs =
		    pcie_adv_regs->pcie_adv_rc_regs;
		int ce, ue, mult_ce, mult_ue, first_ue_fatal, nfe, fe;

		/* Break the RC error status register into its fields */
		ce = pcie_rc_regs->pcie_rc_err_status &
		    PCIE_AER_RE_STS_CE_RCVD;
		ue = pcie_rc_regs->pcie_rc_err_status &
		    PCIE_AER_RE_STS_FE_NFE_RCVD;
		mult_ce = pcie_rc_regs->pcie_rc_err_status &
		    PCIE_AER_RE_STS_MUL_CE_RCVD;
		mult_ue = pcie_rc_regs->pcie_rc_err_status &
		    PCIE_AER_RE_STS_MUL_FE_NFE_RCVD;
		first_ue_fatal = pcie_rc_regs->pcie_rc_err_status &
		    PCIE_AER_RE_STS_FIRST_UC_FATAL;
		nfe = pcie_rc_regs->pcie_rc_err_status &
		    PCIE_AER_RE_STS_NFE_MSGS_RCVD;
		fe = pcie_rc_regs->pcie_rc_err_status &
		    PCIE_AER_RE_STS_FE_MSGS_RCVD;
		/*
		 * log fatal/nonfatal/corrected messages
		 * received by root complex
		 */
		if (ue && fe)
			fatal++;
		else if (ce && !ue)
			nonfatal++;

		if (fe && first_ue_fatal) {
			(void) snprintf(buf, FM_MAX_CLASS,
			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_FE_MSG);
			pcie_ereport_post(dip, derr, erpt_p, buf,
			    PCIEX_TYPE_RC_UE_MSG);
		}
		if (nfe && !first_ue_fatal) {
			(void) snprintf(buf, FM_MAX_CLASS,
			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_NFE_MSG);
			pcie_ereport_post(dip, derr, erpt_p, buf,
			    PCIEX_TYPE_RC_UE_MSG);
		}
		if (ce) {
			(void) snprintf(buf, FM_MAX_CLASS,
			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_CE_MSG);
			pcie_ereport_post(dip, derr, erpt_p, buf,
			    PCIEX_TYPE_RC_CE_MSG);
		}
		if (mult_ce) {
			(void) snprintf(buf, FM_MAX_CLASS,
			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_MCE_MSG);
			pcie_ereport_post(dip, derr, erpt_p, buf,
			    PCIEX_TYPE_RC_MULT_MSG);
		}
		if (mult_ue) {
			(void) snprintf(buf, FM_MAX_CLASS,
			    "%s.%s", PCIEX_ERROR_SUBCLASS, PCIEX_RC_MUE_MSG);
			pcie_ereport_post(dip, derr, erpt_p, buf,
			    PCIEX_TYPE_RC_MULT_MSG);
		}
	}
	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}
1886 
/*
 * Generate ereports for a PCI Express device (or PCIe-to-PCI/PCI-X
 * bridge).  Handles, in order: the PCI-X bridge registers of a PCIe
 * bridge, the baseline (non-advanced) PCIe error status, the advanced
 * uncorrectable and correctable error status, root complex errors,
 * and the secondary uncorrectable errors of a PCIe-to-PCI bridge.
 * Returns the aggregate severity.
 */
static int
pcie_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
{
	int fatal = 0;
	int nonfatal = 0;
	int unknown = 0;
	int ok = 0;	/* presumably consumed by PCI_FM_SEV_INC() -- confirm */
	char buf[FM_MAX_CLASS];
	int i;
	pcie_error_regs_t *pcie_regs = (pcie_error_regs_t *)erpt_p->pe_regs;
	pcie_adv_error_regs_t *pcie_adv_regs;
	pcie_adv_bdg_error_regs_t *pcie_bdg_regs;

	/* PCIe bridge with a PCI-X secondary: report its PCI-X errors too */
	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV) &&
	    (erpt_p->pe_dflags & PCIX_DEV)) {
		int ret = pcix_bdg_error_report(dip, derr, erpt_p,
		    (void *)pcie_regs->pcix_bdg_regs);
		PCI_FM_SEV_INC(ret);
	}

	/*
	 * Device without the advanced error capability: only the baseline
	 * PCIe error status is available.
	 */
	if (!(erpt_p->pe_dflags & PCIEX_ADV_DEV)) {
		if (!(pcie_regs->pcie_vflags & PCIE_ERR_STATUS_VALID))
			goto done;
		for (i = 0; pciex_nadv_err_tbl[i].err_class != NULL; i++) {
			if (!(pcie_regs->pcie_err_status &
			    pciex_nadv_err_tbl[i].reg_bit))
				continue;

			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
			    PCIEX_ERROR_SUBCLASS,
			    pciex_nadv_err_tbl[i].err_class);
			pcie_ereport_post(dip, derr, erpt_p, buf,
			    PCIEX_TYPE_GEN);
			PCI_FM_SEV_INC(pciex_nadv_err_tbl[i].flags);
		}
		goto done;
	}

	pcie_adv_regs = pcie_regs->pcie_adv_regs;

	/*
	 * Log PCI Express uncorrectable errors
	 */
	if (pcie_adv_regs->pcie_adv_vflags & PCIE_UE_STATUS_VALID) {
		for (i = 0; pciex_ue_err_tbl[i].err_class != NULL; i++) {
			if (!(pcie_adv_regs->pcie_ue_status &
			    pciex_ue_err_tbl[i].reg_bit))
				continue;

			(void) snprintf(buf, FM_MAX_CLASS,
			    "%s.%s", PCIEX_ERROR_SUBCLASS,
			    pciex_ue_err_tbl[i].err_class);

			/*
			 * If this bit is not the one the logged header
			 * belongs to, just report it with the table's
			 * severity; otherwise try to identify the target
			 * device from the logged address first.
			 */
			pcie_adv_regs->pcie_adv_bdf = 0;
			if ((pcie_adv_regs->pcie_ue_status &
			    PCIE_AER_UCE_LOG_BITS) !=
			    pciex_ue_err_tbl[i].reg_bit) {
				PCI_FM_SEV_INC(pciex_ue_err_tbl[i].flags);
				pcie_ereport_post(dip, derr, erpt_p, buf,
				    PCIEX_TYPE_UE);
			} else {
				pcie_check_addr(dip, derr, erpt_p);
				/*
				 * fatal/nonfatal errors are fatal/nonfatal
				 * regardless of if we find a handle
				 */
				if (pciex_ue_err_tbl[i].flags == DDI_FM_FATAL)
					derr->fme_status = DDI_FM_FATAL;
				else if (pciex_ue_err_tbl[i].flags ==
				    DDI_FM_NONFATAL)
					derr->fme_status = DDI_FM_NONFATAL;
				pcie_ereport_post(dip, derr, erpt_p, buf,
				    PCIEX_TYPE_UE);
				PCI_FM_SEV_INC(derr->fme_status);
			}
		}
	}

	/*
	 * Log PCI Express correctable errors
	 */
	if (pcie_adv_regs->pcie_adv_vflags & PCIE_CE_STATUS_VALID) {
		for (i = 0; pciex_ce_err_tbl[i].err_class != NULL; i++) {
			if (!(pcie_adv_regs->pcie_ce_status &
			    pciex_ce_err_tbl[i].reg_bit))
				continue;

			(void) snprintf(buf, FM_MAX_CLASS,
			    "%s.%s", PCIEX_ERROR_SUBCLASS,
			    pciex_ce_err_tbl[i].err_class);
			pcie_ereport_post(dip, derr, erpt_p, buf,
			    PCIEX_TYPE_CE);
			/* CE severity never overrides a fatal/unknown one */
			if (!fatal && !unknown)
				PCI_FM_SEV_INC(pciex_ce_err_tbl[i].flags);
		}
	}

	if (!(erpt_p->pe_dflags & PCI_BRIDGE_DEV))
		goto done;

	if (erpt_p->pe_dflags & PCIEX_RC_DEV) {
		int ret = pcie_rc_error_report(dip, derr, erpt_p,
		    (void *)pcie_adv_regs);
		PCI_FM_SEV_INC(ret);
	}

	/* Secondary UE status only applies to PCIe-to-PCI bridges */
	if (!((erpt_p->pe_dflags & PCIEX_2PCI_DEV) &&
	    (pcie_adv_regs->pcie_adv_vflags & PCIE_SUE_STATUS_VALID)))
		goto done;

	pcie_bdg_regs = pcie_adv_regs->pcie_adv_bdg_regs;

	for (i = 0; pcie_sue_err_tbl[i].err_class != NULL; i++) {
		if ((pcie_bdg_regs->pcie_sue_status &
		    pcie_sue_err_tbl[i].reg_bit)) {
			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
			    PCIEX_ERROR_SUBCLASS,
			    pcie_sue_err_tbl[i].err_class);

			/*
			 * As for UEs above: attempt target identification
			 * only when the logged header matches this bit.
			 */
			if ((pcie_bdg_regs->pcie_sue_status &
			    PCIE_AER_SUCE_LOG_BITS) !=
			    pcie_sue_err_tbl[i].reg_bit) {
				PCI_FM_SEV_INC(pcie_sue_err_tbl[i].flags);
				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
				    PCIEX_SEC_UE_STATUS, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_status,
#ifdef DEBUG
				    PCIEX_SUE_HDR0, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_hdr0,
				    PCIEX_SUE_HDR1, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_hdr[0],
				    PCIEX_SUE_HDR2, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_hdr[1],
				    PCIEX_SUE_HDR3, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_hdr[2],
#endif
				    NULL);
			} else {
				pcie_adv_regs->pcie_adv_bdf = 0;
				pcie_pci_check_addr(dip, derr, erpt_p);
				/*
				 * fatal/nonfatal errors are fatal/nonfatal
				 * regardless of if we find a handle
				 */
				if (pcie_sue_err_tbl[i].flags == DDI_FM_FATAL)
					derr->fme_status = DDI_FM_FATAL;
				else if (pcie_sue_err_tbl[i].flags ==
				    DDI_FM_NONFATAL)
					derr->fme_status = DDI_FM_NONFATAL;

				/*
				 * NOTE(review): NULL is used below as the
				 * integer 0 in the bdf comparison and the
				 * boolean payload value.
				 */
				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
				    PCIEX_SEC_UE_STATUS, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_status,
				    PCIEX_SRC_ID, DATA_TYPE_UINT16,
				    pcie_adv_regs->pcie_adv_bdf,
				    PCIEX_SRC_VALID, DATA_TYPE_BOOLEAN_VALUE,
				    (pcie_adv_regs->pcie_adv_bdf != NULL) ?
				    1 : NULL,
#ifdef DEBUG
				    PCIEX_SUE_HDR0, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_hdr0,
				    PCIEX_SUE_HDR1, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_hdr[0],
				    PCIEX_SUE_HDR2, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_hdr[1],
				    PCIEX_SUE_HDR3, DATA_TYPE_UINT32,
				    pcie_bdg_regs->pcie_sue_hdr[2],
#endif
				    NULL);
				PCI_FM_SEV_INC(derr->fme_status);
			}
		}
	}
done:
	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}
2066 
/*
 * Generate ereports from the error registers gathered for this device,
 * dispatching to the PCI Express, PCI-X or plain PCI reporters as
 * appropriate, and to the bridge reporter for bridge devices.  The
 * overall severity is stored in derr->fme_status.
 */
static void
pci_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
{
	int fatal = 0;
	int nonfatal = 0;
	int unknown = 0;
	int ok = 0;	/* presumably consumed by PCI_FM_SEV_INC() -- confirm */
	char buf[FM_MAX_CLASS];
	int i;

	if (derr->fme_flag == DDI_FM_ERR_UNEXPECTED) {
		/*
		 * Log generic PCI errors.
		 */
		for (i = 0; pci_err_tbl[i].err_class != NULL; i++) {
			if (!(erpt_p->pe_pci_regs->pci_err_status &
			    pci_err_tbl[i].reg_bit) ||
			    !(erpt_p->pe_pci_regs->pci_vflags &
			    PCI_ERR_STATUS_VALID))
				continue;
			/*
			 * Generate an ereport for this error bit.
			 */
			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
			    PCI_ERROR_SUBCLASS, pci_err_tbl[i].err_class);
			ddi_fm_ereport_post(dip, buf, derr->fme_ena,
			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
			    PCI_CONFIG_STATUS, DATA_TYPE_UINT16,
			    erpt_p->pe_pci_regs->pci_err_status,
			    PCI_CONFIG_COMMAND, DATA_TYPE_UINT16,
			    erpt_p->pe_pci_regs->pci_cfg_comm, NULL);

			/*
			 * For PCI Express devices severity is decided by
			 * pcie_error_report() below, not by this table.
			 */
			if (!(erpt_p->pe_dflags & PCIEX_DEV))
				PCI_FM_SEV_INC(pci_err_tbl[i].flags);
		}
		/* Dispatch to the bus-technology specific reporter */
		if (erpt_p->pe_dflags & PCIEX_DEV) {
			int ret = pcie_error_report(dip, derr, erpt_p);
			PCI_FM_SEV_INC(ret);
		} else if (erpt_p->pe_dflags & PCIX_DEV) {
			if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
				int ret = pcix_bdg_error_report(dip, derr,
				    erpt_p, erpt_p->pe_regs);
				PCI_FM_SEV_INC(ret);
			} else {
				int ret = pcix_error_report(dip, derr, erpt_p);
				PCI_FM_SEV_INC(ret);
			}
		}
	}

	/* Bridges also report errors seen on their secondary side */
	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV)) {
		int ret = pci_bdg_error_report(dip, derr, erpt_p);
		PCI_FM_SEV_INC(ret);
	}

	derr->fme_status = (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
}
2125 
/*
 * External entry point for PCI/PCI-X/PCIe error reporting: gather the
 * device's error registers, generate ereports and compute severity
 * (returned via derr->fme_status), then clear the error status.  If
 * xx_status is non-NULL the captured PCI status register is returned
 * through it.
 */
void
pci_ereport_post(dev_info_t *dip, ddi_fm_error_t *derr, uint16_t *xx_status)
{
	struct i_ddi_fmhdl *fmhdl;
	pci_erpt_t *erpt_p;

	fmhdl = DEVI(dip)->devi_fmhdl;
	/* Device is neither ereport- nor errcb-capable: note that and bail */
	if (!DDI_FM_EREPORT_CAP(ddi_fm_capable(dip)) &&
	    !DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) {
		i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL, DDI_NOSLEEP);
		return;
	}

	ASSERT(fmhdl);

	/* Generate an ENA if the caller did not supply one */
	if (derr->fme_ena == NULL)
		derr->fme_ena = fm_ena_generate(0, FM_ENA_FMT1);

	erpt_p = (pci_erpt_t *)fmhdl->fh_bus_specific;
	if (erpt_p == NULL) {
		i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL, DDI_NOSLEEP);
		return;
	}

	pci_regs_gather(dip, erpt_p);
	pci_error_report(dip, derr, erpt_p);
	pci_regs_clear(erpt_p);

	/*
	 * NOTE(review): this reads the software copy filled in by
	 * pci_regs_gather(); presumably pci_regs_clear() resets only the
	 * hardware registers, not this copy -- confirm.
	 */
	if (xx_status != NULL)
		*xx_status = erpt_p->pe_pci_regs->pci_err_status;
}
2157 
2158 /*
2159  * private version of walk_devs() that can be used during panic. No
2160  * sleeping or locking required.
2161  */
2162 static int
2163 pci_fm_walk_devs(dev_info_t *dip, int (*f)(dev_info_t *, void *), void *arg)
2164 {
2165 	while (dip) {
2166 		switch ((*f)(dip, arg)) {
2167 		case DDI_WALK_TERMINATE:
2168 			return (DDI_WALK_TERMINATE);
2169 		case DDI_WALK_CONTINUE:
2170 			if (pci_fm_walk_devs(ddi_get_child(dip), f,
2171 			    arg) == DDI_WALK_TERMINATE)
2172 				return (DDI_WALK_TERMINATE);
2173 			break;
2174 		case DDI_WALK_PRUNECHILD:
2175 			break;
2176 		}
2177 		dip = ddi_get_next_sibling(dip);
2178 	}
2179 	return (DDI_WALK_CONTINUE);
2180 }
2181 
2182 /*
2183  * need special version of ddi_fm_ereport_post() as the leaf driver may
2184  * not be hardened.
2185  */
static void
pci_fm_ereport_post(dev_info_t *dip, const char *error_class, uint64_t ena,
    uint8_t version, ...)
{
	char *name;
	char device_path[MAXPATHLEN];
	char ddi_error_class[FM_MAX_CLASS];
	nvlist_t *ereport, *detector;
	nv_alloc_t *nva;
	errorq_elem_t *eqep;
	va_list ap;

	/*
	 * When panicking, build the nvlists out of a reserved errorq
	 * element rather than allocating; otherwise use the default
	 * nvlist allocator.
	 */
	if (panicstr) {
		eqep = errorq_reserve(ereport_errorq);
		if (eqep == NULL)
			return;
		ereport = errorq_elem_nvl(ereport_errorq, eqep);
		nva = errorq_elem_nva(ereport_errorq, eqep);
		detector = fm_nvlist_create(nva);
	} else {
		ereport = fm_nvlist_create(NULL);
		detector = fm_nvlist_create(NULL);
	}

	/* The detector is the device path of the target device */
	(void) ddi_pathname(dip, device_path);
	fm_fmri_dev_set(detector, FM_DEV_SCHEME_VERSION, NULL,
	    device_path, NULL);
	(void) snprintf(ddi_error_class, FM_MAX_CLASS, "%s.%s",
	    DDI_IO_CLASS, error_class);
	fm_ereport_set(ereport, version, ddi_error_class, ena, detector, NULL);

	/* Append the caller's (name, type, value, ...) payload pairs */
	va_start(ap, version);
	name = va_arg(ap, char *);
	(void) i_fm_payload_set(ereport, name, ap);
	va_end(ap);

	if (panicstr) {
		/* Errorq owns the element's storage; nothing to free here */
		errorq_commit(ereport_errorq, eqep, ERRORQ_SYNC);
	} else {
		(void) fm_ereport_post(ereport, EVCH_TRYHARD);
		fm_nvlist_destroy(ereport, FM_NVA_FREE);
		fm_nvlist_destroy(detector, FM_NVA_FREE);
	}
}
2230 
/*
 * Tree-walk callback: determine whether the pci address recorded in
 * tgt_err (arg) falls within one of this device's register sets.  On a
 * match, record the dip in tgt_err->tgt_dip and terminate the walk;
 * otherwise continue.
 */
static int
pci_check_regs(dev_info_t *dip, void *arg)
{
	int reglen;
	int rn;
	int totreg;
	pci_regspec_t *drv_regp;
	pci_target_err_t *tgt_err = (pci_target_err_t *)arg;

	if (tgt_err->tgt_pci_space == PCI_REG_ADDR_G(PCI_ADDR_CONFIG)) {
		/*
		 * for config space, we need to check if the given address
		 * is a valid config space address for this device - based
		 * on pci_phys_hi of the config space entry in reg property.
		 */
		if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
		    "reg", (caddr_t)&drv_regp, &reglen) != DDI_SUCCESS)
			return (DDI_WALK_CONTINUE);

		/* Match on the bus/device/function portion of the address */
		totreg = reglen / sizeof (pci_regspec_t);
		for (rn = 0; rn < totreg; rn++) {
			if (tgt_err->tgt_pci_space ==
			    PCI_REG_ADDR_G(drv_regp[rn].pci_phys_hi) &&
			    (tgt_err->tgt_pci_addr & (PCI_REG_BUS_M |
			    PCI_REG_DEV_M | PCI_REG_FUNC_M)) ==
			    (drv_regp[rn].pci_phys_hi & (PCI_REG_BUS_M |
			    PCI_REG_DEV_M | PCI_REG_FUNC_M))) {
				tgt_err->tgt_dip = dip;
				kmem_free(drv_regp, reglen);
				return (DDI_WALK_TERMINATE);
			}
		}
		kmem_free(drv_regp, reglen);
	} else {
		/*
		 * for non config space, need to check reg to look
		 * for any non-relocable mapping, otherwise check
		 * assigned-addresses.
		 */
		if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
		    "reg", (caddr_t)&drv_regp, &reglen) != DDI_SUCCESS)
			return (DDI_WALK_CONTINUE);

		/*
		 * Pass 1: non-relocatable "reg" entries; match space type
		 * (unless unknown) and 64-bit address range [base, base+size).
		 */
		totreg = reglen / sizeof (pci_regspec_t);
		for (rn = 0; rn < totreg; rn++) {
			if ((drv_regp[rn].pci_phys_hi & PCI_RELOCAT_B) &&
			    (tgt_err->tgt_pci_space == TGT_PCI_SPACE_UNKNOWN ||
			    tgt_err->tgt_pci_space ==
			    PCI_REG_ADDR_G(drv_regp[rn].pci_phys_hi)) &&
			    (tgt_err->tgt_pci_addr >=
			    (uint64_t)drv_regp[rn].pci_phys_low +
			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32)) &&
			    (tgt_err->tgt_pci_addr <
			    (uint64_t)drv_regp[rn].pci_phys_low +
			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32) +
			    (uint64_t)drv_regp[rn].pci_size_low +
			    ((uint64_t)drv_regp[rn].pci_size_hi << 32))) {
				tgt_err->tgt_dip = dip;
				kmem_free(drv_regp, reglen);
				return (DDI_WALK_TERMINATE);
			}
		}
		kmem_free(drv_regp, reglen);

		if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
		    "assigned-addresses", (caddr_t)&drv_regp, &reglen) !=
		    DDI_SUCCESS)
			return (DDI_WALK_CONTINUE);

		/* Pass 2: same range check against "assigned-addresses" */
		totreg = reglen / sizeof (pci_regspec_t);
		for (rn = 0; rn < totreg; rn++) {
			if ((tgt_err->tgt_pci_space == TGT_PCI_SPACE_UNKNOWN ||
			    tgt_err->tgt_pci_space ==
			    PCI_REG_ADDR_G(drv_regp[rn].pci_phys_hi)) &&
			    (tgt_err->tgt_pci_addr >=
			    (uint64_t)drv_regp[rn].pci_phys_low +
			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32)) &&
			    (tgt_err->tgt_pci_addr <
			    (uint64_t)drv_regp[rn].pci_phys_low +
			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32) +
			    (uint64_t)drv_regp[rn].pci_size_low +
			    ((uint64_t)drv_regp[rn].pci_size_hi << 32))) {
				tgt_err->tgt_dip = dip;
				kmem_free(drv_regp, reglen);
				return (DDI_WALK_TERMINATE);
			}
		}
		kmem_free(drv_regp, reglen);
	}
	return (DDI_WALK_CONTINUE);
}
2322 
2323 /*
2324  * impl_fix_ranges - fixes the config space entry of the "ranges"
2325  * property on psycho+ platforms.  (if changing this function please make sure
2326  * to change the pci_fix_ranges function in pcipsy.c)
2327  */
2328 /*ARGSUSED*/
2329 static void
2330 pci_fix_ranges(dev_info_t *dip, pci_ranges_t *pci_ranges, int nrange)
2331 {
2332 #if defined(__sparc)
2333 	char *name = ddi_binding_name(dip);
2334 
2335 	if ((strcmp(name, "pci108e,8000") == 0) ||
2336 	    (strcmp(name, "pci108e,a000") == 0) ||
2337 	    (strcmp(name, "pci108e,a001") == 0)) {
2338 		int i;
2339 		for (i = 0; i < nrange; i++, pci_ranges++)
2340 			if ((pci_ranges->child_high & PCI_REG_ADDR_M) ==
2341 			    PCI_ADDR_CONFIG)
2342 				pci_ranges->parent_low |=
2343 				    pci_ranges->child_high;
2344 	}
2345 #endif
2346 }
2347 
/*
 * Tree-walk callback applied to top-level pci/pciex nexus nodes: if the
 * physical error address (tgt_err->tgt_err_addr) falls within one of
 * this nexus' ranges, translate it to a pci address and walk the
 * children with pci_check_regs() to find the target device.  The walk
 * terminates once tgt_err->tgt_dip has been set.
 */
static int
pci_check_ranges(dev_info_t *dip, void *arg)
{
	uint64_t range_parent_begin;
	uint64_t range_parent_size;
	uint64_t range_parent_end;
	uint32_t space_type;
	uint32_t bus_num;
	uint32_t range_offset;
	pci_ranges_t *pci_ranges, *rangep;
	pci_bus_range_t *pci_bus_rangep;
	int pci_ranges_length;
	int nrange;
	pci_target_err_t *tgt_err = (pci_target_err_t *)arg;
	int i, size;
	if (strcmp(ddi_node_name(dip), "pci") != 0 &&
	    strcmp(ddi_node_name(dip), "pciex") != 0)
		return (DDI_WALK_CONTINUE);

	/*
	 * Get the ranges property. Note we only look at the top level pci
	 * node (hostbridge) which has a ranges property of type pci_ranges_t
	 * not at pci-pci bridges.
	 */
	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "ranges",
	    (caddr_t)&pci_ranges, &pci_ranges_length) != DDI_SUCCESS) {
		/*
		 * no ranges property - no translation needed
		 */
		tgt_err->tgt_pci_addr = tgt_err->tgt_err_addr;
		tgt_err->tgt_pci_space = TGT_PCI_SPACE_UNKNOWN;
		/* During panic use the lock-free walker */
		if (panicstr)
			(void) pci_fm_walk_devs(ddi_get_child(dip),
			    pci_check_regs, (void *)tgt_err);
		else {
			int circ = 0;
			ndi_devi_enter(dip, &circ);
			ddi_walk_devs(ddi_get_child(dip), pci_check_regs,
			    (void *)tgt_err);
			ndi_devi_exit(dip, circ);
		}
		if (tgt_err->tgt_dip != NULL)
			return (DDI_WALK_TERMINATE);
		/* Children already searched above - don't walk them again */
		return (DDI_WALK_PRUNECHILD);
	}
	nrange = pci_ranges_length / sizeof (pci_ranges_t);
	rangep = pci_ranges;

	/* Need to fix the pci ranges property for psycho based systems */
	pci_fix_ranges(dip, pci_ranges, nrange);

	for (i = 0; i < nrange; i++, rangep++) {
		range_parent_begin = ((uint64_t)rangep->parent_high << 32) +
		    rangep->parent_low;
		range_parent_size = ((uint64_t)rangep->size_high << 32) +
		    rangep->size_low;
		range_parent_end = range_parent_begin + range_parent_size - 1;

		if ((tgt_err->tgt_err_addr < range_parent_begin) ||
		    (tgt_err->tgt_err_addr > range_parent_end)) {
			/* Not in range */
			continue;
		}
		space_type = PCI_REG_ADDR_G(rangep->child_high);
		if (space_type == PCI_REG_ADDR_G(PCI_ADDR_CONFIG)) {
			/* Config space address - check bus range */
			range_offset = tgt_err->tgt_err_addr -
			    range_parent_begin;
			bus_num = PCI_REG_BUS_G(range_offset);
			if (ddi_getlongprop(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS, "bus-range",
			    (caddr_t)&pci_bus_rangep, &size) != DDI_SUCCESS) {
				continue;
			}
			if ((bus_num < pci_bus_rangep->lo) ||
			    (bus_num > pci_bus_rangep->hi)) {
				/*
				 * Bus number not appropriate for this
				 * pci nexus.
				 */
				kmem_free(pci_bus_rangep, size);
				continue;
			}
			kmem_free(pci_bus_rangep, size);
		}

		/* We have a match if we get here - compute pci address */
		tgt_err->tgt_pci_addr = tgt_err->tgt_err_addr -
		    range_parent_begin;
		tgt_err->tgt_pci_addr += (((uint64_t)rangep->child_mid << 32) +
		    rangep->child_low);
		tgt_err->tgt_pci_space = space_type;
		if (panicstr)
			(void) pci_fm_walk_devs(ddi_get_child(dip),
			    pci_check_regs, (void *)tgt_err);
		else {
			int circ = 0;
			ndi_devi_enter(dip, &circ);
			ddi_walk_devs(ddi_get_child(dip), pci_check_regs,
			    (void *)tgt_err);
			ndi_devi_exit(dip, circ);
		}
		if (tgt_err->tgt_dip != NULL) {
			kmem_free(pci_ranges, pci_ranges_length);
			return (DDI_WALK_TERMINATE);
		}
	}
	kmem_free(pci_ranges, pci_ranges_length);
	return (DDI_WALK_PRUNECHILD);
}
2458 
2459 /*
2460  * Function used to drain pci_target_queue, either during panic or after softint
2461  * is generated, to generate target device ereports based on captured physical
2462  * addresses
2463  */
/*ARGSUSED*/
static void
pci_target_drain(void *private_p, pci_target_err_t *tgt_err)
{
	char buf[FM_MAX_CLASS];

	/*
	 * The following assumes that all pci_pci bridge devices
	 * are configured as transparent. Find the top-level pci
	 * nexus which has tgt_err_addr in one of its ranges, converting this
	 * to a pci address in the process. Then starting at this node do
	 * another tree walk to find a device with the pci address we've
	 * found within range of one of its assigned-addresses properties.
	 */
	tgt_err->tgt_dip = NULL;
	if (panicstr)
		(void) pci_fm_walk_devs(ddi_root_node(), pci_check_ranges,
		    (void *)tgt_err);
	else
		ddi_walk_devs(ddi_root_node(), pci_check_ranges,
		    (void *)tgt_err);
	/* No target device identified - nothing to report */
	if (tgt_err->tgt_dip == NULL)
		return;

	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", tgt_err->tgt_bridge_type,
	    tgt_err->tgt_err_class);
	pci_fm_ereport_post(tgt_err->tgt_dip, buf, tgt_err->tgt_err_ena, 0,
	    PCI_PA, DATA_TYPE_UINT64, tgt_err->tgt_err_addr, NULL);
}
2493 
2494 void
2495 pci_target_enqueue(uint64_t ena, char *class, char *bridge_type, uint64_t addr)
2496 {
2497 	pci_target_err_t tgt_err;
2498 
2499 	tgt_err.tgt_err_ena = ena;
2500 	tgt_err.tgt_err_class = class;
2501 	tgt_err.tgt_bridge_type = bridge_type;
2502 	tgt_err.tgt_err_addr = addr;
2503 	errorq_dispatch(pci_target_queue, (void *)&tgt_err,
2504 	    sizeof (pci_target_err_t), ERRORQ_ASYNC);
2505 }
2506 
void
pci_targetq_init(void)
{
	/*
	 * PCI target errorq, to schedule async handling of generation of
	 * target device ereports based on captured physical address.
	 * The errorq is created here but destroyed when _fini is called
	 * for the pci module.
	 */
	if (pci_target_queue == NULL) {
		pci_target_queue = errorq_create("pci_target_queue",
		    (errorq_func_t)pci_target_drain, (void *)NULL,
		    TARGET_MAX_ERRS, sizeof (pci_target_err_t), FM_ERR_PIL,
		    ERRORQ_VITAL);
		/* This queue is required for correct error handling */
		if (pci_target_queue == NULL)
			panic("failed to create required system error queue");
	}
}
2525