xref: /illumos-gate/usr/src/uts/common/os/pcifm.c (revision 2a93c3751513f2cec775a429a244bec6f0f0a635)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
24  */
25 
26 #include <sys/types.h>
27 #include <sys/sunndi.h>
28 #include <sys/sysmacros.h>
29 #include <sys/ddifm_impl.h>
30 #include <sys/fm/util.h>
31 #include <sys/fm/protocol.h>
32 #include <sys/fm/io/pci.h>
33 #include <sys/fm/io/ddi.h>
34 #include <sys/pci.h>
35 #include <sys/pci_cap.h>
36 #include <sys/pci_impl.h>
37 #include <sys/epm.h>
38 #include <sys/pcifm.h>
39 
40 #define	PCIX_ECC_VER_CHECK(x)	(((x) == PCI_PCIX_VER_1) ||\
41 				((x) == PCI_PCIX_VER_2))
42 
43 errorq_t *pci_target_queue = NULL;
44 
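/*
 * The error tables below map status register bits to ereport classes.  Each
 * row appears to pair a status bit (reg_bit) with the ereport class suffix
 * to post (err_class), an optional target ereport class (terr_class) queued
 * via pci_target_enqueue() when a faulting address is known, and a default
 * severity (flags).  A row of NULLs terminates each table.
 */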
45 pci_fm_err_t pci_err_tbl[] = {
46 	PCI_DET_PERR,	PCI_STAT_PERROR,	NULL,		DDI_FM_UNKNOWN,
47 	PCI_MDPE,	PCI_STAT_S_PERROR,	PCI_TARG_MDPE,	DDI_FM_UNKNOWN,
48 	PCI_SIG_SERR,	PCI_STAT_S_SYSERR,	NULL,		DDI_FM_FATAL,
49 	PCI_MA,		PCI_STAT_R_MAST_AB,	PCI_TARG_MA,	DDI_FM_UNKNOWN,
50 	PCI_REC_TA,	PCI_STAT_R_TARG_AB,	PCI_TARG_REC_TA, DDI_FM_UNKNOWN,
51 	PCI_SIG_TA,	PCI_STAT_S_TARG_AB,	NULL,		DDI_FM_UNKNOWN,
52 	NULL, NULL, NULL, NULL,
53 };
54 
55 pci_fm_err_t pci_bdg_err_tbl[] = {
56 	PCI_DET_PERR,	PCI_STAT_PERROR,	NULL,		DDI_FM_UNKNOWN,
57 	PCI_MDPE,	PCI_STAT_S_PERROR,	PCI_TARG_MDPE,	DDI_FM_UNKNOWN,
58 	PCI_REC_SERR,	PCI_STAT_S_SYSERR,	NULL,		DDI_FM_UNKNOWN,
59 #if defined(__sparc)
60 	PCI_MA,		PCI_STAT_R_MAST_AB,	PCI_TARG_MA,	DDI_FM_UNKNOWN,
61 #endif
62 	PCI_REC_TA,	PCI_STAT_R_TARG_AB,	PCI_TARG_REC_TA, DDI_FM_UNKNOWN,
63 	PCI_SIG_TA,	PCI_STAT_S_TARG_AB,	NULL,		DDI_FM_UNKNOWN,
64 	NULL, NULL, NULL, NULL,
65 };
66 
67 static pci_fm_err_t pcix_err_tbl[] = {
68 	PCIX_SPL_DIS,		PCI_PCIX_SPL_DSCD,	NULL,	DDI_FM_UNKNOWN,
69 	PCIX_UNEX_SPL,		PCI_PCIX_UNEX_SPL,	NULL,	DDI_FM_UNKNOWN,
70 	PCIX_RX_SPL_MSG,	PCI_PCIX_RX_SPL_MSG,	NULL,   DDI_FM_UNKNOWN,
71 	NULL, NULL, NULL, NULL,
72 };
73 
74 static pci_fm_err_t pcix_sec_err_tbl[] = {
75 	PCIX_SPL_DIS,		PCI_PCIX_BSS_SPL_DSCD,	NULL,	DDI_FM_UNKNOWN,
76 	PCIX_UNEX_SPL,		PCI_PCIX_BSS_UNEX_SPL,	NULL,	DDI_FM_UNKNOWN,
77 	PCIX_BSS_SPL_OR,	PCI_PCIX_BSS_SPL_OR,	NULL,	DDI_FM_OK,
78 	PCIX_BSS_SPL_DLY,	PCI_PCIX_BSS_SPL_DLY,	NULL,	DDI_FM_OK,
79 	NULL, NULL, NULL, NULL,
80 };
81 
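/*
 * pci_config_check: Check the config access handle for a pending access
 * error after a config space read/write.  On error, post a PCI_NR ereport
 * (for unexpected errors only), clear the handle's error state and return
 * the fme_status that was observed.
 */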
82 static int
83 pci_config_check(ddi_acc_handle_t handle, int fme_flag)
84 {
85 	ddi_acc_hdl_t *hp = impl_acc_hdl_get(handle);
86 	ddi_fm_error_t de;
87 
88 	if (!(DDI_FM_ACC_ERR_CAP(ddi_fm_capable(hp->ah_dip))))
89 		return (DDI_FM_OK);
90 
91 	de.fme_version = DDI_FME_VERSION;
92 
93 	ddi_fm_acc_err_get(handle, &de, de.fme_version);
94 	if (de.fme_status != DDI_FM_OK) {
95 		if (fme_flag == DDI_FM_ERR_UNEXPECTED) {
96 			char buf[FM_MAX_CLASS];
97 
98 			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
99 			    PCI_ERROR_SUBCLASS, PCI_NR);
100 			ddi_fm_ereport_post(hp->ah_dip, buf, de.fme_ena,
101 			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0, NULL);
102 		}
103 		ddi_fm_acc_err_clear(handle, de.fme_version);
104 	}
105 	return (de.fme_status);
106 }
107 
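/*
 * pcix_ecc_regs_gather: Read the PCI-X ECC control/status, first and second
 * address, and attribute registers into pcix_ecc_regs, using the bridge or
 * non-bridge register offsets as appropriate.  Reads stop at the first
 * failed config access; PCIX_ERR_ECC_STS_VALID is set only once the
 * control/status read succeeds.
 */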
108 static void
109 pcix_ecc_regs_gather(pci_erpt_t *erpt_p, pcix_ecc_regs_t *pcix_ecc_regs,
110     uint8_t pcix_cap_ptr, int fme_flag)
111 {
112 	int bdg = erpt_p->pe_dflags & PCI_BRIDGE_DEV;
113 
114 	pcix_ecc_regs->pcix_ecc_ctlstat = pci_config_get32(erpt_p->pe_hdl,
115 	    (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_STATUS :
116 	    PCI_PCIX_ECC_STATUS)));
117 	if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
118 		pcix_ecc_regs->pcix_ecc_vflags |= PCIX_ERR_ECC_STS_VALID;
119 	else
120 		return;
121 	pcix_ecc_regs->pcix_ecc_fstaddr = pci_config_get32(erpt_p->pe_hdl,
122 	    (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_FST_AD :
123 	    PCI_PCIX_ECC_FST_AD)));
124 	pcix_ecc_regs->pcix_ecc_secaddr = pci_config_get32(erpt_p->pe_hdl,
125 	    (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_SEC_AD :
126 	    PCI_PCIX_ECC_SEC_AD)));
127 	pcix_ecc_regs->pcix_ecc_attr = pci_config_get32((
128 	    ddi_acc_handle_t)erpt_p->pe_hdl,
129 	    (pcix_cap_ptr + (bdg ? PCI_PCIX_BDG_ECC_ATTR : PCI_PCIX_ECC_ATTR)));
130 }
131 
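/*
 * pcix_regs_gather: Capture the PCI-X error state, either for a bridge
 * (secondary status, bridge status and, for ECC-capable versions, both sets
 * of ECC registers) or for a non-bridge function (command, status and ECC
 * registers).  Each register group is marked valid only if the config
 * accesses complete without error.
 */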
132 static void
133 pcix_regs_gather(pci_erpt_t *erpt_p, void *pe_regs, int fme_flag)
134 {
135 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
136 		pcix_bdg_error_regs_t *pcix_bdg_regs =
137 		    (pcix_bdg_error_regs_t *)pe_regs;
138 		uint8_t pcix_bdg_cap_ptr;
139 		int i;
140 
141 		pcix_bdg_cap_ptr = pcix_bdg_regs->pcix_bdg_cap_ptr;
142 		pcix_bdg_regs->pcix_bdg_sec_stat = pci_config_get16(
143 		    erpt_p->pe_hdl, (pcix_bdg_cap_ptr + PCI_PCIX_SEC_STATUS));
144 		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
145 			pcix_bdg_regs->pcix_bdg_vflags |=
146 			    PCIX_BDG_SEC_STATUS_VALID;
147 		else
148 			return;
149 		pcix_bdg_regs->pcix_bdg_stat = pci_config_get32(erpt_p->pe_hdl,
150 		    (pcix_bdg_cap_ptr + PCI_PCIX_BDG_STATUS));
151 		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
152 			pcix_bdg_regs->pcix_bdg_vflags |= PCIX_BDG_STATUS_VALID;
153 		else
154 			return;
155 		if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) {
156 			pcix_ecc_regs_t *pcix_bdg_ecc_regs;
157 
158 			for (i = 0; i < 2; i++) {
159 				pcix_bdg_ecc_regs =
160 				    pcix_bdg_regs->pcix_bdg_ecc_regs[i];
161 				pci_config_put32(erpt_p->pe_hdl,
162 				    (pcix_bdg_cap_ptr +
163 				    PCI_PCIX_BDG_ECC_STATUS), i);
164 				pcix_ecc_regs_gather(erpt_p,
165 				    pcix_bdg_ecc_regs,
166 				    pcix_bdg_cap_ptr, fme_flag);
167 			}
168 		}
169 	} else {
170 		pcix_error_regs_t *pcix_regs = (pcix_error_regs_t *)pe_regs;
171 		uint8_t pcix_cap_ptr;
172 
173 		pcix_cap_ptr = pcix_regs->pcix_cap_ptr;
174 
175 		pcix_regs->pcix_command = pci_config_get16(erpt_p->pe_hdl,
176 		    (pcix_cap_ptr + PCI_PCIX_COMMAND));
177 		pcix_regs->pcix_status = pci_config_get32(erpt_p->pe_hdl,
178 		    (pcix_cap_ptr + PCI_PCIX_STATUS));
179 		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
180 			pcix_regs->pcix_vflags |= PCIX_ERR_STATUS_VALID;
181 		else
182 			return;
183 		if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) {
184 			pcix_ecc_regs_t *pcix_ecc_regs =
185 			    pcix_regs->pcix_ecc_regs;
186 
187 			pcix_ecc_regs_gather(erpt_p, pcix_ecc_regs,
188 			    pcix_cap_ptr, fme_flag);
189 		}
190 	}
191 }
192 
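/*
 * pci_regs_gather: Capture the generic PCI error state (status and command
 * registers), the secondary status and bridge control registers for pci-pci
 * bridges, and any PCI-X state via pcix_regs_gather().
 */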
193 /*ARGSUSED*/
194 static void
195 pci_regs_gather(dev_info_t *dip, pci_erpt_t *erpt_p, int fme_flag)
196 {
197 	pci_error_regs_t *pci_regs = erpt_p->pe_pci_regs;
198 
199 	/*
200 	 * Start by reading all the error registers that are available for
201 	 * PCI and PCI Express, for both leaf devices and bridges/switches.
202 	 */
203 	pci_regs->pci_err_status = pci_config_get16(erpt_p->pe_hdl,
204 	    PCI_CONF_STAT);
205 	if (pci_config_check(erpt_p->pe_hdl, fme_flag) != DDI_FM_OK)
206 		return;
207 	pci_regs->pci_vflags |= PCI_ERR_STATUS_VALID;
208 	pci_regs->pci_cfg_comm = pci_config_get16(erpt_p->pe_hdl,
209 	    PCI_CONF_COMM);
210 	if (pci_config_check(erpt_p->pe_hdl, fme_flag) != DDI_FM_OK)
211 		return;
212 
213 	/*
214 	 * If this is a pci-pci bridge, grab the PCI bridge specific error registers.
215 	 */
216 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
217 		pci_regs->pci_bdg_regs->pci_bdg_sec_stat =
218 		    pci_config_get16(erpt_p->pe_hdl, PCI_BCNF_SEC_STATUS);
219 		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
220 			pci_regs->pci_bdg_regs->pci_bdg_vflags |=
221 			    PCI_BDG_SEC_STAT_VALID;
222 		pci_regs->pci_bdg_regs->pci_bdg_ctrl =
223 		    pci_config_get16(erpt_p->pe_hdl, PCI_BCNF_BCNTRL);
224 		if (pci_config_check(erpt_p->pe_hdl, fme_flag) == DDI_FM_OK)
225 			pci_regs->pci_bdg_regs->pci_bdg_vflags |=
226 			    PCI_BDG_CTRL_VALID;
227 	}
228 
229 	/* If pci-x device grab error registers */
230 	/* If this is a pci-x device, grab the PCI-X error registers */
231 		pcix_regs_gather(erpt_p, erpt_p->pe_regs, fme_flag);
232 
233 }
234 
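/*
 * pcix_regs_clear: Write back the captured PCI-X status values to clear the
 * error bits gathered by pcix_regs_gather(), then reset the associated
 * validity flags.
 */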
235 static void
236 pcix_regs_clear(pci_erpt_t *erpt_p, void *pe_regs)
237 {
238 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
239 		pcix_bdg_error_regs_t *pcix_bdg_regs =
240 		    (pcix_bdg_error_regs_t *)pe_regs;
241 		uint8_t pcix_bdg_cap_ptr;
242 		int i;
243 
244 		pcix_bdg_cap_ptr = pcix_bdg_regs->pcix_bdg_cap_ptr;
245 
246 		if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_SEC_STATUS_VALID)
247 			pci_config_put16(erpt_p->pe_hdl,
248 			    (pcix_bdg_cap_ptr + PCI_PCIX_SEC_STATUS),
249 			    pcix_bdg_regs->pcix_bdg_sec_stat);
250 
251 		if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_STATUS_VALID)
252 			pci_config_put32(erpt_p->pe_hdl,
253 			    (pcix_bdg_cap_ptr + PCI_PCIX_BDG_STATUS),
254 			    pcix_bdg_regs->pcix_bdg_stat);
255 
256 		pcix_bdg_regs->pcix_bdg_vflags = 0x0;
257 
258 		if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) {
259 			pcix_ecc_regs_t *pcix_bdg_ecc_regs;
260 			for (i = 0; i < 2; i++) {
261 				pcix_bdg_ecc_regs =
262 				    pcix_bdg_regs->pcix_bdg_ecc_regs[i];
263 
264 				if (pcix_bdg_ecc_regs->pcix_ecc_vflags &
265 				    PCIX_ERR_ECC_STS_VALID) {
266 					pci_config_put32(erpt_p->pe_hdl,
267 					    (pcix_bdg_cap_ptr +
268 					    PCI_PCIX_BDG_ECC_STATUS),
269 					    i);
270 
271 					pci_config_put32(erpt_p->pe_hdl,
272 					    (pcix_bdg_cap_ptr +
273 					    PCI_PCIX_BDG_ECC_STATUS),
274 					    pcix_bdg_ecc_regs->
275 					    pcix_ecc_ctlstat);
276 				}
277 				pcix_bdg_ecc_regs->pcix_ecc_vflags =
278 				    0x0;
279 			}
280 		}
281 	} else {
282 		pcix_error_regs_t *pcix_regs = (pcix_error_regs_t *)pe_regs;
283 		uint8_t pcix_cap_ptr;
284 
285 		pcix_cap_ptr = pcix_regs->pcix_cap_ptr;
286 
287 		if (pcix_regs->pcix_vflags & PCIX_ERR_STATUS_VALID)
288 			pci_config_put32(erpt_p->pe_hdl,
289 			    (pcix_cap_ptr + PCI_PCIX_STATUS),
290 			    pcix_regs->pcix_status);
291 
292 		pcix_regs->pcix_vflags = 0x0;
293 
294 		if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) {
295 			pcix_ecc_regs_t *pcix_ecc_regs =
296 			    pcix_regs->pcix_ecc_regs;
297 
298 			if (pcix_ecc_regs->pcix_ecc_vflags &
299 			    PCIX_ERR_ECC_STS_VALID)
300 				pci_config_put32(erpt_p->pe_hdl,
301 				    (pcix_cap_ptr + PCI_PCIX_ECC_STATUS),
302 				    pcix_ecc_regs->pcix_ecc_ctlstat);
303 
304 			pcix_ecc_regs->pcix_ecc_vflags = 0x0;
305 		}
306 	}
307 }
308 
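/*
 * pci_regs_clear: Clear the error bits captured by pci_regs_gather() by
 * writing back the saved status values, including the PCI-X and bridge
 * registers where present, and reset all validity flags.
 */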
309 static void
310 pci_regs_clear(pci_erpt_t *erpt_p)
311 {
312 	/*
313 	 * Finally clear the error bits
314 	 */
315 	if (erpt_p->pe_dflags & PCIX_DEV)
316 		pcix_regs_clear(erpt_p, erpt_p->pe_regs);
317 
318 	if (erpt_p->pe_pci_regs->pci_vflags & PCI_ERR_STATUS_VALID)
319 		pci_config_put16(erpt_p->pe_hdl, PCI_CONF_STAT,
320 		    erpt_p->pe_pci_regs->pci_err_status);
321 
322 	erpt_p->pe_pci_regs->pci_vflags = 0x0;
323 
324 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
325 		if (erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_vflags &
326 		    PCI_BDG_SEC_STAT_VALID)
327 			pci_config_put16(erpt_p->pe_hdl, PCI_BCNF_SEC_STATUS,
328 			    erpt_p->pe_pci_regs->pci_bdg_regs->
329 			    pci_bdg_sec_stat);
330 		if (erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_vflags &
331 		    PCI_BDG_CTRL_VALID)
332 			pci_config_put16(erpt_p->pe_hdl, PCI_BCNF_BCNTRL,
333 			    erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_ctrl);
334 
335 		erpt_p->pe_pci_regs->pci_bdg_regs->pci_bdg_vflags = 0x0;
336 	}
337 }
338 
339 /*
340  * pcix_ereport_setup: Allocate structures for PCI-X error handling and ereport
341  * generation.
342  */
343 /* ARGSUSED */
344 static void
345 pcix_ereport_setup(dev_info_t *dip, pci_erpt_t *erpt_p)
346 {
347 	uint16_t pcix_cap_ptr = PCI_CAP_NEXT_PTR_NULL;
348 	ddi_acc_handle_t eh;
349 	int i;
350 
351 	if (pci_config_setup(dip, &eh) == DDI_SUCCESS) {
352 		(void) PCI_CAP_LOCATE(eh, PCI_CAP_ID_PCIX, &pcix_cap_ptr);
353 		pci_config_teardown(&eh);
354 	}
355 
356 	if (pcix_cap_ptr != PCI_CAP_NEXT_PTR_NULL)
357 		erpt_p->pe_dflags |= PCIX_DEV;
358 	else
359 		return;
360 
361 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
362 		pcix_bdg_error_regs_t *pcix_bdg_regs;
363 
364 		erpt_p->pe_regs = kmem_zalloc(sizeof (pcix_bdg_error_regs_t),
365 		    KM_SLEEP);
366 		pcix_bdg_regs = (pcix_bdg_error_regs_t *)erpt_p->pe_regs;
367 		pcix_bdg_regs->pcix_bdg_cap_ptr = pcix_cap_ptr;
368 		pcix_bdg_regs->pcix_bdg_ver = pci_config_get16(erpt_p->pe_hdl,
369 		    pcix_cap_ptr + PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK;
370 		if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) {
371 			for (i = 0; i < 2; i++) {
372 				pcix_bdg_regs->pcix_bdg_ecc_regs[i] =
373 				    kmem_zalloc(sizeof (pcix_ecc_regs_t),
374 				    KM_SLEEP);
375 			}
376 		}
377 	} else {
378 		pcix_error_regs_t *pcix_regs;
379 
380 		erpt_p->pe_regs = kmem_zalloc(sizeof (pcix_error_regs_t),
381 		    KM_SLEEP);
382 		pcix_regs = (pcix_error_regs_t *)erpt_p->pe_regs;
383 		pcix_regs->pcix_cap_ptr = pcix_cap_ptr;
384 		pcix_regs->pcix_ver = pci_config_get16(erpt_p->pe_hdl,
385 		    pcix_cap_ptr + PCI_PCIX_COMMAND) & PCI_PCIX_VER_MASK;
386 		if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) {
387 			pcix_regs->pcix_ecc_regs = kmem_zalloc(
388 			    sizeof (pcix_ecc_regs_t), KM_SLEEP);
389 		}
390 	}
391 }
392 
393 /*
394  * pci_ereport_setup: Detect PCI device type and initialize structures to be
395  * used to generate ereports based on detected generic device errors.
396  */
397 void
398 pci_ereport_setup(dev_info_t *dip)
399 {
400 	struct dev_info *devi = DEVI(dip);
401 	struct i_ddi_fmhdl *fmhdl = devi->devi_fmhdl;
402 	pci_erpt_t *erpt_p;
403 	uint8_t pci_hdr_type;
404 	uint16_t pci_status;
405 	pci_regspec_t *pci_rp;
406 	int32_t len;
407 	uint32_t phys_hi;
408 
409 	/*
410 	 * If the device is not ereport capable then report an error against the
411 	 * driver for using this interface.
412 	 */
413 	if (!DDI_FM_EREPORT_CAP(ddi_fm_capable(dip)) &&
414 	    !DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) {
415 		i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL, DDI_SLEEP);
416 		return;
417 	}
418 
419 	/*
420 	 * ASSERT fmhdl exists and fh_bus_specific is NULL.
421 	 */
422 	ASSERT(fmhdl && (fmhdl->fh_bus_specific == NULL));
423 
424 	erpt_p = kmem_zalloc(sizeof (pci_erpt_t), KM_SLEEP);
425 
426 	if (pci_config_setup(dip, &erpt_p->pe_hdl) != DDI_SUCCESS)
427 		goto error;
428 
429 	erpt_p->pe_pci_regs = kmem_zalloc(sizeof (pci_error_regs_t), KM_SLEEP);
430 
431 	pci_status = pci_config_get16(erpt_p->pe_hdl, PCI_CONF_STAT);
432 	if (pci_config_check(erpt_p->pe_hdl, DDI_FM_ERR_UNEXPECTED) !=
433 	    DDI_FM_OK)
434 		goto error;
435 
436 	/*
437 	 * Get header type and record if device is a bridge.
438 	 */
439 	pci_hdr_type = pci_config_get8(erpt_p->pe_hdl, PCI_CONF_HEADER);
440 	if (pci_config_check(erpt_p->pe_hdl, DDI_FM_ERR_UNEXPECTED) !=
441 	    DDI_FM_OK)
442 		goto error;
443 
444 	/*
445 	 * Check to see if the PCI device is a bridge; if so, allocate a pci
446 	 * bridge error register structure.
447 	 */
448 	if ((pci_hdr_type & PCI_HEADER_TYPE_M) == PCI_HEADER_PPB) {
449 		erpt_p->pe_dflags |= PCI_BRIDGE_DEV;
450 		erpt_p->pe_pci_regs->pci_bdg_regs = kmem_zalloc(
451 		    sizeof (pci_bdg_error_regs_t), KM_SLEEP);
452 	}
453 
454 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg",
455 	    (caddr_t)&pci_rp, &len) == DDI_SUCCESS) {
456 		phys_hi = pci_rp->pci_phys_hi;
457 		kmem_free(pci_rp, len);
458 
459 		erpt_p->pe_bdf = (uint16_t)(PCI_REG_BDFR_G(phys_hi) >>
460 		    PCI_REG_FUNC_SHIFT);
461 	}
462 
463 	if (!(pci_status & PCI_STAT_CAP)) {
464 		goto done;
465 	}
466 
467 	/* Initialize structures for PCI-X devices. */
468 	pcix_ereport_setup(dip, erpt_p);
469 
470 done:
471 	pci_regs_gather(dip, erpt_p, DDI_FM_ERR_UNEXPECTED);
472 	pci_regs_clear(erpt_p);
473 
474 	/*
475 	 * Before returning, set fh_bus_specific to the completed pci_erpt_t
476 	 * structure.
477 	 */
478 	fmhdl->fh_bus_specific = (void *)erpt_p;
479 
480 	return;
481 error:
482 	if (erpt_p->pe_pci_regs)
483 		kmem_free(erpt_p->pe_pci_regs, sizeof (pci_error_regs_t));
484 	kmem_free(erpt_p, sizeof (pci_erpt_t));
485 	erpt_p = NULL;
486 }
487 
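/*
 * pcix_ereport_teardown: Free the PCI-X error register structures allocated
 * by pcix_ereport_setup().
 */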
488 static void
489 pcix_ereport_teardown(pci_erpt_t *erpt_p)
490 {
491 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
492 		pcix_bdg_error_regs_t *pcix_bdg_regs;
493 		uint16_t pcix_ver;
494 
495 		pcix_bdg_regs = (pcix_bdg_error_regs_t *)erpt_p->pe_regs;
496 		pcix_ver = pcix_bdg_regs->pcix_bdg_ver;
497 		if (PCIX_ECC_VER_CHECK(pcix_ver)) {
498 			int i;
499 			for (i = 0; i < 2; i++)
500 				kmem_free(pcix_bdg_regs->pcix_bdg_ecc_regs[i],
501 				    sizeof (pcix_ecc_regs_t));
502 		}
503 		kmem_free(erpt_p->pe_regs, sizeof (pcix_bdg_error_regs_t));
504 	} else {
505 		pcix_error_regs_t *pcix_regs;
506 		uint16_t pcix_ver;
507 
508 		pcix_regs = (pcix_error_regs_t *)erpt_p->pe_regs;
509 		pcix_ver = pcix_regs->pcix_ver;
510 		if (PCIX_ECC_VER_CHECK(pcix_ver)) {
511 			kmem_free(pcix_regs->pcix_ecc_regs,
512 			    sizeof (pcix_ecc_regs_t));
513 		}
514 		kmem_free(erpt_p->pe_regs, sizeof (pcix_error_regs_t));
515 	}
516 }
517 
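/*
 * pci_ereport_teardown: Undo pci_ereport_setup() - free the pci_erpt_t and
 * any PCI-X/bridge register structures hanging off it, tear down the config
 * access handle and clear fh_bus_specific.
 */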
518 void
519 pci_ereport_teardown(dev_info_t *dip)
520 {
521 	struct i_ddi_fmhdl *fmhdl = DEVI(dip)->devi_fmhdl;
522 	pci_erpt_t *erpt_p;
523 
524 	if (!DDI_FM_EREPORT_CAP(ddi_fm_capable(dip)) &&
525 	    !DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) {
526 		i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL, DDI_SLEEP);
527 	}
528 
529 	ASSERT(fmhdl);
530 
531 	erpt_p = (pci_erpt_t *)fmhdl->fh_bus_specific;
532 	if (erpt_p == NULL)
533 		return;
534 
535 	if (erpt_p->pe_dflags & PCIX_DEV)
536 		pcix_ereport_teardown(erpt_p);
537 	pci_config_teardown((ddi_acc_handle_t *)&erpt_p->pe_hdl);
538 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV)
539 		kmem_free(erpt_p->pe_pci_regs->pci_bdg_regs,
540 		    sizeof (pci_bdg_error_regs_t));
541 	kmem_free(erpt_p->pe_pci_regs, sizeof (pci_error_regs_t));
542 	kmem_free(erpt_p, sizeof (pci_erpt_t));
543 	fmhdl->fh_bus_specific = NULL;
544 
545 	/*
546 	 * The following sparc specific code should be removed once the pci_cap
547 	 * interfaces create the necessary properties for us.
548 	 */
549 }
550 
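/*
 * pcix_check_addr: Decode the PCI-X command captured in the ECC
 * control/status register and, for memory, I/O and config transactions,
 * record the faulting address (or the bdf for type 1 config cycles) in the
 * bus-specific error structure so that a handle lookup can be attempted.
 * Returns DDI_FM_FATAL for transaction types that cannot be attributed,
 * otherwise DDI_FM_UNKNOWN.
 */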
551 /*ARGSUSED*/
552 static int
553 pcix_check_addr(dev_info_t *dip, ddi_fm_error_t *derr,
554     pcix_ecc_regs_t *pcix_ecc_regs, int type)
555 {
556 	int cmd = (pcix_ecc_regs->pcix_ecc_ctlstat >> 16) & 0xf;
557 	uint64_t addr;
558 	pci_fme_bus_specific_t *pci_fme_bsp =
559 	    (pci_fme_bus_specific_t *)derr->fme_bus_specific;
560 
561 	addr = pcix_ecc_regs->pcix_ecc_secaddr;
562 	addr = addr << 32;
563 	addr |= pcix_ecc_regs->pcix_ecc_fstaddr;
564 
565 	switch (cmd) {
566 	case PCI_PCIX_CMD_INTR:
567 	case PCI_PCIX_CMD_SPEC:
568 		return (DDI_FM_FATAL);
569 	case PCI_PCIX_CMD_IORD:
570 	case PCI_PCIX_CMD_IOWR:
571 		pci_fme_bsp->pci_bs_addr = addr;
572 		pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID;
573 		pci_fme_bsp->pci_bs_type = type;
574 		return (DDI_FM_UNKNOWN);
575 	case PCI_PCIX_CMD_DEVID:
576 		return (DDI_FM_FATAL);
577 	case PCI_PCIX_CMD_MEMRD_DW:
578 	case PCI_PCIX_CMD_MEMWR:
579 	case PCI_PCIX_CMD_MEMRD_BL:
580 	case PCI_PCIX_CMD_MEMWR_BL:
581 		pci_fme_bsp->pci_bs_addr = addr;
582 		pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID;
583 		pci_fme_bsp->pci_bs_type = type;
584 		return (DDI_FM_UNKNOWN);
585 	case PCI_PCIX_CMD_CFRD:
586 	case PCI_PCIX_CMD_CFWR:
587 		/*
588 		 * For a type 1 config transaction we can find the bdf from the address.
589 		 */
590 		if ((addr & 3) == 1) {
591 			pci_fme_bsp->pci_bs_bdf = (addr >> 8) & 0xffffffff;
592 			pci_fme_bsp->pci_bs_flags |= PCI_BS_BDF_VALID;
593 			pci_fme_bsp->pci_bs_type = type;
594 		}
595 		return (DDI_FM_UNKNOWN);
596 	case PCI_PCIX_CMD_SPL:
597 	case PCI_PCIX_CMD_DADR:
598 		return (DDI_FM_UNKNOWN);
599 	case PCI_PCIX_CMD_MEMRDBL:
600 	case PCI_PCIX_CMD_MEMWRBL:
601 		pci_fme_bsp->pci_bs_addr = addr;
602 		pci_fme_bsp->pci_bs_flags |= PCI_BS_ADDR_VALID;
603 		pci_fme_bsp->pci_bs_type = type;
604 		return (DDI_FM_UNKNOWN);
605 	default:
606 		return (DDI_FM_FATAL);
607 	}
608 }
609 
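/*
 * pci_bdg_error_report: Post ereports for errors latched in the pci-pci
 * bridge secondary status and bridge control registers, queue target
 * ereports where a faulting access address is known, handle the
 * poke/cautious-put special cases, then dispatch to the children below the
 * bridge and return the combined severity.
 */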
610 /*ARGSUSED*/
611 static int
612 pci_bdg_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
613 {
614 	pci_bdg_error_regs_t *pci_bdg_regs = erpt_p->pe_pci_regs->pci_bdg_regs;
615 	int fatal = 0;
616 	int nonfatal = 0;
617 	int unknown = 0;
618 	int ok = 0;
619 	int ret = DDI_FM_OK;
620 	char buf[FM_MAX_CLASS];
621 	int i;
622 	pci_fme_bus_specific_t *pci_fme_bsp =
623 	    (pci_fme_bus_specific_t *)derr->fme_bus_specific;
624 
625 	if (derr->fme_flag != DDI_FM_ERR_UNEXPECTED)
626 		goto done;
627 
628 	if ((pci_bdg_regs->pci_bdg_vflags & PCI_BDG_CTRL_VALID) &&
629 	    (pci_bdg_regs->pci_bdg_ctrl & PCI_BCNF_BCNTRL_DTO_STAT)) {
630 		(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
631 		    PCI_ERROR_SUBCLASS, PCI_DTO);
632 		ddi_fm_ereport_post(dip, buf, derr->fme_ena,
633 		    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
634 		    PCI_SEC_CONFIG_STATUS, DATA_TYPE_UINT16,
635 		    pci_bdg_regs->pci_bdg_sec_stat, PCI_BCNTRL,
636 		    DATA_TYPE_UINT16, pci_bdg_regs->pci_bdg_ctrl, NULL);
637 		unknown++;
638 	}
639 
640 	if (pci_bdg_regs->pci_bdg_vflags & PCI_BDG_SEC_STAT_VALID) {
641 		for (i = 0; pci_bdg_err_tbl[i].err_class != NULL; i++) {
642 			if (pci_bdg_regs->pci_bdg_sec_stat &
643 			    pci_bdg_err_tbl[i].reg_bit) {
644 				(void) snprintf(buf, FM_MAX_CLASS, "%s.%s-%s",
645 				    PCI_ERROR_SUBCLASS, PCI_SEC_ERROR_SUBCLASS,
646 				    pci_bdg_err_tbl[i].err_class);
647 				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
648 				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
649 				    PCI_SEC_CONFIG_STATUS, DATA_TYPE_UINT16,
650 				    pci_bdg_regs->pci_bdg_sec_stat, PCI_BCNTRL,
651 				    DATA_TYPE_UINT16,
652 				    pci_bdg_regs->pci_bdg_ctrl, NULL);
653 				PCI_FM_SEV_INC(pci_bdg_err_tbl[i].flags);
654 				if (pci_fme_bsp && (pci_fme_bsp->pci_bs_flags &
655 				    PCI_BS_ADDR_VALID) &&
656 				    pci_fme_bsp->pci_bs_type == ACC_HANDLE &&
657 				    pci_bdg_err_tbl[i].terr_class)
658 					pci_target_enqueue(derr->fme_ena,
659 					    pci_bdg_err_tbl[i].terr_class,
660 					    PCI_ERROR_SUBCLASS,
661 					    pci_fme_bsp->pci_bs_addr);
662 			}
663 		}
664 	}
665 
666 done:
667 	/*
668 	 * Need to check for poke and cautious put. We already know peek
669 	 * and cautious get errors occurred (as we got a trap) and we know
670 	 * they are nonfatal.
671 	 */
672 	if (derr->fme_flag == DDI_FM_ERR_EXPECTED) {
673 		/*
674 		 * For cautious puts we treat all errors as nonfatal. Actually
675 		 * we set nonfatal for cautious gets as well - it doesn't do
676 		 * any harm.
677 		 */
678 		if (pci_bdg_regs->pci_bdg_sec_stat & (PCI_STAT_R_TARG_AB |
679 		    PCI_STAT_R_MAST_AB | PCI_STAT_S_PERROR | PCI_STAT_S_SYSERR))
680 			nonfatal++;
681 	}
682 	if (derr->fme_flag == DDI_FM_ERR_POKE) {
683 		/*
684 		 * Special case for pokes - we only consider master abort
685 		 * and target abort as nonfatal. SERR with no master abort is
686 		 * fatal, but a master/target abort can come in on a separate
687 		 * instance, so return unknown and let the parent determine
688 		 * whether it is nonfatal (if another child returned nonfatal,
689 		 * i.e. master or target abort) or fatal otherwise.
690 		 */
691 		if (pci_bdg_regs->pci_bdg_sec_stat & (PCI_STAT_R_TARG_AB |
692 		    PCI_STAT_R_MAST_AB))
693 			nonfatal++;
694 		if (erpt_p->pe_pci_regs->pci_err_status & PCI_STAT_S_SYSERR)
695 			unknown++;
696 	}
697 
698 	/*
699 	 * now check children below the bridge
700 	 */
701 	ret = ndi_fm_handler_dispatch(dip, NULL, derr);
702 	PCI_FM_SEV_INC(ret);
703 	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
704 	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
705 }
706 
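/*
 * pcix_ecc_error_report: Examine the captured PCI-X ECC state (for a bridge,
 * both register sets) and post ereports for address, attribute and data
 * phase ECC errors as well as secondary correctable/uncorrectable errors.
 * Correctable errors are considered ok; uncorrectable address/attribute
 * errors are fatal, and uncorrectable data errors are resolved via
 * pcix_check_addr().
 */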
707 static int
708 pcix_ecc_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
709     void *pe_regs)
710 {
711 	pcix_error_regs_t *pcix_regs;
712 	pcix_bdg_error_regs_t *pcix_bdg_regs;
713 	pcix_ecc_regs_t *pcix_ecc_regs;
714 	int bridge;
715 	int i;
716 	int ecc_phase;
717 	int ecc_corr;
718 	int sec_ue;
719 	int sec_ce;
720 	int fatal = 0;
721 	int nonfatal = 0;
722 	int unknown = 0;
723 	int ok = 0;
724 	char buf[FM_MAX_CLASS];
725 
726 	if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
727 		pcix_bdg_regs = (pcix_bdg_error_regs_t *)pe_regs;
728 		bridge = 1;
729 	} else {
730 		pcix_regs = (pcix_error_regs_t *)pe_regs;
731 		bridge = 0;
732 	}
733 
734 	for (i = 0; i < (bridge ? 2 : 1); i++) {
735 		int ret = DDI_FM_OK;
736 		pcix_ecc_regs = bridge ? pcix_bdg_regs->pcix_bdg_ecc_regs[i] :
737 		    pcix_regs->pcix_ecc_regs;
738 		if (pcix_ecc_regs->pcix_ecc_vflags & PCIX_ERR_ECC_STS_VALID) {
739 			ecc_phase = (pcix_ecc_regs->pcix_ecc_ctlstat &
740 			    PCI_PCIX_ECC_PHASE) >> 0x4;
741 			ecc_corr = (pcix_ecc_regs->pcix_ecc_ctlstat &
742 			    PCI_PCIX_ECC_CORR);
743 			sec_ue = (pcix_ecc_regs->pcix_ecc_ctlstat &
744 			    PCI_PCIX_ECC_S_UE);
745 			sec_ce = (pcix_ecc_regs->pcix_ecc_ctlstat &
746 			    PCI_PCIX_ECC_S_CE);
747 
748 			switch (ecc_phase) {
749 			case PCI_PCIX_ECC_PHASE_NOERR:
750 				break;
751 			case PCI_PCIX_ECC_PHASE_FADDR:
752 			case PCI_PCIX_ECC_PHASE_SADDR:
753 				PCI_FM_SEV_INC(ecc_corr ?  DDI_FM_OK :
754 				    DDI_FM_FATAL);
755 				(void) snprintf(buf, FM_MAX_CLASS,
756 				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
757 				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
758 				    ecc_corr ? PCIX_ECC_CE_ADDR :
759 				    PCIX_ECC_UE_ADDR);
760 				break;
761 			case PCI_PCIX_ECC_PHASE_ATTR:
762 				PCI_FM_SEV_INC(ecc_corr ?
763 				    DDI_FM_OK : DDI_FM_FATAL);
764 				(void) snprintf(buf, FM_MAX_CLASS,
765 				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
766 				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
767 				    ecc_corr ? PCIX_ECC_CE_ATTR :
768 				    PCIX_ECC_UE_ATTR);
769 				break;
770 			case PCI_PCIX_ECC_PHASE_DATA32:
771 			case PCI_PCIX_ECC_PHASE_DATA64:
772 				if (ecc_corr)
773 					ret = DDI_FM_OK;
774 				else {
775 					int type;
776 					pci_error_regs_t *pci_regs =
777 					    erpt_p->pe_pci_regs;
778 
779 					if (i) {
780 						if (pci_regs->pci_bdg_regs->
781 						    pci_bdg_sec_stat &
782 						    PCI_STAT_S_PERROR)
783 							type = ACC_HANDLE;
784 						else
785 							type = DMA_HANDLE;
786 					} else {
787 						if (pci_regs->pci_err_status &
788 						    PCI_STAT_S_PERROR)
789 							type = DMA_HANDLE;
790 						else
791 							type = ACC_HANDLE;
792 					}
793 					ret = pcix_check_addr(dip, derr,
794 					    pcix_ecc_regs, type);
795 				}
796 				PCI_FM_SEV_INC(ret);
797 
798 				(void) snprintf(buf, FM_MAX_CLASS,
799 				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
800 				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
801 				    ecc_corr ? PCIX_ECC_CE_DATA :
802 				    PCIX_ECC_UE_DATA);
803 				break;
804 			}
805 			if (ecc_phase)
806 				if (bridge)
807 					ddi_fm_ereport_post(dip, buf,
808 					    derr->fme_ena,
809 					    DDI_NOSLEEP, FM_VERSION,
810 					    DATA_TYPE_UINT8, 0,
811 					    PCIX_SEC_STATUS, DATA_TYPE_UINT16,
812 					    pcix_bdg_regs->pcix_bdg_sec_stat,
813 					    PCIX_BDG_STAT, DATA_TYPE_UINT32,
814 					    pcix_bdg_regs->pcix_bdg_stat,
815 					    PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32,
816 					    pcix_ecc_regs->pcix_ecc_ctlstat,
817 					    PCIX_ECC_ATTR, DATA_TYPE_UINT32,
818 					    pcix_ecc_regs->pcix_ecc_attr, NULL);
819 				else
820 					ddi_fm_ereport_post(dip, buf,
821 					    derr->fme_ena,
822 					    DDI_NOSLEEP, FM_VERSION,
823 					    DATA_TYPE_UINT8, 0,
824 					    PCIX_COMMAND, DATA_TYPE_UINT16,
825 					    pcix_regs->pcix_command,
826 					    PCIX_STATUS, DATA_TYPE_UINT32,
827 					    pcix_regs->pcix_status,
828 					    PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32,
829 					    pcix_ecc_regs->pcix_ecc_ctlstat,
830 					    PCIX_ECC_ATTR, DATA_TYPE_UINT32,
831 					    pcix_ecc_regs->pcix_ecc_attr, NULL);
832 			if (sec_ce || sec_ue) {
833 				(void) snprintf(buf, FM_MAX_CLASS,
834 				    "%s.%s%s", PCIX_ERROR_SUBCLASS,
835 				    i ? PCIX_SEC_ERROR_SUBCLASS : "",
836 				    sec_ce ? PCIX_ECC_S_CE : PCIX_ECC_S_UE);
837 				if (bridge)
838 					ddi_fm_ereport_post(dip, buf,
839 					    derr->fme_ena,
840 					    DDI_NOSLEEP, FM_VERSION,
841 					    DATA_TYPE_UINT8, 0,
842 					    PCIX_SEC_STATUS, DATA_TYPE_UINT16,
843 					    pcix_bdg_regs->pcix_bdg_sec_stat,
844 					    PCIX_BDG_STAT, DATA_TYPE_UINT32,
845 					    pcix_bdg_regs->pcix_bdg_stat,
846 					    PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32,
847 					    pcix_ecc_regs->pcix_ecc_ctlstat,
848 					    PCIX_ECC_ATTR, DATA_TYPE_UINT32,
849 					    pcix_ecc_regs->pcix_ecc_attr, NULL);
850 				else
851 					ddi_fm_ereport_post(dip, buf,
852 					    derr->fme_ena,
853 					    DDI_NOSLEEP, FM_VERSION,
854 					    DATA_TYPE_UINT8, 0,
855 					    PCIX_COMMAND, DATA_TYPE_UINT16,
856 					    pcix_regs->pcix_command,
857 					    PCIX_STATUS, DATA_TYPE_UINT32,
858 					    pcix_regs->pcix_status,
859 					    PCIX_ECC_CTLSTAT, DATA_TYPE_UINT32,
860 					    pcix_ecc_regs->pcix_ecc_ctlstat,
861 					    PCIX_ECC_ATTR, DATA_TYPE_UINT32,
862 					    pcix_ecc_regs->pcix_ecc_attr, NULL);
863 				PCI_FM_SEV_INC(sec_ue ? DDI_FM_FATAL :
864 				    DDI_FM_OK);
865 			}
866 		}
867 	}
868 	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
869 	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
870 }
871 
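/*
 * pcix_bdg_error_report: Post ereports for error bits latched in the PCI-X
 * bridge status and secondary status registers and, for ECC-capable
 * versions, report ECC errors via pcix_ecc_error_report().  Returns the
 * combined severity.
 */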
872 static int
873 pcix_bdg_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p,
874     void *pe_regs)
875 {
876 	pcix_bdg_error_regs_t *pcix_bdg_regs = (pcix_bdg_error_regs_t *)pe_regs;
877 	int fatal = 0;
878 	int nonfatal = 0;
879 	int unknown = 0;
880 	int ok = 0;
881 	char buf[FM_MAX_CLASS];
882 	int i;
883 
884 	if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_STATUS_VALID) {
885 		for (i = 0; pcix_err_tbl[i].err_class != NULL; i++) {
886 			if ((pcix_bdg_regs->pcix_bdg_stat &
887 			    pcix_err_tbl[i].reg_bit)) {
888 				(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
889 				    PCIX_ERROR_SUBCLASS,
890 				    pcix_err_tbl[i].err_class);
891 				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
892 				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
893 				    PCIX_SEC_STATUS, DATA_TYPE_UINT16,
894 				    pcix_bdg_regs->pcix_bdg_sec_stat,
895 				    PCIX_BDG_STAT, DATA_TYPE_UINT32,
896 				    pcix_bdg_regs->pcix_bdg_stat, NULL);
897 				PCI_FM_SEV_INC(pcix_err_tbl[i].flags);
898 			}
899 		}
900 	}
901 
902 	if (pcix_bdg_regs->pcix_bdg_vflags & PCIX_BDG_SEC_STATUS_VALID) {
903 		for (i = 0; pcix_sec_err_tbl[i].err_class != NULL; i++) {
904 			if ((pcix_bdg_regs->pcix_bdg_sec_stat &
905 			    pcix_sec_err_tbl[i].reg_bit)) {
906 				(void) snprintf(buf, FM_MAX_CLASS, "%s.%s%s",
907 				    PCIX_ERROR_SUBCLASS,
908 				    PCIX_SEC_ERROR_SUBCLASS,
909 				    pcix_sec_err_tbl[i].err_class);
910 				ddi_fm_ereport_post(dip, buf, derr->fme_ena,
911 				    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
912 				    PCIX_SEC_STATUS, DATA_TYPE_UINT16,
913 				    pcix_bdg_regs->pcix_bdg_sec_stat,
914 				    PCIX_BDG_STAT, DATA_TYPE_UINT32,
915 				    pcix_bdg_regs->pcix_bdg_stat, NULL);
916 				PCI_FM_SEV_INC(pcix_sec_err_tbl[i].flags);
917 			}
918 		}
919 	}
920 
921 	/* Log/Handle ECC errors */
922 	if (PCIX_ECC_VER_CHECK(pcix_bdg_regs->pcix_bdg_ver)) {
923 		int ret;
924 
925 		ret = pcix_ecc_error_report(dip, derr, erpt_p,
926 		    (void *)pcix_bdg_regs);
927 		PCI_FM_SEV_INC(ret);
928 	}
929 	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
930 	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
931 }
932 
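/*
 * pcix_error_report: Post ereports for error bits latched in the PCI-X
 * status register of a non-bridge function and, for ECC-capable versions,
 * report ECC errors via pcix_ecc_error_report().  Returns the combined
 * severity.
 */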
933 static int
934 pcix_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
935 {
936 	pcix_error_regs_t *pcix_regs = (pcix_error_regs_t *)erpt_p->pe_regs;
937 	int fatal = 0;
938 	int nonfatal = 0;
939 	int unknown = 0;
940 	int ok = 0;
941 	char buf[FM_MAX_CLASS];
942 	int i;
943 
944 	if (pcix_regs->pcix_vflags & PCIX_ERR_STATUS_VALID) {
945 		for (i = 0; pcix_err_tbl[i].err_class != NULL; i++) {
946 			if (!(pcix_regs->pcix_status & pcix_err_tbl[i].reg_bit))
947 				continue;
948 
949 			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
950 			    PCIX_ERROR_SUBCLASS, pcix_err_tbl[i].err_class);
951 			ddi_fm_ereport_post(dip, buf, derr->fme_ena,
952 			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
953 			    PCIX_COMMAND, DATA_TYPE_UINT16,
954 			    pcix_regs->pcix_command, PCIX_STATUS,
955 			    DATA_TYPE_UINT32, pcix_regs->pcix_status,
956 			    NULL);
957 			PCI_FM_SEV_INC(pcix_err_tbl[i].flags);
958 		}
959 	}
960 	/* Log/Handle ECC errors */
961 	if (PCIX_ECC_VER_CHECK(pcix_regs->pcix_ver)) {
962 		int ret = pcix_ecc_error_report(dip, derr, erpt_p,
963 		    (void *)pcix_regs);
964 		PCI_FM_SEV_INC(ret);
965 	}
966 
967 	return (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
968 	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
969 }
970 
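/*
 * pci_error_report: Top-level error reporting routine called from
 * pci_ereport_post().  Posts ereports for generic PCI errors, hands off to
 * the PCI-X and bridge specific routines, attempts to match the error to an
 * access or DMA handle via the bus-specific address/bdf, and records the
 * combined severity in derr->fme_status.
 */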
971 static void
972 pci_error_report(dev_info_t *dip, ddi_fm_error_t *derr, pci_erpt_t *erpt_p)
973 {
974 	int fatal = 0;
975 	int nonfatal = 0;
976 	int unknown = 0;
977 	int ok = 0;
978 	char buf[FM_MAX_CLASS];
979 	int i;
980 
981 	if (derr->fme_flag == DDI_FM_ERR_UNEXPECTED) {
982 		/*
983 		 * Log generic PCI errors.
984 		 */
985 		for (i = 0; pci_err_tbl[i].err_class != NULL; i++) {
986 			if (!(erpt_p->pe_pci_regs->pci_err_status &
987 			    pci_err_tbl[i].reg_bit) ||
988 			    !(erpt_p->pe_pci_regs->pci_vflags &
989 			    PCI_ERR_STATUS_VALID))
990 				continue;
991 			/*
992 			 * Generate an ereport for this error bit.
993 			 */
994 			(void) snprintf(buf, FM_MAX_CLASS, "%s.%s",
995 			    PCI_ERROR_SUBCLASS, pci_err_tbl[i].err_class);
996 			ddi_fm_ereport_post(dip, buf, derr->fme_ena,
997 			    DDI_NOSLEEP, FM_VERSION, DATA_TYPE_UINT8, 0,
998 			    PCI_CONFIG_STATUS, DATA_TYPE_UINT16,
999 			    erpt_p->pe_pci_regs->pci_err_status,
1000 			    PCI_CONFIG_COMMAND, DATA_TYPE_UINT16,
1001 			    erpt_p->pe_pci_regs->pci_cfg_comm, NULL);
1002 
1003 			PCI_FM_SEV_INC(pci_err_tbl[i].flags);
1004 		}
1005 		if (erpt_p->pe_dflags & PCIX_DEV) {
1006 			if (erpt_p->pe_dflags & PCI_BRIDGE_DEV) {
1007 				int ret = pcix_bdg_error_report(dip, derr,
1008 				    erpt_p, erpt_p->pe_regs);
1009 				PCI_FM_SEV_INC(ret);
1010 			} else {
1011 				int ret = pcix_error_report(dip, derr, erpt_p);
1012 				PCI_FM_SEV_INC(ret);
1013 			}
1014 		}
1015 	}
1016 
1017 	if ((erpt_p->pe_dflags & PCI_BRIDGE_DEV)) {
1018 		int ret = pci_bdg_error_report(dip, derr, erpt_p);
1019 		PCI_FM_SEV_INC(ret);
1020 	}
1021 
1022 	if (derr->fme_flag == DDI_FM_ERR_UNEXPECTED) {
1023 		pci_fme_bus_specific_t *pci_fme_bsp;
1024 		int ret = DDI_FM_UNKNOWN;
1025 
1026 		pci_fme_bsp = (pci_fme_bus_specific_t *)derr->fme_bus_specific;
1027 		if (pci_fme_bsp->pci_bs_flags & PCI_BS_ADDR_VALID) {
1028 			ret = ndi_fmc_entry_error(dip,
1029 			    pci_fme_bsp->pci_bs_type, derr,
1030 			    (void *)&pci_fme_bsp->pci_bs_addr);
1031 			PCI_FM_SEV_INC(ret);
1032 		}
1033 		/*
1034 		 * If we didn't find the handle using an addr, try using bdf.
1035 		 * Note we don't do this where the bdf is for a
1036 		 * device behind a pciex/pci bridge as the bridge may have
1037 		 * fabricated the bdf.
1038 		 */
1039 		if (ret == DDI_FM_UNKNOWN &&
1040 		    (pci_fme_bsp->pci_bs_flags & PCI_BS_BDF_VALID) &&
1041 		    pci_fme_bsp->pci_bs_bdf == erpt_p->pe_bdf) {
1042 			ret = ndi_fmc_entry_error_all(dip,
1043 			    pci_fme_bsp->pci_bs_type, derr);
1044 			PCI_FM_SEV_INC(ret);
1045 		}
1046 	}
1047 
1048 	derr->fme_status = (fatal ? DDI_FM_FATAL : (nonfatal ? DDI_FM_NONFATAL :
1049 	    (unknown ? DDI_FM_UNKNOWN : DDI_FM_OK)));
1050 }
1051 
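/*
 * pci_ereport_post: Gather, report and clear the error state of a PCI or
 * PCI-X device on behalf of a nexus error handler.  The caller's
 * ddi_fm_error_t is updated with the resulting severity and, if xx_status
 * is non-NULL, with the raw PCI status register contents.  On PCI Express
 * systems this is a no-op.
 *
 * As an illustrative sketch only (the exact integration point varies by
 * nexus driver), a bus error handler might do roughly:
 *
 *	uint16_t status;
 *
 *	pci_ereport_post(dip, derr, &status);
 *	if (derr->fme_status == DDI_FM_FATAL)
 *		(escalate, e.g. panic or offline the device);
 */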
1052 void
1053 pci_ereport_post(dev_info_t *dip, ddi_fm_error_t *derr, uint16_t *xx_status)
1054 {
1055 	struct i_ddi_fmhdl *fmhdl;
1056 	pci_erpt_t *erpt_p;
1057 	ddi_fm_error_t de;
1058 	pci_fme_bus_specific_t pci_fme_bs;
1059 
1060 	/*
1061 	 * On PCI Express systems, all error handling and ereport generation are
1062 	 * done via the PCIe misc module.  This function is a no-op for PCIe
1063 	 * systems.  To tell whether a system is a PCI or PCIe system, check
1064 	 * that the bus private data exists.  If it exists, this is a PCIe system.
1065 	 */
1066 	if (ndi_get_bus_private(dip, B_TRUE)) {
1067 		derr->fme_status = DDI_FM_OK;
1068 		if (xx_status != NULL)
1069 			*xx_status = 0x0;
1070 
1071 		return;
1072 	}
1073 
1074 	fmhdl = DEVI(dip)->devi_fmhdl;
1075 	if (!DDI_FM_EREPORT_CAP(ddi_fm_capable(dip)) &&
1076 	    !DDI_FM_ERRCB_CAP(ddi_fm_capable(dip))) {
1077 		i_ddi_drv_ereport_post(dip, DVR_EFMCAP, NULL, DDI_NOSLEEP);
1078 		return;
1079 	}
1080 
1081 	/*
1082 	 * copy in the ddi_fm_error_t structure in case it's VER0
1083 	 */
1084 	de.fme_version = derr->fme_version;
1085 	de.fme_status = derr->fme_status;
1086 	de.fme_flag = derr->fme_flag;
1087 	de.fme_ena = derr->fme_ena;
1088 	de.fme_acc_handle = derr->fme_acc_handle;
1089 	de.fme_dma_handle = derr->fme_dma_handle;
1090 	de.fme_bus_specific = derr->fme_bus_specific;
1091 	if (derr->fme_version >= DDI_FME_VER1)
1092 		de.fme_bus_type = derr->fme_bus_type;
1093 	else
1094 		de.fme_bus_type = DDI_FME_BUS_TYPE_DFLT;
1095 	if (de.fme_bus_type == DDI_FME_BUS_TYPE_DFLT) {
1096 		/*
1097 		 * If this is the first pci device we've found, convert
1098 		 * fme_bus_specific to DDI_FME_BUS_TYPE_PCI.
1099 		 */
1100 		bzero(&pci_fme_bs, sizeof (pci_fme_bs));
1101 		if (de.fme_bus_specific) {
1102 			/*
1103 			 * the cpu passed us an addr - this can be used to look
1104 			 * up an access handle
1105 			 */
1106 			pci_fme_bs.pci_bs_addr = (uintptr_t)de.fme_bus_specific;
1107 			pci_fme_bs.pci_bs_type = ACC_HANDLE;
1108 			pci_fme_bs.pci_bs_flags |= PCI_BS_ADDR_VALID;
1109 		}
1110 		de.fme_bus_specific = (void *)&pci_fme_bs;
1111 		de.fme_bus_type = DDI_FME_BUS_TYPE_PCI;
1112 	}
1113 
1114 	ASSERT(fmhdl);
1115 
1116 	if (de.fme_ena == NULL)
1117 		de.fme_ena = fm_ena_generate(0, FM_ENA_FMT1);
1118 
1119 	erpt_p = (pci_erpt_t *)fmhdl->fh_bus_specific;
1120 	if (erpt_p == NULL)
1121 		return;
1122 
1123 	pci_regs_gather(dip, erpt_p, de.fme_flag);
1124 	pci_error_report(dip, &de, erpt_p);
1125 	pci_regs_clear(erpt_p);
1126 
1127 	derr->fme_status = de.fme_status;
1128 	derr->fme_ena = de.fme_ena;
1129 	derr->fme_acc_handle = de.fme_acc_handle;
1130 	derr->fme_dma_handle = de.fme_dma_handle;
1131 	if (xx_status != NULL)
1132 		*xx_status = erpt_p->pe_pci_regs->pci_err_status;
1133 }
1134 
1135 /*
1136  * private version of walk_devs() that can be used during panic. No
1137  * sleeping or locking required.
1138  */
1139 static int
1140 pci_fm_walk_devs(dev_info_t *dip, int (*f)(dev_info_t *, void *), void *arg)
1141 {
1142 	while (dip) {
1143 		switch ((*f)(dip, arg)) {
1144 		case DDI_WALK_TERMINATE:
1145 			return (DDI_WALK_TERMINATE);
1146 		case DDI_WALK_CONTINUE:
1147 			if (pci_fm_walk_devs(ddi_get_child(dip), f,
1148 			    arg) == DDI_WALK_TERMINATE)
1149 				return (DDI_WALK_TERMINATE);
1150 			break;
1151 		case DDI_WALK_PRUNECHILD:
1152 			break;
1153 		}
1154 		dip = ddi_get_next_sibling(dip);
1155 	}
1156 	return (DDI_WALK_CONTINUE);
1157 }
1158 
1159 /*
1160  * need special version of ddi_fm_ereport_post() as the leaf driver may
1161  * not be hardened.
1162  */
1163 static void
1164 pci_fm_ereport_post(dev_info_t *dip, const char *error_class, uint64_t ena,
1165     uint8_t version, ...)
1166 {
1167 	char *name;
1168 	char device_path[MAXPATHLEN];
1169 	char ddi_error_class[FM_MAX_CLASS];
1170 	nvlist_t *ereport, *detector;
1171 	nv_alloc_t *nva;
1172 	errorq_elem_t *eqep;
1173 	va_list ap;
1174 
1175 	if (panicstr) {
1176 		eqep = errorq_reserve(ereport_errorq);
1177 		if (eqep == NULL)
1178 			return;
1179 		ereport = errorq_elem_nvl(ereport_errorq, eqep);
1180 		nva = errorq_elem_nva(ereport_errorq, eqep);
1181 		detector = fm_nvlist_create(nva);
1182 	} else {
1183 		ereport = fm_nvlist_create(NULL);
1184 		detector = fm_nvlist_create(NULL);
1185 	}
1186 
1187 	(void) ddi_pathname(dip, device_path);
1188 	fm_fmri_dev_set(detector, FM_DEV_SCHEME_VERSION, NULL,
1189 	    device_path, NULL, NULL);
1190 	(void) snprintf(ddi_error_class, FM_MAX_CLASS, "%s.%s",
1191 	    DDI_IO_CLASS, error_class);
1192 	fm_ereport_set(ereport, version, ddi_error_class, ena, detector, NULL);
1193 
1194 	va_start(ap, version);
1195 	name = va_arg(ap, char *);
1196 	(void) i_fm_payload_set(ereport, name, ap);
1197 	va_end(ap);
1198 
1199 	if (panicstr) {
1200 		errorq_commit(ereport_errorq, eqep, ERRORQ_SYNC);
1201 	} else {
1202 		(void) fm_ereport_post(ereport, EVCH_TRYHARD);
1203 		fm_nvlist_destroy(ereport, FM_NVA_FREE);
1204 		fm_nvlist_destroy(detector, FM_NVA_FREE);
1205 	}
1206 }
1207 
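/*
 * pci_check_regs: Tree walk callback.  Check whether the pci address in
 * tgt_err falls within this device's config space (matching bus/dev/func in
 * the reg property) or within one of the memory/IO mappings described by
 * its reg or assigned-addresses properties.  On a match, record the dip and
 * terminate the walk.
 */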
1208 static int
1209 pci_check_regs(dev_info_t *dip, void *arg)
1210 {
1211 	int reglen;
1212 	int rn;
1213 	int totreg;
1214 	pci_regspec_t *drv_regp;
1215 	pci_target_err_t *tgt_err = (pci_target_err_t *)arg;
1216 
1217 	if (tgt_err->tgt_pci_space == PCI_REG_ADDR_G(PCI_ADDR_CONFIG)) {
1218 		/*
1219 		 * For config space, we need to check whether the given address
1220 		 * is a valid config space address for this device, based
1221 		 * on pci_phys_hi of the config space entry in the reg property.
1222 		 */
1223 		if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
1224 		    "reg", (caddr_t)&drv_regp, &reglen) != DDI_SUCCESS)
1225 			return (DDI_WALK_CONTINUE);
1226 
1227 		totreg = reglen / sizeof (pci_regspec_t);
1228 		for (rn = 0; rn < totreg; rn++) {
1229 			if (tgt_err->tgt_pci_space ==
1230 			    PCI_REG_ADDR_G(drv_regp[rn].pci_phys_hi) &&
1231 			    (tgt_err->tgt_pci_addr & (PCI_REG_BUS_M |
1232 			    PCI_REG_DEV_M | PCI_REG_FUNC_M)) ==
1233 			    (drv_regp[rn].pci_phys_hi & (PCI_REG_BUS_M |
1234 			    PCI_REG_DEV_M | PCI_REG_FUNC_M))) {
1235 				tgt_err->tgt_dip = dip;
1236 				kmem_free(drv_regp, reglen);
1237 				return (DDI_WALK_TERMINATE);
1238 			}
1239 		}
1240 		kmem_free(drv_regp, reglen);
1241 	} else {
1242 		/*
1243 		 * For non-config space, we need to check the reg property for
1244 		 * any non-relocatable mapping, otherwise check
1245 		 * assigned-addresses.
1246 		 */
1247 		if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
1248 		    "reg", (caddr_t)&drv_regp, &reglen) != DDI_SUCCESS)
1249 			return (DDI_WALK_CONTINUE);
1250 
1251 		totreg = reglen / sizeof (pci_regspec_t);
1252 		for (rn = 0; rn < totreg; rn++) {
1253 			if ((drv_regp[rn].pci_phys_hi & PCI_RELOCAT_B) &&
1254 			    (tgt_err->tgt_pci_space == TGT_PCI_SPACE_UNKNOWN ||
1255 			    tgt_err->tgt_pci_space ==
1256 			    PCI_REG_ADDR_G(drv_regp[rn].pci_phys_hi)) &&
1257 			    (tgt_err->tgt_pci_addr >=
1258 			    (uint64_t)drv_regp[rn].pci_phys_low +
1259 			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32)) &&
1260 			    (tgt_err->tgt_pci_addr <
1261 			    (uint64_t)drv_regp[rn].pci_phys_low +
1262 			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32) +
1263 			    (uint64_t)drv_regp[rn].pci_size_low +
1264 			    ((uint64_t)drv_regp[rn].pci_size_hi << 32))) {
1265 				tgt_err->tgt_dip = dip;
1266 				kmem_free(drv_regp, reglen);
1267 				return (DDI_WALK_TERMINATE);
1268 			}
1269 		}
1270 		kmem_free(drv_regp, reglen);
1271 
1272 		if (ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
1273 		    "assigned-addresses", (caddr_t)&drv_regp, &reglen) !=
1274 		    DDI_SUCCESS)
1275 			return (DDI_WALK_CONTINUE);
1276 
1277 		totreg = reglen / sizeof (pci_regspec_t);
1278 		for (rn = 0; rn < totreg; rn++) {
1279 			if ((tgt_err->tgt_pci_space == TGT_PCI_SPACE_UNKNOWN ||
1280 			    tgt_err->tgt_pci_space ==
1281 			    PCI_REG_ADDR_G(drv_regp[rn].pci_phys_hi)) &&
1282 			    (tgt_err->tgt_pci_addr >=
1283 			    (uint64_t)drv_regp[rn].pci_phys_low +
1284 			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32)) &&
1285 			    (tgt_err->tgt_pci_addr <
1286 			    (uint64_t)drv_regp[rn].pci_phys_low +
1287 			    ((uint64_t)drv_regp[rn].pci_phys_mid << 32) +
1288 			    (uint64_t)drv_regp[rn].pci_size_low +
1289 			    ((uint64_t)drv_regp[rn].pci_size_hi << 32))) {
1290 				tgt_err->tgt_dip = dip;
1291 				kmem_free(drv_regp, reglen);
1292 				return (DDI_WALK_TERMINATE);
1293 			}
1294 		}
1295 		kmem_free(drv_regp, reglen);
1296 	}
1297 	return (DDI_WALK_CONTINUE);
1298 }
1299 
1300 /*
1301  * pci_fix_ranges - fixes the config space entry of the "ranges"
1302  * property on psycho+ platforms.  (if changing this function please make sure
1303  * to change the pci_fix_ranges function in pcipsy.c)
1304  */
1305 /*ARGSUSED*/
1306 static void
1307 pci_fix_ranges(dev_info_t *dip, pci_ranges_t *pci_ranges, int nrange)
1308 {
1309 #if defined(__sparc)
1310 	char *name = ddi_binding_name(dip);
1311 
1312 	if ((strcmp(name, "pci108e,8000") == 0) ||
1313 	    (strcmp(name, "pci108e,a000") == 0) ||
1314 	    (strcmp(name, "pci108e,a001") == 0)) {
1315 		int i;
1316 		for (i = 0; i < nrange; i++, pci_ranges++)
1317 			if ((pci_ranges->child_high & PCI_REG_ADDR_M) ==
1318 			    PCI_ADDR_CONFIG)
1319 				pci_ranges->parent_low |=
1320 				    pci_ranges->child_high;
1321 	}
1322 #endif
1323 }
1324 
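/*
 * pci_check_ranges: Tree walk callback applied to top-level pci/pciex nexus
 * nodes.  Translate the captured physical address to a pci address using the
 * nexus "ranges" property (checking bus-range for config space), then walk
 * the children with pci_check_regs() to find the target device.
 */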
1325 static int
1326 pci_check_ranges(dev_info_t *dip, void *arg)
1327 {
1328 	uint64_t range_parent_begin;
1329 	uint64_t range_parent_size;
1330 	uint64_t range_parent_end;
1331 	uint32_t space_type;
1332 	uint32_t bus_num;
1333 	uint32_t range_offset;
1334 	pci_ranges_t *pci_ranges, *rangep;
1335 	pci_bus_range_t *pci_bus_rangep;
1336 	int pci_ranges_length;
1337 	int nrange;
1338 	pci_target_err_t *tgt_err = (pci_target_err_t *)arg;
1339 	int i, size;
1340 	if (strcmp(ddi_node_name(dip), "pci") != 0 &&
1341 	    strcmp(ddi_node_name(dip), "pciex") != 0)
1342 		return (DDI_WALK_CONTINUE);
1343 
1344 	/*
1345 	 * Get the ranges property. Note we only look at the top-level pci
1346 	 * node (the host bridge), which has a ranges property of type
1347 	 * pci_ranges_t, not at pci-pci bridges.
1348 	 */
1349 	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "ranges",
1350 	    (caddr_t)&pci_ranges, &pci_ranges_length) != DDI_SUCCESS) {
1351 		/*
1352 		 * no ranges property - no translation needed
1353 		 */
1354 		tgt_err->tgt_pci_addr = tgt_err->tgt_err_addr;
1355 		tgt_err->tgt_pci_space = TGT_PCI_SPACE_UNKNOWN;
1356 		if (panicstr)
1357 			(void) pci_fm_walk_devs(ddi_get_child(dip),
1358 			    pci_check_regs, (void *)tgt_err);
1359 		else {
1360 			int circ = 0;
1361 			ndi_devi_enter(dip, &circ);
1362 			ddi_walk_devs(ddi_get_child(dip), pci_check_regs,
1363 			    (void *)tgt_err);
1364 			ndi_devi_exit(dip, circ);
1365 		}
1366 		if (tgt_err->tgt_dip != NULL)
1367 			return (DDI_WALK_TERMINATE);
1368 		return (DDI_WALK_PRUNECHILD);
1369 	}
1370 	nrange = pci_ranges_length / sizeof (pci_ranges_t);
1371 	rangep = pci_ranges;
1372 
1373 	/* Need to fix the pci ranges property for psycho based systems */
1374 	pci_fix_ranges(dip, pci_ranges, nrange);
1375 
1376 	for (i = 0; i < nrange; i++, rangep++) {
1377 		range_parent_begin = ((uint64_t)rangep->parent_high << 32) +
1378 		    rangep->parent_low;
1379 		range_parent_size = ((uint64_t)rangep->size_high << 32) +
1380 		    rangep->size_low;
1381 		range_parent_end = range_parent_begin + range_parent_size - 1;
1382 
1383 		if ((tgt_err->tgt_err_addr < range_parent_begin) ||
1384 		    (tgt_err->tgt_err_addr > range_parent_end)) {
1385 			/* Not in range */
1386 			continue;
1387 		}
1388 		space_type = PCI_REG_ADDR_G(rangep->child_high);
1389 		if (space_type == PCI_REG_ADDR_G(PCI_ADDR_CONFIG)) {
1390 			/* Config space address - check bus range */
1391 			range_offset = tgt_err->tgt_err_addr -
1392 			    range_parent_begin;
1393 			bus_num = PCI_REG_BUS_G(range_offset);
1394 			if (ddi_getlongprop(DDI_DEV_T_ANY, dip,
1395 			    DDI_PROP_DONTPASS, "bus-range",
1396 			    (caddr_t)&pci_bus_rangep, &size) != DDI_SUCCESS) {
1397 				continue;
1398 			}
1399 			if ((bus_num < pci_bus_rangep->lo) ||
1400 			    (bus_num > pci_bus_rangep->hi)) {
1401 				/*
1402 				 * Bus number not appropriate for this
1403 				 * pci nexus.
1404 				 */
1405 				kmem_free(pci_bus_rangep, size);
1406 				continue;
1407 			}
1408 			kmem_free(pci_bus_rangep, size);
1409 		}
1410 
1411 		/* We have a match if we get here - compute pci address */
1412 		tgt_err->tgt_pci_addr = tgt_err->tgt_err_addr -
1413 		    range_parent_begin;
1414 		tgt_err->tgt_pci_addr += (((uint64_t)rangep->child_mid << 32) +
1415 		    rangep->child_low);
1416 		tgt_err->tgt_pci_space = space_type;
1417 		if (panicstr)
1418 			(void) pci_fm_walk_devs(ddi_get_child(dip),
1419 			    pci_check_regs, (void *)tgt_err);
1420 		else {
1421 			int circ = 0;
1422 			ndi_devi_enter(dip, &circ);
1423 			ddi_walk_devs(ddi_get_child(dip), pci_check_regs,
1424 			    (void *)tgt_err);
1425 			ndi_devi_exit(dip, circ);
1426 		}
1427 		if (tgt_err->tgt_dip != NULL) {
1428 			kmem_free(pci_ranges, pci_ranges_length);
1429 			return (DDI_WALK_TERMINATE);
1430 		}
1431 	}
1432 	kmem_free(pci_ranges, pci_ranges_length);
1433 	return (DDI_WALK_PRUNECHILD);
1434 }
1435 
1436 /*
1437  * Function used to drain pci_target_queue, either during panic or after the
1438  * soft interrupt is generated, to produce target device ereports based on the
1439  * captured physical addresses.
1440  */
1441 /*ARGSUSED*/
1442 static void
1443 pci_target_drain(void *private_p, pci_target_err_t *tgt_err)
1444 {
1445 	char buf[FM_MAX_CLASS];
1446 
1447 	/*
1448 	 * The following assumes that all pci-pci bridge devices
1449 	 * are configured as transparent. Find the top-level pci
1450 	 * nexus which has tgt_err_addr in one of its ranges, converting this
1451 	 * to a pci address in the process. Then, starting at this node, do
1452 	 * another tree walk to find a device with the pci address we've
1453 	 * found within range of one of its assigned-addresses properties.
1454 	 */
1455 	tgt_err->tgt_dip = NULL;
1456 	if (panicstr)
1457 		(void) pci_fm_walk_devs(ddi_root_node(), pci_check_ranges,
1458 		    (void *)tgt_err);
1459 	else
1460 		ddi_walk_devs(ddi_root_node(), pci_check_ranges,
1461 		    (void *)tgt_err);
1462 	if (tgt_err->tgt_dip == NULL)
1463 		return;
1464 
1465 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", tgt_err->tgt_bridge_type,
1466 	    tgt_err->tgt_err_class);
1467 	pci_fm_ereport_post(tgt_err->tgt_dip, buf, tgt_err->tgt_err_ena, 0,
1468 	    PCI_PA, DATA_TYPE_UINT64, tgt_err->tgt_err_addr, NULL);
1469 }
1470 
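/*
 * pci_target_enqueue: Queue a captured target error (ena, class, bridge type
 * and physical address) on pci_target_queue for asynchronous processing by
 * pci_target_drain().
 */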
1471 void
1472 pci_target_enqueue(uint64_t ena, char *class, char *bridge_type, uint64_t addr)
1473 {
1474 	pci_target_err_t tgt_err;
1475 
1476 	tgt_err.tgt_err_ena = ena;
1477 	tgt_err.tgt_err_class = class;
1478 	tgt_err.tgt_bridge_type = bridge_type;
1479 	tgt_err.tgt_err_addr = addr;
1480 	errorq_dispatch(pci_target_queue, (void *)&tgt_err,
1481 	    sizeof (pci_target_err_t), ERRORQ_ASYNC);
1482 }
1483 
1484 void
1485 pci_targetq_init(void)
1486 {
1487 	/*
1488 	 * PCI target errorq, to schedule async handling of generation of
1489 	 * target device ereports based on captured physical address.
1490 	 * The errorq is created here but destroyed when _fini is called
1491 	 * for the pci module.
1492 	 */
1493 	if (pci_target_queue == NULL) {
1494 		pci_target_queue = errorq_create("pci_target_queue",
1495 		    (errorq_func_t)pci_target_drain, (void *)NULL,
1496 		    TARGET_MAX_ERRS, sizeof (pci_target_err_t), FM_ERR_PIL,
1497 		    ERRORQ_VITAL);
1498 		if (pci_target_queue == NULL)
1499 			panic("failed to create required system error queue");
1500 	}
1501 }
1502